xref: /dpdk/drivers/net/virtio/virtio_rxtx.c (revision a131d9ec3f4367719ca6b82bfefae8e98cea74c4)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10 
11 #include <rte_cycles.h>
12 #include <rte_memory.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mempool.h>
15 #include <rte_malloc.h>
16 #include <rte_mbuf.h>
17 #include <rte_ether.h>
18 #include <ethdev_driver.h>
19 #include <rte_prefetch.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_byteorder.h>
23 #include <rte_net.h>
24 #include <rte_ip.h>
25 #include <rte_udp.h>
26 #include <rte_tcp.h>
27 
28 #include "virtio_logs.h"
29 #include "virtio_ethdev.h"
30 #include "virtio.h"
31 #include "virtqueue.h"
32 #include "virtio_rxtx.h"
33 #include "virtio_rxtx_simple.h"
34 #include "virtio_ring.h"
35 
36 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
37 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
38 #else
39 #define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
40 #endif
41 
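/* In-order (VIRTIO_F_IN_ORDER) free path: the device uses descriptors in the
 * same order they were made available, so freeing only needs to bump the free
 * counter and advance the tail index, without walking a descriptor chain.
 */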
42 void
43 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
44 {
45 	vq->vq_free_cnt += num;
46 	vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
47 }
48 
49 void
50 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
51 {
52 	struct vring_desc *dp, *dp_tail;
53 	struct vq_desc_extra *dxp;
54 	uint16_t desc_idx_last = desc_idx;
55 
56 	dp  = &vq->vq_split.ring.desc[desc_idx];
57 	dxp = &vq->vq_descx[desc_idx];
58 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
59 	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
60 		while (dp->flags & VRING_DESC_F_NEXT) {
61 			desc_idx_last = dp->next;
62 			dp = &vq->vq_split.ring.desc[dp->next];
63 		}
64 	}
65 	dxp->ndescs = 0;
66 
67 	/*
68 	 * Append the existing free chain, if any, to the end of the newly
69 	 * freed chain. If the virtqueue was completely used, the tail index
70 	 * is VQ_RING_DESC_CHAIN_END and the freed chain becomes the new head.
71 	 */
72 	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
73 		vq->vq_desc_head_idx = desc_idx;
74 	} else {
75 		dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
76 		dp_tail->next = desc_idx;
77 	}
78 
79 	vq->vq_desc_tail_idx = desc_idx_last;
80 	dp->next = VQ_RING_DESC_CHAIN_END;
81 }
82 
83 void
84 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
85 {
86 	uint32_t s = mbuf->pkt_len;
87 	struct rte_ether_addr *ea;
88 
89 	stats->bytes += s;
90 
91 	if (s == 64) {
92 		stats->size_bins[1]++;
93 	} else if (s > 64 && s < 1024) {
94 		uint32_t bin;
95 
96 		/* count leading zeros to map s (65..1023) into the power-of-two bins [2..5] */
97 		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
98 		stats->size_bins[bin]++;
99 	} else {
100 		if (s < 64)
101 			stats->size_bins[0]++;
102 		else if (s < 1519)
103 			stats->size_bins[6]++;
104 		else
105 			stats->size_bins[7]++;
106 	}
107 
108 	ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
109 	if (rte_is_multicast_ether_addr(ea)) {
110 		if (rte_is_broadcast_ether_addr(ea))
111 			stats->broadcast++;
112 		else
113 			stats->multicast++;
114 	}
115 }
116 
117 static inline void
118 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
119 {
120 	VIRTIO_DUMP_PACKET(m, m->data_len);
121 
122 	virtio_update_packet_stats(&rxvq->stats, m);
123 }
124 
125 static uint16_t
126 virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
127 				  struct rte_mbuf **rx_pkts,
128 				  uint32_t *len,
129 				  uint16_t num)
130 {
131 	struct rte_mbuf *cookie;
132 	uint16_t used_idx;
133 	uint16_t id;
134 	struct vring_packed_desc *desc;
135 	uint16_t i;
136 
137 	desc = vq->vq_packed.ring.desc;
138 
139 	for (i = 0; i < num; i++) {
140 		used_idx = vq->vq_used_cons_idx;
141 		/* desc_is_used() has a load-acquire or rte_io_rmb inside;
142 		 * this is where we wait for a used desc in the virtqueue.
143 		 */
144 		if (!desc_is_used(&desc[used_idx], vq))
145 			return i;
146 		len[i] = desc[used_idx].len;
147 		id = desc[used_idx].id;
148 		cookie = (struct rte_mbuf *)vq->vq_descx[id].cookie;
149 		if (unlikely(cookie == NULL)) {
150 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
151 				vq->vq_used_cons_idx);
152 			break;
153 		}
154 		rte_prefetch0(cookie);
155 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
156 		rx_pkts[i] = cookie;
157 
158 		vq->vq_free_cnt++;
159 		vq->vq_used_cons_idx++;
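		/* The packed ring has no free-running index: once the consumer
		 * index wraps past the ring size, flip the used wrap counter so
		 * that desc_is_used() keeps matching the device's flag polarity.
		 */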
160 		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
161 			vq->vq_used_cons_idx -= vq->vq_nentries;
162 			vq->vq_packed.used_wrap_counter ^= 1;
163 		}
164 	}
165 
166 	return i;
167 }
168 
169 static uint16_t
170 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
171 			   uint32_t *len, uint16_t num)
172 {
173 	struct vring_used_elem *uep;
174 	struct rte_mbuf *cookie;
175 	uint16_t used_idx, desc_idx;
176 	uint16_t i;
177 
178 	/* Caller already ensured that 'num' used entries are available */
179 	for (i = 0; i < num ; i++) {
180 		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
181 		uep = &vq->vq_split.ring.used->ring[used_idx];
182 		desc_idx = (uint16_t) uep->id;
183 		len[i] = uep->len;
184 		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
185 
186 		if (unlikely(cookie == NULL)) {
187 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
188 				vq->vq_used_cons_idx);
189 			break;
190 		}
191 
192 		rte_prefetch0(cookie);
193 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
194 		rx_pkts[i]  = cookie;
195 		vq->vq_used_cons_idx++;
196 		vq_ring_free_chain(vq, desc_idx);
197 		vq->vq_descx[desc_idx].cookie = NULL;
198 	}
199 
200 	return i;
201 }
202 
203 static uint16_t
204 virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
205 			struct rte_mbuf **rx_pkts,
206 			uint32_t *len,
207 			uint16_t num)
208 {
209 	struct vring_used_elem *uep;
210 	struct rte_mbuf *cookie;
211 	uint16_t used_idx = 0;
212 	uint16_t i;
213 
214 	if (unlikely(num == 0))
215 		return 0;
216 
217 	for (i = 0; i < num; i++) {
218 		used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
219 		/* Desc idx same as used idx */
220 		uep = &vq->vq_split.ring.used->ring[used_idx];
221 		len[i] = uep->len;
222 		cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
223 
224 		if (unlikely(cookie == NULL)) {
225 			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
226 				vq->vq_used_cons_idx);
227 			break;
228 		}
229 
230 		rte_prefetch0(cookie);
231 		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
232 		rx_pkts[i]  = cookie;
233 		vq->vq_used_cons_idx++;
234 		vq->vq_descx[used_idx].cookie = NULL;
235 	}
236 
237 	vq_ring_free_inorder(vq, used_idx, i);
238 	return i;
239 }
240 
241 static inline int
242 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
243 			struct rte_mbuf **cookies,
244 			uint16_t num)
245 {
246 	struct vq_desc_extra *dxp;
247 	struct virtio_hw *hw = vq->hw;
248 	struct vring_desc *start_dp;
249 	uint16_t head_idx, idx, i = 0;
250 
251 	if (unlikely(vq->vq_free_cnt == 0))
252 		return -ENOSPC;
253 	if (unlikely(vq->vq_free_cnt < num))
254 		return -EMSGSIZE;
255 
256 	head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
257 	start_dp = vq->vq_split.ring.desc;
258 
259 	while (i < num) {
260 		idx = head_idx & (vq->vq_nentries - 1);
261 		dxp = &vq->vq_descx[idx];
262 		dxp->cookie = (void *)cookies[i];
263 		dxp->ndescs = 1;
264 
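		/* Start the buffer hdr_size bytes before the default data offset,
		 * inside the headroom, so the virtio-net header written by the
		 * device lands immediately in front of the packet data.
		 */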
265 		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookies[i], vq) +
266 			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
267 		start_dp[idx].len = cookies[i]->buf_len -
268 			RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
269 		start_dp[idx].flags = VRING_DESC_F_WRITE;
270 
271 		vq_update_avail_ring(vq, idx);
272 		head_idx++;
273 		i++;
274 	}
275 
276 	vq->vq_desc_head_idx += num;
277 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
278 	return 0;
279 }
280 
281 static inline int
282 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
283 				uint16_t num)
284 {
285 	struct vq_desc_extra *dxp;
286 	struct virtio_hw *hw = vq->hw;
287 	struct vring_desc *start_dp = vq->vq_split.ring.desc;
288 	uint16_t idx, i;
289 
290 	if (unlikely(vq->vq_free_cnt == 0))
291 		return -ENOSPC;
292 	if (unlikely(vq->vq_free_cnt < num))
293 		return -EMSGSIZE;
294 
295 	if (unlikely(vq->vq_desc_head_idx >= vq->vq_nentries))
296 		return -EFAULT;
297 
298 	for (i = 0; i < num; i++) {
299 		idx = vq->vq_desc_head_idx;
300 		dxp = &vq->vq_descx[idx];
301 		dxp->cookie = (void *)cookie[i];
302 		dxp->ndescs = 1;
303 
304 		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
305 			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
306 		start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
307 			hw->vtnet_hdr_size;
308 		start_dp[idx].flags = VRING_DESC_F_WRITE;
309 		vq->vq_desc_head_idx = start_dp[idx].next;
310 		vq_update_avail_ring(vq, idx);
311 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) {
312 			vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
313 			break;
314 		}
315 	}
316 
317 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
318 
319 	return 0;
320 }
321 
322 static inline void
323 virtqueue_refill_single_packed(struct virtqueue *vq,
324 			       struct vring_packed_desc *dp,
325 			       struct rte_mbuf *cookie)
326 {
327 	uint16_t flags = vq->vq_packed.cached_flags;
328 	struct virtio_hw *hw = vq->hw;
329 
330 	dp->addr = VIRTIO_MBUF_ADDR(cookie, vq) + RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
331 	dp->len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
332 
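	/* The descriptor flags are written last, with the ordering required by
	 * weak_barriers, so the device never sees a partially filled descriptor.
	 */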
333 	virtqueue_store_flags_packed(dp, flags, hw->weak_barriers);
334 
335 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
336 		vq->vq_avail_idx -= vq->vq_nentries;
337 		vq->vq_packed.cached_flags ^=
338 			VRING_PACKED_DESC_F_AVAIL_USED;
339 		flags = vq->vq_packed.cached_flags;
340 	}
341 }
342 
343 static inline int
344 virtqueue_enqueue_recv_refill_packed_init(struct virtqueue *vq,
345 				     struct rte_mbuf **cookie, uint16_t num)
346 {
347 	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
348 	struct vq_desc_extra *dxp;
349 	uint16_t idx;
350 	int i;
351 
352 	if (unlikely(vq->vq_free_cnt == 0))
353 		return -ENOSPC;
354 	if (unlikely(vq->vq_free_cnt < num))
355 		return -EMSGSIZE;
356 
357 	for (i = 0; i < num; i++) {
358 		idx = vq->vq_avail_idx;
359 		dxp = &vq->vq_descx[idx];
360 		dxp->cookie = (void *)cookie[i];
361 		dxp->ndescs = 1;
362 
363 		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
364 	}
365 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
366 	return 0;
367 }
368 
369 static inline int
370 virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
371 				     struct rte_mbuf **cookie, uint16_t num)
372 {
373 	struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
374 	struct vq_desc_extra *dxp;
375 	uint16_t idx, did;
376 	int i;
377 
378 	if (unlikely(vq->vq_free_cnt == 0))
379 		return -ENOSPC;
380 	if (unlikely(vq->vq_free_cnt < num))
381 		return -EMSGSIZE;
382 
383 	for (i = 0; i < num; i++) {
384 		idx = vq->vq_avail_idx;
385 		did = start_dp[idx].id;
386 		dxp = &vq->vq_descx[did];
387 		dxp->cookie = (void *)cookie[i];
388 		dxp->ndescs = 1;
389 
390 		virtqueue_refill_single_packed(vq, &start_dp[idx], cookie[i]);
391 	}
392 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
393 	return 0;
394 }
395 
396 /* When doing TSO, the IP payload length is not included in the pseudo
397  * header checksum of the packet given to the PMD, but virtio expects
398  * it to be included, so fold it in here.
399  */
400 static void
401 virtio_tso_fix_cksum(struct rte_mbuf *m)
402 {
403 	/* common case: header is not fragmented */
404 	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
405 			m->l4_len)) {
406 		struct rte_ipv4_hdr *iph;
407 		struct rte_tcp_hdr *th;
408 		uint16_t prev_cksum, new_cksum;
409 		uint32_t ip_paylen;
410 		uint32_t tmp;
411 
412 		iph = rte_pktmbuf_mtod_offset(m,
413 					struct rte_ipv4_hdr *, m->l2_len);
414 		th = RTE_PTR_ADD(iph, m->l3_len);
415 
416 		/*
417 		 * Recompute the IPv4 header checksum with the current total
418 		 * length value (whatever it is) so that it remains correct
419 		 * after the edits done by TSO.
420 		 */
421 		if ((iph->version_ihl >> 4) == 4) {
422 			iph->hdr_checksum = 0;
423 			iph->hdr_checksum = rte_ipv4_cksum(iph);
424 		}
425 
426 		/*
427 		 * Do not use the IPv4 total length or IPv6 payload length field to
428 		 * get the TSO payload length, since it may not fit into 16 bits.
429 		 */
430 		ip_paylen = rte_cpu_to_be_32(rte_pktmbuf_pkt_len(m) - m->l2_len -
431 					m->l3_len);
432 
433 		/* fold ip_paylen into the pseudo-header checksum (one's-complement add) */
434 		prev_cksum = th->cksum;
435 		tmp = prev_cksum;
436 		tmp += (ip_paylen & 0xffff) + (ip_paylen >> 16);
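		/* the additions may carry past 16 bits; fold the carry back in to
		 * keep the one's-complement sum within 16 bits
		 */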
437 		tmp = (tmp & 0xffff) + (tmp >> 16);
438 		new_cksum = tmp;
439 
440 		/* replace it in the packet */
441 		th->cksum = new_cksum;
442 	}
443 }
444
448 static inline void
449 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
450 			struct rte_mbuf **cookies,
451 			uint16_t num)
452 {
453 	struct vq_desc_extra *dxp;
454 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
455 	struct vring_desc *start_dp;
456 	struct virtio_net_hdr *hdr;
457 	uint16_t idx;
458 	int16_t head_size = vq->hw->vtnet_hdr_size;
459 	uint16_t i = 0;
460 
461 	idx = vq->vq_desc_head_idx;
462 	start_dp = vq->vq_split.ring.desc;
463 
464 	while (i < num) {
465 		idx = idx & (vq->vq_nentries - 1);
466 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
467 		dxp->cookie = (void *)cookies[i];
468 		dxp->ndescs = 1;
469 		virtio_update_packet_stats(&txvq->stats, cookies[i]);
470 
471 		hdr = rte_pktmbuf_mtod_offset(cookies[i],
472 				struct virtio_net_hdr *, -head_size);
473 
474 		/* if offload disabled, hdr is not zeroed yet, do it now */
475 		if (!vq->hw->has_tx_offload)
476 			virtqueue_clear_net_hdr(hdr);
477 		else
478 			virtqueue_xmit_offload(hdr, cookies[i]);
479 
480 		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
481 		start_dp[idx].len = cookies[i]->data_len + head_size;
482 		start_dp[idx].flags = 0;
483 
484 
485 		vq_update_avail_ring(vq, idx);
486 
487 		idx++;
488 		i++;
489 	}
490 
491 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
492 	vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
493 }
494 
495 static inline void
496 virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
497 				   struct rte_mbuf *cookie,
498 				   int in_order)
499 {
500 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
501 	struct vring_packed_desc *dp;
502 	struct vq_desc_extra *dxp;
503 	uint16_t idx, id, flags;
504 	int16_t head_size = vq->hw->vtnet_hdr_size;
505 	struct virtio_net_hdr *hdr;
506 
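	/* With IN_ORDER the buffer id simply follows the avail index; otherwise
	 * it is taken from the driver-maintained free descriptor list.
	 */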
507 	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
508 	idx = vq->vq_avail_idx;
509 	dp = &vq->vq_packed.ring.desc[idx];
510 
511 	dxp = &vq->vq_descx[id];
512 	dxp->ndescs = 1;
513 	dxp->cookie = cookie;
514 
515 	flags = vq->vq_packed.cached_flags;
516 
517 	/* prepend cannot fail, checked by caller */
518 	hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
519 				      -head_size);
520 
521 	/* if offload disabled, hdr is not zeroed yet, do it now */
522 	if (!vq->hw->has_tx_offload)
523 		virtqueue_clear_net_hdr(hdr);
524 	else
525 		virtqueue_xmit_offload(hdr, cookie);
526 
527 	dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
528 	dp->len = cookie->data_len + head_size;
529 	dp->id = id;
530 
531 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
532 		vq->vq_avail_idx -= vq->vq_nentries;
533 		vq->vq_packed.cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
534 	}
535 
536 	vq->vq_free_cnt--;
537 
538 	if (!in_order) {
539 		vq->vq_desc_head_idx = dxp->next;
540 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
541 			vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;
542 	}
543 
544 	virtqueue_store_flags_packed(dp, flags, vq->hw->weak_barriers);
545 }
546 
547 static inline void
548 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
549 			uint16_t needed, int use_indirect, int can_push,
550 			int in_order)
551 {
552 	struct virtio_tx_region *txr = txvq->hdr_mz->addr;
553 	struct vq_desc_extra *dxp;
554 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
555 	struct vring_desc *start_dp;
556 	uint16_t seg_num = cookie->nb_segs;
557 	uint16_t head_idx, idx;
558 	int16_t head_size = vq->hw->vtnet_hdr_size;
559 	bool prepend_header = false;
560 	struct virtio_net_hdr *hdr;
561 
562 	head_idx = vq->vq_desc_head_idx;
563 	idx = head_idx;
564 	if (in_order)
565 		dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
566 	else
567 		dxp = &vq->vq_descx[idx];
568 	dxp->cookie = (void *)cookie;
569 	dxp->ndescs = needed;
570 
571 	start_dp = vq->vq_split.ring.desc;
572 
573 	if (can_push) {
574 		/* prepend cannot fail, checked by caller */
575 		hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
576 					      -head_size);
577 		prepend_header = true;
578 
579 		/* if offload disabled, it is not zeroed below, do it now */
580 		if (!vq->hw->has_tx_offload)
581 			virtqueue_clear_net_hdr(hdr);
582 	} else if (use_indirect) {
583 		/* setup tx ring slot to point to indirect
584 		 * descriptor list stored in reserved region.
585 		 *
586 		 * the first slot in indirect ring is already preset
587 		 * to point to the header in reserved region
588 		 */
589 		start_dp[idx].addr = txvq->hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
590 		start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc);
591 		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
592 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
593 
594 		/* loop below will fill in rest of the indirect elements */
595 		start_dp = txr[idx].tx_indir;
596 		idx = 1;
597 	} else {
598 		/* setup first tx ring slot to point to header
599 		 * stored in reserved region.
600 		 */
601 		start_dp[idx].addr = txvq->hdr_mem + RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
602 		start_dp[idx].len = vq->hw->vtnet_hdr_size;
603 		start_dp[idx].flags = VRING_DESC_F_NEXT;
604 		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
605 
606 		idx = start_dp[idx].next;
607 	}
608 
609 	if (vq->hw->has_tx_offload)
610 		virtqueue_xmit_offload(hdr, cookie);
611 
612 	do {
613 		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
614 		start_dp[idx].len = cookie->data_len;
615 		if (prepend_header) {
616 			start_dp[idx].addr -= head_size;
617 			start_dp[idx].len += head_size;
618 			prepend_header = false;
619 		}
620 		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
621 		idx = start_dp[idx].next;
622 	} while ((cookie = cookie->next) != NULL);
623 
624 	if (use_indirect)
625 		idx = vq->vq_split.ring.desc[head_idx].next;
626 
627 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
628 
629 	vq->vq_desc_head_idx = idx;
630 	vq_update_avail_ring(vq, head_idx);
631 
632 	if (!in_order) {
633 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
634 			vq->vq_desc_tail_idx = idx;
635 	}
636 }
637 
638 void
639 virtio_dev_cq_start(struct rte_eth_dev *dev)
640 {
641 	struct virtio_hw *hw = dev->data->dev_private;
642 
643 	if (hw->cvq) {
644 		rte_spinlock_init(&hw->cvq->lock);
645 		VIRTQUEUE_DUMP(virtnet_cq_to_vq(hw->cvq));
646 	}
647 }
648 
649 int
650 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
651 			uint16_t queue_idx,
652 			uint16_t nb_desc,
653 			unsigned int socket_id __rte_unused,
654 			const struct rte_eth_rxconf *rx_conf,
655 			struct rte_mempool *mp)
656 {
657 	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
658 	struct virtio_hw *hw = dev->data->dev_private;
659 	struct virtqueue *vq = hw->vqs[vq_idx];
660 	struct virtnet_rx *rxvq;
661 	uint16_t rx_free_thresh;
662 	uint16_t buf_size;
663 	const char *error;
664 
665 	PMD_INIT_FUNC_TRACE();
666 
667 	if (rx_conf->rx_deferred_start) {
668 		PMD_INIT_LOG(ERR, "Rx deferred start is not supported");
669 		return -EINVAL;
670 	}
671 
672 	buf_size = virtio_rx_mem_pool_buf_size(mp);
673 	if (!virtio_rx_check_scatter(hw->max_rx_pkt_len, buf_size,
674 				     hw->rx_ol_scatter, &error)) {
675 		PMD_INIT_LOG(ERR, "RxQ %u Rx scatter check failed: %s",
676 			     queue_idx, error);
677 		return -EINVAL;
678 	}
679 
680 	rx_free_thresh = rx_conf->rx_free_thresh;
681 	if (rx_free_thresh == 0)
682 		rx_free_thresh =
683 			RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);
684 
685 	if (rx_free_thresh & 0x3) {
686 		PMD_INIT_LOG(ERR, "rx_free_thresh must be a multiple of four."
687 			" (rx_free_thresh=%u port=%u queue=%u)",
688 			rx_free_thresh, dev->data->port_id, queue_idx);
689 		return -EINVAL;
690 	}
691 
692 	if (rx_free_thresh >= vq->vq_nentries) {
693 		PMD_INIT_LOG(ERR, "rx_free_thresh must be less than the "
694 			"number of RX entries (%u)."
695 			" (rx_free_thresh=%u port=%u queue=%u)",
696 			vq->vq_nentries,
697 			rx_free_thresh, dev->data->port_id, queue_idx);
698 		return -EINVAL;
699 	}
700 	vq->vq_free_thresh = rx_free_thresh;
701 
702 	/*
703 	 * For split ring vectorized path descriptors number must be
704 	 * equal to the ring size.
705 	 */
706 	if (nb_desc > vq->vq_nentries ||
707 	    (!virtio_with_packed_queue(hw) && hw->use_vec_rx)) {
708 		nb_desc = vq->vq_nentries;
709 	}
710 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
711 
712 	rxvq = &vq->rxq;
713 	rxvq->mpool = mp;
714 	dev->data->rx_queues[queue_idx] = rxvq;
715 
716 	return 0;
717 }
718 
719 int
720 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
721 {
722 	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
723 	struct virtio_hw *hw = dev->data->dev_private;
724 	struct virtqueue *vq = hw->vqs[vq_idx];
725 	struct virtnet_rx *rxvq = &vq->rxq;
726 	struct rte_mbuf *m;
727 	uint16_t desc_idx;
728 	int error, nbufs, i;
729 	bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
730 
731 	PMD_INIT_FUNC_TRACE();
732 
733 	/* Allocate blank mbufs for each rx descriptor */
734 	nbufs = 0;
735 
736 	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
737 		for (desc_idx = 0; desc_idx < vq->vq_nentries;
738 		     desc_idx++) {
739 			vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
740 			vq->vq_split.ring.desc[desc_idx].flags =
741 				VRING_DESC_F_WRITE;
742 		}
743 
744 		virtio_rxq_vec_setup(rxvq);
745 	}
746 
747 	if (hw->use_vec_rx) {
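		/* Pad the tail of sw_ring with the fake mbuf so the vectorized Rx
		 * path can safely over-read past the last ring entry.
		 */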
748 		memset(rxvq->fake_mbuf, 0, sizeof(*rxvq->fake_mbuf));
749 		for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; desc_idx++)
750 			vq->rxq.sw_ring[vq->vq_nentries + desc_idx] = rxvq->fake_mbuf;
751 	}
752 
753 	if (hw->use_vec_rx && !virtio_with_packed_queue(hw)) {
754 		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
755 			virtio_rxq_rearm_vec(rxvq);
756 			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
757 		}
758 	} else if (!virtio_with_packed_queue(vq->hw) && in_order) {
759 		if ((!virtqueue_full(vq))) {
760 			uint16_t free_cnt = vq->vq_free_cnt;
761 			struct rte_mbuf *pkts[free_cnt];
762 
763 			if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
764 				free_cnt)) {
765 				error = virtqueue_enqueue_refill_inorder(vq,
766 						pkts,
767 						free_cnt);
768 				if (unlikely(error)) {
769 					for (i = 0; i < free_cnt; i++)
770 						rte_pktmbuf_free(pkts[i]);
771 				} else {
772 					nbufs += free_cnt;
773 				}
774 			}
775 
776 			vq_update_avail_idx(vq);
777 		}
778 	} else {
779 		while (!virtqueue_full(vq)) {
780 			m = rte_mbuf_raw_alloc(rxvq->mpool);
781 			if (m == NULL)
782 				break;
783 
784 			/* Enqueue allocated buffers */
785 			if (virtio_with_packed_queue(vq->hw))
786 				error = virtqueue_enqueue_recv_refill_packed_init(vq,
787 						&m, 1);
788 			else
789 				error = virtqueue_enqueue_recv_refill(vq,
790 						&m, 1);
791 			if (error) {
792 				rte_pktmbuf_free(m);
793 				break;
794 			}
795 			nbufs++;
796 		}
797 
798 		if (!virtio_with_packed_queue(vq->hw))
799 			vq_update_avail_idx(vq);
800 	}
801 
802 	PMD_INIT_LOG(DEBUG, "Allocated %d bufs (port=%u queue=%u)", nbufs,
803 		     dev->data->port_id, queue_idx);
804 
805 	VIRTQUEUE_DUMP(vq);
806 
807 	return 0;
808 }
809 
810 /*
811  * struct rte_eth_dev *dev: device whose Tx queue is being set up
812  * uint16_t queue_idx: index into the device's Tx queue list
813  * uint16_t nb_desc: clamped to the ring size read from config space
814  * unsigned int socket_id: unused, the queues are allocated at device init
815  * const struct rte_eth_txconf *tx_conf: used to set up the Tx engine
816  */
817 int
818 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
819 			uint16_t queue_idx,
820 			uint16_t nb_desc,
821 			unsigned int socket_id __rte_unused,
822 			const struct rte_eth_txconf *tx_conf)
823 {
824 	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
825 	struct virtio_hw *hw = dev->data->dev_private;
826 	struct virtqueue *vq = hw->vqs[vq_idx];
827 	struct virtnet_tx *txvq;
828 	uint16_t tx_free_thresh;
829 
830 	PMD_INIT_FUNC_TRACE();
831 
832 	if (tx_conf->tx_deferred_start) {
833 		PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
834 		return -EINVAL;
835 	}
836 
837 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
838 		nb_desc = vq->vq_nentries;
839 	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
840 
841 	txvq = &vq->txq;
842 
843 	tx_free_thresh = tx_conf->tx_free_thresh;
844 	if (tx_free_thresh == 0)
845 		tx_free_thresh =
846 			RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
847 
848 	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
849 		PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
850 			"number of TX entries minus 3 (%u)."
851 			" (tx_free_thresh=%u port=%u queue=%u)",
852 			vq->vq_nentries - 3,
853 			tx_free_thresh, dev->data->port_id, queue_idx);
854 		return -EINVAL;
855 	}
856 
857 	vq->vq_free_thresh = tx_free_thresh;
858 
859 	dev->data->tx_queues[queue_idx] = txvq;
860 	return 0;
861 }
862 
863 int
864 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
865 				uint16_t queue_idx)
866 {
867 	uint16_t vq_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
868 	struct virtio_hw *hw = dev->data->dev_private;
869 	struct virtqueue *vq = hw->vqs[vq_idx];
870 
871 	PMD_INIT_FUNC_TRACE();
872 
873 	if (!virtio_with_packed_queue(hw)) {
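		/* For the in-order split ring, link the last descriptor back to
		 * the first so the descriptor array forms a circular chain.
		 */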
874 		if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
875 			vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
876 	}
877 
878 	VIRTQUEUE_DUMP(vq);
879 
880 	return 0;
881 }
882 
883 static inline void
884 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
885 {
886 	int error;
887 	/*
888 	 * Requeue the discarded mbuf. This should always be
889 	 * successful since it was just dequeued.
890 	 */
891 	if (virtio_with_packed_queue(vq->hw))
892 		error = virtqueue_enqueue_recv_refill_packed(vq, &m, 1);
893 	else
894 		error = virtqueue_enqueue_recv_refill(vq, &m, 1);
895 
896 	if (unlikely(error)) {
897 		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
898 		rte_pktmbuf_free(m);
899 	}
900 }
901 
902 static inline void
903 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
904 {
905 	int error;
906 
907 	error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
908 	if (unlikely(error)) {
909 		PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
910 		rte_pktmbuf_free(m);
911 	}
912 }
913 
914 /* Parse the virtio-net header and optionally fill Rx offload information in the mbuf */
915 static inline int
916 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
917 {
918 	struct rte_net_hdr_lens hdr_lens;
919 	uint32_t hdrlen, ptype;
920 	int l4_supported = 0;
921 
922 	/* nothing to do */
923 	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
924 		return 0;
925 
926 	m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
927 
928 	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
929 	m->packet_type = ptype;
930 	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
931 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
932 	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
933 		l4_supported = 1;
934 
935 	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
936 		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
937 		if (hdr->csum_start <= hdrlen && l4_supported) {
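			/* The sender left the L4 checksum for us to complete: the
			 * payload is assumed valid but the checksum field is not
			 * filled in, so report it as not checked.
			 */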
938 			m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
939 		} else {
940 			/* Unknown proto or tunnel, do sw cksum. We can assume
941 			 * the cksum field is in the first segment since the
942 			 * buffers we provided to the host are large enough.
943 			 * In case of SCTP, this will be wrong since it's a CRC
944 			 * but there's nothing we can do.
945 			 */
946 			uint16_t csum = 0, off;
947 
948 			if (rte_raw_cksum_mbuf(m, hdr->csum_start,
949 				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
950 				&csum) < 0)
951 				return -EINVAL;
952 			if (likely(csum != 0xffff))
953 				csum = ~csum;
954 			off = hdr->csum_offset + hdr->csum_start;
955 			if (rte_pktmbuf_data_len(m) >= off + 1)
956 				*rte_pktmbuf_mtod_offset(m, uint16_t *,
957 					off) = csum;
958 		}
959 	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
960 		m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
961 	}
962 
963 	/* GSO request, save required information in mbuf */
964 	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
965 		/* Check unsupported modes */
966 		if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
967 		    (hdr->gso_size == 0)) {
968 			return -EINVAL;
969 		}
970 
971 		/* Update mss lengths in mbuf */
972 		m->tso_segsz = hdr->gso_size;
973 		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
974 			case VIRTIO_NET_HDR_GSO_TCPV4:
975 			case VIRTIO_NET_HDR_GSO_TCPV6:
976 				m->ol_flags |= RTE_MBUF_F_RX_LRO |
977 					RTE_MBUF_F_RX_L4_CKSUM_NONE;
978 				break;
979 			default:
980 				return -EINVAL;
981 		}
982 	}
983 
984 	return 0;
985 }
986 
987 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
988 uint16_t
989 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
990 {
991 	struct virtnet_rx *rxvq = rx_queue;
992 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
993 	struct virtio_hw *hw = vq->hw;
994 	struct rte_mbuf *rxm;
995 	uint16_t nb_used, num, nb_rx;
996 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
997 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
998 	int error;
999 	uint32_t i, nb_enqueued;
1000 	uint32_t hdr_size;
1001 	struct virtio_net_hdr *hdr;
1002 
1003 	nb_rx = 0;
1004 	if (unlikely(hw->started == 0))
1005 		return nb_rx;
1006 
1007 	nb_used = virtqueue_nused(vq);
1008 
1009 	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1010 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1011 		num = VIRTIO_MBUF_BURST_SZ;
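	/* Trim the burst so that (used index + num) is a multiple of
	 * DESC_PER_CACHELINE: the dequeue then stops at a descriptor cache
	 * line boundary and the next burst starts on a fresh cache line.
	 */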
1012 	if (likely(num > DESC_PER_CACHELINE))
1013 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1014 
1015 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1016 	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
1017 
1018 	nb_enqueued = 0;
1019 	hdr_size = hw->vtnet_hdr_size;
1020 
1021 	for (i = 0; i < num ; i++) {
1022 		rxm = rcv_pkts[i];
1023 
1024 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1025 
1026 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1027 			PMD_RX_LOG(ERR, "Packet drop");
1028 			nb_enqueued++;
1029 			virtio_discard_rxbuf(vq, rxm);
1030 			rxvq->stats.errors++;
1031 			continue;
1032 		}
1033 
1034 		rxm->port = hw->port_id;
1035 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1036 		rxm->ol_flags = 0;
1037 		rxm->vlan_tci = 0;
1038 
1039 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1040 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1041 
1042 		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1043 			RTE_PKTMBUF_HEADROOM - hdr_size);
1044 
1045 		if (hw->vlan_strip)
1046 			rte_vlan_strip(rxm);
1047 
1048 		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1049 			virtio_discard_rxbuf(vq, rxm);
1050 			rxvq->stats.errors++;
1051 			continue;
1052 		}
1053 
1054 		virtio_rx_stats_updated(rxvq, rxm);
1055 
1056 		rx_pkts[nb_rx++] = rxm;
1057 	}
1058 
1059 	rxvq->stats.packets += nb_rx;
1060 
1061 	/* Allocate new mbufs to refill the used descriptors */
1062 	if (likely(!virtqueue_full(vq))) {
1063 		uint16_t free_cnt = vq->vq_free_cnt;
1064 		struct rte_mbuf *new_pkts[free_cnt];
1065 
1066 		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1067 						free_cnt) == 0)) {
1068 			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1069 					free_cnt);
1070 			if (unlikely(error)) {
1071 				for (i = 0; i < free_cnt; i++)
1072 					rte_pktmbuf_free(new_pkts[i]);
1073 			}
1074 			nb_enqueued += free_cnt;
1075 		} else {
1076 			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
1077 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1078 		}
1079 	}
1080 
1081 	if (likely(nb_enqueued)) {
1082 		vq_update_avail_idx(vq);
1083 
1084 		if (unlikely(virtqueue_kick_prepare(vq))) {
1085 			virtqueue_notify(vq);
1086 			PMD_RX_LOG(DEBUG, "Notified");
1087 		}
1088 	}
1089 
1090 	return nb_rx;
1091 }
1092 
1093 uint16_t
1094 virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
1095 			uint16_t nb_pkts)
1096 {
1097 	struct virtnet_rx *rxvq = rx_queue;
1098 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1099 	struct virtio_hw *hw = vq->hw;
1100 	struct rte_mbuf *rxm;
1101 	uint16_t num, nb_rx;
1102 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1103 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1104 	int error;
1105 	uint32_t i, nb_enqueued;
1106 	uint32_t hdr_size;
1107 	struct virtio_net_hdr *hdr;
1108 
1109 	nb_rx = 0;
1110 	if (unlikely(hw->started == 0))
1111 		return nb_rx;
1112 
1113 	num = RTE_MIN(VIRTIO_MBUF_BURST_SZ, nb_pkts);
1114 	if (likely(num > DESC_PER_CACHELINE))
1115 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1116 
1117 	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1118 	PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1119 
1120 	nb_enqueued = 0;
1121 	hdr_size = hw->vtnet_hdr_size;
1122 
1123 	for (i = 0; i < num; i++) {
1124 		rxm = rcv_pkts[i];
1125 
1126 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1127 
1128 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1129 			PMD_RX_LOG(ERR, "Packet drop");
1130 			nb_enqueued++;
1131 			virtio_discard_rxbuf(vq, rxm);
1132 			rxvq->stats.errors++;
1133 			continue;
1134 		}
1135 
1136 		rxm->port = hw->port_id;
1137 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1138 		rxm->ol_flags = 0;
1139 		rxm->vlan_tci = 0;
1140 
1141 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1142 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1143 
1144 		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
1145 			RTE_PKTMBUF_HEADROOM - hdr_size);
1146 
1147 		if (hw->vlan_strip)
1148 			rte_vlan_strip(rxm);
1149 
1150 		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
1151 			virtio_discard_rxbuf(vq, rxm);
1152 			rxvq->stats.errors++;
1153 			continue;
1154 		}
1155 
1156 		virtio_rx_stats_updated(rxvq, rxm);
1157 
1158 		rx_pkts[nb_rx++] = rxm;
1159 	}
1160 
1161 	rxvq->stats.packets += nb_rx;
1162 
1163 	/* Allocate new mbufs to refill the used descriptors */
1164 	if (likely(!virtqueue_full(vq))) {
1165 		uint16_t free_cnt = vq->vq_free_cnt;
1166 		struct rte_mbuf *new_pkts[free_cnt];
1167 
1168 		if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts,
1169 						free_cnt) == 0)) {
1170 			error = virtqueue_enqueue_recv_refill_packed(vq,
1171 					new_pkts, free_cnt);
1172 			if (unlikely(error)) {
1173 				for (i = 0; i < free_cnt; i++)
1174 					rte_pktmbuf_free(new_pkts[i]);
1175 			}
1176 			nb_enqueued += free_cnt;
1177 		} else {
1178 			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
1179 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1180 		}
1181 	}
1182 
1183 	if (likely(nb_enqueued)) {
1184 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1185 			virtqueue_notify(vq);
1186 			PMD_RX_LOG(DEBUG, "Notified");
1187 		}
1188 	}
1189 
1190 	return nb_rx;
1191 }
1192 
1193 
1194 uint16_t
1195 virtio_recv_pkts_inorder(void *rx_queue,
1196 			struct rte_mbuf **rx_pkts,
1197 			uint16_t nb_pkts)
1198 {
1199 	struct virtnet_rx *rxvq = rx_queue;
1200 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1201 	struct virtio_hw *hw = vq->hw;
1202 	struct rte_mbuf *rxm;
1203 	struct rte_mbuf *prev = NULL;
1204 	uint16_t nb_used, num, nb_rx;
1205 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1206 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1207 	int error;
1208 	uint32_t nb_enqueued;
1209 	uint32_t seg_num;
1210 	uint32_t seg_res;
1211 	uint32_t hdr_size;
1212 	int32_t i;
1213 
1214 	nb_rx = 0;
1215 	if (unlikely(hw->started == 0))
1216 		return nb_rx;
1217 
1218 	nb_used = virtqueue_nused(vq);
1219 	nb_used = RTE_MIN(nb_used, nb_pkts);
1220 	nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
1221 
1222 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1223 
1224 	nb_enqueued = 0;
1225 	seg_num = 1;
1226 	seg_res = 0;
1227 	hdr_size = hw->vtnet_hdr_size;
1228 
1229 	num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
1230 
1231 	for (i = 0; i < num; i++) {
1232 		struct virtio_net_hdr_mrg_rxbuf *header;
1233 
1234 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1235 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1236 
1237 		rxm = rcv_pkts[i];
1238 
1239 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1240 			PMD_RX_LOG(ERR, "Packet drop");
1241 			nb_enqueued++;
1242 			virtio_discard_rxbuf_inorder(vq, rxm);
1243 			rxvq->stats.errors++;
1244 			continue;
1245 		}
1246 
1247 		header = (struct virtio_net_hdr_mrg_rxbuf *)
1248 			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1249 			 - hdr_size);
1250 
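		/* With mergeable Rx buffers the header tells how many descriptors
		 * this packet spans; otherwise a packet always fits in one buffer.
		 */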
1251 		if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1252 			seg_num = header->num_buffers;
1253 			if (seg_num == 0)
1254 				seg_num = 1;
1255 		} else {
1256 			seg_num = 1;
1257 		}
1258 
1259 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1260 		rxm->nb_segs = seg_num;
1261 		rxm->ol_flags = 0;
1262 		rxm->vlan_tci = 0;
1263 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1264 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1265 
1266 		rxm->port = hw->port_id;
1267 
1268 		rx_pkts[nb_rx] = rxm;
1269 		prev = rxm;
1270 
1271 		if (vq->hw->has_rx_offload &&
1272 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1273 			virtio_discard_rxbuf_inorder(vq, rxm);
1274 			rxvq->stats.errors++;
1275 			continue;
1276 		}
1277 
1278 		if (hw->vlan_strip)
1279 			rte_vlan_strip(rx_pkts[nb_rx]);
1280 
1281 		seg_res = seg_num - 1;
1282 
1283 		/* Merge remaining segments */
1284 		while (seg_res != 0 && i < (num - 1)) {
1285 			i++;
1286 
1287 			rxm = rcv_pkts[i];
1288 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1289 			rxm->pkt_len = (uint32_t)(len[i]);
1290 			rxm->data_len = (uint16_t)(len[i]);
1291 
1292 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1293 
1294 			prev->next = rxm;
1295 			prev = rxm;
1296 			seg_res -= 1;
1297 		}
1298 
1299 		if (!seg_res) {
1300 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1301 			nb_rx++;
1302 		}
1303 	}
1304 
1305 	/* The last packet may still need its remaining segments merged */
1306 	while (seg_res != 0) {
1307 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1308 					VIRTIO_MBUF_BURST_SZ);
1309 
1310 		if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1311 			num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
1312 							   rcv_cnt);
1313 			uint16_t extra_idx = 0;
1314 
1315 			rcv_cnt = num;
1316 			while (extra_idx < rcv_cnt) {
1317 				rxm = rcv_pkts[extra_idx];
1318 				rxm->data_off =
1319 					RTE_PKTMBUF_HEADROOM - hdr_size;
1320 				rxm->pkt_len = (uint32_t)(len[extra_idx]);
1321 				rxm->data_len = (uint16_t)(len[extra_idx]);
1322 				prev->next = rxm;
1323 				prev = rxm;
1324 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1325 				extra_idx += 1;
1326 			}
1327 			seg_res -= rcv_cnt;
1328 
1329 			if (!seg_res) {
1330 				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1331 				nb_rx++;
1332 			}
1333 		} else {
1334 			PMD_RX_LOG(ERR,
1335 					"Not enough segments for packet.");
1336 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1337 			rxvq->stats.errors++;
1338 			break;
1339 		}
1340 	}
1341 
1342 	rxvq->stats.packets += nb_rx;
1343 
1344 	/* Allocate new mbufs to refill the used descriptors */
1345 
1346 	if (likely(!virtqueue_full(vq))) {
1347 		/* free_cnt may include mrg descs */
1348 		uint16_t free_cnt = vq->vq_free_cnt;
1349 		struct rte_mbuf *new_pkts[free_cnt];
1350 
1351 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1352 			error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
1353 					free_cnt);
1354 			if (unlikely(error)) {
1355 				for (i = 0; i < free_cnt; i++)
1356 					rte_pktmbuf_free(new_pkts[i]);
1357 			}
1358 			nb_enqueued += free_cnt;
1359 		} else {
1360 			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
1361 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1362 		}
1363 	}
1364 
1365 	if (likely(nb_enqueued)) {
1366 		vq_update_avail_idx(vq);
1367 
1368 		if (unlikely(virtqueue_kick_prepare(vq))) {
1369 			virtqueue_notify(vq);
1370 			PMD_RX_LOG(DEBUG, "Notified");
1371 		}
1372 	}
1373 
1374 	return nb_rx;
1375 }
1376 
1377 uint16_t
1378 virtio_recv_mergeable_pkts(void *rx_queue,
1379 			struct rte_mbuf **rx_pkts,
1380 			uint16_t nb_pkts)
1381 {
1382 	struct virtnet_rx *rxvq = rx_queue;
1383 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1384 	struct virtio_hw *hw = vq->hw;
1385 	struct rte_mbuf *rxm;
1386 	struct rte_mbuf *prev = NULL;
1387 	uint16_t nb_used, num, nb_rx = 0;
1388 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1389 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1390 	int error;
1391 	uint32_t nb_enqueued = 0;
1392 	uint32_t seg_num = 0;
1393 	uint32_t seg_res = 0;
1394 	uint32_t hdr_size = hw->vtnet_hdr_size;
1395 	int32_t i;
1396 
1397 	if (unlikely(hw->started == 0))
1398 		return nb_rx;
1399 
1400 	nb_used = virtqueue_nused(vq);
1401 
1402 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1403 
1404 	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
1405 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1406 		num = VIRTIO_MBUF_BURST_SZ;
1407 	if (likely(num > DESC_PER_CACHELINE))
1408 		num = num - ((vq->vq_used_cons_idx + num) %
1409 				DESC_PER_CACHELINE);
1410 
1411 
1412 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
1413 
1414 	for (i = 0; i < num; i++) {
1415 		struct virtio_net_hdr_mrg_rxbuf *header;
1416 
1417 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1418 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1419 
1420 		rxm = rcv_pkts[i];
1421 
1422 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1423 			PMD_RX_LOG(ERR, "Packet drop");
1424 			nb_enqueued++;
1425 			virtio_discard_rxbuf(vq, rxm);
1426 			rxvq->stats.errors++;
1427 			continue;
1428 		}
1429 
1430 		header = (struct virtio_net_hdr_mrg_rxbuf *)
1431 			 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1432 			 - hdr_size);
1433 		seg_num = header->num_buffers;
1434 		if (seg_num == 0)
1435 			seg_num = 1;
1436 
1437 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1438 		rxm->nb_segs = seg_num;
1439 		rxm->ol_flags = 0;
1440 		rxm->vlan_tci = 0;
1441 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1442 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1443 
1444 		rxm->port = hw->port_id;
1445 
1446 		rx_pkts[nb_rx] = rxm;
1447 		prev = rxm;
1448 
1449 		if (hw->has_rx_offload &&
1450 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1451 			virtio_discard_rxbuf(vq, rxm);
1452 			rxvq->stats.errors++;
1453 			continue;
1454 		}
1455 
1456 		if (hw->vlan_strip)
1457 			rte_vlan_strip(rx_pkts[nb_rx]);
1458 
1459 		seg_res = seg_num - 1;
1460 
1461 		/* Merge remaining segments */
1462 		while (seg_res != 0 && i < (num - 1)) {
1463 			i++;
1464 
1465 			rxm = rcv_pkts[i];
1466 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1467 			rxm->pkt_len = (uint32_t)(len[i]);
1468 			rxm->data_len = (uint16_t)(len[i]);
1469 
1470 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1471 
1472 			prev->next = rxm;
1473 			prev = rxm;
1474 			seg_res -= 1;
1475 		}
1476 
1477 		if (!seg_res) {
1478 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1479 			nb_rx++;
1480 		}
1481 	}
1482 
1483 	/* The last packet may still need its remaining segments merged */
1484 	while (seg_res != 0) {
1485 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1486 					VIRTIO_MBUF_BURST_SZ);
1487 
1488 		if (likely(virtqueue_nused(vq) >= rcv_cnt)) {
1489 			num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len,
1490 							   rcv_cnt);
1491 			uint16_t extra_idx = 0;
1492 
1493 			rcv_cnt = num;
1494 			while (extra_idx < rcv_cnt) {
1495 				rxm = rcv_pkts[extra_idx];
1496 				rxm->data_off =
1497 					RTE_PKTMBUF_HEADROOM - hdr_size;
1498 				rxm->pkt_len = (uint32_t)(len[extra_idx]);
1499 				rxm->data_len = (uint16_t)(len[extra_idx]);
1500 				prev->next = rxm;
1501 				prev = rxm;
1502 				rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1503 				extra_idx += 1;
1504 			}
1505 			seg_res -= rcv_cnt;
1506 
1507 			if (!seg_res) {
1508 				virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1509 				nb_rx++;
1510 			}
1511 		} else {
1512 			PMD_RX_LOG(ERR,
1513 					"Not enough segments for packet.");
1514 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1515 			rxvq->stats.errors++;
1516 			break;
1517 		}
1518 	}
1519 
1520 	rxvq->stats.packets += nb_rx;
1521 
1522 	/* Allocate new mbufs to refill the used descriptors */
1523 	if (likely(!virtqueue_full(vq))) {
1524 		/* free_cnt may include mrg descs */
1525 		uint16_t free_cnt = vq->vq_free_cnt;
1526 		struct rte_mbuf *new_pkts[free_cnt];
1527 
1528 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1529 			error = virtqueue_enqueue_recv_refill(vq, new_pkts,
1530 					free_cnt);
1531 			if (unlikely(error)) {
1532 				for (i = 0; i < free_cnt; i++)
1533 					rte_pktmbuf_free(new_pkts[i]);
1534 			}
1535 			nb_enqueued += free_cnt;
1536 		} else {
1537 			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
1538 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1539 		}
1540 	}
1541 
1542 	if (likely(nb_enqueued)) {
1543 		vq_update_avail_idx(vq);
1544 
1545 		if (unlikely(virtqueue_kick_prepare(vq))) {
1546 			virtqueue_notify(vq);
1547 			PMD_RX_LOG(DEBUG, "Notified");
1548 		}
1549 	}
1550 
1551 	return nb_rx;
1552 }
1553 
1554 uint16_t
1555 virtio_recv_mergeable_pkts_packed(void *rx_queue,
1556 			struct rte_mbuf **rx_pkts,
1557 			uint16_t nb_pkts)
1558 {
1559 	struct virtnet_rx *rxvq = rx_queue;
1560 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1561 	struct virtio_hw *hw = vq->hw;
1562 	struct rte_mbuf *rxm;
1563 	struct rte_mbuf *prev = NULL;
1564 	uint16_t num, nb_rx = 0;
1565 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
1566 	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1567 	uint32_t nb_enqueued = 0;
1568 	uint32_t seg_num = 0;
1569 	uint32_t seg_res = 0;
1570 	uint32_t hdr_size = hw->vtnet_hdr_size;
1571 	int32_t i;
1572 	int error;
1573 
1574 	if (unlikely(hw->started == 0))
1575 		return nb_rx;
1576 
1577 
1578 	num = nb_pkts;
1579 	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
1580 		num = VIRTIO_MBUF_BURST_SZ;
1581 	if (likely(num > DESC_PER_CACHELINE))
1582 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
1583 
1584 	num = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts, len, num);
1585 
1586 	for (i = 0; i < num; i++) {
1587 		struct virtio_net_hdr_mrg_rxbuf *header;
1588 
1589 		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1590 		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1591 
1592 		rxm = rcv_pkts[i];
1593 
1594 		if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
1595 			PMD_RX_LOG(ERR, "Packet drop");
1596 			nb_enqueued++;
1597 			virtio_discard_rxbuf(vq, rxm);
1598 			rxvq->stats.errors++;
1599 			continue;
1600 		}
1601 
1602 		header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)
1603 			  rxm->buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size);
1604 		seg_num = header->num_buffers;
1605 
1606 		if (seg_num == 0)
1607 			seg_num = 1;
1608 
1609 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
1610 		rxm->nb_segs = seg_num;
1611 		rxm->ol_flags = 0;
1612 		rxm->vlan_tci = 0;
1613 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1614 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
1615 
1616 		rxm->port = hw->port_id;
1617 		rx_pkts[nb_rx] = rxm;
1618 		prev = rxm;
1619 
1620 		if (hw->has_rx_offload &&
1621 				virtio_rx_offload(rxm, &header->hdr) < 0) {
1622 			virtio_discard_rxbuf(vq, rxm);
1623 			rxvq->stats.errors++;
1624 			continue;
1625 		}
1626 
1627 		if (hw->vlan_strip)
1628 			rte_vlan_strip(rx_pkts[nb_rx]);
1629 
1630 		seg_res = seg_num - 1;
1631 
1632 		/* Merge remaining segments */
1633 		while (seg_res != 0 && i < (num - 1)) {
1634 			i++;
1635 
1636 			rxm = rcv_pkts[i];
1637 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1638 			rxm->pkt_len = (uint32_t)(len[i]);
1639 			rxm->data_len = (uint16_t)(len[i]);
1640 
1641 			rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1642 
1643 			prev->next = rxm;
1644 			prev = rxm;
1645 			seg_res -= 1;
1646 		}
1647 
1648 		if (!seg_res) {
1649 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1650 			nb_rx++;
1651 		}
1652 	}
1653 
1654 	/* The last packet may still need its remaining segments merged */
1655 	while (seg_res != 0) {
1656 		uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1657 					VIRTIO_MBUF_BURST_SZ);
1658 		uint16_t extra_idx = 0;
1659 
1660 		rcv_cnt = virtqueue_dequeue_burst_rx_packed(vq, rcv_pkts,
1661 				len, rcv_cnt);
1662 		if (unlikely(rcv_cnt == 0)) {
1663 			PMD_RX_LOG(ERR, "Not enough segments for packet.");
1664 			rte_pktmbuf_free(rx_pkts[nb_rx]);
1665 			rxvq->stats.errors++;
1666 			break;
1667 		}
1668 
1669 		while (extra_idx < rcv_cnt) {
1670 			rxm = rcv_pkts[extra_idx];
1671 
1672 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1673 			rxm->pkt_len = (uint32_t)(len[extra_idx]);
1674 			rxm->data_len = (uint16_t)(len[extra_idx]);
1675 
1676 			prev->next = rxm;
1677 			prev = rxm;
1678 			rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1679 			extra_idx += 1;
1680 		}
1681 		seg_res -= rcv_cnt;
1682 		if (!seg_res) {
1683 			virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1684 			nb_rx++;
1685 		}
1686 	}
1687 
1688 	rxvq->stats.packets += nb_rx;
1689 
1690 	/* Allocate new mbufs to refill the used descriptors */
1691 	if (likely(!virtqueue_full(vq))) {
1692 		/* free_cnt may include mrg descs */
1693 		uint16_t free_cnt = vq->vq_free_cnt;
1694 		struct rte_mbuf *new_pkts[free_cnt];
1695 
1696 		if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1697 			error = virtqueue_enqueue_recv_refill_packed(vq,
1698 					new_pkts, free_cnt);
1699 			if (unlikely(error)) {
1700 				for (i = 0; i < free_cnt; i++)
1701 					rte_pktmbuf_free(new_pkts[i]);
1702 			}
1703 			nb_enqueued += free_cnt;
1704 		} else {
1705 			struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
1706 			dev->data->rx_mbuf_alloc_failed += free_cnt;
1707 		}
1708 	}
1709 
1710 	if (likely(nb_enqueued)) {
1711 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1712 			virtqueue_notify(vq);
1713 			PMD_RX_LOG(DEBUG, "Notified");
1714 		}
1715 	}
1716 
1717 	return nb_rx;
1718 }
1719 
1720 uint16_t
1721 virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
1722 			uint16_t nb_pkts)
1723 {
1724 	uint16_t nb_tx;
1725 	int error;
1726 
1727 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1728 		struct rte_mbuf *m = tx_pkts[nb_tx];
1729 
1730 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1731 		error = rte_validate_tx_offload(m);
1732 		if (unlikely(error)) {
1733 			rte_errno = -error;
1734 			break;
1735 		}
1736 #endif
1737 
1738 		/* Do VLAN tag insertion */
1739 		if (unlikely(m->ol_flags & RTE_MBUF_F_TX_VLAN)) {
1740 			error = rte_vlan_insert(&m);
1741 			/* rte_vlan_insert() may change pointer
1742 			 * even in the case of failure
1743 			 */
1744 			tx_pkts[nb_tx] = m;
1745 
1746 			if (unlikely(error)) {
1747 				rte_errno = -error;
1748 				break;
1749 			}
1750 		}
1751 
1752 		error = rte_net_intel_cksum_prepare(m);
1753 		if (unlikely(error)) {
1754 			rte_errno = -error;
1755 			break;
1756 		}
1757 
1758 		if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
1759 			virtio_tso_fix_cksum(m);
1760 	}
1761 
1762 	return nb_tx;
1763 }
1764 
1765 uint16_t
1766 virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
1767 			uint16_t nb_pkts)
1768 {
1769 	struct virtnet_tx *txvq = tx_queue;
1770 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1771 	struct virtio_hw *hw = vq->hw;
1772 	uint16_t hdr_size = hw->vtnet_hdr_size;
1773 	uint16_t nb_tx = 0;
1774 	bool in_order = virtio_with_feature(hw, VIRTIO_F_IN_ORDER);
1775 
1776 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1777 		return nb_tx;
1778 
1779 	if (unlikely(nb_pkts < 1))
1780 		return nb_pkts;
1781 
1782 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1783 
1784 	if (nb_pkts > vq->vq_free_cnt)
1785 		virtio_xmit_cleanup_packed(vq, nb_pkts - vq->vq_free_cnt,
1786 					   in_order);
1787 
1788 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1789 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1790 		int can_push = 0, use_indirect = 0, slots, need;
1791 
1792 		/* optimize ring usage */
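		/* can_push: the virtio-net header fits into the mbuf headroom, so
		 * header and data are sent in a single descriptor.
		 * use_indirect: all segments are placed in one indirect table and
		 * consume only a single slot in the main ring.
		 */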
1793 		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1794 		      virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1795 		    rte_mbuf_refcnt_read(txm) == 1 &&
1796 		    RTE_MBUF_DIRECT(txm) &&
1797 		    txm->nb_segs == 1 &&
1798 		    rte_pktmbuf_headroom(txm) >= hdr_size &&
1799 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1800 			   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1801 			can_push = 1;
1802 		else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1803 			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1804 			use_indirect = 1;
1805 		/* How many main ring entries are needed for this Tx?
1806 		 * indirect   => 1
1807 		 * any_layout => number of segments
1808 		 * default    => number of segments + 1
1809 		 */
1810 		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1811 		need = slots - vq->vq_free_cnt;
1812 
1813 		/* A positive value means free vring descriptors are still needed */
1814 		if (unlikely(need > 0)) {
1815 			virtio_xmit_cleanup_packed(vq, need, in_order);
1816 			need = slots - vq->vq_free_cnt;
1817 			if (unlikely(need > 0)) {
1818 				PMD_TX_LOG(ERR,
1819 					   "No free tx descriptors to transmit");
1820 				break;
1821 			}
1822 		}
1823 
1824 		/* Enqueue Packet buffers */
1825 		if (can_push)
1826 			virtqueue_enqueue_xmit_packed_fast(txvq, txm, in_order);
1827 		else
1828 			virtqueue_enqueue_xmit_packed(txvq, txm, slots,
1829 						      use_indirect, 0,
1830 						      in_order);
1831 
1832 		virtio_update_packet_stats(&txvq->stats, txm);
1833 	}
1834 
1835 	txvq->stats.packets += nb_tx;
1836 
1837 	if (likely(nb_tx)) {
1838 		if (unlikely(virtqueue_kick_prepare_packed(vq))) {
1839 			virtqueue_notify(vq);
1840 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1841 		}
1842 	}
1843 
1844 	return nb_tx;
1845 }
1846 
1847 uint16_t
1848 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1849 {
1850 	struct virtnet_tx *txvq = tx_queue;
1851 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1852 	struct virtio_hw *hw = vq->hw;
1853 	uint16_t hdr_size = hw->vtnet_hdr_size;
1854 	uint16_t nb_used, nb_tx = 0;
1855 
1856 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1857 		return nb_tx;
1858 
1859 	if (unlikely(nb_pkts < 1))
1860 		return nb_pkts;
1861 
1862 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1863 
1864 	nb_used = virtqueue_nused(vq);
1865 
1866 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1867 		virtio_xmit_cleanup(vq, nb_used);
1868 
1869 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1870 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1871 		int can_push = 0, use_indirect = 0, slots, need;
1872 
1873 		/* optimize ring usage */
1874 		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1875 		      virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1876 		    rte_mbuf_refcnt_read(txm) == 1 &&
1877 		    RTE_MBUF_DIRECT(txm) &&
1878 		    txm->nb_segs == 1 &&
1879 		    rte_pktmbuf_headroom(txm) >= hdr_size &&
1880 		    rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1881 				   __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1882 			can_push = 1;
1883 		else if (virtio_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1884 			 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1885 			use_indirect = 1;
1886 
1887 		/* How many main ring entries are needed for this Tx?
1888 		 * any_layout => number of segments
1889 		 * indirect   => 1
1890 		 * default    => number of segments + 1
1891 		 */
1892 		slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1893 		need = slots - vq->vq_free_cnt;
1894 
1895 		/* A positive value means free vring descriptors are still needed */
1896 		if (unlikely(need > 0)) {
1897 			nb_used = virtqueue_nused(vq);
1898 
1899 			need = RTE_MIN(need, (int)nb_used);
1900 
1901 			virtio_xmit_cleanup(vq, need);
1902 			need = slots - vq->vq_free_cnt;
1903 			if (unlikely(need > 0)) {
1904 				PMD_TX_LOG(ERR,
1905 					   "No free tx descriptors to transmit");
1906 				break;
1907 			}
1908 		}
1909 
1910 		/* Enqueue Packet buffers */
1911 		virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
1912 			can_push, 0);
1913 
1914 		virtio_update_packet_stats(&txvq->stats, txm);
1915 	}
1916 
1917 	txvq->stats.packets += nb_tx;
1918 
1919 	if (likely(nb_tx)) {
1920 		vq_update_avail_idx(vq);
1921 
1922 		if (unlikely(virtqueue_kick_prepare(vq))) {
1923 			virtqueue_notify(vq);
1924 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
1925 		}
1926 	}
1927 
1928 	return nb_tx;
1929 }
1930 
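/* Try to reclaim up to 'need' used Tx descriptors; returns how many
 * descriptors are still missing afterwards (<= 0 means enough are free now).
 */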
1931 static __rte_always_inline int
1932 virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
1933 {
1934 	uint16_t nb_used, nb_clean, nb_descs;
1935 
1936 	nb_descs = vq->vq_free_cnt + need;
1937 	nb_used = virtqueue_nused(vq);
1938 	nb_clean = RTE_MIN(need, (int)nb_used);
1939 
1940 	virtio_xmit_cleanup_inorder(vq, nb_clean);
1941 
1942 	return nb_descs - vq->vq_free_cnt;
1943 }
1944 
1945 uint16_t
1946 virtio_xmit_pkts_inorder(void *tx_queue,
1947 			struct rte_mbuf **tx_pkts,
1948 			uint16_t nb_pkts)
1949 {
1950 	struct virtnet_tx *txvq = tx_queue;
1951 	struct virtqueue *vq = virtnet_txq_to_vq(txvq);
1952 	struct virtio_hw *hw = vq->hw;
1953 	uint16_t hdr_size = hw->vtnet_hdr_size;
1954 	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
1955 	struct rte_mbuf *inorder_pkts[nb_pkts];
1956 	int need;
1957 
1958 	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1959 		return nb_tx;
1960 
1961 	if (unlikely(nb_pkts < 1))
1962 		return nb_pkts;
1963 
1964 	VIRTQUEUE_DUMP(vq);
1965 	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1966 	nb_used = virtqueue_nused(vq);
1967 
1968 	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1969 		virtio_xmit_cleanup_inorder(vq, nb_used);
1970 
1971 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1972 		struct rte_mbuf *txm = tx_pkts[nb_tx];
1973 		int slots;
1974 
1975 		/* optimize ring usage */
1976 		if ((virtio_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1977 		     virtio_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1978 		     rte_mbuf_refcnt_read(txm) == 1 &&
1979 		     RTE_MBUF_DIRECT(txm) &&
1980 		     txm->nb_segs == 1 &&
1981 		     rte_pktmbuf_headroom(txm) >= hdr_size &&
1982 		     rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1983 				__alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
1984 			inorder_pkts[nb_inorder_pkts] = txm;
1985 			nb_inorder_pkts++;
1986 
1987 			continue;
1988 		}
1989 
1990 		if (nb_inorder_pkts) {
1991 			need = nb_inorder_pkts - vq->vq_free_cnt;
1992 			if (unlikely(need > 0)) {
1993 				need = virtio_xmit_try_cleanup_inorder(vq,
1994 								       need);
1995 				if (unlikely(need > 0)) {
1996 					PMD_TX_LOG(ERR,
1997 						"No free tx descriptors to "
1998 						"transmit");
1999 					break;
2000 				}
2001 			}
2002 			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2003 							nb_inorder_pkts);
2004 			nb_inorder_pkts = 0;
2005 		}
2006 
2007 		slots = txm->nb_segs + 1;
2008 		need = slots - vq->vq_free_cnt;
2009 		if (unlikely(need > 0)) {
2010 			need = virtio_xmit_try_cleanup_inorder(vq, slots);
2011 
2012 			if (unlikely(need > 0)) {
2013 				PMD_TX_LOG(ERR,
2014 					"No free tx descriptors to transmit");
2015 				break;
2016 			}
2017 		}
2018 		/* Enqueue Packet buffers */
2019 		virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
2020 
2021 		virtio_update_packet_stats(&txvq->stats, txm);
2022 	}
2023 
2024 	/* Transmit all inorder packets */
2025 	if (nb_inorder_pkts) {
2026 		need = nb_inorder_pkts - vq->vq_free_cnt;
2027 		if (unlikely(need > 0)) {
2028 			need = virtio_xmit_try_cleanup_inorder(vq,
2029 								  need);
2030 			if (unlikely(need > 0)) {
2031 				PMD_TX_LOG(ERR,
2032 					"No free tx descriptors to transmit");
2033 				nb_inorder_pkts = vq->vq_free_cnt;
2034 				nb_tx -= need;
2035 			}
2036 		}
2037 
2038 		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
2039 						nb_inorder_pkts);
2040 	}
2041 
2042 	txvq->stats.packets += nb_tx;
2043 
2044 	if (likely(nb_tx)) {
2045 		vq_update_avail_idx(vq);
2046 
2047 		if (unlikely(virtqueue_kick_prepare(vq))) {
2048 			virtqueue_notify(vq);
2049 			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
2050 		}
2051 	}
2052 
2053 	VIRTQUEUE_DUMP(vq);
2054 
2055 	return nb_tx;
2056 }
2057 
2058 __rte_weak uint16_t
2059 virtio_recv_pkts_packed_vec(void *rx_queue __rte_unused,
2060 			    struct rte_mbuf **rx_pkts __rte_unused,
2061 			    uint16_t nb_pkts __rte_unused)
2062 {
2063 	return 0;
2064 }
2065 
2066 __rte_weak uint16_t
2067 virtio_xmit_pkts_packed_vec(void *tx_queue __rte_unused,
2068 			    struct rte_mbuf **tx_pkts __rte_unused,
2069 			    uint16_t nb_pkts __rte_unused)
2070 {
2071 	return 0;
2072 }
2073