xref: /dpdk/drivers/net/virtio/virtio_ethdev.c (revision 665b49c51639a10c553433bc2bcd85c7331c631e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <string.h>
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <errno.h>
10 #include <unistd.h>
11 
12 #include <ethdev_driver.h>
13 #include <rte_memcpy.h>
14 #include <rte_string_fns.h>
15 #include <rte_memzone.h>
16 #include <rte_malloc.h>
17 #include <rte_branch_prediction.h>
18 #include <rte_ether.h>
19 #include <rte_ip.h>
20 #include <rte_arp.h>
21 #include <rte_common.h>
22 #include <rte_errno.h>
23 #include <rte_cpuflags.h>
24 #include <rte_vect.h>
25 #include <rte_memory.h>
26 #include <rte_eal_paging.h>
27 #include <rte_eal.h>
28 #include <dev_driver.h>
29 #include <rte_cycles.h>
30 #include <rte_kvargs.h>
31 
32 #include "virtio_ethdev.h"
33 #include "virtio.h"
34 #include "virtio_logs.h"
35 #include "virtqueue.h"
36 #include "virtio_cvq.h"
37 #include "virtio_rxtx.h"
38 #include "virtio_rxtx_simple.h"
39 #include "virtio_user/virtio_user_dev.h"
40 
41 static int  virtio_dev_configure(struct rte_eth_dev *dev);
42 static int  virtio_dev_start(struct rte_eth_dev *dev);
43 static int virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
44 static int virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
45 static int virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
46 static int virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
47 static uint32_t virtio_dev_speed_capa_get(uint32_t speed);
48 static int virtio_dev_devargs_parse(struct rte_devargs *devargs,
49 	uint32_t *speed,
50 	int *vectorized);
51 static int virtio_dev_info_get(struct rte_eth_dev *dev,
52 				struct rte_eth_dev_info *dev_info);
53 static int virtio_dev_link_update(struct rte_eth_dev *dev,
54 	int wait_to_complete);
55 static int virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
56 static int virtio_dev_rss_hash_update(struct rte_eth_dev *dev,
57 		struct rte_eth_rss_conf *rss_conf);
58 static int virtio_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
59 		struct rte_eth_rss_conf *rss_conf);
60 static int virtio_dev_rss_reta_update(struct rte_eth_dev *dev,
61 			 struct rte_eth_rss_reta_entry64 *reta_conf,
62 			 uint16_t reta_size);
63 static int virtio_dev_rss_reta_query(struct rte_eth_dev *dev,
64 			 struct rte_eth_rss_reta_entry64 *reta_conf,
65 			 uint16_t reta_size);
66 
67 static void virtio_set_hwaddr(struct virtio_hw *hw);
68 static void virtio_get_hwaddr(struct virtio_hw *hw);
69 
70 static int virtio_dev_stats_get(struct rte_eth_dev *dev,
71 				 struct rte_eth_stats *stats);
72 static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
73 				 struct rte_eth_xstat *xstats, unsigned n);
74 static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
75 				       struct rte_eth_xstat_name *xstats_names,
76 				       unsigned limit);
77 static int virtio_dev_stats_reset(struct rte_eth_dev *dev);
78 static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
79 static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
80 				uint16_t vlan_id, int on);
81 static int virtio_mac_addr_add(struct rte_eth_dev *dev,
82 				struct rte_ether_addr *mac_addr,
83 				uint32_t index, uint32_t vmdq);
84 static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
85 static int virtio_mac_addr_set(struct rte_eth_dev *dev,
86 				struct rte_ether_addr *mac_addr);
87 
88 static int virtio_intr_disable(struct rte_eth_dev *dev);
89 static int virtio_get_monitor_addr(void *rx_queue,
90 				struct rte_power_monitor_cond *pmc);
91 
92 static int virtio_dev_queue_stats_mapping_set(
93 	struct rte_eth_dev *eth_dev,
94 	uint16_t queue_id,
95 	uint8_t stat_idx,
96 	uint8_t is_rx);
97 
98 static void virtio_notify_peers(struct rte_eth_dev *dev);
99 static void virtio_ack_link_announce(struct rte_eth_dev *dev);
100 
101 struct rte_virtio_xstats_name_off {
102 	char name[RTE_ETH_XSTATS_NAME_SIZE];
103 	unsigned offset;
104 };
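/*
 * Each entry maps an xstat name to the byte offset of the corresponding
 * counter inside the per-queue stats structure; virtio_dev_xstats_get()
 * reads the counters through these offsets.
 */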
105 
106 /* [rt]x_qX_ is prepended to the name string here */
107 static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = {
108 	{"good_packets",           offsetof(struct virtnet_rx, stats.packets)},
109 	{"good_bytes",             offsetof(struct virtnet_rx, stats.bytes)},
110 	{"errors",                 offsetof(struct virtnet_rx, stats.errors)},
111 	{"multicast_packets",      offsetof(struct virtnet_rx, stats.multicast)},
112 	{"broadcast_packets",      offsetof(struct virtnet_rx, stats.broadcast)},
113 	{"undersize_packets",      offsetof(struct virtnet_rx, stats.size_bins[0])},
114 	{"size_64_packets",        offsetof(struct virtnet_rx, stats.size_bins[1])},
115 	{"size_65_127_packets",    offsetof(struct virtnet_rx, stats.size_bins[2])},
116 	{"size_128_255_packets",   offsetof(struct virtnet_rx, stats.size_bins[3])},
117 	{"size_256_511_packets",   offsetof(struct virtnet_rx, stats.size_bins[4])},
118 	{"size_512_1023_packets",  offsetof(struct virtnet_rx, stats.size_bins[5])},
119 	{"size_1024_1518_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
120 	{"size_1519_max_packets",  offsetof(struct virtnet_rx, stats.size_bins[7])},
121 };
122 
123 /* [rt]x_qX_ is prepended to the name string here */
124 static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
125 	{"good_packets",           offsetof(struct virtnet_tx, stats.packets)},
126 	{"good_bytes",             offsetof(struct virtnet_tx, stats.bytes)},
127 	{"multicast_packets",      offsetof(struct virtnet_tx, stats.multicast)},
128 	{"broadcast_packets",      offsetof(struct virtnet_tx, stats.broadcast)},
129 	{"undersize_packets",      offsetof(struct virtnet_tx, stats.size_bins[0])},
130 	{"size_64_packets",        offsetof(struct virtnet_tx, stats.size_bins[1])},
131 	{"size_65_127_packets",    offsetof(struct virtnet_tx, stats.size_bins[2])},
132 	{"size_128_255_packets",   offsetof(struct virtnet_tx, stats.size_bins[3])},
133 	{"size_256_511_packets",   offsetof(struct virtnet_tx, stats.size_bins[4])},
134 	{"size_512_1023_packets",  offsetof(struct virtnet_tx, stats.size_bins[5])},
135 	{"size_1024_1518_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
136 	{"size_1519_max_packets",  offsetof(struct virtnet_tx, stats.size_bins[7])},
137 };
138 
139 #define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \
140 			    sizeof(rte_virtio_rxq_stat_strings[0]))
141 #define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
142 			    sizeof(rte_virtio_txq_stat_strings[0]))
143 
144 struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
145 
146 static int
147 virtio_set_multiple_queues_rss(struct rte_eth_dev *dev, uint16_t nb_queues)
148 {
149 	struct virtio_hw *hw = dev->data->dev_private;
150 	struct virtio_pmd_ctrl ctrl;
151 	struct virtio_net_ctrl_rss rss;
152 	int dlen, ret;
153 
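	/*
	 * Build the RSS configuration from the values cached in virtio_hw
	 * (hash types, indirection table and hash key) and send it to the
	 * device in a single control-queue command.
	 */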
154 	rss.hash_types = hw->rss_hash_types & VIRTIO_NET_HASH_TYPE_MASK;
155 	RTE_BUILD_BUG_ON(!RTE_IS_POWER_OF_2(VIRTIO_NET_RSS_RETA_SIZE));
156 	rss.indirection_table_mask = VIRTIO_NET_RSS_RETA_SIZE - 1;
157 	rss.unclassified_queue = 0;
158 	memcpy(rss.indirection_table, hw->rss_reta, VIRTIO_NET_RSS_RETA_SIZE * sizeof(uint16_t));
159 	rss.max_tx_vq = nb_queues;
160 	rss.hash_key_length = VIRTIO_NET_RSS_KEY_SIZE;
161 	memcpy(rss.hash_key_data, hw->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
162 
163 	ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
164 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_RSS_CONFIG;
165 	memcpy(ctrl.data, &rss, sizeof(rss));
166 
167 	dlen = sizeof(rss);
168 
169 	ret = virtio_send_command(hw->cvq, &ctrl, &dlen, 1);
170 	if (ret) {
171 		PMD_INIT_LOG(ERR, "RSS multiqueue configured but send command failed");
172 		return -EINVAL;
173 	}
174 
175 	return 0;
176 }
177 
178 static int
179 virtio_set_multiple_queues_auto(struct rte_eth_dev *dev, uint16_t nb_queues)
180 {
181 	struct virtio_hw *hw = dev->data->dev_private;
182 	struct virtio_pmd_ctrl ctrl;
183 	int dlen;
184 	int ret;
185 
186 	ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
187 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
188 	memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));
189 
190 	dlen = sizeof(uint16_t);
191 
192 	ret = virtio_send_command(hw->cvq, &ctrl, &dlen, 1);
193 	if (ret) {
194 		PMD_INIT_LOG(ERR, "Multiqueue configured but send command "
195 			  "failed, this is too late now...");
196 		return -EINVAL;
197 	}
198 
199 	return 0;
200 }
201 
202 static int
203 virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
204 {
205 	struct virtio_hw *hw = dev->data->dev_private;
206 
207 	if (virtio_with_feature(hw, VIRTIO_NET_F_RSS))
208 		return virtio_set_multiple_queues_rss(dev, nb_queues);
209 	else
210 		return virtio_set_multiple_queues_auto(dev, nb_queues);
211 }
212 
213 static uint16_t
214 virtio_get_nr_vq(struct virtio_hw *hw)
215 {
216 	uint16_t nr_vq = hw->max_queue_pairs * 2;
217 
218 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
219 		nr_vq += 1;
220 
221 	return nr_vq;
222 }
223 
224 static void
225 virtio_control_queue_notify(struct virtqueue *vq, __rte_unused void *cookie)
226 {
227 	virtqueue_notify(vq);
228 }
229 
230 static int
231 virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
232 {
233 	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
234 	unsigned int vq_size;
235 	struct virtio_hw *hw = dev->data->dev_private;
236 	struct virtqueue *vq;
237 	int queue_type = virtio_get_queue_type(hw, queue_idx);
238 	int ret;
239 	int numa_node = dev->device->numa_node;
240 
241 	PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d",
242 			queue_idx, numa_node);
243 
244 	/*
245 	 * Read the virtqueue size from the Queue Size field.
246 	 * It is always a power of 2; a value of 0 means the virtqueue does not exist.
247 	 */
248 	vq_size = VIRTIO_OPS(hw)->get_queue_num(hw, queue_idx);
249 	PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
250 	if (vq_size == 0) {
251 		PMD_INIT_LOG(ERR, "virtqueue does not exist");
252 		return -EINVAL;
253 	}
254 
255 	if (!virtio_with_packed_queue(hw) && !rte_is_power_of_2(vq_size)) {
256 		PMD_INIT_LOG(ERR, "split virtqueue size is not power of 2");
257 		return -EINVAL;
258 	}
259 
260 	snprintf(vq_name, sizeof(vq_name), "port%d_vq%d", dev->data->port_id, queue_idx);
261 
262 	vq = virtqueue_alloc(hw, queue_idx, vq_size, queue_type, numa_node, vq_name);
263 	if (!vq) {
264 		PMD_INIT_LOG(ERR, "virtqueue init failed");
265 		return -ENOMEM;
266 	}
267 
268 	hw->vqs[queue_idx] = vq;
269 
270 	if (queue_type == VTNET_CQ) {
271 		hw->cvq = &vq->cq;
272 		vq->cq.notify_queue = &virtio_control_queue_notify;
273 	}
274 
275 	if (VIRTIO_OPS(hw)->setup_queue(hw, vq) < 0) {
276 		PMD_INIT_LOG(ERR, "setup_queue failed");
277 		ret = -EINVAL;
278 		goto clean_vq;
279 	}
280 
281 	return 0;
282 
283 clean_vq:
284 	if (queue_type == VTNET_CQ)
285 		hw->cvq = NULL;
286 	virtqueue_free(vq);
287 	hw->vqs[queue_idx] = NULL;
288 
289 	return ret;
290 }
291 
292 static void
293 virtio_free_queues(struct virtio_hw *hw)
294 {
295 	uint16_t nr_vq = virtio_get_nr_vq(hw);
296 	struct virtqueue *vq;
297 	uint16_t i;
298 
299 	if (hw->vqs == NULL)
300 		return;
301 
302 	for (i = 0; i < nr_vq; i++) {
303 		vq = hw->vqs[i];
304 		if (!vq)
305 			continue;
306 		virtqueue_free(vq);
307 		hw->vqs[i] = NULL;
308 	}
309 
310 	rte_free(hw->vqs);
311 	hw->vqs = NULL;
312 }
313 
314 static int
315 virtio_alloc_queues(struct rte_eth_dev *dev)
316 {
317 	struct virtio_hw *hw = dev->data->dev_private;
318 	uint16_t nr_vq = virtio_get_nr_vq(hw);
319 	uint16_t i;
320 	int ret;
321 
322 	hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
323 	if (!hw->vqs) {
324 		PMD_INIT_LOG(ERR, "failed to allocate vqs");
325 		return -ENOMEM;
326 	}
327 
328 	for (i = 0; i < nr_vq; i++) {
329 		ret = virtio_init_queue(dev, i);
330 		if (ret < 0) {
331 			virtio_free_queues(hw);
332 			return ret;
333 		}
334 	}
335 
336 	return 0;
337 }
338 
339 static void virtio_queues_unbind_intr(struct rte_eth_dev *dev);
340 
341 static void
342 virtio_free_rss(struct virtio_hw *hw)
343 {
344 	rte_free(hw->rss_key);
345 	hw->rss_key = NULL;
346 
347 	rte_free(hw->rss_reta);
348 	hw->rss_reta = NULL;
349 }
350 
351 int
352 virtio_dev_close(struct rte_eth_dev *dev)
353 {
354 	struct virtio_hw *hw = dev->data->dev_private;
355 	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
356 
357 	PMD_INIT_LOG(DEBUG, "virtio_dev_close");
358 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
359 		return 0;
360 
361 	if (!hw->opened)
362 		return 0;
363 	hw->opened = 0;
364 
365 	/* reset the NIC */
366 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
367 		VIRTIO_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);
368 	if (intr_conf->rxq)
369 		virtio_queues_unbind_intr(dev);
370 
371 	if (intr_conf->lsc || intr_conf->rxq) {
372 		virtio_intr_disable(dev);
373 		rte_intr_efd_disable(dev->intr_handle);
374 		rte_intr_vec_list_free(dev->intr_handle);
375 	}
376 
377 	virtio_reset(hw);
378 	virtio_dev_free_mbufs(dev);
379 	virtio_free_queues(hw);
380 	virtio_free_rss(hw);
381 
382 	return VIRTIO_OPS(hw)->dev_close(hw);
383 }
384 
385 static int
386 virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
387 {
388 	struct virtio_hw *hw = dev->data->dev_private;
389 	struct virtio_pmd_ctrl ctrl;
390 	int dlen[1];
391 	int ret;
392 
393 	if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
394 		PMD_INIT_LOG(INFO, "host does not support rx control");
395 		return -ENOTSUP;
396 	}
397 
398 	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
399 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
400 	ctrl.data[0] = 1;
401 	dlen[0] = 1;
402 
403 	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
404 	if (ret) {
405 		PMD_INIT_LOG(ERR, "Failed to enable promisc");
406 		return -EAGAIN;
407 	}
408 
409 	return 0;
410 }
411 
412 static int
413 virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
414 {
415 	struct virtio_hw *hw = dev->data->dev_private;
416 	struct virtio_pmd_ctrl ctrl;
417 	int dlen[1];
418 	int ret;
419 
420 	if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
421 		PMD_INIT_LOG(INFO, "host does not support rx control");
422 		return -ENOTSUP;
423 	}
424 
425 	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
426 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
427 	ctrl.data[0] = 0;
428 	dlen[0] = 1;
429 
430 	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
431 	if (ret) {
432 		PMD_INIT_LOG(ERR, "Failed to disable promisc");
433 		return -EAGAIN;
434 	}
435 
436 	return 0;
437 }
438 
439 static int
440 virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
441 {
442 	struct virtio_hw *hw = dev->data->dev_private;
443 	struct virtio_pmd_ctrl ctrl;
444 	int dlen[1];
445 	int ret;
446 
447 	if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
448 		PMD_INIT_LOG(INFO, "host does not support rx control");
449 		return -ENOTSUP;
450 	}
451 
452 	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
453 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
454 	ctrl.data[0] = 1;
455 	dlen[0] = 1;
456 
457 	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
458 	if (ret) {
459 		PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
460 		return -EAGAIN;
461 	}
462 
463 	return 0;
464 }
465 
466 static int
467 virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
468 {
469 	struct virtio_hw *hw = dev->data->dev_private;
470 	struct virtio_pmd_ctrl ctrl;
471 	int dlen[1];
472 	int ret;
473 
474 	if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
475 		PMD_INIT_LOG(INFO, "host does not support rx control");
476 		return -ENOTSUP;
477 	}
478 
479 	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
480 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
481 	ctrl.data[0] = 0;
482 	dlen[0] = 1;
483 
484 	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
485 	if (ret) {
486 		PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
487 		return -EAGAIN;
488 	}
489 
490 	return 0;
491 }
492 
493 uint16_t
494 virtio_rx_mem_pool_buf_size(struct rte_mempool *mp)
495 {
496 	return rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
497 }
498 
499 bool
500 virtio_rx_check_scatter(uint16_t max_rx_pkt_len, uint16_t rx_buf_size,
501 			bool rx_scatter_enabled, const char **error)
502 {
503 	if (!rx_scatter_enabled && max_rx_pkt_len > rx_buf_size) {
504 		*error = "Rx scatter is disabled and RxQ mbuf pool object size is too small";
505 		return false;
506 	}
507 
508 	return true;
509 }
510 
511 static bool
512 virtio_check_scatter_on_all_rx_queues(struct rte_eth_dev *dev,
513 				      uint16_t frame_size)
514 {
515 	struct virtio_hw *hw = dev->data->dev_private;
516 	struct virtnet_rx *rxvq;
517 	struct virtqueue *vq;
518 	unsigned int qidx;
519 	uint16_t buf_size;
520 	const char *error;
521 
522 	if (hw->vqs == NULL)
523 		return true;
524 
525 	for (qidx = 0; qidx < hw->max_queue_pairs; qidx++) {
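		/* Skip queue pairs whose Rx virtqueue or mbuf pool is not yet set up. */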
526 		vq = hw->vqs[2 * qidx + VTNET_SQ_RQ_QUEUE_IDX];
527 		if (vq == NULL)
528 			continue;
529 
530 		rxvq = &vq->rxq;
531 		if (rxvq->mpool == NULL)
532 			continue;
533 		buf_size = virtio_rx_mem_pool_buf_size(rxvq->mpool);
534 
535 		if (!virtio_rx_check_scatter(frame_size, buf_size,
536 					     hw->rx_ol_scatter, &error)) {
537 			PMD_INIT_LOG(ERR, "MTU check for RxQ %u failed: %s",
538 				     qidx, error);
539 			return false;
540 		}
541 	}
542 
543 	return true;
544 }
545 
546 #define VLAN_TAG_LEN           4    /* 802.3ac tag (not DMA'd) */
547 static int
548 virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
549 {
550 	struct virtio_hw *hw = dev->data->dev_private;
551 	uint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +
552 				 hw->vtnet_hdr_size;
553 	uint32_t frame_size = mtu + ether_hdr_len;
554 	uint32_t max_frame_size = hw->max_mtu + ether_hdr_len;
555 
556 	max_frame_size = RTE_MIN(max_frame_size, VIRTIO_MAX_RX_PKTLEN);
557 
558 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > max_frame_size) {
559 		PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
560 			RTE_ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
561 		return -EINVAL;
562 	}
563 
564 	if (!virtio_check_scatter_on_all_rx_queues(dev, frame_size)) {
565 		PMD_INIT_LOG(ERR, "MTU vs Rx scatter and Rx buffers check failed");
566 		return -EINVAL;
567 	}
568 
569 	hw->max_rx_pkt_len = frame_size;
570 
571 	return 0;
572 }
573 
574 static int
575 virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
576 {
577 	struct virtio_hw *hw = dev->data->dev_private;
578 	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
579 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
580 
581 	virtqueue_enable_intr(vq);
582 	virtio_mb(hw->weak_barriers);
583 	return 0;
584 }
585 
586 static int
587 virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
588 {
589 	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
590 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
591 
592 	virtqueue_disable_intr(vq);
593 	return 0;
594 }
595 
596 static int
597 virtio_dev_priv_dump(struct rte_eth_dev *dev, FILE *f)
598 {
599 	struct virtio_hw *hw = dev->data->dev_private;
600 
601 	fprintf(f, "guest_features: 0x%" PRIx64 "\n", hw->guest_features);
602 	fprintf(f, "vtnet_hdr_size: %u\n", hw->vtnet_hdr_size);
603 	fprintf(f, "use_vec: rx-%u tx-%u\n", hw->use_vec_rx, hw->use_vec_tx);
604 	fprintf(f, "use_inorder: rx-%u tx-%u\n", hw->use_inorder_rx, hw->use_inorder_tx);
605 	fprintf(f, "intr_lsc: %u\n", hw->intr_lsc);
606 	fprintf(f, "max_mtu: %u\n", hw->max_mtu);
607 	fprintf(f, "max_rx_pkt_len: %zu\n", hw->max_rx_pkt_len);
608 	fprintf(f, "max_queue_pairs: %u\n", hw->max_queue_pairs);
609 	fprintf(f, "req_guest_features: 0x%" PRIx64 "\n", hw->req_guest_features);
610 
611 	return 0;
612 }
613 
614 /*
615  * dev_ops for virtio, bare necessities for basic operation
616  */
617 static const struct eth_dev_ops virtio_eth_dev_ops = {
618 	.dev_configure           = virtio_dev_configure,
619 	.dev_start               = virtio_dev_start,
620 	.dev_stop                = virtio_dev_stop,
621 	.dev_close               = virtio_dev_close,
622 	.promiscuous_enable      = virtio_dev_promiscuous_enable,
623 	.promiscuous_disable     = virtio_dev_promiscuous_disable,
624 	.allmulticast_enable     = virtio_dev_allmulticast_enable,
625 	.allmulticast_disable    = virtio_dev_allmulticast_disable,
626 	.mtu_set                 = virtio_mtu_set,
627 	.dev_infos_get           = virtio_dev_info_get,
628 	.stats_get               = virtio_dev_stats_get,
629 	.xstats_get              = virtio_dev_xstats_get,
630 	.xstats_get_names        = virtio_dev_xstats_get_names,
631 	.stats_reset             = virtio_dev_stats_reset,
632 	.xstats_reset            = virtio_dev_stats_reset,
633 	.link_update             = virtio_dev_link_update,
634 	.vlan_offload_set        = virtio_dev_vlan_offload_set,
635 	.rx_queue_setup          = virtio_dev_rx_queue_setup,
636 	.rx_queue_intr_enable    = virtio_dev_rx_queue_intr_enable,
637 	.rx_queue_intr_disable   = virtio_dev_rx_queue_intr_disable,
638 	.tx_queue_setup          = virtio_dev_tx_queue_setup,
639 	.rss_hash_update         = virtio_dev_rss_hash_update,
640 	.rss_hash_conf_get       = virtio_dev_rss_hash_conf_get,
641 	.reta_update             = virtio_dev_rss_reta_update,
642 	.reta_query              = virtio_dev_rss_reta_query,
643 	/* collect stats per queue */
644 	.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
645 	.vlan_filter_set         = virtio_vlan_filter_set,
646 	.mac_addr_add            = virtio_mac_addr_add,
647 	.mac_addr_remove         = virtio_mac_addr_remove,
648 	.mac_addr_set            = virtio_mac_addr_set,
649 	.get_monitor_addr        = virtio_get_monitor_addr,
650 	.eth_dev_priv_dump       = virtio_dev_priv_dump,
651 };
652 
653 /*
654  * dev_ops for virtio-user in secondary processes; only a limited set of
655  * operations is currently supported.
656  */
657 const struct eth_dev_ops virtio_user_secondary_eth_dev_ops = {
658 	.dev_infos_get           = virtio_dev_info_get,
659 	.stats_get               = virtio_dev_stats_get,
660 	.xstats_get              = virtio_dev_xstats_get,
661 	.xstats_get_names        = virtio_dev_xstats_get_names,
662 	.stats_reset             = virtio_dev_stats_reset,
663 	.xstats_reset            = virtio_dev_stats_reset,
664 	/* collect stats per queue */
665 	.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
666 };
667 
668 static void
669 virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
670 {
671 	unsigned i;
672 
673 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
674 		const struct virtnet_tx *txvq = dev->data->tx_queues[i];
675 		if (txvq == NULL)
676 			continue;
677 
678 		stats->opackets += txvq->stats.packets;
679 		stats->obytes += txvq->stats.bytes;
680 
681 		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
682 			stats->q_opackets[i] = txvq->stats.packets;
683 			stats->q_obytes[i] = txvq->stats.bytes;
684 		}
685 	}
686 
687 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
688 		const struct virtnet_rx *rxvq = dev->data->rx_queues[i];
689 		if (rxvq == NULL)
690 			continue;
691 
692 		stats->ipackets += rxvq->stats.packets;
693 		stats->ibytes += rxvq->stats.bytes;
694 		stats->ierrors += rxvq->stats.errors;
695 
696 		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
697 			stats->q_ipackets[i] = rxvq->stats.packets;
698 			stats->q_ibytes[i] = rxvq->stats.bytes;
699 		}
700 	}
701 
702 	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
703 }
704 
705 static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
706 				       struct rte_eth_xstat_name *xstats_names,
707 				       __rte_unused unsigned limit)
708 {
709 	unsigned i;
710 	unsigned count = 0;
711 	unsigned t;
712 
713 	unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
714 		dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
715 
716 	if (xstats_names != NULL) {
717 		/* Note: limit is checked in rte_eth_xstats_get_names() */
718 
719 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
720 			struct virtnet_rx *rxvq = dev->data->rx_queues[i];
721 			if (rxvq == NULL)
722 				continue;
723 			for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
724 				snprintf(xstats_names[count].name,
725 					sizeof(xstats_names[count].name),
726 					"rx_q%u_%s", i,
727 					rte_virtio_rxq_stat_strings[t].name);
728 				count++;
729 			}
730 		}
731 
732 		for (i = 0; i < dev->data->nb_tx_queues; i++) {
733 			struct virtnet_tx *txvq = dev->data->tx_queues[i];
734 			if (txvq == NULL)
735 				continue;
736 			for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
737 				snprintf(xstats_names[count].name,
738 					sizeof(xstats_names[count].name),
739 					"tx_q%u_%s", i,
740 					rte_virtio_txq_stat_strings[t].name);
741 				count++;
742 			}
743 		}
744 		return count;
745 	}
746 	return nstats;
747 }
748 
749 static int
750 virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
751 		      unsigned n)
752 {
753 	unsigned i;
754 	unsigned count = 0;
755 
756 	unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
757 		dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
758 
759 	if (n < nstats)
760 		return nstats;
761 
762 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
763 		struct virtnet_rx *rxvq = dev->data->rx_queues[i];
764 
765 		if (rxvq == NULL)
766 			continue;
767 
768 		unsigned t;
769 
770 		for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
771 			xstats[count].value = *(uint64_t *)(((char *)rxvq) +
772 				rte_virtio_rxq_stat_strings[t].offset);
773 			xstats[count].id = count;
774 			count++;
775 		}
776 	}
777 
778 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
779 		struct virtnet_tx *txvq = dev->data->tx_queues[i];
780 
781 		if (txvq == NULL)
782 			continue;
783 
784 		unsigned t;
785 
786 		for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
787 			xstats[count].value = *(uint64_t *)(((char *)txvq) +
788 				rte_virtio_txq_stat_strings[t].offset);
789 			xstats[count].id = count;
790 			count++;
791 		}
792 	}
793 
794 	return count;
795 }
796 
797 static int
798 virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
799 {
800 	virtio_update_stats(dev, stats);
801 
802 	return 0;
803 }
804 
805 static int
806 virtio_dev_stats_reset(struct rte_eth_dev *dev)
807 {
808 	unsigned int i;
809 
810 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
811 		struct virtnet_tx *txvq = dev->data->tx_queues[i];
812 		if (txvq == NULL)
813 			continue;
814 
815 		txvq->stats.packets = 0;
816 		txvq->stats.bytes = 0;
817 		txvq->stats.multicast = 0;
818 		txvq->stats.broadcast = 0;
819 		memset(txvq->stats.size_bins, 0,
820 		       sizeof(txvq->stats.size_bins[0]) * 8);
821 	}
822 
823 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
824 		struct virtnet_rx *rxvq = dev->data->rx_queues[i];
825 		if (rxvq == NULL)
826 			continue;
827 
828 		rxvq->stats.packets = 0;
829 		rxvq->stats.bytes = 0;
830 		rxvq->stats.errors = 0;
831 		rxvq->stats.multicast = 0;
832 		rxvq->stats.broadcast = 0;
833 		memset(rxvq->stats.size_bins, 0,
834 		       sizeof(rxvq->stats.size_bins[0]) * 8);
835 	}
836 
837 	return 0;
838 }
839 
840 static void
841 virtio_set_hwaddr(struct virtio_hw *hw)
842 {
843 	virtio_write_dev_config(hw,
844 			offsetof(struct virtio_net_config, mac),
845 			&hw->mac_addr, RTE_ETHER_ADDR_LEN);
846 }
847 
848 static void
849 virtio_get_hwaddr(struct virtio_hw *hw)
850 {
851 	if (virtio_with_feature(hw, VIRTIO_NET_F_MAC)) {
852 		virtio_read_dev_config(hw,
853 			offsetof(struct virtio_net_config, mac),
854 			&hw->mac_addr, RTE_ETHER_ADDR_LEN);
855 	} else {
856 		rte_eth_random_addr(&hw->mac_addr[0]);
857 		virtio_set_hwaddr(hw);
858 	}
859 }
860 
861 static int
862 virtio_mac_table_set(struct virtio_hw *hw,
863 		     const struct virtio_net_ctrl_mac *uc,
864 		     const struct virtio_net_ctrl_mac *mc)
865 {
866 	struct virtio_pmd_ctrl ctrl;
867 	int err, len[2];
868 
869 	if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
870 		PMD_DRV_LOG(INFO, "host does not support mac table");
871 		return -1;
872 	}
873 
874 	ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
875 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
876 
877 	len[0] = uc->entries * RTE_ETHER_ADDR_LEN + sizeof(uc->entries);
878 	memcpy(ctrl.data, uc, len[0]);
879 
880 	len[1] = mc->entries * RTE_ETHER_ADDR_LEN + sizeof(mc->entries);
881 	memcpy(ctrl.data + len[0], mc, len[1]);
882 
883 	err = virtio_send_command(hw->cvq, &ctrl, len, 2);
884 	if (err != 0)
885 		PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
886 	return err;
887 }
888 
889 static int
890 virtio_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
891 		    uint32_t index, uint32_t vmdq __rte_unused)
892 {
893 	struct virtio_hw *hw = dev->data->dev_private;
894 	const struct rte_ether_addr *addrs = dev->data->mac_addrs;
895 	unsigned int i;
896 	struct virtio_net_ctrl_mac *uc, *mc;
897 
898 	if (index >= VIRTIO_MAX_MAC_ADDRS) {
899 		PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
900 		return -EINVAL;
901 	}
902 
903 	uc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
904 		sizeof(uc->entries));
905 	uc->entries = 0;
906 	mc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
907 		sizeof(mc->entries));
908 	mc->entries = 0;
909 
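	/*
	 * Rebuild both MAC filter tables from the current address list,
	 * substituting the new address at the requested index, and classify
	 * each entry as unicast or multicast.
	 */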
910 	for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
911 		const struct rte_ether_addr *addr
912 			= (i == index) ? mac_addr : addrs + i;
913 		struct virtio_net_ctrl_mac *tbl
914 			= rte_is_multicast_ether_addr(addr) ? mc : uc;
915 
916 		memcpy(&tbl->macs[tbl->entries++], addr, RTE_ETHER_ADDR_LEN);
917 	}
918 
919 	return virtio_mac_table_set(hw, uc, mc);
920 }
921 
922 static void
923 virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
924 {
925 	struct virtio_hw *hw = dev->data->dev_private;
926 	struct rte_ether_addr *addrs = dev->data->mac_addrs;
927 	struct virtio_net_ctrl_mac *uc, *mc;
928 	unsigned int i;
929 
930 	if (index >= VIRTIO_MAX_MAC_ADDRS) {
931 		PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
932 		return;
933 	}
934 
935 	uc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
936 		sizeof(uc->entries));
937 	uc->entries = 0;
938 	mc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
939 		sizeof(mc->entries));
940 	mc->entries = 0;
941 
942 	for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
943 		struct virtio_net_ctrl_mac *tbl;
944 
945 		if (i == index || rte_is_zero_ether_addr(addrs + i))
946 			continue;
947 
948 		tbl = rte_is_multicast_ether_addr(addrs + i) ? mc : uc;
949 		memcpy(&tbl->macs[tbl->entries++], addrs + i,
950 			RTE_ETHER_ADDR_LEN);
951 	}
952 
953 	virtio_mac_table_set(hw, uc, mc);
954 }
955 
956 static int
957 virtio_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
958 {
959 	struct virtio_hw *hw = dev->data->dev_private;
960 
961 	memcpy(hw->mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
962 
963 	/* Use atomic update if available */
964 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
965 		struct virtio_pmd_ctrl ctrl;
966 		int len = RTE_ETHER_ADDR_LEN;
967 
968 		ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
969 		ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
970 
971 		memcpy(ctrl.data, mac_addr, RTE_ETHER_ADDR_LEN);
972 		return virtio_send_command(hw->cvq, &ctrl, &len, 1);
973 	}
974 
975 	if (!virtio_with_feature(hw, VIRTIO_NET_F_MAC))
976 		return -ENOTSUP;
977 
978 	virtio_set_hwaddr(hw);
979 	return 0;
980 }
981 
982 #define CLB_VAL_IDX 0
983 #define CLB_MSK_IDX 1
984 #define CLB_MATCH_IDX 2
985 static int
986 virtio_monitor_callback(const uint64_t value,
987 		const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])
988 {
989 	const uint64_t m = opaque[CLB_MSK_IDX];
990 	const uint64_t v = opaque[CLB_VAL_IDX];
991 	const uint64_t c = opaque[CLB_MATCH_IDX];
992 
993 	if (c)
994 		return (value & m) == v ? -1 : 0;
995 	else
996 		return (value & m) == v ? 0 : -1;
997 }
998 
999 static int
1000 virtio_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
1001 {
1002 	struct virtnet_rx *rxvq = rx_queue;
1003 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1004 	struct virtio_hw *hw;
1005 
1006 	if (vq == NULL)
1007 		return -EINVAL;
1008 
1009 	hw = vq->hw;
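	/*
	 * Packed ring: monitor the flags of the next used descriptor and wake
	 * when its AVAIL/USED bits match the current wrap counter, i.e. a new
	 * used descriptor is pending.
	 * Split ring: monitor the used index and wake when it differs from the
	 * last consumed index.
	 */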
1010 	if (virtio_with_packed_queue(hw)) {
1011 		struct vring_packed_desc *desc;
1012 		desc = vq->vq_packed.ring.desc;
1013 		pmc->addr = &desc[vq->vq_used_cons_idx].flags;
1014 		if (vq->vq_packed.used_wrap_counter)
1015 			pmc->opaque[CLB_VAL_IDX] =
1016 						VRING_PACKED_DESC_F_AVAIL_USED;
1017 		else
1018 			pmc->opaque[CLB_VAL_IDX] = 0;
1019 		pmc->opaque[CLB_MSK_IDX] = VRING_PACKED_DESC_F_AVAIL_USED;
1020 		pmc->opaque[CLB_MATCH_IDX] = 1;
1021 		pmc->size = sizeof(desc[vq->vq_used_cons_idx].flags);
1022 	} else {
1023 		pmc->addr = &vq->vq_split.ring.used->idx;
1024 		pmc->opaque[CLB_VAL_IDX] = vq->vq_used_cons_idx
1025 					& (vq->vq_nentries - 1);
1026 		pmc->opaque[CLB_MSK_IDX] = vq->vq_nentries - 1;
1027 		pmc->opaque[CLB_MATCH_IDX] = 0;
1028 		pmc->size = sizeof(vq->vq_split.ring.used->idx);
1029 	}
1030 	pmc->fn = virtio_monitor_callback;
1031 
1032 	return 0;
1033 }
1034 
1035 static int
1036 virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1037 {
1038 	struct virtio_hw *hw = dev->data->dev_private;
1039 	struct virtio_pmd_ctrl ctrl;
1040 	int len;
1041 
1042 	if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
1043 		return -ENOTSUP;
1044 
1045 	ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN;
1046 	ctrl.hdr.cmd = on ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
1047 	memcpy(ctrl.data, &vlan_id, sizeof(vlan_id));
1048 	len = sizeof(vlan_id);
1049 
1050 	return virtio_send_command(hw->cvq, &ctrl, &len, 1);
1051 }
1052 
1053 static int
1054 virtio_intr_unmask(struct rte_eth_dev *dev)
1055 {
1056 	struct virtio_hw *hw = dev->data->dev_private;
1057 
1058 	if (rte_intr_ack(dev->intr_handle) < 0)
1059 		return -1;
1060 
1061 	if (VIRTIO_OPS(hw)->intr_detect)
1062 		VIRTIO_OPS(hw)->intr_detect(hw);
1063 
1064 	return 0;
1065 }
1066 
1067 static int
1068 virtio_intr_enable(struct rte_eth_dev *dev)
1069 {
1070 	struct virtio_hw *hw = dev->data->dev_private;
1071 
1072 	if (rte_intr_enable(dev->intr_handle) < 0)
1073 		return -1;
1074 
1075 	if (VIRTIO_OPS(hw)->intr_detect)
1076 		VIRTIO_OPS(hw)->intr_detect(hw);
1077 
1078 	return 0;
1079 }
1080 
1081 static int
1082 virtio_intr_disable(struct rte_eth_dev *dev)
1083 {
1084 	struct virtio_hw *hw = dev->data->dev_private;
1085 
1086 	if (rte_intr_disable(dev->intr_handle) < 0)
1087 		return -1;
1088 
1089 	if (VIRTIO_OPS(hw)->intr_detect)
1090 		VIRTIO_OPS(hw)->intr_detect(hw);
1091 
1092 	return 0;
1093 }
1094 
1095 static int
1096 virtio_ethdev_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
1097 {
1098 	uint64_t host_features;
1099 
1100 	/* Prepare guest_features: features that the driver wants to support */
1101 	PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %" PRIx64,
1102 		req_features);
1103 
1104 	/* Read device(host) feature bits */
1105 	host_features = VIRTIO_OPS(hw)->get_features(hw);
1106 	PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
1107 		host_features);
1108 
1109 	/* If supported, ensure MTU value is valid before acknowledging it. */
1110 	if (host_features & req_features & (1ULL << VIRTIO_NET_F_MTU)) {
1111 		struct virtio_net_config config;
1112 
1113 		virtio_read_dev_config(hw,
1114 			offsetof(struct virtio_net_config, mtu),
1115 			&config.mtu, sizeof(config.mtu));
1116 
1117 		if (config.mtu < RTE_ETHER_MIN_MTU)
1118 			req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
1119 	}
1120 
1121 	/*
1122 	 * Negotiate features: the subset of device feature bits accepted by the
1123 	 * driver is written back as the guest feature bits.
1124 	 */
1125 	hw->guest_features = req_features;
1126 	hw->guest_features = virtio_negotiate_features(hw, host_features);
1127 	PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
1128 		hw->guest_features);
1129 
1130 	if (VIRTIO_OPS(hw)->features_ok(hw) < 0)
1131 		return -1;
1132 
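	/*
	 * For modern (VERSION_1) devices, FEATURES_OK must be set and then read
	 * back to confirm the device accepted the negotiated feature set.
	 */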
1133 	if (virtio_with_feature(hw, VIRTIO_F_VERSION_1)) {
1134 		virtio_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
1135 
1136 		if (!(virtio_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
1137 			PMD_INIT_LOG(ERR, "Failed to set FEATURES_OK status!");
1138 			return -1;
1139 		}
1140 	}
1141 
1142 	hw->req_guest_features = req_features;
1143 
1144 	return 0;
1145 }
1146 
1147 int
1148 virtio_dev_pause(struct rte_eth_dev *dev)
1149 {
1150 	struct virtio_hw *hw = dev->data->dev_private;
1151 
1152 	rte_spinlock_lock(&hw->state_lock);
1153 
1154 	if (hw->started == 0) {
1155 		/* Device is already stopped. */
1156 		rte_spinlock_unlock(&hw->state_lock);
1157 		return -1;
1158 	}
1159 	hw->started = 0;
1160 	/*
1161 	 * Prevent the worker threads from touching the queues to avoid contention;
1162 	 * 1 ms should be enough for any ongoing Tx function to finish.
1163 	 */
1164 	rte_delay_ms(1);
1165 	return 0;
1166 }
1167 
1168 /*
1169  * Recover hw state to let the worker threads continue.
1170  */
1171 void
1172 virtio_dev_resume(struct rte_eth_dev *dev)
1173 {
1174 	struct virtio_hw *hw = dev->data->dev_private;
1175 
1176 	hw->started = 1;
1177 	rte_spinlock_unlock(&hw->state_lock);
1178 }
1179 
1180 /*
1181  * Should be called only after device is paused.
1182  */
1183 int
1184 virtio_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts,
1185 		int nb_pkts)
1186 {
1187 	struct virtio_hw *hw = dev->data->dev_private;
1188 	struct virtnet_tx *txvq = dev->data->tx_queues[0];
1189 	int ret;
1190 
1191 	hw->inject_pkts = tx_pkts;
1192 	ret = dev->tx_pkt_burst(txvq, tx_pkts, nb_pkts);
1193 	hw->inject_pkts = NULL;
1194 
1195 	return ret;
1196 }
1197 
1198 static void
1199 virtio_notify_peers(struct rte_eth_dev *dev)
1200 {
1201 	struct virtio_hw *hw = dev->data->dev_private;
1202 	struct virtnet_rx *rxvq;
1203 	struct rte_mbuf *rarp_mbuf;
1204 
1205 	if (!dev->data->rx_queues)
1206 		return;
1207 
1208 	rxvq = dev->data->rx_queues[0];
1209 	if (!rxvq)
1210 		return;
1211 
1212 	rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool,
1213 			(struct rte_ether_addr *)hw->mac_addr);
1214 	if (rarp_mbuf == NULL) {
1215 		PMD_DRV_LOG(ERR, "failed to make RARP packet.");
1216 		return;
1217 	}
1218 
1219 	/* If virtio port just stopped, no need to send RARP */
1220 	if (virtio_dev_pause(dev) < 0) {
1221 		rte_pktmbuf_free(rarp_mbuf);
1222 		return;
1223 	}
1224 
1225 	virtio_inject_pkts(dev, &rarp_mbuf, 1);
1226 	virtio_dev_resume(dev);
1227 }
1228 
1229 static void
1230 virtio_ack_link_announce(struct rte_eth_dev *dev)
1231 {
1232 	struct virtio_hw *hw = dev->data->dev_private;
1233 	struct virtio_pmd_ctrl ctrl;
1234 
1235 	ctrl.hdr.class = VIRTIO_NET_CTRL_ANNOUNCE;
1236 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_ANNOUNCE_ACK;
1237 
1238 	virtio_send_command(hw->cvq, &ctrl, NULL, 0);
1239 }
1240 
1241 /*
1242  * Process the virtio config changed interrupt. Invoke the LSC callback
1243  * if the link state changed, and generate a gratuitous RARP packet if
1244  * the status indicates an ANNOUNCE.
1245  */
1246 void
1247 virtio_interrupt_handler(void *param)
1248 {
1249 	struct rte_eth_dev *dev = param;
1250 	struct virtio_hw *hw = dev->data->dev_private;
1251 	uint8_t isr;
1252 	uint16_t status;
1253 
1254 	/* Read interrupt status which clears interrupt */
1255 	isr = virtio_get_isr(hw);
1256 	PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);
1257 
1258 	if (virtio_intr_unmask(dev) < 0)
1259 		PMD_DRV_LOG(ERR, "interrupt enable failed");
1260 
1261 	if (isr & VIRTIO_ISR_CONFIG) {
1262 		if (virtio_dev_link_update(dev, 0) == 0)
1263 			rte_eth_dev_callback_process(dev,
1264 						     RTE_ETH_EVENT_INTR_LSC,
1265 						     NULL);
1266 
1267 		if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
1268 			virtio_read_dev_config(hw,
1269 				offsetof(struct virtio_net_config, status),
1270 				&status, sizeof(status));
1271 			if (status & VIRTIO_NET_S_ANNOUNCE) {
1272 				virtio_notify_peers(dev);
1273 				if (hw->cvq)
1274 					virtio_ack_link_announce(dev);
1275 			}
1276 		}
1277 	}
1278 }
1279 
1280 /* set rx and tx handlers according to what is supported */
1281 static void
1282 set_rxtx_funcs(struct rte_eth_dev *eth_dev)
1283 {
1284 	struct virtio_hw *hw = eth_dev->data->dev_private;
1285 
1286 	eth_dev->tx_pkt_prepare = virtio_xmit_pkts_prepare;
1287 	if (virtio_with_packed_queue(hw)) {
1288 		PMD_INIT_LOG(INFO,
1289 			"virtio: using packed ring %s Tx path on port %u",
1290 			hw->use_vec_tx ? "vectorized" : "standard",
1291 			eth_dev->data->port_id);
1292 		if (hw->use_vec_tx)
1293 			eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed_vec;
1294 		else
1295 			eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
1296 	} else {
1297 		if (hw->use_inorder_tx) {
1298 			PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
1299 				eth_dev->data->port_id);
1300 			eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
1301 		} else {
1302 			PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
1303 				eth_dev->data->port_id);
1304 			eth_dev->tx_pkt_burst = virtio_xmit_pkts;
1305 		}
1306 	}
1307 
1308 	if (virtio_with_packed_queue(hw)) {
1309 		if (hw->use_vec_rx) {
1310 			PMD_INIT_LOG(INFO,
1311 				"virtio: using packed ring vectorized Rx path on port %u",
1312 				eth_dev->data->port_id);
1313 			eth_dev->rx_pkt_burst =
1314 				&virtio_recv_pkts_packed_vec;
1315 		} else if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1316 			PMD_INIT_LOG(INFO,
1317 				"virtio: using packed ring mergeable buffer Rx path on port %u",
1318 				eth_dev->data->port_id);
1319 			eth_dev->rx_pkt_burst =
1320 				&virtio_recv_mergeable_pkts_packed;
1321 		} else {
1322 			PMD_INIT_LOG(INFO,
1323 				"virtio: using packed ring standard Rx path on port %u",
1324 				eth_dev->data->port_id);
1325 			eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
1326 		}
1327 	} else {
1328 		if (hw->use_vec_rx) {
1329 			PMD_INIT_LOG(INFO, "virtio: using vectorized Rx path on port %u",
1330 				eth_dev->data->port_id);
1331 			eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
1332 		} else if (hw->use_inorder_rx) {
1333 			PMD_INIT_LOG(INFO,
1334 				"virtio: using inorder Rx path on port %u",
1335 				eth_dev->data->port_id);
1336 			eth_dev->rx_pkt_burst =	&virtio_recv_pkts_inorder;
1337 		} else if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1338 			PMD_INIT_LOG(INFO,
1339 				"virtio: using mergeable buffer Rx path on port %u",
1340 				eth_dev->data->port_id);
1341 			eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
1342 		} else {
1343 			PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
1344 				eth_dev->data->port_id);
1345 			eth_dev->rx_pkt_burst = &virtio_recv_pkts;
1346 		}
1347 	}
1348 
1349 }
1350 
1351 /* Only 1:1 queue/interrupt mapping is supported so far.
1352  * TODO: support n:1 queue/interrupt mapping when the number of available
1353  * interrupt vectors is limited (fewer than N + 1).
1354  */
1355 static int
1356 virtio_queues_bind_intr(struct rte_eth_dev *dev)
1357 {
1358 	uint32_t i;
1359 	struct virtio_hw *hw = dev->data->dev_private;
1360 
1361 	PMD_INIT_LOG(INFO, "queue/interrupt binding");
1362 	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
1363 		if (rte_intr_vec_list_index_set(dev->intr_handle, i,
1364 						       i + 1))
1365 			return -rte_errno;
1366 		if (VIRTIO_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
1367 						 VIRTIO_MSI_NO_VECTOR) {
1368 			PMD_DRV_LOG(ERR, "failed to set queue vector");
1369 			return -EBUSY;
1370 		}
1371 	}
1372 
1373 	return 0;
1374 }
1375 
1376 static void
1377 virtio_queues_unbind_intr(struct rte_eth_dev *dev)
1378 {
1379 	uint32_t i;
1380 	struct virtio_hw *hw = dev->data->dev_private;
1381 
1382 	PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
1383 	for (i = 0; i < dev->data->nb_rx_queues; ++i)
1384 		VIRTIO_OPS(hw)->set_queue_irq(hw,
1385 					     hw->vqs[i * VTNET_CQ],
1386 					     VIRTIO_MSI_NO_VECTOR);
1387 }
1388 
1389 static int
1390 virtio_configure_intr(struct rte_eth_dev *dev)
1391 {
1392 	struct virtio_hw *hw = dev->data->dev_private;
1393 
1394 	if (!rte_intr_cap_multiple(dev->intr_handle)) {
1395 		PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
1396 		return -ENOTSUP;
1397 	}
1398 
1399 	if (rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues)) {
1400 		PMD_INIT_LOG(ERR, "Fail to create eventfd");
1401 		return -1;
1402 	}
1403 
1404 	if (rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec",
1405 				    hw->max_queue_pairs)) {
1406 		PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",
1407 			     hw->max_queue_pairs);
1408 		return -ENOMEM;
1409 	}
1410 
1411 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
1412 		/* Re-register callback to update max_intr */
1413 		rte_intr_callback_unregister(dev->intr_handle,
1414 					     virtio_interrupt_handler,
1415 					     dev);
1416 		rte_intr_callback_register(dev->intr_handle,
1417 					   virtio_interrupt_handler,
1418 					   dev);
1419 	}
1420 
1421 	/* DO NOT remove this! This call enables MSI-X; without it, QEMU
1422 	 * hits a SIGSEGV when DRIVER_OK is sent.
1423 	 * For legacy devices, it must also be done before queue/vector binding
1424 	 * so that the config size grows from 20 to 24 bytes; otherwise writes to
1425 	 * VIRTIO_MSI_QUEUE_VECTOR (offset 22) are ignored.
1426 	 */
1427 	if (virtio_intr_enable(dev) < 0) {
1428 		PMD_DRV_LOG(ERR, "interrupt enable failed");
1429 		return -1;
1430 	}
1431 
1432 	if (virtio_queues_bind_intr(dev) < 0) {
1433 		PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");
1434 		return -1;
1435 	}
1436 
1437 	return 0;
1438 }
1439 
1440 static void
1441 virtio_get_speed_duplex(struct rte_eth_dev *eth_dev,
1442 			struct rte_eth_link *link)
1443 {
1444 	struct virtio_hw *hw = eth_dev->data->dev_private;
1445 	struct virtio_net_config *config;
1446 	struct virtio_net_config local_config;
1447 
1448 	config = &local_config;
1449 	virtio_read_dev_config(hw,
1450 		offsetof(struct virtio_net_config, speed),
1451 		&config->speed, sizeof(config->speed));
1452 	virtio_read_dev_config(hw,
1453 		offsetof(struct virtio_net_config, duplex),
1454 		&config->duplex, sizeof(config->duplex));
1455 	hw->speed = config->speed;
1456 	hw->duplex = config->duplex;
1457 	if (link != NULL) {
1458 		link->link_duplex = hw->duplex;
1459 		link->link_speed  = hw->speed;
1460 	}
1461 	PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
1462 		     hw->speed, hw->duplex);
1463 }
1464 
1465 static uint64_t
1466 ethdev_to_virtio_rss_offloads(uint64_t ethdev_hash_types)
1467 {
1468 	uint64_t virtio_hash_types = 0;
1469 
1470 	if (ethdev_hash_types & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
1471 				RTE_ETH_RSS_NONFRAG_IPV4_OTHER))
1472 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_IPV4;
1473 
1474 	if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
1475 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_TCPV4;
1476 
1477 	if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
1478 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_UDPV4;
1479 
1480 	if (ethdev_hash_types & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
1481 				RTE_ETH_RSS_NONFRAG_IPV6_OTHER))
1482 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_IPV6;
1483 
1484 	if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
1485 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_TCPV6;
1486 
1487 	if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
1488 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_UDPV6;
1489 
1490 	if (ethdev_hash_types & RTE_ETH_RSS_IPV6_EX)
1491 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_IP_EX;
1492 
1493 	if (ethdev_hash_types & RTE_ETH_RSS_IPV6_TCP_EX)
1494 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_TCP_EX;
1495 
1496 	if (ethdev_hash_types & RTE_ETH_RSS_IPV6_UDP_EX)
1497 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_UDP_EX;
1498 
1499 	return virtio_hash_types;
1500 }
1501 
1502 static uint64_t
1503 virtio_to_ethdev_rss_offloads(uint64_t virtio_hash_types)
1504 {
1505 	uint64_t rss_offloads = 0;
1506 
1507 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_IPV4)
1508 		rss_offloads |= RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
1509 			RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
1510 
1511 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_TCPV4)
1512 		rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
1513 
1514 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_UDPV4)
1515 		rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
1516 
1517 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_IPV6)
1518 		rss_offloads |= RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
1519 			RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
1520 
1521 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_TCPV6)
1522 		rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
1523 
1524 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_UDPV6)
1525 		rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
1526 
1527 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_IP_EX)
1528 		rss_offloads |= RTE_ETH_RSS_IPV6_EX;
1529 
1530 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_TCP_EX)
1531 		rss_offloads |= RTE_ETH_RSS_IPV6_TCP_EX;
1532 
1533 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_UDP_EX)
1534 		rss_offloads |= RTE_ETH_RSS_IPV6_UDP_EX;
1535 
1536 	return rss_offloads;
1537 }
1538 
1539 static int
1540 virtio_dev_get_rss_config(struct virtio_hw *hw, uint32_t *rss_hash_types)
1541 {
1542 	struct virtio_net_config local_config;
1543 	struct virtio_net_config *config = &local_config;
1544 
1545 	virtio_read_dev_config(hw,
1546 			offsetof(struct virtio_net_config, rss_max_key_size),
1547 			&config->rss_max_key_size,
1548 			sizeof(config->rss_max_key_size));
1549 	if (config->rss_max_key_size < VIRTIO_NET_RSS_KEY_SIZE) {
1550 		PMD_INIT_LOG(ERR, "Invalid device RSS max key size (%u)",
1551 				config->rss_max_key_size);
1552 		return -EINVAL;
1553 	}
1554 
1555 	virtio_read_dev_config(hw,
1556 			offsetof(struct virtio_net_config,
1557 				rss_max_indirection_table_length),
1558 			&config->rss_max_indirection_table_length,
1559 			sizeof(config->rss_max_indirection_table_length));
1560 	if (config->rss_max_indirection_table_length < VIRTIO_NET_RSS_RETA_SIZE) {
1561 		PMD_INIT_LOG(ERR, "Invalid device RSS max reta size (%u)",
1562 				config->rss_max_indirection_table_length);
1563 		return -EINVAL;
1564 	}
1565 
1566 	virtio_read_dev_config(hw,
1567 			offsetof(struct virtio_net_config, supported_hash_types),
1568 			&config->supported_hash_types,
1569 			sizeof(config->supported_hash_types));
1570 	if ((config->supported_hash_types & VIRTIO_NET_HASH_TYPE_MASK) == 0) {
1571 		PMD_INIT_LOG(ERR, "Invalid device RSS hash types (0x%x)",
1572 				config->supported_hash_types);
1573 		return -EINVAL;
1574 	}
1575 
1576 	*rss_hash_types = config->supported_hash_types & VIRTIO_NET_HASH_TYPE_MASK;
1577 
1578 	PMD_INIT_LOG(DEBUG, "Device RSS config:");
1579 	PMD_INIT_LOG(DEBUG, "\t-Max key size: %u", config->rss_max_key_size);
1580 	PMD_INIT_LOG(DEBUG, "\t-Max reta size: %u", config->rss_max_indirection_table_length);
1581 	PMD_INIT_LOG(DEBUG, "\t-Supported hash types: 0x%x", *rss_hash_types);
1582 
1583 	return 0;
1584 }
1585 
1586 static int
1587 virtio_dev_rss_hash_update(struct rte_eth_dev *dev,
1588 		struct rte_eth_rss_conf *rss_conf)
1589 {
1590 	struct virtio_hw *hw = dev->data->dev_private;
1591 	char old_rss_key[VIRTIO_NET_RSS_KEY_SIZE];
1592 	uint32_t old_hash_types;
1593 	uint16_t nb_queues;
1594 	int ret;
1595 
1596 	if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
1597 		return -ENOTSUP;
1598 
1599 	if (rss_conf->rss_hf & ~virtio_to_ethdev_rss_offloads(VIRTIO_NET_HASH_TYPE_MASK))
1600 		return -EINVAL;
1601 
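	/*
	 * Save the current hash types (and the key, if it is going to be
	 * replaced) so they can be restored if the device rejects the new
	 * configuration.
	 */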
1602 	old_hash_types = hw->rss_hash_types;
1603 	hw->rss_hash_types = ethdev_to_virtio_rss_offloads(rss_conf->rss_hf);
1604 
1605 	if (rss_conf->rss_key && rss_conf->rss_key_len) {
1606 		if (rss_conf->rss_key_len != VIRTIO_NET_RSS_KEY_SIZE) {
1607 			PMD_INIT_LOG(ERR, "Driver only supports %u RSS key length",
1608 					VIRTIO_NET_RSS_KEY_SIZE);
1609 			ret = -EINVAL;
1610 			goto restore_types;
1611 		}
1612 		memcpy(old_rss_key, hw->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
1613 		memcpy(hw->rss_key, rss_conf->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
1614 	}
1615 
1616 	nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1617 	ret = virtio_set_multiple_queues_rss(dev, nb_queues);
1618 	if (ret < 0) {
1619 		PMD_INIT_LOG(ERR, "Failed to apply new RSS config to the device");
1620 		goto restore_key;
1621 	}
1622 
1623 	return 0;
1624 restore_key:
1625 	if (rss_conf->rss_key && rss_conf->rss_key_len)
1626 		memcpy(hw->rss_key, old_rss_key, VIRTIO_NET_RSS_KEY_SIZE);
1627 restore_types:
1628 	hw->rss_hash_types = old_hash_types;
1629 
1630 	return ret;
1631 }
1632 
1633 static int
1634 virtio_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1635 		struct rte_eth_rss_conf *rss_conf)
1636 {
1637 	struct virtio_hw *hw = dev->data->dev_private;
1638 
1639 	if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
1640 		return -ENOTSUP;
1641 
1642 	if (rss_conf->rss_key && rss_conf->rss_key_len >= VIRTIO_NET_RSS_KEY_SIZE)
1643 		memcpy(rss_conf->rss_key, hw->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
1644 	rss_conf->rss_key_len = VIRTIO_NET_RSS_KEY_SIZE;
1645 	rss_conf->rss_hf = virtio_to_ethdev_rss_offloads(hw->rss_hash_types);
1646 
1647 	return 0;
1648 }
1649 
1650 static int virtio_dev_rss_reta_update(struct rte_eth_dev *dev,
1651 			 struct rte_eth_rss_reta_entry64 *reta_conf,
1652 			 uint16_t reta_size)
1653 {
1654 	struct virtio_hw *hw = dev->data->dev_private;
1655 	uint16_t nb_queues;
1656 	uint16_t old_reta[VIRTIO_NET_RSS_RETA_SIZE];
1657 	int idx, pos, i, ret;
1658 
1659 	if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
1660 		return -ENOTSUP;
1661 
1662 	if (reta_size != VIRTIO_NET_RSS_RETA_SIZE)
1663 		return -EINVAL;
1664 
1665 	memcpy(old_reta, hw->rss_reta, sizeof(old_reta));
1666 
1667 	for (i = 0; i < reta_size; i++) {
1668 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1669 		pos = i % RTE_ETH_RETA_GROUP_SIZE;
1670 
1671 		if (((reta_conf[idx].mask >> pos) & 0x1) == 0)
1672 			continue;
1673 
1674 		hw->rss_reta[i] = reta_conf[idx].reta[pos];
1675 	}
1676 
1677 	nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1678 	ret = virtio_set_multiple_queues_rss(dev, nb_queues);
1679 	if (ret < 0) {
1680 		PMD_INIT_LOG(ERR, "Failed to apply new RETA to the device");
1681 		memcpy(hw->rss_reta, old_reta, sizeof(old_reta));
1682 	}
1683 
1684 	hw->rss_rx_queues = dev->data->nb_rx_queues;
1685 
1686 	return ret;
1687 }
1688 
1689 static int virtio_dev_rss_reta_query(struct rte_eth_dev *dev,
1690 			 struct rte_eth_rss_reta_entry64 *reta_conf,
1691 			 uint16_t reta_size)
1692 {
1693 	struct virtio_hw *hw = dev->data->dev_private;
1694 	int idx, i;
1695 
1696 	if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
1697 		return -ENOTSUP;
1698 
1699 	if (reta_size != VIRTIO_NET_RSS_RETA_SIZE)
1700 		return -EINVAL;
1701 
1702 	for (i = 0; i < reta_size; i++) {
1703 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1704 		reta_conf[idx].reta[i % RTE_ETH_RETA_GROUP_SIZE] = hw->rss_reta[i];
1705 	}
1706 
1707 	return 0;
1708 }
1709 
1710 /*
1711  * The default RSS hash key is the default key used by Intel IXGBE
1712  * devices. The application can update it with any 40-byte key
1713  * value.
1714  */
1715 static uint8_t rss_intel_key[VIRTIO_NET_RSS_KEY_SIZE] = {
1716 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1717 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1718 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1719 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1720 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1721 };
1722 
1723 static int
1724 virtio_dev_rss_init(struct rte_eth_dev *eth_dev)
1725 {
1726 	struct virtio_hw *hw = eth_dev->data->dev_private;
1727 	uint16_t nb_rx_queues = eth_dev->data->nb_rx_queues;
1728 	struct rte_eth_rss_conf *rss_conf;
1729 	int ret, i;
1730 
1731 	if (!nb_rx_queues) {
1732 		PMD_INIT_LOG(ERR, "Cannot init RSS if no Rx queues");
1733 		return -EINVAL;
1734 	}
1735 
1736 	rss_conf = &eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
1737 
1738 	ret = virtio_dev_get_rss_config(hw, &hw->rss_hash_types);
1739 	if (ret)
1740 		return ret;
1741 
1742 	if (rss_conf->rss_hf) {
1743 		/*  Ensure requested hash types are supported by the device */
1744 		if (rss_conf->rss_hf & ~virtio_to_ethdev_rss_offloads(hw->rss_hash_types))
1745 			return -EINVAL;
1746 
1747 		hw->rss_hash_types = ethdev_to_virtio_rss_offloads(rss_conf->rss_hf);
1748 	}
1749 
1750 	if (!hw->rss_key) {
1751 		/* Setup default RSS key if not already setup by the user */
1752 		hw->rss_key = rte_malloc_socket("rss_key",
1753 				VIRTIO_NET_RSS_KEY_SIZE, 0,
1754 				eth_dev->device->numa_node);
1755 		if (!hw->rss_key) {
1756 			PMD_INIT_LOG(ERR, "Failed to allocate RSS key");
1757 			return -1;
1758 		}
1759 	}
1760 
1761 	if (rss_conf->rss_key && rss_conf->rss_key_len) {
1762 		if (rss_conf->rss_key_len != VIRTIO_NET_RSS_KEY_SIZE) {
1763 			PMD_INIT_LOG(ERR, "Driver only supports %u RSS key length",
1764 					VIRTIO_NET_RSS_KEY_SIZE);
1765 			return -EINVAL;
1766 		}
1767 		memcpy(hw->rss_key, rss_conf->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
1768 	} else {
1769 		memcpy(hw->rss_key, rss_intel_key, VIRTIO_NET_RSS_KEY_SIZE);
1770 	}
1771 
1772 	if (!hw->rss_reta) {
1773 		/* Set up the default RSS RETA if not already provided by the user */
1774 		hw->rss_reta = rte_zmalloc_socket("rss_reta",
1775 				VIRTIO_NET_RSS_RETA_SIZE * sizeof(uint16_t), 0,
1776 				eth_dev->device->numa_node);
1777 		if (!hw->rss_reta) {
1778 			PMD_INIT_LOG(ERR, "Failed to allocate RSS reta");
1779 			return -1;
1780 		}
1781 
1782 		hw->rss_rx_queues = 0;
1783 	}
1784 
1785 	/* Re-initialize the RSS reta if the number of RX queues has changed */
1786 	if (hw->rss_rx_queues != nb_rx_queues) {
1787 		for (i = 0; i < VIRTIO_NET_RSS_RETA_SIZE; i++)
1788 			hw->rss_reta[i] = i % nb_rx_queues;
1789 		hw->rss_rx_queues = nb_rx_queues;
1790 	}
1791 
1792 	return 0;
1793 }
1794 
1795 #define DUPLEX_UNKNOWN   0xff
1796 /* reset device and renegotiate features if needed */
1797 static int
1798 virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
1799 {
1800 	struct virtio_hw *hw = eth_dev->data->dev_private;
1801 	struct virtio_net_config *config;
1802 	struct virtio_net_config local_config;
1803 	int ret;
1804 
1805 	/* Reset the device, although this is not strictly necessary at startup */
1806 	virtio_reset(hw);
1807 
1808 	if (hw->vqs) {
1809 		virtio_dev_free_mbufs(eth_dev);
1810 		virtio_free_queues(hw);
1811 	}
1812 
1813 	/* Tell the host we've noticed this device. */
1814 	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
1815 
1816 	/* Tell the host we know how to drive the device. */
1817 	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
1818 	if (virtio_ethdev_negotiate_features(hw, req_features) < 0)
1819 		return -1;
1820 
1821 	hw->weak_barriers = !virtio_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
1822 
1823 	/* If the host does not support both the status feature and MSI-X, disable LSC */
1824 	if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS) && hw->intr_lsc)
1825 		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
1826 	else
1827 		eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
1828 
1829 	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1830 
1831 	/* Set up the Rx header size for the device */
1832 	if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
1833 	    virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
1834 	    virtio_with_packed_queue(hw))
1835 		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1836 	else
1837 		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
1838 
1839 	/* Copy the permanent MAC address to virtio_hw */
1840 	virtio_get_hwaddr(hw);
1841 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
1842 			&eth_dev->data->mac_addrs[0]);
1843 	PMD_INIT_LOG(DEBUG,
1844 		     "PORT MAC: " RTE_ETHER_ADDR_PRT_FMT,
1845 		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
1846 		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
1847 
1848 	hw->get_speed_via_feat = hw->speed == RTE_ETH_SPEED_NUM_UNKNOWN &&
1849 			     virtio_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX);
1850 	if (hw->get_speed_via_feat)
1851 		virtio_get_speed_duplex(eth_dev, NULL);
1852 	if (hw->duplex == DUPLEX_UNKNOWN)
1853 		hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;
1854 	PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
1855 		hw->speed, hw->duplex);
1856 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
1857 		config = &local_config;
1858 
1859 		virtio_read_dev_config(hw,
1860 			offsetof(struct virtio_net_config, mac),
1861 			&config->mac, sizeof(config->mac));
1862 
1863 		if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
1864 			virtio_read_dev_config(hw,
1865 				offsetof(struct virtio_net_config, status),
1866 				&config->status, sizeof(config->status));
1867 		} else {
1868 			PMD_INIT_LOG(DEBUG,
1869 				     "VIRTIO_NET_F_STATUS is not supported");
1870 			config->status = 0;
1871 		}
1872 
1873 		if (virtio_with_feature(hw, VIRTIO_NET_F_MQ) ||
1874 				virtio_with_feature(hw, VIRTIO_NET_F_RSS)) {
1875 			virtio_read_dev_config(hw,
1876 				offsetof(struct virtio_net_config, max_virtqueue_pairs),
1877 				&config->max_virtqueue_pairs,
1878 				sizeof(config->max_virtqueue_pairs));
1879 		} else {
1880 			PMD_INIT_LOG(DEBUG,
1881 				     "Neither VIRTIO_NET_F_MQ nor VIRTIO_NET_F_RSS is supported");
1882 			config->max_virtqueue_pairs = 1;
1883 		}
1884 
1885 		hw->max_queue_pairs = config->max_virtqueue_pairs;
1886 
1887 		if (virtio_with_feature(hw, VIRTIO_NET_F_MTU)) {
1888 			virtio_read_dev_config(hw,
1889 				offsetof(struct virtio_net_config, mtu),
1890 				&config->mtu,
1891 				sizeof(config->mtu));
1892 
1893 			/*
1894 			 * MTU value has already been checked at negotiation
1895 			 * time, but check again in case it has changed since
1896 			 * then, which should not happen.
1897 			 */
1898 			if (config->mtu < RTE_ETHER_MIN_MTU) {
1899 				PMD_INIT_LOG(ERR, "invalid max MTU value (%u)",
1900 						config->mtu);
1901 				return -1;
1902 			}
1903 
1904 			hw->max_mtu = config->mtu;
1905 			/* Set initial MTU to maximum one supported by vhost */
1906 			eth_dev->data->mtu = config->mtu;
1907 
1908 		} else {
1909 			hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -
1910 				VLAN_TAG_LEN - hw->vtnet_hdr_size;
1911 		}
1912 
1913 		hw->rss_hash_types = 0;
1914 		if (virtio_with_feature(hw, VIRTIO_NET_F_RSS))
1915 			if (virtio_dev_rss_init(eth_dev))
1916 				return -1;
1917 
1918 		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
1919 				config->max_virtqueue_pairs);
1920 		PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
1921 		PMD_INIT_LOG(DEBUG,
1922 				"PORT MAC: " RTE_ETHER_ADDR_PRT_FMT,
1923 				config->mac[0], config->mac[1],
1924 				config->mac[2], config->mac[3],
1925 				config->mac[4], config->mac[5]);
1926 	} else {
1927 		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
1928 		hw->max_queue_pairs = 1;
1929 		hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -
1930 			VLAN_TAG_LEN - hw->vtnet_hdr_size;
1931 	}
1932 
1933 	ret = virtio_alloc_queues(eth_dev);
1934 	if (ret < 0)
1935 		return ret;
1936 
1937 	if (eth_dev->data->dev_conf.intr_conf.rxq) {
1938 		if (virtio_configure_intr(eth_dev) < 0) {
1939 			PMD_INIT_LOG(ERR, "failed to configure interrupt");
1940 			virtio_free_queues(hw);
1941 			return -1;
1942 		}
1943 	}
1944 
1945 	virtio_reinit_complete(hw);
1946 
1947 	return 0;
1948 }
1949 
1950 /*
1951  * This function is based on the probe() function in virtio_pci.c.
1952  * It returns 0 on success.
1953  */
1954 int
1955 eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
1956 {
1957 	struct virtio_hw *hw = eth_dev->data->dev_private;
1958 	uint32_t speed = RTE_ETH_SPEED_NUM_UNKNOWN;
1959 	int vectorized = 0;
1960 	int ret;
1961 
1962 	if (sizeof(struct virtio_net_hdr_mrg_rxbuf) > RTE_PKTMBUF_HEADROOM) {
1963 		PMD_INIT_LOG(ERR,
1964 			"Insufficient headroom: required = %d, avail = %d",
1965 			(int)sizeof(struct virtio_net_hdr_mrg_rxbuf),
1966 			RTE_PKTMBUF_HEADROOM);
1967 
1968 		return -1;
1969 	}
1970 
1971 	eth_dev->dev_ops = &virtio_eth_dev_ops;
1972 
1973 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1974 		set_rxtx_funcs(eth_dev);
1975 		return 0;
1976 	}
1977 
1978 	ret = virtio_dev_devargs_parse(eth_dev->device->devargs, &speed, &vectorized);
1979 	if (ret < 0)
1980 		return ret;
1981 	hw->speed = speed;
1982 	hw->duplex = DUPLEX_UNKNOWN;
1983 
1984 	/* Allocate memory for storing MAC addresses */
1985 	eth_dev->data->mac_addrs = rte_zmalloc("virtio",
1986 				VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN, 0);
1987 	if (eth_dev->data->mac_addrs == NULL) {
1988 		PMD_INIT_LOG(ERR,
1989 			"Failed to allocate %d bytes needed to store MAC addresses",
1990 			VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN);
1991 		return -ENOMEM;
1992 	}
1993 
1994 	rte_spinlock_init(&hw->state_lock);
1995 
1996 	if (vectorized) {
1997 		hw->use_vec_rx = 1;
1998 		hw->use_vec_tx = 1;
1999 	}
2000 
2001 	/* reset device and negotiate default features */
2002 	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
2003 	if (ret < 0)
2004 		goto err_virtio_init;
2005 
2006 	if (vectorized) {
2007 		if (!virtio_with_packed_queue(hw)) {
2008 			hw->use_vec_tx = 0;
2009 		} else {
2010 #if !defined(CC_AVX512_SUPPORT) && !defined(RTE_ARCH_ARM)
2011 			hw->use_vec_rx = 0;
2012 			hw->use_vec_tx = 0;
2013 			PMD_DRV_LOG(INFO,
2014 				"build environment does not support the packed ring vectorized path");
2015 #endif
2016 		}
2017 	}
2018 
2019 	hw->opened = 1;
2020 
2021 	return 0;
2022 
2023 err_virtio_init:
2024 	rte_free(eth_dev->data->mac_addrs);
2025 	eth_dev->data->mac_addrs = NULL;
2026 	return ret;
2027 }
2028 
2029 static uint32_t
2030 virtio_dev_speed_capa_get(uint32_t speed)
2031 {
2032 	switch (speed) {
2033 	case RTE_ETH_SPEED_NUM_10G:
2034 		return RTE_ETH_LINK_SPEED_10G;
2035 	case RTE_ETH_SPEED_NUM_20G:
2036 		return RTE_ETH_LINK_SPEED_20G;
2037 	case RTE_ETH_SPEED_NUM_25G:
2038 		return RTE_ETH_LINK_SPEED_25G;
2039 	case RTE_ETH_SPEED_NUM_40G:
2040 		return RTE_ETH_LINK_SPEED_40G;
2041 	case RTE_ETH_SPEED_NUM_50G:
2042 		return RTE_ETH_LINK_SPEED_50G;
2043 	case RTE_ETH_SPEED_NUM_56G:
2044 		return RTE_ETH_LINK_SPEED_56G;
2045 	case RTE_ETH_SPEED_NUM_100G:
2046 		return RTE_ETH_LINK_SPEED_100G;
2047 	case RTE_ETH_SPEED_NUM_200G:
2048 		return RTE_ETH_LINK_SPEED_200G;
2049 	default:
2050 		return 0;
2051 	}
2052 }
2053 
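/* "vectorized" devargs handler: only the value "1" enables the vectorized datapaths */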
2054 static int vectorized_check_handler(__rte_unused const char *key,
2055 		const char *value, void *ret_val)
2056 {
2057 	if (strcmp(value, "1") == 0)
2058 		*(int *)ret_val = 1;
2059 	else
2060 		*(int *)ret_val = 0;
2061 
2062 	return 0;
2063 }
2064 
2065 #define VIRTIO_ARG_SPEED      "speed"
2066 #define VIRTIO_ARG_VECTORIZED "vectorized"
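/*
 * Both keys are consumed from the device arguments, e.g. (illustrative
 * PCI address): -a 0000:00:04.0,speed=10000,vectorized=1
 */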
2067 
2068 static int
2069 link_speed_handler(const char *key __rte_unused,
2070 		const char *value, void *ret_val)
2071 {
2072 	uint32_t val;
2073 	if (!value || !ret_val)
2074 		return -EINVAL;
2075 	val = strtoul(value, NULL, 0);
2076 	/* validate input */
2077 	if (virtio_dev_speed_capa_get(val) == 0)
2078 		return -EINVAL;
2079 	*(uint32_t *)ret_val = val;
2080 
2081 	return 0;
2082 }
2083 
2084 
2085 static int
2086 virtio_dev_devargs_parse(struct rte_devargs *devargs, uint32_t *speed, int *vectorized)
2087 {
2088 	struct rte_kvargs *kvlist;
2089 	int ret = 0;
2090 
2091 	if (devargs == NULL)
2092 		return 0;
2093 
2094 	kvlist = rte_kvargs_parse(devargs->args, NULL);
2095 	if (kvlist == NULL) {
2096 		PMD_INIT_LOG(ERR, "error when parsing param");
2097 		return 0;
2098 	}
2099 
2100 	if (speed && rte_kvargs_count(kvlist, VIRTIO_ARG_SPEED) == 1) {
2101 		ret = rte_kvargs_process(kvlist,
2102 					VIRTIO_ARG_SPEED,
2103 					link_speed_handler, speed);
2104 		if (ret < 0) {
2105 			PMD_INIT_LOG(ERR, "Failed to parse %s",
2106 					VIRTIO_ARG_SPEED);
2107 			goto exit;
2108 		}
2109 	}
2110 
2111 	if (vectorized &&
2112 		rte_kvargs_count(kvlist, VIRTIO_ARG_VECTORIZED) == 1) {
2113 		ret = rte_kvargs_process(kvlist,
2114 				VIRTIO_ARG_VECTORIZED,
2115 				vectorized_check_handler, vectorized);
2116 		if (ret < 0) {
2117 			PMD_INIT_LOG(ERR, "Failed to parse %s",
2118 					VIRTIO_ARG_VECTORIZED);
2119 			goto exit;
2120 		}
2121 	}
2122 
2123 exit:
2124 	rte_kvargs_free(kvlist);
2125 	return ret;
2126 }
2127 
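/* True if any guest (Rx) checksum or TSO feature was negotiated */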
2128 static uint8_t
2129 rx_offload_enabled(struct virtio_hw *hw)
2130 {
2131 	return virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
2132 		virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
2133 		virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
2134 }
2135 
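/* True if any host (Tx) checksum or TSO feature was negotiated */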
2136 static uint8_t
2137 tx_offload_enabled(struct virtio_hw *hw)
2138 {
2139 	return virtio_with_feature(hw, VIRTIO_NET_F_CSUM) ||
2140 		virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
2141 		virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
2142 }
2143 
2144 /*
2145  * Configure the virtio device.
2146  * It returns 0 on success.
2147  */
2148 static int
2149 virtio_dev_configure(struct rte_eth_dev *dev)
2150 {
2151 	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
2152 	const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
2153 	struct virtio_hw *hw = dev->data->dev_private;
2154 	uint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +
2155 		hw->vtnet_hdr_size;
2156 	uint64_t rx_offloads = rxmode->offloads;
2157 	uint64_t tx_offloads = txmode->offloads;
2158 	uint64_t req_features;
2159 	int ret;
2160 
2161 	PMD_INIT_LOG(DEBUG, "configure");
2162 	req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
2163 
2164 	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE && rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
2165 		PMD_DRV_LOG(ERR,
2166 			"Unsupported Rx multi queue mode %d",
2167 			rxmode->mq_mode);
2168 		return -EINVAL;
2169 	}
2170 
2171 	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
2172 		PMD_DRV_LOG(ERR,
2173 			"Unsupported Tx multi queue mode %d",
2174 			txmode->mq_mode);
2175 		return -EINVAL;
2176 	}
2177 
2178 	if (dev->data->dev_conf.intr_conf.rxq) {
2179 		ret = virtio_init_device(dev, hw->req_guest_features);
2180 		if (ret < 0)
2181 			return ret;
2182 	}
2183 
2184 	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS)
2185 		req_features |= (1ULL << VIRTIO_NET_F_RSS);
2186 
2187 	if (rxmode->mtu > hw->max_mtu)
2188 		req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
2189 
2190 	hw->max_rx_pkt_len = ether_hdr_len + rxmode->mtu;
2191 
2192 	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
2193 			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
2194 		req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
2195 
2196 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
2197 		req_features |=
2198 			(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
2199 			(1ULL << VIRTIO_NET_F_GUEST_TSO6);
2200 
2201 	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
2202 			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
2203 		req_features |= (1ULL << VIRTIO_NET_F_CSUM);
2204 
2205 	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)
2206 		req_features |=
2207 			(1ULL << VIRTIO_NET_F_HOST_TSO4) |
2208 			(1ULL << VIRTIO_NET_F_HOST_TSO6);
2209 
2210 	/* If the requested features changed, reinit the device */
2211 	if (req_features != hw->req_guest_features) {
2212 		ret = virtio_init_device(dev, req_features);
2213 		if (ret < 0)
2214 			return ret;
2215 	}
2216 
2217 	/* If queues are not allocated, reinit the device */
2218 	if (hw->vqs == NULL) {
2219 		ret = virtio_init_device(dev, hw->req_guest_features);
2220 		if (ret < 0)
2221 			return ret;
2222 	}
2223 
2224 	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) &&
2225 			!virtio_with_feature(hw, VIRTIO_NET_F_RSS)) {
2226 		PMD_DRV_LOG(ERR, "RSS support requested but not supported by the device");
2227 		return -ENOTSUP;
2228 	}
2229 
2230 	if ((rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
2231 			    RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) &&
2232 		!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
2233 		PMD_DRV_LOG(ERR,
2234 			"rx checksum not available on this host");
2235 		return -ENOTSUP;
2236 	}
2237 
2238 	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
2239 		(!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
2240 		 !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
2241 		PMD_DRV_LOG(ERR,
2242 			"Large Receive Offload not available on this host");
2243 		return -ENOTSUP;
2244 	}
2245 
2246 	/* start control queue */
2247 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
2248 		virtio_dev_cq_start(dev);
2249 
2250 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
2251 		hw->vlan_strip = 1;
2252 
2253 	hw->rx_ol_scatter = (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
2254 
2255 	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
2256 			!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
2257 		PMD_DRV_LOG(ERR,
2258 			    "vlan filtering not available on this host");
2259 		return -ENOTSUP;
2260 	}
2261 
2262 	hw->has_tx_offload = tx_offload_enabled(hw);
2263 	hw->has_rx_offload = rx_offload_enabled(hw);
2264 
2265 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
2266 		/* Enable vector (0) for Link State Interrupt */
2267 		if (VIRTIO_OPS(hw)->set_config_irq(hw, 0) ==
2268 				VIRTIO_MSI_NO_VECTOR) {
2269 			PMD_DRV_LOG(ERR, "failed to set config vector");
2270 			return -EBUSY;
2271 		}
2272 
2273 	if (virtio_with_packed_queue(hw)) {
2274 #if defined(RTE_ARCH_X86_64) && defined(CC_AVX512_SUPPORT)
2275 		if ((hw->use_vec_rx || hw->use_vec_tx) &&
2276 		    (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ||
2277 		     !virtio_with_feature(hw, VIRTIO_F_IN_ORDER) ||
2278 		     !virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
2279 		     rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)) {
2280 			PMD_DRV_LOG(INFO,
2281 				"disabled packed ring vectorized path for requirements not met");
2282 			hw->use_vec_rx = 0;
2283 			hw->use_vec_tx = 0;
2284 		}
2285 #elif defined(RTE_ARCH_ARM)
2286 		if ((hw->use_vec_rx || hw->use_vec_tx) &&
2287 		    (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON) ||
2288 		     !virtio_with_feature(hw, VIRTIO_F_IN_ORDER) ||
2289 		     !virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
2290 		     rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)) {
2291 			PMD_DRV_LOG(INFO,
2292 				"disabled packed ring vectorized path for requirements not met");
2293 			hw->use_vec_rx = 0;
2294 			hw->use_vec_tx = 0;
2295 		}
2296 #else
2297 		hw->use_vec_rx = 0;
2298 		hw->use_vec_tx = 0;
2299 #endif
2300 
2301 		if (hw->use_vec_rx) {
2302 			if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
2303 				PMD_DRV_LOG(INFO,
2304 					"disabled packed ring vectorized rx for mrg_rxbuf enabled");
2305 				hw->use_vec_rx = 0;
2306 			}
2307 
2308 			if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
2309 				PMD_DRV_LOG(INFO,
2310 					"disabled packed ring vectorized rx for TCP_LRO enabled");
2311 				hw->use_vec_rx = 0;
2312 			}
2313 		}
2314 	} else {
2315 		if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER)) {
2316 			hw->use_inorder_tx = 1;
2317 			hw->use_inorder_rx = 1;
2318 			hw->use_vec_rx = 0;
2319 		}
2320 
2321 		if (hw->use_vec_rx) {
2322 #if defined RTE_ARCH_ARM
2323 			if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
2324 				PMD_DRV_LOG(INFO,
2325 					"disabled split ring vectorized path for requirement not met");
2326 				hw->use_vec_rx = 0;
2327 			}
2328 #endif
2329 			if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
2330 				PMD_DRV_LOG(INFO,
2331 					"disabled split ring vectorized rx for mrg_rxbuf enabled");
2332 				hw->use_vec_rx = 0;
2333 			}
2334 
2335 			if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
2336 					   RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
2337 					   RTE_ETH_RX_OFFLOAD_TCP_LRO |
2338 					   RTE_ETH_RX_OFFLOAD_VLAN_STRIP)) {
2339 				PMD_DRV_LOG(INFO,
2340 					"disabled split ring vectorized rx for offloading enabled");
2341 				hw->use_vec_rx = 0;
2342 			}
2343 
2344 			if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) {
2345 				PMD_DRV_LOG(INFO,
2346 					"disabled split ring vectorized rx, max SIMD bitwidth too low");
2347 				hw->use_vec_rx = 0;
2348 			}
2349 		}
2350 	}
2351 
2352 	return 0;
2353 }
2354 
2355 
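/* Start the device: finish queue setup, enable interrupts if requested and notify the backend queues */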
2356 static int
2357 virtio_dev_start(struct rte_eth_dev *dev)
2358 {
2359 	uint16_t nb_queues, i;
2360 	struct virtqueue *vq;
2361 	struct virtio_hw *hw = dev->data->dev_private;
2362 	int ret;
2363 
2364 	/* Finish the initialization of the queues */
2365 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2366 		ret = virtio_dev_rx_queue_setup_finish(dev, i);
2367 		if (ret < 0)
2368 			return ret;
2369 	}
2370 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2371 		ret = virtio_dev_tx_queue_setup_finish(dev, i);
2372 		if (ret < 0)
2373 			return ret;
2374 	}
2375 
2376 	/* check if lsc interrupt feature is enabled */
2377 	if (dev->data->dev_conf.intr_conf.lsc) {
2378 		if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
2379 			PMD_DRV_LOG(ERR, "link status not supported by host");
2380 			return -ENOTSUP;
2381 		}
2382 	}
2383 
2384 	/* Enable uio/vfio intr/eventfd mapping: although we already did that
2385 	 * in device configure, it could have been unmapped when the device
2386 	 * was stopped.
2387 	 */
2388 	if (dev->data->dev_conf.intr_conf.lsc ||
2389 	    dev->data->dev_conf.intr_conf.rxq) {
2390 		virtio_intr_disable(dev);
2391 
2392 		/* Setup interrupt callback  */
2393 		if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
2394 			rte_intr_callback_register(dev->intr_handle,
2395 						   virtio_interrupt_handler,
2396 						   dev);
2397 
2398 		if (virtio_intr_enable(dev) < 0) {
2399 			PMD_DRV_LOG(ERR, "interrupt enable failed");
2400 			return -EIO;
2401 		}
2402 	}
2403 
2404 	/* Notify the backend.
2405 	 * Otherwise the tap backend might already have stopped its queue due to
2406 	 * fullness; the vhost backend would then have no chance to be woken up.
2407 	 */
2408 	nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
2409 	if (hw->max_queue_pairs > 1) {
2410 		if (virtio_set_multiple_queues(dev, nb_queues) != 0)
2411 			return -EINVAL;
2412 	}
2413 
2414 	PMD_INIT_LOG(DEBUG, "nb_queues=%u (port=%u)", nb_queues,
2415 		     dev->data->port_id);
2416 
2417 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2418 		vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
2419 		/* Flush the old packets */
2420 		virtqueue_rxvq_flush(vq);
2421 		virtqueue_notify(vq);
2422 	}
2423 
2424 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2425 		vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
2426 		virtqueue_notify(vq);
2427 	}
2428 
2429 	PMD_INIT_LOG(DEBUG, "Notified backend at initialization (port=%u)",
2430 		     dev->data->port_id);
2431 
2432 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2433 		vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
2434 		VIRTQUEUE_DUMP(vq);
2435 	}
2436 
2437 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2438 		vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
2439 		VIRTQUEUE_DUMP(vq);
2440 	}
2441 
2442 	set_rxtx_funcs(dev);
2443 	hw->started = 1;
2444 
2445 	/* Initialize Link state */
2446 	virtio_dev_link_update(dev, 0);
2447 
2448 	return 0;
2449 }
2450 
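/* Free any mbufs still held by the Rx/Tx virtqueues */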
2451 static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
2452 {
2453 	struct virtio_hw *hw = dev->data->dev_private;
2454 	uint16_t nr_vq = virtio_get_nr_vq(hw);
2455 	const char *type __rte_unused;
2456 	unsigned int i, mbuf_num = 0;
2457 	struct virtqueue *vq;
2458 	struct rte_mbuf *buf;
2459 	int queue_type;
2460 
2461 	if (hw->vqs == NULL)
2462 		return;
2463 
2464 	for (i = 0; i < nr_vq; i++) {
2465 		vq = hw->vqs[i];
2466 		if (!vq)
2467 			continue;
2468 
2469 		queue_type = virtio_get_queue_type(hw, i);
2470 		if (queue_type == VTNET_RQ)
2471 			type = "rxq";
2472 		else if (queue_type == VTNET_TQ)
2473 			type = "txq";
2474 		else
2475 			continue;
2476 
2477 		PMD_INIT_LOG(DEBUG,
2478 			"Before freeing %s[%d] used and unused buf",
2479 			type, i);
2480 		VIRTQUEUE_DUMP(vq);
2481 
2482 		while ((buf = virtqueue_detach_unused(vq)) != NULL) {
2483 			rte_pktmbuf_free(buf);
2484 			mbuf_num++;
2485 		}
2486 
2487 		PMD_INIT_LOG(DEBUG,
2488 			"After freeing %s[%d] used and unused buf",
2489 			type, i);
2490 		VIRTQUEUE_DUMP(vq);
2491 	}
2492 
2493 	PMD_INIT_LOG(DEBUG, "%d mbufs freed", mbuf_num);
2494 }
2495 
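/* Drain completed Tx descriptors on every Tx queue, using the cleanup variant matching the negotiated ring layout */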
2496 static void
2497 virtio_tx_completed_cleanup(struct rte_eth_dev *dev)
2498 {
2499 	struct virtio_hw *hw = dev->data->dev_private;
2500 	struct virtqueue *vq;
2501 	int qidx;
2502 	void (*xmit_cleanup)(struct virtqueue *vq, uint16_t nb_used);
2503 
2504 	if (virtio_with_packed_queue(hw)) {
2505 		if (hw->use_vec_tx)
2506 			xmit_cleanup = &virtio_xmit_cleanup_inorder_packed;
2507 		else if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
2508 			xmit_cleanup = &virtio_xmit_cleanup_inorder_packed;
2509 		else
2510 			xmit_cleanup = &virtio_xmit_cleanup_normal_packed;
2511 	} else {
2512 		if (hw->use_inorder_tx)
2513 			xmit_cleanup = &virtio_xmit_cleanup_inorder;
2514 		else
2515 			xmit_cleanup = &virtio_xmit_cleanup;
2516 	}
2517 
2518 	for (qidx = 0; qidx < hw->max_queue_pairs; qidx++) {
2519 		vq = hw->vqs[2 * qidx + VTNET_SQ_TQ_QUEUE_IDX];
2520 		if (vq != NULL)
2521 			xmit_cleanup(vq, virtqueue_nused(vq));
2522 	}
2523 }
2524 
2525 /*
2526  * Stop device: disable interrupt and mark link down
2527  */
2528 int
2529 virtio_dev_stop(struct rte_eth_dev *dev)
2530 {
2531 	struct virtio_hw *hw = dev->data->dev_private;
2532 	struct rte_eth_link link;
2533 	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
2534 
2535 	PMD_INIT_LOG(DEBUG, "stop");
2536 	dev->data->dev_started = 0;
2537 
2538 	rte_spinlock_lock(&hw->state_lock);
2539 	if (!hw->started)
2540 		goto out_unlock;
2541 	hw->started = 0;
2542 
2543 	virtio_tx_completed_cleanup(dev);
2544 
2545 	if (intr_conf->lsc || intr_conf->rxq) {
2546 		virtio_intr_disable(dev);
2547 
2548 		/* Reset interrupt callback  */
2549 		if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
2550 			rte_intr_callback_unregister(dev->intr_handle,
2551 						     virtio_interrupt_handler,
2552 						     dev);
2553 		}
2554 	}
2555 
2556 	memset(&link, 0, sizeof(link));
2557 	rte_eth_linkstatus_set(dev, &link);
2558 out_unlock:
2559 	rte_spinlock_unlock(&hw->state_lock);
2560 
2561 	return 0;
2562 }
2563 
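/*
 * Link status is read from the device status field when VIRTIO_NET_F_STATUS
 * was negotiated; otherwise the link is reported as up while the port is
 * started.
 */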
2564 static int
2565 virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
2566 {
2567 	struct rte_eth_link link;
2568 	uint16_t status;
2569 	struct virtio_hw *hw = dev->data->dev_private;
2570 
2571 	memset(&link, 0, sizeof(link));
2572 	link.link_duplex = hw->duplex;
2573 	link.link_speed  = hw->speed;
2574 	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
2575 
2576 	if (!hw->started) {
2577 		link.link_status = RTE_ETH_LINK_DOWN;
2578 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
2579 	} else if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
2580 		PMD_INIT_LOG(DEBUG, "Get link status from hw");
2581 		virtio_read_dev_config(hw,
2582 				offsetof(struct virtio_net_config, status),
2583 				&status, sizeof(status));
2584 		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
2585 			link.link_status = RTE_ETH_LINK_DOWN;
2586 			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
2587 			PMD_INIT_LOG(DEBUG, "Port %d is down",
2588 				     dev->data->port_id);
2589 		} else {
2590 			link.link_status = RTE_ETH_LINK_UP;
2591 			if (hw->get_speed_via_feat)
2592 				virtio_get_speed_duplex(dev, &link);
2593 			PMD_INIT_LOG(DEBUG, "Port %d is up",
2594 				     dev->data->port_id);
2595 		}
2596 	} else {
2597 		link.link_status = RTE_ETH_LINK_UP;
2598 		if (hw->get_speed_via_feat)
2599 			virtio_get_speed_duplex(dev, &link);
2600 	}
2601 
2602 	return rte_eth_linkstatus_set(dev, &link);
2603 }
2604 
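/* VLAN filter offload requires VIRTIO_NET_F_CTRL_VLAN support from the host */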
2605 static int
2606 virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2607 {
2608 	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
2609 	struct virtio_hw *hw = dev->data->dev_private;
2610 	uint64_t offloads = rxmode->offloads;
2611 
2612 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
2613 		if ((offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
2614 				!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
2615 
2616 			PMD_DRV_LOG(NOTICE,
2617 				"vlan filtering not available on this host");
2618 
2619 			return -ENOTSUP;
2620 		}
2621 	}
2622 
2623 	if (mask & RTE_ETH_VLAN_STRIP_MASK)
2624 		hw->vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
2625 
2626 	return 0;
2627 }
2628 
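/* Report device capabilities derived from the host feature bits */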
2629 static int
2630 virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2631 {
2632 	uint64_t tso_mask, host_features;
2633 	uint32_t rss_hash_types = 0;
2634 	struct virtio_hw *hw = dev->data->dev_private;
2635 	dev_info->speed_capa = virtio_dev_speed_capa_get(hw->speed);
2636 
2637 	dev_info->max_rx_queues =
2638 		RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
2639 	dev_info->max_tx_queues =
2640 		RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_TX_QUEUES);
2641 	dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
2642 	dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
2643 	dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
2644 	dev_info->max_mtu = hw->max_mtu;
2645 
2646 	host_features = VIRTIO_OPS(hw)->get_features(hw);
2647 	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
2648 	if (host_features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
2649 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SCATTER;
2650 	if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
2651 		dev_info->rx_offload_capa |=
2652 			RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
2653 			RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
2654 	}
2655 	if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
2656 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
2657 	tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
2658 		(1ULL << VIRTIO_NET_F_GUEST_TSO6);
2659 	if ((host_features & tso_mask) == tso_mask)
2660 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
2661 
2662 	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
2663 				    RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
2664 	if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
2665 		dev_info->tx_offload_capa |=
2666 			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
2667 			RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
2668 	}
2669 	tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
2670 		(1ULL << VIRTIO_NET_F_HOST_TSO6);
2671 	if ((host_features & tso_mask) == tso_mask)
2672 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
2673 
2674 	if (host_features & (1ULL << VIRTIO_NET_F_RSS)) {
2675 		virtio_dev_get_rss_config(hw, &rss_hash_types);
2676 		dev_info->hash_key_size = VIRTIO_NET_RSS_KEY_SIZE;
2677 		dev_info->reta_size = VIRTIO_NET_RSS_RETA_SIZE;
2678 		dev_info->flow_type_rss_offloads =
2679 			virtio_to_ethdev_rss_offloads(rss_hash_types);
2680 	} else {
2681 		dev_info->hash_key_size = 0;
2682 		dev_info->reta_size = 0;
2683 		dev_info->flow_type_rss_offloads = 0;
2684 	}
2685 
2686 	if (host_features & (1ULL << VIRTIO_F_RING_PACKED)) {
2687 		/*
2688 		 * According to 2.7 Packed Virtqueues,
2689 		 * 2.7.10.1 Structure Size and Alignment:
2690 		 * The Queue Size value does not have to be a power of 2.
2691 		 */
2692 		dev_info->rx_desc_lim.nb_max = UINT16_MAX;
2693 		dev_info->tx_desc_lim.nb_max = UINT16_MAX;
2694 	} else {
2695 		/*
2696 		 * According to 2.6 Split Virtqueues:
2697 		 * Queue Size value is always a power of 2. The maximum Queue
2698 		 * Size value is 32768.
2699 		 */
2700 		dev_info->rx_desc_lim.nb_max = 32768;
2701 		dev_info->tx_desc_lim.nb_max = 32768;
2702 	}
2703 	/*
2704 	 * The actual minimum is not the same for virtqueues of different kinds,
2705 	 * but to avoid tangling the code with separate branches, rely on the
2706 	 * default thresholds, since the descriptor count must be at least their size.
2707 	 */
2708 	dev_info->rx_desc_lim.nb_min = RTE_MAX(DEFAULT_RX_FREE_THRESH,
2709 					       RTE_VIRTIO_VPMD_RX_REARM_THRESH);
2710 	dev_info->tx_desc_lim.nb_min = DEFAULT_TX_FREE_THRESH;
2711 	dev_info->rx_desc_lim.nb_align = 1;
2712 	dev_info->tx_desc_lim.nb_align = 1;
2713 
2714 	return 0;
2715 }
2716 
2717 /*
2718  * It enables testpmd to collect per-queue stats.
2719  */
2720 static int
2721 virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,
2722 __rte_unused uint16_t queue_id, __rte_unused uint8_t stat_idx,
2723 __rte_unused uint8_t is_rx)
2724 {
2725 	return 0;
2726 }
2727 
2728 RTE_LOG_REGISTER_SUFFIX(virtio_logtype_init, init, NOTICE);
2729 RTE_LOG_REGISTER_SUFFIX(virtio_logtype_driver, driver, NOTICE);
2730