xref: /dpdk/drivers/net/virtio/virtio_ethdev.c (revision 0f1dc8cb671203d52488fd66936f2fe6dcca03cc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4 
5 #include <stdint.h>
6 #include <string.h>
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <errno.h>
10 #include <unistd.h>
11 
12 #include <ethdev_driver.h>
13 #include <rte_memcpy.h>
14 #include <rte_string_fns.h>
15 #include <rte_memzone.h>
16 #include <rte_malloc.h>
17 #include <rte_branch_prediction.h>
18 #include <rte_ether.h>
19 #include <rte_ip.h>
20 #include <rte_arp.h>
21 #include <rte_common.h>
22 #include <rte_errno.h>
23 #include <rte_cpuflags.h>
24 #include <rte_vect.h>
25 #include <rte_memory.h>
26 #include <rte_eal_paging.h>
27 #include <rte_eal.h>
28 #include <dev_driver.h>
29 #include <rte_cycles.h>
30 #include <rte_kvargs.h>
31 
32 #include "virtio_ethdev.h"
33 #include "virtio.h"
34 #include "virtio_logs.h"
35 #include "virtqueue.h"
36 #include "virtio_cvq.h"
37 #include "virtio_rxtx.h"
38 #include "virtio_rxtx_simple.h"
39 #include "virtio_user/virtio_user_dev.h"
40 
41 static int  virtio_dev_configure(struct rte_eth_dev *dev);
42 static int  virtio_dev_start(struct rte_eth_dev *dev);
43 static int virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
44 static int virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
45 static int virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
46 static int virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
47 static uint32_t virtio_dev_speed_capa_get(uint32_t speed);
48 static int virtio_dev_devargs_parse(struct rte_devargs *devargs,
49 	uint32_t *speed,
50 	int *vectorized);
51 static int virtio_dev_info_get(struct rte_eth_dev *dev,
52 				struct rte_eth_dev_info *dev_info);
53 static int virtio_dev_link_update(struct rte_eth_dev *dev,
54 	int wait_to_complete);
55 static int virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
56 static int virtio_dev_rss_hash_update(struct rte_eth_dev *dev,
57 		struct rte_eth_rss_conf *rss_conf);
58 static int virtio_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
59 		struct rte_eth_rss_conf *rss_conf);
60 static int virtio_dev_rss_reta_update(struct rte_eth_dev *dev,
61 			 struct rte_eth_rss_reta_entry64 *reta_conf,
62 			 uint16_t reta_size);
63 static int virtio_dev_rss_reta_query(struct rte_eth_dev *dev,
64 			 struct rte_eth_rss_reta_entry64 *reta_conf,
65 			 uint16_t reta_size);
66 
67 static void virtio_set_hwaddr(struct virtio_hw *hw);
68 static void virtio_get_hwaddr(struct virtio_hw *hw);
69 
70 static int virtio_dev_stats_get(struct rte_eth_dev *dev,
71 				 struct rte_eth_stats *stats);
72 static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
73 				 struct rte_eth_xstat *xstats, unsigned n);
74 static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
75 				       struct rte_eth_xstat_name *xstats_names,
76 				       unsigned limit);
77 static int virtio_dev_stats_reset(struct rte_eth_dev *dev);
78 static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
79 static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
80 				uint16_t vlan_id, int on);
81 static int virtio_mac_addr_add(struct rte_eth_dev *dev,
82 				struct rte_ether_addr *mac_addr,
83 				uint32_t index, uint32_t vmdq);
84 static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
85 static int virtio_mac_addr_set(struct rte_eth_dev *dev,
86 				struct rte_ether_addr *mac_addr);
87 
88 static int virtio_intr_disable(struct rte_eth_dev *dev);
89 static int virtio_get_monitor_addr(void *rx_queue,
90 				struct rte_power_monitor_cond *pmc);
91 
92 static int virtio_dev_queue_stats_mapping_set(
93 	struct rte_eth_dev *eth_dev,
94 	uint16_t queue_id,
95 	uint8_t stat_idx,
96 	uint8_t is_rx);
97 
98 static void virtio_notify_peers(struct rte_eth_dev *dev);
99 static void virtio_ack_link_announce(struct rte_eth_dev *dev);
100 
101 struct rte_virtio_xstats_name_off {
102 	char name[RTE_ETH_XSTATS_NAME_SIZE];
103 	unsigned offset;
104 };
105 
106 /* [rt]x_qX_ is prepended to the name string here */
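/* e.g. "rx_q0_good_packets", "rx_q0_size_64_packets" */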
107 static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = {
108 	{"good_packets",           offsetof(struct virtnet_rx, stats.packets)},
109 	{"good_bytes",             offsetof(struct virtnet_rx, stats.bytes)},
110 	{"errors",                 offsetof(struct virtnet_rx, stats.errors)},
111 	{"multicast_packets",      offsetof(struct virtnet_rx, stats.multicast)},
112 	{"broadcast_packets",      offsetof(struct virtnet_rx, stats.broadcast)},
113 	{"undersize_packets",      offsetof(struct virtnet_rx, stats.size_bins[0])},
114 	{"size_64_packets",        offsetof(struct virtnet_rx, stats.size_bins[1])},
115 	{"size_65_127_packets",    offsetof(struct virtnet_rx, stats.size_bins[2])},
116 	{"size_128_255_packets",   offsetof(struct virtnet_rx, stats.size_bins[3])},
117 	{"size_256_511_packets",   offsetof(struct virtnet_rx, stats.size_bins[4])},
118 	{"size_512_1023_packets",  offsetof(struct virtnet_rx, stats.size_bins[5])},
119 	{"size_1024_1518_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
120 	{"size_1519_max_packets",  offsetof(struct virtnet_rx, stats.size_bins[7])},
121 };
122 
123 /* [rt]x_qX_ is prepended to the name string here */
124 static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
125 	{"good_packets",           offsetof(struct virtnet_tx, stats.packets)},
126 	{"good_bytes",             offsetof(struct virtnet_tx, stats.bytes)},
127 	{"multicast_packets",      offsetof(struct virtnet_tx, stats.multicast)},
128 	{"broadcast_packets",      offsetof(struct virtnet_tx, stats.broadcast)},
129 	{"undersize_packets",      offsetof(struct virtnet_tx, stats.size_bins[0])},
130 	{"size_64_packets",        offsetof(struct virtnet_tx, stats.size_bins[1])},
131 	{"size_65_127_packets",    offsetof(struct virtnet_tx, stats.size_bins[2])},
132 	{"size_128_255_packets",   offsetof(struct virtnet_tx, stats.size_bins[3])},
133 	{"size_256_511_packets",   offsetof(struct virtnet_tx, stats.size_bins[4])},
134 	{"size_512_1023_packets",  offsetof(struct virtnet_tx, stats.size_bins[5])},
135 	{"size_1024_1518_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
136 	{"size_1519_max_packets",  offsetof(struct virtnet_tx, stats.size_bins[7])},
137 };
138 
139 #define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \
140 			    sizeof(rte_virtio_rxq_stat_strings[0]))
141 #define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
142 			    sizeof(rte_virtio_txq_stat_strings[0]))
143 
144 struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
145 
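/*
 * Program the device RSS configuration (hash types, indirection table and
 * hash key) by sending a VIRTIO_NET_CTRL_MQ_RSS_CONFIG command on the
 * control queue.
 */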
146 static int
147 virtio_set_multiple_queues_rss(struct rte_eth_dev *dev, uint16_t nb_queues)
148 {
149 	struct virtio_hw *hw = dev->data->dev_private;
150 	struct virtio_pmd_ctrl ctrl;
151 	struct virtio_net_ctrl_rss rss;
152 	int dlen, ret;
153 
154 	rss.hash_types = hw->rss_hash_types & VIRTIO_NET_HASH_TYPE_MASK;
155 	RTE_BUILD_BUG_ON(!RTE_IS_POWER_OF_2(VIRTIO_NET_RSS_RETA_SIZE));
156 	rss.indirection_table_mask = VIRTIO_NET_RSS_RETA_SIZE - 1;
157 	rss.unclassified_queue = 0;
158 	memcpy(rss.indirection_table, hw->rss_reta, VIRTIO_NET_RSS_RETA_SIZE * sizeof(uint16_t));
159 	rss.max_tx_vq = nb_queues;
160 	rss.hash_key_length = VIRTIO_NET_RSS_KEY_SIZE;
161 	memcpy(rss.hash_key_data, hw->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
162 
163 	ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
164 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_RSS_CONFIG;
165 	memcpy(ctrl.data, &rss, sizeof(rss));
166 
167 	dlen = sizeof(rss);
168 
169 	ret = virtio_send_command(hw->cvq, &ctrl, &dlen, 1);
170 	if (ret) {
171 		PMD_INIT_LOG(ERR, "RSS multiqueue configured but send command failed");
172 		return -EINVAL;
173 	}
174 
175 	return 0;
176 }
177 
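/*
 * Tell the device how many virtqueue pairs to use by sending a
 * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET command on the control queue.
 */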
178 static int
179 virtio_set_multiple_queues_auto(struct rte_eth_dev *dev, uint16_t nb_queues)
180 {
181 	struct virtio_hw *hw = dev->data->dev_private;
182 	struct virtio_pmd_ctrl ctrl;
183 	int dlen;
184 	int ret;
185 
186 	ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
187 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
188 	memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));
189 
190 	dlen = sizeof(uint16_t);
191 
192 	ret = virtio_send_command(hw->cvq, &ctrl, &dlen, 1);
193 	if (ret) {
194 		PMD_INIT_LOG(ERR, "Multiqueue configured but send command "
195 			  "failed, this is too late now...");
196 		return -EINVAL;
197 	}
198 
199 	return 0;
200 }
201 
202 static int
203 virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
204 {
205 	struct virtio_hw *hw = dev->data->dev_private;
206 
207 	if (virtio_with_feature(hw, VIRTIO_NET_F_RSS))
208 		return virtio_set_multiple_queues_rss(dev, nb_queues);
209 	else
210 		return virtio_set_multiple_queues_auto(dev, nb_queues);
211 }
212 
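/*
 * Total number of virtqueues: one Rx/Tx pair per queue pair, plus the
 * control queue if it was negotiated.
 */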
213 static uint16_t
214 virtio_get_nr_vq(struct virtio_hw *hw)
215 {
216 	uint16_t nr_vq = hw->max_queue_pairs * 2;
217 
218 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
219 		nr_vq += 1;
220 
221 	return nr_vq;
222 }
223 
224 static void
225 virtio_control_queue_notify(struct virtqueue *vq, __rte_unused void *cookie)
226 {
227 	virtqueue_notify(vq);
228 }
229 
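/*
 * Allocate and set up a single virtqueue: query its size from the device,
 * allocate the ring on the device's NUMA node and register it through the
 * transport-specific setup_queue() callback.
 */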
230 static int
231 virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
232 {
233 	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
234 	unsigned int vq_size;
235 	struct virtio_hw *hw = dev->data->dev_private;
236 	struct virtqueue *vq;
237 	int queue_type = virtio_get_queue_type(hw, queue_idx);
238 	int ret;
239 	int numa_node = dev->device->numa_node;
240 
241 	PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d",
242 			queue_idx, numa_node);
243 
244 	/*
245 	 * Read the virtqueue size from the Queue Size field.
246 	 * It is always a power of 2 for split rings; a value of 0 means the virtqueue does not exist.
247 	 */
248 	vq_size = VIRTIO_OPS(hw)->get_queue_num(hw, queue_idx);
249 	PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
250 	if (vq_size == 0) {
251 		PMD_INIT_LOG(ERR, "virtqueue does not exist");
252 		return -EINVAL;
253 	}
254 
255 	if (!virtio_with_packed_queue(hw) && !rte_is_power_of_2(vq_size)) {
256 		PMD_INIT_LOG(ERR, "split virtqueue size is not power of 2");
257 		return -EINVAL;
258 	}
259 
260 	snprintf(vq_name, sizeof(vq_name), "port%d_vq%d", dev->data->port_id, queue_idx);
261 
262 	vq = virtqueue_alloc(hw, queue_idx, vq_size, queue_type, numa_node, vq_name);
263 	if (!vq) {
264 		PMD_INIT_LOG(ERR, "virtqueue init failed");
265 		return -ENOMEM;
266 	}
267 
268 	hw->vqs[queue_idx] = vq;
269 
270 	if (queue_type == VTNET_CQ) {
271 		hw->cvq = &vq->cq;
272 		vq->cq.notify_queue = &virtio_control_queue_notify;
273 	}
274 
275 	if (VIRTIO_OPS(hw)->setup_queue(hw, vq) < 0) {
276 		PMD_INIT_LOG(ERR, "setup_queue failed");
277 		ret = -EINVAL;
278 		goto clean_vq;
279 	}
280 
281 	return 0;
282 
283 clean_vq:
284 	if (queue_type == VTNET_CQ)
285 		hw->cvq = NULL;
286 	virtqueue_free(vq);
287 	hw->vqs[queue_idx] = NULL;
288 
289 	return ret;
290 }
291 
292 static void
293 virtio_free_queues(struct virtio_hw *hw)
294 {
295 	uint16_t nr_vq = virtio_get_nr_vq(hw);
296 	struct virtqueue *vq;
297 	uint16_t i;
298 
299 	if (hw->vqs == NULL)
300 		return;
301 
302 	for (i = 0; i < nr_vq; i++) {
303 		vq = hw->vqs[i];
304 		if (!vq)
305 			continue;
306 		virtqueue_free(vq);
307 		hw->vqs[i] = NULL;
308 	}
309 
310 	rte_free(hw->vqs);
311 	hw->vqs = NULL;
312 }
313 
314 static int
315 virtio_alloc_queues(struct rte_eth_dev *dev)
316 {
317 	struct virtio_hw *hw = dev->data->dev_private;
318 	uint16_t nr_vq = virtio_get_nr_vq(hw);
319 	uint16_t i;
320 	int ret;
321 
322 	hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
323 	if (!hw->vqs) {
324 		PMD_INIT_LOG(ERR, "failed to allocate vqs");
325 		return -ENOMEM;
326 	}
327 
328 	for (i = 0; i < nr_vq; i++) {
329 		ret = virtio_init_queue(dev, i);
330 		if (ret < 0) {
331 			virtio_free_queues(hw);
332 			return ret;
333 		}
334 	}
335 
336 	return 0;
337 }
338 
339 static void virtio_queues_unbind_intr(struct rte_eth_dev *dev);
340 
341 static void
342 virtio_free_rss(struct virtio_hw *hw)
343 {
344 	rte_free(hw->rss_key);
345 	hw->rss_key = NULL;
346 
347 	rte_free(hw->rss_reta);
348 	hw->rss_reta = NULL;
349 }
350 
351 int
352 virtio_dev_close(struct rte_eth_dev *dev)
353 {
354 	struct virtio_hw *hw = dev->data->dev_private;
355 	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
356 
357 	PMD_INIT_LOG(DEBUG, "virtio_dev_close");
358 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
359 		return 0;
360 
361 	if (!hw->opened)
362 		return 0;
363 	hw->opened = 0;
364 
365 	/* reset the NIC */
366 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
367 		VIRTIO_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);
368 	if (intr_conf->rxq)
369 		virtio_queues_unbind_intr(dev);
370 
371 	if (intr_conf->lsc || intr_conf->rxq) {
372 		virtio_intr_disable(dev);
373 		rte_intr_efd_disable(dev->intr_handle);
374 		rte_intr_vec_list_free(dev->intr_handle);
375 	}
376 
377 	virtio_reset(hw);
378 	virtio_dev_free_mbufs(dev);
379 	virtio_free_queues(hw);
380 	virtio_free_rss(hw);
381 
382 	return VIRTIO_OPS(hw)->dev_close(hw);
383 }
384 
385 static int
386 virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
387 {
388 	struct virtio_hw *hw = dev->data->dev_private;
389 	struct virtio_pmd_ctrl ctrl;
390 	int dlen[1];
391 	int ret;
392 
393 	if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
394 		PMD_INIT_LOG(INFO, "host does not support rx control");
395 		return -ENOTSUP;
396 	}
397 
398 	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
399 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
400 	ctrl.data[0] = 1;
401 	dlen[0] = 1;
402 
403 	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
404 	if (ret) {
405 		PMD_INIT_LOG(ERR, "Failed to enable promisc");
406 		return -EAGAIN;
407 	}
408 
409 	return 0;
410 }
411 
412 static int
413 virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
414 {
415 	struct virtio_hw *hw = dev->data->dev_private;
416 	struct virtio_pmd_ctrl ctrl;
417 	int dlen[1];
418 	int ret;
419 
420 	if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
421 		PMD_INIT_LOG(INFO, "host does not support rx control");
422 		return -ENOTSUP;
423 	}
424 
425 	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
426 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
427 	ctrl.data[0] = 0;
428 	dlen[0] = 1;
429 
430 	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
431 	if (ret) {
432 		PMD_INIT_LOG(ERR, "Failed to disable promisc");
433 		return -EAGAIN;
434 	}
435 
436 	return 0;
437 }
438 
439 static int
440 virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
441 {
442 	struct virtio_hw *hw = dev->data->dev_private;
443 	struct virtio_pmd_ctrl ctrl;
444 	int dlen[1];
445 	int ret;
446 
447 	if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
448 		PMD_INIT_LOG(INFO, "host does not support rx control");
449 		return -ENOTSUP;
450 	}
451 
452 	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
453 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
454 	ctrl.data[0] = 1;
455 	dlen[0] = 1;
456 
457 	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
458 	if (ret) {
459 		PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
460 		return -EAGAIN;
461 	}
462 
463 	return 0;
464 }
465 
466 static int
467 virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
468 {
469 	struct virtio_hw *hw = dev->data->dev_private;
470 	struct virtio_pmd_ctrl ctrl;
471 	int dlen[1];
472 	int ret;
473 
474 	if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
475 		PMD_INIT_LOG(INFO, "host does not support rx control");
476 		return -ENOTSUP;
477 	}
478 
479 	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
480 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
481 	ctrl.data[0] = 0;
482 	dlen[0] = 1;
483 
484 	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
485 	if (ret) {
486 		PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
487 		return -EAGAIN;
488 	}
489 
490 	return 0;
491 }
492 
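/* Usable Rx buffer size of an mbuf from the given pool (data room minus headroom). */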
493 uint16_t
494 virtio_rx_mem_pool_buf_size(struct rte_mempool *mp)
495 {
496 	return rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
497 }
498 
499 bool
500 virtio_rx_check_scatter(uint16_t max_rx_pkt_len, uint16_t rx_buf_size,
501 			bool rx_scatter_enabled, const char **error)
502 {
503 	if (!rx_scatter_enabled && max_rx_pkt_len > rx_buf_size) {
504 		*error = "Rx scatter is disabled and RxQ mbuf pool object size is too small";
505 		return false;
506 	}
507 
508 	return true;
509 }
510 
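/*
 * Check that every already-configured Rx queue can receive frames of the
 * given size with its mbuf pool and the current Rx scatter setting.
 */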
511 static bool
512 virtio_check_scatter_on_all_rx_queues(struct rte_eth_dev *dev,
513 				      uint16_t frame_size)
514 {
515 	struct virtio_hw *hw = dev->data->dev_private;
516 	struct virtnet_rx *rxvq;
517 	struct virtqueue *vq;
518 	unsigned int qidx;
519 	uint16_t buf_size;
520 	const char *error;
521 
522 	if (hw->vqs == NULL)
523 		return true;
524 
525 	for (qidx = 0; qidx < hw->max_queue_pairs; qidx++) {
526 		vq = hw->vqs[2 * qidx + VTNET_SQ_RQ_QUEUE_IDX];
527 		if (vq == NULL)
528 			continue;
529 
530 		rxvq = &vq->rxq;
531 		if (rxvq->mpool == NULL)
532 			continue;
533 		buf_size = virtio_rx_mem_pool_buf_size(rxvq->mpool);
534 
535 		if (!virtio_rx_check_scatter(frame_size, buf_size,
536 					     hw->rx_ol_scatter, &error)) {
537 			PMD_INIT_LOG(ERR, "MTU check for RxQ %u failed: %s",
538 				     qidx, error);
539 			return false;
540 		}
541 	}
542 
543 	return true;
544 }
545 
546 #define VLAN_TAG_LEN           4    /* 802.3ac tag (not DMA'd) */
547 static int
548 virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
549 {
550 	struct virtio_hw *hw = dev->data->dev_private;
551 	uint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +
552 				 hw->vtnet_hdr_size;
553 	uint32_t frame_size = mtu + ether_hdr_len;
554 	uint32_t max_frame_size = hw->max_mtu + ether_hdr_len;
555 
556 	max_frame_size = RTE_MIN(max_frame_size, VIRTIO_MAX_RX_PKTLEN);
557 
558 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > max_frame_size) {
559 		PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
560 			RTE_ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
561 		return -EINVAL;
562 	}
563 
564 	if (!virtio_check_scatter_on_all_rx_queues(dev, frame_size)) {
565 		PMD_INIT_LOG(ERR, "MTU vs Rx scatter and Rx buffers check failed");
566 		return -EINVAL;
567 	}
568 
569 	hw->max_rx_pkt_len = frame_size;
570 
571 	return 0;
572 }
573 
574 static int
575 virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
576 {
577 	struct virtio_hw *hw = dev->data->dev_private;
578 	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
579 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
580 
581 	virtqueue_enable_intr(vq);
582 	virtio_mb(hw->weak_barriers);
583 	return 0;
584 }
585 
586 static int
587 virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
588 {
589 	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
590 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
591 
592 	virtqueue_disable_intr(vq);
593 	return 0;
594 }
595 
596 static int
597 virtio_dev_priv_dump(struct rte_eth_dev *dev, FILE *f)
598 {
599 	struct virtio_hw *hw = dev->data->dev_private;
600 
601 	fprintf(f, "guest_features: 0x%" PRIx64 "\n", hw->guest_features);
602 	fprintf(f, "vtnet_hdr_size: %u\n", hw->vtnet_hdr_size);
603 	fprintf(f, "use_vec: rx-%u tx-%u\n", hw->use_vec_rx, hw->use_vec_tx);
604 	fprintf(f, "use_inorder: rx-%u tx-%u\n", hw->use_inorder_rx, hw->use_inorder_tx);
605 	fprintf(f, "intr_lsc: %u\n", hw->intr_lsc);
606 	fprintf(f, "max_mtu: %u\n", hw->max_mtu);
607 	fprintf(f, "max_rx_pkt_len: %zu\n", hw->max_rx_pkt_len);
608 	fprintf(f, "max_queue_pairs: %u\n", hw->max_queue_pairs);
609 	fprintf(f, "req_guest_features: 0x%" PRIx64 "\n", hw->req_guest_features);
610 
611 	return 0;
612 }
613 
614 /*
615  * dev_ops for virtio, bare necessities for basic operation
616  */
617 static const struct eth_dev_ops virtio_eth_dev_ops = {
618 	.dev_configure           = virtio_dev_configure,
619 	.dev_start               = virtio_dev_start,
620 	.dev_stop                = virtio_dev_stop,
621 	.dev_close               = virtio_dev_close,
622 	.promiscuous_enable      = virtio_dev_promiscuous_enable,
623 	.promiscuous_disable     = virtio_dev_promiscuous_disable,
624 	.allmulticast_enable     = virtio_dev_allmulticast_enable,
625 	.allmulticast_disable    = virtio_dev_allmulticast_disable,
626 	.mtu_set                 = virtio_mtu_set,
627 	.dev_infos_get           = virtio_dev_info_get,
628 	.stats_get               = virtio_dev_stats_get,
629 	.xstats_get              = virtio_dev_xstats_get,
630 	.xstats_get_names        = virtio_dev_xstats_get_names,
631 	.stats_reset             = virtio_dev_stats_reset,
632 	.xstats_reset            = virtio_dev_stats_reset,
633 	.link_update             = virtio_dev_link_update,
634 	.vlan_offload_set        = virtio_dev_vlan_offload_set,
635 	.rx_queue_setup          = virtio_dev_rx_queue_setup,
636 	.rx_queue_intr_enable    = virtio_dev_rx_queue_intr_enable,
637 	.rx_queue_intr_disable   = virtio_dev_rx_queue_intr_disable,
638 	.tx_queue_setup          = virtio_dev_tx_queue_setup,
639 	.rss_hash_update         = virtio_dev_rss_hash_update,
640 	.rss_hash_conf_get       = virtio_dev_rss_hash_conf_get,
641 	.reta_update             = virtio_dev_rss_reta_update,
642 	.reta_query              = virtio_dev_rss_reta_query,
643 	/* collect stats per queue */
644 	.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
645 	.vlan_filter_set         = virtio_vlan_filter_set,
646 	.mac_addr_add            = virtio_mac_addr_add,
647 	.mac_addr_remove         = virtio_mac_addr_remove,
648 	.mac_addr_set            = virtio_mac_addr_set,
649 	.get_monitor_addr        = virtio_get_monitor_addr,
650 	.eth_dev_priv_dump       = virtio_dev_priv_dump,
651 };
652 
653 /*
654  * dev_ops for virtio-user in secondary processes; only a limited
655  * set of operations is currently supported.
656  */
657 const struct eth_dev_ops virtio_user_secondary_eth_dev_ops = {
658 	.dev_infos_get           = virtio_dev_info_get,
659 	.stats_get               = virtio_dev_stats_get,
660 	.xstats_get              = virtio_dev_xstats_get,
661 	.xstats_get_names        = virtio_dev_xstats_get_names,
662 	.stats_reset             = virtio_dev_stats_reset,
663 	.xstats_reset            = virtio_dev_stats_reset,
664 	/* collect stats per queue */
665 	.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
666 };
667 
668 static void
669 virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
670 {
671 	unsigned i;
672 
673 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
674 		const struct virtnet_tx *txvq = dev->data->tx_queues[i];
675 		if (txvq == NULL)
676 			continue;
677 
678 		stats->opackets += txvq->stats.packets;
679 		stats->obytes += txvq->stats.bytes;
680 
681 		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
682 			stats->q_opackets[i] = txvq->stats.packets;
683 			stats->q_obytes[i] = txvq->stats.bytes;
684 		}
685 	}
686 
687 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
688 		const struct virtnet_rx *rxvq = dev->data->rx_queues[i];
689 		if (rxvq == NULL)
690 			continue;
691 
692 		stats->ipackets += rxvq->stats.packets;
693 		stats->ibytes += rxvq->stats.bytes;
694 		stats->ierrors += rxvq->stats.errors;
695 
696 		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
697 			stats->q_ipackets[i] = rxvq->stats.packets;
698 			stats->q_ibytes[i] = rxvq->stats.bytes;
699 		}
700 	}
701 
702 	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
703 }
704 
705 static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
706 				       struct rte_eth_xstat_name *xstats_names,
707 				       __rte_unused unsigned limit)
708 {
709 	unsigned i;
710 	unsigned count = 0;
711 	unsigned t;
712 
713 	unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
714 		dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
715 
716 	if (xstats_names != NULL) {
717 		/* Note: limit checked in rte_eth_xstats_get_names() */
718 
719 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
720 			struct virtnet_rx *rxvq = dev->data->rx_queues[i];
721 			if (rxvq == NULL)
722 				continue;
723 			for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
724 				snprintf(xstats_names[count].name,
725 					sizeof(xstats_names[count].name),
726 					"rx_q%u_%s", i,
727 					rte_virtio_rxq_stat_strings[t].name);
728 				count++;
729 			}
730 		}
731 
732 		for (i = 0; i < dev->data->nb_tx_queues; i++) {
733 			struct virtnet_tx *txvq = dev->data->tx_queues[i];
734 			if (txvq == NULL)
735 				continue;
736 			for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
737 				snprintf(xstats_names[count].name,
738 					sizeof(xstats_names[count].name),
739 					"tx_q%u_%s", i,
740 					rte_virtio_txq_stat_strings[t].name);
741 				count++;
742 			}
743 		}
744 		return count;
745 	}
746 	return nstats;
747 }
748 
749 static int
750 virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
751 		      unsigned n)
752 {
753 	unsigned i;
754 	unsigned count = 0;
755 
756 	unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
757 		dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
758 
759 	if (n < nstats)
760 		return nstats;
761 
762 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
763 		struct virtnet_rx *rxvq = dev->data->rx_queues[i];
764 
765 		if (rxvq == NULL)
766 			continue;
767 
768 		unsigned t;
769 
770 		for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
771 			xstats[count].value = *(uint64_t *)(((char *)rxvq) +
772 				rte_virtio_rxq_stat_strings[t].offset);
773 			xstats[count].id = count;
774 			count++;
775 		}
776 	}
777 
778 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
779 		struct virtnet_tx *txvq = dev->data->tx_queues[i];
780 
781 		if (txvq == NULL)
782 			continue;
783 
784 		unsigned t;
785 
786 		for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
787 			xstats[count].value = *(uint64_t *)(((char *)txvq) +
788 				rte_virtio_txq_stat_strings[t].offset);
789 			xstats[count].id = count;
790 			count++;
791 		}
792 	}
793 
794 	return count;
795 }
796 
797 static int
798 virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
799 {
800 	virtio_update_stats(dev, stats);
801 
802 	return 0;
803 }
804 
805 static int
806 virtio_dev_stats_reset(struct rte_eth_dev *dev)
807 {
808 	unsigned int i;
809 
810 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
811 		struct virtnet_tx *txvq = dev->data->tx_queues[i];
812 		if (txvq == NULL)
813 			continue;
814 
815 		txvq->stats.packets = 0;
816 		txvq->stats.bytes = 0;
817 		txvq->stats.multicast = 0;
818 		txvq->stats.broadcast = 0;
819 		memset(txvq->stats.size_bins, 0,
820 		       sizeof(txvq->stats.size_bins[0]) * 8);
821 	}
822 
823 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
824 		struct virtnet_rx *rxvq = dev->data->rx_queues[i];
825 		if (rxvq == NULL)
826 			continue;
827 
828 		rxvq->stats.packets = 0;
829 		rxvq->stats.bytes = 0;
830 		rxvq->stats.errors = 0;
831 		rxvq->stats.multicast = 0;
832 		rxvq->stats.broadcast = 0;
833 		memset(rxvq->stats.size_bins, 0,
834 		       sizeof(rxvq->stats.size_bins[0]) * 8);
835 	}
836 
837 	return 0;
838 }
839 
840 static void
841 virtio_set_hwaddr(struct virtio_hw *hw)
842 {
843 	virtio_write_dev_config(hw,
844 			offsetof(struct virtio_net_config, mac),
845 			&hw->mac_addr, RTE_ETHER_ADDR_LEN);
846 }
847 
848 static void
849 virtio_get_hwaddr(struct virtio_hw *hw)
850 {
851 	if (virtio_with_feature(hw, VIRTIO_NET_F_MAC)) {
852 		virtio_read_dev_config(hw,
853 			offsetof(struct virtio_net_config, mac),
854 			&hw->mac_addr, RTE_ETHER_ADDR_LEN);
855 	} else {
856 		rte_eth_random_addr(&hw->mac_addr[0]);
857 		virtio_set_hwaddr(hw);
858 	}
859 }
860 
861 static int
862 virtio_mac_table_set(struct virtio_hw *hw,
863 		     const struct virtio_net_ctrl_mac *uc,
864 		     const struct virtio_net_ctrl_mac *mc)
865 {
866 	struct virtio_pmd_ctrl ctrl;
867 	int err, len[2];
868 
869 	if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
870 		PMD_DRV_LOG(INFO, "host does not support mac table");
871 		return -1;
872 	}
873 
874 	ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
875 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
876 
877 	len[0] = uc->entries * RTE_ETHER_ADDR_LEN + sizeof(uc->entries);
878 	memcpy(ctrl.data, uc, len[0]);
879 
880 	len[1] = mc->entries * RTE_ETHER_ADDR_LEN + sizeof(mc->entries);
881 	memcpy(ctrl.data + len[0], mc, len[1]);
882 
883 	err = virtio_send_command(hw->cvq, &ctrl, len, 2);
884 	if (err != 0)
885 		PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
886 	return err;
887 }
888 
889 static int
890 virtio_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
891 		    uint32_t index, uint32_t vmdq __rte_unused)
892 {
893 	struct virtio_hw *hw = dev->data->dev_private;
894 	const struct rte_ether_addr *addrs = dev->data->mac_addrs;
895 	unsigned int i;
896 	struct virtio_net_ctrl_mac *uc, *mc;
897 
898 	if (index >= VIRTIO_MAX_MAC_ADDRS) {
899 		PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
900 		return -EINVAL;
901 	}
902 
903 	uc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
904 		sizeof(uc->entries));
905 	uc->entries = 0;
906 	mc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
907 		sizeof(mc->entries));
908 	mc->entries = 0;
909 
910 	for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
911 		const struct rte_ether_addr *addr
912 			= (i == index) ? mac_addr : addrs + i;
913 		struct virtio_net_ctrl_mac *tbl
914 			= rte_is_multicast_ether_addr(addr) ? mc : uc;
915 
916 		memcpy(&tbl->macs[tbl->entries++], addr, RTE_ETHER_ADDR_LEN);
917 	}
918 
919 	return virtio_mac_table_set(hw, uc, mc);
920 }
921 
922 static void
923 virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
924 {
925 	struct virtio_hw *hw = dev->data->dev_private;
926 	struct rte_ether_addr *addrs = dev->data->mac_addrs;
927 	struct virtio_net_ctrl_mac *uc, *mc;
928 	unsigned int i;
929 
930 	if (index >= VIRTIO_MAX_MAC_ADDRS) {
931 		PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
932 		return;
933 	}
934 
935 	uc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
936 		sizeof(uc->entries));
937 	uc->entries = 0;
938 	mc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
939 		sizeof(mc->entries));
940 	mc->entries = 0;
941 
942 	for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
943 		struct virtio_net_ctrl_mac *tbl;
944 
945 		if (i == index || rte_is_zero_ether_addr(addrs + i))
946 			continue;
947 
948 		tbl = rte_is_multicast_ether_addr(addrs + i) ? mc : uc;
949 		memcpy(&tbl->macs[tbl->entries++], addrs + i,
950 			RTE_ETHER_ADDR_LEN);
951 	}
952 
953 	virtio_mac_table_set(hw, uc, mc);
954 }
955 
956 static int
957 virtio_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
958 {
959 	struct virtio_hw *hw = dev->data->dev_private;
960 
961 	memcpy(hw->mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
962 
963 	/* Use atomic update if available */
964 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
965 		struct virtio_pmd_ctrl ctrl;
966 		int len = RTE_ETHER_ADDR_LEN;
967 
968 		ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
969 		ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
970 
971 		memcpy(ctrl.data, mac_addr, RTE_ETHER_ADDR_LEN);
972 		return virtio_send_command(hw->cvq, &ctrl, &len, 1);
973 	}
974 
975 	if (!virtio_with_feature(hw, VIRTIO_NET_F_MAC))
976 		return -ENOTSUP;
977 
978 	virtio_set_hwaddr(hw);
979 	return 0;
980 }
981 
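/*
 * Indices into the rte_power_monitor_cond opaque[] array used by
 * virtio_monitor_callback(): expected value, mask applied to the monitored
 * word, and a flag selecting match vs. mismatch comparison.
 */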
982 #define CLB_VAL_IDX 0
983 #define CLB_MSK_IDX 1
984 #define CLB_MATCH_IDX 2
985 static int
986 virtio_monitor_callback(const uint64_t value,
987 		const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])
988 {
989 	const uint64_t m = opaque[CLB_MSK_IDX];
990 	const uint64_t v = opaque[CLB_VAL_IDX];
991 	const uint64_t c = opaque[CLB_MATCH_IDX];
992 
993 	if (c)
994 		return (value & m) == v ? -1 : 0;
995 	else
996 		return (value & m) == v ? 0 : -1;
997 }
998 
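/*
 * Provide the address and condition for power monitoring: the descriptor
 * flags at the used index for packed rings, or the used ring index for
 * split rings.
 */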
999 static int
1000 virtio_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
1001 {
1002 	struct virtnet_rx *rxvq = rx_queue;
1003 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
1004 	struct virtio_hw *hw;
1005 
1006 	if (vq == NULL)
1007 		return -EINVAL;
1008 
1009 	hw = vq->hw;
1010 	if (virtio_with_packed_queue(hw)) {
1011 		struct vring_packed_desc *desc;
1012 		desc = vq->vq_packed.ring.desc;
1013 		pmc->addr = &desc[vq->vq_used_cons_idx].flags;
1014 		if (vq->vq_packed.used_wrap_counter)
1015 			pmc->opaque[CLB_VAL_IDX] =
1016 						VRING_PACKED_DESC_F_AVAIL_USED;
1017 		else
1018 			pmc->opaque[CLB_VAL_IDX] = 0;
1019 		pmc->opaque[CLB_MSK_IDX] = VRING_PACKED_DESC_F_AVAIL_USED;
1020 		pmc->opaque[CLB_MATCH_IDX] = 1;
1021 		pmc->size = sizeof(desc[vq->vq_used_cons_idx].flags);
1022 	} else {
1023 		pmc->addr = &vq->vq_split.ring.used->idx;
1024 		pmc->opaque[CLB_VAL_IDX] = vq->vq_used_cons_idx
1025 					& (vq->vq_nentries - 1);
1026 		pmc->opaque[CLB_MSK_IDX] = vq->vq_nentries - 1;
1027 		pmc->opaque[CLB_MATCH_IDX] = 0;
1028 		pmc->size = sizeof(vq->vq_split.ring.used->idx);
1029 	}
1030 	pmc->fn = virtio_monitor_callback;
1031 
1032 	return 0;
1033 }
1034 
1035 static int
1036 virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1037 {
1038 	struct virtio_hw *hw = dev->data->dev_private;
1039 	struct virtio_pmd_ctrl ctrl;
1040 	int len;
1041 
1042 	if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
1043 		return -ENOTSUP;
1044 
1045 	ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN;
1046 	ctrl.hdr.cmd = on ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
1047 	memcpy(ctrl.data, &vlan_id, sizeof(vlan_id));
1048 	len = sizeof(vlan_id);
1049 
1050 	return virtio_send_command(hw->cvq, &ctrl, &len, 1);
1051 }
1052 
1053 static int
1054 virtio_intr_unmask(struct rte_eth_dev *dev)
1055 {
1056 	struct virtio_hw *hw = dev->data->dev_private;
1057 
1058 	if (rte_intr_ack(dev->intr_handle) < 0)
1059 		return -1;
1060 
1061 	if (VIRTIO_OPS(hw)->intr_detect)
1062 		VIRTIO_OPS(hw)->intr_detect(hw);
1063 
1064 	return 0;
1065 }
1066 
1067 static int
1068 virtio_intr_enable(struct rte_eth_dev *dev)
1069 {
1070 	struct virtio_hw *hw = dev->data->dev_private;
1071 
1072 	if (rte_intr_enable(dev->intr_handle) < 0)
1073 		return -1;
1074 
1075 	if (VIRTIO_OPS(hw)->intr_detect)
1076 		VIRTIO_OPS(hw)->intr_detect(hw);
1077 
1078 	return 0;
1079 }
1080 
1081 static int
1082 virtio_intr_disable(struct rte_eth_dev *dev)
1083 {
1084 	struct virtio_hw *hw = dev->data->dev_private;
1085 
1086 	if (rte_intr_disable(dev->intr_handle) < 0)
1087 		return -1;
1088 
1089 	if (VIRTIO_OPS(hw)->intr_detect)
1090 		VIRTIO_OPS(hw)->intr_detect(hw);
1091 
1092 	return 0;
1093 }
1094 
1095 static int
1096 virtio_ethdev_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
1097 {
1098 	uint64_t host_features;
1099 
1100 	/* Prepare guest_features: the features the driver wants to support */
1101 	PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %" PRIx64,
1102 		req_features);
1103 
1104 	/* Read the device (host) feature bits */
1105 	host_features = VIRTIO_OPS(hw)->get_features(hw);
1106 	PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
1107 		host_features);
1108 
1109 	/* If supported, ensure MTU value is valid before acknowledging it. */
1110 	if (host_features & req_features & (1ULL << VIRTIO_NET_F_MTU)) {
1111 		struct virtio_net_config config;
1112 
1113 		virtio_read_dev_config(hw,
1114 			offsetof(struct virtio_net_config, mtu),
1115 			&config.mtu, sizeof(config.mtu));
1116 
1117 		if (config.mtu < RTE_ETHER_MIN_MTU)
1118 			req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
1119 	}
1120 
1121 	/*
1122 	 * Negotiate features: the subset of device feature bits accepted by
1123 	 * the driver is written back as the guest feature bits.
1124 	 */
1125 	hw->guest_features = req_features;
1126 	hw->guest_features = virtio_negotiate_features(hw, host_features);
1127 	PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
1128 		hw->guest_features);
1129 
1130 	if (VIRTIO_OPS(hw)->features_ok(hw) < 0)
1131 		return -1;
1132 
1133 	if (virtio_with_feature(hw, VIRTIO_F_VERSION_1)) {
1134 		virtio_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
1135 
1136 		if (!(virtio_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
1137 			PMD_INIT_LOG(ERR, "Failed to set FEATURES_OK status!");
1138 			return -1;
1139 		}
1140 	}
1141 
1142 	hw->req_guest_features = req_features;
1143 
1144 	return 0;
1145 }
1146 
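/*
 * Inject a gratuitous RARP packet on Tx queue 0 so that peers refresh their
 * forwarding entries; used when the device reports VIRTIO_NET_S_ANNOUNCE
 * (e.g. after live migration).
 */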
1147 static void
1148 virtio_notify_peers(struct rte_eth_dev *dev)
1149 {
1150 	struct virtio_hw *hw = dev->data->dev_private;
1151 	struct virtnet_rx *rxvq;
1152 	struct rte_mbuf *rarp_mbuf;
1153 
1154 	if (!dev->data->rx_queues)
1155 		return;
1156 
1157 	rxvq = dev->data->rx_queues[0];
1158 	if (!rxvq)
1159 		return;
1160 
1161 	rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool,
1162 			(struct rte_ether_addr *)hw->mac_addr);
1163 	if (rarp_mbuf == NULL) {
1164 		PMD_DRV_LOG(ERR, "failed to make RARP packet.");
1165 		return;
1166 	}
1167 
1168 	rte_spinlock_lock(&hw->state_lock);
1169 	if (hw->started == 0) {
1170 		/* If virtio port just stopped, no need to send RARP */
1171 		rte_pktmbuf_free(rarp_mbuf);
1172 		goto out;
1173 	}
1174 	hw->started = 0;
1175 
1176 	/*
1177 	 * Prevent the worker threads from touching queues to avoid contention;
1178 	 * 1 ms should be enough for the ongoing Tx function to finish.
1179 	 */
1180 	rte_delay_ms(1);
1181 
1182 	hw->inject_pkts = &rarp_mbuf;
1183 	dev->tx_pkt_burst(dev->data->tx_queues[0], &rarp_mbuf, 1);
1184 	hw->inject_pkts = NULL;
1185 
1186 	hw->started = 1;
1187 
1188 out:
1189 	rte_spinlock_unlock(&hw->state_lock);
1190 }
1191 
1192 static void
1193 virtio_ack_link_announce(struct rte_eth_dev *dev)
1194 {
1195 	struct virtio_hw *hw = dev->data->dev_private;
1196 	struct virtio_pmd_ctrl ctrl;
1197 
1198 	ctrl.hdr.class = VIRTIO_NET_CTRL_ANNOUNCE;
1199 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_ANNOUNCE_ACK;
1200 
1201 	virtio_send_command(hw->cvq, &ctrl, NULL, 0);
1202 }
1203 
1204 /*
1205  * Process the virtio config-changed interrupt. Call the LSC callback
1206  * if the link state changed, and generate a gratuitous RARP packet if
1207  * the status indicates an ANNOUNCE.
1208  */
1209 void
1210 virtio_interrupt_handler(void *param)
1211 {
1212 	struct rte_eth_dev *dev = param;
1213 	struct virtio_hw *hw = dev->data->dev_private;
1214 	uint8_t isr;
1215 	uint16_t status;
1216 
1217 	/* Read the interrupt status, which also clears the interrupt */
1218 	isr = virtio_get_isr(hw);
1219 	PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);
1220 
1221 	if (virtio_intr_unmask(dev) < 0)
1222 		PMD_DRV_LOG(ERR, "interrupt enable failed");
1223 
1224 	if (isr & VIRTIO_ISR_CONFIG) {
1225 		if (virtio_dev_link_update(dev, 0) == 0)
1226 			rte_eth_dev_callback_process(dev,
1227 						     RTE_ETH_EVENT_INTR_LSC,
1228 						     NULL);
1229 
1230 		if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
1231 			virtio_read_dev_config(hw,
1232 				offsetof(struct virtio_net_config, status),
1233 				&status, sizeof(status));
1234 			if (status & VIRTIO_NET_S_ANNOUNCE) {
1235 				virtio_notify_peers(dev);
1236 				if (hw->cvq)
1237 					virtio_ack_link_announce(dev);
1238 			}
1239 		}
1240 	}
1241 }
1242 
1243 /* set rx and tx handlers according to what is supported */
1244 static void
1245 set_rxtx_funcs(struct rte_eth_dev *eth_dev)
1246 {
1247 	struct virtio_hw *hw = eth_dev->data->dev_private;
1248 
1249 	eth_dev->tx_pkt_prepare = virtio_xmit_pkts_prepare;
1250 	if (virtio_with_packed_queue(hw)) {
1251 		PMD_INIT_LOG(INFO,
1252 			"virtio: using packed ring %s Tx path on port %u",
1253 			hw->use_vec_tx ? "vectorized" : "standard",
1254 			eth_dev->data->port_id);
1255 		if (hw->use_vec_tx)
1256 			eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed_vec;
1257 		else
1258 			eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
1259 	} else {
1260 		if (hw->use_inorder_tx) {
1261 			PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
1262 				eth_dev->data->port_id);
1263 			eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
1264 		} else {
1265 			PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
1266 				eth_dev->data->port_id);
1267 			eth_dev->tx_pkt_burst = virtio_xmit_pkts;
1268 		}
1269 	}
1270 
1271 	if (virtio_with_packed_queue(hw)) {
1272 		if (hw->use_vec_rx) {
1273 			PMD_INIT_LOG(INFO,
1274 				"virtio: using packed ring vectorized Rx path on port %u",
1275 				eth_dev->data->port_id);
1276 			eth_dev->rx_pkt_burst =
1277 				&virtio_recv_pkts_packed_vec;
1278 		} else if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1279 			PMD_INIT_LOG(INFO,
1280 				"virtio: using packed ring mergeable buffer Rx path on port %u",
1281 				eth_dev->data->port_id);
1282 			eth_dev->rx_pkt_burst =
1283 				&virtio_recv_mergeable_pkts_packed;
1284 		} else {
1285 			PMD_INIT_LOG(INFO,
1286 				"virtio: using packed ring standard Rx path on port %u",
1287 				eth_dev->data->port_id);
1288 			eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
1289 		}
1290 	} else {
1291 		if (hw->use_vec_rx) {
1292 			PMD_INIT_LOG(INFO, "virtio: using vectorized Rx path on port %u",
1293 				eth_dev->data->port_id);
1294 			eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
1295 		} else if (hw->use_inorder_rx) {
1296 			PMD_INIT_LOG(INFO,
1297 				"virtio: using inorder Rx path on port %u",
1298 				eth_dev->data->port_id);
1299 			eth_dev->rx_pkt_burst =	&virtio_recv_pkts_inorder;
1300 		} else if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1301 			PMD_INIT_LOG(INFO,
1302 				"virtio: using mergeable buffer Rx path on port %u",
1303 				eth_dev->data->port_id);
1304 			eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
1305 		} else {
1306 			PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
1307 				eth_dev->data->port_id);
1308 			eth_dev->rx_pkt_burst = &virtio_recv_pkts;
1309 		}
1310 	}
1311 
1312 }
1313 
1314 /* Only support 1:1 queue/interrupt mapping so far.
1315  * TODO: support n:1 queue/interrupt mapping when only a limited number
1316  * of interrupt vectors (< N + 1) is available.
1317  */
1318 static int
1319 virtio_queues_bind_intr(struct rte_eth_dev *dev)
1320 {
1321 	uint32_t i;
1322 	struct virtio_hw *hw = dev->data->dev_private;
1323 
1324 	PMD_INIT_LOG(INFO, "queue/interrupt binding");
1325 	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
1326 		if (rte_intr_vec_list_index_set(dev->intr_handle, i,
1327 						       i + 1))
1328 			return -rte_errno;
1329 		if (VIRTIO_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
1330 						 VIRTIO_MSI_NO_VECTOR) {
1331 			PMD_DRV_LOG(ERR, "failed to set queue vector");
1332 			return -EBUSY;
1333 		}
1334 	}
1335 
1336 	return 0;
1337 }
1338 
1339 static void
1340 virtio_queues_unbind_intr(struct rte_eth_dev *dev)
1341 {
1342 	uint32_t i;
1343 	struct virtio_hw *hw = dev->data->dev_private;
1344 
1345 	PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
1346 	for (i = 0; i < dev->data->nb_rx_queues; ++i)
1347 		VIRTIO_OPS(hw)->set_queue_irq(hw,
1348 					     hw->vqs[i * VTNET_CQ],
1349 					     VIRTIO_MSI_NO_VECTOR);
1350 }
1351 
1352 static int
1353 virtio_configure_intr(struct rte_eth_dev *dev)
1354 {
1355 	struct virtio_hw *hw = dev->data->dev_private;
1356 	int ret;
1357 
1358 	if (!rte_intr_cap_multiple(dev->intr_handle)) {
1359 		PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
1360 		return -ENOTSUP;
1361 	}
1362 
1363 	ret = rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues);
1364 	if (ret < 0) {
1365 		PMD_INIT_LOG(ERR, "Fail to create eventfd");
1366 		return ret;
1367 	}
1368 
1369 	ret = rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec",
1370 				      hw->max_queue_pairs);
1371 	if (ret < 0) {
1372 		PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",
1373 			     hw->max_queue_pairs);
1374 		return ret;
1375 	}
1376 
1377 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
1378 		/* Re-register callback to update max_intr */
1379 		rte_intr_callback_unregister(dev->intr_handle,
1380 					     virtio_interrupt_handler,
1381 					     dev);
1382 		rte_intr_callback_register(dev->intr_handle,
1383 					   virtio_interrupt_handler,
1384 					   dev);
1385 	}
1386 
1387 	/* DO NOT try to remove this! This call enables MSI-X; without it QEMU
1388 	 * hits a SIGSEGV when DRIVER_OK is sent.
1389 	 * For legacy devices it must also be done before queue/vector binding,
1390 	 * so that the config size grows from 20 to 24 bytes; otherwise writes to
1391 	 * VIRTIO_MSI_QUEUE_VECTOR (offset 22) are ignored.
1392 	 */
1393 	if (virtio_intr_enable(dev) < 0) {
1394 		PMD_DRV_LOG(ERR, "interrupt enable failed");
1395 		return -EINVAL;
1396 	}
1397 
1398 	ret = virtio_queues_bind_intr(dev);
1399 	if (ret < 0) {
1400 		PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");
1401 		return ret;
1402 	}
1403 
1404 	return 0;
1405 }
1406 
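/*
 * Read link speed and duplex from the device config space
 * (VIRTIO_NET_F_SPEED_DUPLEX) and cache them in virtio_hw.
 */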
1407 static void
1408 virtio_get_speed_duplex(struct rte_eth_dev *eth_dev,
1409 			struct rte_eth_link *link)
1410 {
1411 	struct virtio_hw *hw = eth_dev->data->dev_private;
1412 	struct virtio_net_config *config;
1413 	struct virtio_net_config local_config;
1414 
1415 	config = &local_config;
1416 	virtio_read_dev_config(hw,
1417 		offsetof(struct virtio_net_config, speed),
1418 		&config->speed, sizeof(config->speed));
1419 	virtio_read_dev_config(hw,
1420 		offsetof(struct virtio_net_config, duplex),
1421 		&config->duplex, sizeof(config->duplex));
1422 	hw->speed = config->speed;
1423 	hw->duplex = config->duplex;
1424 	if (link != NULL) {
1425 		link->link_duplex = hw->duplex;
1426 		link->link_speed  = hw->speed;
1427 	}
1428 	PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
1429 		     hw->speed, hw->duplex);
1430 }
1431 
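/* Translate ethdev RSS hash flags (RTE_ETH_RSS_*) into virtio hash types (VIRTIO_NET_HASH_TYPE_*). */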
1432 static uint64_t
1433 ethdev_to_virtio_rss_offloads(uint64_t ethdev_hash_types)
1434 {
1435 	uint64_t virtio_hash_types = 0;
1436 
1437 	if (ethdev_hash_types & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
1438 				RTE_ETH_RSS_NONFRAG_IPV4_OTHER))
1439 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_IPV4;
1440 
1441 	if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
1442 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_TCPV4;
1443 
1444 	if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
1445 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_UDPV4;
1446 
1447 	if (ethdev_hash_types & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
1448 				RTE_ETH_RSS_NONFRAG_IPV6_OTHER))
1449 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_IPV6;
1450 
1451 	if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
1452 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_TCPV6;
1453 
1454 	if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
1455 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_UDPV6;
1456 
1457 	if (ethdev_hash_types & RTE_ETH_RSS_IPV6_EX)
1458 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_IP_EX;
1459 
1460 	if (ethdev_hash_types & RTE_ETH_RSS_IPV6_TCP_EX)
1461 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_TCP_EX;
1462 
1463 	if (ethdev_hash_types & RTE_ETH_RSS_IPV6_UDP_EX)
1464 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_UDP_EX;
1465 
1466 	return virtio_hash_types;
1467 }
1468 
1469 static uint64_t
1470 virtio_to_ethdev_rss_offloads(uint64_t virtio_hash_types)
1471 {
1472 	uint64_t rss_offloads = 0;
1473 
1474 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_IPV4)
1475 		rss_offloads |= RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
1476 			RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
1477 
1478 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_TCPV4)
1479 		rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
1480 
1481 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_UDPV4)
1482 		rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
1483 
1484 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_IPV6)
1485 		rss_offloads |= RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
1486 			RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
1487 
1488 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_TCPV6)
1489 		rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
1490 
1491 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_UDPV6)
1492 		rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
1493 
1494 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_IP_EX)
1495 		rss_offloads |= RTE_ETH_RSS_IPV6_EX;
1496 
1497 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_TCP_EX)
1498 		rss_offloads |= RTE_ETH_RSS_IPV6_TCP_EX;
1499 
1500 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_UDP_EX)
1501 		rss_offloads |= RTE_ETH_RSS_IPV6_UDP_EX;
1502 
1503 	return rss_offloads;
1504 }
1505 
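/*
 * Read the device RSS capabilities (max key size, max indirection table
 * length, supported hash types) from the config space and validate them
 * against the driver's fixed key and reta sizes.
 */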
1506 static int
1507 virtio_dev_get_rss_config(struct virtio_hw *hw, uint32_t *rss_hash_types)
1508 {
1509 	struct virtio_net_config local_config;
1510 	struct virtio_net_config *config = &local_config;
1511 
1512 	virtio_read_dev_config(hw,
1513 			offsetof(struct virtio_net_config, rss_max_key_size),
1514 			&config->rss_max_key_size,
1515 			sizeof(config->rss_max_key_size));
1516 	if (config->rss_max_key_size < VIRTIO_NET_RSS_KEY_SIZE) {
1517 		PMD_INIT_LOG(ERR, "Invalid device RSS max key size (%u)",
1518 				config->rss_max_key_size);
1519 		return -EINVAL;
1520 	}
1521 
1522 	virtio_read_dev_config(hw,
1523 			offsetof(struct virtio_net_config,
1524 				rss_max_indirection_table_length),
1525 			&config->rss_max_indirection_table_length,
1526 			sizeof(config->rss_max_indirection_table_length));
1527 	if (config->rss_max_indirection_table_length < VIRTIO_NET_RSS_RETA_SIZE) {
1528 		PMD_INIT_LOG(ERR, "Invalid device RSS max reta size (%u)",
1529 				config->rss_max_indirection_table_length);
1530 		return -EINVAL;
1531 	}
1532 
1533 	virtio_read_dev_config(hw,
1534 			offsetof(struct virtio_net_config, supported_hash_types),
1535 			&config->supported_hash_types,
1536 			sizeof(config->supported_hash_types));
1537 	if ((config->supported_hash_types & VIRTIO_NET_HASH_TYPE_MASK) == 0) {
1538 		PMD_INIT_LOG(ERR, "Invalid device RSS hash types (0x%x)",
1539 				config->supported_hash_types);
1540 		return -EINVAL;
1541 	}
1542 
1543 	*rss_hash_types = config->supported_hash_types & VIRTIO_NET_HASH_TYPE_MASK;
1544 
1545 	PMD_INIT_LOG(DEBUG, "Device RSS config:");
1546 	PMD_INIT_LOG(DEBUG, "\t-Max key size: %u", config->rss_max_key_size);
1547 	PMD_INIT_LOG(DEBUG, "\t-Max reta size: %u", config->rss_max_indirection_table_length);
1548 	PMD_INIT_LOG(DEBUG, "\t-Supported hash types: 0x%x", *rss_hash_types);
1549 
1550 	return 0;
1551 }
1552 
1553 static int
1554 virtio_dev_rss_hash_update(struct rte_eth_dev *dev,
1555 		struct rte_eth_rss_conf *rss_conf)
1556 {
1557 	struct virtio_hw *hw = dev->data->dev_private;
1558 	char old_rss_key[VIRTIO_NET_RSS_KEY_SIZE];
1559 	uint32_t old_hash_types;
1560 	uint16_t nb_queues;
1561 	int ret;
1562 
1563 	if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
1564 		return -ENOTSUP;
1565 
1566 	if (rss_conf->rss_hf & ~virtio_to_ethdev_rss_offloads(VIRTIO_NET_HASH_TYPE_MASK))
1567 		return -EINVAL;
1568 
1569 	old_hash_types = hw->rss_hash_types;
1570 	hw->rss_hash_types = ethdev_to_virtio_rss_offloads(rss_conf->rss_hf);
1571 
1572 	if (rss_conf->rss_key && rss_conf->rss_key_len) {
1573 		if (rss_conf->rss_key_len != VIRTIO_NET_RSS_KEY_SIZE) {
1574 			PMD_INIT_LOG(ERR, "Driver only supports %u RSS key length",
1575 					VIRTIO_NET_RSS_KEY_SIZE);
1576 			ret = -EINVAL;
1577 			goto restore_types;
1578 		}
1579 		memcpy(old_rss_key, hw->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
1580 		memcpy(hw->rss_key, rss_conf->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
1581 	}
1582 
1583 	nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1584 	ret = virtio_set_multiple_queues_rss(dev, nb_queues);
1585 	if (ret < 0) {
1586 		PMD_INIT_LOG(ERR, "Failed to apply new RSS config to the device");
1587 		goto restore_key;
1588 	}
1589 
1590 	return 0;
1591 restore_key:
1592 	if (rss_conf->rss_key && rss_conf->rss_key_len)
1593 		memcpy(hw->rss_key, old_rss_key, VIRTIO_NET_RSS_KEY_SIZE);
1594 restore_types:
1595 	hw->rss_hash_types = old_hash_types;
1596 
1597 	return ret;
1598 }
1599 
1600 static int
1601 virtio_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1602 		struct rte_eth_rss_conf *rss_conf)
1603 {
1604 	struct virtio_hw *hw = dev->data->dev_private;
1605 
1606 	if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
1607 		return -ENOTSUP;
1608 
1609 	if (rss_conf->rss_key && rss_conf->rss_key_len >= VIRTIO_NET_RSS_KEY_SIZE)
1610 		memcpy(rss_conf->rss_key, hw->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
1611 	rss_conf->rss_key_len = VIRTIO_NET_RSS_KEY_SIZE;
1612 	rss_conf->rss_hf = virtio_to_ethdev_rss_offloads(hw->rss_hash_types);
1613 
1614 	return 0;
1615 }
1616 
1617 static int virtio_dev_rss_reta_update(struct rte_eth_dev *dev,
1618 			 struct rte_eth_rss_reta_entry64 *reta_conf,
1619 			 uint16_t reta_size)
1620 {
1621 	struct virtio_hw *hw = dev->data->dev_private;
1622 	uint16_t nb_queues;
1623 	uint16_t old_reta[VIRTIO_NET_RSS_RETA_SIZE];
1624 	int idx, pos, i, ret;
1625 
1626 	if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
1627 		return -ENOTSUP;
1628 
1629 	if (reta_size != VIRTIO_NET_RSS_RETA_SIZE)
1630 		return -EINVAL;
1631 
1632 	memcpy(old_reta, hw->rss_reta, sizeof(old_reta));
1633 
1634 	for (i = 0; i < reta_size; i++) {
1635 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1636 		pos = i % RTE_ETH_RETA_GROUP_SIZE;
1637 
1638 		if (((reta_conf[idx].mask >> pos) & 0x1) == 0)
1639 			continue;
1640 
1641 		hw->rss_reta[i] = reta_conf[idx].reta[pos];
1642 	}
1643 
1644 	nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
1645 	ret = virtio_set_multiple_queues_rss(dev, nb_queues);
1646 	if (ret < 0) {
1647 		PMD_INIT_LOG(ERR, "Failed to apply new RETA to the device");
1648 		memcpy(hw->rss_reta, old_reta, sizeof(old_reta));
1649 	}
1650 
1651 	hw->rss_rx_queues = dev->data->nb_rx_queues;
1652 
1653 	return ret;
1654 }
1655 
1656 static int virtio_dev_rss_reta_query(struct rte_eth_dev *dev,
1657 			 struct rte_eth_rss_reta_entry64 *reta_conf,
1658 			 uint16_t reta_size)
1659 {
1660 	struct virtio_hw *hw = dev->data->dev_private;
1661 	int idx, i;
1662 
1663 	if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
1664 		return -ENOTSUP;
1665 
1666 	if (reta_size != VIRTIO_NET_RSS_RETA_SIZE)
1667 		return -EINVAL;
1668 
1669 	for (i = 0; i < reta_size; i++) {
1670 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
1671 		reta_conf[idx].reta[i % RTE_ETH_RETA_GROUP_SIZE] = hw->rss_reta[i];
1672 	}
1673 
1674 	return 0;
1675 }
1676 
1677 /*
1678  * The default RSS hash key is the default key used by Intel IXGBE
1679  * devices. The application can replace it with any 40-byte key
1680  * value.
1681  */
1682 static uint8_t rss_intel_key[VIRTIO_NET_RSS_KEY_SIZE] = {
1683 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1684 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1685 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1686 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1687 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1688 };
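/*
 * Illustrative application-side usage for replacing the key (a sketch, not
 * part of this driver; "app_key" is a hypothetical 40-byte array):
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = app_key,
 *		.rss_key_len = VIRTIO_NET_RSS_KEY_SIZE,
 *		.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
 *	};
 *	rte_eth_dev_rss_hash_update(port_id, &conf);
 */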
1689 
1690 static int
1691 virtio_dev_rss_init(struct rte_eth_dev *eth_dev)
1692 {
1693 	struct virtio_hw *hw = eth_dev->data->dev_private;
1694 	uint16_t nb_rx_queues = eth_dev->data->nb_rx_queues;
1695 	struct rte_eth_rss_conf *rss_conf;
1696 	int ret, i;
1697 
1698 	if (!nb_rx_queues) {
1699 		PMD_INIT_LOG(ERR, "Cannot init RSS if no Rx queues");
1700 		return -EINVAL;
1701 	}
1702 
1703 	rss_conf = &eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
1704 
1705 	ret = virtio_dev_get_rss_config(hw, &hw->rss_hash_types);
1706 	if (ret)
1707 		return ret;
1708 
1709 	if (rss_conf->rss_hf) {
1710 		/*  Ensure requested hash types are supported by the device */
1711 		if (rss_conf->rss_hf & ~virtio_to_ethdev_rss_offloads(hw->rss_hash_types))
1712 			return -EINVAL;
1713 
1714 		hw->rss_hash_types = ethdev_to_virtio_rss_offloads(rss_conf->rss_hf);
1715 	}
1716 
1717 	if (!hw->rss_key) {
1718 		/* Set up the default RSS key if not already provided by the user */
1719 		hw->rss_key = rte_malloc_socket("rss_key",
1720 				VIRTIO_NET_RSS_KEY_SIZE, 0,
1721 				eth_dev->device->numa_node);
1722 		if (!hw->rss_key) {
1723 			PMD_INIT_LOG(ERR, "Failed to allocate RSS key");
1724 			return -ENOMEM;
1725 		}
1726 	}
1727 
1728 	if (rss_conf->rss_key && rss_conf->rss_key_len) {
1729 		if (rss_conf->rss_key_len != VIRTIO_NET_RSS_KEY_SIZE) {
1730 			PMD_INIT_LOG(ERR, "Driver only supports %u RSS key length",
1731 					VIRTIO_NET_RSS_KEY_SIZE);
1732 			return -EINVAL;
1733 		}
1734 		memcpy(hw->rss_key, rss_conf->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
1735 	} else {
1736 		memcpy(hw->rss_key, rss_intel_key, VIRTIO_NET_RSS_KEY_SIZE);
1737 	}
1738 
1739 	if (!hw->rss_reta) {
1740 		/* Set up the default RSS reta if not already provided by the user */
1741 		hw->rss_reta = rte_zmalloc_socket("rss_reta",
1742 				VIRTIO_NET_RSS_RETA_SIZE * sizeof(uint16_t), 0,
1743 				eth_dev->device->numa_node);
1744 		if (!hw->rss_reta) {
1745 			PMD_INIT_LOG(ERR, "Failed to allocate RSS reta");
1746 			return -ENOMEM;
1747 		}
1748 
1749 		hw->rss_rx_queues = 0;
1750 	}
1751 
1752 	/* Re-initialize the RSS reta if the number of RX queues has changed */
1753 	if (hw->rss_rx_queues != nb_rx_queues) {
1754 		for (i = 0; i < VIRTIO_NET_RSS_RETA_SIZE; i++)
1755 			hw->rss_reta[i] = i % nb_rx_queues;
1756 		hw->rss_rx_queues = nb_rx_queues;
1757 	}
1758 
1759 	return 0;
1760 }
1761 
1762 #define DUPLEX_UNKNOWN   0xff
1763 /* reset device and renegotiate features if needed */
1764 static int
1765 virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
1766 {
1767 	struct virtio_hw *hw = eth_dev->data->dev_private;
1768 	struct virtio_net_config *config;
1769 	struct virtio_net_config local_config;
1770 	int ret;
1771 
1772 	/* Reset the device, although this is not strictly necessary at startup */
1773 	virtio_reset(hw);
1774 
1775 	if (hw->vqs) {
1776 		virtio_dev_free_mbufs(eth_dev);
1777 		virtio_free_queues(hw);
1778 	}
1779 
1780 	/* Tell the host we've noticed this device. */
1781 	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
1782 
1783 	/* Tell the host we know how to drive the device. */
1784 	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
1785 	if (virtio_ethdev_negotiate_features(hw, req_features) < 0)
1786 		return -EINVAL;
1787 
1788 	hw->weak_barriers = !virtio_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
1789 
1790 	/* If the host does not support both the status feature and MSI-X, disable LSC */
1791 	if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS) && hw->intr_lsc)
1792 		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
1793 	else
1794 		eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
1795 
1796 	/* Setting up rx_header size for the device */
1797 	if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
1798 	    virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
1799 	    virtio_with_packed_queue(hw))
1800 		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1801 	else
1802 		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
1803 
1804 	/* Copy the permanent MAC address into virtio_hw */
1805 	virtio_get_hwaddr(hw);
1806 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
1807 			&eth_dev->data->mac_addrs[0]);
1808 	PMD_INIT_LOG(DEBUG,
1809 		     "PORT MAC: " RTE_ETHER_ADDR_PRT_FMT,
1810 		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
1811 		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
1812 
1813 	hw->get_speed_via_feat = hw->speed == RTE_ETH_SPEED_NUM_UNKNOWN &&
1814 			     virtio_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX);
1815 	if (hw->get_speed_via_feat)
1816 		virtio_get_speed_duplex(eth_dev, NULL);
1817 	if (hw->duplex == DUPLEX_UNKNOWN)
1818 		hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;
1819 	PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
1820 		hw->speed, hw->duplex);
1821 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
1822 		config = &local_config;
1823 
1824 		virtio_read_dev_config(hw,
1825 			offsetof(struct virtio_net_config, mac),
1826 			&config->mac, sizeof(config->mac));
1827 
1828 		if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
1829 			virtio_read_dev_config(hw,
1830 				offsetof(struct virtio_net_config, status),
1831 				&config->status, sizeof(config->status));
1832 		} else {
1833 			PMD_INIT_LOG(DEBUG,
1834 				     "VIRTIO_NET_F_STATUS is not supported");
1835 			config->status = 0;
1836 		}
1837 
1838 		if (virtio_with_feature(hw, VIRTIO_NET_F_MQ) ||
1839 				virtio_with_feature(hw, VIRTIO_NET_F_RSS)) {
1840 			virtio_read_dev_config(hw,
1841 				offsetof(struct virtio_net_config, max_virtqueue_pairs),
1842 				&config->max_virtqueue_pairs,
1843 				sizeof(config->max_virtqueue_pairs));
1844 		} else {
1845 			PMD_INIT_LOG(DEBUG,
1846 				     "Neither VIRTIO_NET_F_MQ nor VIRTIO_NET_F_RSS is supported");
1847 			config->max_virtqueue_pairs = 1;
1848 		}
1849 
1850 		hw->max_queue_pairs = config->max_virtqueue_pairs;
1851 
1852 		if (virtio_with_feature(hw, VIRTIO_NET_F_MTU)) {
1853 			virtio_read_dev_config(hw,
1854 				offsetof(struct virtio_net_config, mtu),
1855 				&config->mtu,
1856 				sizeof(config->mtu));
1857 
1858 			/*
1859 			 * MTU value has already been checked at negotiation
1860 			 * time, but check again in case it has changed since
1861 			 * then, which should not happen.
1862 			 */
1863 			if (config->mtu < RTE_ETHER_MIN_MTU) {
1864 				PMD_INIT_LOG(ERR, "invalid max MTU value (%u)",
1865 						config->mtu);
1866 				return -EINVAL;
1867 			}
1868 
1869 			hw->max_mtu = config->mtu;
1870 			/* Set the initial MTU to the maximum supported by the host */
1871 			eth_dev->data->mtu = config->mtu;
1872 
1873 		} else {
1874 			hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -
1875 				VLAN_TAG_LEN - hw->vtnet_hdr_size;
1876 		}
1877 
1878 		hw->rss_hash_types = 0;
1879 		if (virtio_with_feature(hw, VIRTIO_NET_F_RSS)) {
1880 			ret = virtio_dev_rss_init(eth_dev);
1881 			if (ret < 0)
1882 				return ret;
1883 		}
1884 
1885 		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
1886 				config->max_virtqueue_pairs);
1887 		PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
1888 		PMD_INIT_LOG(DEBUG,
1889 				"PORT MAC: " RTE_ETHER_ADDR_PRT_FMT,
1890 				config->mac[0], config->mac[1],
1891 				config->mac[2], config->mac[3],
1892 				config->mac[4], config->mac[5]);
1893 	} else {
1894 		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
1895 		hw->max_queue_pairs = 1;
1896 		hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -
1897 			VLAN_TAG_LEN - hw->vtnet_hdr_size;
1898 	}
1899 
1900 	ret = virtio_alloc_queues(eth_dev);
1901 	if (ret < 0)
1902 		return ret;
1903 
1904 	if (eth_dev->data->dev_conf.intr_conf.rxq) {
1905 		ret = virtio_configure_intr(eth_dev);
1906 		if (ret < 0) {
1907 			PMD_INIT_LOG(ERR, "failed to configure interrupt");
1908 			virtio_free_queues(hw);
1909 			return ret;
1910 		}
1911 	}
1912 
1913 	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1914 		/* Enable vector (0) for Link State Interrupt */
1915 		if (VIRTIO_OPS(hw)->set_config_irq(hw, 0) ==
1916 				VIRTIO_MSI_NO_VECTOR) {
1917 			PMD_DRV_LOG(ERR, "failed to set config vector");
1918 			return -EBUSY;
1919 		}
1920 
1921 	virtio_reinit_complete(hw);
1922 
1923 	return 0;
1924 }
1925 
1926 /*
1927  * This function is based on probe() function in virtio_pci.c
1928  * It returns 0 on success.
1929  */
1930 int
1931 eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
1932 {
1933 	struct virtio_hw *hw = eth_dev->data->dev_private;
1934 	uint32_t speed = RTE_ETH_SPEED_NUM_UNKNOWN;
1935 	int vectorized = 0;
1936 	int ret;
1937 
1938 	if (sizeof(struct virtio_net_hdr_mrg_rxbuf) > RTE_PKTMBUF_HEADROOM) {
1939 		PMD_INIT_LOG(ERR,
1940 			"Insufficient headroom: required = %d, avail = %d",
1941 			(int)sizeof(struct virtio_net_hdr_mrg_rxbuf),
1942 			RTE_PKTMBUF_HEADROOM);
1943 
1944 		return -1;
1945 	}
1946 
1947 	eth_dev->dev_ops = &virtio_eth_dev_ops;
1948 
1949 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1950 		set_rxtx_funcs(eth_dev);
1951 		return 0;
1952 	}
1953 
1954 	ret = virtio_dev_devargs_parse(eth_dev->device->devargs, &speed, &vectorized);
1955 	if (ret < 0)
1956 		return ret;
1957 	hw->speed = speed;
1958 	hw->duplex = DUPLEX_UNKNOWN;
1959 
1960 	/* Allocate memory for storing MAC addresses */
1961 	eth_dev->data->mac_addrs = rte_zmalloc("virtio",
1962 				VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN, 0);
1963 	if (eth_dev->data->mac_addrs == NULL) {
1964 		PMD_INIT_LOG(ERR,
1965 			"Failed to allocate %d bytes needed to store MAC addresses",
1966 			VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN);
1967 		return -ENOMEM;
1968 	}
1969 
1970 	rte_spinlock_init(&hw->state_lock);
1971 
1972 	if (vectorized) {
1973 		hw->use_vec_rx = 1;
1974 		hw->use_vec_tx = 1;
1975 	}
1976 
1977 	/* reset device and negotiate default features */
1978 	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
1979 	if (ret < 0)
1980 		goto err_virtio_init;
1981 
1982 	if (vectorized) {
1983 		if (!virtio_with_packed_queue(hw)) {
1984 			hw->use_vec_tx = 0;
1985 		} else {
1986 #if !defined(CC_AVX512_SUPPORT) && !defined(RTE_ARCH_ARM)
1987 			hw->use_vec_rx = 0;
1988 			hw->use_vec_tx = 0;
1989 			PMD_DRV_LOG(INFO,
1990 				"build environment does not support packed ring vectorized path");
1991 #endif
1992 		}
1993 	}
1994 
1995 	hw->opened = 1;
1996 
1997 	return 0;
1998 
1999 err_virtio_init:
2000 	rte_free(eth_dev->data->mac_addrs);
2001 	eth_dev->data->mac_addrs = NULL;
2002 	return ret;
2003 }
2004 
2005 static uint32_t
2006 virtio_dev_speed_capa_get(uint32_t speed)
2007 {
2008 	switch (speed) {
2009 	case RTE_ETH_SPEED_NUM_10G:
2010 		return RTE_ETH_LINK_SPEED_10G;
2011 	case RTE_ETH_SPEED_NUM_20G:
2012 		return RTE_ETH_LINK_SPEED_20G;
2013 	case RTE_ETH_SPEED_NUM_25G:
2014 		return RTE_ETH_LINK_SPEED_25G;
2015 	case RTE_ETH_SPEED_NUM_40G:
2016 		return RTE_ETH_LINK_SPEED_40G;
2017 	case RTE_ETH_SPEED_NUM_50G:
2018 		return RTE_ETH_LINK_SPEED_50G;
2019 	case RTE_ETH_SPEED_NUM_56G:
2020 		return RTE_ETH_LINK_SPEED_56G;
2021 	case RTE_ETH_SPEED_NUM_100G:
2022 		return RTE_ETH_LINK_SPEED_100G;
2023 	case RTE_ETH_SPEED_NUM_200G:
2024 		return RTE_ETH_LINK_SPEED_200G;
2025 	case RTE_ETH_SPEED_NUM_400G:
2026 		return RTE_ETH_LINK_SPEED_400G;
2027 	default:
2028 		return 0;
2029 	}
2030 }
2031 
2032 static int vectorized_check_handler(__rte_unused const char *key,
2033 		const char *value, void *ret_val)
2034 {
2035 	if (value == NULL || ret_val == NULL)
2036 		return -EINVAL;
2037 
2038 	if (strcmp(value, "1") == 0)
2039 		*(int *)ret_val = 1;
2040 	else
2041 		*(int *)ret_val = 0;
2042 
2043 	return 0;
2044 }
2045 
2046 #define VIRTIO_ARG_SPEED      "speed"
2047 #define VIRTIO_ARG_VECTORIZED "vectorized"
2048 
2049 static int
2050 link_speed_handler(const char *key __rte_unused,
2051 		const char *value, void *ret_val)
2052 {
2053 	uint32_t val;
2054 	if (!value || !ret_val)
2055 		return -EINVAL;
2056 	val = strtoul(value, NULL, 0);
2057 	/* validate input */
2058 	if (virtio_dev_speed_capa_get(val) == 0)
2059 		return -EINVAL;
2060 	*(uint32_t *)ret_val = val;
2061 
2062 	return 0;
2063 }
2064 
2065 
2066 static int
2067 virtio_dev_devargs_parse(struct rte_devargs *devargs, uint32_t *speed, int *vectorized)
2068 {
2069 	struct rte_kvargs *kvlist;
2070 	int ret = 0;
2071 
2072 	if (devargs == NULL)
2073 		return 0;
2074 
2075 	kvlist = rte_kvargs_parse(devargs->args, NULL);
2076 	if (kvlist == NULL) {
2077 		PMD_INIT_LOG(ERR, "error when parsing param");
2078 		return 0;
2079 	}
2080 
2081 	if (speed && rte_kvargs_count(kvlist, VIRTIO_ARG_SPEED) == 1) {
2082 		ret = rte_kvargs_process(kvlist,
2083 					VIRTIO_ARG_SPEED,
2084 					link_speed_handler, speed);
2085 		if (ret < 0) {
2086 			PMD_INIT_LOG(ERR, "Failed to parse %s",
2087 					VIRTIO_ARG_SPEED);
2088 			goto exit;
2089 		}
2090 	}
2091 
2092 	if (vectorized &&
2093 		rte_kvargs_count(kvlist, VIRTIO_ARG_VECTORIZED) == 1) {
2094 		ret = rte_kvargs_process(kvlist,
2095 				VIRTIO_ARG_VECTORIZED,
2096 				vectorized_check_handler, vectorized);
2097 		if (ret < 0) {
2098 			PMD_INIT_LOG(ERR, "Failed to parse %s",
2099 					VIRTIO_ARG_VECTORIZED);
2100 			goto exit;
2101 		}
2102 	}
2103 
2104 exit:
2105 	rte_kvargs_free(kvlist);
2106 	return ret;
2107 }
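
/*
 * Illustrative sketch (not part of this driver): the "speed" and "vectorized"
 * keys are normally passed on the EAL command line, e.g.
 * "-a 0000:00:04.0,speed=10000,vectorized=1" for a PCI port (the PCI address
 * is an assumption for the example). The snippet below applies the same
 * kvargs handling to a literal string.
 */
#if 0	/* example only, not compiled */
static void
example_parse_virtio_args(void)
{
	struct rte_kvargs *kvlist;
	uint32_t speed = RTE_ETH_SPEED_NUM_UNKNOWN;
	int vectorized = 0;

	kvlist = rte_kvargs_parse("speed=10000,vectorized=1", NULL);
	if (kvlist == NULL)
		return;

	rte_kvargs_process(kvlist, VIRTIO_ARG_SPEED,
			   link_speed_handler, &speed);
	rte_kvargs_process(kvlist, VIRTIO_ARG_VECTORIZED,
			   vectorized_check_handler, &vectorized);
	rte_kvargs_free(kvlist);
}
#endif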
2108 
2109 static uint8_t
2110 rx_offload_enabled(struct virtio_hw *hw)
2111 {
2112 	return virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
2113 		virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
2114 		virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
2115 }
2116 
2117 static uint8_t
2118 tx_offload_enabled(struct virtio_hw *hw)
2119 {
2120 	return virtio_with_feature(hw, VIRTIO_NET_F_CSUM) ||
2121 		virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
2122 		virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
2123 }
2124 
2125 /*
2126  * Configure virtio device
2127  * It returns 0 on success.
2128  */
2129 static int
2130 virtio_dev_configure(struct rte_eth_dev *dev)
2131 {
2132 	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
2133 	const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
2134 	struct virtio_hw *hw = dev->data->dev_private;
2135 	uint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +
2136 		hw->vtnet_hdr_size;
2137 	uint64_t rx_offloads = rxmode->offloads;
2138 	uint64_t tx_offloads = txmode->offloads;
2139 	uint64_t req_features;
2140 	int ret;
2141 
2142 	PMD_INIT_LOG(DEBUG, "configure");
2143 	req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
2144 
2145 	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE && rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
2146 		PMD_DRV_LOG(ERR,
2147 			"Unsupported Rx multi queue mode %d",
2148 			rxmode->mq_mode);
2149 		return -EINVAL;
2150 	}
2151 
2152 	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
2153 		PMD_DRV_LOG(ERR,
2154 			"Unsupported Tx multi queue mode %d",
2155 			txmode->mq_mode);
2156 		return -EINVAL;
2157 	}
2158 
2159 	if (dev->data->dev_conf.intr_conf.rxq) {
2160 		ret = virtio_init_device(dev, hw->req_guest_features);
2161 		if (ret < 0)
2162 			return ret;
2163 	}
2164 
2165 	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS)
2166 		req_features |= (1ULL << VIRTIO_NET_F_RSS);
2167 
2168 	if (rxmode->mtu > hw->max_mtu)
2169 		req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
2170 
2171 	hw->max_rx_pkt_len = ether_hdr_len + rxmode->mtu;
2172 
2173 	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
2174 			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
2175 		req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
2176 
2177 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
2178 		req_features |=
2179 			(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
2180 			(1ULL << VIRTIO_NET_F_GUEST_TSO6);
2181 
2182 	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
2183 			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
2184 		req_features |= (1ULL << VIRTIO_NET_F_CSUM);
2185 
2186 	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)
2187 		req_features |=
2188 			(1ULL << VIRTIO_NET_F_HOST_TSO4) |
2189 			(1ULL << VIRTIO_NET_F_HOST_TSO6);
2190 
2191 	/* if requested features changed, reinit the device */
2192 	if (req_features != hw->req_guest_features) {
2193 		ret = virtio_init_device(dev, req_features);
2194 		if (ret < 0)
2195 			return ret;
2196 	}
2197 
2198 	/* if queues are not allocated, reinit the device */
2199 	if (hw->vqs == NULL) {
2200 		ret = virtio_init_device(dev, hw->req_guest_features);
2201 		if (ret < 0)
2202 			return ret;
2203 	}
2204 
2205 	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) &&
2206 			!virtio_with_feature(hw, VIRTIO_NET_F_RSS)) {
2207 		PMD_DRV_LOG(ERR, "RSS support requested but not supported by the device");
2208 		return -ENOTSUP;
2209 	}
2210 
2211 	if ((rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
2212 			    RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) &&
2213 		!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
2214 		PMD_DRV_LOG(ERR,
2215 			"rx checksum not available on this host");
2216 		return -ENOTSUP;
2217 	}
2218 
2219 	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
2220 		(!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
2221 		 !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
2222 		PMD_DRV_LOG(ERR,
2223 			"Large Receive Offload not available on this host");
2224 		return -ENOTSUP;
2225 	}
2226 
2227 	/* start control queue */
2228 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
2229 		virtio_dev_cq_start(dev);
2230 
2231 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
2232 		hw->vlan_strip = 1;
2233 
2234 	hw->rx_ol_scatter = (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
2235 
2236 	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
2237 			!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
2238 		PMD_DRV_LOG(ERR,
2239 			    "vlan filtering not available on this host");
2240 		return -ENOTSUP;
2241 	}
2242 
2243 	hw->has_tx_offload = tx_offload_enabled(hw);
2244 	hw->has_rx_offload = rx_offload_enabled(hw);
2245 
2246 	if (virtio_with_packed_queue(hw)) {
2247 #if defined(RTE_ARCH_X86_64) && defined(CC_AVX512_SUPPORT)
2248 		if ((hw->use_vec_rx || hw->use_vec_tx) &&
2249 		    (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ||
2250 		     !virtio_with_feature(hw, VIRTIO_F_IN_ORDER) ||
2251 		     !virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
2252 		     rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)) {
2253 			PMD_DRV_LOG(INFO,
2254 				"disabled packed ring vectorized path for requirements not met");
2255 			hw->use_vec_rx = 0;
2256 			hw->use_vec_tx = 0;
2257 		}
2258 #elif defined(RTE_ARCH_ARM)
2259 		if ((hw->use_vec_rx || hw->use_vec_tx) &&
2260 		    (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON) ||
2261 		     !virtio_with_feature(hw, VIRTIO_F_IN_ORDER) ||
2262 		     !virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
2263 		     rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)) {
2264 			PMD_DRV_LOG(INFO,
2265 				"disabled packed ring vectorized path for requirements not met");
2266 			hw->use_vec_rx = 0;
2267 			hw->use_vec_tx = 0;
2268 		}
2269 #else
2270 		hw->use_vec_rx = 0;
2271 		hw->use_vec_tx = 0;
2272 #endif
2273 
2274 		if (hw->use_vec_rx) {
2275 			if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
2276 				PMD_DRV_LOG(INFO,
2277 					"disabled packed ring vectorized rx for mrg_rxbuf enabled");
2278 				hw->use_vec_rx = 0;
2279 			}
2280 
2281 			if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
2282 				PMD_DRV_LOG(INFO,
2283 					"disabled packed ring vectorized rx for TCP_LRO enabled");
2284 				hw->use_vec_rx = 0;
2285 			}
2286 		}
2287 	} else {
2288 		if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER)) {
2289 			hw->use_inorder_tx = 1;
2290 			hw->use_inorder_rx = 1;
2291 			hw->use_vec_rx = 0;
2292 		}
2293 
2294 		if (hw->use_vec_rx) {
2295 #if defined RTE_ARCH_ARM
2296 			if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
2297 				PMD_DRV_LOG(INFO,
2298 					"disabled split ring vectorized path for requirements not met");
2299 				hw->use_vec_rx = 0;
2300 			}
2301 #endif
2302 			if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
2303 				PMD_DRV_LOG(INFO,
2304 					"disabled split ring vectorized rx for mrg_rxbuf enabled");
2305 				hw->use_vec_rx = 0;
2306 			}
2307 
2308 			if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
2309 					   RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
2310 					   RTE_ETH_RX_OFFLOAD_TCP_LRO |
2311 					   RTE_ETH_RX_OFFLOAD_VLAN_STRIP)) {
2312 				PMD_DRV_LOG(INFO,
2313 					"disabled split ring vectorized rx for offloading enabled");
2314 				hw->use_vec_rx = 0;
2315 			}
2316 
2317 			if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) {
2318 				PMD_DRV_LOG(INFO,
2319 					"disabled split ring vectorized rx, max SIMD bitwidth too low");
2320 				hw->use_vec_rx = 0;
2321 			}
2322 		}
2323 	}
2324 
2325 	return 0;
2326 }
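
/*
 * Illustrative sketch (not part of this driver): an application can avoid the
 * -ENOTSUP paths above by checking the advertised capabilities before
 * requesting Rx offloads. The port id, single queue pair and the LRO request
 * are assumptions for the example only.
 */
#if 0	/* example only, not compiled */
static int
example_configure_with_lro(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	memset(&conf, 0, sizeof(conf));
	if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
		conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
#endif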
2327 
2328 
2329 static int
2330 virtio_dev_start(struct rte_eth_dev *dev)
2331 {
2332 	uint16_t nb_queues, i;
2333 	struct virtqueue *vq;
2334 	struct virtio_hw *hw = dev->data->dev_private;
2335 	int ret;
2336 
2337 	/* Finish the initialization of the queues */
2338 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2339 		ret = virtio_dev_rx_queue_setup_finish(dev, i);
2340 		if (ret < 0)
2341 			return ret;
2342 	}
2343 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2344 		ret = virtio_dev_tx_queue_setup_finish(dev, i);
2345 		if (ret < 0)
2346 			return ret;
2347 	}
2348 
2349 	/* check if lsc interrupt feature is enabled */
2350 	if (dev->data->dev_conf.intr_conf.lsc) {
2351 		if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
2352 			PMD_DRV_LOG(ERR, "link status not supported by host");
2353 			return -ENOTSUP;
2354 		}
2355 	}
2356 
2357 	/* Enable uio/vfio intr/eventfd mapping: we already did this in device
2358 	 * configure, but it could have been unmapped while the device was
2359 	 * stopped.
2360 	 */
2361 	if (dev->data->dev_conf.intr_conf.lsc ||
2362 	    dev->data->dev_conf.intr_conf.rxq) {
2363 		virtio_intr_disable(dev);
2364 
2365 		/* Setup interrupt callback  */
2366 		if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
2367 			rte_intr_callback_register(dev->intr_handle,
2368 						   virtio_interrupt_handler,
2369 						   dev);
2370 
2371 		if (virtio_intr_enable(dev) < 0) {
2372 			PMD_DRV_LOG(ERR, "interrupt enable failed");
2373 			return -EIO;
2374 		}
2375 	}
2376 
2377 	/* Notify the backend.
2378 	 * Otherwise the tap backend might have already stopped its queue due to
2379 	 * fullness, and the vhost backend would have no chance to be woken up.
2380 	 */
2381 	nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
2382 	if (hw->max_queue_pairs > 1) {
2383 		if (virtio_set_multiple_queues(dev, nb_queues) != 0)
2384 			return -EINVAL;
2385 	}
2386 
2387 	PMD_INIT_LOG(DEBUG, "nb_queues=%u (port=%u)", nb_queues,
2388 		     dev->data->port_id);
2389 
2390 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2391 		vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
2392 		/* Flush the old packets */
2393 		virtqueue_rxvq_flush(vq);
2394 		virtqueue_notify(vq);
2395 	}
2396 
2397 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2398 		vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
2399 		virtqueue_notify(vq);
2400 	}
2401 
2402 	PMD_INIT_LOG(DEBUG, "Notified backend at initialization (port=%u)",
2403 		     dev->data->port_id);
2404 
2405 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2406 		vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
2407 		VIRTQUEUE_DUMP(vq);
2408 	}
2409 
2410 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2411 		vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
2412 		VIRTQUEUE_DUMP(vq);
2413 	}
2414 
2415 	set_rxtx_funcs(dev);
2416 	hw->started = 1;
2417 
2418 	for (i = 0; i < dev->data->nb_rx_queues; i++)
2419 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
2420 	for (i = 0; i < dev->data->nb_tx_queues; i++)
2421 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
2422 
2423 	/* Initialize Link state */
2424 	virtio_dev_link_update(dev, 0);
2425 
2426 	return 0;
2427 }
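
/*
 * Illustrative sketch (not part of this driver): the usual ethdev bring-up
 * sequence that ends in the dev_start callback above. Ring size, socket id
 * and the caller-supplied mempool are assumptions for the example only.
 */
#if 0	/* example only, not compiled */
static int
example_bring_up(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret != 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, 256, SOCKET_ID_ANY,
				     NULL, mb_pool);
	if (ret != 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 256, SOCKET_ID_ANY, NULL);
	if (ret != 0)
		return ret;

	return rte_eth_dev_start(port_id);
}
#endif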
2428 
2429 static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
2430 {
2431 	struct virtio_hw *hw = dev->data->dev_private;
2432 	uint16_t nr_vq = virtio_get_nr_vq(hw);
2433 	const char *type __rte_unused;
2434 	unsigned int i, mbuf_num = 0;
2435 	struct virtqueue *vq;
2436 	struct rte_mbuf *buf;
2437 	int queue_type;
2438 
2439 	if (hw->vqs == NULL)
2440 		return;
2441 
2442 	for (i = 0; i < nr_vq; i++) {
2443 		vq = hw->vqs[i];
2444 		if (!vq)
2445 			continue;
2446 
2447 		queue_type = virtio_get_queue_type(hw, i);
2448 		if (queue_type == VTNET_RQ)
2449 			type = "rxq";
2450 		else if (queue_type == VTNET_TQ)
2451 			type = "txq";
2452 		else
2453 			continue;
2454 
2455 		PMD_INIT_LOG(DEBUG,
2456 			"Before freeing %s[%d] used and unused buf",
2457 			type, i);
2458 		VIRTQUEUE_DUMP(vq);
2459 
2460 		while ((buf = virtqueue_detach_unused(vq)) != NULL) {
2461 			rte_pktmbuf_free(buf);
2462 			mbuf_num++;
2463 		}
2464 
2465 		PMD_INIT_LOG(DEBUG,
2466 			"After freeing %s[%d] used and unused buf",
2467 			type, i);
2468 		VIRTQUEUE_DUMP(vq);
2469 	}
2470 
2471 	PMD_INIT_LOG(DEBUG, "%d mbufs freed", mbuf_num);
2472 }
2473 
2474 static void
2475 virtio_tx_completed_cleanup(struct rte_eth_dev *dev)
2476 {
2477 	struct virtio_hw *hw = dev->data->dev_private;
2478 	struct virtqueue *vq;
2479 	int qidx;
2480 	void (*xmit_cleanup)(struct virtqueue *vq, uint16_t nb_used);
2481 
2482 	if (virtio_with_packed_queue(hw)) {
2483 		if (hw->use_vec_tx)
2484 			xmit_cleanup = &virtio_xmit_cleanup_inorder_packed;
2485 		else if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
2486 			xmit_cleanup = &virtio_xmit_cleanup_inorder_packed;
2487 		else
2488 			xmit_cleanup = &virtio_xmit_cleanup_normal_packed;
2489 	} else {
2490 		if (hw->use_inorder_tx)
2491 			xmit_cleanup = &virtio_xmit_cleanup_inorder;
2492 		else
2493 			xmit_cleanup = &virtio_xmit_cleanup;
2494 	}
2495 
2496 	for (qidx = 0; qidx < hw->max_queue_pairs; qidx++) {
2497 		vq = hw->vqs[2 * qidx + VTNET_SQ_TQ_QUEUE_IDX];
2498 		if (vq != NULL)
2499 			xmit_cleanup(vq, virtqueue_nused(vq));
2500 	}
2501 }
2502 
2503 /*
2504  * Stop device: disable interrupt and mark link down
2505  */
2506 int
2507 virtio_dev_stop(struct rte_eth_dev *dev)
2508 {
2509 	struct virtio_hw *hw = dev->data->dev_private;
2510 	struct rte_eth_link link;
2511 	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
2512 	uint16_t i;
2513 
2514 	PMD_INIT_LOG(DEBUG, "stop");
2515 	dev->data->dev_started = 0;
2516 
2517 	rte_spinlock_lock(&hw->state_lock);
2518 	if (!hw->started)
2519 		goto out_unlock;
2520 	hw->started = 0;
2521 
2522 	virtio_tx_completed_cleanup(dev);
2523 
2524 	if (intr_conf->lsc || intr_conf->rxq) {
2525 		virtio_intr_disable(dev);
2526 
2527 		/* Reset interrupt callback  */
2528 		if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
2529 			rte_intr_callback_unregister(dev->intr_handle,
2530 						     virtio_interrupt_handler,
2531 						     dev);
2532 		}
2533 	}
2534 
2535 	memset(&link, 0, sizeof(link));
2536 	rte_eth_linkstatus_set(dev, &link);
2537 out_unlock:
2538 	rte_spinlock_unlock(&hw->state_lock);
2539 
2540 	for (i = 0; i < dev->data->nb_rx_queues; i++)
2541 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
2542 	for (i = 0; i < dev->data->nb_tx_queues; i++)
2543 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
2544 
2545 	return 0;
2546 }
2547 
2548 static int
2549 virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
2550 {
2551 	struct rte_eth_link link;
2552 	uint16_t status;
2553 	struct virtio_hw *hw = dev->data->dev_private;
2554 
2555 	memset(&link, 0, sizeof(link));
2556 	link.link_duplex = hw->duplex;
2557 	link.link_speed  = hw->speed;
2558 	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
2559 
2560 	if (!hw->started) {
2561 		link.link_status = RTE_ETH_LINK_DOWN;
2562 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
2563 	} else if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
2564 		PMD_INIT_LOG(DEBUG, "Get link status from hw");
2565 		virtio_read_dev_config(hw,
2566 				offsetof(struct virtio_net_config, status),
2567 				&status, sizeof(status));
2568 		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
2569 			link.link_status = RTE_ETH_LINK_DOWN;
2570 			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
2571 			PMD_INIT_LOG(DEBUG, "Port %d is down",
2572 				     dev->data->port_id);
2573 		} else {
2574 			link.link_status = RTE_ETH_LINK_UP;
2575 			if (hw->get_speed_via_feat)
2576 				virtio_get_speed_duplex(dev, &link);
2577 			PMD_INIT_LOG(DEBUG, "Port %d is up",
2578 				     dev->data->port_id);
2579 		}
2580 	} else {
2581 		link.link_status = RTE_ETH_LINK_UP;
2582 		if (hw->get_speed_via_feat)
2583 			virtio_get_speed_duplex(dev, &link);
2584 	}
2585 
2586 	return rte_eth_linkstatus_set(dev, &link);
2587 }
2588 
2589 static int
2590 virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2591 {
2592 	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
2593 	struct virtio_hw *hw = dev->data->dev_private;
2594 	uint64_t offloads = rxmode->offloads;
2595 
2596 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
2597 		if ((offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
2598 				!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
2599 
2600 			PMD_DRV_LOG(NOTICE,
2601 				"vlan filtering not available on this host");
2602 
2603 			return -ENOTSUP;
2604 		}
2605 	}
2606 
2607 	if (mask & RTE_ETH_VLAN_STRIP_MASK)
2608 		hw->vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
2609 
2610 	return 0;
2611 }
2612 
2613 static int
2614 virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2615 {
2616 	uint64_t tso_mask, host_features;
2617 	uint32_t rss_hash_types = 0;
2618 	struct virtio_hw *hw = dev->data->dev_private;
2619 	dev_info->speed_capa = virtio_dev_speed_capa_get(hw->speed);
2620 
2621 	dev_info->max_rx_queues =
2622 		RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
2623 	dev_info->max_tx_queues =
2624 		RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_TX_QUEUES);
2625 	dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
2626 	dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
2627 	dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
2628 	dev_info->max_mtu = hw->max_mtu;
2629 
2630 	host_features = VIRTIO_OPS(hw)->get_features(hw);
2631 	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
2632 	if (host_features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
2633 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SCATTER;
2634 	if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
2635 		dev_info->rx_offload_capa |=
2636 			RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
2637 			RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
2638 	}
2639 	if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
2640 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
2641 	tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
2642 		(1ULL << VIRTIO_NET_F_GUEST_TSO6);
2643 	if ((host_features & tso_mask) == tso_mask)
2644 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
2645 
2646 	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
2647 				    RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
2648 	if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
2649 		dev_info->tx_offload_capa |=
2650 			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
2651 			RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
2652 	}
2653 	tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
2654 		(1ULL << VIRTIO_NET_F_HOST_TSO6);
2655 	if ((host_features & tso_mask) == tso_mask)
2656 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
2657 
2658 	if (host_features & (1ULL << VIRTIO_NET_F_RSS)) {
2659 		virtio_dev_get_rss_config(hw, &rss_hash_types);
2660 		dev_info->hash_key_size = VIRTIO_NET_RSS_KEY_SIZE;
2661 		dev_info->reta_size = VIRTIO_NET_RSS_RETA_SIZE;
2662 		dev_info->flow_type_rss_offloads =
2663 			virtio_to_ethdev_rss_offloads(rss_hash_types);
2664 	} else {
2665 		dev_info->hash_key_size = 0;
2666 		dev_info->reta_size = 0;
2667 		dev_info->flow_type_rss_offloads = 0;
2668 	}
2669 
2670 	if (host_features & (1ULL << VIRTIO_F_RING_PACKED)) {
2671 		/*
2672 		 * According to 2.7 Packed Virtqueues,
2673 		 * 2.7.10.1 Structure Size and Alignment:
2674 		 * The Queue Size value does not have to be a power of 2.
2675 		 */
2676 		dev_info->rx_desc_lim.nb_max = UINT16_MAX;
2677 		dev_info->tx_desc_lim.nb_max = UINT16_MAX;
2678 	} else {
2679 		/*
2680 		 * According to 2.6 Split Virtqueues:
2681 		 * Queue Size value is always a power of 2. The maximum Queue
2682 		 * Size value is 32768.
2683 		 */
2684 		dev_info->rx_desc_lim.nb_max = 32768;
2685 		dev_info->tx_desc_lim.nb_max = 32768;
2686 	}
2687 	/*
2688 	 * The actual minimum is not the same for virtqueues of different kinds,
2689 	 * but to avoid tangling the code with separate branches, rely on the
2690 	 * default thresholds, since the descriptor count must be at least their size.
2691 	 */
2692 	dev_info->rx_desc_lim.nb_min = RTE_MAX(DEFAULT_RX_FREE_THRESH,
2693 					       RTE_VIRTIO_VPMD_RX_REARM_THRESH);
2694 	dev_info->tx_desc_lim.nb_min = DEFAULT_TX_FREE_THRESH;
2695 	dev_info->rx_desc_lim.nb_align = 1;
2696 	dev_info->tx_desc_lim.nb_align = 1;
2697 
2698 	return 0;
2699 }
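
/*
 * Illustrative sketch (not part of this driver): clamping a requested ring
 * size to the descriptor limits reported above before queue setup. The
 * requested 4096 descriptors are an assumption for the example only.
 */
#if 0	/* example only, not compiled */
static uint16_t
example_pick_rx_ring_size(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	uint16_t nb_desc = 4096;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;

	if (nb_desc > dev_info.rx_desc_lim.nb_max)
		nb_desc = dev_info.rx_desc_lim.nb_max;
	if (nb_desc < dev_info.rx_desc_lim.nb_min)
		nb_desc = dev_info.rx_desc_lim.nb_min;

	return nb_desc;
}
#endif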
2700 
2701 /*
2702  * It enables testpmd to collect per-queue stats.
2703  */
2704 static int
2705 virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,
2706 __rte_unused uint16_t queue_id, __rte_unused uint8_t stat_idx,
2707 __rte_unused uint8_t is_rx)
2708 {
2709 	return 0;
2710 }
2711 
2712 RTE_LOG_REGISTER_SUFFIX(virtio_logtype_init, init, NOTICE);
2713 RTE_LOG_REGISTER_SUFFIX(virtio_logtype_driver, driver, NOTICE);
2714