xref: /dpdk/drivers/net/virtio/virtio_ethdev.c (revision 7698e655516c54558a43b8aaed1bff69928d96fb)
15566a3e3SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
25566a3e3SBruce Richardson  * Copyright(c) 2010-2016 Intel Corporation
36c3169a3SBruce Richardson  */
46c3169a3SBruce Richardson 
56c3169a3SBruce Richardson #include <stdint.h>
66c3169a3SBruce Richardson #include <string.h>
76c3169a3SBruce Richardson #include <stdio.h>
872b452c5SDmitry Kozlyuk #include <stdlib.h>
96c3169a3SBruce Richardson #include <errno.h>
106c3169a3SBruce Richardson #include <unistd.h>
116c3169a3SBruce Richardson 
12df96fd0dSBruce Richardson #include <ethdev_driver.h>
136c3169a3SBruce Richardson #include <rte_memcpy.h>
146c3169a3SBruce Richardson #include <rte_string_fns.h>
156c3169a3SBruce Richardson #include <rte_memzone.h>
166c3169a3SBruce Richardson #include <rte_malloc.h>
176c3169a3SBruce Richardson #include <rte_branch_prediction.h>
186c3169a3SBruce Richardson #include <rte_ether.h>
197365504fSXiao Wang #include <rte_ip.h>
207365504fSXiao Wang #include <rte_arp.h>
216c3169a3SBruce Richardson #include <rte_common.h>
22abf4c84bSBernard Iremonger #include <rte_errno.h>
234819eae8SOlivier Matz #include <rte_cpuflags.h>
247566f28aSCiara Power #include <rte_vect.h>
256c3169a3SBruce Richardson #include <rte_memory.h>
26924e6b76SThomas Monjalon #include <rte_eal_paging.h>
276c3169a3SBruce Richardson #include <rte_eal.h>
281acb7f54SDavid Marchand #include <dev_driver.h>
291978a9dcSXiao Wang #include <rte_cycles.h>
30440f03c2SXiao Wang #include <rte_kvargs.h>
316c3169a3SBruce Richardson 
326c3169a3SBruce Richardson #include "virtio_ethdev.h"
33b5ba7ee4SMaxime Coquelin #include "virtio.h"
346c3169a3SBruce Richardson #include "virtio_logs.h"
356c3169a3SBruce Richardson #include "virtqueue.h"
3635e7012eSMaxime Coquelin #include "virtio_cvq.h"
37cab04612SHuawei Xie #include "virtio_rxtx.h"
3831136836SIvan Ilchenko #include "virtio_rxtx_simple.h"
397f468b2eSTiwei Bie #include "virtio_user/virtio_user_dev.h"
406c3169a3SBruce Richardson 
416c3169a3SBruce Richardson static int  virtio_dev_configure(struct rte_eth_dev *dev);
426c3169a3SBruce Richardson static int  virtio_dev_start(struct rte_eth_dev *dev);
439039c812SAndrew Rybchenko static int virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
449039c812SAndrew Rybchenko static int virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
45ca041cd4SIvan Ilchenko static int virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
46ca041cd4SIvan Ilchenko static int virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
4749119e38SIvan Dyukov static uint32_t virtio_dev_speed_capa_get(uint32_t speed);
4849119e38SIvan Dyukov static int virtio_dev_devargs_parse(struct rte_devargs *devargs,
494710e16aSMarvin Liu 	uint32_t *speed,
504710e16aSMarvin Liu 	int *vectorized);
51bdad90d1SIvan Ilchenko static int virtio_dev_info_get(struct rte_eth_dev *dev,
526c3169a3SBruce Richardson 				struct rte_eth_dev_info *dev_info);
536c3169a3SBruce Richardson static int virtio_dev_link_update(struct rte_eth_dev *dev,
54dd2c630aSFerruh Yigit 	int wait_to_complete);
55289ba0c0SDavid Harton static int virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
560c9d6620SMaxime Coquelin static int virtio_dev_rss_hash_update(struct rte_eth_dev *dev,
570c9d6620SMaxime Coquelin 		struct rte_eth_rss_conf *rss_conf);
580c9d6620SMaxime Coquelin static int virtio_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
590c9d6620SMaxime Coquelin 		struct rte_eth_rss_conf *rss_conf);
600c9d6620SMaxime Coquelin static int virtio_dev_rss_reta_update(struct rte_eth_dev *dev,
610c9d6620SMaxime Coquelin 			 struct rte_eth_rss_reta_entry64 *reta_conf,
620c9d6620SMaxime Coquelin 			 uint16_t reta_size);
630c9d6620SMaxime Coquelin static int virtio_dev_rss_reta_query(struct rte_eth_dev *dev,
640c9d6620SMaxime Coquelin 			 struct rte_eth_rss_reta_entry64 *reta_conf,
650c9d6620SMaxime Coquelin 			 uint16_t reta_size);
666c3169a3SBruce Richardson 
676c3169a3SBruce Richardson static void virtio_set_hwaddr(struct virtio_hw *hw);
686c3169a3SBruce Richardson static void virtio_get_hwaddr(struct virtio_hw *hw);
696c3169a3SBruce Richardson 
70d5b0924bSMatan Azrad static int virtio_dev_stats_get(struct rte_eth_dev *dev,
7176d4c652SHarry van Haaren 				 struct rte_eth_stats *stats);
7276d4c652SHarry van Haaren static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
73e2aae1c1SRemy Horton 				 struct rte_eth_xstat *xstats, unsigned n);
74baf91c39SRemy Horton static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
75baf91c39SRemy Horton 				       struct rte_eth_xstat_name *xstats_names,
76baf91c39SRemy Horton 				       unsigned limit);
779970a9adSIgor Romanov static int virtio_dev_stats_reset(struct rte_eth_dev *dev);
786c3169a3SBruce Richardson static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
796c3169a3SBruce Richardson static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
806c3169a3SBruce Richardson 				uint16_t vlan_id, int on);
816d01e580SWei Dai static int virtio_mac_addr_add(struct rte_eth_dev *dev,
826d13ea8eSOlivier Matz 				struct rte_ether_addr *mac_addr,
83dd2c630aSFerruh Yigit 				uint32_t index, uint32_t vmdq);
846c3169a3SBruce Richardson static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
85caccf8b3SOlivier Matz static int virtio_mac_addr_set(struct rte_eth_dev *dev,
866d13ea8eSOlivier Matz 				struct rte_ether_addr *mac_addr);
876c3169a3SBruce Richardson 
88fe19d49cSZhiyong Yang static int virtio_intr_disable(struct rte_eth_dev *dev);
8964ac7e08SMiao Li static int virtio_get_monitor_addr(void *rx_queue,
9064ac7e08SMiao Li 				struct rte_power_monitor_cond *pmc);
91fe19d49cSZhiyong Yang 
926c3169a3SBruce Richardson static int virtio_dev_queue_stats_mapping_set(
93dd2c630aSFerruh Yigit 	struct rte_eth_dev *eth_dev,
94dd2c630aSFerruh Yigit 	uint16_t queue_id,
95dd2c630aSFerruh Yigit 	uint8_t stat_idx,
96dd2c630aSFerruh Yigit 	uint8_t is_rx);
976c3169a3SBruce Richardson 
987365504fSXiao Wang static void virtio_notify_peers(struct rte_eth_dev *dev);
997365504fSXiao Wang static void virtio_ack_link_announce(struct rte_eth_dev *dev);
1007365504fSXiao Wang 
10176d4c652SHarry van Haaren struct rte_virtio_xstats_name_off {
10276d4c652SHarry van Haaren 	char name[RTE_ETH_XSTATS_NAME_SIZE];
10376d4c652SHarry van Haaren 	unsigned offset;
10476d4c652SHarry van Haaren };
10576d4c652SHarry van Haaren 
10676d4c652SHarry van Haaren /* [rt]x_qX_ is prepended to the name string here */
10701ad44fdSHuawei Xie static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = {
10801ad44fdSHuawei Xie 	{"good_packets",           offsetof(struct virtnet_rx, stats.packets)},
10901ad44fdSHuawei Xie 	{"good_bytes",             offsetof(struct virtnet_rx, stats.bytes)},
11001ad44fdSHuawei Xie 	{"errors",                 offsetof(struct virtnet_rx, stats.errors)},
11101ad44fdSHuawei Xie 	{"multicast_packets",      offsetof(struct virtnet_rx, stats.multicast)},
11201ad44fdSHuawei Xie 	{"broadcast_packets",      offsetof(struct virtnet_rx, stats.broadcast)},
11301ad44fdSHuawei Xie 	{"undersize_packets",      offsetof(struct virtnet_rx, stats.size_bins[0])},
11401ad44fdSHuawei Xie 	{"size_64_packets",        offsetof(struct virtnet_rx, stats.size_bins[1])},
11501ad44fdSHuawei Xie 	{"size_65_127_packets",    offsetof(struct virtnet_rx, stats.size_bins[2])},
11601ad44fdSHuawei Xie 	{"size_128_255_packets",   offsetof(struct virtnet_rx, stats.size_bins[3])},
11701ad44fdSHuawei Xie 	{"size_256_511_packets",   offsetof(struct virtnet_rx, stats.size_bins[4])},
11801ad44fdSHuawei Xie 	{"size_512_1023_packets",  offsetof(struct virtnet_rx, stats.size_bins[5])},
11981f7234bSZhiyong Yang 	{"size_1024_1518_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
12081f7234bSZhiyong Yang 	{"size_1519_max_packets",  offsetof(struct virtnet_rx, stats.size_bins[7])},
12176d4c652SHarry van Haaren };
12276d4c652SHarry van Haaren 
12301ad44fdSHuawei Xie /* [rt]x_qX_ is prepended to the name string here */
12401ad44fdSHuawei Xie static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
12501ad44fdSHuawei Xie 	{"good_packets",           offsetof(struct virtnet_tx, stats.packets)},
12601ad44fdSHuawei Xie 	{"good_bytes",             offsetof(struct virtnet_tx, stats.bytes)},
12701ad44fdSHuawei Xie 	{"multicast_packets",      offsetof(struct virtnet_tx, stats.multicast)},
12801ad44fdSHuawei Xie 	{"broadcast_packets",      offsetof(struct virtnet_tx, stats.broadcast)},
12901ad44fdSHuawei Xie 	{"undersize_packets",      offsetof(struct virtnet_tx, stats.size_bins[0])},
13001ad44fdSHuawei Xie 	{"size_64_packets",        offsetof(struct virtnet_tx, stats.size_bins[1])},
13101ad44fdSHuawei Xie 	{"size_65_127_packets",    offsetof(struct virtnet_tx, stats.size_bins[2])},
13201ad44fdSHuawei Xie 	{"size_128_255_packets",   offsetof(struct virtnet_tx, stats.size_bins[3])},
13301ad44fdSHuawei Xie 	{"size_256_511_packets",   offsetof(struct virtnet_tx, stats.size_bins[4])},
13401ad44fdSHuawei Xie 	{"size_512_1023_packets",  offsetof(struct virtnet_tx, stats.size_bins[5])},
13581f7234bSZhiyong Yang 	{"size_1024_1518_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
13681f7234bSZhiyong Yang 	{"size_1519_max_packets",  offsetof(struct virtnet_tx, stats.size_bins[7])},
13701ad44fdSHuawei Xie };
13801ad44fdSHuawei Xie 
13901ad44fdSHuawei Xie #define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \
14001ad44fdSHuawei Xie 			    sizeof(rte_virtio_rxq_stat_strings[0]))
14101ad44fdSHuawei Xie #define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
14201ad44fdSHuawei Xie 			    sizeof(rte_virtio_txq_stat_strings[0]))
14376d4c652SHarry van Haaren 
144553f4593SYuanhan Liu struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
145553f4593SYuanhan Liu 
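/*
 * Multiqueue setup when VIRTIO_NET_F_RSS is negotiated: pack the cached RSS
 * state (hash types, indirection table, key) into a VIRTIO_NET_CTRL_MQ
 * RSS_CONFIG command and send it over the control virtqueue.
 */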
1466c3169a3SBruce Richardson static int
1470c9d6620SMaxime Coquelin virtio_set_multiple_queues_rss(struct rte_eth_dev *dev, uint16_t nb_queues)
1486c3169a3SBruce Richardson {
1496c3169a3SBruce Richardson 	struct virtio_hw *hw = dev->data->dev_private;
1506c3169a3SBruce Richardson 	struct virtio_pmd_ctrl ctrl;
1510c9d6620SMaxime Coquelin 	struct virtio_net_ctrl_rss rss;
1520c9d6620SMaxime Coquelin 	int dlen, ret;
1530c9d6620SMaxime Coquelin 
1540c9d6620SMaxime Coquelin 	rss.hash_types = hw->rss_hash_types & VIRTIO_NET_HASH_TYPE_MASK;
1550c9d6620SMaxime Coquelin 	RTE_BUILD_BUG_ON(!RTE_IS_POWER_OF_2(VIRTIO_NET_RSS_RETA_SIZE));
1560c9d6620SMaxime Coquelin 	rss.indirection_table_mask = VIRTIO_NET_RSS_RETA_SIZE - 1;
1570c9d6620SMaxime Coquelin 	rss.unclassified_queue = 0;
1580c9d6620SMaxime Coquelin 	memcpy(rss.indirection_table, hw->rss_reta, VIRTIO_NET_RSS_RETA_SIZE * sizeof(uint16_t));
1590c9d6620SMaxime Coquelin 	rss.max_tx_vq = nb_queues;
1600c9d6620SMaxime Coquelin 	rss.hash_key_length = VIRTIO_NET_RSS_KEY_SIZE;
1610c9d6620SMaxime Coquelin 	memcpy(rss.hash_key_data, hw->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
1620c9d6620SMaxime Coquelin 
1630c9d6620SMaxime Coquelin 	ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
1640c9d6620SMaxime Coquelin 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_RSS_CONFIG;
1650c9d6620SMaxime Coquelin 	memcpy(ctrl.data, &rss, sizeof(rss));
1660c9d6620SMaxime Coquelin 
1670c9d6620SMaxime Coquelin 	dlen = sizeof(rss);
1680c9d6620SMaxime Coquelin 
1690c9d6620SMaxime Coquelin 	ret = virtio_send_command(hw->cvq, &ctrl, &dlen, 1);
1700c9d6620SMaxime Coquelin 	if (ret) {
1710c9d6620SMaxime Coquelin 		PMD_INIT_LOG(ERR, "RSS multiqueue configured but send command failed");
1720c9d6620SMaxime Coquelin 		return -EINVAL;
1730c9d6620SMaxime Coquelin 	}
1740c9d6620SMaxime Coquelin 
1750c9d6620SMaxime Coquelin 	return 0;
1760c9d6620SMaxime Coquelin }
1770c9d6620SMaxime Coquelin 
1780c9d6620SMaxime Coquelin static int
1790c9d6620SMaxime Coquelin virtio_set_multiple_queues_auto(struct rte_eth_dev *dev, uint16_t nb_queues)
1800c9d6620SMaxime Coquelin {
1810c9d6620SMaxime Coquelin 	struct virtio_hw *hw = dev->data->dev_private;
1820c9d6620SMaxime Coquelin 	struct virtio_pmd_ctrl ctrl;
1830c9d6620SMaxime Coquelin 	int dlen;
1846c3169a3SBruce Richardson 	int ret;
1856c3169a3SBruce Richardson 
1866c3169a3SBruce Richardson 	ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
1876c3169a3SBruce Richardson 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
1886c3169a3SBruce Richardson 	memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));
1896c3169a3SBruce Richardson 
1900c9d6620SMaxime Coquelin 	dlen = sizeof(uint16_t);
1916c3169a3SBruce Richardson 
1920c9d6620SMaxime Coquelin 	ret = virtio_send_command(hw->cvq, &ctrl, &dlen, 1);
1936c3169a3SBruce Richardson 	if (ret) {
1946c3169a3SBruce Richardson 		PMD_INIT_LOG(ERR, "Multiqueue configured but send command "
1956c3169a3SBruce Richardson 			  "failed, this is too late now...");
1966c3169a3SBruce Richardson 		return -EINVAL;
1976c3169a3SBruce Richardson 	}
1986c3169a3SBruce Richardson 
1996c3169a3SBruce Richardson 	return 0;
2006c3169a3SBruce Richardson }
2016c3169a3SBruce Richardson 
2020c9d6620SMaxime Coquelin static int
2030c9d6620SMaxime Coquelin virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
2040c9d6620SMaxime Coquelin {
2050c9d6620SMaxime Coquelin 	struct virtio_hw *hw = dev->data->dev_private;
2060c9d6620SMaxime Coquelin 
2070c9d6620SMaxime Coquelin 	if (virtio_with_feature(hw, VIRTIO_NET_F_RSS))
2080c9d6620SMaxime Coquelin 		return virtio_set_multiple_queues_rss(dev, nb_queues);
2090c9d6620SMaxime Coquelin 	else
2100c9d6620SMaxime Coquelin 		return virtio_set_multiple_queues_auto(dev, nb_queues);
2110c9d6620SMaxime Coquelin }
2120c9d6620SMaxime Coquelin 
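/*
 * Total number of virtqueues: one Rx and one Tx queue per queue pair,
 * plus the control virtqueue when VIRTIO_NET_F_CTRL_VQ is negotiated.
 */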
21369c80d4eSYuanhan Liu static uint16_t
21469c80d4eSYuanhan Liu virtio_get_nr_vq(struct virtio_hw *hw)
21569c80d4eSYuanhan Liu {
21669c80d4eSYuanhan Liu 	uint16_t nr_vq = hw->max_queue_pairs * 2;
21769c80d4eSYuanhan Liu 
218b4f9a45aSMaxime Coquelin 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
21969c80d4eSYuanhan Liu 		nr_vq += 1;
22069c80d4eSYuanhan Liu 
22169c80d4eSYuanhan Liu 	return nr_vq;
22269c80d4eSYuanhan Liu }
22369c80d4eSYuanhan Liu 
224f4d1ad15SYuanhan Liu static void
2254dd3477cSMaxime Coquelin virtio_control_queue_notify(struct virtqueue *vq, __rte_unused void *cookie)
2264dd3477cSMaxime Coquelin {
2274dd3477cSMaxime Coquelin 	virtqueue_notify(vq);
2284dd3477cSMaxime Coquelin }
2294dd3477cSMaxime Coquelin 
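/*
 * Initialize one virtqueue: read its size from the device and validate it
 * (split rings must be a power of two), allocate the ring on the device's
 * NUMA node, hook up the control queue notify callback if applicable, and
 * let the bus-specific setup_queue operation finish the job.
 */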
23069c80d4eSYuanhan Liu static int
231b5ba7ee4SMaxime Coquelin virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
2326c3169a3SBruce Richardson {
2336c3169a3SBruce Richardson 	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
234b02b02b6SMaxime Coquelin 	unsigned int vq_size;
2356c3169a3SBruce Richardson 	struct virtio_hw *hw = dev->data->dev_private;
23601ad44fdSHuawei Xie 	struct virtqueue *vq;
237b5ba7ee4SMaxime Coquelin 	int queue_type = virtio_get_queue_type(hw, queue_idx);
23801ad44fdSHuawei Xie 	int ret;
2394a5140abSMaxime Coquelin 	int numa_node = dev->device->numa_node;
2406c3169a3SBruce Richardson 
2414a5140abSMaxime Coquelin 	PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d",
242b5ba7ee4SMaxime Coquelin 			queue_idx, numa_node);
2436c3169a3SBruce Richardson 
2446c3169a3SBruce Richardson 	/*
2456c3169a3SBruce Richardson 	 * Read the virtqueue size from the Queue Size field
2466c3169a3SBruce Richardson 	 * Always power of 2 and if 0 virtqueue does not exist
2476c3169a3SBruce Richardson 	 */
248b5ba7ee4SMaxime Coquelin 	vq_size = VIRTIO_OPS(hw)->get_queue_num(hw, queue_idx);
24969c80d4eSYuanhan Liu 	PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
2506c3169a3SBruce Richardson 	if (vq_size == 0) {
2510bb159adSHuawei Xie 		PMD_INIT_LOG(ERR, "virtqueue does not exist");
2526c3169a3SBruce Richardson 		return -EINVAL;
253d78deadaSStephen Hemminger 	}
254d78deadaSStephen Hemminger 
255b4f9a45aSMaxime Coquelin 	if (!virtio_with_packed_queue(hw) && !rte_is_power_of_2(vq_size)) {
256df42dde5SMarvin Liu 		PMD_INIT_LOG(ERR, "split virtqueue size is not power of 2");
2576c3169a3SBruce Richardson 		return -EINVAL;
258d78deadaSStephen Hemminger 	}
259d78deadaSStephen Hemminger 
260a632f0f6SMaxime Coquelin 	snprintf(vq_name, sizeof(vq_name), "port%d_vq%d", dev->data->port_id, queue_idx);
26101ad44fdSHuawei Xie 
262b02b02b6SMaxime Coquelin 	vq = virtqueue_alloc(hw, queue_idx, vq_size, queue_type, numa_node, vq_name);
263b02b02b6SMaxime Coquelin 	if (!vq) {
264b02b02b6SMaxime Coquelin 		PMD_INIT_LOG(ERR, "virtqueue init failed");
265cab04612SHuawei Xie 		return -ENOMEM;
266cab04612SHuawei Xie 	}
267b02b02b6SMaxime Coquelin 
268b5ba7ee4SMaxime Coquelin 	hw->vqs[queue_idx] = vq;
26969c80d4eSYuanhan Liu 
270b02b02b6SMaxime Coquelin 	if (queue_type == VTNET_CQ) {
271b02b02b6SMaxime Coquelin 		hw->cvq = &vq->cq;
2724dd3477cSMaxime Coquelin 		vq->cq.notify_queue = &virtio_control_queue_notify;
273f24f8f9fSJianfeng Tan 	}
274f24f8f9fSJianfeng Tan 
275f8b60756SMaxime Coquelin 	if (VIRTIO_OPS(hw)->setup_queue(hw, vq) < 0) {
276595454c5SJianfeng Tan 		PMD_INIT_LOG(ERR, "setup_queue failed");
27776fd789cSMaxime Coquelin 		ret = -EINVAL;
27876fd789cSMaxime Coquelin 		goto clean_vq;
279595454c5SJianfeng Tan 	}
280595454c5SJianfeng Tan 
2816c3169a3SBruce Richardson 	return 0;
28201ad44fdSHuawei Xie 
28376fd789cSMaxime Coquelin clean_vq:
284b02b02b6SMaxime Coquelin 	if (queue_type == VTNET_CQ)
28576fd789cSMaxime Coquelin 		hw->cvq = NULL;
286b02b02b6SMaxime Coquelin 	virtqueue_free(vq);
2875d903aeeSGaoxiang Liu 	hw->vqs[queue_idx] = NULL;
28801ad44fdSHuawei Xie 
28901ad44fdSHuawei Xie 	return ret;
2906c3169a3SBruce Richardson }
2916c3169a3SBruce Richardson 
29269c80d4eSYuanhan Liu static void
29369c80d4eSYuanhan Liu virtio_free_queues(struct virtio_hw *hw)
2946c3169a3SBruce Richardson {
29569c80d4eSYuanhan Liu 	uint16_t nr_vq = virtio_get_nr_vq(hw);
29669c80d4eSYuanhan Liu 	struct virtqueue *vq;
29769c80d4eSYuanhan Liu 	uint16_t i;
2986c3169a3SBruce Richardson 
2990e78cfddSHuanle Han 	if (hw->vqs == NULL)
3000e78cfddSHuanle Han 		return;
3010e78cfddSHuanle Han 
30269c80d4eSYuanhan Liu 	for (i = 0; i < nr_vq; i++) {
30369c80d4eSYuanhan Liu 		vq = hw->vqs[i];
30469c80d4eSYuanhan Liu 		if (!vq)
30569c80d4eSYuanhan Liu 			continue;
306b02b02b6SMaxime Coquelin 		virtqueue_free(vq);
3070e78cfddSHuanle Han 		hw->vqs[i] = NULL;
30869c80d4eSYuanhan Liu 	}
30969c80d4eSYuanhan Liu 
31069c80d4eSYuanhan Liu 	rte_free(hw->vqs);
3110e78cfddSHuanle Han 	hw->vqs = NULL;
31269c80d4eSYuanhan Liu }
31369c80d4eSYuanhan Liu 
31469c80d4eSYuanhan Liu static int
31569c80d4eSYuanhan Liu virtio_alloc_queues(struct rte_eth_dev *dev)
31669c80d4eSYuanhan Liu {
31769c80d4eSYuanhan Liu 	struct virtio_hw *hw = dev->data->dev_private;
31869c80d4eSYuanhan Liu 	uint16_t nr_vq = virtio_get_nr_vq(hw);
31969c80d4eSYuanhan Liu 	uint16_t i;
32069c80d4eSYuanhan Liu 	int ret;
32169c80d4eSYuanhan Liu 
32269c80d4eSYuanhan Liu 	hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
32369c80d4eSYuanhan Liu 	if (!hw->vqs) {
32469c80d4eSYuanhan Liu 		PMD_INIT_LOG(ERR, "failed to allocate vqs");
32569c80d4eSYuanhan Liu 		return -ENOMEM;
32669c80d4eSYuanhan Liu 	}
32769c80d4eSYuanhan Liu 
32869c80d4eSYuanhan Liu 	for (i = 0; i < nr_vq; i++) {
32969c80d4eSYuanhan Liu 		ret = virtio_init_queue(dev, i);
3306c3169a3SBruce Richardson 		if (ret < 0) {
33169c80d4eSYuanhan Liu 			virtio_free_queues(hw);
3326c3169a3SBruce Richardson 			return ret;
3336c3169a3SBruce Richardson 		}
3346c3169a3SBruce Richardson 	}
3356c3169a3SBruce Richardson 
33669c80d4eSYuanhan Liu 	return 0;
337941d64b5SBernard Iremonger }
338941d64b5SBernard Iremonger 
3399ebdeefeSJianfeng Tan static void virtio_queues_unbind_intr(struct rte_eth_dev *dev);
3409ebdeefeSJianfeng Tan 
3410c9d6620SMaxime Coquelin static void
3420c9d6620SMaxime Coquelin virtio_free_rss(struct virtio_hw *hw)
3430c9d6620SMaxime Coquelin {
3440c9d6620SMaxime Coquelin 	rte_free(hw->rss_key);
3450c9d6620SMaxime Coquelin 	hw->rss_key = NULL;
3460c9d6620SMaxime Coquelin 
3470c9d6620SMaxime Coquelin 	rte_free(hw->rss_reta);
3480c9d6620SMaxime Coquelin 	hw->rss_reta = NULL;
3490c9d6620SMaxime Coquelin }
3500c9d6620SMaxime Coquelin 
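/*
 * Device close: tear down LSC/Rx queue interrupts, reset the device,
 * release mbufs, virtqueues and RSS state, then run the bus-specific close.
 */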
35136a7a2e7SMaxime Coquelin int
3526c3169a3SBruce Richardson virtio_dev_close(struct rte_eth_dev *dev)
3536c3169a3SBruce Richardson {
3546c3169a3SBruce Richardson 	struct virtio_hw *hw = dev->data->dev_private;
355295968d1SFerruh Yigit 	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
3566c3169a3SBruce Richardson 
3576c3169a3SBruce Richardson 	PMD_INIT_LOG(DEBUG, "virtio_dev_close");
35830410493SThomas Monjalon 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
35930410493SThomas Monjalon 		return 0;
3606c3169a3SBruce Richardson 
3612a821d81SChas Williams 	if (!hw->opened)
362b142387bSThomas Monjalon 		return 0;
3636e1d9c0cSMaxime Coquelin 	hw->opened = 0;
3642a821d81SChas Williams 
3656c3169a3SBruce Richardson 	/* reset the NIC */
36662a785a6SJianfeng Tan 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
367f8b60756SMaxime Coquelin 		VIRTIO_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);
3689ebdeefeSJianfeng Tan 	if (intr_conf->rxq)
3699ebdeefeSJianfeng Tan 		virtio_queues_unbind_intr(dev);
3709ebdeefeSJianfeng Tan 
3719ebdeefeSJianfeng Tan 	if (intr_conf->lsc || intr_conf->rxq) {
372fe19d49cSZhiyong Yang 		virtio_intr_disable(dev);
3739ebdeefeSJianfeng Tan 		rte_intr_efd_disable(dev->intr_handle);
374d61138d4SHarman Kalra 		rte_intr_vec_list_free(dev->intr_handle);
3759ebdeefeSJianfeng Tan 	}
3769ebdeefeSJianfeng Tan 
3779328e105SMaxime Coquelin 	virtio_reset(hw);
3786c3169a3SBruce Richardson 	virtio_dev_free_mbufs(dev);
37969c80d4eSYuanhan Liu 	virtio_free_queues(hw);
3800c9d6620SMaxime Coquelin 	virtio_free_rss(hw);
3817f468b2eSTiwei Bie 
382f8b60756SMaxime Coquelin 	return VIRTIO_OPS(hw)->dev_close(hw);
3836c3169a3SBruce Richardson }
3846c3169a3SBruce Richardson 
3859039c812SAndrew Rybchenko static int
3866c3169a3SBruce Richardson virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
3876c3169a3SBruce Richardson {
3886c3169a3SBruce Richardson 	struct virtio_hw *hw = dev->data->dev_private;
3896c3169a3SBruce Richardson 	struct virtio_pmd_ctrl ctrl;
3906c3169a3SBruce Richardson 	int dlen[1];
3916c3169a3SBruce Richardson 	int ret;
3926c3169a3SBruce Richardson 
393b4f9a45aSMaxime Coquelin 	if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
394f2462150SFerruh Yigit 		PMD_INIT_LOG(INFO, "host does not support rx control");
3959039c812SAndrew Rybchenko 		return -ENOTSUP;
396e9e414a4SStephen Hemminger 	}
397e9e414a4SStephen Hemminger 
3986c3169a3SBruce Richardson 	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
3996c3169a3SBruce Richardson 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
4006c3169a3SBruce Richardson 	ctrl.data[0] = 1;
4016c3169a3SBruce Richardson 	dlen[0] = 1;
4026c3169a3SBruce Richardson 
4036c3169a3SBruce Richardson 	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
4049039c812SAndrew Rybchenko 	if (ret) {
4056c3169a3SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to enable promisc");
4069039c812SAndrew Rybchenko 		return -EAGAIN;
4076c3169a3SBruce Richardson 	}
4086c3169a3SBruce Richardson 
4099039c812SAndrew Rybchenko 	return 0;
4109039c812SAndrew Rybchenko }
4119039c812SAndrew Rybchenko 
4129039c812SAndrew Rybchenko static int
4136c3169a3SBruce Richardson virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
4146c3169a3SBruce Richardson {
4156c3169a3SBruce Richardson 	struct virtio_hw *hw = dev->data->dev_private;
4166c3169a3SBruce Richardson 	struct virtio_pmd_ctrl ctrl;
4176c3169a3SBruce Richardson 	int dlen[1];
4186c3169a3SBruce Richardson 	int ret;
4196c3169a3SBruce Richardson 
420b4f9a45aSMaxime Coquelin 	if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
421f2462150SFerruh Yigit 		PMD_INIT_LOG(INFO, "host does not support rx control");
4229039c812SAndrew Rybchenko 		return -ENOTSUP;
423e9e414a4SStephen Hemminger 	}
424e9e414a4SStephen Hemminger 
4256c3169a3SBruce Richardson 	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
4266c3169a3SBruce Richardson 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
4276c3169a3SBruce Richardson 	ctrl.data[0] = 0;
4286c3169a3SBruce Richardson 	dlen[0] = 1;
4296c3169a3SBruce Richardson 
4306c3169a3SBruce Richardson 	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
4319039c812SAndrew Rybchenko 	if (ret) {
4326c3169a3SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to disable promisc");
4339039c812SAndrew Rybchenko 		return -EAGAIN;
4349039c812SAndrew Rybchenko 	}
4359039c812SAndrew Rybchenko 
4369039c812SAndrew Rybchenko 	return 0;
4376c3169a3SBruce Richardson }
4386c3169a3SBruce Richardson 
439ca041cd4SIvan Ilchenko static int
4406c3169a3SBruce Richardson virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
4416c3169a3SBruce Richardson {
4426c3169a3SBruce Richardson 	struct virtio_hw *hw = dev->data->dev_private;
4436c3169a3SBruce Richardson 	struct virtio_pmd_ctrl ctrl;
4446c3169a3SBruce Richardson 	int dlen[1];
4456c3169a3SBruce Richardson 	int ret;
4466c3169a3SBruce Richardson 
447b4f9a45aSMaxime Coquelin 	if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
448f2462150SFerruh Yigit 		PMD_INIT_LOG(INFO, "host does not support rx control");
449ca041cd4SIvan Ilchenko 		return -ENOTSUP;
450e9e414a4SStephen Hemminger 	}
451e9e414a4SStephen Hemminger 
4526c3169a3SBruce Richardson 	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
4536c3169a3SBruce Richardson 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
4546c3169a3SBruce Richardson 	ctrl.data[0] = 1;
4556c3169a3SBruce Richardson 	dlen[0] = 1;
4566c3169a3SBruce Richardson 
4576c3169a3SBruce Richardson 	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
458ca041cd4SIvan Ilchenko 	if (ret) {
4596c3169a3SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
460ca041cd4SIvan Ilchenko 		return -EAGAIN;
4616c3169a3SBruce Richardson 	}
4626c3169a3SBruce Richardson 
463ca041cd4SIvan Ilchenko 	return 0;
464ca041cd4SIvan Ilchenko }
465ca041cd4SIvan Ilchenko 
466ca041cd4SIvan Ilchenko static int
4676c3169a3SBruce Richardson virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
4686c3169a3SBruce Richardson {
4696c3169a3SBruce Richardson 	struct virtio_hw *hw = dev->data->dev_private;
4706c3169a3SBruce Richardson 	struct virtio_pmd_ctrl ctrl;
4716c3169a3SBruce Richardson 	int dlen[1];
4726c3169a3SBruce Richardson 	int ret;
4736c3169a3SBruce Richardson 
474b4f9a45aSMaxime Coquelin 	if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
475f2462150SFerruh Yigit 		PMD_INIT_LOG(INFO, "host does not support rx control");
476ca041cd4SIvan Ilchenko 		return -ENOTSUP;
477e9e414a4SStephen Hemminger 	}
478e9e414a4SStephen Hemminger 
4796c3169a3SBruce Richardson 	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
4806c3169a3SBruce Richardson 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
4816c3169a3SBruce Richardson 	ctrl.data[0] = 0;
4826c3169a3SBruce Richardson 	dlen[0] = 1;
4836c3169a3SBruce Richardson 
4846c3169a3SBruce Richardson 	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
485ca041cd4SIvan Ilchenko 	if (ret) {
4866c3169a3SBruce Richardson 		PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
487ca041cd4SIvan Ilchenko 		return -EAGAIN;
488ca041cd4SIvan Ilchenko 	}
489ca041cd4SIvan Ilchenko 
490ca041cd4SIvan Ilchenko 	return 0;
4916c3169a3SBruce Richardson }
4926c3169a3SBruce Richardson 
4934e8169ebSIvan Ilchenko uint16_t
4944e8169ebSIvan Ilchenko virtio_rx_mem_pool_buf_size(struct rte_mempool *mp)
4954e8169ebSIvan Ilchenko {
4964e8169ebSIvan Ilchenko 	return rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
4974e8169ebSIvan Ilchenko }
4984e8169ebSIvan Ilchenko 
4994e8169ebSIvan Ilchenko bool
5004e8169ebSIvan Ilchenko virtio_rx_check_scatter(uint16_t max_rx_pkt_len, uint16_t rx_buf_size,
5014e8169ebSIvan Ilchenko 			bool rx_scatter_enabled, const char **error)
5024e8169ebSIvan Ilchenko {
5034e8169ebSIvan Ilchenko 	if (!rx_scatter_enabled && max_rx_pkt_len > rx_buf_size) {
5044e8169ebSIvan Ilchenko 		*error = "Rx scatter is disabled and RxQ mbuf pool object size is too small";
5054e8169ebSIvan Ilchenko 		return false;
5064e8169ebSIvan Ilchenko 	}
5074e8169ebSIvan Ilchenko 
5084e8169ebSIvan Ilchenko 	return true;
5094e8169ebSIvan Ilchenko }
5104e8169ebSIvan Ilchenko 
5114e8169ebSIvan Ilchenko static bool
5124e8169ebSIvan Ilchenko virtio_check_scatter_on_all_rx_queues(struct rte_eth_dev *dev,
5134e8169ebSIvan Ilchenko 				      uint16_t frame_size)
5144e8169ebSIvan Ilchenko {
5154e8169ebSIvan Ilchenko 	struct virtio_hw *hw = dev->data->dev_private;
5164e8169ebSIvan Ilchenko 	struct virtnet_rx *rxvq;
5174e8169ebSIvan Ilchenko 	struct virtqueue *vq;
5184e8169ebSIvan Ilchenko 	unsigned int qidx;
5194e8169ebSIvan Ilchenko 	uint16_t buf_size;
5204e8169ebSIvan Ilchenko 	const char *error;
5214e8169ebSIvan Ilchenko 
5224e8169ebSIvan Ilchenko 	if (hw->vqs == NULL)
5234e8169ebSIvan Ilchenko 		return true;
5244e8169ebSIvan Ilchenko 
52584cc857bSZhihong Peng 	for (qidx = 0; qidx < hw->max_queue_pairs; qidx++) {
52684cc857bSZhihong Peng 		vq = hw->vqs[2 * qidx + VTNET_SQ_RQ_QUEUE_IDX];
52784cc857bSZhihong Peng 		if (vq == NULL)
52884cc857bSZhihong Peng 			continue;
52984cc857bSZhihong Peng 
5304e8169ebSIvan Ilchenko 		rxvq = &vq->rxq;
5314e8169ebSIvan Ilchenko 		if (rxvq->mpool == NULL)
5324e8169ebSIvan Ilchenko 			continue;
5334e8169ebSIvan Ilchenko 		buf_size = virtio_rx_mem_pool_buf_size(rxvq->mpool);
5344e8169ebSIvan Ilchenko 
5354e8169ebSIvan Ilchenko 		if (!virtio_rx_check_scatter(frame_size, buf_size,
5364e8169ebSIvan Ilchenko 					     hw->rx_ol_scatter, &error)) {
5374e8169ebSIvan Ilchenko 			PMD_INIT_LOG(ERR, "MTU check for RxQ %u failed: %s",
5384e8169ebSIvan Ilchenko 				     qidx, error);
5394e8169ebSIvan Ilchenko 			return false;
5404e8169ebSIvan Ilchenko 		}
5414e8169ebSIvan Ilchenko 	}
5424e8169ebSIvan Ilchenko 
5434e8169ebSIvan Ilchenko 	return true;
5444e8169ebSIvan Ilchenko }
5454e8169ebSIvan Ilchenko 
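/*
 * MTU update: reject values outside [RTE_ETHER_MIN_MTU, device max frame]
 * (capped at VIRTIO_MAX_RX_PKTLEN), re-run the Rx scatter check on all
 * configured queues, then cache the new maximum Rx packet length.
 */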
5464ec2424aSSouvik Dey #define VLAN_TAG_LEN           4    /* 802.3ac tag (not DMA'd) */
5474ec2424aSSouvik Dey static int
5484ec2424aSSouvik Dey virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
5494ec2424aSSouvik Dey {
5504ec2424aSSouvik Dey 	struct virtio_hw *hw = dev->data->dev_private;
55135b2d13fSOlivier Matz 	uint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +
5524ec2424aSSouvik Dey 				 hw->vtnet_hdr_size;
5534ec2424aSSouvik Dey 	uint32_t frame_size = mtu + ether_hdr_len;
55449d26d9eSMaxime Coquelin 	uint32_t max_frame_size = hw->max_mtu + ether_hdr_len;
5554ec2424aSSouvik Dey 
55649d26d9eSMaxime Coquelin 	max_frame_size = RTE_MIN(max_frame_size, VIRTIO_MAX_RX_PKTLEN);
55749d26d9eSMaxime Coquelin 
55835b2d13fSOlivier Matz 	if (mtu < RTE_ETHER_MIN_MTU || frame_size > max_frame_size) {
559f2462150SFerruh Yigit 		PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
56035b2d13fSOlivier Matz 			RTE_ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
5614ec2424aSSouvik Dey 		return -EINVAL;
5624ec2424aSSouvik Dey 	}
5634e8169ebSIvan Ilchenko 
5644e8169ebSIvan Ilchenko 	if (!virtio_check_scatter_on_all_rx_queues(dev, frame_size)) {
5654e8169ebSIvan Ilchenko 		PMD_INIT_LOG(ERR, "MTU vs Rx scatter and Rx buffers check failed");
5664e8169ebSIvan Ilchenko 		return -EINVAL;
5674e8169ebSIvan Ilchenko 	}
5684e8169ebSIvan Ilchenko 
5694e8169ebSIvan Ilchenko 	hw->max_rx_pkt_len = frame_size;
5704e8169ebSIvan Ilchenko 
5714ec2424aSSouvik Dey 	return 0;
5724ec2424aSSouvik Dey }
5734ec2424aSSouvik Dey 
574c056be23SJianfeng Tan static int
575c056be23SJianfeng Tan virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
576c056be23SJianfeng Tan {
5778f66bc4aSTiwei Bie 	struct virtio_hw *hw = dev->data->dev_private;
578c056be23SJianfeng Tan 	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
5793169550fSMaxime Coquelin 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
580c056be23SJianfeng Tan 
581c056be23SJianfeng Tan 	virtqueue_enable_intr(vq);
5828f66bc4aSTiwei Bie 	virtio_mb(hw->weak_barriers);
583c056be23SJianfeng Tan 	return 0;
584c056be23SJianfeng Tan }
585c056be23SJianfeng Tan 
586c056be23SJianfeng Tan static int
587c056be23SJianfeng Tan virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
588c056be23SJianfeng Tan {
589c056be23SJianfeng Tan 	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
5903169550fSMaxime Coquelin 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
591c056be23SJianfeng Tan 
592c056be23SJianfeng Tan 	virtqueue_disable_intr(vq);
593c056be23SJianfeng Tan 	return 0;
594c056be23SJianfeng Tan }
595c056be23SJianfeng Tan 
596426858d6SChengwen Feng static int
597426858d6SChengwen Feng virtio_dev_priv_dump(struct rte_eth_dev *dev, FILE *f)
598426858d6SChengwen Feng {
599426858d6SChengwen Feng 	struct virtio_hw *hw = dev->data->dev_private;
600426858d6SChengwen Feng 
601426858d6SChengwen Feng 	fprintf(f, "guest_features: 0x%" PRIx64 "\n", hw->guest_features);
602426858d6SChengwen Feng 	fprintf(f, "vtnet_hdr_size: %u\n", hw->vtnet_hdr_size);
603426858d6SChengwen Feng 	fprintf(f, "use_vec: rx-%u tx-%u\n", hw->use_vec_rx, hw->use_vec_tx);
604426858d6SChengwen Feng 	fprintf(f, "use_inorder: rx-%u tx-%u\n", hw->use_inorder_rx, hw->use_inorder_tx);
605426858d6SChengwen Feng 	fprintf(f, "intr_lsc: %u\n", hw->intr_lsc);
606426858d6SChengwen Feng 	fprintf(f, "max_mtu: %u\n", hw->max_mtu);
607426858d6SChengwen Feng 	fprintf(f, "max_rx_pkt_len: %zu\n", hw->max_rx_pkt_len);
608426858d6SChengwen Feng 	fprintf(f, "max_queue_pairs: %u\n", hw->max_queue_pairs);
609426858d6SChengwen Feng 	fprintf(f, "req_guest_features: 0x%" PRIx64 "\n", hw->req_guest_features);
610426858d6SChengwen Feng 
611426858d6SChengwen Feng 	return 0;
612426858d6SChengwen Feng }
613426858d6SChengwen Feng 
6146c3169a3SBruce Richardson /*
6156c3169a3SBruce Richardson  * dev_ops for virtio, bare necessities for basic operation
6166c3169a3SBruce Richardson  */
6176c3169a3SBruce Richardson static const struct eth_dev_ops virtio_eth_dev_ops = {
6186c3169a3SBruce Richardson 	.dev_configure           = virtio_dev_configure,
6196c3169a3SBruce Richardson 	.dev_start               = virtio_dev_start,
6206c3169a3SBruce Richardson 	.dev_stop                = virtio_dev_stop,
6216c3169a3SBruce Richardson 	.dev_close               = virtio_dev_close,
6226c3169a3SBruce Richardson 	.promiscuous_enable      = virtio_dev_promiscuous_enable,
6236c3169a3SBruce Richardson 	.promiscuous_disable     = virtio_dev_promiscuous_disable,
6246c3169a3SBruce Richardson 	.allmulticast_enable     = virtio_dev_allmulticast_enable,
6256c3169a3SBruce Richardson 	.allmulticast_disable    = virtio_dev_allmulticast_disable,
6264ec2424aSSouvik Dey 	.mtu_set                 = virtio_mtu_set,
6276c3169a3SBruce Richardson 	.dev_infos_get           = virtio_dev_info_get,
6286c3169a3SBruce Richardson 	.stats_get               = virtio_dev_stats_get,
62976d4c652SHarry van Haaren 	.xstats_get              = virtio_dev_xstats_get,
630baf91c39SRemy Horton 	.xstats_get_names        = virtio_dev_xstats_get_names,
6316c3169a3SBruce Richardson 	.stats_reset             = virtio_dev_stats_reset,
63276d4c652SHarry van Haaren 	.xstats_reset            = virtio_dev_stats_reset,
6336c3169a3SBruce Richardson 	.link_update             = virtio_dev_link_update,
634289ba0c0SDavid Harton 	.vlan_offload_set        = virtio_dev_vlan_offload_set,
6356c3169a3SBruce Richardson 	.rx_queue_setup          = virtio_dev_rx_queue_setup,
636c056be23SJianfeng Tan 	.rx_queue_intr_enable    = virtio_dev_rx_queue_intr_enable,
637c056be23SJianfeng Tan 	.rx_queue_intr_disable   = virtio_dev_rx_queue_intr_disable,
6386c3169a3SBruce Richardson 	.tx_queue_setup          = virtio_dev_tx_queue_setup,
6390c9d6620SMaxime Coquelin 	.rss_hash_update         = virtio_dev_rss_hash_update,
6400c9d6620SMaxime Coquelin 	.rss_hash_conf_get       = virtio_dev_rss_hash_conf_get,
6410c9d6620SMaxime Coquelin 	.reta_update             = virtio_dev_rss_reta_update,
6420c9d6620SMaxime Coquelin 	.reta_query              = virtio_dev_rss_reta_query,
6436c3169a3SBruce Richardson 	/* collect stats per queue */
6446c3169a3SBruce Richardson 	.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
6456c3169a3SBruce Richardson 	.vlan_filter_set         = virtio_vlan_filter_set,
6466c3169a3SBruce Richardson 	.mac_addr_add            = virtio_mac_addr_add,
6476c3169a3SBruce Richardson 	.mac_addr_remove         = virtio_mac_addr_remove,
6486c3169a3SBruce Richardson 	.mac_addr_set            = virtio_mac_addr_set,
64964ac7e08SMiao Li 	.get_monitor_addr        = virtio_get_monitor_addr,
650426858d6SChengwen Feng 	.eth_dev_priv_dump       = virtio_dev_priv_dump,
6516c3169a3SBruce Richardson };
6526c3169a3SBruce Richardson 
6531c8489daSTiwei Bie /*
6541c8489daSTiwei Bie  * dev_ops for virtio-user in secondary processes, as we just have
6551c8489daSTiwei Bie  * some limited supports currently.
6561c8489daSTiwei Bie  */
6571c8489daSTiwei Bie const struct eth_dev_ops virtio_user_secondary_eth_dev_ops = {
6581c8489daSTiwei Bie 	.dev_infos_get           = virtio_dev_info_get,
6591c8489daSTiwei Bie 	.stats_get               = virtio_dev_stats_get,
6601c8489daSTiwei Bie 	.xstats_get              = virtio_dev_xstats_get,
6611c8489daSTiwei Bie 	.xstats_get_names        = virtio_dev_xstats_get_names,
6621c8489daSTiwei Bie 	.stats_reset             = virtio_dev_stats_reset,
6631c8489daSTiwei Bie 	.xstats_reset            = virtio_dev_stats_reset,
6641c8489daSTiwei Bie 	/* collect stats per queue */
6651c8489daSTiwei Bie 	.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
6661c8489daSTiwei Bie };
6671c8489daSTiwei Bie 
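/* Aggregate per-virtqueue counters into the generic ethdev stats. */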
6686c3169a3SBruce Richardson static void
66976d4c652SHarry van Haaren virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
6706c3169a3SBruce Richardson {
6716c3169a3SBruce Richardson 	unsigned i;
6726c3169a3SBruce Richardson 
6736c3169a3SBruce Richardson 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
67401ad44fdSHuawei Xie 		const struct virtnet_tx *txvq = dev->data->tx_queues[i];
6756c3169a3SBruce Richardson 		if (txvq == NULL)
6766c3169a3SBruce Richardson 			continue;
6776c3169a3SBruce Richardson 
67801ad44fdSHuawei Xie 		stats->opackets += txvq->stats.packets;
67901ad44fdSHuawei Xie 		stats->obytes += txvq->stats.bytes;
6806c3169a3SBruce Richardson 
6816c3169a3SBruce Richardson 		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
68201ad44fdSHuawei Xie 			stats->q_opackets[i] = txvq->stats.packets;
68301ad44fdSHuawei Xie 			stats->q_obytes[i] = txvq->stats.bytes;
6846c3169a3SBruce Richardson 		}
6856c3169a3SBruce Richardson 	}
6866c3169a3SBruce Richardson 
6876c3169a3SBruce Richardson 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
68801ad44fdSHuawei Xie 		const struct virtnet_rx *rxvq = dev->data->rx_queues[i];
6896c3169a3SBruce Richardson 		if (rxvq == NULL)
6906c3169a3SBruce Richardson 			continue;
6916c3169a3SBruce Richardson 
69201ad44fdSHuawei Xie 		stats->ipackets += rxvq->stats.packets;
69301ad44fdSHuawei Xie 		stats->ibytes += rxvq->stats.bytes;
69401ad44fdSHuawei Xie 		stats->ierrors += rxvq->stats.errors;
6956c3169a3SBruce Richardson 
6966c3169a3SBruce Richardson 		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
69701ad44fdSHuawei Xie 			stats->q_ipackets[i] = rxvq->stats.packets;
69801ad44fdSHuawei Xie 			stats->q_ibytes[i] = rxvq->stats.bytes;
6996c3169a3SBruce Richardson 		}
7006c3169a3SBruce Richardson 	}
7016c3169a3SBruce Richardson 
7026c3169a3SBruce Richardson 	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
7036c3169a3SBruce Richardson }
7046c3169a3SBruce Richardson 
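/*
 * When xstats_names is NULL only the number of xstats is returned;
 * otherwise the per-queue "rx_qN_*" / "tx_qN_*" names are filled in.
 */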
705baf91c39SRemy Horton static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
706baf91c39SRemy Horton 				       struct rte_eth_xstat_name *xstats_names,
707baf91c39SRemy Horton 				       __rte_unused unsigned limit)
708baf91c39SRemy Horton {
709baf91c39SRemy Horton 	unsigned i;
710baf91c39SRemy Horton 	unsigned count = 0;
711baf91c39SRemy Horton 	unsigned t;
712baf91c39SRemy Horton 
71301ad44fdSHuawei Xie 	unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
71401ad44fdSHuawei Xie 		dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
715baf91c39SRemy Horton 
7167e1eb993SYuanhan Liu 	if (xstats_names != NULL) {
717baf91c39SRemy Horton 		/* Note: limit checked in rte_eth_xstats_names() */
718baf91c39SRemy Horton 
719baf91c39SRemy Horton 		for (i = 0; i < dev->data->nb_rx_queues; i++) {
72043ec842cSDidier Pallard 			struct virtnet_rx *rxvq = dev->data->rx_queues[i];
721baf91c39SRemy Horton 			if (rxvq == NULL)
722baf91c39SRemy Horton 				continue;
72301ad44fdSHuawei Xie 			for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
724baf91c39SRemy Horton 				snprintf(xstats_names[count].name,
725baf91c39SRemy Horton 					sizeof(xstats_names[count].name),
726baf91c39SRemy Horton 					"rx_q%u_%s", i,
72701ad44fdSHuawei Xie 					rte_virtio_rxq_stat_strings[t].name);
728baf91c39SRemy Horton 				count++;
729baf91c39SRemy Horton 			}
730baf91c39SRemy Horton 		}
731baf91c39SRemy Horton 
732baf91c39SRemy Horton 		for (i = 0; i < dev->data->nb_tx_queues; i++) {
73343ec842cSDidier Pallard 			struct virtnet_tx *txvq = dev->data->tx_queues[i];
734baf91c39SRemy Horton 			if (txvq == NULL)
735baf91c39SRemy Horton 				continue;
73601ad44fdSHuawei Xie 			for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
737baf91c39SRemy Horton 				snprintf(xstats_names[count].name,
738baf91c39SRemy Horton 					sizeof(xstats_names[count].name),
739baf91c39SRemy Horton 					"tx_q%u_%s", i,
74001ad44fdSHuawei Xie 					rte_virtio_txq_stat_strings[t].name);
741baf91c39SRemy Horton 				count++;
742baf91c39SRemy Horton 			}
743baf91c39SRemy Horton 		}
744baf91c39SRemy Horton 		return count;
745baf91c39SRemy Horton 	}
746baf91c39SRemy Horton 	return nstats;
747baf91c39SRemy Horton }
748baf91c39SRemy Horton 
74976d4c652SHarry van Haaren static int
750e2aae1c1SRemy Horton virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
75176d4c652SHarry van Haaren 		      unsigned n)
75276d4c652SHarry van Haaren {
75376d4c652SHarry van Haaren 	unsigned i;
75476d4c652SHarry van Haaren 	unsigned count = 0;
75576d4c652SHarry van Haaren 
75601ad44fdSHuawei Xie 	unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
75701ad44fdSHuawei Xie 		dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;
75876d4c652SHarry van Haaren 
75976d4c652SHarry van Haaren 	if (n < nstats)
76076d4c652SHarry van Haaren 		return nstats;
76176d4c652SHarry van Haaren 
76276d4c652SHarry van Haaren 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
76301ad44fdSHuawei Xie 		struct virtnet_rx *rxvq = dev->data->rx_queues[i];
76476d4c652SHarry van Haaren 
76576d4c652SHarry van Haaren 		if (rxvq == NULL)
76676d4c652SHarry van Haaren 			continue;
76776d4c652SHarry van Haaren 
76876d4c652SHarry van Haaren 		unsigned t;
76976d4c652SHarry van Haaren 
77001ad44fdSHuawei Xie 		for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
77176d4c652SHarry van Haaren 			xstats[count].value = *(uint64_t *)(((char *)rxvq) +
77201ad44fdSHuawei Xie 				rte_virtio_rxq_stat_strings[t].offset);
773513c78aeSOlivier Matz 			xstats[count].id = count;
77476d4c652SHarry van Haaren 			count++;
77576d4c652SHarry van Haaren 		}
77676d4c652SHarry van Haaren 	}
77776d4c652SHarry van Haaren 
77876d4c652SHarry van Haaren 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
77901ad44fdSHuawei Xie 		struct virtnet_tx *txvq = dev->data->tx_queues[i];
78076d4c652SHarry van Haaren 
78176d4c652SHarry van Haaren 		if (txvq == NULL)
78276d4c652SHarry van Haaren 			continue;
78376d4c652SHarry van Haaren 
78476d4c652SHarry van Haaren 		unsigned t;
78576d4c652SHarry van Haaren 
78601ad44fdSHuawei Xie 		for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
78776d4c652SHarry van Haaren 			xstats[count].value = *(uint64_t *)(((char *)txvq) +
78801ad44fdSHuawei Xie 				rte_virtio_txq_stat_strings[t].offset);
789513c78aeSOlivier Matz 			xstats[count].id = count;
79076d4c652SHarry van Haaren 			count++;
79176d4c652SHarry van Haaren 		}
79276d4c652SHarry van Haaren 	}
79376d4c652SHarry van Haaren 
79476d4c652SHarry van Haaren 	return count;
79576d4c652SHarry van Haaren }
79676d4c652SHarry van Haaren 
797d5b0924bSMatan Azrad static int
79876d4c652SHarry van Haaren virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
79976d4c652SHarry van Haaren {
80076d4c652SHarry van Haaren 	virtio_update_stats(dev, stats);
801d5b0924bSMatan Azrad 
802d5b0924bSMatan Azrad 	return 0;
80376d4c652SHarry van Haaren }
80476d4c652SHarry van Haaren 
8059970a9adSIgor Romanov static int
8066c3169a3SBruce Richardson virtio_dev_stats_reset(struct rte_eth_dev *dev)
8076c3169a3SBruce Richardson {
8086c3169a3SBruce Richardson 	unsigned int i;
8096c3169a3SBruce Richardson 
8106c3169a3SBruce Richardson 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
81101ad44fdSHuawei Xie 		struct virtnet_tx *txvq = dev->data->tx_queues[i];
8126c3169a3SBruce Richardson 		if (txvq == NULL)
8136c3169a3SBruce Richardson 			continue;
8146c3169a3SBruce Richardson 
81501ad44fdSHuawei Xie 		txvq->stats.packets = 0;
81601ad44fdSHuawei Xie 		txvq->stats.bytes = 0;
81701ad44fdSHuawei Xie 		txvq->stats.multicast = 0;
81801ad44fdSHuawei Xie 		txvq->stats.broadcast = 0;
81901ad44fdSHuawei Xie 		memset(txvq->stats.size_bins, 0,
82001ad44fdSHuawei Xie 		       sizeof(txvq->stats.size_bins[0]) * 8);
8216c3169a3SBruce Richardson 	}
8226c3169a3SBruce Richardson 
8236c3169a3SBruce Richardson 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
82401ad44fdSHuawei Xie 		struct virtnet_rx *rxvq = dev->data->rx_queues[i];
8256c3169a3SBruce Richardson 		if (rxvq == NULL)
8266c3169a3SBruce Richardson 			continue;
8276c3169a3SBruce Richardson 
82801ad44fdSHuawei Xie 		rxvq->stats.packets = 0;
82901ad44fdSHuawei Xie 		rxvq->stats.bytes = 0;
83001ad44fdSHuawei Xie 		rxvq->stats.errors = 0;
83101ad44fdSHuawei Xie 		rxvq->stats.multicast = 0;
83201ad44fdSHuawei Xie 		rxvq->stats.broadcast = 0;
83301ad44fdSHuawei Xie 		memset(rxvq->stats.size_bins, 0,
83401ad44fdSHuawei Xie 		       sizeof(rxvq->stats.size_bins[0]) * 8);
8356c3169a3SBruce Richardson 	}
8369970a9adSIgor Romanov 
8379970a9adSIgor Romanov 	return 0;
8386c3169a3SBruce Richardson }
8396c3169a3SBruce Richardson 
8406c3169a3SBruce Richardson static void
8416c3169a3SBruce Richardson virtio_set_hwaddr(struct virtio_hw *hw)
8426c3169a3SBruce Richardson {
8439328e105SMaxime Coquelin 	virtio_write_dev_config(hw,
8446c3169a3SBruce Richardson 			offsetof(struct virtio_net_config, mac),
84535b2d13fSOlivier Matz 			&hw->mac_addr, RTE_ETHER_ADDR_LEN);
8466c3169a3SBruce Richardson }
8476c3169a3SBruce Richardson 
8486c3169a3SBruce Richardson static void
8496c3169a3SBruce Richardson virtio_get_hwaddr(struct virtio_hw *hw)
8506c3169a3SBruce Richardson {
851b4f9a45aSMaxime Coquelin 	if (virtio_with_feature(hw, VIRTIO_NET_F_MAC)) {
8529328e105SMaxime Coquelin 		virtio_read_dev_config(hw,
8536c3169a3SBruce Richardson 			offsetof(struct virtio_net_config, mac),
85435b2d13fSOlivier Matz 			&hw->mac_addr, RTE_ETHER_ADDR_LEN);
8556c3169a3SBruce Richardson 	} else {
856538da7a1SOlivier Matz 		rte_eth_random_addr(&hw->mac_addr[0]);
8576c3169a3SBruce Richardson 		virtio_set_hwaddr(hw);
8586c3169a3SBruce Richardson 	}
8596c3169a3SBruce Richardson }
8606c3169a3SBruce Richardson 
8616d01e580SWei Dai static int
8626c3169a3SBruce Richardson virtio_mac_table_set(struct virtio_hw *hw,
8636c3169a3SBruce Richardson 		     const struct virtio_net_ctrl_mac *uc,
8646c3169a3SBruce Richardson 		     const struct virtio_net_ctrl_mac *mc)
8656c3169a3SBruce Richardson {
8666c3169a3SBruce Richardson 	struct virtio_pmd_ctrl ctrl;
8676c3169a3SBruce Richardson 	int err, len[2];
8686c3169a3SBruce Richardson 
869b4f9a45aSMaxime Coquelin 	if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
870e9083127SJianfeng Tan 		PMD_DRV_LOG(INFO, "host does not support mac table");
8716d01e580SWei Dai 		return -1;
87227046236SStephen Hemminger 	}
87327046236SStephen Hemminger 
8746c3169a3SBruce Richardson 	ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
8756c3169a3SBruce Richardson 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
8766c3169a3SBruce Richardson 
87735b2d13fSOlivier Matz 	len[0] = uc->entries * RTE_ETHER_ADDR_LEN + sizeof(uc->entries);
8786c3169a3SBruce Richardson 	memcpy(ctrl.data, uc, len[0]);
8796c3169a3SBruce Richardson 
88035b2d13fSOlivier Matz 	len[1] = mc->entries * RTE_ETHER_ADDR_LEN + sizeof(mc->entries);
8816c3169a3SBruce Richardson 	memcpy(ctrl.data + len[0], mc, len[1]);
8826c3169a3SBruce Richardson 
8836c3169a3SBruce Richardson 	err = virtio_send_command(hw->cvq, &ctrl, len, 2);
8846c3169a3SBruce Richardson 	if (err != 0)
8856c3169a3SBruce Richardson 		PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
8866d01e580SWei Dai 	return err;
8876c3169a3SBruce Richardson }
8886c3169a3SBruce Richardson 
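/*
 * Rebuild the unicast and multicast filter tables from the ethdev MAC list
 * (with the new address substituted at 'index') and push them to the device
 * with the MAC_TABLE_SET control command.
 */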
8896d01e580SWei Dai static int
8906d13ea8eSOlivier Matz virtio_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
8916c3169a3SBruce Richardson 		    uint32_t index, uint32_t vmdq __rte_unused)
8926c3169a3SBruce Richardson {
8936c3169a3SBruce Richardson 	struct virtio_hw *hw = dev->data->dev_private;
8946d13ea8eSOlivier Matz 	const struct rte_ether_addr *addrs = dev->data->mac_addrs;
8956c3169a3SBruce Richardson 	unsigned int i;
8966c3169a3SBruce Richardson 	struct virtio_net_ctrl_mac *uc, *mc;
8976c3169a3SBruce Richardson 
8986c3169a3SBruce Richardson 	if (index >= VIRTIO_MAX_MAC_ADDRS) {
8996c3169a3SBruce Richardson 		PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
9006d01e580SWei Dai 		return -EINVAL;
9016c3169a3SBruce Richardson 	}
9026c3169a3SBruce Richardson 
90335b2d13fSOlivier Matz 	uc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
90435b2d13fSOlivier Matz 		sizeof(uc->entries));
9056c3169a3SBruce Richardson 	uc->entries = 0;
90635b2d13fSOlivier Matz 	mc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
90735b2d13fSOlivier Matz 		sizeof(mc->entries));
9086c3169a3SBruce Richardson 	mc->entries = 0;
9096c3169a3SBruce Richardson 
9106c3169a3SBruce Richardson 	for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
9116d13ea8eSOlivier Matz 		const struct rte_ether_addr *addr
9126c3169a3SBruce Richardson 			= (i == index) ? mac_addr : addrs + i;
9136c3169a3SBruce Richardson 		struct virtio_net_ctrl_mac *tbl
914538da7a1SOlivier Matz 			= rte_is_multicast_ether_addr(addr) ? mc : uc;
9156c3169a3SBruce Richardson 
916*7698e655SSatha Rao 		if (rte_is_zero_ether_addr(addr))
917*7698e655SSatha Rao 			break;
91835b2d13fSOlivier Matz 		memcpy(&tbl->macs[tbl->entries++], addr, RTE_ETHER_ADDR_LEN);
9196c3169a3SBruce Richardson 	}
9206c3169a3SBruce Richardson 
9216d01e580SWei Dai 	return virtio_mac_table_set(hw, uc, mc);
9226c3169a3SBruce Richardson }
9236c3169a3SBruce Richardson 
9246c3169a3SBruce Richardson static void
9256c3169a3SBruce Richardson virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
9266c3169a3SBruce Richardson {
9276c3169a3SBruce Richardson 	struct virtio_hw *hw = dev->data->dev_private;
9286d13ea8eSOlivier Matz 	struct rte_ether_addr *addrs = dev->data->mac_addrs;
9296c3169a3SBruce Richardson 	struct virtio_net_ctrl_mac *uc, *mc;
9306c3169a3SBruce Richardson 	unsigned int i;
9316c3169a3SBruce Richardson 
9326c3169a3SBruce Richardson 	if (index >= VIRTIO_MAX_MAC_ADDRS) {
9336c3169a3SBruce Richardson 		PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
9346c3169a3SBruce Richardson 		return;
9356c3169a3SBruce Richardson 	}
9366c3169a3SBruce Richardson 
93735b2d13fSOlivier Matz 	uc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
93835b2d13fSOlivier Matz 		sizeof(uc->entries));
9396c3169a3SBruce Richardson 	uc->entries = 0;
94035b2d13fSOlivier Matz 	mc = alloca(VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN +
94135b2d13fSOlivier Matz 		sizeof(mc->entries));
9426c3169a3SBruce Richardson 	mc->entries = 0;
9436c3169a3SBruce Richardson 
9446c3169a3SBruce Richardson 	for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
9456c3169a3SBruce Richardson 		struct virtio_net_ctrl_mac *tbl;
9466c3169a3SBruce Richardson 
947538da7a1SOlivier Matz 		if (i == index || rte_is_zero_ether_addr(addrs + i))
9486c3169a3SBruce Richardson 			continue;
9496c3169a3SBruce Richardson 
950538da7a1SOlivier Matz 		tbl = rte_is_multicast_ether_addr(addrs + i) ? mc : uc;
95135b2d13fSOlivier Matz 		memcpy(&tbl->macs[tbl->entries++], addrs + i,
95235b2d13fSOlivier Matz 			RTE_ETHER_ADDR_LEN);
9536c3169a3SBruce Richardson 	}
9546c3169a3SBruce Richardson 
9556c3169a3SBruce Richardson 	virtio_mac_table_set(hw, uc, mc);
9566c3169a3SBruce Richardson }
9576c3169a3SBruce Richardson 
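/*
 * Set the default MAC: prefer the atomic CTRL_MAC_ADDR_SET control command,
 * otherwise fall back to writing the address into the device config space
 * (only possible when VIRTIO_NET_F_MAC is negotiated).
 */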
958caccf8b3SOlivier Matz static int
9596d13ea8eSOlivier Matz virtio_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
9606c3169a3SBruce Richardson {
9616c3169a3SBruce Richardson 	struct virtio_hw *hw = dev->data->dev_private;
9626c3169a3SBruce Richardson 
96335b2d13fSOlivier Matz 	memcpy(hw->mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
9646c3169a3SBruce Richardson 
9656c3169a3SBruce Richardson 	/* Use atomic update if available */
966b4f9a45aSMaxime Coquelin 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
9676c3169a3SBruce Richardson 		struct virtio_pmd_ctrl ctrl;
96835b2d13fSOlivier Matz 		int len = RTE_ETHER_ADDR_LEN;
9696c3169a3SBruce Richardson 
9706c3169a3SBruce Richardson 		ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
9716c3169a3SBruce Richardson 		ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
9726c3169a3SBruce Richardson 
97335b2d13fSOlivier Matz 		memcpy(ctrl.data, mac_addr, RTE_ETHER_ADDR_LEN);
974caccf8b3SOlivier Matz 		return virtio_send_command(hw->cvq, &ctrl, &len, 1);
975caccf8b3SOlivier Matz 	}
976caccf8b3SOlivier Matz 
977b4f9a45aSMaxime Coquelin 	if (!virtio_with_feature(hw, VIRTIO_NET_F_MAC))
978caccf8b3SOlivier Matz 		return -ENOTSUP;
979caccf8b3SOlivier Matz 
9806c3169a3SBruce Richardson 	virtio_set_hwaddr(hw);
981caccf8b3SOlivier Matz 	return 0;
9826c3169a3SBruce Richardson }
9836c3169a3SBruce Richardson 
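/*
 * Power-monitor support: for packed rings the monitor watches the flags of
 * the next used-ring entry and wakes once the AVAIL/USED bits indicate it
 * was used (relative to the wrap counter); for split rings it watches the
 * used index and wakes when it advances past the last consumed entry.
 */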
98464ac7e08SMiao Li #define CLB_VAL_IDX 0
98564ac7e08SMiao Li #define CLB_MSK_IDX 1
98664ac7e08SMiao Li #define CLB_MATCH_IDX 2
98764ac7e08SMiao Li static int
98864ac7e08SMiao Li virtio_monitor_callback(const uint64_t value,
98964ac7e08SMiao Li 		const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])
99064ac7e08SMiao Li {
99164ac7e08SMiao Li 	const uint64_t m = opaque[CLB_MSK_IDX];
99264ac7e08SMiao Li 	const uint64_t v = opaque[CLB_VAL_IDX];
99364ac7e08SMiao Li 	const uint64_t c = opaque[CLB_MATCH_IDX];
99464ac7e08SMiao Li 
99564ac7e08SMiao Li 	if (c)
99664ac7e08SMiao Li 		return (value & m) == v ? -1 : 0;
99764ac7e08SMiao Li 	else
99864ac7e08SMiao Li 		return (value & m) == v ? 0 : -1;
99964ac7e08SMiao Li }
100064ac7e08SMiao Li 
100164ac7e08SMiao Li static int
100264ac7e08SMiao Li virtio_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
100364ac7e08SMiao Li {
100464ac7e08SMiao Li 	struct virtnet_rx *rxvq = rx_queue;
100564ac7e08SMiao Li 	struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
100664ac7e08SMiao Li 	struct virtio_hw *hw;
100764ac7e08SMiao Li 
100864ac7e08SMiao Li 	if (vq == NULL)
100964ac7e08SMiao Li 		return -EINVAL;
101064ac7e08SMiao Li 
101164ac7e08SMiao Li 	hw = vq->hw;
101264ac7e08SMiao Li 	if (virtio_with_packed_queue(hw)) {
101364ac7e08SMiao Li 		struct vring_packed_desc *desc;
101464ac7e08SMiao Li 		desc = vq->vq_packed.ring.desc;
101564ac7e08SMiao Li 		pmc->addr = &desc[vq->vq_used_cons_idx].flags;
101664ac7e08SMiao Li 		if (vq->vq_packed.used_wrap_counter)
101764ac7e08SMiao Li 			pmc->opaque[CLB_VAL_IDX] =
101864ac7e08SMiao Li 						VRING_PACKED_DESC_F_AVAIL_USED;
101964ac7e08SMiao Li 		else
102064ac7e08SMiao Li 			pmc->opaque[CLB_VAL_IDX] = 0;
102164ac7e08SMiao Li 		pmc->opaque[CLB_MSK_IDX] = VRING_PACKED_DESC_F_AVAIL_USED;
102264ac7e08SMiao Li 		pmc->opaque[CLB_MATCH_IDX] = 1;
102364ac7e08SMiao Li 		pmc->size = sizeof(desc[vq->vq_used_cons_idx].flags);
102464ac7e08SMiao Li 	} else {
102564ac7e08SMiao Li 		pmc->addr = &vq->vq_split.ring.used->idx;
102664ac7e08SMiao Li 		pmc->opaque[CLB_VAL_IDX] = vq->vq_used_cons_idx
102764ac7e08SMiao Li 					& (vq->vq_nentries - 1);
102864ac7e08SMiao Li 		pmc->opaque[CLB_MSK_IDX] = vq->vq_nentries - 1;
102964ac7e08SMiao Li 		pmc->opaque[CLB_MATCH_IDX] = 0;
103064ac7e08SMiao Li 		pmc->size = sizeof(vq->vq_split.ring.used->idx);
103164ac7e08SMiao Li 	}
103264ac7e08SMiao Li 	pmc->fn = virtio_monitor_callback;
103364ac7e08SMiao Li 
103464ac7e08SMiao Li 	return 0;
103564ac7e08SMiao Li }
103664ac7e08SMiao Li 
10376c3169a3SBruce Richardson static int
10386c3169a3SBruce Richardson virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
10396c3169a3SBruce Richardson {
10406c3169a3SBruce Richardson 	struct virtio_hw *hw = dev->data->dev_private;
10416c3169a3SBruce Richardson 	struct virtio_pmd_ctrl ctrl;
10426c3169a3SBruce Richardson 	int len;
10436c3169a3SBruce Richardson 
1044b4f9a45aSMaxime Coquelin 	if (!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
10456c3169a3SBruce Richardson 		return -ENOTSUP;
10466c3169a3SBruce Richardson 
10476c3169a3SBruce Richardson 	ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN;
10486c3169a3SBruce Richardson 	ctrl.hdr.cmd = on ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
10496c3169a3SBruce Richardson 	memcpy(ctrl.data, &vlan_id, sizeof(vlan_id));
10506c3169a3SBruce Richardson 	len = sizeof(vlan_id);
10516c3169a3SBruce Richardson 
10526c3169a3SBruce Richardson 	return virtio_send_command(hw->cvq, &ctrl, &len, 1);
10536c3169a3SBruce Richardson }
10546c3169a3SBruce Richardson 
10556ba1f63bSYuanhan Liu static int
10566bee9d5fSNithin Dabilpuram virtio_intr_unmask(struct rte_eth_dev *dev)
10576bee9d5fSNithin Dabilpuram {
10586bee9d5fSNithin Dabilpuram 	struct virtio_hw *hw = dev->data->dev_private;
10596bee9d5fSNithin Dabilpuram 
10606bee9d5fSNithin Dabilpuram 	if (rte_intr_ack(dev->intr_handle) < 0)
10616bee9d5fSNithin Dabilpuram 		return -1;
10626bee9d5fSNithin Dabilpuram 
1063f8b60756SMaxime Coquelin 	if (VIRTIO_OPS(hw)->intr_detect)
1064f8b60756SMaxime Coquelin 		VIRTIO_OPS(hw)->intr_detect(hw);
10656bee9d5fSNithin Dabilpuram 
10666bee9d5fSNithin Dabilpuram 	return 0;
10676bee9d5fSNithin Dabilpuram }
10686bee9d5fSNithin Dabilpuram 
10696bee9d5fSNithin Dabilpuram static int
1070fe19d49cSZhiyong Yang virtio_intr_enable(struct rte_eth_dev *dev)
1071fe19d49cSZhiyong Yang {
1072fe19d49cSZhiyong Yang 	struct virtio_hw *hw = dev->data->dev_private;
1073fe19d49cSZhiyong Yang 
1074fe19d49cSZhiyong Yang 	if (rte_intr_enable(dev->intr_handle) < 0)
1075fe19d49cSZhiyong Yang 		return -1;
1076fe19d49cSZhiyong Yang 
1077f8b60756SMaxime Coquelin 	if (VIRTIO_OPS(hw)->intr_detect)
1078f8b60756SMaxime Coquelin 		VIRTIO_OPS(hw)->intr_detect(hw);
1079fe19d49cSZhiyong Yang 
1080fe19d49cSZhiyong Yang 	return 0;
1081fe19d49cSZhiyong Yang }
1082fe19d49cSZhiyong Yang 
1083fe19d49cSZhiyong Yang static int
1084fe19d49cSZhiyong Yang virtio_intr_disable(struct rte_eth_dev *dev)
1085fe19d49cSZhiyong Yang {
1086fe19d49cSZhiyong Yang 	struct virtio_hw *hw = dev->data->dev_private;
1087fe19d49cSZhiyong Yang 
1088fe19d49cSZhiyong Yang 	if (rte_intr_disable(dev->intr_handle) < 0)
1089fe19d49cSZhiyong Yang 		return -1;
1090fe19d49cSZhiyong Yang 
1091f8b60756SMaxime Coquelin 	if (VIRTIO_OPS(hw)->intr_detect)
1092f8b60756SMaxime Coquelin 		VIRTIO_OPS(hw)->intr_detect(hw);
1093fe19d49cSZhiyong Yang 
1094fe19d49cSZhiyong Yang 	return 0;
1095fe19d49cSZhiyong Yang }
1096fe19d49cSZhiyong Yang 
1097fe19d49cSZhiyong Yang static int
1098b4f9a45aSMaxime Coquelin virtio_ethdev_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
10996c3169a3SBruce Richardson {
11003891f233SYuanhan Liu 	uint64_t host_features;
11016c3169a3SBruce Richardson 
11026c3169a3SBruce Richardson 	/* Prepare guest_features: feature that driver wants to support */
11033891f233SYuanhan Liu 	PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %" PRIx64,
110460e6f470SOlivier Matz 		req_features);
11056c3169a3SBruce Richardson 
11066c3169a3SBruce Richardson 	/* Read device(host) feature bits */
1107f8b60756SMaxime Coquelin 	host_features = VIRTIO_OPS(hw)->get_features(hw);
11083891f233SYuanhan Liu 	PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
11096c3169a3SBruce Richardson 		host_features);
11106c3169a3SBruce Richardson 
111149d26d9eSMaxime Coquelin 	/* If supported, ensure MTU value is valid before acknowledging it. */
111249d26d9eSMaxime Coquelin 	if (host_features & req_features & (1ULL << VIRTIO_NET_F_MTU)) {
111349d26d9eSMaxime Coquelin 		struct virtio_net_config config;
111449d26d9eSMaxime Coquelin 
11159328e105SMaxime Coquelin 		virtio_read_dev_config(hw,
111649d26d9eSMaxime Coquelin 			offsetof(struct virtio_net_config, mtu),
111749d26d9eSMaxime Coquelin 			&config.mtu, sizeof(config.mtu));
111849d26d9eSMaxime Coquelin 
111935b2d13fSOlivier Matz 		if (config.mtu < RTE_ETHER_MIN_MTU)
112049d26d9eSMaxime Coquelin 			req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
112149d26d9eSMaxime Coquelin 	}
112249d26d9eSMaxime Coquelin 
11236c3169a3SBruce Richardson 	/*
11246c3169a3SBruce Richardson 	 * Negotiate features: the subset of device feature bits accepted by the
11256c3169a3SBruce Richardson 	 * driver is written back as the guest feature bits.
11266c3169a3SBruce Richardson 	 */
112760e6f470SOlivier Matz 	hw->guest_features = req_features;
1128b4f9a45aSMaxime Coquelin 	hw->guest_features = virtio_negotiate_features(hw, host_features);
11293891f233SYuanhan Liu 	PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
11306c3169a3SBruce Richardson 		hw->guest_features);
11316ba1f63bSYuanhan Liu 
1132f8b60756SMaxime Coquelin 	if (VIRTIO_OPS(hw)->features_ok(hw) < 0)
11336ba1f63bSYuanhan Liu 		return -1;
1134ce40b4a8SAdrian Moreno 
1135b4f9a45aSMaxime Coquelin 	if (virtio_with_feature(hw, VIRTIO_F_VERSION_1)) {
11369328e105SMaxime Coquelin 		virtio_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
1137cbb135b3SMaxime Coquelin 
11389328e105SMaxime Coquelin 		if (!(virtio_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
1139cbb135b3SMaxime Coquelin 			PMD_INIT_LOG(ERR, "Failed to set FEATURES_OK status!");
11406ba1f63bSYuanhan Liu 			return -1;
11416ba1f63bSYuanhan Liu 		}
11426ba1f63bSYuanhan Liu 	}
11436ba1f63bSYuanhan Liu 
114460e6f470SOlivier Matz 	hw->req_guest_features = req_features;
114560e6f470SOlivier Matz 
11466ba1f63bSYuanhan Liu 	return 0;
11476c3169a3SBruce Richardson }
11486c3169a3SBruce Richardson 
11497365504fSXiao Wang static void
11507365504fSXiao Wang virtio_notify_peers(struct rte_eth_dev *dev)
11517365504fSXiao Wang {
11527365504fSXiao Wang 	struct virtio_hw *hw = dev->data->dev_private;
11537c7f2e60SZhiyong Yang 	struct virtnet_rx *rxvq;
11547365504fSXiao Wang 	struct rte_mbuf *rarp_mbuf;
11557365504fSXiao Wang 
11567c7f2e60SZhiyong Yang 	if (!dev->data->rx_queues)
11577c7f2e60SZhiyong Yang 		return;
11587c7f2e60SZhiyong Yang 
11597c7f2e60SZhiyong Yang 	rxvq = dev->data->rx_queues[0];
1160f42deafaSZhiyong Yang 	if (!rxvq)
1161f42deafaSZhiyong Yang 		return;
1162f42deafaSZhiyong Yang 
11637365504fSXiao Wang 	rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool,
11646d13ea8eSOlivier Matz 			(struct rte_ether_addr *)hw->mac_addr);
11657365504fSXiao Wang 	if (rarp_mbuf == NULL) {
11667365504fSXiao Wang 		PMD_DRV_LOG(ERR, "failed to make RARP packet.");
11677365504fSXiao Wang 		return;
11687365504fSXiao Wang 	}
11697365504fSXiao Wang 
11703666feb3SDavid Marchand 	rte_spinlock_lock(&hw->state_lock);
11713666feb3SDavid Marchand 	if (hw->started == 0) {
11727365504fSXiao Wang 		/* If virtio port just stopped, no need to send RARP */
11737365504fSXiao Wang 		rte_pktmbuf_free(rarp_mbuf);
11743666feb3SDavid Marchand 		goto out;
11757365504fSXiao Wang 	}
11763666feb3SDavid Marchand 	hw->started = 0;
11777365504fSXiao Wang 
11783666feb3SDavid Marchand 	/*
11793666feb3SDavid Marchand 	 * Prevent the worker threads from touching queues to avoid contention;
11803666feb3SDavid Marchand 	 * 1 ms should be enough for the ongoing Tx function to finish.
11813666feb3SDavid Marchand 	 */
11823666feb3SDavid Marchand 	rte_delay_ms(1);
11833666feb3SDavid Marchand 
11843666feb3SDavid Marchand 	hw->inject_pkts = &rarp_mbuf;
11853666feb3SDavid Marchand 	dev->tx_pkt_burst(dev->data->tx_queues[0], &rarp_mbuf, 1);
11863666feb3SDavid Marchand 	hw->inject_pkts = NULL;
11873666feb3SDavid Marchand 
11883666feb3SDavid Marchand 	hw->started = 1;
11893666feb3SDavid Marchand 
11903666feb3SDavid Marchand out:
11913666feb3SDavid Marchand 	rte_spinlock_unlock(&hw->state_lock);
11927365504fSXiao Wang }
11937365504fSXiao Wang 
11947365504fSXiao Wang static void
11957365504fSXiao Wang virtio_ack_link_announce(struct rte_eth_dev *dev)
11967365504fSXiao Wang {
11977365504fSXiao Wang 	struct virtio_hw *hw = dev->data->dev_private;
11987365504fSXiao Wang 	struct virtio_pmd_ctrl ctrl;
11997365504fSXiao Wang 
12007365504fSXiao Wang 	ctrl.hdr.class = VIRTIO_NET_CTRL_ANNOUNCE;
12017365504fSXiao Wang 	ctrl.hdr.cmd = VIRTIO_NET_CTRL_ANNOUNCE_ACK;
12027365504fSXiao Wang 
12037365504fSXiao Wang 	virtio_send_command(hw->cvq, &ctrl, NULL, 0);
12047365504fSXiao Wang }
12057365504fSXiao Wang 
12066c3169a3SBruce Richardson /*
12077365504fSXiao Wang  * Process the virtio config-changed interrupt: invoke the LSC callback
12087365504fSXiao Wang  * if the link state changed, and generate a gratuitous RARP packet if
12097365504fSXiao Wang  * the status indicates an ANNOUNCE.
12106c3169a3SBruce Richardson  */
1211ef53b603SJianfeng Tan void
1212c23a1a30SQi Zhang virtio_interrupt_handler(void *param)
12136c3169a3SBruce Richardson {
12146c3169a3SBruce Richardson 	struct rte_eth_dev *dev = param;
12156c3169a3SBruce Richardson 	struct virtio_hw *hw = dev->data->dev_private;
12166c3169a3SBruce Richardson 	uint8_t isr;
12175db1684eSTiwei Bie 	uint16_t status;
12186c3169a3SBruce Richardson 
12196c3169a3SBruce Richardson 	/* Read interrupt status which clears interrupt */
12206a504290SMaxime Coquelin 	isr = virtio_get_isr(hw);
12216c3169a3SBruce Richardson 	PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);
12226c3169a3SBruce Richardson 
12236bee9d5fSNithin Dabilpuram 	if (virtio_intr_unmask(dev) < 0)
12246c3169a3SBruce Richardson 		PMD_DRV_LOG(ERR, "interrupt enable failed");
12256c3169a3SBruce Richardson 
12266a504290SMaxime Coquelin 	if (isr & VIRTIO_ISR_CONFIG) {
12276c3169a3SBruce Richardson 		if (virtio_dev_link_update(dev, 0) == 0)
12285723fbedSFerruh Yigit 			rte_eth_dev_callback_process(dev,
1229d6af1a13SBernard Iremonger 						     RTE_ETH_EVENT_INTR_LSC,
1230cebe3d7bSThomas Monjalon 						     NULL);
12316c3169a3SBruce Richardson 
1232b4f9a45aSMaxime Coquelin 		if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
12339328e105SMaxime Coquelin 			virtio_read_dev_config(hw,
12345db1684eSTiwei Bie 				offsetof(struct virtio_net_config, status),
12355db1684eSTiwei Bie 				&status, sizeof(status));
12365db1684eSTiwei Bie 			if (status & VIRTIO_NET_S_ANNOUNCE) {
12377365504fSXiao Wang 				virtio_notify_peers(dev);
1238ac860c86SZhiyong Yang 				if (hw->cvq)
12397365504fSXiao Wang 					virtio_ack_link_announce(dev);
12407365504fSXiao Wang 			}
12416c3169a3SBruce Richardson 		}
12425db1684eSTiwei Bie 	}
12435db1684eSTiwei Bie }
12446c3169a3SBruce Richardson 
12454819eae8SOlivier Matz /* set rx and tx handlers according to what is supported */
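/*
 * Selection order (descriptive note): for packed rings, Rx is chosen as
 * vectorized > mergeable > standard and Tx as vectorized > standard; for
 * split rings, Rx is vectorized > in-order > mergeable > standard and Tx
 * is in-order > standard.
 */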
12466c3169a3SBruce Richardson static void
12474819eae8SOlivier Matz set_rxtx_funcs(struct rte_eth_dev *eth_dev)
12486c3169a3SBruce Richardson {
12496c3169a3SBruce Richardson 	struct virtio_hw *hw = eth_dev->data->dev_private;
12504819eae8SOlivier Matz 
125100a5ea02SDilshod Urazov 	eth_dev->tx_pkt_prepare = virtio_xmit_pkts_prepare;
1252b4f9a45aSMaxime Coquelin 	if (virtio_with_packed_queue(hw)) {
1253892dc798SJens Freimann 		PMD_INIT_LOG(INFO,
12545c75a8efSTiwei Bie 			"virtio: using packed ring %s Tx path on port %u",
1255ccb10995SMarvin Liu 			hw->use_vec_tx ? "vectorized" : "standard",
1256892dc798SJens Freimann 			eth_dev->data->port_id);
1257ccb10995SMarvin Liu 		if (hw->use_vec_tx)
1258ccb10995SMarvin Liu 			eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed_vec;
1259ccb10995SMarvin Liu 		else
1260892dc798SJens Freimann 			eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
1261892dc798SJens Freimann 	} else {
1262892dc798SJens Freimann 		if (hw->use_inorder_tx) {
1263892dc798SJens Freimann 			PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
1264892dc798SJens Freimann 				eth_dev->data->port_id);
1265892dc798SJens Freimann 			eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
1266892dc798SJens Freimann 		} else {
1267892dc798SJens Freimann 			PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
1268892dc798SJens Freimann 				eth_dev->data->port_id);
1269892dc798SJens Freimann 			eth_dev->tx_pkt_burst = virtio_xmit_pkts;
1270892dc798SJens Freimann 		}
1271892dc798SJens Freimann 	}
1272892dc798SJens Freimann 
1273b4f9a45aSMaxime Coquelin 	if (virtio_with_packed_queue(hw)) {
1274ccb10995SMarvin Liu 		if (hw->use_vec_rx) {
1275ccb10995SMarvin Liu 			PMD_INIT_LOG(INFO,
1276ccb10995SMarvin Liu 				"virtio: using packed ring vectorized Rx path on port %u",
1277ccb10995SMarvin Liu 				eth_dev->data->port_id);
1278ccb10995SMarvin Liu 			eth_dev->rx_pkt_burst =
1279ccb10995SMarvin Liu 				&virtio_recv_pkts_packed_vec;
1280b4f9a45aSMaxime Coquelin 		} else if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
1281a76290c8SJens Freimann 			PMD_INIT_LOG(INFO,
1282a76290c8SJens Freimann 				"virtio: using packed ring mergeable buffer Rx path on port %u",
1283a76290c8SJens Freimann 				eth_dev->data->port_id);
1284a76290c8SJens Freimann 			eth_dev->rx_pkt_burst =
1285a76290c8SJens Freimann 				&virtio_recv_mergeable_pkts_packed;
1286a76290c8SJens Freimann 		} else {
1287a76290c8SJens Freimann 			PMD_INIT_LOG(INFO,
1288a76290c8SJens Freimann 				"virtio: using packed ring standard Rx path on port %u",
1289a76290c8SJens Freimann 				eth_dev->data->port_id);
1290a76290c8SJens Freimann 			eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
1291a76290c8SJens Freimann 		}
1292a76290c8SJens Freimann 	} else {
12934710e16aSMarvin Liu 		if (hw->use_vec_rx) {
12944710e16aSMarvin Liu 			PMD_INIT_LOG(INFO, "virtio: using vectorized Rx path on port %u",
12954819eae8SOlivier Matz 				eth_dev->data->port_id);
12964819eae8SOlivier Matz 			eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
12978f3bd7e8SMarvin Liu 		} else if (hw->use_inorder_rx) {
12988f3bd7e8SMarvin Liu 			PMD_INIT_LOG(INFO,
1299efcda136SMaxime Coquelin 				"virtio: using inorder Rx path on port %u",
13008f3bd7e8SMarvin Liu 				eth_dev->data->port_id);
1301efcda136SMaxime Coquelin 			eth_dev->rx_pkt_burst =	&virtio_recv_pkts_inorder;
1302b4f9a45aSMaxime Coquelin 		} else if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
13034819eae8SOlivier Matz 			PMD_INIT_LOG(INFO,
13044819eae8SOlivier Matz 				"virtio: using mergeable buffer Rx path on port %u",
13054819eae8SOlivier Matz 				eth_dev->data->port_id);
13066c3169a3SBruce Richardson 			eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
13074819eae8SOlivier Matz 		} else {
13084819eae8SOlivier Matz 			PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
13094819eae8SOlivier Matz 				eth_dev->data->port_id);
13106c3169a3SBruce Richardson 			eth_dev->rx_pkt_burst = &virtio_recv_pkts;
13116c3169a3SBruce Richardson 		}
1312a76290c8SJens Freimann 	}
13136c3169a3SBruce Richardson 
13144819eae8SOlivier Matz }
13154819eae8SOlivier Matz 
131626b683b4SJianfeng Tan /* Only support 1:1 queue/interrupt mapping so far.
131726b683b4SJianfeng Tan  * TODO: support n:1 queue/interrupt mapping when there is a limited number of
131826b683b4SJianfeng Tan  * interrupt vectors (<N+1).
131926b683b4SJianfeng Tan  */
132026b683b4SJianfeng Tan static int
132126b683b4SJianfeng Tan virtio_queues_bind_intr(struct rte_eth_dev *dev)
132226b683b4SJianfeng Tan {
132326b683b4SJianfeng Tan 	uint32_t i;
132426b683b4SJianfeng Tan 	struct virtio_hw *hw = dev->data->dev_private;
132526b683b4SJianfeng Tan 
1326f2462150SFerruh Yigit 	PMD_INIT_LOG(INFO, "queue/interrupt binding");
132726b683b4SJianfeng Tan 	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
1328d61138d4SHarman Kalra 		if (rte_intr_vec_list_index_set(dev->intr_handle, i,
1329d61138d4SHarman Kalra 						       i + 1))
1330d61138d4SHarman Kalra 			return -rte_errno;
1331f8b60756SMaxime Coquelin 		if (VIRTIO_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
133226b683b4SJianfeng Tan 						 VIRTIO_MSI_NO_VECTOR) {
133326b683b4SJianfeng Tan 			PMD_DRV_LOG(ERR, "failed to set queue vector");
133426b683b4SJianfeng Tan 			return -EBUSY;
133526b683b4SJianfeng Tan 		}
133626b683b4SJianfeng Tan 	}
133726b683b4SJianfeng Tan 
133826b683b4SJianfeng Tan 	return 0;
133926b683b4SJianfeng Tan }
134026b683b4SJianfeng Tan 
13419ebdeefeSJianfeng Tan static void
13429ebdeefeSJianfeng Tan virtio_queues_unbind_intr(struct rte_eth_dev *dev)
13439ebdeefeSJianfeng Tan {
13449ebdeefeSJianfeng Tan 	uint32_t i;
13459ebdeefeSJianfeng Tan 	struct virtio_hw *hw = dev->data->dev_private;
13469ebdeefeSJianfeng Tan 
1347f2462150SFerruh Yigit 	PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
13489ebdeefeSJianfeng Tan 	for (i = 0; i < dev->data->nb_rx_queues; ++i)
1349f8b60756SMaxime Coquelin 		VIRTIO_OPS(hw)->set_queue_irq(hw,
13509ebdeefeSJianfeng Tan 					     hw->vqs[i * VTNET_CQ],
13519ebdeefeSJianfeng Tan 					     VIRTIO_MSI_NO_VECTOR);
13529ebdeefeSJianfeng Tan }
13539ebdeefeSJianfeng Tan 
135426b683b4SJianfeng Tan static int
135526b683b4SJianfeng Tan virtio_configure_intr(struct rte_eth_dev *dev)
135626b683b4SJianfeng Tan {
135726b683b4SJianfeng Tan 	struct virtio_hw *hw = dev->data->dev_private;
13585a475f73SBoleslav Stankevich 	int ret;
135926b683b4SJianfeng Tan 
136026b683b4SJianfeng Tan 	if (!rte_intr_cap_multiple(dev->intr_handle)) {
136126b683b4SJianfeng Tan 		PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
136226b683b4SJianfeng Tan 		return -ENOTSUP;
136326b683b4SJianfeng Tan 	}
136426b683b4SJianfeng Tan 
136555e19d06SBoleslav Stankevich 	ret = rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues);
136655e19d06SBoleslav Stankevich 	if (ret < 0) {
136726b683b4SJianfeng Tan 		PMD_INIT_LOG(ERR, "Fail to create eventfd");
136855e19d06SBoleslav Stankevich 		return ret;
136926b683b4SJianfeng Tan 	}
137026b683b4SJianfeng Tan 
13715a475f73SBoleslav Stankevich 	ret = rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec",
13725a475f73SBoleslav Stankevich 				      hw->max_queue_pairs);
13735a475f73SBoleslav Stankevich 	if (ret < 0) {
137426b683b4SJianfeng Tan 		PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",
137526b683b4SJianfeng Tan 			     hw->max_queue_pairs);
13765a475f73SBoleslav Stankevich 		return ret;
137726b683b4SJianfeng Tan 	}
137826b683b4SJianfeng Tan 
13795be2325eSDavid Marchand 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
138026b683b4SJianfeng Tan 		/* Re-register callback to update max_intr */
138126b683b4SJianfeng Tan 		rte_intr_callback_unregister(dev->intr_handle,
138226b683b4SJianfeng Tan 					     virtio_interrupt_handler,
138326b683b4SJianfeng Tan 					     dev);
138426b683b4SJianfeng Tan 		rte_intr_callback_register(dev->intr_handle,
138526b683b4SJianfeng Tan 					   virtio_interrupt_handler,
138626b683b4SJianfeng Tan 					   dev);
13875be2325eSDavid Marchand 	}
138826b683b4SJianfeng Tan 
138926b683b4SJianfeng Tan 	/* DO NOT try to remove this! This function enables MSI-X; otherwise
139026b683b4SJianfeng Tan 	 * QEMU will encounter SIGSEGV when DRIVER_OK is sent.
139126b683b4SJianfeng Tan 	 * For legacy devices, this must also be done before queue/vector
139226b683b4SJianfeng Tan 	 * binding so that the config size changes from 20 to 24; otherwise
139326b683b4SJianfeng Tan 	 * VIRTIO_MSI_QUEUE_VECTOR (22) will be ignored.
139426b683b4SJianfeng Tan 	 */
1395fe19d49cSZhiyong Yang 	if (virtio_intr_enable(dev) < 0) {
139626b683b4SJianfeng Tan 		PMD_DRV_LOG(ERR, "interrupt enable failed");
139755e19d06SBoleslav Stankevich 		return -EINVAL;
139826b683b4SJianfeng Tan 	}
139926b683b4SJianfeng Tan 
140055e19d06SBoleslav Stankevich 	ret = virtio_queues_bind_intr(dev);
140155e19d06SBoleslav Stankevich 	if (ret < 0) {
140226b683b4SJianfeng Tan 		PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");
140355e19d06SBoleslav Stankevich 		return ret;
140426b683b4SJianfeng Tan 	}
140526b683b4SJianfeng Tan 
140626b683b4SJianfeng Tan 	return 0;
140726b683b4SJianfeng Tan }
14080c9d6620SMaxime Coquelin 
14093c3c54cfSIvan Ilchenko static void
14103c3c54cfSIvan Ilchenko virtio_get_speed_duplex(struct rte_eth_dev *eth_dev,
14113c3c54cfSIvan Ilchenko 			struct rte_eth_link *link)
14123c3c54cfSIvan Ilchenko {
14133c3c54cfSIvan Ilchenko 	struct virtio_hw *hw = eth_dev->data->dev_private;
14143c3c54cfSIvan Ilchenko 	struct virtio_net_config *config;
14153c3c54cfSIvan Ilchenko 	struct virtio_net_config local_config;
14163c3c54cfSIvan Ilchenko 
14173c3c54cfSIvan Ilchenko 	config = &local_config;
14183c3c54cfSIvan Ilchenko 	virtio_read_dev_config(hw,
14193c3c54cfSIvan Ilchenko 		offsetof(struct virtio_net_config, speed),
14203c3c54cfSIvan Ilchenko 		&config->speed, sizeof(config->speed));
14213c3c54cfSIvan Ilchenko 	virtio_read_dev_config(hw,
14223c3c54cfSIvan Ilchenko 		offsetof(struct virtio_net_config, duplex),
14233c3c54cfSIvan Ilchenko 		&config->duplex, sizeof(config->duplex));
14243c3c54cfSIvan Ilchenko 	hw->speed = config->speed;
14253c3c54cfSIvan Ilchenko 	hw->duplex = config->duplex;
14263c3c54cfSIvan Ilchenko 	if (link != NULL) {
14273c3c54cfSIvan Ilchenko 		link->link_duplex = hw->duplex;
14283c3c54cfSIvan Ilchenko 		link->link_speed  = hw->speed;
14293c3c54cfSIvan Ilchenko 	}
14303c3c54cfSIvan Ilchenko 	PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
14313c3c54cfSIvan Ilchenko 		     hw->speed, hw->duplex);
14323c3c54cfSIvan Ilchenko }
14333c3c54cfSIvan Ilchenko 
14340c9d6620SMaxime Coquelin static uint64_t
14350c9d6620SMaxime Coquelin ethdev_to_virtio_rss_offloads(uint64_t ethdev_hash_types)
14360c9d6620SMaxime Coquelin {
14370c9d6620SMaxime Coquelin 	uint64_t virtio_hash_types = 0;
14380c9d6620SMaxime Coquelin 
14390c9d6620SMaxime Coquelin 	if (ethdev_hash_types & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
14400c9d6620SMaxime Coquelin 				RTE_ETH_RSS_NONFRAG_IPV4_OTHER))
14410c9d6620SMaxime Coquelin 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_IPV4;
14420c9d6620SMaxime Coquelin 
14430c9d6620SMaxime Coquelin 	if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
14440c9d6620SMaxime Coquelin 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_TCPV4;
14450c9d6620SMaxime Coquelin 
14460c9d6620SMaxime Coquelin 	if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
14470c9d6620SMaxime Coquelin 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_UDPV4;
14480c9d6620SMaxime Coquelin 
14490c9d6620SMaxime Coquelin 	if (ethdev_hash_types & (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
14500c9d6620SMaxime Coquelin 				RTE_ETH_RSS_NONFRAG_IPV6_OTHER))
14510c9d6620SMaxime Coquelin 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_IPV6;
14520c9d6620SMaxime Coquelin 
14530c9d6620SMaxime Coquelin 	if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
14540c9d6620SMaxime Coquelin 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_TCPV6;
14550c9d6620SMaxime Coquelin 
14560c9d6620SMaxime Coquelin 	if (ethdev_hash_types & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
14570c9d6620SMaxime Coquelin 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_UDPV6;
14580c9d6620SMaxime Coquelin 
14590c9d6620SMaxime Coquelin 	if (ethdev_hash_types & RTE_ETH_RSS_IPV6_EX)
14600c9d6620SMaxime Coquelin 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_IP_EX;
14610c9d6620SMaxime Coquelin 
14620c9d6620SMaxime Coquelin 	if (ethdev_hash_types & RTE_ETH_RSS_IPV6_TCP_EX)
14630c9d6620SMaxime Coquelin 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_TCP_EX;
14640c9d6620SMaxime Coquelin 
14650c9d6620SMaxime Coquelin 	if (ethdev_hash_types & RTE_ETH_RSS_IPV6_UDP_EX)
14660c9d6620SMaxime Coquelin 		virtio_hash_types |= VIRTIO_NET_HASH_TYPE_UDP_EX;
14670c9d6620SMaxime Coquelin 
14680c9d6620SMaxime Coquelin 	return virtio_hash_types;
14690c9d6620SMaxime Coquelin }
14700c9d6620SMaxime Coquelin 
14710c9d6620SMaxime Coquelin static uint64_t
14720c9d6620SMaxime Coquelin virtio_to_ethdev_rss_offloads(uint64_t virtio_hash_types)
14730c9d6620SMaxime Coquelin {
14740c9d6620SMaxime Coquelin 	uint64_t rss_offloads = 0;
14750c9d6620SMaxime Coquelin 
14760c9d6620SMaxime Coquelin 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_IPV4)
14770c9d6620SMaxime Coquelin 		rss_offloads |= RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
14780c9d6620SMaxime Coquelin 			RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
14790c9d6620SMaxime Coquelin 
14800c9d6620SMaxime Coquelin 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_TCPV4)
14810c9d6620SMaxime Coquelin 		rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
14820c9d6620SMaxime Coquelin 
14830c9d6620SMaxime Coquelin 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_UDPV4)
14840c9d6620SMaxime Coquelin 		rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
14850c9d6620SMaxime Coquelin 
14860c9d6620SMaxime Coquelin 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_IPV6)
14870c9d6620SMaxime Coquelin 		rss_offloads |= RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
14880c9d6620SMaxime Coquelin 			RTE_ETH_RSS_NONFRAG_IPV6_OTHER;
14890c9d6620SMaxime Coquelin 
14900c9d6620SMaxime Coquelin 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_TCPV6)
14910c9d6620SMaxime Coquelin 		rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
14920c9d6620SMaxime Coquelin 
14930c9d6620SMaxime Coquelin 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_UDPV6)
14940c9d6620SMaxime Coquelin 		rss_offloads |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
14950c9d6620SMaxime Coquelin 
14960c9d6620SMaxime Coquelin 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_IP_EX)
14970c9d6620SMaxime Coquelin 		rss_offloads |= RTE_ETH_RSS_IPV6_EX;
14980c9d6620SMaxime Coquelin 
14990c9d6620SMaxime Coquelin 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_TCP_EX)
15000c9d6620SMaxime Coquelin 		rss_offloads |= RTE_ETH_RSS_IPV6_TCP_EX;
15010c9d6620SMaxime Coquelin 
15020c9d6620SMaxime Coquelin 	if (virtio_hash_types & VIRTIO_NET_HASH_TYPE_UDP_EX)
15030c9d6620SMaxime Coquelin 		rss_offloads |= RTE_ETH_RSS_IPV6_UDP_EX;
15040c9d6620SMaxime Coquelin 
15050c9d6620SMaxime Coquelin 	return rss_offloads;
15060c9d6620SMaxime Coquelin }
15070c9d6620SMaxime Coquelin 
15080c9d6620SMaxime Coquelin static int
15090c9d6620SMaxime Coquelin virtio_dev_get_rss_config(struct virtio_hw *hw, uint32_t *rss_hash_types)
15100c9d6620SMaxime Coquelin {
15110c9d6620SMaxime Coquelin 	struct virtio_net_config local_config;
15120c9d6620SMaxime Coquelin 	struct virtio_net_config *config = &local_config;
15130c9d6620SMaxime Coquelin 
15140c9d6620SMaxime Coquelin 	virtio_read_dev_config(hw,
15150c9d6620SMaxime Coquelin 			offsetof(struct virtio_net_config, rss_max_key_size),
15160c9d6620SMaxime Coquelin 			&config->rss_max_key_size,
15170c9d6620SMaxime Coquelin 			sizeof(config->rss_max_key_size));
15180c9d6620SMaxime Coquelin 	if (config->rss_max_key_size < VIRTIO_NET_RSS_KEY_SIZE) {
15190c9d6620SMaxime Coquelin 		PMD_INIT_LOG(ERR, "Invalid device RSS max key size (%u)",
15200c9d6620SMaxime Coquelin 				config->rss_max_key_size);
15210c9d6620SMaxime Coquelin 		return -EINVAL;
15220c9d6620SMaxime Coquelin 	}
15230c9d6620SMaxime Coquelin 
15240c9d6620SMaxime Coquelin 	virtio_read_dev_config(hw,
15250c9d6620SMaxime Coquelin 			offsetof(struct virtio_net_config,
15260c9d6620SMaxime Coquelin 				rss_max_indirection_table_length),
15270c9d6620SMaxime Coquelin 			&config->rss_max_indirection_table_length,
15280c9d6620SMaxime Coquelin 			sizeof(config->rss_max_indirection_table_length));
15290c9d6620SMaxime Coquelin 	if (config->rss_max_indirection_table_length < VIRTIO_NET_RSS_RETA_SIZE) {
15300c9d6620SMaxime Coquelin 		PMD_INIT_LOG(ERR, "Invalid device RSS max reta size (%u)",
15310c9d6620SMaxime Coquelin 				config->rss_max_indirection_table_length);
15320c9d6620SMaxime Coquelin 		return -EINVAL;
15330c9d6620SMaxime Coquelin 	}
15340c9d6620SMaxime Coquelin 
15350c9d6620SMaxime Coquelin 	virtio_read_dev_config(hw,
15360c9d6620SMaxime Coquelin 			offsetof(struct virtio_net_config, supported_hash_types),
15370c9d6620SMaxime Coquelin 			&config->supported_hash_types,
15380c9d6620SMaxime Coquelin 			sizeof(config->supported_hash_types));
15390c9d6620SMaxime Coquelin 	if ((config->supported_hash_types & VIRTIO_NET_HASH_TYPE_MASK) == 0) {
15400c9d6620SMaxime Coquelin 		PMD_INIT_LOG(ERR, "Invalid device RSS hash types (0x%x)",
15410c9d6620SMaxime Coquelin 				config->supported_hash_types);
15420c9d6620SMaxime Coquelin 		return -EINVAL;
15430c9d6620SMaxime Coquelin 	}
15440c9d6620SMaxime Coquelin 
15450c9d6620SMaxime Coquelin 	*rss_hash_types = config->supported_hash_types & VIRTIO_NET_HASH_TYPE_MASK;
15460c9d6620SMaxime Coquelin 
15470c9d6620SMaxime Coquelin 	PMD_INIT_LOG(DEBUG, "Device RSS config:");
15480c9d6620SMaxime Coquelin 	PMD_INIT_LOG(DEBUG, "\t-Max key size: %u", config->rss_max_key_size);
15490c9d6620SMaxime Coquelin 	PMD_INIT_LOG(DEBUG, "\t-Max reta size: %u", config->rss_max_indirection_table_length);
15500c9d6620SMaxime Coquelin 	PMD_INIT_LOG(DEBUG, "\t-Supported hash types: 0x%x", *rss_hash_types);
15510c9d6620SMaxime Coquelin 
15520c9d6620SMaxime Coquelin 	return 0;
15530c9d6620SMaxime Coquelin }
15540c9d6620SMaxime Coquelin 
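/*
 * Illustrative application-side usage (a sketch of the generic ethdev API,
 * not code from this file):
 *
 *	uint8_t key[VIRTIO_NET_RSS_KEY_SIZE] = { 0 };
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = key,
 *		.rss_key_len = VIRTIO_NET_RSS_KEY_SIZE,
 *		.rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 *	};
 *	rte_eth_dev_rss_hash_update(port_id, &conf);
 *
 * The ethdev layer dispatches such a call to virtio_dev_rss_hash_update()
 * below, which validates the key length (40 bytes) and hash types before
 * applying the new configuration via virtio_set_multiple_queues_rss().
 */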
15550c9d6620SMaxime Coquelin static int
15560c9d6620SMaxime Coquelin virtio_dev_rss_hash_update(struct rte_eth_dev *dev,
15570c9d6620SMaxime Coquelin 		struct rte_eth_rss_conf *rss_conf)
15580c9d6620SMaxime Coquelin {
15590c9d6620SMaxime Coquelin 	struct virtio_hw *hw = dev->data->dev_private;
15600c9d6620SMaxime Coquelin 	char old_rss_key[VIRTIO_NET_RSS_KEY_SIZE];
15610c9d6620SMaxime Coquelin 	uint32_t old_hash_types;
15620c9d6620SMaxime Coquelin 	uint16_t nb_queues;
15630c9d6620SMaxime Coquelin 	int ret;
15640c9d6620SMaxime Coquelin 
15650c9d6620SMaxime Coquelin 	if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
15660c9d6620SMaxime Coquelin 		return -ENOTSUP;
15670c9d6620SMaxime Coquelin 
15680c9d6620SMaxime Coquelin 	if (rss_conf->rss_hf & ~virtio_to_ethdev_rss_offloads(VIRTIO_NET_HASH_TYPE_MASK))
15690c9d6620SMaxime Coquelin 		return -EINVAL;
15700c9d6620SMaxime Coquelin 
15710c9d6620SMaxime Coquelin 	old_hash_types = hw->rss_hash_types;
15720c9d6620SMaxime Coquelin 	hw->rss_hash_types = ethdev_to_virtio_rss_offloads(rss_conf->rss_hf);
15730c9d6620SMaxime Coquelin 
15740c9d6620SMaxime Coquelin 	if (rss_conf->rss_key && rss_conf->rss_key_len) {
15750c9d6620SMaxime Coquelin 		if (rss_conf->rss_key_len != VIRTIO_NET_RSS_KEY_SIZE) {
15760c9d6620SMaxime Coquelin 			PMD_INIT_LOG(ERR, "Driver only supports %u RSS key length",
15770c9d6620SMaxime Coquelin 					VIRTIO_NET_RSS_KEY_SIZE);
15780c9d6620SMaxime Coquelin 			ret = -EINVAL;
15790c9d6620SMaxime Coquelin 			goto restore_types;
15800c9d6620SMaxime Coquelin 		}
15810c9d6620SMaxime Coquelin 		memcpy(old_rss_key, hw->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
15820c9d6620SMaxime Coquelin 		memcpy(hw->rss_key, rss_conf->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
15830c9d6620SMaxime Coquelin 	}
15840c9d6620SMaxime Coquelin 
15850c9d6620SMaxime Coquelin 	nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
15860c9d6620SMaxime Coquelin 	ret = virtio_set_multiple_queues_rss(dev, nb_queues);
15870c9d6620SMaxime Coquelin 	if (ret < 0) {
15880c9d6620SMaxime Coquelin 		PMD_INIT_LOG(ERR, "Failed to apply new RSS config to the device");
15890c9d6620SMaxime Coquelin 		goto restore_key;
15900c9d6620SMaxime Coquelin 	}
15910c9d6620SMaxime Coquelin 
15920c9d6620SMaxime Coquelin 	return 0;
15930c9d6620SMaxime Coquelin restore_key:
15940f7438e6SYunjian Wang 	if (rss_conf->rss_key && rss_conf->rss_key_len)
15950c9d6620SMaxime Coquelin 		memcpy(hw->rss_key, old_rss_key, VIRTIO_NET_RSS_KEY_SIZE);
15960c9d6620SMaxime Coquelin restore_types:
15970c9d6620SMaxime Coquelin 	hw->rss_hash_types = old_hash_types;
15980c9d6620SMaxime Coquelin 
15990c9d6620SMaxime Coquelin 	return ret;
16000c9d6620SMaxime Coquelin }
16010c9d6620SMaxime Coquelin 
16020c9d6620SMaxime Coquelin static int
16030c9d6620SMaxime Coquelin virtio_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
16040c9d6620SMaxime Coquelin 		struct rte_eth_rss_conf *rss_conf)
16050c9d6620SMaxime Coquelin {
16060c9d6620SMaxime Coquelin 	struct virtio_hw *hw = dev->data->dev_private;
16070c9d6620SMaxime Coquelin 
16080c9d6620SMaxime Coquelin 	if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
16090c9d6620SMaxime Coquelin 		return -ENOTSUP;
16100c9d6620SMaxime Coquelin 
16110c9d6620SMaxime Coquelin 	if (rss_conf->rss_key && rss_conf->rss_key_len >= VIRTIO_NET_RSS_KEY_SIZE)
16120c9d6620SMaxime Coquelin 		memcpy(rss_conf->rss_key, hw->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
16130c9d6620SMaxime Coquelin 	rss_conf->rss_key_len = VIRTIO_NET_RSS_KEY_SIZE;
16140c9d6620SMaxime Coquelin 	rss_conf->rss_hf = virtio_to_ethdev_rss_offloads(hw->rss_hash_types);
16150c9d6620SMaxime Coquelin 
16160c9d6620SMaxime Coquelin 	return 0;
16170c9d6620SMaxime Coquelin }
16180c9d6620SMaxime Coquelin 
16190c9d6620SMaxime Coquelin static int virtio_dev_rss_reta_update(struct rte_eth_dev *dev,
16200c9d6620SMaxime Coquelin 			 struct rte_eth_rss_reta_entry64 *reta_conf,
16210c9d6620SMaxime Coquelin 			 uint16_t reta_size)
16220c9d6620SMaxime Coquelin {
16230c9d6620SMaxime Coquelin 	struct virtio_hw *hw = dev->data->dev_private;
16240c9d6620SMaxime Coquelin 	uint16_t nb_queues;
16250c9d6620SMaxime Coquelin 	uint16_t old_reta[VIRTIO_NET_RSS_RETA_SIZE];
16260c9d6620SMaxime Coquelin 	int idx, pos, i, ret;
16270c9d6620SMaxime Coquelin 
16280c9d6620SMaxime Coquelin 	if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
16290c9d6620SMaxime Coquelin 		return -ENOTSUP;
16300c9d6620SMaxime Coquelin 
16310c9d6620SMaxime Coquelin 	if (reta_size != VIRTIO_NET_RSS_RETA_SIZE)
16320c9d6620SMaxime Coquelin 		return -EINVAL;
16330c9d6620SMaxime Coquelin 
16340c9d6620SMaxime Coquelin 	memcpy(old_reta, hw->rss_reta, sizeof(old_reta));
16350c9d6620SMaxime Coquelin 
16360c9d6620SMaxime Coquelin 	for (i = 0; i < reta_size; i++) {
16370c9d6620SMaxime Coquelin 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
16380c9d6620SMaxime Coquelin 		pos = i % RTE_ETH_RETA_GROUP_SIZE;
16390c9d6620SMaxime Coquelin 
16400c9d6620SMaxime Coquelin 		if (((reta_conf[idx].mask >> pos) & 0x1) == 0)
16410c9d6620SMaxime Coquelin 			continue;
16420c9d6620SMaxime Coquelin 
16430c9d6620SMaxime Coquelin 		hw->rss_reta[i] = reta_conf[idx].reta[pos];
16440c9d6620SMaxime Coquelin 	}
16450c9d6620SMaxime Coquelin 
16460c9d6620SMaxime Coquelin 	nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
16470c9d6620SMaxime Coquelin 	ret = virtio_set_multiple_queues_rss(dev, nb_queues);
16480c9d6620SMaxime Coquelin 	if (ret < 0) {
16490c9d6620SMaxime Coquelin 		PMD_INIT_LOG(ERR, "Failed to apply new RETA to the device");
16500c9d6620SMaxime Coquelin 		memcpy(hw->rss_reta, old_reta, sizeof(old_reta));
16510c9d6620SMaxime Coquelin 	}
16520c9d6620SMaxime Coquelin 
16530c9d6620SMaxime Coquelin 	hw->rss_rx_queues = dev->data->nb_rx_queues;
16540c9d6620SMaxime Coquelin 
16550c9d6620SMaxime Coquelin 	return ret;
16560c9d6620SMaxime Coquelin }
16570c9d6620SMaxime Coquelin 
16580c9d6620SMaxime Coquelin static int virtio_dev_rss_reta_query(struct rte_eth_dev *dev,
16590c9d6620SMaxime Coquelin 			 struct rte_eth_rss_reta_entry64 *reta_conf,
16600c9d6620SMaxime Coquelin 			 uint16_t reta_size)
16610c9d6620SMaxime Coquelin {
16620c9d6620SMaxime Coquelin 	struct virtio_hw *hw = dev->data->dev_private;
16630c9d6620SMaxime Coquelin 	int idx, i;
16640c9d6620SMaxime Coquelin 
16650c9d6620SMaxime Coquelin 	if (!virtio_with_feature(hw, VIRTIO_NET_F_RSS))
16660c9d6620SMaxime Coquelin 		return -ENOTSUP;
16670c9d6620SMaxime Coquelin 
16680c9d6620SMaxime Coquelin 	if (reta_size != VIRTIO_NET_RSS_RETA_SIZE)
16690c9d6620SMaxime Coquelin 		return -EINVAL;
16700c9d6620SMaxime Coquelin 
16710c9d6620SMaxime Coquelin 	for (i = 0; i < reta_size; i++) {
16720c9d6620SMaxime Coquelin 		idx = i / RTE_ETH_RETA_GROUP_SIZE;
16730c9d6620SMaxime Coquelin 		reta_conf[idx].reta[i % RTE_ETH_RETA_GROUP_SIZE] = hw->rss_reta[i];
16740c9d6620SMaxime Coquelin 	}
16750c9d6620SMaxime Coquelin 
16760c9d6620SMaxime Coquelin 	return 0;
16770c9d6620SMaxime Coquelin }
16780c9d6620SMaxime Coquelin 
16790c9d6620SMaxime Coquelin /*
16800c9d6620SMaxime Coquelin  * The default RSS hash key is the default key of the
16810c9d6620SMaxime Coquelin  * Intel IXGBE devices. The application can update it
16820c9d6620SMaxime Coquelin  * with any 40-byte key value.
16830c9d6620SMaxime Coquelin  */
16840c9d6620SMaxime Coquelin static uint8_t rss_intel_key[VIRTIO_NET_RSS_KEY_SIZE] = {
16850c9d6620SMaxime Coquelin 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
16860c9d6620SMaxime Coquelin 	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
16870c9d6620SMaxime Coquelin 	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
16880c9d6620SMaxime Coquelin 	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
16890c9d6620SMaxime Coquelin 	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
16900c9d6620SMaxime Coquelin };
16910c9d6620SMaxime Coquelin 
16920c9d6620SMaxime Coquelin static int
16930c9d6620SMaxime Coquelin virtio_dev_rss_init(struct rte_eth_dev *eth_dev)
16940c9d6620SMaxime Coquelin {
16950c9d6620SMaxime Coquelin 	struct virtio_hw *hw = eth_dev->data->dev_private;
16960c9d6620SMaxime Coquelin 	uint16_t nb_rx_queues = eth_dev->data->nb_rx_queues;
16970c9d6620SMaxime Coquelin 	struct rte_eth_rss_conf *rss_conf;
16980c9d6620SMaxime Coquelin 	int ret, i;
16990c9d6620SMaxime Coquelin 
17000c9d6620SMaxime Coquelin 	if (!nb_rx_queues) {
17010c9d6620SMaxime Coquelin 		PMD_INIT_LOG(ERR, "Cannot init RSS if no Rx queues");
17020c9d6620SMaxime Coquelin 		return -EINVAL;
17030c9d6620SMaxime Coquelin 	}
17040c9d6620SMaxime Coquelin 
17050c9d6620SMaxime Coquelin 	rss_conf = &eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
17060c9d6620SMaxime Coquelin 
17070c9d6620SMaxime Coquelin 	ret = virtio_dev_get_rss_config(hw, &hw->rss_hash_types);
17080c9d6620SMaxime Coquelin 	if (ret)
17090c9d6620SMaxime Coquelin 		return ret;
17100c9d6620SMaxime Coquelin 
17110c9d6620SMaxime Coquelin 	if (rss_conf->rss_hf) {
17120c9d6620SMaxime Coquelin 		/*  Ensure requested hash types are supported by the device */
17130c9d6620SMaxime Coquelin 		if (rss_conf->rss_hf & ~virtio_to_ethdev_rss_offloads(hw->rss_hash_types))
17140c9d6620SMaxime Coquelin 			return -EINVAL;
17150c9d6620SMaxime Coquelin 
17160c9d6620SMaxime Coquelin 		hw->rss_hash_types = ethdev_to_virtio_rss_offloads(rss_conf->rss_hf);
17170c9d6620SMaxime Coquelin 	}
17180c9d6620SMaxime Coquelin 
17190c9d6620SMaxime Coquelin 	if (!hw->rss_key) {
17200c9d6620SMaxime Coquelin 		/* Setup default RSS key if not already setup by the user */
17210c9d6620SMaxime Coquelin 		hw->rss_key = rte_malloc_socket("rss_key",
17220c9d6620SMaxime Coquelin 				VIRTIO_NET_RSS_KEY_SIZE, 0,
17230c9d6620SMaxime Coquelin 				eth_dev->device->numa_node);
17240c9d6620SMaxime Coquelin 		if (!hw->rss_key) {
17250c9d6620SMaxime Coquelin 			PMD_INIT_LOG(ERR, "Failed to allocate RSS key");
172655e19d06SBoleslav Stankevich 			return -ENOMEM;
17270c9d6620SMaxime Coquelin 		}
17280c9d6620SMaxime Coquelin 	}
17290c9d6620SMaxime Coquelin 
17300c9d6620SMaxime Coquelin 	if (rss_conf->rss_key && rss_conf->rss_key_len) {
17310c9d6620SMaxime Coquelin 		if (rss_conf->rss_key_len != VIRTIO_NET_RSS_KEY_SIZE) {
17320c9d6620SMaxime Coquelin 			PMD_INIT_LOG(ERR, "Driver only supports %u RSS key length",
17330c9d6620SMaxime Coquelin 					VIRTIO_NET_RSS_KEY_SIZE);
17340c9d6620SMaxime Coquelin 			return -EINVAL;
17350c9d6620SMaxime Coquelin 		}
17360c9d6620SMaxime Coquelin 		memcpy(hw->rss_key, rss_conf->rss_key, VIRTIO_NET_RSS_KEY_SIZE);
17370c9d6620SMaxime Coquelin 	} else {
17380c9d6620SMaxime Coquelin 		memcpy(hw->rss_key, rss_intel_key, VIRTIO_NET_RSS_KEY_SIZE);
17390c9d6620SMaxime Coquelin 	}
17400c9d6620SMaxime Coquelin 
17410c9d6620SMaxime Coquelin 	if (!hw->rss_reta) {
17420c9d6620SMaxime Coquelin 		/* Setup default RSS reta if not already setup by the user */
17430c9d6620SMaxime Coquelin 		hw->rss_reta = rte_zmalloc_socket("rss_reta",
17440c9d6620SMaxime Coquelin 				VIRTIO_NET_RSS_RETA_SIZE * sizeof(uint16_t), 0,
17450c9d6620SMaxime Coquelin 				eth_dev->device->numa_node);
17460c9d6620SMaxime Coquelin 		if (!hw->rss_reta) {
17470c9d6620SMaxime Coquelin 			PMD_INIT_LOG(ERR, "Failed to allocate RSS reta");
174855e19d06SBoleslav Stankevich 			return -ENOMEM;
17490c9d6620SMaxime Coquelin 		}
17500c9d6620SMaxime Coquelin 
17510c9d6620SMaxime Coquelin 		hw->rss_rx_queues = 0;
17520c9d6620SMaxime Coquelin 	}
17530c9d6620SMaxime Coquelin 
17540c9d6620SMaxime Coquelin 	/* Re-initialize the RSS reta if the number of RX queues has changed */
17550c9d6620SMaxime Coquelin 	if (hw->rss_rx_queues != nb_rx_queues) {
17560c9d6620SMaxime Coquelin 		for (i = 0; i < VIRTIO_NET_RSS_RETA_SIZE; i++)
17570c9d6620SMaxime Coquelin 			hw->rss_reta[i] = i % nb_rx_queues;
17580c9d6620SMaxime Coquelin 		hw->rss_rx_queues = nb_rx_queues;
17590c9d6620SMaxime Coquelin 	}
17600c9d6620SMaxime Coquelin 
17610c9d6620SMaxime Coquelin 	return 0;
17620c9d6620SMaxime Coquelin }
17630c9d6620SMaxime Coquelin 
17641357b4b3SIvan Dyukov #define DUPLEX_UNKNOWN   0xff
176560e6f470SOlivier Matz /* reset device and renegotiate features if needed */
1766198ab336SOlivier Matz static int
176760e6f470SOlivier Matz virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
17686c3169a3SBruce Richardson {
17696c3169a3SBruce Richardson 	struct virtio_hw *hw = eth_dev->data->dev_private;
17706c3169a3SBruce Richardson 	struct virtio_net_config *config;
17716c3169a3SBruce Richardson 	struct virtio_net_config local_config;
177269c80d4eSYuanhan Liu 	int ret;
17736c3169a3SBruce Richardson 
17746c3169a3SBruce Richardson 	/* Reset the device although not necessary at startup */
17759328e105SMaxime Coquelin 	virtio_reset(hw);
17766c3169a3SBruce Richardson 
17773669a1afSOlivier Matz 	if (hw->vqs) {
17783669a1afSOlivier Matz 		virtio_dev_free_mbufs(eth_dev);
17793669a1afSOlivier Matz 		virtio_free_queues(hw);
17803669a1afSOlivier Matz 	}
17813669a1afSOlivier Matz 
17826c3169a3SBruce Richardson 	/* Tell the host we've noticed this device. */
17839328e105SMaxime Coquelin 	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
17846c3169a3SBruce Richardson 
17856c3169a3SBruce Richardson 	/* Tell the host we know how to drive the device. */
17869328e105SMaxime Coquelin 	virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
1787b4f9a45aSMaxime Coquelin 	if (virtio_ethdev_negotiate_features(hw, req_features) < 0)
178855e19d06SBoleslav Stankevich 		return -EINVAL;
17896c3169a3SBruce Richardson 
1790b4f9a45aSMaxime Coquelin 	hw->weak_barriers = !virtio_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
17919230ab8dSIlya Maximets 
1792a5ed8448SMatt Peters 	/* If host does not support both status and MSI-X then disable LSC */
17936a504290SMaxime Coquelin 	if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS) && hw->intr_lsc)
1794198ab336SOlivier Matz 		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
1795a5ed8448SMatt Peters 	else
1796a5ed8448SMatt Peters 		eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
1797954ea115SStephen Hemminger 
17986c3169a3SBruce Richardson 	/* Setting up rx_header size for the device */
1799b4f9a45aSMaxime Coquelin 	if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
1800b4f9a45aSMaxime Coquelin 	    virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
1801b4f9a45aSMaxime Coquelin 	    virtio_with_packed_queue(hw))
18026c3169a3SBruce Richardson 		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
18036c3169a3SBruce Richardson 	else
18046c3169a3SBruce Richardson 		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
18056c3169a3SBruce Richardson 
18066c3169a3SBruce Richardson 	/* Copy the permanent MAC address into virtio_hw */
18076c3169a3SBruce Richardson 	virtio_get_hwaddr(hw);
1808538da7a1SOlivier Matz 	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
18096c3169a3SBruce Richardson 			&eth_dev->data->mac_addrs[0]);
18106c3169a3SBruce Richardson 	PMD_INIT_LOG(DEBUG,
1811c2c4f87bSAman Deep Singh 		     "PORT MAC: " RTE_ETHER_ADDR_PRT_FMT,
18126c3169a3SBruce Richardson 		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
18136c3169a3SBruce Richardson 		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
18146c3169a3SBruce Richardson 
18153c3c54cfSIvan Ilchenko 	hw->get_speed_via_feat = hw->speed == RTE_ETH_SPEED_NUM_UNKNOWN &&
18163c3c54cfSIvan Ilchenko 			     virtio_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX);
18173c3c54cfSIvan Ilchenko 	if (hw->get_speed_via_feat)
18183c3c54cfSIvan Ilchenko 		virtio_get_speed_duplex(eth_dev, NULL);
18191357b4b3SIvan Dyukov 	if (hw->duplex == DUPLEX_UNKNOWN)
1820295968d1SFerruh Yigit 		hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;
18211357b4b3SIvan Dyukov 	PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
18221357b4b3SIvan Dyukov 		hw->speed, hw->duplex);
1823b4f9a45aSMaxime Coquelin 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
18246c3169a3SBruce Richardson 		config = &local_config;
18256c3169a3SBruce Richardson 
18269328e105SMaxime Coquelin 		virtio_read_dev_config(hw,
18276d7740e2SChangchun Ouyang 			offsetof(struct virtio_net_config, mac),
18286d7740e2SChangchun Ouyang 			&config->mac, sizeof(config->mac));
18296d7740e2SChangchun Ouyang 
1830b4f9a45aSMaxime Coquelin 		if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
18319328e105SMaxime Coquelin 			virtio_read_dev_config(hw,
18326d7740e2SChangchun Ouyang 				offsetof(struct virtio_net_config, status),
18336d7740e2SChangchun Ouyang 				&config->status, sizeof(config->status));
18346c3169a3SBruce Richardson 		} else {
18356c3169a3SBruce Richardson 			PMD_INIT_LOG(DEBUG,
18366c3169a3SBruce Richardson 				     "VIRTIO_NET_F_STATUS is not supported");
18376c3169a3SBruce Richardson 			config->status = 0;
18386c3169a3SBruce Richardson 		}
18396c3169a3SBruce Richardson 
18400c9d6620SMaxime Coquelin 		if (virtio_with_feature(hw, VIRTIO_NET_F_MQ) ||
18410c9d6620SMaxime Coquelin 				virtio_with_feature(hw, VIRTIO_NET_F_RSS)) {
18429328e105SMaxime Coquelin 			virtio_read_dev_config(hw,
18436d7740e2SChangchun Ouyang 				offsetof(struct virtio_net_config, max_virtqueue_pairs),
18446d7740e2SChangchun Ouyang 				&config->max_virtqueue_pairs,
18456d7740e2SChangchun Ouyang 				sizeof(config->max_virtqueue_pairs));
18466c3169a3SBruce Richardson 		} else {
18476c3169a3SBruce Richardson 			PMD_INIT_LOG(DEBUG,
18480c9d6620SMaxime Coquelin 				     "Neither VIRTIO_NET_F_MQ nor VIRTIO_NET_F_RSS are supported");
18496c3169a3SBruce Richardson 			config->max_virtqueue_pairs = 1;
18506c3169a3SBruce Richardson 		}
18516c3169a3SBruce Richardson 
185245e4acd4SOlivier Matz 		hw->max_queue_pairs = config->max_virtqueue_pairs;
18536c3169a3SBruce Richardson 
1854b4f9a45aSMaxime Coquelin 		if (virtio_with_feature(hw, VIRTIO_NET_F_MTU)) {
18559328e105SMaxime Coquelin 			virtio_read_dev_config(hw,
185649d26d9eSMaxime Coquelin 				offsetof(struct virtio_net_config, mtu),
185749d26d9eSMaxime Coquelin 				&config->mtu,
185849d26d9eSMaxime Coquelin 				sizeof(config->mtu));
185949d26d9eSMaxime Coquelin 
186049d26d9eSMaxime Coquelin 			/*
186149d26d9eSMaxime Coquelin 			 * MTU value has already been checked at negotiation
186249d26d9eSMaxime Coquelin 			 * time, but check again in case it has changed since
186349d26d9eSMaxime Coquelin 			 * then, which should not happen.
186449d26d9eSMaxime Coquelin 			 */
186535b2d13fSOlivier Matz 			if (config->mtu < RTE_ETHER_MIN_MTU) {
186649d26d9eSMaxime Coquelin 				PMD_INIT_LOG(ERR, "invalid max MTU value (%u)",
186749d26d9eSMaxime Coquelin 						config->mtu);
186855e19d06SBoleslav Stankevich 				return -EINVAL;
186949d26d9eSMaxime Coquelin 			}
187049d26d9eSMaxime Coquelin 
187149d26d9eSMaxime Coquelin 			hw->max_mtu = config->mtu;
187249d26d9eSMaxime Coquelin 			/* Set initial MTU to maximum one supported by vhost */
187349d26d9eSMaxime Coquelin 			eth_dev->data->mtu = config->mtu;
187449d26d9eSMaxime Coquelin 
187549d26d9eSMaxime Coquelin 		} else {
187635b2d13fSOlivier Matz 			hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -
187749d26d9eSMaxime Coquelin 				VLAN_TAG_LEN - hw->vtnet_hdr_size;
187849d26d9eSMaxime Coquelin 		}
187949d26d9eSMaxime Coquelin 
18800c9d6620SMaxime Coquelin 		hw->rss_hash_types = 0;
188155e19d06SBoleslav Stankevich 		if (virtio_with_feature(hw, VIRTIO_NET_F_RSS)) {
188255e19d06SBoleslav Stankevich 			ret = virtio_dev_rss_init(eth_dev);
188355e19d06SBoleslav Stankevich 			if (ret < 0)
188455e19d06SBoleslav Stankevich 				return ret;
188555e19d06SBoleslav Stankevich 		}
18860c9d6620SMaxime Coquelin 
18876c3169a3SBruce Richardson 		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
18886c3169a3SBruce Richardson 				config->max_virtqueue_pairs);
18896c3169a3SBruce Richardson 		PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
18906c3169a3SBruce Richardson 		PMD_INIT_LOG(DEBUG,
1891c2c4f87bSAman Deep Singh 				"PORT MAC: " RTE_ETHER_ADDR_PRT_FMT,
18926c3169a3SBruce Richardson 				config->mac[0], config->mac[1],
18936c3169a3SBruce Richardson 				config->mac[2], config->mac[3],
18946c3169a3SBruce Richardson 				config->mac[4], config->mac[5]);
18956c3169a3SBruce Richardson 	} else {
189645e4acd4SOlivier Matz 		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
189745e4acd4SOlivier Matz 		hw->max_queue_pairs = 1;
189835b2d13fSOlivier Matz 		hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - RTE_ETHER_HDR_LEN -
1899240da8b2SZhike Wang 			VLAN_TAG_LEN - hw->vtnet_hdr_size;
19006c3169a3SBruce Richardson 	}
19016c3169a3SBruce Richardson 
190269c80d4eSYuanhan Liu 	ret = virtio_alloc_queues(eth_dev);
190369c80d4eSYuanhan Liu 	if (ret < 0)
190469c80d4eSYuanhan Liu 		return ret;
190526b683b4SJianfeng Tan 
190626b683b4SJianfeng Tan 	if (eth_dev->data->dev_conf.intr_conf.rxq) {
190755e19d06SBoleslav Stankevich 		ret = virtio_configure_intr(eth_dev);
190855e19d06SBoleslav Stankevich 		if (ret < 0) {
190926b683b4SJianfeng Tan 			PMD_INIT_LOG(ERR, "failed to configure interrupt");
19102b38151fSTiwei Bie 			virtio_free_queues(hw);
191155e19d06SBoleslav Stankevich 			return ret;
191226b683b4SJianfeng Tan 		}
191326b683b4SJianfeng Tan 	}
191426b683b4SJianfeng Tan 
1915efc3f842SWenwu Ma 	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
1916efc3f842SWenwu Ma 		/* Enable vector (0) for Link State Interrupt */
1917efc3f842SWenwu Ma 		if (VIRTIO_OPS(hw)->set_config_irq(hw, 0) ==
1918efc3f842SWenwu Ma 				VIRTIO_MSI_NO_VECTOR) {
1919efc3f842SWenwu Ma 			PMD_DRV_LOG(ERR, "failed to set config vector");
1920efc3f842SWenwu Ma 			return -EBUSY;
1921efc3f842SWenwu Ma 		}
1922efc3f842SWenwu Ma 
19239328e105SMaxime Coquelin 	virtio_reinit_complete(hw);
192469c80d4eSYuanhan Liu 
1925198ab336SOlivier Matz 	return 0;
1926198ab336SOlivier Matz }
1927198ab336SOlivier Matz 
19286d890f8aSYuanhan Liu /*
1929198ab336SOlivier Matz  * This function is based on probe() function in virtio_pci.c
1930198ab336SOlivier Matz  * It returns 0 on success.
1931198ab336SOlivier Matz  */
1932198ab336SOlivier Matz int
1933198ab336SOlivier Matz eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
1934198ab336SOlivier Matz {
1935198ab336SOlivier Matz 	struct virtio_hw *hw = eth_dev->data->dev_private;
1936295968d1SFerruh Yigit 	uint32_t speed = RTE_ETH_SPEED_NUM_UNKNOWN;
19374710e16aSMarvin Liu 	int vectorized = 0;
1938198ab336SOlivier Matz 	int ret;
1939198ab336SOlivier Matz 
1940c1ada9b6SHemant Agrawal 	if (sizeof(struct virtio_net_hdr_mrg_rxbuf) > RTE_PKTMBUF_HEADROOM) {
1941c1ada9b6SHemant Agrawal 		PMD_INIT_LOG(ERR,
1942c1ada9b6SHemant Agrawal 			"Not sufficient headroom required = %d, avail = %d",
1943c1ada9b6SHemant Agrawal 			(int)sizeof(struct virtio_net_hdr_mrg_rxbuf),
1944c1ada9b6SHemant Agrawal 			RTE_PKTMBUF_HEADROOM);
1945c1ada9b6SHemant Agrawal 
1946c1ada9b6SHemant Agrawal 		return -1;
1947c1ada9b6SHemant Agrawal 	}
1948198ab336SOlivier Matz 
1949198ab336SOlivier Matz 	eth_dev->dev_ops = &virtio_eth_dev_ops;
1950198ab336SOlivier Matz 
1951198ab336SOlivier Matz 	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
19524819eae8SOlivier Matz 		set_rxtx_funcs(eth_dev);
1953198ab336SOlivier Matz 		return 0;
1954198ab336SOlivier Matz 	}
1955512e27eeSMaxime Coquelin 
195636a7a2e7SMaxime Coquelin 	ret = virtio_dev_devargs_parse(eth_dev->device->devargs, &speed, &vectorized);
195749119e38SIvan Dyukov 	if (ret < 0)
195849119e38SIvan Dyukov 		return ret;
195949119e38SIvan Dyukov 	hw->speed = speed;
19601e9221aeSChenbo Xia 	hw->duplex = DUPLEX_UNKNOWN;
19617f468b2eSTiwei Bie 
1962198ab336SOlivier Matz 	/* Allocate memory for storing MAC addresses */
196335b2d13fSOlivier Matz 	eth_dev->data->mac_addrs = rte_zmalloc("virtio",
196435b2d13fSOlivier Matz 				VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN, 0);
1965198ab336SOlivier Matz 	if (eth_dev->data->mac_addrs == NULL) {
1966198ab336SOlivier Matz 		PMD_INIT_LOG(ERR,
1967198ab336SOlivier Matz 			"Failed to allocate %d bytes needed to store MAC addresses",
196835b2d13fSOlivier Matz 			VIRTIO_MAX_MAC_ADDRS * RTE_ETHER_ADDR_LEN);
1969198ab336SOlivier Matz 		return -ENOMEM;
1970198ab336SOlivier Matz 	}
1971198ab336SOlivier Matz 
19726ebbf410SXuan Ding 	rte_spinlock_init(&hw->state_lock);
19736ebbf410SXuan Ding 
1974d5284f0dSMaxime Coquelin 	if (vectorized) {
1975d5284f0dSMaxime Coquelin 		hw->use_vec_rx = 1;
1976d5284f0dSMaxime Coquelin 		hw->use_vec_tx = 1;
1977d5284f0dSMaxime Coquelin 	}
1978d5284f0dSMaxime Coquelin 
197960e6f470SOlivier Matz 	/* reset device and negotiate default features */
198096cb6711SOlivier Matz 	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
1981198ab336SOlivier Matz 	if (ret < 0)
19823484c8d8STiwei Bie 		goto err_virtio_init;
1983198ab336SOlivier Matz 
19844710e16aSMarvin Liu 	if (vectorized) {
1985b4f9a45aSMaxime Coquelin 		if (!virtio_with_packed_queue(hw)) {
1986d5284f0dSMaxime Coquelin 			hw->use_vec_tx = 0;
1987ccb10995SMarvin Liu 		} else {
1988d5284f0dSMaxime Coquelin #if !defined(CC_AVX512_SUPPORT) && !defined(RTE_ARCH_ARM)
1989d5284f0dSMaxime Coquelin 			hw->use_vec_rx = 0;
1990d5284f0dSMaxime Coquelin 			hw->use_vec_tx = 0;
19919ef38ddbSJoyce Kong 			PMD_DRV_LOG(INFO,
19929ef38ddbSJoyce Kong 				"building environment do not support packed ring vectorized");
1993ccb10995SMarvin Liu #endif
1994ccb10995SMarvin Liu 		}
19954710e16aSMarvin Liu 	}
19964710e16aSMarvin Liu 
19976e1d9c0cSMaxime Coquelin 	hw->opened = 1;
19987f468b2eSTiwei Bie 
19996c3169a3SBruce Richardson 	return 0;
200043d18765SPengzhen Liu 
20013484c8d8STiwei Bie err_virtio_init:
200243d18765SPengzhen Liu 	rte_free(eth_dev->data->mac_addrs);
20037dee8c79SAaron Conole 	eth_dev->data->mac_addrs = NULL;
200443d18765SPengzhen Liu 	return ret;
20056c3169a3SBruce Richardson }
20066c3169a3SBruce Richardson 
200749119e38SIvan Dyukov static uint32_t
200849119e38SIvan Dyukov virtio_dev_speed_capa_get(uint32_t speed)
200949119e38SIvan Dyukov {
201049119e38SIvan Dyukov 	switch (speed) {
2011295968d1SFerruh Yigit 	case RTE_ETH_SPEED_NUM_10G:
2012295968d1SFerruh Yigit 		return RTE_ETH_LINK_SPEED_10G;
2013295968d1SFerruh Yigit 	case RTE_ETH_SPEED_NUM_20G:
2014295968d1SFerruh Yigit 		return RTE_ETH_LINK_SPEED_20G;
2015295968d1SFerruh Yigit 	case RTE_ETH_SPEED_NUM_25G:
2016295968d1SFerruh Yigit 		return RTE_ETH_LINK_SPEED_25G;
2017295968d1SFerruh Yigit 	case RTE_ETH_SPEED_NUM_40G:
2018295968d1SFerruh Yigit 		return RTE_ETH_LINK_SPEED_40G;
2019295968d1SFerruh Yigit 	case RTE_ETH_SPEED_NUM_50G:
2020295968d1SFerruh Yigit 		return RTE_ETH_LINK_SPEED_50G;
2021295968d1SFerruh Yigit 	case RTE_ETH_SPEED_NUM_56G:
2022295968d1SFerruh Yigit 		return RTE_ETH_LINK_SPEED_56G;
2023295968d1SFerruh Yigit 	case RTE_ETH_SPEED_NUM_100G:
2024295968d1SFerruh Yigit 		return RTE_ETH_LINK_SPEED_100G;
2025295968d1SFerruh Yigit 	case RTE_ETH_SPEED_NUM_200G:
2026295968d1SFerruh Yigit 		return RTE_ETH_LINK_SPEED_200G;
2027a131d9ecSThomas Monjalon 	case RTE_ETH_SPEED_NUM_400G:
2028a131d9ecSThomas Monjalon 		return RTE_ETH_LINK_SPEED_400G;
202949119e38SIvan Dyukov 	default:
203049119e38SIvan Dyukov 		return 0;
203149119e38SIvan Dyukov 	}
203249119e38SIvan Dyukov }
203349119e38SIvan Dyukov 
20344710e16aSMarvin Liu static int vectorized_check_handler(__rte_unused const char *key,
20354710e16aSMarvin Liu 		const char *value, void *ret_val)
20364710e16aSMarvin Liu {
20371c1b35b5SChengwen Feng 	if (value == NULL || ret_val == NULL)
20381c1b35b5SChengwen Feng 		return -EINVAL;
20391c1b35b5SChengwen Feng 
20404710e16aSMarvin Liu 	if (strcmp(value, "1") == 0)
20414710e16aSMarvin Liu 		*(int *)ret_val = 1;
20424710e16aSMarvin Liu 	else
20434710e16aSMarvin Liu 		*(int *)ret_val = 0;
20444710e16aSMarvin Liu 
20454710e16aSMarvin Liu 	return 0;
20464710e16aSMarvin Liu }
204749119e38SIvan Dyukov 
204849119e38SIvan Dyukov #define VIRTIO_ARG_SPEED      "speed"
20494710e16aSMarvin Liu #define VIRTIO_ARG_VECTORIZED "vectorized"
205049119e38SIvan Dyukov 
2051440f03c2SXiao Wang static int
205249119e38SIvan Dyukov link_speed_handler(const char *key __rte_unused,
205349119e38SIvan Dyukov 		const char *value, void *ret_val)
205449119e38SIvan Dyukov {
205549119e38SIvan Dyukov 	uint32_t val;
205649119e38SIvan Dyukov 	if (!value || !ret_val)
205749119e38SIvan Dyukov 		return -EINVAL;
205849119e38SIvan Dyukov 	val = strtoul(value, NULL, 0);
205949119e38SIvan Dyukov 	/* validate input */
206049119e38SIvan Dyukov 	if (virtio_dev_speed_capa_get(val) == 0)
206149119e38SIvan Dyukov 		return -EINVAL;
206249119e38SIvan Dyukov 	*(uint32_t *)ret_val = val;
206349119e38SIvan Dyukov 
206449119e38SIvan Dyukov 	return 0;
206549119e38SIvan Dyukov }
206649119e38SIvan Dyukov 
206749119e38SIvan Dyukov 
206849119e38SIvan Dyukov static int
206936a7a2e7SMaxime Coquelin virtio_dev_devargs_parse(struct rte_devargs *devargs, uint32_t *speed, int *vectorized)
2070440f03c2SXiao Wang {
2071440f03c2SXiao Wang 	struct rte_kvargs *kvlist;
2072440f03c2SXiao Wang 	int ret = 0;
2073440f03c2SXiao Wang 
2074440f03c2SXiao Wang 	if (devargs == NULL)
2075440f03c2SXiao Wang 		return 0;
2076440f03c2SXiao Wang 
2077440f03c2SXiao Wang 	kvlist = rte_kvargs_parse(devargs->args, NULL);
207849119e38SIvan Dyukov 	if (kvlist == NULL) {
207949119e38SIvan Dyukov 		PMD_INIT_LOG(ERR, "error when parsing param");
2080440f03c2SXiao Wang 		return 0;
208149119e38SIvan Dyukov 	}
208236a7a2e7SMaxime Coquelin 
208349119e38SIvan Dyukov 	if (speed && rte_kvargs_count(kvlist, VIRTIO_ARG_SPEED) == 1) {
208449119e38SIvan Dyukov 		ret = rte_kvargs_process(kvlist,
208549119e38SIvan Dyukov 					VIRTIO_ARG_SPEED,
208649119e38SIvan Dyukov 					link_speed_handler, speed);
208749119e38SIvan Dyukov 		if (ret < 0) {
208849119e38SIvan Dyukov 			PMD_INIT_LOG(ERR, "Failed to parse %s",
208949119e38SIvan Dyukov 					VIRTIO_ARG_SPEED);
209049119e38SIvan Dyukov 			goto exit;
209149119e38SIvan Dyukov 		}
209249119e38SIvan Dyukov 	}
2093440f03c2SXiao Wang 
20944710e16aSMarvin Liu 	if (vectorized &&
20954710e16aSMarvin Liu 		rte_kvargs_count(kvlist, VIRTIO_ARG_VECTORIZED) == 1) {
20964710e16aSMarvin Liu 		ret = rte_kvargs_process(kvlist,
20974710e16aSMarvin Liu 				VIRTIO_ARG_VECTORIZED,
20984710e16aSMarvin Liu 				vectorized_check_handler, vectorized);
20994710e16aSMarvin Liu 		if (ret < 0) {
21004710e16aSMarvin Liu 			PMD_INIT_LOG(ERR, "Failed to parse %s",
21014710e16aSMarvin Liu 					VIRTIO_ARG_VECTORIZED);
21024710e16aSMarvin Liu 			goto exit;
21034710e16aSMarvin Liu 		}
21044710e16aSMarvin Liu 	}
21054710e16aSMarvin Liu 
2106440f03c2SXiao Wang exit:
2107440f03c2SXiao Wang 	rte_kvargs_free(kvlist);
2108440f03c2SXiao Wang 	return ret;
2109440f03c2SXiao Wang }
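/*
 * Usage sketch (editorial addition, not upstream code): the "speed" and
 * "vectorized" keys parsed above are passed as ethdev devargs, e.g. on the
 * EAL command line (the PCI address below is a placeholder):
 *
 *	-a 0000:00:04.0,speed=10000,vectorized=1
 *
 * "speed" must map to a known link speed (see virtio_dev_speed_capa_get()),
 * otherwise link_speed_handler() rejects it with -EINVAL; "vectorized=1"
 * requests the vectorized Rx/Tx paths, subject to the checks in
 * eth_virtio_dev_init() and virtio_dev_configure().
 */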
2110440f03c2SXiao Wang 
21116e1d9c0cSMaxime Coquelin static uint8_t
2112db8d6790SMaxime Coquelin rx_offload_enabled(struct virtio_hw *hw)
2113db8d6790SMaxime Coquelin {
2114b4f9a45aSMaxime Coquelin 	return virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
2115b4f9a45aSMaxime Coquelin 		virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
2116b4f9a45aSMaxime Coquelin 		virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
2117db8d6790SMaxime Coquelin }
2118db8d6790SMaxime Coquelin 
21196e1d9c0cSMaxime Coquelin static uint8_t
2120db8d6790SMaxime Coquelin tx_offload_enabled(struct virtio_hw *hw)
2121db8d6790SMaxime Coquelin {
2122b4f9a45aSMaxime Coquelin 	return virtio_with_feature(hw, VIRTIO_NET_F_CSUM) ||
2123b4f9a45aSMaxime Coquelin 		virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
2124b4f9a45aSMaxime Coquelin 		virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
2125db8d6790SMaxime Coquelin }
2126db8d6790SMaxime Coquelin 
21276c3169a3SBruce Richardson /*
21286c3169a3SBruce Richardson  * Configure virtio device
21296c3169a3SBruce Richardson  * It returns 0 on success.
21306c3169a3SBruce Richardson  */
21316c3169a3SBruce Richardson static int
21326c3169a3SBruce Richardson virtio_dev_configure(struct rte_eth_dev *dev)
21336c3169a3SBruce Richardson {
21346c3169a3SBruce Richardson 	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
21354174a7b5SMaxime Coquelin 	const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
21366c3169a3SBruce Richardson 	struct virtio_hw *hw = dev->data->dev_private;
213735b2d13fSOlivier Matz 	uint32_t ether_hdr_len = RTE_ETHER_HDR_LEN + VLAN_TAG_LEN +
21388b90e435SJens Freimann 		hw->vtnet_hdr_size;
21399c7ce8bdSTiwei Bie 	uint64_t rx_offloads = rxmode->offloads;
21404174a7b5SMaxime Coquelin 	uint64_t tx_offloads = txmode->offloads;
2141ec9f3d12SOlivier Matz 	uint64_t req_features;
2142e7b9d1d2SJiayu Hu 	int ret;
21436c3169a3SBruce Richardson 
21446c3169a3SBruce Richardson 	PMD_INIT_LOG(DEBUG, "configure");
2145ec9f3d12SOlivier Matz 	req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
21464dab342bSStephen Hemminger 
21470c9d6620SMaxime Coquelin 	if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE && rxmode->mq_mode != RTE_ETH_MQ_RX_RSS) {
214813b3137fSDilshod Urazov 		PMD_DRV_LOG(ERR,
214913b3137fSDilshod Urazov 			"Unsupported Rx multi queue mode %d",
215013b3137fSDilshod Urazov 			rxmode->mq_mode);
215113b3137fSDilshod Urazov 		return -EINVAL;
215213b3137fSDilshod Urazov 	}
215313b3137fSDilshod Urazov 
2154295968d1SFerruh Yigit 	if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
21559fc963acSAndrew Rybchenko 		PMD_DRV_LOG(ERR,
21569fc963acSAndrew Rybchenko 			"Unsupported Tx multi queue mode %d",
21579fc963acSAndrew Rybchenko 			txmode->mq_mode);
21589fc963acSAndrew Rybchenko 		return -EINVAL;
21599fc963acSAndrew Rybchenko 	}
21609fc963acSAndrew Rybchenko 
2161e7b9d1d2SJiayu Hu 	if (dev->data->dev_conf.intr_conf.rxq) {
2162e7b9d1d2SJiayu Hu 		ret = virtio_init_device(dev, hw->req_guest_features);
2163e7b9d1d2SJiayu Hu 		if (ret < 0)
2164e7b9d1d2SJiayu Hu 			return ret;
2165e7b9d1d2SJiayu Hu 	}
2166e7b9d1d2SJiayu Hu 
21670c9d6620SMaxime Coquelin 	if (rxmode->mq_mode == RTE_ETH_MQ_RX_RSS)
21680c9d6620SMaxime Coquelin 		req_features |= (1ULL << VIRTIO_NET_F_RSS);
21690c9d6620SMaxime Coquelin 
21701bb4a528SFerruh Yigit 	if (rxmode->mtu > hw->max_mtu)
21718b90e435SJens Freimann 		req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
21728b90e435SJens Freimann 
21731bb4a528SFerruh Yigit 	hw->max_rx_pkt_len = ether_hdr_len + rxmode->mtu;
21744e8169ebSIvan Ilchenko 
2175295968d1SFerruh Yigit 	if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
2176295968d1SFerruh Yigit 			   RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
2177d67d86ceSOlivier Matz 		req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
2178d67d86ceSOlivier Matz 
2179295968d1SFerruh Yigit 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
2180ec9f3d12SOlivier Matz 		req_features |=
2181ec9f3d12SOlivier Matz 			(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
2182ec9f3d12SOlivier Matz 			(1ULL << VIRTIO_NET_F_GUEST_TSO6);
21836c3169a3SBruce Richardson 
2184295968d1SFerruh Yigit 	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
2185295968d1SFerruh Yigit 			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
21864174a7b5SMaxime Coquelin 		req_features |= (1ULL << VIRTIO_NET_F_CSUM);
21874174a7b5SMaxime Coquelin 
2188295968d1SFerruh Yigit 	if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)
21894174a7b5SMaxime Coquelin 		req_features |=
21904174a7b5SMaxime Coquelin 			(1ULL << VIRTIO_NET_F_HOST_TSO4) |
21914174a7b5SMaxime Coquelin 			(1ULL << VIRTIO_NET_F_HOST_TSO6);
21924174a7b5SMaxime Coquelin 
2193ec9f3d12SOlivier Matz 	/* if request features changed, reinit the device */
2194ec9f3d12SOlivier Matz 	if (req_features != hw->req_guest_features) {
2195ec9f3d12SOlivier Matz 		ret = virtio_init_device(dev, req_features);
2196ec9f3d12SOlivier Matz 		if (ret < 0)
2197ec9f3d12SOlivier Matz 			return ret;
2198ec9f3d12SOlivier Matz 	}
2199ec9f3d12SOlivier Matz 
220052bd03e9SAlexander Chernavin 	/* if queues are not allocated, reinit the device */
220152bd03e9SAlexander Chernavin 	if (hw->vqs == NULL) {
220252bd03e9SAlexander Chernavin 		ret = virtio_init_device(dev, hw->req_guest_features);
220352bd03e9SAlexander Chernavin 		if (ret < 0)
220452bd03e9SAlexander Chernavin 			return ret;
220552bd03e9SAlexander Chernavin 	}
220652bd03e9SAlexander Chernavin 
22070c9d6620SMaxime Coquelin 	if ((rxmode->mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) &&
22080c9d6620SMaxime Coquelin 			!virtio_with_feature(hw, VIRTIO_NET_F_RSS)) {
22090c9d6620SMaxime Coquelin 		PMD_DRV_LOG(ERR, "RSS support requested but not supported by the device");
22100c9d6620SMaxime Coquelin 		return -ENOTSUP;
22110c9d6620SMaxime Coquelin 	}
22120c9d6620SMaxime Coquelin 
2213295968d1SFerruh Yigit 	if ((rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
2214295968d1SFerruh Yigit 			    RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) &&
2215b4f9a45aSMaxime Coquelin 		!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
221678fd97c3SOlivier Matz 		PMD_DRV_LOG(ERR,
2217d67d86ceSOlivier Matz 			"rx checksum not available on this host");
2218d67d86ceSOlivier Matz 		return -ENOTSUP;
2219d67d86ceSOlivier Matz 	}
2220d67d86ceSOlivier Matz 
2221295968d1SFerruh Yigit 	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
2222b4f9a45aSMaxime Coquelin 		(!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
2223b4f9a45aSMaxime Coquelin 		 !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
222478fd97c3SOlivier Matz 		PMD_DRV_LOG(ERR,
2225ec9f3d12SOlivier Matz 			"Large Receive Offload not available on this host");
222686d59b21SOlivier Matz 		return -ENOTSUP;
222786d59b21SOlivier Matz 	}
222886d59b21SOlivier Matz 
222969c80d4eSYuanhan Liu 	/* start control queue */
2230b4f9a45aSMaxime Coquelin 	if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
223145e4acd4SOlivier Matz 		virtio_dev_cq_start(dev);
223245e4acd4SOlivier Matz 
2233295968d1SFerruh Yigit 	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
22349c7ce8bdSTiwei Bie 		hw->vlan_strip = 1;
22356c3169a3SBruce Richardson 
2236295968d1SFerruh Yigit 	hw->rx_ol_scatter = (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
22374e8169ebSIvan Ilchenko 
2238295968d1SFerruh Yigit 	if ((rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
2239b4f9a45aSMaxime Coquelin 			!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
224078fd97c3SOlivier Matz 		PMD_DRV_LOG(ERR,
22416c3169a3SBruce Richardson 			    "vlan filtering not available on this host");
22426c3169a3SBruce Richardson 		return -ENOTSUP;
22436c3169a3SBruce Richardson 	}
22446c3169a3SBruce Richardson 
2245db8d6790SMaxime Coquelin 	hw->has_tx_offload = tx_offload_enabled(hw);
2246db8d6790SMaxime Coquelin 	hw->has_rx_offload = rx_offload_enabled(hw);
2247db8d6790SMaxime Coquelin 
2248b4f9a45aSMaxime Coquelin 	if (virtio_with_packed_queue(hw)) {
2249ccb10995SMarvin Liu #if defined(RTE_ARCH_X86_64) && defined(CC_AVX512_SUPPORT)
2250ccb10995SMarvin Liu 		if ((hw->use_vec_rx || hw->use_vec_tx) &&
2251ccb10995SMarvin Liu 		    (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ||
2252b4f9a45aSMaxime Coquelin 		     !virtio_with_feature(hw, VIRTIO_F_IN_ORDER) ||
2253b4f9a45aSMaxime Coquelin 		     !virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
22547566f28aSCiara Power 		     rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)) {
2255ccb10995SMarvin Liu 			PMD_DRV_LOG(INFO,
2256ccb10995SMarvin Liu 				"disabled packed ring vectorized path for requirements not met");
2257ccb10995SMarvin Liu 			hw->use_vec_rx = 0;
2258ccb10995SMarvin Liu 			hw->use_vec_tx = 0;
2259ccb10995SMarvin Liu 		}
22609ef38ddbSJoyce Kong #elif defined(RTE_ARCH_ARM)
22619ef38ddbSJoyce Kong 		if ((hw->use_vec_rx || hw->use_vec_tx) &&
22629ef38ddbSJoyce Kong 		    (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON) ||
2263b4f9a45aSMaxime Coquelin 		     !virtio_with_feature(hw, VIRTIO_F_IN_ORDER) ||
2264b4f9a45aSMaxime Coquelin 		     !virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
22659ef38ddbSJoyce Kong 		     rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)) {
22669ef38ddbSJoyce Kong 			PMD_DRV_LOG(INFO,
22679ef38ddbSJoyce Kong 				"disabled packed ring vectorized path for requirements not met");
22689ef38ddbSJoyce Kong 			hw->use_vec_rx = 0;
22699ef38ddbSJoyce Kong 			hw->use_vec_tx = 0;
22709ef38ddbSJoyce Kong 		}
2271ccb10995SMarvin Liu #else
2272ccb10995SMarvin Liu 		hw->use_vec_rx = 0;
2273ccb10995SMarvin Liu 		hw->use_vec_tx = 0;
2274ccb10995SMarvin Liu #endif
2275ccb10995SMarvin Liu 
2276ccb10995SMarvin Liu 		if (hw->use_vec_rx) {
2277b4f9a45aSMaxime Coquelin 			if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
2278ccb10995SMarvin Liu 				PMD_DRV_LOG(INFO,
2279ccb10995SMarvin Liu 					"disabled packed ring vectorized rx for mrg_rxbuf enabled");
2280ccb10995SMarvin Liu 				hw->use_vec_rx = 0;
2281ccb10995SMarvin Liu 			}
2282ccb10995SMarvin Liu 
2283295968d1SFerruh Yigit 			if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
2284ccb10995SMarvin Liu 				PMD_DRV_LOG(INFO,
2285ccb10995SMarvin Liu 					"disabled packed ring vectorized rx for TCP_LRO enabled");
2286ccb10995SMarvin Liu 				hw->use_vec_rx = 0;
2287ccb10995SMarvin Liu 			}
2288ccb10995SMarvin Liu 		}
2289ccb10995SMarvin Liu 	} else {
2290b4f9a45aSMaxime Coquelin 		if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER)) {
22918f3bd7e8SMarvin Liu 			hw->use_inorder_tx = 1;
22928f3bd7e8SMarvin Liu 			hw->use_inorder_rx = 1;
22934710e16aSMarvin Liu 			hw->use_vec_rx = 0;
22948f3bd7e8SMarvin Liu 		}
22958f3bd7e8SMarvin Liu 
2296ccb10995SMarvin Liu 		if (hw->use_vec_rx) {
2297e9b97392SRuifeng Wang #if defined RTE_ARCH_ARM
229809649363SOlivier Matz 			if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
2299ccb10995SMarvin Liu 				PMD_DRV_LOG(INFO,
2300ccb10995SMarvin Liu 					"disabled split ring vectorized path for requirements not met");
23014710e16aSMarvin Liu 				hw->use_vec_rx = 0;
230209649363SOlivier Matz 			}
23034819eae8SOlivier Matz #endif
2304b4f9a45aSMaxime Coquelin 			if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
2305ccb10995SMarvin Liu 				PMD_DRV_LOG(INFO,
2306ccb10995SMarvin Liu 					"disabled split ring vectorized rx for mrg_rxbuf enabled");
23074710e16aSMarvin Liu 				hw->use_vec_rx = 0;
230809649363SOlivier Matz 			}
23094819eae8SOlivier Matz 
2310295968d1SFerruh Yigit 			if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
2311295968d1SFerruh Yigit 					   RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
2312295968d1SFerruh Yigit 					   RTE_ETH_RX_OFFLOAD_TCP_LRO |
2313295968d1SFerruh Yigit 					   RTE_ETH_RX_OFFLOAD_VLAN_STRIP)) {
2314ccb10995SMarvin Liu 				PMD_DRV_LOG(INFO,
2315ccb10995SMarvin Liu 					"disabled split ring vectorized rx for offloading enabled");
23164710e16aSMarvin Liu 				hw->use_vec_rx = 0;
2317ccb10995SMarvin Liu 			}
23187566f28aSCiara Power 
23197566f28aSCiara Power 			if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) {
23207566f28aSCiara Power 				PMD_DRV_LOG(INFO,
23217566f28aSCiara Power 					"disabled split ring vectorized rx, max SIMD bitwidth too low");
23227566f28aSCiara Power 				hw->use_vec_rx = 0;
23237566f28aSCiara Power 			}
2324ccb10995SMarvin Liu 		}
2325ccb10995SMarvin Liu 	}
232616e48c9eSOlivier Matz 
23276c3169a3SBruce Richardson 	return 0;
23286c3169a3SBruce Richardson }
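/*
 * Usage sketch (editorial addition, assuming a standard ethdev application):
 * the offload flags checked above come from the configuration the application
 * passes to rte_eth_dev_configure(), e.g.:
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	conf.rxmode.offloads = RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
 *			       RTE_ETH_RX_OFFLOAD_TCP_LRO;
 *	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * port_id, nb_rxq and nb_txq are application-side placeholders. The PMD then
 * folds the requested offloads into req_features and reinitializes the device
 * if they differ from the features negotiated at init time.
 */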
23296c3169a3SBruce Richardson 
23306c3169a3SBruce Richardson 
23316c3169a3SBruce Richardson static int
23326c3169a3SBruce Richardson virtio_dev_start(struct rte_eth_dev *dev)
23336c3169a3SBruce Richardson {
23346c3169a3SBruce Richardson 	uint16_t nb_queues, i;
23353169550fSMaxime Coquelin 	struct virtqueue *vq;
2336663c7679SYuanhan Liu 	struct virtio_hw *hw = dev->data->dev_private;
2337efc83a1eSOlivier Matz 	int ret;
2338efc83a1eSOlivier Matz 
2339efc83a1eSOlivier Matz 	/* Finish the initialization of the queues */
2340efc83a1eSOlivier Matz 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
2341efc83a1eSOlivier Matz 		ret = virtio_dev_rx_queue_setup_finish(dev, i);
2342efc83a1eSOlivier Matz 		if (ret < 0)
2343efc83a1eSOlivier Matz 			return ret;
2344efc83a1eSOlivier Matz 	}
2345efc83a1eSOlivier Matz 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
2346efc83a1eSOlivier Matz 		ret = virtio_dev_tx_queue_setup_finish(dev, i);
2347efc83a1eSOlivier Matz 		if (ret < 0)
2348efc83a1eSOlivier Matz 			return ret;
2349efc83a1eSOlivier Matz 	}
23506c3169a3SBruce Richardson 
23516c3169a3SBruce Richardson 	/* check if lsc interrupt feature is enabled */
2352954ea115SStephen Hemminger 	if (dev->data->dev_conf.intr_conf.lsc) {
235362a785a6SJianfeng Tan 		if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
23546c3169a3SBruce Richardson 			PMD_DRV_LOG(ERR, "link status not supported by host");
23556c3169a3SBruce Richardson 			return -ENOTSUP;
23566c3169a3SBruce Richardson 		}
2357349a447bSJianfeng Tan 	}
2358349a447bSJianfeng Tan 
23597be78d02SJosh Soref 	/* Enable uio/vfio intr/eventfd mapping: this was already done in
2360349a447bSJianfeng Tan 	 * device configure, but it could have been unmapped when the device
2361349a447bSJianfeng Tan 	 * was stopped.
2362349a447bSJianfeng Tan 	 */
2363349a447bSJianfeng Tan 	if (dev->data->dev_conf.intr_conf.lsc ||
2364349a447bSJianfeng Tan 	    dev->data->dev_conf.intr_conf.rxq) {
2365fe19d49cSZhiyong Yang 		virtio_intr_disable(dev);
23666c3169a3SBruce Richardson 
2367f9b0d190SLuca Boccassi 		/* Setup interrupt callback  */
2368f9b0d190SLuca Boccassi 		if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
2369f9b0d190SLuca Boccassi 			rte_intr_callback_register(dev->intr_handle,
2370f9b0d190SLuca Boccassi 						   virtio_interrupt_handler,
2371f9b0d190SLuca Boccassi 						   dev);
2372f9b0d190SLuca Boccassi 
2373fe19d49cSZhiyong Yang 		if (virtio_intr_enable(dev) < 0) {
23746c3169a3SBruce Richardson 			PMD_DRV_LOG(ERR, "interrupt enable failed");
23756c3169a3SBruce Richardson 			return -EIO;
23766c3169a3SBruce Richardson 		}
23776c3169a3SBruce Richardson 	}
23786c3169a3SBruce Richardson 
23796c3169a3SBruce Richardson 	/* Notify the backend.
23806c3169a3SBruce Richardson 	 * Otherwise the tap backend might already have stopped its queue due to
23816c3169a3SBruce Richardson 	 * fullness and the vhost backend would have no chance to be woken up.
23826c3169a3SBruce Richardson 	 */
238360d4a353SYuanhan Liu 	nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
2384663c7679SYuanhan Liu 	if (hw->max_queue_pairs > 1) {
23856c3169a3SBruce Richardson 		if (virtio_set_multiple_queues(dev, nb_queues) != 0)
23866c3169a3SBruce Richardson 			return -EINVAL;
23876c3169a3SBruce Richardson 	}
23886c3169a3SBruce Richardson 
2389974f216bSOlivier Matz 	PMD_INIT_LOG(DEBUG, "nb_queues=%u (port=%u)", nb_queues,
2390974f216bSOlivier Matz 		     dev->data->port_id);
23916c3169a3SBruce Richardson 
239260d4a353SYuanhan Liu 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
23933169550fSMaxime Coquelin 		vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
2394d8227497STiwei Bie 		/* Flush the old packets */
23953169550fSMaxime Coquelin 		virtqueue_rxvq_flush(vq);
23963169550fSMaxime Coquelin 		virtqueue_notify(vq);
239701ad44fdSHuawei Xie 	}
23986c3169a3SBruce Richardson 
2399924da8f1SSteven Luong 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
24003169550fSMaxime Coquelin 		vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
24013169550fSMaxime Coquelin 		virtqueue_notify(vq);
2402924da8f1SSteven Luong 	}
2403924da8f1SSteven Luong 
2404974f216bSOlivier Matz 	PMD_INIT_LOG(DEBUG, "Notified backend at initialization (port=%u)",
2405974f216bSOlivier Matz 		     dev->data->port_id);
24066c3169a3SBruce Richardson 
240701ad44fdSHuawei Xie 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
24083169550fSMaxime Coquelin 		vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
24093169550fSMaxime Coquelin 		VIRTQUEUE_DUMP(vq);
241001ad44fdSHuawei Xie 	}
24116c3169a3SBruce Richardson 
241201ad44fdSHuawei Xie 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
24133169550fSMaxime Coquelin 		vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
24143169550fSMaxime Coquelin 		VIRTQUEUE_DUMP(vq);
241501ad44fdSHuawei Xie 	}
241658d9fe40SJianfeng Tan 
24174819eae8SOlivier Matz 	set_rxtx_funcs(dev);
24186e1d9c0cSMaxime Coquelin 	hw->started = 1;
24196c3169a3SBruce Richardson 
24200170be6fSJie Hai 	for (i = 0; i < dev->data->nb_rx_queues; i++)
24210170be6fSJie Hai 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
24220170be6fSJie Hai 	for (i = 0; i < dev->data->nb_tx_queues; i++)
24230170be6fSJie Hai 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
24240170be6fSJie Hai 
242558d9fe40SJianfeng Tan 	/* Initialize Link state */
242658d9fe40SJianfeng Tan 	virtio_dev_link_update(dev, 0);
242758d9fe40SJianfeng Tan 
24286c3169a3SBruce Richardson 	return 0;
24296c3169a3SBruce Richardson }
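/*
 * Usage sketch (editorial addition): virtio_dev_start() is reached through the
 * generic ethdev start sequence; a minimal application flow could look like
 * the following, where mp is an rte_mempool created by the caller and port_id
 * is a placeholder:
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mp);
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 *
 * rte_eth_dev_start() ends up calling this function, which finishes queue
 * setup, enables interrupts if requested and notifies the backend.
 */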
24306c3169a3SBruce Richardson 
24316c3169a3SBruce Richardson static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
24326c3169a3SBruce Richardson {
2433bdb32afbSOlivier Matz 	struct virtio_hw *hw = dev->data->dev_private;
2434bdb32afbSOlivier Matz 	uint16_t nr_vq = virtio_get_nr_vq(hw);
2435bdb32afbSOlivier Matz 	const char *type __rte_unused;
2436bdb32afbSOlivier Matz 	unsigned int i, mbuf_num = 0;
2437bdb32afbSOlivier Matz 	struct virtqueue *vq;
24386c3169a3SBruce Richardson 	struct rte_mbuf *buf;
2439bdb32afbSOlivier Matz 	int queue_type;
24406c3169a3SBruce Richardson 
2441b87c0648SDavid Harton 	if (hw->vqs == NULL)
2442b87c0648SDavid Harton 		return;
2443b87c0648SDavid Harton 
2444bdb32afbSOlivier Matz 	for (i = 0; i < nr_vq; i++) {
2445bdb32afbSOlivier Matz 		vq = hw->vqs[i];
2446bdb32afbSOlivier Matz 		if (!vq)
2447bdb32afbSOlivier Matz 			continue;
244801ad44fdSHuawei Xie 
2449bdb32afbSOlivier Matz 		queue_type = virtio_get_queue_type(hw, i);
2450bdb32afbSOlivier Matz 		if (queue_type == VTNET_RQ)
2451bdb32afbSOlivier Matz 			type = "rxq";
2452bdb32afbSOlivier Matz 		else if (queue_type == VTNET_TQ)
2453bdb32afbSOlivier Matz 			type = "txq";
2454bdb32afbSOlivier Matz 		else
24553669a1afSOlivier Matz 			continue;
24563669a1afSOlivier Matz 
24576c3169a3SBruce Richardson 		PMD_INIT_LOG(DEBUG,
2458bdb32afbSOlivier Matz 			"Before freeing %s[%d] used and unused buf",
2459bdb32afbSOlivier Matz 			type, i);
2460bdb32afbSOlivier Matz 		VIRTQUEUE_DUMP(vq);
24616c3169a3SBruce Richardson 
2462727411f5SOlivier Matz 		while ((buf = virtqueue_detach_unused(vq)) != NULL) {
24636c3169a3SBruce Richardson 			rte_pktmbuf_free(buf);
24646c3169a3SBruce Richardson 			mbuf_num++;
24656c3169a3SBruce Richardson 		}
24666c3169a3SBruce Richardson 
24676c3169a3SBruce Richardson 		PMD_INIT_LOG(DEBUG,
2468bdb32afbSOlivier Matz 			"After freeing %s[%d] used and unused buf",
2469bdb32afbSOlivier Matz 			type, i);
2470bdb32afbSOlivier Matz 		VIRTQUEUE_DUMP(vq);
24716c3169a3SBruce Richardson 	}
24726c3169a3SBruce Richardson 
2473bdb32afbSOlivier Matz 	PMD_INIT_LOG(DEBUG, "%d mbufs freed", mbuf_num);
24746c3169a3SBruce Richardson }
24756c3169a3SBruce Richardson 
24769de76dfbSIvan Ilchenko static void
24779de76dfbSIvan Ilchenko virtio_tx_completed_cleanup(struct rte_eth_dev *dev)
24789de76dfbSIvan Ilchenko {
24799de76dfbSIvan Ilchenko 	struct virtio_hw *hw = dev->data->dev_private;
24809de76dfbSIvan Ilchenko 	struct virtqueue *vq;
24819de76dfbSIvan Ilchenko 	int qidx;
24829de76dfbSIvan Ilchenko 	void (*xmit_cleanup)(struct virtqueue *vq, uint16_t nb_used);
24839de76dfbSIvan Ilchenko 
24849de76dfbSIvan Ilchenko 	if (virtio_with_packed_queue(hw)) {
24859de76dfbSIvan Ilchenko 		if (hw->use_vec_tx)
24869de76dfbSIvan Ilchenko 			xmit_cleanup = &virtio_xmit_cleanup_inorder_packed;
24879de76dfbSIvan Ilchenko 		else if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
24889de76dfbSIvan Ilchenko 			xmit_cleanup = &virtio_xmit_cleanup_inorder_packed;
24899de76dfbSIvan Ilchenko 		else
24909de76dfbSIvan Ilchenko 			xmit_cleanup = &virtio_xmit_cleanup_normal_packed;
24919de76dfbSIvan Ilchenko 	} else {
24929de76dfbSIvan Ilchenko 		if (hw->use_inorder_tx)
24939de76dfbSIvan Ilchenko 			xmit_cleanup = &virtio_xmit_cleanup_inorder;
24949de76dfbSIvan Ilchenko 		else
24959de76dfbSIvan Ilchenko 			xmit_cleanup = &virtio_xmit_cleanup;
24969de76dfbSIvan Ilchenko 	}
24979de76dfbSIvan Ilchenko 
24989de76dfbSIvan Ilchenko 	for (qidx = 0; qidx < hw->max_queue_pairs; qidx++) {
24999de76dfbSIvan Ilchenko 		vq = hw->vqs[2 * qidx + VTNET_SQ_TQ_QUEUE_IDX];
25009de76dfbSIvan Ilchenko 		if (vq != NULL)
25019de76dfbSIvan Ilchenko 			xmit_cleanup(vq, virtqueue_nused(vq));
25029de76dfbSIvan Ilchenko 	}
25039de76dfbSIvan Ilchenko }
25049de76dfbSIvan Ilchenko 
25056c3169a3SBruce Richardson /*
25066c3169a3SBruce Richardson  * Stop device: disable interrupt and mark link down
25076c3169a3SBruce Richardson  */
250836a7a2e7SMaxime Coquelin int
25096c3169a3SBruce Richardson virtio_dev_stop(struct rte_eth_dev *dev)
25106c3169a3SBruce Richardson {
2511aa9f0606SYuanhan Liu 	struct virtio_hw *hw = dev->data->dev_private;
25126c3169a3SBruce Richardson 	struct rte_eth_link link;
2513295968d1SFerruh Yigit 	struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
25140170be6fSJie Hai 	uint16_t i;
25156c3169a3SBruce Richardson 
25166c3169a3SBruce Richardson 	PMD_INIT_LOG(DEBUG, "stop");
2517b8f5d2aeSThomas Monjalon 	dev->data->dev_started = 0;
25186c3169a3SBruce Richardson 
25191978a9dcSXiao Wang 	rte_spinlock_lock(&hw->state_lock);
25202a821d81SChas Williams 	if (!hw->started)
25212a821d81SChas Williams 		goto out_unlock;
25226e1d9c0cSMaxime Coquelin 	hw->started = 0;
25232a821d81SChas Williams 
25249de76dfbSIvan Ilchenko 	virtio_tx_completed_cleanup(dev);
25259de76dfbSIvan Ilchenko 
2526f9b0d190SLuca Boccassi 	if (intr_conf->lsc || intr_conf->rxq) {
2527fe19d49cSZhiyong Yang 		virtio_intr_disable(dev);
25286c3169a3SBruce Richardson 
2529f9b0d190SLuca Boccassi 		/* Reset interrupt callback  */
2530f9b0d190SLuca Boccassi 		if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
2531f9b0d190SLuca Boccassi 			rte_intr_callback_unregister(dev->intr_handle,
2532f9b0d190SLuca Boccassi 						     virtio_interrupt_handler,
2533f9b0d190SLuca Boccassi 						     dev);
2534f9b0d190SLuca Boccassi 		}
2535f9b0d190SLuca Boccassi 	}
2536f9b0d190SLuca Boccassi 
25376c3169a3SBruce Richardson 	memset(&link, 0, sizeof(link));
2538717b2e8eSStephen Hemminger 	rte_eth_linkstatus_set(dev, &link);
25392a821d81SChas Williams out_unlock:
25401978a9dcSXiao Wang 	rte_spinlock_unlock(&hw->state_lock);
254162024eb8SIvan Ilchenko 
25420170be6fSJie Hai 	for (i = 0; i < dev->data->nb_rx_queues; i++)
25430170be6fSJie Hai 		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
25440170be6fSJie Hai 	for (i = 0; i < dev->data->nb_tx_queues; i++)
25450170be6fSJie Hai 		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
25460170be6fSJie Hai 
254762024eb8SIvan Ilchenko 	return 0;
25486c3169a3SBruce Richardson }
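/*
 * Usage sketch (editorial addition): the stop path is entered through
 * rte_eth_dev_stop(port_id); a typical teardown then continues with
 * rte_eth_dev_close(port_id), after which the remaining queue mbufs are
 * released (see virtio_dev_free_mbufs() above). port_id is a placeholder.
 */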
25496c3169a3SBruce Richardson 
25506c3169a3SBruce Richardson static int
25516c3169a3SBruce Richardson virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
25526c3169a3SBruce Richardson {
2553717b2e8eSStephen Hemminger 	struct rte_eth_link link;
25546c3169a3SBruce Richardson 	uint16_t status;
25556c3169a3SBruce Richardson 	struct virtio_hw *hw = dev->data->dev_private;
2556717b2e8eSStephen Hemminger 
25576c3169a3SBruce Richardson 	memset(&link, 0, sizeof(link));
25581357b4b3SIvan Dyukov 	link.link_duplex = hw->duplex;
255949119e38SIvan Dyukov 	link.link_speed  = hw->speed;
2560295968d1SFerruh Yigit 	link.link_autoneg = RTE_ETH_LINK_AUTONEG;
25616c3169a3SBruce Richardson 
25622a821d81SChas Williams 	if (!hw->started) {
2563295968d1SFerruh Yigit 		link.link_status = RTE_ETH_LINK_DOWN;
2564295968d1SFerruh Yigit 		link.link_speed = RTE_ETH_SPEED_NUM_NONE;
2565b4f9a45aSMaxime Coquelin 	} else if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
25666c3169a3SBruce Richardson 		PMD_INIT_LOG(DEBUG, "Get link status from hw");
25679328e105SMaxime Coquelin 		virtio_read_dev_config(hw,
25686c3169a3SBruce Richardson 				offsetof(struct virtio_net_config, status),
25696c3169a3SBruce Richardson 				&status, sizeof(status));
25706c3169a3SBruce Richardson 		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
2571295968d1SFerruh Yigit 			link.link_status = RTE_ETH_LINK_DOWN;
2572295968d1SFerruh Yigit 			link.link_speed = RTE_ETH_SPEED_NUM_NONE;
25736c3169a3SBruce Richardson 			PMD_INIT_LOG(DEBUG, "Port %d is down",
25746c3169a3SBruce Richardson 				     dev->data->port_id);
25756c3169a3SBruce Richardson 		} else {
2576295968d1SFerruh Yigit 			link.link_status = RTE_ETH_LINK_UP;
25773c3c54cfSIvan Ilchenko 			if (hw->get_speed_via_feat)
25783c3c54cfSIvan Ilchenko 				virtio_get_speed_duplex(dev, &link);
25796c3169a3SBruce Richardson 			PMD_INIT_LOG(DEBUG, "Port %d is up",
25806c3169a3SBruce Richardson 				     dev->data->port_id);
25816c3169a3SBruce Richardson 		}
25826c3169a3SBruce Richardson 	} else {
2583295968d1SFerruh Yigit 		link.link_status = RTE_ETH_LINK_UP;
25843c3c54cfSIvan Ilchenko 		if (hw->get_speed_via_feat)
25853c3c54cfSIvan Ilchenko 			virtio_get_speed_duplex(dev, &link);
25866c3169a3SBruce Richardson 	}
25876c3169a3SBruce Richardson 
2588717b2e8eSStephen Hemminger 	return rte_eth_linkstatus_set(dev, &link);
25896c3169a3SBruce Richardson }
25906c3169a3SBruce Richardson 
2591289ba0c0SDavid Harton static int
2592289ba0c0SDavid Harton virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2593289ba0c0SDavid Harton {
2594289ba0c0SDavid Harton 	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
2595289ba0c0SDavid Harton 	struct virtio_hw *hw = dev->data->dev_private;
25969c7ce8bdSTiwei Bie 	uint64_t offloads = rxmode->offloads;
2597289ba0c0SDavid Harton 
2598295968d1SFerruh Yigit 	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
2599295968d1SFerruh Yigit 		if ((offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
2600b4f9a45aSMaxime Coquelin 				!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
2601289ba0c0SDavid Harton 
2602289ba0c0SDavid Harton 			PMD_DRV_LOG(NOTICE,
2603289ba0c0SDavid Harton 				"vlan filtering not available on this host");
2604289ba0c0SDavid Harton 
2605289ba0c0SDavid Harton 			return -ENOTSUP;
2606289ba0c0SDavid Harton 		}
2607289ba0c0SDavid Harton 	}
2608289ba0c0SDavid Harton 
2609295968d1SFerruh Yigit 	if (mask & RTE_ETH_VLAN_STRIP_MASK)
2610295968d1SFerruh Yigit 		hw->vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
2611289ba0c0SDavid Harton 
2612289ba0c0SDavid Harton 	return 0;
2613289ba0c0SDavid Harton }
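/*
 * Usage sketch (editorial addition): this callback is reached through the
 * generic VLAN offload API, e.g. to enable VLAN stripping at runtime (port_id
 * is a placeholder):
 *
 *	ret = rte_eth_dev_set_vlan_offload(port_id,
 *					   RTE_ETH_VLAN_STRIP_OFFLOAD);
 *
 * VLAN filtering can only be requested when the device offers
 * VIRTIO_NET_F_CTRL_VLAN; otherwise -ENOTSUP is returned as above.
 */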
2614289ba0c0SDavid Harton 
2615bdad90d1SIvan Ilchenko static int
26166c3169a3SBruce Richardson virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
26176c3169a3SBruce Richardson {
2618c1e55ed3SOlivier Matz 	uint64_t tso_mask, host_features;
26190c9d6620SMaxime Coquelin 	uint32_t rss_hash_types = 0;
26206c3169a3SBruce Richardson 	struct virtio_hw *hw = dev->data->dev_private;
262149119e38SIvan Dyukov 	dev_info->speed_capa = virtio_dev_speed_capa_get(hw->speed);
2622b392e987SIdo Barnea 
262345e4acd4SOlivier Matz 	dev_info->max_rx_queues =
262445e4acd4SOlivier Matz 		RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
262545e4acd4SOlivier Matz 	dev_info->max_tx_queues =
262645e4acd4SOlivier Matz 		RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_TX_QUEUES);
26276c3169a3SBruce Richardson 	dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
26286c3169a3SBruce Richardson 	dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
26296c3169a3SBruce Richardson 	dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
263011d7bc9fSIvan Ilchenko 	dev_info->max_mtu = hw->max_mtu;
263158169a9cSOlivier Matz 
2632f8b60756SMaxime Coquelin 	host_features = VIRTIO_OPS(hw)->get_features(hw);
2633295968d1SFerruh Yigit 	dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
26344e8169ebSIvan Ilchenko 	if (host_features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
2635295968d1SFerruh Yigit 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SCATTER;
2636c1e55ed3SOlivier Matz 	if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
2637c1e55ed3SOlivier Matz 		dev_info->rx_offload_capa |=
2638295968d1SFerruh Yigit 			RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
2639295968d1SFerruh Yigit 			RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
2640c1e55ed3SOlivier Matz 	}
26419c7ce8bdSTiwei Bie 	if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
2642295968d1SFerruh Yigit 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
2643c1e55ed3SOlivier Matz 	tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
2644c1e55ed3SOlivier Matz 		(1ULL << VIRTIO_NET_F_GUEST_TSO6);
2645ec9f3d12SOlivier Matz 	if ((host_features & tso_mask) == tso_mask)
2646295968d1SFerruh Yigit 		dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
2647c1e55ed3SOlivier Matz 
2648295968d1SFerruh Yigit 	dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
2649295968d1SFerruh Yigit 				    RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
26504174a7b5SMaxime Coquelin 	if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
265158169a9cSOlivier Matz 		dev_info->tx_offload_capa |=
2652295968d1SFerruh Yigit 			RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
2653295968d1SFerruh Yigit 			RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
265458169a9cSOlivier Matz 	}
265569657304SOlivier Matz 	tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
265669657304SOlivier Matz 		(1ULL << VIRTIO_NET_F_HOST_TSO6);
26574174a7b5SMaxime Coquelin 	if ((host_features & tso_mask) == tso_mask)
2658295968d1SFerruh Yigit 		dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
2659bdad90d1SIvan Ilchenko 
26600c9d6620SMaxime Coquelin 	if (host_features & (1ULL << VIRTIO_NET_F_RSS)) {
26610c9d6620SMaxime Coquelin 		virtio_dev_get_rss_config(hw, &rss_hash_types);
26620c9d6620SMaxime Coquelin 		dev_info->hash_key_size = VIRTIO_NET_RSS_KEY_SIZE;
26630c9d6620SMaxime Coquelin 		dev_info->reta_size = VIRTIO_NET_RSS_RETA_SIZE;
26640c9d6620SMaxime Coquelin 		dev_info->flow_type_rss_offloads =
26650c9d6620SMaxime Coquelin 			virtio_to_ethdev_rss_offloads(rss_hash_types);
26660c9d6620SMaxime Coquelin 	} else {
26670c9d6620SMaxime Coquelin 		dev_info->hash_key_size = 0;
26680c9d6620SMaxime Coquelin 		dev_info->reta_size = 0;
26690c9d6620SMaxime Coquelin 		dev_info->flow_type_rss_offloads = 0;
26700c9d6620SMaxime Coquelin 	}
26710c9d6620SMaxime Coquelin 
267231136836SIvan Ilchenko 	if (host_features & (1ULL << VIRTIO_F_RING_PACKED)) {
267331136836SIvan Ilchenko 		/*
267431136836SIvan Ilchenko 		 * According to 2.7 Packed Virtqueues,
267531136836SIvan Ilchenko 		 * 2.7.10.1 Structure Size and Alignment:
267631136836SIvan Ilchenko 		 * The Queue Size value does not have to be a power of 2.
267731136836SIvan Ilchenko 		 */
267831136836SIvan Ilchenko 		dev_info->rx_desc_lim.nb_max = UINT16_MAX;
2679492a239cSIvan Ilchenko 		dev_info->tx_desc_lim.nb_max = UINT16_MAX;
268031136836SIvan Ilchenko 	} else {
268131136836SIvan Ilchenko 		/*
268231136836SIvan Ilchenko 		 * According to 2.6 Split Virtqueues:
268331136836SIvan Ilchenko 		 * Queue Size value is always a power of 2. The maximum Queue
268431136836SIvan Ilchenko 		 * Size value is 32768.
268531136836SIvan Ilchenko 		 */
268631136836SIvan Ilchenko 		dev_info->rx_desc_lim.nb_max = 32768;
2687492a239cSIvan Ilchenko 		dev_info->tx_desc_lim.nb_max = 32768;
268831136836SIvan Ilchenko 	}
268931136836SIvan Ilchenko 	/*
269031136836SIvan Ilchenko 	 * Actual minimum is not the same for virtqueues of different kinds,
269131136836SIvan Ilchenko 	 * but to avoid tangling the code with separate branches, rely on
269231136836SIvan Ilchenko 	 * default thresholds since desc number must be at least of their size.
269331136836SIvan Ilchenko 	 */
269431136836SIvan Ilchenko 	dev_info->rx_desc_lim.nb_min = RTE_MAX(DEFAULT_RX_FREE_THRESH,
269531136836SIvan Ilchenko 					       RTE_VIRTIO_VPMD_RX_REARM_THRESH);
2696492a239cSIvan Ilchenko 	dev_info->tx_desc_lim.nb_min = DEFAULT_TX_FREE_THRESH;
269731136836SIvan Ilchenko 	dev_info->rx_desc_lim.nb_align = 1;
2698492a239cSIvan Ilchenko 	dev_info->tx_desc_lim.nb_align = 1;
269931136836SIvan Ilchenko 
2700bdad90d1SIvan Ilchenko 	return 0;
27016c3169a3SBruce Richardson }
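/*
 * Usage sketch (editorial addition): applications typically clamp their ring
 * sizes to the limits reported above before setting up queues (port_id and
 * nb_rxd are placeholders):
 *
 *	struct rte_eth_dev_info info;
 *
 *	if (rte_eth_dev_info_get(port_id, &info) == 0)
 *		nb_rxd = RTE_MIN(nb_rxd, info.rx_desc_lim.nb_max);
 *
 * For split virtqueues the reported maximum is 32768 descriptors, while
 * packed virtqueues do not require a power-of-2 size and report UINT16_MAX.
 */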
27026c3169a3SBruce Richardson 
27036c3169a3SBruce Richardson /*
27046c3169a3SBruce Richardson  * It enables testpmd to collect per-queue stats.
27056c3169a3SBruce Richardson  */
27066c3169a3SBruce Richardson static int
27076c3169a3SBruce Richardson virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,
27086c3169a3SBruce Richardson __rte_unused uint16_t queue_id, __rte_unused uint8_t stat_idx,
27096c3169a3SBruce Richardson __rte_unused uint8_t is_rx)
27106c3169a3SBruce Richardson {
27116c3169a3SBruce Richardson 	return 0;
27126c3169a3SBruce Richardson }
27136c3169a3SBruce Richardson 
2714eeded204SDavid Marchand RTE_LOG_REGISTER_SUFFIX(virtio_logtype_init, init, NOTICE);
2715eeded204SDavid Marchand RTE_LOG_REGISTER_SUFFIX(virtio_logtype_driver, driver, NOTICE);
2716