/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_cpuflags.h>

#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_dev.h>

#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"

static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev);
static int  virtio_dev_configure(struct rte_eth_dev *dev);
static int  virtio_dev_start(struct rte_eth_dev *dev);
static void virtio_dev_stop(struct rte_eth_dev *dev);
static void virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
static void virtio_dev_info_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int virtio_dev_link_update(struct rte_eth_dev *dev,
	int wait_to_complete);
static int virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);

static void virtio_set_hwaddr(struct virtio_hw *hw);
static void virtio_get_hwaddr(struct virtio_hw *hw);

static int virtio_dev_stats_get(struct rte_eth_dev *dev,
				 struct rte_eth_stats *stats);
static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
				 struct rte_eth_xstat *xstats, unsigned n);
static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
				       struct rte_eth_xstat_name *xstats_names,
				       unsigned limit);
static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id, int on);
static int virtio_mac_addr_add(struct rte_eth_dev *dev,
				struct ether_addr *mac_addr,
				uint32_t index, uint32_t vmdq);
static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static void virtio_mac_addr_set(struct rte_eth_dev *dev,
				struct ether_addr *mac_addr);

static int virtio_intr_enable(struct rte_eth_dev *dev);
static int virtio_intr_disable(struct rte_eth_dev *dev);

static int virtio_dev_queue_stats_mapping_set(
	struct rte_eth_dev *eth_dev,
	uint16_t queue_id,
	uint8_t stat_idx,
	uint8_t is_rx);

int virtio_logtype_init;
int virtio_logtype_driver;

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_virtio_map[] = {
	{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_LEGACY_DEVICEID_NET) },
	{ RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_MODERN_DEVICEID_NET) },
	{ .vendor_id = 0, /* sentinel */ },
};

struct rte_virtio_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* [rt]x_qX_ is prepended to the name string here */
static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = {
	{"good_packets",           offsetof(struct virtnet_rx, stats.packets)},
	{"good_bytes",             offsetof(struct virtnet_rx, stats.bytes)},
	{"errors",                 offsetof(struct virtnet_rx, stats.errors)},
	{"multicast_packets",      offsetof(struct virtnet_rx, stats.multicast)},
	{"broadcast_packets",      offsetof(struct virtnet_rx, stats.broadcast)},
	{"undersize_packets",      offsetof(struct virtnet_rx, stats.size_bins[0])},
	{"size_64_packets",        offsetof(struct virtnet_rx, stats.size_bins[1])},
	{"size_65_127_packets",    offsetof(struct virtnet_rx, stats.size_bins[2])},
	{"size_128_255_packets",   offsetof(struct virtnet_rx, stats.size_bins[3])},
	{"size_256_511_packets",   offsetof(struct virtnet_rx, stats.size_bins[4])},
	{"size_512_1023_packets",  offsetof(struct virtnet_rx, stats.size_bins[5])},
	{"size_1024_1518_packets", offsetof(struct virtnet_rx, stats.size_bins[6])},
	{"size_1519_max_packets",  offsetof(struct virtnet_rx, stats.size_bins[7])},
};

/* [rt]x_qX_ is prepended to the name string here */
static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
	{"good_packets",           offsetof(struct virtnet_tx, stats.packets)},
	{"good_bytes",             offsetof(struct virtnet_tx, stats.bytes)},
	{"errors",                 offsetof(struct virtnet_tx, stats.errors)},
	{"multicast_packets",      offsetof(struct virtnet_tx, stats.multicast)},
	{"broadcast_packets",      offsetof(struct virtnet_tx, stats.broadcast)},
	{"undersize_packets",      offsetof(struct virtnet_tx, stats.size_bins[0])},
	{"size_64_packets",        offsetof(struct virtnet_tx, stats.size_bins[1])},
	{"size_65_127_packets",    offsetof(struct virtnet_tx, stats.size_bins[2])},
	{"size_128_255_packets",   offsetof(struct virtnet_tx, stats.size_bins[3])},
	{"size_256_511_packets",   offsetof(struct virtnet_tx, stats.size_bins[4])},
	{"size_512_1023_packets",  offsetof(struct virtnet_tx, stats.size_bins[5])},
	{"size_1024_1518_packets", offsetof(struct virtnet_tx, stats.size_bins[6])},
	{"size_1519_max_packets",  offsetof(struct virtnet_tx, stats.size_bins[7])},
};

#define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \
			    sizeof(rte_virtio_rxq_stat_strings[0]))
#define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
			    sizeof(rte_virtio_txq_stat_strings[0]))

struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];

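/*
 * Send a control command to the device over the control virtqueue.
 * The descriptor chain layout, enforced by the QEMU backend, is: one
 * read-only descriptor for the command header, one read-only descriptor
 * per data argument, and one write-only descriptor for the one-byte ack
 * written back by the device. The call busy-waits, polling the used
 * ring, until the device has consumed the chain.
 */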
static int
virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
		int *dlen, int pkt_num)
{
	uint32_t head, i;
	int k, sum = 0;
	virtio_net_ctrl_ack status = ~0;
	struct virtio_pmd_ctrl *result;
	struct virtqueue *vq;

	ctrl->status = status;

	if (!cvq || !cvq->vq) {
		PMD_INIT_LOG(ERR, "Control queue is not supported.");
		return -1;
	}
	vq = cvq->vq;
	head = vq->vq_desc_head_idx;

	PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
		"vq->hw->cvq = %p vq = %p",
		vq->vq_desc_head_idx, status, vq->hw->cvq, vq);

	if ((vq->vq_free_cnt < ((uint32_t)pkt_num + 2)) || (pkt_num < 1))
		return -1;

	memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
		sizeof(struct virtio_pmd_ctrl));

	/*
	 * Format is enforced in qemu code:
	 * One TX packet for header;
	 * At least one TX packet per argument;
	 * One RX packet for ACK.
	 */
	vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;
	vq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mem;
	vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
	vq->vq_free_cnt--;
	i = vq->vq_ring.desc[head].next;

	for (k = 0; k < pkt_num; k++) {
		vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
			+ sizeof(struct virtio_net_ctrl_hdr)
			+ sizeof(ctrl->status) + sizeof(uint8_t)*sum;
		vq->vq_ring.desc[i].len = dlen[k];
		sum += dlen[k];
		vq->vq_free_cnt--;
		i = vq->vq_ring.desc[i].next;
	}

	vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
	vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
			+ sizeof(struct virtio_net_ctrl_hdr);
	vq->vq_ring.desc[i].len = sizeof(ctrl->status);
	vq->vq_free_cnt--;

	vq->vq_desc_head_idx = vq->vq_ring.desc[i].next;

	vq_update_avail_ring(vq, head);
	vq_update_avail_idx(vq);

	PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);

	virtqueue_notify(vq);

	rte_rmb();
	while (VIRTQUEUE_NUSED(vq) == 0) {
		rte_rmb();
		usleep(100);
	}

	while (VIRTQUEUE_NUSED(vq)) {
		uint32_t idx, desc_idx, used_idx;
		struct vring_used_elem *uep;

		used_idx = (uint32_t)(vq->vq_used_cons_idx
				& (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		idx = (uint32_t) uep->id;
		desc_idx = idx;

		while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
			desc_idx = vq->vq_ring.desc[desc_idx].next;
			vq->vq_free_cnt++;
		}

		vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
		vq->vq_desc_head_idx = idx;

		vq->vq_used_cons_idx++;
		vq->vq_free_cnt++;
	}

	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
			vq->vq_free_cnt, vq->vq_desc_head_idx);

	result = cvq->virtio_net_hdr_mz->addr;

	return result->status;
}

static int
virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
	memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));

	dlen[0] = sizeof(uint16_t);

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Multiqueue configured but send command "
			  "failed; device may be left in an inconsistent state");
		return -EINVAL;
	}

	return 0;
}

static void
virtio_dev_queue_release(void *queue __rte_unused)
{
	/* do nothing */
}

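/*
 * Virtqueue indexes alternate between receive and transmit queues: even
 * indexes are RX, odd indexes are TX, and the control queue (if
 * negotiated) sits right after the last pair. For example, with two
 * queue pairs: vq0=RX0, vq1=TX0, vq2=RX1, vq3=TX1, vq4=CQ.
 */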
static int
virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
{
	if (vtpci_queue_idx == hw->max_queue_pairs * 2)
		return VTNET_CQ;
	else if (vtpci_queue_idx % 2 == 0)
		return VTNET_RQ;
	else
		return VTNET_TQ;
}

static uint16_t
virtio_get_nr_vq(struct virtio_hw *hw)
{
	uint16_t nr_vq = hw->max_queue_pairs * 2;

	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
		nr_vq += 1;

	return nr_vq;
}

static void
virtio_init_vring(struct virtqueue *vq)
{
	int size = vq->vq_nentries;
	struct vring *vr = &vq->vq_ring;
	uint8_t *ring_mem = vq->vq_ring_virt_mem;

	PMD_INIT_FUNC_TRACE();

	/*
	 * Reinitialise since virtio port might have been stopped and restarted
	 */
	memset(ring_mem, 0, vq->vq_ring_size);
	vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_used_cons_idx = 0;
	vq->vq_desc_head_idx = 0;
	vq->vq_avail_idx = 0;
	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
	vq->vq_free_cnt = vq->vq_nentries;
	memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);

	vring_desc_init(vr->desc, size);
	/*
	 * Disable interrupts from the device (host) to the guest.
	 */
	virtqueue_disable_intr(vq);
}

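/*
 * Allocate and initialize one virtqueue: the virtqueue structure itself
 * (plus per-descriptor extra state), a memzone for the vring, an
 * optional header memzone (per-packet headers and indirect descriptors
 * for TX queues, a page for command/data/status for the control queue)
 * and, for RX queues, the software ring used by the fast path.
 */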
static int
virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
{
	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
	char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ];
	const struct rte_memzone *mz = NULL, *hdr_mz = NULL;
	unsigned int vq_size, size;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtnet_rx *rxvq = NULL;
	struct virtnet_tx *txvq = NULL;
	struct virtnet_ctl *cvq = NULL;
	struct virtqueue *vq;
	size_t sz_hdr_mz = 0;
	void *sw_ring = NULL;
	int queue_type = virtio_get_queue_type(hw, vtpci_queue_idx);
	int ret;

	PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);

	/*
	 * Read the virtqueue size from the Queue Size field
	 * Always power of 2 and if 0 virtqueue does not exist
	 */
	vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
	PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
	if (vq_size == 0) {
		PMD_INIT_LOG(ERR, "virtqueue does not exist");
		return -EINVAL;
	}

	if (!rte_is_power_of_2(vq_size)) {
		PMD_INIT_LOG(ERR, "virtqueue size is not a power of 2");
		return -EINVAL;
	}

	snprintf(vq_name, sizeof(vq_name), "port%d_vq%d",
		 dev->data->port_id, vtpci_queue_idx);

	size = RTE_ALIGN_CEIL(sizeof(*vq) +
				vq_size * sizeof(struct vq_desc_extra),
				RTE_CACHE_LINE_SIZE);
	if (queue_type == VTNET_TQ) {
		/*
		 * For each xmit packet, allocate a virtio_net_hdr
		 * and indirect ring elements
		 */
		sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
	} else if (queue_type == VTNET_CQ) {
		/* Allocate a page for control vq command, data and status */
		sz_hdr_mz = PAGE_SIZE;
	}

	vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
				SOCKET_ID_ANY);
	if (vq == NULL) {
		PMD_INIT_LOG(ERR, "cannot allocate vq");
		return -ENOMEM;
	}
	hw->vqs[vtpci_queue_idx] = vq;

	vq->hw = hw;
	vq->vq_queue_index = vtpci_queue_idx;
	vq->vq_nentries = vq_size;

	/*
	 * Reserve a memzone for vring elements
	 */
	size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
	PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
		     size, vq->vq_ring_size);

	mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
					 SOCKET_ID_ANY,
					 0, VIRTIO_PCI_VRING_ALIGN);
	if (mz == NULL) {
		if (rte_errno == EEXIST)
			mz = rte_memzone_lookup(vq_name);
		if (mz == NULL) {
			ret = -ENOMEM;
			goto fail_q_alloc;
		}
	}

	memset(mz->addr, 0, mz->len);

	vq->vq_ring_mem = mz->iova;
	vq->vq_ring_virt_mem = mz->addr;
	PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem:      0x%" PRIx64,
		     (uint64_t)mz->iova);
	PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
		     (uint64_t)(uintptr_t)mz->addr);

	virtio_init_vring(vq);

	if (sz_hdr_mz) {
		snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
			 dev->data->port_id, vtpci_queue_idx);
		hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
						     SOCKET_ID_ANY, 0,
						     RTE_CACHE_LINE_SIZE);
		if (hdr_mz == NULL) {
			if (rte_errno == EEXIST)
				hdr_mz = rte_memzone_lookup(vq_hdr_name);
			if (hdr_mz == NULL) {
				ret = -ENOMEM;
				goto fail_q_alloc;
			}
		}
	}

	if (queue_type == VTNET_RQ) {
		size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
			       sizeof(vq->sw_ring[0]);

		sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
				RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
		if (!sw_ring) {
			PMD_INIT_LOG(ERR, "cannot allocate RX soft ring");
			ret = -ENOMEM;
			goto fail_q_alloc;
		}

		vq->sw_ring = sw_ring;
		rxvq = &vq->rxq;
		rxvq->vq = vq;
		rxvq->port_id = dev->data->port_id;
		rxvq->mz = mz;
	} else if (queue_type == VTNET_TQ) {
		txvq = &vq->txq;
		txvq->vq = vq;
		txvq->port_id = dev->data->port_id;
		txvq->mz = mz;
		txvq->virtio_net_hdr_mz = hdr_mz;
		txvq->virtio_net_hdr_mem = hdr_mz->iova;
	} else if (queue_type == VTNET_CQ) {
		cvq = &vq->cq;
		cvq->vq = vq;
		cvq->mz = mz;
		cvq->virtio_net_hdr_mz = hdr_mz;
		cvq->virtio_net_hdr_mem = hdr_mz->iova;
		memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);

		hw->cvq = cvq;
	}

	/* For the virtio_user case (i.e. when hw->virtio_user_dev is set),
	 * we use the virtual address. We also need to set _offset_ properly;
	 * see VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
	 */
	if (!hw->virtio_user_dev)
		vq->offset = offsetof(struct rte_mbuf, buf_iova);
	else {
		vq->vq_ring_mem = (uintptr_t)mz->addr;
		vq->offset = offsetof(struct rte_mbuf, buf_addr);
		if (queue_type == VTNET_TQ)
			txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
		else if (queue_type == VTNET_CQ)
			cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
	}

	if (queue_type == VTNET_TQ) {
		struct virtio_tx_region *txr;
		unsigned int i;

		txr = hdr_mz->addr;
		memset(txr, 0, vq_size * sizeof(*txr));
		for (i = 0; i < vq_size; i++) {
			struct vring_desc *start_dp = txr[i].tx_indir;

			vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir));

			/* first indirect descriptor is always the tx header */
			start_dp->addr = txvq->virtio_net_hdr_mem
				+ i * sizeof(*txr)
				+ offsetof(struct virtio_tx_region, tx_hdr);

			start_dp->len = hw->vtnet_hdr_size;
			start_dp->flags = VRING_DESC_F_NEXT;
		}
	}

	if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
		PMD_INIT_LOG(ERR, "setup_queue failed");
		return -EINVAL;
	}

	return 0;

fail_q_alloc:
	rte_free(sw_ring);
	rte_memzone_free(hdr_mz);
	rte_memzone_free(mz);
	rte_free(vq);

	return ret;
}

static void
virtio_free_queues(struct virtio_hw *hw)
{
	uint16_t nr_vq = virtio_get_nr_vq(hw);
	struct virtqueue *vq;
	int queue_type;
	uint16_t i;

	if (hw->vqs == NULL)
		return;

	for (i = 0; i < nr_vq; i++) {
		vq = hw->vqs[i];
		if (!vq)
			continue;

		queue_type = virtio_get_queue_type(hw, i);
		if (queue_type == VTNET_RQ) {
			rte_free(vq->sw_ring);
			rte_memzone_free(vq->rxq.mz);
		} else if (queue_type == VTNET_TQ) {
			rte_memzone_free(vq->txq.mz);
			rte_memzone_free(vq->txq.virtio_net_hdr_mz);
		} else {
			rte_memzone_free(vq->cq.mz);
			rte_memzone_free(vq->cq.virtio_net_hdr_mz);
		}

		rte_free(vq);
		hw->vqs[i] = NULL;
	}

	rte_free(hw->vqs);
	hw->vqs = NULL;
}

static int
virtio_alloc_queues(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	uint16_t nr_vq = virtio_get_nr_vq(hw);
	uint16_t i;
	int ret;

	hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
	if (!hw->vqs) {
		PMD_INIT_LOG(ERR, "failed to allocate vqs");
		return -ENOMEM;
	}

	for (i = 0; i < nr_vq; i++) {
		ret = virtio_init_queue(dev, i);
		if (ret < 0) {
			virtio_free_queues(hw);
			return ret;
		}
	}

	return 0;
}

static void virtio_queues_unbind_intr(struct rte_eth_dev *dev);

static void
virtio_dev_close(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;

	PMD_INIT_LOG(DEBUG, "virtio_dev_close");

	/* reset the NIC */
	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		VTPCI_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);
	if (intr_conf->rxq)
		virtio_queues_unbind_intr(dev);

	if (intr_conf->lsc || intr_conf->rxq) {
		virtio_intr_disable(dev);
		rte_intr_efd_disable(dev->intr_handle);
		rte_free(dev->intr_handle->intr_vec);
		dev->intr_handle->intr_vec = NULL;
	}

	vtpci_reset(hw);
	virtio_dev_free_mbufs(dev);
	virtio_free_queues(hw);
}

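/*
 * The four helpers below share the VIRTIO_NET_CTRL_RX command format: a
 * class/command header followed by a single on/off byte, e.g.
 * { .class = VIRTIO_NET_CTRL_RX, .cmd = VIRTIO_NET_CTRL_RX_PROMISC,
 *   .data[0] = 1 } enables promiscuous mode. They all require the
 * VIRTIO_NET_F_CTRL_RX feature.
 */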
static void
virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
		PMD_INIT_LOG(INFO, "host does not support rx control");
		return;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
	ctrl.data[0] = 1;
	dlen[0] = 1;

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to enable promisc");
}

static void
virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
		PMD_INIT_LOG(INFO, "host does not support rx control");
		return;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
	ctrl.data[0] = 0;
	dlen[0] = 1;

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to disable promisc");
}

static void
virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
		PMD_INIT_LOG(INFO, "host does not support rx control");
		return;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
	ctrl.data[0] = 1;
	dlen[0] = 1;

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
}

static void
virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
		PMD_INIT_LOG(INFO, "host does not support rx control");
		return;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
	ctrl.data[0] = 0;
	dlen[0] = 1;

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
}

#define VLAN_TAG_LEN           4    /* 802.3ac tag (not DMA'd) */
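/*
 * Worked example for the frame size check below: with the mergeable Rx
 * buffer header (12 bytes), a 1500 byte MTU gives a frame size of
 * 1500 + 14 (Ethernet header) + 4 (VLAN tag) + 12 = 1530 bytes.
 */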
static int
virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct virtio_hw *hw = dev->data->dev_private;
	uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
				 hw->vtnet_hdr_size;
	uint32_t frame_size = mtu + ether_hdr_len;
	uint32_t max_frame_size = hw->max_mtu + ether_hdr_len;

	max_frame_size = RTE_MIN(max_frame_size, VIRTIO_MAX_RX_PKTLEN);

	if (mtu < ETHER_MIN_MTU || frame_size > max_frame_size) {
		PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
			ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
		return -EINVAL;
	}
	return 0;
}

static int
virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
	struct virtqueue *vq = rxvq->vq;

	virtqueue_enable_intr(vq);
	return 0;
}

static int
virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
	struct virtqueue *vq = rxvq->vq;

	virtqueue_disable_intr(vq);
	return 0;
}

/*
 * dev_ops for virtio, bare necessities for basic operation
 */
static const struct eth_dev_ops virtio_eth_dev_ops = {
	.dev_configure           = virtio_dev_configure,
	.dev_start               = virtio_dev_start,
	.dev_stop                = virtio_dev_stop,
	.dev_close               = virtio_dev_close,
	.promiscuous_enable      = virtio_dev_promiscuous_enable,
	.promiscuous_disable     = virtio_dev_promiscuous_disable,
	.allmulticast_enable     = virtio_dev_allmulticast_enable,
	.allmulticast_disable    = virtio_dev_allmulticast_disable,
	.mtu_set                 = virtio_mtu_set,
	.dev_infos_get           = virtio_dev_info_get,
	.stats_get               = virtio_dev_stats_get,
	.xstats_get              = virtio_dev_xstats_get,
	.xstats_get_names        = virtio_dev_xstats_get_names,
	.stats_reset             = virtio_dev_stats_reset,
	.xstats_reset            = virtio_dev_stats_reset,
	.link_update             = virtio_dev_link_update,
	.vlan_offload_set        = virtio_dev_vlan_offload_set,
	.rx_queue_setup          = virtio_dev_rx_queue_setup,
	.rx_queue_intr_enable    = virtio_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable   = virtio_dev_rx_queue_intr_disable,
	.rx_queue_release        = virtio_dev_queue_release,
	.rx_descriptor_done      = virtio_dev_rx_queue_done,
	.tx_queue_setup          = virtio_dev_tx_queue_setup,
	.tx_queue_release        = virtio_dev_queue_release,
	/* collect stats per queue */
	.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
	.vlan_filter_set         = virtio_vlan_filter_set,
	.mac_addr_add            = virtio_mac_addr_add,
	.mac_addr_remove         = virtio_mac_addr_remove,
	.mac_addr_set            = virtio_mac_addr_set,
};

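/**
 * Atomically reads the link status information from global
 * structure rte_eth_dev. A struct rte_eth_link fits in 64 bits, so a
 * single rte_atomic64_cmpset() suffices for the copy.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */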
static inline int
virtio_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
			*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 *   - Pointer to the buffer holding the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
virtio_dev_atomic_write_link_status(struct rte_eth_dev *dev,
		struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static void
virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct virtnet_tx *txvq = dev->data->tx_queues[i];
		if (txvq == NULL)
			continue;

		stats->opackets += txvq->stats.packets;
		stats->obytes += txvq->stats.bytes;
		stats->oerrors += txvq->stats.errors;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_opackets[i] = txvq->stats.packets;
			stats->q_obytes[i] = txvq->stats.bytes;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct virtnet_rx *rxvq = dev->data->rx_queues[i];
		if (rxvq == NULL)
			continue;

		stats->ipackets += rxvq->stats.packets;
		stats->ibytes += rxvq->stats.bytes;
		stats->ierrors += rxvq->stats.errors;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_ipackets[i] = rxvq->stats.packets;
			stats->q_ibytes[i] = rxvq->stats.bytes;
		}
	}

	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
}

static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
				       struct rte_eth_xstat_name *xstats_names,
				       __rte_unused unsigned limit)
{
	unsigned i;
	unsigned count = 0;
	unsigned t;

	unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
		dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;

	if (xstats_names != NULL) {
		/* Note: limit is checked in rte_eth_xstats_get_names() */

		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			struct virtnet_rx *rxvq = dev->data->rx_queues[i];
			if (rxvq == NULL)
				continue;
			for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
				snprintf(xstats_names[count].name,
					sizeof(xstats_names[count].name),
					"rx_q%u_%s", i,
					rte_virtio_rxq_stat_strings[t].name);
				count++;
			}
		}

		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			struct virtnet_tx *txvq = dev->data->tx_queues[i];
			if (txvq == NULL)
				continue;
			for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
				snprintf(xstats_names[count].name,
					sizeof(xstats_names[count].name),
					"tx_q%u_%s", i,
					rte_virtio_txq_stat_strings[t].name);
				count++;
			}
		}
		return count;
	}
	return nstats;
}

static int
virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		      unsigned n)
{
	unsigned i;
	unsigned count = 0;

	unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS +
		dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS;

	if (n < nstats)
		return nstats;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct virtnet_rx *rxvq = dev->data->rx_queues[i];

		if (rxvq == NULL)
			continue;

		unsigned t;

		for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
			xstats[count].value = *(uint64_t *)(((char *)rxvq) +
				rte_virtio_rxq_stat_strings[t].offset);
			xstats[count].id = count;
			count++;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct virtnet_tx *txvq = dev->data->tx_queues[i];

		if (txvq == NULL)
			continue;

		unsigned t;

		for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
			xstats[count].value = *(uint64_t *)(((char *)txvq) +
				rte_virtio_txq_stat_strings[t].offset);
			xstats[count].id = count;
			count++;
		}
	}

	return count;
}

static int
virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	virtio_update_stats(dev, stats);

	return 0;
}

static void
virtio_dev_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct virtnet_tx *txvq = dev->data->tx_queues[i];
		if (txvq == NULL)
			continue;

		txvq->stats.packets = 0;
		txvq->stats.bytes = 0;
		txvq->stats.errors = 0;
		txvq->stats.multicast = 0;
		txvq->stats.broadcast = 0;
		memset(txvq->stats.size_bins, 0,
		       sizeof(txvq->stats.size_bins[0]) * 8);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct virtnet_rx *rxvq = dev->data->rx_queues[i];
		if (rxvq == NULL)
			continue;

		rxvq->stats.packets = 0;
		rxvq->stats.bytes = 0;
		rxvq->stats.errors = 0;
		rxvq->stats.multicast = 0;
		rxvq->stats.broadcast = 0;
		memset(rxvq->stats.size_bins, 0,
		       sizeof(rxvq->stats.size_bins[0]) * 8);
	}
}

static void
virtio_set_hwaddr(struct virtio_hw *hw)
{
	vtpci_write_dev_config(hw,
			offsetof(struct virtio_net_config, mac),
			&hw->mac_addr, ETHER_ADDR_LEN);
}

static void
virtio_get_hwaddr(struct virtio_hw *hw)
{
	if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {
		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, mac),
			&hw->mac_addr, ETHER_ADDR_LEN);
	} else {
		eth_random_addr(&hw->mac_addr[0]);
		virtio_set_hwaddr(hw);
	}
}

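/*
 * VIRTIO_NET_CTRL_MAC_TABLE_SET expects two variable-length tables back
 * to back in the command data: the unicast table first, then the
 * multicast table, each an entry count followed by that many 6-byte
 * addresses.
 */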
static int
virtio_mac_table_set(struct virtio_hw *hw,
		     const struct virtio_net_ctrl_mac *uc,
		     const struct virtio_net_ctrl_mac *mc)
{
	struct virtio_pmd_ctrl ctrl;
	int err, len[2];

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		PMD_DRV_LOG(INFO, "host does not support mac table");
		return -1;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;

	len[0] = uc->entries * ETHER_ADDR_LEN + sizeof(uc->entries);
	memcpy(ctrl.data, uc, len[0]);

	len[1] = mc->entries * ETHER_ADDR_LEN + sizeof(mc->entries);
	memcpy(ctrl.data + len[0], mc, len[1]);

	err = virtio_send_command(hw->cvq, &ctrl, len, 2);
	if (err != 0)
		PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
	return err;
}

static int
virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		    uint32_t index, uint32_t vmdq __rte_unused)
{
	struct virtio_hw *hw = dev->data->dev_private;
	const struct ether_addr *addrs = dev->data->mac_addrs;
	unsigned int i;
	struct virtio_net_ctrl_mac *uc, *mc;

	if (index >= VIRTIO_MAX_MAC_ADDRS) {
		PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
		return -EINVAL;
	}

	uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
	uc->entries = 0;
	mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
	mc->entries = 0;

	for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
		const struct ether_addr *addr
			= (i == index) ? mac_addr : addrs + i;
		struct virtio_net_ctrl_mac *tbl
			= is_multicast_ether_addr(addr) ? mc : uc;

		memcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN);
	}

	return virtio_mac_table_set(hw, uc, mc);
}

static void
virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct ether_addr *addrs = dev->data->mac_addrs;
	struct virtio_net_ctrl_mac *uc, *mc;
	unsigned int i;

	if (index >= VIRTIO_MAX_MAC_ADDRS) {
		PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
		return;
	}

	uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
	uc->entries = 0;
	mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
	mc->entries = 0;

	for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
		struct virtio_net_ctrl_mac *tbl;

		if (i == index || is_zero_ether_addr(addrs + i))
			continue;

		tbl = is_multicast_ether_addr(addrs + i) ? mc : uc;
		memcpy(&tbl->macs[tbl->entries++], addrs + i, ETHER_ADDR_LEN);
	}

	virtio_mac_table_set(hw, uc, mc);
}

static void
virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct virtio_hw *hw = dev->data->dev_private;

	memcpy(hw->mac_addr, mac_addr, ETHER_ADDR_LEN);

	/* Use atomic update if available */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		struct virtio_pmd_ctrl ctrl;
		int len = ETHER_ADDR_LEN;

		ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
		ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;

		memcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN);
		virtio_send_command(hw->cvq, &ctrl, &len, 1);
	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
		virtio_set_hwaddr(hw);
}

static int
virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int len;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
		return -ENOTSUP;

	ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN;
	ctrl.hdr.cmd = on ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
	memcpy(ctrl.data, &vlan_id, sizeof(vlan_id));
	len = sizeof(vlan_id);

	return virtio_send_command(hw->cvq, &ctrl, &len, 1);
}

static int
virtio_intr_enable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	if (rte_intr_enable(dev->intr_handle) < 0)
		return -1;

	if (!hw->virtio_user_dev)
		hw->use_msix = vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));

	return 0;
}

static int
virtio_intr_disable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	if (rte_intr_disable(dev->intr_handle) < 0)
		return -1;

	if (!hw->virtio_user_dev)
		hw->use_msix = vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));

	return 0;
}

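/*
 * Features are negotiated as a 64-bit mask: the driver offers
 * req_features (e.g. (1ULL << VIRTIO_NET_F_MTU) requests MTU support),
 * the device advertises host_features, and the result written back as
 * hw->guest_features is the intersection of the two.
 */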
static int
virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
{
	uint64_t host_features;

	/* Prepare guest_features: features that the driver wants to support */
	PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %" PRIx64,
		req_features);

	/* Read device(host) feature bits */
	host_features = VTPCI_OPS(hw)->get_features(hw);
	PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
		host_features);

	/* If supported, ensure MTU value is valid before acknowledging it. */
	if (host_features & req_features & (1ULL << VIRTIO_NET_F_MTU)) {
		struct virtio_net_config config;

		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, mtu),
			&config.mtu, sizeof(config.mtu));

		if (config.mtu < ETHER_MIN_MTU)
			req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
	}

	/*
	 * Negotiate features: the subset of device feature bits accepted by
	 * the driver is written back as the guest feature bits.
	 */
	hw->guest_features = req_features;
	hw->guest_features = vtpci_negotiate_features(hw, host_features);
	PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
		hw->guest_features);

	if (hw->modern) {
		if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
			PMD_INIT_LOG(ERR,
				"VIRTIO_F_VERSION_1 feature is not enabled.");
			return -1;
		}
		vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
		if (!(vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
			PMD_INIT_LOG(ERR,
				"failed to set FEATURES_OK status!");
			return -1;
		}
	}

	hw->req_guest_features = req_features;

	return 0;
}

/*
 * Process Virtio Config changed interrupt and call the callback
 * if link state changed.
 */
void
virtio_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct virtio_hw *hw = dev->data->dev_private;
	uint8_t isr;

	/* Read interrupt status which clears interrupt */
	isr = vtpci_isr(hw);
	PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);

	if (virtio_intr_enable(dev) < 0)
		PMD_DRV_LOG(ERR, "interrupt enable failed");

	if (isr & VIRTIO_PCI_ISR_CONFIG) {
		if (virtio_dev_link_update(dev, 0) == 0)
			_rte_eth_dev_callback_process(dev,
						      RTE_ETH_EVENT_INTR_LSC,
						      NULL, NULL);
	}
}

/* set rx and tx handlers according to what is supported */
static void
set_rxtx_funcs(struct rte_eth_dev *eth_dev)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;

	if (hw->use_simple_rx) {
		PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
			eth_dev->data->port_id);
		eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
		PMD_INIT_LOG(INFO,
			"virtio: using mergeable buffer Rx path on port %u",
			eth_dev->data->port_id);
		eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
	} else {
		PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
			eth_dev->data->port_id);
		eth_dev->rx_pkt_burst = &virtio_recv_pkts;
	}

	if (hw->use_simple_tx) {
		PMD_INIT_LOG(INFO, "virtio: using simple Tx path on port %u",
			eth_dev->data->port_id);
		eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
	} else {
		PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
			eth_dev->data->port_id);
		eth_dev->tx_pkt_burst = virtio_xmit_pkts;
	}
}

/* Only support 1:1 queue/interrupt mapping so far.
 * TODO: support n:1 queue/interrupt mapping when the number of
 * interrupt vectors is limited (<N+1).
 */
static int
virtio_queues_bind_intr(struct rte_eth_dev *dev)
{
	uint32_t i;
	struct virtio_hw *hw = dev->data->dev_private;

	PMD_INIT_LOG(INFO, "queue/interrupt binding");
	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
		dev->intr_handle->intr_vec[i] = i + 1;
		if (VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
						 VIRTIO_MSI_NO_VECTOR) {
			PMD_DRV_LOG(ERR, "failed to set queue vector");
			return -EBUSY;
		}
	}

	return 0;
}

static void
virtio_queues_unbind_intr(struct rte_eth_dev *dev)
{
	uint32_t i;
	struct virtio_hw *hw = dev->data->dev_private;

	PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
	for (i = 0; i < dev->data->nb_rx_queues; ++i)
		/* unbind the RX vq of pair i (index i * 2), matching
		 * virtio_queues_bind_intr() above
		 */
		VTPCI_OPS(hw)->set_queue_irq(hw,
					     hw->vqs[i * 2],
					     VIRTIO_MSI_NO_VECTOR);
}

static int
virtio_configure_intr(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;

	if (!rte_intr_cap_multiple(dev->intr_handle)) {
		PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
		return -ENOTSUP;
	}

	if (rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues)) {
		PMD_INIT_LOG(ERR, "Failed to create eventfd");
		return -1;
	}

	if (!dev->intr_handle->intr_vec) {
		dev->intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    hw->max_queue_pairs * sizeof(int), 0);
		if (!dev->intr_handle->intr_vec) {
			PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",
				     hw->max_queue_pairs);
			return -ENOMEM;
		}
	}

	/* Re-register callback to update max_intr */
	rte_intr_callback_unregister(dev->intr_handle,
				     virtio_interrupt_handler,
				     dev);
	rte_intr_callback_register(dev->intr_handle,
				   virtio_interrupt_handler,
				   dev);

	/* DO NOT try to remove this! This function will enable msix, or QEMU
	 * will encounter SIGSEGV when DRIVER_OK is sent.
	 * And for legacy devices, this should be done before queue/vec binding
	 * to change the config size from 20 to 24, or VIRTIO_MSI_QUEUE_VECTOR
	 * (22) will be ignored.
	 */
	if (virtio_intr_enable(dev) < 0) {
		PMD_DRV_LOG(ERR, "interrupt enable failed");
		return -1;
	}

	if (virtio_queues_bind_intr(dev) < 0) {
		PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");
		return -1;
	}

	return 0;
}

/* reset device and renegotiate features if needed */
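/*
 * Initialization follows the virtio status-byte handshake: reset, then
 * ACKNOWLEDGE, then DRIVER, then feature negotiation (which sets
 * FEATURES_OK on modern devices), then config-space reads and queue
 * allocation; vtpci_reinit_complete() finally completes the handshake.
 */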
static int
virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;
	struct virtio_net_config *config;
	struct virtio_net_config local_config;
	struct rte_pci_device *pci_dev = NULL;
	int ret;

	/* Reset the device, although this is not necessary at startup */
	vtpci_reset(hw);

	/* Tell the host we've noticed this device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	/* Tell the host we know how to drive the device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
	if (virtio_negotiate_features(hw, req_features) < 0)
		return -1;

	if (!hw->virtio_user_dev) {
		pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
		rte_eth_copy_pci_info(eth_dev, pci_dev);
	}

	/* LSC needs both the status feature and MSI-X; otherwise disable it */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) &&
	    hw->use_msix != VIRTIO_MSIX_NONE)
		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
	else
		eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;

	/* Set up the rx_header size for the device */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);

	/* Copy the permanent MAC address into virtio_hw */
	virtio_get_hwaddr(hw);
	ether_addr_copy((struct ether_addr *) hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);
	PMD_INIT_LOG(DEBUG,
		     "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
		config = &local_config;

		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, mac),
			&config->mac, sizeof(config->mac));

		if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, status),
				&config->status, sizeof(config->status));
		} else {
			PMD_INIT_LOG(DEBUG,
				     "VIRTIO_NET_F_STATUS is not supported");
			config->status = 0;
		}

		if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, max_virtqueue_pairs),
				&config->max_virtqueue_pairs,
				sizeof(config->max_virtqueue_pairs));
		} else {
			PMD_INIT_LOG(DEBUG,
				     "VIRTIO_NET_F_MQ is not supported");
			config->max_virtqueue_pairs = 1;
		}

		hw->max_queue_pairs = config->max_virtqueue_pairs;

		if (vtpci_with_feature(hw, VIRTIO_NET_F_MTU)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, mtu),
				&config->mtu,
				sizeof(config->mtu));

			/*
			 * MTU value has already been checked at negotiation
			 * time, but check again in case it has changed since
			 * then, which should not happen.
			 */
			if (config->mtu < ETHER_MIN_MTU) {
				PMD_INIT_LOG(ERR, "invalid max MTU value (%u)",
						config->mtu);
				return -1;
			}

			hw->max_mtu = config->mtu;
			/* Set initial MTU to the maximum supported by vhost */
			eth_dev->data->mtu = config->mtu;
		} else {
			hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -
				VLAN_TAG_LEN - hw->vtnet_hdr_size;
		}

		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
				config->max_virtqueue_pairs);
		PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
		PMD_INIT_LOG(DEBUG,
				"PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
				config->mac[0], config->mac[1],
				config->mac[2], config->mac[3],
				config->mac[4], config->mac[5]);
	} else {
		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
		hw->max_queue_pairs = 1;
	}

	ret = virtio_alloc_queues(eth_dev);
	if (ret < 0)
		return ret;

	if (eth_dev->data->dev_conf.intr_conf.rxq) {
		if (virtio_configure_intr(eth_dev) < 0) {
			PMD_INIT_LOG(ERR, "failed to configure interrupt");
			return -1;
		}
	}

	vtpci_reinit_complete(hw);

	if (pci_dev)
		PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	return 0;
}

/*
 * Remap the PCI device again (IO port map for legacy device and
 * memory map for modern device), so that the secondary process
 * can have the PCI device initialized correctly.
 */
static int
virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
{
	if (hw->modern) {
		/*
		 * We don't have to re-parse the PCI config space, since
		 * rte_pci_map_device() makes sure the mapped address
		 * in the secondary process equals the one mapped in
		 * the primary process: an error is returned if that
		 * requirement is not met.
		 *
		 * That said, we can simply reuse all the cap pointers
		 * (such as dev_cfg, common_cfg, etc.) parsed from the
		 * primary process, which are stored in shared memory.
		 */
		if (rte_pci_map_device(pci_dev)) {
			PMD_INIT_LOG(DEBUG, "failed to map pci device!");
			return -1;
		}
	} else {
		if (rte_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
			return -1;
	}

	return 0;
}

static void
virtio_set_vtpci_ops(struct virtio_hw *hw)
{
#ifdef RTE_VIRTIO_USER
	if (hw->virtio_user_dev)
		VTPCI_OPS(hw) = &virtio_user_ops;
	else
#endif
	if (hw->modern)
		VTPCI_OPS(hw) = &modern_ops;
	else
		VTPCI_OPS(hw) = &legacy_ops;
}

/*
 * This function is based on the probe() function in virtio_pci.c.
 * It returns 0 on success.
 */
int
eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;
	int ret;

	RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM <
			sizeof(struct virtio_net_hdr_mrg_rxbuf));

	eth_dev->dev_ops = &virtio_eth_dev_ops;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		if (!hw->virtio_user_dev) {
			ret = virtio_remap_pci(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
			if (ret)
				return ret;
		}

		virtio_set_vtpci_ops(hw);
		set_rxtx_funcs(eth_dev);

		return 0;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("virtio",
				VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC addresses",
			VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	hw->port_id = eth_dev->data->port_id;
	/* For the virtio_user case, hw->virtio_user_dev is populated by
	 * virtio_user_eth_dev_alloc() before eth_virtio_dev_init() is called.
	 */
	if (!hw->virtio_user_dev) {
		ret = vtpci_init(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
		if (ret)
			goto out;
	}

	/* reset device and negotiate default features */
	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
	if (ret < 0)
		goto out;

	/* Set up the interrupt callback */
	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		rte_intr_callback_register(eth_dev->intr_handle,
			virtio_interrupt_handler, eth_dev);

	return 0;

out:
	rte_free(eth_dev->data->mac_addrs);
	return ret;
}

static int
eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return -EPERM;

	virtio_dev_stop(eth_dev);
	virtio_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->rx_pkt_burst = NULL;

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	/* reset the interrupt callback */
	if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		rte_intr_callback_unregister(eth_dev->intr_handle,
						virtio_interrupt_handler,
						eth_dev);
	if (eth_dev->device)
		rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));

	PMD_INIT_LOG(DEBUG, "dev_uninit completed");

	return 0;
}

static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_hw),
		eth_virtio_dev_init);
}

static int eth_virtio_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_dev_uninit);
}

static struct rte_pci_driver rte_virtio_pmd = {
	.driver = {
		.name = "net_virtio",
	},
	.id_table = pci_id_virtio_map,
	.drv_flags = 0,
	.probe = eth_virtio_pci_probe,
	.remove = eth_virtio_pci_remove,
};

RTE_INIT(rte_virtio_pmd_init);
static void
rte_virtio_pmd_init(void)
{
	if (rte_eal_iopl_init() != 0) {
		PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD");
		return;
	}

	rte_pci_register(&rte_virtio_pmd);
}

/*
 * Configure virtio device
 * It returns 0 on success.
 */
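/*
 * A minimal usage sketch, assuming the rte_eth rxmode fields of this
 * DPDK release: the application sets the flags before configuring,
 * e.g.
 *
 *	struct rte_eth_conf conf = { 0 };
 *	conf.rxmode.hw_ip_checksum = 1;	// request Rx L4 checksum
 *	conf.rxmode.enable_lro = 1;	// request LRO (guest TSO)
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * which maps onto the feature requests made below.
 */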
static int
virtio_dev_configure(struct rte_eth_dev *dev)
{
	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct virtio_hw *hw = dev->data->dev_private;
	uint64_t req_features;
	int ret;

	PMD_INIT_LOG(DEBUG, "configure");
	req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;

	if (dev->data->dev_conf.intr_conf.rxq) {
		ret = virtio_init_device(dev, hw->req_guest_features);
		if (ret < 0)
			return ret;
	}

	/* The name hw_ip_checksum is a bit confusing since it can be
	 * set by the application to request L3 and/or L4 checksums. In
	 * the case of virtio, only L4 checksum is supported.
	 */
	if (rxmode->hw_ip_checksum)
		req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);

	if (rxmode->enable_lro)
		req_features |=
			(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
			(1ULL << VIRTIO_NET_F_GUEST_TSO6);

	/* if the requested features changed, reinit the device */
	if (req_features != hw->req_guest_features) {
		ret = virtio_init_device(dev, req_features);
		if (ret < 0)
			return ret;
	}

	if (rxmode->hw_ip_checksum &&
		!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
		PMD_DRV_LOG(ERR,
			"rx checksum not available on this host");
		return -ENOTSUP;
	}

	if (rxmode->enable_lro &&
		(!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
		 !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
		PMD_DRV_LOG(ERR,
			"Large Receive Offload not available on this host");
		return -ENOTSUP;
	}

	/* start control queue */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
		virtio_dev_cq_start(dev);

	hw->vlan_strip = rxmode->hw_vlan_strip;

	if (rxmode->hw_vlan_filter
	    && !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
		PMD_DRV_LOG(ERR,
			    "vlan filtering not available on this host");
		return -ENOTSUP;
	}

	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		/* Enable vector (0) for Link State Interrupt */
		if (VTPCI_OPS(hw)->set_config_irq(hw, 0) ==
				VIRTIO_MSI_NO_VECTOR) {
			PMD_DRV_LOG(ERR, "failed to set config vector");
			return -EBUSY;
		}

	hw->use_simple_rx = 1;
	hw->use_simple_tx = 1;

#if defined RTE_ARCH_ARM64 || defined CONFIG_RTE_ARCH_ARM
	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
		hw->use_simple_rx = 0;
		hw->use_simple_tx = 0;
	}
#endif
	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
		hw->use_simple_rx = 0;
		hw->use_simple_tx = 0;
	}

	if (rxmode->hw_ip_checksum)
		hw->use_simple_rx = 0;

	return 0;
}

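/*
 * Start the device: finish Rx/Tx queue setup, (re)enable the
 * interrupt/eventfd mapping if LSC or Rx interrupts are configured,
 * program the number of active queue pairs, notify every queue so the
 * backend starts processing, and pick the Rx/Tx burst functions.
 */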
1779 static int
1780 virtio_dev_start(struct rte_eth_dev *dev)
1781 {
1782 	uint16_t nb_queues, i;
1783 	struct virtnet_rx *rxvq;
1784 	struct virtnet_tx *txvq __rte_unused;
1785 	struct virtio_hw *hw = dev->data->dev_private;
1786 	int ret;
1787 
1788 	/* Finish the initialization of the queues */
1789 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1790 		ret = virtio_dev_rx_queue_setup_finish(dev, i);
1791 		if (ret < 0)
1792 			return ret;
1793 	}
1794 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1795 		ret = virtio_dev_tx_queue_setup_finish(dev, i);
1796 		if (ret < 0)
1797 			return ret;
1798 	}
1799 
1800 	/* check if lsc interrupt feature is enabled */
1801 	if (dev->data->dev_conf.intr_conf.lsc) {
1802 		if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
1803 			PMD_DRV_LOG(ERR, "link status not supported by host");
1804 			return -ENOTSUP;
1805 		}
1806 	}
1807 
1808 	/* Enable uio/vfio intr/eventfd mapping: although we already did that
1809 	 * in device configure, it could have been unmapped when the device
1810 	 * was stopped.
1811 	 */
1812 	if (dev->data->dev_conf.intr_conf.lsc ||
1813 	    dev->data->dev_conf.intr_conf.rxq) {
1814 		virtio_intr_disable(dev);
1815 
1816 		if (virtio_intr_enable(dev) < 0) {
1817 			PMD_DRV_LOG(ERR, "interrupt enable failed");
1818 			return -EIO;
1819 		}
1820 	}
1821 
1822 	/* Notify the backend.
1823 	 * Otherwise the tap backend might already have stopped its queue due to
1824 	 * fullness; the vhost backend would then have no chance to be woken up.
1825 	 */
1826 	nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
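	/* virtio_set_multiple_queues() issues VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET
	 * on the control queue so the backend activates all queue pairs.
	 */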
1827 	if (hw->max_queue_pairs > 1) {
1828 		if (virtio_set_multiple_queues(dev, nb_queues) != 0)
1829 			return -EINVAL;
1830 	}
1831 
1832 	PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);
1833 
1834 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1835 		rxvq = dev->data->rx_queues[i];
1836 		/* Flush the old packets */
1837 		virtqueue_rxvq_flush(rxvq->vq);
1838 		virtqueue_notify(rxvq->vq);
1839 	}
1840 
1841 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1842 		txvq = dev->data->tx_queues[i];
1843 		virtqueue_notify(txvq->vq);
1844 	}
1845 
1846 	PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
1847 
1848 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1849 		rxvq = dev->data->rx_queues[i];
1850 		VIRTQUEUE_DUMP(rxvq->vq);
1851 	}
1852 
1853 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1854 		txvq = dev->data->tx_queues[i];
1855 		VIRTQUEUE_DUMP(txvq->vq);
1856 	}
1857 
1858 	set_rxtx_funcs(dev);
1859 	hw->started = 1;
1860 
1861 	/* Initialize Link state */
1862 	virtio_dev_link_update(dev, 0);
1863 
1864 	return 0;
1865 }
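
/*
 * Illustrative sketch, not part of the driver: the usual ethdev call order
 * that ends in virtio_dev_start() above, so that the setup-finish steps and
 * backend notifications run on fully populated queues (mp is a hypothetical
 * mbuf mempool; the descriptor count of 256 is arbitrary):
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 256, rte_socket_id(), NULL, mp);
 *	rte_eth_tx_queue_setup(port_id, 0, 256, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 */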
1866 
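/*
 * Free all mbufs still held by the Rx/Tx virtqueues; typically invoked on
 * device close/uninit so that no mbufs leak when the queues are released.
 */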
1867 static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
1868 {
1869 	struct rte_mbuf *buf;
1870 	int i, mbuf_num = 0;
1871 
1872 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1873 		struct virtnet_rx *rxvq = dev->data->rx_queues[i];
1874 
1875 		PMD_INIT_LOG(DEBUG,
1876 			     "Before freeing rxq[%d] used and unused bufs", i);
1877 		VIRTQUEUE_DUMP(rxvq->vq);
1878 
1879 		PMD_INIT_LOG(DEBUG, "rx_queues[%d]=%p", i, rxvq);
		/* reset the counter per queue, as done for Tx below */
		mbuf_num = 0;
1880 		while ((buf = virtqueue_detatch_unused(rxvq->vq)) != NULL) {
1881 			rte_pktmbuf_free(buf);
1882 			mbuf_num++;
1883 		}
1884 
1885 		PMD_INIT_LOG(DEBUG, "freed %d mbufs", mbuf_num);
1886 		PMD_INIT_LOG(DEBUG,
1887 			     "After freeing rxq[%d] used and unused bufs", i);
1888 		VIRTQUEUE_DUMP(rxvq->vq);
1889 	}
1890 
1891 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1892 		struct virtnet_tx *txvq = dev->data->tx_queues[i];
1893 
1894 		PMD_INIT_LOG(DEBUG,
1895 			     "Before freeing txq[%d] used and unused bufs",
1896 			     i);
1897 		VIRTQUEUE_DUMP(txvq->vq);
1898 
1899 		mbuf_num = 0;
1900 		while ((buf = virtqueue_detatch_unused(txvq->vq)) != NULL) {
1901 			rte_pktmbuf_free(buf);
1902 			mbuf_num++;
1903 		}
1904 
1905 		PMD_INIT_LOG(DEBUG, "freed %d mbufs", mbuf_num);
1906 		PMD_INIT_LOG(DEBUG,
1907 			     "After freeing txq[%d] used and unused bufs", i);
1908 		VIRTQUEUE_DUMP(txvq->vq);
1909 	}
1910 }
1911 
1912 /*
1913  * Stop device: disable interrupt and mark link down
1914  */
1915 static void
1916 virtio_dev_stop(struct rte_eth_dev *dev)
1917 {
1918 	struct virtio_hw *hw = dev->data->dev_private;
1919 	struct rte_eth_link link;
1920 	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
1921 
1922 	PMD_INIT_LOG(DEBUG, "stop");
1923 
1924 	if (intr_conf->lsc || intr_conf->rxq)
1925 		virtio_intr_disable(dev);
1926 
1927 	hw->started = 0;
1928 	memset(&link, 0, sizeof(link));
1929 	virtio_dev_atomic_write_link_status(dev, &link);
1930 }
1931 
1932 static int
1933 virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
1934 {
1935 	struct rte_eth_link link, old;
1936 	uint16_t status;
1937 	struct virtio_hw *hw = dev->data->dev_private;
1938 	memset(&link, 0, sizeof(link));
1939 	virtio_dev_atomic_read_link_status(dev, &link);
1940 	old = link;
1941 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
1942 	link.link_speed  = ETH_SPEED_NUM_10G;
1943 
1944 	if (hw->started == 0) {
1945 		link.link_status = ETH_LINK_DOWN;
1946 	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
1947 		PMD_INIT_LOG(DEBUG, "Get link status from hw");
1948 		vtpci_read_dev_config(hw,
1949 				offsetof(struct virtio_net_config, status),
1950 				&status, sizeof(status));
1951 		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
1952 			link.link_status = ETH_LINK_DOWN;
1953 			PMD_INIT_LOG(DEBUG, "Port %d is down",
1954 				     dev->data->port_id);
1955 		} else {
1956 			link.link_status = ETH_LINK_UP;
1957 			PMD_INIT_LOG(DEBUG, "Port %d is up",
1958 				     dev->data->port_id);
1959 		}
1960 	} else {
1961 		link.link_status = ETH_LINK_UP;
1962 	}
1963 	virtio_dev_atomic_write_link_status(dev, &link);
1964 
1965 	return (old.link_status == link.link_status) ? -1 : 0;
1966 }
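
/*
 * Illustrative sketch, not part of the driver: an application reads the
 * link state recorded above through the generic ethdev API:
 *
 *	struct rte_eth_link link;
 *
 *	rte_eth_link_get_nowait(port_id, &link);
 *	if (link.link_status == ETH_LINK_UP)
 *		printf("port %u: up at %u Mbps\n", port_id, link.link_speed);
 */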
1967 
1968 static int
1969 virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1970 {
1971 	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1972 	struct virtio_hw *hw = dev->data->dev_private;
1973 
1974 	if (mask & ETH_VLAN_FILTER_MASK) {
1975 		if (rxmode->hw_vlan_filter &&
1976 				!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
1977 
1978 			PMD_DRV_LOG(NOTICE,
1979 				"vlan filtering not available on this host");
1980 
1981 			return -ENOTSUP;
1982 		}
1983 	}
1984 
1985 	if (mask & ETH_VLAN_STRIP_MASK)
1986 		hw->vlan_strip = rxmode->hw_vlan_strip;
1987 
1988 	return 0;
1989 }
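
/*
 * Illustrative sketch, not part of the driver: a runtime request to enable
 * VLAN stripping from an application reaches the handler above via:
 *
 *	if (rte_eth_dev_set_vlan_offload(port_id, ETH_VLAN_STRIP_OFFLOAD) != 0)
 *		printf("failed to enable VLAN stripping\n");
 */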
1990 
1991 static void
1992 virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1993 {
1994 	uint64_t tso_mask, host_features;
1995 	struct virtio_hw *hw = dev->data->dev_private;
1996 
1997 	dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
1998 
1999 	dev_info->pci_dev = dev->device ? RTE_ETH_DEV_TO_PCI(dev) : NULL;
2000 	dev_info->max_rx_queues =
2001 		RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
2002 	dev_info->max_tx_queues =
2003 		RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_TX_QUEUES);
2004 	dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
2005 	dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
2006 	dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
2007 	dev_info->default_txconf = (struct rte_eth_txconf) {
2008 		.txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS
2009 	};
2010 
2011 	host_features = VTPCI_OPS(hw)->get_features(hw);
2012 	dev_info->rx_offload_capa = 0;
2013 	if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
2014 		dev_info->rx_offload_capa |=
2015 			DEV_RX_OFFLOAD_TCP_CKSUM |
2016 			DEV_RX_OFFLOAD_UDP_CKSUM;
2017 	}
2018 	tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
2019 		(1ULL << VIRTIO_NET_F_GUEST_TSO6);
2020 	if ((host_features & tso_mask) == tso_mask)
2021 		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
2022 
2023 	dev_info->tx_offload_capa = 0;
2024 	if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
2025 		dev_info->tx_offload_capa |=
2026 			DEV_TX_OFFLOAD_UDP_CKSUM |
2027 			DEV_TX_OFFLOAD_TCP_CKSUM;
2028 	}
2029 	tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
2030 		(1ULL << VIRTIO_NET_F_HOST_TSO6);
2031 	if ((hw->guest_features & tso_mask) == tso_mask)
2032 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
2033 }
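
/*
 * Illustrative sketch, not part of the driver: checking the capabilities
 * reported above before requesting an offload:
 *
 *	struct rte_eth_dev_info info;
 *
 *	rte_eth_dev_info_get(port_id, &info);
 *	if (info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
 *		conf.rxmode.enable_lro = 1;
 */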
2034 
2035 /*
2036  * No-op stub that allows testpmd to collect per-queue stats.
2037  */
2038 static int
2039 virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,
2040 __rte_unused uint16_t queue_id, __rte_unused uint8_t stat_idx,
2041 __rte_unused uint8_t is_rx)
2042 {
2043 	return 0;
2044 }
2045 
2046 RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
2047 RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
2048 RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");
2049 
2050 RTE_INIT(virtio_init_log);
2051 static void
2052 virtio_init_log(void)
2053 {
2054 	virtio_logtype_init = rte_log_register("pmd.virtio.init");
2055 	if (virtio_logtype_init >= 0)
2056 		rte_log_set_level(virtio_logtype_init, RTE_LOG_NOTICE);
2057 	virtio_logtype_driver = rte_log_register("pmd.virtio.driver");
2058 	if (virtio_logtype_driver >= 0)
2059 		rte_log_set_level(virtio_logtype_driver, RTE_LOG_NOTICE);
2060 }
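
/*
 * Both logtypes above default to NOTICE; more verbose driver logging can be
 * enabled at run time with the EAL --log-level option or by calling
 * rte_log_set_level() on the registered logtypes.
 */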
2061