/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#ifdef RTE_EXEC_ENV_LINUXAPP
#include <ctype.h>
#include <dirent.h>
#include <fcntl.h>
#endif

#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_errno.h>

#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_dev.h>

#include "virtio_ethdev.h"
#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"

static int eth_virtio_dev_init(struct rte_eth_dev *eth_dev);
static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev);
static int  virtio_dev_configure(struct rte_eth_dev *dev);
static int  virtio_dev_start(struct rte_eth_dev *dev);
static void virtio_dev_stop(struct rte_eth_dev *dev);
static void virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
static void virtio_dev_info_get(struct rte_eth_dev *dev,
				struct rte_eth_dev_info *dev_info);
static int virtio_dev_link_update(struct rte_eth_dev *dev,
	__rte_unused int wait_to_complete);

static void virtio_set_hwaddr(struct virtio_hw *hw);
static void virtio_get_hwaddr(struct virtio_hw *hw);

static void virtio_dev_stats_get(struct rte_eth_dev *dev,
				 struct rte_eth_stats *stats);
static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
				 struct rte_eth_xstats *xstats, unsigned n);
static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
				uint16_t vlan_id, int on);
static void virtio_mac_addr_add(struct rte_eth_dev *dev,
				struct ether_addr *mac_addr,
				uint32_t index, uint32_t vmdq __rte_unused);
static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static void virtio_mac_addr_set(struct rte_eth_dev *dev,
				struct ether_addr *mac_addr);

static int virtio_dev_queue_stats_mapping_set(
	__rte_unused struct rte_eth_dev *eth_dev,
	__rte_unused uint16_t queue_id,
	__rte_unused uint8_t stat_idx,
	__rte_unused uint8_t is_rx);

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_virtio_map[] = {

#define RTE_PCI_DEV_ID_DECL_VIRTIO(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
#include "rte_pci_dev_ids.h"

{ .vendor_id = 0, /* sentinel */ },
};

struct rte_virtio_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

/* [rt]x_qX_ is prepended to the name string here */
static const struct rte_virtio_xstats_name_off rte_virtio_q_stat_strings[] = {
	{"good_packets",           offsetof(struct virtqueue, packets)},
	{"good_bytes",             offsetof(struct virtqueue, bytes)},
	{"errors",                 offsetof(struct virtqueue, errors)},
	{"multicast_packets",      offsetof(struct virtqueue, multicast)},
	{"broadcast_packets",      offsetof(struct virtqueue, broadcast)},
	{"undersize_packets",      offsetof(struct virtqueue, size_bins[0])},
	{"size_64_packets",        offsetof(struct virtqueue, size_bins[1])},
	{"size_65_127_packets",    offsetof(struct virtqueue, size_bins[2])},
	{"size_128_255_packets",   offsetof(struct virtqueue, size_bins[3])},
	{"size_256_511_packets",   offsetof(struct virtqueue, size_bins[4])},
	{"size_512_1023_packets",  offsetof(struct virtqueue, size_bins[5])},
	{"size_1024_1517_packets", offsetof(struct virtqueue, size_bins[6])},
	{"size_1518_max_packets",  offsetof(struct virtqueue, size_bins[7])},
};

#define VIRTIO_NB_Q_XSTATS (sizeof(rte_virtio_q_stat_strings) / \
			    sizeof(rte_virtio_q_stat_strings[0]))

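/*
 * Send a command through the control virtqueue and busy-wait for the
 * device to consume it.  The command header, the pkt_num data blobs of
 * dlen[k] bytes each and the one-byte ack all live in the queue's
 * virtio_net_hdr memzone and are chained through pkt_num + 2 descriptors,
 * matching the layout qemu enforces.  Returns the device's
 * virtio_net_ctrl_ack status, or -1 on a malformed request.
 */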
static int
virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl,
		int *dlen, int pkt_num)
{
	uint32_t head, i;
	int k, sum = 0;
	virtio_net_ctrl_ack status = ~0;
	struct virtio_pmd_ctrl result;

	ctrl->status = status;

	if (!(vq && vq->hw->cvq)) {
		PMD_INIT_LOG(ERR,
			     "%s(): Control queue is not supported.",
			     __func__);
		return -1;
	}
	head = vq->vq_desc_head_idx;

	PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
		"vq->hw->cvq = %p vq = %p",
		vq->vq_desc_head_idx, status, vq->hw->cvq, vq);

	if ((vq->vq_free_cnt < ((uint32_t)pkt_num + 2)) || (pkt_num < 1))
		return -1;

	memcpy(vq->virtio_net_hdr_mz->addr, ctrl,
		sizeof(struct virtio_pmd_ctrl));

	/*
	 * Format is enforced in qemu code:
	 * One TX packet for header;
	 * At least one TX packet per argument;
	 * One RX packet for ACK.
	 */
	vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;
	vq->vq_ring.desc[head].addr = vq->virtio_net_hdr_mz->phys_addr;
	vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
	vq->vq_free_cnt--;
	i = vq->vq_ring.desc[head].next;

	for (k = 0; k < pkt_num; k++) {
		vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vq_ring.desc[i].addr = vq->virtio_net_hdr_mz->phys_addr
			+ sizeof(struct virtio_net_ctrl_hdr)
			+ sizeof(ctrl->status) + sizeof(uint8_t)*sum;
		vq->vq_ring.desc[i].len = dlen[k];
		sum += dlen[k];
		vq->vq_free_cnt--;
		i = vq->vq_ring.desc[i].next;
	}

	vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
	vq->vq_ring.desc[i].addr = vq->virtio_net_hdr_mz->phys_addr
			+ sizeof(struct virtio_net_ctrl_hdr);
	vq->vq_ring.desc[i].len = sizeof(ctrl->status);
	vq->vq_free_cnt--;

	vq->vq_desc_head_idx = vq->vq_ring.desc[i].next;

	vq_update_avail_ring(vq, head);
	vq_update_avail_idx(vq);

	PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);

	virtqueue_notify(vq);

	rte_rmb();
	while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
		rte_rmb();
		usleep(100);
	}

	while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
		uint32_t idx, desc_idx, used_idx;
		struct vring_used_elem *uep;

		used_idx = (uint32_t)(vq->vq_used_cons_idx
				& (vq->vq_nentries - 1));
		uep = &vq->vq_ring.used->ring[used_idx];
		idx = (uint32_t) uep->id;
		desc_idx = idx;

		while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
			desc_idx = vq->vq_ring.desc[desc_idx].next;
			vq->vq_free_cnt++;
		}

		vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
		vq->vq_desc_head_idx = idx;

		vq->vq_used_cons_idx++;
		vq->vq_free_cnt++;
	}

	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
			vq->vq_free_cnt, vq->vq_desc_head_idx);

	memcpy(&result, vq->virtio_net_hdr_mz->addr,
			sizeof(struct virtio_pmd_ctrl));

	return result.status;
}

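/*
 * Program the number of RX/TX virtqueue pairs to use with the
 * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET control command.
 */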
static int
virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	ctrl.hdr.class = VIRTIO_NET_CTRL_MQ;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET;
	memcpy(ctrl.data, &nb_queues, sizeof(uint16_t));

	dlen[0] = sizeof(uint16_t);

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret) {
		PMD_INIT_LOG(ERR, "Multiqueue configured but the send "
			  "command failed");
		return -EINVAL;
	}

	return 0;
}

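/*
 * Deactivate a virtqueue by selecting it and writing 0 to its PFN
 * register, then free the software ring and the virtqueue itself.
 */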
void
virtio_dev_queue_release(struct virtqueue *vq)
{
	struct virtio_hw *hw;

	if (vq) {
		hw = vq->hw;
		/* Select and deactivate the queue */
		VIRTIO_WRITE_REG_2(hw, VIRTIO_PCI_QUEUE_SEL, vq->vq_queue_index);
		VIRTIO_WRITE_REG_4(hw, VIRTIO_PCI_QUEUE_PFN, 0);

		rte_free(vq->sw_ring);
		rte_free(vq);
	}
}

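/*
 * Allocate and initialize an RX, TX or control virtqueue: query the ring
 * size from the device, reserve a memzone for the vring (plus a separate
 * header memzone for TX and control queues) and program the ring's guest
 * physical address into the VIRTIO_PCI_QUEUE_PFN register.
 */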
int virtio_dev_queue_setup(struct rte_eth_dev *dev,
			int queue_type,
			uint16_t queue_idx,
			uint16_t vtpci_queue_idx,
			uint16_t nb_desc,
			unsigned int socket_id,
			struct virtqueue **pvq)
{
	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
	const struct rte_memzone *mz;
	unsigned int vq_size, size;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = NULL;

	/* Write the virtqueue index to the Queue Select Field */
	VIRTIO_WRITE_REG_2(hw, VIRTIO_PCI_QUEUE_SEL, vtpci_queue_idx);
	PMD_INIT_LOG(DEBUG, "selecting queue: %u", vtpci_queue_idx);

	/*
	 * Read the virtqueue size from the Queue Size field.
	 * It is always a power of 2; if it is 0, the virtqueue does not exist.
	 */
	vq_size = VIRTIO_READ_REG_2(hw, VIRTIO_PCI_QUEUE_NUM);
	PMD_INIT_LOG(DEBUG, "vq_size: %u nb_desc:%u", vq_size, nb_desc);
	if (vq_size == 0) {
		PMD_INIT_LOG(ERR, "%s: virtqueue does not exist", __func__);
		return -EINVAL;
	}

	if (!rte_is_power_of_2(vq_size)) {
		PMD_INIT_LOG(ERR, "%s: virtqueue size is not a power of 2",
			__func__);
		return -EINVAL;
	}

	if (queue_type == VTNET_RQ) {
		snprintf(vq_name, sizeof(vq_name), "port%d_rvq%d",
			dev->data->port_id, queue_idx);
		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
			vq_size * sizeof(struct vq_desc_extra),
			RTE_CACHE_LINE_SIZE);
	} else if (queue_type == VTNET_TQ) {
		snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d",
			dev->data->port_id, queue_idx);
		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
			vq_size * sizeof(struct vq_desc_extra),
			RTE_CACHE_LINE_SIZE);
	} else if (queue_type == VTNET_CQ) {
		snprintf(vq_name, sizeof(vq_name), "port%d_cvq",
			dev->data->port_id);
		vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
			vq_size * sizeof(struct vq_desc_extra),
			RTE_CACHE_LINE_SIZE);
	}
	if (vq == NULL) {
		PMD_INIT_LOG(ERR, "%s: Can not allocate virtqueue", __func__);
		return -ENOMEM;
	}
	if (queue_type == VTNET_RQ) {
		/* Allocate the RX soft ring only once vq is known valid */
		vq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
			(RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
			sizeof(vq->sw_ring[0]), RTE_CACHE_LINE_SIZE, socket_id);
		if (vq->sw_ring == NULL) {
			PMD_INIT_LOG(ERR, "%s: Can not allocate RX soft ring",
				__func__);
			rte_free(vq);
			return -ENOMEM;
		}
	}

	vq->hw = hw;
	vq->port_id = dev->data->port_id;
	vq->queue_id = queue_idx;
	vq->vq_queue_index = vtpci_queue_idx;
	vq->vq_nentries = vq_size;

	if (nb_desc == 0 || nb_desc > vq_size)
		nb_desc = vq_size;
	vq->vq_free_cnt = nb_desc;

	/*
	 * Reserve a memzone for vring elements
	 */
	size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
	vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
	PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
		size, vq->vq_ring_size);

	mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
		socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
	if (mz == NULL) {
		if (rte_errno == EEXIST)
			mz = rte_memzone_lookup(vq_name);
		if (mz == NULL) {
			rte_free(vq);
			return -ENOMEM;
		}
	}

	/*
	 * The virtio PCI device's VIRTIO_PCI_QUEUE_PFN register is 32 bit
	 * and accepts only a 32-bit page frame number.
	 * Check that the allocated physical memory does not exceed the 16TB
	 * it can address.
	 */
	if ((mz->phys_addr + vq->vq_ring_size - 1) >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
		rte_free(vq);
		return -ENOMEM;
	}

	memset(mz->addr, 0, mz->len);
	vq->mz = mz;
	vq->vq_ring_mem = mz->phys_addr;
	vq->vq_ring_virt_mem = mz->addr;
	PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem:      0x%"PRIx64,
		(uint64_t)mz->phys_addr);
	PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%"PRIx64,
		(uint64_t)(uintptr_t)mz->addr);
	vq->virtio_net_hdr_mz  = NULL;
	vq->virtio_net_hdr_mem = 0;

	if (queue_type == VTNET_TQ) {
		/*
		 * For each xmit packet, allocate a virtio_net_hdr
		 */
		snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d_hdrzone",
			dev->data->port_id, queue_idx);
		vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,
			vq_size * hw->vtnet_hdr_size,
			socket_id, 0, RTE_CACHE_LINE_SIZE);
		if (vq->virtio_net_hdr_mz == NULL) {
			if (rte_errno == EEXIST)
				vq->virtio_net_hdr_mz =
					rte_memzone_lookup(vq_name);
			if (vq->virtio_net_hdr_mz == NULL) {
				rte_free(vq);
				return -ENOMEM;
			}
		}
		vq->virtio_net_hdr_mem =
			vq->virtio_net_hdr_mz->phys_addr;
		memset(vq->virtio_net_hdr_mz->addr, 0,
			vq_size * hw->vtnet_hdr_size);
	} else if (queue_type == VTNET_CQ) {
		/* Allocate a page for control vq command, data and status */
		snprintf(vq_name, sizeof(vq_name), "port%d_cvq_hdrzone",
			dev->data->port_id);
		vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name,
			PAGE_SIZE, socket_id, 0, RTE_CACHE_LINE_SIZE);
		if (vq->virtio_net_hdr_mz == NULL) {
			if (rte_errno == EEXIST)
				vq->virtio_net_hdr_mz =
					rte_memzone_lookup(vq_name);
			if (vq->virtio_net_hdr_mz == NULL) {
				rte_free(vq);
				return -ENOMEM;
			}
		}
		vq->virtio_net_hdr_mem =
			vq->virtio_net_hdr_mz->phys_addr;
		memset(vq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
	}

	/*
	 * Set guest physical address of the virtqueue
	 * in VIRTIO_PCI_QUEUE_PFN config register of device
	 */
	VIRTIO_WRITE_REG_4(hw, VIRTIO_PCI_QUEUE_PFN,
			mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
	*pvq = vq;
	return 0;
}

static int
virtio_dev_cq_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx,
		uint32_t socket_id)
{
	struct virtqueue *vq;
	int ret;
	struct virtio_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	ret = virtio_dev_queue_setup(dev, VTNET_CQ, VTNET_SQ_CQ_QUEUE_IDX,
			vtpci_queue_idx, 0, socket_id, &vq);
	if (ret < 0) {
		PMD_INIT_LOG(ERR, "control vq initialization failed");
		return ret;
	}

	hw->cvq = vq;
	return 0;
}

static void
virtio_free_queues(struct rte_eth_dev *dev)
{
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		virtio_dev_rx_queue_release(dev->data->rx_queues[i]);

	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		virtio_dev_tx_queue_release(dev->data->tx_queues[i]);

	dev->data->nb_tx_queues = 0;
}

static void
virtio_dev_close(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct rte_pci_device *pci_dev = dev->pci_dev;

	PMD_INIT_LOG(DEBUG, "virtio_dev_close");

	/* reset the NIC */
	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
		vtpci_irq_config(hw, VIRTIO_MSI_NO_VECTOR);
	vtpci_reset(hw);
	hw->started = 0;
	virtio_dev_free_mbufs(dev);
	virtio_free_queues(dev);
}

static void
virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
		PMD_INIT_LOG(INFO, "host does not support rx control");
		return;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
	ctrl.data[0] = 1;
	dlen[0] = 1;

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to enable promisc");
}

static void
virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
		PMD_INIT_LOG(INFO, "host does not support rx control");
		return;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_PROMISC;
	ctrl.data[0] = 0;
	dlen[0] = 1;

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to disable promisc");
}

static void
virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
		PMD_INIT_LOG(INFO, "host does not support rx control");
		return;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
	ctrl.data[0] = 1;
	dlen[0] = 1;

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
}

static void
virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int dlen[1];
	int ret;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
		PMD_INIT_LOG(INFO, "host does not support rx control");
		return;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_RX_ALLMULTI;
	ctrl.data[0] = 0;
	dlen[0] = 1;

	ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
	if (ret)
		PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
}

/*
 * dev_ops for virtio, bare necessities for basic operation
 */
static const struct eth_dev_ops virtio_eth_dev_ops = {
	.dev_configure           = virtio_dev_configure,
	.dev_start               = virtio_dev_start,
	.dev_stop                = virtio_dev_stop,
	.dev_close               = virtio_dev_close,
	.promiscuous_enable      = virtio_dev_promiscuous_enable,
	.promiscuous_disable     = virtio_dev_promiscuous_disable,
	.allmulticast_enable     = virtio_dev_allmulticast_enable,
	.allmulticast_disable    = virtio_dev_allmulticast_disable,

	.dev_infos_get           = virtio_dev_info_get,
	.stats_get               = virtio_dev_stats_get,
	.xstats_get              = virtio_dev_xstats_get,
	.stats_reset             = virtio_dev_stats_reset,
	.xstats_reset            = virtio_dev_stats_reset,
	.link_update             = virtio_dev_link_update,
	.rx_queue_setup          = virtio_dev_rx_queue_setup,
	.rx_queue_release        = virtio_dev_rx_queue_release,
	.tx_queue_setup          = virtio_dev_tx_queue_setup,
	.tx_queue_release        = virtio_dev_tx_queue_release,
	/* collect stats per queue */
	.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
	.vlan_filter_set         = virtio_vlan_filter_set,
	.mac_addr_add            = virtio_mac_addr_add,
	.mac_addr_remove         = virtio_mac_addr_remove,
	.mac_addr_set            = virtio_mac_addr_set,
};

static inline int
virtio_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
			*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/**
 * Atomically writes the link status information into the global
 * rte_eth_dev structure.
 *
 * @param dev
 *   Pointer to the structure rte_eth_dev to write to.
 * @param link
 *   Pointer to the link status to be written.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
virtio_dev_atomic_write_link_status(struct rte_eth_dev *dev,
		struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &(dev->data->dev_link);
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

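/*
 * Fold the per-virtqueue counters into the aggregate rte_eth_stats,
 * filling the per-queue entries for the first
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues.
 */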
static void
virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		const struct virtqueue *txvq = dev->data->tx_queues[i];

		if (txvq == NULL)
			continue;

		stats->opackets += txvq->packets;
		stats->obytes += txvq->bytes;
		stats->oerrors += txvq->errors;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_opackets[i] = txvq->packets;
			stats->q_obytes[i] = txvq->bytes;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		const struct virtqueue *rxvq = dev->data->rx_queues[i];

		if (rxvq == NULL)
			continue;

		stats->ipackets += rxvq->packets;
		stats->ibytes += rxvq->bytes;
		stats->ierrors += rxvq->errors;

		if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
			stats->q_ipackets[i] = rxvq->packets;
			stats->q_ibytes[i] = rxvq->bytes;
		}
	}

	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
}

static int
virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
		      unsigned n)
{
	unsigned i;
	unsigned count = 0;

	unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_Q_XSTATS +
		dev->data->nb_rx_queues * VIRTIO_NB_Q_XSTATS;

	if (n < nstats)
		return nstats;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct virtqueue *rxvq = dev->data->rx_queues[i];

		if (rxvq == NULL)
			continue;

		unsigned t;

		for (t = 0; t < VIRTIO_NB_Q_XSTATS; t++) {
			snprintf(xstats[count].name, sizeof(xstats[count].name),
				 "rx_q%u_%s", i,
				 rte_virtio_q_stat_strings[t].name);
			xstats[count].value = *(uint64_t *)(((char *)rxvq) +
				rte_virtio_q_stat_strings[t].offset);
			count++;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct virtqueue *txvq = dev->data->tx_queues[i];

		if (txvq == NULL)
			continue;

		unsigned t;

		for (t = 0; t < VIRTIO_NB_Q_XSTATS; t++) {
			snprintf(xstats[count].name, sizeof(xstats[count].name),
				 "tx_q%u_%s", i,
				 rte_virtio_q_stat_strings[t].name);
			xstats[count].value = *(uint64_t *)(((char *)txvq) +
				rte_virtio_q_stat_strings[t].offset);
			count++;
		}
	}

	return count;
}

static void
virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	virtio_update_stats(dev, stats);
}

static void
virtio_dev_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct virtqueue *txvq = dev->data->tx_queues[i];

		if (txvq == NULL)
			continue;

		txvq->packets = 0;
		txvq->bytes = 0;
		txvq->errors = 0;
		txvq->multicast = 0;
		txvq->broadcast = 0;
		memset(txvq->size_bins, 0, sizeof(txvq->size_bins[0]) * 8);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct virtqueue *rxvq = dev->data->rx_queues[i];

		if (rxvq == NULL)
			continue;

		rxvq->packets = 0;
		rxvq->bytes = 0;
		rxvq->errors = 0;
		rxvq->multicast = 0;
		rxvq->broadcast = 0;
		memset(rxvq->size_bins, 0, sizeof(rxvq->size_bins[0]) * 8);
	}
}

static void
virtio_set_hwaddr(struct virtio_hw *hw)
{
	vtpci_write_dev_config(hw,
			offsetof(struct virtio_net_config, mac),
			&hw->mac_addr, ETHER_ADDR_LEN);
}

static void
virtio_get_hwaddr(struct virtio_hw *hw)
{
	if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {
		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, mac),
			&hw->mac_addr, ETHER_ADDR_LEN);
	} else {
		eth_random_addr(&hw->mac_addr[0]);
		virtio_set_hwaddr(hw);
	}
}

static void
virtio_mac_table_set(struct virtio_hw *hw,
		     const struct virtio_net_ctrl_mac *uc,
		     const struct virtio_net_ctrl_mac *mc)
{
	struct virtio_pmd_ctrl ctrl;
	int err, len[2];

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		PMD_DRV_LOG(INFO, "host does not support mac table");
		return;
	}

	ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
	ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;

	len[0] = uc->entries * ETHER_ADDR_LEN + sizeof(uc->entries);
	memcpy(ctrl.data, uc, len[0]);

	len[1] = mc->entries * ETHER_ADDR_LEN + sizeof(mc->entries);
	memcpy(ctrl.data + len[0], mc, len[1]);

	err = virtio_send_command(hw->cvq, &ctrl, len, 2);
	if (err != 0)
		PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
}

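/*
 * Rebuild the unicast and multicast MAC filter tables from the ethdev
 * MAC address array, with the new address placed at 'index', and push
 * both tables to the device.
 */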
static void
virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		    uint32_t index, uint32_t vmdq __rte_unused)
{
	struct virtio_hw *hw = dev->data->dev_private;
	const struct ether_addr *addrs = dev->data->mac_addrs;
	unsigned int i;
	struct virtio_net_ctrl_mac *uc, *mc;

	if (index >= VIRTIO_MAX_MAC_ADDRS) {
		PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
		return;
	}

	uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
	uc->entries = 0;
	mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
	mc->entries = 0;

	for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
		const struct ether_addr *addr
			= (i == index) ? mac_addr : addrs + i;
		struct virtio_net_ctrl_mac *tbl
			= is_multicast_ether_addr(addr) ? mc : uc;

		memcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN);
	}

	virtio_mac_table_set(hw, uc, mc);
}

static void
virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct ether_addr *addrs = dev->data->mac_addrs;
	struct virtio_net_ctrl_mac *uc, *mc;
	unsigned int i;

	if (index >= VIRTIO_MAX_MAC_ADDRS) {
		PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
		return;
	}

	uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
	uc->entries = 0;
	mc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(mc->entries));
	mc->entries = 0;

	for (i = 0; i < VIRTIO_MAX_MAC_ADDRS; i++) {
		struct virtio_net_ctrl_mac *tbl;

		if (i == index || is_zero_ether_addr(addrs + i))
			continue;

		tbl = is_multicast_ether_addr(addrs + i) ? mc : uc;
		memcpy(&tbl->macs[tbl->entries++], addrs + i, ETHER_ADDR_LEN);
	}

	virtio_mac_table_set(hw, uc, mc);
}

static void
virtio_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct virtio_hw *hw = dev->data->dev_private;

	memcpy(hw->mac_addr, mac_addr, ETHER_ADDR_LEN);

	/* Use atomic update if available */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
		struct virtio_pmd_ctrl ctrl;
		int len = ETHER_ADDR_LEN;

		ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
		ctrl.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;

		memcpy(ctrl.data, mac_addr, ETHER_ADDR_LEN);
		virtio_send_command(hw->cvq, &ctrl, &len, 1);
	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC))
		virtio_set_hwaddr(hw);
}

static int
virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtio_pmd_ctrl ctrl;
	int len;

	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN))
		return -ENOTSUP;

	ctrl.hdr.class = VIRTIO_NET_CTRL_VLAN;
	ctrl.hdr.cmd = on ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
	memcpy(ctrl.data, &vlan_id, sizeof(vlan_id));
	len = sizeof(vlan_id);

	return virtio_send_command(hw->cvq, &ctrl, &len, 1);
}

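/*
 * Legacy feature negotiation: offer VIRTIO_PMD_GUEST_FEATURES, read the
 * host's feature bits and keep the intersection computed by
 * vtpci_negotiate_features().
 */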
static void
virtio_negotiate_features(struct virtio_hw *hw)
{
	uint32_t host_features;

	/* Prepare guest_features: features that the driver wants to support */
	hw->guest_features = VIRTIO_PMD_GUEST_FEATURES;
	PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %x",
		hw->guest_features);

	/* Read device(host) feature bits */
	host_features = VIRTIO_READ_REG_4(hw, VIRTIO_PCI_HOST_FEATURES);
	PMD_INIT_LOG(DEBUG, "host_features before negotiate = %x",
		host_features);

	/*
	 * Negotiate features: a subset of the device's feature bits is
	 * written back as the guest feature bits.
	 */
	hw->guest_features = vtpci_negotiate_features(hw, host_features);
	PMD_INIT_LOG(DEBUG, "features after negotiate = %x",
		hw->guest_features);
}

#ifdef RTE_EXEC_ENV_LINUXAPP
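/* Read one unsigned long value from a sysfs file. */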
static int
parse_sysfs_value(const char *filename, unsigned long *val)
{
	FILE *f;
	char buf[BUFSIZ];
	char *end = NULL;

	f = fopen(filename, "r");
	if (f == NULL) {
		PMD_INIT_LOG(ERR, "%s(): cannot open sysfs value %s",
			     __func__, filename);
		return -1;
	}

	if (fgets(buf, sizeof(buf), f) == NULL) {
		PMD_INIT_LOG(ERR, "%s(): cannot read sysfs value %s",
			     __func__, filename);
		fclose(f);
		return -1;
	}
	*val = strtoul(buf, &end, 0);
	if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
		PMD_INIT_LOG(ERR, "%s(): cannot parse sysfs value %s",
			     __func__, filename);
		fclose(f);
		return -1;
	}
	fclose(f);
	return 0;
}

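/*
 * Find the uio device bound to the given PCI address by scanning sysfs;
 * on success, buf holds the uio sysfs path and *uio_num its number.
 */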
static int get_uio_dev(struct rte_pci_addr *loc, char *buf, unsigned int buflen,
			unsigned int *uio_num)
{
	struct dirent *e;
	DIR *dir;
	char dirname[PATH_MAX];

	/* depending on kernel version, uio can be located in uio/uioX
	 * or uio:uioX */
	snprintf(dirname, sizeof(dirname),
		     SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/uio",
		     loc->domain, loc->bus, loc->devid, loc->function);
	dir = opendir(dirname);
	if (dir == NULL) {
		/* retry with the parent directory */
		snprintf(dirname, sizeof(dirname),
			     SYSFS_PCI_DEVICES "/" PCI_PRI_FMT,
			     loc->domain, loc->bus, loc->devid, loc->function);
		dir = opendir(dirname);

		if (dir == NULL) {
			PMD_INIT_LOG(ERR, "Cannot opendir %s", dirname);
			return -1;
		}
	}

	/* take the first file starting with "uio" */
	while ((e = readdir(dir)) != NULL) {
		/* format could be uio%d ...*/
		int shortprefix_len = sizeof("uio") - 1;
		/* ... or uio:uio%d */
		int longprefix_len = sizeof("uio:uio") - 1;
		char *endptr;

		if (strncmp(e->d_name, "uio", 3) != 0)
			continue;

		/* first try uio%d */
		errno = 0;
		*uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
		if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
			snprintf(buf, buflen, "%s/uio%u", dirname, *uio_num);
			break;
		}

		/* then try uio:uio%d */
		errno = 0;
		*uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
		if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
			snprintf(buf, buflen, "%s/uio:uio%u", dirname,
				     *uio_num);
			break;
		}
	}
	closedir(dir);

	/* No uio resource found */
	if (e == NULL) {
		PMD_INIT_LOG(ERR, "Could not find uio resource");
		return -1;
	}

	return 0;
}

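/*
 * MSI-X is considered enabled if the device's msi_irqs directory exists
 * in sysfs.
 */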
static int
virtio_has_msix(const struct rte_pci_addr *loc)
{
	DIR *d;
	char dirname[PATH_MAX];

	snprintf(dirname, sizeof(dirname),
		     SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/msi_irqs",
		     loc->domain, loc->bus, loc->devid, loc->function);

	d = opendir(dirname);
	if (d)
		closedir(d);

	return (d != NULL);
}

/* Extract I/O port numbers from sysfs */
static int virtio_resource_init_by_uio(struct rte_pci_device *pci_dev)
{
	char dirname[PATH_MAX];
	char filename[PATH_MAX];
	unsigned long start, size;
	unsigned int uio_num;

	if (get_uio_dev(&pci_dev->addr, dirname, sizeof(dirname), &uio_num) < 0)
		return -1;

	/* get portio size */
	snprintf(filename, sizeof(filename),
		     "%s/portio/port0/size", dirname);
	if (parse_sysfs_value(filename, &size) < 0) {
		PMD_INIT_LOG(ERR, "%s(): cannot parse size",
			     __func__);
		return -1;
	}

	/* get portio start */
	snprintf(filename, sizeof(filename),
		 "%s/portio/port0/start", dirname);
	if (parse_sysfs_value(filename, &start) < 0) {
		PMD_INIT_LOG(ERR, "%s(): cannot parse portio start",
			     __func__);
		return -1;
	}
	pci_dev->mem_resource[0].addr = (void *)(uintptr_t)start;
	pci_dev->mem_resource[0].len =  (uint64_t)size;
	PMD_INIT_LOG(DEBUG,
		     "PCI Port IO found start=0x%lx with size=0x%lx",
		     start, size);

	/* save fd */
	memset(dirname, 0, sizeof(dirname));
	snprintf(dirname, sizeof(dirname), "/dev/uio%u", uio_num);
	pci_dev->intr_handle.fd = open(dirname, O_RDWR);
	if (pci_dev->intr_handle.fd < 0) {
		PMD_INIT_LOG(ERR, "Cannot open %s: %s",
			dirname, strerror(errno));
		return -1;
	}

	pci_dev->intr_handle.type = RTE_INTR_HANDLE_UIO;
	pci_dev->driver->drv_flags |= RTE_PCI_DRV_INTR_LSC;

	return 0;
}

/* Extract port I/O numbers from /proc/ioports */
static int virtio_resource_init_by_ioports(struct rte_pci_device *pci_dev)
{
	uint16_t start, end;
	int size;
	FILE *fp;
	char *line = NULL;
	char pci_id[16];
	int found = 0;
	size_t linesz;

	snprintf(pci_id, sizeof(pci_id), PCI_PRI_FMT,
		 pci_dev->addr.domain,
		 pci_dev->addr.bus,
		 pci_dev->addr.devid,
		 pci_dev->addr.function);

	fp = fopen("/proc/ioports", "r");
	if (fp == NULL) {
		PMD_INIT_LOG(ERR, "%s(): can't open ioports", __func__);
		return -1;
	}

	while (getdelim(&line, &linesz, '\n', fp) > 0) {
		char *ptr = line;
		char *left;
		int n;

		n = strcspn(ptr, ":");
		ptr[n] = 0;
		left = &ptr[n+1];

		while (*left && isspace(*left))
			left++;

		if (!strncmp(left, pci_id, strlen(pci_id))) {
			found = 1;

			while (*ptr && isspace(*ptr))
				ptr++;

			sscanf(ptr, "%04hx-%04hx", &start, &end);
			size = end - start + 1;

			break;
		}
	}

	free(line);
	fclose(fp);

	if (!found)
		return -1;

	pci_dev->mem_resource[0].addr = (void *)(uintptr_t)(uint32_t)start;
	pci_dev->mem_resource[0].len =  (uint64_t)size;
	PMD_INIT_LOG(DEBUG,
		"PCI Port IO found start=0x%x with size=0x%x",
		start, size);

	/* can't support lsc interrupt without uio */
	pci_dev->driver->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;

	return 0;
}

/* Extract I/O port numbers: try uio first, fall back to /proc/ioports */
static int virtio_resource_init(struct rte_pci_device *pci_dev)
{
	if (virtio_resource_init_by_uio(pci_dev) == 0)
		return 0;
	else
		return virtio_resource_init_by_ioports(pci_dev);
}

#else
static int
virtio_has_msix(const struct rte_pci_addr *loc __rte_unused)
{
	/* nic_uio does not enable interrupts, return 0 (false). */
	return 0;
}

static int virtio_resource_init(struct rte_pci_device *pci_dev __rte_unused)
{
	/* no setup required */
	return 0;
}
#endif

/*
 * Process the Virtio "config changed" interrupt and call the callback
 * if the link state changed.
 */
static void
virtio_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
			 void *param)
{
	struct rte_eth_dev *dev = param;
	struct virtio_hw *hw = dev->data->dev_private;
	uint8_t isr;

	/* Read interrupt status which clears interrupt */
	isr = vtpci_isr(hw);
	PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);

	if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0)
		PMD_DRV_LOG(ERR, "interrupt enable failed");

	if (isr & VIRTIO_PCI_ISR_CONFIG) {
		if (virtio_dev_link_update(dev, 0) == 0)
			_rte_eth_dev_callback_process(dev,
						      RTE_ETH_EVENT_INTR_LSC);
	}
}

static void
rx_func_get(struct rte_eth_dev *eth_dev)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;

	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
		eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
	else
		eth_dev->rx_pkt_burst = &virtio_recv_pkts;
}

/*
 * This function is based on probe() function in virtio_pci.c
 * It returns 0 on success.
 */
static int
eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;
	struct virtio_net_config *config;
	struct virtio_net_config local_config;
	struct rte_pci_device *pci_dev;

	RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr));

	eth_dev->dev_ops = &virtio_eth_dev_ops;
	eth_dev->tx_pkt_burst = &virtio_xmit_pkts;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		rx_func_get(eth_dev);
		return 0;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("virtio",
		VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			"Failed to allocate %d bytes needed to store MAC addresses",
			VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	pci_dev = eth_dev->pci_dev;

	if (virtio_resource_init(pci_dev) < 0)
		return -1;

	hw->use_msix = virtio_has_msix(&pci_dev->addr);
	hw->io_base = (uint32_t)(uintptr_t)pci_dev->mem_resource[0].addr;

	/* Reset the device, although not strictly necessary at startup */
	vtpci_reset(hw);

	/* Tell the host we've noticed this device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);

	/* Tell the host we know how to drive the device. */
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
	virtio_negotiate_features(hw);

	/* If host does not support status then disable LSC */
	if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS))
		pci_dev->driver->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	rx_func_get(eth_dev);

	/* Setting up rx_header size for the device */
	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);

	/* Copy the permanent MAC address to: virtio_hw */
	virtio_get_hwaddr(hw);
	ether_addr_copy((struct ether_addr *) hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);
	PMD_INIT_LOG(DEBUG,
		     "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
		config = &local_config;

		vtpci_read_dev_config(hw,
			offsetof(struct virtio_net_config, mac),
			&config->mac, sizeof(config->mac));

		if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, status),
				&config->status, sizeof(config->status));
		} else {
			PMD_INIT_LOG(DEBUG,
				     "VIRTIO_NET_F_STATUS is not supported");
			config->status = 0;
		}

		if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
			vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, max_virtqueue_pairs),
				&config->max_virtqueue_pairs,
				sizeof(config->max_virtqueue_pairs));
		} else {
			PMD_INIT_LOG(DEBUG,
				     "VIRTIO_NET_F_MQ is not supported");
			config->max_virtqueue_pairs = 1;
		}

		hw->max_rx_queues =
			(VIRTIO_MAX_RX_QUEUES < config->max_virtqueue_pairs) ?
			VIRTIO_MAX_RX_QUEUES : config->max_virtqueue_pairs;
		hw->max_tx_queues =
			(VIRTIO_MAX_TX_QUEUES < config->max_virtqueue_pairs) ?
			VIRTIO_MAX_TX_QUEUES : config->max_virtqueue_pairs;

		virtio_dev_cq_queue_setup(eth_dev,
					config->max_virtqueue_pairs * 2,
					SOCKET_ID_ANY);

		PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
				config->max_virtqueue_pairs);
		PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
		PMD_INIT_LOG(DEBUG,
				"PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
				config->mac[0], config->mac[1],
				config->mac[2], config->mac[3],
				config->mac[4], config->mac[5]);
	} else {
		hw->max_rx_queues = 1;
		hw->max_tx_queues = 1;
	}

	eth_dev->data->nb_rx_queues = hw->max_rx_queues;
	eth_dev->data->nb_tx_queues = hw->max_tx_queues;

	PMD_INIT_LOG(DEBUG, "hw->max_rx_queues=%d   hw->max_tx_queues=%d",
			hw->max_rx_queues, hw->max_tx_queues);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	/* Setup interrupt callback */
	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
		rte_intr_callback_register(&pci_dev->intr_handle,
				   virtio_interrupt_handler, eth_dev);

	virtio_dev_cq_start(eth_dev);

	return 0;
}

static int
eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct virtio_hw *hw = eth_dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return -EPERM;

	if (hw->started == 1) {
		virtio_dev_stop(eth_dev);
		virtio_dev_close(eth_dev);
	}
	pci_dev = eth_dev->pci_dev;

	eth_dev->dev_ops = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->rx_pkt_burst = NULL;

	virtio_dev_queue_release(hw->cvq);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	/* reset interrupt callback */
	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
		rte_intr_callback_unregister(&pci_dev->intr_handle,
						virtio_interrupt_handler,
						eth_dev);

	PMD_INIT_LOG(DEBUG, "dev_uninit completed");

	return 0;
}

static struct eth_driver rte_virtio_pmd = {
	.pci_drv = {
		.name = "rte_virtio_pmd",
		.id_table = pci_id_virtio_map,
		.drv_flags = RTE_PCI_DRV_DETACHABLE,
	},
	.eth_dev_init = eth_virtio_dev_init,
	.eth_dev_uninit = eth_virtio_dev_uninit,
	.dev_private_size = sizeof(struct virtio_hw),
};

/*
 * Driver initialization routine.
 * Invoked once at EAL init time.
 * Registers itself as the [Poll Mode] Driver of PCI virtio devices.
 * Returns 0 on success.
 */
static int
rte_virtio_pmd_init(const char *name __rte_unused,
		    const char *param __rte_unused)
{
	if (rte_eal_iopl_init() != 0) {
		PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD");
		return -1;
	}

	rte_eth_driver_register(&rte_virtio_pmd);
	return 0;
}

/*
 * Configure virtio device
 * It returns 0 on success.
 */
static int
virtio_dev_configure(struct rte_eth_dev *dev)
{
	const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct virtio_hw *hw = dev->data->dev_private;
	struct rte_pci_device *pci_dev = dev->pci_dev;

	PMD_INIT_LOG(DEBUG, "configure");

	if (rxmode->hw_ip_checksum) {
		PMD_DRV_LOG(ERR, "HW IP checksum not supported");
		return -EINVAL;
	}

	hw->vlan_strip = rxmode->hw_vlan_strip;

	if (rxmode->hw_vlan_filter
	    && !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
		PMD_DRV_LOG(NOTICE,
			    "vlan filtering not available on this host");
		return -ENOTSUP;
	}

	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
		if (vtpci_irq_config(hw, 0) == VIRTIO_MSI_NO_VECTOR) {
			PMD_DRV_LOG(ERR, "failed to set config vector");
			return -EBUSY;
		}

	return 0;
}

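/*
 * Start the device: enable the LSC interrupt if requested, finish RX/TX
 * queue setup on the first start, negotiate the number of queue pairs
 * and kick the RX queues so the backend starts processing.
 */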
static int
virtio_dev_start(struct rte_eth_dev *dev)
{
	uint16_t nb_queues, i;
	struct virtio_hw *hw = dev->data->dev_private;
	struct rte_pci_device *pci_dev = dev->pci_dev;

	/* check if lsc interrupt feature is enabled */
	if (dev->data->dev_conf.intr_conf.lsc) {
		if (!(pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
			PMD_DRV_LOG(ERR, "link status not supported by host");
			return -ENOTSUP;
		}

		if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0) {
			PMD_DRV_LOG(ERR, "interrupt enable failed");
			return -EIO;
		}
	}

	/* Initialize Link state */
	virtio_dev_link_update(dev, 0);

	/* On restart after stop do not touch queues */
	if (hw->started)
		return 0;

	/* Do final configuration before rx/tx engine starts */
	virtio_dev_rxtx_start(dev);
	vtpci_reinit_complete(hw);

	hw->started = 1;

	/*
	 * Notify the backend; otherwise the tap backend might already have
	 * stopped its queue due to fullness and the vhost backend would
	 * never be woken up.
	 */
	nb_queues = dev->data->nb_rx_queues;
	if (nb_queues > 1) {
		if (virtio_set_multiple_queues(dev, nb_queues) != 0)
			return -EINVAL;
	}

	PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);

	for (i = 0; i < nb_queues; i++)
		virtqueue_notify(dev->data->rx_queues[i]);

	PMD_INIT_LOG(DEBUG, "Notified backend at initialization");

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);

	for (i = 0; i < dev->data->nb_tx_queues; i++)
		VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);

	return 0;
}

static void
virtio_dev_free_mbufs(struct rte_eth_dev *dev)
{
	struct rte_mbuf *buf;
	int i, mbuf_num = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		PMD_INIT_LOG(DEBUG,
			     "Before freeing rxq[%d] used and unused buf", i);
		VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);

		PMD_INIT_LOG(DEBUG, "rx_queues[%d]=%p",
				i, dev->data->rx_queues[i]);

		mbuf_num = 0;
		while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(
					dev->data->rx_queues[i])) != NULL) {
			rte_pktmbuf_free(buf);
			mbuf_num++;
		}

		PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
		PMD_INIT_LOG(DEBUG,
			     "After freeing rxq[%d] used and unused buf", i);
		VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		PMD_INIT_LOG(DEBUG,
			     "Before freeing txq[%d] used and unused bufs",
			     i);
		VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);

		mbuf_num = 0;
		while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(
					dev->data->tx_queues[i])) != NULL) {
			rte_pktmbuf_free(buf);
			mbuf_num++;
		}

		PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num);
		PMD_INIT_LOG(DEBUG,
			     "After freeing txq[%d] used and unused buf", i);
		VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
	}
}

/*
 * Stop device: disable interrupt and mark link down
 */
static void
virtio_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	PMD_INIT_LOG(DEBUG, "stop");

	if (dev->data->dev_conf.intr_conf.lsc)
		rte_intr_disable(&dev->pci_dev->intr_handle);

	memset(&link, 0, sizeof(link));
	virtio_dev_atomic_write_link_status(dev, &link);
}

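/*
 * Poll the link status from device config space when VIRTIO_NET_F_STATUS
 * was negotiated; otherwise the link is reported as always up.  Speed and
 * duplex are nominal values, as virtio has no physical link.
 */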
static int
virtio_dev_link_update(struct rte_eth_dev *dev,
		__rte_unused int wait_to_complete)
{
	struct rte_eth_link link, old;
	uint16_t status;
	struct virtio_hw *hw = dev->data->dev_private;

	memset(&link, 0, sizeof(link));
	virtio_dev_atomic_read_link_status(dev, &link);
	old = link;
	link.link_duplex = FULL_DUPLEX;
	link.link_speed  = SPEED_10G;

	if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
		PMD_INIT_LOG(DEBUG, "Get link status from hw");
		vtpci_read_dev_config(hw,
				offsetof(struct virtio_net_config, status),
				&status, sizeof(status));
		if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
			link.link_status = 0;
			PMD_INIT_LOG(DEBUG, "Port %d is down",
				     dev->data->port_id);
		} else {
			link.link_status = 1;
			PMD_INIT_LOG(DEBUG, "Port %d is up",
				     dev->data->port_id);
		}
	} else {
		link.link_status = 1;	/* Link up */
	}
	virtio_dev_atomic_write_link_status(dev, &link);

	return (old.link_status == link.link_status) ? -1 : 0;
}

static void
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct virtio_hw *hw = dev->data->dev_private;

	dev_info->driver_name = dev->driver->pci_drv.name;
	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
	dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
	dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
	dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
	dev_info->default_txconf = (struct rte_eth_txconf) {
		.txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS
	};
}

/*
 * It enables testpmd to collect per-queue stats.
 */
static int
virtio_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *eth_dev,
__rte_unused uint16_t queue_id, __rte_unused uint8_t stat_idx,
__rte_unused uint8_t is_rx)
{
	return 0;
}

static struct rte_driver rte_virtio_driver = {
	.type = PMD_PDEV,
	.init = rte_virtio_pmd_init,
};

PMD_REGISTER_DRIVER(rte_virtio_driver);