/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>

#ifdef RTE_EXEC_ENV_LINUXAPP
 #include <dirent.h>
 #include <fcntl.h>
#endif

#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"

/*
 * The following macros are derived from linux/pci_regs.h; we can't
 * simply include that header here, as there is no such file on
 * non-Linux platforms.
 */
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_ID_VNDR		0x09

/*
 * The remaining space is defined by each driver as the per-driver
 * configuration space. In the legacy layout it starts at offset 20,
 * or at offset 24 when MSI-X is enabled, since the MSI-X config and
 * queue vector registers then occupy bytes 20-23.
 */
#define VIRTIO_PCI_CONFIG(hw) (((hw)->use_msix) ? 24 : 20)

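/*
 * Legacy (virtio 0.9.5) devices are driven entirely through the PCI
 * I/O port BAR: each register below is a fixed offset into hw->io,
 * accessed with rte_eal_pci_ioport_read()/write().
 */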
static void
legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
	rte_eal_pci_ioport_read(&hw->io, dst, length,
				VIRTIO_PCI_CONFIG(hw) + offset);
}

static void
legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
	rte_eal_pci_ioport_write(&hw->io, src, length,
				 VIRTIO_PCI_CONFIG(hw) + offset);
}

static uint64_t
legacy_get_features(struct virtio_hw *hw)
{
	uint32_t dst;

	rte_eal_pci_ioport_read(&hw->io, &dst, 4, VIRTIO_PCI_HOST_FEATURES);
	return dst;
}

static void
legacy_set_features(struct virtio_hw *hw, uint64_t features)
{
	if ((features >> 32) != 0) {
		PMD_DRV_LOG(ERR,
			"only 32 bit features are allowed for legacy virtio!");
		return;
	}
	rte_eal_pci_ioport_write(&hw->io, &features, 4,
				 VIRTIO_PCI_GUEST_FEATURES);
}

static uint8_t
legacy_get_status(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_eal_pci_ioport_read(&hw->io, &dst, 1, VIRTIO_PCI_STATUS);
	return dst;
}

static void
legacy_set_status(struct virtio_hw *hw, uint8_t status)
{
	rte_eal_pci_ioport_write(&hw->io, &status, 1, VIRTIO_PCI_STATUS);
}

static void
legacy_reset(struct virtio_hw *hw)
{
	legacy_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
}

static uint8_t
legacy_get_isr(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_eal_pci_ioport_read(&hw->io, &dst, 1, VIRTIO_PCI_ISR);
	return dst;
}

/*
 * Enable one vector (0) for Link State Interrupt. The vector is read
 * back so the caller sees the value the device actually accepted.
 */
static uint16_t
legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	uint16_t dst;

	rte_eal_pci_ioport_write(&hw->io, &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
	rte_eal_pci_ioport_read(&hw->io, &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
	return dst;
}

static uint16_t
legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	uint16_t dst;

	rte_eal_pci_ioport_write(&hw->io, &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
	rte_eal_pci_ioport_read(&hw->io, &dst, 2, VIRTIO_PCI_QUEUE_NUM);
	return dst;
}

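/*
 * Program a queue: select it, then hand the device the ring's physical
 * address as a page frame number (address >> VIRTIO_PCI_QUEUE_ADDR_SHIFT).
 * Writing a PFN of 0 releases the queue again (see legacy_del_queue).
 */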
static void
legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src;

	rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2,
			 VIRTIO_PCI_QUEUE_SEL);
	src = vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
	rte_eal_pci_ioport_write(&hw->io, &src, 4, VIRTIO_PCI_QUEUE_PFN);
}

static void
legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src = 0;

	rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2,
			 VIRTIO_PCI_QUEUE_SEL);
	rte_eal_pci_ioport_write(&hw->io, &src, 4, VIRTIO_PCI_QUEUE_PFN);
}

static void
legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2,
			 VIRTIO_PCI_QUEUE_NOTIFY);
}

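/*
 * On Linux, MSI-X support is detected by probing the device's
 * msi_irqs directory in sysfs, which is present when MSI-X vectors
 * have been allocated for the device.
 */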
#ifdef RTE_EXEC_ENV_LINUXAPP
static int
legacy_virtio_has_msix(const struct rte_pci_addr *loc)
{
	DIR *d;
	char dirname[PATH_MAX];

	snprintf(dirname, sizeof(dirname),
		     SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/msi_irqs",
		     loc->domain, loc->bus, loc->devid, loc->function);

	d = opendir(dirname);
	if (d)
		closedir(d);

	return d != NULL;
}
#else
static int
legacy_virtio_has_msix(const struct rte_pci_addr *loc __rte_unused)
{
	/* nic_uio does not enable interrupts, return 0 (false). */
	return 0;
}
#endif

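/*
 * Map BAR 0 as an I/O port region, and advertise link-state-change
 * interrupt support only when a usable interrupt handle exists.
 */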
static int
legacy_virtio_resource_init(struct rte_pci_device *pci_dev,
			    struct virtio_hw *hw)
{
	if (rte_eal_pci_ioport_map(pci_dev, 0, &hw->io) < 0)
		return -1;

	if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UNKNOWN)
		pci_dev->driver->drv_flags |= RTE_PCI_DRV_INTR_LSC;
	else
		pci_dev->driver->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;

	return 0;
}

static const struct virtio_pci_ops legacy_ops = {
	.read_dev_cfg	= legacy_read_dev_config,
	.write_dev_cfg	= legacy_write_dev_config,
	.reset		= legacy_reset,
	.get_status	= legacy_get_status,
	.set_status	= legacy_set_status,
	.get_features	= legacy_get_features,
	.set_features	= legacy_set_features,
	.get_isr	= legacy_get_isr,
	.set_config_irq	= legacy_set_config_irq,
	.get_queue_num	= legacy_get_queue_num,
	.setup_queue	= legacy_setup_queue,
	.del_queue	= legacy_del_queue,
	.notify_queue	= legacy_notify_queue,
};


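/*
 * Modern (virtio 1.0) capability structures are memory-mapped, so the
 * helpers below issue plain loads and stores through volatile pointers,
 * which keeps the compiler from caching or eliding device accesses.
 */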
static inline uint8_t
io_read8(uint8_t *addr)
{
	return *(volatile uint8_t *)addr;
}

static inline void
io_write8(uint8_t val, uint8_t *addr)
{
	*(volatile uint8_t *)addr = val;
}

static inline uint16_t
io_read16(uint16_t *addr)
{
	return *(volatile uint16_t *)addr;
}

static inline void
io_write16(uint16_t val, uint16_t *addr)
{
	*(volatile uint16_t *)addr = val;
}

static inline uint32_t
io_read32(uint32_t *addr)
{
	return *(volatile uint32_t *)addr;
}

static inline void
io_write32(uint32_t val, uint32_t *addr)
{
	*(volatile uint32_t *)addr = val;
}

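/*
 * 64-bit fields in the common config (e.g. queue addresses) are laid
 * out as two consecutive 32-bit registers, written low half first.
 */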
static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	io_write32(val & ((1ULL << 32) - 1), lo);
	io_write32(val >> 32,		     hi);
}

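/*
 * Device config reads can span several bytes, and the device may
 * update the config concurrently. Per the virtio 1.0 spec, reread
 * until config_generation is the same before and after the copy,
 * which guarantees a consistent snapshot.
 */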
static void
modern_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;

	do {
		old_gen = io_read8(&hw->common_cfg->config_generation);

		p = dst;
		for (i = 0; i < length; i++)
			*p++ = io_read8((uint8_t *)hw->dev_cfg + offset + i);

		new_gen = io_read8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

static void
modern_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
	int i;
	const uint8_t *p = src;

	for (i = 0; i < length; i++)
		io_write8(*p++, (uint8_t *)hw->dev_cfg + offset + i);
}

static uint64_t
modern_get_features(struct virtio_hw *hw)
{
	uint32_t features_lo, features_hi;

	io_write32(0, &hw->common_cfg->device_feature_select);
	features_lo = io_read32(&hw->common_cfg->device_feature);

	io_write32(1, &hw->common_cfg->device_feature_select);
	features_hi = io_read32(&hw->common_cfg->device_feature);

	return ((uint64_t)features_hi << 32) | features_lo;
}

static void
modern_set_features(struct virtio_hw *hw, uint64_t features)
{
	io_write32(0, &hw->common_cfg->guest_feature_select);
	io_write32(features & ((1ULL << 32) - 1),
		&hw->common_cfg->guest_feature);

	io_write32(1, &hw->common_cfg->guest_feature_select);
	io_write32(features >> 32,
		&hw->common_cfg->guest_feature);
}

static uint8_t
modern_get_status(struct virtio_hw *hw)
{
	return io_read8(&hw->common_cfg->device_status);
}

static void
modern_set_status(struct virtio_hw *hw, uint8_t status)
{
	io_write8(status, &hw->common_cfg->device_status);
}

static void
modern_reset(struct virtio_hw *hw)
{
	modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	modern_get_status(hw);
}

static uint8_t
modern_get_isr(struct virtio_hw *hw)
{
	return io_read8(hw->isr);
}

static uint16_t
modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	io_write16(vec, &hw->common_cfg->msix_config);
	return io_read16(&hw->common_cfg->msix_config);
}

static uint16_t
modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	io_write16(queue_id, &hw->common_cfg->queue_select);
	return io_read16(&hw->common_cfg->queue_size);
}

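/*
 * The three vring components live back to back in one contiguous
 * allocation: the descriptor table, then the avail ring, then the
 * used ring aligned up to VIRTIO_PCI_VRING_ALIGN. The notify address
 * is derived from the queue's notify offset scaled by the multiplier
 * read from the notify capability.
 */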
static void
modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;

	desc_addr = vq->mz->phys_addr;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	io_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
				      &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
				       &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
				      &hw->common_cfg->queue_used_hi);

	notify_off = io_read16(&hw->common_cfg->queue_notify_off);
	vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
				notify_off * hw->notify_off_multiplier);

	io_write16(1, &hw->common_cfg->queue_enable);

	PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->vq_queue_index);
	PMD_INIT_LOG(DEBUG, "\t desc_addr: %" PRIx64, desc_addr);
	PMD_INIT_LOG(DEBUG, "\t avail_addr: %" PRIx64, avail_addr);
	PMD_INIT_LOG(DEBUG, "\t used_addr: %" PRIx64, used_addr);
	PMD_INIT_LOG(DEBUG, "\t notify addr: %p (notify offset: %u)",
		vq->notify_addr, notify_off);
}

static void
modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	io_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
				  &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
				  &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
				  &hw->common_cfg->queue_used_hi);

	io_write16(0, &hw->common_cfg->queue_enable);
}

static void
modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
{
	io_write16(1, vq->notify_addr);
}

static const struct virtio_pci_ops modern_ops = {
	.read_dev_cfg	= modern_read_dev_config,
	.write_dev_cfg	= modern_write_dev_config,
	.reset		= modern_reset,
	.get_status	= modern_get_status,
	.set_status	= modern_set_status,
	.get_features	= modern_get_features,
	.set_features	= modern_set_features,
	.get_isr	= modern_get_isr,
	.set_config_irq	= modern_set_config_irq,
	.get_queue_num	= modern_get_queue_num,
	.setup_queue	= modern_setup_queue,
	.del_queue	= modern_del_queue,
	.notify_queue	= modern_notify_queue,
};


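/*
 * The vtpci_* entry points below dispatch through hw->vtpci_ops so
 * callers stay agnostic to whether the device is legacy or modern.
 */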
void
vtpci_read_dev_config(struct virtio_hw *hw, size_t offset,
		      void *dst, int length)
{
	hw->vtpci_ops->read_dev_cfg(hw, offset, dst, length);
}

void
vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
		       const void *src, int length)
{
	hw->vtpci_ops->write_dev_cfg(hw, offset, src, length);
}

uint64_t
vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
{
	uint64_t features;

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & hw->guest_features;
	hw->vtpci_ops->set_features(hw, features);

	return features;
}

void
vtpci_reset(struct virtio_hw *hw)
{
	hw->vtpci_ops->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	/* flush status write */
	hw->vtpci_ops->get_status(hw);
}

void
vtpci_reinit_complete(struct virtio_hw *hw)
{
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

void
vtpci_set_status(struct virtio_hw *hw, uint8_t status)
{
	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= hw->vtpci_ops->get_status(hw);

	hw->vtpci_ops->set_status(hw, status);
}

uint8_t
vtpci_get_status(struct virtio_hw *hw)
{
	return hw->vtpci_ops->get_status(hw);
}

uint8_t
vtpci_isr(struct virtio_hw *hw)
{
	return hw->vtpci_ops->get_isr(hw);
}


/* Enable one vector (0) for Link State Interrupt */
uint16_t
vtpci_irq_config(struct virtio_hw *hw, uint16_t vec)
{
	return hw->vtpci_ops->set_config_irq(hw, vec);
}

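/*
 * Translate a vendor capability into a pointer within the mapped BAR,
 * rejecting capabilities that name an invalid BAR, overflow, or point
 * past the end of the BAR's resource.
 */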
static void *
get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
{
	uint8_t  bar    = cap->bar;
	uint32_t length = cap->length;
	uint32_t offset = cap->offset;
	uint8_t *base;

	if (bar > 5) {
		PMD_INIT_LOG(ERR, "invalid bar: %u", bar);
		return NULL;
	}

	if (offset + length < offset) {
		PMD_INIT_LOG(ERR, "offset(%u) + length(%u) overflows",
			offset, length);
		return NULL;
	}

	if (offset + length > dev->mem_resource[bar].len) {
		PMD_INIT_LOG(ERR,
			"invalid cap: overflows bar space: %u > %" PRIu64,
			offset + length, dev->mem_resource[bar].len);
		return NULL;
	}

	base = dev->mem_resource[bar].addr;
	if (base == NULL) {
		PMD_INIT_LOG(ERR, "bar %u base addr is NULL", bar);
		return NULL;
	}

	return base + offset;
}

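/*
 * Walk the PCI capability list looking for vendor-specific (VNDR)
 * capabilities and record where each virtio config structure lives,
 * keyed by cfg_type. A device qualifies as modern only if the common,
 * notify, device, and ISR structures are all present.
 */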
static int
virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
{
	uint8_t pos;
	struct virtio_pci_cap cap;
	int ret;

	if (rte_eal_pci_map_device(dev)) {
		PMD_INIT_LOG(DEBUG, "failed to map pci device!");
		return -1;
	}

	ret = rte_eal_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret < 0) {
		PMD_INIT_LOG(DEBUG, "failed to read pci capability list");
		return -1;
	}

	while (pos) {
		ret = rte_eal_pci_read_config(dev, &cap, sizeof(cap), pos);
		if (ret < 0) {
			PMD_INIT_LOG(ERR,
				"failed to read pci cap at pos: %x", pos);
			break;
		}

		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
			PMD_INIT_LOG(DEBUG,
				"[%2x] skipping non VNDR cap id: %02x",
				pos, cap.cap_vndr);
			goto next;
		}

		PMD_INIT_LOG(DEBUG,
			"[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
			pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			rte_eal_pci_read_config(dev, &hw->notify_off_multiplier,
						4, pos + sizeof(cap));
			hw->notify_base = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cfg_addr(dev, &cap);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->dev_cfg == NULL    || hw->isr == NULL) {
		PMD_INIT_LOG(INFO, "no modern virtio pci device found.");
		return -1;
	}

	PMD_INIT_LOG(INFO, "found modern virtio pci device.");

	PMD_INIT_LOG(DEBUG, "common cfg mapped at: %p", hw->common_cfg);
	PMD_INIT_LOG(DEBUG, "device cfg mapped at: %p", hw->dev_cfg);
	PMD_INIT_LOG(DEBUG, "isr cfg mapped at: %p", hw->isr);
	PMD_INIT_LOG(DEBUG, "notify base: %p, notify off multiplier: %u",
		hw->notify_base, hw->notify_off_multiplier);

	return 0;
}

/*
 * Return -1:
 *   if there is an error mapping the device with VFIO/UIO, or
 *   if the I/O port map fails when the driver type is KDRV_NONE.
 * Return 1 if a kernel driver is managing the device.
 * Return 0 on success.
 */
int
vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
{
	hw->dev = dev;

	/*
	 * Check whether we can read the virtio PCI capabilities, which
	 * exist only on modern PCI devices. If that fails, fall back to
	 * legacy virtio handling.
	 */
	if (virtio_read_caps(dev, hw) == 0) {
		PMD_INIT_LOG(INFO, "modern virtio pci detected.");
		hw->vtpci_ops = &modern_ops;
		hw->modern    = 1;
		dev->driver->drv_flags |= RTE_PCI_DRV_INTR_LSC;
		return 0;
	}

	PMD_INIT_LOG(INFO, "trying with legacy virtio pci.");
	if (legacy_virtio_resource_init(dev, hw) < 0) {
		if (dev->kdrv == RTE_KDRV_UNKNOWN &&
		    dev->devargs->type != RTE_DEVTYPE_WHITELISTED_PCI) {
			PMD_INIT_LOG(INFO,
				"skip kernel managed virtio device.");
			return 1;
		}
		return -1;
	}

	hw->vtpci_ops = &legacy_ops;
	hw->use_msix = legacy_virtio_has_msix(&dev->addr);
	hw->modern   = 0;

	return 0;
}