/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>

#ifdef RTE_EXEC_ENV_LINUXAPP
#include <dirent.h>
#include <fcntl.h>
#endif

#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"

/*
 * The following macros are derived from linux/pci_regs.h; we can't
 * simply include that header here, as there is no such file on
 * non-Linux platforms.
 */
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_ID_VNDR		0x09

/*
 * The remaining space is defined by each driver as the per-driver
 * configuration space.
 */
#define VIRTIO_PCI_CONFIG(hw) (((hw)->use_msix) ? 24 : 20)

static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
	/* The virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32 bit,
	 * and only accepts a 32 bit page frame number.
	 * Check whether the allocated physical memory exceeds 16TB.
	 */
	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
			(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
		return 0;
	}

	return 1;
}
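
/*
 * Arithmetic behind the 16TB bound above, assuming
 * VIRTIO_PCI_QUEUE_ADDR_SHIFT is the 4KB page shift (12): the legacy
 * PFN register stores ring_addr >> 12 in 32 bits, so the highest
 * addressable ring byte is (2^32 - 1) << 12, just below
 * 2^(12 + 32) = 2^44 bytes = 16TB. A ring whose last byte lies at or
 * beyond that limit cannot be programmed into a legacy device.
 */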

/*
 * Since we are in legacy mode:
 * http://ozlabs.org/~rusty/virtio-spec/virtio-0.9.5.pdf
 *
 * "Note that this is possible because while the virtio header is PCI (i.e.
 * little) endian, the device-specific region is encoded in the native endian of
 * the guest (where such distinction is applicable)."
 *
 * For powerpc, which supports both endiannesses, qemu assumes that the cpu
 * is big endian and enforces this for the virtio-net stuff.
 */
static void
legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
#ifdef RTE_ARCH_PPC_64
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			rte_eal_pci_ioport_read(&hw->io, dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
		} else if (length >= 2) {
			size = 2;
			rte_eal_pci_ioport_read(&hw->io, dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
		} else {
			size = 1;
			rte_eal_pci_ioport_read(&hw->io, dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		}

		dst = (char *)dst + size;
		offset += size;
		length -= size;
	}
#else
	rte_eal_pci_ioport_read(&hw->io, dst, length,
				VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}

static void
legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
#ifdef RTE_ARCH_PPC_64
	union {
		uint32_t u32;
		uint16_t u16;
	} tmp;
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src);
			rte_eal_pci_ioport_write(&hw->io, &tmp.u32, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		} else if (length >= 2) {
			size = 2;
			tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src);
			rte_eal_pci_ioport_write(&hw->io, &tmp.u16, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		} else {
			size = 1;
			rte_eal_pci_ioport_write(&hw->io, src, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		}

		src = (const char *)src + size;
		offset += size;
		length -= size;
	}
#else
	rte_eal_pci_ioport_write(&hw->io, src, length,
				 VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}

static uint64_t
legacy_get_features(struct virtio_hw *hw)
{
	uint32_t dst;

	rte_eal_pci_ioport_read(&hw->io, &dst, 4, VIRTIO_PCI_HOST_FEATURES);
	return dst;
}

static void
legacy_set_features(struct virtio_hw *hw, uint64_t features)
{
	if ((features >> 32) != 0) {
		PMD_DRV_LOG(ERR,
			"only 32 bit features are allowed for legacy virtio!");
		return;
	}
	rte_eal_pci_ioport_write(&hw->io, &features, 4,
				 VIRTIO_PCI_GUEST_FEATURES);
}

static uint8_t
legacy_get_status(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_eal_pci_ioport_read(&hw->io, &dst, 1, VIRTIO_PCI_STATUS);
	return dst;
}

static void
legacy_set_status(struct virtio_hw *hw, uint8_t status)
{
	rte_eal_pci_ioport_write(&hw->io, &status, 1, VIRTIO_PCI_STATUS);
}

static void
legacy_reset(struct virtio_hw *hw)
{
	legacy_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
}

static uint8_t
legacy_get_isr(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_eal_pci_ioport_read(&hw->io, &dst, 1, VIRTIO_PCI_ISR);
	return dst;
}

/* Enable one vector (0) for Link State Interrupt */
static uint16_t
legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	uint16_t dst;

	rte_eal_pci_ioport_write(&hw->io, &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
	rte_eal_pci_ioport_read(&hw->io, &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
	return dst;
}
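
/*
 * Note on the read-back above: per the virtio spec, reading the vector
 * register back after the write is how the driver verifies the mapping;
 * the device returns the written vector on success and
 * VIRTIO_MSI_NO_VECTOR (0xffff) if it could not associate the vector.
 */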

static uint16_t
legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	uint16_t dst;

	rte_eal_pci_ioport_write(&hw->io, &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
	rte_eal_pci_ioport_read(&hw->io, &dst, 2, VIRTIO_PCI_QUEUE_NUM);
	return dst;
}

static int
legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2,
				 VIRTIO_PCI_QUEUE_SEL);
	src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
	rte_eal_pci_ioport_write(&hw->io, &src, 4, VIRTIO_PCI_QUEUE_PFN);

	return 0;
}

static void
legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src = 0;

	rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2,
				 VIRTIO_PCI_QUEUE_SEL);
	rte_eal_pci_ioport_write(&hw->io, &src, 4, VIRTIO_PCI_QUEUE_PFN);
}

static void
legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2,
				 VIRTIO_PCI_QUEUE_NOTIFY);
}

#ifdef RTE_EXEC_ENV_LINUXAPP
static int
legacy_virtio_has_msix(const struct rte_pci_addr *loc)
{
	DIR *d;
	char dirname[PATH_MAX];

	snprintf(dirname, sizeof(dirname),
		 "%s/" PCI_PRI_FMT "/msi_irqs", pci_get_sysfs_path(),
		 loc->domain, loc->bus, loc->devid, loc->function);

	d = opendir(dirname);
	if (d)
		closedir(d);

	return d != NULL;
}
#else
static int
legacy_virtio_has_msix(const struct rte_pci_addr *loc __rte_unused)
{
	/* nic_uio does not enable interrupts, return 0 (false). */
	return 0;
}
#endif

static int
legacy_virtio_resource_init(struct rte_pci_device *pci_dev,
			    struct virtio_hw *hw, uint32_t *dev_flags)
{
	if (rte_eal_pci_ioport_map(pci_dev, 0, &hw->io) < 0)
		return -1;

	if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UNKNOWN)
		*dev_flags |= RTE_ETH_DEV_INTR_LSC;
	else
		*dev_flags &= ~RTE_ETH_DEV_INTR_LSC;

	return 0;
}

static const struct virtio_pci_ops legacy_ops = {
	.read_dev_cfg = legacy_read_dev_config,
	.write_dev_cfg = legacy_write_dev_config,
	.reset = legacy_reset,
	.get_status = legacy_get_status,
	.set_status = legacy_set_status,
	.get_features = legacy_get_features,
	.set_features = legacy_set_features,
	.get_isr = legacy_get_isr,
	.set_config_irq = legacy_set_config_irq,
	.get_queue_num = legacy_get_queue_num,
	.setup_queue = legacy_setup_queue,
	.del_queue = legacy_del_queue,
	.notify_queue = legacy_notify_queue,
};


static inline uint8_t
io_read8(uint8_t *addr)
{
	return *(volatile uint8_t *)addr;
}

static inline void
io_write8(uint8_t val, uint8_t *addr)
{
	*(volatile uint8_t *)addr = val;
}

static inline uint16_t
io_read16(uint16_t *addr)
{
	return *(volatile uint16_t *)addr;
}

static inline void
io_write16(uint16_t val, uint16_t *addr)
{
	*(volatile uint16_t *)addr = val;
}

static inline uint32_t
io_read32(uint32_t *addr)
{
	return *(volatile uint32_t *)addr;
}

static inline void
io_write32(uint32_t val, uint32_t *addr)
{
	*(volatile uint32_t *)addr = val;
}

static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	io_write32(val & ((1ULL << 32) - 1), lo);
	io_write32(val >> 32, hi);
}
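
/*
 * The modern read path below relies on the config_generation counter
 * defined by the virtio 1.0 spec: the device bumps the counter whenever
 * a multi-field configuration change occurs, so reading the generation
 * before and after the byte-wise copy and retrying on mismatch yields a
 * consistent snapshot without any locking.
 */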

static void
modern_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;

	do {
		old_gen = io_read8(&hw->common_cfg->config_generation);

		p = dst;
		for (i = 0; i < length; i++)
			*p++ = io_read8((uint8_t *)hw->dev_cfg + offset + i);

		new_gen = io_read8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

static void
modern_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
	int i;
	const uint8_t *p = src;

	for (i = 0; i < length; i++)
		io_write8(*p++, (uint8_t *)hw->dev_cfg + offset + i);
}

static uint64_t
modern_get_features(struct virtio_hw *hw)
{
	uint32_t features_lo, features_hi;

	io_write32(0, &hw->common_cfg->device_feature_select);
	features_lo = io_read32(&hw->common_cfg->device_feature);

	io_write32(1, &hw->common_cfg->device_feature_select);
	features_hi = io_read32(&hw->common_cfg->device_feature);

	return ((uint64_t)features_hi << 32) | features_lo;
}

static void
modern_set_features(struct virtio_hw *hw, uint64_t features)
{
	io_write32(0, &hw->common_cfg->guest_feature_select);
	io_write32(features & ((1ULL << 32) - 1),
		   &hw->common_cfg->guest_feature);

	io_write32(1, &hw->common_cfg->guest_feature_select);
	io_write32(features >> 32,
		   &hw->common_cfg->guest_feature);
}

static uint8_t
modern_get_status(struct virtio_hw *hw)
{
	return io_read8(&hw->common_cfg->device_status);
}

static void
modern_set_status(struct virtio_hw *hw, uint8_t status)
{
	io_write8(status, &hw->common_cfg->device_status);
}

static void
modern_reset(struct virtio_hw *hw)
{
	modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	modern_get_status(hw);
}

static uint8_t
modern_get_isr(struct virtio_hw *hw)
{
	return io_read8(hw->isr);
}

static uint16_t
modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	io_write16(vec, &hw->common_cfg->msix_config);
	return io_read16(&hw->common_cfg->msix_config);
}

static uint16_t
modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	io_write16(queue_id, &hw->common_cfg->queue_select);
	return io_read16(&hw->common_cfg->queue_size);
}
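
/*
 * modern_setup_queue() below derives the three vring addresses from the
 * single allocation backing the queue: the descriptor table starts at
 * vq_ring_mem, the avail ring follows the descriptor table, and the used
 * ring is placed after the avail ring, rounded up to
 * VIRTIO_PCI_VRING_ALIGN, following the classic vring layout.
 */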

static int
modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	desc_addr = vq->vq_ring_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	io_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	notify_off = io_read16(&hw->common_cfg->queue_notify_off);
	vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
				   notify_off * hw->notify_off_multiplier);

	io_write16(1, &hw->common_cfg->queue_enable);

	PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->vq_queue_index);
	PMD_INIT_LOG(DEBUG, "\t desc_addr: %" PRIx64, desc_addr);
	PMD_INIT_LOG(DEBUG, "\t avail_addr: %" PRIx64, avail_addr);
	PMD_INIT_LOG(DEBUG, "\t used_addr: %" PRIx64, used_addr);
	PMD_INIT_LOG(DEBUG, "\t notify addr: %p (notify offset: %u)",
		     vq->notify_addr, notify_off);

	return 0;
}

static void
modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	io_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	io_write16(0, &hw->common_cfg->queue_enable);
}

static void
modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
{
	/* Per virtio 1.0, the driver notifies by writing the 16-bit
	 * virtqueue index to the queue's notify address.
	 */
	io_write16(vq->vq_queue_index, vq->notify_addr);
}

static const struct virtio_pci_ops modern_ops = {
	.read_dev_cfg = modern_read_dev_config,
	.write_dev_cfg = modern_write_dev_config,
	.reset = modern_reset,
	.get_status = modern_get_status,
	.set_status = modern_set_status,
	.get_features = modern_get_features,
	.set_features = modern_set_features,
	.get_isr = modern_get_isr,
	.set_config_irq = modern_set_config_irq,
	.get_queue_num = modern_get_queue_num,
	.setup_queue = modern_setup_queue,
	.del_queue = modern_del_queue,
	.notify_queue = modern_notify_queue,
};


void
vtpci_read_dev_config(struct virtio_hw *hw, size_t offset,
		      void *dst, int length)
{
	hw->vtpci_ops->read_dev_cfg(hw, offset, dst, length);
}

void
vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
		       const void *src, int length)
{
	hw->vtpci_ops->write_dev_cfg(hw, offset, src, length);
}
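
/*
 * Negotiation below is a plain bitwise AND: a feature is enabled only if
 * the host offers it and hw->guest_features (the bits this driver
 * supports) also has it set. For example, a host offering
 * VIRTIO_NET_F_CSUM and VIRTIO_NET_F_MRG_RXBUF against a driver mask
 * containing only VIRTIO_NET_F_CSUM negotiates just VIRTIO_NET_F_CSUM.
 */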

uint64_t
vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
{
	uint64_t features;

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & hw->guest_features;
	hw->vtpci_ops->set_features(hw, features);

	return features;
}

void
vtpci_reset(struct virtio_hw *hw)
{
	hw->vtpci_ops->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	/* flush status write */
	hw->vtpci_ops->get_status(hw);
}

void
vtpci_reinit_complete(struct virtio_hw *hw)
{
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

void
vtpci_set_status(struct virtio_hw *hw, uint8_t status)
{
	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= hw->vtpci_ops->get_status(hw);

	hw->vtpci_ops->set_status(hw, status);
}

uint8_t
vtpci_get_status(struct virtio_hw *hw)
{
	return hw->vtpci_ops->get_status(hw);
}

uint8_t
vtpci_isr(struct virtio_hw *hw)
{
	return hw->vtpci_ops->get_isr(hw);
}


/* Enable one vector (0) for Link State Interrupt */
uint16_t
vtpci_irq_config(struct virtio_hw *hw, uint16_t vec)
{
	return hw->vtpci_ops->set_config_irq(hw, vec);
}

static void *
get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
{
	uint8_t bar = cap->bar;
	uint32_t length = cap->length;
	uint32_t offset = cap->offset;
	uint8_t *base;

	if (bar > 5) {
		PMD_INIT_LOG(ERR, "invalid bar: %u", bar);
		return NULL;
	}

	if (offset + length < offset) {
		PMD_INIT_LOG(ERR, "offset(%u) + length(%u) overflows",
			     offset, length);
		return NULL;
	}

	if (offset + length > dev->mem_resource[bar].len) {
		PMD_INIT_LOG(ERR,
			     "invalid cap: overflows bar space: %u > %" PRIu64,
			     offset + length, dev->mem_resource[bar].len);
		return NULL;
	}

	base = dev->mem_resource[bar].addr;
	if (base == NULL) {
		PMD_INIT_LOG(ERR, "bar %u base addr is NULL", bar);
		return NULL;
	}

	return base + offset;
}
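
/*
 * virtio_read_caps() below walks the device's PCI capability list looking
 * for vendor-specific entries (PCI_CAP_ID_VNDR). Per the virtio 1.0 spec,
 * each such capability carries a cfg_type plus a BAR/offset/length triple
 * locating one of the modern register regions (common, notify, device,
 * ISR); a device missing any of the four is treated as legacy.
 */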

static int
virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
{
	uint8_t pos;
	struct virtio_pci_cap cap;
	int ret;

	if (rte_eal_pci_map_device(dev)) {
		PMD_INIT_LOG(DEBUG, "failed to map pci device!");
		return -1;
	}

	ret = rte_eal_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret < 0) {
		PMD_INIT_LOG(DEBUG, "failed to read pci capability list");
		return -1;
	}

	while (pos) {
		ret = rte_eal_pci_read_config(dev, &cap, sizeof(cap), pos);
		if (ret < 0) {
			PMD_INIT_LOG(ERR,
				     "failed to read pci cap at pos: %x", pos);
			break;
		}

		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
			PMD_INIT_LOG(DEBUG,
				     "[%2x] skipping non VNDR cap id: %02x",
				     pos, cap.cap_vndr);
			goto next;
		}

		PMD_INIT_LOG(DEBUG,
			     "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
			     pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			rte_eal_pci_read_config(dev, &hw->notify_off_multiplier,
						4, pos + sizeof(cap));
			hw->notify_base = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cfg_addr(dev, &cap);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->dev_cfg == NULL || hw->isr == NULL) {
		PMD_INIT_LOG(INFO, "no modern virtio pci device found.");
		return -1;
	}

	PMD_INIT_LOG(INFO, "found modern virtio pci device.");

	PMD_INIT_LOG(DEBUG, "common cfg mapped at: %p", hw->common_cfg);
	PMD_INIT_LOG(DEBUG, "device cfg mapped at: %p", hw->dev_cfg);
	PMD_INIT_LOG(DEBUG, "isr cfg mapped at: %p", hw->isr);
	PMD_INIT_LOG(DEBUG, "notify base: %p, notify off multiplier: %u",
		     hw->notify_base, hw->notify_off_multiplier);

	return 0;
}

/*
 * Return -1:
 *   if there is an error mapping the device with VFIO/UIO.
 *   if the port map fails when the driver type is KDRV_NONE.
 *   if the device is whitelisted but the driver type is KDRV_UNKNOWN.
 * Return 1 if a kernel driver is managing the device.
 * Return 0 on success.
 */
int
vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw,
	   uint32_t *dev_flags)
{
	hw->dev = dev;

	/*
	 * Check whether we can read the virtio PCI capabilities, which
	 * exist only on modern PCI devices. If that fails, fall back to
	 * legacy virtio handling.
	 */
	if (virtio_read_caps(dev, hw) == 0) {
		PMD_INIT_LOG(INFO, "modern virtio pci detected.");
		hw->vtpci_ops = &modern_ops;
		hw->modern = 1;
		*dev_flags |= RTE_ETH_DEV_INTR_LSC;
		return 0;
	}

	PMD_INIT_LOG(INFO, "trying with legacy virtio pci.");
	if (legacy_virtio_resource_init(dev, hw, dev_flags) < 0) {
		if (dev->kdrv == RTE_KDRV_UNKNOWN &&
		    (!dev->devargs ||
		     dev->devargs->type != RTE_DEVTYPE_WHITELISTED_PCI)) {
			PMD_INIT_LOG(INFO,
				     "skip kernel managed virtio device.");
			return 1;
		}
		return -1;
	}

	hw->vtpci_ops = &legacy_ops;
	hw->use_msix = legacy_virtio_has_msix(&dev->addr);
	hw->modern = 0;

	return 0;
}
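
/*
 * Typical call sequence (a sketch of how a PMD device-init path might
 * drive this file; the surrounding probe context is assumed, and error
 * handling plus the feature/queue steps are elided):
 *
 *	uint32_t dev_flags = 0;
 *
 *	if (vtpci_init(pci_dev, hw, &dev_flags) != 0)
 *		return;				kernel-managed or map failure
 *	vtpci_reset(hw);			put device into a known state
 *	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
 *	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
 *	... vtpci_negotiate_features(), queue setup via vtpci_ops ...
 *	vtpci_reinit_complete(hw);		sets DRIVER_OK
 */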