/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>

#ifdef RTE_EXEC_ENV_LINUXAPP
 #include <dirent.h>
 #include <fcntl.h>
#endif

#include <rte_io.h>

#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"

/*
 * Following macros are derived from linux/pci_regs.h, however,
 * we can't simply include that header here, as there is no such
 * file for non-Linux platforms.
 */
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_ID_VNDR		0x09
#define PCI_CAP_ID_MSIX		0x11

/*
 * The remaining space is defined by each driver as the per-driver
 * configuration space.
 */
#define VIRTIO_PCI_CONFIG(hw) (((hw)->use_msix) ? 24 : 20)

static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
	/* Virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32bit,
	 * and only accepts 32 bit page frame number.
	 * Check if the allocated physical memory exceeds 16TB.
	 */
	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
			(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
		return 0;
	}

	return 1;
}

/*
 * Since we are in legacy mode:
 * http://ozlabs.org/~rusty/virtio-spec/virtio-0.9.5.pdf
 *
 * "Note that this is possible because while the virtio header is PCI (i.e.
 * little) endian, the device-specific region is encoded in the native endian of
 * the guest (where such distinction is applicable)."
 *
 * For powerpc, which supports both, qemu supposes that the cpu is big endian
 * and enforces this for the virtio-net stuff.
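 *
 * As a consequence, on big-endian builds (RTE_ARCH_PPC_64 below) the
 * device-specific config accessors copy the region in 4-, 2- and 1-byte
 * chunks and byte-swap each chunk, while little-endian targets can copy
 * the region as-is.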
 */
static void
legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
#ifdef RTE_ARCH_PPC_64
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
		} else if (length >= 2) {
			size = 2;
			rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
		} else {
			size = 1;
			rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		}

		dst = (char *)dst + size;
		offset += size;
		length -= size;
	}
#else
	rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, length,
				VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}

static void
legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
#ifdef RTE_ARCH_PPC_64
	union {
		uint32_t u32;
		uint16_t u16;
	} tmp;
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src);
			rte_eal_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		} else if (length >= 2) {
			size = 2;
			tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src);
			rte_eal_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		} else {
			size = 1;
			rte_eal_pci_ioport_write(VTPCI_IO(hw), src, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		}

		src = (const char *)src + size;
		offset += size;
		length -= size;
	}
#else
	rte_eal_pci_ioport_write(VTPCI_IO(hw), src, length,
				 VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}

static uint64_t
legacy_get_features(struct virtio_hw *hw)
{
	uint32_t dst;

	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 4,
				VIRTIO_PCI_HOST_FEATURES);
	return dst;
}

static void
legacy_set_features(struct virtio_hw *hw, uint64_t features)
{
	if ((features >> 32) != 0) {
		PMD_DRV_LOG(ERR,
			"only 32 bit features are allowed for legacy virtio!");
		return;
	}
	rte_eal_pci_ioport_write(VTPCI_IO(hw), &features, 4,
				 VIRTIO_PCI_GUEST_FEATURES);
}

static uint8_t
legacy_get_status(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
	return dst;
}

static void
legacy_set_status(struct virtio_hw *hw, uint8_t status)
{
	rte_eal_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
}

static void
legacy_reset(struct virtio_hw *hw)
{
	legacy_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
}

static uint8_t
legacy_get_isr(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
	return dst;
}

/* Enable one vector (0) for Link State Interrupt */
static uint16_t
legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	uint16_t dst;

	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vec, 2,
				 VIRTIO_MSI_CONFIG_VECTOR);
	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2,
				VIRTIO_MSI_CONFIG_VECTOR);
	return dst;
}

static uint16_t
legacy_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
	uint16_t dst;

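	/*
	 * Select the queue, program its MSI-X vector, then read the
	 * vector back: per the virtio spec, the device reports
	 * VIRTIO_MSI_NO_VECTOR if it could not bind the interrupt.
	 */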
	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
				 VIRTIO_PCI_QUEUE_SEL);
	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vec, 2,
				 VIRTIO_MSI_QUEUE_VECTOR);
	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
	return dst;
}

static uint16_t
legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	uint16_t dst;

	rte_eal_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2,
				 VIRTIO_PCI_QUEUE_SEL);
	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
	return dst;
}

static int
legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
				 VIRTIO_PCI_QUEUE_SEL);
	src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
	rte_eal_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);

	return 0;
}

static void
legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src = 0;

	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
				 VIRTIO_PCI_QUEUE_SEL);
	rte_eal_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
}

static void
legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
				 VIRTIO_PCI_QUEUE_NOTIFY);
}

const struct virtio_pci_ops legacy_ops = {
	.read_dev_cfg = legacy_read_dev_config,
	.write_dev_cfg = legacy_write_dev_config,
	.reset = legacy_reset,
	.get_status = legacy_get_status,
	.set_status = legacy_set_status,
	.get_features = legacy_get_features,
	.set_features = legacy_set_features,
	.get_isr = legacy_get_isr,
	.set_config_irq = legacy_set_config_irq,
	.set_queue_irq = legacy_set_queue_irq,
	.get_queue_num = legacy_get_queue_num,
	.setup_queue = legacy_setup_queue,
	.del_queue = legacy_del_queue,
	.notify_queue = legacy_notify_queue,
};

static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	rte_write32(val & ((1ULL << 32) - 1), lo);
	rte_write32(val >> 32, hi);
}

static void
modern_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;

	do {
		old_gen = rte_read8(&hw->common_cfg->config_generation);

		p = dst;
		for (i = 0; i < length; i++)
			*p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);

		new_gen = rte_read8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

static void
modern_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
	int i;
	const uint8_t *p = src;

	for (i = 0; i < length; i++)
		rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
}

static uint64_t
modern_get_features(struct virtio_hw *hw)
{
	uint32_t features_lo, features_hi;

	rte_write32(0, &hw->common_cfg->device_feature_select);
	features_lo = rte_read32(&hw->common_cfg->device_feature);

	rte_write32(1, &hw->common_cfg->device_feature_select);
	features_hi = rte_read32(&hw->common_cfg->device_feature);

	return ((uint64_t)features_hi << 32) | features_lo;
}

static void
modern_set_features(struct virtio_hw *hw, uint64_t features)
{
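	/*
	 * The 64-bit feature mask is exposed as two 32-bit words:
	 * select word 0 and write the low half, then select word 1
	 * and write the high half.
	 */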
	rte_write32(0, &hw->common_cfg->guest_feature_select);
	rte_write32(features & ((1ULL << 32) - 1),
		    &hw->common_cfg->guest_feature);

	rte_write32(1, &hw->common_cfg->guest_feature_select);
	rte_write32(features >> 32,
		    &hw->common_cfg->guest_feature);
}

static uint8_t
modern_get_status(struct virtio_hw *hw)
{
	return rte_read8(&hw->common_cfg->device_status);
}

static void
modern_set_status(struct virtio_hw *hw, uint8_t status)
{
	rte_write8(status, &hw->common_cfg->device_status);
}

static void
modern_reset(struct virtio_hw *hw)
{
	modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	modern_get_status(hw);
}

static uint8_t
modern_get_isr(struct virtio_hw *hw)
{
	return rte_read8(hw->isr);
}

static uint16_t
modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	rte_write16(vec, &hw->common_cfg->msix_config);
	return rte_read16(&hw->common_cfg->msix_config);
}

static uint16_t
modern_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
	rte_write16(vec, &hw->common_cfg->queue_msix_vector);
	return rte_read16(&hw->common_cfg->queue_msix_vector);
}

static uint16_t
modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	rte_write16(queue_id, &hw->common_cfg->queue_select);
	return rte_read16(&hw->common_cfg->queue_size);
}

static int
modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	desc_addr = vq->vq_ring_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
	vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
				   notify_off * hw->notify_off_multiplier);

	rte_write16(1, &hw->common_cfg->queue_enable);

	PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->vq_queue_index);
	PMD_INIT_LOG(DEBUG, "\t desc_addr: %" PRIx64, desc_addr);
	PMD_INIT_LOG(DEBUG, "\t avail_addr: %" PRIx64, avail_addr);
	PMD_INIT_LOG(DEBUG, "\t used_addr: %" PRIx64, used_addr);
	PMD_INIT_LOG(DEBUG, "\t notify addr: %p (notify offset: %u)",
		     vq->notify_addr, notify_off);

	return 0;
}

static void
modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	rte_write16(0, &hw->common_cfg->queue_enable);
}

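/*
 * Modern devices are kicked by writing the queue index to the per-queue
 * notify address computed in modern_setup_queue() from notify_base and
 * notify_off_multiplier.
 */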
static void
modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, vq->notify_addr);
}

const struct virtio_pci_ops modern_ops = {
	.read_dev_cfg = modern_read_dev_config,
	.write_dev_cfg = modern_write_dev_config,
	.reset = modern_reset,
	.get_status = modern_get_status,
	.set_status = modern_set_status,
	.get_features = modern_get_features,
	.set_features = modern_set_features,
	.get_isr = modern_get_isr,
	.set_config_irq = modern_set_config_irq,
	.set_queue_irq = modern_set_queue_irq,
	.get_queue_num = modern_get_queue_num,
	.setup_queue = modern_setup_queue,
	.del_queue = modern_del_queue,
	.notify_queue = modern_notify_queue,
};


void
vtpci_read_dev_config(struct virtio_hw *hw, size_t offset,
		      void *dst, int length)
{
	VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
}

void
vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
		       const void *src, int length)
{
	VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
}

uint64_t
vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
{
	uint64_t features;

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & hw->guest_features;
	VTPCI_OPS(hw)->set_features(hw, features);

	return features;
}

void
vtpci_reset(struct virtio_hw *hw)
{
	VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	/* flush status write */
	VTPCI_OPS(hw)->get_status(hw);
}

void
vtpci_reinit_complete(struct virtio_hw *hw)
{
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

void
vtpci_set_status(struct virtio_hw *hw, uint8_t status)
{
	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= VTPCI_OPS(hw)->get_status(hw);

	VTPCI_OPS(hw)->set_status(hw, status);
}

uint8_t
vtpci_get_status(struct virtio_hw *hw)
{
	return VTPCI_OPS(hw)->get_status(hw);
}

uint8_t
vtpci_isr(struct virtio_hw *hw)
{
	return VTPCI_OPS(hw)->get_isr(hw);
}

static void *
get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
{
	uint8_t bar = cap->bar;
	uint32_t length = cap->length;
	uint32_t offset = cap->offset;
	uint8_t *base;

	if (bar > 5) {
		PMD_INIT_LOG(ERR, "invalid bar: %u", bar);
		return NULL;
	}

	if (offset + length < offset) {
		PMD_INIT_LOG(ERR, "offset(%u) + length(%u) overflows",
			offset, length);
		return NULL;
	}

	if (offset + length > dev->mem_resource[bar].len) {
		PMD_INIT_LOG(ERR,
			"invalid cap: overflows bar space: %u > %" PRIu64,
			offset + length, dev->mem_resource[bar].len);
		return NULL;
	}

	base = dev->mem_resource[bar].addr;
	if (base == NULL) {
		PMD_INIT_LOG(ERR, "bar %u base addr is NULL", bar);
		return NULL;
	}

	return base + offset;
}

static int
virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
{
	uint8_t pos;
	struct virtio_pci_cap cap;
	int ret;

	if (rte_eal_pci_map_device(dev)) {
		PMD_INIT_LOG(DEBUG, "failed to map pci device!");
		return -1;
	}

	ret = rte_eal_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret < 0) {
		PMD_INIT_LOG(DEBUG, "failed to read pci capability list");
		return -1;
	}

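	/*
	 * Walk the PCI capability list. Vendor-specific (VNDR) capabilities
	 * describe where each modern virtio config structure (common, notify,
	 * ISR, device) lives inside the device's BARs.
	 */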
	while (pos) {
		ret = rte_eal_pci_read_config(dev, &cap, sizeof(cap), pos);
		if (ret < 0) {
			PMD_INIT_LOG(ERR,
				"failed to read pci cap at pos: %x", pos);
			break;
		}

		if (cap.cap_vndr == PCI_CAP_ID_MSIX)
			hw->use_msix = 1;

		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
			PMD_INIT_LOG(DEBUG,
				"[%2x] skipping non VNDR cap id: %02x",
				pos, cap.cap_vndr);
			goto next;
		}

		PMD_INIT_LOG(DEBUG,
			"[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
			pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			rte_eal_pci_read_config(dev, &hw->notify_off_multiplier,
						4, pos + sizeof(cap));
			hw->notify_base = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cfg_addr(dev, &cap);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->dev_cfg == NULL || hw->isr == NULL) {
		PMD_INIT_LOG(INFO, "no modern virtio pci device found.");
		return -1;
	}

	PMD_INIT_LOG(INFO, "found modern virtio pci device.");

	PMD_INIT_LOG(DEBUG, "common cfg mapped at: %p", hw->common_cfg);
	PMD_INIT_LOG(DEBUG, "device cfg mapped at: %p", hw->dev_cfg);
	PMD_INIT_LOG(DEBUG, "isr cfg mapped at: %p", hw->isr);
	PMD_INIT_LOG(DEBUG, "notify base: %p, notify off multiplier: %u",
		hw->notify_base, hw->notify_off_multiplier);

	return 0;
}

/*
 * Return -1:
 *   if there is an error mapping with VFIO/UIO.
 *   if there is a port map error when the driver type is KDRV_NONE.
 *   if the device is whitelisted but the driver type is KDRV_UNKNOWN.
 * Return 1 if a kernel driver is managing the device.
 * Return 0 on success.
 */
int
vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
{
	/*
	 * Check whether we can read the virtio PCI capabilities, which
	 * exist only on modern PCI devices. If that fails, fall back to
	 * legacy virtio handling.
	 */
	if (virtio_read_caps(dev, hw) == 0) {
		PMD_INIT_LOG(INFO, "modern virtio pci detected.");
		virtio_hw_internal[hw->port_id].vtpci_ops = &modern_ops;
		hw->modern = 1;
		return 0;
	}

	PMD_INIT_LOG(INFO, "trying with legacy virtio pci.");
	if (rte_eal_pci_ioport_map(dev, 0, VTPCI_IO(hw)) < 0) {
		if (dev->kdrv == RTE_KDRV_UNKNOWN &&
		    (!dev->device.devargs ||
		     dev->device.devargs->type !=
			RTE_DEVTYPE_WHITELISTED_PCI)) {
			PMD_INIT_LOG(INFO,
				"skip kernel managed virtio device.");
			return 1;
		}
		return -1;
	}

	virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops;
	hw->modern = 0;

	return 0;
}