/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <stdint.h>

#ifdef RTE_EXEC_ENV_LINUXAPP
#include <dirent.h>
#include <fcntl.h>
#endif

#include <rte_io.h>
#include <rte_bus.h>

#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"

/*
 * The following macros are derived from linux/pci_regs.h; we can't simply
 * include that header here, as there is no such file on non-Linux platforms.
 */
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_ID_VNDR		0x09
#define PCI_CAP_ID_MSIX		0x11

/*
 * The remaining space is defined by each driver as the per-driver
 * configuration space.
 */
#define VIRTIO_PCI_CONFIG(hw) \
		(((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)

static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
	/* The virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32 bit,
	 * and only accepts a 32 bit page frame number.
	 * Check if the allocated physical memory exceeds 16TB.
	 */
	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
			(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
		return 0;
	}

	return 1;
}

/*
 * Since we are in legacy mode:
 * http://ozlabs.org/~rusty/virtio-spec/virtio-0.9.5.pdf
 *
 * "Note that this is possible because while the virtio header is PCI (i.e.
 * little) endian, the device-specific region is encoded in the native endian of
 * the guest (where such distinction is applicable)."
 *
 * For powerpc, which supports both, qemu assumes the cpu is big endian and
 * enforces this for the virtio-net stuff.
 */
static void
legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
#ifdef RTE_ARCH_PPC_64
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
		} else if (length >= 2) {
			size = 2;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
		} else {
			size = 1;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		}

		dst = (char *)dst + size;
		offset += size;
		length -= size;
	}
#else
	rte_pci_ioport_read(VTPCI_IO(hw), dst, length,
			    VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}

static void
legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
#ifdef RTE_ARCH_PPC_64
	union {
		uint32_t u32;
		uint16_t u16;
	} tmp;
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src);
			rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		} else if (length >= 2) {
			size = 2;
			tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src);
			rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		} else {
			size = 1;
			rte_pci_ioport_write(VTPCI_IO(hw), src, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		}

		src = (const char *)src + size;
		offset += size;
		length -= size;
	}
#else
	rte_pci_ioport_write(VTPCI_IO(hw), src, length,
			     VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}
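/*
 * Legacy interface: device registers live at fixed offsets inside the PCI
 * I/O port BAR (VIRTIO_PCI_HOST_FEATURES, VIRTIO_PCI_STATUS, ...), so each
 * accessor below is a thin wrapper around rte_pci_ioport_read()/write().
 */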
static uint64_t
legacy_get_features(struct virtio_hw *hw)
{
	uint32_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 4, VIRTIO_PCI_HOST_FEATURES);
	return dst;
}

static void
legacy_set_features(struct virtio_hw *hw, uint64_t features)
{
	if ((features >> 32) != 0) {
		PMD_DRV_LOG(ERR,
			"only 32 bit features are allowed for legacy virtio!");
		return;
	}
	rte_pci_ioport_write(VTPCI_IO(hw), &features, 4,
			     VIRTIO_PCI_GUEST_FEATURES);
}

static uint8_t
legacy_get_status(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
	return dst;
}

static void
legacy_set_status(struct virtio_hw *hw, uint8_t status)
{
	rte_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
}

static void
legacy_reset(struct virtio_hw *hw)
{
	legacy_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
}

static uint8_t
legacy_get_isr(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
	return dst;
}

/* Enable one vector (0) for the Link State Interrupt */
static uint16_t
legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
	return dst;
}

static uint16_t
legacy_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
			     VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_QUEUE_VECTOR);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
	return dst;
}

static uint16_t
legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
	return dst;
}

static int
legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
			     VIRTIO_PCI_QUEUE_SEL);
	src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
	rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);

	return 0;
}

static void
legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src = 0;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
			     VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
}

static void
legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
			     VIRTIO_PCI_QUEUE_NOTIFY);
}
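/* Dispatch table for legacy (virtio 0.9.5) devices; installed by vtpci_init(). */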
const struct virtio_pci_ops legacy_ops = {
	.read_dev_cfg = legacy_read_dev_config,
	.write_dev_cfg = legacy_write_dev_config,
	.reset = legacy_reset,
	.get_status = legacy_get_status,
	.set_status = legacy_set_status,
	.get_features = legacy_get_features,
	.set_features = legacy_set_features,
	.get_isr = legacy_get_isr,
	.set_config_irq = legacy_set_config_irq,
	.set_queue_irq = legacy_set_queue_irq,
	.get_queue_num = legacy_get_queue_num,
	.setup_queue = legacy_setup_queue,
	.del_queue = legacy_del_queue,
	.notify_queue = legacy_notify_queue,
};

static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	rte_write32(val & ((1ULL << 32) - 1), lo);
	rte_write32(val >> 32, hi);
}

static void
modern_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;

	do {
		old_gen = rte_read8(&hw->common_cfg->config_generation);

		p = dst;
		for (i = 0; i < length; i++)
			*p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);

		new_gen = rte_read8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

static void
modern_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
	int i;
	const uint8_t *p = src;

	for (i = 0; i < length; i++)
		rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
}

static uint64_t
modern_get_features(struct virtio_hw *hw)
{
	uint32_t features_lo, features_hi;

	rte_write32(0, &hw->common_cfg->device_feature_select);
	features_lo = rte_read32(&hw->common_cfg->device_feature);

	rte_write32(1, &hw->common_cfg->device_feature_select);
	features_hi = rte_read32(&hw->common_cfg->device_feature);

	return ((uint64_t)features_hi << 32) | features_lo;
}

static void
modern_set_features(struct virtio_hw *hw, uint64_t features)
{
	rte_write32(0, &hw->common_cfg->guest_feature_select);
	rte_write32(features & ((1ULL << 32) - 1),
		    &hw->common_cfg->guest_feature);

	rte_write32(1, &hw->common_cfg->guest_feature_select);
	rte_write32(features >> 32,
		    &hw->common_cfg->guest_feature);
}

static uint8_t
modern_get_status(struct virtio_hw *hw)
{
	return rte_read8(&hw->common_cfg->device_status);
}

static void
modern_set_status(struct virtio_hw *hw, uint8_t status)
{
	rte_write8(status, &hw->common_cfg->device_status);
}

static void
modern_reset(struct virtio_hw *hw)
{
	modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	modern_get_status(hw);
}

static uint8_t
modern_get_isr(struct virtio_hw *hw)
{
	return rte_read8(hw->isr);
}

static uint16_t
modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	rte_write16(vec, &hw->common_cfg->msix_config);
	return rte_read16(&hw->common_cfg->msix_config);
}

static uint16_t
modern_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
	rte_write16(vec, &hw->common_cfg->queue_msix_vector);
	return rte_read16(&hw->common_cfg->queue_msix_vector);
}

static uint16_t
modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	rte_write16(queue_id, &hw->common_cfg->queue_select);
	return rte_read16(&hw->common_cfg->queue_size);
}
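/*
 * The split ring lives in one contiguous allocation starting at vq_ring_mem:
 * descriptor table first, then the avail ring, then the used ring aligned up
 * to VIRTIO_PCI_VRING_ALIGN. Each address is programmed into the common
 * config space as lo/hi 32-bit halves.
 */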
static int
modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	desc_addr = vq->vq_ring_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
	vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
				   notify_off * hw->notify_off_multiplier);

	rte_write16(1, &hw->common_cfg->queue_enable);

	PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->vq_queue_index);
	PMD_INIT_LOG(DEBUG, "\t desc_addr: %" PRIx64, desc_addr);
	PMD_INIT_LOG(DEBUG, "\t avail_addr: %" PRIx64, avail_addr);
	PMD_INIT_LOG(DEBUG, "\t used_addr: %" PRIx64, used_addr);
	PMD_INIT_LOG(DEBUG, "\t notify addr: %p (notify offset: %u)",
		     vq->notify_addr, notify_off);

	return 0;
}

static void
modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	rte_write16(0, &hw->common_cfg->queue_enable);
}

static void
modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, vq->notify_addr);
}

const struct virtio_pci_ops modern_ops = {
	.read_dev_cfg = modern_read_dev_config,
	.write_dev_cfg = modern_write_dev_config,
	.reset = modern_reset,
	.get_status = modern_get_status,
	.set_status = modern_set_status,
	.get_features = modern_get_features,
	.set_features = modern_set_features,
	.get_isr = modern_get_isr,
	.set_config_irq = modern_set_config_irq,
	.set_queue_irq = modern_set_queue_irq,
	.get_queue_num = modern_get_queue_num,
	.setup_queue = modern_setup_queue,
	.del_queue = modern_del_queue,
	.notify_queue = modern_notify_queue,
};
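/*
 * Public vtpci_* helpers: each call is dispatched through the per-port ops
 * table (legacy_ops or modern_ops) installed by vtpci_init().
 */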
void
vtpci_read_dev_config(struct virtio_hw *hw, size_t offset,
		      void *dst, int length)
{
	VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
}

void
vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
		       const void *src, int length)
{
	VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
}

uint64_t
vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
{
	uint64_t features;

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & hw->guest_features;
	VTPCI_OPS(hw)->set_features(hw, features);

	return features;
}

void
vtpci_reset(struct virtio_hw *hw)
{
	VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	/* flush status write */
	VTPCI_OPS(hw)->get_status(hw);
}

void
vtpci_reinit_complete(struct virtio_hw *hw)
{
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

void
vtpci_set_status(struct virtio_hw *hw, uint8_t status)
{
	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= VTPCI_OPS(hw)->get_status(hw);

	VTPCI_OPS(hw)->set_status(hw, status);
}

uint8_t
vtpci_get_status(struct virtio_hw *hw)
{
	return VTPCI_OPS(hw)->get_status(hw);
}

uint8_t
vtpci_isr(struct virtio_hw *hw)
{
	return VTPCI_OPS(hw)->get_isr(hw);
}

static void *
get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
{
	uint8_t bar = cap->bar;
	uint32_t length = cap->length;
	uint32_t offset = cap->offset;
	uint8_t *base;

	if (bar >= PCI_MAX_RESOURCE) {
		PMD_INIT_LOG(ERR, "invalid bar: %u", bar);
		return NULL;
	}

	if (offset + length < offset) {
		PMD_INIT_LOG(ERR, "offset(%u) + length(%u) overflows",
			     offset, length);
		return NULL;
	}

	if (offset + length > dev->mem_resource[bar].len) {
		PMD_INIT_LOG(ERR,
			     "invalid cap: overflows bar space: %u > %" PRIu64,
			     offset + length, dev->mem_resource[bar].len);
		return NULL;
	}

	base = dev->mem_resource[bar].addr;
	if (base == NULL) {
		PMD_INIT_LOG(ERR, "bar %u base addr is NULL", bar);
		return NULL;
	}

	return base + offset;
}
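/* "Enable" bit (bit 15) of the MSI-X capability's Message Control word,
 * which is read at capability offset + 2 below.
 */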
#define PCI_MSIX_ENABLE 0x8000

static int
virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
{
	uint8_t pos;
	struct virtio_pci_cap cap;
	int ret;

	if (rte_pci_map_device(dev)) {
		PMD_INIT_LOG(DEBUG, "failed to map pci device!");
		return -1;
	}

	ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret != 1) {
		PMD_INIT_LOG(DEBUG,
			     "failed to read pci capability list, ret %d", ret);
		return -1;
	}

	while (pos) {
		ret = rte_pci_read_config(dev, &cap, 2, pos);
		if (ret != 2) {
			PMD_INIT_LOG(DEBUG,
				     "failed to read pci cap at pos: %x ret %d",
				     pos, ret);
			break;
		}

		if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
			/* Transitional devices also expose this capability,
			 * which is why we also check whether MSI-X is enabled.
			 * 1st byte is the cap ID; 2nd byte is the position of
			 * the next cap; the next two bytes are the flags.
			 */
			uint16_t flags;

			ret = rte_pci_read_config(dev, &flags, sizeof(flags),
					pos + 2);
			if (ret != sizeof(flags)) {
				PMD_INIT_LOG(DEBUG,
					     "failed to read pci cap at pos:"
					     " %x ret %d", pos + 2, ret);
				break;
			}

			if (flags & PCI_MSIX_ENABLE)
				hw->use_msix = VIRTIO_MSIX_ENABLED;
			else
				hw->use_msix = VIRTIO_MSIX_DISABLED;
		}

		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
			PMD_INIT_LOG(DEBUG,
				     "[%2x] skipping non VNDR cap id: %02x",
				     pos, cap.cap_vndr);
			goto next;
		}

		ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
		if (ret != sizeof(cap)) {
			PMD_INIT_LOG(DEBUG,
				     "failed to read pci cap at pos: %x ret %d",
				     pos, ret);
			break;
		}

		PMD_INIT_LOG(DEBUG,
			     "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
			     pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			rte_pci_read_config(dev, &hw->notify_off_multiplier,
					    4, pos + sizeof(cap));
			hw->notify_base = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cfg_addr(dev, &cap);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->dev_cfg == NULL || hw->isr == NULL) {
		PMD_INIT_LOG(INFO, "no modern virtio pci device found.");
		return -1;
	}

	PMD_INIT_LOG(INFO, "found modern virtio pci device.");

	PMD_INIT_LOG(DEBUG, "common cfg mapped at: %p", hw->common_cfg);
	PMD_INIT_LOG(DEBUG, "device cfg mapped at: %p", hw->dev_cfg);
	PMD_INIT_LOG(DEBUG, "isr cfg mapped at: %p", hw->isr);
	PMD_INIT_LOG(DEBUG, "notify base: %p, notify off multiplier: %u",
		     hw->notify_base, hw->notify_off_multiplier);

	return 0;
}

/*
 * Return -1:
 *   if there is an error mapping with VFIO/UIO.
 *   if there is a port map error when the driver type is KDRV_NONE.
 *   if the device is whitelisted but the driver type is KDRV_UNKNOWN.
 * Return 1 if a kernel driver is managing the device.
 * Return 0 on success.
 */
int
vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
{
	/*
	 * Try to read the virtio pci caps, which exist only on modern pci
	 * devices. If that fails, fall back to legacy virtio handling.
	 */
	if (virtio_read_caps(dev, hw) == 0) {
		PMD_INIT_LOG(INFO, "modern virtio pci detected.");
		virtio_hw_internal[hw->port_id].vtpci_ops = &modern_ops;
		hw->modern = 1;
		return 0;
	}

	PMD_INIT_LOG(INFO, "trying with legacy virtio pci.");
	if (rte_pci_ioport_map(dev, 0, VTPCI_IO(hw)) < 0) {
		if (dev->kdrv == RTE_KDRV_UNKNOWN &&
		    (!dev->device.devargs ||
		     dev->device.devargs->bus !=
		     rte_bus_find_by_name("pci"))) {
			PMD_INIT_LOG(INFO,
				     "skip kernel managed virtio device.");
			return 1;
		}
		return -1;
	}

	virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops;
	hw->modern = 0;

	return 0;
}
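/*
 * Walk the PCI capability list and report whether an MSI-X capability is
 * present and, if so, whether it is currently enabled.
 */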
enum virtio_msix_status
vtpci_msix_detect(struct rte_pci_device *dev)
{
	uint8_t pos;
	int ret;

	ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret != 1) {
		PMD_INIT_LOG(DEBUG,
			     "failed to read pci capability list, ret %d", ret);
		return VIRTIO_MSIX_NONE;
	}

	while (pos) {
		uint8_t cap[2];

		ret = rte_pci_read_config(dev, cap, sizeof(cap), pos);
		if (ret != sizeof(cap)) {
			PMD_INIT_LOG(DEBUG,
				     "failed to read pci cap at pos: %x ret %d",
				     pos, ret);
			break;
		}

		if (cap[0] == PCI_CAP_ID_MSIX) {
			uint16_t flags;

			ret = rte_pci_read_config(dev, &flags, sizeof(flags),
					pos + sizeof(cap));
			if (ret != sizeof(flags)) {
				PMD_INIT_LOG(DEBUG,
					     "failed to read pci cap at pos:"
					     " %x ret %d", pos + 2, ret);
				break;
			}

			if (flags & PCI_MSIX_ENABLE)
				return VIRTIO_MSIX_ENABLED;
			else
				return VIRTIO_MSIX_DISABLED;
		}

		pos = cap[1];
	}

	return VIRTIO_MSIX_NONE;
}
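/*
 * For orientation only (a rough sketch; virtio_ethdev.c is the authoritative
 * caller): a device is typically brought up through this file roughly as
 *
 *	vtpci_init(dev, hw);                 pick legacy_ops or modern_ops
 *	vtpci_reset(hw);                     reset device, flush status
 *	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
 *	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
 *	vtpci_negotiate_features(hw, VTPCI_OPS(hw)->get_features(hw));
 *	VTPCI_OPS(hw)->setup_queue(hw, vq);  once per configured virtqueue
 *	vtpci_reinit_complete(hw);           set DRIVER_OK
 */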