/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <stdint.h>

#ifdef RTE_EXEC_ENV_LINUX
#include <dirent.h>
#include <fcntl.h>
#endif

#include <rte_io.h>
#include <rte_bus.h>

#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"

/*
 * The following macros are derived from linux/pci_regs.h, however,
 * we can't simply include that header here, as there is no such
 * file for non-Linux platforms.
 */
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_ID_VNDR		0x09
#define PCI_CAP_ID_MSIX		0x11

/*
 * The remaining space is defined by each driver as the per-driver
 * configuration space.
 */
#define VIRTIO_PCI_CONFIG(hw) \
		(((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)

static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
	/* The virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32 bits
	 * wide and only accepts a 32 bit page frame number.
	 * Check whether the allocated physical memory exceeds 16TB.
	 */
	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
			(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
		return 0;
	}

	return 1;
}

/*
 * Since we are in legacy mode:
 * http://ozlabs.org/~rusty/virtio-spec/virtio-0.9.5.pdf
 *
 * "Note that this is possible because while the virtio header is PCI (i.e.
 * little) endian, the device-specific region is encoded in the native endian of
 * the guest (where such distinction is applicable)."
 *
 * For powerpc, which supports both endiannesses, qemu assumes the cpu is
 * big endian and enforces this for the virtio-net stuff.
 */
static void
legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
#ifdef RTE_ARCH_PPC_64
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
		} else if (length >= 2) {
			size = 2;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
		} else {
			size = 1;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		}

		dst = (char *)dst + size;
		offset += size;
		length -= size;
	}
#else
	rte_pci_ioport_read(VTPCI_IO(hw), dst, length,
			    VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}

static void
legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
#ifdef RTE_ARCH_PPC_64
	union {
		uint32_t u32;
		uint16_t u16;
	} tmp;
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src);
			rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		} else if (length >= 2) {
			size = 2;
			tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src);
			rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		} else {
			size = 1;
			rte_pci_ioport_write(VTPCI_IO(hw), src, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		}

		src = (const char *)src + size;
		offset += size;
		length -= size;
	}
#else
	rte_pci_ioport_write(VTPCI_IO(hw), src, length,
			     VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}
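/*
 * Worked example (illustrative only, assuming a virtio-net device): on
 * PPC64, reading the 6-byte MAC address at device-config offset 0 through
 * legacy_read_dev_config() above issues one 4-byte ioport read swapped via
 * rte_be_to_cpu_32(), then one 2-byte read swapped via rte_be_to_cpu_16();
 * on every other architecture it is a single unswapped 6-byte ioport read.
 */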
static uint64_t
legacy_get_features(struct virtio_hw *hw)
{
	uint32_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 4, VIRTIO_PCI_HOST_FEATURES);
	return dst;
}

static void
legacy_set_features(struct virtio_hw *hw, uint64_t features)
{
	if ((features >> 32) != 0) {
		PMD_DRV_LOG(ERR,
			"only 32 bit features are allowed for legacy virtio!");
		return;
	}
	rte_pci_ioport_write(VTPCI_IO(hw), &features, 4,
			     VIRTIO_PCI_GUEST_FEATURES);
}

static uint8_t
legacy_get_status(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
	return dst;
}

static void
legacy_set_status(struct virtio_hw *hw, uint8_t status)
{
	rte_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
}

static uint8_t
legacy_get_isr(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
	return dst;
}

/* Enable one vector (0) for Link State Interrupt */
static uint16_t
legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
	return dst;
}

static uint16_t
legacy_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
			     VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_QUEUE_VECTOR);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
	return dst;
}

static uint16_t
legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
	return dst;
}

static int
legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
			     VIRTIO_PCI_QUEUE_SEL);
	src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
	rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);

	return 0;
}

static void
legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src = 0;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
			     VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
}

static void
legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
			     VIRTIO_PCI_QUEUE_NOTIFY);
}

const struct virtio_pci_ops legacy_ops = {
	.read_dev_cfg	= legacy_read_dev_config,
	.write_dev_cfg	= legacy_write_dev_config,
	.get_status	= legacy_get_status,
	.set_status	= legacy_set_status,
	.get_features	= legacy_get_features,
	.set_features	= legacy_set_features,
	.get_isr	= legacy_get_isr,
	.set_config_irq	= legacy_set_config_irq,
	.set_queue_irq	= legacy_set_queue_irq,
	.get_queue_num	= legacy_get_queue_num,
	.setup_queue	= legacy_setup_queue,
	.del_queue	= legacy_del_queue,
	.notify_queue	= legacy_notify_queue,
};
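/*
 * The modern (virtio 1.0) common config registers are at most 32 bits wide,
 * so 64-bit guest-physical addresses are programmed as a lo/hi pair by the
 * helper below. Illustration: val = 0x123456000 stores 0x23456000 to *lo
 * and 0x1 to *hi.
 */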
static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	rte_write32(val & ((1ULL << 32) - 1), lo);
	rte_write32(val >> 32, hi);
}

static void
modern_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;

	/* Retry the copy if the device bumped config_generation meanwhile,
	 * so the caller gets a consistent snapshot.
	 */
	do {
		old_gen = rte_read8(&hw->common_cfg->config_generation);

		p = dst;
		for (i = 0; i < length; i++)
			*p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);

		new_gen = rte_read8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

static void
modern_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
	int i;
	const uint8_t *p = src;

	for (i = 0; i < length; i++)
		rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
}

static uint64_t
modern_get_features(struct virtio_hw *hw)
{
	uint32_t features_lo, features_hi;

	rte_write32(0, &hw->common_cfg->device_feature_select);
	features_lo = rte_read32(&hw->common_cfg->device_feature);

	rte_write32(1, &hw->common_cfg->device_feature_select);
	features_hi = rte_read32(&hw->common_cfg->device_feature);

	return ((uint64_t)features_hi << 32) | features_lo;
}

static void
modern_set_features(struct virtio_hw *hw, uint64_t features)
{
	rte_write32(0, &hw->common_cfg->guest_feature_select);
	rte_write32(features & ((1ULL << 32) - 1),
		    &hw->common_cfg->guest_feature);

	rte_write32(1, &hw->common_cfg->guest_feature_select);
	rte_write32(features >> 32,
		    &hw->common_cfg->guest_feature);
}

static uint8_t
modern_get_status(struct virtio_hw *hw)
{
	return rte_read8(&hw->common_cfg->device_status);
}

static void
modern_set_status(struct virtio_hw *hw, uint8_t status)
{
	rte_write8(status, &hw->common_cfg->device_status);
}

static uint8_t
modern_get_isr(struct virtio_hw *hw)
{
	return rte_read8(hw->isr);
}

static uint16_t
modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	rte_write16(vec, &hw->common_cfg->msix_config);
	return rte_read16(&hw->common_cfg->msix_config);
}

static uint16_t
modern_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
	rte_write16(vec, &hw->common_cfg->queue_msix_vector);
	return rte_read16(&hw->common_cfg->queue_msix_vector);
}

static uint16_t
modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	rte_write16(queue_id, &hw->common_cfg->queue_select);
	return rte_read16(&hw->common_cfg->queue_size);
}
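/*
 * Split-ring layout example (illustrative): with vq_nentries = 256, the
 * descriptor table takes 256 * 16 = 4096 bytes, the avail ring follows it
 * directly and takes 4 + 2 * 256 = 516 bytes, and the used ring starts at
 * the next VIRTIO_PCI_VRING_ALIGN (4096 byte) boundary, i.e. 8192 bytes
 * past the descriptor table base.
 */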
static int
modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	desc_addr = vq->vq_ring_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
	vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
				   notify_off * hw->notify_off_multiplier);

	rte_write16(1, &hw->common_cfg->queue_enable);

	PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->vq_queue_index);
	PMD_INIT_LOG(DEBUG, "\t desc_addr: %" PRIx64, desc_addr);
	PMD_INIT_LOG(DEBUG, "\t avail_addr: %" PRIx64, avail_addr);
	PMD_INIT_LOG(DEBUG, "\t used_addr: %" PRIx64, used_addr);
	PMD_INIT_LOG(DEBUG, "\t notify addr: %p (notify offset: %u)",
		     vq->notify_addr, notify_off);

	return 0;
}

static void
modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	rte_write16(0, &hw->common_cfg->queue_enable);
}
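/*
 * Notification data example (illustrative): with VIRTIO_F_NOTIFICATION_DATA
 * negotiated on a packed ring, queue index 1, vq_avail_idx 0x0010 and the
 * avail wrap counter set, the function below writes
 * (1u << 31) | (0x0010 << 16) | 1 = 0x80100001 to the notify address.
 */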
static void
modern_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t notify_data;

	if (!vtpci_with_feature(hw, VIRTIO_F_NOTIFICATION_DATA)) {
		rte_write16(vq->vq_queue_index, vq->notify_addr);
		return;
	}

	if (vtpci_with_feature(hw, VIRTIO_F_RING_PACKED)) {
		/*
		 * Bit[0:15]: vq queue index
		 * Bit[16:30]: avail index
		 * Bit[31]: avail wrap counter
		 */
		notify_data = ((uint32_t)(!!(vq->vq_packed.cached_flags &
				VRING_PACKED_DESC_F_AVAIL)) << 31) |
				((uint32_t)vq->vq_avail_idx << 16) |
				vq->vq_queue_index;
	} else {
		/*
		 * Bit[0:15]: vq queue index
		 * Bit[16:31]: avail index
		 */
		notify_data = ((uint32_t)vq->vq_avail_idx << 16) |
				vq->vq_queue_index;
	}
	rte_write32(notify_data, vq->notify_addr);
}

const struct virtio_pci_ops modern_ops = {
	.read_dev_cfg	= modern_read_dev_config,
	.write_dev_cfg	= modern_write_dev_config,
	.get_status	= modern_get_status,
	.set_status	= modern_set_status,
	.get_features	= modern_get_features,
	.set_features	= modern_set_features,
	.get_isr	= modern_get_isr,
	.set_config_irq	= modern_set_config_irq,
	.set_queue_irq	= modern_set_queue_irq,
	.get_queue_num	= modern_get_queue_num,
	.setup_queue	= modern_setup_queue,
	.del_queue	= modern_del_queue,
	.notify_queue	= modern_notify_queue,
};

void
vtpci_read_dev_config(struct virtio_hw *hw, size_t offset,
		      void *dst, int length)
{
	VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
}

void
vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
		       const void *src, int length)
{
	VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
}

uint64_t
vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
{
	uint64_t features;

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & hw->guest_features;
	VTPCI_OPS(hw)->set_features(hw, features);

	return features;
}

void
vtpci_reset(struct virtio_hw *hw)
{
	VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	/* flush status write */
	VTPCI_OPS(hw)->get_status(hw);
}

void
vtpci_reinit_complete(struct virtio_hw *hw)
{
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

void
vtpci_set_status(struct virtio_hw *hw, uint8_t status)
{
	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= VTPCI_OPS(hw)->get_status(hw);

	VTPCI_OPS(hw)->set_status(hw, status);
}

uint8_t
vtpci_get_status(struct virtio_hw *hw)
{
	return VTPCI_OPS(hw)->get_status(hw);
}

uint8_t
vtpci_isr(struct virtio_hw *hw)
{
	return VTPCI_OPS(hw)->get_isr(hw);
}

static void *
get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
{
	uint8_t bar = cap->bar;
	uint32_t length = cap->length;
	uint32_t offset = cap->offset;
	uint8_t *base;

	if (bar >= PCI_MAX_RESOURCE) {
		PMD_INIT_LOG(ERR, "invalid bar: %u", bar);
		return NULL;
	}

	if (offset + length < offset) {
		PMD_INIT_LOG(ERR, "offset(%u) + length(%u) overflows",
			offset, length);
		return NULL;
	}

	if (offset + length > dev->mem_resource[bar].len) {
		PMD_INIT_LOG(ERR,
			"invalid cap: overflows bar space: %u > %" PRIu64,
			offset + length, dev->mem_resource[bar].len);
		return NULL;
	}

	base = dev->mem_resource[bar].addr;
	if (base == NULL) {
		PMD_INIT_LOG(ERR, "bar %u base addr is NULL", bar);
		return NULL;
	}

	return base + offset;
}

#define PCI_MSIX_ENABLE 0x8000
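/*
 * Capability layout reminder (per the virtio 1.0 spec; reproduced here for
 * reference only, the real definition lives in virtio_pci.h):
 *
 *     struct virtio_pci_cap {
 *             u8   cap_vndr;    // PCI_CAP_ID_VNDR
 *             u8   cap_next;    // offset of next capability, 0 ends the list
 *             u8   cap_len;
 *             u8   cfg_type;    // COMMON/NOTIFY/ISR/DEVICE cfg
 *             u8   bar;
 *             u8   padding[3];
 *             le32 offset;      // position within the BAR
 *             le32 length;
 *     };
 */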
static int
virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
{
	uint8_t pos;
	struct virtio_pci_cap cap;
	int ret;

	if (rte_pci_map_device(dev)) {
		PMD_INIT_LOG(DEBUG, "failed to map pci device!");
		return -1;
	}

	ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret != 1) {
		PMD_INIT_LOG(DEBUG,
			     "failed to read pci capability list, ret %d", ret);
		return -1;
	}

	while (pos) {
		ret = rte_pci_read_config(dev, &cap, 2, pos);
		if (ret != 2) {
			PMD_INIT_LOG(DEBUG,
				     "failed to read pci cap at pos: %x ret %d",
				     pos, ret);
			break;
		}

		if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
			/* Transitional devices also expose this capability,
			 * so additionally check whether MSI-X is enabled.
			 * 1st byte is the cap ID; 2nd byte is the position of
			 * the next cap; the next two bytes are the flags.
			 */
			uint16_t flags;

			ret = rte_pci_read_config(dev, &flags, sizeof(flags),
					pos + 2);
			if (ret != sizeof(flags)) {
				PMD_INIT_LOG(DEBUG,
					     "failed to read pci cap at pos:"
					     " %x ret %d", pos + 2, ret);
				break;
			}

			if (flags & PCI_MSIX_ENABLE)
				hw->use_msix = VIRTIO_MSIX_ENABLED;
			else
				hw->use_msix = VIRTIO_MSIX_DISABLED;
		}

		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
			PMD_INIT_LOG(DEBUG,
				     "[%2x] skipping non VNDR cap id: %02x",
				     pos, cap.cap_vndr);
			goto next;
		}

		ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
		if (ret != sizeof(cap)) {
			PMD_INIT_LOG(DEBUG,
				     "failed to read pci cap at pos: %x ret %d",
				     pos, ret);
			break;
		}

		PMD_INIT_LOG(DEBUG,
			     "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
			     pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			ret = rte_pci_read_config(dev,
					&hw->notify_off_multiplier,
					4, pos + sizeof(cap));
			if (ret != 4)
				PMD_INIT_LOG(DEBUG,
					"failed to read notify_off_multiplier, ret %d",
					ret);
			else
				hw->notify_base = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cfg_addr(dev, &cap);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->dev_cfg == NULL || hw->isr == NULL) {
		PMD_INIT_LOG(INFO, "no modern virtio pci device found.");
		return -1;
	}

	PMD_INIT_LOG(INFO, "found modern virtio pci device.");

	PMD_INIT_LOG(DEBUG, "common cfg mapped at: %p", hw->common_cfg);
	PMD_INIT_LOG(DEBUG, "device cfg mapped at: %p", hw->dev_cfg);
	PMD_INIT_LOG(DEBUG, "isr cfg mapped at: %p", hw->isr);
	PMD_INIT_LOG(DEBUG, "notify base: %p, notify off multiplier: %u",
		     hw->notify_base, hw->notify_off_multiplier);

	return 0;
}

/*
 * Return -1:
 *   on an error mapping the device with VFIO/UIO;
 *   on an ioport map error when the driver type is KDRV_NONE;
 *   if the device is whitelisted but the driver type is KDRV_UNKNOWN.
 * Return 1 if a kernel driver is managing the device.
 * Return 0 on success.
 */
int
vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
{
	/*
	 * Try reading the virtio pci caps first; they exist only on
	 * modern pci devices. If that fails, fall back to legacy
	 * virtio handling.
	 */
	if (virtio_read_caps(dev, hw) == 0) {
		PMD_INIT_LOG(INFO, "modern virtio pci detected.");
		virtio_hw_internal[hw->port_id].vtpci_ops = &modern_ops;
		hw->modern = 1;
		return 0;
	}

	PMD_INIT_LOG(INFO, "trying with legacy virtio pci.");
	if (rte_pci_ioport_map(dev, 0, VTPCI_IO(hw)) < 0) {
		rte_pci_unmap_device(dev);
		if (dev->kdrv == RTE_PCI_KDRV_UNKNOWN &&
		    (!dev->device.devargs ||
		     dev->device.devargs->bus !=
		     rte_bus_find_by_name("pci"))) {
			PMD_INIT_LOG(INFO,
				"skip kernel managed virtio device.");
			return 1;
		}
		return -1;
	}

	virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops;
	hw->modern = 0;

	return 0;
}
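/*
 * Illustrative call site for vtpci_init() above (hypothetical, simplified
 * from a PMD probe path; not part of this file's API):
 *
 *     ret = vtpci_init(pci_dev, hw);
 *     if (ret == 1)
 *             return 1;       // device is bound to a kernel driver
 *     if (ret < 0)
 *             return ret;     // BAR/ioport mapping failed
 *     // ret == 0: hw->modern and VTPCI_OPS(hw) are now valid
 */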
enum virtio_msix_status
vtpci_msix_detect(struct rte_pci_device *dev)
{
	uint8_t pos;
	int ret;

	ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret != 1) {
		PMD_INIT_LOG(DEBUG,
			     "failed to read pci capability list, ret %d", ret);
		return VIRTIO_MSIX_NONE;
	}

	while (pos) {
		uint8_t cap[2];

		ret = rte_pci_read_config(dev, cap, sizeof(cap), pos);
		if (ret != sizeof(cap)) {
			PMD_INIT_LOG(DEBUG,
				     "failed to read pci cap at pos: %x ret %d",
				     pos, ret);
			break;
		}

		if (cap[0] == PCI_CAP_ID_MSIX) {
			uint16_t flags;

			ret = rte_pci_read_config(dev, &flags, sizeof(flags),
					pos + sizeof(cap));
			if (ret != sizeof(flags)) {
				PMD_INIT_LOG(DEBUG,
					     "failed to read pci cap at pos:"
					     " %x ret %d", pos + 2, ret);
				break;
			}

			if (flags & PCI_MSIX_ENABLE)
				return VIRTIO_MSIX_ENABLED;
			else
				return VIRTIO_MSIX_DISABLED;
		}

		pos = cap[1];
	}

	return VIRTIO_MSIX_NONE;
}