/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>

#ifdef RTE_EXEC_ENV_LINUXAPP
#include <dirent.h>
#include <fcntl.h>
#endif

#include <rte_io.h>

#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"

/*
 * The following macros are derived from linux/pci_regs.h; we can't simply
 * include that header here, as no such file exists on non-Linux platforms.
 */
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_ID_VNDR		0x09
#define PCI_CAP_ID_MSIX		0x11

/*
 * The remaining space is defined by each driver as the per-driver
 * configuration space.
 */
#define VIRTIO_PCI_CONFIG(hw) (((hw)->use_msix) ? 24 : 20)

static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
	/* The virtio PCI device's VIRTIO_PCI_QUEUE_PFN register is 32 bit,
	 * and only accepts 32 bit page frame numbers.
	 * Check if the allocated physical memory exceeds 16TB.
	 */
	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
			(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
		return 0;
	}

	return 1;
}

/*
 * Since we are in legacy mode:
 * http://ozlabs.org/~rusty/virtio-spec/virtio-0.9.5.pdf
 *
 * "Note that this is possible because while the virtio header is PCI (i.e.
 * little) endian, the device-specific region is encoded in the native endian of
 * the guest (where such distinction is applicable)."
 *
 * For powerpc, which supports both endiannesses, qemu assumes the cpu is
 * big endian and enforces this for the virtio-net device.
 */
static void
legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
#ifdef RTE_ARCH_PPC_64
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
		} else if (length >= 2) {
			size = 2;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
		} else {
			size = 1;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		}

		dst = (char *)dst + size;
		offset += size;
		length -= size;
	}
#else
	rte_pci_ioport_read(VTPCI_IO(hw), dst, length,
			    VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}

static void
legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
#ifdef RTE_ARCH_PPC_64
	union {
		uint32_t u32;
		uint16_t u16;
	} tmp;
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src);
			rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
					VIRTIO_PCI_CONFIG(hw) + offset);
		} else if (length >= 2) {
			size = 2;
			tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src);
			rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
					VIRTIO_PCI_CONFIG(hw) + offset);
		} else {
			size = 1;
			rte_pci_ioport_write(VTPCI_IO(hw), src, size,
					VIRTIO_PCI_CONFIG(hw) + offset);
		}

		src = (const char *)src + size;
		offset += size;
		length -= size;
	}
#else
	rte_pci_ioport_write(VTPCI_IO(hw), src, length,
			     VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}

static uint64_t
legacy_get_features(struct virtio_hw *hw)
{
	uint32_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 4, VIRTIO_PCI_HOST_FEATURES);
	return dst;
}

static void
legacy_set_features(struct virtio_hw *hw, uint64_t features)
{
	if ((features >> 32) != 0) {
		PMD_DRV_LOG(ERR,
			"only 32 bit features are allowed for legacy virtio!");
		return;
	}
	rte_pci_ioport_write(VTPCI_IO(hw), &features, 4,
			     VIRTIO_PCI_GUEST_FEATURES);
}

static uint8_t
legacy_get_status(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
	return dst;
}

static void
legacy_set_status(struct virtio_hw *hw, uint8_t status)
{
	rte_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
}

static void
legacy_reset(struct virtio_hw *hw)
{
	legacy_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
}

static uint8_t
legacy_get_isr(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
	return dst;
}

/* Enable one vector (0) for Link State Interrupt */
static uint16_t
legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
	return dst;
}
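/*
 * The device echoes back the vector it actually bound. Per the virtio spec,
 * a device that cannot assign the requested vector reports
 * VIRTIO_MSI_NO_VECTOR, so callers should compare the value read back with
 * the one they requested.
 */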
static uint16_t
legacy_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
			     VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_QUEUE_VECTOR);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
	return dst;
}

static uint16_t
legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
	return dst;
}

static int
legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
			     VIRTIO_PCI_QUEUE_SEL);
	src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
	rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);

	return 0;
}

static void
legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src = 0;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
			     VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
}

static void
legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
			     VIRTIO_PCI_QUEUE_NOTIFY);
}

const struct virtio_pci_ops legacy_ops = {
	.read_dev_cfg = legacy_read_dev_config,
	.write_dev_cfg = legacy_write_dev_config,
	.reset = legacy_reset,
	.get_status = legacy_get_status,
	.set_status = legacy_set_status,
	.get_features = legacy_get_features,
	.set_features = legacy_set_features,
	.get_isr = legacy_get_isr,
	.set_config_irq = legacy_set_config_irq,
	.set_queue_irq = legacy_set_queue_irq,
	.get_queue_num = legacy_get_queue_num,
	.setup_queue = legacy_setup_queue,
	.del_queue = legacy_del_queue,
	.notify_queue = legacy_notify_queue,
};

static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	rte_write32(val & ((1ULL << 32) - 1), lo);
	rte_write32(val >> 32, hi);
}

static void
modern_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;

	do {
		old_gen = rte_read8(&hw->common_cfg->config_generation);

		p = dst;
		for (i = 0; i < length; i++)
			*p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);

		new_gen = rte_read8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

static void
modern_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
	int i;
	const uint8_t *p = src;

	for (i = 0; i < length; i++)
		rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
}
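/*
 * Virtio 1.0 exposes the 64-bit feature bits through a 32-bit window:
 * writing 0 or 1 to the *_feature_select register selects which half the
 * *_feature register refers to, so both halves are accessed in turn below.
 */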
static uint64_t
modern_get_features(struct virtio_hw *hw)
{
	uint32_t features_lo, features_hi;

	rte_write32(0, &hw->common_cfg->device_feature_select);
	features_lo = rte_read32(&hw->common_cfg->device_feature);

	rte_write32(1, &hw->common_cfg->device_feature_select);
	features_hi = rte_read32(&hw->common_cfg->device_feature);

	return ((uint64_t)features_hi << 32) | features_lo;
}

static void
modern_set_features(struct virtio_hw *hw, uint64_t features)
{
	rte_write32(0, &hw->common_cfg->guest_feature_select);
	rte_write32(features & ((1ULL << 32) - 1),
		    &hw->common_cfg->guest_feature);

	rte_write32(1, &hw->common_cfg->guest_feature_select);
	rte_write32(features >> 32,
		    &hw->common_cfg->guest_feature);
}

static uint8_t
modern_get_status(struct virtio_hw *hw)
{
	return rte_read8(&hw->common_cfg->device_status);
}

static void
modern_set_status(struct virtio_hw *hw, uint8_t status)
{
	rte_write8(status, &hw->common_cfg->device_status);
}

static void
modern_reset(struct virtio_hw *hw)
{
	modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	modern_get_status(hw);
}

static uint8_t
modern_get_isr(struct virtio_hw *hw)
{
	return rte_read8(hw->isr);
}

static uint16_t
modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	rte_write16(vec, &hw->common_cfg->msix_config);
	return rte_read16(&hw->common_cfg->msix_config);
}

static uint16_t
modern_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
	rte_write16(vec, &hw->common_cfg->queue_msix_vector);
	return rte_read16(&hw->common_cfg->queue_msix_vector);
}

static uint16_t
modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	rte_write16(queue_id, &hw->common_cfg->queue_select);
	return rte_read16(&hw->common_cfg->queue_size);
}

static int
modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	desc_addr = vq->vq_ring_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
						ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
	vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
				   notify_off * hw->notify_off_multiplier);

	rte_write16(1, &hw->common_cfg->queue_enable);

	PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->vq_queue_index);
	PMD_INIT_LOG(DEBUG, "\t desc_addr: %" PRIx64, desc_addr);
	PMD_INIT_LOG(DEBUG, "\t avail_addr: %" PRIx64, avail_addr);
	PMD_INIT_LOG(DEBUG, "\t used_addr: %" PRIx64, used_addr);
	PMD_INIT_LOG(DEBUG, "\t notify addr: %p (notify offset: %u)",
		     vq->notify_addr, notify_off);

	return 0;
}

static void
modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	rte_write16(0, &hw->common_cfg->queue_enable);
}

static void
modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, vq->notify_addr);
}
const struct virtio_pci_ops modern_ops = {
	.read_dev_cfg = modern_read_dev_config,
	.write_dev_cfg = modern_write_dev_config,
	.reset = modern_reset,
	.get_status = modern_get_status,
	.set_status = modern_set_status,
	.get_features = modern_get_features,
	.set_features = modern_set_features,
	.get_isr = modern_get_isr,
	.set_config_irq = modern_set_config_irq,
	.set_queue_irq = modern_set_queue_irq,
	.get_queue_num = modern_get_queue_num,
	.setup_queue = modern_setup_queue,
	.del_queue = modern_del_queue,
	.notify_queue = modern_notify_queue,
};


void
vtpci_read_dev_config(struct virtio_hw *hw, size_t offset,
		      void *dst, int length)
{
	VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
}

void
vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
		       const void *src, int length)
{
	VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
}

uint64_t
vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
{
	uint64_t features;

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & hw->guest_features;
	VTPCI_OPS(hw)->set_features(hw, features);

	return features;
}

void
vtpci_reset(struct virtio_hw *hw)
{
	VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	/* flush status write */
	VTPCI_OPS(hw)->get_status(hw);
}

void
vtpci_reinit_complete(struct virtio_hw *hw)
{
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

void
vtpci_set_status(struct virtio_hw *hw, uint8_t status)
{
	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= VTPCI_OPS(hw)->get_status(hw);

	VTPCI_OPS(hw)->set_status(hw, status);
}

uint8_t
vtpci_get_status(struct virtio_hw *hw)
{
	return VTPCI_OPS(hw)->get_status(hw);
}

uint8_t
vtpci_isr(struct virtio_hw *hw)
{
	return VTPCI_OPS(hw)->get_isr(hw);
}

static void *
get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
{
	uint8_t bar = cap->bar;
	uint32_t length = cap->length;
	uint32_t offset = cap->offset;
	uint8_t *base;

	if (bar > 5) {
		PMD_INIT_LOG(ERR, "invalid bar: %u", bar);
		return NULL;
	}

	if (offset + length < offset) {
		PMD_INIT_LOG(ERR, "offset(%u) + length(%u) overflows",
			offset, length);
		return NULL;
	}

	if (offset + length > dev->mem_resource[bar].len) {
		PMD_INIT_LOG(ERR,
			"invalid cap: overflows bar space: %u > %" PRIu64,
			offset + length, dev->mem_resource[bar].len);
		return NULL;
	}

	base = dev->mem_resource[bar].addr;
	if (base == NULL) {
		PMD_INIT_LOG(ERR, "bar %u base addr is NULL", bar);
		return NULL;
	}

	return base + offset;
}

static int
virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
{
	uint8_t pos;
	struct virtio_pci_cap cap;
	int ret;

	if (rte_pci_map_device(dev)) {
		PMD_INIT_LOG(DEBUG, "failed to map pci device!");
		return -1;
	}

	ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret < 0) {
		PMD_INIT_LOG(DEBUG, "failed to read pci capability list");
		return -1;
	}

	while (pos) {
		ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
		if (ret < 0) {
			PMD_INIT_LOG(ERR,
				"failed to read pci cap at pos: %x", pos);
			break;
		}

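		/*
		 * MSI-X is a standard capability rather than a vendor-specific
		 * one; record its presence because the legacy device-specific
		 * config offset (VIRTIO_PCI_CONFIG()) depends on hw->use_msix.
		 */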
		if (cap.cap_vndr == PCI_CAP_ID_MSIX)
			hw->use_msix = 1;

		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
			PMD_INIT_LOG(DEBUG,
				"[%2x] skipping non VNDR cap id: %02x",
				pos, cap.cap_vndr);
			goto next;
		}

		PMD_INIT_LOG(DEBUG,
			"[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
			pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			rte_pci_read_config(dev, &hw->notify_off_multiplier,
					4, pos + sizeof(cap));
			hw->notify_base = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cfg_addr(dev, &cap);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->dev_cfg == NULL || hw->isr == NULL) {
		PMD_INIT_LOG(INFO, "no modern virtio pci device found.");
		return -1;
	}

	PMD_INIT_LOG(INFO, "found modern virtio pci device.");

	PMD_INIT_LOG(DEBUG, "common cfg mapped at: %p", hw->common_cfg);
	PMD_INIT_LOG(DEBUG, "device cfg mapped at: %p", hw->dev_cfg);
	PMD_INIT_LOG(DEBUG, "isr cfg mapped at: %p", hw->isr);
	PMD_INIT_LOG(DEBUG, "notify base: %p, notify off multiplier: %u",
		hw->notify_base, hw->notify_off_multiplier);

	return 0;
}

/*
 * Return -1:
 *   if there is an error mapping with VFIO/UIO.
 *   if the port map fails when the driver type is KDRV_NONE.
 *   if the device is whitelisted but the driver type is KDRV_UNKNOWN.
 * Return 1 if a kernel driver is managing the device.
 * Return 0 on success.
 */
int
vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
{
	/*
	 * Try reading the virtio PCI caps, which exist only on modern
	 * PCI devices. If that fails, fall back to legacy virtio handling.
	 */
	if (virtio_read_caps(dev, hw) == 0) {
		PMD_INIT_LOG(INFO, "modern virtio pci detected.");
		virtio_hw_internal[hw->port_id].vtpci_ops = &modern_ops;
		hw->modern = 1;
		return 0;
	}

	PMD_INIT_LOG(INFO, "trying with legacy virtio pci.");
	if (rte_pci_ioport_map(dev, 0, VTPCI_IO(hw)) < 0) {
		if (dev->kdrv == RTE_KDRV_UNKNOWN &&
		    (!dev->device.devargs ||
		     dev->device.devargs->type !=
			RTE_DEVTYPE_WHITELISTED_PCI)) {
			PMD_INIT_LOG(INFO,
				"skip kernel managed virtio device.");
			return 1;
		}
		return -1;
	}

	virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops;
	hw->modern = 0;

	return 0;
}