/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/memory.h"
#include "spdk/mmio.h"
#include "spdk/string.h"
#include "spdk/env.h"

#include "spdk_internal/virtio.h"
#include <linux/virtio_ids.h>

struct virtio_hw {
	uint8_t use_msix;
	uint32_t notify_off_multiplier;
	uint8_t *isr;
	uint16_t *notify_base;

	struct {
		/** Mem-mapped resources from given PCI BAR */
		void *vaddr;

		/** Length of the address space */
		uint32_t len;
	} pci_bar[6];

	struct virtio_pci_common_cfg *common_cfg;
	struct spdk_pci_device *pci_dev;

	/** Device-specific PCI config space */
	void *dev_cfg;

	struct virtio_dev *vdev;
	bool is_remapped;
	bool is_removing;
	TAILQ_ENTRY(virtio_hw) tailq;
};

struct virtio_pci_probe_ctx {
	virtio_pci_create_cb enum_cb;
	void *enum_ctx;
	uint16_t device_id;
};

static TAILQ_HEAD(, virtio_hw) g_virtio_hws = TAILQ_HEAD_INITIALIZER(g_virtio_hws);
static pthread_mutex_t g_hw_mutex = PTHREAD_MUTEX_INITIALIZER;
__thread struct virtio_hw *g_thread_virtio_hw = NULL;
static uint16_t g_signal_lock;
static bool g_sigset = false;

/*
 * The following macros are derived from linux/pci_regs.h. We can't
 * simply include that header here, as there is no such file on
 * non-Linux platforms.
 */
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_ID_VNDR		0x09
#define PCI_CAP_ID_MSIX		0x11

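/* Hot-remove handling: an MMIO access to a device that was just removed
 * raises SIGBUS. Each MMIO accessor below points the thread-local
 * g_thread_virtio_hw at the device it is about to touch, so the handler
 * knows whose BARs faulted. The handler then remaps those BARs (at the
 * same virtual addresses) to anonymous memory filled with 0xFF, so that
 * in-flight accesses complete and reads return all-ones, much like reads
 * from a missing device on a physical PCI bus.
 */
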
static void
virtio_pci_dev_sigbus_handler(const void *failure_addr, void *ctx)
{
	void *map_address = NULL;
	uint16_t flag = 0;
	int i;

	if (!__atomic_compare_exchange_n(&g_signal_lock, &flag, 1, false, __ATOMIC_ACQUIRE,
					 __ATOMIC_RELAXED)) {
		SPDK_DEBUGLOG(virtio_pci, "request g_signal_lock failed\n");
		return;
	}

	if (g_thread_virtio_hw == NULL || g_thread_virtio_hw->is_remapped) {
		__atomic_store_n(&g_signal_lock, 0, __ATOMIC_RELEASE);
		return;
	}

	/* Remap each BAR at its original VA to avoid subsequent SIGBUS errors.
	 * Since the VAs are unchanged, pointers derived from them
	 * (hw->common_cfg and so on) do not need to be updated.
	 */
	for (i = 0; i < 6; ++i) {
		if (g_thread_virtio_hw->pci_bar[i].vaddr == NULL) {
			continue;
		}

		map_address = mmap(g_thread_virtio_hw->pci_bar[i].vaddr,
				   g_thread_virtio_hw->pci_bar[i].len,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
		if (map_address == MAP_FAILED) {
			SPDK_ERRLOG("mmap failed\n");
			goto fail;
		}
		memset(map_address, 0xFF, g_thread_virtio_hw->pci_bar[i].len);
	}

	g_thread_virtio_hw->is_remapped = true;
	__atomic_store_n(&g_signal_lock, 0, __ATOMIC_RELEASE);
	return;
fail:
	for (--i; i >= 0; i--) {
		if (g_thread_virtio_hw->pci_bar[i].vaddr == NULL) {
			continue;
		}

		munmap(g_thread_virtio_hw->pci_bar[i].vaddr, g_thread_virtio_hw->pci_bar[i].len);
	}
	__atomic_store_n(&g_signal_lock, 0, __ATOMIC_RELEASE);
}

static struct virtio_hw *
virtio_pci_dev_get_by_addr(struct spdk_pci_addr *traddr)
{
	struct virtio_hw *hw;
	struct spdk_pci_addr addr;

	pthread_mutex_lock(&g_hw_mutex);
	TAILQ_FOREACH(hw, &g_virtio_hws, tailq) {
		addr = spdk_pci_device_get_addr(hw->pci_dev);
		if (!spdk_pci_addr_compare(&addr, traddr)) {
			pthread_mutex_unlock(&g_hw_mutex);
			return hw;
		}
	}
	pthread_mutex_unlock(&g_hw_mutex);

	return NULL;
}

static const char *
virtio_pci_dev_check(struct virtio_hw *hw, uint16_t device_id_match)
{
	uint16_t pci_device_id, device_id;

	pci_device_id = spdk_pci_device_get_device_id(hw->pci_dev);
	if (pci_device_id < 0x1040) {
		/* Transitional devices: use the PCI subsystem device id as
		 * virtio device id, same as legacy driver always did.
		 */
		device_id = spdk_pci_device_get_subdevice_id(hw->pci_dev);
	} else {
		/* Modern devices: simply use PCI device id, but start from 0x1040. */
		device_id = pci_device_id - 0x1040;
	}

	if (device_id == device_id_match) {
		hw->is_removing = true;
		return hw->vdev->name;
	}

	return NULL;
}

const char *
virtio_pci_dev_event_process(int fd, uint16_t device_id)
{
	struct spdk_pci_event event;
	struct virtio_hw *hw, *tmp;
	const char *vdev_name;

	/* UIO remove handler */
	if (spdk_pci_get_event(fd, &event) > 0) {
		if (event.action == SPDK_UEVENT_REMOVE) {
			hw = virtio_pci_dev_get_by_addr(&event.traddr);
			if (hw == NULL || hw->is_removing) {
				return NULL;
			}

			vdev_name = virtio_pci_dev_check(hw, device_id);
			if (vdev_name != NULL) {
				return vdev_name;
			}
		}
	}

	/* VFIO remove handler */
	pthread_mutex_lock(&g_hw_mutex);
	TAILQ_FOREACH_SAFE(hw, &g_virtio_hws, tailq, tmp) {
		if (spdk_pci_device_is_removed(hw->pci_dev) && !hw->is_removing) {
			vdev_name = virtio_pci_dev_check(hw, device_id);
			if (vdev_name != NULL) {
				pthread_mutex_unlock(&g_hw_mutex);
				return vdev_name;
			}
		}
	}
	pthread_mutex_unlock(&g_hw_mutex);

	return NULL;
}

static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
	/* The legacy virtio PCI VIRTIO_PCI_QUEUE_PFN register is 32 bits wide
	 * and only accepts a 32-bit page frame number.
	 * Check if the allocated physical memory exceeds 16TB.
	 */
	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
	    (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		SPDK_ERRLOG("vring address shouldn't be above 16TB!\n");
		return 0;
	}

	return 1;
}

static void
free_virtio_hw(struct virtio_hw *hw)
{
	unsigned i;

	for (i = 0; i < 6; ++i) {
		if (hw->pci_bar[i].vaddr == NULL) {
			continue;
		}

		spdk_pci_device_unmap_bar(hw->pci_dev, i, hw->pci_bar[i].vaddr);
	}

	free(hw);
}

static void
pci_dump_json_info(struct virtio_dev *dev, struct spdk_json_write_ctx *w)
{
	struct virtio_hw *hw = dev->ctx;
	struct spdk_pci_addr pci_addr = spdk_pci_device_get_addr((struct spdk_pci_device *)hw->pci_dev);
	char addr[32];

	spdk_json_write_name(w, "type");
	if (dev->modern) {
		spdk_json_write_string(w, "pci-modern");
	} else {
		spdk_json_write_string(w, "pci-legacy");
	}

	spdk_pci_addr_fmt(addr, sizeof(addr), &pci_addr);
	spdk_json_write_named_string(w, "pci_address", addr);
}

static void
pci_write_json_config(struct virtio_dev *dev, struct spdk_json_write_ctx *w)
{
	struct virtio_hw *hw = dev->ctx;
	struct spdk_pci_addr pci_addr = spdk_pci_device_get_addr(hw->pci_dev);
	char addr[32];

	spdk_pci_addr_fmt(addr, sizeof(addr), &pci_addr);

	spdk_json_write_named_string(w, "trtype", "pci");
	spdk_json_write_named_string(w, "traddr", addr);
}

/* The modern common config registers are 32 bits wide, so 64-bit values
 * (e.g. queue addresses) are written as two 32-bit halves, low half first.
 */
static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	spdk_mmio_write_4(lo, val & ((1ULL << 32) - 1));
	spdk_mmio_write_4(hi, val >> 32);
}

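/* Device config reads follow the VIRTIO 1.0 generation protocol: the
 * device bumps config_generation whenever the device-specific config
 * changes, so sampling the generation before and after the byte-by-byte
 * copy, and retrying until the two samples match, yields a consistent
 * snapshot of multi-byte fields.
 */
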
static int
modern_read_dev_config(struct virtio_dev *dev, size_t offset,
		       void *dst, int length)
{
	struct virtio_hw *hw = dev->ctx;
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;

	g_thread_virtio_hw = hw;
	do {
		old_gen = spdk_mmio_read_1(&hw->common_cfg->config_generation);

		p = dst;
		for (i = 0; i < length; i++) {
			*p++ = spdk_mmio_read_1((uint8_t *)hw->dev_cfg + offset + i);
		}

		new_gen = spdk_mmio_read_1(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
	g_thread_virtio_hw = NULL;

	return 0;
}

static int
modern_write_dev_config(struct virtio_dev *dev, size_t offset,
			const void *src, int length)
{
	struct virtio_hw *hw = dev->ctx;
	int i;
	const uint8_t *p = src;

	g_thread_virtio_hw = hw;
	for (i = 0; i < length; i++) {
		spdk_mmio_write_1(((uint8_t *)hw->dev_cfg) + offset + i, *p++);
	}
	g_thread_virtio_hw = NULL;

	return 0;
}

static uint64_t
modern_get_features(struct virtio_dev *dev)
{
	struct virtio_hw *hw = dev->ctx;
	uint32_t features_lo, features_hi;

	g_thread_virtio_hw = hw;
	spdk_mmio_write_4(&hw->common_cfg->device_feature_select, 0);
	features_lo = spdk_mmio_read_4(&hw->common_cfg->device_feature);

	spdk_mmio_write_4(&hw->common_cfg->device_feature_select, 1);
	features_hi = spdk_mmio_read_4(&hw->common_cfg->device_feature);
	g_thread_virtio_hw = NULL;

	return ((uint64_t)features_hi << 32) | features_lo;
}

static int
modern_set_features(struct virtio_dev *dev, uint64_t features)
{
	struct virtio_hw *hw = dev->ctx;

	if ((features & (1ULL << VIRTIO_F_VERSION_1)) == 0) {
		SPDK_ERRLOG("VIRTIO_F_VERSION_1 feature is not enabled.\n");
		return -EINVAL;
	}

	g_thread_virtio_hw = hw;
	spdk_mmio_write_4(&hw->common_cfg->guest_feature_select, 0);
	spdk_mmio_write_4(&hw->common_cfg->guest_feature, features & ((1ULL << 32) - 1));

	spdk_mmio_write_4(&hw->common_cfg->guest_feature_select, 1);
	spdk_mmio_write_4(&hw->common_cfg->guest_feature, features >> 32);
	g_thread_virtio_hw = NULL;

	dev->negotiated_features = features;

	return 0;
}

static void
modern_destruct_dev(struct virtio_dev *vdev)
{
	struct virtio_hw *hw = vdev->ctx;
	struct spdk_pci_device *pci_dev;

	if (hw != NULL) {
		pthread_mutex_lock(&g_hw_mutex);
		TAILQ_REMOVE(&g_virtio_hws, hw, tailq);
		pthread_mutex_unlock(&g_hw_mutex);
		pci_dev = hw->pci_dev;
		free_virtio_hw(hw);
		if (pci_dev) {
			spdk_pci_device_detach(pci_dev);
		}
	}
}

static uint8_t
modern_get_status(struct virtio_dev *dev)
{
	struct virtio_hw *hw = dev->ctx;
	uint8_t ret;

	g_thread_virtio_hw = hw;
	ret = spdk_mmio_read_1(&hw->common_cfg->device_status);
	g_thread_virtio_hw = NULL;

	return ret;
}

static void
modern_set_status(struct virtio_dev *dev, uint8_t status)
{
	struct virtio_hw *hw = dev->ctx;

	g_thread_virtio_hw = hw;
	spdk_mmio_write_1(&hw->common_cfg->device_status, status);
	g_thread_virtio_hw = NULL;
}

static uint16_t
modern_get_queue_size(struct virtio_dev *dev, uint16_t queue_id)
{
	struct virtio_hw *hw = dev->ctx;
	uint16_t ret;

	g_thread_virtio_hw = hw;
	spdk_mmio_write_2(&hw->common_cfg->queue_select, queue_id);
	ret = spdk_mmio_read_2(&hw->common_cfg->queue_size);
	g_thread_virtio_hw = NULL;

	return ret;
}

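/* vring layout reminder: the descriptor table, available ring and used
 * ring share one physically contiguous allocation. The available ring
 * starts right after the descriptor table; the used ring starts at the
 * next VIRTIO_PCI_VRING_ALIGN boundary. For example, with 128 entries
 * and 4096-byte alignment, the descriptors occupy 128 * 16 = 2048 bytes,
 * the available ring ends at byte 2308, and the used ring begins at
 * byte 4096.
 */
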
static int
modern_setup_queue(struct virtio_dev *dev, struct virtqueue *vq)
{
	struct virtio_hw *hw = dev->ctx;
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;
	void *queue_mem;
	uint64_t queue_mem_phys_addr;

	/* To ensure physical address contiguity we make the queue occupy
	 * only a single hugepage (2MB). As of Virtio 1.0, the queue size
	 * always falls within this limit.
	 */
	if (vq->vq_ring_size > VALUE_2MB) {
		return -ENOMEM;
	}

	queue_mem = spdk_zmalloc(vq->vq_ring_size, VALUE_2MB, NULL,
				 SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (queue_mem == NULL) {
		return -ENOMEM;
	}

	queue_mem_phys_addr = spdk_vtophys(queue_mem, NULL);
	if (queue_mem_phys_addr == SPDK_VTOPHYS_ERROR) {
		spdk_free(queue_mem);
		return -EFAULT;
	}

	vq->vq_ring_mem = queue_mem_phys_addr;
	vq->vq_ring_virt_mem = queue_mem;

	if (!check_vq_phys_addr_ok(vq)) {
		spdk_free(queue_mem);
		return -ENOMEM;
	}

	desc_addr = vq->vq_ring_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = (avail_addr + offsetof(struct vring_avail, ring[vq->vq_nentries])
		     + VIRTIO_PCI_VRING_ALIGN - 1) & ~(VIRTIO_PCI_VRING_ALIGN - 1);

	g_thread_virtio_hw = hw;
	spdk_mmio_write_2(&hw->common_cfg->queue_select, vq->vq_queue_index);

	io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	notify_off = spdk_mmio_read_2(&hw->common_cfg->queue_notify_off);
	vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
				   notify_off * hw->notify_off_multiplier);

	spdk_mmio_write_2(&hw->common_cfg->queue_enable, 1);
	g_thread_virtio_hw = NULL;

	SPDK_DEBUGLOG(virtio_pci, "queue %"PRIu16" addresses:\n", vq->vq_queue_index);
	SPDK_DEBUGLOG(virtio_pci, "\t desc_addr: %" PRIx64 "\n", desc_addr);
	SPDK_DEBUGLOG(virtio_pci, "\t avail_addr: %" PRIx64 "\n", avail_addr);
	SPDK_DEBUGLOG(virtio_pci, "\t used_addr: %" PRIx64 "\n", used_addr);
	SPDK_DEBUGLOG(virtio_pci, "\t notify addr: %p (notify offset: %"PRIu16")\n",
		      vq->notify_addr, notify_off);

	return 0;
}

static void
modern_del_queue(struct virtio_dev *dev, struct virtqueue *vq)
{
	struct virtio_hw *hw = dev->ctx;

	g_thread_virtio_hw = hw;
	spdk_mmio_write_2(&hw->common_cfg->queue_select, vq->vq_queue_index);

	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	spdk_mmio_write_2(&hw->common_cfg->queue_enable, 0);
	g_thread_virtio_hw = NULL;

	spdk_free(vq->vq_ring_virt_mem);
}

static void
modern_notify_queue(struct virtio_dev *dev, struct virtqueue *vq)
{
	/* Ring the doorbell: write the queue index to the notify address
	 * computed in modern_setup_queue().
	 */
	g_thread_virtio_hw = dev->ctx;
	spdk_mmio_write_2(vq->notify_addr, vq->vq_queue_index);
	g_thread_virtio_hw = NULL;
}

static const struct virtio_dev_ops modern_ops = {
	.read_dev_cfg = modern_read_dev_config,
	.write_dev_cfg = modern_write_dev_config,
	.get_status = modern_get_status,
	.set_status = modern_set_status,
	.get_features = modern_get_features,
	.set_features = modern_set_features,
	.destruct_dev = modern_destruct_dev,
	.get_queue_size = modern_get_queue_size,
	.setup_queue = modern_setup_queue,
	.del_queue = modern_del_queue,
	.notify_queue = modern_notify_queue,
	.dump_json_info = pci_dump_json_info,
	.write_json_config = pci_write_json_config,
};

static void *
get_cfg_addr(struct virtio_hw *hw, struct virtio_pci_cap *cap)
{
	uint8_t bar = cap->bar;
	uint32_t length = cap->length;
	uint32_t offset = cap->offset;

	if (bar > 5) {
		SPDK_ERRLOG("invalid bar: %"PRIu8"\n", bar);
		return NULL;
	}

	if (offset + length < offset) {
		SPDK_ERRLOG("offset(%"PRIu32") + length(%"PRIu32") overflows\n",
			    offset, length);
		return NULL;
	}

	if (offset + length > hw->pci_bar[bar].len) {
		SPDK_ERRLOG("invalid cap: overflows bar space: %"PRIu32" > %"PRIu32"\n",
			    offset + length, hw->pci_bar[bar].len);
		return NULL;
	}

	if (hw->pci_bar[bar].vaddr == NULL) {
		SPDK_ERRLOG("bar %"PRIu8" base addr is NULL\n", bar);
		return NULL;
	}

	return hw->pci_bar[bar].vaddr + offset;
}

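/* Discover the modern virtio-pci register windows (VIRTIO 1.0, section
 * 4.1.4): walk the standard PCI capability list and inspect each
 * vendor-specific (ID 0x09) capability, which is a struct virtio_pci_cap
 * placing one window (common, notify, ISR or device-specific config)
 * inside a BAR. The notify capability is immediately followed by a
 * 32-bit notify_off_multiplier.
 */
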
static int
virtio_read_caps(struct virtio_hw *hw)
{
	uint8_t pos;
	struct virtio_pci_cap cap;
	int ret;

	ret = spdk_pci_device_cfg_read(hw->pci_dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret < 0) {
		SPDK_DEBUGLOG(virtio_pci, "failed to read pci capability list\n");
		return ret;
	}

	while (pos) {
		ret = spdk_pci_device_cfg_read(hw->pci_dev, &cap, sizeof(cap), pos);
		if (ret < 0) {
			SPDK_ERRLOG("failed to read pci cap at pos: %"PRIx8"\n", pos);
			break;
		}

		if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
			hw->use_msix = 1;
		}

		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
			SPDK_DEBUGLOG(virtio_pci,
				      "[%2"PRIx8"] skipping non VNDR cap id: %02"PRIx8"\n",
				      pos, cap.cap_vndr);
			goto next;
		}

		SPDK_DEBUGLOG(virtio_pci,
			      "[%2"PRIx8"] cfg type: %"PRIu8", bar: %"PRIu8", offset: %04"PRIx32", len: %"PRIu32"\n",
			      pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cfg_addr(hw, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			spdk_pci_device_cfg_read(hw->pci_dev, &hw->notify_off_multiplier,
						 4, pos + sizeof(cap));
			hw->notify_base = get_cfg_addr(hw, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cfg_addr(hw, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cfg_addr(hw, &cap);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->dev_cfg == NULL || hw->isr == NULL) {
		SPDK_DEBUGLOG(virtio_pci, "no modern virtio pci device found.\n");
		if (ret < 0) {
			return ret;
		} else {
			return -EINVAL;
		}
	}

	SPDK_DEBUGLOG(virtio_pci, "found modern virtio pci device.\n");

	SPDK_DEBUGLOG(virtio_pci, "common cfg mapped at: %p\n", hw->common_cfg);
	SPDK_DEBUGLOG(virtio_pci, "device cfg mapped at: %p\n", hw->dev_cfg);
	SPDK_DEBUGLOG(virtio_pci, "isr cfg mapped at: %p\n", hw->isr);
	SPDK_DEBUGLOG(virtio_pci, "notify base: %p, notify off multiplier: %u\n",
		      hw->notify_base, hw->notify_off_multiplier);

	return 0;
}

static int
virtio_pci_dev_probe(struct spdk_pci_device *pci_dev, struct virtio_pci_probe_ctx *ctx)
{
	struct virtio_hw *hw;
	uint8_t *bar_vaddr;
	uint64_t bar_paddr, bar_len;
	int rc;
	unsigned i;
	char bdf[32];
	struct spdk_pci_addr addr;

	addr = spdk_pci_device_get_addr(pci_dev);
	rc = spdk_pci_addr_fmt(bdf, sizeof(bdf), &addr);
	if (rc != 0) {
		SPDK_ERRLOG("Ignoring a device with non-parseable PCI address\n");
		return -1;
	}

	hw = calloc(1, sizeof(*hw));
	if (hw == NULL) {
		SPDK_ERRLOG("%s: calloc failed\n", bdf);
		return -1;
	}

	hw->pci_dev = pci_dev;

	for (i = 0; i < 6; ++i) {
		rc = spdk_pci_device_map_bar(pci_dev, i, (void *) &bar_vaddr, &bar_paddr,
					     &bar_len);
		if (rc != 0) {
			SPDK_ERRLOG("%s: failed to memmap PCI BAR %u\n", bdf, i);
			free_virtio_hw(hw);
			return -1;
		}

		hw->pci_bar[i].vaddr = bar_vaddr;
		hw->pci_bar[i].len = bar_len;
	}

	/* Virtio PCI caps exist only on modern PCI devices.
	 * Legacy devices are not supported.
	 */
	if (virtio_read_caps(hw) != 0) {
		SPDK_NOTICELOG("Ignoring legacy PCI device at %s\n", bdf);
		free_virtio_hw(hw);
		return -1;
	}

	rc = ctx->enum_cb((struct virtio_pci_ctx *)hw, ctx->enum_ctx);
	if (rc != 0) {
		free_virtio_hw(hw);
		return rc;
	}

	if (g_sigset != true) {
		spdk_pci_register_error_handler(virtio_pci_dev_sigbus_handler,
						NULL);
		g_sigset = true;
	}

	pthread_mutex_lock(&g_hw_mutex);
	TAILQ_INSERT_TAIL(&g_virtio_hws, hw, tailq);
	pthread_mutex_unlock(&g_hw_mutex);

	return 0;
}

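/* Virtio PCI devices use device IDs 0x1000-0x107f. Transitional devices
 * (0x1000-0x103f) advertise the virtio device type in the PCI subsystem
 * device ID, while modern devices encode it directly as 0x1040 + type,
 * e.g. 0x1042 for a virtio-blk device (type 2).
 */
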
static int
virtio_pci_dev_probe_cb(void *probe_ctx, struct spdk_pci_device *pci_dev)
{
	struct virtio_pci_probe_ctx *ctx = probe_ctx;
	uint16_t pci_device_id = spdk_pci_device_get_device_id(pci_dev);
	uint16_t device_id;

	if (pci_device_id < 0x1000 || pci_device_id > 0x107f) {
		SPDK_ERRLOG("Probed device is not a virtio device\n");
		return 1;
	}

	if (pci_device_id < 0x1040) {
		/* Transitional devices: use the PCI subsystem device id as
		 * virtio device id, same as legacy driver always did.
		 */
		device_id = spdk_pci_device_get_subdevice_id(pci_dev);
	} else {
		/* Modern devices: simply use PCI device id, but start from 0x1040. */
		device_id = pci_device_id - 0x1040;
	}

	if (device_id != ctx->device_id) {
		return 1;
	}

	return virtio_pci_dev_probe(pci_dev, ctx);
}

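/* Usage sketch (illustrative only, not part of this driver): a caller
 * enumerates devices of one virtio type and builds a virtio_dev on top of
 * each PCI context handed to its callback. The names my_create_cb and
 * my_vdev below are hypothetical.
 *
 *	static int
 *	my_create_cb(struct virtio_pci_ctx *pci_ctx, void *cb_ctx)
 *	{
 *		struct virtio_dev *vdev = calloc(1, sizeof(*vdev));
 *
 *		if (vdev == NULL) {
 *			return -ENOMEM;
 *		}
 *		return virtio_pci_dev_init(vdev, "my_vdev", pci_ctx);
 *	}
 *
 *	virtio_pci_dev_enumerate(my_create_cb, NULL, VIRTIO_ID_BLOCK);
 */
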
int
virtio_pci_dev_enumerate(virtio_pci_create_cb enum_cb, void *enum_ctx,
			 uint16_t pci_device_id)
{
	struct virtio_pci_probe_ctx ctx;

	if (!spdk_process_is_primary()) {
		SPDK_WARNLOG("virtio_pci secondary process support is not implemented yet.\n");
		return 0;
	}

	ctx.enum_cb = enum_cb;
	ctx.enum_ctx = enum_ctx;
	ctx.device_id = pci_device_id;

	return spdk_pci_enumerate(spdk_pci_virtio_get_driver(),
				  virtio_pci_dev_probe_cb, &ctx);
}

int
virtio_pci_dev_attach(virtio_pci_create_cb enum_cb, void *enum_ctx,
		      uint16_t device_id, struct spdk_pci_addr *pci_address)
{
	struct virtio_pci_probe_ctx ctx;

	if (!spdk_process_is_primary()) {
		SPDK_WARNLOG("virtio_pci secondary process support is not implemented yet.\n");
		return 0;
	}

	ctx.enum_cb = enum_cb;
	ctx.enum_ctx = enum_ctx;
	ctx.device_id = device_id;

	return spdk_pci_device_attach(spdk_pci_virtio_get_driver(),
				      virtio_pci_dev_probe_cb, &ctx, pci_address);
}

int
virtio_pci_dev_init(struct virtio_dev *vdev, const char *name,
		    struct virtio_pci_ctx *pci_ctx)
{
	int rc;
	struct virtio_hw *hw = (struct virtio_hw *)pci_ctx;

	rc = virtio_dev_construct(vdev, name, &modern_ops, pci_ctx);
	if (rc != 0) {
		return rc;
	}

	vdev->is_hw = 1;
	vdev->modern = 1;
	hw->vdev = vdev;

	return 0;
}

SPDK_LOG_REGISTER_COMPONENT(virtio_pci)