/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/memory.h"
#include "spdk/mmio.h"
#include "spdk/string.h"
#include "spdk/env.h"

#include "spdk_internal/virtio.h"

struct virtio_hw {
	uint8_t use_msix;
	uint32_t notify_off_multiplier;
	uint8_t *isr;
	uint16_t *notify_base;

	struct {
		/** Mem-mapped resources from given PCI BAR */
		void *vaddr;

		/** Length of the address space */
		uint32_t len;
	} pci_bar[6];

	struct virtio_pci_common_cfg *common_cfg;
	struct spdk_pci_device *pci_dev;

	/** Device-specific PCI config space */
	void *dev_cfg;
};

struct virtio_pci_probe_ctx {
	virtio_pci_create_cb enum_cb;
	void *enum_ctx;
	uint16_t device_id;
};

/*
 * Following macros are derived from linux/pci_regs.h, however,
 * we can't simply include that header here, as there is no such
 * file for non-Linux platform.
 */
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_ID_VNDR		0x09
#define PCI_CAP_ID_MSIX		0x11

static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
	/* Virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32bit,
	 * and only accepts 32 bit page frame number.
	 * Check if the allocated physical memory exceeds 16TB.
	 */
	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
	    (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		SPDK_ERRLOG("vring address shouldn't be above 16TB!\n");
		return 0;
	}

	return 1;
}
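/* Unmap any BARs that were mapped during probe and free the device context.
 * The underlying spdk_pci_device is not detached here; that is left to the
 * caller (see modern_destruct_dev).
 */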
static void
free_virtio_hw(struct virtio_hw *hw)
{
	unsigned i;

	for (i = 0; i < 6; ++i) {
		if (hw->pci_bar[i].vaddr == NULL) {
			continue;
		}

		spdk_pci_device_unmap_bar(hw->pci_dev, i, hw->pci_bar[i].vaddr);
	}

	free(hw);
}

static void
pci_dump_json_info(struct virtio_dev *dev, struct spdk_json_write_ctx *w)
{
	struct virtio_hw *hw = dev->ctx;
	struct spdk_pci_addr pci_addr = spdk_pci_device_get_addr((struct spdk_pci_device *)hw->pci_dev);
	char addr[32];

	spdk_json_write_name(w, "type");
	if (dev->modern) {
		spdk_json_write_string(w, "pci-modern");
	} else {
		spdk_json_write_string(w, "pci-legacy");
	}

	spdk_pci_addr_fmt(addr, sizeof(addr), &pci_addr);
	spdk_json_write_named_string(w, "pci_address", addr);
}

static void
pci_write_json_config(struct virtio_dev *dev, struct spdk_json_write_ctx *w)
{
	struct virtio_hw *hw = dev->ctx;
	struct spdk_pci_addr pci_addr = spdk_pci_device_get_addr(hw->pci_dev);
	char addr[32];

	spdk_pci_addr_fmt(addr, sizeof(addr), &pci_addr);

	spdk_json_write_named_string(w, "trtype", "pci");
	spdk_json_write_named_string(w, "traddr", addr);
}

static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	spdk_mmio_write_4(lo, val & ((1ULL << 32) - 1));
	spdk_mmio_write_4(hi, val >> 32);
}

static int
modern_read_dev_config(struct virtio_dev *dev, size_t offset,
			void *dst, int length)
{
	struct virtio_hw *hw = dev->ctx;
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;

	do {
		old_gen = spdk_mmio_read_1(&hw->common_cfg->config_generation);

		p = dst;
		for (i = 0; i < length; i++) {
			*p++ = spdk_mmio_read_1((uint8_t *)hw->dev_cfg + offset + i);
		}

		new_gen = spdk_mmio_read_1(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);

	return 0;
}

static int
modern_write_dev_config(struct virtio_dev *dev, size_t offset,
			const void *src, int length)
{
	struct virtio_hw *hw = dev->ctx;
	int i;
	const uint8_t *p = src;

	for (i = 0; i < length; i++) {
		spdk_mmio_write_1(((uint8_t *)hw->dev_cfg) + offset + i, *p++);
	}

	return 0;
}

static uint64_t
modern_get_features(struct virtio_dev *dev)
{
	struct virtio_hw *hw = dev->ctx;
	uint32_t features_lo, features_hi;

	spdk_mmio_write_4(&hw->common_cfg->device_feature_select, 0);
	features_lo = spdk_mmio_read_4(&hw->common_cfg->device_feature);

	spdk_mmio_write_4(&hw->common_cfg->device_feature_select, 1);
	features_hi = spdk_mmio_read_4(&hw->common_cfg->device_feature);

	return ((uint64_t)features_hi << 32) | features_lo;
}
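/* Write the 64-bit guest feature mask as two 32-bit halves selected via
 * guest_feature_select. Only modern devices are supported, so the
 * VIRTIO_F_VERSION_1 bit must be part of the negotiated set.
 */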
static int
modern_set_features(struct virtio_dev *dev, uint64_t features)
{
	struct virtio_hw *hw = dev->ctx;

	if ((features & (1ULL << VIRTIO_F_VERSION_1)) == 0) {
		SPDK_ERRLOG("VIRTIO_F_VERSION_1 feature is not enabled.\n");
		return -EINVAL;
	}

	spdk_mmio_write_4(&hw->common_cfg->guest_feature_select, 0);
	spdk_mmio_write_4(&hw->common_cfg->guest_feature, features & ((1ULL << 32) - 1));

	spdk_mmio_write_4(&hw->common_cfg->guest_feature_select, 1);
	spdk_mmio_write_4(&hw->common_cfg->guest_feature, features >> 32);

	dev->negotiated_features = features;

	return 0;
}

static void
modern_destruct_dev(struct virtio_dev *vdev)
{
	struct virtio_hw *hw = vdev->ctx;
	struct spdk_pci_device *pci_dev;

	if (hw != NULL) {
		pci_dev = hw->pci_dev;
		free_virtio_hw(hw);
		if (pci_dev) {
			spdk_pci_device_detach(pci_dev);
		}
	}
}

static uint8_t
modern_get_status(struct virtio_dev *dev)
{
	struct virtio_hw *hw = dev->ctx;

	return spdk_mmio_read_1(&hw->common_cfg->device_status);
}

static void
modern_set_status(struct virtio_dev *dev, uint8_t status)
{
	struct virtio_hw *hw = dev->ctx;

	spdk_mmio_write_1(&hw->common_cfg->device_status, status);
}

static uint16_t
modern_get_queue_size(struct virtio_dev *dev, uint16_t queue_id)
{
	struct virtio_hw *hw = dev->ctx;

	spdk_mmio_write_2(&hw->common_cfg->queue_select, queue_id);
	return spdk_mmio_read_2(&hw->common_cfg->queue_size);
}

static int
modern_setup_queue(struct virtio_dev *dev, struct virtqueue *vq)
{
	struct virtio_hw *hw = dev->ctx;
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;
	void *queue_mem;
	uint64_t queue_mem_phys_addr;

	/* To ensure physical address contiguity we make the queue occupy
	 * only a single hugepage (2MB). As of Virtio 1.0, the queue size
	 * always falls within this limit.
	 */
	if (vq->vq_ring_size > VALUE_2MB) {
		return -ENOMEM;
	}

	queue_mem = spdk_zmalloc(vq->vq_ring_size, VALUE_2MB, NULL,
				 SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (queue_mem == NULL) {
		return -ENOMEM;
	}

	queue_mem_phys_addr = spdk_vtophys(queue_mem, NULL);
	if (queue_mem_phys_addr == SPDK_VTOPHYS_ERROR) {
		spdk_free(queue_mem);
		return -EFAULT;
	}

	vq->vq_ring_mem = queue_mem_phys_addr;
	vq->vq_ring_virt_mem = queue_mem;

	if (!check_vq_phys_addr_ok(vq)) {
		spdk_free(queue_mem);
		return -ENOMEM;
	}

	desc_addr = vq->vq_ring_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = (avail_addr + offsetof(struct vring_avail, ring[vq->vq_nentries])
		     + VIRTIO_PCI_VRING_ALIGN - 1) & ~(VIRTIO_PCI_VRING_ALIGN - 1);

	spdk_mmio_write_2(&hw->common_cfg->queue_select, vq->vq_queue_index);

	io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	notify_off = spdk_mmio_read_2(&hw->common_cfg->queue_notify_off);
	vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
				   notify_off * hw->notify_off_multiplier);

	spdk_mmio_write_2(&hw->common_cfg->queue_enable, 1);

	SPDK_DEBUGLOG(virtio_pci, "queue %"PRIu16" addresses:\n", vq->vq_queue_index);
	SPDK_DEBUGLOG(virtio_pci, "\t desc_addr: %" PRIx64 "\n", desc_addr);
	SPDK_DEBUGLOG(virtio_pci, "\t avail_addr: %" PRIx64 "\n", avail_addr);
	SPDK_DEBUGLOG(virtio_pci, "\t used_addr: %" PRIx64 "\n", used_addr);
	SPDK_DEBUGLOG(virtio_pci, "\t notify addr: %p (notify offset: %"PRIu16")\n",
		      vq->notify_addr, notify_off);

	return 0;
}
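/* Disable the queue and clear its ring addresses in the device before
 * releasing the ring memory.
 */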
static void
modern_del_queue(struct virtio_dev *dev, struct virtqueue *vq)
{
	struct virtio_hw *hw = dev->ctx;

	spdk_mmio_write_2(&hw->common_cfg->queue_select, vq->vq_queue_index);

	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	spdk_mmio_write_2(&hw->common_cfg->queue_enable, 0);

	spdk_free(vq->vq_ring_virt_mem);
}

static void
modern_notify_queue(struct virtio_dev *dev, struct virtqueue *vq)
{
	spdk_mmio_write_2(vq->notify_addr, vq->vq_queue_index);
}

static const struct virtio_dev_ops modern_ops = {
	.read_dev_cfg		= modern_read_dev_config,
	.write_dev_cfg		= modern_write_dev_config,
	.get_status		= modern_get_status,
	.set_status		= modern_set_status,
	.get_features		= modern_get_features,
	.set_features		= modern_set_features,
	.destruct_dev		= modern_destruct_dev,
	.get_queue_size		= modern_get_queue_size,
	.setup_queue		= modern_setup_queue,
	.del_queue		= modern_del_queue,
	.notify_queue		= modern_notify_queue,
	.dump_json_info		= pci_dump_json_info,
	.write_json_config	= pci_write_json_config,
};

static void *
get_cfg_addr(struct virtio_hw *hw, struct virtio_pci_cap *cap)
{
	uint8_t bar = cap->bar;
	uint32_t length = cap->length;
	uint32_t offset = cap->offset;

	if (bar > 5) {
		SPDK_ERRLOG("invalid bar: %"PRIu8"\n", bar);
		return NULL;
	}

	if (offset + length < offset) {
		SPDK_ERRLOG("offset(%"PRIu32") + length(%"PRIu32") overflows\n",
			    offset, length);
		return NULL;
	}

	if (offset + length > hw->pci_bar[bar].len) {
		SPDK_ERRLOG("invalid cap: overflows bar space: %"PRIu32" > %"PRIu32"\n",
			    offset + length, hw->pci_bar[bar].len);
		return NULL;
	}

	if (hw->pci_bar[bar].vaddr == NULL) {
		SPDK_ERRLOG("bar %"PRIu8" base addr is NULL\n", bar);
		return NULL;
	}

	return hw->pci_bar[bar].vaddr + offset;
}

static int
virtio_read_caps(struct virtio_hw *hw)
{
	uint8_t pos;
	struct virtio_pci_cap cap;
	int ret;

	ret = spdk_pci_device_cfg_read(hw->pci_dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret < 0) {
		SPDK_DEBUGLOG(virtio_pci, "failed to read pci capability list\n");
		return ret;
	}

	while (pos) {
		ret = spdk_pci_device_cfg_read(hw->pci_dev, &cap, sizeof(cap), pos);
		if (ret < 0) {
			SPDK_ERRLOG("failed to read pci cap at pos: %"PRIx8"\n", pos);
			break;
		}

		if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
			hw->use_msix = 1;
		}

		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
			SPDK_DEBUGLOG(virtio_pci,
				      "[%2"PRIx8"] skipping non VNDR cap id: %02"PRIx8"\n",
				      pos, cap.cap_vndr);
			goto next;
		}

		SPDK_DEBUGLOG(virtio_pci,
			      "[%2"PRIx8"] cfg type: %"PRIu8", bar: %"PRIu8", offset: %04"PRIx32", len: %"PRIu32"\n",
			      pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cfg_addr(hw, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			spdk_pci_device_cfg_read(hw->pci_dev, &hw->notify_off_multiplier,
						 4, pos + sizeof(cap));
			hw->notify_base = get_cfg_addr(hw, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cfg_addr(hw, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cfg_addr(hw, &cap);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->dev_cfg == NULL || hw->isr == NULL) {
		SPDK_DEBUGLOG(virtio_pci, "no modern virtio pci device found.\n");
		if (ret < 0) {
			return ret;
		} else {
			return -EINVAL;
		}
	}

	SPDK_DEBUGLOG(virtio_pci, "found modern virtio pci device.\n");

	SPDK_DEBUGLOG(virtio_pci, "common cfg mapped at: %p\n", hw->common_cfg);
	SPDK_DEBUGLOG(virtio_pci, "device cfg mapped at: %p\n", hw->dev_cfg);
	SPDK_DEBUGLOG(virtio_pci, "isr cfg mapped at: %p\n", hw->isr);
	SPDK_DEBUGLOG(virtio_pci, "notify base: %p, notify off multiplier: %u\n",
		      hw->notify_base, hw->notify_off_multiplier);

	return 0;
}
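/* Map all six BARs of the device and scan its capability list for the
 * vendor-specific virtio capabilities. Devices that lack them (i.e. legacy
 * virtio devices) are rejected. On success, the virtio_hw context is handed
 * to the enumeration callback as a virtio_pci_ctx; on failure it is freed.
 */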
static int
virtio_pci_dev_probe(struct spdk_pci_device *pci_dev, struct virtio_pci_probe_ctx *ctx)
{
	struct virtio_hw *hw;
	uint8_t *bar_vaddr;
	uint64_t bar_paddr, bar_len;
	int rc;
	unsigned i;
	char bdf[32];
	struct spdk_pci_addr addr;

	addr = spdk_pci_device_get_addr(pci_dev);
	rc = spdk_pci_addr_fmt(bdf, sizeof(bdf), &addr);
	if (rc != 0) {
		SPDK_ERRLOG("Ignoring a device with non-parseable PCI address\n");
		return -1;
	}

	hw = calloc(1, sizeof(*hw));
	if (hw == NULL) {
		SPDK_ERRLOG("%s: calloc failed\n", bdf);
		return -1;
	}

	hw->pci_dev = pci_dev;

	for (i = 0; i < 6; ++i) {
		rc = spdk_pci_device_map_bar(pci_dev, i, (void *) &bar_vaddr, &bar_paddr,
					     &bar_len);
		if (rc != 0) {
			SPDK_ERRLOG("%s: failed to memmap PCI BAR %u\n", bdf, i);
			free_virtio_hw(hw);
			return -1;
		}

		hw->pci_bar[i].vaddr = bar_vaddr;
		hw->pci_bar[i].len = bar_len;
	}

	/* Virtio PCI caps exist only on modern PCI devices.
	 * Legacy devices are not supported.
	 */
	if (virtio_read_caps(hw) != 0) {
		SPDK_NOTICELOG("Ignoring legacy PCI device at %s\n", bdf);
		free_virtio_hw(hw);
		return -1;
	}

	rc = ctx->enum_cb((struct virtio_pci_ctx *)hw, ctx->enum_ctx);
	if (rc != 0) {
		free_virtio_hw(hw);
	}

	return rc;
}

static int
virtio_pci_dev_probe_cb(void *probe_ctx, struct spdk_pci_device *pci_dev)
{
	struct virtio_pci_probe_ctx *ctx = probe_ctx;
	uint16_t pci_device_id = spdk_pci_device_get_device_id(pci_dev);
	uint16_t device_id;

	if (pci_device_id < 0x1000 || pci_device_id > 0x107f) {
		SPDK_ERRLOG("Probe device is not a virtio device\n");
		return 1;
	}

	if (pci_device_id < 0x1040) {
		/* Transitional devices: use the PCI subsystem device id as
		 * virtio device id, same as legacy driver always did.
		 */
		device_id = spdk_pci_device_get_subdevice_id(pci_dev);
	} else {
		/* Modern devices: simply use PCI device id, but start from 0x1040. */
		device_id = pci_device_id - 0x1040;
	}

	if (device_id != ctx->device_id) {
		return 1;
	}

	return virtio_pci_dev_probe(pci_dev, ctx);
}
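/* Enumerate all virtio devices on the PCI bus that match the given virtio
 * device id and invoke enum_cb for each of them. Only the primary process
 * may enumerate; secondary process support is not implemented.
 */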
int
virtio_pci_dev_enumerate(virtio_pci_create_cb enum_cb, void *enum_ctx,
			 uint16_t pci_device_id)
{
	struct virtio_pci_probe_ctx ctx;

	if (!spdk_process_is_primary()) {
		SPDK_WARNLOG("virtio_pci secondary process support is not implemented yet.\n");
		return 0;
	}

	ctx.enum_cb = enum_cb;
	ctx.enum_ctx = enum_ctx;
	ctx.device_id = pci_device_id;

	return spdk_pci_enumerate(spdk_pci_virtio_get_driver(),
				  virtio_pci_dev_probe_cb, &ctx);
}

int
virtio_pci_dev_attach(virtio_pci_create_cb enum_cb, void *enum_ctx,
		      uint16_t device_id, struct spdk_pci_addr *pci_address)
{
	struct virtio_pci_probe_ctx ctx;

	if (!spdk_process_is_primary()) {
		SPDK_WARNLOG("virtio_pci secondary process support is not implemented yet.\n");
		return 0;
	}

	ctx.enum_cb = enum_cb;
	ctx.enum_ctx = enum_ctx;
	ctx.device_id = device_id;

	return spdk_pci_device_attach(spdk_pci_virtio_get_driver(),
				      virtio_pci_dev_probe_cb, &ctx, pci_address);
}

int
virtio_pci_dev_init(struct virtio_dev *vdev, const char *name,
		    struct virtio_pci_ctx *pci_ctx)
{
	int rc;

	rc = virtio_dev_construct(vdev, name, &modern_ops, pci_ctx);
	if (rc != 0) {
		return rc;
	}

	vdev->is_hw = 1;
	vdev->modern = 1;

	return 0;
}

SPDK_LOG_REGISTER_COMPONENT(virtio_pci)