/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/memory.h"
#include "spdk/mmio.h"
#include "spdk/string.h"
#include "spdk/env.h"

#include "spdk_internal/virtio.h"

struct virtio_hw {
	uint8_t use_msix;
	uint32_t notify_off_multiplier;
	uint8_t *isr;
	uint16_t *notify_base;

	struct {
		/** Mem-mapped resources from given PCI BAR */
		void *vaddr;

		/** Length of the address space */
		uint32_t len;
	} pci_bar[6];

	struct virtio_pci_common_cfg *common_cfg;
	struct spdk_pci_device *pci_dev;

	/** Device-specific PCI config space */
	void *dev_cfg;
};

struct virtio_pci_probe_ctx {
	virtio_pci_create_cb enum_cb;
	void *enum_ctx;
	uint16_t device_id;
};

/*
 * Following macros are derived from linux/pci_regs.h, however,
 * we can't simply include that header here, as there is no such
 * file for non-Linux platforms.
 */
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_ID_VNDR		0x09
#define PCI_CAP_ID_MSIX		0x11

static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
	/* Virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32bit,
	 * and only accepts a 32 bit page frame number.
	 * Check if the allocated physical memory exceeds 16TB.
	 */
	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
	    (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		SPDK_ERRLOG("vring address shouldn't be above 16TB!\n");
		return 0;
	}

	return 1;
}

static void
free_virtio_hw(struct virtio_hw *hw)
{
	unsigned i;

	for (i = 0; i < 6; ++i) {
		if (hw->pci_bar[i].vaddr == NULL) {
			continue;
		}

		spdk_pci_device_unmap_bar(hw->pci_dev, i, hw->pci_bar[i].vaddr);
	}

	free(hw);
}

static void
pci_dump_json_info(struct virtio_dev *dev, struct spdk_json_write_ctx *w)
{
	struct virtio_hw *hw = dev->ctx;
	struct spdk_pci_addr pci_addr = spdk_pci_device_get_addr((struct spdk_pci_device *)hw->pci_dev);
	char addr[32];

	spdk_json_write_name(w, "type");
	if (dev->modern) {
		spdk_json_write_string(w, "pci-modern");
	} else {
		spdk_json_write_string(w, "pci-legacy");
	}

	spdk_pci_addr_fmt(addr, sizeof(addr), &pci_addr);
	spdk_json_write_named_string(w, "pci_address", addr);
}

static void
pci_write_json_config(struct virtio_dev *dev, struct spdk_json_write_ctx *w)
{
	struct virtio_hw *hw = dev->ctx;
	struct spdk_pci_addr pci_addr = spdk_pci_device_get_addr(hw->pci_dev);
	char addr[32];

	spdk_pci_addr_fmt(addr, sizeof(addr), &pci_addr);

	spdk_json_write_named_string(w, "trtype", "pci");
	spdk_json_write_named_string(w, "traddr", addr);
}

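/* The 64-bit fields in the common config area (e.g. the queue ring addresses)
 * are exposed as two consecutive 32-bit registers; write the low half first,
 * then the high half.
 */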
static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	spdk_mmio_write_4(lo, val & ((1ULL << 32) - 1));
	spdk_mmio_write_4(hi, val >> 32);
}

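/* Read a device-specific config field one byte at a time. The device's
 * config_generation counter is sampled before and after the copy and the
 * read is retried until both samples match, so a concurrent config update
 * by the device cannot produce a torn snapshot.
 */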
static int
modern_read_dev_config(struct virtio_dev *dev, size_t offset,
		       void *dst, int length)
{
	struct virtio_hw *hw = dev->ctx;
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;

	do {
		old_gen = spdk_mmio_read_1(&hw->common_cfg->config_generation);

		p = dst;
		for (i = 0; i < length; i++) {
			*p++ = spdk_mmio_read_1((uint8_t *)hw->dev_cfg + offset + i);
		}

		new_gen = spdk_mmio_read_1(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);

	return 0;
}

static int
modern_write_dev_config(struct virtio_dev *dev, size_t offset,
			const void *src, int length)
{
	struct virtio_hw *hw = dev->ctx;
	int i;
	const uint8_t *p = src;

	for (i = 0; i < length; i++) {
		spdk_mmio_write_1(((uint8_t *)hw->dev_cfg) + offset + i, *p++);
	}

	return 0;
}

static uint64_t
modern_get_features(struct virtio_dev *dev)
{
	struct virtio_hw *hw = dev->ctx;
	uint32_t features_lo, features_hi;

	spdk_mmio_write_4(&hw->common_cfg->device_feature_select, 0);
	features_lo = spdk_mmio_read_4(&hw->common_cfg->device_feature);

	spdk_mmio_write_4(&hw->common_cfg->device_feature_select, 1);
	features_hi = spdk_mmio_read_4(&hw->common_cfg->device_feature);

	return ((uint64_t)features_hi << 32) | features_lo;
}

static int
modern_set_features(struct virtio_dev *dev, uint64_t features)
{
	struct virtio_hw *hw = dev->ctx;

	if ((features & (1ULL << VIRTIO_F_VERSION_1)) == 0) {
		SPDK_ERRLOG("VIRTIO_F_VERSION_1 feature is not enabled.\n");
		return -EINVAL;
	}

	spdk_mmio_write_4(&hw->common_cfg->guest_feature_select, 0);
	spdk_mmio_write_4(&hw->common_cfg->guest_feature, features & ((1ULL << 32) - 1));

	spdk_mmio_write_4(&hw->common_cfg->guest_feature_select, 1);
	spdk_mmio_write_4(&hw->common_cfg->guest_feature, features >> 32);

	dev->negotiated_features = features;

	return 0;
}

static void
modern_destruct_dev(struct virtio_dev *vdev)
{
	struct virtio_hw *hw = vdev->ctx;
	struct spdk_pci_device *pci_dev;

	if (hw != NULL) {
		/* Save the PCI device handle before freeing hw to avoid a use-after-free. */
		pci_dev = hw->pci_dev;
		free_virtio_hw(hw);
		spdk_pci_device_detach(pci_dev);
	}
}

static uint8_t
modern_get_status(struct virtio_dev *dev)
{
	struct virtio_hw *hw = dev->ctx;

	return spdk_mmio_read_1(&hw->common_cfg->device_status);
}

static void
modern_set_status(struct virtio_dev *dev, uint8_t status)
{
	struct virtio_hw *hw = dev->ctx;

	spdk_mmio_write_1(&hw->common_cfg->device_status, status);
}

static uint16_t
modern_get_queue_size(struct virtio_dev *dev, uint16_t queue_id)
{
	struct virtio_hw *hw = dev->ctx;

	spdk_mmio_write_2(&hw->common_cfg->queue_select, queue_id);
	return spdk_mmio_read_2(&hw->common_cfg->queue_size);
}

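/* Allocate DMA-safe memory for the vring and program it into the device:
 * select the queue, write the descriptor/available/used ring addresses,
 * resolve the per-queue notify address from the notify region, and
 * finally enable the queue.
 */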
static int
modern_setup_queue(struct virtio_dev *dev, struct virtqueue *vq)
{
	struct virtio_hw *hw = dev->ctx;
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;
	void *queue_mem;
	uint64_t queue_mem_phys_addr;

	/* To ensure physical address contiguity we make the queue occupy
	 * only a single hugepage (2MB). As of Virtio 1.0, the queue size
	 * always falls within this limit.
	 */
	if (vq->vq_ring_size > VALUE_2MB) {
		return -ENOMEM;
	}

	queue_mem = spdk_zmalloc(vq->vq_ring_size, VALUE_2MB, NULL,
				 SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
	if (queue_mem == NULL) {
		return -ENOMEM;
	}

	queue_mem_phys_addr = spdk_vtophys(queue_mem, NULL);
	if (queue_mem_phys_addr == SPDK_VTOPHYS_ERROR) {
		spdk_free(queue_mem);
		return -EFAULT;
	}

	vq->vq_ring_mem = queue_mem_phys_addr;
	vq->vq_ring_virt_mem = queue_mem;

	if (!check_vq_phys_addr_ok(vq)) {
		spdk_free(queue_mem);
		return -ENOMEM;
	}

	desc_addr = vq->vq_ring_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = (avail_addr + offsetof(struct vring_avail, ring[vq->vq_nentries])
		     + VIRTIO_PCI_VRING_ALIGN - 1) & ~(VIRTIO_PCI_VRING_ALIGN - 1);

	spdk_mmio_write_2(&hw->common_cfg->queue_select, vq->vq_queue_index);

	io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	notify_off = spdk_mmio_read_2(&hw->common_cfg->queue_notify_off);
	vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
				   notify_off * hw->notify_off_multiplier);

	spdk_mmio_write_2(&hw->common_cfg->queue_enable, 1);

	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_PCI, "queue %"PRIu16" addresses:\n", vq->vq_queue_index);
	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_PCI, "\t desc_addr: %" PRIx64 "\n", desc_addr);
	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_PCI, "\t avail_addr: %" PRIx64 "\n", avail_addr);
	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_PCI, "\t used_addr: %" PRIx64 "\n", used_addr);
	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_PCI, "\t notify addr: %p (notify offset: %"PRIu16")\n",
		      vq->notify_addr, notify_off);

	return 0;
}

static void
modern_del_queue(struct virtio_dev *dev, struct virtqueue *vq)
{
	struct virtio_hw *hw = dev->ctx;

	spdk_mmio_write_2(&hw->common_cfg->queue_select, vq->vq_queue_index);

	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	spdk_mmio_write_2(&hw->common_cfg->queue_enable, 0);

	spdk_free(vq->vq_ring_virt_mem);
}

static void
modern_notify_queue(struct virtio_dev *dev, struct virtqueue *vq)
{
	spdk_mmio_write_2(vq->notify_addr, vq->vq_queue_index);
}

static const struct virtio_dev_ops modern_ops = {
	.read_dev_cfg = modern_read_dev_config,
	.write_dev_cfg = modern_write_dev_config,
	.get_status = modern_get_status,
	.set_status = modern_set_status,
	.get_features = modern_get_features,
	.set_features = modern_set_features,
	.destruct_dev = modern_destruct_dev,
	.get_queue_size = modern_get_queue_size,
	.setup_queue = modern_setup_queue,
	.del_queue = modern_del_queue,
	.notify_queue = modern_notify_queue,
	.dump_json_info = pci_dump_json_info,
	.write_json_config = pci_write_json_config,
};

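/* Translate a virtio PCI capability (BAR index, offset and length) into a
 * pointer inside the corresponding memory-mapped BAR, validating that the
 * referenced region actually fits within the mapped BAR.
 */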
static void *
get_cfg_addr(struct virtio_hw *hw, struct virtio_pci_cap *cap)
{
	uint8_t bar = cap->bar;
	uint32_t length = cap->length;
	uint32_t offset = cap->offset;

	if (bar > 5) {
		SPDK_ERRLOG("invalid bar: %"PRIu8"\n", bar);
		return NULL;
	}

	if (offset + length < offset) {
		SPDK_ERRLOG("offset(%"PRIu32") + length(%"PRIu32") overflows\n",
			    offset, length);
		return NULL;
	}

	if (offset + length > hw->pci_bar[bar].len) {
		SPDK_ERRLOG("invalid cap: overflows bar space: %"PRIu32" > %"PRIu32"\n",
			    offset + length, hw->pci_bar[bar].len);
		return NULL;
	}

	if (hw->pci_bar[bar].vaddr == NULL) {
		SPDK_ERRLOG("bar %"PRIu8" base addr is NULL\n", bar);
		return NULL;
	}

	return hw->pci_bar[bar].vaddr + offset;
}

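/* Walk the standard PCI capability list and map the vendor-specific virtio
 * capabilities (common, notify, device-specific and ISR config) to addresses
 * within the corresponding BARs, noting MSI-X presence along the way.
 * All four capabilities are required for a modern virtio device.
 */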
static int
virtio_read_caps(struct virtio_hw *hw)
{
	uint8_t pos;
	struct virtio_pci_cap cap;
	int ret;

	ret = spdk_pci_device_cfg_read(hw->pci_dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret < 0) {
		SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_PCI, "failed to read pci capability list\n");
		return ret;
	}

	while (pos) {
		ret = spdk_pci_device_cfg_read(hw->pci_dev, &cap, sizeof(cap), pos);
		if (ret < 0) {
			SPDK_ERRLOG("failed to read pci cap at pos: %"PRIx8"\n", pos);
			break;
		}

		if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
			hw->use_msix = 1;
		}

		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
			SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_PCI,
				      "[%2"PRIx8"] skipping non VNDR cap id: %02"PRIx8"\n",
				      pos, cap.cap_vndr);
			goto next;
		}

		SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_PCI,
			      "[%2"PRIx8"] cfg type: %"PRIu8", bar: %"PRIu8", offset: %04"PRIx32", len: %"PRIu32"\n",
			      pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cfg_addr(hw, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			spdk_pci_device_cfg_read(hw->pci_dev, &hw->notify_off_multiplier,
						 4, pos + sizeof(cap));
			hw->notify_base = get_cfg_addr(hw, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cfg_addr(hw, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cfg_addr(hw, &cap);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->dev_cfg == NULL || hw->isr == NULL) {
		SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_PCI, "no modern virtio pci device found.\n");
		if (ret < 0) {
			return ret;
		} else {
			return -EINVAL;
		}
	}

	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_PCI, "found modern virtio pci device.\n");

	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_PCI, "common cfg mapped at: %p\n", hw->common_cfg);
	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_PCI, "device cfg mapped at: %p\n", hw->dev_cfg);
	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_PCI, "isr cfg mapped at: %p\n", hw->isr);
	SPDK_DEBUGLOG(SPDK_LOG_VIRTIO_PCI, "notify base: %p, notify off multiplier: %u\n",
		      hw->notify_base, hw->notify_off_multiplier);

	return 0;
}

static int
virtio_pci_dev_probe(struct spdk_pci_device *pci_dev, struct virtio_pci_probe_ctx *ctx)
{
	struct virtio_hw *hw;
	uint8_t *bar_vaddr;
	uint64_t bar_paddr, bar_len;
	int rc;
	unsigned i;
	char bdf[32];
	struct spdk_pci_addr addr;

	addr = spdk_pci_device_get_addr(pci_dev);
	rc = spdk_pci_addr_fmt(bdf, sizeof(bdf), &addr);
	if (rc != 0) {
		SPDK_ERRLOG("Ignoring a device with non-parseable PCI address\n");
		return -1;
	}

	hw = calloc(1, sizeof(*hw));
	if (hw == NULL) {
		SPDK_ERRLOG("%s: calloc failed\n", bdf);
		return -1;
	}

	hw->pci_dev = pci_dev;

	for (i = 0; i < 6; ++i) {
		rc = spdk_pci_device_map_bar(pci_dev, i, (void *) &bar_vaddr, &bar_paddr,
					     &bar_len);
		if (rc != 0) {
			SPDK_ERRLOG("%s: failed to memmap PCI BAR %u\n", bdf, i);
			free_virtio_hw(hw);
			return -1;
		}

		hw->pci_bar[i].vaddr = bar_vaddr;
		hw->pci_bar[i].len = bar_len;
	}

	/* Virtio PCI caps exist only on modern PCI devices.
	 * Legacy devices are not supported.
	 */
	if (virtio_read_caps(hw) != 0) {
		SPDK_NOTICELOG("Ignoring legacy PCI device at %s\n", bdf);
		free_virtio_hw(hw);
		return -1;
	}

	rc = ctx->enum_cb((struct virtio_pci_ctx *)hw, ctx->enum_ctx);
	if (rc != 0) {
		free_virtio_hw(hw);
	}

	return rc;
}

static int
virtio_pci_dev_probe_cb(void *probe_ctx, struct spdk_pci_device *pci_dev)
{
	struct virtio_pci_probe_ctx *ctx = probe_ctx;
	uint16_t pci_device_id = spdk_pci_device_get_device_id(pci_dev);

	if (pci_device_id != ctx->device_id) {
		return 1;
	}

	return virtio_pci_dev_probe(pci_dev, ctx);
}

int
virtio_pci_dev_enumerate(virtio_pci_create_cb enum_cb, void *enum_ctx,
			 uint16_t pci_device_id)
{
	struct virtio_pci_probe_ctx ctx;

	if (!spdk_process_is_primary()) {
		SPDK_WARNLOG("virtio_pci secondary process support is not implemented yet.\n");
		return 0;
	}

	ctx.enum_cb = enum_cb;
	ctx.enum_ctx = enum_ctx;
	ctx.device_id = pci_device_id;

	return spdk_pci_enumerate(spdk_pci_virtio_get_driver(),
				  virtio_pci_dev_probe_cb, &ctx);
}

int
virtio_pci_dev_attach(virtio_pci_create_cb enum_cb, void *enum_ctx,
		      uint16_t pci_device_id, struct spdk_pci_addr *pci_address)
{
	struct virtio_pci_probe_ctx ctx;

	if (!spdk_process_is_primary()) {
		SPDK_WARNLOG("virtio_pci secondary process support is not implemented yet.\n");
		return 0;
	}

	ctx.enum_cb = enum_cb;
	ctx.enum_ctx = enum_ctx;
	ctx.device_id = pci_device_id;

	return spdk_pci_device_attach(spdk_pci_virtio_get_driver(),
				      virtio_pci_dev_probe_cb, &ctx, pci_address);
}

int
virtio_pci_dev_init(struct virtio_dev *vdev, const char *name,
		    struct virtio_pci_ctx *pci_ctx)
{
	int rc;

	rc = virtio_dev_construct(vdev, name, &modern_ops, pci_ctx);
	if (rc != 0) {
		return rc;
	}

	vdev->is_hw = 1;
	vdev->modern = 1;

	return 0;
}

SPDK_LOG_REGISTER_COMPONENT("virtio_pci", SPDK_LOG_VIRTIO_PCI)