1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright (C) 2020 Intel Corporation. 3 * Copyright (c) 2019-2022, Nutanix Inc. All rights reserved. 4 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 5 */ 6 7 /* 8 * NVMe over vfio-user transport 9 */ 10 11 #include <sys/param.h> 12 13 #include <vfio-user/libvfio-user.h> 14 #include <vfio-user/pci_defs.h> 15 16 #include "spdk/barrier.h" 17 #include "spdk/stdinc.h" 18 #include "spdk/assert.h" 19 #include "spdk/thread.h" 20 #include "spdk/nvmf_transport.h" 21 #include "spdk/sock.h" 22 #include "spdk/string.h" 23 #include "spdk/util.h" 24 #include "spdk/log.h" 25 26 #include "transport.h" 27 28 #include "nvmf_internal.h" 29 30 #define SWAP(x, y) \ 31 do \ 32 { \ 33 typeof(x) _tmp = x; \ 34 x = y; \ 35 y = _tmp; \ 36 } while (0) 37 38 #define NVMF_VFIO_USER_DEFAULT_MAX_QUEUE_DEPTH 256 39 #define NVMF_VFIO_USER_DEFAULT_AQ_DEPTH 32 40 #define NVMF_VFIO_USER_DEFAULT_MAX_IO_SIZE ((NVMF_REQ_MAX_BUFFERS - 1) << SHIFT_4KB) 41 #define NVMF_VFIO_USER_DEFAULT_IO_UNIT_SIZE NVMF_VFIO_USER_DEFAULT_MAX_IO_SIZE 42 43 #define NVME_DOORBELLS_OFFSET 0x1000 44 #define NVMF_VFIO_USER_SHADOW_DOORBELLS_BUFFER_COUNT 2 45 #define NVMF_VFIO_USER_SET_EVENTIDX_MAX_ATTEMPTS 3 46 #define NVMF_VFIO_USER_EVENTIDX_POLL UINT32_MAX 47 48 #define NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR 512 49 #define NVMF_VFIO_USER_DEFAULT_MAX_QPAIRS_PER_CTRLR (NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR / 4) 50 51 /* NVMe spec 1.4, section 5.21.1.7 */ 52 SPDK_STATIC_ASSERT(NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR >= 2 && 53 NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR <= SPDK_NVME_MAX_IO_QUEUES, 54 "bad number of queues"); 55 56 /* 57 * NVMe driver reads 4096 bytes, which is the extended PCI configuration space 58 * available on PCI-X 2.0 and PCI Express buses 59 */ 60 #define NVME_REG_CFG_SIZE 0x1000 61 62 /* 63 * Doorbells must be page aligned so that they can memory mapped. 64 * 65 * TODO does the NVMe spec also require this? Document it. 66 */ 67 #define NVMF_VFIO_USER_DOORBELLS_SIZE \ 68 SPDK_ALIGN_CEIL( \ 69 (NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR * 2 * SPDK_NVME_DOORBELL_REGISTER_SIZE), \ 70 0x1000) 71 #define NVME_REG_BAR0_SIZE (NVME_DOORBELLS_OFFSET + NVMF_VFIO_USER_DOORBELLS_SIZE) 72 73 /* 74 * TODO check the PCI spec whether BAR4 and BAR5 really have to be at least one 75 * page and a multiple of page size (maybe QEMU also needs this?). Document all 76 * this. 77 */ 78 79 /* 80 * MSI-X Pending Bit Array Size 81 * 82 * TODO according to the PCI spec we need one bit per vector, document the 83 * relevant section. 84 * 85 * If the first argument to SPDK_ALIGN_CEIL is 0 then the result is 0, so we 86 * would end up with a 0-size BAR5. 
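 *
 * Worked example (illustrative arithmetic only): with
 * NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR = 512, NVME_IRQ_MSIX_NUM below is 512,
 * so the PBA needs 512 / 8 = 64 bytes, which SPDK_ALIGN_CEIL rounds up to a
 * 0x1000-byte BAR5, while the MSI-X table needs 512 * 16 = 8192 bytes,
 * giving a 0x2000-byte BAR4.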
87 */ 88 #define NVME_IRQ_MSIX_NUM MAX(CHAR_BIT, NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR) 89 #define NVME_BAR5_SIZE SPDK_ALIGN_CEIL((NVME_IRQ_MSIX_NUM / CHAR_BIT), 0x1000) 90 SPDK_STATIC_ASSERT(NVME_BAR5_SIZE > 0, "Incorrect size"); 91 92 /* MSI-X Table Size */ 93 #define NVME_BAR4_SIZE SPDK_ALIGN_CEIL((NVME_IRQ_MSIX_NUM * 16), 0x1000) 94 SPDK_STATIC_ASSERT(NVME_BAR4_SIZE > 0, "Incorrect size"); 95 96 struct nvmf_vfio_user_req; 97 98 typedef int (*nvmf_vfio_user_req_cb_fn)(struct nvmf_vfio_user_req *req, void *cb_arg); 99 100 /* 1 more for PRP2 list itself */ 101 #define NVMF_VFIO_USER_MAX_IOVECS (NVMF_REQ_MAX_BUFFERS + 1) 102 103 enum nvmf_vfio_user_req_state { 104 VFIO_USER_REQUEST_STATE_FREE = 0, 105 VFIO_USER_REQUEST_STATE_EXECUTING, 106 }; 107 108 /* 109 * Support for live migration in NVMf/vfio-user: live migration is implemented 110 * by stopping the NVMf subsystem when the device is instructed to enter the 111 * stop-and-copy state and then trivially, and most importantly safely, 112 * collecting migration state and providing it to the vfio-user client. We 113 * don't provide any migration state at the pre-copy state as that's too 114 * complicated to do, we might support this in the future. 115 */ 116 117 118 /* NVMe device state representation */ 119 struct nvme_migr_sq_state { 120 uint16_t sqid; 121 uint16_t cqid; 122 uint32_t head; 123 uint32_t size; 124 uint32_t reserved; 125 uint64_t dma_addr; 126 }; 127 SPDK_STATIC_ASSERT(sizeof(struct nvme_migr_sq_state) == 0x18, "Incorrect size"); 128 129 struct nvme_migr_cq_state { 130 uint16_t cqid; 131 uint16_t phase; 132 uint32_t tail; 133 uint32_t size; 134 uint32_t iv; 135 uint32_t ien; 136 uint32_t reserved; 137 uint64_t dma_addr; 138 }; 139 SPDK_STATIC_ASSERT(sizeof(struct nvme_migr_cq_state) == 0x20, "Incorrect size"); 140 141 #define VFIO_USER_NVME_MIGR_MAGIC 0xAFEDBC23 142 143 /* The device state is in VFIO MIGRATION BAR(9) region, keep the device state page aligned. 144 * 145 * NVMe device migration region is defined as below: 146 * ------------------------------------------------------------------------- 147 * | vfio_user_nvme_migr_header | nvmf controller data | queue pairs | BARs | 148 * ------------------------------------------------------------------------- 149 * 150 * Keep vfio_user_nvme_migr_header as a fixed 0x1000 length, all new added fields 151 * can use the reserved space at the end of the data structure. 152 */ 153 struct vfio_user_nvme_migr_header { 154 /* Magic value to validate migration data */ 155 uint32_t magic; 156 /* Version to check the data is same from source to destination */ 157 uint32_t version; 158 159 /* The library uses this field to know how many fields in this 160 * structure are valid, starting at the beginning of this data 161 * structure. New added fields in future use `unused` memory 162 * spaces. 163 */ 164 uint32_t opts_size; 165 uint32_t reserved0; 166 167 /* BARs information */ 168 uint64_t bar_offset[VFU_PCI_DEV_NUM_REGIONS]; 169 uint64_t bar_len[VFU_PCI_DEV_NUM_REGIONS]; 170 171 /* Queue pair start offset, starting at the beginning of this 172 * data structure. 173 */ 174 uint64_t qp_offset; 175 uint64_t qp_len; 176 177 /* Controller data structure */ 178 uint32_t num_io_queues; 179 uint32_t reserved1; 180 181 /* NVMf controller data offset and length if exist, starting at 182 * the beginning of this data structure. 183 */ 184 uint64_t nvmf_data_offset; 185 uint64_t nvmf_data_len; 186 187 /* 188 * Whether or not shadow doorbells are used in the source. 0 is a valid DMA 189 * address. 
190 */ 191 uint32_t sdbl; 192 193 /* Shadow doorbell DMA addresses. */ 194 uint64_t shadow_doorbell_buffer; 195 uint64_t eventidx_buffer; 196 197 /* Reserved memory space for newly added fields; this 198 * field is always at the end of this data structure. 199 */ 200 uint8_t unused[3856]; 201 }; 202 SPDK_STATIC_ASSERT(sizeof(struct vfio_user_nvme_migr_header) == 0x1000, "Incorrect size"); 203 204 struct vfio_user_nvme_migr_qp { 205 struct nvme_migr_sq_state sq; 206 struct nvme_migr_cq_state cq; 207 }; 208 209 /* NVMe state definition used to load/restore from/to NVMe migration BAR region */ 210 struct vfio_user_nvme_migr_state { 211 struct vfio_user_nvme_migr_header ctrlr_header; 212 struct spdk_nvmf_ctrlr_migr_data nvmf_data; 213 struct vfio_user_nvme_migr_qp qps[NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR]; 214 uint8_t doorbells[NVMF_VFIO_USER_DOORBELLS_SIZE]; 215 uint8_t cfg[NVME_REG_CFG_SIZE]; 216 }; 217 218 struct nvmf_vfio_user_req { 219 struct spdk_nvmf_request req; 220 struct spdk_nvme_cpl rsp; 221 struct spdk_nvme_cmd cmd; 222 223 enum nvmf_vfio_user_req_state state; 224 nvmf_vfio_user_req_cb_fn cb_fn; 225 void *cb_arg; 226 227 /* old CC before prop_set_cc fabric command */ 228 union spdk_nvme_cc_register cc; 229 230 TAILQ_ENTRY(nvmf_vfio_user_req) link; 231 232 struct iovec iov[NVMF_VFIO_USER_MAX_IOVECS]; 233 uint8_t iovcnt; 234 235 /* NVMF_VFIO_USER_MAX_IOVECS worth of dma_sg_t. */ 236 uint8_t sg[]; 237 }; 238 239 /* 240 * Mapping of an NVMe queue. 241 * 242 * This holds the information tracking a local process mapping of an NVMe queue 243 * shared by the client. 244 */ 245 struct nvme_q_mapping { 246 /* iov of local process mapping. */ 247 struct iovec iov; 248 /* Stored sg, needed for unmap. */ 249 dma_sg_t *sg; 250 /* Client PRP of queue. */ 251 uint64_t prp1; 252 }; 253 254 enum nvmf_vfio_user_sq_state { 255 VFIO_USER_SQ_UNUSED = 0, 256 VFIO_USER_SQ_CREATED, 257 VFIO_USER_SQ_DELETED, 258 VFIO_USER_SQ_ACTIVE, 259 VFIO_USER_SQ_INACTIVE 260 }; 261 262 enum nvmf_vfio_user_cq_state { 263 VFIO_USER_CQ_UNUSED = 0, 264 VFIO_USER_CQ_CREATED, 265 VFIO_USER_CQ_DELETED, 266 }; 267 268 enum nvmf_vfio_user_ctrlr_state { 269 VFIO_USER_CTRLR_CREATING = 0, 270 VFIO_USER_CTRLR_RUNNING, 271 /* Quiesce requested by libvfio-user */ 272 VFIO_USER_CTRLR_PAUSING, 273 /* NVMf subsystem is paused, it's safe to do PCI reset, memory register, 274 * memory unregister, and vfio migration state transition in this state. 275 */ 276 VFIO_USER_CTRLR_PAUSED, 277 /* 278 * Implies that the NVMf subsystem is paused. Device will be unquiesced (PCI 279 * reset, memory register and unregister, controller in destination VM has 280 * been restored). NVMf subsystem resume has been requested. 281 */ 282 VFIO_USER_CTRLR_RESUMING, 283 /* 284 * Implies that the NVMf subsystem is paused. Both the controller in the source VM 285 * and the controller in the destination VM are in this state when doing live migration. 286 */ 287 VFIO_USER_CTRLR_MIGRATING 288 }; 289 290 struct nvmf_vfio_user_sq { 291 struct spdk_nvmf_qpair qpair; 292 struct spdk_nvmf_transport_poll_group *group; 293 struct nvmf_vfio_user_ctrlr *ctrlr; 294 295 uint32_t qid; 296 /* Number of entries in queue. */ 297 uint32_t size; 298 struct nvme_q_mapping mapping; 299 enum nvmf_vfio_user_sq_state sq_state; 300 301 uint32_t head; 302 volatile uint32_t *dbl_tailp; 303 304 /* Whether a shadow doorbell eventidx needs setting.
*/ 305 bool need_rearm; 306 307 /* multiple SQs can be mapped to the same CQ */ 308 uint16_t cqid; 309 310 /* handle_queue_connect_rsp() can be used both for CREATE IO SQ response 311 * and SQ re-connect response in the destination VM, for the prior case, 312 * we will post a NVMe completion to VM, we will not set this flag when 313 * re-connecting SQs in the destination VM. 314 */ 315 bool post_create_io_sq_completion; 316 /* Copy of Create IO SQ command, this field is used together with 317 * `post_create_io_sq_completion` flag. 318 */ 319 struct spdk_nvme_cmd create_io_sq_cmd; 320 321 /* Currently unallocated reqs. */ 322 TAILQ_HEAD(, nvmf_vfio_user_req) free_reqs; 323 /* Poll group entry */ 324 TAILQ_ENTRY(nvmf_vfio_user_sq) link; 325 /* Connected SQ entry */ 326 TAILQ_ENTRY(nvmf_vfio_user_sq) tailq; 327 }; 328 329 struct nvmf_vfio_user_cq { 330 struct spdk_nvmf_transport_poll_group *group; 331 int cq_ref; 332 333 uint32_t qid; 334 /* Number of entries in queue. */ 335 uint32_t size; 336 struct nvme_q_mapping mapping; 337 enum nvmf_vfio_user_cq_state cq_state; 338 339 uint32_t tail; 340 volatile uint32_t *dbl_headp; 341 342 bool phase; 343 344 uint16_t iv; 345 bool ien; 346 347 uint32_t last_head; 348 uint32_t last_trigger_irq_tail; 349 }; 350 351 struct nvmf_vfio_user_poll_group { 352 struct spdk_nvmf_transport_poll_group group; 353 TAILQ_ENTRY(nvmf_vfio_user_poll_group) link; 354 TAILQ_HEAD(, nvmf_vfio_user_sq) sqs; 355 struct spdk_interrupt *intr; 356 int intr_fd; 357 struct { 358 359 /* 360 * ctrlr_intr and ctrlr_kicks will be zero for all other poll 361 * groups. However, they can be zero even for the poll group 362 * the controller belongs are if no vfio-user message has been 363 * received or the controller hasn't been kicked yet. 364 */ 365 366 /* 367 * Number of times vfio_user_ctrlr_intr() has run: 368 * vfio-user file descriptor has been ready or explicitly 369 * kicked (see below). 370 */ 371 uint64_t ctrlr_intr; 372 373 /* 374 * Kicks to the controller by ctrlr_kick(). 375 * ctrlr_intr - ctrlr_kicks is the number of times the 376 * vfio-user poll file descriptor has been ready. 377 */ 378 uint64_t ctrlr_kicks; 379 380 /* 381 * How many times we won the race arming an SQ. 382 */ 383 uint64_t won; 384 385 /* 386 * How many times we lost the race arming an SQ 387 */ 388 uint64_t lost; 389 390 /* 391 * How many requests we processed in total each time we lost 392 * the rearm race. 393 */ 394 uint64_t lost_count; 395 396 /* 397 * Number of attempts we attempted to rearm all the SQs in the 398 * poll group. 399 */ 400 uint64_t rearms; 401 402 uint64_t pg_process_count; 403 uint64_t intr; 404 uint64_t polls; 405 uint64_t polls_spurious; 406 uint64_t poll_reqs; 407 uint64_t poll_reqs_squared; 408 uint64_t cqh_admin_writes; 409 uint64_t cqh_io_writes; 410 } stats; 411 }; 412 413 struct nvmf_vfio_user_shadow_doorbells { 414 volatile uint32_t *shadow_doorbells; 415 volatile uint32_t *eventidxs; 416 dma_sg_t *sgs; 417 struct iovec *iovs; 418 }; 419 420 struct nvmf_vfio_user_ctrlr { 421 struct nvmf_vfio_user_endpoint *endpoint; 422 struct nvmf_vfio_user_transport *transport; 423 424 /* Connected SQs list */ 425 TAILQ_HEAD(, nvmf_vfio_user_sq) connected_sqs; 426 enum nvmf_vfio_user_ctrlr_state state; 427 428 /* 429 * Tells whether live migration data have been prepared. This is used 430 * by the get_pending_bytes callback to tell whether or not the 431 * previous iteration finished. 
432 */ 433 bool migr_data_prepared; 434 435 /* Controller is in source VM when doing live migration */ 436 bool in_source_vm; 437 438 struct spdk_thread *thread; 439 struct spdk_poller *vfu_ctx_poller; 440 struct spdk_interrupt *intr; 441 int intr_fd; 442 443 bool queued_quiesce; 444 445 bool reset_shn; 446 bool disconnect; 447 448 uint16_t cntlid; 449 struct spdk_nvmf_ctrlr *ctrlr; 450 451 struct nvmf_vfio_user_sq *sqs[NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR]; 452 struct nvmf_vfio_user_cq *cqs[NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR]; 453 454 TAILQ_ENTRY(nvmf_vfio_user_ctrlr) link; 455 456 volatile uint32_t *bar0_doorbells; 457 struct nvmf_vfio_user_shadow_doorbells *sdbl; 458 /* 459 * Shadow doorbells PRPs to provide during the stop-and-copy state. 460 */ 461 uint64_t shadow_doorbell_buffer; 462 uint64_t eventidx_buffer; 463 464 bool adaptive_irqs_enabled; 465 }; 466 467 /* Endpoint in vfio-user is associated with a socket file, which 468 * is the representative of a PCI endpoint. 469 */ 470 struct nvmf_vfio_user_endpoint { 471 struct nvmf_vfio_user_transport *transport; 472 vfu_ctx_t *vfu_ctx; 473 struct spdk_poller *accept_poller; 474 struct spdk_thread *accept_thread; 475 bool interrupt_mode; 476 struct msixcap *msix; 477 vfu_pci_config_space_t *pci_config_space; 478 int devmem_fd; 479 int accept_intr_fd; 480 struct spdk_interrupt *accept_intr; 481 482 volatile uint32_t *bar0_doorbells; 483 484 int migr_fd; 485 void *migr_data; 486 487 struct spdk_nvme_transport_id trid; 488 struct spdk_nvmf_subsystem *subsystem; 489 490 /* Controller is associated with an active socket connection, 491 * the lifecycle of the controller is same as the VM. 492 * Currently we only support one active connection, as the NVMe 493 * specification defines, we may support multiple controllers in 494 * future, so that it can support e.g: RESERVATION. 495 */ 496 struct nvmf_vfio_user_ctrlr *ctrlr; 497 pthread_mutex_t lock; 498 499 bool need_async_destroy; 500 /* The subsystem is in PAUSED state and need to be resumed, TRUE 501 * only when migration is done successfully and the controller is 502 * in source VM. 503 */ 504 bool need_resume; 505 /* Start the accept poller again after destroying the controller */ 506 bool need_relisten; 507 508 TAILQ_ENTRY(nvmf_vfio_user_endpoint) link; 509 }; 510 511 struct nvmf_vfio_user_transport_opts { 512 bool disable_mappable_bar0; 513 bool disable_adaptive_irq; 514 bool disable_shadow_doorbells; 515 bool disable_compare; 516 bool enable_intr_mode_sq_spreading; 517 }; 518 519 struct nvmf_vfio_user_transport { 520 struct spdk_nvmf_transport transport; 521 struct nvmf_vfio_user_transport_opts transport_opts; 522 bool intr_mode_supported; 523 pthread_mutex_t lock; 524 TAILQ_HEAD(, nvmf_vfio_user_endpoint) endpoints; 525 526 pthread_mutex_t pg_lock; 527 TAILQ_HEAD(, nvmf_vfio_user_poll_group) poll_groups; 528 struct nvmf_vfio_user_poll_group *next_pg; 529 }; 530 531 /* 532 * function prototypes 533 */ 534 static int nvmf_vfio_user_req_free(struct spdk_nvmf_request *req); 535 536 static struct nvmf_vfio_user_req *get_nvmf_vfio_user_req(struct nvmf_vfio_user_sq *sq); 537 538 /* 539 * Local process virtual address of a queue. 
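 *
 * As a worked example of the doorbell layout assumed by queue_index() and the
 * doorbell helpers just below (illustrative, assuming CAP.DSTRD == 0, i.e.
 * 4-byte doorbell registers): queue_index(qid, is_cq) yields qid * 2 + is_cq,
 * so the admin SQ tail doorbell sits at BAR0 offset
 * NVME_DOORBELLS_OFFSET + 0 * 4 = 0x1000, the admin CQ head at 0x1004, and
 * I/O queue 1's SQ tail / CQ head at 0x1008 / 0x100c, matching the register
 * layout in the NVMe spec.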
540 */ 541 static inline void * 542 q_addr(struct nvme_q_mapping *mapping) 543 { 544 return mapping->iov.iov_base; 545 } 546 547 static inline int 548 queue_index(uint16_t qid, bool is_cq) 549 { 550 return (qid * 2) + is_cq; 551 } 552 553 static inline volatile uint32_t * 554 sq_headp(struct nvmf_vfio_user_sq *sq) 555 { 556 assert(sq != NULL); 557 return &sq->head; 558 } 559 560 static inline volatile uint32_t * 561 sq_dbl_tailp(struct nvmf_vfio_user_sq *sq) 562 { 563 assert(sq != NULL); 564 return sq->dbl_tailp; 565 } 566 567 static inline volatile uint32_t * 568 cq_dbl_headp(struct nvmf_vfio_user_cq *cq) 569 { 570 assert(cq != NULL); 571 return cq->dbl_headp; 572 } 573 574 static inline volatile uint32_t * 575 cq_tailp(struct nvmf_vfio_user_cq *cq) 576 { 577 assert(cq != NULL); 578 return &cq->tail; 579 } 580 581 static inline void 582 sq_head_advance(struct nvmf_vfio_user_sq *sq) 583 { 584 assert(sq != NULL); 585 586 assert(*sq_headp(sq) < sq->size); 587 (*sq_headp(sq))++; 588 589 if (spdk_unlikely(*sq_headp(sq) == sq->size)) { 590 *sq_headp(sq) = 0; 591 } 592 } 593 594 static inline void 595 cq_tail_advance(struct nvmf_vfio_user_cq *cq) 596 { 597 assert(cq != NULL); 598 599 assert(*cq_tailp(cq) < cq->size); 600 (*cq_tailp(cq))++; 601 602 if (spdk_unlikely(*cq_tailp(cq) == cq->size)) { 603 *cq_tailp(cq) = 0; 604 cq->phase = !cq->phase; 605 } 606 } 607 608 /* 609 * As per NVMe Base spec 3.3.1.2.1, we are supposed to implement CQ flow 610 * control: if there is no space in the CQ, we should wait until there is. 611 * 612 * In practice, we just fail the controller instead: as it happens, all host 613 * implementations we care about right-size the CQ: this is required anyway for 614 * NVMEoF support (see 3.3.2.8). 615 * 616 * Since reading the head doorbell is relatively expensive, we use the cached 617 * value, so we only have to read it for real if it appears that we are full. 618 */ 619 static inline bool 620 cq_is_full(struct nvmf_vfio_user_cq *cq) 621 { 622 uint32_t qindex; 623 624 assert(cq != NULL); 625 626 qindex = *cq_tailp(cq) + 1; 627 if (spdk_unlikely(qindex == cq->size)) { 628 qindex = 0; 629 } 630 631 if (qindex != cq->last_head) { 632 return false; 633 } 634 635 cq->last_head = *cq_dbl_headp(cq); 636 637 return qindex == cq->last_head; 638 } 639 640 static bool 641 io_q_exists(struct nvmf_vfio_user_ctrlr *vu_ctrlr, const uint16_t qid, const bool is_cq) 642 { 643 assert(vu_ctrlr != NULL); 644 645 if (qid == 0 || qid >= NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR) { 646 return false; 647 } 648 649 if (is_cq) { 650 if (vu_ctrlr->cqs[qid] == NULL) { 651 return false; 652 } 653 654 return (vu_ctrlr->cqs[qid]->cq_state != VFIO_USER_CQ_DELETED && 655 vu_ctrlr->cqs[qid]->cq_state != VFIO_USER_CQ_UNUSED); 656 } 657 658 if (vu_ctrlr->sqs[qid] == NULL) { 659 return false; 660 } 661 662 return (vu_ctrlr->sqs[qid]->sq_state != VFIO_USER_SQ_DELETED && 663 vu_ctrlr->sqs[qid]->sq_state != VFIO_USER_SQ_UNUSED); 664 } 665 666 static char * 667 endpoint_id(struct nvmf_vfio_user_endpoint *endpoint) 668 { 669 return endpoint->trid.traddr; 670 } 671 672 static char * 673 ctrlr_id(struct nvmf_vfio_user_ctrlr *ctrlr) 674 { 675 if (!ctrlr || !ctrlr->endpoint) { 676 return "Null Ctrlr"; 677 } 678 679 return endpoint_id(ctrlr->endpoint); 680 } 681 682 /* Return the poll group for the admin queue of the controller. 
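 *
 * Worked example for the CQ helpers above (illustrative, assuming a 4-entry
 * CQ): with tail = 3 and cached last_head = 0, cq_is_full() computes
 * qindex = 0, which matches last_head, so it re-reads the head doorbell and
 * only reports full if the host really has not consumed any entries; when
 * cq_tail_advance() wraps the tail from 3 back to 0 it also inverts
 * cq->phase, which is how the host distinguishes new completions from stale
 * ones.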
*/ 683 static inline struct nvmf_vfio_user_poll_group * 684 ctrlr_to_poll_group(struct nvmf_vfio_user_ctrlr *vu_ctrlr) 685 { 686 return SPDK_CONTAINEROF(vu_ctrlr->sqs[0]->group, 687 struct nvmf_vfio_user_poll_group, 688 group); 689 } 690 691 static inline struct spdk_thread * 692 poll_group_to_thread(struct nvmf_vfio_user_poll_group *vu_pg) 693 { 694 return vu_pg->group.group->thread; 695 } 696 697 static dma_sg_t * 698 index_to_sg_t(void *arr, size_t i) 699 { 700 return (dma_sg_t *)((uintptr_t)arr + i * dma_sg_size()); 701 } 702 703 static inline size_t 704 vfio_user_migr_data_len(void) 705 { 706 return SPDK_ALIGN_CEIL(sizeof(struct vfio_user_nvme_migr_state), PAGE_SIZE); 707 } 708 709 static inline bool 710 in_interrupt_mode(struct nvmf_vfio_user_transport *vu_transport) 711 { 712 return spdk_interrupt_mode_is_enabled() && 713 vu_transport->intr_mode_supported; 714 } 715 716 static int vfio_user_ctrlr_intr(void *ctx); 717 718 static void 719 vfio_user_msg_ctrlr_intr(void *ctx) 720 { 721 struct nvmf_vfio_user_ctrlr *vu_ctrlr = ctx; 722 struct nvmf_vfio_user_poll_group *vu_ctrlr_group = ctrlr_to_poll_group(vu_ctrlr); 723 724 vu_ctrlr_group->stats.ctrlr_kicks++; 725 726 vfio_user_ctrlr_intr(ctx); 727 } 728 729 /* 730 * Kick (force a wakeup) of all poll groups for this controller. 731 * vfio_user_ctrlr_intr() itself arranges for kicking other poll groups if 732 * needed. 733 */ 734 static void 735 ctrlr_kick(struct nvmf_vfio_user_ctrlr *vu_ctrlr) 736 { 737 struct nvmf_vfio_user_poll_group *vu_ctrlr_group; 738 739 SPDK_DEBUGLOG(vfio_user_db, "%s: kicked\n", ctrlr_id(vu_ctrlr)); 740 741 vu_ctrlr_group = ctrlr_to_poll_group(vu_ctrlr); 742 743 spdk_thread_send_msg(poll_group_to_thread(vu_ctrlr_group), 744 vfio_user_msg_ctrlr_intr, vu_ctrlr); 745 } 746 747 /* 748 * Make the given DMA address and length available (locally mapped) via iov. 
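 *
 * Illustrative usage (local names, not part of this file):
 *
 *	dma_sg_t *sg = calloc(1, dma_sg_size());
 *	struct iovec iov;
 *	void *p = map_one(vfu_ctx, prp, 0x1000, sg, &iov, PROT_READ | PROT_WRITE);
 *	if (p != NULL) {
 *		... use the 4 KiB mapping at p ...
 *		vfu_sgl_put(vfu_ctx, sg, &iov, 1);	release when done
 *	}
 *	free(sg);
 *
 * map_q(), map_sdbl() and _map_one() below are thin wrappers around this.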
749 */ 750 static void * 751 map_one(vfu_ctx_t *ctx, uint64_t addr, uint64_t len, dma_sg_t *sg, 752 struct iovec *iov, int prot) 753 { 754 int ret; 755 756 assert(ctx != NULL); 757 assert(sg != NULL); 758 assert(iov != NULL); 759 760 ret = vfu_addr_to_sgl(ctx, (void *)(uintptr_t)addr, len, sg, 1, prot); 761 if (ret < 0) { 762 return NULL; 763 } 764 765 ret = vfu_sgl_get(ctx, sg, iov, 1, 0); 766 if (ret != 0) { 767 return NULL; 768 } 769 770 assert(iov->iov_base != NULL); 771 return iov->iov_base; 772 } 773 774 static int 775 nvme_cmd_map_prps(void *prv, struct spdk_nvme_cmd *cmd, struct iovec *iovs, 776 uint32_t max_iovcnt, uint32_t len, size_t mps, 777 void *(*gpa_to_vva)(void *prv, uint64_t addr, uint64_t len, int prot)) 778 { 779 uint64_t prp1, prp2; 780 void *vva; 781 uint32_t i; 782 uint32_t residue_len, nents; 783 uint64_t *prp_list; 784 uint32_t iovcnt; 785 786 assert(max_iovcnt > 0); 787 788 prp1 = cmd->dptr.prp.prp1; 789 prp2 = cmd->dptr.prp.prp2; 790 791 /* PRP1 may started with unaligned page address */ 792 residue_len = mps - (prp1 % mps); 793 residue_len = spdk_min(len, residue_len); 794 795 vva = gpa_to_vva(prv, prp1, residue_len, PROT_READ | PROT_WRITE); 796 if (spdk_unlikely(vva == NULL)) { 797 SPDK_ERRLOG("GPA to VVA failed\n"); 798 return -EINVAL; 799 } 800 len -= residue_len; 801 if (len && max_iovcnt < 2) { 802 SPDK_ERRLOG("Too many page entries, at least two iovs are required\n"); 803 return -ERANGE; 804 } 805 iovs[0].iov_base = vva; 806 iovs[0].iov_len = residue_len; 807 808 if (len) { 809 if (spdk_unlikely(prp2 == 0)) { 810 SPDK_ERRLOG("no PRP2, %d remaining\n", len); 811 return -EINVAL; 812 } 813 814 if (len <= mps) { 815 /* 2 PRP used */ 816 iovcnt = 2; 817 vva = gpa_to_vva(prv, prp2, len, PROT_READ | PROT_WRITE); 818 if (spdk_unlikely(vva == NULL)) { 819 SPDK_ERRLOG("no VVA for %#" PRIx64 ", len%#x\n", 820 prp2, len); 821 return -EINVAL; 822 } 823 iovs[1].iov_base = vva; 824 iovs[1].iov_len = len; 825 } else { 826 /* PRP list used */ 827 nents = (len + mps - 1) / mps; 828 if (spdk_unlikely(nents + 1 > max_iovcnt)) { 829 SPDK_ERRLOG("Too many page entries\n"); 830 return -ERANGE; 831 } 832 833 vva = gpa_to_vva(prv, prp2, nents * sizeof(*prp_list), PROT_READ); 834 if (spdk_unlikely(vva == NULL)) { 835 SPDK_ERRLOG("no VVA for %#" PRIx64 ", nents=%#x\n", 836 prp2, nents); 837 return -EINVAL; 838 } 839 prp_list = vva; 840 i = 0; 841 while (len != 0) { 842 residue_len = spdk_min(len, mps); 843 vva = gpa_to_vva(prv, prp_list[i], residue_len, PROT_READ | PROT_WRITE); 844 if (spdk_unlikely(vva == NULL)) { 845 SPDK_ERRLOG("no VVA for %#" PRIx64 ", residue_len=%#x\n", 846 prp_list[i], residue_len); 847 return -EINVAL; 848 } 849 iovs[i + 1].iov_base = vva; 850 iovs[i + 1].iov_len = residue_len; 851 len -= residue_len; 852 i++; 853 } 854 iovcnt = i + 1; 855 } 856 } else { 857 /* 1 PRP used */ 858 iovcnt = 1; 859 } 860 861 assert(iovcnt <= max_iovcnt); 862 return iovcnt; 863 } 864 865 static int 866 nvme_cmd_map_sgls_data(void *prv, struct spdk_nvme_sgl_descriptor *sgls, uint32_t num_sgls, 867 struct iovec *iovs, uint32_t max_iovcnt, 868 void *(*gpa_to_vva)(void *prv, uint64_t addr, uint64_t len, int prot)) 869 { 870 uint32_t i; 871 void *vva; 872 873 if (spdk_unlikely(max_iovcnt < num_sgls)) { 874 return -ERANGE; 875 } 876 877 for (i = 0; i < num_sgls; i++) { 878 if (spdk_unlikely(sgls[i].unkeyed.type != SPDK_NVME_SGL_TYPE_DATA_BLOCK)) { 879 SPDK_ERRLOG("Invalid SGL type %u\n", sgls[i].unkeyed.type); 880 return -EINVAL; 881 } 882 vva = gpa_to_vva(prv, sgls[i].address, 
sgls[i].unkeyed.length, PROT_READ | PROT_WRITE); 883 if (spdk_unlikely(vva == NULL)) { 884 SPDK_ERRLOG("GPA to VVA failed\n"); 885 return -EINVAL; 886 } 887 iovs[i].iov_base = vva; 888 iovs[i].iov_len = sgls[i].unkeyed.length; 889 } 890 891 return num_sgls; 892 } 893 894 static int 895 nvme_cmd_map_sgls(void *prv, struct spdk_nvme_cmd *cmd, struct iovec *iovs, uint32_t max_iovcnt, 896 uint32_t len, size_t mps, 897 void *(*gpa_to_vva)(void *prv, uint64_t addr, uint64_t len, int prot)) 898 { 899 struct spdk_nvme_sgl_descriptor *sgl, *last_sgl; 900 uint32_t num_sgls, seg_len; 901 void *vva; 902 int ret; 903 uint32_t total_iovcnt = 0; 904 905 /* SGL cases */ 906 sgl = &cmd->dptr.sgl1; 907 908 /* only one SGL segment */ 909 if (sgl->unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK) { 910 assert(max_iovcnt > 0); 911 vva = gpa_to_vva(prv, sgl->address, sgl->unkeyed.length, PROT_READ | PROT_WRITE); 912 if (spdk_unlikely(vva == NULL)) { 913 SPDK_ERRLOG("GPA to VVA failed\n"); 914 return -EINVAL; 915 } 916 iovs[0].iov_base = vva; 917 iovs[0].iov_len = sgl->unkeyed.length; 918 assert(sgl->unkeyed.length == len); 919 920 return 1; 921 } 922 923 for (;;) { 924 if (spdk_unlikely((sgl->unkeyed.type != SPDK_NVME_SGL_TYPE_SEGMENT) && 925 (sgl->unkeyed.type != SPDK_NVME_SGL_TYPE_LAST_SEGMENT))) { 926 SPDK_ERRLOG("Invalid SGL type %u\n", sgl->unkeyed.type); 927 return -EINVAL; 928 } 929 930 seg_len = sgl->unkeyed.length; 931 if (spdk_unlikely(seg_len % sizeof(struct spdk_nvme_sgl_descriptor))) { 932 SPDK_ERRLOG("Invalid SGL segment len %u\n", seg_len); 933 return -EINVAL; 934 } 935 936 num_sgls = seg_len / sizeof(struct spdk_nvme_sgl_descriptor); 937 vva = gpa_to_vva(prv, sgl->address, sgl->unkeyed.length, PROT_READ); 938 if (spdk_unlikely(vva == NULL)) { 939 SPDK_ERRLOG("GPA to VVA failed\n"); 940 return -EINVAL; 941 } 942 943 /* sgl point to the first segment */ 944 sgl = (struct spdk_nvme_sgl_descriptor *)vva; 945 last_sgl = &sgl[num_sgls - 1]; 946 947 /* we are done */ 948 if (last_sgl->unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK) { 949 /* map whole sgl list */ 950 ret = nvme_cmd_map_sgls_data(prv, sgl, num_sgls, &iovs[total_iovcnt], 951 max_iovcnt - total_iovcnt, gpa_to_vva); 952 if (spdk_unlikely(ret < 0)) { 953 return ret; 954 } 955 total_iovcnt += ret; 956 957 return total_iovcnt; 958 } 959 960 if (num_sgls > 1) { 961 /* map whole sgl exclude last_sgl */ 962 ret = nvme_cmd_map_sgls_data(prv, sgl, num_sgls - 1, &iovs[total_iovcnt], 963 max_iovcnt - total_iovcnt, gpa_to_vva); 964 if (spdk_unlikely(ret < 0)) { 965 return ret; 966 } 967 total_iovcnt += ret; 968 } 969 970 /* move to next level's segments */ 971 sgl = last_sgl; 972 } 973 974 return 0; 975 } 976 977 static int 978 nvme_map_cmd(void *prv, struct spdk_nvme_cmd *cmd, struct iovec *iovs, uint32_t max_iovcnt, 979 uint32_t len, size_t mps, 980 void *(*gpa_to_vva)(void *prv, uint64_t addr, uint64_t len, int prot)) 981 { 982 if (cmd->psdt == SPDK_NVME_PSDT_PRP) { 983 return nvme_cmd_map_prps(prv, cmd, iovs, max_iovcnt, len, mps, gpa_to_vva); 984 } 985 986 return nvme_cmd_map_sgls(prv, cmd, iovs, max_iovcnt, len, mps, gpa_to_vva); 987 } 988 989 /* 990 * For each queue, update the location of its doorbell to the correct location: 991 * either our own BAR0, or the guest's configured shadow doorbell area. 992 * 993 * The Admin queue (qid: 0) does not ever use shadow doorbells. 994 */ 995 static void 996 vfio_user_ctrlr_switch_doorbells(struct nvmf_vfio_user_ctrlr *ctrlr, bool shadow) 997 { 998 volatile uint32_t *doorbells = shadow ? 
ctrlr->sdbl->shadow_doorbells : 999 ctrlr->bar0_doorbells; 1000 1001 assert(doorbells != NULL); 1002 1003 for (size_t i = 1; i < NVMF_VFIO_USER_DEFAULT_MAX_QPAIRS_PER_CTRLR; i++) { 1004 struct nvmf_vfio_user_sq *sq = ctrlr->sqs[i]; 1005 struct nvmf_vfio_user_cq *cq = ctrlr->cqs[i]; 1006 1007 if (sq != NULL) { 1008 sq->dbl_tailp = doorbells + queue_index(sq->qid, false); 1009 1010 ctrlr->sqs[i]->need_rearm = shadow; 1011 } 1012 1013 if (cq != NULL) { 1014 cq->dbl_headp = doorbells + queue_index(cq->qid, true); 1015 } 1016 } 1017 } 1018 1019 static void 1020 unmap_sdbl(vfu_ctx_t *vfu_ctx, struct nvmf_vfio_user_shadow_doorbells *sdbl) 1021 { 1022 assert(vfu_ctx != NULL); 1023 assert(sdbl != NULL); 1024 1025 /* 1026 * An allocation error would result in only one of the two being 1027 * non-NULL. If that is the case, no memory should have been mapped. 1028 */ 1029 if (sdbl->iovs == NULL || sdbl->sgs == NULL) { 1030 return; 1031 } 1032 1033 for (size_t i = 0; i < NVMF_VFIO_USER_SHADOW_DOORBELLS_BUFFER_COUNT; ++i) { 1034 struct iovec *iov; 1035 dma_sg_t *sg; 1036 1037 if (!sdbl->iovs[i].iov_len) { 1038 continue; 1039 } 1040 1041 sg = index_to_sg_t(sdbl->sgs, i); 1042 iov = sdbl->iovs + i; 1043 1044 vfu_sgl_put(vfu_ctx, sg, iov, 1); 1045 } 1046 } 1047 1048 static void 1049 free_sdbl(vfu_ctx_t *vfu_ctx, struct nvmf_vfio_user_shadow_doorbells *sdbl) 1050 { 1051 if (sdbl == NULL) { 1052 return; 1053 } 1054 1055 unmap_sdbl(vfu_ctx, sdbl); 1056 1057 /* 1058 * sdbl->shadow_doorbells and sdbl->eventidxs were mapped, 1059 * not allocated, so don't free() them. 1060 */ 1061 free(sdbl->sgs); 1062 free(sdbl->iovs); 1063 free(sdbl); 1064 } 1065 1066 static struct nvmf_vfio_user_shadow_doorbells * 1067 map_sdbl(vfu_ctx_t *vfu_ctx, uint64_t prp1, uint64_t prp2, size_t len) 1068 { 1069 struct nvmf_vfio_user_shadow_doorbells *sdbl = NULL; 1070 dma_sg_t *sg2 = NULL; 1071 void *p; 1072 1073 assert(vfu_ctx != NULL); 1074 1075 sdbl = calloc(1, sizeof(*sdbl)); 1076 if (sdbl == NULL) { 1077 goto err; 1078 } 1079 1080 sdbl->sgs = calloc(NVMF_VFIO_USER_SHADOW_DOORBELLS_BUFFER_COUNT, dma_sg_size()); 1081 sdbl->iovs = calloc(NVMF_VFIO_USER_SHADOW_DOORBELLS_BUFFER_COUNT, sizeof(*sdbl->iovs)); 1082 if (sdbl->sgs == NULL || sdbl->iovs == NULL) { 1083 goto err; 1084 } 1085 1086 /* Map shadow doorbell buffer (PRP1). */ 1087 p = map_one(vfu_ctx, prp1, len, sdbl->sgs, sdbl->iovs, 1088 PROT_READ | PROT_WRITE); 1089 1090 if (p == NULL) { 1091 goto err; 1092 } 1093 1094 /* 1095 * Map eventidx buffer (PRP2). 1096 * Should only be written to by the controller. 1097 */ 1098 1099 sg2 = index_to_sg_t(sdbl->sgs, 1); 1100 1101 p = map_one(vfu_ctx, prp2, len, sg2, sdbl->iovs + 1, 1102 PROT_READ | PROT_WRITE); 1103 1104 if (p == NULL) { 1105 goto err; 1106 } 1107 1108 sdbl->shadow_doorbells = (uint32_t *)sdbl->iovs[0].iov_base; 1109 sdbl->eventidxs = (uint32_t *)sdbl->iovs[1].iov_base; 1110 1111 return sdbl; 1112 1113 err: 1114 free_sdbl(vfu_ctx, sdbl); 1115 return NULL; 1116 } 1117 1118 /* 1119 * Copy doorbells from one buffer to the other, during switches betweeen BAR0 1120 * doorbells and shadow doorbells. 1121 */ 1122 static void 1123 copy_doorbells(struct nvmf_vfio_user_ctrlr *ctrlr, 1124 const volatile uint32_t *from, volatile uint32_t *to) 1125 { 1126 assert(ctrlr != NULL); 1127 assert(from != NULL); 1128 assert(to != NULL); 1129 1130 SPDK_DEBUGLOG(vfio_user_db, 1131 "%s: migrating shadow doorbells from %p to %p\n", 1132 ctrlr_id(ctrlr), from, to); 1133 1134 /* Can't use memcpy because it doesn't respect volatile semantics. 
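 *
 * Copying element by element through the volatile pointers forces one 32-bit
 * load and one 32-bit store per doorbell, so the compiler cannot merge,
 * widen, or elide the accesses the way a memcpy() expansion could.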
*/ 1135 for (size_t i = 0; i < NVMF_VFIO_USER_DEFAULT_MAX_QPAIRS_PER_CTRLR; ++i) { 1136 if (ctrlr->sqs[i] != NULL) { 1137 to[queue_index(i, false)] = from[queue_index(i, false)]; 1138 } 1139 1140 if (ctrlr->cqs[i] != NULL) { 1141 to[queue_index(i, true)] = from[queue_index(i, true)]; 1142 } 1143 } 1144 } 1145 1146 static void 1147 fail_ctrlr(struct nvmf_vfio_user_ctrlr *vu_ctrlr) 1148 { 1149 const struct spdk_nvmf_registers *regs; 1150 1151 assert(vu_ctrlr != NULL); 1152 assert(vu_ctrlr->ctrlr != NULL); 1153 1154 regs = spdk_nvmf_ctrlr_get_regs(vu_ctrlr->ctrlr); 1155 if (regs->csts.bits.cfs == 0) { 1156 SPDK_ERRLOG(":%s failing controller\n", ctrlr_id(vu_ctrlr)); 1157 } 1158 1159 nvmf_ctrlr_set_fatal_status(vu_ctrlr->ctrlr); 1160 } 1161 1162 static inline bool 1163 ctrlr_interrupt_enabled(struct nvmf_vfio_user_ctrlr *vu_ctrlr) 1164 { 1165 assert(vu_ctrlr != NULL); 1166 assert(vu_ctrlr->endpoint != NULL); 1167 1168 vfu_pci_config_space_t *pci = vu_ctrlr->endpoint->pci_config_space; 1169 1170 return (!pci->hdr.cmd.id || vu_ctrlr->endpoint->msix->mxc.mxe); 1171 } 1172 1173 static void 1174 nvmf_vfio_user_destroy_endpoint(struct nvmf_vfio_user_endpoint *endpoint) 1175 { 1176 SPDK_DEBUGLOG(nvmf_vfio, "destroy endpoint %s\n", endpoint_id(endpoint)); 1177 1178 spdk_interrupt_unregister(&endpoint->accept_intr); 1179 spdk_poller_unregister(&endpoint->accept_poller); 1180 1181 if (endpoint->bar0_doorbells) { 1182 munmap((void *)endpoint->bar0_doorbells, NVMF_VFIO_USER_DOORBELLS_SIZE); 1183 } 1184 1185 if (endpoint->devmem_fd > 0) { 1186 close(endpoint->devmem_fd); 1187 } 1188 1189 if (endpoint->migr_data) { 1190 munmap(endpoint->migr_data, vfio_user_migr_data_len()); 1191 } 1192 1193 if (endpoint->migr_fd > 0) { 1194 close(endpoint->migr_fd); 1195 } 1196 1197 if (endpoint->vfu_ctx) { 1198 vfu_destroy_ctx(endpoint->vfu_ctx); 1199 } 1200 1201 pthread_mutex_destroy(&endpoint->lock); 1202 free(endpoint); 1203 } 1204 1205 /* called when process exits */ 1206 static int 1207 nvmf_vfio_user_destroy(struct spdk_nvmf_transport *transport, 1208 spdk_nvmf_transport_destroy_done_cb cb_fn, void *cb_arg) 1209 { 1210 struct nvmf_vfio_user_transport *vu_transport; 1211 struct nvmf_vfio_user_endpoint *endpoint, *tmp; 1212 1213 SPDK_DEBUGLOG(nvmf_vfio, "destroy transport\n"); 1214 1215 vu_transport = SPDK_CONTAINEROF(transport, struct nvmf_vfio_user_transport, 1216 transport); 1217 1218 pthread_mutex_destroy(&vu_transport->lock); 1219 pthread_mutex_destroy(&vu_transport->pg_lock); 1220 1221 TAILQ_FOREACH_SAFE(endpoint, &vu_transport->endpoints, link, tmp) { 1222 TAILQ_REMOVE(&vu_transport->endpoints, endpoint, link); 1223 nvmf_vfio_user_destroy_endpoint(endpoint); 1224 } 1225 1226 free(vu_transport); 1227 1228 if (cb_fn) { 1229 cb_fn(cb_arg); 1230 } 1231 1232 return 0; 1233 } 1234 1235 static const struct spdk_json_object_decoder vfio_user_transport_opts_decoder[] = { 1236 { 1237 "disable_mappable_bar0", 1238 offsetof(struct nvmf_vfio_user_transport, transport_opts.disable_mappable_bar0), 1239 spdk_json_decode_bool, true 1240 }, 1241 { 1242 "disable_adaptive_irq", 1243 offsetof(struct nvmf_vfio_user_transport, transport_opts.disable_adaptive_irq), 1244 spdk_json_decode_bool, true 1245 }, 1246 { 1247 "disable_shadow_doorbells", 1248 offsetof(struct nvmf_vfio_user_transport, transport_opts.disable_shadow_doorbells), 1249 spdk_json_decode_bool, true 1250 }, 1251 { 1252 "disable_compare", 1253 offsetof(struct nvmf_vfio_user_transport, transport_opts.disable_compare), 1254 spdk_json_decode_bool, true 1255 }, 1256 { 1257 
"enable_intr_mode_sq_spreading", 1258 offsetof(struct nvmf_vfio_user_transport, transport_opts.enable_intr_mode_sq_spreading), 1259 spdk_json_decode_bool, true 1260 }, 1261 }; 1262 1263 static struct spdk_nvmf_transport * 1264 nvmf_vfio_user_create(struct spdk_nvmf_transport_opts *opts) 1265 { 1266 struct nvmf_vfio_user_transport *vu_transport; 1267 int err; 1268 1269 if (opts->max_qpairs_per_ctrlr > NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR) { 1270 SPDK_ERRLOG("Invalid max_qpairs_per_ctrlr=%d, supported max_qpairs_per_ctrlr=%d\n", 1271 opts->max_qpairs_per_ctrlr, NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR); 1272 return NULL; 1273 } 1274 1275 vu_transport = calloc(1, sizeof(*vu_transport)); 1276 if (vu_transport == NULL) { 1277 SPDK_ERRLOG("Transport alloc fail: %m\n"); 1278 return NULL; 1279 } 1280 1281 err = pthread_mutex_init(&vu_transport->lock, NULL); 1282 if (err != 0) { 1283 SPDK_ERRLOG("Pthread initialisation failed (%d)\n", err); 1284 goto err; 1285 } 1286 TAILQ_INIT(&vu_transport->endpoints); 1287 1288 err = pthread_mutex_init(&vu_transport->pg_lock, NULL); 1289 if (err != 0) { 1290 pthread_mutex_destroy(&vu_transport->lock); 1291 SPDK_ERRLOG("Pthread initialisation failed (%d)\n", err); 1292 goto err; 1293 } 1294 TAILQ_INIT(&vu_transport->poll_groups); 1295 1296 if (opts->transport_specific != NULL && 1297 spdk_json_decode_object_relaxed(opts->transport_specific, vfio_user_transport_opts_decoder, 1298 SPDK_COUNTOF(vfio_user_transport_opts_decoder), 1299 vu_transport)) { 1300 SPDK_ERRLOG("spdk_json_decode_object_relaxed failed\n"); 1301 goto cleanup; 1302 } 1303 1304 /* 1305 * To support interrupt mode, the transport must be configured with 1306 * mappable BAR0 disabled: we need a vfio-user message to wake us up 1307 * when a client writes new doorbell values to BAR0, via the 1308 * libvfio-user socket fd. 1309 */ 1310 vu_transport->intr_mode_supported = 1311 vu_transport->transport_opts.disable_mappable_bar0; 1312 1313 /* 1314 * If BAR0 is mappable, it doesn't make sense to support shadow 1315 * doorbells, so explicitly turn it off. 1316 */ 1317 if (!vu_transport->transport_opts.disable_mappable_bar0) { 1318 vu_transport->transport_opts.disable_shadow_doorbells = true; 1319 } 1320 1321 if (spdk_interrupt_mode_is_enabled()) { 1322 if (!vu_transport->intr_mode_supported) { 1323 SPDK_ERRLOG("interrupt mode not supported\n"); 1324 goto cleanup; 1325 } 1326 1327 /* 1328 * If we are in interrupt mode, we cannot support adaptive IRQs, 1329 * as there is no guarantee the SQ poller will run subsequently 1330 * to send pending IRQs. 
1331 */ 1332 vu_transport->transport_opts.disable_adaptive_irq = true; 1333 } 1334 1335 SPDK_DEBUGLOG(nvmf_vfio, "vfio_user transport: disable_mappable_bar0=%d\n", 1336 vu_transport->transport_opts.disable_mappable_bar0); 1337 SPDK_DEBUGLOG(nvmf_vfio, "vfio_user transport: disable_adaptive_irq=%d\n", 1338 vu_transport->transport_opts.disable_adaptive_irq); 1339 SPDK_DEBUGLOG(nvmf_vfio, "vfio_user transport: disable_shadow_doorbells=%d\n", 1340 vu_transport->transport_opts.disable_shadow_doorbells); 1341 1342 return &vu_transport->transport; 1343 1344 cleanup: 1345 pthread_mutex_destroy(&vu_transport->lock); 1346 pthread_mutex_destroy(&vu_transport->pg_lock); 1347 err: 1348 free(vu_transport); 1349 return NULL; 1350 } 1351 1352 static uint32_t 1353 max_queue_size(struct nvmf_vfio_user_ctrlr const *vu_ctrlr) 1354 { 1355 assert(vu_ctrlr != NULL); 1356 assert(vu_ctrlr->ctrlr != NULL); 1357 1358 return vu_ctrlr->ctrlr->vcprop.cap.bits.mqes + 1; 1359 } 1360 1361 static uint32_t 1362 doorbell_stride(const struct nvmf_vfio_user_ctrlr *vu_ctrlr) 1363 { 1364 assert(vu_ctrlr != NULL); 1365 assert(vu_ctrlr->ctrlr != NULL); 1366 1367 return vu_ctrlr->ctrlr->vcprop.cap.bits.dstrd; 1368 } 1369 1370 static uintptr_t 1371 memory_page_size(const struct nvmf_vfio_user_ctrlr *vu_ctrlr) 1372 { 1373 uint32_t memory_page_shift = vu_ctrlr->ctrlr->vcprop.cc.bits.mps + 12; 1374 return 1ul << memory_page_shift; 1375 } 1376 1377 static uintptr_t 1378 memory_page_mask(const struct nvmf_vfio_user_ctrlr *ctrlr) 1379 { 1380 return ~(memory_page_size(ctrlr) - 1); 1381 } 1382 1383 static int 1384 map_q(struct nvmf_vfio_user_ctrlr *vu_ctrlr, struct nvme_q_mapping *mapping, 1385 uint32_t q_size, bool is_cq, bool unmap) 1386 { 1387 uint64_t len; 1388 void *ret; 1389 1390 assert(q_size); 1391 assert(q_addr(mapping) == NULL); 1392 1393 if (is_cq) { 1394 len = q_size * sizeof(struct spdk_nvme_cpl); 1395 } else { 1396 len = q_size * sizeof(struct spdk_nvme_cmd); 1397 } 1398 1399 ret = map_one(vu_ctrlr->endpoint->vfu_ctx, mapping->prp1, len, 1400 mapping->sg, &mapping->iov, 1401 is_cq ? PROT_READ | PROT_WRITE : PROT_READ); 1402 if (ret == NULL) { 1403 return -EFAULT; 1404 } 1405 1406 if (unmap) { 1407 memset(q_addr(mapping), 0, len); 1408 } 1409 1410 return 0; 1411 } 1412 1413 static inline void 1414 unmap_q(struct nvmf_vfio_user_ctrlr *vu_ctrlr, struct nvme_q_mapping *mapping) 1415 { 1416 if (q_addr(mapping) != NULL) { 1417 vfu_sgl_put(vu_ctrlr->endpoint->vfu_ctx, mapping->sg, 1418 &mapping->iov, 1); 1419 mapping->iov.iov_base = NULL; 1420 } 1421 } 1422 1423 static int 1424 asq_setup(struct nvmf_vfio_user_ctrlr *ctrlr) 1425 { 1426 struct nvmf_vfio_user_sq *sq; 1427 const struct spdk_nvmf_registers *regs; 1428 int ret; 1429 1430 assert(ctrlr != NULL); 1431 1432 sq = ctrlr->sqs[0]; 1433 1434 assert(sq != NULL); 1435 assert(q_addr(&sq->mapping) == NULL); 1436 /* XXX ctrlr->asq == 0 is a valid memory address */ 1437 1438 regs = spdk_nvmf_ctrlr_get_regs(ctrlr->ctrlr); 1439 sq->qid = 0; 1440 sq->size = regs->aqa.bits.asqs + 1; 1441 sq->mapping.prp1 = regs->asq; 1442 *sq_headp(sq) = 0; 1443 sq->cqid = 0; 1444 1445 ret = map_q(ctrlr, &sq->mapping, sq->size, false, true); 1446 if (ret) { 1447 return ret; 1448 } 1449 1450 /* The Admin queue (qid: 0) does not ever use shadow doorbells. */ 1451 sq->dbl_tailp = ctrlr->bar0_doorbells + queue_index(0, false); 1452 1453 *sq_dbl_tailp(sq) = 0; 1454 1455 return 0; 1456 } 1457 1458 /* 1459 * Updates eventidx to set an SQ into interrupt or polling mode. 
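 *
 * Background, per the NVMe Doorbell Buffer Config feature: after writing a
 * shadow doorbell entry the host only writes the real BAR0 doorbell if the
 * new value passes the controller-maintained eventidx. Parking the eventidx
 * at the current tail therefore guarantees that the next submission produces
 * a BAR0 write (and hence a vfio-user message), while parking it at
 * NVMF_VFIO_USER_EVENTIDX_POLL effectively tells the host it never needs to
 * write BAR0.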
1460 * 1461 * Returns false if the current SQ tail does not match the SQ head, as 1462 * this means that the host has submitted more items to the queue while we were 1463 * not looking - or during the event index update. In that case, we must retry, 1464 * or otherwise make sure we are going to wake up again. 1465 */ 1466 static bool 1467 set_sq_eventidx(struct nvmf_vfio_user_sq *sq) 1468 { 1469 struct nvmf_vfio_user_ctrlr *ctrlr; 1470 volatile uint32_t *sq_tail_eidx; 1471 uint32_t old_tail, new_tail; 1472 1473 assert(sq != NULL); 1474 assert(sq->ctrlr != NULL); 1475 assert(sq->ctrlr->sdbl != NULL); 1476 assert(sq->need_rearm); 1477 assert(sq->qid != 0); 1478 1479 ctrlr = sq->ctrlr; 1480 1481 SPDK_DEBUGLOG(vfio_user_db, "%s: updating eventidx of sqid:%u\n", 1482 ctrlr_id(ctrlr), sq->qid); 1483 1484 sq_tail_eidx = ctrlr->sdbl->eventidxs + queue_index(sq->qid, false); 1485 1486 assert(ctrlr->endpoint != NULL); 1487 1488 if (!ctrlr->endpoint->interrupt_mode) { 1489 /* No synchronisation necessary. */ 1490 *sq_tail_eidx = NVMF_VFIO_USER_EVENTIDX_POLL; 1491 return true; 1492 } 1493 1494 old_tail = *sq_dbl_tailp(sq); 1495 *sq_tail_eidx = old_tail; 1496 1497 /* 1498 * Ensure that the event index is updated before re-reading the tail 1499 * doorbell. If it's not, then the host might race us and update the 1500 * tail after the second read but before the event index is written, so 1501 * it won't write to BAR0 and we'll miss the update. 1502 * 1503 * The driver should provide similar ordering with an mb(). 1504 */ 1505 spdk_mb(); 1506 1507 /* 1508 * Check if the host has updated the tail doorbell after we've read it 1509 * for the first time, but before the event index was written. If that's 1510 * the case, then we've lost the race and we need to update the event 1511 * index again (after polling the queue, since the host won't write to 1512 * BAR0). 1513 */ 1514 new_tail = *sq_dbl_tailp(sq); 1515 1516 /* 1517 * We might poll the queue straight after this function returns if the 1518 * tail has been updated, so we need to ensure that any changes to the 1519 * queue will be visible to us if the doorbell has been updated. 1520 * 1521 * The driver should provide similar ordering with a wmb() to ensure 1522 * that the queue is written before it updates the tail doorbell. 1523 */ 1524 spdk_rmb(); 1525 1526 SPDK_DEBUGLOG(vfio_user_db, "%s: sqid:%u, old_tail=%u, new_tail=%u, " 1527 "sq_head=%u\n", ctrlr_id(ctrlr), sq->qid, old_tail, 1528 new_tail, *sq_headp(sq)); 1529 1530 if (new_tail == *sq_headp(sq)) { 1531 sq->need_rearm = false; 1532 return true; 1533 } 1534 1535 /* 1536 * We've lost the race: the tail was updated since we last polled, 1537 * including if it happened within this routine. 1538 * 1539 * The caller should retry after polling (think of this as a cmpxchg 1540 * loop); if we go to sleep while the SQ is not empty, then we won't 1541 * process the remaining events. 1542 */ 1543 return false; 1544 } 1545 1546 static int nvmf_vfio_user_sq_poll(struct nvmf_vfio_user_sq *sq); 1547 1548 /* 1549 * Arrange for an SQ to interrupt us if written. Returns non-zero if we 1550 * processed some SQ entries. 
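 *
 * (For context, an assumption about the host rather than something this file
 * controls: the Linux driver decides whether to ring BAR0 with a check
 * equivalent to its nvme_dbbuf_need_event(), i.e. it only writes the doorbell
 * register when (uint16_t)(new - eventidx - 1) < (uint16_t)(new - old).)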
1551 */ 1552 static int 1553 vfio_user_sq_rearm(struct nvmf_vfio_user_ctrlr *ctrlr, 1554 struct nvmf_vfio_user_sq *sq, 1555 struct nvmf_vfio_user_poll_group *vu_group) 1556 { 1557 int count = 0; 1558 size_t i; 1559 1560 assert(sq->need_rearm); 1561 1562 for (i = 0; i < NVMF_VFIO_USER_SET_EVENTIDX_MAX_ATTEMPTS; i++) { 1563 int ret; 1564 1565 if (set_sq_eventidx(sq)) { 1566 /* We won the race and set eventidx; done. */ 1567 vu_group->stats.won++; 1568 return count; 1569 } 1570 1571 ret = nvmf_vfio_user_sq_poll(sq); 1572 1573 count += (ret < 0) ? 1 : ret; 1574 1575 /* 1576 * set_sq_eventidx() hit the race, so we expected 1577 * to process at least one command from this queue. 1578 * If there were no new commands waiting for us, then 1579 * we must have hit an unexpected race condition. 1580 */ 1581 if (ret == 0) { 1582 SPDK_ERRLOG("%s: unexpected race condition detected " 1583 "while updating the shadow doorbell buffer\n", 1584 ctrlr_id(ctrlr)); 1585 1586 fail_ctrlr(ctrlr); 1587 return count; 1588 } 1589 } 1590 1591 SPDK_DEBUGLOG(vfio_user_db, 1592 "%s: set_sq_eventidx() lost the race %zu times\n", 1593 ctrlr_id(ctrlr), i); 1594 1595 vu_group->stats.lost++; 1596 vu_group->stats.lost_count += count; 1597 1598 /* 1599 * We couldn't arrange an eventidx guaranteed to cause a BAR0 write, as 1600 * we raced with the producer too many times; force ourselves to wake up 1601 * instead. We'll process all queues at that point. 1602 */ 1603 ctrlr_kick(ctrlr); 1604 1605 return count; 1606 } 1607 1608 /* 1609 * We're in interrupt mode, and potentially about to go to sleep. We need to 1610 * make sure any further I/O submissions are guaranteed to wake us up: for 1611 * shadow doorbells that means we may need to go through set_sq_eventidx() for 1612 * every SQ that needs re-arming. 1613 * 1614 * Returns non-zero if we processed something. 1615 */ 1616 static int 1617 vfio_user_poll_group_rearm(struct nvmf_vfio_user_poll_group *vu_group) 1618 { 1619 struct nvmf_vfio_user_sq *sq; 1620 int count = 0; 1621 1622 vu_group->stats.rearms++; 1623 1624 TAILQ_FOREACH(sq, &vu_group->sqs, link) { 1625 if (spdk_unlikely(sq->sq_state != VFIO_USER_SQ_ACTIVE || !sq->size)) { 1626 continue; 1627 } 1628 1629 if (sq->need_rearm) { 1630 count += vfio_user_sq_rearm(sq->ctrlr, sq, vu_group); 1631 } 1632 } 1633 1634 return count; 1635 } 1636 1637 static int 1638 acq_setup(struct nvmf_vfio_user_ctrlr *ctrlr) 1639 { 1640 struct nvmf_vfio_user_cq *cq; 1641 const struct spdk_nvmf_registers *regs; 1642 int ret; 1643 1644 assert(ctrlr != NULL); 1645 1646 cq = ctrlr->cqs[0]; 1647 1648 assert(cq != NULL); 1649 1650 assert(q_addr(&cq->mapping) == NULL); 1651 1652 regs = spdk_nvmf_ctrlr_get_regs(ctrlr->ctrlr); 1653 assert(regs != NULL); 1654 cq->qid = 0; 1655 cq->size = regs->aqa.bits.acqs + 1; 1656 cq->mapping.prp1 = regs->acq; 1657 *cq_tailp(cq) = 0; 1658 cq->ien = true; 1659 cq->phase = true; 1660 1661 ret = map_q(ctrlr, &cq->mapping, cq->size, true, true); 1662 if (ret) { 1663 return ret; 1664 } 1665 1666 /* The Admin queue (qid: 0) does not ever use shadow doorbells. 
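 * (The NVMe Doorbell Buffer Config feature covers I/O queues only, so the
 * admin CQ head below always points at BAR0.)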
*/ 1667 cq->dbl_headp = ctrlr->bar0_doorbells + queue_index(0, true); 1668 1669 *cq_dbl_headp(cq) = 0; 1670 1671 return 0; 1672 } 1673 1674 static void * 1675 _map_one(void *prv, uint64_t addr, uint64_t len, int prot) 1676 { 1677 struct spdk_nvmf_request *req = (struct spdk_nvmf_request *)prv; 1678 struct spdk_nvmf_qpair *qpair; 1679 struct nvmf_vfio_user_req *vu_req; 1680 struct nvmf_vfio_user_sq *sq; 1681 void *ret; 1682 1683 assert(req != NULL); 1684 qpair = req->qpair; 1685 vu_req = SPDK_CONTAINEROF(req, struct nvmf_vfio_user_req, req); 1686 sq = SPDK_CONTAINEROF(qpair, struct nvmf_vfio_user_sq, qpair); 1687 1688 assert(vu_req->iovcnt < NVMF_VFIO_USER_MAX_IOVECS); 1689 ret = map_one(sq->ctrlr->endpoint->vfu_ctx, addr, len, 1690 index_to_sg_t(vu_req->sg, vu_req->iovcnt), 1691 &vu_req->iov[vu_req->iovcnt], prot); 1692 if (spdk_likely(ret != NULL)) { 1693 vu_req->iovcnt++; 1694 } 1695 return ret; 1696 } 1697 1698 static int 1699 vfio_user_map_cmd(struct nvmf_vfio_user_ctrlr *ctrlr, struct spdk_nvmf_request *req, 1700 struct iovec *iov, uint32_t length) 1701 { 1702 /* Map PRP list to from Guest physical memory to 1703 * virtual memory address. 1704 */ 1705 return nvme_map_cmd(req, &req->cmd->nvme_cmd, iov, NVMF_REQ_MAX_BUFFERS, 1706 length, 4096, _map_one); 1707 } 1708 1709 static int handle_cmd_req(struct nvmf_vfio_user_ctrlr *ctrlr, struct spdk_nvme_cmd *cmd, 1710 struct nvmf_vfio_user_sq *sq); 1711 1712 /* 1713 * Posts a CQE in the completion queue. 1714 * 1715 * @ctrlr: the vfio-user controller 1716 * @cq: the completion queue 1717 * @cdw0: cdw0 as reported by NVMf 1718 * @sqid: submission queue ID 1719 * @cid: command identifier in NVMe command 1720 * @sc: the NVMe CQE status code 1721 * @sct: the NVMe CQE status code type 1722 */ 1723 static int 1724 post_completion(struct nvmf_vfio_user_ctrlr *ctrlr, struct nvmf_vfio_user_cq *cq, 1725 uint32_t cdw0, uint16_t sqid, uint16_t cid, uint16_t sc, uint16_t sct) 1726 { 1727 struct spdk_nvme_status cpl_status = { 0 }; 1728 struct spdk_nvme_cpl *cpl; 1729 int err; 1730 1731 assert(ctrlr != NULL); 1732 1733 if (spdk_unlikely(cq == NULL || q_addr(&cq->mapping) == NULL)) { 1734 return 0; 1735 } 1736 1737 if (cq->qid == 0) { 1738 assert(spdk_get_thread() == cq->group->group->thread); 1739 } 1740 1741 if (cq_is_full(cq)) { 1742 SPDK_ERRLOG("%s: cqid:%d full (tail=%d, head=%d)\n", 1743 ctrlr_id(ctrlr), cq->qid, *cq_tailp(cq), 1744 *cq_dbl_headp(cq)); 1745 return -1; 1746 } 1747 1748 cpl = ((struct spdk_nvme_cpl *)q_addr(&cq->mapping)) + *cq_tailp(cq); 1749 1750 assert(ctrlr->sqs[sqid] != NULL); 1751 SPDK_DEBUGLOG(nvmf_vfio, 1752 "%s: request complete sqid:%d cid=%d status=%#x " 1753 "sqhead=%d cq tail=%d\n", ctrlr_id(ctrlr), sqid, cid, sc, 1754 *sq_headp(ctrlr->sqs[sqid]), *cq_tailp(cq)); 1755 1756 cpl->sqhd = *sq_headp(ctrlr->sqs[sqid]); 1757 cpl->sqid = sqid; 1758 cpl->cid = cid; 1759 cpl->cdw0 = cdw0; 1760 1761 /* 1762 * This is a bitfield: instead of setting the individual bits we need 1763 * directly in cpl->status, which would cause a read-modify-write cycle, 1764 * we'll avoid reading from the CPL altogether by filling in a local 1765 * cpl_status variable, then writing the whole thing. 1766 */ 1767 cpl_status.sct = sct; 1768 cpl_status.sc = sc; 1769 cpl_status.p = cq->phase; 1770 cpl->status = cpl_status; 1771 1772 /* Ensure the Completion Queue Entry is visible. 
*/ 1773 spdk_wmb(); 1774 cq_tail_advance(cq); 1775 1776 if ((cq->qid == 0 || !ctrlr->adaptive_irqs_enabled) && 1777 cq->ien && ctrlr_interrupt_enabled(ctrlr)) { 1778 err = vfu_irq_trigger(ctrlr->endpoint->vfu_ctx, cq->iv); 1779 if (err != 0) { 1780 SPDK_ERRLOG("%s: failed to trigger interrupt: %m\n", 1781 ctrlr_id(ctrlr)); 1782 return err; 1783 } 1784 } 1785 1786 return 0; 1787 } 1788 1789 static void 1790 free_sq_reqs(struct nvmf_vfio_user_sq *sq) 1791 { 1792 while (!TAILQ_EMPTY(&sq->free_reqs)) { 1793 struct nvmf_vfio_user_req *vu_req = TAILQ_FIRST(&sq->free_reqs); 1794 TAILQ_REMOVE(&sq->free_reqs, vu_req, link); 1795 free(vu_req); 1796 } 1797 } 1798 1799 static void 1800 delete_cq_done(struct nvmf_vfio_user_ctrlr *ctrlr, struct nvmf_vfio_user_cq *cq) 1801 { 1802 assert(cq->cq_ref == 0); 1803 unmap_q(ctrlr, &cq->mapping); 1804 cq->size = 0; 1805 cq->cq_state = VFIO_USER_CQ_DELETED; 1806 cq->group = NULL; 1807 } 1808 1809 /* Deletes a SQ, if this SQ is the last user of the associated CQ 1810 * and the controller is being shut down/reset or vfio-user client disconnects, 1811 * then the CQ is also deleted. 1812 */ 1813 static void 1814 delete_sq_done(struct nvmf_vfio_user_ctrlr *vu_ctrlr, struct nvmf_vfio_user_sq *sq) 1815 { 1816 struct nvmf_vfio_user_cq *cq; 1817 uint16_t cqid; 1818 1819 SPDK_DEBUGLOG(nvmf_vfio, "%s: delete sqid:%d=%p done\n", ctrlr_id(vu_ctrlr), 1820 sq->qid, sq); 1821 1822 /* Free SQ resources */ 1823 unmap_q(vu_ctrlr, &sq->mapping); 1824 1825 free_sq_reqs(sq); 1826 1827 sq->size = 0; 1828 1829 sq->sq_state = VFIO_USER_SQ_DELETED; 1830 1831 /* Controller RESET and SHUTDOWN are special cases, 1832 * VM may not send DELETE IO SQ/CQ commands, NVMf library 1833 * will disconnect IO queue pairs. 1834 */ 1835 if (vu_ctrlr->reset_shn || vu_ctrlr->disconnect) { 1836 cqid = sq->cqid; 1837 cq = vu_ctrlr->cqs[cqid]; 1838 1839 SPDK_DEBUGLOG(nvmf_vfio, "%s: try to delete cqid:%u=%p\n", ctrlr_id(vu_ctrlr), 1840 cq->qid, cq); 1841 1842 assert(cq->cq_ref > 0); 1843 if (--cq->cq_ref == 0) { 1844 delete_cq_done(vu_ctrlr, cq); 1845 } 1846 } 1847 } 1848 1849 static void 1850 free_qp(struct nvmf_vfio_user_ctrlr *ctrlr, uint16_t qid) 1851 { 1852 struct nvmf_vfio_user_sq *sq; 1853 struct nvmf_vfio_user_cq *cq; 1854 1855 if (ctrlr == NULL) { 1856 return; 1857 } 1858 1859 sq = ctrlr->sqs[qid]; 1860 if (sq) { 1861 SPDK_DEBUGLOG(nvmf_vfio, "%s: Free SQ %u\n", ctrlr_id(ctrlr), qid); 1862 unmap_q(ctrlr, &sq->mapping); 1863 1864 free_sq_reqs(sq); 1865 1866 free(sq->mapping.sg); 1867 free(sq); 1868 ctrlr->sqs[qid] = NULL; 1869 } 1870 1871 cq = ctrlr->cqs[qid]; 1872 if (cq) { 1873 SPDK_DEBUGLOG(nvmf_vfio, "%s: Free cqid:%u\n", ctrlr_id(ctrlr), qid); 1874 unmap_q(ctrlr, &cq->mapping); 1875 free(cq->mapping.sg); 1876 free(cq); 1877 ctrlr->cqs[qid] = NULL; 1878 } 1879 } 1880 1881 static int 1882 init_sq(struct nvmf_vfio_user_ctrlr *ctrlr, struct spdk_nvmf_transport *transport, 1883 const uint16_t id) 1884 { 1885 struct nvmf_vfio_user_sq *sq; 1886 1887 assert(ctrlr != NULL); 1888 assert(transport != NULL); 1889 assert(ctrlr->sqs[id] == NULL); 1890 1891 sq = calloc(1, sizeof(*sq)); 1892 if (sq == NULL) { 1893 return -ENOMEM; 1894 } 1895 sq->mapping.sg = calloc(1, dma_sg_size()); 1896 if (sq->mapping.sg == NULL) { 1897 free(sq); 1898 return -ENOMEM; 1899 } 1900 1901 sq->qid = id; 1902 sq->qpair.qid = id; 1903 sq->qpair.transport = transport; 1904 sq->ctrlr = ctrlr; 1905 ctrlr->sqs[id] = sq; 1906 1907 TAILQ_INIT(&sq->free_reqs); 1908 1909 return 0; 1910 } 1911 1912 static int 1913 init_cq(struct 
nvmf_vfio_user_ctrlr *vu_ctrlr, const uint16_t id) 1914 { 1915 struct nvmf_vfio_user_cq *cq; 1916 1917 assert(vu_ctrlr != NULL); 1918 assert(vu_ctrlr->cqs[id] == NULL); 1919 1920 cq = calloc(1, sizeof(*cq)); 1921 if (cq == NULL) { 1922 return -ENOMEM; 1923 } 1924 cq->mapping.sg = calloc(1, dma_sg_size()); 1925 if (cq->mapping.sg == NULL) { 1926 free(cq); 1927 return -ENOMEM; 1928 } 1929 1930 cq->qid = id; 1931 vu_ctrlr->cqs[id] = cq; 1932 1933 return 0; 1934 } 1935 1936 static int 1937 alloc_sq_reqs(struct nvmf_vfio_user_ctrlr *vu_ctrlr, struct nvmf_vfio_user_sq *sq) 1938 { 1939 struct nvmf_vfio_user_req *vu_req, *tmp; 1940 size_t req_size; 1941 uint32_t i; 1942 1943 req_size = sizeof(struct nvmf_vfio_user_req) + 1944 (dma_sg_size() * NVMF_VFIO_USER_MAX_IOVECS); 1945 1946 for (i = 0; i < sq->size; i++) { 1947 struct spdk_nvmf_request *req; 1948 1949 vu_req = calloc(1, req_size); 1950 if (vu_req == NULL) { 1951 goto err; 1952 } 1953 1954 req = &vu_req->req; 1955 req->qpair = &sq->qpair; 1956 req->rsp = (union nvmf_c2h_msg *)&vu_req->rsp; 1957 req->cmd = (union nvmf_h2c_msg *)&vu_req->cmd; 1958 req->stripped_data = NULL; 1959 1960 TAILQ_INSERT_TAIL(&sq->free_reqs, vu_req, link); 1961 } 1962 1963 return 0; 1964 1965 err: 1966 TAILQ_FOREACH_SAFE(vu_req, &sq->free_reqs, link, tmp) { 1967 free(vu_req); 1968 } 1969 return -ENOMEM; 1970 } 1971 1972 static volatile uint32_t * 1973 ctrlr_doorbell_ptr(struct nvmf_vfio_user_ctrlr *ctrlr) 1974 { 1975 return ctrlr->sdbl != NULL ? 1976 ctrlr->sdbl->shadow_doorbells : 1977 ctrlr->bar0_doorbells; 1978 } 1979 1980 static uint16_t 1981 handle_create_io_sq(struct nvmf_vfio_user_ctrlr *ctrlr, 1982 struct spdk_nvme_cmd *cmd, uint16_t *sct) 1983 { 1984 struct nvmf_vfio_user_transport *vu_transport = ctrlr->transport; 1985 struct nvmf_vfio_user_sq *sq; 1986 uint32_t qsize; 1987 uint16_t cqid; 1988 uint16_t qid; 1989 int err; 1990 1991 qid = cmd->cdw10_bits.create_io_q.qid; 1992 cqid = cmd->cdw11_bits.create_io_sq.cqid; 1993 qsize = cmd->cdw10_bits.create_io_q.qsize + 1; 1994 1995 if (ctrlr->sqs[qid] == NULL) { 1996 err = init_sq(ctrlr, ctrlr->sqs[0]->qpair.transport, qid); 1997 if (err != 0) { 1998 *sct = SPDK_NVME_SCT_GENERIC; 1999 return SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2000 } 2001 } 2002 2003 if (cqid == 0 || cqid >= vu_transport->transport.opts.max_qpairs_per_ctrlr) { 2004 SPDK_ERRLOG("%s: invalid cqid:%u\n", ctrlr_id(ctrlr), cqid); 2005 *sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 2006 return SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER; 2007 } 2008 2009 /* CQ must be created before SQ. 
*/ 2010 if (!io_q_exists(ctrlr, cqid, true)) { 2011 SPDK_ERRLOG("%s: cqid:%u does not exist\n", ctrlr_id(ctrlr), cqid); 2012 *sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 2013 return SPDK_NVME_SC_COMPLETION_QUEUE_INVALID; 2014 } 2015 2016 if (cmd->cdw11_bits.create_io_sq.pc != 0x1) { 2017 SPDK_ERRLOG("%s: non-PC SQ not supported\n", ctrlr_id(ctrlr)); 2018 *sct = SPDK_NVME_SCT_GENERIC; 2019 return SPDK_NVME_SC_INVALID_FIELD; 2020 } 2021 2022 sq = ctrlr->sqs[qid]; 2023 sq->size = qsize; 2024 2025 SPDK_DEBUGLOG(nvmf_vfio, "%s: sqid:%d cqid:%d\n", ctrlr_id(ctrlr), 2026 qid, cqid); 2027 2028 sq->mapping.prp1 = cmd->dptr.prp.prp1; 2029 2030 err = map_q(ctrlr, &sq->mapping, sq->size, false, true); 2031 if (err) { 2032 SPDK_ERRLOG("%s: failed to map I/O queue: %m\n", ctrlr_id(ctrlr)); 2033 *sct = SPDK_NVME_SCT_GENERIC; 2034 return SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2035 } 2036 2037 SPDK_DEBUGLOG(nvmf_vfio, "%s: mapped sqid:%d IOVA=%#lx vaddr=%p\n", 2038 ctrlr_id(ctrlr), qid, cmd->dptr.prp.prp1, 2039 q_addr(&sq->mapping)); 2040 2041 err = alloc_sq_reqs(ctrlr, sq); 2042 if (err < 0) { 2043 SPDK_ERRLOG("%s: failed to allocate SQ requests: %m\n", ctrlr_id(ctrlr)); 2044 *sct = SPDK_NVME_SCT_GENERIC; 2045 return SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2046 } 2047 2048 sq->cqid = cqid; 2049 ctrlr->cqs[sq->cqid]->cq_ref++; 2050 sq->sq_state = VFIO_USER_SQ_CREATED; 2051 *sq_headp(sq) = 0; 2052 2053 sq->dbl_tailp = ctrlr_doorbell_ptr(ctrlr) + queue_index(qid, false); 2054 2055 /* 2056 * We should always reset the doorbells. 2057 * 2058 * The Specification prohibits the controller from writing to the shadow 2059 * doorbell buffer, however older versions of the Linux NVMe driver 2060 * don't reset the shadow doorbell buffer after a Queue-Level or 2061 * Controller-Level reset, which means that we're left with garbage 2062 * doorbell values. 2063 */ 2064 *sq_dbl_tailp(sq) = 0; 2065 2066 if (ctrlr->sdbl != NULL) { 2067 sq->need_rearm = true; 2068 2069 if (!set_sq_eventidx(sq)) { 2070 SPDK_ERRLOG("%s: host updated SQ tail doorbell before " 2071 "sqid:%hu was initialized\n", 2072 ctrlr_id(ctrlr), qid); 2073 fail_ctrlr(ctrlr); 2074 *sct = SPDK_NVME_SCT_GENERIC; 2075 return SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2076 } 2077 } 2078 2079 /* 2080 * Create our new I/O qpair. This asynchronously invokes, on a suitable 2081 * poll group, the nvmf_vfio_user_poll_group_add() callback, which will 2082 * call spdk_nvmf_request_exec_fabrics() with a generated fabrics 2083 * connect command. This command is then eventually completed via 2084 * handle_queue_connect_rsp(). 
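 *
 * Note that the admin completion for this Create I/O SQ command is not
 * posted here: post_create_io_sq_completion is set below and
 * handle_create_io_q() returns without posting, leaving the completion
 * to the connect response handler.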
2085 */ 2086 sq->create_io_sq_cmd = *cmd; 2087 sq->post_create_io_sq_completion = true; 2088 2089 spdk_nvmf_tgt_new_qpair(ctrlr->transport->transport.tgt, 2090 &sq->qpair); 2091 2092 *sct = SPDK_NVME_SCT_GENERIC; 2093 return SPDK_NVME_SC_SUCCESS; 2094 } 2095 2096 static uint16_t 2097 handle_create_io_cq(struct nvmf_vfio_user_ctrlr *ctrlr, 2098 struct spdk_nvme_cmd *cmd, uint16_t *sct) 2099 { 2100 struct nvmf_vfio_user_cq *cq; 2101 uint32_t qsize; 2102 uint16_t qid; 2103 int err; 2104 2105 qid = cmd->cdw10_bits.create_io_q.qid; 2106 qsize = cmd->cdw10_bits.create_io_q.qsize + 1; 2107 2108 if (ctrlr->cqs[qid] == NULL) { 2109 err = init_cq(ctrlr, qid); 2110 if (err != 0) { 2111 *sct = SPDK_NVME_SCT_GENERIC; 2112 return SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2113 } 2114 } 2115 2116 if (cmd->cdw11_bits.create_io_cq.pc != 0x1) { 2117 SPDK_ERRLOG("%s: non-PC CQ not supported\n", ctrlr_id(ctrlr)); 2118 *sct = SPDK_NVME_SCT_GENERIC; 2119 return SPDK_NVME_SC_INVALID_FIELD; 2120 } 2121 2122 if (cmd->cdw11_bits.create_io_cq.iv > NVME_IRQ_MSIX_NUM - 1) { 2123 SPDK_ERRLOG("%s: IV is too big\n", ctrlr_id(ctrlr)); 2124 *sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 2125 return SPDK_NVME_SC_INVALID_INTERRUPT_VECTOR; 2126 } 2127 2128 cq = ctrlr->cqs[qid]; 2129 cq->size = qsize; 2130 2131 cq->mapping.prp1 = cmd->dptr.prp.prp1; 2132 2133 cq->dbl_headp = ctrlr_doorbell_ptr(ctrlr) + queue_index(qid, true); 2134 2135 err = map_q(ctrlr, &cq->mapping, cq->size, true, true); 2136 if (err) { 2137 SPDK_ERRLOG("%s: failed to map I/O queue: %m\n", ctrlr_id(ctrlr)); 2138 *sct = SPDK_NVME_SCT_GENERIC; 2139 return SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2140 } 2141 2142 SPDK_DEBUGLOG(nvmf_vfio, "%s: mapped cqid:%u IOVA=%#lx vaddr=%p\n", 2143 ctrlr_id(ctrlr), qid, cmd->dptr.prp.prp1, 2144 q_addr(&cq->mapping)); 2145 2146 cq->ien = cmd->cdw11_bits.create_io_cq.ien; 2147 cq->iv = cmd->cdw11_bits.create_io_cq.iv; 2148 cq->phase = true; 2149 cq->cq_state = VFIO_USER_CQ_CREATED; 2150 2151 *cq_tailp(cq) = 0; 2152 2153 /* 2154 * We should always reset the doorbells. 2155 * 2156 * The Specification prohibits the controller from writing to the shadow 2157 * doorbell buffer, however older versions of the Linux NVMe driver 2158 * don't reset the shadow doorbell buffer after a Queue-Level or 2159 * Controller-Level reset, which means that we're left with garbage 2160 * doorbell values. 2161 */ 2162 *cq_dbl_headp(cq) = 0; 2163 2164 *sct = SPDK_NVME_SCT_GENERIC; 2165 return SPDK_NVME_SC_SUCCESS; 2166 } 2167 2168 /* 2169 * Creates a completion or submission I/O queue. Returns 0 on success, -errno 2170 * on error. 2171 */ 2172 static int 2173 handle_create_io_q(struct nvmf_vfio_user_ctrlr *ctrlr, 2174 struct spdk_nvme_cmd *cmd, const bool is_cq) 2175 { 2176 struct nvmf_vfio_user_transport *vu_transport = ctrlr->transport; 2177 uint16_t sct = SPDK_NVME_SCT_GENERIC; 2178 uint16_t sc = SPDK_NVME_SC_SUCCESS; 2179 uint32_t qsize; 2180 uint16_t qid; 2181 2182 assert(ctrlr != NULL); 2183 assert(cmd != NULL); 2184 2185 qid = cmd->cdw10_bits.create_io_q.qid; 2186 if (qid == 0 || qid >= vu_transport->transport.opts.max_qpairs_per_ctrlr) { 2187 SPDK_ERRLOG("%s: invalid qid=%d, max=%d\n", ctrlr_id(ctrlr), 2188 qid, vu_transport->transport.opts.max_qpairs_per_ctrlr); 2189 sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 2190 sc = SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER; 2191 goto out; 2192 } 2193 2194 if (io_q_exists(ctrlr, qid, is_cq)) { 2195 SPDK_ERRLOG("%s: %cqid:%d already exists\n", ctrlr_id(ctrlr), 2196 is_cq ? 
'c' : 's', qid); 2197 sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 2198 sc = SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER; 2199 goto out; 2200 } 2201 2202 qsize = cmd->cdw10_bits.create_io_q.qsize + 1; 2203 if (qsize == 1 || qsize > max_queue_size(ctrlr)) { 2204 SPDK_ERRLOG("%s: invalid I/O queue size %u\n", ctrlr_id(ctrlr), qsize); 2205 sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 2206 sc = SPDK_NVME_SC_INVALID_QUEUE_SIZE; 2207 goto out; 2208 } 2209 2210 if (is_cq) { 2211 sc = handle_create_io_cq(ctrlr, cmd, &sct); 2212 } else { 2213 sc = handle_create_io_sq(ctrlr, cmd, &sct); 2214 2215 if (sct == SPDK_NVME_SCT_GENERIC && 2216 sc == SPDK_NVME_SC_SUCCESS) { 2217 /* Completion posted asynchronously. */ 2218 return 0; 2219 } 2220 } 2221 2222 out: 2223 return post_completion(ctrlr, ctrlr->cqs[0], 0, 0, cmd->cid, sc, sct); 2224 } 2225 2226 /* For ADMIN I/O DELETE SUBMISSION QUEUE the NVMf library will disconnect and free 2227 * queue pair, so save the command in a context. 2228 */ 2229 struct vfio_user_delete_sq_ctx { 2230 struct nvmf_vfio_user_ctrlr *vu_ctrlr; 2231 struct spdk_nvme_cmd delete_io_sq_cmd; 2232 }; 2233 2234 static void 2235 vfio_user_qpair_delete_cb(void *cb_arg) 2236 { 2237 struct vfio_user_delete_sq_ctx *ctx = cb_arg; 2238 struct nvmf_vfio_user_ctrlr *vu_ctrlr = ctx->vu_ctrlr; 2239 struct nvmf_vfio_user_cq *admin_cq = vu_ctrlr->cqs[0]; 2240 2241 assert(admin_cq != NULL); 2242 assert(admin_cq->group != NULL); 2243 assert(admin_cq->group->group->thread != NULL); 2244 if (admin_cq->group->group->thread != spdk_get_thread()) { 2245 spdk_thread_send_msg(admin_cq->group->group->thread, 2246 vfio_user_qpair_delete_cb, 2247 cb_arg); 2248 } else { 2249 post_completion(vu_ctrlr, admin_cq, 0, 0, 2250 ctx->delete_io_sq_cmd.cid, 2251 SPDK_NVME_SC_SUCCESS, SPDK_NVME_SCT_GENERIC); 2252 free(ctx); 2253 } 2254 } 2255 2256 /* 2257 * Deletes a completion or submission I/O queue. 2258 */ 2259 static int 2260 handle_del_io_q(struct nvmf_vfio_user_ctrlr *ctrlr, 2261 struct spdk_nvme_cmd *cmd, const bool is_cq) 2262 { 2263 uint16_t sct = SPDK_NVME_SCT_GENERIC; 2264 uint16_t sc = SPDK_NVME_SC_SUCCESS; 2265 struct nvmf_vfio_user_sq *sq; 2266 struct nvmf_vfio_user_cq *cq; 2267 struct vfio_user_delete_sq_ctx *ctx; 2268 2269 SPDK_DEBUGLOG(nvmf_vfio, "%s: delete I/O %cqid:%d\n", 2270 ctrlr_id(ctrlr), is_cq ? 'c' : 's', 2271 cmd->cdw10_bits.delete_io_q.qid); 2272 2273 if (!io_q_exists(ctrlr, cmd->cdw10_bits.delete_io_q.qid, is_cq)) { 2274 SPDK_ERRLOG("%s: I/O %cqid:%d does not exist\n", ctrlr_id(ctrlr), 2275 is_cq ? 'c' : 's', cmd->cdw10_bits.delete_io_q.qid); 2276 sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 2277 sc = SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER; 2278 goto out; 2279 } 2280 2281 if (is_cq) { 2282 cq = ctrlr->cqs[cmd->cdw10_bits.delete_io_q.qid]; 2283 if (cq->cq_ref) { 2284 SPDK_ERRLOG("%s: the associated SQ must be deleted first\n", ctrlr_id(ctrlr)); 2285 sct = SPDK_NVME_SCT_COMMAND_SPECIFIC; 2286 sc = SPDK_NVME_SC_INVALID_QUEUE_DELETION; 2287 goto out; 2288 } 2289 delete_cq_done(ctrlr, cq); 2290 } else { 2291 /* 2292 * Deletion of the CQ is only deferred to delete_sq_done() on 2293 * VM reboot or CC.EN change, so we have to delete it in all 2294 * other cases. 
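 *
 * For the SQ case we disconnect the qpair instead; the admin completion
 * for this command is posted later from vfio_user_qpair_delete_cb(), on
 * the admin CQ's poll group thread.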
2295 */ 2296 ctx = calloc(1, sizeof(*ctx)); 2297 if (!ctx) { 2298 sct = SPDK_NVME_SCT_GENERIC; 2299 sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 2300 goto out; 2301 } 2302 ctx->vu_ctrlr = ctrlr; 2303 ctx->delete_io_sq_cmd = *cmd; 2304 2305 sq = ctrlr->sqs[cmd->cdw10_bits.delete_io_q.qid]; 2306 sq->sq_state = VFIO_USER_SQ_DELETED; 2307 assert(ctrlr->cqs[sq->cqid]->cq_ref); 2308 ctrlr->cqs[sq->cqid]->cq_ref--; 2309 2310 spdk_nvmf_qpair_disconnect(&sq->qpair, vfio_user_qpair_delete_cb, ctx); 2311 return 0; 2312 } 2313 2314 out: 2315 return post_completion(ctrlr, ctrlr->cqs[0], 0, 0, cmd->cid, sc, sct); 2316 } 2317 2318 /* 2319 * Configures Shadow Doorbells. 2320 */ 2321 static int 2322 handle_doorbell_buffer_config(struct nvmf_vfio_user_ctrlr *ctrlr, struct spdk_nvme_cmd *cmd) 2323 { 2324 struct nvmf_vfio_user_shadow_doorbells *sdbl = NULL; 2325 uint32_t dstrd; 2326 uintptr_t page_size, page_mask; 2327 uint64_t prp1, prp2; 2328 uint16_t sct = SPDK_NVME_SCT_GENERIC; 2329 uint16_t sc = SPDK_NVME_SC_INVALID_FIELD; 2330 2331 assert(ctrlr != NULL); 2332 assert(ctrlr->endpoint != NULL); 2333 assert(cmd != NULL); 2334 2335 dstrd = doorbell_stride(ctrlr); 2336 page_size = memory_page_size(ctrlr); 2337 page_mask = memory_page_mask(ctrlr); 2338 2339 /* FIXME: we don't check doorbell stride when setting queue doorbells. */ 2340 if ((4u << dstrd) * NVMF_VFIO_USER_DEFAULT_MAX_QPAIRS_PER_CTRLR > page_size) { 2341 SPDK_ERRLOG("%s: doorbells do not fit in a single host page", 2342 ctrlr_id(ctrlr)); 2343 2344 goto out; 2345 } 2346 2347 /* Verify guest physical addresses passed as PRPs. */ 2348 if (cmd->psdt != SPDK_NVME_PSDT_PRP) { 2349 SPDK_ERRLOG("%s: received Doorbell Buffer Config without PRPs", 2350 ctrlr_id(ctrlr)); 2351 2352 goto out; 2353 } 2354 2355 prp1 = cmd->dptr.prp.prp1; 2356 prp2 = cmd->dptr.prp.prp2; 2357 2358 SPDK_DEBUGLOG(nvmf_vfio, 2359 "%s: configuring shadow doorbells with PRP1=%#lx and PRP2=%#lx (GPAs)\n", 2360 ctrlr_id(ctrlr), prp1, prp2); 2361 2362 if (prp1 == prp2 2363 || prp1 != (prp1 & page_mask) 2364 || prp2 != (prp2 & page_mask)) { 2365 SPDK_ERRLOG("%s: invalid shadow doorbell GPAs\n", 2366 ctrlr_id(ctrlr)); 2367 2368 goto out; 2369 } 2370 2371 /* Map guest physical addresses to our virtual address space. */ 2372 sdbl = map_sdbl(ctrlr->endpoint->vfu_ctx, prp1, prp2, page_size); 2373 if (sdbl == NULL) { 2374 SPDK_ERRLOG("%s: failed to map shadow doorbell buffers\n", 2375 ctrlr_id(ctrlr)); 2376 2377 goto out; 2378 } 2379 2380 ctrlr->shadow_doorbell_buffer = prp1; 2381 ctrlr->eventidx_buffer = prp2; 2382 2383 SPDK_DEBUGLOG(nvmf_vfio, 2384 "%s: mapped shadow doorbell buffers [%p, %p) and [%p, %p)\n", 2385 ctrlr_id(ctrlr), 2386 sdbl->iovs[0].iov_base, 2387 sdbl->iovs[0].iov_base + sdbl->iovs[0].iov_len, 2388 sdbl->iovs[1].iov_base, 2389 sdbl->iovs[1].iov_base + sdbl->iovs[1].iov_len); 2390 2391 2392 /* 2393 * Set all possible CQ head doorbells to polling mode now, such that we 2394 * don't have to worry about it later if the host creates more queues. 2395 * 2396 * We only ever want interrupts for writes to the SQ tail doorbells 2397 * (which are initialised in set_ctrlr_intr_mode() below). 2398 */ 2399 for (uint16_t i = 0; i < NVMF_VFIO_USER_DEFAULT_MAX_QPAIRS_PER_CTRLR; ++i) { 2400 sdbl->eventidxs[queue_index(i, true)] = NVMF_VFIO_USER_EVENTIDX_POLL; 2401 } 2402 2403 /* Update controller. */ 2404 SWAP(ctrlr->sdbl, sdbl); 2405 2406 /* 2407 * Copy doorbells from either the previous shadow doorbell buffer or the 2408 * BAR0 doorbells and make I/O queue doorbells point to the new buffer. 
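 *
 * After the SWAP above, the local `sdbl` points at the previous shadow
 * doorbell buffer (or is NULL on the first Doorbell Buffer Config
 * command), which is why it is used as the copy source here.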
2409 * 2410 * This needs to account for older versions of the Linux NVMe driver, 2411 * which don't clear out the buffer after a controller reset. 2412 */ 2413 copy_doorbells(ctrlr, sdbl != NULL ? 2414 sdbl->shadow_doorbells : ctrlr->bar0_doorbells, 2415 ctrlr->sdbl->shadow_doorbells); 2416 2417 vfio_user_ctrlr_switch_doorbells(ctrlr, true); 2418 2419 ctrlr_kick(ctrlr); 2420 2421 sc = SPDK_NVME_SC_SUCCESS; 2422 2423 out: 2424 /* 2425 * Unmap existing buffers, in case Doorbell Buffer Config was sent 2426 * more than once (pointless, but not prohibited by the spec), or 2427 * in case of an error. 2428 * 2429 * If this is the first time Doorbell Buffer Config was processed, 2430 * then we've just swapped a NULL from ctrlr->sdbl into sdbl, so 2431 * free_sdbl() becomes a noop. 2432 */ 2433 free_sdbl(ctrlr->endpoint->vfu_ctx, sdbl); 2434 2435 return post_completion(ctrlr, ctrlr->cqs[0], 0, 0, cmd->cid, sc, sct); 2436 } 2437 2438 /* Returns 0 on success and -errno on error. */ 2439 static int 2440 consume_admin_cmd(struct nvmf_vfio_user_ctrlr *ctrlr, struct spdk_nvme_cmd *cmd) 2441 { 2442 assert(ctrlr != NULL); 2443 assert(cmd != NULL); 2444 2445 if (cmd->fuse != 0) { 2446 /* Fused admin commands are not supported. */ 2447 return post_completion(ctrlr, ctrlr->cqs[0], 0, 0, cmd->cid, 2448 SPDK_NVME_SC_INVALID_FIELD, 2449 SPDK_NVME_SCT_GENERIC); 2450 } 2451 2452 switch (cmd->opc) { 2453 case SPDK_NVME_OPC_CREATE_IO_CQ: 2454 case SPDK_NVME_OPC_CREATE_IO_SQ: 2455 return handle_create_io_q(ctrlr, cmd, 2456 cmd->opc == SPDK_NVME_OPC_CREATE_IO_CQ); 2457 case SPDK_NVME_OPC_DELETE_IO_SQ: 2458 case SPDK_NVME_OPC_DELETE_IO_CQ: 2459 return handle_del_io_q(ctrlr, cmd, 2460 cmd->opc == SPDK_NVME_OPC_DELETE_IO_CQ); 2461 case SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG: 2462 if (!ctrlr->transport->transport_opts.disable_shadow_doorbells) { 2463 return handle_doorbell_buffer_config(ctrlr, cmd); 2464 } 2465 /* FALLTHROUGH */ 2466 default: 2467 return handle_cmd_req(ctrlr, cmd, ctrlr->sqs[0]); 2468 } 2469 } 2470 2471 static int 2472 handle_cmd_rsp(struct nvmf_vfio_user_req *vu_req, void *cb_arg) 2473 { 2474 struct nvmf_vfio_user_sq *sq = cb_arg; 2475 struct nvmf_vfio_user_ctrlr *vu_ctrlr = sq->ctrlr; 2476 uint16_t sqid, cqid; 2477 2478 assert(sq != NULL); 2479 assert(vu_req != NULL); 2480 assert(vu_ctrlr != NULL); 2481 2482 if (spdk_likely(vu_req->iovcnt)) { 2483 vfu_sgl_put(vu_ctrlr->endpoint->vfu_ctx, 2484 index_to_sg_t(vu_req->sg, 0), 2485 vu_req->iov, vu_req->iovcnt); 2486 } 2487 sqid = sq->qid; 2488 cqid = sq->cqid; 2489 2490 return post_completion(vu_ctrlr, vu_ctrlr->cqs[cqid], 2491 vu_req->req.rsp->nvme_cpl.cdw0, 2492 sqid, 2493 vu_req->req.cmd->nvme_cmd.cid, 2494 vu_req->req.rsp->nvme_cpl.status.sc, 2495 vu_req->req.rsp->nvme_cpl.status.sct); 2496 } 2497 2498 static int 2499 consume_cmd(struct nvmf_vfio_user_ctrlr *ctrlr, struct nvmf_vfio_user_sq *sq, 2500 struct spdk_nvme_cmd *cmd) 2501 { 2502 assert(sq != NULL); 2503 if (spdk_unlikely(nvmf_qpair_is_admin_queue(&sq->qpair))) { 2504 return consume_admin_cmd(ctrlr, cmd); 2505 } 2506 2507 return handle_cmd_req(ctrlr, cmd, sq); 2508 } 2509 2510 /* Returns the number of commands processed, or a negative value on error. 
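 *
 * new_tail is the SQ tail doorbell value the caller observed (via a
 * BAR0 doorbell write or, presumably, by polling the shadow doorbell
 * buffer); entries are consumed from the current head up to that tail.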
*/ 2511 static int 2512 handle_sq_tdbl_write(struct nvmf_vfio_user_ctrlr *ctrlr, const uint32_t new_tail, 2513 struct nvmf_vfio_user_sq *sq) 2514 { 2515 struct spdk_nvme_cmd *queue; 2516 int count = 0; 2517 2518 assert(ctrlr != NULL); 2519 assert(sq != NULL); 2520 2521 if (ctrlr->sdbl != NULL && sq->qid != 0) { 2522 /* 2523 * Submission queue index has moved past the event index, so it 2524 * needs to be re-armed before we go to sleep. 2525 */ 2526 sq->need_rearm = true; 2527 } 2528 2529 queue = q_addr(&sq->mapping); 2530 while (*sq_headp(sq) != new_tail) { 2531 int err; 2532 struct spdk_nvme_cmd *cmd = &queue[*sq_headp(sq)]; 2533 2534 count++; 2535 2536 /* 2537 * SQHD must contain the new head pointer, so we must increase 2538 * it before we generate a completion. 2539 */ 2540 sq_head_advance(sq); 2541 2542 err = consume_cmd(ctrlr, sq, cmd); 2543 if (spdk_unlikely(err != 0)) { 2544 return err; 2545 } 2546 } 2547 2548 return count; 2549 } 2550 2551 /* Checks whether endpoint is connected from the same process */ 2552 static bool 2553 is_peer_same_process(struct nvmf_vfio_user_endpoint *endpoint) 2554 { 2555 struct ucred ucred; 2556 socklen_t ucredlen = sizeof(ucred); 2557 2558 if (endpoint == NULL) { 2559 return false; 2560 } 2561 2562 if (getsockopt(vfu_get_poll_fd(endpoint->vfu_ctx), SOL_SOCKET, SO_PEERCRED, &ucred, 2563 &ucredlen) < 0) { 2564 SPDK_ERRLOG("getsockopt(SO_PEERCRED): %s\n", strerror(errno)); 2565 return false; 2566 } 2567 2568 return ucred.pid == getpid(); 2569 } 2570 2571 static void 2572 memory_region_add_cb(vfu_ctx_t *vfu_ctx, vfu_dma_info_t *info) 2573 { 2574 struct nvmf_vfio_user_endpoint *endpoint = vfu_get_private(vfu_ctx); 2575 struct nvmf_vfio_user_ctrlr *ctrlr; 2576 struct nvmf_vfio_user_sq *sq; 2577 struct nvmf_vfio_user_cq *cq; 2578 void *map_start, *map_end; 2579 int ret; 2580 2581 /* 2582 * We're not interested in any DMA regions that aren't mappable (we don't 2583 * support clients that don't share their memory). 2584 */ 2585 if (!info->vaddr) { 2586 return; 2587 } 2588 2589 map_start = info->mapping.iov_base; 2590 map_end = info->mapping.iov_base + info->mapping.iov_len; 2591 2592 if (((uintptr_t)info->mapping.iov_base & MASK_2MB) || 2593 (info->mapping.iov_len & MASK_2MB)) { 2594 SPDK_DEBUGLOG(nvmf_vfio, "Invalid memory region vaddr %p, IOVA %p-%p\n", 2595 info->vaddr, map_start, map_end); 2596 return; 2597 } 2598 2599 assert(endpoint != NULL); 2600 if (endpoint->ctrlr == NULL) { 2601 return; 2602 } 2603 ctrlr = endpoint->ctrlr; 2604 2605 SPDK_DEBUGLOG(nvmf_vfio, "%s: map IOVA %p-%p\n", endpoint_id(endpoint), 2606 map_start, map_end); 2607 2608 /* VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE are enabled when registering to VFIO, here we also 2609 * check the protection bits before registering. When vfio client and server are run in same process 2610 * there is no need to register the same memory again. 
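 * Registering the region with spdk_mem_register() is what makes the
 * client's shared memory visible to SPDK's address translation, so that
 * queues and data buffers placed inside it can be used for DMA.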
2611 */ 2612 if (info->prot == (PROT_WRITE | PROT_READ) && !is_peer_same_process(endpoint)) { 2613 ret = spdk_mem_register(info->mapping.iov_base, info->mapping.iov_len); 2614 if (ret) { 2615 SPDK_ERRLOG("Memory region register %p-%p failed, ret=%d\n", 2616 map_start, map_end, ret); 2617 } 2618 } 2619 2620 pthread_mutex_lock(&endpoint->lock); 2621 TAILQ_FOREACH(sq, &ctrlr->connected_sqs, tailq) { 2622 if (sq->sq_state != VFIO_USER_SQ_INACTIVE) { 2623 continue; 2624 } 2625 2626 cq = ctrlr->cqs[sq->cqid]; 2627 2628 /* For shared CQ case, we will use q_addr() to avoid mapping CQ multiple times */ 2629 if (cq->size && q_addr(&cq->mapping) == NULL) { 2630 ret = map_q(ctrlr, &cq->mapping, cq->size, true, false); 2631 if (ret) { 2632 SPDK_DEBUGLOG(nvmf_vfio, "Memory isn't ready to remap cqid:%d %#lx-%#lx\n", 2633 cq->qid, cq->mapping.prp1, 2634 cq->mapping.prp1 + cq->size * sizeof(struct spdk_nvme_cpl)); 2635 continue; 2636 } 2637 } 2638 2639 if (sq->size) { 2640 ret = map_q(ctrlr, &sq->mapping, sq->size, false, false); 2641 if (ret) { 2642 SPDK_DEBUGLOG(nvmf_vfio, "Memory isn't ready to remap sqid:%d %#lx-%#lx\n", 2643 sq->qid, sq->mapping.prp1, 2644 sq->mapping.prp1 + sq->size * sizeof(struct spdk_nvme_cmd)); 2645 continue; 2646 } 2647 } 2648 sq->sq_state = VFIO_USER_SQ_ACTIVE; 2649 SPDK_DEBUGLOG(nvmf_vfio, "Remap sqid:%u successfully\n", sq->qid); 2650 } 2651 pthread_mutex_unlock(&endpoint->lock); 2652 } 2653 2654 static void 2655 memory_region_remove_cb(vfu_ctx_t *vfu_ctx, vfu_dma_info_t *info) 2656 { 2657 struct nvmf_vfio_user_endpoint *endpoint = vfu_get_private(vfu_ctx); 2658 struct nvmf_vfio_user_sq *sq; 2659 struct nvmf_vfio_user_cq *cq; 2660 void *map_start, *map_end; 2661 int ret = 0; 2662 2663 if (!info->vaddr) { 2664 return; 2665 } 2666 2667 map_start = info->mapping.iov_base; 2668 map_end = info->mapping.iov_base + info->mapping.iov_len; 2669 2670 if (((uintptr_t)info->mapping.iov_base & MASK_2MB) || 2671 (info->mapping.iov_len & MASK_2MB)) { 2672 SPDK_DEBUGLOG(nvmf_vfio, "Invalid memory region vaddr %p, IOVA %p-%p\n", 2673 info->vaddr, map_start, map_end); 2674 return; 2675 } 2676 2677 assert(endpoint != NULL); 2678 SPDK_DEBUGLOG(nvmf_vfio, "%s: unmap IOVA %p-%p\n", endpoint_id(endpoint), 2679 map_start, map_end); 2680 2681 if (endpoint->ctrlr != NULL) { 2682 struct nvmf_vfio_user_ctrlr *ctrlr; 2683 ctrlr = endpoint->ctrlr; 2684 2685 pthread_mutex_lock(&endpoint->lock); 2686 TAILQ_FOREACH(sq, &ctrlr->connected_sqs, tailq) { 2687 if (q_addr(&sq->mapping) >= map_start && q_addr(&sq->mapping) <= map_end) { 2688 unmap_q(ctrlr, &sq->mapping); 2689 sq->sq_state = VFIO_USER_SQ_INACTIVE; 2690 } 2691 2692 cq = ctrlr->cqs[sq->cqid]; 2693 if (q_addr(&cq->mapping) >= map_start && q_addr(&cq->mapping) <= map_end) { 2694 unmap_q(ctrlr, &cq->mapping); 2695 } 2696 } 2697 2698 if (ctrlr->sdbl != NULL) { 2699 size_t i; 2700 2701 for (i = 0; i < NVMF_VFIO_USER_SHADOW_DOORBELLS_BUFFER_COUNT; i++) { 2702 const void *const iov_base = ctrlr->sdbl->iovs[i].iov_base; 2703 2704 if (iov_base >= map_start && iov_base < map_end) { 2705 copy_doorbells(ctrlr, 2706 ctrlr->sdbl->shadow_doorbells, 2707 ctrlr->bar0_doorbells); 2708 vfio_user_ctrlr_switch_doorbells(ctrlr, false); 2709 free_sdbl(endpoint->vfu_ctx, ctrlr->sdbl); 2710 ctrlr->sdbl = NULL; 2711 break; 2712 } 2713 } 2714 } 2715 2716 pthread_mutex_unlock(&endpoint->lock); 2717 } 2718 2719 if (info->prot == (PROT_WRITE | PROT_READ) && !is_peer_same_process(endpoint)) { 2720 ret = spdk_mem_unregister(info->mapping.iov_base, info->mapping.iov_len); 2721 if 
(ret) { 2722 SPDK_ERRLOG("Memory region unregister %p-%p failed, ret=%d\n", 2723 map_start, map_end, ret); 2724 } 2725 } 2726 } 2727 2728 /* Used to initiate a controller-level reset or a controller shutdown. */ 2729 static void 2730 disable_ctrlr(struct nvmf_vfio_user_ctrlr *vu_ctrlr) 2731 { 2732 SPDK_DEBUGLOG(nvmf_vfio, "%s: disabling controller\n", 2733 ctrlr_id(vu_ctrlr)); 2734 2735 /* Unmap Admin queue. */ 2736 2737 assert(vu_ctrlr->sqs[0] != NULL); 2738 assert(vu_ctrlr->cqs[0] != NULL); 2739 2740 unmap_q(vu_ctrlr, &vu_ctrlr->sqs[0]->mapping); 2741 unmap_q(vu_ctrlr, &vu_ctrlr->cqs[0]->mapping); 2742 2743 vu_ctrlr->sqs[0]->size = 0; 2744 *sq_headp(vu_ctrlr->sqs[0]) = 0; 2745 2746 vu_ctrlr->sqs[0]->sq_state = VFIO_USER_SQ_INACTIVE; 2747 2748 vu_ctrlr->cqs[0]->size = 0; 2749 *cq_tailp(vu_ctrlr->cqs[0]) = 0; 2750 2751 /* 2752 * For PCIe controller reset or shutdown, we will drop all AER 2753 * responses. 2754 */ 2755 nvmf_ctrlr_abort_aer(vu_ctrlr->ctrlr); 2756 2757 /* Free the shadow doorbell buffer. */ 2758 vfio_user_ctrlr_switch_doorbells(vu_ctrlr, false); 2759 free_sdbl(vu_ctrlr->endpoint->vfu_ctx, vu_ctrlr->sdbl); 2760 vu_ctrlr->sdbl = NULL; 2761 } 2762 2763 /* Used to re-enable the controller after a controller-level reset. */ 2764 static int 2765 enable_ctrlr(struct nvmf_vfio_user_ctrlr *vu_ctrlr) 2766 { 2767 int err; 2768 2769 assert(vu_ctrlr != NULL); 2770 2771 SPDK_DEBUGLOG(nvmf_vfio, "%s: enabling controller\n", 2772 ctrlr_id(vu_ctrlr)); 2773 2774 err = acq_setup(vu_ctrlr); 2775 if (err != 0) { 2776 return err; 2777 } 2778 2779 err = asq_setup(vu_ctrlr); 2780 if (err != 0) { 2781 return err; 2782 } 2783 2784 vu_ctrlr->sqs[0]->sq_state = VFIO_USER_SQ_ACTIVE; 2785 2786 return 0; 2787 } 2788 2789 static int 2790 nvmf_vfio_user_prop_req_rsp_set(struct nvmf_vfio_user_req *req, 2791 struct nvmf_vfio_user_sq *sq) 2792 { 2793 struct nvmf_vfio_user_ctrlr *vu_ctrlr; 2794 union spdk_nvme_cc_register cc, diff; 2795 2796 assert(req->req.cmd->prop_set_cmd.fctype == SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET); 2797 assert(sq->ctrlr != NULL); 2798 vu_ctrlr = sq->ctrlr; 2799 2800 if (req->req.cmd->prop_set_cmd.ofst != offsetof(struct spdk_nvme_registers, cc)) { 2801 return 0; 2802 } 2803 2804 cc.raw = req->req.cmd->prop_set_cmd.value.u64; 2805 diff.raw = cc.raw ^ req->cc.raw; 2806 2807 if (diff.bits.en) { 2808 if (cc.bits.en) { 2809 int ret = enable_ctrlr(vu_ctrlr); 2810 if (ret) { 2811 SPDK_ERRLOG("%s: failed to enable ctrlr\n", ctrlr_id(vu_ctrlr)); 2812 return ret; 2813 } 2814 vu_ctrlr->reset_shn = false; 2815 } else { 2816 vu_ctrlr->reset_shn = true; 2817 } 2818 } 2819 2820 if (diff.bits.shn) { 2821 if (cc.bits.shn == SPDK_NVME_SHN_NORMAL || cc.bits.shn == SPDK_NVME_SHN_ABRUPT) { 2822 vu_ctrlr->reset_shn = true; 2823 } 2824 } 2825 2826 if (vu_ctrlr->reset_shn) { 2827 disable_ctrlr(vu_ctrlr); 2828 } 2829 return 0; 2830 } 2831 2832 static int 2833 nvmf_vfio_user_prop_req_rsp(struct nvmf_vfio_user_req *req, void *cb_arg) 2834 { 2835 struct nvmf_vfio_user_sq *sq = cb_arg; 2836 2837 assert(sq != NULL); 2838 assert(req != NULL); 2839 2840 if (req->req.cmd->prop_get_cmd.fctype == SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET) { 2841 assert(sq->ctrlr != NULL); 2842 assert(req != NULL); 2843 2844 memcpy(req->req.data, 2845 &req->req.rsp->prop_get_rsp.value.u64, 2846 req->req.length); 2847 return 0; 2848 } 2849 2850 return nvmf_vfio_user_prop_req_rsp_set(req, sq); 2851 } 2852 2853 /* 2854 * Handles a write at offset 0x1000 or more; this is the non-mapped path when a 2855 * doorbell is written via access_bar0_fn(). 
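 *
 * For reference (a sketch of the spec-defined layout, not code from this
 * file), the doorbell registers live in BAR0 at:
 *
 *   SQ y tail doorbell: 0x1000 + (2 * y)     * (4 << CAP.DSTRD)
 *   CQ y head doorbell: 0x1000 + (2 * y + 1) * (4 << CAP.DSTRD)
 *
 * so with a 4-byte stride a write at offset 0x1008, for example, targets
 * the sqid:1 tail doorbell; the code below recovers the index by
 * subtracting NVME_DOORBELLS_OFFSET and dividing by 4.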
2856 * 2857 * DSTRD is set to fixed value 0 for NVMf. 2858 * 2859 */ 2860 static int 2861 handle_dbl_access(struct nvmf_vfio_user_ctrlr *ctrlr, uint32_t *buf, 2862 const size_t count, loff_t pos, const bool is_write) 2863 { 2864 struct nvmf_vfio_user_poll_group *group; 2865 2866 assert(ctrlr != NULL); 2867 assert(buf != NULL); 2868 2869 if (spdk_unlikely(!is_write)) { 2870 SPDK_WARNLOG("%s: host tried to read BAR0 doorbell %#lx\n", 2871 ctrlr_id(ctrlr), pos); 2872 errno = EPERM; 2873 return -1; 2874 } 2875 2876 if (spdk_unlikely(count != sizeof(uint32_t))) { 2877 SPDK_ERRLOG("%s: bad doorbell buffer size %ld\n", 2878 ctrlr_id(ctrlr), count); 2879 errno = EINVAL; 2880 return -1; 2881 } 2882 2883 pos -= NVME_DOORBELLS_OFFSET; 2884 2885 /* pos must be dword aligned */ 2886 if (spdk_unlikely((pos & 0x3) != 0)) { 2887 SPDK_ERRLOG("%s: bad doorbell offset %#lx\n", ctrlr_id(ctrlr), pos); 2888 errno = EINVAL; 2889 return -1; 2890 } 2891 2892 /* convert byte offset to array index */ 2893 pos >>= 2; 2894 2895 if (spdk_unlikely(pos >= NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR * 2)) { 2896 SPDK_ERRLOG("%s: bad doorbell index %#lx\n", ctrlr_id(ctrlr), pos); 2897 errno = EINVAL; 2898 return -1; 2899 } 2900 2901 ctrlr->bar0_doorbells[pos] = *buf; 2902 spdk_wmb(); 2903 2904 group = ctrlr_to_poll_group(ctrlr); 2905 if (pos == 1) { 2906 group->stats.cqh_admin_writes++; 2907 } else if (pos & 1) { 2908 group->stats.cqh_io_writes++; 2909 } 2910 2911 SPDK_DEBUGLOG(vfio_user_db, "%s: updating BAR0 doorbell %s:%ld to %u\n", 2912 ctrlr_id(ctrlr), (pos & 1) ? "cqid" : "sqid", 2913 pos / 2, *buf); 2914 2915 2916 return 0; 2917 } 2918 2919 static size_t 2920 vfio_user_property_access(struct nvmf_vfio_user_ctrlr *vu_ctrlr, 2921 char *buf, size_t count, loff_t pos, 2922 bool is_write) 2923 { 2924 struct nvmf_vfio_user_req *req; 2925 const struct spdk_nvmf_registers *regs; 2926 2927 if ((count != 4) && (count != 8)) { 2928 errno = EINVAL; 2929 return -1; 2930 } 2931 2932 /* Construct a Fabric Property Get/Set command and send it */ 2933 req = get_nvmf_vfio_user_req(vu_ctrlr->sqs[0]); 2934 if (req == NULL) { 2935 errno = ENOBUFS; 2936 return -1; 2937 } 2938 regs = spdk_nvmf_ctrlr_get_regs(vu_ctrlr->ctrlr); 2939 req->cc.raw = regs->cc.raw; 2940 2941 req->cb_fn = nvmf_vfio_user_prop_req_rsp; 2942 req->cb_arg = vu_ctrlr->sqs[0]; 2943 req->req.cmd->prop_set_cmd.opcode = SPDK_NVME_OPC_FABRIC; 2944 req->req.cmd->prop_set_cmd.cid = 0; 2945 if (count == 4) { 2946 req->req.cmd->prop_set_cmd.attrib.size = 0; 2947 } else { 2948 req->req.cmd->prop_set_cmd.attrib.size = 1; 2949 } 2950 req->req.cmd->prop_set_cmd.ofst = pos; 2951 if (is_write) { 2952 req->req.cmd->prop_set_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_SET; 2953 if (req->req.cmd->prop_set_cmd.attrib.size) { 2954 req->req.cmd->prop_set_cmd.value.u64 = *(uint64_t *)buf; 2955 } else { 2956 req->req.cmd->prop_set_cmd.value.u32.high = 0; 2957 req->req.cmd->prop_set_cmd.value.u32.low = *(uint32_t *)buf; 2958 } 2959 } else { 2960 req->req.cmd->prop_get_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET; 2961 } 2962 req->req.length = count; 2963 req->req.data = buf; 2964 2965 spdk_nvmf_request_exec_fabrics(&req->req); 2966 2967 return count; 2968 } 2969 2970 static ssize_t 2971 access_bar0_fn(vfu_ctx_t *vfu_ctx, char *buf, size_t count, loff_t pos, 2972 bool is_write) 2973 { 2974 struct nvmf_vfio_user_endpoint *endpoint = vfu_get_private(vfu_ctx); 2975 struct nvmf_vfio_user_ctrlr *ctrlr; 2976 int ret; 2977 2978 ctrlr = endpoint->ctrlr; 2979 if 
(spdk_unlikely(endpoint->need_async_destroy || !ctrlr)) { 2980 errno = EIO; 2981 return -1; 2982 } 2983 2984 if (pos >= NVME_DOORBELLS_OFFSET) { 2985 /* 2986 * The fact that the doorbells can be memory mapped doesn't mean 2987 * that the client (VFIO in QEMU) is obliged to memory map them, 2988 * it might still elect to access them via regular read/write; 2989 * we might also have had disable_mappable_bar0 set. 2990 */ 2991 ret = handle_dbl_access(ctrlr, (uint32_t *)buf, count, 2992 pos, is_write); 2993 if (ret == 0) { 2994 return count; 2995 } 2996 return ret; 2997 } 2998 2999 return vfio_user_property_access(ctrlr, buf, count, pos, is_write); 3000 } 3001 3002 static ssize_t 3003 access_pci_config(vfu_ctx_t *vfu_ctx, char *buf, size_t count, loff_t offset, 3004 bool is_write) 3005 { 3006 struct nvmf_vfio_user_endpoint *endpoint = vfu_get_private(vfu_ctx); 3007 3008 if (is_write) { 3009 SPDK_ERRLOG("%s: write %#lx-%#lx not supported\n", 3010 endpoint_id(endpoint), offset, offset + count); 3011 errno = EINVAL; 3012 return -1; 3013 } 3014 3015 if (offset + count > NVME_REG_CFG_SIZE) { 3016 SPDK_ERRLOG("%s: access past end of extended PCI configuration space, want=%ld+%ld, max=%d\n", 3017 endpoint_id(endpoint), offset, count, 3018 NVME_REG_CFG_SIZE); 3019 errno = ERANGE; 3020 return -1; 3021 } 3022 3023 memcpy(buf, ((unsigned char *)endpoint->pci_config_space) + offset, count); 3024 3025 return count; 3026 } 3027 3028 static void 3029 vfio_user_log(vfu_ctx_t *vfu_ctx, int level, char const *msg) 3030 { 3031 struct nvmf_vfio_user_endpoint *endpoint = vfu_get_private(vfu_ctx); 3032 3033 if (level >= LOG_DEBUG) { 3034 SPDK_DEBUGLOG(nvmf_vfio, "%s: %s\n", endpoint_id(endpoint), msg); 3035 } else if (level >= LOG_INFO) { 3036 SPDK_INFOLOG(nvmf_vfio, "%s: %s\n", endpoint_id(endpoint), msg); 3037 } else if (level >= LOG_NOTICE) { 3038 SPDK_NOTICELOG("%s: %s\n", endpoint_id(endpoint), msg); 3039 } else if (level >= LOG_WARNING) { 3040 SPDK_WARNLOG("%s: %s\n", endpoint_id(endpoint), msg); 3041 } else { 3042 SPDK_ERRLOG("%s: %s\n", endpoint_id(endpoint), msg); 3043 } 3044 } 3045 3046 static int 3047 vfio_user_get_log_level(void) 3048 { 3049 int level; 3050 3051 if (SPDK_DEBUGLOG_FLAG_ENABLED("nvmf_vfio")) { 3052 return LOG_DEBUG; 3053 } 3054 3055 level = spdk_log_to_syslog_level(spdk_log_get_level()); 3056 if (level < 0) { 3057 return LOG_ERR; 3058 } 3059 3060 return level; 3061 } 3062 3063 static void 3064 init_pci_config_space(vfu_pci_config_space_t *p) 3065 { 3066 /* MLBAR */ 3067 p->hdr.bars[0].raw = 0x0; 3068 /* MUBAR */ 3069 p->hdr.bars[1].raw = 0x0; 3070 3071 /* vendor specific, let's set them to zero for now */ 3072 p->hdr.bars[3].raw = 0x0; 3073 p->hdr.bars[4].raw = 0x0; 3074 p->hdr.bars[5].raw = 0x0; 3075 3076 /* enable INTx */ 3077 p->hdr.intr.ipin = 0x1; 3078 } 3079 3080 struct ctrlr_quiesce_ctx { 3081 struct nvmf_vfio_user_endpoint *endpoint; 3082 struct nvmf_vfio_user_poll_group *group; 3083 int status; 3084 }; 3085 3086 static void ctrlr_quiesce(struct nvmf_vfio_user_ctrlr *vu_ctrlr); 3087 3088 static void 3089 _vfio_user_endpoint_resume_done_msg(void *ctx) 3090 { 3091 struct nvmf_vfio_user_endpoint *endpoint = ctx; 3092 struct nvmf_vfio_user_ctrlr *vu_ctrlr = endpoint->ctrlr; 3093 3094 endpoint->need_resume = false; 3095 3096 if (!vu_ctrlr) { 3097 return; 3098 } 3099 3100 if (!vu_ctrlr->queued_quiesce) { 3101 vu_ctrlr->state = VFIO_USER_CTRLR_RUNNING; 3102 3103 /* 3104 * We might have ignored new SQ entries while we were quiesced: 3105 * kick ourselves so we'll definitely check again 
while in 3106 * VFIO_USER_CTRLR_RUNNING state. 3107 */ 3108 if (in_interrupt_mode(endpoint->transport)) { 3109 ctrlr_kick(vu_ctrlr); 3110 } 3111 return; 3112 } 3113 3114 3115 /* 3116 * Basically, once we call `vfu_device_quiesced` the device is 3117 * unquiesced from libvfio-user's perspective, so from the moment 3118 * `vfio_user_quiesce_done` returns libvfio-user might quiesce the device 3119 * again. However, because resuming the NVMf subsystem is an asynchronous 3120 * operation, this quiesce might come _before_ the NVMf subsystem has 3121 * been resumed, so in the callback of `spdk_nvmf_subsystem_resume` we 3122 * need to check whether a quiesce was requested. 3123 */ 3124 SPDK_DEBUGLOG(nvmf_vfio, "%s has queued quiesce event, quiesce again\n", 3125 ctrlr_id(vu_ctrlr)); 3126 ctrlr_quiesce(vu_ctrlr); 3127 } 3128 3129 static void 3130 vfio_user_endpoint_resume_done(struct spdk_nvmf_subsystem *subsystem, 3131 void *cb_arg, int status) 3132 { 3133 struct nvmf_vfio_user_endpoint *endpoint = cb_arg; 3134 struct nvmf_vfio_user_ctrlr *vu_ctrlr = endpoint->ctrlr; 3135 3136 SPDK_DEBUGLOG(nvmf_vfio, "%s resume done with status %d\n", endpoint_id(endpoint), status); 3137 3138 if (!vu_ctrlr) { 3139 return; 3140 } 3141 3142 spdk_thread_send_msg(vu_ctrlr->thread, _vfio_user_endpoint_resume_done_msg, endpoint); 3143 } 3144 3145 static void 3146 vfio_user_quiesce_done(void *ctx) 3147 { 3148 struct ctrlr_quiesce_ctx *quiesce_ctx = ctx; 3149 struct nvmf_vfio_user_endpoint *endpoint = quiesce_ctx->endpoint; 3150 struct nvmf_vfio_user_ctrlr *vu_ctrlr = endpoint->ctrlr; 3151 int ret; 3152 3153 if (!vu_ctrlr) { 3154 free(quiesce_ctx); 3155 return; 3156 } 3157 3158 SPDK_DEBUGLOG(nvmf_vfio, "%s device quiesced\n", ctrlr_id(vu_ctrlr)); 3159 3160 assert(vu_ctrlr->state == VFIO_USER_CTRLR_PAUSING); 3161 vu_ctrlr->state = VFIO_USER_CTRLR_PAUSED; 3162 vfu_device_quiesced(endpoint->vfu_ctx, quiesce_ctx->status); 3163 vu_ctrlr->queued_quiesce = false; 3164 free(quiesce_ctx); 3165 3166 /* `vfu_device_quiesced` can change the migration state, 3167 * so we need to re-check `vu_ctrlr->state`. 3168 */ 3169 if (vu_ctrlr->state == VFIO_USER_CTRLR_MIGRATING) { 3170 SPDK_DEBUGLOG(nvmf_vfio, "%s is in MIGRATION state\n", ctrlr_id(vu_ctrlr)); 3171 return; 3172 } 3173 3174 SPDK_DEBUGLOG(nvmf_vfio, "%s starting to resume\n", ctrlr_id(vu_ctrlr)); 3175 vu_ctrlr->state = VFIO_USER_CTRLR_RESUMING; 3176 ret = spdk_nvmf_subsystem_resume((struct spdk_nvmf_subsystem *)endpoint->subsystem, 3177 vfio_user_endpoint_resume_done, endpoint); 3178 if (ret < 0) { 3179 vu_ctrlr->state = VFIO_USER_CTRLR_PAUSED; 3180 SPDK_ERRLOG("%s: failed to resume, ret=%d\n", endpoint_id(endpoint), ret); 3181 } 3182 } 3183 3184 static void 3185 vfio_user_pause_done(struct spdk_nvmf_subsystem *subsystem, 3186 void *ctx, int status) 3187 { 3188 struct ctrlr_quiesce_ctx *quiesce_ctx = ctx; 3189 struct nvmf_vfio_user_endpoint *endpoint = quiesce_ctx->endpoint; 3190 struct nvmf_vfio_user_ctrlr *vu_ctrlr = endpoint->ctrlr; 3191 3192 if (!vu_ctrlr) { 3193 free(quiesce_ctx); 3194 return; 3195 } 3196 3197 quiesce_ctx->status = status; 3198 3199 SPDK_DEBUGLOG(nvmf_vfio, "%s pause done with status %d\n", 3200 ctrlr_id(vu_ctrlr), status); 3201 3202 spdk_thread_send_msg(vu_ctrlr->thread, 3203 vfio_user_quiesce_done, ctx); 3204 } 3205 3206 /* 3207 * Ensure that, for this PG, we've stopped running in nvmf_vfio_user_sq_poll(); 3208 * we've already set ctrlr->state, so we won't process new entries, but we need 3209 * to ensure that this PG is quiesced.
This only works because there's no 3210 * callback context set up between polling the SQ and spdk_nvmf_request_exec(). 3211 * 3212 * Once we've walked all PGs, we need to pause any submitted I/O via 3213 * spdk_nvmf_subsystem_pause(SPDK_NVME_GLOBAL_NS_TAG). 3214 */ 3215 static void 3216 vfio_user_quiesce_pg(void *ctx) 3217 { 3218 struct ctrlr_quiesce_ctx *quiesce_ctx = ctx; 3219 struct nvmf_vfio_user_endpoint *endpoint = quiesce_ctx->endpoint; 3220 struct nvmf_vfio_user_ctrlr *vu_ctrlr = endpoint->ctrlr; 3221 struct nvmf_vfio_user_poll_group *vu_group = quiesce_ctx->group; 3222 struct spdk_nvmf_subsystem *subsystem = endpoint->subsystem; 3223 int ret; 3224 3225 SPDK_DEBUGLOG(nvmf_vfio, "quiesced pg:%p\n", vu_group); 3226 3227 if (!vu_ctrlr) { 3228 free(quiesce_ctx); 3229 return; 3230 } 3231 3232 quiesce_ctx->group = TAILQ_NEXT(vu_group, link); 3233 if (quiesce_ctx->group != NULL) { 3234 spdk_thread_send_msg(poll_group_to_thread(quiesce_ctx->group), 3235 vfio_user_quiesce_pg, quiesce_ctx); 3236 return; 3237 } 3238 3239 ret = spdk_nvmf_subsystem_pause(subsystem, SPDK_NVME_GLOBAL_NS_TAG, 3240 vfio_user_pause_done, quiesce_ctx); 3241 if (ret < 0) { 3242 SPDK_ERRLOG("%s: failed to pause, ret=%d\n", 3243 endpoint_id(endpoint), ret); 3244 vu_ctrlr->state = VFIO_USER_CTRLR_RUNNING; 3245 fail_ctrlr(vu_ctrlr); 3246 free(quiesce_ctx); 3247 } 3248 } 3249 3250 static void 3251 ctrlr_quiesce(struct nvmf_vfio_user_ctrlr *vu_ctrlr) 3252 { 3253 struct ctrlr_quiesce_ctx *quiesce_ctx; 3254 3255 vu_ctrlr->state = VFIO_USER_CTRLR_PAUSING; 3256 3257 quiesce_ctx = calloc(1, sizeof(*quiesce_ctx)); 3258 if (!quiesce_ctx) { 3259 SPDK_ERRLOG("Failed to allocate subsystem pause context\n"); 3260 assert(false); 3261 return; 3262 } 3263 3264 quiesce_ctx->endpoint = vu_ctrlr->endpoint; 3265 quiesce_ctx->status = 0; 3266 quiesce_ctx->group = TAILQ_FIRST(&vu_ctrlr->transport->poll_groups); 3267 3268 spdk_thread_send_msg(poll_group_to_thread(quiesce_ctx->group), 3269 vfio_user_quiesce_pg, quiesce_ctx); 3270 } 3271 3272 static int 3273 vfio_user_dev_quiesce_cb(vfu_ctx_t *vfu_ctx) 3274 { 3275 struct nvmf_vfio_user_endpoint *endpoint = vfu_get_private(vfu_ctx); 3276 struct spdk_nvmf_subsystem *subsystem = endpoint->subsystem; 3277 struct nvmf_vfio_user_ctrlr *vu_ctrlr = endpoint->ctrlr; 3278 3279 if (!vu_ctrlr) { 3280 return 0; 3281 } 3282 3283 /* NVMf library will destruct controller when no 3284 * connected queue pairs. 3285 */ 3286 if (!nvmf_subsystem_get_ctrlr(subsystem, vu_ctrlr->cntlid)) { 3287 return 0; 3288 } 3289 3290 SPDK_DEBUGLOG(nvmf_vfio, "%s starts to quiesce\n", ctrlr_id(vu_ctrlr)); 3291 3292 /* There is no race condition here as device quiesce callback 3293 * and nvmf_prop_set_cc() are running in the same thread context. 
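 * If the controller is not enabled, not ready, or has already completed
 * shutdown, there is nothing to quiesce, so we report success right away
 * below.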
3294 */ 3295 if (!vu_ctrlr->ctrlr->vcprop.cc.bits.en) { 3296 return 0; 3297 } else if (!vu_ctrlr->ctrlr->vcprop.csts.bits.rdy) { 3298 return 0; 3299 } else if (vu_ctrlr->ctrlr->vcprop.csts.bits.shst == SPDK_NVME_SHST_COMPLETE) { 3300 return 0; 3301 } 3302 3303 switch (vu_ctrlr->state) { 3304 case VFIO_USER_CTRLR_PAUSED: 3305 case VFIO_USER_CTRLR_MIGRATING: 3306 return 0; 3307 case VFIO_USER_CTRLR_RUNNING: 3308 ctrlr_quiesce(vu_ctrlr); 3309 break; 3310 case VFIO_USER_CTRLR_RESUMING: 3311 vu_ctrlr->queued_quiesce = true; 3312 SPDK_DEBUGLOG(nvmf_vfio, "%s is busy to quiesce, current state %u\n", ctrlr_id(vu_ctrlr), 3313 vu_ctrlr->state); 3314 break; 3315 default: 3316 assert(vu_ctrlr->state != VFIO_USER_CTRLR_PAUSING); 3317 break; 3318 } 3319 3320 errno = EBUSY; 3321 return -1; 3322 } 3323 3324 static void 3325 vfio_user_ctrlr_dump_migr_data(const char *name, 3326 struct vfio_user_nvme_migr_state *migr_data, 3327 struct nvmf_vfio_user_shadow_doorbells *sdbl) 3328 { 3329 struct spdk_nvmf_registers *regs; 3330 struct nvme_migr_sq_state *sq; 3331 struct nvme_migr_cq_state *cq; 3332 uint32_t *doorbell_base; 3333 uint32_t i; 3334 3335 SPDK_NOTICELOG("Dump %s\n", name); 3336 3337 regs = &migr_data->nvmf_data.regs; 3338 doorbell_base = (uint32_t *)&migr_data->doorbells; 3339 3340 SPDK_NOTICELOG("Registers\n"); 3341 SPDK_NOTICELOG("CSTS 0x%x\n", regs->csts.raw); 3342 SPDK_NOTICELOG("CAP 0x%"PRIx64"\n", regs->cap.raw); 3343 SPDK_NOTICELOG("VS 0x%x\n", regs->vs.raw); 3344 SPDK_NOTICELOG("CC 0x%x\n", regs->cc.raw); 3345 SPDK_NOTICELOG("AQA 0x%x\n", regs->aqa.raw); 3346 SPDK_NOTICELOG("ASQ 0x%"PRIx64"\n", regs->asq); 3347 SPDK_NOTICELOG("ACQ 0x%"PRIx64"\n", regs->acq); 3348 3349 SPDK_NOTICELOG("Number of IO Queues %u\n", migr_data->ctrlr_header.num_io_queues); 3350 3351 if (sdbl != NULL) { 3352 SPDK_NOTICELOG("shadow doorbell buffer=%#lx\n", 3353 migr_data->ctrlr_header.shadow_doorbell_buffer); 3354 SPDK_NOTICELOG("eventidx buffer=%#lx\n", 3355 migr_data->ctrlr_header.eventidx_buffer); 3356 } 3357 3358 for (i = 0; i < NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR; i++) { 3359 sq = &migr_data->qps[i].sq; 3360 cq = &migr_data->qps[i].cq; 3361 3362 if (sq->size) { 3363 SPDK_NOTICELOG("sqid:%u, bar0_doorbell:%u\n", sq->sqid, doorbell_base[i * 2]); 3364 if (i > 0 && sdbl != NULL) { 3365 SPDK_NOTICELOG("sqid:%u, shadow_doorbell:%u, eventidx:%u\n", 3366 sq->sqid, 3367 sdbl->shadow_doorbells[queue_index(i, false)], 3368 sdbl->eventidxs[queue_index(i, false)]); 3369 } 3370 SPDK_NOTICELOG("SQ sqid:%u, cqid:%u, sqhead:%u, size:%u, dma_addr:0x%"PRIx64"\n", 3371 sq->sqid, sq->cqid, sq->head, sq->size, sq->dma_addr); 3372 } 3373 3374 if (cq->size) { 3375 SPDK_NOTICELOG("cqid:%u, bar0_doorbell:%u\n", cq->cqid, doorbell_base[i * 2 + 1]); 3376 if (i > 0 && sdbl != NULL) { 3377 SPDK_NOTICELOG("cqid:%u, shadow_doorbell:%u, eventidx:%u\n", 3378 cq->cqid, 3379 sdbl->shadow_doorbells[queue_index(i, true)], 3380 sdbl->eventidxs[queue_index(i, true)]); 3381 } 3382 SPDK_NOTICELOG("CQ cqid:%u, phase:%u, cqtail:%u, size:%u, iv:%u, ien:%u, dma_addr:0x%"PRIx64"\n", 3383 cq->cqid, cq->phase, cq->tail, cq->size, cq->iv, cq->ien, cq->dma_addr); 3384 } 3385 } 3386 3387 SPDK_NOTICELOG("%s Dump Done\n", name); 3388 } 3389 3390 /* Read region 9 content and restore it to migration data structures */ 3391 static int 3392 vfio_user_migr_stream_to_data(struct nvmf_vfio_user_endpoint *endpoint, 3393 struct vfio_user_nvme_migr_state *migr_state) 3394 { 3395 void *data_ptr = endpoint->migr_data; 3396 3397 /* Load vfio_user_nvme_migr_header first */ 3398 
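/* The header records the offsets and lengths of the nvmf controller data,
 * the queue pair state, the BAR0 doorbells and the PCI config space within
 * the migration region; those values are used below to locate each section.
 */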
memcpy(&migr_state->ctrlr_header, data_ptr, sizeof(struct vfio_user_nvme_migr_header)); 3399 /* TODO: version check */ 3400 if (migr_state->ctrlr_header.magic != VFIO_USER_NVME_MIGR_MAGIC) { 3401 SPDK_ERRLOG("%s: bad magic number %x\n", endpoint_id(endpoint), migr_state->ctrlr_header.magic); 3402 return -EINVAL; 3403 } 3404 3405 /* Load nvmf controller data */ 3406 data_ptr = endpoint->migr_data + migr_state->ctrlr_header.nvmf_data_offset; 3407 memcpy(&migr_state->nvmf_data, data_ptr, migr_state->ctrlr_header.nvmf_data_len); 3408 3409 /* Load queue pairs */ 3410 data_ptr = endpoint->migr_data + migr_state->ctrlr_header.qp_offset; 3411 memcpy(&migr_state->qps, data_ptr, migr_state->ctrlr_header.qp_len); 3412 3413 /* Load doorbells */ 3414 data_ptr = endpoint->migr_data + migr_state->ctrlr_header.bar_offset[VFU_PCI_DEV_BAR0_REGION_IDX]; 3415 memcpy(&migr_state->doorbells, data_ptr, 3416 migr_state->ctrlr_header.bar_len[VFU_PCI_DEV_BAR0_REGION_IDX]); 3417 3418 /* Load CFG */ 3419 data_ptr = endpoint->migr_data + migr_state->ctrlr_header.bar_offset[VFU_PCI_DEV_CFG_REGION_IDX]; 3420 memcpy(&migr_state->cfg, data_ptr, migr_state->ctrlr_header.bar_len[VFU_PCI_DEV_CFG_REGION_IDX]); 3421 3422 return 0; 3423 } 3424 3425 3426 static void 3427 vfio_user_migr_ctrlr_save_data(struct nvmf_vfio_user_ctrlr *vu_ctrlr) 3428 { 3429 struct spdk_nvmf_ctrlr *ctrlr = vu_ctrlr->ctrlr; 3430 struct nvmf_vfio_user_endpoint *endpoint = vu_ctrlr->endpoint; 3431 struct nvmf_vfio_user_sq *sq; 3432 struct nvmf_vfio_user_cq *cq; 3433 uint64_t data_offset; 3434 void *data_ptr; 3435 uint32_t *doorbell_base; 3436 uint32_t i = 0; 3437 uint16_t sqid, cqid; 3438 struct vfio_user_nvme_migr_state migr_state = { 3439 .nvmf_data = { 3440 .data_size = offsetof(struct spdk_nvmf_ctrlr_migr_data, unused), 3441 .regs_size = sizeof(struct spdk_nvmf_registers), 3442 .feat_size = sizeof(struct spdk_nvmf_ctrlr_feat) 3443 } 3444 }; 3445 3446 /* Save all data to vfio_user_nvme_migr_state first, then we will 3447 * copy it to device migration region at last. 
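 * The copy below lays the sections out in the order: header, nvmf
 * controller data, queue pair state, BAR0 doorbells, PCI config space,
 * recording each section's offset and length in ctrlr_header as it goes.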
3448 */ 3449 3450 /* save magic number */ 3451 migr_state.ctrlr_header.magic = VFIO_USER_NVME_MIGR_MAGIC; 3452 3453 /* save controller data */ 3454 spdk_nvmf_ctrlr_save_migr_data(ctrlr, &migr_state.nvmf_data); 3455 3456 /* save connected queue pairs */ 3457 TAILQ_FOREACH(sq, &vu_ctrlr->connected_sqs, tailq) { 3458 /* save sq */ 3459 sqid = sq->qid; 3460 migr_state.qps[sqid].sq.sqid = sq->qid; 3461 migr_state.qps[sqid].sq.cqid = sq->cqid; 3462 migr_state.qps[sqid].sq.head = *sq_headp(sq); 3463 migr_state.qps[sqid].sq.size = sq->size; 3464 migr_state.qps[sqid].sq.dma_addr = sq->mapping.prp1; 3465 3466 /* save cq, for shared cq case, cq may be saved multiple times */ 3467 cqid = sq->cqid; 3468 cq = vu_ctrlr->cqs[cqid]; 3469 migr_state.qps[cqid].cq.cqid = cqid; 3470 migr_state.qps[cqid].cq.tail = *cq_tailp(cq); 3471 migr_state.qps[cqid].cq.ien = cq->ien; 3472 migr_state.qps[cqid].cq.iv = cq->iv; 3473 migr_state.qps[cqid].cq.size = cq->size; 3474 migr_state.qps[cqid].cq.phase = cq->phase; 3475 migr_state.qps[cqid].cq.dma_addr = cq->mapping.prp1; 3476 i++; 3477 } 3478 3479 assert(i > 0); 3480 migr_state.ctrlr_header.num_io_queues = i - 1; 3481 3482 /* Save doorbells */ 3483 doorbell_base = (uint32_t *)&migr_state.doorbells; 3484 memcpy(doorbell_base, (void *)vu_ctrlr->bar0_doorbells, NVMF_VFIO_USER_DOORBELLS_SIZE); 3485 3486 /* Save PCI configuration space */ 3487 memcpy(&migr_state.cfg, (void *)endpoint->pci_config_space, NVME_REG_CFG_SIZE); 3488 3489 /* Save all data to device migration region */ 3490 data_ptr = endpoint->migr_data; 3491 3492 /* Copy nvmf controller data */ 3493 data_offset = sizeof(struct vfio_user_nvme_migr_header); 3494 data_ptr += data_offset; 3495 migr_state.ctrlr_header.nvmf_data_offset = data_offset; 3496 migr_state.ctrlr_header.nvmf_data_len = sizeof(struct spdk_nvmf_ctrlr_migr_data); 3497 memcpy(data_ptr, &migr_state.nvmf_data, sizeof(struct spdk_nvmf_ctrlr_migr_data)); 3498 3499 /* Copy queue pairs */ 3500 data_offset += sizeof(struct spdk_nvmf_ctrlr_migr_data); 3501 data_ptr += sizeof(struct spdk_nvmf_ctrlr_migr_data); 3502 migr_state.ctrlr_header.qp_offset = data_offset; 3503 migr_state.ctrlr_header.qp_len = i * (sizeof(struct nvme_migr_sq_state) + sizeof( 3504 struct nvme_migr_cq_state)); 3505 memcpy(data_ptr, &migr_state.qps, migr_state.ctrlr_header.qp_len); 3506 3507 /* Copy doorbells */ 3508 data_offset += migr_state.ctrlr_header.qp_len; 3509 data_ptr += migr_state.ctrlr_header.qp_len; 3510 migr_state.ctrlr_header.bar_offset[VFU_PCI_DEV_BAR0_REGION_IDX] = data_offset; 3511 migr_state.ctrlr_header.bar_len[VFU_PCI_DEV_BAR0_REGION_IDX] = NVMF_VFIO_USER_DOORBELLS_SIZE; 3512 memcpy(data_ptr, &migr_state.doorbells, NVMF_VFIO_USER_DOORBELLS_SIZE); 3513 3514 /* Copy CFG */ 3515 data_offset += NVMF_VFIO_USER_DOORBELLS_SIZE; 3516 data_ptr += NVMF_VFIO_USER_DOORBELLS_SIZE; 3517 migr_state.ctrlr_header.bar_offset[VFU_PCI_DEV_CFG_REGION_IDX] = data_offset; 3518 migr_state.ctrlr_header.bar_len[VFU_PCI_DEV_CFG_REGION_IDX] = NVME_REG_CFG_SIZE; 3519 memcpy(data_ptr, &migr_state.cfg, NVME_REG_CFG_SIZE); 3520 3521 /* copy shadow doorbells */ 3522 if (vu_ctrlr->sdbl != NULL) { 3523 migr_state.ctrlr_header.sdbl = true; 3524 migr_state.ctrlr_header.shadow_doorbell_buffer = vu_ctrlr->shadow_doorbell_buffer; 3525 migr_state.ctrlr_header.eventidx_buffer = vu_ctrlr->eventidx_buffer; 3526 } 3527 3528 /* Copy nvme migration header finally */ 3529 memcpy(endpoint->migr_data, &migr_state.ctrlr_header, sizeof(struct vfio_user_nvme_migr_header)); 3530 3531 if 
(SPDK_DEBUGLOG_FLAG_ENABLED("nvmf_vfio")) { 3532 vfio_user_ctrlr_dump_migr_data("SAVE", &migr_state, vu_ctrlr->sdbl); 3533 } 3534 } 3535 3536 /* 3537 * If we are about to close the connection, we need to unregister the interrupt, 3538 * as the library will subsequently close the file descriptor we registered. 3539 */ 3540 static int 3541 vfio_user_device_reset(vfu_ctx_t *vfu_ctx, vfu_reset_type_t type) 3542 { 3543 struct nvmf_vfio_user_endpoint *endpoint = vfu_get_private(vfu_ctx); 3544 struct nvmf_vfio_user_ctrlr *ctrlr = endpoint->ctrlr; 3545 3546 SPDK_DEBUGLOG(nvmf_vfio, "Device reset type %u\n", type); 3547 3548 if (type == VFU_RESET_LOST_CONN) { 3549 if (ctrlr != NULL) { 3550 spdk_interrupt_unregister(&ctrlr->intr); 3551 ctrlr->intr_fd = -1; 3552 } 3553 return 0; 3554 } 3555 3556 /* FIXME: LOST_CONN case ? */ 3557 if (ctrlr->sdbl != NULL) { 3558 vfio_user_ctrlr_switch_doorbells(ctrlr, false); 3559 free_sdbl(vfu_ctx, ctrlr->sdbl); 3560 ctrlr->sdbl = NULL; 3561 } 3562 3563 /* FIXME: much more needed here. */ 3564 3565 return 0; 3566 } 3567 3568 static int 3569 vfio_user_migr_ctrlr_construct_qps(struct nvmf_vfio_user_ctrlr *vu_ctrlr, 3570 struct vfio_user_nvme_migr_state *migr_state) 3571 { 3572 uint32_t i, qsize = 0; 3573 uint16_t sqid, cqid; 3574 struct vfio_user_nvme_migr_qp migr_qp; 3575 void *addr; 3576 uint32_t cqs_ref[NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR] = {}; 3577 int ret; 3578 3579 if (SPDK_DEBUGLOG_FLAG_ENABLED("nvmf_vfio")) { 3580 vfio_user_ctrlr_dump_migr_data("RESUME", migr_state, vu_ctrlr->sdbl); 3581 } 3582 3583 /* restore submission queues */ 3584 for (i = 0; i < NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR; i++) { 3585 migr_qp = migr_state->qps[i]; 3586 3587 qsize = migr_qp.sq.size; 3588 if (qsize) { 3589 struct nvmf_vfio_user_sq *sq; 3590 3591 sqid = migr_qp.sq.sqid; 3592 if (sqid != i) { 3593 SPDK_ERRLOG("Expected sqid %u while got %u", i, sqid); 3594 return -EINVAL; 3595 } 3596 3597 /* allocate sq if necessary */ 3598 if (vu_ctrlr->sqs[sqid] == NULL) { 3599 ret = init_sq(vu_ctrlr, &vu_ctrlr->transport->transport, sqid); 3600 if (ret) { 3601 SPDK_ERRLOG("Construct qpair with qid %u failed\n", sqid); 3602 return -EFAULT; 3603 } 3604 } 3605 3606 sq = vu_ctrlr->sqs[sqid]; 3607 sq->size = qsize; 3608 3609 ret = alloc_sq_reqs(vu_ctrlr, sq); 3610 if (ret) { 3611 SPDK_ERRLOG("Construct sq with qid %u failed\n", sqid); 3612 return -EFAULT; 3613 } 3614 3615 /* restore sq */ 3616 sq->sq_state = VFIO_USER_SQ_CREATED; 3617 sq->cqid = migr_qp.sq.cqid; 3618 *sq_headp(sq) = migr_qp.sq.head; 3619 sq->mapping.prp1 = migr_qp.sq.dma_addr; 3620 addr = map_one(vu_ctrlr->endpoint->vfu_ctx, 3621 sq->mapping.prp1, sq->size * 64, 3622 sq->mapping.sg, &sq->mapping.iov, 3623 PROT_READ); 3624 if (addr == NULL) { 3625 SPDK_ERRLOG("Restore sq with qid %u PRP1 0x%"PRIx64" with size %u failed\n", 3626 sqid, sq->mapping.prp1, sq->size); 3627 return -EFAULT; 3628 } 3629 cqs_ref[sq->cqid]++; 3630 } 3631 } 3632 3633 /* restore completion queues */ 3634 for (i = 0; i < NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR; i++) { 3635 migr_qp = migr_state->qps[i]; 3636 3637 qsize = migr_qp.cq.size; 3638 if (qsize) { 3639 struct nvmf_vfio_user_cq *cq; 3640 3641 /* restore cq */ 3642 cqid = migr_qp.sq.cqid; 3643 assert(cqid == i); 3644 3645 /* allocate cq if necessary */ 3646 if (vu_ctrlr->cqs[cqid] == NULL) { 3647 ret = init_cq(vu_ctrlr, cqid); 3648 if (ret) { 3649 SPDK_ERRLOG("Construct qpair with qid %u failed\n", cqid); 3650 return -EFAULT; 3651 } 3652 } 3653 3654 cq = vu_ctrlr->cqs[cqid]; 3655 3656 cq->size = qsize; 3657 3658 
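/* cq_ref was accumulated in cqs_ref[] while restoring the SQs above, so
 * a CQ shared by several SQs ends up with the correct reference count.
 */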
cq->cq_state = VFIO_USER_CQ_CREATED; 3659 cq->cq_ref = cqs_ref[cqid]; 3660 *cq_tailp(cq) = migr_qp.cq.tail; 3661 cq->mapping.prp1 = migr_qp.cq.dma_addr; 3662 cq->ien = migr_qp.cq.ien; 3663 cq->iv = migr_qp.cq.iv; 3664 cq->phase = migr_qp.cq.phase; 3665 addr = map_one(vu_ctrlr->endpoint->vfu_ctx, 3666 cq->mapping.prp1, cq->size * 16, 3667 cq->mapping.sg, &cq->mapping.iov, 3668 PROT_READ | PROT_WRITE); 3669 if (addr == NULL) { 3670 SPDK_ERRLOG("Restore cq with qid %u PRP1 0x%"PRIx64" with size %u failed\n", 3671 cqid, cq->mapping.prp1, cq->size); 3672 return -EFAULT; 3673 } 3674 } 3675 } 3676 3677 return 0; 3678 } 3679 3680 static int 3681 vfio_user_migr_ctrlr_restore(struct nvmf_vfio_user_ctrlr *vu_ctrlr) 3682 { 3683 struct nvmf_vfio_user_endpoint *endpoint = vu_ctrlr->endpoint; 3684 struct spdk_nvmf_ctrlr *ctrlr = vu_ctrlr->ctrlr; 3685 uint32_t *doorbell_base; 3686 struct spdk_nvme_cmd cmd; 3687 uint16_t i; 3688 int rc = 0; 3689 struct vfio_user_nvme_migr_state migr_state = { 3690 .nvmf_data = { 3691 .data_size = offsetof(struct spdk_nvmf_ctrlr_migr_data, unused), 3692 .regs_size = sizeof(struct spdk_nvmf_registers), 3693 .feat_size = sizeof(struct spdk_nvmf_ctrlr_feat) 3694 } 3695 }; 3696 3697 assert(endpoint->migr_data != NULL); 3698 assert(ctrlr != NULL); 3699 rc = vfio_user_migr_stream_to_data(endpoint, &migr_state); 3700 if (rc) { 3701 return rc; 3702 } 3703 3704 /* restore shadow doorbells */ 3705 if (migr_state.ctrlr_header.sdbl) { 3706 struct nvmf_vfio_user_shadow_doorbells *sdbl; 3707 sdbl = map_sdbl(vu_ctrlr->endpoint->vfu_ctx, 3708 migr_state.ctrlr_header.shadow_doorbell_buffer, 3709 migr_state.ctrlr_header.eventidx_buffer, 3710 memory_page_size(vu_ctrlr)); 3711 if (sdbl == NULL) { 3712 SPDK_ERRLOG("%s: failed to re-map shadow doorbell buffers\n", 3713 ctrlr_id(vu_ctrlr)); 3714 return -1; 3715 } 3716 3717 vu_ctrlr->shadow_doorbell_buffer = migr_state.ctrlr_header.shadow_doorbell_buffer; 3718 vu_ctrlr->eventidx_buffer = migr_state.ctrlr_header.eventidx_buffer; 3719 3720 SWAP(vu_ctrlr->sdbl, sdbl); 3721 } 3722 3723 rc = vfio_user_migr_ctrlr_construct_qps(vu_ctrlr, &migr_state); 3724 if (rc) { 3725 return rc; 3726 } 3727 3728 /* restore PCI configuration space */ 3729 memcpy((void *)endpoint->pci_config_space, &migr_state.cfg, NVME_REG_CFG_SIZE); 3730 3731 doorbell_base = (uint32_t *)&migr_state.doorbells; 3732 /* restore doorbells from saved registers */ 3733 memcpy((void *)vu_ctrlr->bar0_doorbells, doorbell_base, NVMF_VFIO_USER_DOORBELLS_SIZE); 3734 3735 /* restore nvmf controller data */ 3736 rc = spdk_nvmf_ctrlr_restore_migr_data(ctrlr, &migr_state.nvmf_data); 3737 if (rc) { 3738 return rc; 3739 } 3740 3741 /* resubmit pending AERs */ 3742 for (i = 0; i < migr_state.nvmf_data.num_aer_cids; i++) { 3743 SPDK_DEBUGLOG(nvmf_vfio, "%s AER resubmit, CID %u\n", ctrlr_id(vu_ctrlr), 3744 migr_state.nvmf_data.aer_cids[i]); 3745 memset(&cmd, 0, sizeof(cmd)); 3746 cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST; 3747 cmd.cid = migr_state.nvmf_data.aer_cids[i]; 3748 rc = handle_cmd_req(vu_ctrlr, &cmd, vu_ctrlr->sqs[0]); 3749 if (spdk_unlikely(rc)) { 3750 break; 3751 } 3752 } 3753 3754 return rc; 3755 } 3756 3757 static void 3758 vfio_user_migr_ctrlr_enable_sqs(struct nvmf_vfio_user_ctrlr *vu_ctrlr) 3759 { 3760 uint32_t i; 3761 struct nvmf_vfio_user_sq *sq; 3762 3763 /* The Admin queue (qid: 0) does not ever use shadow doorbells. 
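 * Its tail/head doorbell pointers are therefore wired directly to BAR0
 * below, while the I/O queues are switched by
 * vfio_user_ctrlr_switch_doorbells() according to whether a shadow
 * doorbell buffer was restored.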
*/ 3764 3765 if (vu_ctrlr->sqs[0] != NULL) { 3766 vu_ctrlr->sqs[0]->dbl_tailp = vu_ctrlr->bar0_doorbells + 3767 queue_index(0, false); 3768 } 3769 3770 if (vu_ctrlr->cqs[0] != NULL) { 3771 vu_ctrlr->cqs[0]->dbl_headp = vu_ctrlr->bar0_doorbells + 3772 queue_index(0, true); 3773 } 3774 3775 vfio_user_ctrlr_switch_doorbells(vu_ctrlr, vu_ctrlr->sdbl != NULL); 3776 3777 for (i = 0; i < NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR; i++) { 3778 sq = vu_ctrlr->sqs[i]; 3779 if (!sq || !sq->size) { 3780 continue; 3781 } 3782 3783 if (nvmf_qpair_is_admin_queue(&sq->qpair)) { 3784 /* ADMIN queue pair is always in the poll group, just enable it */ 3785 sq->sq_state = VFIO_USER_SQ_ACTIVE; 3786 } else { 3787 spdk_nvmf_tgt_new_qpair(vu_ctrlr->transport->transport.tgt, &sq->qpair); 3788 } 3789 } 3790 } 3791 3792 /* 3793 * We are in stop-and-copy state, but still potentially have some current dirty 3794 * sgls: while we're quiesced and thus should have no active requests, we still 3795 * have potentially dirty maps of the shadow doorbells and the CQs (SQs are 3796 * mapped read only). 3797 * 3798 * Since we won't be calling vfu_sgl_put() for them, we need to explicitly 3799 * mark them dirty now. 3800 */ 3801 static void 3802 vfio_user_migr_ctrlr_mark_dirty(struct nvmf_vfio_user_ctrlr *vu_ctrlr) 3803 { 3804 struct nvmf_vfio_user_endpoint *endpoint = vu_ctrlr->endpoint; 3805 3806 assert(vu_ctrlr->state == VFIO_USER_CTRLR_MIGRATING); 3807 3808 for (size_t i = 0; i < NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR; i++) { 3809 struct nvmf_vfio_user_cq *cq = vu_ctrlr->cqs[i]; 3810 3811 if (cq == NULL || q_addr(&cq->mapping) == NULL) { 3812 continue; 3813 } 3814 3815 vfu_sgl_mark_dirty(endpoint->vfu_ctx, cq->mapping.sg, 1); 3816 } 3817 3818 if (vu_ctrlr->sdbl != NULL) { 3819 dma_sg_t *sg; 3820 size_t i; 3821 3822 for (i = 0; i < NVMF_VFIO_USER_SHADOW_DOORBELLS_BUFFER_COUNT; 3823 ++i) { 3824 3825 if (!vu_ctrlr->sdbl->iovs[i].iov_len) { 3826 continue; 3827 } 3828 3829 sg = index_to_sg_t(vu_ctrlr->sdbl->sgs, i); 3830 3831 vfu_sgl_mark_dirty(endpoint->vfu_ctx, sg, 1); 3832 } 3833 } 3834 } 3835 3836 static int 3837 vfio_user_migration_device_state_transition(vfu_ctx_t *vfu_ctx, vfu_migr_state_t state) 3838 { 3839 struct nvmf_vfio_user_endpoint *endpoint = vfu_get_private(vfu_ctx); 3840 struct nvmf_vfio_user_ctrlr *vu_ctrlr = endpoint->ctrlr; 3841 struct nvmf_vfio_user_sq *sq; 3842 int ret = 0; 3843 3844 SPDK_DEBUGLOG(nvmf_vfio, "%s controller state %u, migration state %u\n", endpoint_id(endpoint), 3845 vu_ctrlr->state, state); 3846 3847 switch (state) { 3848 case VFU_MIGR_STATE_STOP_AND_COPY: 3849 vu_ctrlr->in_source_vm = true; 3850 vu_ctrlr->state = VFIO_USER_CTRLR_MIGRATING; 3851 vfio_user_migr_ctrlr_mark_dirty(vu_ctrlr); 3852 vfio_user_migr_ctrlr_save_data(vu_ctrlr); 3853 break; 3854 case VFU_MIGR_STATE_STOP: 3855 vu_ctrlr->state = VFIO_USER_CTRLR_MIGRATING; 3856 /* The controller associated with the source VM is dead now; we will resume 3857 * the subsystem after destroying the controller data structure, so that the 3858 * subsystem can be re-used for another client. 3859 */ 3860 if (vu_ctrlr->in_source_vm) { 3861 endpoint->need_resume = true; 3862 } 3863 break; 3864 case VFU_MIGR_STATE_PRE_COPY: 3865 assert(vu_ctrlr->state == VFIO_USER_CTRLR_PAUSED); 3866 break; 3867 case VFU_MIGR_STATE_RESUME: 3868 /* 3869 * The destination ADMIN queue pair is connected when starting the VM, 3870 * but it isn't enabled in the destination VM yet, so the poll 3871 * group will do nothing with the ADMIN queue pair for now.
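 *
 * Only when the controller has already reached the RUNNING state do we
 * need to tear down the admin SQ resources below, so that they can be
 * re-created from the queue size saved by the source VM.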
3872 */ 3873 if (vu_ctrlr->state != VFIO_USER_CTRLR_RUNNING) { 3874 break; 3875 } 3876 3877 assert(!vu_ctrlr->in_source_vm); 3878 vu_ctrlr->state = VFIO_USER_CTRLR_MIGRATING; 3879 3880 sq = TAILQ_FIRST(&vu_ctrlr->connected_sqs); 3881 assert(sq != NULL); 3882 assert(sq->qpair.qid == 0); 3883 sq->sq_state = VFIO_USER_SQ_INACTIVE; 3884 3885 /* Free ADMIN SQ resources first, SQ resources will be 3886 * allocated based on queue size from source VM. 3887 */ 3888 free_sq_reqs(sq); 3889 sq->size = 0; 3890 break; 3891 case VFU_MIGR_STATE_RUNNING: 3892 3893 if (vu_ctrlr->state != VFIO_USER_CTRLR_MIGRATING) { 3894 break; 3895 } 3896 3897 if (!vu_ctrlr->in_source_vm) { 3898 /* Restore destination VM from BAR9 */ 3899 ret = vfio_user_migr_ctrlr_restore(vu_ctrlr); 3900 if (ret) { 3901 break; 3902 } 3903 3904 vfio_user_ctrlr_switch_doorbells(vu_ctrlr, false); 3905 vfio_user_migr_ctrlr_enable_sqs(vu_ctrlr); 3906 vu_ctrlr->state = VFIO_USER_CTRLR_RUNNING; 3907 /* FIXME where do we resume nvmf? */ 3908 } else { 3909 /* Rollback source VM */ 3910 vu_ctrlr->state = VFIO_USER_CTRLR_RESUMING; 3911 ret = spdk_nvmf_subsystem_resume((struct spdk_nvmf_subsystem *)endpoint->subsystem, 3912 vfio_user_endpoint_resume_done, endpoint); 3913 if (ret < 0) { 3914 /* TODO: fail controller with CFS bit set */ 3915 vu_ctrlr->state = VFIO_USER_CTRLR_PAUSED; 3916 SPDK_ERRLOG("%s: failed to resume, ret=%d\n", endpoint_id(endpoint), ret); 3917 } 3918 } 3919 vu_ctrlr->migr_data_prepared = false; 3920 vu_ctrlr->in_source_vm = false; 3921 break; 3922 3923 default: 3924 return -EINVAL; 3925 } 3926 3927 return ret; 3928 } 3929 3930 static uint64_t 3931 vfio_user_migration_get_pending_bytes(vfu_ctx_t *vfu_ctx) 3932 { 3933 struct nvmf_vfio_user_endpoint *endpoint = vfu_get_private(vfu_ctx); 3934 struct nvmf_vfio_user_ctrlr *ctrlr = endpoint->ctrlr; 3935 uint64_t pending_bytes; 3936 3937 if (ctrlr->migr_data_prepared) { 3938 assert(ctrlr->state == VFIO_USER_CTRLR_MIGRATING); 3939 pending_bytes = 0; 3940 } else { 3941 pending_bytes = vfio_user_migr_data_len(); 3942 } 3943 3944 SPDK_DEBUGLOG(nvmf_vfio, 3945 "%s current state %u, pending bytes 0x%"PRIx64"\n", 3946 endpoint_id(endpoint), ctrlr->state, pending_bytes); 3947 3948 return pending_bytes; 3949 } 3950 3951 static int 3952 vfio_user_migration_prepare_data(vfu_ctx_t *vfu_ctx, uint64_t *offset, uint64_t *size) 3953 { 3954 struct nvmf_vfio_user_endpoint *endpoint = vfu_get_private(vfu_ctx); 3955 struct nvmf_vfio_user_ctrlr *ctrlr = endpoint->ctrlr; 3956 3957 /* 3958 * When transitioning to pre-copy state we set pending_bytes to 0, 3959 * so the vfio-user client shouldn't attempt to read any migration 3960 * data. This is not yet guaranteed by libvfio-user. 
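 * Be defensive and report an empty data window whenever we are not in the MIGRATING state.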
3961 */ 3962 if (ctrlr->state != VFIO_USER_CTRLR_MIGRATING) { 3963 assert(size != NULL); 3964 *offset = 0; 3965 *size = 0; 3966 return 0; 3967 } 3968 3969 if (ctrlr->in_source_vm) { /* migration source */ 3970 assert(size != NULL); 3971 *size = vfio_user_migr_data_len(); 3972 vfio_user_migr_ctrlr_save_data(ctrlr); 3973 } else { /* migration destination */ 3974 assert(size == NULL); 3975 assert(!ctrlr->migr_data_prepared); 3976 } 3977 *offset = 0; 3978 ctrlr->migr_data_prepared = true; 3979 3980 SPDK_DEBUGLOG(nvmf_vfio, "%s current state %u\n", endpoint_id(endpoint), ctrlr->state); 3981 3982 return 0; 3983 } 3984 3985 static ssize_t 3986 vfio_user_migration_read_data(vfu_ctx_t *vfu_ctx __attribute__((unused)), 3987 void *buf __attribute__((unused)), 3988 uint64_t count __attribute__((unused)), 3989 uint64_t offset __attribute__((unused))) 3990 { 3991 SPDK_DEBUGLOG(nvmf_vfio, "%s: migration read data not supported\n", 3992 endpoint_id(vfu_get_private(vfu_ctx))); 3993 errno = ENOTSUP; 3994 return -1; 3995 } 3996 3997 static ssize_t 3998 vfio_user_migration_write_data(vfu_ctx_t *vfu_ctx __attribute__((unused)), 3999 void *buf __attribute__((unused)), 4000 uint64_t count __attribute__((unused)), 4001 uint64_t offset __attribute__((unused))) 4002 { 4003 SPDK_DEBUGLOG(nvmf_vfio, "%s: migration write data not supported\n", 4004 endpoint_id(vfu_get_private(vfu_ctx))); 4005 errno = ENOTSUP; 4006 return -1; 4007 } 4008 4009 static int 4010 vfio_user_migration_data_written(vfu_ctx_t *vfu_ctx __attribute__((unused)), 4011 uint64_t count) 4012 { 4013 SPDK_DEBUGLOG(nvmf_vfio, "write 0x%"PRIx64"\n", (uint64_t)count); 4014 4015 if (count != vfio_user_migr_data_len()) { 4016 SPDK_DEBUGLOG(nvmf_vfio, "%s bad count %#lx\n", 4017 endpoint_id(vfu_get_private(vfu_ctx)), count); 4018 errno = EINVAL; 4019 return -1; 4020 } 4021 4022 return 0; 4023 } 4024 4025 static int 4026 vfio_user_dev_info_fill(struct nvmf_vfio_user_transport *vu_transport, 4027 struct nvmf_vfio_user_endpoint *endpoint) 4028 { 4029 int ret; 4030 ssize_t cap_offset; 4031 vfu_ctx_t *vfu_ctx = endpoint->vfu_ctx; 4032 struct iovec migr_sparse_mmap = {}; 4033 4034 struct pmcap pmcap = { .hdr.id = PCI_CAP_ID_PM, .pmcs.nsfrst = 0x1 }; 4035 struct pxcap pxcap = { 4036 .hdr.id = PCI_CAP_ID_EXP, 4037 .pxcaps.ver = 0x2, 4038 .pxdcap = {.rer = 0x1, .flrc = 0x1}, 4039 .pxdcap2.ctds = 0x1 4040 }; 4041 4042 struct msixcap msixcap = { 4043 .hdr.id = PCI_CAP_ID_MSIX, 4044 .mxc.ts = NVME_IRQ_MSIX_NUM - 1, 4045 .mtab = {.tbir = 0x4, .to = 0x0}, 4046 .mpba = {.pbir = 0x5, .pbao = 0x0} 4047 }; 4048 4049 struct iovec sparse_mmap[] = { 4050 { 4051 .iov_base = (void *)NVME_DOORBELLS_OFFSET, 4052 .iov_len = NVMF_VFIO_USER_DOORBELLS_SIZE, 4053 }, 4054 }; 4055 4056 const vfu_migration_callbacks_t migr_callbacks = { 4057 .version = VFU_MIGR_CALLBACKS_VERS, 4058 .transition = &vfio_user_migration_device_state_transition, 4059 .get_pending_bytes = &vfio_user_migration_get_pending_bytes, 4060 .prepare_data = &vfio_user_migration_prepare_data, 4061 .read_data = &vfio_user_migration_read_data, 4062 .data_written = &vfio_user_migration_data_written, 4063 .write_data = &vfio_user_migration_write_data 4064 }; 4065 4066 ret = vfu_pci_init(vfu_ctx, VFU_PCI_TYPE_EXPRESS, PCI_HEADER_TYPE_NORMAL, 0); 4067 if (ret < 0) { 4068 SPDK_ERRLOG("vfu_ctx %p failed to initialize PCI\n", vfu_ctx); 4069 return ret; 4070 } 4071 vfu_pci_set_id(vfu_ctx, SPDK_PCI_VID_NUTANIX, 0x0001, SPDK_PCI_VID_NUTANIX, 0); 4072 /* 4073 * 0x02, controller uses the NVM Express programming interface 4074 * 0x08, 
non-volatile memory controller 4075 * 0x01, mass storage controller 4076 */ 4077 vfu_pci_set_class(vfu_ctx, 0x01, 0x08, 0x02); 4078 4079 cap_offset = vfu_pci_add_capability(vfu_ctx, 0, 0, &pmcap); 4080 if (cap_offset < 0) { 4081 SPDK_ERRLOG("vfu_ctx %p failed add pmcap\n", vfu_ctx); 4082 return ret; 4083 } 4084 4085 cap_offset = vfu_pci_add_capability(vfu_ctx, 0, 0, &pxcap); 4086 if (cap_offset < 0) { 4087 SPDK_ERRLOG("vfu_ctx %p failed add pxcap\n", vfu_ctx); 4088 return ret; 4089 } 4090 4091 cap_offset = vfu_pci_add_capability(vfu_ctx, 0, 0, &msixcap); 4092 if (cap_offset < 0) { 4093 SPDK_ERRLOG("vfu_ctx %p failed add msixcap\n", vfu_ctx); 4094 return ret; 4095 } 4096 4097 ret = vfu_setup_region(vfu_ctx, VFU_PCI_DEV_CFG_REGION_IDX, NVME_REG_CFG_SIZE, 4098 access_pci_config, VFU_REGION_FLAG_RW, NULL, 0, -1, 0); 4099 if (ret < 0) { 4100 SPDK_ERRLOG("vfu_ctx %p failed to setup cfg\n", vfu_ctx); 4101 return ret; 4102 } 4103 4104 if (vu_transport->transport_opts.disable_mappable_bar0) { 4105 ret = vfu_setup_region(vfu_ctx, VFU_PCI_DEV_BAR0_REGION_IDX, NVME_REG_BAR0_SIZE, 4106 access_bar0_fn, VFU_REGION_FLAG_RW | VFU_REGION_FLAG_MEM, 4107 NULL, 0, -1, 0); 4108 } else { 4109 ret = vfu_setup_region(vfu_ctx, VFU_PCI_DEV_BAR0_REGION_IDX, NVME_REG_BAR0_SIZE, 4110 access_bar0_fn, VFU_REGION_FLAG_RW | VFU_REGION_FLAG_MEM, 4111 sparse_mmap, 1, endpoint->devmem_fd, 0); 4112 } 4113 4114 if (ret < 0) { 4115 SPDK_ERRLOG("vfu_ctx %p failed to setup bar 0\n", vfu_ctx); 4116 return ret; 4117 } 4118 4119 ret = vfu_setup_region(vfu_ctx, VFU_PCI_DEV_BAR4_REGION_IDX, NVME_BAR4_SIZE, 4120 NULL, VFU_REGION_FLAG_RW, NULL, 0, -1, 0); 4121 if (ret < 0) { 4122 SPDK_ERRLOG("vfu_ctx %p failed to setup bar 4\n", vfu_ctx); 4123 return ret; 4124 } 4125 4126 ret = vfu_setup_region(vfu_ctx, VFU_PCI_DEV_BAR5_REGION_IDX, NVME_BAR5_SIZE, 4127 NULL, VFU_REGION_FLAG_RW, NULL, 0, -1, 0); 4128 if (ret < 0) { 4129 SPDK_ERRLOG("vfu_ctx %p failed to setup bar 5\n", vfu_ctx); 4130 return ret; 4131 } 4132 4133 ret = vfu_setup_device_dma(vfu_ctx, memory_region_add_cb, memory_region_remove_cb); 4134 if (ret < 0) { 4135 SPDK_ERRLOG("vfu_ctx %p failed to setup dma callback\n", vfu_ctx); 4136 return ret; 4137 } 4138 4139 ret = vfu_setup_device_reset_cb(vfu_ctx, vfio_user_device_reset); 4140 if (ret < 0) { 4141 SPDK_ERRLOG("vfu_ctx %p failed to setup reset callback\n", vfu_ctx); 4142 return ret; 4143 } 4144 4145 ret = vfu_setup_device_nr_irqs(vfu_ctx, VFU_DEV_INTX_IRQ, 1); 4146 if (ret < 0) { 4147 SPDK_ERRLOG("vfu_ctx %p failed to setup INTX\n", vfu_ctx); 4148 return ret; 4149 } 4150 4151 ret = vfu_setup_device_nr_irqs(vfu_ctx, VFU_DEV_MSIX_IRQ, NVME_IRQ_MSIX_NUM); 4152 if (ret < 0) { 4153 SPDK_ERRLOG("vfu_ctx %p failed to setup MSIX\n", vfu_ctx); 4154 return ret; 4155 } 4156 4157 vfu_setup_device_quiesce_cb(vfu_ctx, vfio_user_dev_quiesce_cb); 4158 4159 migr_sparse_mmap.iov_base = (void *)4096; 4160 migr_sparse_mmap.iov_len = vfio_user_migr_data_len(); 4161 ret = vfu_setup_region(vfu_ctx, VFU_PCI_DEV_MIGR_REGION_IDX, 4162 vfu_get_migr_register_area_size() + vfio_user_migr_data_len(), 4163 NULL, VFU_REGION_FLAG_RW | VFU_REGION_FLAG_MEM, &migr_sparse_mmap, 4164 1, endpoint->migr_fd, 0); 4165 if (ret < 0) { 4166 SPDK_ERRLOG("vfu_ctx %p failed to setup migration region\n", vfu_ctx); 4167 return ret; 4168 } 4169 4170 ret = vfu_setup_device_migration_callbacks(vfu_ctx, &migr_callbacks, 4171 vfu_get_migr_register_area_size()); 4172 if (ret < 0) { 4173 SPDK_ERRLOG("vfu_ctx %p failed to setup migration callbacks\n", vfu_ctx); 4174 return ret; 4175 } 
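/* All capabilities, regions, IRQs and callbacks are registered; realize the context and cache pointers into the emulated PCI configuration space. */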
4176 4177 ret = vfu_realize_ctx(vfu_ctx); 4178 if (ret < 0) { 4179 SPDK_ERRLOG("vfu_ctx %p failed to realize\n", vfu_ctx); 4180 return ret; 4181 } 4182 4183 endpoint->pci_config_space = vfu_pci_get_config_space(endpoint->vfu_ctx); 4184 assert(endpoint->pci_config_space != NULL); 4185 init_pci_config_space(endpoint->pci_config_space); 4186 4187 assert(cap_offset != 0); 4188 endpoint->msix = (struct msixcap *)((uint8_t *)endpoint->pci_config_space + cap_offset); 4189 4190 return 0; 4191 } 4192 4193 static int nvmf_vfio_user_accept(void *ctx); 4194 4195 static void 4196 set_intr_mode_noop(struct spdk_poller *poller, void *arg, bool interrupt_mode) 4197 { 4198 /* Nothing for us to do here. */ 4199 } 4200 4201 /* 4202 * Register an "accept" poller: this is polling for incoming vfio-user socket 4203 * connections (on the listening socket). 4204 * 4205 * We need to do this on first listening, and also after destroying a 4206 * controller, so we can accept another connection. 4207 */ 4208 static int 4209 vfio_user_register_accept_poller(struct nvmf_vfio_user_endpoint *endpoint) 4210 { 4211 uint64_t poll_rate_us = endpoint->transport->transport.opts.acceptor_poll_rate; 4212 4213 SPDK_DEBUGLOG(nvmf_vfio, "registering accept poller\n"); 4214 4215 endpoint->accept_poller = SPDK_POLLER_REGISTER(nvmf_vfio_user_accept, 4216 endpoint, poll_rate_us); 4217 4218 if (!endpoint->accept_poller) { 4219 return -1; 4220 } 4221 4222 endpoint->accept_thread = spdk_get_thread(); 4223 endpoint->need_relisten = false; 4224 4225 if (!spdk_interrupt_mode_is_enabled()) { 4226 return 0; 4227 } 4228 4229 endpoint->accept_intr_fd = vfu_get_poll_fd(endpoint->vfu_ctx); 4230 assert(endpoint->accept_intr_fd != -1); 4231 4232 endpoint->accept_intr = SPDK_INTERRUPT_REGISTER(endpoint->accept_intr_fd, 4233 nvmf_vfio_user_accept, endpoint); 4234 4235 assert(endpoint->accept_intr != NULL); 4236 4237 spdk_poller_register_interrupt(endpoint->accept_poller, 4238 set_intr_mode_noop, NULL); 4239 return 0; 4240 } 4241 4242 static void 4243 _vfio_user_relisten(void *ctx) 4244 { 4245 struct nvmf_vfio_user_endpoint *endpoint = ctx; 4246 4247 vfio_user_register_accept_poller(endpoint); 4248 } 4249 4250 static void 4251 _free_ctrlr(void *ctx) 4252 { 4253 struct nvmf_vfio_user_ctrlr *ctrlr = ctx; 4254 struct nvmf_vfio_user_endpoint *endpoint = ctrlr->endpoint; 4255 4256 free_sdbl(ctrlr->endpoint->vfu_ctx, ctrlr->sdbl); 4257 4258 spdk_interrupt_unregister(&ctrlr->intr); 4259 ctrlr->intr_fd = -1; 4260 spdk_poller_unregister(&ctrlr->vfu_ctx_poller); 4261 4262 free(ctrlr); 4263 4264 if (endpoint == NULL) { 4265 return; 4266 } 4267 4268 if (endpoint->need_async_destroy) { 4269 nvmf_vfio_user_destroy_endpoint(endpoint); 4270 } else if (endpoint->need_relisten) { 4271 spdk_thread_send_msg(endpoint->accept_thread, 4272 _vfio_user_relisten, endpoint); 4273 } 4274 } 4275 4276 static void 4277 free_ctrlr(struct nvmf_vfio_user_ctrlr *ctrlr) 4278 { 4279 int i; 4280 assert(ctrlr != NULL); 4281 4282 SPDK_DEBUGLOG(nvmf_vfio, "free %s\n", ctrlr_id(ctrlr)); 4283 4284 for (i = 0; i < NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR; i++) { 4285 free_qp(ctrlr, i); 4286 } 4287 4288 spdk_thread_exec_msg(ctrlr->thread, _free_ctrlr, ctrlr); 4289 } 4290 4291 static int 4292 nvmf_vfio_user_create_ctrlr(struct nvmf_vfio_user_transport *transport, 4293 struct nvmf_vfio_user_endpoint *endpoint) 4294 { 4295 struct nvmf_vfio_user_ctrlr *ctrlr; 4296 int err = 0; 4297 4298 SPDK_DEBUGLOG(nvmf_vfio, "%s\n", endpoint_id(endpoint)); 4299 4300 /* First, construct a vfio-user CUSTOM transport 
controller */ 4301 ctrlr = calloc(1, sizeof(*ctrlr)); 4302 if (ctrlr == NULL) { 4303 err = -ENOMEM; 4304 goto out; 4305 } 4306 /* We can only support one connection for now */ 4307 ctrlr->cntlid = 0x1; 4308 ctrlr->intr_fd = -1; 4309 ctrlr->transport = transport; 4310 ctrlr->endpoint = endpoint; 4311 ctrlr->bar0_doorbells = endpoint->bar0_doorbells; 4312 TAILQ_INIT(&ctrlr->connected_sqs); 4313 4314 ctrlr->adaptive_irqs_enabled = 4315 !transport->transport_opts.disable_adaptive_irq; 4316 4317 /* Then, construct an admin queue pair */ 4318 err = init_sq(ctrlr, &transport->transport, 0); 4319 if (err != 0) { 4320 free(ctrlr); 4321 goto out; 4322 } 4323 4324 err = init_cq(ctrlr, 0); 4325 if (err != 0) { 4326 free(ctrlr); 4327 goto out; 4328 } 4329 4330 ctrlr->sqs[0]->size = NVMF_VFIO_USER_DEFAULT_AQ_DEPTH; 4331 4332 err = alloc_sq_reqs(ctrlr, ctrlr->sqs[0]); 4333 if (err != 0) { 4334 free(ctrlr); 4335 goto out; 4336 } 4337 endpoint->ctrlr = ctrlr; 4338 4339 /* Notify the generic layer about the new admin queue pair */ 4340 spdk_nvmf_tgt_new_qpair(transport->transport.tgt, &ctrlr->sqs[0]->qpair); 4341 4342 out: 4343 if (err != 0) { 4344 SPDK_ERRLOG("%s: failed to create vfio-user controller: %s\n", 4345 endpoint_id(endpoint), strerror(-err)); 4346 } 4347 4348 return err; 4349 } 4350 4351 static int 4352 nvmf_vfio_user_listen(struct spdk_nvmf_transport *transport, 4353 const struct spdk_nvme_transport_id *trid, 4354 struct spdk_nvmf_listen_opts *listen_opts) 4355 { 4356 struct nvmf_vfio_user_transport *vu_transport; 4357 struct nvmf_vfio_user_endpoint *endpoint, *tmp; 4358 char path[PATH_MAX] = {}; 4359 char uuid[PATH_MAX] = {}; 4360 int ret; 4361 4362 vu_transport = SPDK_CONTAINEROF(transport, struct nvmf_vfio_user_transport, 4363 transport); 4364 4365 pthread_mutex_lock(&vu_transport->lock); 4366 TAILQ_FOREACH_SAFE(endpoint, &vu_transport->endpoints, link, tmp) { 4367 /* Only compare traddr */ 4368 if (strncmp(endpoint->trid.traddr, trid->traddr, sizeof(endpoint->trid.traddr)) == 0) { 4369 pthread_mutex_unlock(&vu_transport->lock); 4370 return -EEXIST; 4371 } 4372 } 4373 pthread_mutex_unlock(&vu_transport->lock); 4374 4375 endpoint = calloc(1, sizeof(*endpoint)); 4376 if (!endpoint) { 4377 return -ENOMEM; 4378 } 4379 4380 pthread_mutex_init(&endpoint->lock, NULL); 4381 endpoint->devmem_fd = -1; 4382 memcpy(&endpoint->trid, trid, sizeof(endpoint->trid)); 4383 endpoint->transport = vu_transport; 4384 4385 ret = snprintf(path, PATH_MAX, "%s/bar0", endpoint_id(endpoint)); 4386 if (ret < 0 || ret >= PATH_MAX) { 4387 SPDK_ERRLOG("%s: error to get socket path: %s.\n", endpoint_id(endpoint), spdk_strerror(errno)); 4388 ret = -1; 4389 goto out; 4390 } 4391 4392 ret = open(path, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR); 4393 if (ret == -1) { 4394 SPDK_ERRLOG("%s: failed to open device memory at %s: %s.\n", 4395 endpoint_id(endpoint), path, spdk_strerror(errno)); 4396 goto out; 4397 } 4398 unlink(path); 4399 4400 endpoint->devmem_fd = ret; 4401 ret = ftruncate(endpoint->devmem_fd, 4402 NVME_DOORBELLS_OFFSET + NVMF_VFIO_USER_DOORBELLS_SIZE); 4403 if (ret != 0) { 4404 SPDK_ERRLOG("%s: error to ftruncate file %s: %s.\n", endpoint_id(endpoint), path, 4405 spdk_strerror(errno)); 4406 goto out; 4407 } 4408 4409 endpoint->bar0_doorbells = mmap(NULL, NVMF_VFIO_USER_DOORBELLS_SIZE, 4410 PROT_READ | PROT_WRITE, MAP_SHARED, endpoint->devmem_fd, NVME_DOORBELLS_OFFSET); 4411 if (endpoint->bar0_doorbells == MAP_FAILED) { 4412 SPDK_ERRLOG("%s: error to mmap file %s: %s.\n", endpoint_id(endpoint), path, 
spdk_strerror(errno)); 4413 endpoint->bar0_doorbells = NULL; 4414 ret = -1; 4415 goto out; 4416 } 4417 4418 ret = snprintf(path, PATH_MAX, "%s/migr", endpoint_id(endpoint)); 4419 if (ret < 0 || ret >= PATH_MAX) { 4420 SPDK_ERRLOG("%s: error to get migration file path: %s.\n", endpoint_id(endpoint), 4421 spdk_strerror(errno)); 4422 ret = -1; 4423 goto out; 4424 } 4425 ret = open(path, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR); 4426 if (ret == -1) { 4427 SPDK_ERRLOG("%s: failed to open device memory at %s: %s.\n", 4428 endpoint_id(endpoint), path, spdk_strerror(errno)); 4429 goto out; 4430 } 4431 unlink(path); 4432 4433 endpoint->migr_fd = ret; 4434 ret = ftruncate(endpoint->migr_fd, 4435 vfu_get_migr_register_area_size() + vfio_user_migr_data_len()); 4436 if (ret != 0) { 4437 SPDK_ERRLOG("%s: error to ftruncate migration file %s: %s.\n", endpoint_id(endpoint), path, 4438 spdk_strerror(errno)); 4439 goto out; 4440 } 4441 4442 endpoint->migr_data = mmap(NULL, vfio_user_migr_data_len(), 4443 PROT_READ | PROT_WRITE, MAP_SHARED, endpoint->migr_fd, vfu_get_migr_register_area_size()); 4444 if (endpoint->migr_data == MAP_FAILED) { 4445 SPDK_ERRLOG("%s: error to mmap file %s: %s.\n", endpoint_id(endpoint), path, spdk_strerror(errno)); 4446 endpoint->migr_data = NULL; 4447 ret = -1; 4448 goto out; 4449 } 4450 4451 ret = snprintf(uuid, PATH_MAX, "%s/cntrl", endpoint_id(endpoint)); 4452 if (ret < 0 || ret >= PATH_MAX) { 4453 SPDK_ERRLOG("%s: error to get ctrlr file path: %s\n", endpoint_id(endpoint), spdk_strerror(errno)); 4454 ret = -1; 4455 goto out; 4456 } 4457 4458 endpoint->vfu_ctx = vfu_create_ctx(VFU_TRANS_SOCK, uuid, LIBVFIO_USER_FLAG_ATTACH_NB, 4459 endpoint, VFU_DEV_TYPE_PCI); 4460 if (endpoint->vfu_ctx == NULL) { 4461 SPDK_ERRLOG("%s: error creating libmuser context: %m\n", 4462 endpoint_id(endpoint)); 4463 ret = -1; 4464 goto out; 4465 } 4466 4467 ret = vfu_setup_log(endpoint->vfu_ctx, vfio_user_log, 4468 vfio_user_get_log_level()); 4469 if (ret < 0) { 4470 goto out; 4471 } 4472 4473 4474 ret = vfio_user_dev_info_fill(vu_transport, endpoint); 4475 if (ret < 0) { 4476 goto out; 4477 } 4478 4479 ret = vfio_user_register_accept_poller(endpoint); 4480 4481 if (ret != 0) { 4482 goto out; 4483 } 4484 4485 pthread_mutex_lock(&vu_transport->lock); 4486 TAILQ_INSERT_TAIL(&vu_transport->endpoints, endpoint, link); 4487 pthread_mutex_unlock(&vu_transport->lock); 4488 4489 out: 4490 if (ret != 0) { 4491 nvmf_vfio_user_destroy_endpoint(endpoint); 4492 } 4493 4494 return ret; 4495 } 4496 4497 static void 4498 nvmf_vfio_user_stop_listen(struct spdk_nvmf_transport *transport, 4499 const struct spdk_nvme_transport_id *trid) 4500 { 4501 struct nvmf_vfio_user_transport *vu_transport; 4502 struct nvmf_vfio_user_endpoint *endpoint, *tmp; 4503 4504 assert(trid != NULL); 4505 assert(trid->traddr != NULL); 4506 4507 SPDK_DEBUGLOG(nvmf_vfio, "%s: stop listen\n", trid->traddr); 4508 4509 vu_transport = SPDK_CONTAINEROF(transport, struct nvmf_vfio_user_transport, 4510 transport); 4511 4512 pthread_mutex_lock(&vu_transport->lock); 4513 TAILQ_FOREACH_SAFE(endpoint, &vu_transport->endpoints, link, tmp) { 4514 if (strcmp(trid->traddr, endpoint->trid.traddr) == 0) { 4515 TAILQ_REMOVE(&vu_transport->endpoints, endpoint, link); 4516 /* Defer to free endpoint resources until the controller 4517 * is freed. There are two cases when running here: 4518 * 1. kill nvmf target while VM is connected 4519 * 2. remove listener via RPC call 4520 * nvmf library will disconnect all queue paris. 
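 * Once the last queue pair is gone the controller is freed, and _free_ctrlr() then performs the deferred endpoint destruction.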
4521 */ 4522 if (endpoint->ctrlr) { 4523 assert(!endpoint->need_async_destroy); 4524 endpoint->need_async_destroy = true; 4525 pthread_mutex_unlock(&vu_transport->lock); 4526 return; 4527 } 4528 4529 nvmf_vfio_user_destroy_endpoint(endpoint); 4530 pthread_mutex_unlock(&vu_transport->lock); 4531 return; 4532 } 4533 } 4534 pthread_mutex_unlock(&vu_transport->lock); 4535 4536 SPDK_DEBUGLOG(nvmf_vfio, "%s: not found\n", trid->traddr); 4537 } 4538 4539 static void 4540 nvmf_vfio_user_cdata_init(struct spdk_nvmf_transport *transport, 4541 struct spdk_nvmf_subsystem *subsystem, 4542 struct spdk_nvmf_ctrlr_data *cdata) 4543 { 4544 struct nvmf_vfio_user_transport *vu_transport; 4545 4546 vu_transport = SPDK_CONTAINEROF(transport, struct nvmf_vfio_user_transport, transport); 4547 4548 cdata->vid = SPDK_PCI_VID_NUTANIX; 4549 cdata->ssvid = SPDK_PCI_VID_NUTANIX; 4550 cdata->ieee[0] = 0x8d; 4551 cdata->ieee[1] = 0x6b; 4552 cdata->ieee[2] = 0x50; 4553 memset(&cdata->sgls, 0, sizeof(struct spdk_nvme_cdata_sgls)); 4554 cdata->sgls.supported = SPDK_NVME_SGLS_SUPPORTED_DWORD_ALIGNED; 4555 cdata->oncs.compare = !vu_transport->transport_opts.disable_compare; 4556 /* libvfio-user can only support 1 connection for now */ 4557 cdata->oncs.reservations = 0; 4558 cdata->oacs.doorbell_buffer_config = !vu_transport->transport_opts.disable_shadow_doorbells; 4559 cdata->fuses.compare_and_write = !vu_transport->transport_opts.disable_compare; 4560 } 4561 4562 static int 4563 nvmf_vfio_user_listen_associate(struct spdk_nvmf_transport *transport, 4564 const struct spdk_nvmf_subsystem *subsystem, 4565 const struct spdk_nvme_transport_id *trid) 4566 { 4567 struct nvmf_vfio_user_transport *vu_transport; 4568 struct nvmf_vfio_user_endpoint *endpoint; 4569 4570 vu_transport = SPDK_CONTAINEROF(transport, struct nvmf_vfio_user_transport, transport); 4571 4572 pthread_mutex_lock(&vu_transport->lock); 4573 TAILQ_FOREACH(endpoint, &vu_transport->endpoints, link) { 4574 if (strncmp(endpoint->trid.traddr, trid->traddr, sizeof(endpoint->trid.traddr)) == 0) { 4575 break; 4576 } 4577 } 4578 pthread_mutex_unlock(&vu_transport->lock); 4579 4580 if (endpoint == NULL) { 4581 return -ENOENT; 4582 } 4583 4584 /* Drop const - we will later need to pause/unpause. */ 4585 endpoint->subsystem = (struct spdk_nvmf_subsystem *)subsystem; 4586 4587 return 0; 4588 } 4589 4590 /* 4591 * Executed periodically at a default SPDK_NVMF_DEFAULT_ACCEPT_POLL_RATE_US 4592 * frequency. 4593 * 4594 * For this endpoint (which at the libvfio-user level corresponds to a socket), 4595 * if we don't currently have a controller set up, peek to see if the socket is 4596 * able to accept a new connection. 4597 */ 4598 static int 4599 nvmf_vfio_user_accept(void *ctx) 4600 { 4601 struct nvmf_vfio_user_endpoint *endpoint = ctx; 4602 struct nvmf_vfio_user_transport *vu_transport; 4603 int err; 4604 4605 vu_transport = endpoint->transport; 4606 4607 if (endpoint->ctrlr != NULL) { 4608 return SPDK_POLLER_IDLE; 4609 } 4610 4611 /* While we're here, the controller is already destroyed, 4612 * subsystem may still be in RESUMING state, we will wait 4613 * until the subsystem is in RUNNING state. 
4614 */ 4615 if (endpoint->need_resume) { 4616 return SPDK_POLLER_IDLE; 4617 } 4618 4619 err = vfu_attach_ctx(endpoint->vfu_ctx); 4620 if (err == 0) { 4621 SPDK_DEBUGLOG(nvmf_vfio, "attach succeeded\n"); 4622 err = nvmf_vfio_user_create_ctrlr(vu_transport, endpoint); 4623 if (err == 0) { 4624 /* 4625 * Unregister ourselves: now we've accepted a 4626 * connection, there is nothing for us to poll for, and 4627 * we will poll the connection via vfu_run_ctx() 4628 * instead. 4629 */ 4630 spdk_interrupt_unregister(&endpoint->accept_intr); 4631 spdk_poller_unregister(&endpoint->accept_poller); 4632 } 4633 return SPDK_POLLER_BUSY; 4634 } 4635 4636 if (errno == EAGAIN || errno == EWOULDBLOCK) { 4637 return SPDK_POLLER_IDLE; 4638 } 4639 4640 return SPDK_POLLER_BUSY; 4641 } 4642 4643 static void 4644 nvmf_vfio_user_discover(struct spdk_nvmf_transport *transport, 4645 struct spdk_nvme_transport_id *trid, 4646 struct spdk_nvmf_discovery_log_page_entry *entry) 4647 { } 4648 4649 static int vfio_user_poll_group_intr(void *ctx); 4650 4651 static void 4652 vfio_user_poll_group_add_intr(struct nvmf_vfio_user_poll_group *vu_group, 4653 struct spdk_nvmf_poll_group *group) 4654 { 4655 vu_group->intr_fd = eventfd(0, EFD_NONBLOCK); 4656 assert(vu_group->intr_fd != -1); 4657 4658 vu_group->intr = SPDK_INTERRUPT_REGISTER(vu_group->intr_fd, 4659 vfio_user_poll_group_intr, vu_group); 4660 assert(vu_group->intr != NULL); 4661 4662 spdk_poller_register_interrupt(group->poller, set_intr_mode_noop, 4663 vu_group); 4664 } 4665 4666 static struct spdk_nvmf_transport_poll_group * 4667 nvmf_vfio_user_poll_group_create(struct spdk_nvmf_transport *transport, 4668 struct spdk_nvmf_poll_group *group) 4669 { 4670 struct nvmf_vfio_user_transport *vu_transport; 4671 struct nvmf_vfio_user_poll_group *vu_group; 4672 4673 vu_transport = SPDK_CONTAINEROF(transport, struct nvmf_vfio_user_transport, 4674 transport); 4675 4676 SPDK_DEBUGLOG(nvmf_vfio, "create poll group\n"); 4677 4678 vu_group = calloc(1, sizeof(*vu_group)); 4679 if (vu_group == NULL) { 4680 SPDK_ERRLOG("Error allocating poll group: %m"); 4681 return NULL; 4682 } 4683 4684 if (in_interrupt_mode(vu_transport)) { 4685 vfio_user_poll_group_add_intr(vu_group, group); 4686 } 4687 4688 TAILQ_INIT(&vu_group->sqs); 4689 4690 pthread_mutex_lock(&vu_transport->pg_lock); 4691 TAILQ_INSERT_TAIL(&vu_transport->poll_groups, vu_group, link); 4692 if (vu_transport->next_pg == NULL) { 4693 vu_transport->next_pg = vu_group; 4694 } 4695 pthread_mutex_unlock(&vu_transport->pg_lock); 4696 4697 return &vu_group->group; 4698 } 4699 4700 static struct spdk_nvmf_transport_poll_group * 4701 nvmf_vfio_user_get_optimal_poll_group(struct spdk_nvmf_qpair *qpair) 4702 { 4703 struct nvmf_vfio_user_transport *vu_transport; 4704 struct nvmf_vfio_user_poll_group **vu_group; 4705 struct nvmf_vfio_user_sq *sq; 4706 struct nvmf_vfio_user_cq *cq; 4707 4708 struct spdk_nvmf_transport_poll_group *result = NULL; 4709 4710 sq = SPDK_CONTAINEROF(qpair, struct nvmf_vfio_user_sq, qpair); 4711 cq = sq->ctrlr->cqs[sq->cqid]; 4712 assert(cq != NULL); 4713 vu_transport = SPDK_CONTAINEROF(qpair->transport, struct nvmf_vfio_user_transport, transport); 4714 4715 pthread_mutex_lock(&vu_transport->pg_lock); 4716 if (TAILQ_EMPTY(&vu_transport->poll_groups)) { 4717 goto out; 4718 } 4719 4720 if (!nvmf_qpair_is_admin_queue(qpair)) { 4721 /* 4722 * If this is shared IO CQ case, just return the used CQ's poll 4723 * group, so I/O completions don't have to use 4724 * spdk_thread_send_msg(). 
4725 */ 4726 if (cq->group != NULL) { 4727 result = cq->group; 4728 goto out; 4729 } 4730 4731 /* 4732 * If we're in interrupt mode, align all qpairs for a controller 4733 * on the same poll group by default, unless requested. This can 4734 * be lower in performance than running on a single poll group, 4735 * so we disable spreading by default. 4736 */ 4737 if (in_interrupt_mode(vu_transport) && 4738 !vu_transport->transport_opts.enable_intr_mode_sq_spreading) { 4739 result = sq->ctrlr->sqs[0]->group; 4740 goto out; 4741 } 4742 4743 } 4744 4745 vu_group = &vu_transport->next_pg; 4746 assert(*vu_group != NULL); 4747 4748 result = &(*vu_group)->group; 4749 *vu_group = TAILQ_NEXT(*vu_group, link); 4750 if (*vu_group == NULL) { 4751 *vu_group = TAILQ_FIRST(&vu_transport->poll_groups); 4752 } 4753 4754 out: 4755 if (cq->group == NULL) { 4756 cq->group = result; 4757 } 4758 4759 pthread_mutex_unlock(&vu_transport->pg_lock); 4760 return result; 4761 } 4762 4763 static void 4764 vfio_user_poll_group_del_intr(struct nvmf_vfio_user_poll_group *vu_group) 4765 { 4766 assert(vu_group->intr_fd != -1); 4767 4768 spdk_interrupt_unregister(&vu_group->intr); 4769 4770 close(vu_group->intr_fd); 4771 vu_group->intr_fd = -1; 4772 } 4773 4774 /* called when process exits */ 4775 static void 4776 nvmf_vfio_user_poll_group_destroy(struct spdk_nvmf_transport_poll_group *group) 4777 { 4778 struct nvmf_vfio_user_poll_group *vu_group, *next_tgroup; 4779 struct nvmf_vfio_user_transport *vu_transport; 4780 4781 SPDK_DEBUGLOG(nvmf_vfio, "destroy poll group\n"); 4782 4783 vu_group = SPDK_CONTAINEROF(group, struct nvmf_vfio_user_poll_group, group); 4784 vu_transport = SPDK_CONTAINEROF(vu_group->group.transport, struct nvmf_vfio_user_transport, 4785 transport); 4786 4787 if (in_interrupt_mode(vu_transport)) { 4788 vfio_user_poll_group_del_intr(vu_group); 4789 } 4790 4791 pthread_mutex_lock(&vu_transport->pg_lock); 4792 next_tgroup = TAILQ_NEXT(vu_group, link); 4793 TAILQ_REMOVE(&vu_transport->poll_groups, vu_group, link); 4794 if (next_tgroup == NULL) { 4795 next_tgroup = TAILQ_FIRST(&vu_transport->poll_groups); 4796 } 4797 if (vu_transport->next_pg == vu_group) { 4798 vu_transport->next_pg = next_tgroup; 4799 } 4800 pthread_mutex_unlock(&vu_transport->pg_lock); 4801 4802 free(vu_group); 4803 } 4804 4805 static void 4806 _vfio_user_qpair_disconnect(void *ctx) 4807 { 4808 struct nvmf_vfio_user_sq *sq = ctx; 4809 4810 spdk_nvmf_qpair_disconnect(&sq->qpair, NULL, NULL); 4811 } 4812 4813 /* The function is used when socket connection is destroyed */ 4814 static int 4815 vfio_user_destroy_ctrlr(struct nvmf_vfio_user_ctrlr *ctrlr) 4816 { 4817 struct nvmf_vfio_user_sq *sq; 4818 struct nvmf_vfio_user_endpoint *endpoint; 4819 4820 SPDK_DEBUGLOG(nvmf_vfio, "%s stop processing\n", ctrlr_id(ctrlr)); 4821 4822 endpoint = ctrlr->endpoint; 4823 assert(endpoint != NULL); 4824 4825 pthread_mutex_lock(&endpoint->lock); 4826 endpoint->need_relisten = true; 4827 ctrlr->disconnect = true; 4828 if (TAILQ_EMPTY(&ctrlr->connected_sqs)) { 4829 endpoint->ctrlr = NULL; 4830 free_ctrlr(ctrlr); 4831 pthread_mutex_unlock(&endpoint->lock); 4832 return 0; 4833 } 4834 4835 TAILQ_FOREACH(sq, &ctrlr->connected_sqs, tailq) { 4836 /* add another round thread poll to avoid recursive endpoint lock */ 4837 spdk_thread_send_msg(ctrlr->thread, _vfio_user_qpair_disconnect, sq); 4838 } 4839 pthread_mutex_unlock(&endpoint->lock); 4840 4841 return 0; 4842 } 4843 4844 /* 4845 * Poll for and process any incoming vfio-user messages. 
4846 */ 4847 static int 4848 vfio_user_poll_vfu_ctx(void *ctx) 4849 { 4850 struct nvmf_vfio_user_ctrlr *ctrlr = ctx; 4851 int ret; 4852 4853 assert(ctrlr != NULL); 4854 4855 /* This will call access_bar0_fn() if there are any writes 4856 * to the portion of the BAR that is not mmap'd */ 4857 ret = vfu_run_ctx(ctrlr->endpoint->vfu_ctx); 4858 if (spdk_unlikely(ret == -1)) { 4859 if (errno == EBUSY) { 4860 return SPDK_POLLER_IDLE; 4861 } 4862 4863 spdk_poller_unregister(&ctrlr->vfu_ctx_poller); 4864 4865 /* 4866 * We lost the client; the reset callback will already have 4867 * unregistered the interrupt. 4868 */ 4869 if (errno == ENOTCONN) { 4870 vfio_user_destroy_ctrlr(ctrlr); 4871 return SPDK_POLLER_BUSY; 4872 } 4873 4874 /* 4875 * We might not have got a reset callback in this case, so 4876 * explicitly unregister the interrupt here. 4877 */ 4878 spdk_interrupt_unregister(&ctrlr->intr); 4879 ctrlr->intr_fd = -1; 4880 fail_ctrlr(ctrlr); 4881 } 4882 4883 return ret != 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE; 4884 } 4885 4886 struct vfio_user_post_cpl_ctx { 4887 struct nvmf_vfio_user_ctrlr *ctrlr; 4888 struct nvmf_vfio_user_cq *cq; 4889 struct spdk_nvme_cpl cpl; 4890 }; 4891 4892 static void 4893 _post_completion_msg(void *ctx) 4894 { 4895 struct vfio_user_post_cpl_ctx *cpl_ctx = ctx; 4896 4897 post_completion(cpl_ctx->ctrlr, cpl_ctx->cq, cpl_ctx->cpl.cdw0, cpl_ctx->cpl.sqid, 4898 cpl_ctx->cpl.cid, cpl_ctx->cpl.status.sc, cpl_ctx->cpl.status.sct); 4899 free(cpl_ctx); 4900 } 4901 4902 static int nvmf_vfio_user_poll_group_poll(struct spdk_nvmf_transport_poll_group *group); 4903 4904 static int 4905 vfio_user_poll_group_process(void *ctx) 4906 { 4907 struct nvmf_vfio_user_poll_group *vu_group = ctx; 4908 int ret = 0; 4909 4910 SPDK_DEBUGLOG(vfio_user_db, "pg:%p got intr\n", vu_group); 4911 4912 ret |= nvmf_vfio_user_poll_group_poll(&vu_group->group); 4913 4914 /* 4915 * Re-arm the event indexes. NB: this also could rearm other 4916 * controller's SQs. 4917 */ 4918 ret |= vfio_user_poll_group_rearm(vu_group); 4919 4920 vu_group->stats.pg_process_count++; 4921 return ret != 0 ? SPDK_POLLER_BUSY : SPDK_POLLER_IDLE; 4922 } 4923 4924 static int 4925 vfio_user_poll_group_intr(void *ctx) 4926 { 4927 struct nvmf_vfio_user_poll_group *vu_group = ctx; 4928 eventfd_t val; 4929 4930 eventfd_read(vu_group->intr_fd, &val); 4931 4932 vu_group->stats.intr++; 4933 4934 return vfio_user_poll_group_process(ctx); 4935 } 4936 4937 /* 4938 * Handle an interrupt for the given controller: we must poll the vfu_ctx, and 4939 * the SQs assigned to our own poll group. Other poll groups are handled via 4940 * vfio_user_poll_group_intr(). 4941 */ 4942 static int 4943 vfio_user_ctrlr_intr(void *ctx) 4944 { 4945 struct nvmf_vfio_user_poll_group *vu_ctrlr_group; 4946 struct nvmf_vfio_user_ctrlr *vu_ctrlr = ctx; 4947 struct nvmf_vfio_user_poll_group *vu_group; 4948 int ret = SPDK_POLLER_IDLE; 4949 4950 vu_ctrlr_group = ctrlr_to_poll_group(vu_ctrlr); 4951 4952 SPDK_DEBUGLOG(vfio_user_db, "ctrlr pg:%p got intr\n", vu_ctrlr_group); 4953 4954 vu_ctrlr_group->stats.ctrlr_intr++; 4955 4956 /* 4957 * Poll vfio-user for this controller. We need to do this before polling 4958 * any SQs, as this is where doorbell writes may be handled. 4959 */ 4960 ret = vfio_user_poll_vfu_ctx(vu_ctrlr); 4961 4962 /* 4963 * `sqs[0]` could be set to NULL in vfio_user_poll_vfu_ctx() context, 4964 * just return for this case. 
4965 */ 4966 if (vu_ctrlr->sqs[0] == NULL) { 4967 return ret; 4968 } 4969 4970 if (vu_ctrlr->transport->transport_opts.enable_intr_mode_sq_spreading) { 4971 /* 4972 * We may have just written to a doorbell owned by another 4973 * reactor: we need to prod them to make sure its SQs are polled 4974 * *after* the doorbell value is updated. 4975 */ 4976 TAILQ_FOREACH(vu_group, &vu_ctrlr->transport->poll_groups, link) { 4977 if (vu_group != vu_ctrlr_group) { 4978 SPDK_DEBUGLOG(vfio_user_db, "prodding pg:%p\n", vu_group); 4979 eventfd_write(vu_group->intr_fd, 1); 4980 } 4981 } 4982 } 4983 4984 ret |= vfio_user_poll_group_process(vu_ctrlr_group); 4985 4986 return ret; 4987 } 4988 4989 static void 4990 vfio_user_ctrlr_set_intr_mode(struct spdk_poller *poller, void *ctx, 4991 bool interrupt_mode) 4992 { 4993 struct nvmf_vfio_user_ctrlr *ctrlr = ctx; 4994 assert(ctrlr != NULL); 4995 assert(ctrlr->endpoint != NULL); 4996 4997 SPDK_DEBUGLOG(nvmf_vfio, "%s: setting interrupt mode to %d\n", 4998 ctrlr_id(ctrlr), interrupt_mode); 4999 5000 /* 5001 * interrupt_mode needs to persist across controller resets, so store 5002 * it in the endpoint instead. 5003 */ 5004 ctrlr->endpoint->interrupt_mode = interrupt_mode; 5005 5006 vfio_user_poll_group_rearm(ctrlr_to_poll_group(ctrlr)); 5007 } 5008 5009 /* 5010 * In response to the nvmf_vfio_user_create_ctrlr() path, the admin queue is now 5011 * set up and we can start operating on this controller. 5012 */ 5013 static void 5014 start_ctrlr(struct nvmf_vfio_user_ctrlr *vu_ctrlr, 5015 struct spdk_nvmf_ctrlr *ctrlr) 5016 { 5017 struct nvmf_vfio_user_endpoint *endpoint = vu_ctrlr->endpoint; 5018 5019 vu_ctrlr->ctrlr = ctrlr; 5020 vu_ctrlr->cntlid = ctrlr->cntlid; 5021 vu_ctrlr->thread = spdk_get_thread(); 5022 vu_ctrlr->state = VFIO_USER_CTRLR_RUNNING; 5023 5024 if (!in_interrupt_mode(endpoint->transport)) { 5025 vu_ctrlr->vfu_ctx_poller = SPDK_POLLER_REGISTER(vfio_user_poll_vfu_ctx, 5026 vu_ctrlr, 1000); 5027 return; 5028 } 5029 5030 vu_ctrlr->vfu_ctx_poller = SPDK_POLLER_REGISTER(vfio_user_poll_vfu_ctx, 5031 vu_ctrlr, 0); 5032 5033 vu_ctrlr->intr_fd = vfu_get_poll_fd(vu_ctrlr->endpoint->vfu_ctx); 5034 assert(vu_ctrlr->intr_fd != -1); 5035 5036 vu_ctrlr->intr = SPDK_INTERRUPT_REGISTER(vu_ctrlr->intr_fd, 5037 vfio_user_ctrlr_intr, vu_ctrlr); 5038 5039 assert(vu_ctrlr->intr != NULL); 5040 5041 spdk_poller_register_interrupt(vu_ctrlr->vfu_ctx_poller, 5042 vfio_user_ctrlr_set_intr_mode, 5043 vu_ctrlr); 5044 } 5045 5046 static int 5047 handle_queue_connect_rsp(struct nvmf_vfio_user_req *req, void *cb_arg) 5048 { 5049 struct nvmf_vfio_user_poll_group *vu_group; 5050 struct nvmf_vfio_user_sq *sq = cb_arg; 5051 struct nvmf_vfio_user_cq *admin_cq; 5052 struct nvmf_vfio_user_ctrlr *vu_ctrlr; 5053 struct nvmf_vfio_user_endpoint *endpoint; 5054 5055 assert(sq != NULL); 5056 assert(req != NULL); 5057 5058 vu_ctrlr = sq->ctrlr; 5059 assert(vu_ctrlr != NULL); 5060 endpoint = vu_ctrlr->endpoint; 5061 assert(endpoint != NULL); 5062 5063 if (spdk_nvme_cpl_is_error(&req->req.rsp->nvme_cpl)) { 5064 SPDK_ERRLOG("SC %u, SCT %u\n", req->req.rsp->nvme_cpl.status.sc, req->req.rsp->nvme_cpl.status.sct); 5065 endpoint->ctrlr = NULL; 5066 free_ctrlr(vu_ctrlr); 5067 return -1; 5068 } 5069 5070 vu_group = SPDK_CONTAINEROF(sq->group, struct nvmf_vfio_user_poll_group, group); 5071 TAILQ_INSERT_TAIL(&vu_group->sqs, sq, link); 5072 5073 admin_cq = vu_ctrlr->cqs[0]; 5074 assert(admin_cq != NULL); 5075 assert(admin_cq->group != NULL); 5076 assert(admin_cq->group->group->thread != NULL); 5077 5078 
pthread_mutex_lock(&endpoint->lock); 5079 if (nvmf_qpair_is_admin_queue(&sq->qpair)) { 5080 assert(admin_cq->group->group->thread == spdk_get_thread()); 5081 /* 5082 * The admin queue is special as SQ0 and CQ0 are created 5083 * together. 5084 */ 5085 admin_cq->cq_ref = 1; 5086 start_ctrlr(vu_ctrlr, sq->qpair.ctrlr); 5087 } else { 5088 /* For I/O queues this command was generated in response to an 5089 * ADMIN I/O CREATE SUBMISSION QUEUE command which has not yet 5090 * been completed. Complete it now. 5091 */ 5092 if (sq->post_create_io_sq_completion) { 5093 if (admin_cq->group->group->thread != spdk_get_thread()) { 5094 struct vfio_user_post_cpl_ctx *cpl_ctx; 5095 5096 cpl_ctx = calloc(1, sizeof(*cpl_ctx)); 5097 if (!cpl_ctx) { 5098 return -ENOMEM; 5099 } 5100 cpl_ctx->ctrlr = vu_ctrlr; 5101 cpl_ctx->cq = admin_cq; 5102 cpl_ctx->cpl.sqid = 0; 5103 cpl_ctx->cpl.cdw0 = 0; 5104 cpl_ctx->cpl.cid = sq->create_io_sq_cmd.cid; 5105 cpl_ctx->cpl.status.sc = SPDK_NVME_SC_SUCCESS; 5106 cpl_ctx->cpl.status.sct = SPDK_NVME_SCT_GENERIC; 5107 5108 spdk_thread_send_msg(admin_cq->group->group->thread, 5109 _post_completion_msg, 5110 cpl_ctx); 5111 } else { 5112 post_completion(vu_ctrlr, admin_cq, 0, 0, 5113 sq->create_io_sq_cmd.cid, SPDK_NVME_SC_SUCCESS, SPDK_NVME_SCT_GENERIC); 5114 } 5115 sq->post_create_io_sq_completion = false; 5116 } else if (in_interrupt_mode(endpoint->transport)) { 5117 /* 5118 * If we're live migrating a guest, there is a window 5119 * where the I/O queues haven't been set up but the 5120 * device is in running state, during which the guest 5121 * might write to a doorbell. This doorbell write will 5122 * go unnoticed, so let's poll the whole controller to 5123 * pick that up. 5124 */ 5125 ctrlr_kick(vu_ctrlr); 5126 } 5127 sq->sq_state = VFIO_USER_SQ_ACTIVE; 5128 } 5129 5130 TAILQ_INSERT_TAIL(&vu_ctrlr->connected_sqs, sq, tailq); 5131 pthread_mutex_unlock(&endpoint->lock); 5132 5133 free(req->req.data); 5134 req->req.data = NULL; 5135 5136 return 0; 5137 } 5138 5139 /* 5140 * Add the given qpair to the given poll group. New qpairs are added via 5141 * spdk_nvmf_tgt_new_qpair(), which picks a poll group via 5142 * nvmf_vfio_user_get_optimal_poll_group(), then calls back here via 5143 * nvmf_transport_poll_group_add(). 5144 */ 5145 static int 5146 nvmf_vfio_user_poll_group_add(struct spdk_nvmf_transport_poll_group *group, 5147 struct spdk_nvmf_qpair *qpair) 5148 { 5149 struct nvmf_vfio_user_sq *sq; 5150 struct nvmf_vfio_user_req *vu_req; 5151 struct nvmf_vfio_user_ctrlr *ctrlr; 5152 struct spdk_nvmf_request *req; 5153 struct spdk_nvmf_fabric_connect_data *data; 5154 bool admin; 5155 5156 sq = SPDK_CONTAINEROF(qpair, struct nvmf_vfio_user_sq, qpair); 5157 sq->group = group; 5158 ctrlr = sq->ctrlr; 5159 5160 SPDK_DEBUGLOG(nvmf_vfio, "%s: add QP%d=%p(%p) to poll_group=%p\n", 5161 ctrlr_id(ctrlr), sq->qpair.qid, 5162 sq, qpair, group); 5163 5164 admin = nvmf_qpair_is_admin_queue(&sq->qpair); 5165 5166 vu_req = get_nvmf_vfio_user_req(sq); 5167 if (vu_req == NULL) { 5168 return -1; 5169 } 5170 5171 req = &vu_req->req; 5172 req->cmd->connect_cmd.opcode = SPDK_NVME_OPC_FABRIC; 5173 req->cmd->connect_cmd.cid = 0; 5174 req->cmd->connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT; 5175 req->cmd->connect_cmd.recfmt = 0; 5176 req->cmd->connect_cmd.sqsize = sq->size - 1; 5177 req->cmd->connect_cmd.qid = admin ? 
0 : qpair->qid; 5178 5179 req->length = sizeof(struct spdk_nvmf_fabric_connect_data); 5180 req->data = calloc(1, req->length); 5181 if (req->data == NULL) { 5182 nvmf_vfio_user_req_free(req); 5183 return -ENOMEM; 5184 } 5185 5186 data = (struct spdk_nvmf_fabric_connect_data *)req->data; 5187 data->cntlid = ctrlr->cntlid; 5188 snprintf(data->subnqn, sizeof(data->subnqn), "%s", 5189 spdk_nvmf_subsystem_get_nqn(ctrlr->endpoint->subsystem)); 5190 5191 vu_req->cb_fn = handle_queue_connect_rsp; 5192 vu_req->cb_arg = sq; 5193 5194 SPDK_DEBUGLOG(nvmf_vfio, 5195 "%s: sending connect fabrics command for qid:%#x cntlid=%#x\n", 5196 ctrlr_id(ctrlr), qpair->qid, data->cntlid); 5197 5198 spdk_nvmf_request_exec_fabrics(req); 5199 return 0; 5200 } 5201 5202 static int 5203 nvmf_vfio_user_poll_group_remove(struct spdk_nvmf_transport_poll_group *group, 5204 struct spdk_nvmf_qpair *qpair) 5205 { 5206 struct nvmf_vfio_user_sq *sq; 5207 struct nvmf_vfio_user_poll_group *vu_group; 5208 5209 sq = SPDK_CONTAINEROF(qpair, struct nvmf_vfio_user_sq, qpair); 5210 5211 SPDK_DEBUGLOG(nvmf_vfio, 5212 "%s: remove NVMf QP%d=%p from NVMf poll_group=%p\n", 5213 ctrlr_id(sq->ctrlr), qpair->qid, qpair, group); 5214 5215 5216 vu_group = SPDK_CONTAINEROF(group, struct nvmf_vfio_user_poll_group, group); 5217 TAILQ_REMOVE(&vu_group->sqs, sq, link); 5218 5219 return 0; 5220 } 5221 5222 static void 5223 _nvmf_vfio_user_req_free(struct nvmf_vfio_user_sq *sq, struct nvmf_vfio_user_req *vu_req) 5224 { 5225 memset(&vu_req->cmd, 0, sizeof(vu_req->cmd)); 5226 memset(&vu_req->rsp, 0, sizeof(vu_req->rsp)); 5227 vu_req->iovcnt = 0; 5228 vu_req->state = VFIO_USER_REQUEST_STATE_FREE; 5229 5230 TAILQ_INSERT_TAIL(&sq->free_reqs, vu_req, link); 5231 } 5232 5233 static int 5234 nvmf_vfio_user_req_free(struct spdk_nvmf_request *req) 5235 { 5236 struct nvmf_vfio_user_sq *sq; 5237 struct nvmf_vfio_user_req *vu_req; 5238 5239 assert(req != NULL); 5240 5241 vu_req = SPDK_CONTAINEROF(req, struct nvmf_vfio_user_req, req); 5242 sq = SPDK_CONTAINEROF(req->qpair, struct nvmf_vfio_user_sq, qpair); 5243 5244 _nvmf_vfio_user_req_free(sq, vu_req); 5245 5246 return 0; 5247 } 5248 5249 static int 5250 nvmf_vfio_user_req_complete(struct spdk_nvmf_request *req) 5251 { 5252 struct nvmf_vfio_user_sq *sq; 5253 struct nvmf_vfio_user_req *vu_req; 5254 5255 assert(req != NULL); 5256 5257 vu_req = SPDK_CONTAINEROF(req, struct nvmf_vfio_user_req, req); 5258 sq = SPDK_CONTAINEROF(req->qpair, struct nvmf_vfio_user_sq, qpair); 5259 5260 if (vu_req->cb_fn != NULL) { 5261 if (vu_req->cb_fn(vu_req, vu_req->cb_arg) != 0) { 5262 fail_ctrlr(sq->ctrlr); 5263 } 5264 } 5265 5266 _nvmf_vfio_user_req_free(sq, vu_req); 5267 5268 return 0; 5269 } 5270 5271 static void 5272 nvmf_vfio_user_close_qpair(struct spdk_nvmf_qpair *qpair, 5273 spdk_nvmf_transport_qpair_fini_cb cb_fn, void *cb_arg) 5274 { 5275 struct nvmf_vfio_user_sq *sq; 5276 struct nvmf_vfio_user_ctrlr *vu_ctrlr; 5277 struct nvmf_vfio_user_endpoint *endpoint; 5278 5279 assert(qpair != NULL); 5280 sq = SPDK_CONTAINEROF(qpair, struct nvmf_vfio_user_sq, qpair); 5281 vu_ctrlr = sq->ctrlr; 5282 endpoint = vu_ctrlr->endpoint; 5283 5284 pthread_mutex_lock(&endpoint->lock); 5285 TAILQ_REMOVE(&vu_ctrlr->connected_sqs, sq, tailq); 5286 delete_sq_done(vu_ctrlr, sq); 5287 if (TAILQ_EMPTY(&vu_ctrlr->connected_sqs)) { 5288 endpoint->ctrlr = NULL; 5289 if (vu_ctrlr->in_source_vm && endpoint->need_resume) { 5290 /* The controller will be freed, we can resume the subsystem 5291 * now so that the endpoint can be ready to accept another 5292 * 
new connection. 5293 */ 5294 spdk_nvmf_subsystem_resume((struct spdk_nvmf_subsystem *)endpoint->subsystem, 5295 vfio_user_endpoint_resume_done, endpoint); 5296 } 5297 free_ctrlr(vu_ctrlr); 5298 } 5299 pthread_mutex_unlock(&endpoint->lock); 5300 5301 if (cb_fn) { 5302 cb_fn(cb_arg); 5303 } 5304 } 5305 5306 /** 5307 * Returns a preallocated request, or NULL if there isn't one available. 5308 */ 5309 static struct nvmf_vfio_user_req * 5310 get_nvmf_vfio_user_req(struct nvmf_vfio_user_sq *sq) 5311 { 5312 struct nvmf_vfio_user_req *req; 5313 5314 if (sq == NULL) { 5315 return NULL; 5316 } 5317 5318 req = TAILQ_FIRST(&sq->free_reqs); 5319 if (req == NULL) { 5320 return NULL; 5321 } 5322 5323 TAILQ_REMOVE(&sq->free_reqs, req, link); 5324 5325 return req; 5326 } 5327 5328 static int 5329 get_nvmf_io_req_length(struct spdk_nvmf_request *req) 5330 { 5331 uint16_t nr; 5332 uint32_t nlb, nsid; 5333 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 5334 struct spdk_nvmf_ctrlr *ctrlr = req->qpair->ctrlr; 5335 struct spdk_nvmf_ns *ns; 5336 5337 nsid = cmd->nsid; 5338 ns = _nvmf_subsystem_get_ns(ctrlr->subsys, nsid); 5339 if (ns == NULL || ns->bdev == NULL) { 5340 SPDK_ERRLOG("unsuccessful query for nsid %u\n", cmd->nsid); 5341 return -EINVAL; 5342 } 5343 5344 if (cmd->opc == SPDK_NVME_OPC_DATASET_MANAGEMENT) { 5345 nr = cmd->cdw10_bits.dsm.nr + 1; 5346 return nr * sizeof(struct spdk_nvme_dsm_range); 5347 } 5348 5349 nlb = (cmd->cdw12 & 0x0000ffffu) + 1; 5350 return nlb * spdk_bdev_get_block_size(ns->bdev); 5351 } 5352 5353 static int 5354 map_admin_cmd_req(struct nvmf_vfio_user_ctrlr *ctrlr, struct spdk_nvmf_request *req) 5355 { 5356 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd; 5357 uint32_t len = 0; 5358 uint8_t fid; 5359 int iovcnt; 5360 5361 req->xfer = spdk_nvme_opc_get_data_transfer(cmd->opc); 5362 req->length = 0; 5363 req->data = NULL; 5364 5365 if (req->xfer == SPDK_NVME_DATA_NONE) { 5366 return 0; 5367 } 5368 5369 switch (cmd->opc) { 5370 case SPDK_NVME_OPC_IDENTIFY: 5371 len = 4096; 5372 break; 5373 case SPDK_NVME_OPC_GET_LOG_PAGE: 5374 len = (((cmd->cdw11_bits.get_log_page.numdu << 16) | cmd->cdw10_bits.get_log_page.numdl) + 1) * 4; 5375 break; 5376 case SPDK_NVME_OPC_GET_FEATURES: 5377 case SPDK_NVME_OPC_SET_FEATURES: 5378 fid = cmd->cdw10_bits.set_features.fid; 5379 switch (fid) { 5380 case SPDK_NVME_FEAT_LBA_RANGE_TYPE: 5381 len = 4096; 5382 break; 5383 case SPDK_NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION: 5384 len = 256; 5385 break; 5386 case SPDK_NVME_FEAT_TIMESTAMP: 5387 len = 8; 5388 break; 5389 case SPDK_NVME_FEAT_HOST_BEHAVIOR_SUPPORT: 5390 len = 512; 5391 break; 5392 case SPDK_NVME_FEAT_HOST_IDENTIFIER: 5393 if (cmd->cdw11_bits.feat_host_identifier.bits.exhid) { 5394 len = 16; 5395 } else { 5396 len = 8; 5397 } 5398 break; 5399 default: 5400 return 0; 5401 } 5402 break; 5403 default: 5404 return 0; 5405 } 5406 5407 /* ADMIN command will not use SGL */ 5408 if (cmd->psdt != 0) { 5409 return -EINVAL; 5410 } 5411 5412 iovcnt = vfio_user_map_cmd(ctrlr, req, req->iov, len); 5413 if (iovcnt < 0) { 5414 SPDK_ERRLOG("%s: map Admin Opc %x failed\n", 5415 ctrlr_id(ctrlr), cmd->opc); 5416 return -1; 5417 } 5418 req->length = len; 5419 req->data = req->iov[0].iov_base; 5420 req->iovcnt = iovcnt; 5421 5422 return 0; 5423 } 5424 5425 /* 5426 * Map an I/O command's buffers. 5427 * 5428 * Returns 0 on success and -errno on failure. 
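 * On success the guest data buffers are described by req->iov and req->data for the generic nvmf layer.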
5429 */ 5430 static int 5431 map_io_cmd_req(struct nvmf_vfio_user_ctrlr *ctrlr, struct spdk_nvmf_request *req) 5432 { 5433 int len, iovcnt; 5434 struct spdk_nvme_cmd *cmd; 5435 5436 assert(ctrlr != NULL); 5437 assert(req != NULL); 5438 5439 cmd = &req->cmd->nvme_cmd; 5440 req->xfer = spdk_nvme_opc_get_data_transfer(cmd->opc); 5441 req->length = 0; 5442 req->data = NULL; 5443 5444 if (spdk_unlikely(req->xfer == SPDK_NVME_DATA_NONE)) { 5445 return 0; 5446 } 5447 5448 len = get_nvmf_io_req_length(req); 5449 if (len < 0) { 5450 return -EINVAL; 5451 } 5452 req->length = len; 5453 5454 iovcnt = vfio_user_map_cmd(ctrlr, req, req->iov, req->length); 5455 if (iovcnt < 0) { 5456 SPDK_ERRLOG("%s: failed to map IO OPC %u\n", ctrlr_id(ctrlr), cmd->opc); 5457 return -EFAULT; 5458 } 5459 req->data = req->iov[0].iov_base; 5460 req->iovcnt = iovcnt; 5461 5462 return 0; 5463 } 5464 5465 static int 5466 handle_cmd_req(struct nvmf_vfio_user_ctrlr *ctrlr, struct spdk_nvme_cmd *cmd, 5467 struct nvmf_vfio_user_sq *sq) 5468 { 5469 int err; 5470 struct nvmf_vfio_user_req *vu_req; 5471 struct spdk_nvmf_request *req; 5472 5473 assert(ctrlr != NULL); 5474 assert(cmd != NULL); 5475 5476 vu_req = get_nvmf_vfio_user_req(sq); 5477 if (spdk_unlikely(vu_req == NULL)) { 5478 SPDK_ERRLOG("%s: no request for NVMe command opc 0x%x\n", ctrlr_id(ctrlr), cmd->opc); 5479 return post_completion(ctrlr, ctrlr->cqs[sq->cqid], 0, 0, cmd->cid, 5480 SPDK_NVME_SC_INTERNAL_DEVICE_ERROR, SPDK_NVME_SCT_GENERIC); 5481 5482 } 5483 req = &vu_req->req; 5484 5485 assert(req->qpair != NULL); 5486 SPDK_DEBUGLOG(nvmf_vfio, "%s: handle sqid:%u, req opc=%#x cid=%d\n", 5487 ctrlr_id(ctrlr), req->qpair->qid, cmd->opc, cmd->cid); 5488 5489 vu_req->cb_fn = handle_cmd_rsp; 5490 vu_req->cb_arg = SPDK_CONTAINEROF(req->qpair, struct nvmf_vfio_user_sq, qpair); 5491 req->cmd->nvme_cmd = *cmd; 5492 5493 if (nvmf_qpair_is_admin_queue(req->qpair)) { 5494 err = map_admin_cmd_req(ctrlr, req); 5495 } else { 5496 switch (cmd->opc) { 5497 case SPDK_NVME_OPC_RESERVATION_REGISTER: 5498 case SPDK_NVME_OPC_RESERVATION_REPORT: 5499 case SPDK_NVME_OPC_RESERVATION_ACQUIRE: 5500 case SPDK_NVME_OPC_RESERVATION_RELEASE: 5501 err = -ENOTSUP; 5502 break; 5503 default: 5504 err = map_io_cmd_req(ctrlr, req); 5505 break; 5506 } 5507 } 5508 5509 if (spdk_unlikely(err < 0)) { 5510 SPDK_ERRLOG("%s: process NVMe command opc 0x%x failed\n", 5511 ctrlr_id(ctrlr), cmd->opc); 5512 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR; 5513 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC; 5514 err = handle_cmd_rsp(vu_req, vu_req->cb_arg); 5515 _nvmf_vfio_user_req_free(sq, vu_req); 5516 return err; 5517 } 5518 5519 vu_req->state = VFIO_USER_REQUEST_STATE_EXECUTING; 5520 spdk_nvmf_request_exec(req); 5521 5522 return 0; 5523 } 5524 5525 /* 5526 * If we suppressed an IRQ in post_completion(), check if it needs to be fired 5527 * here: if the host isn't up to date, and is apparently not actively processing 5528 * the queue (i.e. ->last_head isn't changing), we need an IRQ. 5529 */ 5530 static void 5531 handle_suppressed_irq(struct nvmf_vfio_user_ctrlr *ctrlr, 5532 struct nvmf_vfio_user_sq *sq) 5533 { 5534 struct nvmf_vfio_user_cq *cq = ctrlr->cqs[sq->cqid]; 5535 uint32_t cq_head; 5536 uint32_t cq_tail; 5537 5538 if (!cq->ien || cq->qid == 0 || !ctrlr_interrupt_enabled(ctrlr)) { 5539 return; 5540 } 5541 5542 cq_tail = *cq_tailp(cq); 5543 5544 /* Already sent? 
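 * If an interrupt was already raised for this CQ tail value, don't raise another one.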
*/ 5545 if (cq_tail == cq->last_trigger_irq_tail) { 5546 return; 5547 } 5548 5549 spdk_ivdt_dcache(cq_dbl_headp(cq)); 5550 cq_head = *cq_dbl_headp(cq); 5551 5552 if (cq_head != cq_tail && cq_head == cq->last_head) { 5553 int err = vfu_irq_trigger(ctrlr->endpoint->vfu_ctx, cq->iv); 5554 if (err != 0) { 5555 SPDK_ERRLOG("%s: failed to trigger interrupt: %m\n", 5556 ctrlr_id(ctrlr)); 5557 } else { 5558 cq->last_trigger_irq_tail = cq_tail; 5559 } 5560 } 5561 5562 cq->last_head = cq_head; 5563 } 5564 5565 /* Returns the number of commands processed, or a negative value on error. */ 5566 static int 5567 nvmf_vfio_user_sq_poll(struct nvmf_vfio_user_sq *sq) 5568 { 5569 struct nvmf_vfio_user_ctrlr *ctrlr; 5570 uint32_t new_tail; 5571 int count = 0; 5572 5573 assert(sq != NULL); 5574 5575 ctrlr = sq->ctrlr; 5576 5577 /* 5578 * A quiesced, or migrating, controller should never process new 5579 * commands. 5580 */ 5581 if (ctrlr->state != VFIO_USER_CTRLR_RUNNING) { 5582 return SPDK_POLLER_IDLE; 5583 } 5584 5585 if (ctrlr->adaptive_irqs_enabled) { 5586 handle_suppressed_irq(ctrlr, sq); 5587 } 5588 5589 /* On aarch64 platforms, doorbells update from guest VM may not be seen 5590 * on SPDK target side. This is because there is memory type mismatch 5591 * situation here. That is on guest VM side, the doorbells are treated as 5592 * device memory while on SPDK target side, it is treated as normal 5593 * memory. And this situation cause problem on ARM platform. 5594 * Refer to "https://developer.arm.com/documentation/102376/0100/ 5595 * Memory-aliasing-and-mismatched-memory-types". Only using spdk_mb() 5596 * cannot fix this. Use "dc civac" to invalidate cache may solve 5597 * this. 5598 */ 5599 spdk_ivdt_dcache(sq_dbl_tailp(sq)); 5600 5601 /* Load-Acquire. */ 5602 new_tail = *sq_dbl_tailp(sq); 5603 5604 new_tail = new_tail & 0xffffu; 5605 if (spdk_unlikely(new_tail >= sq->size)) { 5606 union spdk_nvme_async_event_completion event = {}; 5607 5608 SPDK_DEBUGLOG(nvmf_vfio, "%s: invalid sqid:%u doorbell value %u\n", ctrlr_id(ctrlr), sq->qid, 5609 new_tail); 5610 event.bits.async_event_type = SPDK_NVME_ASYNC_EVENT_TYPE_ERROR; 5611 event.bits.async_event_info = SPDK_NVME_ASYNC_EVENT_INVALID_DB_WRITE; 5612 nvmf_ctrlr_async_event_error_event(ctrlr->ctrlr, event); 5613 5614 return -1; 5615 } 5616 5617 if (*sq_headp(sq) == new_tail) { 5618 return 0; 5619 } 5620 5621 SPDK_DEBUGLOG(nvmf_vfio, "%s: sqid:%u doorbell old=%u new=%u\n", 5622 ctrlr_id(ctrlr), sq->qid, *sq_headp(sq), new_tail); 5623 if (ctrlr->sdbl != NULL) { 5624 SPDK_DEBUGLOG(nvmf_vfio, 5625 "%s: sqid:%u bar0_doorbell=%u shadow_doorbell=%u eventidx=%u\n", 5626 ctrlr_id(ctrlr), sq->qid, 5627 ctrlr->bar0_doorbells[queue_index(sq->qid, false)], 5628 ctrlr->sdbl->shadow_doorbells[queue_index(sq->qid, false)], 5629 ctrlr->sdbl->eventidxs[queue_index(sq->qid, false)]); 5630 } 5631 5632 /* 5633 * Ensure that changes to the queue are visible to us. 5634 * The host driver should write the queue first, do a wmb(), and then 5635 * update the SQ tail doorbell (their Store-Release). 5636 */ 5637 spdk_rmb(); 5638 5639 count = handle_sq_tdbl_write(ctrlr, new_tail, sq); 5640 if (spdk_unlikely(count < 0)) { 5641 fail_ctrlr(ctrlr); 5642 } 5643 5644 return count; 5645 } 5646 5647 /* 5648 * vfio-user transport poll handler. Note that the library context is polled in 5649 * a separate poller (->vfu_ctx_poller), so this poller only needs to poll the 5650 * active SQs. 5651 * 5652 * Returns the number of commands processed, or a negative value on error. 
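 * Per-poll statistics are accumulated in vu_group->stats and reported by nvmf_vfio_user_poll_group_dump_stat().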
5653 */ 5654 static int 5655 nvmf_vfio_user_poll_group_poll(struct spdk_nvmf_transport_poll_group *group) 5656 { 5657 struct nvmf_vfio_user_poll_group *vu_group; 5658 struct nvmf_vfio_user_sq *sq, *tmp; 5659 int count = 0; 5660 5661 assert(group != NULL); 5662 5663 vu_group = SPDK_CONTAINEROF(group, struct nvmf_vfio_user_poll_group, group); 5664 5665 SPDK_DEBUGLOG(vfio_user_db, "polling all SQs\n"); 5666 5667 TAILQ_FOREACH_SAFE(sq, &vu_group->sqs, link, tmp) { 5668 int ret; 5669 5670 if (spdk_unlikely(sq->sq_state != VFIO_USER_SQ_ACTIVE || !sq->size)) { 5671 continue; 5672 } 5673 5674 ret = nvmf_vfio_user_sq_poll(sq); 5675 5676 if (spdk_unlikely(ret < 0)) { 5677 return ret; 5678 } 5679 5680 count += ret; 5681 } 5682 5683 vu_group->stats.polls++; 5684 vu_group->stats.poll_reqs += count; 5685 vu_group->stats.poll_reqs_squared += count * count; 5686 if (count == 0) { 5687 vu_group->stats.polls_spurious++; 5688 } 5689 5690 return count; 5691 } 5692 5693 static int 5694 nvmf_vfio_user_qpair_get_local_trid(struct spdk_nvmf_qpair *qpair, 5695 struct spdk_nvme_transport_id *trid) 5696 { 5697 struct nvmf_vfio_user_sq *sq; 5698 struct nvmf_vfio_user_ctrlr *ctrlr; 5699 5700 sq = SPDK_CONTAINEROF(qpair, struct nvmf_vfio_user_sq, qpair); 5701 ctrlr = sq->ctrlr; 5702 5703 memcpy(trid, &ctrlr->endpoint->trid, sizeof(*trid)); 5704 return 0; 5705 } 5706 5707 static int 5708 nvmf_vfio_user_qpair_get_peer_trid(struct spdk_nvmf_qpair *qpair, 5709 struct spdk_nvme_transport_id *trid) 5710 { 5711 return 0; 5712 } 5713 5714 static int 5715 nvmf_vfio_user_qpair_get_listen_trid(struct spdk_nvmf_qpair *qpair, 5716 struct spdk_nvme_transport_id *trid) 5717 { 5718 struct nvmf_vfio_user_sq *sq; 5719 struct nvmf_vfio_user_ctrlr *ctrlr; 5720 5721 sq = SPDK_CONTAINEROF(qpair, struct nvmf_vfio_user_sq, qpair); 5722 ctrlr = sq->ctrlr; 5723 5724 memcpy(trid, &ctrlr->endpoint->trid, sizeof(*trid)); 5725 return 0; 5726 } 5727 5728 static void 5729 nvmf_vfio_user_qpair_abort_request(struct spdk_nvmf_qpair *qpair, 5730 struct spdk_nvmf_request *req) 5731 { 5732 struct spdk_nvmf_request *req_to_abort = NULL; 5733 struct spdk_nvmf_request *temp_req = NULL; 5734 uint16_t cid; 5735 5736 cid = req->cmd->nvme_cmd.cdw10_bits.abort.cid; 5737 5738 TAILQ_FOREACH(temp_req, &qpair->outstanding, link) { 5739 struct nvmf_vfio_user_req *vu_req; 5740 5741 vu_req = SPDK_CONTAINEROF(temp_req, struct nvmf_vfio_user_req, req); 5742 5743 if (vu_req->state == VFIO_USER_REQUEST_STATE_EXECUTING && vu_req->cmd.cid == cid) { 5744 req_to_abort = temp_req; 5745 break; 5746 } 5747 } 5748 5749 if (req_to_abort == NULL) { 5750 spdk_nvmf_request_complete(req); 5751 return; 5752 } 5753 5754 req->req_to_abort = req_to_abort; 5755 nvmf_ctrlr_abort_request(req); 5756 } 5757 5758 static void 5759 nvmf_vfio_user_poll_group_dump_stat(struct spdk_nvmf_transport_poll_group *group, 5760 struct spdk_json_write_ctx *w) 5761 { 5762 struct nvmf_vfio_user_poll_group *vu_group = SPDK_CONTAINEROF(group, 5763 struct nvmf_vfio_user_poll_group, group); 5764 uint64_t polls_denom; 5765 5766 spdk_json_write_named_uint64(w, "ctrlr_intr", vu_group->stats.ctrlr_intr); 5767 spdk_json_write_named_uint64(w, "ctrlr_kicks", vu_group->stats.ctrlr_kicks); 5768 spdk_json_write_named_uint64(w, "won", vu_group->stats.won); 5769 spdk_json_write_named_uint64(w, "lost", vu_group->stats.lost); 5770 spdk_json_write_named_uint64(w, "lost_count", vu_group->stats.lost_count); 5771 spdk_json_write_named_uint64(w, "rearms", vu_group->stats.rearms); 5772 spdk_json_write_named_uint64(w, "pg_process_count", 
vu_group->stats.pg_process_count); 5773 spdk_json_write_named_uint64(w, "intr", vu_group->stats.intr); 5774 spdk_json_write_named_uint64(w, "polls", vu_group->stats.polls); 5775 spdk_json_write_named_uint64(w, "polls_spurious", vu_group->stats.polls_spurious); 5776 spdk_json_write_named_uint64(w, "poll_reqs", vu_group->stats.poll_reqs); 5777 polls_denom = vu_group->stats.polls * (vu_group->stats.polls - 1); 5778 if (polls_denom) { 5779 uint64_t n = vu_group->stats.polls * vu_group->stats.poll_reqs_squared - vu_group->stats.poll_reqs * 5780 vu_group->stats.poll_reqs; 5781 spdk_json_write_named_double(w, "poll_reqs_variance", sqrt(n / polls_denom)); 5782 } 5783 5784 spdk_json_write_named_uint64(w, "cqh_admin_writes", vu_group->stats.cqh_admin_writes); 5785 spdk_json_write_named_uint64(w, "cqh_io_writes", vu_group->stats.cqh_io_writes); 5786 } 5787 5788 static void 5789 nvmf_vfio_user_opts_init(struct spdk_nvmf_transport_opts *opts) 5790 { 5791 opts->max_queue_depth = NVMF_VFIO_USER_DEFAULT_MAX_QUEUE_DEPTH; 5792 opts->max_qpairs_per_ctrlr = NVMF_VFIO_USER_DEFAULT_MAX_QPAIRS_PER_CTRLR; 5793 opts->in_capsule_data_size = 0; 5794 opts->max_io_size = NVMF_VFIO_USER_DEFAULT_MAX_IO_SIZE; 5795 opts->io_unit_size = NVMF_VFIO_USER_DEFAULT_IO_UNIT_SIZE; 5796 opts->max_aq_depth = NVMF_VFIO_USER_DEFAULT_AQ_DEPTH; 5797 opts->num_shared_buffers = 0; 5798 opts->buf_cache_size = 0; 5799 opts->association_timeout = 0; 5800 opts->transport_specific = NULL; 5801 } 5802 5803 const struct spdk_nvmf_transport_ops spdk_nvmf_transport_vfio_user = { 5804 .name = "VFIOUSER", 5805 .type = SPDK_NVME_TRANSPORT_VFIOUSER, 5806 .opts_init = nvmf_vfio_user_opts_init, 5807 .create = nvmf_vfio_user_create, 5808 .destroy = nvmf_vfio_user_destroy, 5809 5810 .listen = nvmf_vfio_user_listen, 5811 .stop_listen = nvmf_vfio_user_stop_listen, 5812 .cdata_init = nvmf_vfio_user_cdata_init, 5813 .listen_associate = nvmf_vfio_user_listen_associate, 5814 5815 .listener_discover = nvmf_vfio_user_discover, 5816 5817 .poll_group_create = nvmf_vfio_user_poll_group_create, 5818 .get_optimal_poll_group = nvmf_vfio_user_get_optimal_poll_group, 5819 .poll_group_destroy = nvmf_vfio_user_poll_group_destroy, 5820 .poll_group_add = nvmf_vfio_user_poll_group_add, 5821 .poll_group_remove = nvmf_vfio_user_poll_group_remove, 5822 .poll_group_poll = nvmf_vfio_user_poll_group_poll, 5823 5824 .req_free = nvmf_vfio_user_req_free, 5825 .req_complete = nvmf_vfio_user_req_complete, 5826 5827 .qpair_fini = nvmf_vfio_user_close_qpair, 5828 .qpair_get_local_trid = nvmf_vfio_user_qpair_get_local_trid, 5829 .qpair_get_peer_trid = nvmf_vfio_user_qpair_get_peer_trid, 5830 .qpair_get_listen_trid = nvmf_vfio_user_qpair_get_listen_trid, 5831 .qpair_abort_request = nvmf_vfio_user_qpair_abort_request, 5832 5833 .poll_group_dump_stat = nvmf_vfio_user_poll_group_dump_stat, 5834 }; 5835 5836 SPDK_NVMF_TRANSPORT_REGISTER(muser, &spdk_nvmf_transport_vfio_user); 5837 SPDK_LOG_REGISTER_COMPONENT(nvmf_vfio) 5838 SPDK_LOG_REGISTER_COMPONENT(vfio_user_db) 5839