Lines matching defs:vu_ctrlr

631 io_q_exists(struct nvmf_vfio_user_ctrlr *vu_ctrlr, const uint16_t qid, const bool is_cq)
633 assert(vu_ctrlr != NULL);
640 if (vu_ctrlr->cqs[qid] == NULL) {
644 return (vu_ctrlr->cqs[qid]->cq_state != VFIO_USER_CQ_DELETED &&
645 vu_ctrlr->cqs[qid]->cq_state != VFIO_USER_CQ_UNUSED);
648 if (vu_ctrlr->sqs[qid] == NULL) {
652 return (vu_ctrlr->sqs[qid]->sq_state != VFIO_USER_SQ_DELETED &&
653 vu_ctrlr->sqs[qid]->sq_state != VFIO_USER_SQ_UNUSED);
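
Read together, the matches at 631-653 give nearly the whole of io_q_exists(); a minimal reconstruction, where the qid bounds check and the branch structure between the matched lines are inferred rather than verbatim:

    static bool
    io_q_exists(struct nvmf_vfio_user_ctrlr *vu_ctrlr, const uint16_t qid, const bool is_cq)
    {
        assert(vu_ctrlr != NULL);

        if (qid == 0 || qid >= NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR) {  /* guard assumed */
            return false;
        }

        if (is_cq) {
            if (vu_ctrlr->cqs[qid] == NULL) {
                return false;
            }
            return (vu_ctrlr->cqs[qid]->cq_state != VFIO_USER_CQ_DELETED &&
                    vu_ctrlr->cqs[qid]->cq_state != VFIO_USER_CQ_UNUSED);
        }

        if (vu_ctrlr->sqs[qid] == NULL) {
            return false;
        }
        return (vu_ctrlr->sqs[qid]->sq_state != VFIO_USER_SQ_DELETED &&
                vu_ctrlr->sqs[qid]->sq_state != VFIO_USER_SQ_UNUSED);
    }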
674 ctrlr_to_poll_group(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
676 return SPDK_CONTAINEROF(vu_ctrlr->sqs[0]->group,
727 ctrlr_kick(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
731 SPDK_DEBUGLOG(vfio_user_db, "%s: kicked\n", ctrlr_id(vu_ctrlr));
733 vu_ctrlr_group = ctrlr_to_poll_group(vu_ctrlr);
738 vfio_user_ctrlr_intr_msg, vu_ctrlr);
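
ctrlr_to_poll_group() recovers the poll group that owns the admin queue, and ctrlr_kick() uses it to wake the controller's thread in interrupt mode; a sketch, where poll_group_to_thread() is assumed to be a file-local helper returning the group's SPDK thread:

    static struct nvmf_vfio_user_poll_group *
    ctrlr_to_poll_group(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
    {
        /* sqs[0] is the admin SQ; its group is the controller's home poll group. */
        return SPDK_CONTAINEROF(vu_ctrlr->sqs[0]->group,
                                struct nvmf_vfio_user_poll_group, group);
    }

    static void
    ctrlr_kick(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
    {
        struct nvmf_vfio_user_poll_group *vu_ctrlr_group;

        SPDK_DEBUGLOG(vfio_user_db, "%s: kicked\n", ctrlr_id(vu_ctrlr));

        vu_ctrlr_group = ctrlr_to_poll_group(vu_ctrlr);

        /* Run the interrupt handler on the owning thread, as if the vfu fd had fired. */
        spdk_thread_send_msg(poll_group_to_thread(vu_ctrlr_group),  /* helper assumed */
                             vfio_user_ctrlr_intr_msg, vu_ctrlr);
    }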
1167 fail_ctrlr(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
1171 assert(vu_ctrlr != NULL);
1172 assert(vu_ctrlr->ctrlr != NULL);
1174 regs = spdk_nvmf_ctrlr_get_regs(vu_ctrlr->ctrlr);
1176 SPDK_ERRLOG("%s: failing controller\n", ctrlr_id(vu_ctrlr));
1179 nvmf_ctrlr_set_fatal_status(vu_ctrlr->ctrlr);
1183 ctrlr_interrupt_enabled(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
1185 assert(vu_ctrlr != NULL);
1186 assert(vu_ctrlr->endpoint != NULL);
1188 vfu_pci_config_space_t *pci = vu_ctrlr->endpoint->pci_config_space;
1190 return (!pci->hdr.cmd.id || vu_ctrlr->endpoint->msix->mxc.mxe);
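
The predicate at 1190 accepts either interrupt path: INTx counts as enabled while the PCI command register's Interrupt Disable bit (cmd.id) is clear, and MSI-X while the capability's MXE bit is set. Filled out, with only the declaration line assumed:

    static bool
    ctrlr_interrupt_enabled(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
    {
        assert(vu_ctrlr != NULL);
        assert(vu_ctrlr->endpoint != NULL);

        vfu_pci_config_space_t *pci = vu_ctrlr->endpoint->pci_config_space;

        /* INTx enabled (Interrupt Disable clear) or MSI-X Enable set. */
        return (!pci->hdr.cmd.id || vu_ctrlr->endpoint->msix->mxc.mxe);
    }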
1373 max_queue_size(struct nvmf_vfio_user_ctrlr const *vu_ctrlr)
1375 assert(vu_ctrlr != NULL);
1376 assert(vu_ctrlr->ctrlr != NULL);
1378 return vu_ctrlr->ctrlr->vcprop.cap.bits.mqes + 1;
1382 doorbell_stride(const struct nvmf_vfio_user_ctrlr *vu_ctrlr)
1384 assert(vu_ctrlr != NULL);
1385 assert(vu_ctrlr->ctrlr != NULL);
1387 return vu_ctrlr->ctrlr->vcprop.cap.bits.dstrd;
1391 memory_page_size(const struct nvmf_vfio_user_ctrlr *vu_ctrlr)
1393 uint32_t memory_page_shift = vu_ctrlr->ctrlr->vcprop.cc.bits.mps + 12;
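
These three accessors read controller geometry straight out of the virtual property set: CAP.MQES is zero-based (hence the +1 at 1378), CAP.DSTRD is the doorbell stride exponent, and CC.MPS offsets a 4 KiB base page. Completing memory_page_size() past the matched line is an assumption:

    static uint32_t
    memory_page_size(const struct nvmf_vfio_user_ctrlr *vu_ctrlr)
    {
        /* CC.MPS is log2(page size) - 12, so page size = 2^(12 + MPS) bytes. */
        uint32_t memory_page_shift = vu_ctrlr->ctrlr->vcprop.cc.bits.mps + 12;

        return 1u << memory_page_shift;  /* assumed continuation of line 1393 */
    }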
1404 map_q(struct nvmf_vfio_user_ctrlr *vu_ctrlr, struct nvme_q_mapping *mapping,
1412 ret = map_one(vu_ctrlr->endpoint->vfu_ctx, mapping->prp1, mapping->len,
1426 unmap_q(struct nvmf_vfio_user_ctrlr *vu_ctrlr, struct nvme_q_mapping *mapping)
1429 vfu_sgl_put(vu_ctrlr->endpoint->vfu_ctx, mapping->sg,
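
map_q() pins the guest pages backing a queue through a libvfio-user DMA SGL, and unmap_q() releases them with vfu_sgl_put(); only the two call sites are matched, so the argument plumbing below is inferred:

    static int
    map_q(struct nvmf_vfio_user_ctrlr *vu_ctrlr, struct nvme_q_mapping *mapping,
          uint32_t flags)                              /* parameter list assumed */
    {
        void *ret;

        ret = map_one(vu_ctrlr->endpoint->vfu_ctx, mapping->prp1, mapping->len,
                      mapping->sg, &mapping->iov, flags);
        return ret == NULL ? -EFAULT : 0;              /* error convention assumed */
    }

    static void
    unmap_q(struct nvmf_vfio_user_ctrlr *vu_ctrlr, struct nvme_q_mapping *mapping)
    {
        if (mapping->iov.iov_base != NULL) {           /* guard assumed */
            vfu_sgl_put(vu_ctrlr->endpoint->vfu_ctx, mapping->sg,
                        &mapping->iov, 1);
            mapping->iov.iov_base = NULL;
        }
    }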
1868 delete_sq_done(struct nvmf_vfio_user_ctrlr *vu_ctrlr, struct nvmf_vfio_user_sq *sq)
1873 SPDK_DEBUGLOG(nvmf_vfio, "%s: delete sqid:%d=%p done\n", ctrlr_id(vu_ctrlr),
1877 unmap_q(vu_ctrlr, &sq->mapping);
1889 if (vu_ctrlr->reset_shn || vu_ctrlr->disconnect) {
1891 cq = vu_ctrlr->cqs[cqid];
1893 SPDK_DEBUGLOG(nvmf_vfio, "%s: trying to delete cqid:%u=%p\n", ctrlr_id(vu_ctrlr),
1898 delete_cq_done(vu_ctrlr, cq);
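
delete_sq_done() unmaps the ring and, during reset/shutdown/disconnect, also retires the paired CQ, which cannot go earlier because other SQs may still post to it; condensed, with the state assignment and refcount guard assumed:

    /* Inside delete_sq_done(), condensed: */
    unmap_q(vu_ctrlr, &sq->mapping);
    sq->sq_state = VFIO_USER_SQ_DELETED;                /* assumed */

    if (vu_ctrlr->reset_shn || vu_ctrlr->disconnect) {
        cq = vu_ctrlr->cqs[cqid];
        SPDK_DEBUGLOG(nvmf_vfio, "%s: trying to delete cqid:%u=%p\n",
                      ctrlr_id(vu_ctrlr), cqid, cq);
        if (cq->cq_ref == 0) {                          /* refcount guard assumed */
            delete_cq_done(vu_ctrlr, cq);
        }
    }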
1967 init_cq(struct nvmf_vfio_user_ctrlr *vu_ctrlr, const uint16_t id)
1971 assert(vu_ctrlr != NULL);
1972 assert(vu_ctrlr->cqs[id] == NULL);
1985 vu_ctrlr->cqs[id] = cq;
1991 alloc_sq_reqs(struct nvmf_vfio_user_ctrlr *vu_ctrlr, struct nvmf_vfio_user_sq *sq)
2286 struct nvmf_vfio_user_ctrlr *vu_ctrlr;
2294 struct nvmf_vfio_user_ctrlr *vu_ctrlr = ctx->vu_ctrlr;
2295 struct nvmf_vfio_user_cq *admin_cq = vu_ctrlr->cqs[0];
2305 post_completion(vu_ctrlr, admin_cq, 0, 0,
2358 sq->delete_ctx->vu_ctrlr = ctrlr;
2532 struct nvmf_vfio_user_ctrlr *vu_ctrlr = sq->ctrlr;
2537 assert(vu_ctrlr != NULL);
2540 vfu_sgl_put(vu_ctrlr->endpoint->vfu_ctx,
2547 return post_completion(vu_ctrlr, vu_ctrlr->cqs[cqid],
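
The request-release path at 2532-2547 first returns any guest buffers mapped for the command, then writes the CQE into the SQ's paired CQ; condensed, with the request fields and post_completion() tail arguments inferred:

    /* Request completion, condensed: */
    struct nvmf_vfio_user_ctrlr *vu_ctrlr = sq->ctrlr;

    assert(vu_ctrlr != NULL);

    if (vu_req->iovcnt > 0) {                           /* guard assumed */
        vfu_sgl_put(vu_ctrlr->endpoint->vfu_ctx,
                    vu_req->sg, vu_req->iov, vu_req->iovcnt);
    }

    return post_completion(vu_ctrlr, vu_ctrlr->cqs[cqid],  /* cqid from the SQ */
                           cdw0, sq->qid, cid, sc, sct);   /* tail args assumed */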
2833 disable_ctrlr(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
2835 SPDK_NOTICELOG("%s: disabling controller\n", ctrlr_id(vu_ctrlr));
2839 assert(vu_ctrlr->sqs[0] != NULL);
2840 assert(vu_ctrlr->cqs[0] != NULL);
2842 unmap_q(vu_ctrlr, &vu_ctrlr->sqs[0]->mapping);
2843 unmap_q(vu_ctrlr, &vu_ctrlr->cqs[0]->mapping);
2845 vu_ctrlr->sqs[0]->size = 0;
2846 *sq_headp(vu_ctrlr->sqs[0]) = 0;
2848 vu_ctrlr->sqs[0]->sq_state = VFIO_USER_SQ_INACTIVE;
2850 vu_ctrlr->cqs[0]->size = 0;
2851 *cq_tailp(vu_ctrlr->cqs[0]) = 0;
2857 spdk_nvmf_ctrlr_abort_aer(vu_ctrlr->ctrlr);
2860 vfio_user_ctrlr_switch_doorbells(vu_ctrlr, false);
2861 free_sdbl(vu_ctrlr->endpoint->vfu_ctx, vu_ctrlr->sdbl);
2862 vu_ctrlr->sdbl = NULL;
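
Disabling tears down only the admin pair: unmap both rings, zero sizes and head/tail, abort in-flight AERs (they can never complete on a disabled controller), and drop any shadow doorbells, which do not survive a reset. Reconstruction following the matched order, with the CQ state assignment assumed:

    static void
    disable_ctrlr(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
    {
        SPDK_NOTICELOG("%s: disabling controller\n", ctrlr_id(vu_ctrlr));

        assert(vu_ctrlr->sqs[0] != NULL);
        assert(vu_ctrlr->cqs[0] != NULL);

        unmap_q(vu_ctrlr, &vu_ctrlr->sqs[0]->mapping);
        unmap_q(vu_ctrlr, &vu_ctrlr->cqs[0]->mapping);

        vu_ctrlr->sqs[0]->size = 0;
        *sq_headp(vu_ctrlr->sqs[0]) = 0;
        vu_ctrlr->sqs[0]->sq_state = VFIO_USER_SQ_INACTIVE;

        vu_ctrlr->cqs[0]->size = 0;
        *cq_tailp(vu_ctrlr->cqs[0]) = 0;
        vu_ctrlr->cqs[0]->cq_state = VFIO_USER_CQ_UNUSED;  /* assumed, not matched */

        spdk_nvmf_ctrlr_abort_aer(vu_ctrlr->ctrlr);

        vfio_user_ctrlr_switch_doorbells(vu_ctrlr, false);
        free_sdbl(vu_ctrlr->endpoint->vfu_ctx, vu_ctrlr->sdbl);
        vu_ctrlr->sdbl = NULL;
    }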
2867 enable_ctrlr(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
2871 assert(vu_ctrlr != NULL);
2873 SPDK_NOTICELOG("%s: enabling controller\n", ctrlr_id(vu_ctrlr));
2875 err = acq_setup(vu_ctrlr);
2880 err = asq_setup(vu_ctrlr);
2885 vu_ctrlr->sqs[0]->sq_state = VFIO_USER_SQ_ACTIVE;
2894 struct nvmf_vfio_user_ctrlr *vu_ctrlr;
2899 vu_ctrlr = sq->ctrlr;
2910 int ret = enable_ctrlr(vu_ctrlr);
2912 SPDK_ERRLOG("%s: failed to enable ctrlr\n", ctrlr_id(vu_ctrlr));
2915 vu_ctrlr->reset_shn = false;
2917 vu_ctrlr->reset_shn = true;
2923 vu_ctrlr->reset_shn = true;
2927 if (vu_ctrlr->reset_shn) {
2928 disable_ctrlr(vu_ctrlr);
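
The matches at 2894-2928 come from the CC property-write handler: EN rising enables the controller, EN falling or a non-zero SHN records a pending reset/shutdown in reset_shn, and disable_ctrlr() runs once the subsystem pause completes. Condensed; the variable names for the register decode are illustrative, not verbatim:

    /* CC write handling, condensed: */
    union spdk_nvme_cc_register cc, diff;

    cc.raw = value;                      /* 32-bit value the guest wrote (assumed) */
    diff.raw = cc.raw ^ regs->cc.raw;    /* bits that changed */

    if (diff.bits.en) {
        if (cc.bits.en) {
            int ret = enable_ctrlr(vu_ctrlr);
            if (ret) {
                SPDK_ERRLOG("%s: failed to enable ctrlr\n", ctrlr_id(vu_ctrlr));
            }
            vu_ctrlr->reset_shn = false;
        } else {
            vu_ctrlr->reset_shn = true;  /* controller reset requested */
        }
    }
    if (diff.bits.shn && cc.bits.shn != SPDK_NVME_CC_SHN_NONE) {
        vu_ctrlr->reset_shn = true;      /* shutdown requested */
    }

    /* Later, once the subsystem pause completes: */
    if (vu_ctrlr->reset_shn) {
        disable_ctrlr(vu_ctrlr);
    }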
3021 vfio_user_property_access(struct nvmf_vfio_user_ctrlr *vu_ctrlr,
3034 req = get_nvmf_vfio_user_req(vu_ctrlr->sqs[0]);
3039 regs = spdk_nvmf_ctrlr_get_regs(vu_ctrlr->ctrlr);
3043 req->cb_arg = vu_ctrlr->sqs[0];
3187 static void ctrlr_quiesce(struct nvmf_vfio_user_ctrlr *vu_ctrlr);
3193 struct nvmf_vfio_user_ctrlr *vu_ctrlr = endpoint->ctrlr;
3197 if (!vu_ctrlr) {
3201 if (!vu_ctrlr->queued_quiesce) {
3202 vu_ctrlr->state = VFIO_USER_CTRLR_RUNNING;
3210 ctrlr_kick(vu_ctrlr);
3226 ctrlr_id(vu_ctrlr));
3227 ctrlr_quiesce(vu_ctrlr);
3235 struct nvmf_vfio_user_ctrlr *vu_ctrlr = endpoint->ctrlr;
3239 if (!vu_ctrlr) {
3243 spdk_thread_send_msg(vu_ctrlr->thread, _vfio_user_endpoint_resume_done_msg, endpoint);
3251 struct nvmf_vfio_user_ctrlr *vu_ctrlr = endpoint->ctrlr;
3254 if (!vu_ctrlr) {
3259 SPDK_DEBUGLOG(nvmf_vfio, "%s device quiesced\n", ctrlr_id(vu_ctrlr));
3261 assert(vu_ctrlr->state == VFIO_USER_CTRLR_PAUSING);
3262 vu_ctrlr->state = VFIO_USER_CTRLR_PAUSED;
3264 vu_ctrlr->queued_quiesce = false;
3268 * so we need to re-check `vu_ctrlr->state`.
3270 if (vu_ctrlr->state == VFIO_USER_CTRLR_MIGRATING) {
3271 SPDK_DEBUGLOG(nvmf_vfio, "%s is in MIGRATION state\n", ctrlr_id(vu_ctrlr));
3275 SPDK_DEBUGLOG(nvmf_vfio, "%s starting to resume\n", ctrlr_id(vu_ctrlr));
3276 vu_ctrlr->state = VFIO_USER_CTRLR_RESUMING;
3280 vu_ctrlr->state = VFIO_USER_CTRLR_PAUSED;
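
Quiesce completion at 3251-3280, condensed: report to libvfio-user, then resume unless the quiesce acknowledgement re-entered and moved the controller into migration. The vfu_device_quiesced() placement and the resume arguments are assumed:

    /* Quiesce-done handler, condensed: */
    assert(vu_ctrlr->state == VFIO_USER_CTRLR_PAUSING);
    vu_ctrlr->state = VFIO_USER_CTRLR_PAUSED;
    vfu_device_quiesced(endpoint->vfu_ctx, status);     /* placement assumed */
    vu_ctrlr->queued_quiesce = false;

    /* vfu_device_quiesced() can change the migration state,
     * so re-check vu_ctrlr->state before resuming. */
    if (vu_ctrlr->state == VFIO_USER_CTRLR_MIGRATING) {
        return;
    }

    vu_ctrlr->state = VFIO_USER_CTRLR_RESUMING;
    if (spdk_nvmf_subsystem_resume(subsystem, cb_fn, endpoint) < 0) {  /* args assumed */
        vu_ctrlr->state = VFIO_USER_CTRLR_PAUSED;
    }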
3291 struct nvmf_vfio_user_ctrlr *vu_ctrlr = endpoint->ctrlr;
3293 if (!vu_ctrlr) {
3301 ctrlr_id(vu_ctrlr), status);
3303 spdk_thread_send_msg(vu_ctrlr->thread,
3321 struct nvmf_vfio_user_ctrlr *vu_ctrlr = endpoint->ctrlr;
3328 if (!vu_ctrlr) {
3345 vu_ctrlr->state = VFIO_USER_CTRLR_RUNNING;
3346 fail_ctrlr(vu_ctrlr);
3352 ctrlr_quiesce(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
3356 vu_ctrlr->state = VFIO_USER_CTRLR_PAUSING;
3365 quiesce_ctx->endpoint = vu_ctrlr->endpoint;
3367 quiesce_ctx->group = TAILQ_FIRST(&vu_ctrlr->transport->poll_groups);
3378 struct nvmf_vfio_user_ctrlr *vu_ctrlr = endpoint->ctrlr;
3380 if (!vu_ctrlr) {
3387 if (!nvmf_subsystem_get_ctrlr(subsystem, vu_ctrlr->cntlid)) {
3391 SPDK_DEBUGLOG(nvmf_vfio, "%s starting to quiesce\n", ctrlr_id(vu_ctrlr));
3396 if (!vu_ctrlr->ctrlr->vcprop.cc.bits.en) {
3398 } else if (!vu_ctrlr->ctrlr->vcprop.csts.bits.rdy) {
3400 } else if (vu_ctrlr->ctrlr->vcprop.csts.bits.shst == SPDK_NVME_SHST_COMPLETE) {
3404 switch (vu_ctrlr->state) {
3409 ctrlr_quiesce(vu_ctrlr);
3412 vu_ctrlr->queued_quiesce = true;
3413 SPDK_DEBUGLOG(nvmf_vfio, "%s is busy; quiesce deferred, current state %u\n", ctrlr_id(vu_ctrlr),
3414 vu_ctrlr->state);
3417 assert(vu_ctrlr->state != VFIO_USER_CTRLR_PAUSING);
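
The quiesce callback at 3378-3417 refuses to pause a controller that is not actually running (EN clear, RDY clear, or shutdown already complete) and otherwise either quiesces now or defers; a condensed sketch, with the case labels and the async EBUSY convention partly assumed:

    /* Device-quiesce callback, condensed: */
    if (!vu_ctrlr->ctrlr->vcprop.cc.bits.en) {
        return 0;                          /* never enabled: nothing to quiesce */
    } else if (!vu_ctrlr->ctrlr->vcprop.csts.bits.rdy) {
        return 0;                          /* enable still in progress */
    } else if (vu_ctrlr->ctrlr->vcprop.csts.bits.shst == SPDK_NVME_SHST_COMPLETE) {
        return 0;                          /* already shut down */
    }

    switch (vu_ctrlr->state) {
    case VFIO_USER_CTRLR_PAUSED:
    case VFIO_USER_CTRLR_MIGRATING:
        return 0;                          /* already quiesced */
    case VFIO_USER_CTRLR_RUNNING:
        ctrlr_quiesce(vu_ctrlr);           /* pause the subsystem asynchronously */
        break;
    case VFIO_USER_CTRLR_RESUMING:
        vu_ctrlr->queued_quiesce = true;   /* re-quiesce once the resume lands */
        break;
    default:
        assert(vu_ctrlr->state != VFIO_USER_CTRLR_PAUSING);
        break;
    }

    errno = EBUSY;                         /* quiesce completes asynchronously */
    return -1;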
3528 vfio_user_migr_ctrlr_save_data(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
3530 struct spdk_nvmf_ctrlr *ctrlr = vu_ctrlr->ctrlr;
3531 struct nvmf_vfio_user_endpoint *endpoint = vu_ctrlr->endpoint;
3558 TAILQ_FOREACH(sq, &vu_ctrlr->connected_sqs, tailq) {
3569 cq = vu_ctrlr->cqs[cqid];
3585 memcpy(doorbell_base, (void *)vu_ctrlr->bar0_doorbells, NVMF_VFIO_USER_DOORBELLS_SIZE);
3623 if (vu_ctrlr->sdbl != NULL) {
3625 migr_state.ctrlr_header.shadow_doorbell_buffer = vu_ctrlr->shadow_doorbell_buffer;
3626 migr_state.ctrlr_header.eventidx_buffer = vu_ctrlr->eventidx_buffer;
3633 vfio_user_ctrlr_dump_migr_data("SAVE", &migr_state, vu_ctrlr->sdbl);
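
Saving migration state copies the raw BAR0 doorbell page into the migration region and records only the guest-physical addresses of any shadow doorbells; the mappings themselves are rebuilt on the destination. Condensed from the matches, with the sdbl flag name assumed:

    /* Inside vfio_user_migr_ctrlr_save_data(), condensed: */
    memcpy(doorbell_base, (void *)vu_ctrlr->bar0_doorbells, NVMF_VFIO_USER_DOORBELLS_SIZE);

    if (vu_ctrlr->sdbl != NULL) {
        migr_state.ctrlr_header.sdbl = true;   /* flag name assumed */
        migr_state.ctrlr_header.shadow_doorbell_buffer = vu_ctrlr->shadow_doorbell_buffer;
        migr_state.ctrlr_header.eventidx_buffer = vu_ctrlr->eventidx_buffer;
    }

    vfio_user_ctrlr_dump_migr_data("SAVE", &migr_state, vu_ctrlr->sdbl);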
3670 vfio_user_migr_ctrlr_construct_qps(struct nvmf_vfio_user_ctrlr *vu_ctrlr,
3681 vfio_user_ctrlr_dump_migr_data("RESUME", migr_state, vu_ctrlr->sdbl);
3699 if (vu_ctrlr->sqs[sqid] == NULL) {
3700 ret = init_sq(vu_ctrlr, &vu_ctrlr->transport->transport, sqid);
3707 sq = vu_ctrlr->sqs[sqid];
3710 ret = alloc_sq_reqs(vu_ctrlr, sq);
3722 addr = map_one(vu_ctrlr->endpoint->vfu_ctx,
3748 if (vu_ctrlr->cqs[cqid] == NULL) {
3749 ret = init_cq(vu_ctrlr, cqid);
3756 cq = vu_ctrlr->cqs[cqid];
3768 addr = map_one(vu_ctrlr->endpoint->vfu_ctx,
3784 vfio_user_migr_ctrlr_restore(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
3786 struct nvmf_vfio_user_endpoint *endpoint = vu_ctrlr->endpoint;
3787 struct spdk_nvmf_ctrlr *ctrlr = vu_ctrlr->ctrlr;
3810 sdbl = map_sdbl(vu_ctrlr->endpoint->vfu_ctx,
3813 memory_page_size(vu_ctrlr));
3816 ctrlr_id(vu_ctrlr));
3820 vu_ctrlr->shadow_doorbell_buffer = migr_state.ctrlr_header.shadow_doorbell_buffer;
3821 vu_ctrlr->eventidx_buffer = migr_state.ctrlr_header.eventidx_buffer;
3823 SWAP(vu_ctrlr->sdbl, sdbl);
3826 rc = vfio_user_migr_ctrlr_construct_qps(vu_ctrlr, &migr_state);
3836 memcpy((void *)vu_ctrlr->bar0_doorbells, doorbell_base, NVMF_VFIO_USER_DOORBELLS_SIZE);
3846 SPDK_DEBUGLOG(nvmf_vfio, "%s AER resubmit, CID %u\n", ctrlr_id(vu_ctrlr),
3851 rc = handle_cmd_req(vu_ctrlr, &cmd, vu_ctrlr->sqs[0]);
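
Restore inverts the save: re-map shadow doorbells from the saved GPAs, rebuild the queue pairs, copy the doorbell page back into BAR0, and replay any AER the source left outstanding. Condensed, with the sdbl guard and error paths assumed:

    /* Inside vfio_user_migr_ctrlr_restore(), condensed: */
    if (migr_state.ctrlr_header.sdbl) {                  /* guard assumed */
        sdbl = map_sdbl(vu_ctrlr->endpoint->vfu_ctx,
                        migr_state.ctrlr_header.shadow_doorbell_buffer,
                        migr_state.ctrlr_header.eventidx_buffer,
                        memory_page_size(vu_ctrlr));
        if (sdbl == NULL) {
            SPDK_ERRLOG("%s: failed to re-map shadow doorbells\n",
                        ctrlr_id(vu_ctrlr));             /* wording assumed */
            return -1;
        }

        vu_ctrlr->shadow_doorbell_buffer = migr_state.ctrlr_header.shadow_doorbell_buffer;
        vu_ctrlr->eventidx_buffer = migr_state.ctrlr_header.eventidx_buffer;
        SWAP(vu_ctrlr->sdbl, sdbl);
    }

    rc = vfio_user_migr_ctrlr_construct_qps(vu_ctrlr, &migr_state);
    if (rc != 0) {
        return rc;
    }

    memcpy((void *)vu_ctrlr->bar0_doorbells, doorbell_base, NVMF_VFIO_USER_DOORBELLS_SIZE);

    /* Resubmit the saved AER into the admin SQ. */
    rc = handle_cmd_req(vu_ctrlr, &cmd, vu_ctrlr->sqs[0]);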
3861 vfio_user_migr_ctrlr_enable_sqs(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
3868 if (vu_ctrlr->sqs[0] != NULL) {
3869 vu_ctrlr->sqs[0]->dbl_tailp = vu_ctrlr->bar0_doorbells +
3873 if (vu_ctrlr->cqs[0] != NULL) {
3874 vu_ctrlr->cqs[0]->dbl_headp = vu_ctrlr->bar0_doorbells +
3878 vfio_user_ctrlr_switch_doorbells(vu_ctrlr, vu_ctrlr->sdbl != NULL);
3881 sq = vu_ctrlr->sqs[i];
3890 spdk_nvmf_tgt_new_qpair(vu_ctrlr->transport->transport.tgt, &sq->qpair);
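
After migrating in, the SQs are re-armed: repoint the admin doorbells at BAR0, switch to shadow doorbells if the guest had them, and hand every reconstructed SQ back to the target as a new qpair. Condensed, with the doorbell offset helper and loop bound assumed:

    /* Inside vfio_user_migr_ctrlr_enable_sqs(), condensed: */
    if (vu_ctrlr->sqs[0] != NULL) {
        vu_ctrlr->sqs[0]->dbl_tailp = vu_ctrlr->bar0_doorbells +
                                      queue_index(0, false);  /* helper assumed */
    }
    if (vu_ctrlr->cqs[0] != NULL) {
        vu_ctrlr->cqs[0]->dbl_headp = vu_ctrlr->bar0_doorbells +
                                      queue_index(0, true);
    }

    vfio_user_ctrlr_switch_doorbells(vu_ctrlr, vu_ctrlr->sdbl != NULL);

    for (i = 0; i < NVMF_VFIO_USER_MAX_QPAIRS_PER_CTRLR; i++) {  /* bound assumed */
        sq = vu_ctrlr->sqs[i];
        if (sq == NULL) {
            continue;
        }
        spdk_nvmf_tgt_new_qpair(vu_ctrlr->transport->transport.tgt, &sq->qpair);
    }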
3905 vfio_user_migr_ctrlr_mark_dirty(struct nvmf_vfio_user_ctrlr *vu_ctrlr)
3907 struct nvmf_vfio_user_endpoint *endpoint = vu_ctrlr->endpoint;
3909 assert(vu_ctrlr->state == VFIO_USER_CTRLR_MIGRATING);
3912 struct nvmf_vfio_user_cq *cq = vu_ctrlr->cqs[i];
3921 if (vu_ctrlr->sdbl != NULL) {
3928 if (!vu_ctrlr->sdbl->iovs[i].iov_len) {
3932 sg = index_to_sg_t(vu_ctrlr->sdbl->sgs, i);
3943 struct nvmf_vfio_user_ctrlr *vu_ctrlr = endpoint->ctrlr;
3948 vu_ctrlr->state, state);
3952 vu_ctrlr->in_source_vm = true;
3953 vu_ctrlr->state = VFIO_USER_CTRLR_MIGRATING;
3954 vfio_user_migr_ctrlr_mark_dirty(vu_ctrlr);
3955 vfio_user_migr_ctrlr_save_data(vu_ctrlr);
3958 vu_ctrlr->state = VFIO_USER_CTRLR_MIGRATING;
3963 if (vu_ctrlr->in_source_vm) {
3968 assert(vu_ctrlr->state == VFIO_USER_CTRLR_PAUSED);
3976 if (vu_ctrlr->state != VFIO_USER_CTRLR_RUNNING) {
3980 assert(!vu_ctrlr->in_source_vm);
3981 vu_ctrlr->state = VFIO_USER_CTRLR_MIGRATING;
3983 sq = TAILQ_FIRST(&vu_ctrlr->connected_sqs);
3996 if (vu_ctrlr->state != VFIO_USER_CTRLR_MIGRATING) {
4000 if (!vu_ctrlr->in_source_vm) {
4002 ret = vfio_user_migr_ctrlr_restore(vu_ctrlr);
4007 vfio_user_ctrlr_switch_doorbells(vu_ctrlr, false);
4008 vfio_user_migr_ctrlr_enable_sqs(vu_ctrlr);
4009 vu_ctrlr->state = VFIO_USER_CTRLR_RUNNING;
4013 vu_ctrlr->state = VFIO_USER_CTRLR_RESUMING;
4018 vu_ctrlr->state = VFIO_USER_CTRLR_PAUSED;
4022 vu_ctrlr->migr_data_prepared = false;
4023 vu_ctrlr->in_source_vm = false;
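
The matches from 3943 on are the migration device-state callback, condensed below to its switch. The vfu_migr_state_t labels are libvfio-user's; their pairing with these particular lines is partly assumed:

    switch (state) {
    case VFU_MIGR_STATE_STOP_AND_COPY:
        vu_ctrlr->in_source_vm = true;
        vu_ctrlr->state = VFIO_USER_CTRLR_MIGRATING;
        vfio_user_migr_ctrlr_mark_dirty(vu_ctrlr);
        vfio_user_migr_ctrlr_save_data(vu_ctrlr);
        break;
    case VFU_MIGR_STATE_STOP:
        vu_ctrlr->state = VFIO_USER_CTRLR_MIGRATING;
        break;
    case VFU_MIGR_STATE_PRE_COPY:
        assert(vu_ctrlr->state == VFIO_USER_CTRLR_PAUSED);
        break;
    case VFU_MIGR_STATE_RESUME:
        /* Destination: the controller must still be fresh before migrating in. */
        if (vu_ctrlr->state != VFIO_USER_CTRLR_RUNNING) {
            return -EINVAL;                    /* errno choice assumed */
        }
        assert(!vu_ctrlr->in_source_vm);
        vu_ctrlr->state = VFIO_USER_CTRLR_MIGRATING;
        break;
    case VFU_MIGR_STATE_RUNNING:
        if (vu_ctrlr->state != VFIO_USER_CTRLR_MIGRATING) {
            break;
        }
        if (!vu_ctrlr->in_source_vm) {         /* destination: apply saved state */
            vfio_user_migr_ctrlr_restore(vu_ctrlr);
            vfio_user_ctrlr_switch_doorbells(vu_ctrlr, false);
            vfio_user_migr_ctrlr_enable_sqs(vu_ctrlr);
            vu_ctrlr->state = VFIO_USER_CTRLR_RUNNING;
        }
        vu_ctrlr->migr_data_prepared = false;
        vu_ctrlr->in_source_vm = false;
        break;
    }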
5041 struct nvmf_vfio_user_ctrlr *vu_ctrlr = ctx;
5045 vu_ctrlr_group = ctrlr_to_poll_group(vu_ctrlr);
5055 ret = vfio_user_poll_vfu_ctx(vu_ctrlr);
5061 if (vu_ctrlr->sqs[0] == NULL) {
5065 if (vu_ctrlr->transport->transport_opts.enable_intr_mode_sq_spreading) {
5071 TAILQ_FOREACH(vu_group, &vu_ctrlr->transport->poll_groups, link) {
5109 start_ctrlr(struct nvmf_vfio_user_ctrlr *vu_ctrlr,
5112 struct nvmf_vfio_user_endpoint *endpoint = vu_ctrlr->endpoint;
5114 vu_ctrlr->ctrlr = ctrlr;
5115 vu_ctrlr->cntlid = ctrlr->cntlid;
5116 vu_ctrlr->thread = spdk_get_thread();
5117 vu_ctrlr->state = VFIO_USER_CTRLR_RUNNING;
5120 vu_ctrlr->vfu_ctx_poller = SPDK_POLLER_REGISTER(vfio_user_poll_vfu_ctx,
5121 vu_ctrlr, 1000);
5125 vu_ctrlr->vfu_ctx_poller = SPDK_POLLER_REGISTER(vfio_user_poll_vfu_ctx,
5126 vu_ctrlr, 0);
5128 vu_ctrlr->intr_fd = vfu_get_poll_fd(vu_ctrlr->endpoint->vfu_ctx);
5129 assert(vu_ctrlr->intr_fd != -1);
5131 vu_ctrlr->intr = SPDK_INTERRUPT_REGISTER(vu_ctrlr->intr_fd,
5132 vfio_user_ctrlr_intr, vu_ctrlr);
5134 assert(vu_ctrlr->intr != NULL);
5136 spdk_poller_register_interrupt(vu_ctrlr->vfu_ctx_poller,
5138 vu_ctrlr);
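
start_ctrlr() binds the SPDK controller to the vfio-user one and chooses the servicing model: a 1 ms poller in polled mode, or a zero-period poller plus an fd-based interrupt source on libvfio-user's poll fd in interrupt mode. Reconstruction; the mode test and the set-interrupt-mode callback name are assumed:

    static void
    start_ctrlr(struct nvmf_vfio_user_ctrlr *vu_ctrlr,
                struct spdk_nvmf_ctrlr *ctrlr)
    {
        vu_ctrlr->ctrlr = ctrlr;
        vu_ctrlr->cntlid = ctrlr->cntlid;
        vu_ctrlr->thread = spdk_get_thread();
        vu_ctrlr->state = VFIO_USER_CTRLR_RUNNING;

        if (!spdk_interrupt_mode_is_enabled()) {         /* condition assumed */
            /* Polled mode: service the vfu context every millisecond. */
            vu_ctrlr->vfu_ctx_poller = SPDK_POLLER_REGISTER(vfio_user_poll_vfu_ctx,
                                                            vu_ctrlr, 1000);
            return;                                      /* early-out assumed */
        }

        /* Interrupt mode: zero-period poller plus the vfu context's poll fd. */
        vu_ctrlr->vfu_ctx_poller = SPDK_POLLER_REGISTER(vfio_user_poll_vfu_ctx,
                                                        vu_ctrlr, 0);

        vu_ctrlr->intr_fd = vfu_get_poll_fd(vu_ctrlr->endpoint->vfu_ctx);
        assert(vu_ctrlr->intr_fd != -1);

        vu_ctrlr->intr = SPDK_INTERRUPT_REGISTER(vu_ctrlr->intr_fd,
                                                 vfio_user_ctrlr_intr, vu_ctrlr);
        assert(vu_ctrlr->intr != NULL);

        spdk_poller_register_interrupt(vu_ctrlr->vfu_ctx_poller,
                                       vfio_user_ctrlr_set_intr_mode,  /* cb name assumed */
                                       vu_ctrlr);
    }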
5147 struct nvmf_vfio_user_ctrlr *vu_ctrlr;
5153 vu_ctrlr = sq->ctrlr;
5154 assert(vu_ctrlr != NULL);
5155 endpoint = vu_ctrlr->endpoint;
5161 free_ctrlr(vu_ctrlr);
5168 admin_cq = vu_ctrlr->cqs[0];
5181 start_ctrlr(vu_ctrlr, sq->qpair.ctrlr);
5195 cpl_ctx->ctrlr = vu_ctrlr;
5207 post_completion(vu_ctrlr, admin_cq, 0, 0,
5220 ctrlr_kick(vu_ctrlr);
5225 TAILQ_INSERT_TAIL(&vu_ctrlr->connected_sqs, sq, tailq);
5389 struct nvmf_vfio_user_ctrlr *vu_ctrlr;
5395 vu_ctrlr = sq->ctrlr;
5396 endpoint = vu_ctrlr->endpoint;
5401 TAILQ_REMOVE(&vu_ctrlr->connected_sqs, sq, tailq);
5402 delete_sq_done(vu_ctrlr, sq);
5403 if (TAILQ_EMPTY(&vu_ctrlr->connected_sqs)) {
5405 if (vu_ctrlr->in_source_vm && endpoint->need_resume) {
5413 free_ctrlr(vu_ctrlr);
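
Qpair disconnect at 5389-5413, condensed: drop the SQ from the controller's list, and when the last one goes, either keep the controller alive for a pending migration-source resume or free it outright. The early-out and the endpoint hand-off are assumed:

    /* Disconnect path, condensed: */
    TAILQ_REMOVE(&vu_ctrlr->connected_sqs, sq, tailq);
    delete_sq_done(vu_ctrlr, sq);

    if (TAILQ_EMPTY(&vu_ctrlr->connected_sqs)) {
        endpoint->ctrlr = NULL;                          /* assumed */
        if (vu_ctrlr->in_source_vm && endpoint->need_resume) {
            /* Migration source: the endpoint stays quiesced until the
             * migration finishes, so keep the controller around. */
            return 0;                                    /* early-out assumed */
        }
        free_ctrlr(vu_ctrlr);
    }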