Lines matching defs:sq (occurrences of sq, the vfio-user submission queue, in SPDK's NVMe-oF vfio-user transport)
202 struct nvme_migr_sq_state sq;
558 static struct nvmf_vfio_user_req *get_nvmf_vfio_user_req(struct nvmf_vfio_user_sq *sq);
576 sq_headp(struct nvmf_vfio_user_sq *sq)
578 assert(sq != NULL);
579 return &sq->head;
583 sq_dbl_tailp(struct nvmf_vfio_user_sq *sq)
585 assert(sq != NULL);
586 return sq->dbl_tailp;
604 sq_head_advance(struct nvmf_vfio_user_sq *sq)
606 assert(sq != NULL);
608 assert(*sq_headp(sq) < sq->size);
609 (*sq_headp(sq))++;
611 if (spdk_unlikely(*sq_headp(sq) == sq->size)) {
612 *sq_headp(sq) = 0;
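Taken together, the matches at 576-612 treat the submission-queue head as a plain ring-buffer index. A reconstruction of the accessor and advance helpers with the braces the listing elides (return types and the inline qualifier are assumptions; only head, size, and dbl_tailp are confirmed fields):

static inline uint32_t *
sq_headp(struct nvmf_vfio_user_sq *sq)
{
    assert(sq != NULL);
    return &sq->head;
}

static inline volatile uint32_t *
sq_dbl_tailp(struct nvmf_vfio_user_sq *sq)
{
    assert(sq != NULL);
    return sq->dbl_tailp;    /* points into BAR0 or the shadow doorbells */
}

static inline void
sq_head_advance(struct nvmf_vfio_user_sq *sq)
{
    assert(sq != NULL);
    assert(*sq_headp(sq) < sq->size);

    (*sq_headp(sq))++;

    /* Wrap the consumer index at the end of the ring. */
    if (spdk_unlikely(*sq_headp(sq) == sq->size)) {
        *sq_headp(sq) = 0;
    }
}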
682 sq_to_poll_group(struct nvmf_vfio_user_sq *sq)
684 return SPDK_CONTAINEROF(sq->group, struct nvmf_vfio_user_poll_group,
1026 struct nvmf_vfio_user_sq *sq = ctrlr->sqs[i];
1029 if (sq != NULL) {
1030 sq->dbl_tailp = doorbells + queue_index(sq->qid, false);
1438 struct nvmf_vfio_user_sq *sq;
1444 sq = ctrlr->sqs[0];
1446 assert(sq != NULL);
1447 assert(q_addr(&sq->mapping) == NULL);
1451 sq->qid = 0;
1452 sq->size = regs->aqa.bits.asqs + 1;
1453 sq->mapping.prp1 = regs->asq;
1454 sq->mapping.len = sq->size * sizeof(struct spdk_nvme_cmd);
1455 *sq_headp(sq) = 0;
1456 sq->cqid = 0;
1458 ret = map_q(ctrlr, &sq->mapping, MAP_INITIALIZE);
1464 sq->dbl_tailp = ctrlr->bar0_doorbells + queue_index(0, false);
1466 *sq_dbl_tailp(sq) = 0;
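The cluster at 1438-1466 is the admin submission queue coming online when the controller is enabled: its size and base address come straight from the guest-programmed AQA and ASQ registers, and its tail doorbell points into BAR0. A condensed sketch of that sequence (declarations, error handling, and the enclosing function are omitted; regs and MAP_INITIALIZE are as in the matches):

/* Admin SQ setup on controller enable, condensed from the matches above. */
sq = ctrlr->sqs[0];
sq->qid = 0;
sq->size = regs->aqa.bits.asqs + 1;                  /* AQA stores size - 1 */
sq->mapping.prp1 = regs->asq;                        /* guest-physical ring base */
sq->mapping.len = sq->size * sizeof(struct spdk_nvme_cmd);
sq->cqid = 0;
*sq_headp(sq) = 0;

ret = map_q(ctrlr, &sq->mapping, MAP_INITIALIZE);    /* map the guest ring */

sq->dbl_tailp = ctrlr->bar0_doorbells + queue_index(0, false);
*sq_dbl_tailp(sq) = 0;                               /* tail starts at zero */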
1480 set_sq_eventidx(struct nvmf_vfio_user_sq *sq)
1486 assert(sq != NULL);
1487 assert(sq->ctrlr != NULL);
1488 assert(sq->ctrlr->sdbl != NULL);
1489 assert(sq->need_rearm);
1490 assert(sq->qid != 0);
1492 ctrlr = sq->ctrlr;
1495 ctrlr_id(ctrlr), sq->qid);
1497 sq_tail_eidx = ctrlr->sdbl->eventidxs + queue_index(sq->qid, false);
1507 old_tail = *sq_dbl_tailp(sq);
1527 new_tail = *sq_dbl_tailp(sq);
1540 "sq_head=%u\n", ctrlr_id(ctrlr), sq->qid, old_tail,
1541 new_tail, *sq_headp(sq));
1543 if (new_tail == *sq_headp(sq)) {
1544 sq->need_rearm = false;
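The fragments from set_sq_eventidx (1480-1544) are the heart of shadow-doorbell rearming: publish an event index equal to the current shadow tail, then re-read the tail to close the race where the guest advanced it after the event index was written. A sketch of that check (the memory barrier and the exact eventidx store are assumptions; variable names follow the matches):

sq_tail_eidx = ctrlr->sdbl->eventidxs + queue_index(sq->qid, false);

old_tail = *sq_dbl_tailp(sq);    /* shadow tail before arming */
*sq_tail_eidx = old_tail;        /* guest must ring BAR0 once tail passes this */
spdk_wmb();                      /* assumption: publish eventidx before re-reading */
new_tail = *sq_dbl_tailp(sq);    /* did the guest race past the eventidx? */

if (new_tail == *sq_headp(sq)) {
    /* Ring is empty and the eventidx is armed: polling can stop. */
    sq->need_rearm = false;
}
/* Otherwise commands slipped in; the caller polls the SQ and re-arms. */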
1559 static int nvmf_vfio_user_sq_poll(struct nvmf_vfio_user_sq *sq);
1567 struct nvmf_vfio_user_sq *sq,
1573 assert(sq->need_rearm);
1578 if (set_sq_eventidx(sq)) {
1584 ret = nvmf_vfio_user_sq_poll(sq);
1617 struct nvmf_vfio_user_sq *sq;
1622 TAILQ_FOREACH(sq, &vu_group->sqs, link) {
1623 if (spdk_unlikely(sq->sq_state != VFIO_USER_SQ_ACTIVE || !sq->size)) {
1627 if (sq->need_rearm) {
1628 count += vfio_user_sq_rearm(sq->ctrlr, sq, vu_group);
1684 struct nvmf_vfio_user_sq *sq;
1690 sq = SPDK_CONTAINEROF(qpair, struct nvmf_vfio_user_sq, qpair);
1693 ret = map_one(sq->ctrlr->endpoint->vfu_ctx, addr, len,
1714 struct nvmf_vfio_user_sq *sq);
1843 free_sq_reqs(struct nvmf_vfio_user_sq *sq)
1845 while (!TAILQ_EMPTY(&sq->free_reqs)) {
1846 struct nvmf_vfio_user_req *vu_req = TAILQ_FIRST(&sq->free_reqs);
1847 TAILQ_REMOVE(&sq->free_reqs, vu_req, link);
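free_sq_reqs at 1843-1847 is a plain TAILQ drain of the per-queue request pool; completing it (the final free is an assumption, since the listing cuts the loop body off):

static void
free_sq_reqs(struct nvmf_vfio_user_sq *sq)
{
    while (!TAILQ_EMPTY(&sq->free_reqs)) {
        struct nvmf_vfio_user_req *vu_req = TAILQ_FIRST(&sq->free_reqs);

        TAILQ_REMOVE(&sq->free_reqs, vu_req, link);
        free(vu_req);    /* assumption: requests are individually heap-allocated */
    }
}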
1868 delete_sq_done(struct nvmf_vfio_user_ctrlr *vu_ctrlr, struct nvmf_vfio_user_sq *sq)
1874 sq->qid, sq);
1877 unmap_q(vu_ctrlr, &sq->mapping);
1879 free_sq_reqs(sq);
1881 sq->size = 0;
1883 sq->sq_state = VFIO_USER_SQ_DELETED;
1890 cqid = sq->cqid;
1906 struct nvmf_vfio_user_sq *sq;
1913 sq = ctrlr->sqs[qid];
1914 if (sq) {
1916 unmap_q(ctrlr, &sq->mapping);
1918 free_sq_reqs(sq);
1920 free(sq->mapping.sg);
1921 free(sq);
1939 struct nvmf_vfio_user_sq *sq;
1945 sq = calloc(1, sizeof(*sq));
1946 if (sq == NULL) {
1949 sq->mapping.sg = calloc(1, dma_sg_size());
1950 if (sq->mapping.sg == NULL) {
1951 free(sq);
1955 sq->qid = id;
1956 sq->qpair.qid = id;
1957 sq->qpair.transport = transport;
1958 sq->ctrlr = ctrlr;
1959 ctrlr->sqs[id] = sq;
1961 TAILQ_INIT(&sq->free_reqs);
1991 alloc_sq_reqs(struct nvmf_vfio_user_ctrlr *vu_ctrlr, struct nvmf_vfio_user_sq *sq)
2000 for (i = 0; i < sq->size; i++) {
2009 req->qpair = &sq->qpair;
2014 TAILQ_INSERT_TAIL(&sq->free_reqs, vu_req, link);
2020 TAILQ_FOREACH_SAFE(vu_req, &sq->free_reqs, link, tmp) {
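alloc_sq_reqs (1991-2020) pre-populates one request per SQ slot and unwinds with TAILQ_FOREACH_SAFE if any allocation fails. A sketch of that shape, with the allocation and error paths the listing omits filled in as assumptions:

static int
alloc_sq_reqs(struct nvmf_vfio_user_ctrlr *vu_ctrlr, struct nvmf_vfio_user_sq *sq)
{
    struct nvmf_vfio_user_req *vu_req, *tmp;
    uint32_t i;

    for (i = 0; i < sq->size; i++) {
        struct spdk_nvmf_request *req;

        vu_req = calloc(1, sizeof(*vu_req));    /* assumption: per-request calloc */
        if (vu_req == NULL) {
            goto err;
        }

        req = &vu_req->req;                     /* assumption: embedded request */
        req->qpair = &sq->qpair;

        TAILQ_INSERT_TAIL(&sq->free_reqs, vu_req, link);
    }

    return 0;

err:
    /* Safe iteration: each node is freed as we walk the list. */
    TAILQ_FOREACH_SAFE(vu_req, &sq->free_reqs, link, tmp) {
        TAILQ_REMOVE(&sq->free_reqs, vu_req, link);
        free(vu_req);
    }
    return -ENOMEM;
}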
2039 struct nvmf_vfio_user_sq *sq;
2076 sq = ctrlr->sqs[qid];
2077 sq->size = qsize;
2082 sq->mapping.prp1 = cmd->dptr.prp.prp1;
2083 sq->mapping.len = sq->size * sizeof(struct spdk_nvme_cmd);
2085 err = map_q(ctrlr, &sq->mapping, MAP_INITIALIZE);
2094 q_addr(&sq->mapping));
2096 err = alloc_sq_reqs(ctrlr, sq);
2103 sq->cqid = cqid;
2104 ctrlr->cqs[sq->cqid]->cq_ref++;
2105 sq->sq_state = VFIO_USER_SQ_CREATED;
2106 *sq_headp(sq) = 0;
2108 sq->dbl_tailp = ctrlr_doorbell_ptr(ctrlr) + queue_index(qid, false);
2119 *sq_dbl_tailp(sq) = 0;
2122 sq->need_rearm = true;
2124 if (!set_sq_eventidx(sq)) {
2141 sq->create_io_sq_cmd = *cmd;
2142 sq->post_create_io_sq_completion = true;
2145 &sq->qpair);
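Throughout the listing, SQ doorbell pointers are computed with queue_index(qid, false) (see 1030, 1464, 2108). That matches the NVMe doorbell layout, where each queue pair owns an adjacent SQ-tail/CQ-head pair of 32-bit slots; a sketch of the indexing those calls imply (the actual definition is not in the listing, so treat this as an assumption):

/* NVMe doorbell layout with stride 0: SQ tail doorbells at even slots,
 * CQ head doorbells at the following odd slot. */
static inline size_t
queue_index(uint16_t qid, bool is_cq)
{
    return ((size_t)qid * 2) + (is_cq ? 1 : 0);
}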
2321 struct nvmf_vfio_user_sq *sq;
2351 sq = ctrlr->sqs[cmd->cdw10_bits.delete_io_q.qid];
2352 sq->delete_ctx = calloc(1, sizeof(*sq->delete_ctx));
2353 if (!sq->delete_ctx) {
2358 sq->delete_ctx->vu_ctrlr = ctrlr;
2359 sq->delete_ctx->cid = cmd->cid;
2360 sq->sq_state = VFIO_USER_SQ_DELETED;
2361 assert(ctrlr->cqs[sq->cqid]->cq_ref);
2362 ctrlr->cqs[sq->cqid]->cq_ref--;
2364 spdk_nvmf_qpair_disconnect(&sq->qpair);
2531 struct nvmf_vfio_user_sq *sq = cb_arg;
2532 struct nvmf_vfio_user_ctrlr *vu_ctrlr = sq->ctrlr;
2535 assert(sq != NULL);
2544 sqid = sq->qid;
2545 cqid = sq->cqid;
2556 consume_cmd(struct nvmf_vfio_user_ctrlr *ctrlr, struct nvmf_vfio_user_sq *sq,
2559 assert(sq != NULL);
2560 if (spdk_unlikely(nvmf_qpair_is_admin_queue(&sq->qpair))) {
2564 return handle_cmd_req(ctrlr, cmd, sq);
2570 struct nvmf_vfio_user_sq *sq)
2573 struct nvmf_vfio_user_cq *cq = ctrlr->cqs[sq->cqid];
2578 assert(sq != NULL);
2580 if (ctrlr->sdbl != NULL && sq->qid != 0) {
2585 sq->need_rearm = true;
2589 queue = q_addr(&sq->mapping);
2590 while (*sq_headp(sq) != new_tail) {
2618 vu_group = sq_to_poll_group(sq);
2634 cmd = &queue[*sq_headp(sq)];
2643 sq_head_advance(sq);
2645 err = consume_cmd(ctrlr, sq, cmd);
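handle_sq_tdbl_write (2556-2645) drains the ring between the stored head and the tail the guest just wrote: fetch the command at the head, advance the head first, then hand the command off. A condensed sketch of that loop (declarations and the admin-queue special case at 2560 omitted):

queue = q_addr(&sq->mapping);    /* mapped guest SQ ring */
while (*sq_headp(sq) != new_tail) {
    struct spdk_nvme_cmd *cmd = &queue[*sq_headp(sq)];

    count++;
    /* Advance the head before consuming: the slot may be reused by the
     * guest as soon as the command is handed off. */
    sq_head_advance(sq);

    err = consume_cmd(ctrlr, sq, cmd);
    if (spdk_unlikely(err != 0)) {
        return err;
    }
}
return count;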
2679 struct nvmf_vfio_user_sq *sq;
2724 TAILQ_FOREACH(sq, &ctrlr->connected_sqs, tailq) {
2725 if (sq->sq_state != VFIO_USER_SQ_INACTIVE) {
2729 cq = ctrlr->cqs[sq->cqid];
2742 if (sq->size) {
2743 ret = map_q(ctrlr, &sq->mapping, MAP_R | MAP_QUIET);
2746 sq->qid, sq->mapping.prp1,
2747 sq->mapping.prp1 + sq->mapping.len);
2751 sq->sq_state = VFIO_USER_SQ_ACTIVE;
2752 SPDK_DEBUGLOG(nvmf_vfio, "Remap sqid:%u successfully\n", sq->qid);
2761 struct nvmf_vfio_user_sq *sq;
2789 TAILQ_FOREACH(sq, &ctrlr->connected_sqs, tailq) {
2790 if (q_addr(&sq->mapping) >= map_start && q_addr(&sq->mapping) <= map_end) {
2791 unmap_q(ctrlr, &sq->mapping);
2792 sq->sq_state = VFIO_USER_SQ_INACTIVE;
2795 cq = ctrlr->cqs[sq->cqid];
2892 struct nvmf_vfio_user_sq *sq)
2898 assert(sq->ctrlr != NULL);
2899 vu_ctrlr = sq->ctrlr;
2936 struct nvmf_vfio_user_sq *sq = cb_arg;
2938 assert(sq != NULL);
2942 assert(sq->ctrlr != NULL);
2951 return nvmf_vfio_user_prop_req_rsp_set(req, sq);
3431 struct nvme_migr_sq_state *sq;
3460 sq = &migr_data->qps[i].sq;
3463 if (sq->size) {
3464 SPDK_NOTICELOG("sqid:%u, bar0_doorbell:%u\n", sq->sqid, doorbell_base[i * 2]);
3467 sq->sqid,
3472 sq->sqid, sq->cqid, sq->head, sq->size, sq->dma_addr);
3532 struct nvmf_vfio_user_sq *sq;
3558 TAILQ_FOREACH(sq, &vu_ctrlr->connected_sqs, tailq) {
3559 /* save sq */
3560 sqid = sq->qid;
3561 migr_state.qps[sqid].sq.sqid = sq->qid;
3562 migr_state.qps[sqid].sq.cqid = sq->cqid;
3563 migr_state.qps[sqid].sq.head = *sq_headp(sq);
3564 migr_state.qps[sqid].sq.size = sq->size;
3565 migr_state.qps[sqid].sq.dma_addr = sq->mapping.prp1;
3568 cqid = sq->cqid;
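The save path (3558-3568) serializes each connected SQ into nvme_migr_sq_state: qid, cqid, the current head, the size, and the guest-physical ring base, which is everything needed to re-map and resume the queue on the destination. A sketch of the record those assignments imply (field widths and padding are assumptions; only the field names are confirmed by the matches):

struct nvme_migr_sq_state {
    uint16_t sqid;
    uint16_t cqid;
    uint32_t head;        /* consumer index at save time */
    uint32_t size;
    uint64_t dma_addr;    /* guest-physical base of the ring */
};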
3688 qsize = migr_qp.sq.size;
3690 struct nvmf_vfio_user_sq *sq;
3692 sqid = migr_qp.sq.sqid;
3698 /* allocate sq if necessary */
3707 sq = vu_ctrlr->sqs[sqid];
3708 sq->size = qsize;
3710 ret = alloc_sq_reqs(vu_ctrlr, sq);
3712 SPDK_ERRLOG("Construct sq with qid %u failed\n", sqid);
3716 /* restore sq */
3717 sq->sq_state = VFIO_USER_SQ_CREATED;
3718 sq->cqid = migr_qp.sq.cqid;
3719 *sq_headp(sq) = migr_qp.sq.head;
3720 sq->mapping.prp1 = migr_qp.sq.dma_addr;
3721 sq->mapping.len = sq->size * sizeof(struct spdk_nvme_cmd);
3723 sq->mapping.prp1, sq->mapping.len,
3724 sq->mapping.sg, &sq->mapping.iov,
3727 SPDK_ERRLOG("Restore sq with qid %u PRP1 0x%"PRIx64" with size %u failed\n",
3728 sqid, sq->mapping.prp1, sq->size);
3731 cqs_ref[sq->cqid]++;
3744 cqid = migr_qp.sq.cqid;
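The restore path (3688-3744) mirrors the save: re-create each SQ from its nvme_migr_sq_state record, re-map the guest ring at the saved address via map_one (arguments as at 3723-3724), and count CQ references so shared completion queues survive. A condensed sketch with error handling trimmed:

sq = vu_ctrlr->sqs[sqid];
sq->size = migr_qp.sq.size;

ret = alloc_sq_reqs(vu_ctrlr, sq);          /* rebuild the request pool */

sq->sq_state = VFIO_USER_SQ_CREATED;
sq->cqid = migr_qp.sq.cqid;
*sq_headp(sq) = migr_qp.sq.head;            /* resume from the saved head */
sq->mapping.prp1 = migr_qp.sq.dma_addr;
sq->mapping.len = sq->size * sizeof(struct spdk_nvme_cmd);

cqs_ref[sq->cqid]++;                        /* CQs may back several SQs */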
3864 struct nvmf_vfio_user_sq *sq;
3881 sq = vu_ctrlr->sqs[i];
3882 if (!sq || !sq->size) {
3886 if (nvmf_qpair_is_admin_queue(&sq->qpair)) {
3888 sq->sq_state = VFIO_USER_SQ_ACTIVE;
3890 spdk_nvmf_tgt_new_qpair(vu_ctrlr->transport->transport.tgt, &sq->qpair);
3944 struct nvmf_vfio_user_sq *sq;
3983 sq = TAILQ_FIRST(&vu_ctrlr->connected_sqs);
3984 assert(sq != NULL);
3985 assert(sq->qpair.qid == 0);
3986 sq->sq_state = VFIO_USER_SQ_INACTIVE;
3991 free_sq_reqs(sq);
3992 sq->size = 0;
4800 struct nvmf_vfio_user_sq *sq;
4805 sq = SPDK_CONTAINEROF(qpair, struct nvmf_vfio_user_sq, qpair);
4806 cq = sq->ctrlr->cqs[sq->cqid];
4834 result = sq->ctrlr->sqs[0]->group;
4903 struct nvmf_vfio_user_sq *sq = ctx;
4905 spdk_nvmf_qpair_disconnect(&sq->qpair);
4912 struct nvmf_vfio_user_sq *sq;
4930 TAILQ_FOREACH(sq, &ctrlr->connected_sqs, tailq) {
4932 spdk_thread_send_msg(ctrlr->thread, _vfio_user_qpair_disconnect, sq);
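Teardown (4903-4932) never disconnects a qpair from an arbitrary thread: each connected SQ is disconnected by sending a message to the controller's SPDK thread, so the disconnect runs in the right execution context. Reconstructed from the matches:

static void
_vfio_user_qpair_disconnect(void *ctx)
{
    struct nvmf_vfio_user_sq *sq = ctx;

    spdk_nvmf_qpair_disconnect(&sq->qpair);
}

/* For each connected SQ, hop to the controller thread: */
TAILQ_FOREACH(sq, &ctrlr->connected_sqs, tailq) {
    spdk_thread_send_msg(ctrlr->thread, _vfio_user_qpair_disconnect, sq);
}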
5145 struct nvmf_vfio_user_sq *sq = cb_arg;
5150 assert(sq != NULL);
5153 vu_ctrlr = sq->ctrlr;
5165 vu_group = SPDK_CONTAINEROF(sq->group, struct nvmf_vfio_user_poll_group, group);
5166 TAILQ_INSERT_TAIL(&vu_group->sqs, sq, link);
5174 if (nvmf_qpair_is_admin_queue(&sq->qpair)) {
5181 start_ctrlr(vu_ctrlr, sq->qpair.ctrlr);
5187 if (sq->post_create_io_sq_completion) {
5199 cpl_ctx->cpl.cid = sq->create_io_sq_cmd.cid;
5208 sq->create_io_sq_cmd.cid, SPDK_NVME_SC_SUCCESS, SPDK_NVME_SCT_GENERIC);
5210 sq->post_create_io_sq_completion = false;
5222 sq->sq_state = VFIO_USER_SQ_ACTIVE;
5225 TAILQ_INSERT_TAIL(&vu_ctrlr->connected_sqs, sq, tailq);
5251 struct nvmf_vfio_user_sq *sq;
5258 sq = SPDK_CONTAINEROF(qpair, struct nvmf_vfio_user_sq, qpair);
5259 sq->group = group;
5260 ctrlr = sq->ctrlr;
5263 ctrlr_id(ctrlr), sq->qpair.qid,
5264 sq, qpair, group);
5266 admin = nvmf_qpair_is_admin_queue(&sq->qpair);
5268 vu_req = get_nvmf_vfio_user_req(sq);
5278 req->cmd->connect_cmd.sqsize = sq->size - 1;
5296 vu_req->cb_arg = sq;
5317 struct nvmf_vfio_user_sq *sq;
5320 sq = SPDK_CONTAINEROF(qpair, struct nvmf_vfio_user_sq, qpair);
5324 ctrlr_id(sq->ctrlr), qpair->qid, qpair, group);
5328 TAILQ_REMOVE(&vu_group->sqs, sq, link);
5334 _nvmf_vfio_user_req_free(struct nvmf_vfio_user_sq *sq, struct nvmf_vfio_user_req *vu_req)
5343 TAILQ_INSERT_TAIL(&sq->free_reqs, vu_req, link);
5349 struct nvmf_vfio_user_sq *sq;
5355 sq = SPDK_CONTAINEROF(req->qpair, struct nvmf_vfio_user_sq, qpair);
5357 _nvmf_vfio_user_req_free(sq, vu_req);
5365 struct nvmf_vfio_user_sq *sq;
5371 sq = SPDK_CONTAINEROF(req->qpair, struct nvmf_vfio_user_sq, qpair);
5375 fail_ctrlr(sq->ctrlr);
5379 _nvmf_vfio_user_req_free(sq, vu_req);
5388 struct nvmf_vfio_user_sq *sq;
5394 sq = SPDK_CONTAINEROF(qpair, struct nvmf_vfio_user_sq, qpair);
5395 vu_ctrlr = sq->ctrlr;
5397 del_ctx = sq->delete_ctx;
5398 sq->delete_ctx = NULL;
5401 TAILQ_REMOVE(&vu_ctrlr->connected_sqs, sq, tailq);
5402 delete_sq_done(vu_ctrlr, sq);
5430 get_nvmf_vfio_user_req(struct nvmf_vfio_user_sq *sq)
5434 if (sq == NULL) {
5438 req = TAILQ_FIRST(&sq->free_reqs);
5443 TAILQ_REMOVE(&sq->free_reqs, req, link);
5593 struct nvmf_vfio_user_sq *sq)
5602 vu_req = get_nvmf_vfio_user_req(sq);
5605 return post_completion(ctrlr, ctrlr->cqs[sq->cqid], 0, 0, cmd->cid,
5644 _nvmf_vfio_user_req_free(sq, vu_req);
5661 struct nvmf_vfio_user_sq *sq)
5663 struct nvmf_vfio_user_cq *cq = ctrlr->cqs[sq->cqid];
5696 nvmf_vfio_user_sq_poll(struct nvmf_vfio_user_sq *sq)
5702 assert(sq != NULL);
5704 ctrlr = sq->ctrlr;
5715 handle_suppressed_irq(ctrlr, sq);
5728 spdk_ivdt_dcache(sq_dbl_tailp(sq));
5731 new_tail = *sq_dbl_tailp(sq);
5734 if (spdk_unlikely(new_tail >= sq->size)) {
5735 SPDK_DEBUGLOG(nvmf_vfio, "%s: invalid sqid:%u doorbell value %u\n", ctrlr_id(ctrlr), sq->qid,
5742 if (*sq_headp(sq) == new_tail) {
5747 ctrlr_id(ctrlr), sq->qid, *sq_headp(sq), new_tail);
5751 ctrlr_id(ctrlr), sq->qid,
5752 ctrlr->bar0_doorbells[queue_index(sq->qid, false)],
5753 ctrlr->sdbl->shadow_doorbells[queue_index(sq->qid, false)],
5754 ctrlr->sdbl->eventidxs[queue_index(sq->qid, false)]);
5764 count = handle_sq_tdbl_write(ctrlr, new_tail, sq);
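nvmf_vfio_user_sq_poll (5696-5764) reads the tail doorbell once per poll, rejects out-of-range values from a buggy or hostile guest, and returns early when the ring is empty. A condensed sketch (the 16-bit mask is an assumption; cache invalidation and logging follow the matches; declarations omitted):

spdk_ivdt_dcache(sq_dbl_tailp(sq));    /* invalidate a possibly stale cache line */
new_tail = *sq_dbl_tailp(sq);

new_tail &= 0xffffu;                   /* assumption: doorbell tails are 16-bit */
if (spdk_unlikely(new_tail >= sq->size)) {
    /* Guest wrote a bogus tail: ignore it rather than walk off the ring. */
    return 0;
}

if (*sq_headp(sq) == new_tail) {
    return 0;                          /* nothing new to consume */
}

count = handle_sq_tdbl_write(ctrlr, new_tail, sq);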
5783 struct nvmf_vfio_user_sq *sq, *tmp;
5792 TAILQ_FOREACH_SAFE(sq, &vu_group->sqs, link, tmp) {
5795 if (spdk_unlikely(sq->sq_state != VFIO_USER_SQ_ACTIVE || !sq->size)) {
5799 ret = nvmf_vfio_user_sq_poll(sq);
5826 struct nvmf_vfio_user_sq *sq;
5829 sq = SPDK_CONTAINEROF(qpair, struct nvmf_vfio_user_sq, qpair);
5830 ctrlr = sq->ctrlr;
5847 struct nvmf_vfio_user_sq *sq;
5850 sq = SPDK_CONTAINEROF(qpair, struct nvmf_vfio_user_sq, qpair);
5851 ctrlr = sq->ctrlr;