Lines matching defs:cq (symbol hits for cq, the completion queue; the number on each hit is its line in the searched source file)
203 struct nvme_migr_cq_state cq;
590 cq_dbl_headp(struct nvmf_vfio_user_cq *cq)
592 assert(cq != NULL);
593 return cq->dbl_headp;
597 cq_tailp(struct nvmf_vfio_user_cq *cq)
599 assert(cq != NULL);
600 return &cq->tail;
617 cq_tail_advance(struct nvmf_vfio_user_cq *cq)
619 assert(cq != NULL);
621 assert(*cq_tailp(cq) < cq->size);
622 (*cq_tailp(cq))++;
624 if (spdk_unlikely(*cq_tailp(cq) == cq->size)) {
625 *cq_tailp(cq) = 0;
626 cq->phase = !cq->phase;
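
The hits from 590 through 626 are the CQ accessors (cq_dbl_headp, cq_tailp) and the tail-advance helper. They appear to come from SPDK's vfio-user NVMe-oF transport (the nvmf_vfio_user_* types and vfu_* calls suggest lib/nvmf/vfio_user.c); the C sketches interleaved below are illustrative stand-ins written against the NVMe queue model, not the SPDK code itself. The tail-advance mechanic is standard NVMe: the producer index wraps modulo the queue size and the phase tag flips on every wrap, which is how the guest tells freshly written completions from stale ones. A minimal sketch (hypothetical toy_cq type):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical, trimmed-down CQ state; the real struct nvmf_vfio_user_cq
 * additionally tracks mappings, doorbells, reference counts, etc. */
struct toy_cq {
        uint32_t tail;   /* next slot the controller will write    */
        uint32_t size;   /* number of entries in the ring          */
        bool     phase;  /* phase tag written into each completion */
};

/* Advance the producer tail one slot; on wrap, invert the phase tag so the
 * guest can detect the new generation of entries. */
static void
toy_cq_tail_advance(struct toy_cq *cq)
{
        assert(cq->tail < cq->size);

        if (++cq->tail == cq->size) {
                cq->tail = 0;
                cq->phase = !cq->phase;
        }
}
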
1027 struct nvmf_vfio_user_cq *cq = ctrlr->cqs[i];
1035 if (cq != NULL) {
1036 cq->dbl_headp = doorbells + queue_index(cq->qid, true);
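
At 1027-1036 each CQ's head-doorbell pointer is recomputed against a new doorbell base, indexed by queue_index(qid, true). That indexing presumably follows the NVMe BAR0 layout: with a doorbell stride of 0, queue pair y owns two consecutive 32-bit doorbells, the SQ tail doorbell first and the CQ head doorbell second. A sketch of that mapping (hypothetical helper name):

#include <stdbool.h>
#include <stdint.h>

/* NVMe doorbell slot for a queue, assuming CAP.DSTRD == 0 (4-byte stride):
 * SQyTDBL sits at index 2*y, CQyHDBL at 2*y + 1. */
static inline uint32_t
toy_queue_index(uint16_t qid, bool is_cq)
{
        return (uint32_t)qid * 2u + (is_cq ? 1u : 0u);
}

/* Usage: with doorbells pointing at the start of the doorbell array, the
 * admin CQ head doorbell is doorbells[toy_queue_index(0, true)]. */
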
1642 struct nvmf_vfio_user_cq *cq;
1648 cq = ctrlr->cqs[0];
1650 assert(cq != NULL);
1652 assert(q_addr(&cq->mapping) == NULL);
1656 cq->qid = 0;
1657 cq->size = regs->aqa.bits.acqs + 1;
1658 cq->mapping.prp1 = regs->acq;
1659 cq->mapping.len = cq->size * sizeof(struct spdk_nvme_cpl);
1660 *cq_tailp(cq) = 0;
1661 cq->ien = true;
1662 cq->phase = true;
1663 cq->nr_outstanding = 0;
1665 ret = map_q(ctrlr, &cq->mapping, MAP_RW | MAP_INITIALIZE);
1671 cq->dbl_headp = ctrlr->bar0_doorbells + queue_index(0, true);
1673 *cq_dbl_headp(cq) = 0;
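
Lines 1642-1673 set up the admin completion queue from the controller registers: the entry count comes from AQA.ACQS, which is a 0's-based field (hence the + 1 at 1657), the ring is mapped starting at ACQ, and both the software tail and the head doorbell start at zero with the phase tag set to true. A standalone sketch of the sizing arithmetic, using a 16-byte stand-in for struct spdk_nvme_cpl:

#include <stddef.h>
#include <stdint.h>

/* An NVMe completion entry is 16 bytes (stand-in for struct spdk_nvme_cpl). */
struct toy_nvme_cpl {
        uint32_t cdw0;
        uint32_t rsvd1;
        uint16_t sqhd;
        uint16_t sqid;
        uint16_t cid;
        uint16_t status;
};

/* AQA.ACQS lives in bits 27:16 and is 0's based, so the admin CQ holds
 * acqs + 1 entries; its backing memory at ACQ spans that many entries. */
static size_t
toy_admin_cq_bytes(uint32_t aqa)
{
        uint32_t acqs = (aqa >> 16) & 0xfff;

        return ((size_t)acqs + 1) * sizeof(struct toy_nvme_cpl);
}
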
1717 cq_free_slots(struct nvmf_vfio_user_cq *cq)
1721 assert(cq != NULL);
1723 if (cq->tail == cq->last_head) {
1724 free_slots = cq->size;
1725 } else if (cq->tail > cq->last_head) {
1726 free_slots = cq->size - (cq->tail - cq->last_head);
1728 free_slots = cq->last_head - cq->tail;
1740 cq_is_full(struct nvmf_vfio_user_cq *cq)
1744 assert(cq != NULL);
1746 free_cq_slots = cq_free_slots(cq);
1749 cq->last_head = *cq_dbl_headp(cq);
1750 free_cq_slots = cq_free_slots(cq);
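
cq_free_slots() (1717-1728) measures ring occupancy from the producer tail and a cached copy of the guest's head doorbell, and cq_is_full() (1740-1750) refreshes that cached head once before declaring the queue full, since the guest may have consumed entries since the last poll. The listing only shows the lines that mention cq, so the function's exact return value is not visible; the sketch below assumes the usual one-slot-reserve convention so that a full ring is distinguishable from an empty one:

#include <stdbool.h>
#include <stdint.h>

struct toy_cq_view {
        uint32_t tail;       /* producer index (next slot to write)      */
        uint32_t last_head;  /* cached copy of the guest's head doorbell */
        uint32_t size;       /* number of entries in the ring            */
};

/* Free slots between the producer tail and the cached consumer head.  One
 * slot is held back (assumption) so "full" and "empty" are unambiguous
 * when tail == head. */
static uint32_t
toy_cq_free_slots(const struct toy_cq_view *cq)
{
        uint32_t free_slots;

        if (cq->tail == cq->last_head) {
                free_slots = cq->size;
        } else if (cq->tail > cq->last_head) {
                free_slots = cq->size - (cq->tail - cq->last_head);
        } else {
                free_slots = cq->last_head - cq->tail;
        }
        return free_slots - 1;
}

/* Before reporting "full", refresh the cached head from the live doorbell:
 * the guest may already have consumed some completions. */
static bool
toy_cq_is_full(struct toy_cq_view *cq, const volatile uint32_t *head_doorbell)
{
        if (toy_cq_free_slots(cq) > 0) {
                return false;
        }
        cq->last_head = *head_doorbell;
        return toy_cq_free_slots(cq) == 0;
}
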
1760 * @cq: the completion queue
1768 post_completion(struct nvmf_vfio_user_ctrlr *ctrlr, struct nvmf_vfio_user_cq *cq,
1777 if (spdk_unlikely(cq == NULL || q_addr(&cq->mapping) == NULL)) {
1781 if (cq->qid == 0) {
1782 assert(spdk_get_thread() == cq->group->group->thread);
1792 if (cq_is_full(cq)) {
1794 ctrlr_id(ctrlr), cq->qid, *cq_tailp(cq),
1795 *cq_dbl_headp(cq));
1799 cpl = ((struct spdk_nvme_cpl *)q_addr(&cq->mapping)) + *cq_tailp(cq);
1804 "sqhead=%d cq tail=%d\n", ctrlr_id(ctrlr), sqid, cid, sc,
1805 *sq_headp(ctrlr->sqs[sqid]), *cq_tailp(cq));
1820 cpl_status.p = cq->phase;
1823 cq->nr_outstanding--;
1827 cq_tail_advance(cq);
1829 if ((cq->qid == 0 || !ctrlr->adaptive_irqs_enabled) &&
1830 cq->ien && ctrlr_interrupt_enabled(ctrlr)) {
1831 err = vfu_irq_trigger(ctrlr->endpoint->vfu_ctx, cq->iv);
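
post_completion() (1760-1831) writes one completion entry at the current tail, stamps it with the CQ's phase tag, advances the tail, and, when interrupts are enabled for the queue (and either it is the admin CQ or adaptive IRQs are off), raises the interrupt vector through vfu_irq_trigger(). A sketch of just the entry construction, with the status-word layout from the NVMe spec (P in bit 0, SC in bits 8:1, SCT in bits 11:9); the 16-byte stand-in struct is repeated so the sketch stands alone, and a real implementation would also order these stores before publishing the phase tag:

#include <stdbool.h>
#include <stdint.h>

/* 16-byte NVMe completion entry (stand-in for struct spdk_nvme_cpl). */
struct toy_nvme_cpl {
        uint32_t cdw0;    /* command-specific result                  */
        uint32_t rsvd1;
        uint16_t sqhd;    /* SQ head pointer at completion time       */
        uint16_t sqid;    /* submission queue the command came from   */
        uint16_t cid;     /* command identifier                       */
        uint16_t status;  /* bit 0 P, bits 8:1 SC, bits 11:9 SCT, ... */
};

/* Fill the completion slot at 'tail'.  The phase tag is what the guest polls
 * on, so it goes into bit 0 of the status word. */
static void
toy_fill_completion(volatile struct toy_nvme_cpl *ring, uint32_t tail,
                    uint32_t cdw0, uint16_t sqhd, uint16_t sqid, uint16_t cid,
                    uint8_t sct, uint8_t sc, bool phase)
{
        volatile struct toy_nvme_cpl *cpl = &ring[tail];

        cpl->cdw0 = cdw0;
        cpl->sqhd = sqhd;
        cpl->sqid = sqid;
        cpl->cid  = cid;
        cpl->status = (uint16_t)(((sct & 0x7u) << 9) | ((sc & 0xffu) << 1) |
                                 (phase ? 1u : 0u));
}
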
1853 delete_cq_done(struct nvmf_vfio_user_ctrlr *ctrlr, struct nvmf_vfio_user_cq *cq)
1855 assert(cq->cq_ref == 0);
1856 unmap_q(ctrlr, &cq->mapping);
1857 cq->size = 0;
1858 cq->cq_state = VFIO_USER_CQ_DELETED;
1859 cq->group = NULL;
1860 cq->nr_outstanding = 0;
1870 struct nvmf_vfio_user_cq *cq;
1891 cq = vu_ctrlr->cqs[cqid];
1894 cq->qid, cq);
1896 assert(cq->cq_ref > 0);
1897 if (--cq->cq_ref == 0) {
1898 delete_cq_done(vu_ctrlr, cq);
1907 struct nvmf_vfio_user_cq *cq;
1925 cq = ctrlr->cqs[qid];
1926 if (cq) {
1928 unmap_q(ctrlr, &cq->mapping);
1929 free(cq->mapping.sg);
1930 free(cq);
1969 struct nvmf_vfio_user_cq *cq;
1974 cq = calloc(1, sizeof(*cq));
1975 if (cq == NULL) {
1978 cq->mapping.sg = calloc(1, dma_sg_size());
1979 if (cq->mapping.sg == NULL) {
1980 free(cq);
1984 cq->qid = id;
1985 vu_ctrlr->cqs[id] = cq;
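
CQ objects are allocated at 1969-1985: the structure itself plus one scatter-gather descriptor (sized by libvfio-user's dma_sg_size(), per the hit at 1978) for the later DMA mapping, with the partial allocation rolled back on failure. A generic sketch of that pattern (hypothetical types; sg_size stands in for dma_sg_size()):

#include <stdint.h>
#include <stdlib.h>

struct toy_mapping { void *sg; };

struct toy_cq_obj {
        uint16_t qid;
        struct toy_mapping mapping;
};

/* Zero-allocate the CQ bookkeeping plus the SG descriptor it will need when
 * the ring is DMA-mapped; undo the first allocation if the second fails. */
static struct toy_cq_obj *
toy_cq_alloc(uint16_t id, size_t sg_size)
{
        struct toy_cq_obj *cq = calloc(1, sizeof(*cq));

        if (cq == NULL) {
                return NULL;
        }

        cq->mapping.sg = calloc(1, sg_size);
        if (cq->mapping.sg == NULL) {
                free(cq);
                return NULL;
        }

        cq->qid = id;
        return cq;
}
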
2155 struct nvmf_vfio_user_cq *cq;
2183 cq = ctrlr->cqs[qid];
2184 cq->size = qsize;
2186 cq->mapping.prp1 = cmd->dptr.prp.prp1;
2187 cq->mapping.len = cq->size * sizeof(struct spdk_nvme_cpl);
2189 cq->dbl_headp = ctrlr_doorbell_ptr(ctrlr) + queue_index(qid, true);
2191 err = map_q(ctrlr, &cq->mapping, MAP_RW | MAP_INITIALIZE);
2200 q_addr(&cq->mapping));
2202 cq->ien = cmd->cdw11_bits.create_io_cq.ien;
2203 cq->iv = cmd->cdw11_bits.create_io_cq.iv;
2204 cq->phase = true;
2205 cq->cq_state = VFIO_USER_CQ_CREATED;
2207 *cq_tailp(cq) = 0;
2218 *cq_dbl_headp(cq) = 0;
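
The Create I/O CQ handler (2155-2218) pulls the queue size and PRP1 from the command, maps the ring, copies IEN and IV out of CDW11, and starts the queue with phase = true and both the tail and the head doorbell at zero. The cdw10_bits/cdw11_bits accessors at 2202-2203 presumably correspond to the spec-defined command fields; decoding them by hand looks roughly like this (whether the 0's-based QSIZE has already had 1 added by the time cq->size is assigned at 2184 is not visible in the hits):

#include <stdbool.h>
#include <stdint.h>

/* Fields of an NVMe Create I/O Completion Queue command (opcode 0x05),
 * as laid out in CDW10/CDW11 by the NVMe specification. */
struct toy_create_io_cq {
        uint16_t qid;    /* CDW10 bits 15:0                        */
        uint16_t qsize;  /* CDW10 bits 31:16, 0's based            */
        bool     pc;     /* CDW11 bit 0: physically contiguous     */
        bool     ien;    /* CDW11 bit 1: interrupts enabled        */
        uint16_t iv;     /* CDW11 bits 31:16: interrupt vector     */
};

static struct toy_create_io_cq
toy_decode_create_io_cq(uint32_t cdw10, uint32_t cdw11)
{
        struct toy_create_io_cq c = {
                .qid   = (uint16_t)(cdw10 & 0xffffu),
                .qsize = (uint16_t)(cdw10 >> 16),
                .pc    = (cdw11 & 0x1u) != 0,
                .ien   = (cdw11 & 0x2u) != 0,
                .iv    = (uint16_t)(cdw11 >> 16),
        };

        return c;
}
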
2322 struct nvmf_vfio_user_cq *cq;
2337 cq = ctrlr->cqs[cmd->cdw10_bits.delete_io_q.qid];
2338 if (cq->cq_ref) {
2344 delete_cq_done(ctrlr, cq);
2573 struct nvmf_vfio_user_cq *cq = ctrlr->cqs[sq->cqid];
2588 free_cq_slots = cq_free_slots(cq);
2609 if ((free_cq_slots-- <= cq->nr_outstanding)) {
2611 cq->last_head = *cq_dbl_headp(cq);
2613 free_cq_slots = cq_free_slots(cq);
2614 if (free_cq_slots > cq->nr_outstanding) {
2637 cq->nr_outstanding++;
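
The hits at 2573-2637 show how the SQ fetch path throttles itself against the CQ: cq->nr_outstanding counts commands that have been fetched but whose completions have not been posted yet (incremented at 2637, decremented back in post_completion at 1823), and fetching stops once the free CQ slots, even after refreshing the cached head doorbell, no longer cover the completions already owed. A condensed sketch of that admission check (the free-slot helper from the earlier sketch is repeated so this stands alone; the surrounding fetch loop and error handling are not part of the hits):

#include <stdbool.h>
#include <stdint.h>

struct toy_cq_credit {
        uint32_t tail;
        uint32_t last_head;
        uint32_t size;
        uint32_t nr_outstanding;  /* completions owed but not yet posted */
};

static uint32_t
toy_free_slots(const struct toy_cq_credit *cq)
{
        if (cq->tail == cq->last_head) {
                return cq->size - 1;
        }
        if (cq->tail > cq->last_head) {
                return cq->size - (cq->tail - cq->last_head) - 1;
        }
        return cq->last_head - cq->tail - 1;
}

/* Admit one more SQ command only if the CQ can still absorb a completion for
 * it on top of everything already outstanding; refresh the cached head
 * doorbell once before giving up. */
static bool
toy_admit_command(struct toy_cq_credit *cq, const volatile uint32_t *head_doorbell)
{
        uint32_t free_slots = toy_free_slots(cq);

        if (free_slots <= cq->nr_outstanding) {
                cq->last_head = *head_doorbell;
                free_slots = toy_free_slots(cq);
                if (free_slots <= cq->nr_outstanding) {
                        return false;  /* back off until the guest consumes more */
                }
        }
        cq->nr_outstanding++;  /* released when the completion is posted */
        return true;
}
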
2680 struct nvmf_vfio_user_cq *cq;
2729 cq = ctrlr->cqs[sq->cqid];
2732 if (cq->size && q_addr(&cq->mapping) == NULL) {
2733 ret = map_q(ctrlr, &cq->mapping, MAP_RW | MAP_QUIET);
2736 cq->qid, cq->mapping.prp1,
2737 cq->mapping.prp1 + cq->mapping.len);
2762 struct nvmf_vfio_user_cq *cq;
2795 cq = ctrlr->cqs[sq->cqid];
2796 if (q_addr(&cq->mapping) >= map_start && q_addr(&cq->mapping) <= map_end) {
2797 unmap_q(ctrlr, &cq->mapping);
3432 struct nvme_migr_cq_state *cq;
3461 cq = &migr_data->qps[i].cq;
3475 if (cq->size) {
3476 SPDK_NOTICELOG("cqid:%u, bar0_doorbell:%u\n", cq->cqid, doorbell_base[i * 2 + 1]);
3479 cq->cqid,
3484 cq->cqid, cq->phase, cq->tail, cq->size, cq->iv, cq->ien, cq->dma_addr);
3533 struct nvmf_vfio_user_cq *cq;
3567 /* save cq, for shared cq case, cq may be saved multiple times */
3569 cq = vu_ctrlr->cqs[cqid];
3570 migr_state.qps[cqid].cq.cqid = cqid;
3571 migr_state.qps[cqid].cq.tail = *cq_tailp(cq);
3572 migr_state.qps[cqid].cq.ien = cq->ien;
3573 migr_state.qps[cqid].cq.iv = cq->iv;
3574 migr_state.qps[cqid].cq.size = cq->size;
3575 migr_state.qps[cqid].cq.phase = cq->phase;
3576 migr_state.qps[cqid].cq.dma_addr = cq->mapping.prp1;
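
During live-migration save (3533-3576) each CQ's software-visible state is copied into a per-queue record: queue ID, tail, phase, size, interrupt vector and enable, and the guest-physical base (PRP1) of the ring. The layout of struct nvme_migr_cq_state is not visible in these hits, so the record below is only an illustrative mirror of the fields being copied; the real structure presumably has fixed widths and padding for cross-version compatibility:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative per-CQ migration record mirroring the fields copied at
 * 3570-3576 (field widths and ordering are guesses). */
struct toy_migr_cq_state {
        uint16_t cqid;
        uint32_t tail;      /* producer index on the source side    */
        uint32_t size;
        uint16_t iv;        /* interrupt vector                     */
        bool     ien;       /* interrupts enabled                   */
        bool     phase;     /* current phase tag                    */
        uint64_t dma_addr;  /* guest-physical ring base (PRP1)      */
};

struct toy_live_cq {
        uint16_t cqid;
        uint32_t tail;
        uint32_t size;
        uint16_t iv;
        bool     ien;
        bool     phase;
        uint64_t prp1;
};

/* Snapshot the live CQ into the migration record; the ring contents sit in
 * guest memory (dirty-tracked at 3912-3918) and travel with it, so only the
 * indices and metadata are saved here. */
static void
toy_save_cq_state(const struct toy_live_cq *cq, struct toy_migr_cq_state *out)
{
        out->cqid     = cq->cqid;
        out->tail     = cq->tail;
        out->size     = cq->size;
        out->iv       = cq->iv;
        out->ien      = cq->ien;
        out->phase    = cq->phase;
        out->dma_addr = cq->prp1;
}
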
3739 qsize = migr_qp.cq.size;
3741 struct nvmf_vfio_user_cq *cq;
3743 /* restore cq */
3747 /* allocate cq if necessary */
3756 cq = vu_ctrlr->cqs[cqid];
3758 cq->size = qsize;
3760 cq->cq_state = VFIO_USER_CQ_CREATED;
3761 cq->cq_ref = cqs_ref[cqid];
3762 *cq_tailp(cq) = migr_qp.cq.tail;
3763 cq->mapping.prp1 = migr_qp.cq.dma_addr;
3764 cq->mapping.len = cq->size * sizeof(struct spdk_nvme_cpl);
3765 cq->ien = migr_qp.cq.ien;
3766 cq->iv = migr_qp.cq.iv;
3767 cq->phase = migr_qp.cq.phase;
3769 cq->mapping.prp1, cq->mapping.len,
3770 cq->mapping.sg, &cq->mapping.iov,
3773 SPDK_ERRLOG("Restore cq with qid %u PRP1 0x%"PRIx64" with size %u failed\n",
3774 cqid, cq->mapping.prp1, cq->size);
3912 struct nvmf_vfio_user_cq *cq = vu_ctrlr->cqs[i];
3914 if (cq == NULL || q_addr(&cq->mapping) == NULL) {
3918 vfu_sgl_mark_dirty(endpoint->vfu_ctx, cq->mapping.sg, 1);
4801 struct nvmf_vfio_user_cq *cq;
4806 cq = sq->ctrlr->cqs[sq->cqid];
4807 assert(cq != NULL);
4821 if (cq->group != NULL) {
4822 result = cq->group;
4850 if (cq->group == NULL) {
4851 cq->group = result;
4983 struct nvmf_vfio_user_cq *cq;
4992 post_completion(cpl_ctx->ctrlr, cpl_ctx->cq, cpl_ctx->cpl.cdw0, cpl_ctx->cpl.sqid,
5196 cpl_ctx->cq = admin_cq;
5663 struct nvmf_vfio_user_cq *cq = ctrlr->cqs[sq->cqid];
5667 if (!cq->ien || cq->qid == 0 || !ctrlr_interrupt_enabled(ctrlr)) {
5671 cq_tail = *cq_tailp(cq);
5674 if (cq_tail == cq->last_trigger_irq_tail) {
5678 spdk_ivdt_dcache(cq_dbl_headp(cq));
5679 cq_head = *cq_dbl_headp(cq);
5681 if (cq_head != cq_tail && cq_head == cq->last_head) {
5682 int err = vfu_irq_trigger(ctrlr->endpoint->vfu_ctx, cq->iv);
5687 cq->last_trigger_irq_tail = cq_tail;
5691 cq->last_head = cq_head;
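
The final group (5663-5691) is the deferred/adaptive interrupt path: for an I/O CQ with interrupts enabled, an interrupt is raised only when there are unconsumed completions (head != tail), no interrupt has already been fired for this tail value, and the guest's head has not moved since the previous poll, i.e. it does not appear to be actively polling. spdk_ivdt_dcache() at 5678 presumably invalidates the cache line holding the head doorbell before it is read, and the actual signal goes out through vfu_irq_trigger() with the CQ's interrupt vector; the sketch below keeps only the decision logic:

#include <stdbool.h>
#include <stdint.h>

struct toy_irq_state {
        uint32_t last_head;              /* head observed on the previous poll */
        uint32_t last_trigger_irq_tail;  /* tail value we last interrupted for */
};

/* Decide whether to raise the CQ's interrupt vector on this poll. */
static bool
toy_should_trigger_irq(struct toy_irq_state *st, uint32_t cq_tail,
                       const volatile uint32_t *head_doorbell)
{
        uint32_t cq_head;
        bool fire = false;

        /* Already interrupted for this tail; nothing new to signal. */
        if (cq_tail == st->last_trigger_irq_tail) {
                return false;
        }

        cq_head = *head_doorbell;

        /* Completions are pending and the guest has not consumed anything
         * since the last poll, so it is probably waiting on an interrupt. */
        if (cq_head != cq_tail && cq_head == st->last_head) {
                st->last_trigger_irq_tail = cq_tail;
                fire = true;
        }

        st->last_head = cq_head;
        return fire;
}
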