Lines matching defs:qp (FreeBSD NVMe over Fabrics target qpair code; each match is prefixed with its line number in the source file)
26 struct nvmf_qpair *qp;
34 volatile u_int qp_refs; /* Internal references on 'qp'. */
44 static int _nvmft_send_generic_error(struct nvmft_qpair *qp,
51 struct nvmft_qpair *qp = arg;
52 struct nvmft_controller *ctrlr = qp->ctrlr;
63 nvmft_printf(ctrlr, "error %d on %s\n", error, qp->name);
64 nvmft_controller_error(ctrlr, qp, error);
70 struct nvmft_qpair *qp = arg;
71 struct nvmft_controller *ctrlr = qp->ctrlr;
78 qp->name, le16toh(cmd->cid), cmd->opc);
85 _nvmft_send_generic_error(qp, nc, sc_status);
91 if (BIT_TEST_SET_ATOMIC(NUM_CIDS, cmd->cid, qp->cids)) {
92 _nvmft_send_generic_error(qp, nc, NVME_SC_COMMAND_ID_CONFLICT);
97 if (qp->admin)
100 nvmft_handle_io_command(qp, qp->qid, nc);
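The receive path above (lines 91-92) rejects a command whose CID is already in
flight: BIT_TEST_SET_ATOMIC() atomically sets the CID's bit in qp->cids and
reports whether it was already set, and a conflict is answered through the
error path that does not touch the bitset, so the original command's bit
survives. A minimal userland sketch of the same test-and-set idea, using C11
atomics in place of FreeBSD's bitset(9) macros (all names here are
illustrative, not the driver's):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_CIDS (UINT16_MAX + 1)       /* NVMe CIDs are 16 bits wide */

    /* One bit per possible CID, set while that command is in flight. */
    static _Atomic uint64_t cid_bits[NUM_CIDS / 64];

    /* Atomically set cid's bit; return true if it was already set. */
    static bool
    cid_test_and_set(uint16_t cid)
    {
            uint64_t mask = 1ULL << (cid % 64);

            return ((atomic_fetch_or(&cid_bits[cid / 64], mask) & mask) != 0);
    }

    /* Clear cid's bit once the command has completed. */
    static void
    cid_clear(uint16_t cid)
    {
            atomic_fetch_and(&cid_bits[cid / 64], ~(1ULL << (cid % 64)));
    }

    int
    main(void)
    {
            printf("%d\n", cid_test_and_set(7));    /* 0: first use is fine */
            printf("%d\n", cid_test_and_set(7));    /* 1: conflict detected */
            cid_clear(7);                           /* as on lines 228/240 */
            printf("%d\n", cid_test_and_set(7));    /* 0: CID reusable again */
            return (0);
    }

This also shows why the conflict reply goes through _nvmft_send_generic_error():
clearing the bit on that error path would release a CID still owned by the
earlier, still-running command.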
107 struct nvmft_qpair *qp;
109 qp = malloc(sizeof(*qp), M_NVMFT, M_WAITOK | M_ZERO);
110 qp->admin = nvlist_get_bool(params, "admin");
111 qp->sq_flow_control = nvlist_get_bool(params, "sq_flow_control");
112 qp->qsize = nvlist_get_number(params, "qsize");
113 qp->qid = qid;
114 qp->sqhd = nvlist_get_number(params, "sqhd");
115 strlcpy(qp->name, name, sizeof(qp->name));
116 mtx_init(&qp->lock, "nvmft qp", NULL, MTX_DEF);
117 qp->cids = BITSET_ALLOC(NUM_CIDS, M_NVMFT, M_WAITOK | M_ZERO);
118 STAILQ_INIT(&qp->datamove_queue);
119 TASK_INIT(&qp->datamove_task, 0, nvmft_datamove_task, qp);
121 qp->qp = nvmf_allocate_qpair(trtype, true, params, nvmft_qpair_error,
122 qp, nvmft_receive_capsule, qp);
123 if (qp->qp == NULL) {
124 mtx_destroy(&qp->lock);
125 free(qp->cids, M_NVMFT);
126 free(qp, M_NVMFT);
130 refcount_init(&qp->qp_refs, 1);
131 return (qp);
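Lines 109-126 follow the usual construct-or-unwind shape: every resource
initialized before the fallible nvmf_allocate_qpair() call is torn down, in
reverse order, when that call fails. A hedged userland sketch of the shape,
with calloc() standing in for malloc(9) with M_WAITOK | M_ZERO and pthreads
for mtx(9); the types and names are invented for illustration:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct qpair_wrap {
            pthread_mutex_t  lock;          /* counterpart of mtx_init() */
            uint64_t        *cids;          /* counterpart of BITSET_ALLOC() */
            void            *transport_qp;  /* counterpart of the qpair */
    };

    static struct qpair_wrap *
    qpair_wrap_create(void)
    {
            struct qpair_wrap *qp;

            qp = calloc(1, sizeof(*qp));
            if (qp == NULL)
                    return (NULL);
            pthread_mutex_init(&qp->lock, NULL);
            qp->cids = calloc((UINT16_MAX + 1) / 64, sizeof(uint64_t));
            if (qp->cids == NULL)
                    goto fail_lock;
            qp->transport_qp = malloc(1);   /* stands in for the fallible
                                               nvmf_allocate_qpair() call */
            if (qp->transport_qp == NULL)
                    goto fail_cids;
            return (qp);

    fail_cids:                              /* reverse order, as on the
                                               failure path (lines 124-126) */
            free(qp->cids);
    fail_lock:
            pthread_mutex_destroy(&qp->lock);
            free(qp);
            return (NULL);
    }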
135 nvmft_qpair_shutdown(struct nvmft_qpair *qp)
142 mtx_lock(&qp->lock);
143 nq = qp->qp;
144 qp->qp = NULL;
145 STAILQ_CONCAT(&datamove_queue, &qp->datamove_queue);
146 mtx_unlock(&qp->lock);
147 if (nq != NULL && refcount_release(&qp->qp_refs))
155 nvmft_drain_task(&qp->datamove_task);
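nvmft_qpair_shutdown() (lines 142-155) detaches the transport qpair and
splices any queued datamoves onto a stack-local list while holding the lock,
then finishes both outside it; the final nvmft_drain_task() ensures the
datamove task is no longer running before teardown proceeds. A sketch of the
detach-and-splice portion, assuming a BSD <sys/queue.h> that provides
STAILQ_CONCAT and pthreads in place of mtx(9) (illustrative names throughout):

    #include <sys/queue.h>
    #include <pthread.h>
    #include <stddef.h>

    struct item {
            STAILQ_ENTRY(item) links;
    };
    STAILQ_HEAD(item_list, item);

    struct wrap {
            pthread_mutex_t  lock;
            void            *handle;        /* NULL once shut down */
            struct item_list pending;
    };

    static void
    wrap_shutdown(struct wrap *w)
    {
            struct item_list local = STAILQ_HEAD_INITIALIZER(local);
            struct item *it;
            void *handle;

            pthread_mutex_lock(&w->lock);
            handle = w->handle;
            w->handle = NULL;               /* refuse any new work */
            STAILQ_CONCAT(&local, &w->pending);
            pthread_mutex_unlock(&w->lock);

            /* Abort the spliced-out work without holding the lock. */
            while ((it = STAILQ_FIRST(&local)) != NULL)
                    STAILQ_REMOVE_HEAD(&local, links);

            (void)handle;   /* the driver frees it on the last reference */
    }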
159 nvmft_qpair_destroy(struct nvmft_qpair *qp)
161 nvmft_qpair_shutdown(qp);
162 mtx_destroy(&qp->lock);
163 free(qp->cids, M_NVMFT);
164 free(qp, M_NVMFT);
168 nvmft_qpair_ctrlr(struct nvmft_qpair *qp)
170 return (qp->ctrlr);
174 nvmft_qpair_id(struct nvmft_qpair *qp)
176 return (qp->qid);
180 nvmft_qpair_name(struct nvmft_qpair *qp)
182 return (qp->name);
186 _nvmft_send_response(struct nvmft_qpair *qp, const void *cqe)
194 mtx_lock(&qp->lock);
195 nq = qp->qp;
197 mtx_unlock(&qp->lock);
200 refcount_acquire(&qp->qp_refs);
203 if (qp->sq_flow_control) {
204 qp->sqhd = (qp->sqhd + 1) % qp->qsize;
205 cpl.sqhd = htole16(qp->sqhd);
208 mtx_unlock(&qp->lock);
214 if (refcount_release(&qp->qp_refs))
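_nvmft_send_response() (lines 194-214) is why qp_refs exists: the qpair lock
cannot be held across the transmit, so the sender checks qp->qp under the
lock, takes a reference to keep the transport qpair alive across the unlock,
advances SQHD for SQ flow control while still locked, and frees the qpair
only if its release drops the last reference. A minimal sketch of that
reference dance, with C11 atomics standing in for refcount(9) (names invented
for illustration):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <pthread.h>

    struct conn {
            pthread_mutex_t  lock;
            void            *transport;     /* NULL once shut down */
            atomic_uint      refs;          /* counterpart of qp_refs */
            unsigned         sqhd, qsize;
            bool             sq_flow_control;
    };

    static int
    conn_send_cqe(struct conn *c, unsigned *sqhd_out)
    {
            pthread_mutex_lock(&c->lock);
            if (c->transport == NULL) {
                    pthread_mutex_unlock(&c->lock);
                    return (-1);            /* qpair already shut down */
            }
            atomic_fetch_add(&c->refs, 1);  /* refcount_acquire() analogue */
            if (c->sq_flow_control) {
                    /* SQHD advances once per completion, modulo queue size. */
                    c->sqhd = (c->sqhd + 1) % c->qsize;
                    *sqhd_out = c->sqhd;    /* reported in the CQE */
            }
            pthread_mutex_unlock(&c->lock);

            /* ... transmit the completion via c->transport, unlocked ... */

            if (atomic_fetch_sub(&c->refs, 1) == 1) {
                    /* Last reference: free the transport qpair here. */
            }
            return (0);
    }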
220 nvmft_command_completed(struct nvmft_qpair *qp, struct nvmf_capsule *nc)
225 KASSERT(BIT_ISSET(NUM_CIDS, cmd->cid, qp->cids),
228 BIT_CLR_ATOMIC(NUM_CIDS, cmd->cid, qp->cids);
232 nvmft_send_response(struct nvmft_qpair *qp, const void *cqe)
237 KASSERT(BIT_ISSET(NUM_CIDS, cpl->cid, qp->cids),
240 BIT_CLR_ATOMIC(NUM_CIDS, cpl->cid, qp->cids);
241 return (_nvmft_send_response(qp, cqe));
256 nvmft_send_error(struct nvmft_qpair *qp, struct nvmf_capsule *nc,
265 return (nvmft_send_response(qp, &cpl));
269 nvmft_send_generic_error(struct nvmft_qpair *qp, struct nvmf_capsule *nc,
272 return (nvmft_send_error(qp, nc, NVME_SCT_GENERIC, sc_status));
276 * This version doesn't clear CID in qp->cids and is used for errors
280 _nvmft_send_generic_error(struct nvmft_qpair *qp, struct nvmf_capsule *nc,
289 return (_nvmft_send_response(qp, &cpl));
293 nvmft_send_success(struct nvmft_qpair *qp, struct nvmf_capsule *nc)
295 return (nvmft_send_generic_error(qp, nc, NVME_SC_SUCCESS));
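nvmft_send_success() (line 295) is simply the generic-error path invoked with
NVME_SC_SUCCESS: a successful completion is a generic-command-status
completion whose status code is zero. For reference, a sketch of how the
16-bit CQE status field packs the status code type and status code, assuming
FreeBSD's layout (phase tag in bit 0, SC in bits 1-8, SCT in bits 9-11); the
real code uses the field macros from nvme.h rather than open-coded shifts:

    #include <stdint.h>

    /* Illustrative packing of SCT and SC into the CQE status field. */
    static uint16_t
    nvme_status(uint16_t sct, uint16_t sc)
    {
            return ((uint16_t)(((sct & 0x7) << 9) | ((sc & 0xff) << 1)));
    }
    /* SCT 0 (generic) with SC 0 (success) packs to 0: an all-clear status. */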
308 nvmft_send_connect_response(struct nvmft_qpair *qp,
315 mtx_lock(&qp->lock);
316 nq = qp->qp;
318 mtx_unlock(&qp->lock);
321 refcount_acquire(&qp->qp_refs);
322 mtx_unlock(&qp->lock);
324 rc = nvmf_allocate_response(qp->qp, rsp, M_WAITOK);
328 if (refcount_release(&qp->qp_refs))
334 nvmft_connect_error(struct nvmft_qpair *qp,
344 nvmft_send_connect_response(qp, &rsp);
348 nvmft_connect_invalid_parameters(struct nvmft_qpair *qp,
358 nvmft_send_connect_response(qp, &rsp);
362 nvmft_finish_accept(struct nvmft_qpair *qp,
367 qp->ctrlr = ctrlr;
369 if (qp->sq_flow_control)
370 rsp.sqhd = htole16(qp->sqhd);
374 return (nvmft_send_connect_response(qp, &rsp));
378 nvmft_qpair_datamove(struct nvmft_qpair *qp, union ctl_io *io)
382 mtx_lock(&qp->lock);
383 if (qp->qp == NULL) {
384 mtx_unlock(&qp->lock);
388 enqueue_task = STAILQ_EMPTY(&qp->datamove_queue);
389 STAILQ_INSERT_TAIL(&qp->datamove_queue, &io->io_hdr, links);
390 mtx_unlock(&qp->lock);
392 nvmft_enqueue_task(&qp->datamove_task);
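nvmft_qpair_datamove() (lines 382-392) enqueues under the lock but schedules
the task only when the queue was previously empty, so a single task dispatch
drains an entire batch; a NULL qp->qp means the qpair is gone and the caller
must abort the datamove instead. A sketch of that edge-triggered kick, again
with pthreads and a BSD <sys/queue.h> (names illustrative):

    #include <sys/queue.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct work {
            STAILQ_ENTRY(work) links;
    };
    STAILQ_HEAD(work_list, work);

    struct dmq {
            pthread_mutex_t  lock;
            struct work_list items;
            bool             dead;          /* analogue of qp->qp == NULL */
    };

    /* Returns false if the queue is dead; the caller aborts the I/O. */
    static bool
    dmq_push(struct dmq *q, struct work *w)
    {
            bool kick;

            pthread_mutex_lock(&q->lock);
            if (q->dead) {
                    pthread_mutex_unlock(&q->lock);
                    return (false);
            }
            kick = STAILQ_EMPTY(&q->items); /* empty -> non-empty edge? */
            STAILQ_INSERT_TAIL(&q->items, w, links);
            pthread_mutex_unlock(&q->lock);
            if (kick) {
                    /* Schedule the drain task; at most once per batch. */
            }
            return (true);
    }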
398 struct nvmft_qpair *qp = context;
402 mtx_lock(&qp->lock);
403 while (!STAILQ_EMPTY(&qp->datamove_queue)) {
404 io = (union ctl_io *)STAILQ_FIRST(&qp->datamove_queue);
405 STAILQ_REMOVE_HEAD(&qp->datamove_queue, links);
406 abort = (qp->qp == NULL);
407 mtx_unlock(&qp->lock);
412 mtx_lock(&qp->lock);
414 mtx_unlock(&qp->lock);
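The drain task (lines 402-414) pops one entry at a time, snapshots whether
the qpair died since the enqueue, and drops the lock before touching the I/O,
so datamoves never run under the qpair lock. Continuing the sketch above,
reusing its struct dmq and struct work:

    /* Illustrative drain loop: never process an item while locked. */
    static void
    dmq_drain(struct dmq *q)
    {
            struct work *w;
            bool abort_io;

            pthread_mutex_lock(&q->lock);
            while (!STAILQ_EMPTY(&q->items)) {
                    w = STAILQ_FIRST(&q->items);
                    STAILQ_REMOVE_HEAD(&q->items, links);
                    abort_io = q->dead;     /* shut down since the enqueue? */
                    pthread_mutex_unlock(&q->lock);

                    if (abort_io) {
                            /* fail the datamove for 'w' */
                    } else {
                            /* perform the datamove for 'w' */
                    }
                    (void)w;

                    pthread_mutex_lock(&q->lock);
            }
            pthread_mutex_unlock(&q->lock);
    }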