Lines matching refs:ccb (cross-reference hits for struct nvme_ccb in the nvme driver)
509 struct nvme_ccb *ccb;
512 ccb = scsi_io_get(&sc->sc_iopool, 0);
513 KASSERT(ccb != NULL);
525 ccb->ccb_done = nvme_empty_done;
526 ccb->ccb_cookie = &sqe;
529 rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_IDENT);
532 scsi_io_put(&sc->sc_iopool, ccb);
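
The hits at 509-532 above trace the driver's stock admin-command sequence: reserve a ccb from the iopool, point ccb_done at nvme_empty_done and ccb_cookie at a submission-queue entry built on the stack, poll the admin queue with nvme_sqe_fill as the fill callback, then hand the ccb back. The following is a minimal userland sketch of that cookie-carries-the-SQE pattern; every type, value and helper in it is a simplified stand-in, not the driver's own.

/*
 * Sketch only: a prebuilt submission-queue entry hangs off ccb_cookie
 * and a fill callback copies it into the queue slot at submit time.
 */
#include <stdio.h>
#include <string.h>

struct sqe { unsigned char opcode; unsigned short cid; };

struct ccb {
	void *ccb_cookie;
	void (*ccb_done)(struct ccb *, int status);
	unsigned short ccb_id;
};

static void
sqe_fill(struct ccb *ccb, void *slot)
{
	struct sqe *src = ccb->ccb_cookie;

	/* nvme_sqe_fill() likewise copies the caller's SQE verbatim */
	memcpy(slot, src, sizeof(*src));
}

static void
empty_done(struct ccb *ccb, int status)
{
	/* nothing to do; the polling caller inspects the completion itself */
}

int
main(void)
{
	struct sqe sqe = { .opcode = 0x06 };	/* illustrative opcode value */
	struct sqe slot;
	struct ccb ccb = { .ccb_id = 1 };

	ccb.ccb_done = empty_done;
	ccb.ccb_cookie = &sqe;

	sqe_fill(&ccb, &slot);	/* what nvme_q_submit() does via (*fill)() */
	slot.cid = ccb.ccb_id;	/* the cid ties the completion back to this ccb */

	printf("submitted opcode %#x cid %u\n", slot.opcode, slot.cid);
	return 0;
}
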
670 struct nvme_ccb *ccb = xs->io;
671 bus_dmamap_t dmap = ccb->ccb_dmamap;
677 ccb->ccb_done = nvme_scsi_io_done;
678 ccb->ccb_cookie = xs;
691 htolem64(&ccb->ccb_prpl[i - 1],
696 ccb->ccb_prpl_off,
697 sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
702 nvme_poll(sc, sc->sc_q, ccb, nvme_scsi_io_fill, xs->timeout);
706 nvme_q_submit(sc, sc->sc_q, ccb, nvme_scsi_io_fill);
715 nvme_scsi_io_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
718 struct scsi_xfer *xs = ccb->ccb_cookie;
720 bus_dmamap_t dmap = ccb->ccb_dmamap;
739 htolem64(&sqe->entry.prp[1], ccb->ccb_prpl_dva);
748 nvme_scsi_io_done(struct nvme_softc *sc, struct nvme_ccb *ccb,
751 struct scsi_xfer *xs = ccb->ccb_cookie;
752 bus_dmamap_t dmap = ccb->ccb_dmamap;
758 ccb->ccb_prpl_off,
759 sizeof(*ccb->ccb_prpl) * (dmap->dm_nsegs - 1),
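
The nvme_scsi_io() and nvme_scsi_io_fill()/nvme_scsi_io_done() hits above also show each ccb carrying a preallocated PRP list (ccb_prpl, with its device-visible address in ccb_prpl_dva): transfers with more than two DMA segments spill segments 1..n-1 into that list, and prp[1] then carries the list's address rather than a data address. A hedged, self-contained sketch of that decision follows; the segment and address types are stand-ins for the bus_dma structures.

/*
 * Sketch of the PRP choice the matched lines imply: the first data
 * segment always goes in prp[0]; a second segment fits directly in
 * prp[1]; anything longer spills segments 1..n-1 into the ccb's
 * preallocated PRP list and prp[1] points at that list instead.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_SEGS 8

struct xfer {
	int		nsegs;
	uint64_t	seg_addr[MAX_SEGS];	/* stand-in for dm_segs[].ds_addr */
};

struct ccb {
	uint64_t	prpl[MAX_SEGS - 1];	/* stand-in for ccb_prpl */
	uint64_t	prpl_dva;		/* device address of prpl[] */
};

static void
fill_prps(struct ccb *ccb, struct xfer *x, uint64_t prp[2])
{
	int i;

	prp[0] = x->seg_addr[0];
	switch (x->nsegs) {
	case 1:
		break;
	case 2:
		prp[1] = x->seg_addr[1];
		break;
	default:
		for (i = 1; i < x->nsegs; i++)
			ccb->prpl[i - 1] = x->seg_addr[i];
		prp[1] = ccb->prpl_dva;
		break;
	}
}

int
main(void)
{
	struct xfer x = { .nsegs = 3,
	    .seg_addr = { 0x1000, 0x2000, 0x3000 } };
	struct ccb ccb = { .prpl_dva = 0xd000 };
	uint64_t prp[2] = { 0, 0 };

	fill_prps(&ccb, &x, prp);
	printf("prp0 %#llx prp1 %#llx (list entry %#llx)\n",
	    (unsigned long long)prp[0], (unsigned long long)prp[1],
	    (unsigned long long)ccb.prpl[0]);
	return 0;
}
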
783 struct nvme_ccb *ccb = xs->io;
785 ccb->ccb_done = nvme_scsi_sync_done;
786 ccb->ccb_cookie = xs;
789 nvme_poll(sc, sc->sc_q, ccb, nvme_scsi_sync_fill, xs->timeout);
793 nvme_q_submit(sc, sc->sc_q, ccb, nvme_scsi_sync_fill);
797 nvme_scsi_sync_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
800 struct scsi_xfer *xs = ccb->ccb_cookie;
808 nvme_scsi_sync_done(struct nvme_softc *sc, struct nvme_ccb *ccb,
811 struct scsi_xfer *xs = ccb->ccb_cookie;
970 struct nvme_ccb *ccb = NULL;
974 ccb = nvme_ccb_get(sc);
975 if (ccb == NULL)
988 ccb->ccb_done = nvme_empty_done;
989 ccb->ccb_cookie = &sqe;
1012 flags = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_PT);
1033 if (ccb)
1034 nvme_ccb_put(sc, ccb);
1066 struct nvme_queue *q, struct nvme_ccb *ccb)
1069 return (nvme_op_sq_enter_locked(sc, q, ccb));
1074 struct nvme_queue *q, struct nvme_ccb *ccb)
1081 struct nvme_queue *q, struct nvme_ccb *ccb)
1094 struct nvme_queue *q, struct nvme_ccb *ccb)
1096 nvme_op_sq_leave_locked(sc, q, ccb);
1101 nvme_q_submit(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
1107 tail = sc->sc_ops->op_sq_enter(sc, q, ccb);
1114 (*fill)(sc, ccb, sqe);
1115 sqe->cid = ccb->ccb_id;
1119 sc->sc_ops->op_sq_leave(sc, q, ccb);
1128 nvme_poll(struct nvme_softc *sc, struct nvme_queue *q, struct nvme_ccb *ccb,
1138 (*fill)(sc, ccb, &state.s);
1140 done = ccb->ccb_done;
1141 cookie = ccb->ccb_cookie;
1143 ccb->ccb_done = nvme_poll_done;
1144 ccb->ccb_cookie = &state;
1146 nvme_q_submit(sc, q, ccb, nvme_poll_fill);
1155 ccb->ccb_cookie = cookie;
1156 done(sc, ccb, &state.c);
1164 nvme_poll_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
1167 struct nvme_poll_state *state = ccb->ccb_cookie;
1173 nvme_poll_done(struct nvme_softc *sc, struct nvme_ccb *ccb,
1176 struct nvme_poll_state *state = ccb->ccb_cookie;
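
The nvme_poll() hits at 1128-1176 show how synchronous commands reuse the same completion machinery: the caller's ccb_done and ccb_cookie are saved in locals, nvme_poll_done and an on-stack state structure take their place while the queue is busy-waited, and once the completion has been captured the cookie is put back and the saved handler is called with it. A stand-in sketch of that save/swap/restore, with no real hardware behind it:

/*
 * Sketch only: types and the "busy-wait" are simplified stand-ins.
 */
#include <stdio.h>

struct cqe { int status; };

struct ccb;
typedef void (*done_fn)(struct ccb *, struct cqe *);

struct ccb {
	done_fn	 ccb_done;
	void	*ccb_cookie;
};

struct poll_state {
	struct cqe c;
	int done;
};

static void
poll_done(struct ccb *ccb, struct cqe *cqe)
{
	struct poll_state *state = ccb->ccb_cookie;

	state->c = *cqe;	/* stash the completion for the caller */
	state->done = 1;
}

static void
caller_done(struct ccb *ccb, struct cqe *cqe)
{
	printf("command finished, status %d\n", cqe->status);
}

static int
poll(struct ccb *ccb, struct cqe *hw_completion)
{
	struct poll_state state = { .done = 0 };
	done_fn done = ccb->ccb_done;	/* save the real handler ... */
	void *cookie = ccb->ccb_cookie;	/* ... and its cookie */

	ccb->ccb_done = poll_done;
	ccb->ccb_cookie = &state;

	/* stand-in for submitting and spinning on the completion queue */
	while (!state.done)
		poll_done(ccb, hw_completion);

	ccb->ccb_cookie = cookie;	/* restore before calling back */
	done(ccb, &state.c);
	return state.c.status;
}

int
main(void)
{
	struct ccb ccb = { .ccb_done = caller_done, .ccb_cookie = NULL };
	struct cqe cqe = { .status = 0 };

	return poll(&ccb, &cqe);
}
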
1183 nvme_sqe_fill(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
1185 struct nvme_sqe *src = ccb->ccb_cookie;
1192 nvme_empty_done(struct nvme_softc *sc, struct nvme_ccb *ccb,
1199 struct nvme_queue *q, struct nvme_ccb *ccb)
1207 struct nvme_ccb *ccb;
1227 ccb = &sc->sc_ccbs[cqe->cid];
1228 sc->sc_ops->op_cq_done(sc, q, ccb);
1229 ccb->ccb_done(sc, ccb, cqe);
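
The nvme_q_complete() hits at 1207-1229 are the other half of the cid bookkeeping set up in nvme_q_submit(): the controller echoes the submission's cid in each completion entry, and because ccb_id is the ccb's index into sc_ccbs[], that cid alone locates the owning ccb, whose ccb_done handler is then run. A stand-in sketch:

/*
 * Sketch only: the cid doubles as the index into the ccb array, so
 * completion dispatch is a single array lookup plus an indirect call.
 */
#include <stdio.h>

struct cqe { unsigned short cid; unsigned short status; };

struct ccb {
	void (*ccb_done)(struct ccb *, struct cqe *);
	unsigned short ccb_id;
};

static void
io_done(struct ccb *ccb, struct cqe *cqe)
{
	printf("ccb %u completed, status %u\n", ccb->ccb_id, cqe->status);
}

int
main(void)
{
	struct ccb ccbs[4];
	struct cqe cqe = { .cid = 2, .status = 0 };	/* as if written by hw */
	struct ccb *ccb;
	unsigned short i;

	for (i = 0; i < 4; i++) {
		ccbs[i].ccb_id = i;
		ccbs[i].ccb_done = io_done;
	}

	ccb = &ccbs[cqe.cid];	/* look the owner up by cid */
	ccb->ccb_done(ccb, &cqe);
	return 0;
}
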
1253 struct nvme_ccb *ccb;
1256 ccb = nvme_ccb_get(sc);
1257 if (ccb == NULL)
1264 ccb->ccb_done = nvme_empty_done;
1265 ccb->ccb_cookie = mem;
1268 rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_fill_identify,
1272 nvme_ccb_put(sc, ccb);
1318 struct nvme_ccb *ccb;
1321 ccb = scsi_io_get(&sc->sc_iopool, 0);
1322 KASSERT(ccb != NULL);
1324 ccb->ccb_done = nvme_empty_done;
1325 ccb->ccb_cookie = &sqe;
1334 rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
1338 ccb->ccb_done = nvme_empty_done;
1339 ccb->ccb_cookie = &sqe;
1349 rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
1354 scsi_io_put(&sc->sc_iopool, ccb);
1362 struct nvme_ccb *ccb;
1365 ccb = scsi_io_get(&sc->sc_iopool, 0);
1366 KASSERT(ccb != NULL);
1368 ccb->ccb_done = nvme_empty_done;
1369 ccb->ccb_cookie = &sqe;
1375 rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
1379 ccb->ccb_done = nvme_empty_done;
1380 ccb->ccb_cookie = &sqe;
1386 rv = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_QOP);
1393 scsi_io_put(&sc->sc_iopool, ccb);
1399 nvme_fill_identify(struct nvme_softc *sc, struct nvme_ccb *ccb, void *slot)
1402 struct nvme_dmamem *mem = ccb->ccb_cookie;
1412 struct nvme_ccb *ccb;
1417 sc->sc_ccbs = mallocarray(nccbs, sizeof(*ccb), M_DEVBUF,
1429 ccb = &sc->sc_ccbs[i];
1435 &ccb->ccb_dmamap) != 0)
1438 ccb->ccb_id = i;
1439 ccb->ccb_prpl = prpl;
1440 ccb->ccb_prpl_off = off;
1441 ccb->ccb_prpl_dva = NVME_DMA_DVA(sc->sc_ccb_prpls) + off;
1443 SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_list, ccb, ccb_entry);
1460 struct nvme_ccb *ccb;
1463 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_list);
1464 if (ccb != NULL)
1468 return (ccb);
1475 struct nvme_ccb *ccb = io;
1478 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_list, ccb, ccb_entry);
1485 struct nvme_ccb *ccb;
1487 while ((ccb = SIMPLEQ_FIRST(&sc->sc_ccb_list)) != NULL) {
1489 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1493 free(sc->sc_ccbs, M_DEVBUF, nccbs * sizeof(*ccb));
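
The nvme_ccbs_alloc()/nvme_ccb_get()/nvme_ccb_put() hits above show the free-list discipline: ccbs are carved out of one array at attach time and strung on a SIMPLEQ, get takes the head (returning NULL when the list is empty) and put pushes the ccb back onto the head. A small sketch of that pool, assuming OpenBSD's <sys/queue.h> SIMPLEQ macros (STAILQ is the portable spelling elsewhere); the mutex and the scsi_iopool hookup are left out:

/*
 * Sketch only: a SIMPLEQ free list over a single ccb array.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct ccb {
	SIMPLEQ_ENTRY(ccb)	ccb_entry;
	unsigned int		ccb_id;
};

SIMPLEQ_HEAD(ccb_list, ccb);

static struct ccb *
ccb_get(struct ccb_list *list)
{
	struct ccb *ccb;

	ccb = SIMPLEQ_FIRST(list);
	if (ccb != NULL)
		SIMPLEQ_REMOVE_HEAD(list, ccb_entry);

	return (ccb);
}

static void
ccb_put(struct ccb_list *list, struct ccb *ccb)
{
	SIMPLEQ_INSERT_HEAD(list, ccb, ccb_entry);
}

int
main(void)
{
	struct ccb_list list = SIMPLEQ_HEAD_INITIALIZER(list);
	struct ccb *ccbs, *ccb;
	unsigned int i, nccbs = 4;

	ccbs = calloc(nccbs, sizeof(*ccbs));
	if (ccbs == NULL)
		return 1;

	for (i = 0; i < nccbs; i++) {
		ccbs[i].ccb_id = i;
		SIMPLEQ_INSERT_TAIL(&list, &ccbs[i], ccb_entry);
	}

	ccb = ccb_get(&list);
	printf("got ccb %u\n", ccb->ccb_id);
	ccb_put(&list, ccb);

	free(ccbs);
	return 0;
}
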
1679 tail = sc->sc_ops->op_sq_enter_locked(sc, q, /* XXX ccb */ NULL);
1689 sc->sc_ops->op_sq_leave_locked(sc, q, /* XXX ccb */ NULL);
2169 struct nvme_ccb *ccb = NULL;
2176 ccb = nvme_ccb_get(sc);
2177 if (ccb == NULL)
2192 ccb->ccb_done = nvme_empty_done;
2193 ccb->ccb_cookie = &sqe;
2194 flags = nvme_poll(sc, sc->sc_admin_q, ccb, nvme_sqe_fill, NVME_TIMO_LOG_PAGE);
2224 if (ccb != NULL)
2225 nvme_ccb_put(sc, ccb);