Lines Matching defs:cm

Cross-reference hits for the local symbol cm, the struct xbd_command pointer used throughout this source (the FreeBSD Xen blkfront driver, judging by its identifiers). The number opening each line is its position in that file. Annotations and reconstruction sketches follow each group of hits; anything in a sketch beyond the matched fragments is an assumption, not the file's verbatim contents.

113 xbd_cm_freeze(struct xbd_softc *sc, struct xbd_command *cm, xbdc_flag_t cm_flag)
115 if ((cm->cm_flags & XBDCF_FROZEN) != 0)
118 cm->cm_flags |= XBDCF_FROZEN|cm_flag;
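
Lines 113-118 form the per-command freeze guard. A sketch of the whole function reconstructed from these fragments; the one elided statement (presumably a per-softc frozen-queue counter bump, written sc->xbd_qfrozen_cnt here) is an assumption:

static inline void
xbd_cm_freeze(struct xbd_softc *sc, struct xbd_command *cm, xbdc_flag_t cm_flag)
{
        /* Idempotent: a command is only counted as frozen once. */
        if ((cm->cm_flags & XBDCF_FROZEN) != 0)
                return;
        sc->xbd_qfrozen_cnt++;                  /* assumed elided line */
        cm->cm_flags |= XBDCF_FROZEN|cm_flag;
}
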
123 xbd_cm_thaw(struct xbd_softc *sc, struct xbd_command *cm)
125 if ((cm->cm_flags & XBDCF_FROZEN) == 0)
128 cm->cm_flags &= ~XBDCF_FROZEN;
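
The thaw side mirrors it: clear the flag only if it was set, then release the hold on dispatch. The xbd_thaw call and its XBDF_NONE argument are assumptions, inferred from the xbd_thaw(..., XBDF_CM_SHORTAGE) call visible at line 155:

static inline void
xbd_cm_thaw(struct xbd_softc *sc, struct xbd_command *cm)
{
        if ((cm->cm_flags & XBDCF_FROZEN) == 0)
                return;
        cm->cm_flags &= ~XBDCF_FROZEN;
        xbd_thaw(sc, XBDF_NONE);                /* assumed elided line */
}
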
144 xbd_free_command(struct xbd_command *cm)
147 KASSERT((cm->cm_flags & XBDCF_Q_MASK) == XBD_Q_NONE,
149 cm->cm_flags & XBDCF_Q_MASK));
151 cm->cm_flags = XBDCF_INITIALIZER;
152 cm->cm_bp = NULL;
153 cm->cm_complete = NULL;
154 xbd_enqueue_cm(cm, XBD_Q_FREE);
155 xbd_thaw(cm->cm_sc, XBDF_CM_SHORTAGE);
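
xbd_free_command is matched nearly in full; only the KASSERT message (the unmatched line 148) is missing. Reassembled, with a placeholder message:

static void
xbd_free_command(struct xbd_command *cm)
{

        KASSERT((cm->cm_flags & XBDCF_Q_MASK) == XBD_Q_NONE,
            ("command %p still on a queue", cm));       /* message text assumed */

        /* Reset the command to its pristine state and park it on the
         * free list; a returned command may relieve a command-shortage
         * stall, so thaw the softc. */
        cm->cm_flags = XBDCF_INITIALIZER;
        cm->cm_bp = NULL;
        cm->cm_complete = NULL;
        xbd_enqueue_cm(cm, XBD_Q_FREE);
        xbd_thaw(cm->cm_sc, XBDF_CM_SHORTAGE);
}
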
213 struct xbd_command *cm;
216 cm = arg;
217 sc = cm->cm_sc;
220 cm->cm_bp->bio_error = EIO;
221 biodone(cm->cm_bp);
222 xbd_free_command(cm);
236 ring_req->id = cm->cm_id;
237 ring_req->operation = cm->cm_operation;
238 ring_req->sector_number = cm->cm_sector_number;
241 cm->cm_nseg = nsegs;
242 xbd_mksegarray(segs, nsegs, &cm->cm_gref_head,
244 cm->cm_operation == BLKIF_OP_WRITE,
245 cm->cm_sg_refs, ring_req->seg,
254 ring_req->id = cm->cm_id;
256 ring_req->indirect_op = cm->cm_operation;
257 ring_req->sector_number = cm->cm_sector_number;
260 cm->cm_nseg = nsegs;
261 xbd_mksegarray(segs, nsegs, &cm->cm_gref_head,
263 cm->cm_operation == BLKIF_OP_WRITE,
264 cm->cm_sg_refs, cm->cm_indirectionpages,
266 memcpy(ring_req->indirect_grefs, &cm->cm_indirectionrefs,
270 if (cm->cm_operation == BLKIF_OP_READ)
272 else if (cm->cm_operation == BLKIF_OP_WRITE)
276 bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);
278 gnttab_free_grant_references(cm->cm_gref_head);
280 xbd_enqueue_cm(cm, XBD_Q_BUSY);
289 if ((cm->cm_flags & XBDCF_ASYNC_MAPPING) != 0)
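
Lines 213-289 sit inside the bus_dma(9) load callback (presumably xbd_queue_cb, the callback passed at line 302). On a mapping error the bio fails with EIO (220-222); otherwise the callback builds either a plain blkif_request_t with inline segments (236-245) or, past the inline limit, an indirect request whose segment array lives in the command's preallocated indirection pages (254-266), then pre-syncs the DMA map, returns unused grant references, and moves the command to the busy queue (276-280). A condensed sketch of that shape; the ring-slot bookkeeping and the xbd_mksegarray trailing arguments are elided rather than guessed:

static void
xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        struct xbd_command *cm = arg;
        struct xbd_softc *sc = cm->cm_sc;
        blkif_request_t *ring_req;
        int op;

        if (error) {
                /* Mapping failed: fail the bio and recycle the command. */
                cm->cm_bp->bio_error = EIO;
                biodone(cm->cm_bp);
                xbd_free_command(cm);
                return;
        }

        /* Fill the next free ring slot (plain-request branch shown;
         * producer-index advance elided). */
        ring_req = RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt);
        ring_req->id = cm->cm_id;
        ring_req->operation = cm->cm_operation;
        ring_req->sector_number = cm->cm_sector_number;
        cm->cm_nseg = nsegs;
        /* Grant each segment to the backend and fill ring_req->seg[]
         * (xbd_mksegarray, lines 242-245); for writes the backend only
         * reads the pages, hence the cm_operation == BLKIF_OP_WRITE
         * read-only test. */

        /* Make CPU stores visible to the backend before posting. */
        if (cm->cm_operation == BLKIF_OP_READ)
                op = BUS_DMASYNC_PREREAD;
        else if (cm->cm_operation == BLKIF_OP_WRITE)
                op = BUS_DMASYNC_PREWRITE;
        else
                op = 0;
        bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);

        gnttab_free_grant_references(cm->cm_gref_head);
        xbd_enqueue_cm(cm, XBD_Q_BUSY);

        /* If the mapping completed asynchronously (the XBDCF_ASYNC_MAPPING
         * test at line 289), the queue frozen in xbd_queue_request is
         * unstalled here. */
}
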
296 xbd_queue_request(struct xbd_softc *sc, struct xbd_command *cm)
300 if (cm->cm_bp != NULL)
301 error = bus_dmamap_load_bio(sc->xbd_io_dmat, cm->cm_map,
302 cm->cm_bp, xbd_queue_cb, cm, 0);
304 error = bus_dmamap_load(sc->xbd_io_dmat, cm->cm_map,
305 cm->cm_data, cm->cm_datalen, xbd_queue_cb, cm, 0);
313 xbd_cm_freeze(sc, cm, XBDCF_ASYNC_MAPPING);
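
xbd_queue_request picks the loader by command type: bio-backed commands use bus_dmamap_load_bio(), raw-buffer commands (the dump path) use bus_dmamap_load(). The EINPROGRESS handling is inferred from the freeze at line 313; treating it as success is an assumption:

static int
xbd_queue_request(struct xbd_softc *sc, struct xbd_command *cm)
{
        int error;

        if (cm->cm_bp != NULL)
                error = bus_dmamap_load_bio(sc->xbd_io_dmat, cm->cm_map,
                    cm->cm_bp, xbd_queue_cb, cm, 0);
        else
                error = bus_dmamap_load(sc->xbd_io_dmat, cm->cm_map,
                    cm->cm_data, cm->cm_datalen, xbd_queue_cb, cm, 0);
        if (error == EINPROGRESS) {
                /* The callback will run later; freeze dispatch so later
                 * commands cannot jump ahead of this one. */
                xbd_cm_freeze(sc, cm, XBDCF_ASYNC_MAPPING);
                error = 0;
        }

        return (error);
}
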
337 struct xbd_command *cm;
347 if ((cm = xbd_dequeue_cm(sc, XBD_Q_FREE)) == NULL) {
354 &cm->cm_gref_head) != 0) {
360 xbd_enqueue_cm(cm, XBD_Q_FREE);
364 cm->cm_bp = bp;
365 cm->cm_sector_number =
371 cm->cm_operation = BLKIF_OP_READ;
374 cm->cm_operation = BLKIF_OP_WRITE;
377 cm->cm_operation = BLKIF_OP_WRITE_BARRIER;
382 cm->cm_flags |= XBDCF_Q_FREEZE;
389 xbd_requeue_cm(cm, XBD_Q_READY);
397 cm->cm_operation = BLKIF_OP_FLUSH_DISKCACHE;
399 cm->cm_operation = BLKIF_OP_WRITE_BARRIER;
405 xbd_enqueue_cm(cm, XBD_Q_FREE);
409 return (cm);
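
Lines 337-409 build a command from the next queued bio (presumably xbd_bio_command, the function called at line 437). The visible shape: take a free command (347), reserve grant references and put the command back on failure (354-360), bind the bio and compute the starting sector (364-365), map the bio command to a blkif operation (371-377), single-step ordered writes on barrier-less backends via XBDCF_Q_FREEZE (382, 389), map BIO_FLUSH to a flush or write barrier (397-399), and free the command for unsupported bios (405). A condensed sketch; the bio dequeue/requeue helpers and the sector expression are assumed:

static struct xbd_command *
xbd_bio_command(struct xbd_softc *sc)
{
        struct xbd_command *cm;
        struct bio *bp;

        bp = xbd_dequeue_bio(sc);               /* assumed helper */
        if (bp == NULL)
                return (NULL);

        if ((cm = xbd_dequeue_cm(sc, XBD_Q_FREE)) == NULL) {
                xbd_requeue_bio(sc, bp);        /* assumed helper */
                return (NULL);                  /* command shortage */
        }

        if (gnttab_alloc_grant_references(sc->xbd_max_request_segments,
            &cm->cm_gref_head) != 0) {
                xbd_requeue_bio(sc, bp);
                xbd_enqueue_cm(cm, XBD_Q_FREE); /* grant shortage */
                return (NULL);
        }

        cm->cm_bp = bp;
        /* Exact expression elided at 365-366; bio_pblkno-based here. */
        cm->cm_sector_number = (blkif_sector_t)bp->bio_pblkno;

        switch (bp->bio_cmd) {
        case BIO_READ:
                cm->cm_operation = BLKIF_OP_READ;
                break;
        case BIO_WRITE:
                cm->cm_operation = BLKIF_OP_WRITE;
                if ((bp->bio_flags & BIO_ORDERED) != 0) {
                        if ((sc->xbd_flags & XBDF_BARRIER) != 0)
                                cm->cm_operation = BLKIF_OP_WRITE_BARRIER;
                        else {
                                /* No barrier support: single-step this
                                 * write by freezing the queue behind it. */
                                cm->cm_flags |= XBDCF_Q_FREEZE;
                                xbd_requeue_cm(cm, XBD_Q_READY);
                                return (NULL);
                        }
                }
                break;
        case BIO_FLUSH:
                if ((sc->xbd_flags & XBDF_FLUSH) != 0)
                        cm->cm_operation = BLKIF_OP_FLUSH_DISKCACHE;
                else
                        cm->cm_operation = BLKIF_OP_WRITE_BARRIER;
                break;
        default:
                biofinish(bp, NULL, EOPNOTSUPP);
                xbd_enqueue_cm(cm, XBD_Q_FREE);
                return (NULL);
        }

        return (cm);
}
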
422 struct xbd_command *cm;
434 cm = xbd_dequeue_cm(sc, XBD_Q_READY);
436 if (cm == NULL)
437 cm = xbd_bio_command(sc);
439 if (cm == NULL)
442 if ((cm->cm_flags & XBDCF_Q_FREEZE) != 0) {
447 xbd_cm_freeze(sc, cm, XBDCF_Q_FREEZE);
450 if ((error = xbd_queue_request(sc, cm)) != 0) {
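
Lines 422-450 are the dispatch loop (the start-I/O routine; its name is not in the matched set, xbd_startio here is an assumption). It drains XBD_Q_READY first, falls back to fresh bios, honors per-command freezes, and stops on a queue-request error. A sketch, with the ring-full and frozen-count checks assumed:

static void
xbd_startio(struct xbd_softc *sc)
{
        struct xbd_command *cm;
        int error, queued = 0;

        while (!RING_FULL(&sc->xbd_ring)) {
                if (sc->xbd_qfrozen_cnt != 0)   /* assumed check */
                        break;

                /* Deferred commands first, then fresh bios. */
                cm = xbd_dequeue_cm(sc, XBD_Q_READY);
                if (cm == NULL)
                        cm = xbd_bio_command(sc);
                if (cm == NULL)
                        break;

                if ((cm->cm_flags & XBDCF_Q_FREEZE) != 0) {
                        /* Single-step: hold further dispatch until this
                         * command completes and thaws the queue. */
                        xbd_cm_freeze(sc, cm, XBDCF_Q_FREEZE);
                }

                if ((error = xbd_queue_request(sc, cm)) != 0) {
                        printf("xbd_queue_request returned %d\n", error);
                        break;
                }
                queued++;
        }

        if (queued != 0)
                xbd_flush_requests(sc);         /* assumed: kick the backend */
}
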
462 xbd_bio_complete(struct xbd_softc *sc, struct xbd_command *cm)
466 bp = cm->cm_bp;
468 if (__predict_false(cm->cm_status != BLKIF_RSP_OKAY)) {
470 printf(" status: %x\n", cm->cm_status);
479 xbd_free_command(cm);
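
xbd_bio_complete reports a backend error (the unmatched lines around 468-470 presumably print the disk-error prefix), then recycles the command and finishes the bio. Sketch, with the error-reporting lines assumed:

static void
xbd_bio_complete(struct xbd_softc *sc, struct xbd_command *cm)
{
        struct bio *bp;

        bp = cm->cm_bp;

        if (__predict_false(cm->cm_status != BLKIF_RSP_OKAY)) {
                disk_err(bp, "disk error", 0, 0);       /* assumed line */
                printf(" status: %x\n", cm->cm_status);
                bp->bio_flags |= BIO_ERROR;             /* assumed line */
        }

        if ((bp->bio_flags & BIO_ERROR) != 0)
                bp->bio_error = EIO;
        else
                bp->bio_resid = 0;

        /* Free the command before biodone() so a completion that issues
         * more I/O can reuse it immediately. */
        xbd_free_command(cm);
        biodone(bp);
}
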
487 struct xbd_command *cm;
505 cm = &sc->xbd_shadow[bret->id];
507 xbd_remove_cm(cm, XBD_Q_BUSY);
508 gnttab_end_foreign_access_references(cm->cm_nseg,
509 cm->cm_sg_refs);
512 if (cm->cm_operation == BLKIF_OP_READ)
514 else if (cm->cm_operation == BLKIF_OP_WRITE ||
515 cm->cm_operation == BLKIF_OP_WRITE_BARRIER)
519 bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);
520 bus_dmamap_unload(sc->xbd_io_dmat, cm->cm_map);
526 xbd_cm_thaw(sc, cm);
532 cm->cm_status = bret->status;
533 if (cm->cm_bp)
534 xbd_bio_complete(sc, cm);
535 else if (cm->cm_complete != NULL)
536 cm->cm_complete(cm);
538 xbd_free_command(cm);
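
Lines 487-538 walk the response ring, most likely in the interrupt handler. Each response id indexes the shadow command array (505); the command leaves the busy queue, its grants are reclaimed, the DMA map is post-synced and unloaded, any freeze it held is released, and completion dispatches by kind: bio commands to xbd_bio_complete, others through cm_complete, else straight back to the free list. A condensed sketch of the per-response body (sc and the ring index i are assumed to be in scope):

blkif_response_t *bret = RING_GET_RESPONSE(&sc->xbd_ring, i);
struct xbd_command *cm = &sc->xbd_shadow[bret->id];
int op;

xbd_remove_cm(cm, XBD_Q_BUSY);
gnttab_end_foreign_access_references(cm->cm_nseg, cm->cm_sg_refs);

if (cm->cm_operation == BLKIF_OP_READ)
        op = BUS_DMASYNC_POSTREAD;
else if (cm->cm_operation == BLKIF_OP_WRITE ||
    cm->cm_operation == BLKIF_OP_WRITE_BARRIER)
        op = BUS_DMASYNC_POSTWRITE;
else
        op = 0;
bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);
bus_dmamap_unload(sc->xbd_io_dmat, cm->cm_map);

/* Release any hold this command had on later dispatch. */
xbd_cm_thaw(sc, cm);

cm->cm_status = bret->status;
if (cm->cm_bp != NULL)
        xbd_bio_complete(sc, cm);
else if (cm->cm_complete != NULL)
        cm->cm_complete(cm);            /* e.g. xbd_dump_complete */
else
        xbd_free_command(cm);
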
591 xbd_dump_complete(struct xbd_command *cm)
594 xbd_enqueue_cm(cm, XBD_Q_COMPLETE);
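
The dump completion handler is trivial and fully visible: it parks the finished command on XBD_Q_COMPLETE so the polling dump loop can inspect its status later:

static void
xbd_dump_complete(struct xbd_command *cm)
{

        xbd_enqueue_cm(cm, XBD_Q_COMPLETE);
}
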
602 struct xbd_command *cm;
619 cm = xbd_dequeue_cm(sc, XBD_Q_FREE);
620 if (cm == NULL) {
627 &cm->cm_gref_head) != 0) {
628 xbd_free_command(cm);
636 cm->cm_data = virtual;
637 cm->cm_datalen = chunk;
638 cm->cm_operation = BLKIF_OP_WRITE;
639 cm->cm_sector_number = offset >> XBD_SECTOR_SHFT;
640 cm->cm_complete = xbd_dump_complete;
642 xbd_enqueue_cm(cm, XBD_Q_READY);
657 while ((cm = xbd_dequeue_cm(sc, XBD_Q_COMPLETE)) != NULL) {
658 if (cm->cm_status != BLKIF_RSP_OKAY) {
661 cm->cm_sector_number);
664 xbd_free_command(cm);
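
Lines 602-664 belong to the crash-dump path (the disk's d_dump method). It runs synchronously and polled: carve the buffer into chunks, build BLKIF_OP_WRITE commands completed via xbd_dump_complete (636-642), then drain XBD_Q_COMPLETE and report any failed sector (657-664). A condensed sketch; the d_dump signature, chunking arithmetic, and quiesce/poll steps are assumptions:

static int
xbd_dump(void *arg, void *virtual, off_t offset, size_t length)
{
        struct disk *dp = arg;
        struct xbd_softc *sc = dp->d_drv1;      /* assumed linkage */
        struct xbd_command *cm;
        size_t chunk;
        int rc = 0;

        while (length > 0) {
                cm = xbd_dequeue_cm(sc, XBD_Q_FREE);
                if (cm == NULL)
                        return (EBUSY);         /* no commands left */
                if (gnttab_alloc_grant_references(
                    sc->xbd_max_request_segments, &cm->cm_gref_head) != 0) {
                        xbd_free_command(cm);
                        return (EBUSY);         /* no grants left */
                }
                chunk = MIN(length, sc->xbd_max_request_size);  /* assumed */
                cm->cm_data = virtual;
                cm->cm_datalen = chunk;
                cm->cm_operation = BLKIF_OP_WRITE;
                cm->cm_sector_number = offset >> XBD_SECTOR_SHFT;
                cm->cm_complete = xbd_dump_complete;
                xbd_enqueue_cm(cm, XBD_Q_READY);

                virtual = (char *)virtual + chunk;
                offset += chunk;
                length -= chunk;
        }

        /* Kick the ring, then poll until everything lands on
         * XBD_Q_COMPLETE (elided), and check each command's status. */
        while ((cm = xbd_dequeue_cm(sc, XBD_Q_COMPLETE)) != NULL) {
                if (cm->cm_status != BLKIF_RSP_OKAY) {
                        device_printf(sc->xbd_dev,
                            "dump I/O failed at sector %jd\n",
                            (intmax_t)cm->cm_sector_number);
                        rc = EIO;
                }
                xbd_free_command(cm);
        }

        return (rc);
}
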
1072 struct xbd_command *cm;
1074 cm = &sc->xbd_shadow[i];
1075 if (cm->cm_sg_refs != NULL) {
1076 free(cm->cm_sg_refs, M_XENBLOCKFRONT);
1077 cm->cm_sg_refs = NULL;
1080 if (cm->cm_indirectionpages != NULL) {
1083 &cm->cm_indirectionrefs[0]);
1084 free(cm->cm_indirectionpages, M_XENBLOCKFRONT);
1085 cm->cm_indirectionpages = NULL;
1088 bus_dmamap_destroy(sc->xbd_io_dmat, cm->cm_map);
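
Lines 1072-1088 are per-command teardown inside the driver's free/detach path: release the scatter-gather ref array, end foreign access on and free the indirection pages, and destroy the DMA map. The loop over the shadow array is an assumption:

for (i = 0; i < sc->xbd_max_requests; i++) {    /* assumed bounds */
        struct xbd_command *cm = &sc->xbd_shadow[i];

        if (cm->cm_sg_refs != NULL) {
                free(cm->cm_sg_refs, M_XENBLOCKFRONT);
                cm->cm_sg_refs = NULL;
        }
        if (cm->cm_indirectionpages != NULL) {
                /* Revoke the backend's mappings before freeing. */
                gnttab_end_foreign_access_references(
                    sc->xbd_max_request_indirectpages,  /* assumed count */
                    &cm->cm_indirectionrefs[0]);
                free(cm->cm_indirectionpages, M_XENBLOCKFRONT);
                cm->cm_indirectionpages = NULL;
        }
        bus_dmamap_destroy(sc->xbd_io_dmat, cm->cm_map);
}
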
1358 struct xbd_command *cm;
1361 cm = &sc->xbd_shadow[i];
1362 cm->cm_sg_refs = malloc(
1365 if (cm->cm_sg_refs == NULL)
1367 cm->cm_id = i;
1368 cm->cm_flags = XBDCF_INITIALIZER;
1369 cm->cm_sc = sc;
1370 if (bus_dmamap_create(sc->xbd_io_dmat, 0, &cm->cm_map) != 0)
1387 &cm->cm_indirectionrefs[j]))
1394 cm->cm_indirectionpages = indirectpages;
1395 xbd_free_command(cm);
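
Lines 1358-1395 are the mirror-image setup, presumably in the connect path: each shadow entry gets its grant-ref array, identity, flags, and DMA map (1362-1370), the indirection pages are granted to the backend page by page (1387), and only a fully initialized command is released to the free list through xbd_free_command (1394-1395). Sketch, with the allocation sizes, page allocation, and error unwinding abbreviated as noted:

for (i = 0; i < sc->xbd_max_requests; i++) {
        struct xbd_command *cm = &sc->xbd_shadow[i];
        void *indirectpages;
        int j;

        cm->cm_sg_refs = malloc(
            sizeof(grant_ref_t) * sc->xbd_max_request_segments,
            M_XENBLOCKFRONT, M_NOWAIT);                 /* size assumed */
        if (cm->cm_sg_refs == NULL)
                break;
        cm->cm_id = i;                  /* ties ring responses to shadow[] */
        cm->cm_flags = XBDCF_INITIALIZER;
        cm->cm_sc = sc;
        if (bus_dmamap_create(sc->xbd_io_dmat, 0, &cm->cm_map) != 0)
                break;

        /* Allocate the indirect-segment pages (allocation elided) and
         * grant each page to the backend read-only. */
        indirectpages = NULL;           /* placeholder for elided alloc */
        for (j = 0; j < sc->xbd_max_request_indirectpages; j++) {
                if (gnttab_grant_foreign_access(
                    xenbus_get_otherend_id(sc->xbd_dev),
                    (vtophys(indirectpages) >> PAGE_SHIFT) + j,
                    1 /* read-only */, &cm->cm_indirectionrefs[j]))
                        break;
        }
        if (j < sc->xbd_max_request_indirectpages)
                break;                  /* unwind elided */
        cm->cm_indirectionpages = indirectpages;
        xbd_free_command(cm);           /* i.e. put it on XBD_Q_FREE */
}
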