Lines Matching defs:rq

50     struct request *rq, boolean_t force_sync);
95 zvol_end_io(struct bio *bio, struct request *rq, int error)
101 blk_mq_end_request(rq, errno_to_bi_status(error));
129 struct request *rq;
133 struct request *rq;
165 struct request *rq = bd->rq;
166 zvol_state_t *zv = rq->q->queuedata;
169 blk_mq_start_request(rq);
171 if (blk_rq_is_passthrough(rq)) {
173 blk_mq_end_request(rq, BLK_STS_IOERR);
177 zvol_request_impl(zv, NULL, rq, 0);
232 struct request *rq = zvr->rq;
249 if (io_is_flush(bio, rq))
253 if (io_size(bio, rq) == 0) {
255 zvol_end_io(bio, rq, 0);
259 zfs_uio_bvec_init(&uio, bio, rq);
276 io_is_fua(bio, rq) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
322 zvol_end_io(bio, rq, -error);
337 struct request *rq = zvr->rq;
339 uint64_t start = io_offset(bio, rq);
340 uint64_t size = io_size(bio, rq);
362 sync = io_is_fua(bio, rq) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
375 if (!io_is_secure_erase(bio, rq)) {
411 zvol_end_io(bio, rq, -error);
426 struct request *rq = zvr->rq;
438 zfs_uio_bvec_init(&uio, bio, rq);
488 zvol_end_io(bio, rq, -error);
503 * Either 'bio' or 'rq' should be set depending on if we are processing a
510 zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
514 uint64_t offset = io_offset(bio, rq);
515 uint64_t size = io_size(bio, rq);
516 int rw = io_data_dir(bio, rq);
519 zvol_end_io(bio, rq, -SET_ERROR(ENXIO));
529 .rq = rq,
532 if (io_has_data(bio, rq) && offset + size > zv->zv_volsize) {
538 zvol_end_io(bio, rq, -SET_ERROR(EIO));
547 if (rq)
549 blk_mq_hw_queue = rq->mq_hctx->queue_num;
552 rq->q->queue_hw_ctx[rq->q->mq_map[rq->cpu]]->queue_num;
560 zvol_end_io(bio, rq, -SET_ERROR(EROFS));
621 if (io_is_discard(bio, rq) || io_is_secure_erase(bio, rq)) {
645 zvol_end_io(bio, rq, 0);
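
The matches above reflect the dual-path convention used throughout this
file: each I/O helper takes both a struct bio * and a struct request *,
and callers pass NULL for whichever interface is not in use. On the
blk-mq side the handler that receives bd->rq (line 165 in the listing)
hands the request to zvol_request_impl(zv, NULL, rq, 0), while the
legacy submit path passes a bio and a NULL request. The sketch below
illustrates how such dual-path accessors can be written; the names
mirror the io_offset(), io_size(), and io_data_dir() calls in the
listing, but the bodies are an illustrative assumption, not the
verbatim OpenZFS implementations.

    /*
     * Illustrative sketch only: dual-path helpers in the style of the
     * io_*() calls matched above.  If 'rq' is non-NULL the I/O arrived
     * via blk-mq and the blk_rq_*() accessors apply; otherwise the
     * legacy bio fields are used.
     */
    #include <linux/blkdev.h>
    #include <linux/blk-mq.h>

    static inline uint64_t
    io_offset(struct bio *bio, struct request *rq)
    {
            if (rq != NULL)
                    return ((uint64_t)blk_rq_pos(rq) << 9); /* sectors to bytes */
            return ((uint64_t)bio->bi_iter.bi_sector << 9);
    }

    static inline uint64_t
    io_size(struct bio *bio, struct request *rq)
    {
            if (rq != NULL)
                    return (blk_rq_bytes(rq));
            return (bio->bi_iter.bi_size);
    }

    static inline int
    io_data_dir(struct bio *bio, struct request *rq)
    {
            if (rq != NULL)
                    return (rq_data_dir(rq));
            return (bio_data_dir(bio));
    }

Keeping the request/bio distinction inside these accessors lets
zvol_request_impl() and the completion helper zvol_end_io() stay
agnostic about which block-layer interface submitted the I/O.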