Lines Matching refs:req
73 struct spdk_nvmf_request *req = cb_arg;
74 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
78 if (spdk_unlikely(req->first_fused)) {
79 struct spdk_nvmf_request *first_req = req->first_fused_req;
91 req->first_fused_req = NULL;
92 req->first_fused = false;
101 spdk_nvmf_request_complete(req);
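
Lines 73-101 above are from the bdev I/O completion callback: when the request that just finished is the second half of a fused COMPARE+WRITE pair, the queued first command is completed at the same time. A condensed sketch of that pattern, using the first_fused/first_fused_req fields shown above and public SPDK calls; the original handling at lines 78-92 does additional CQE bookkeeping for the first command that is omitted here.

	#include "spdk/bdev.h"
	#include "spdk/nvmf_transport.h"

	/* Sketch: completion callback that also resolves a pending fused COMPARE. */
	static void
	complete_cmd_sketch(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
	{
		struct spdk_nvmf_request *req = cb_arg;
		struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
		uint32_t cdw0;
		int sct, sc;

		(void)success;
		/* Translate the bdev outcome into an NVMe completion entry. */
		spdk_bdev_io_get_nvme_status(bdev_io, &cdw0, &sct, &sc);
		rsp->cdw0 = cdw0;
		rsp->status.sct = sct;
		rsp->status.sc = sc;

		if (req->first_fused) {
			/* Report the queued COMPARE (first fused command) together with
			 * this WRITE (second fused command). */
			struct spdk_nvmf_request *first_req = req->first_fused_req;

			spdk_nvmf_request_complete(first_req);
			req->first_fused_req = NULL;
			req->first_fused = false;
		}

		spdk_nvmf_request_complete(req);
		spdk_bdev_free_io(bdev_io);
	}
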
109 struct spdk_nvmf_request *req = cb_arg;
111 if (req->cmd_cb_fn) {
112 req->cmd_cb_fn(req);
115 nvmf_bdev_ctrlr_complete_cmd(bdev_io, success, req);
311 struct spdk_nvmf_request *req = arg;
314 rc = nvmf_ctrlr_process_io_cmd(req);
316 spdk_nvmf_request_complete(req);
323 struct spdk_nvmf_request *req = arg;
326 rc = nvmf_ctrlr_process_admin_cmd(req);
328 spdk_nvmf_request_complete(req);
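
Lines 311-328 are the resubmit callbacks invoked once bdev resources free up: they push the request back through the normal command-processing path and complete it only if the outcome is synchronous. A minimal sketch of that shape, assuming it lives inside lib/nvmf where nvmf_ctrlr_process_io_cmd() and the SPDK_NVMF_REQUEST_EXEC_STATUS_* values are declared (nvmf_internal.h):

	#include "spdk/nvmf_transport.h"
	#include "nvmf_internal.h"	/* internal header, only visible inside lib/nvmf */

	/* Sketch: retry entry point handed to the bdev io_wait machinery. */
	static void
	process_io_cmd_resubmit_sketch(void *arg)
	{
		struct spdk_nvmf_request *req = arg;
		int rc;

		/* Re-run the command; on -ENOMEM it queues itself again. */
		rc = nvmf_ctrlr_process_io_cmd(req);
		if (rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE) {
			/* Synchronous outcome: post the completion now.  An
			 * ASYNCHRONOUS return means the bdev callback will
			 * complete the request later. */
			spdk_nvmf_request_complete(req);
		}
	}
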
333 nvmf_bdev_ctrl_queue_io(struct spdk_nvmf_request *req, struct spdk_bdev *bdev,
338 req->bdev_io_wait.bdev = bdev;
339 req->bdev_io_wait.cb_fn = cb_fn;
340 req->bdev_io_wait.cb_arg = cb_arg;
342 rc = spdk_bdev_queue_io_wait(bdev, ch, &req->bdev_io_wait);
346 req->qpair->group->stat.pending_bdev_io++;
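
nvmf_bdev_ctrl_queue_io (lines 333-346) fills an spdk_bdev_io_wait_entry and hands it to spdk_bdev_queue_io_wait(), which is the generic bdev mechanism for retrying a submission that failed with -ENOMEM. A self-contained sketch of that mechanism, assuming only public bdev APIs; the context struct and names are illustrative:

	#include <assert.h>
	#include <errno.h>
	#include "spdk/bdev.h"

	struct my_io_ctx {
		struct spdk_bdev_io_wait_entry	wait_entry;
		struct spdk_bdev_desc		*desc;
		struct spdk_io_channel		*ch;
		uint64_t			offset_blocks;
		uint64_t			num_blocks;
		void				*buf;
	};

	static void
	my_read_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
	{
		(void)success;
		(void)cb_arg;
		spdk_bdev_free_io(bdev_io);
	}

	/* Submit a read; if the bdev layer is out of spdk_bdev_io objects
	 * (-ENOMEM), park a wait entry and retry when one frees up. */
	static void
	my_submit_read(void *cb_arg)
	{
		struct my_io_ctx *ctx = cb_arg;
		struct spdk_bdev *bdev = spdk_bdev_desc_get_bdev(ctx->desc);
		int rc;

		rc = spdk_bdev_read_blocks(ctx->desc, ctx->ch, ctx->buf,
					   ctx->offset_blocks, ctx->num_blocks,
					   my_read_done, ctx);
		if (rc == -ENOMEM) {
			ctx->wait_entry.bdev = bdev;
			ctx->wait_entry.cb_fn = my_submit_read;	/* retry entry point */
			ctx->wait_entry.cb_arg = ctx;
			rc = spdk_bdev_queue_io_wait(bdev, ctx->ch, &ctx->wait_entry);
		}
		assert(rc == 0);
	}
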
357 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
361 .memory_domain = req->memory_domain,
362 .memory_domain_ctx = req->memory_domain_ctx,
363 .accel_sequence = req->accel_sequence,
367 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
368 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
383 if (spdk_unlikely(num_blocks * block_size > req->length)) {
385 num_blocks, block_size, req->length);
391 assert(!spdk_nvmf_request_using_zcopy(req));
393 rc = spdk_bdev_readv_blocks_ext(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
394 nvmf_bdev_ctrlr_complete_cmd, req, &opts);
397 nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
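
The check at line 383 (and its twin at line 436) rejects commands whose LBA range is larger than the data buffer the transport actually delivered. A standalone sketch of the arithmetic, assuming the NVMe convention that NLB in CDW12 is zero-based; this is plain C, not an SPDK structure:

	#include <stdbool.h>
	#include <stdint.h>

	/* Sketch: is the transferred payload large enough for SLBA/NLB? */
	static bool
	rw_length_ok(uint64_t slba, uint16_t nlb_zero_based,
		     uint32_t block_size, uint32_t transport_length)
	{
		uint64_t num_blocks = (uint64_t)nlb_zero_based + 1;	/* NLB is 0-based */

		(void)slba;	/* range check against namespace size omitted here */

		/* Mirrors: num_blocks * block_size > req->length -> reject. */
		return num_blocks * block_size <= transport_length;
	}
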
410 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
414 .memory_domain = req->memory_domain,
415 .memory_domain_ctx = req->memory_domain_ctx,
416 .accel_sequence = req->accel_sequence,
420 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
421 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
436 if (spdk_unlikely(num_blocks * block_size > req->length)) {
438 num_blocks, block_size, req->length);
444 assert(!spdk_nvmf_request_using_zcopy(req));
446 rc = spdk_bdev_writev_blocks_ext(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
447 nvmf_bdev_ctrlr_complete_cmd, req, &opts);
450 nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
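
Both the read path (lines 357-397) and the write path (lines 410-450) forward the request's memory domain, memory domain context, and accel sequence to the bdev through spdk_bdev_ext_io_opts. A minimal sketch of such an _ext submission, assuming the public spdk_bdev_writev_blocks_ext() API; the accel_sequence field is left out and opts.size uses plain sizeof for brevity:

	#include "spdk/bdev.h"
	#include "spdk/dma.h"

	/* Sketch: vectored write that passes memory-domain hints along. */
	static int
	submit_writev_ext_sketch(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
				 struct iovec *iov, int iovcnt,
				 uint64_t start_lba, uint64_t num_blocks,
				 struct spdk_memory_domain *domain, void *domain_ctx,
				 spdk_bdev_io_completion_cb cb, void *cb_arg)
	{
		struct spdk_bdev_ext_io_opts opts = {
			.size = sizeof(opts),
			.memory_domain = domain,
			.memory_domain_ctx = domain_ctx,
		};

		/* Returns -ENOMEM when no spdk_bdev_io is available; callers then
		 * queue an io_wait entry and retry (see lines 333-346). */
		return spdk_bdev_writev_blocks_ext(desc, ch, iov, iovcnt, start_lba,
						   num_blocks, cb, cb_arg, &opts);
	}
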
463 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
467 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
468 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
482 if (spdk_unlikely(num_blocks * block_size > req->length)) {
484 num_blocks, block_size, req->length);
490 rc = spdk_bdev_comparev_blocks(desc, ch, req->iov, req->iovcnt, start_lba, num_blocks,
491 nvmf_bdev_ctrlr_complete_cmd, req);
494 nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
562 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
565 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
566 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
567 uint64_t max_write_zeroes_size = req->qpair->ctrlr->subsys->max_write_zeroes_size_kib;
596 nvmf_bdev_ctrlr_complete_cmd, req);
599 nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
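
Line 567 pulls the subsystem's max_write_zeroes_size_kib limit into the Write Zeroes path, so oversized ranges can be rejected before reaching the bdev. A standalone sketch of the comparison, assuming the limit is in KiB and that a value of 0 disables the check:

	#include <stdbool.h>
	#include <stdint.h>

	/* Sketch: enforce an administrative cap on Write Zeroes range size. */
	static bool
	write_zeroes_size_ok(uint64_t num_blocks, uint32_t block_size,
			     uint64_t max_write_zeroes_size_kib)
	{
		if (max_write_zeroes_size_kib == 0) {
			return true;	/* assumed: zero means no limit */
		}
		return num_blocks * block_size <= max_write_zeroes_size_kib * 1024;
	}
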
612 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
614 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
628 nvmf_bdev_ctrlr_complete_cmd, req);
631 nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
641 struct spdk_nvmf_request *req;
654 struct spdk_nvmf_request *req = unmap_ctx->req;
655 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
670 spdk_nvmf_request_complete(req);
677 struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
683 struct spdk_nvmf_request *req = unmap_ctx->req;
688 nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, unmap_ctx);
693 struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
697 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
698 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
699 uint64_t max_discard_size = req->qpair->ctrlr->subsys->max_discard_size_kib;
707 if (nr * sizeof(struct spdk_nvme_dsm_range) > req->length) {
720 unmap_ctx->req = req;
731 spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
754 nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_bdev_ctrlr_unmap_resubmit, unmap_ctx);
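
The Dataset Management path (lines 693-754) checks that the payload really holds the NR+1 range descriptors announced in CDW10, copies them out of the request iovec with the spdk_iov_xfer helpers, and then issues one unmap per range, tracking completions through the unmap_ctx seen at lines 641-688. A condensed sketch of the parse-and-validate step, assuming the spec-defined spdk_nvme_dsm_range layout; the per-range submission and context bookkeeping are omitted:

	#include "spdk/stdinc.h"
	#include "spdk/util.h"		/* spdk_iov_xfer_* */
	#include "spdk/nvme_spec.h"	/* struct spdk_nvme_dsm_range */

	/* Sketch: pull DSM range descriptors out of a scattered payload. */
	static int
	parse_dsm_ranges_sketch(struct iovec *iov, int iovcnt, uint32_t payload_len,
				uint32_t nr_zero_based,
				struct spdk_nvme_dsm_range *ranges, uint32_t max_ranges)
	{
		uint32_t nr = nr_zero_based + 1;	/* NR in CDW10 is 0-based */
		struct spdk_iov_xfer ix;

		if (nr > max_ranges ||
		    (uint64_t)nr * sizeof(struct spdk_nvme_dsm_range) > payload_len) {
			return -1;	/* payload too small for the advertised ranges */
		}

		spdk_iov_xfer_init(&ix, iov, iovcnt);
		spdk_iov_xfer_to_buf(&ix, ranges, nr * sizeof(struct spdk_nvme_dsm_range));

		/* Each range then becomes one spdk_bdev_unmap_blocks() call keyed on
		 * range->starting_lba / range->length (counted in logical blocks). */
		return (int)nr;
	}
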
778 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
780 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
781 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
784 return nvmf_bdev_ctrlr_unmap(bdev, desc, ch, req, NULL);
794 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
796 struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
797 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
815 if (spdk_unlikely(req->length != (cmd->cdw12_bits.copy.nr + 1) *
838 spdk_iov_xfer_init(&ix, req->iov, req->iovcnt);
842 nvmf_bdev_ctrlr_complete_cmd, req);
845 nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
859 struct spdk_io_channel *ch, struct spdk_nvmf_request *req)
863 rc = spdk_bdev_nvme_iov_passthru_md(desc, ch, &req->cmd->nvme_cmd, req->iov, req->iovcnt,
864 req->length, NULL, 0, nvmf_bdev_ctrlr_complete_cmd, req);
868 nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
871 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
872 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
873 req->rsp->nvme_cpl.status.dnr = 1;
882 struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
887 if (spdk_unlikely(req->iovcnt > 1)) {
888 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
889 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
890 req->rsp->nvme_cpl.status.dnr = 1;
894 req->cmd_cb_fn = cb_fn;
896 rc = spdk_bdev_nvme_admin_passthru(desc, ch, &req->cmd->nvme_cmd, req->iov[0].iov_base, req->length,
897 nvmf_bdev_ctrlr_complete_admin_cmd, req);
900 nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_admin_cmd_resubmit, req);
903 req->rsp->nvme_cpl.status.sct = SPDK_NVME_SCT_GENERIC;
905 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
907 req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
910 req->rsp->nvme_cpl.status.dnr = 1;
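
The admin passthrough path (lines 882-910) refuses multi-element iovecs because spdk_bdev_nvme_admin_passthru() takes one contiguous buffer, stashes an optional per-command callback in req->cmd_cb_fn for the completion at lines 109-115, and maps submission failures onto NVMe status codes. A minimal sketch of the submission step, assuming the public spdk_bdev_nvme_admin_passthru() API; error handling is condensed to errno-style returns:

	#include <errno.h>
	#include "spdk/bdev.h"
	#include "spdk/nvme_spec.h"

	/* Sketch: forward an admin command to the bdev as an NVMe passthrough. */
	static int
	admin_passthru_sketch(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			      const struct spdk_nvme_cmd *cmd,
			      struct iovec *iov, int iovcnt, uint32_t length,
			      spdk_bdev_io_completion_cb cb, void *cb_arg)
	{
		if (iovcnt > 1) {
			/* The passthrough API takes one flat buffer, so scattered
			 * payloads are rejected up front (cf. line 887). */
			return -EINVAL;
		}

		return spdk_bdev_nvme_admin_passthru(desc, ch, cmd,
						     iovcnt ? iov[0].iov_base : NULL,
						     length, cb, cb_arg);
	}
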
920 struct spdk_nvmf_request *req = cb_arg;
923 req->rsp->nvme_cpl.cdw0 &= ~1U;
926 spdk_nvmf_request_complete(req);
932 struct spdk_io_channel *ch, struct spdk_nvmf_request *req,
937 assert((req->rsp->nvme_cpl.cdw0 & 1U) != 0);
939 rc = spdk_bdev_abort(desc, ch, req_to_abort, nvmf_bdev_ctrlr_complete_abort_cmd, req);
943 nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_admin_cmd_resubmit, req);
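
The Abort path (lines 920-943) starts with CDW0 bit 0 set, which per the NVMe spec means "command not aborted", and clears it in the completion callback only if the bdev layer actually aborted the victim I/O. A minimal sketch of that convention, assuming the public spdk_bdev_abort() API, whose bio_cb_arg identifies the I/O to abort by its original cb_arg:

	#include <assert.h>
	#include "spdk/bdev.h"

	/* Sketch: report an Abort result via CDW0 bit 0 (0 = aborted). */
	static void
	abort_done_sketch(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
	{
		uint32_t *cdw0 = cb_arg;	/* caller-provided CQE DW0, bit 0 preset to 1 */

		assert((*cdw0 & 1U) != 0);
		if (success) {
			*cdw0 &= ~1U;		/* the target command was aborted */
		}
		spdk_bdev_free_io(bdev_io);
	}

	static int
	abort_submit_sketch(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			    void *io_to_abort_cb_arg, uint32_t *cdw0)
	{
		/* bio_cb_arg must match the cb_arg used when the victim I/O was
		 * submitted; the bdev layer uses it to find that I/O. */
		return spdk_bdev_abort(desc, ch, io_to_abort_cb_arg,
				       abort_done_sketch, cdw0);
	}
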
991 struct spdk_nvmf_request *req = cb_arg;
998 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
1006 spdk_nvmf_request_complete(req);
1015 req->iovcnt = iovcnt;
1017 assert(req->iov == iov);
1019 req->zcopy_bdev_io = bdev_io; /* Preserve the bdev_io for the end zcopy */
1021 spdk_nvmf_request_complete(req);
1029 struct spdk_nvmf_request *req)
1031 struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
1038 nvmf_bdev_ctrlr_get_rw_params(&req->cmd->nvme_cmd, &start_lba, &num_blocks);
1047 if (spdk_unlikely(num_blocks * block_size > req->length)) {
1049 num_blocks, block_size, req->length);
1055 bool populate = (req->cmd->nvme_cmd.opc == SPDK_NVME_OPC_READ) ? true : false;
1057 rc = spdk_bdev_zcopy_start(desc, ch, req->iov, req->iovcnt, start_lba,
1058 num_blocks, populate, nvmf_bdev_ctrlr_zcopy_start_complete, req);
1061 nvmf_bdev_ctrl_queue_io(req, bdev, ch, nvmf_ctrlr_process_io_cmd_resubmit, req);
1076 struct spdk_nvmf_request *req = cb_arg;
1081 struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
1090 req->zcopy_bdev_io = NULL;
1091 spdk_nvmf_request_complete(req);
1095 nvmf_bdev_ctrlr_zcopy_end(struct spdk_nvmf_request *req, bool commit)
1099 rc = spdk_bdev_zcopy_end(req->zcopy_bdev_io, commit, nvmf_bdev_ctrlr_zcopy_end_complete, req);
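
The zcopy path (lines 991-1099) splits a transfer into a start step, which lends the request bdev-owned buffers (populated from the media for reads), and an end step, which commits or discards them; the spdk_bdev_io returned by start is parked in req->zcopy_bdev_io until end. A minimal sketch of the pairing, assuming the public spdk_bdev_zcopy_start()/spdk_bdev_zcopy_end() APIs:

	#include <stdbool.h>
	#include "spdk/bdev.h"

	/* Sketch: begin a zero-copy transfer; populate=true fills the buffers
	 * with data from the bdev (read), false leaves them for the host to
	 * fill before the commit (write). */
	static int
	zcopy_start_sketch(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			   struct iovec *iov, int iovcnt,
			   uint64_t start_lba, uint64_t num_blocks, bool populate,
			   spdk_bdev_io_completion_cb cb, void *cb_arg)
	{
		return spdk_bdev_zcopy_start(desc, ch, iov, iovcnt, start_lba,
					     num_blocks, populate, cb, cb_arg);
	}

	/* Sketch: finish the transfer; the bdev_io here is the one delivered to
	 * the start completion callback (stored in req->zcopy_bdev_io above). */
	static int
	zcopy_end_sketch(struct spdk_bdev_io *zcopy_bdev_io, bool commit,
			 spdk_bdev_io_completion_cb cb, void *cb_arg)
	{
		return spdk_bdev_zcopy_end(zcopy_bdev_io, commit, cb, cb_arg);
	}
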