Lines Matching defs:payload

13 		const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
77 const struct nvme_payload *payload,
85 child = _nvme_ns_cmd_rw(ns, qpair, payload, payload_offset, md_offset, lba, lba_count, cb_fn,
100 const struct nvme_payload *payload,
123 child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
203 const struct nvme_payload *payload,
211 spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
212 spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
213 void *sgl_cb_arg = req->payload.contig_or_cb_arg;
308 child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
323 /* No splitting was required, so set up the whole payload as one request. */
333 const struct nvme_payload *payload,
341 spdk_nvme_req_reset_sgl_cb reset_sgl_fn = req->payload.reset_sgl_fn;
342 spdk_nvme_req_next_sge_cb next_sge_fn = req->payload.next_sge_fn;
343 void *sgl_cb_arg = req->payload.contig_or_cb_arg;
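Both split paths capture the payload's SGL callbacks before walking the transfer. For reference, a minimal sketch of the callback pair a caller supplies, assuming the spdk_nvme_req_reset_sgl_cb / spdk_nvme_req_next_sge_cb signatures from spdk/nvme.h and a hypothetical iovec-backed context:

#include <stdint.h>
#include <sys/uio.h>

struct iov_ctx {
	struct iovec *iov;	/* caller's scatter list */
	int iovcnt;
	int idx;		/* current element */
	uint32_t off;		/* byte offset into current element */
};

/* Rewind the SGL to `offset` bytes into the payload. */
static void
reset_sgl(void *cb_arg, uint32_t offset)
{
	struct iov_ctx *ctx = cb_arg;

	for (ctx->idx = 0; ctx->idx < ctx->iovcnt; ctx->idx++) {
		if (offset < ctx->iov[ctx->idx].iov_len) {
			break;
		}
		offset -= ctx->iov[ctx->idx].iov_len;
	}
	ctx->off = offset;
}

/* Hand the driver the next contiguous segment. */
static int
next_sge(void *cb_arg, void **address, uint32_t *length)
{
	struct iov_ctx *ctx = cb_arg;
	struct iovec *iov = &ctx->iov[ctx->idx];

	*address = (uint8_t *)iov->iov_base + ctx->off;
	*length = iov->iov_len - ctx->off;
	ctx->off = 0;
	ctx->idx++;
	return 0;
}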
373 * request. Checking if the child equals the full payload allows us to *not*
409 child = _nvme_add_child_request(ns, qpair, payload, payload_offset, md_offset,
425 /* No splitting was required, so set up the whole payload as one request. */
434 const struct nvme_payload *payload, uint32_t payload_offset, uint32_t md_offset,
447 req = nvme_allocate_request(qpair, payload, lba_count * sector_size, lba_count * ns->md_size,
477 return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
483 return _nvme_ns_cmd_split_request(ns, qpair, payload, payload_offset, md_offset, lba, lba_count,
488 } else if (nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_SGL && check_sgl) {
490 return _nvme_ns_cmd_split_request_sgl(ns, qpair, payload, payload_offset, md_offset,
495 return _nvme_ns_cmd_split_request_prp(ns, qpair, payload, payload_offset, md_offset,
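The dispatch above (source lines 477-495) chooses between the stripe, max-transfer, SGL, and PRP split helpers. A hypothetical condensation of the first two checks, with made-up parameter names standing in for the per-namespace values SPDK derives from the controller's MDTS and stripe size:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical mirror of the split decision: an I/O is split into child
 * requests when it exceeds the controller's max transfer size or crosses
 * a stripe boundary. SGL payloads additionally take their own split path
 * (check_sgl) when the transport needs PRP-compatible segments. */
static bool
io_needs_split(uint64_t lba, uint32_t lba_count, uint32_t sector_size,
	       uint32_t max_xfer_size, uint32_t sectors_per_stripe)
{
	if ((uint64_t)lba_count * sector_size > max_xfer_size) {
		return true;	/* larger than one request may carry */
	}
	if (sectors_per_stripe != 0 &&
	    (lba / sectors_per_stripe) !=
	    ((lba + lba_count - 1) / sectors_per_stripe)) {
		return true;	/* straddles a stripe boundary */
	}
	return false;
}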
513 struct nvme_payload payload;
520 payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
522 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
546 struct nvme_payload payload;
553 payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
555 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
578 struct nvme_payload payload;
589 payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
591 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
614 struct nvme_payload payload;
625 payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
627 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg,
648 struct nvme_payload payload;
655 payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
657 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
679 struct nvme_payload payload;
686 payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
688 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
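Source lines 648-688 are the contiguous read wrappers: wrap the buffer with NVME_PAYLOAD_CONTIG, then submit via _nvme_ns_cmd_rw with SPDK_NVME_OPC_READ. From the caller's side, a minimal sketch through the public API, assuming buf is DMA-safe (e.g. allocated with spdk_zmalloc()):

#include <stdio.h>
#include "spdk/nvme.h"

static void
read_done(void *cb_arg, const struct spdk_nvme_cpl *cpl)
{
	if (spdk_nvme_cpl_is_error(cpl)) {
		fprintf(stderr, "read failed: %s\n",
			spdk_nvme_cpl_get_status_string(&cpl->status));
	}
}

static int
submit_read(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	    void *buf, uint64_t lba, uint32_t lba_count)
{
	return spdk_nvme_ns_cmd_read(ns, qpair, buf, lba, lba_count,
				     read_done, NULL, 0 /* io_flags */);
}

The plain write wrapper at source lines 888-897 is symmetric, submitting SPDK_NVME_OPC_WRITE through spdk_nvme_ns_cmd_write().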
708 struct nvme_payload payload;
715 payload = NVME_PAYLOAD_CONTIG(buffer, opts->metadata);
726 payload.opts = opts;
728 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, opc, opts->io_flags,
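Source lines 708-728 show the extended-options variant: metadata and io_flags come from a caller-supplied options struct that is attached to the payload via payload.opts. A sketch, assuming the spdk_nvme_ns_cmd_read_ext() entry point and the size/metadata/io_flags fields of struct spdk_nvme_ns_cmd_ext_io_opts:

#include "spdk/nvme.h"

static int
submit_read_ext(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		void *buf, void *md_buf, uint64_t lba, uint32_t lba_count,
		spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	struct spdk_nvme_ns_cmd_ext_io_opts opts = {
		.size = sizeof(opts),	/* lets SPDK version the struct */
		.metadata = md_buf,	/* separate metadata buffer, may be NULL */
		.io_flags = 0,
	};

	return spdk_nvme_ns_cmd_read_ext(ns, qpair, buf, lba, lba_count,
					 cb_fn, cb_arg, &opts);
}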
759 struct nvme_payload payload;
770 payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
772 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
794 struct nvme_payload payload;
805 payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
807 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_READ,
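The vectored read wrappers (source lines 759-807) take the two SGL callbacks in place of a buffer. A sketch reusing the hypothetical iov_ctx helpers above:

#include "spdk/nvme.h"

static int
submit_readv(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     struct iov_ctx *ctx, uint64_t lba, uint32_t lba_count,
	     spdk_nvme_cmd_cb cb_fn)
{
	/* cb_arg does double duty here: source lines 589/770 pass it to
	 * NVME_PAYLOAD_SGL as the argument for reset_sgl/next_sge (the
	 * sgl_cb_arg captured at lines 211-213 and 341-343), and it is
	 * also delivered to cb_fn on completion. */
	return spdk_nvme_ns_cmd_readv(ns, qpair, lba, lba_count,
				      cb_fn, ctx, 0 /* io_flags */,
				      reset_sgl, next_sge);
}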
827 struct nvme_payload payload;
837 payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
849 payload.opts = opts;
850 payload.md = opts->metadata;
851 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, opc, opts->io_flags,
855 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, opc, 0, 0, 0, 0,
888 struct nvme_payload payload;
895 payload = NVME_PAYLOAD_CONTIG(buffer, NULL);
897 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
937 struct nvme_payload payload;
949 payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
951 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
986 struct nvme_payload payload;
1002 payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
1004 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, zslba, lba_count, cb_fn, cb_arg,
1012 * do not always cause a request to be split. These functions verify payload size,
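Source lines 937-1004 are the zone-append bodies; note the zslba parameter, the zone start LBA, since the controller chooses the placement and reports the written LBA back in the completion. The comment at source line 1012 introduces the extra size checks appends need: they cannot be split into children, because each child would land at a different LBA. A usage sketch, assuming spdk_nvme_zns_zone_append() from spdk/nvme_zns.h:

#include "spdk/nvme_zns.h"

static int
submit_zone_append(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
		   void *buf, uint64_t zslba, uint32_t lba_count,
		   spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	/* The LBA actually written is returned in the completion entry. */
	return spdk_nvme_zns_zone_append(ns, qpair, buf, zslba, lba_count,
					 cb_fn, cb_arg, 0 /* io_flags */);
}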
1042 struct nvme_payload payload;
1049 payload = NVME_PAYLOAD_CONTIG(buffer, metadata);
1051 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
1082 struct nvme_payload payload;
1093 payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, NULL);
1095 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
1117 struct nvme_payload payload;
1128 payload = NVME_PAYLOAD_SGL(reset_sgl_fn, next_sge_fn, cb_arg, metadata);
1130 req = _nvme_ns_cmd_rw(ns, qpair, &payload, 0, 0, lba, lba_count, cb_fn, cb_arg, SPDK_NVME_OPC_WRITE,
1343 struct spdk_nvme_reservation_register_data *payload,
1353 payload, sizeof(struct spdk_nvme_reservation_register_data),
1373 struct spdk_nvme_reservation_key_data *payload,
1383 payload, sizeof(struct spdk_nvme_reservation_key_data), cb_fn,
1403 struct spdk_nvme_reservation_acquire_data *payload,
1413 payload, sizeof(struct spdk_nvme_reservation_acquire_data),
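The reservation commands at source lines 1343-1413 differ from the I/O paths: they take small fixed-size structs and copy them via nvme_allocate_request_user_copy(), so the caller's struct need not be DMA-safe. A sketch of registering a key (the action and cptpl enum constants are assumed from spdk/nvme_spec.h):

#include "spdk/nvme.h"

static int
register_key(struct spdk_nvme_ns *ns, struct spdk_nvme_qpair *qpair,
	     uint64_t new_key, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	/* Stack allocation is fine here: the driver copies this struct. */
	struct spdk_nvme_reservation_register_data rr_data = {
		.crkey = 0,		/* no current key yet */
		.nrkey = new_key,	/* key to register */
	};

	return spdk_nvme_ns_cmd_reservation_register(ns, qpair, &rr_data,
			true /* ignore_key */,
			SPDK_NVME_RESERVE_REGISTER_KEY,
			SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
			cb_fn, cb_arg);
}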
1433 void *payload, uint32_t len,
1444 req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
1461 void *payload, uint32_t len, uint8_t mo, uint16_t mos,
1472 req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
1492 void *payload, uint32_t len, uint8_t mo, uint16_t mos,
1498 req = nvme_allocate_request_user_copy(qpair, payload, len, cb_fn, cb_arg, false);
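All three functions above (source lines 1433-1498) route the caller's buffer through nvme_allocate_request_user_copy(), which stages the transfer in a driver-owned, DMA-safe copy rather than issuing DMA against the user pointer; the trailing false appears to select the controller-to-host direction, so the driver copies the result back into the payload buffer when the command completes.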