Lines Matching defs:rdma_req
549 struct spdk_nvmf_rdma_request *rdma_req);
585 _nvmf_rdma_request_free_data(struct spdk_nvmf_rdma_request *rdma_req,
592 uint64_t req_wrid = (uint64_t)&rdma_req->data_wr;
600 if (data_wr != &rdma_req->data.wr) {
606 data_wr = (!next_send_wr || next_send_wr == &rdma_req->rsp.wr) ? NULL : next_send_wr;
615 nvmf_rdma_request_free_data(struct spdk_nvmf_rdma_request *rdma_req,
618 rdma_req->num_outstanding_data_wr = 0;
620 _nvmf_rdma_request_free_data(rdma_req, rdma_req->transfer_wr, rtransport->data_wr_pool);
622 if (rdma_req->remaining_tranfer_in_wrs) {
623 _nvmf_rdma_request_free_data(rdma_req, rdma_req->remaining_tranfer_in_wrs,
625 rdma_req->remaining_tranfer_in_wrs = NULL;
628 rdma_req->data.wr.next = NULL;
629 rdma_req->rsp.wr.next = NULL;
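The matches from 585 to 629 free a chain of data work requests back to a pool, skipping the WR that is embedded in the request itself and stopping once the chain reaches the response WR (or ends). A minimal sketch of that walk, using made-up stand-in types instead of the real ibv_send_wr and spdk_mempool, could look like this:

	#include <stdio.h>
	#include <stdlib.h>

	/* Simplified stand-ins: a singly linked send WR and a trivial "pool". */
	struct fake_wr {
		struct fake_wr *next;
	};

	static void pool_put(struct fake_wr *wr)
	{
		/* The real code returns the WR's container to an spdk_mempool. */
		free(wr);
	}

	/*
	 * Release every chained WR that is not embedded in the request itself,
	 * stopping at the response WR. Mirrors the shape of the free-data helper
	 * in the listing above; it is not the SPDK implementation.
	 */
	static void free_data_wrs(struct fake_wr *first, struct fake_wr *embedded_data_wr,
				  struct fake_wr *rsp_wr)
	{
		struct fake_wr *wr = first;

		while (wr != NULL) {
			struct fake_wr *next = wr->next;

			if (wr != embedded_data_wr) {
				pool_put(wr);
			}
			wr = (next == NULL || next == rsp_wr) ? NULL : next;
		}
	}

	int main(void)
	{
		struct fake_wr embedded = {0}, rsp = {0};
		struct fake_wr *extra1 = calloc(1, sizeof(*extra1));
		struct fake_wr *extra2 = calloc(1, sizeof(*extra2));

		/* embedded -> extra1 -> extra2 -> rsp, as built by the setup helpers. */
		embedded.next = extra1;
		extra1->next = extra2;
		extra2->next = &rsp;

		free_data_wrs(&embedded, &embedded, &rsp);
		printf("chain released\n");
		return 0;
	}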
673 struct spdk_nvmf_rdma_request *rdma_req;
773 rdma_req = &resources->reqs[i];
776 rdma_req->req.qpair = &opts->qpair->qpair;
778 rdma_req->req.qpair = NULL;
780 rdma_req->req.cmd = NULL;
781 rdma_req->req.iovcnt = 0;
782 rdma_req->req.stripped_data = NULL;
785 rdma_req->req.rsp = &resources->cpls[i];
787 rdma_req->rsp.sgl[0].addr = (uintptr_t)&resources->cpls[i];
788 rdma_req->rsp.sgl[0].length = sizeof(resources->cpls[i]);
794 rdma_req->rsp.sgl[0].lkey = spdk_rdma_utils_memory_translation_get_lkey(&translation);
796 rdma_req->rsp_wr.type = RDMA_WR_TYPE_SEND;
797 rdma_req->rsp.wr.wr_id = (uintptr_t)&rdma_req->rsp_wr;
798 rdma_req->rsp.wr.next = NULL;
799 rdma_req->rsp.wr.opcode = IBV_WR_SEND;
800 rdma_req->rsp.wr.send_flags = IBV_SEND_SIGNALED;
801 rdma_req->rsp.wr.sg_list = rdma_req->rsp.sgl;
802 rdma_req->rsp.wr.num_sge = SPDK_COUNTOF(rdma_req->rsp.sgl);
805 rdma_req->data_wr.type = RDMA_WR_TYPE_DATA;
806 rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data_wr;
807 rdma_req->data.wr.next = NULL;
808 rdma_req->data.wr.send_flags = IBV_SEND_SIGNALED;
809 rdma_req->data.wr.sg_list = rdma_req->data.sgl;
810 rdma_req->data.wr.num_sge = SPDK_COUNTOF(rdma_req->data.sgl);
813 rdma_req->state = RDMA_REQUEST_STATE_FREE;
814 STAILQ_INSERT_TAIL(&resources->free_queue, rdma_req, state_link);
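The resource-init matches (796-810) tag every send WR by storing a pointer to a small typed wrapper (rsp_wr, data_wr) in wr_id; the completion handlers near 4718 and 4775 then recover the owning request with SPDK_CONTAINEROF. A self-contained sketch of that round trip, with simplified placeholder structs and a local CONTAINEROF macro rather than the real SPDK types, might be:

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-ins; only the fields needed for the wr_id round trip. */
	enum wr_type { WR_TYPE_SEND, WR_TYPE_DATA };

	struct wr_wrapper {
		enum wr_type type;
	};

	struct fake_send_wr {
		uint64_t wr_id;          /* carries a pointer to the wrapper */
	};

	struct fake_request {
		struct wr_wrapper rsp_wr;
		struct wr_wrapper data_wr;
		struct fake_send_wr rsp;
		struct fake_send_wr data;
	};

	#define CONTAINEROF(ptr, type, member) \
		((type *)((uintptr_t)(ptr) - offsetof(type, member)))

	int main(void)
	{
		struct fake_request req = {0};

		/* Tag the work requests, as the resource-init code above does. */
		req.rsp_wr.type = WR_TYPE_SEND;
		req.rsp.wr_id = (uintptr_t)&req.rsp_wr;
		req.data_wr.type = WR_TYPE_DATA;
		req.data.wr_id = (uintptr_t)&req.data_wr;

		/* On completion, walk back from wr_id to the owning request. */
		struct wr_wrapper *wrapper = (struct wr_wrapper *)(uintptr_t)req.data.wr_id;
		struct fake_request *owner = CONTAINEROF(wrapper, struct fake_request, data_wr);
		assert(owner == &req);
		printf("recovered request %p from wr_id\n", (void *)owner);
		return 0;
	}

The same idea lets a completion be routed to either the response path or the data path purely from the wrapper's type field, which is what the RDMA_WR_TYPE_SEND/RDMA_WR_TYPE_DATA tags in the listing are for.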
1088 struct spdk_nvmf_rdma_request *rdma_req;
1094 rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
1100 assert(rdma_req != NULL);
1102 if (spdk_rdma_provider_qp_queue_send_wrs(rqpair->rdma_qp, rdma_req->transfer_wr)) {
1109 assert(rqpair->current_read_depth + rdma_req->num_outstanding_data_wr <= rqpair->max_read_depth);
1110 rqpair->current_read_depth += rdma_req->num_outstanding_data_wr;
1111 assert(rqpair->current_send_depth + rdma_req->num_outstanding_data_wr <= rqpair->max_send_depth);
1112 rqpair->current_send_depth += rdma_req->num_outstanding_data_wr;
1116 nvmf_rdma_request_reset_transfer_in(struct spdk_nvmf_rdma_request *rdma_req,
1120 _nvmf_rdma_request_free_data(rdma_req, rdma_req->transfer_wr, rtransport->data_wr_pool);
1121 rdma_req->transfer_wr = rdma_req->remaining_tranfer_in_wrs;
1122 rdma_req->remaining_tranfer_in_wrs = NULL;
1123 rdma_req->num_outstanding_data_wr = rdma_req->num_remaining_data_wr;
1124 rdma_req->num_remaining_data_wr = 0;
1130 struct spdk_nvmf_rdma_request *rdma_req;
1134 rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
1137 assert(rdma_req != NULL);
1139 assert(rdma_req->num_outstanding_data_wr > num_reads_available);
1140 wr = rdma_req->transfer_wr;
1146 rdma_req->remaining_tranfer_in_wrs = wr->next;
1147 rdma_req->num_remaining_data_wr = rdma_req->num_outstanding_data_wr - num_reads_available;
1148 rdma_req->num_outstanding_data_wr = num_reads_available;
1159 struct spdk_nvmf_rdma_request *rdma_req;
1169 rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
1183 assert(rdma_req->recv != NULL);
1185 nvmf_rdma_qpair_queue_recv_wrs(rqpair, &rdma_req->recv->wr);
1187 rdma_req->recv = NULL;
1195 first = &rdma_req->rsp.wr;
1201 rdma_req->num_outstanding_data_wr = 0;
1203 first = rdma_req->transfer_wr;
1205 num_outstanding_data_wr = rdma_req->num_outstanding_data_wr;
1413 struct spdk_nvmf_rdma_request *rdma_req,
1431 current_data_wr = &rdma_req->data;
1434 nvmf_rdma_setup_wr(&current_data_wr->wr, &work_requests[i]->wr, rdma_req->req.xfer);
1438 current_data_wr->wr.wr_id = rdma_req->data.wr.wr_id;
1441 nvmf_rdma_setup_wr(&current_data_wr->wr, &rdma_req->rsp.wr, rdma_req->req.xfer);
1447 nvmf_rdma_setup_request(struct spdk_nvmf_rdma_request *rdma_req)
1449 struct ibv_send_wr *wr = &rdma_req->data.wr;
1450 struct spdk_nvme_sgl_descriptor *sgl = &rdma_req->req.cmd->nvme_cmd.dptr.sgl1;
1454 nvmf_rdma_setup_wr(wr, &rdma_req->rsp.wr, rdma_req->req.xfer);
1458 nvmf_rdma_update_remote_addr(struct spdk_nvmf_rdma_request *rdma_req, uint32_t num_wrs)
1460 struct ibv_send_wr *wr = &rdma_req->data.wr;
1461 struct spdk_nvme_sgl_descriptor *sgl = &rdma_req->req.cmd->nvme_cmd.dptr.sgl1;
1478 struct spdk_nvmf_rdma_request *rdma_req,
1491 iov = &rdma_req->req.iov[rdma_req->iovpos];
1499 remaining = spdk_min((uint32_t)iov->iov_len - rdma_req->offset, total_length);
1502 sg_ele->addr = (uintptr_t)iov->iov_base + rdma_req->offset;
1506 rdma_req->offset += sg_ele->length;
1510 if (rdma_req->offset == iov->iov_len) {
1511 rdma_req->offset = 0;
1512 rdma_req->iovpos++;
1526 struct spdk_nvmf_rdma_request *rdma_req,
1532 struct spdk_dif_ctx *dif_ctx = &rdma_req->req.dif.dif_ctx;
1543 if (spdk_likely(!rdma_req->req.stripped_data)) {
1544 rdma_iov = rdma_req->req.iov;
1548 rdma_iov = rdma_req->req.stripped_data->iov;
1557 iov = rdma_iov + rdma_req->iovpos;
1565 remaining = spdk_min((uint32_t)iov->iov_len - rdma_req->offset, total_length);
1579 sg_ele->addr = (uintptr_t)((char *)iov->iov_base + rdma_req->offset);
1586 rdma_req->offset += sge_len;
1594 rdma_req->offset += md_size;
1604 rdma_req->offset -= spdk_min(iov->iov_len, rdma_req->offset);
1605 rdma_req->iovpos++;
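The SGL-building matches (1478-1605) walk the request's iovecs while total_length bytes remain, carrying the position across work requests in rdma_req->offset and rdma_req->iovpos. Stripped of the lkey translation and DIF interleaving, the bookkeeping pattern is roughly the following sketch (the names and the sge struct are illustrative, not SPDK's):

	#include <stdint.h>
	#include <stdio.h>
	#include <sys/uio.h>

	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	struct sge {
		uintptr_t addr;
		uint32_t length;
	};

	/*
	 * Fill SGEs from iovecs until total_length bytes are described.
	 * 'offset' and 'iovpos' persist across calls, like rdma_req->offset and
	 * rdma_req->iovpos in the listing above.
	 */
	static int fill_sgl(const struct iovec *iov, uint32_t *iovpos, uint32_t *offset,
			    struct sge *sgl, int max_sge, uint32_t total_length)
	{
		int num_sge = 0;

		while (total_length > 0 && num_sge < max_sge) {
			const struct iovec *cur = &iov[*iovpos];
			uint32_t remaining = MIN((uint32_t)cur->iov_len - *offset, total_length);

			sgl[num_sge].addr = (uintptr_t)cur->iov_base + *offset;
			sgl[num_sge].length = remaining;
			num_sge++;

			total_length -= remaining;
			*offset += remaining;
			if (*offset == cur->iov_len) {
				/* current iovec consumed; move to the next one */
				*offset = 0;
				(*iovpos)++;
			}
		}
		return total_length == 0 ? num_sge : -1; /* -1: ran out of SGEs */
	}

	int main(void)
	{
		char a[4096], b[4096];
		struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
		struct sge sgl[4];
		uint32_t iovpos = 0, offset = 0;

		int n = fill_sgl(iov, &iovpos, &offset, sgl, 4, 6000);
		printf("used %d SGEs, iovpos=%u offset=%u\n", n, iovpos, offset);
		return 0;
	}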
1642 struct spdk_nvmf_rdma_request *rdma_req)
1646 struct spdk_nvmf_request *req = &rdma_req->req;
1647 struct ibv_send_wr *wr = &rdma_req->data.wr;
1656 nvmf_rdma_setup_request(rdma_req);
1684 rdma_req->iovpos = 0;
1690 rc = nvmf_request_alloc_wrs(rtransport, rdma_req, num_wrs - 1);
1696 rc = nvmf_rdma_fill_wr_sgl_with_dif(device, rdma_req, wr, length, num_wrs - 1);
1702 nvmf_rdma_update_remote_addr(rdma_req, num_wrs);
1705 rc = nvmf_rdma_fill_wr_sgl(device, rdma_req, wr, length);
1712 rdma_req->num_outstanding_data_wr = num_wrs;
1718 nvmf_rdma_request_free_data(rdma_req, rtransport);
1726 struct spdk_nvmf_rdma_request *rdma_req)
1731 struct spdk_nvmf_request *req = &rdma_req->req;
1738 rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
1748 desc = (struct spdk_nvme_sgl_descriptor *)rdma_req->recv->buf + inline_segment->address;
1768 rc = nvmf_request_alloc_wrs(rtransport, rdma_req, num_sgl_descriptors - 1);
1775 nvmf_rdma_request_free_data(rdma_req, rtransport);
1791 current_wr = &rdma_req->data.wr;
1795 rdma_req->iovpos = 0;
1796 desc = (struct spdk_nvme_sgl_descriptor *)rdma_req->recv->buf + inline_segment->address;
1806 rc = nvmf_rdma_fill_wr_sgl(device, rdma_req, current_wr, lengths[i]);
1808 rc = nvmf_rdma_fill_wr_sgl_with_dif(device, rdma_req, current_wr,
1828 rdma_req->rsp.wr.opcode = IBV_WR_SEND_WITH_INV;
1829 rdma_req->rsp.wr.imm_data = desc->keyed.key;
1834 rdma_req->num_outstanding_data_wr = num_sgl_descriptors;
1840 nvmf_rdma_request_free_data(rdma_req, rtransport);
1847 struct spdk_nvmf_rdma_request *rdma_req)
1849 struct spdk_nvmf_request *req = &rdma_req->req;
1872 rdma_req->rsp.wr.opcode = IBV_WR_SEND_WITH_INV;
1873 rdma_req->rsp.wr.imm_data = sgl->keyed.key;
1881 rc = nvmf_rdma_request_fill_iovs(rtransport, device, rdma_req);
1889 SPDK_DEBUGLOG(rdma, "No available large data buffers. Queueing request %p\n", rdma_req);
1893 SPDK_DEBUGLOG(rdma, "Request %p took %d buffer/s from central pool\n", rdma_req,
1920 rdma_req->num_outstanding_data_wr = 0;
1924 req->iov[0].iov_base = rdma_req->recv->buf + offset;
1932 rc = nvmf_rdma_request_fill_iovs_multi_sgl(rtransport, device, rdma_req);
1934 SPDK_DEBUGLOG(rdma, "No available large data buffers. Queueing request %p\n", rdma_req);
1942 SPDK_DEBUGLOG(rdma, "Request %p took %d buffer/s from central pool\n", rdma_req,
1955 _nvmf_rdma_request_free(struct spdk_nvmf_rdma_request *rdma_req,
1961 rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
1962 if (rdma_req->req.data_from_pool) {
1965 spdk_nvmf_request_free_buffers(&rdma_req->req, &rgroup->group, &rtransport->transport);
1967 if (rdma_req->req.stripped_data) {
1968 nvmf_request_free_stripped_buffers(&rdma_req->req,
1972 nvmf_rdma_request_free_data(rdma_req, rtransport);
1973 rdma_req->req.length = 0;
1974 rdma_req->req.iovcnt = 0;
1975 rdma_req->offset = 0;
1976 rdma_req->req.dif_enabled = false;
1977 rdma_req->fused_failed = false;
1978 rdma_req->transfer_wr = NULL;
1979 if (rdma_req->fused_pair) {
1985 rdma_req->fused_pair->fused_failed = true;
1986 if (rdma_req->fused_pair->state == RDMA_REQUEST_STATE_READY_TO_EXECUTE) {
1987 nvmf_rdma_request_process(rtransport, rdma_req->fused_pair);
1989 rdma_req->fused_pair = NULL;
1991 memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));
1994 STAILQ_INSERT_HEAD(&rqpair->resources->free_queue, rdma_req, state_link);
1996 rdma_req->state = RDMA_REQUEST_STATE_FREE;
2002 struct spdk_nvmf_rdma_request *rdma_req)
2007 next = rdma_req->req.cmd->nvme_cmd.fuse;
2020 rqpair->fused_first->fused_pair = rdma_req;
2021 rdma_req->fused_pair = rqpair->fused_first;
2043 rqpair->fused_first = rdma_req;
2046 rdma_req->fused_failed = true;
2052 struct spdk_nvmf_rdma_request *rdma_req)
2061 if (spdk_likely(rdma_req->req.cmd->nvme_cmd.opc != SPDK_NVME_OPC_FABRIC)) {
2063 STAILQ_INSERT_TAIL(&rgroup->group.pending_buf_queue, &rdma_req->req, buf_link);
2072 STAILQ_INSERT_HEAD(&rgroup->group.pending_buf_queue, &rdma_req->req, buf_link);
2080 STAILQ_INSERT_AFTER(&rgroup->group.pending_buf_queue, r, &rdma_req->req, buf_link);
2090 struct spdk_nvmf_rdma_request *rdma_req)
2095 struct spdk_nvme_cpl *rsp = &rdma_req->req.rsp->nvme_cpl;
2103 rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
2107 assert(rdma_req->state != RDMA_REQUEST_STATE_FREE);
2112 switch (rdma_req->state) {
2114 STAILQ_REMOVE(&rgroup->group.pending_buf_queue, &rdma_req->req, spdk_nvmf_request, buf_link);
2117 STAILQ_REMOVE(&rqpair->pending_rdma_read_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
2120 if (rdma_req->num_remaining_data_wr) {
2123 rdma_req->num_remaining_data_wr = 0;
2124 STAILQ_REMOVE(&rqpair->pending_rdma_read_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
2128 STAILQ_REMOVE(&rqpair->pending_rdma_write_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
2131 STAILQ_REMOVE(&rqpair->pending_rdma_send_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
2136 rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
2141 prev_state = rdma_req->state;
2143 SPDK_DEBUGLOG(rdma, "Request %p entering state %d\n", rdma_req, prev_state);
2145 switch (rdma_req->state) {
2152 (uintptr_t)rdma_req, (uintptr_t)rqpair, rqpair->qpair.queue_depth);
2153 rdma_recv = rdma_req->recv;
2156 rdma_req->req.cmd = (union nvmf_h2c_msg *)rdma_recv->sgl[0].addr;
2157 memset(rdma_req->req.rsp, 0, sizeof(*rdma_req->req.rsp));
2158 rdma_req->transfer_wr = &rdma_req->data.wr;
2161 rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
2165 if (spdk_unlikely(spdk_nvmf_request_get_dif_ctx(&rdma_req->req, &rdma_req->req.dif.dif_ctx))) {
2166 rdma_req->req.dif_enabled = true;
2169 nvmf_rdma_check_fused_ordering(rtransport, rqpair, rdma_req);
2172 rdma_req->rsp.wr.opcode = IBV_WR_SEND;
2173 rdma_req->rsp.wr.imm_data = 0;
2177 rdma_req->req.xfer = spdk_nvmf_req_get_xfer(&rdma_req->req);
2179 if (spdk_unlikely(rdma_req->req.xfer == SPDK_NVME_DATA_BIDIRECTIONAL)) {
2182 STAILQ_INSERT_TAIL(&rqpair->pending_rdma_send_queue, rdma_req, state_link);
2183 rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE_PENDING;
2184 SPDK_DEBUGLOG(rdma, "Request %p: invalid xfer type (BIDIRECTIONAL)\n", rdma_req);
2189 if (rdma_req->req.xfer == SPDK_NVME_DATA_NONE) {
2190 rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
2194 rdma_req->state = RDMA_REQUEST_STATE_NEED_BUFFER;
2195 nvmf_rdma_poll_group_insert_need_buffer_req(rgroup, rdma_req);
2199 (uintptr_t)rdma_req, (uintptr_t)rqpair);
2201 assert(rdma_req->req.xfer != SPDK_NVME_DATA_NONE);
2203 if (&rdma_req->req != STAILQ_FIRST(&rgroup->group.pending_buf_queue)) {
2209 rc = nvmf_rdma_request_parse_sgl(rtransport, device, rdma_req);
2212 STAILQ_INSERT_TAIL(&rqpair->pending_rdma_send_queue, rdma_req, state_link);
2213 rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE_PENDING;
2217 if (rdma_req->req.iovcnt == 0) {
2228 if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER &&
2229 rdma_req->req.data_from_pool) {
2230 STAILQ_INSERT_TAIL(&rqpair->pending_rdma_read_queue, rdma_req, state_link);
2231 rdma_req->state = RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING;
2235 rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
2239 (uintptr_t)rdma_req, (uintptr_t)rqpair);
2241 if (rdma_req != STAILQ_FIRST(&rqpair->pending_rdma_read_queue)) {
2249 if (rdma_req->num_outstanding_data_wr > qdepth ||
2250 rdma_req->num_outstanding_data_wr > num_rdma_reads_available) {
2253 request_prepare_transfer_in_part(&rdma_req->req, spdk_min(num_rdma_reads_available, qdepth));
2262 if (rdma_req->num_remaining_data_wr == 0) {
2266 request_transfer_in(&rdma_req->req);
2267 rdma_req->state = RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER;
2272 (uintptr_t)rdma_req, (uintptr_t)rqpair);
2278 (uintptr_t)rdma_req, (uintptr_t)rqpair);
2280 if (spdk_unlikely(rdma_req->req.dif_enabled)) {
2281 if (rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER) {
2283 num_blocks = SPDK_CEIL_DIV(rdma_req->req.dif.elba_length, rdma_req->req.dif.dif_ctx.block_size);
2286 rc = spdk_dif_generate(rdma_req->req.iov, rdma_req->req.iovcnt,
2287 num_blocks, &rdma_req->req.dif.dif_ctx);
2290 rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
2296 assert(rdma_req->req.dif.elba_length >= rdma_req->req.length);
2298 rdma_req->req.length = rdma_req->req.dif.elba_length;
2301 if (rdma_req->req.cmd->nvme_cmd.fuse != SPDK_NVME_CMD_FUSE_NONE) {
2302 if (rdma_req->fused_failed) {
2308 STAILQ_INSERT_TAIL(&rqpair->pending_rdma_send_queue, rdma_req, state_link);
2309 rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE_PENDING;
2313 if (rdma_req->fused_pair == NULL ||
2314 rdma_req->fused_pair->state != RDMA_REQUEST_STATE_READY_TO_EXECUTE) {
2332 if (rdma_req->req.cmd->nvme_cmd.fuse == SPDK_NVME_CMD_FUSE_SECOND) {
2333 assert(rdma_req->fused_pair != NULL);
2334 assert(rdma_req->fused_pair->fused_pair != NULL);
2335 rdma_req->fused_pair->state = RDMA_REQUEST_STATE_EXECUTING;
2336 spdk_nvmf_request_exec(&rdma_req->fused_pair->req);
2337 rdma_req->fused_pair->fused_pair = NULL;
2338 rdma_req->fused_pair = NULL;
2340 rdma_req->state = RDMA_REQUEST_STATE_EXECUTING;
2341 spdk_nvmf_request_exec(&rdma_req->req);
2342 if (rdma_req->req.cmd->nvme_cmd.fuse == SPDK_NVME_CMD_FUSE_FIRST) {
2343 assert(rdma_req->fused_pair != NULL);
2344 assert(rdma_req->fused_pair->fused_pair != NULL);
2345 rdma_req->fused_pair->state = RDMA_REQUEST_STATE_EXECUTING;
2346 spdk_nvmf_request_exec(&rdma_req->fused_pair->req);
2347 rdma_req->fused_pair->fused_pair = NULL;
2348 rdma_req->fused_pair = NULL;
2353 (uintptr_t)rdma_req, (uintptr_t)rqpair);
2359 (uintptr_t)rdma_req, (uintptr_t)rqpair);
2361 rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
2362 STAILQ_INSERT_TAIL(&rqpair->pending_rdma_write_queue, rdma_req, state_link);
2363 rdma_req->state = RDMA_REQUEST_STATE_DATA_TRANSFER_TO_HOST_PENDING;
2365 STAILQ_INSERT_TAIL(&rqpair->pending_rdma_send_queue, rdma_req, state_link);
2366 rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE_PENDING;
2368 if (spdk_unlikely(rdma_req->req.dif_enabled)) {
2370 rdma_req->req.length = rdma_req->req.dif.orig_length;
2372 if (rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST) {
2375 num_blocks = SPDK_CEIL_DIV(rdma_req->req.dif.elba_length, rdma_req->req.dif.dif_ctx.block_size);
2376 if (!rdma_req->req.stripped_data) {
2377 rc = spdk_dif_verify(rdma_req->req.iov, rdma_req->req.iovcnt, num_blocks,
2378 &rdma_req->req.dif.dif_ctx, &error_blk);
2380 rc = spdk_dif_verify_copy(rdma_req->req.stripped_data->iov,
2381 rdma_req->req.stripped_data->iovcnt,
2382 rdma_req->req.iov, rdma_req->req.iovcnt, num_blocks,
2383 &rdma_req->req.dif.dif_ctx, &error_blk);
2386 struct spdk_nvme_cpl *rsp = &rdma_req->req.rsp->nvme_cpl;
2392 STAILQ_REMOVE(&rqpair->pending_rdma_write_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
2393 STAILQ_INSERT_TAIL(&rqpair->pending_rdma_send_queue, rdma_req, state_link);
2394 rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE_PENDING;
2401 (uintptr_t)rdma_req, (uintptr_t)rqpair);
2403 if (rdma_req != STAILQ_FIRST(&rqpair->pending_rdma_write_queue)) {
2407 if ((rqpair->current_send_depth + rdma_req->num_outstanding_data_wr + 1) >
2422 rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
2426 (uintptr_t)rdma_req, (uintptr_t)rqpair);
2428 if (rdma_req != STAILQ_FIRST(&rqpair->pending_rdma_send_queue)) {
2446 rdma_req->state = RDMA_REQUEST_STATE_READY_TO_COMPLETE;
2450 (uintptr_t)rdma_req, (uintptr_t)rqpair);
2451 rc = request_transfer_out(&rdma_req->req, &data_posted);
2454 rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
2456 rdma_req->state = data_posted ? RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST :
2462 (uintptr_t)rdma_req, (uintptr_t)rqpair);
2468 (uintptr_t)rdma_req, (uintptr_t)rqpair);
2474 (uintptr_t)rdma_req, (uintptr_t)rqpair, rqpair->qpair.queue_depth);
2476 rqpair->poller->stat.request_latency += spdk_get_ticks() - rdma_req->receive_tsc;
2477 _nvmf_rdma_request_free(rdma_req, rtransport);
2485 if (rdma_req->state != prev_state) {
2488 } while (rdma_req->state != prev_state);
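The request_process matches (2090-2488) form the per-request state machine: each pass records prev_state, runs one switch arm, and loops until a pass makes no state change, so a single call can carry a request through several states and naturally stalls wherever a buffer, queue slot, or send-depth credit is unavailable. A toy skeleton of that driver, with an invented four-state enum standing in for the RDMA_REQUEST_STATE_* set, is:

	#include <stdbool.h>
	#include <stdio.h>

	enum req_state { STATE_NEW, STATE_NEED_BUFFER, STATE_READY, STATE_COMPLETED };

	struct toy_req {
		enum req_state state;
	};

	/* Returns true once the request reached a terminal state in this pass,
	 * mirroring the do/while (state != prev_state) driver in the listing. */
	static bool process(struct toy_req *req)
	{
		enum req_state prev_state;

		do {
			prev_state = req->state;

			switch (req->state) {
			case STATE_NEW:
				/* e.g. parse the command, then go look for a buffer */
				req->state = STATE_NEED_BUFFER;
				break;
			case STATE_NEED_BUFFER:
				/* if no buffer were available the state would stay put,
				 * the loop would exit, and a later pass would retry */
				req->state = STATE_READY;
				break;
			case STATE_READY:
				req->state = STATE_COMPLETED;
				break;
			case STATE_COMPLETED:
				break;
			}
		} while (req->state != prev_state);

		return req->state == STATE_COMPLETED;
	}

	int main(void)
	{
		struct toy_req req = { .state = STATE_NEW };
		printf("done=%d final_state=%d\n", process(&req), req.state);
		return 0;
	}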
3384 struct spdk_nvmf_rdma_request *rdma_req, *req_tmp;
3388 STAILQ_FOREACH_SAFE(rdma_req, &rqpair->pending_rdma_send_queue, state_link, req_tmp) {
3389 if (nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
3395 STAILQ_FOREACH_SAFE(rdma_req, &rqpair->pending_rdma_read_queue, state_link, req_tmp) {
3396 if (rdma_req->state != RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING) {
3402 if (nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
3408 STAILQ_FOREACH_SAFE(rdma_req, &rqpair->pending_rdma_write_queue, state_link, req_tmp) {
3409 if (nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
3416 rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
3417 if (nvmf_rdma_request_process(rtransport, rdma_req) == false && drain == false) {
3424 rdma_req = STAILQ_FIRST(&resources->free_queue);
3426 rdma_req->recv = STAILQ_FIRST(&resources->incoming_queue);
3430 rdma_req->req.qpair = &rdma_req->recv->qpair->qpair;
3431 rdma_req->recv->qpair->qd++;
3436 rdma_req->receive_tsc = rdma_req->recv->receive_tsc;
3437 rdma_req->state = RDMA_REQUEST_STATE_NEW;
3438 if (nvmf_rdma_request_process(rtransport, rdma_req) == false) {
3452 struct spdk_nvmf_rdma_request *rdma_req;
3455 rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
3456 if (nvmf_rdma_request_process(rtransport, rdma_req) == false) {
4411 struct spdk_nvmf_rdma_request *rdma_req = SPDK_CONTAINEROF(req, struct spdk_nvmf_rdma_request, req);
4414 struct spdk_nvmf_rdma_qpair *rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair,
4422 if (rqpair->srq && rdma_req->recv) {
4426 spdk_rdma_provider_srq_queue_recv_wrs(rqpair->srq, &rdma_req->recv->wr);
4433 _nvmf_rdma_request_free(rdma_req, rtransport);
4442 struct spdk_nvmf_rdma_request *rdma_req = SPDK_CONTAINEROF(req,
4444 struct spdk_nvmf_rdma_qpair *rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair,
4449 rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
4452 rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
4455 nvmf_rdma_request_process(rtransport, rdma_req);
4494 nvmf_rdma_req_is_completing(struct spdk_nvmf_rdma_request *rdma_req)
4496 return rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST ||
4497 rdma_req->state == RDMA_REQUEST_STATE_COMPLETING;
4679 struct spdk_nvmf_rdma_request *rdma_req;
4718 rdma_req = SPDK_CONTAINEROF(rdma_wr, struct spdk_nvmf_rdma_request, rsp_wr);
4719 rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
4724 assert(nvmf_rdma_req_is_completing(rdma_req));
4727 rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
4729 assert(rqpair->current_send_depth >= (uint32_t)rdma_req->num_outstanding_data_wr + 1);
4730 rqpair->current_send_depth -= rdma_req->num_outstanding_data_wr + 1;
4731 rdma_req->num_outstanding_data_wr = 0;
4733 nvmf_rdma_request_process(rtransport, rdma_req);
4775 rdma_req = SPDK_CONTAINEROF(rdma_wr, struct spdk_nvmf_rdma_request, data_wr);
4776 rqpair = SPDK_CONTAINEROF(rdma_req->req.qpair, struct spdk_nvmf_rdma_qpair, qpair);
4778 assert(rdma_req->num_outstanding_data_wr > 0);
4781 rdma_req->num_outstanding_data_wr--;
4785 /* wait for all outstanding reads associated with the same rdma_req to complete before proceeding. */
4786 if (rdma_req->num_outstanding_data_wr == 0) {
4787 if (rdma_req->num_remaining_data_wr) {
4789 nvmf_rdma_request_reset_transfer_in(rdma_req, rtransport);
4790 rdma_req->state = RDMA_REQUEST_STATE_DATA_TRANSFER_TO_CONTROLLER_PENDING;
4791 nvmf_rdma_request_process(rtransport, rdma_req);
4794 rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
4795 nvmf_rdma_request_process(rtransport, rdma_req);
4802 if (rdma_req->data.wr.opcode == IBV_WR_RDMA_READ) {
4804 if (rdma_req->num_outstanding_data_wr == 0) {
4805 if (rdma_req->num_remaining_data_wr) {
4808 rdma_req->num_remaining_data_wr = 0;
4809 STAILQ_REMOVE(&rqpair->pending_rdma_read_queue, rdma_req, spdk_nvmf_rdma_request, state_link);
4811 rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
4812 nvmf_rdma_request_process(rtransport, rdma_req);
5116 struct spdk_nvmf_rdma_request *rdma_req_to_abort = NULL, *rdma_req;
5126 rdma_req = &rqpair->resources->reqs[i];
5130 if (rdma_req->state != RDMA_REQUEST_STATE_FREE && rdma_req->req.cmd->nvme_cmd.cid == cid &&
5131 rdma_req->req.qpair == qpair) {
5132 rdma_req_to_abort = rdma_req;