Lines Matching defs:wr
212 struct ibv_recv_wr wr;
227 struct ibv_send_wr wr;
248 struct ibv_send_wr wr;
596 nvmf_data = SPDK_CONTAINEROF(data_wr, struct spdk_nvmf_rdma_request_data, wr);
600 if (data_wr != &rdma_req->data.wr) {
606 data_wr = (!next_send_wr || next_send_wr == &rdma_req->rsp.wr) ? NULL : next_send_wr;
628 rdma_req->data.wr.next = NULL;
629 rdma_req->rsp.wr.next = NULL;
640 SPDK_ERRLOG("\t\tRequest recv wr_id%lu\n", req->recv->wr.wr_id);
749 rdma_recv->wr.num_sge = 1;
760 rdma_recv->wr.num_sge++;
763 rdma_recv->wr.wr_id = (uintptr_t)&rdma_recv->rdma_wr;
764 rdma_recv->wr.sg_list = rdma_recv->sgl;
766 spdk_rdma_provider_srq_queue_recv_wrs(srq, &rdma_recv->wr);
768 spdk_rdma_provider_qp_queue_recv_wrs(qp, &rdma_recv->wr);
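Lines 749-768 build one receive WR per capsule slot and hand it to either the shared receive queue or the qpair's own queue. The spdk_rdma_provider_*_queue_recv_wrs wrappers batch WRs for a later flush; stripped of the batching, the post itself reduces to the raw verbs calls below (post_recv is a hypothetical helper):

#include <infiniband/verbs.h>

/* Post a recv WR (or a chain of them) to the SRQ when one is configured,
 * otherwise to the qpair's own receive queue. */
static int
post_recv(struct ibv_qp *qp, struct ibv_srq *srq, struct ibv_recv_wr *wr)
{
    struct ibv_recv_wr *bad_wr = NULL;

    if (srq != NULL) {
        return ibv_post_srq_recv(srq, wr, &bad_wr);
    }
    return ibv_post_recv(qp, wr, &bad_wr);
}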
797 rdma_req->rsp.wr.wr_id = (uintptr_t)&rdma_req->rsp_wr;
798 rdma_req->rsp.wr.next = NULL;
799 rdma_req->rsp.wr.opcode = IBV_WR_SEND;
800 rdma_req->rsp.wr.send_flags = IBV_SEND_SIGNALED;
801 rdma_req->rsp.wr.sg_list = rdma_req->rsp.sgl;
802 rdma_req->rsp.wr.num_sge = SPDK_COUNTOF(rdma_req->rsp.sgl);
806 rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data_wr;
807 rdma_req->data.wr.next = NULL;
808 rdma_req->data.wr.send_flags = IBV_SEND_SIGNALED;
809 rdma_req->data.wr.sg_list = rdma_req->data.sgl;
810 rdma_req->data.wr.num_sge = SPDK_COUNTOF(rdma_req->data.sgl);
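Lines 797-810 are the one-time, per-request initialization of the two send WRs every request owns: a signaled SEND for the NVMe completion, and a data WR whose opcode is left unset until the transfer direction is known. A minimal sketch of that setup, with req_stub and the *_cookie fields as simplified stand-ins for struct spdk_nvmf_rdma_request and its rsp_wr/data_wr members:

#include <infiniband/verbs.h>
#include <stdint.h>

/* Hypothetical, flattened stand-in for struct spdk_nvmf_rdma_request. */
struct req_stub {
    struct { struct ibv_send_wr wr; struct ibv_sge sgl[1]; } rsp;
    struct { struct ibv_send_wr wr; struct ibv_sge sgl[16]; } data;
    uint64_t rsp_cookie;  /* stand-in for rdma_req->rsp_wr */
    uint64_t data_cookie; /* stand-in for rdma_req->data_wr */
};

static void
init_request_wrs(struct req_stub *req)
{
    /* Completion path: one signaled SEND carrying the NVMe response. */
    req->rsp.wr.wr_id = (uintptr_t)&req->rsp_cookie;
    req->rsp.wr.next = NULL;
    req->rsp.wr.opcode = IBV_WR_SEND;
    req->rsp.wr.send_flags = IBV_SEND_SIGNALED;
    req->rsp.wr.sg_list = req->rsp.sgl;
    req->rsp.wr.num_sge = sizeof(req->rsp.sgl) / sizeof(req->rsp.sgl[0]);

    /* Data path: no opcode yet; nvmf_rdma_setup_wr (line 1395) picks
     * RDMA_READ or RDMA_WRITE once the transfer direction is known. */
    req->data.wr.wr_id = (uintptr_t)&req->data_cookie;
    req->data.wr.next = NULL;
    req->data.wr.send_flags = IBV_SEND_SIGNALED;
    req->data.wr.sg_list = req->data.sgl;
    req->data.wr.num_sge = sizeof(req->data.sgl) / sizeof(req->data.sgl[0]);
}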
894 spdk_rdma_provider_srq_queue_recv_wrs(rqpair->srq, &rdma_recv->wr);
1064 /* Append the given recv wr structure to the resource struct's outstanding recvs list. */
1065 /* This function accepts either a single wr or the first wr in a linked list. */
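A minimal sketch of the splice those two comments describe, assuming a hypothetical (first, last) pair in place of the real struct spdk_nvmf_rdma_resources bookkeeping:

#include <infiniband/verbs.h>
#include <stddef.h>

/* Hypothetical holder for the outstanding-recv list. */
struct recv_list {
    struct ibv_recv_wr *first;
    struct ibv_recv_wr *last;
};

static void
queue_recv_wrs(struct recv_list *list, struct ibv_recv_wr *first)
{
    struct ibv_recv_wr *last = first;

    /* Accept either a single WR or the head of a chain: walk to the tail. */
    while (last->next != NULL) {
        last = last->next;
    }

    if (list->first == NULL) {
        list->first = first;
    } else {
        list->last->next = first;
    }
    list->last = last;
}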
1131 struct ibv_send_wr *wr;
1140 wr = rdma_req->transfer_wr;
1143 wr = wr->next;
1146 rdma_req->remaining_tranfer_in_wrs = wr->next;
1150 wr->next = NULL;
1185 nvmf_rdma_qpair_queue_recv_wrs(rqpair, &rdma_req->recv->wr);
1195 first = &rdma_req->rsp.wr;
1214 /* +1 for the rsp wr */
1395 nvmf_rdma_setup_wr(struct ibv_send_wr *wr, struct ibv_send_wr *next,
1399 wr->opcode = IBV_WR_RDMA_WRITE;
1400 wr->send_flags = 0;
1401 wr->next = next;
1403 wr->opcode = IBV_WR_RDMA_READ;
1404 wr->send_flags = IBV_SEND_SIGNALED;
1405 wr->next = NULL;
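Taken together, the fragments at 1399-1405 give both halves of nvmf_rdma_setup_wr: controller-to-host transfers (NVMe reads) become unsignaled RDMA_WRITEs chained ahead of the next WR, while host-to-controller transfers (NVMe writes) become signaled, terminal RDMA_READs. A plausible reconstruction, with xfer_dir standing in for enum spdk_nvme_data_transfer:

#include <infiniband/verbs.h>
#include <assert.h>

/* Stand-in for enum spdk_nvme_data_transfer. */
enum xfer_dir { XFER_CONTROLLER_TO_HOST, XFER_HOST_TO_CONTROLLER };

static void
setup_wr(struct ibv_send_wr *wr, struct ibv_send_wr *next, enum xfer_dir xfer)
{
    if (xfer == XFER_CONTROLLER_TO_HOST) {
        /* NVMe read: RDMA_WRITE the data out, unsignaled, chained so the
         * response SEND follows in the same ibv_post_send() call. */
        wr->opcode = IBV_WR_RDMA_WRITE;
        wr->send_flags = 0;
        wr->next = next;
    } else {
        /* NVMe write: RDMA_READ the data in; signaled and terminal, since
         * the request can only proceed once the read completes. */
        assert(xfer == XFER_HOST_TO_CONTROLLER);
        wr->opcode = IBV_WR_RDMA_READ;
        wr->send_flags = IBV_SEND_SIGNALED;
        wr->next = NULL;
    }
}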
1434 nvmf_rdma_setup_wr(&current_data_wr->wr, &work_requests[i]->wr, rdma_req->req.xfer);
1435 current_data_wr->wr.next = &work_requests[i]->wr;
1437 current_data_wr->wr.sg_list = current_data_wr->sgl;
1438 current_data_wr->wr.wr_id = rdma_req->data.wr.wr_id;
1441 nvmf_rdma_setup_wr(&current_data_wr->wr, &rdma_req->rsp.wr, rdma_req->req.xfer);
1449 struct ibv_send_wr *wr = &rdma_req->data.wr;
1452 wr->wr.rdma.rkey = sgl->keyed.key;
1453 wr->wr.rdma.remote_addr = sgl->address;
1454 nvmf_rdma_setup_wr(wr, &rdma_req->rsp.wr, rdma_req->req.xfer);
1460 struct ibv_send_wr *wr = &rdma_req->data.wr;
1467 wr->wr.rdma.rkey = sgl->keyed.key;
1468 wr->wr.rdma.remote_addr = sgl->address + remote_addr_offset;
1469 for (j = 0; j < wr->num_sge; ++j) {
1470 remote_addr_offset += wr->sg_list[j].length;
1472 wr = wr->next;
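The loop at 1460-1472 walks the chained data WRs covering one keyed SGL, giving each WR the same rkey while advancing the remote address past the bytes the earlier WRs' scatter-gather entries already cover. As a standalone sketch:

#include <infiniband/verbs.h>
#include <stdint.h>

/* Give num_wrs chained RDMA WRs the same rkey, with each WR's remote
 * address offset by the lengths of all preceding sg_list entries. */
static void
set_remote_addresses(struct ibv_send_wr *wr, uint32_t num_wrs,
                     uint32_t rkey, uint64_t base)
{
    uint64_t offset = 0;
    uint32_t i;
    int j;

    for (i = 0; i < num_wrs; ++i, wr = wr->next) {
        wr->wr.rdma.rkey = rkey;
        wr->wr.rdma.remote_addr = base + offset;
        for (j = 0; j < wr->num_sge; ++j) {
            offset += wr->sg_list[j].length;
        }
    }
}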
1479 struct ibv_send_wr *wr,
1488 wr->num_sge = 0;
1490 while (total_length && wr->num_sge < SPDK_NVMF_MAX_SGL_ENTRIES) {
1498 sg_ele = &wr->sg_list[wr->num_sge];
1504 SPDK_DEBUGLOG(rdma, "sge[%d] %p addr 0x%"PRIx64", len %u\n", wr->num_sge, sg_ele, sg_ele->addr,
1508 wr->num_sge++;
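Lines 1488-1508 show the basic SGL fill: consume total_length into wr->sg_list entries until either the length is exhausted or the WR's entry budget runs out. A simplified sketch, with buf_cursor as a hypothetical stand-in for SPDK's iobuf and memory-translation machinery (it models one contiguous registered region):

#include <infiniband/verbs.h>
#include <stdint.h>

#define MAX_SGL_ENTRIES 16 /* stand-in for SPDK_NVMF_MAX_SGL_ENTRIES */

/* Hypothetical buffer cursor over a single registered region. */
struct buf_cursor {
    uint64_t addr; /* next unused address in the region */
    uint32_t lkey; /* the region's local key */
};

static int
fill_wr_sgl(struct ibv_send_wr *wr, struct buf_cursor *buf,
            uint32_t total_length, uint32_t chunk_len)
{
    struct ibv_sge *sg_ele;
    uint32_t len;

    wr->num_sge = 0;
    while (total_length && wr->num_sge < MAX_SGL_ENTRIES) {
        len = total_length < chunk_len ? total_length : chunk_len;

        sg_ele = &wr->sg_list[wr->num_sge];
        sg_ele->addr = buf->addr;
        sg_ele->length = len;
        sg_ele->lkey = buf->lkey;

        buf->addr += len;
        total_length -= len;
        wr->num_sge++;
    }

    return total_length ? -1 : 0; /* leftover length means SGL overflow */
}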
1527 struct ibv_send_wr *wr,
1554 wr->num_sge = 0;
1556 while (total_length && (num_extra_wrs || wr->num_sge < SPDK_NVMF_MAX_SGL_ENTRIES)) {
1564 sg_ele = &wr->sg_list[wr->num_sge];
1568 if (wr->num_sge >= SPDK_NVMF_MAX_SGL_ENTRIES) {
1569 if (num_extra_wrs > 0 && wr->next) {
1570 wr = wr->next;
1571 wr->num_sge = 0;
1572 sg_ele = &wr->sg_list[wr->num_sge];
1582 SPDK_DEBUGLOG(rdma, "sge[%d] %p addr 0x%"PRIx64", len %u\n", wr->num_sge, sg_ele,
1590 wr->num_sge++;
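The DIF-aware fill at 1554-1590 adds one step to the loop above: when a WR's sg_list is full and extra WRs remain, hop to the next pre-chained WR and reset its num_sge. That overflow step in isolation (next_free_sge is a hypothetical helper):

#include <infiniband/verbs.h>
#include <stdint.h>

#define MAX_SGL_ENTRIES 16 /* stand-in for SPDK_NVMF_MAX_SGL_ENTRIES */

/* Return the next free SGE slot, moving to the pre-chained next WR when the
 * current one is full; NULL means the chain is out of room. */
static struct ibv_sge *
next_free_sge(struct ibv_send_wr **wr_p, uint32_t *num_extra_wrs)
{
    struct ibv_send_wr *wr = *wr_p;

    if (wr->num_sge >= MAX_SGL_ENTRIES) {
        if (*num_extra_wrs == 0 || wr->next == NULL) {
            return NULL;
        }
        wr = wr->next;
        wr->num_sge = 0;
        (*num_extra_wrs)--;
        *wr_p = wr;
    }
    return &wr->sg_list[wr->num_sge];
}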
1647 struct ibv_send_wr *wr = &rdma_req->data.wr;
1655 /* rdma wr specifics */
1696 rc = nvmf_rdma_fill_wr_sgl_with_dif(device, rdma_req, wr, length, num_wrs - 1);
1705 rc = nvmf_rdma_fill_wr_sgl(device, rdma_req, wr, length);
1791 current_wr = &rdma_req->data.wr;
1817 current_wr->wr.rdma.rkey = desc->keyed.key;
1818 current_wr->wr.rdma.remote_addr = desc->address;
1828 rdma_req->rsp.wr.opcode = IBV_WR_SEND_WITH_INV;
1829 rdma_req->rsp.wr.imm_data = desc->keyed.key;
1872 rdma_req->rsp.wr.opcode = IBV_WR_SEND_WITH_INV;
1873 rdma_req->rsp.wr.imm_data = sgl->keyed.key;
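At 1828-1829 and 1872-1873 the response SEND is upgraded to SEND_WITH_INV so completing the command also invalidates the host's memory key. In recent rdma-core the rkey to invalidate shares a union with imm_data, which is the field the code stores through; a sketch:

#include <infiniband/verbs.h>
#include <stdint.h>

/* Upgrade a prepared response SEND to SEND_WITH_INV, invalidating the
 * host-provided rkey as part of the completion. */
static void
arm_send_with_invalidate(struct ibv_send_wr *rsp_wr, uint32_t rkey)
{
    rsp_wr->opcode = IBV_WR_SEND_WITH_INV;
    rsp_wr->invalidate_rkey = rkey; /* aliases imm_data in the union */
}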
2158 rdma_req->transfer_wr = &rdma_req->data.wr;
2172 rdma_req->rsp.wr.opcode = IBV_WR_SEND;
2173 rdma_req->rsp.wr.imm_data = 0;
2410 * +1 since each request has an additional wr in the resp. */
2931 SPDK_ERRLOG("transport wr pool count is %zu but should be %u\n",
4426 spdk_rdma_provider_srq_queue_recv_wrs(rqpair->srq, &rdma_req->recv->wr);
4586 /* this request was handled by an earlier wr. i.e. we were performing an nvme read. */
4630 /* bad wr always points to the first wr that failed. */
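That comment restates the ibv_post_send() contract: on failure, the bad_wr out-parameter points at the first WR that was not posted, and nothing after it was submitted either. A minimal error-handling sketch around that contract (post_send_chain is hypothetical):

#include <infiniband/verbs.h>
#include <stdio.h>

static int
post_send_chain(struct ibv_qp *qp, struct ibv_send_wr *first)
{
    struct ibv_send_wr *bad_wr = NULL;
    int rc = ibv_post_send(qp, first, &bad_wr);

    if (rc != 0) {
        /* bad_wr points to the first wr that failed; it and every
         * successor were not submitted and must be failed or retried. */
        unsigned int unsubmitted = 0;
        for (struct ibv_send_wr *wr = bad_wr; wr != NULL; wr = wr->next) {
            unsubmitted++;
        }
        fprintf(stderr, "ibv_post_send failed: rc=%d, %u WRs not posted\n",
                rc, unsubmitted);
    }
    return rc;
}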
4747 rdma_recv->wr.next = NULL;
4748 spdk_rdma_provider_srq_queue_recv_wrs(rpoller->srq, &rdma_recv->wr);
4767 rdma_recv->wr.next = NULL;
4802 if (rdma_req->data.wr.opcode == IBV_WR_RDMA_READ) {
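The final fragment (4802) is the completion path telling RDMA_READ completions apart: a finished read means the host's data has landed and the request can proceed. A skeletal CQ poll loop showing that dispatch shape (poll_cq_once is hypothetical; the real poller maps wc.wr_id back to its rdma_wr context):

#include <infiniband/verbs.h>

static int
poll_cq_once(struct ibv_cq *cq)
{
    struct ibv_wc wc[32];
    int n = ibv_poll_cq(cq, 32, wc);

    for (int i = 0; i < n; i++) {
        if (wc[i].status != IBV_WC_SUCCESS) {
            /* error path: see the bad-wr handling above */
            continue;
        }
        switch (wc[i].opcode) {
        case IBV_WC_RDMA_READ:
            /* host-to-controller data arrived; request may execute */
            break;
        case IBV_WC_SEND:
            /* response SEND (and any chained, unsignaled writes) done */
            break;
        case IBV_WC_RECV:
            /* new command capsule received */
            break;
        default:
            break;
        }
    }
    return n;
}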