Lines Matching +full:x +full:- +full:rc

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
16 * - Redistributions of source code must retain the above
20 * - Redistributions in binary form must reproduce the above
60 WARN_ON(ssk->tx_sa); in sdp_post_srcavail()
63 BUG_ON(!tx_sa->fmr || !tx_sa->fmr->fmr->lkey); in sdp_post_srcavail()
64 BUG_ON(!tx_sa->umem); in sdp_post_srcavail()
65 BUG_ON(!tx_sa->umem->chunk_list.next); in sdp_post_srcavail()
67 chunk = list_entry(tx_sa->umem->chunk_list.next, struct ib_umem_chunk, list); in sdp_post_srcavail()
68 BUG_ON(!chunk->nmap); in sdp_post_srcavail()
70 off = tx_sa->umem->offset; in sdp_post_srcavail()
71 len = tx_sa->umem->length; in sdp_post_srcavail()
73 tx_sa->bytes_sent = tx_sa->bytes_acked = 0; in sdp_post_srcavail()
75 mb = sdp_alloc_mb_srcavail(sk, len, tx_sa->fmr->fmr->lkey, off, 0); in sdp_post_srcavail()
77 return -ENOMEM; in sdp_post_srcavail()
83 ssk->tx_sa = tx_sa; in sdp_post_srcavail()
86 payload_len = MIN(tx_sa->umem->page_size - off, len); in sdp_post_srcavail()
87 payload_len = MIN(payload_len, ssk->xmit_size_goal - sizeof(struct sdp_srcah)); in sdp_post_srcavail()
88 payload_pg = sg_page(&chunk->page_list[0]); in sdp_post_srcavail()
91 sdp_dbg_data(sk, "payload: off: 0x%x, pg: %p, len: 0x%x\n", in sdp_post_srcavail()
94 mb_fill_page_desc(mb, mb_shinfo(mb)->nr_frags, in sdp_post_srcavail()
97 mb->len += payload_len; in sdp_post_srcavail()
98 mb->data_len = payload_len; in sdp_post_srcavail()
99 mb->truesize += payload_len; in sdp_post_srcavail()
100 // sk->sk_wmem_queued += payload_len; in sdp_post_srcavail()
101 // sk->sk_forward_alloc -= payload_len; in sdp_post_srcavail()
105 ssk->write_seq += payload_len; in sdp_post_srcavail()
106 SDP_SKB_CB(mb)->end_seq += payload_len; in sdp_post_srcavail()
108 tx_sa->bytes_sent = tx_sa->umem->length; in sdp_post_srcavail()
109 tx_sa->bytes_acked = payload_len; in sdp_post_srcavail()
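The matches at lines 86-87 above clamp the SrcAvail inline payload to whatever is left of the first page and to the transmit size goal minus the SrcAvail header. A minimal userspace sketch of that arithmetic; the sizes below (including sizeof(struct sdp_srcah) taken as 24) are made up for the example:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        /* Stand-ins for umem->offset, umem->length, ssk->xmit_size_goal
         * and sizeof(struct sdp_srcah); all values are assumed. */
        unsigned long off = 300, len = 100000;
        unsigned long xmit_size_goal = 8192, srcah_size = 24;
        unsigned long page_size = 4096;

        unsigned long payload_len = MIN(page_size - off, len);
        payload_len = MIN(payload_len, xmit_size_goal - srcah_size);

        printf("inline payload: %lu bytes\n", payload_len); /* 3796 with these numbers */
        return 0;
    }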
121 sdp_dbg_data(ssk->socket, "Posting srcavail cancel\n"); in sdp_post_srcavail_cancel()
128 schedule_delayed_work(&ssk->srcavail_cancel_work, in sdp_post_srcavail_cancel()
138 struct socket *sk = ssk->socket; in srcavail_cancel_timeout()
144 sdp_set_error(sk, -ECONNRESET); in srcavail_cancel_timeout()
145 wake_up(&ssk->wq); in srcavail_cancel_timeout()
153 struct socket *sk = ssk->socket; in sdp_wait_rdmardcompl()
157 struct tx_srcavail_state *tx_sa = ssk->tx_sa; in sdp_wait_rdmardcompl()
162 while (ssk->qp_active) { in sdp_wait_rdmardcompl()
163 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); in sdp_wait_rdmardcompl()
166 err = -ETIME; in sdp_wait_rdmardcompl()
167 tx_sa->abort_flags |= TX_SA_TIMEDOUT; in sdp_wait_rdmardcompl()
173 else if (tx_sa->bytes_acked > tx_sa->bytes_sent) { in sdp_wait_rdmardcompl()
174 err = -EINVAL; in sdp_wait_rdmardcompl()
176 tx_sa->abort_flags |= TX_SA_ERROR; in sdp_wait_rdmardcompl()
180 if (tx_sa->abort_flags & TX_SA_SENDSM) { in sdp_wait_rdmardcompl()
183 err = -EAGAIN; in sdp_wait_rdmardcompl()
189 err = -EINTR; in sdp_wait_rdmardcompl()
191 tx_sa->abort_flags |= TX_SA_INTRRUPTED; in sdp_wait_rdmardcompl()
195 if (ssk->rx_sa && (tx_sa->bytes_acked < tx_sa->bytes_sent)) { in sdp_wait_rdmardcompl()
196 sdp_dbg_data(sk, "Crossing SrcAvail - aborting this\n"); in sdp_wait_rdmardcompl()
197 tx_sa->abort_flags |= TX_SA_CROSS_SEND; in sdp_wait_rdmardcompl()
199 err = -ETIME; in sdp_wait_rdmardcompl()
207 tx_sa->abort_flags && in sdp_wait_rdmardcompl()
208 ssk->rx_sa && in sdp_wait_rdmardcompl()
209 (tx_sa->bytes_acked < tx_sa->bytes_sent) && in sdp_wait_rdmardcompl()
211 sdp_dbg_data(ssk->socket, "woke up sleepers\n"); in sdp_wait_rdmardcompl()
215 if (tx_sa->bytes_acked == tx_sa->bytes_sent) in sdp_wait_rdmardcompl()
219 vm_wait -= current_timeo; in sdp_wait_rdmardcompl()
222 (current_timeo -= vm_wait) < 0) in sdp_wait_rdmardcompl()
229 finish_wait(sk->sk_sleep, &wait); in sdp_wait_rdmardcompl()
231 sdp_dbg_data(sk, "Finished waiting - RdmaRdCompl: %d/%d bytes, flags: 0x%x\n", in sdp_wait_rdmardcompl()
232 tx_sa->bytes_acked, tx_sa->bytes_sent, tx_sa->abort_flags); in sdp_wait_rdmardcompl()
234 if (!ssk->qp_active) { in sdp_wait_rdmardcompl()
236 return -EINVAL; in sdp_wait_rdmardcompl()
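The loop above follows the usual kernel wait pattern: prepare_to_wait() on sk->sk_sleep, re-check the condition, sleep with a timeout, and translate timeout, signal or peer abort into an error code. A stripped-down sketch of that pattern only; the waitqueue, flag and timeout are parameters here, not the driver's own fields:

    #include <linux/wait.h>
    #include <linux/sched.h>
    #include <linux/errno.h>

    /* Sleep until *done becomes true, a signal arrives or 'timeo' expires.
     * Returns 0, -EINTR or -ETIME. */
    static int wait_for_flag(wait_queue_head_t *wq, int *done, long timeo)
    {
        DEFINE_WAIT(wait);
        int err = 0;

        while (!*done) {
            prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
            if (*done)
                break;
            if (signal_pending(current)) {
                err = -EINTR;
                break;
            }
            timeo = schedule_timeout(timeo);
            if (!timeo) {
                err = -ETIME;
                break;
            }
        }
        finish_wait(wq, &wait);
        return err;
    }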
243 struct socket *sk = ssk->socket; in sdp_wait_rdma_wr_finished()
249 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); in sdp_wait_rdma_wr_finished()
251 if (!ssk->tx_ring.rdma_inflight->busy) { in sdp_wait_rdma_wr_finished()
256 if (!ssk->qp_active) { in sdp_wait_rdma_wr_finished()
271 !ssk->tx_ring.rdma_inflight->busy); in sdp_wait_rdma_wr_finished()
273 sdp_dbg_data(ssk->socket, "woke up sleepers\n"); in sdp_wait_rdma_wr_finished()
278 finish_wait(sk->sk_sleep, &wait); in sdp_wait_rdma_wr_finished()
287 int copied = rx_sa->used - rx_sa->reported; in sdp_post_rdma_rd_compl()
289 if (rx_sa->used <= rx_sa->reported) in sdp_post_rdma_rd_compl()
292 mb = sdp_alloc_mb_rdmardcompl(ssk->socket, copied, 0); in sdp_post_rdma_rd_compl()
294 rx_sa->reported += copied; in sdp_post_rdma_rd_compl()
313 sdp_dbg_data(sk, "updating consumed 0x%x bytes from iov\n", len); in sdp_update_iov_used()
315 if (iov->iov_len) { in sdp_update_iov_used()
316 int copy = min_t(unsigned int, iov->iov_len, len); in sdp_update_iov_used()
317 len -= copy; in sdp_update_iov_used()
318 iov->iov_len -= copy; in sdp_update_iov_used()
319 iov->iov_base += copy; in sdp_update_iov_used()
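sdp_update_iov_used() above simply advances the iovec by the number of bytes the transfer consumed. A self-contained userspace sketch of the same advance; iov_consume is a hypothetical helper written for this illustration, not part of the driver:

    #include <stdio.h>
    #include <stddef.h>
    #include <sys/uio.h>

    /* Consume 'len' bytes from the front of an iovec array. */
    static void iov_consume(struct iovec *iov, int iovcnt, size_t len)
    {
        int i;
        for (i = 0; i < iovcnt && len; i++) {
            size_t copy = iov[i].iov_len < len ? iov[i].iov_len : len;
            len -= copy;
            iov[i].iov_len -= copy;
            iov[i].iov_base = (char *)iov[i].iov_base + copy;
        }
    }

    int main(void)
    {
        char a[8], b[8];
        struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };

        iov_consume(iov, 2, 10);        /* eat 10 bytes: all of a, 2 of b */
        printf("%zu %zu\n", iov[0].iov_len, iov[1].iov_len); /* prints "0 6" */
        return 0;
    }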
332 bytes += sge->length; in sge_bytes()
334 sge_cnt--; in sge_bytes()
341 struct socket *sk = ssk->socket; in sdp_handle_sendsm()
344 spin_lock_irqsave(&ssk->tx_sa_lock, flags); in sdp_handle_sendsm()
346 if (!ssk->tx_sa) { in sdp_handle_sendsm()
351 if (ssk->tx_sa->mseq > mseq_ack) { in sdp_handle_sendsm()
353 "SendSM mseq_ack: 0x%x, SrcAvail mseq: 0x%x\n", in sdp_handle_sendsm()
354 mseq_ack, ssk->tx_sa->mseq); in sdp_handle_sendsm()
358 sdp_dbg_data(sk, "Got SendSM - aborting SrcAvail\n"); in sdp_handle_sendsm()
360 ssk->tx_sa->abort_flags |= TX_SA_SENDSM; in sdp_handle_sendsm()
361 cancel_delayed_work(&ssk->srcavail_cancel_work); in sdp_handle_sendsm()
363 wake_up(sk->sk_sleep); in sdp_handle_sendsm()
367 spin_unlock_irqrestore(&ssk->tx_sa_lock, flags); in sdp_handle_sendsm()
373 struct socket *sk = ssk->socket; in sdp_handle_rdma_read_compl()
376 sdp_prf1(sk, NULL, "RdmaRdCompl ssk=%p tx_sa=%p", ssk, ssk->tx_sa); in sdp_handle_rdma_read_compl()
377 sdp_dbg_data(sk, "RdmaRdCompl ssk=%p tx_sa=%p\n", ssk, ssk->tx_sa); in sdp_handle_rdma_read_compl()
379 spin_lock_irqsave(&ssk->tx_sa_lock, flags); in sdp_handle_rdma_read_compl()
383 if (!ssk->tx_sa) { in sdp_handle_rdma_read_compl()
388 if (ssk->tx_sa->mseq > mseq_ack) { in sdp_handle_rdma_read_compl()
390 "SendSM mseq_ack: 0x%x, SrcAvail mseq: 0x%x\n", in sdp_handle_rdma_read_compl()
391 mseq_ack, ssk->tx_sa->mseq); in sdp_handle_rdma_read_compl()
395 ssk->tx_sa->bytes_acked += bytes_completed; in sdp_handle_rdma_read_compl()
397 wake_up(sk->sk_sleep); in sdp_handle_rdma_read_compl()
401 spin_unlock_irqrestore(&ssk->tx_sa_lock, flags); in sdp_handle_rdma_read_compl()
413 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; in sdp_get_max_memlockable_bytes()
414 avail = lock_limit - (current->mm->locked_vm << PAGE_SHIFT); in sdp_get_max_memlockable_bytes()
416 return avail - offset; in sdp_get_max_memlockable_bytes()
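sdp_get_max_memlockable_bytes() above budgets the pinning later done by ib_umem_get() against RLIMIT_MEMLOCK minus what the process has already locked. A userspace illustration of reading that limit; the already-locked figure (current->mm->locked_vm << PAGE_SHIFT in the kernel) and the buffer offset are assumed values here:

    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
        struct rlimit rl;
        unsigned long already_locked = 0;  /* stand-in for locked_vm << PAGE_SHIFT */
        unsigned long offset = 300;        /* offset of the buffer in its first page */

        if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0)
            return 1;

        if (rl.rlim_cur == RLIM_INFINITY) {
            printf("memlock: unlimited\n");
        } else {
            unsigned long avail = (unsigned long)rl.rlim_cur - already_locked;
            printf("memlock cur=%lu, usable for this buffer=%lu\n",
                   (unsigned long)rl.rlim_cur, avail - offset);
        }
        return 0;
    }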
428 int rc = 0; in sdp_alloc_fmr() local
432 sdp_dbg_data(sk, "len:0x%lx > FMR_SIZE: 0x%lx\n", in sdp_alloc_fmr()
439 sdp_dbg_data(sk, "len:0x%lx > RLIMIT_MEMLOCK available: 0x%lx\n", in sdp_alloc_fmr()
444 sdp_dbg_data(sk, "user buf: %p, len:0x%lx max_lockable_bytes: 0x%lx\n", in sdp_alloc_fmr()
447 umem = ib_umem_get(&sdp_sk(sk)->context, (unsigned long)uaddr, len, in sdp_alloc_fmr()
451 rc = PTR_ERR(umem); in sdp_alloc_fmr()
452 sdp_warn(sk, "Error doing umem_get 0x%lx bytes: %d\n", len, rc); in sdp_alloc_fmr()
453 sdp_warn(sk, "RLIMIT_MEMLOCK: 0x%lx[cur] 0x%lx[max] CAP_IPC_LOCK: %d\n", in sdp_alloc_fmr()
454 current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur, in sdp_alloc_fmr()
455 current->signal->rlim[RLIMIT_MEMLOCK].rlim_max, in sdp_alloc_fmr()
460 sdp_dbg_data(sk, "umem->offset = 0x%x, length = 0x%lx\n", in sdp_alloc_fmr()
461 umem->offset, umem->length); in sdp_alloc_fmr()
469 dev = sdp_sk(sk)->ib_device; in sdp_alloc_fmr()
470 list_for_each_entry(chunk, &umem->chunk_list, list) { in sdp_alloc_fmr()
471 for (j = 0; j < chunk->nmap; ++j) { in sdp_alloc_fmr()
473 &chunk->page_list[j]) >> PAGE_SHIFT; in sdp_alloc_fmr()
477 &chunk->page_list[j]) + in sdp_alloc_fmr()
478 umem->page_size * k; in sdp_alloc_fmr()
484 fmr = ib_fmr_pool_map_phys(sdp_sk(sk)->sdp_dev->fmr_pool, pages, n, 0); in sdp_alloc_fmr()
505 return rc; in sdp_alloc_fmr()
510 if (!sdp_sk(sk)->qp_active) in sdp_free_fmr()
530 wr.wr.rdma.rkey = rx_sa->rkey; in sdp_post_rdma_read()
533 ssk->tx_ring.rdma_inflight = rx_sa; in sdp_post_rdma_read()
535 sge.addr = rx_sa->umem->offset; in sdp_post_rdma_read()
536 sge.length = rx_sa->umem->length; in sdp_post_rdma_read()
537 sge.lkey = rx_sa->fmr->fmr->lkey; in sdp_post_rdma_read()
539 wr.wr.rdma.remote_addr = rx_sa->vaddr + rx_sa->used; in sdp_post_rdma_read()
542 rx_sa->busy++; in sdp_post_rdma_read()
546 return ib_post_send(ssk->qp, &wr, &bad_wr); in sdp_post_rdma_read()
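sdp_post_rdma_read() above builds a single-SGE RDMA READ against the remote buffer advertised in the peer's SrcAvail. A sketch of that work request using the old-style ib_send_wr layout this file relies on (rdma fields in a union inside ib_send_wr, as on pre-4.4 kernels); every value below is a caller-supplied placeholder:

    #include <rdma/ib_verbs.h>

    /* Post one RDMA READ: pull 'len' bytes from (raddr, rkey) on the peer
     * into the locally registered buffer (laddr, lkey). */
    static int post_rdma_read(struct ib_qp *qp, u64 laddr, u32 len, u32 lkey,
                              u64 raddr, u32 rkey, u64 wr_id)
    {
        struct ib_sge sge = {
            .addr   = laddr,               /* local (registered) buffer */
            .length = len,
            .lkey   = lkey,
        };
        struct ib_send_wr wr = {
            .wr_id      = wr_id,
            .opcode     = IB_WR_RDMA_READ,
            .send_flags = IB_SEND_SIGNALED, /* ask for a completion */
            .sg_list    = &sge,
            .num_sge    = 1,
        };
        struct ib_send_wr *bad_wr;

        wr.wr.rdma.remote_addr = raddr;    /* peer buffer from the SrcAvail */
        wr.wr.rdma.rkey        = rkey;

        return ib_post_send(qp, &wr, &bad_wr);
    }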
555 int rc = 0; in sdp_rdma_to_iovec() local
559 sdp_dbg_data(ssk->socket, "preparing RDMA read." in sdp_rdma_to_iovec()
560 " len: 0x%x. buffer len: 0x%lx\n", len, iov->iov_len); in sdp_rdma_to_iovec()
564 if (len > rx_sa->len) { in sdp_rdma_to_iovec()
565 sdp_warn(sk, "len:0x%x > rx_sa->len: 0x%x\n", len, rx_sa->len); in sdp_rdma_to_iovec()
567 len = rx_sa->len; in sdp_rdma_to_iovec()
570 rc = sdp_alloc_fmr(sk, iov->iov_base, len, &rx_sa->fmr, &rx_sa->umem); in sdp_rdma_to_iovec()
571 if (rc) { in sdp_rdma_to_iovec()
572 sdp_warn(sk, "Error allocating fmr: %d\n", rc); in sdp_rdma_to_iovec()
576 rc = sdp_post_rdma_read(sk, rx_sa); in sdp_rdma_to_iovec()
577 if (unlikely(rc)) { in sdp_rdma_to_iovec()
578 sdp_warn(sk, "ib_post_send failed with status %d.\n", rc); in sdp_rdma_to_iovec()
579 sdp_set_error(ssk->socket, -ECONNRESET); in sdp_rdma_to_iovec()
580 wake_up(&ssk->wq); in sdp_rdma_to_iovec()
584 sdp_prf(sk, mb, "Finished posting(rc=%d), now to wait", rc); in sdp_rdma_to_iovec()
586 got_srcavail_cancel = ssk->srcavail_cancel_mseq > rx_sa->mseq; in sdp_rdma_to_iovec()
592 sdp_prf(sk, mb, "Finished waiting(rc=%d)", rc); in sdp_rdma_to_iovec()
593 if (!ssk->qp_active) { in sdp_rdma_to_iovec()
595 rc = -EPIPE; in sdp_rdma_to_iovec()
599 copied = rx_sa->umem->length; in sdp_rdma_to_iovec()
602 rx_sa->used += copied; in sdp_rdma_to_iovec()
603 atomic_add(copied, &ssk->rcv_nxt); in sdp_rdma_to_iovec()
606 ssk->tx_ring.rdma_inflight = NULL; in sdp_rdma_to_iovec()
609 sdp_free_fmr(sk, &rx_sa->fmr, &rx_sa->umem); in sdp_rdma_to_iovec()
612 if (rc && ssk->qp_active) { in sdp_rdma_to_iovec()
613 sdp_warn(sk, "Couldn't do RDMA - post sendsm\n"); in sdp_rdma_to_iovec()
614 rx_sa->flags |= RX_SA_ABORTED; in sdp_rdma_to_iovec()
619 return rc; in sdp_rdma_to_iovec()
630 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in wait_for_sndbuf()
647 int rc = 0; in do_sdp_sendmsg_zcopy() local
650 rc = sdp_alloc_fmr(sk, iov->iov_base, iov->iov_len, in do_sdp_sendmsg_zcopy()
651 &tx_sa->fmr, &tx_sa->umem); in do_sdp_sendmsg_zcopy()
652 if (rc) { in do_sdp_sendmsg_zcopy()
653 sdp_warn(sk, "Error allocating fmr: %d\n", rc); in do_sdp_sendmsg_zcopy()
658 rc = wait_for_sndbuf(sk, timeo); in do_sdp_sendmsg_zcopy()
659 if (rc) { in do_sdp_sendmsg_zcopy()
665 rc = sdp_post_srcavail(sk, tx_sa); in do_sdp_sendmsg_zcopy()
666 if (rc) { in do_sdp_sendmsg_zcopy()
671 rc = sdp_wait_rdmardcompl(ssk, timeo, 0); in do_sdp_sendmsg_zcopy()
672 if (unlikely(rc)) { in do_sdp_sendmsg_zcopy()
673 enum tx_sa_flag f = tx_sa->abort_flags; in do_sdp_sendmsg_zcopy()
681 } else if (ssk->qp_active) { in do_sdp_sendmsg_zcopy()
691 cancel_delayed_work(&ssk->srcavail_cancel_work); in do_sdp_sendmsg_zcopy()
699 spin_lock_irqsave(&ssk->tx_sa_lock, lock_flags); in do_sdp_sendmsg_zcopy()
700 ssk->tx_sa = NULL; in do_sdp_sendmsg_zcopy()
701 spin_unlock_irqrestore(&ssk->tx_sa_lock, lock_flags); in do_sdp_sendmsg_zcopy()
704 sdp_update_iov_used(sk, iov, tx_sa->bytes_acked); in do_sdp_sendmsg_zcopy()
707 sdp_free_fmr(sk, &tx_sa->fmr, &tx_sa->umem); in do_sdp_sendmsg_zcopy()
710 return rc; in do_sdp_sendmsg_zcopy()
716 int rc = 0; in sdp_sendmsg_zcopy() local
723 sdp_dbg_data(sk, "Sending iov: %p, iov_len: 0x%lx\n", in sdp_sendmsg_zcopy()
724 iov->iov_base, iov->iov_len); in sdp_sendmsg_zcopy()
726 if (ssk->rx_sa) { in sdp_sendmsg_zcopy()
731 sock_hold(ssk->socket, SOCK_REF_ZCOPY); in sdp_sendmsg_zcopy()
738 offset = (unsigned long)iov->iov_base & (PAGE_SIZE - 1); in sdp_sendmsg_zcopy()
743 rc = -EAGAIN; /* Buffer too big - fallback to bcopy */ in sdp_sendmsg_zcopy()
747 bytes_to_copy = iov->iov_len; in sdp_sendmsg_zcopy()
751 rc = do_sdp_sendmsg_zcopy(sk, tx_sa, iov, &timeo); in sdp_sendmsg_zcopy()
753 if (iov->iov_len && iov->iov_len < sdp_zcopy_thresh) { in sdp_sendmsg_zcopy()
754 sdp_dbg_data(sk, "0x%lx bytes left, switching to bcopy\n", in sdp_sendmsg_zcopy()
755 iov->iov_len); in sdp_sendmsg_zcopy()
758 } while (!rc && iov->iov_len > 0 && !tx_sa->abort_flags); in sdp_sendmsg_zcopy()
762 copied = bytes_to_copy - iov->iov_len; in sdp_sendmsg_zcopy()
764 sdp_prf1(sk, NULL, "sdp_sendmsg_zcopy end rc: %d copied: %d", rc, copied); in sdp_sendmsg_zcopy()
766 sock_put(ssk->socket, SOCK_REF_ZCOPY); in sdp_sendmsg_zcopy()
768 if (rc < 0 && rc != -EAGAIN && rc != -ETIME) in sdp_sendmsg_zcopy()
769 return rc; in sdp_sendmsg_zcopy()
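The matches around lines 738-755 above record the buffer's offset within its first page and fall back to plain bcopy once the remaining iov drops below the zero-copy threshold. A small userspace illustration of those two checks; sdp_zcopy_thresh is a driver module parameter and 65536 is only an assumed value:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long sdp_zcopy_thresh = 65536;  /* assumed threshold */
        static char buf[200000];
        size_t iov_len = 40000;

        /* Offset of the (possibly unaligned) buffer inside its first page. */
        unsigned long offset = (uintptr_t)(buf + 123) & (PAGE_SIZE - 1);
        printf("offset in first page: %lu\n", offset);

        /* Remainder below the threshold: switch to bcopy. */
        if (iov_len && iov_len < sdp_zcopy_thresh)
            printf("0x%lx bytes left, switching to bcopy\n",
                   (unsigned long)iov_len);
        return 0;
    }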
777 struct tx_srcavail_state *tx_sa = ssk->tx_sa; in sdp_abort_srcavail()
783 cancel_delayed_work(&ssk->srcavail_cancel_work); in sdp_abort_srcavail()
786 spin_lock_irqsave(&ssk->tx_sa_lock, flags); in sdp_abort_srcavail()
788 sdp_free_fmr(sk, &tx_sa->fmr, &tx_sa->umem); in sdp_abort_srcavail()
790 ssk->tx_sa = NULL; in sdp_abort_srcavail()
792 spin_unlock_irqrestore(&ssk->tx_sa_lock, flags); in sdp_abort_srcavail()
798 struct rx_srcavail_state *rx_sa = ssk->rx_sa; in sdp_abort_rdma_read()
803 sdp_free_fmr(sk, &rx_sa->fmr, &rx_sa->umem); in sdp_abort_rdma_read()
805 ssk->rx_sa = NULL; in sdp_abort_rdma_read()