Lines Matching +full:ctrl +full:- +full:len

63 return qp->buf.buf + qp->rq.offset + (n << qp->rq.wqe_shift); in get_recv_wqe()
68 return rwq->pbuff + (n << rwq->rq.wqe_shift); in get_wq_recv_wqe()
81 copy = min_t(long, *size, be32toh(scat->byte_count)); in copy_to_scat()
82 memcpy((void *)(unsigned long)be64toh(scat->addr), buf, copy); in copy_to_scat()
83 *size -= copy; in copy_to_scat()
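
The copy_to_scat() lines above copy a completion buffer out across a scatter list, bounding each memcpy() by the entry's byte_count and by the bytes still remaining. Below is a minimal host-order sketch of the same loop; the struct and function names are illustrative and do not reproduce the provider's real data-segment layout, which stores its fields big-endian.

#include <stdint.h>
#include <string.h>

struct scat_entry {          /* illustrative scatter entry */
    uint32_t byte_count;
    void    *addr;
};

/* Copy up to *size bytes from buf across the scatter entries,
 * bounded per entry by its byte_count, decrementing *size as we go. */
static void copy_to_scatter(struct scat_entry *scat, int nent,
                            const void *buf, size_t *size)
{
    for (int i = 0; i < nent && *size; ++i) {
        size_t copy = *size < scat[i].byte_count ? *size : scat[i].byte_count;

        memcpy(scat[i].addr, buf, copy);
        buf = (const char *)buf + copy;
        *size -= copy;
    }
}
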
96 int max = 1 << (qp->rq.wqe_shift - 4); in mlx5_copy_to_recv_wqe()
99 if (unlikely(qp->wq_sig)) in mlx5_copy_to_recv_wqe()
107 struct mlx5_wqe_ctrl_seg *ctrl; in mlx5_copy_to_send_wqe() local
112 idx &= (qp->sq.wqe_cnt - 1); in mlx5_copy_to_send_wqe()
113 ctrl = mlx5_get_send_wqe(qp, idx); in mlx5_copy_to_send_wqe()
114 if (qp->ibv_qp->qp_type != IBV_QPT_RC) { in mlx5_copy_to_send_wqe()
118 p = ctrl + 1; in mlx5_copy_to_send_wqe()
120 switch (be32toh(ctrl->opmod_idx_opcode) & 0xff) { in mlx5_copy_to_send_wqe()
133 be32toh(ctrl->opmod_idx_opcode) & 0xff); in mlx5_copy_to_send_wqe()
138 max = (be32toh(ctrl->qpn_ds) & 0x3F) - (((void *)scat - (void *)ctrl) >> 4); in mlx5_copy_to_send_wqe()
139 if (unlikely((void *)(scat + max) > qp->sq.qend)) { in mlx5_copy_to_send_wqe()
140 int tmp = ((void *)qp->sq.qend - (void *)scat) >> 4; in mlx5_copy_to_send_wqe()
145 max = max - tmp; in mlx5_copy_to_send_wqe()
146 buf += orig_size - size; in mlx5_copy_to_send_wqe()
155 return qp->sq_start + (n << MLX5_SEND_WQE_SHIFT); in mlx5_get_send_wqe()
160 rwq->rq.head = 0; in mlx5_init_rwq_indices()
161 rwq->rq.tail = 0; in mlx5_init_rwq_indices()
166 qp->sq.head = 0; in mlx5_init_qp_indices()
167 qp->sq.tail = 0; in mlx5_init_qp_indices()
168 qp->rq.head = 0; in mlx5_init_qp_indices()
169 qp->rq.tail = 0; in mlx5_init_qp_indices()
170 qp->sq.cur_post = 0; in mlx5_init_qp_indices()
177 cur = wq->head - wq->tail; in mlx5_wq_overflow()
178 if (cur + nreq < wq->max_post) in mlx5_wq_overflow()
181 mlx5_spin_lock(&cq->lock); in mlx5_wq_overflow()
182 cur = wq->head - wq->tail; in mlx5_wq_overflow()
183 mlx5_spin_unlock(&cq->lock); in mlx5_wq_overflow()
185 return cur + nreq >= wq->max_post; in mlx5_wq_overflow()
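
mlx5_wq_overflow() treats head and tail as free-running counters, so the occupancy head - tail stays correct across unsigned wraparound; the queue is full when the outstanding count plus the new requests reaches max_post (the slower path only re-reads the counters under the CQ lock). A standalone sketch of that accounting, with illustrative names:

#include <stdbool.h>

struct wq_count {
    unsigned int head;      /* incremented by the poster */
    unsigned int tail;      /* incremented as completions are reaped */
    unsigned int max_post;  /* capacity of the work queue */
};

static bool wq_would_overflow(const struct wq_count *wq, int nreq)
{
    unsigned int cur = wq->head - wq->tail;   /* wraparound-safe occupancy */

    return cur + nreq >= wq->max_post;
}
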
191 rseg->raddr = htobe64(remote_addr); in set_raddr_seg()
192 rseg->rkey = htobe32(rkey); in set_raddr_seg()
193 rseg->reserved = 0; in set_raddr_seg()
202 aseg->swap_add = htobe64(swap); in set_atomic_seg()
203 aseg->compare = htobe64(compare_add); in set_atomic_seg()
205 aseg->swap_add = htobe64(compare_add); in set_atomic_seg()
212 memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof dseg->av); in set_datagram_seg()
213 dseg->av.dqp_dct = htobe32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV); in set_datagram_seg()
214 dseg->av.key.qkey.qkey = htobe32(wr->wr.ud.remote_qkey); in set_datagram_seg()
220 dseg->byte_count = htobe32(sg->length - offset); in set_data_ptr_seg()
221 dseg->lkey = htobe32(sg->lkey); in set_data_ptr_seg()
222 dseg->addr = htobe64(sg->addr + offset); in set_data_ptr_seg()
228 dseg->byte_count = htobe32(MLX5_ATOMIC_SIZE); in set_data_ptr_seg_atomic()
229 dseg->lkey = htobe32(sg->lkey); in set_data_ptr_seg_atomic()
230 dseg->addr = htobe64(sg->addr); in set_data_ptr_seg_atomic()
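
The segment builders above (set_raddr_seg(), set_data_ptr_seg() and friends) all follow the same pattern: fill a small fixed-layout descriptor whose fields go on the wire big-endian via htobe32()/htobe64(). A self-contained sketch of that pattern; the struct here is illustrative and only mirrors the field names visible in the excerpt.

#include <endian.h>
#include <stdint.h>

/* Illustrative 16-byte scatter/gather descriptor; packed so the
 * in-memory layout matches the wire layout with no padding. */
struct data_seg {
    uint32_t byte_count;   /* big-endian length */
    uint32_t lkey;         /* big-endian memory key */
    uint64_t addr;         /* big-endian virtual address */
} __attribute__((packed));

static void fill_data_seg(struct data_seg *dseg, uint64_t addr,
                          uint32_t length, uint32_t lkey)
{
    dseg->byte_count = htobe32(length);
    dseg->lkey       = htobe32(lkey);
    dseg->addr       = htobe64(addr);
}
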
235 * implementations may use move-string-buffer assembler instructions,
250 bytecnt -= 8 * sizeof(unsigned long long); in mlx5_bf_copy()
251 if (unlikely(src == qp->sq.qend)) in mlx5_bf_copy()
252 src = qp->sq_start; in mlx5_bf_copy()
258 switch (wr->opcode) { in send_ieth()
261 return wr->imm_data; in send_ieth()
263 return htobe32(wr->imm_data); in send_ieth()
275 int len; in set_data_inl_seg() local
278 void *qend = qp->sq.qend; in set_data_inl_seg()
280 int offset = sg_copy_ptr->offset; in set_data_inl_seg()
284 for (i = sg_copy_ptr->index; i < wr->num_sge; ++i) { in set_data_inl_seg()
285 addr = (void *) (unsigned long)(wr->sg_list[i].addr + offset); in set_data_inl_seg()
286 len = wr->sg_list[i].length - offset; in set_data_inl_seg()
287 inl += len; in set_data_inl_seg()
290 if (unlikely(inl > qp->max_inline_data)) in set_data_inl_seg()
293 if (unlikely(wqe + len > qend)) { in set_data_inl_seg()
294 copy = qend - wqe; in set_data_inl_seg()
297 len -= copy; in set_data_inl_seg()
300 memcpy(wqe, addr, len); in set_data_inl_seg()
301 wqe += len; in set_data_inl_seg()
305 seg->byte_count = htobe32(inl | MLX5_INLINE_SEG); in set_data_inl_seg()
306 *sz = align(inl + sizeof seg->byte_count, 16) / 16; in set_data_inl_seg()
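
set_data_inl_seg() copies the payload straight into the work queue buffer and, because a WQE may start near the end of the ring, wraps from sq.qend back to sq_start mid-copy before tagging the total length with MLX5_INLINE_SEG and rounding it up to 16-byte units. The wrap itself is a two-part memcpy(); a generic sketch follows (names are illustrative and it assumes len never exceeds the ring size, so at most one wrap is needed).

#include <stddef.h>
#include <string.h>

/* Copy len bytes into a circular work-queue buffer, wrapping from q_end
 * back to q_start once if the write would run past the end. */
static void *wq_copy_inline(void *wqe, void *q_start, void *q_end,
                            const void *src, size_t len)
{
    if ((char *)wqe + len > (char *)q_end) {
        size_t chunk = (char *)q_end - (char *)wqe;

        memcpy(wqe, src, chunk);
        src = (const char *)src + chunk;
        len -= chunk;
        wqe = q_start;
    }
    memcpy(wqe, src, len);
    return (char *)wqe + len;
}
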
313 static uint8_t wq_sig(struct mlx5_wqe_ctrl_seg *ctrl) in wq_sig() argument
315 return calc_sig(ctrl, be32toh(ctrl->qpn_ds)); in wq_sig()
329 tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1); in dump_wqe()
348 addr = (void *)(unsigned long)be64toh(dpseg->addr); in mlx5_get_atomic_laddr()
366 FILE *fp = to_mctx(ibqp->context)->dbg_fp; in copy_eth_inline_headers()
368 if (unlikely(wr->num_sge < 1)) { in copy_eth_inline_headers()
370 wr->num_sge); in copy_eth_inline_headers()
374 if (likely(wr->sg_list[0].length >= MLX5_ETH_L2_INLINE_HEADER_SIZE)) { in copy_eth_inline_headers()
376 memcpy(eseg->inline_hdr_start, in copy_eth_inline_headers()
377 (void *)(uintptr_t)wr->sg_list[0].addr, in copy_eth_inline_headers()
380 for (j = 0; j < wr->num_sge && inl_hdr_size > 0; ++j) { in copy_eth_inline_headers()
381 inl_hdr_copy_size = min(wr->sg_list[j].length, in copy_eth_inline_headers()
383 memcpy(eseg->inline_hdr_start + in copy_eth_inline_headers()
384 (MLX5_ETH_L2_INLINE_HEADER_SIZE - inl_hdr_size), in copy_eth_inline_headers()
385 (void *)(uintptr_t)wr->sg_list[j].addr, in copy_eth_inline_headers()
387 inl_hdr_size -= inl_hdr_copy_size; in copy_eth_inline_headers()
393 --j; in copy_eth_inline_headers()
397 eseg->inline_hdr_sz = htobe16(MLX5_ETH_L2_INLINE_HEADER_SIZE); in copy_eth_inline_headers()
399 /* If we copied all the sge into the inline-headers, then we need to in copy_eth_inline_headers()
400 * start copying from the next sge into the data-segment. in copy_eth_inline_headers()
402 if (unlikely(wr->sg_list[j].length == inl_hdr_copy_size)) { in copy_eth_inline_headers()
407 sg_copy_ptr->index = j; in copy_eth_inline_headers()
408 sg_copy_ptr->offset = inl_hdr_copy_size; in copy_eth_inline_headers()
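
copy_eth_inline_headers() gathers the first MLX5_ETH_L2_INLINE_HEADER_SIZE bytes of the scatter list into the Ethernet segment's inline header, walking as many SGEs as needed, then records the SGE index and offset at which payload copying should resume. A self-contained sketch of that gather-and-resume bookkeeping, with illustrative types and without the provider's debug reporting:

#include <stdint.h>
#include <string.h>

struct sge {                 /* illustrative scatter/gather element */
    const void *addr;
    uint32_t    length;
};

struct sg_resume {           /* where payload copying should continue */
    int      index;
    uint32_t offset;
};

/* Gather the first hdr_len bytes of the SG list into hdr, spanning entries
 * as needed, and record the entry/offset where the payload starts.
 * Returns -1 if the SG list holds fewer than hdr_len bytes. */
static int gather_inline_headers(const struct sge *sgl, int num_sge,
                                 uint8_t *hdr, uint32_t hdr_len,
                                 struct sg_resume *r)
{
    uint32_t left = hdr_len;
    uint32_t copied = 0;
    int j;

    if (!hdr_len) {
        r->index = 0;
        r->offset = 0;
        return 0;
    }

    for (j = 0; j < num_sge && left > 0; ++j) {
        copied = sgl[j].length < left ? sgl[j].length : left;
        memcpy(hdr + (hdr_len - left), sgl[j].addr, copied);
        left -= copied;
    }
    if (left)
        return -1;

    --j;                               /* last entry we copied from */
    if (sgl[j].length == copied) {     /* header ended on an entry boundary */
        ++j;
        copied = 0;
    }
    r->index  = j;
    r->offset = copied;
    return 0;
}
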
414 #define ALIGN(x, log_a) ((((x) + (1 << (log_a)) - 1)) & ~((1 << (log_a)) - 1))
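
The ALIGN() macro above rounds x up to the next multiple of 2^log_a by adding (2^log_a - 1) and masking off the low bits. A tiny worked example:

#include <stdio.h>

#define ALIGN(x, log_a) ((((x) + (1 << (log_a)) - 1)) & ~((1 << (log_a)) - 1))

int main(void)
{
    /* Rounds up to the next multiple of 2^log_a; here 2^4 = 16. */
    printf("%d\n", ALIGN(13, 4));   /* prints 16 */
    printf("%d\n", ALIGN(16, 4));   /* already aligned: prints 16 */
    printf("%d\n", ALIGN(17, 4));   /* prints 32 */
    return 0;
}
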
430 data->klm.byte_count = htobe32(bind_info->length); in set_umr_data_seg()
431 data->klm.mkey = htobe32(bind_info->mr->lkey); in set_umr_data_seg()
432 data->klm.address = htobe64(bind_info->addr); in set_umr_data_seg()
434 memset(&data->klm + 1, 0, sizeof(data->reserved) - in set_umr_data_seg()
435 sizeof(data->klm)); in set_umr_data_seg()
447 mkey->qpn_mkey = htobe32((rkey & 0xFF) | in set_umr_mkey_seg()
448 ((type == IBV_MW_TYPE_1 || !bind_info->length) ? in set_umr_mkey_seg()
450 if (bind_info->length) { in set_umr_mkey_seg()
452 mkey->access_flags = 0; in set_umr_mkey_seg()
453 mkey->free = 0; in set_umr_mkey_seg()
454 if (bind_info->mw_access_flags & IBV_ACCESS_LOCAL_WRITE) in set_umr_mkey_seg()
455 mkey->access_flags |= in set_umr_mkey_seg()
457 if (bind_info->mw_access_flags & IBV_ACCESS_REMOTE_WRITE) in set_umr_mkey_seg()
458 mkey->access_flags |= in set_umr_mkey_seg()
460 if (bind_info->mw_access_flags & IBV_ACCESS_REMOTE_READ) in set_umr_mkey_seg()
461 mkey->access_flags |= in set_umr_mkey_seg()
463 if (bind_info->mw_access_flags & IBV_ACCESS_REMOTE_ATOMIC) in set_umr_mkey_seg()
464 mkey->access_flags |= in set_umr_mkey_seg()
466 if (bind_info->mw_access_flags & IBV_ACCESS_ZERO_BASED) in set_umr_mkey_seg()
467 mkey->start_addr = 0; in set_umr_mkey_seg()
469 mkey->start_addr = htobe64(bind_info->addr); in set_umr_mkey_seg()
470 mkey->len = htobe64(bind_info->length); in set_umr_mkey_seg()
472 mkey->free = MLX5_WQE_MKEY_CONTEXT_FREE; in set_umr_mkey_seg()
483 struct mlx5_wqe_umr_ctrl_seg *ctrl = *seg; in set_umr_control_seg() local
485 ctrl->flags = MLX5_WQE_UMR_CTRL_FLAG_TRNSLATION_OFFSET | in set_umr_control_seg()
487 ctrl->mkey_mask = htobe64(MLX5_WQE_UMR_CTRL_MKEY_MASK_FREE | in set_umr_control_seg()
489 ctrl->translation_offset = 0; in set_umr_control_seg()
490 memset(ctrl->rsvd0, 0, sizeof(ctrl->rsvd0)); in set_umr_control_seg()
491 memset(ctrl->rsvd1, 0, sizeof(ctrl->rsvd1)); in set_umr_control_seg()
494 ctrl->mkey_mask |= htobe64(MLX5_WQE_UMR_CTRL_MKEY_MASK_QPN); in set_umr_control_seg()
496 if (bind_info->length) { in set_umr_control_seg()
497 ctrl->klm_octowords = get_klm_octo(1); in set_umr_control_seg()
499 ctrl->flags |= MLX5_WQE_UMR_CTRL_FLAG_CHECK_FREE; in set_umr_control_seg()
500 ctrl->mkey_mask |= htobe64(MLX5_WQE_UMR_CTRL_MKEY_MASK_LEN | in set_umr_control_seg()
507 ctrl->klm_octowords = get_klm_octo(0); in set_umr_control_seg()
509 ctrl->flags |= MLX5_WQE_UMR_CTRL_FLAG_CHECK_QPN; in set_umr_control_seg()
520 void *qend = qp->sq.qend; in set_bind_wr()
523 if (bind_info->mw_access_flags & in set_bind_wr()
528 if (bind_info->mr && in set_bind_wr()
529 (bind_info->mr->addr > (void *)bind_info->addr || in set_bind_wr()
530 bind_info->mr->addr + bind_info->mr->length < in set_bind_wr()
531 (void *)bind_info->addr + bind_info->length || in set_bind_wr()
532 !(to_mmr(bind_info->mr)->alloc_flags & IBV_ACCESS_MW_BIND) || in set_bind_wr()
533 (bind_info->mw_access_flags & in set_bind_wr()
535 !(to_mmr(bind_info->mr)->alloc_flags & IBV_ACCESS_LOCAL_WRITE)))) in set_bind_wr()
540 /* reject lengths above 2GB: KLM supports at most 2GB */ in set_bind_wr()
541 if (bind_info->length > 1UL << 31) in set_bind_wr()
549 if (!bind_info->length) in set_bind_wr()
566 int size_of_inl_hdr_start = sizeof(eseg->inline_hdr_start); in set_tso_eth_seg()
568 void *pdata = wr->tso.hdr; in set_tso_eth_seg()
569 FILE *fp = to_mctx(qp->ibv_qp->context)->dbg_fp; in set_tso_eth_seg()
571 if (unlikely(wr->tso.hdr_sz < MLX5_ETH_L2_MIN_HEADER_SIZE || in set_tso_eth_seg()
572 wr->tso.hdr_sz > qp->max_tso_header)) { in set_tso_eth_seg()
576 qp->max_tso_header); in set_tso_eth_seg()
580 left = wr->tso.hdr_sz; in set_tso_eth_seg()
581 eseg->mss = htobe16(wr->tso.mss); in set_tso_eth_seg()
582 eseg->inline_hdr_sz = htobe16(wr->tso.hdr_sz); in set_tso_eth_seg()
588 left_len = qend - (void *)eseg->inline_hdr_start; in set_tso_eth_seg()
591 memcpy(eseg->inline_hdr_start, pdata, copy_sz); in set_tso_eth_seg()
593 /* The -1 is because there are already 16 bytes included in in set_tso_eth_seg()
594 * eseg->inline_hdr[16] in set_tso_eth_seg()
596 *seg += align(copy_sz - size_of_inl_hdr_start, 16) - 16; in set_tso_eth_seg()
597 *size += align(copy_sz - size_of_inl_hdr_start, 16) / 16 - 1; in set_tso_eth_seg()
602 left -= copy_sz; in set_tso_eth_seg()
619 struct mlx5_wqe_ctrl_seg *ctrl = NULL; in _mlx5_post_send() local
629 struct mlx5_bf *bf = qp->bf; in _mlx5_post_send()
630 void *qend = qp->sq.qend; in _mlx5_post_send()
636 FILE *fp = to_mctx(ibqp->context)->dbg_fp; /* ignored by the compiler in non-debug builds */ in _mlx5_post_send()
638 mlx5_spin_lock(&qp->sq.lock); in _mlx5_post_send()
640 next_fence = qp->fm_cache; in _mlx5_post_send()
642 for (nreq = 0; wr; ++nreq, wr = wr->next) { in _mlx5_post_send()
643 if (unlikely(wr->opcode < 0 || in _mlx5_post_send()
644 wr->opcode >= sizeof mlx5_ib_opcode / sizeof mlx5_ib_opcode[0])) { in _mlx5_post_send()
645 mlx5_dbg(fp, MLX5_DBG_QP_SEND, "bad opcode %d\n", wr->opcode); in _mlx5_post_send()
651 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, in _mlx5_post_send()
652 to_mcq(qp->ibv_qp->send_cq)))) { in _mlx5_post_send()
659 if (unlikely(wr->num_sge > qp->sq.max_gs)) { in _mlx5_post_send()
661 wr->num_sge, qp->sq.max_gs); in _mlx5_post_send()
667 if (wr->send_flags & IBV_SEND_FENCE) in _mlx5_post_send()
672 idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); in _mlx5_post_send()
673 ctrl = seg = mlx5_get_send_wqe(qp, idx); in _mlx5_post_send()
675 ctrl->imm = send_ieth(wr); in _mlx5_post_send()
676 ctrl->fm_ce_se = qp->sq_signal_bits | fence | in _mlx5_post_send()
677 (wr->send_flags & IBV_SEND_SIGNALED ? in _mlx5_post_send()
679 (wr->send_flags & IBV_SEND_SOLICITED ? in _mlx5_post_send()
682 seg += sizeof *ctrl; in _mlx5_post_send()
683 size = sizeof *ctrl / 16; in _mlx5_post_send()
685 switch (ibqp->qp_type) { in _mlx5_post_send()
687 if (unlikely(wr->opcode != IBV_WR_BIND_MW && in _mlx5_post_send()
688 wr->opcode != IBV_WR_LOCAL_INV)) { in _mlx5_post_send()
690 xrc->xrc_srqn = htobe32(wr->qp_type.xrc.remote_srqn); in _mlx5_post_send()
696 switch (wr->opcode) { in _mlx5_post_send()
700 set_raddr_seg(seg, wr->wr.rdma.remote_addr, in _mlx5_post_send()
701 wr->wr.rdma.rkey); in _mlx5_post_send()
708 if (unlikely(!qp->atomics_enabled)) { in _mlx5_post_send()
714 set_raddr_seg(seg, wr->wr.atomic.remote_addr, in _mlx5_post_send()
715 wr->wr.atomic.rkey); in _mlx5_post_send()
718 set_atomic_seg(seg, wr->opcode, in _mlx5_post_send()
719 wr->wr.atomic.swap, in _mlx5_post_send()
720 wr->wr.atomic.compare_add); in _mlx5_post_send()
729 ctrl->imm = htobe32(wr->bind_mw.mw->rkey); in _mlx5_post_send()
730 err = set_bind_wr(qp, wr->bind_mw.mw->type, in _mlx5_post_send()
731 wr->bind_mw.rkey, in _mlx5_post_send()
732 &wr->bind_mw.bind_info, in _mlx5_post_send()
733 ibqp->qp_num, &seg, &size); in _mlx5_post_send()
739 qp->sq.wr_data[idx] = IBV_WC_BIND_MW; in _mlx5_post_send()
745 ctrl->imm = htobe32(wr->imm_data); in _mlx5_post_send()
747 &bind_info, ibqp->qp_num, in _mlx5_post_send()
754 qp->sq.wr_data[idx] = IBV_WC_LOCAL_INV; in _mlx5_post_send()
764 switch (wr->opcode) { in _mlx5_post_send()
767 set_raddr_seg(seg, wr->wr.rdma.remote_addr, in _mlx5_post_send()
768 wr->wr.rdma.rkey); in _mlx5_post_send()
774 ctrl->imm = htobe32(wr->bind_mw.mw->rkey); in _mlx5_post_send()
775 err = set_bind_wr(qp, wr->bind_mw.mw->type, in _mlx5_post_send()
776 wr->bind_mw.rkey, in _mlx5_post_send()
777 &wr->bind_mw.bind_info, in _mlx5_post_send()
778 ibqp->qp_num, &seg, &size); in _mlx5_post_send()
784 qp->sq.wr_data[idx] = IBV_WC_BIND_MW; in _mlx5_post_send()
790 ctrl->imm = htobe32(wr->imm_data); in _mlx5_post_send()
792 &bind_info, ibqp->qp_num, in _mlx5_post_send()
799 qp->sq.wr_data[idx] = IBV_WC_LOCAL_INV; in _mlx5_post_send()
820 if (wr->send_flags & IBV_SEND_IP_CSUM) { in _mlx5_post_send()
821 if (!(qp->qp_cap_cache & MLX5_CSUM_SUPPORT_RAW_OVER_ETH)) { in _mlx5_post_send()
827 eseg->cs_flags |= MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM; in _mlx5_post_send()
830 if (wr->opcode == IBV_WR_TSO) { in _mlx5_post_send()
831 max_tso = qp->max_tso; in _mlx5_post_send()
856 if (wr->send_flags & IBV_SEND_INLINE && wr->num_sge) { in _mlx5_post_send()
870 for (i = sg_copy_ptr.index; i < wr->num_sge; ++i) { in _mlx5_post_send()
875 if (likely(wr->sg_list[i].length)) { in _mlx5_post_send()
876 if (unlikely(wr->opcode == in _mlx5_post_send()
878 wr->opcode == in _mlx5_post_send()
880 set_data_ptr_seg_atomic(dpseg, wr->sg_list + i); in _mlx5_post_send()
882 if (unlikely(wr->opcode == IBV_WR_TSO)) { in _mlx5_post_send()
883 if (max_tso < wr->sg_list[i].length) { in _mlx5_post_send()
888 max_tso -= wr->sg_list[i].length; in _mlx5_post_send()
890 set_data_ptr_seg(dpseg, wr->sg_list + i, in _mlx5_post_send()
900 mlx5_opcode = mlx5_ib_opcode[wr->opcode]; in _mlx5_post_send()
901 ctrl->opmod_idx_opcode = htobe32(((qp->sq.cur_post & 0xffff) << 8) | in _mlx5_post_send()
904 ctrl->qpn_ds = htobe32(size | (ibqp->qp_num << 8)); in _mlx5_post_send()
906 if (unlikely(qp->wq_sig)) in _mlx5_post_send()
907 ctrl->signature = wq_sig(ctrl); in _mlx5_post_send()
909 qp->sq.wrid[idx] = wr->wr_id; in _mlx5_post_send()
910 qp->sq.wqe_head[idx] = qp->sq.head + nreq; in _mlx5_post_send()
911 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); in _mlx5_post_send()
915 dump_wqe(to_mctx(ibqp->context)->dbg_fp, idx, size, qp); in _mlx5_post_send()
921 qp->sq.head += nreq; in _mlx5_post_send()
922 qp->fm_cache = next_fence; in _mlx5_post_send()
929 qp->db[MLX5_SND_DBR] = htobe32(qp->sq.cur_post & 0xffff); in _mlx5_post_send()
933 ctx = to_mctx(ibqp->context); in _mlx5_post_send()
934 if (bf->need_lock) in _mlx5_post_send()
935 mmio_wc_spinlock(&bf->lock.lock); in _mlx5_post_send()
939 if (!ctx->shut_up_bf && nreq == 1 && bf->uuarn && in _mlx5_post_send()
940 (inl || ctx->prefer_bf) && size > 1 && in _mlx5_post_send()
941 size <= bf->buf_size / 16) in _mlx5_post_send()
942 mlx5_bf_copy(bf->reg + bf->offset, (unsigned long long *)ctrl, in _mlx5_post_send()
945 mlx5_write64((__be32 *)ctrl, bf->reg + bf->offset, in _mlx5_post_send()
946 &ctx->lock32); in _mlx5_post_send()
959 bf->offset ^= bf->buf_size; in _mlx5_post_send()
960 if (bf->need_lock) in _mlx5_post_send()
961 mlx5_spin_unlock(&bf->lock); in _mlx5_post_send()
964 mlx5_spin_unlock(&qp->sq.lock); in _mlx5_post_send()
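
At the end of _mlx5_post_send(), the WQE size accumulated in 16-byte units is written into ctrl->qpn_ds, sq.cur_post advances by the number of send basic blocks the WQE occupies, and the doorbell record takes the low 16 bits of cur_post. A small worked example of that arithmetic, assuming the usual 64-byte MLX5_SEND_WQE_BB:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
    /* Illustrative numbers: a send WQE of six 16-byte segments (96 bytes)
     * with a 64-byte basic block spans two blocks, so cur_post advances
     * by 2 for this post. */
    unsigned int size_in_16B = 6;
    unsigned int send_wqe_bb = 64;

    printf("blocks per WQE: %u\n",
           DIV_ROUND_UP(size_in_16B * 16, send_wqe_bb));   /* prints 2 */

    /* The doorbell record then carries only the low 16 bits of cur_post. */
    unsigned int cur_post = 0x1fffe + 2;
    printf("doorbell: 0x%x\n", cur_post & 0xffff);          /* prints 0x0 */
    return 0;
}
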
973 if (wr->opcode == IBV_WR_BIND_MW) { in mlx5_post_send()
974 if (wr->bind_mw.mw->type == IBV_MW_TYPE_1) in mlx5_post_send()
977 if (!wr->bind_mw.bind_info.mr || in mlx5_post_send()
978 !wr->bind_mw.bind_info.addr || in mlx5_post_send()
979 !wr->bind_mw.bind_info.length) in mlx5_post_send()
982 if (wr->bind_mw.bind_info.mr->pd != wr->bind_mw.mw->pd) in mlx5_post_send()
993 struct ibv_mw_bind_info *bind_info = &mw_bind->bind_info; in mlx5_bind_mw()
998 if (!bind_info->mr && (bind_info->addr || bind_info->length)) { in mlx5_bind_mw()
1003 if (bind_info->mw_access_flags & IBV_ACCESS_ZERO_BASED) { in mlx5_bind_mw()
1008 if (bind_info->mr) { in mlx5_bind_mw()
1009 if (to_mmr(bind_info->mr)->alloc_flags & IBV_ACCESS_ZERO_BASED) { in mlx5_bind_mw()
1014 if (mw->pd != bind_info->mr->pd) { in mlx5_bind_mw()
1022 wr.wr_id = mw_bind->wr_id; in mlx5_bind_mw()
1023 wr.send_flags = mw_bind->send_flags; in mlx5_bind_mw()
1024 wr.bind_mw.bind_info = mw_bind->bind_info; in mlx5_bind_mw()
1026 wr.bind_mw.rkey = ibv_inc_rkey(mw->rkey); in mlx5_bind_mw()
1032 mw->rkey = wr.bind_mw.rkey; in mlx5_bind_mw()
1041 uint32_t qpn = qp->ibv_qp->qp_num; in set_sig_seg()
1046 sig->signature = sign; in set_sig_seg()
1053 uint32_t qpn = rwq->wq.wq_num; in set_wq_sig_seg()
1058 sig->signature = sign; in set_wq_sig_seg()
1072 mlx5_spin_lock(&rwq->rq.lock); in mlx5_post_wq_recv()
1074 ind = rwq->rq.head & (rwq->rq.wqe_cnt - 1); in mlx5_post_wq_recv()
1076 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mlx5_post_wq_recv()
1077 if (unlikely(mlx5_wq_overflow(&rwq->rq, nreq, in mlx5_post_wq_recv()
1078 to_mcq(rwq->wq.cq)))) { in mlx5_post_wq_recv()
1084 if (unlikely(wr->num_sge > rwq->rq.max_gs)) { in mlx5_post_wq_recv()
1092 if (unlikely(rwq->wq_sig)) { in mlx5_post_wq_recv()
1093 memset(sig, 0, 1 << rwq->rq.wqe_shift); in mlx5_post_wq_recv()
1097 for (i = 0, j = 0; i < wr->num_sge; ++i) { in mlx5_post_wq_recv()
1098 if (unlikely(!wr->sg_list[i].length)) in mlx5_post_wq_recv()
1100 set_data_ptr_seg(scat + j++, wr->sg_list + i, 0); in mlx5_post_wq_recv()
1103 if (j < rwq->rq.max_gs) { in mlx5_post_wq_recv()
1109 if (unlikely(rwq->wq_sig)) in mlx5_post_wq_recv()
1110 set_wq_sig_seg(rwq, sig, (wr->num_sge + 1) << 4, in mlx5_post_wq_recv()
1111 rwq->rq.head & 0xffff); in mlx5_post_wq_recv()
1113 rwq->rq.wrid[ind] = wr->wr_id; in mlx5_post_wq_recv()
1115 ind = (ind + 1) & (rwq->rq.wqe_cnt - 1); in mlx5_post_wq_recv()
1120 rwq->rq.head += nreq; in mlx5_post_wq_recv()
1126 *(rwq->recv_db) = htobe32(rwq->rq.head & 0xffff); in mlx5_post_wq_recv()
1129 mlx5_spin_unlock(&rwq->rq.lock); in mlx5_post_wq_recv()
1145 mlx5_spin_lock(&qp->rq.lock); in mlx5_post_recv()
1147 ind = qp->rq.head & (qp->rq.wqe_cnt - 1); in mlx5_post_recv()
1149 for (nreq = 0; wr; ++nreq, wr = wr->next) { in mlx5_post_recv()
1150 if (unlikely(mlx5_wq_overflow(&qp->rq, nreq, in mlx5_post_recv()
1151 to_mcq(qp->ibv_qp->recv_cq)))) { in mlx5_post_recv()
1157 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mlx5_post_recv()
1165 if (unlikely(qp->wq_sig)) { in mlx5_post_recv()
1166 memset(sig, 0, 1 << qp->rq.wqe_shift); in mlx5_post_recv()
1170 for (i = 0, j = 0; i < wr->num_sge; ++i) { in mlx5_post_recv()
1171 if (unlikely(!wr->sg_list[i].length)) in mlx5_post_recv()
1173 set_data_ptr_seg(scat + j++, wr->sg_list + i, 0); in mlx5_post_recv()
1176 if (j < qp->rq.max_gs) { in mlx5_post_recv()
1182 if (unlikely(qp->wq_sig)) in mlx5_post_recv()
1183 set_sig_seg(qp, sig, (wr->num_sge + 1) << 4, in mlx5_post_recv()
1184 qp->rq.head & 0xffff); in mlx5_post_recv()
1186 qp->rq.wrid[ind] = wr->wr_id; in mlx5_post_recv()
1188 ind = (ind + 1) & (qp->rq.wqe_cnt - 1); in mlx5_post_recv()
1193 qp->rq.head += nreq; in mlx5_post_recv()
1208 if (likely(!(ibqp->qp_type == IBV_QPT_RAW_PACKET && in mlx5_post_recv()
1209 ibqp->state < IBV_QPS_RTR))) in mlx5_post_recv()
1210 qp->db[MLX5_RCV_DBR] = htobe32(qp->rq.head & 0xffff); in mlx5_post_recv()
1213 mlx5_spin_unlock(&qp->rq.lock); in mlx5_post_recv()
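
Both receive paths compute the slot as head & (wqe_cnt - 1), which works only because wqe_cnt is a power of two; the mask keeps indexing correct even after the free-running head wraps. A short demonstration:

#include <stdio.h>

int main(void)
{
    /* Illustrative: with a power-of-two ring (wqe_cnt = 8), masking a
     * free-running counter with (wqe_cnt - 1) yields the slot index,
     * including across the 32-bit wraparound. */
    unsigned int wqe_cnt = 8;
    unsigned int head = 4294967293u;        /* UINT_MAX - 2, about to wrap */

    for (int i = 0; i < 6; ++i, ++head)
        printf("head=%u slot=%u\n", head, head & (wqe_cnt - 1));
    return 0;                               /* slots: 5 6 7 0 1 2 */
}
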
1232 if (ctx->qp_table[tind].refcnt) in mlx5_find_qp()
1233 return ctx->qp_table[tind].table[qpn & MLX5_QP_TABLE_MASK]; in mlx5_find_qp()
1242 if (!ctx->qp_table[tind].refcnt) { in mlx5_store_qp()
1243 ctx->qp_table[tind].table = calloc(MLX5_QP_TABLE_MASK + 1, in mlx5_store_qp()
1245 if (!ctx->qp_table[tind].table) in mlx5_store_qp()
1246 return -1; in mlx5_store_qp()
1249 ++ctx->qp_table[tind].refcnt; in mlx5_store_qp()
1250 ctx->qp_table[tind].table[qpn & MLX5_QP_TABLE_MASK] = qp; in mlx5_store_qp()
1258 if (!--ctx->qp_table[tind].refcnt) in mlx5_clear_qp()
1259 free(ctx->qp_table[tind].table); in mlx5_clear_qp()
1261 ctx->qp_table[tind].table[qpn & MLX5_QP_TABLE_MASK] = NULL; in mlx5_clear_qp()
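
mlx5_find_qp(), mlx5_store_qp() and mlx5_clear_qp() keep QPs in a two-level table: the top level is indexed by the high bits of the QPN, second-level pages are allocated on first use, and a page is freed when its reference count drops to zero. The sketch below mirrors that structure; the 16/8-bit split and the table sizes are assumptions for illustration, not the provider's actual MLX5_QP_TABLE_* constants.

#include <stdint.h>
#include <stdlib.h>

#define QP_TABLE_BITS   8
#define QP_TABLE_MASK   ((1 << QP_TABLE_BITS) - 1)
#define QP_TABLE_SIZE   (1 << 16)   /* assumes 24-bit QPNs: high 16 bits index the top level */

struct qp;                           /* opaque for this sketch */

struct qp_table_dir {
    struct qp **table;               /* second level, allocated on demand */
    int         refcnt;              /* live entries in this bucket */
};

static struct qp_table_dir qp_table[QP_TABLE_SIZE];

static struct qp *find_qp(uint32_t qpn)
{
    int tind = qpn >> QP_TABLE_BITS;

    if (!qp_table[tind].refcnt)
        return NULL;
    return qp_table[tind].table[qpn & QP_TABLE_MASK];
}

static int store_qp(uint32_t qpn, struct qp *qp)
{
    int tind = qpn >> QP_TABLE_BITS;

    if (!qp_table[tind].refcnt) {
        qp_table[tind].table = calloc(QP_TABLE_MASK + 1, sizeof(struct qp *));
        if (!qp_table[tind].table)
            return -1;
    }
    ++qp_table[tind].refcnt;
    qp_table[tind].table[qpn & QP_TABLE_MASK] = qp;
    return 0;
}

static void clear_qp(uint32_t qpn)
{
    int tind = qpn >> QP_TABLE_BITS;

    if (!--qp_table[tind].refcnt)
        free(qp_table[tind].table);          /* last entry: drop the whole page */
    else
        qp_table[tind].table[qpn & QP_TABLE_MASK] = NULL;
}
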