Lines Matching +full:cmd +full:- +full:db

14 *      - Redistributions of source code must retain the above
18 *      - Redistributions in binary form must reproduce the above
48 #include "mlx5-abi.h"
60 struct ibv_query_device cmd; in mlx5_query_device() local
65 ret = ibv_cmd_query_device(context, attr, &raw_fw_ver, &cmd, sizeof cmd); in mlx5_query_device()
73 snprintf(attr->fw_ver, sizeof attr->fw_ver, in mlx5_query_device()
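
The snprintf at line 73 formats the 64-bit raw_fw_ver that ibv_cmd_query_device() returned a few lines earlier. A minimal standalone sketch of that unpacking, assuming the usual mlx5 "major.minor.sub_minor" split across the upper, middle and lower 16-bit fields (the field widths are an assumption here, not something the matched lines show):

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: major in bits 32-47, minor in bits 16-31, sub-minor in bits 0-15. */
static void format_fw_ver(uint64_t raw_fw_ver, char *buf, size_t len)
{
        unsigned int major     = (raw_fw_ver >> 32) & 0xffff;
        unsigned int minor     = (raw_fw_ver >> 16) & 0xffff;
        unsigned int sub_minor = raw_fw_ver & 0xffff;

        snprintf(buf, len, "%u.%u.%04u", major, minor, sub_minor);
}
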
86 if (!ctx->hca_core_clock) in mlx5_read_clock()
87 return -EOPNOTSUPP; in mlx5_read_clock()
91 clockhi = be32toh(READL(ctx->hca_core_clock)); in mlx5_read_clock()
92 clocklo = be32toh(READL(ctx->hca_core_clock + 4)); in mlx5_read_clock()
93 clockhi1 = be32toh(READL(ctx->hca_core_clock)); in mlx5_read_clock()
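
Lines 91-93 are the classic high/low/high read of a 64-bit free-running counter that is only accessible 32 bits at a time. A self-contained sketch of the retry loop those three reads imply; read_be32() below is an illustrative stand-in for READL() plus be32toh(), and a real implementation must use an MMIO-safe load:

#include <endian.h>
#include <stdint.h>

/* Stand-in for READL() + be32toh(); an ordinary dereference is NOT sufficient
 * for real MMIO, it is only here so the sketch compiles. */
static inline uint32_t read_be32(const volatile void *addr)
{
        return be32toh(*(const volatile uint32_t *)addr);
}

/* Re-read until the high word is stable, so a low-word wrap between the two
 * reads cannot produce a torn 64-bit value. */
static uint64_t read_core_clock(const volatile void *hca_core_clock)
{
        uint32_t hi, lo, hi2;

        do {
                hi  = read_be32(hca_core_clock);
                lo  = read_be32((const volatile uint8_t *)hca_core_clock + 4);
                hi2 = read_be32(hca_core_clock);
        } while (hi != hi2);

        return ((uint64_t)hi << 32) | lo;
}
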
109 if (values->comp_mask & IBV_VALUES_MASK_RAW_CLOCK) { in mlx5_query_rt_values()
114 values->raw_clock.tv_sec = 0; in mlx5_query_rt_values()
115 values->raw_clock.tv_nsec = cycles; in mlx5_query_rt_values()
120 values->comp_mask = comp_mask; in mlx5_query_rt_values()
128 struct ibv_query_port cmd; in mlx5_query_port() local
130 return ibv_cmd_query_port(context, port, attr, &cmd, sizeof cmd); in mlx5_query_port()
135 struct ibv_alloc_pd cmd; in mlx5_alloc_pd() local
143 if (ibv_cmd_alloc_pd(context, &pd->ibv_pd, &cmd, sizeof cmd, in mlx5_alloc_pd()
149 pd->pdn = resp.pdn; in mlx5_alloc_pd()
151 return &pd->ibv_pd; in mlx5_alloc_pd()
170 struct ibv_reg_mr cmd; in mlx5_reg_mr() local
180 &(mr->ibv_mr), &cmd, sizeof(cmd), &resp, in mlx5_reg_mr()
183 mlx5_free_buf(&(mr->buf)); in mlx5_reg_mr()
187 mr->alloc_flags = acc; in mlx5_reg_mr()
189 return &mr->ibv_mr; in mlx5_reg_mr()
195 struct ibv_rereg_mr cmd; in mlx5_rereg_mr() local
202 access, pd, &cmd, sizeof(cmd), &resp, in mlx5_rereg_mr()
222 struct ibv_alloc_mw cmd; in mlx5_alloc_mw() local
232 ret = ibv_cmd_alloc_mw(pd, type, mw, &cmd, sizeof(cmd), &resp, in mlx5_alloc_mw()
245 struct ibv_dealloc_mw cmd; in mlx5_dealloc_mw() local
247 ret = ibv_cmd_dealloc_mw(mw, &cmd, sizeof(cmd)); in mlx5_dealloc_mw()
264 return -ENOMEM; in mlx5_round_up_power_of_two()
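
The -ENOMEM at line 264 is the overflow branch of the power-of-two round-up helper that the queue-sizing code below leans on. A sketch of what such a helper looks like, assuming INT_MAX as the cap (the exact limit is an assumption):

#include <errno.h>
#include <limits.h>

static long long round_up_power_of_two(long long sz)
{
        long long ret;

        for (ret = 1; ret < sz; ret <<= 1)
                ;

        if (ret > INT_MAX)
                return -ENOMEM;         /* the line-264 path */

        return ret;
}
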
290 return -EINVAL; in get_cqe_size()
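
Likewise, the -EINVAL at line 290 is the rejection path of the CQE-size lookup. A hedged sketch, assuming the MLX5_CQE_SIZE environment override used by libmlx5 and the two supported sizes of 64 and 128 bytes:

#include <errno.h>
#include <stdlib.h>

static int get_cqe_size_sketch(void)
{
        const char *env = getenv("MLX5_CQE_SIZE");
        int size = 64;                  /* assumed default CQE size in bytes */

        if (env)
                size = atoi(env);

        switch (size) {
        case 64:
        case 128:
                return size;
        default:
                return -EINVAL;         /* the line-290 path */
        }
}
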
347 struct mlx5_create_cq cmd; in create_cq() local
354 FILE *fp = to_mctx(context)->dbg_fp; in create_cq()
356 if (!cq_attr->cqe) { in create_cq()
362 if (cq_attr->comp_mask & ~CREATE_CQ_SUPPORTED_COMP_MASK) { in create_cq()
369 if (cq_attr->comp_mask & IBV_CQ_INIT_ATTR_MASK_FLAGS && in create_cq()
370 cq_attr->flags & ~CREATE_CQ_SUPPORTED_FLAGS) { in create_cq()
377 if (cq_attr->wc_flags & ~CREATE_CQ_SUPPORTED_WC_FLAGS) { in create_cq()
389 memset(&cmd, 0, sizeof cmd); in create_cq()
390 cq->cons_index = 0; in create_cq()
392 if (mlx5_spinlock_init(&cq->lock)) in create_cq()
395 ncqe = align_queue_size(cq_attr->cqe + 1); in create_cq()
396 if ((ncqe > (1 << 24)) || (ncqe < (cq_attr->cqe + 1))) { in create_cq()
405 errno = -cqe_sz; in create_cq()
409 if (mlx5_alloc_cq_buf(to_mctx(context), cq, &cq->buf_a, ncqe, cqe_sz)) { in create_cq()
414 cq->dbrec = mlx5_alloc_dbrec(to_mctx(context)); in create_cq()
415 if (!cq->dbrec) { in create_cq()
420 cq->dbrec[MLX5_CQ_SET_CI] = 0; in create_cq()
421 cq->dbrec[MLX5_CQ_ARM_DB] = 0; in create_cq()
422 cq->arm_sn = 0; in create_cq()
423 cq->cqe_sz = cqe_sz; in create_cq()
424 cq->flags = cq_alloc_flags; in create_cq()
426 if (cq_attr->comp_mask & IBV_CQ_INIT_ATTR_MASK_FLAGS && in create_cq()
427 cq_attr->flags & IBV_CREATE_CQ_ATTR_SINGLE_THREADED) in create_cq()
428 cq->flags |= MLX5_CQ_FLAGS_SINGLE_THREADED; in create_cq()
429 cmd.buf_addr = (uintptr_t) cq->buf_a.buf; in create_cq()
430 cmd.db_addr = (uintptr_t) cq->dbrec; in create_cq()
431 cmd.cqe_size = cqe_sz; in create_cq()
434 if (mlx5cq_attr->comp_mask & ~(MLX5DV_CQ_INIT_ATTR_MASK_RESERVED - 1)) { in create_cq()
441 if (mlx5cq_attr->comp_mask & MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE) { in create_cq()
442 if (mctx->cqe_comp_caps.max_num && in create_cq()
443 (mlx5cq_attr->cqe_comp_res_format & in create_cq()
444 mctx->cqe_comp_caps.supported_format)) { in create_cq()
445 cmd.cqe_comp_en = 1; in create_cq()
446 cmd.cqe_comp_res_format = mlx5cq_attr->cqe_comp_res_format; in create_cq()
455 ret = ibv_cmd_create_cq(context, ncqe - 1, cq_attr->channel, in create_cq()
456 cq_attr->comp_vector, in create_cq()
457 ibv_cq_ex_to_cq(&cq->ibv_cq), &cmd.ibv_cmd, in create_cq()
458 sizeof(cmd), &resp.ibv_resp, sizeof(resp)); in create_cq()
464 cq->active_buf = &cq->buf_a; in create_cq()
465 cq->resize_buf = NULL; in create_cq()
466 cq->cqn = resp.cqn; in create_cq()
467 cq->stall_enable = to_mctx(context)->stall_enable; in create_cq()
468 cq->stall_adaptive_enable = to_mctx(context)->stall_adaptive_enable; in create_cq()
469 cq->stall_cycles = to_mctx(context)->stall_cycles; in create_cq()
474 return &cq->ibv_cq; in create_cq()
477 mlx5_free_db(to_mctx(context), cq->dbrec); in create_cq()
480 mlx5_free_cq_buf(to_mctx(context), &cq->buf_a); in create_cq()
483 mlx5_spinlock_destroy(&cq->lock); in create_cq()
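
Taken together, lines 389-431 show the shape of the create-CQ command: the command is zeroed, the consumer-index and arm doorbell words start at zero, and the CQE buffer address, doorbell-record address and CQE size are what get handed to the kernel through ibv_cmd_create_cq() (with ncqe - 1, line 455). A trimmed, self-contained sketch of that fill; the struct below is an illustrative stand-in for the real struct mlx5_create_cq from mlx5-abi.h:

#include <stdint.h>
#include <string.h>

enum { CQ_SET_CI = 0, CQ_ARM_DB = 1 };  /* doorbell-record word indices */

struct cq_cmd_sketch {                  /* stand-in for struct mlx5_create_cq */
        uint64_t buf_addr;
        uint64_t db_addr;
        uint32_t cqe_size;
};

static void prepare_cq_cmd(struct cq_cmd_sketch *cmd, void *cqe_buf,
                           uint32_t *dbrec, uint32_t cqe_sz)
{
        memset(cmd, 0, sizeof(*cmd));
        dbrec[CQ_SET_CI] = 0;           /* lines 420-421: both doorbells start at 0 */
        dbrec[CQ_ARM_DB] = 0;
        cmd->buf_addr = (uintptr_t)cqe_buf;
        cmd->db_addr  = (uintptr_t)dbrec;
        cmd->cqe_size = cqe_sz;
}
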
527 cq_attr->channel, cq_attr->cq_context); in mlx5dv_create_cq()
534 context->ops.destroy_cq(ibv_cq_ex_to_cq(cq)); in mlx5dv_create_cq()
543 struct mlx5_resize_cq cmd; in mlx5_resize_cq() local
544 struct mlx5_context *mctx = to_mctx(ibcq->context); in mlx5_resize_cq()
552 memset(&cmd, 0, sizeof(cmd)); in mlx5_resize_cq()
558 mlx5_spin_lock(&cq->lock); in mlx5_resize_cq()
559 cq->active_cqes = cq->ibv_cq.cqe; in mlx5_resize_cq()
560 if (cq->active_buf == &cq->buf_a) in mlx5_resize_cq()
561 cq->resize_buf = &cq->buf_b; in mlx5_resize_cq()
563 cq->resize_buf = &cq->buf_a; in mlx5_resize_cq()
566 if (cqe == ibcq->cqe + 1) { in mlx5_resize_cq()
567 cq->resize_buf = NULL; in mlx5_resize_cq()
573 cq->resize_cqe_sz = cq->cqe_sz; in mlx5_resize_cq()
574 cq->resize_cqes = cqe; in mlx5_resize_cq()
575 err = mlx5_alloc_cq_buf(mctx, cq, cq->resize_buf, cq->resize_cqes, cq->resize_cqe_sz); in mlx5_resize_cq()
577 cq->resize_buf = NULL; in mlx5_resize_cq()
582 cmd.buf_addr = (uintptr_t)cq->resize_buf->buf; in mlx5_resize_cq()
583 cmd.cqe_size = cq->resize_cqe_sz; in mlx5_resize_cq()
585 err = ibv_cmd_resize_cq(ibcq, cqe - 1, &cmd.ibv_cmd, sizeof(cmd), in mlx5_resize_cq()
591 mlx5_free_cq_buf(mctx, cq->active_buf); in mlx5_resize_cq()
592 cq->active_buf = cq->resize_buf; in mlx5_resize_cq()
593 cq->ibv_cq.cqe = cqe - 1; in mlx5_resize_cq()
594 mlx5_spin_unlock(&cq->lock); in mlx5_resize_cq()
595 cq->resize_buf = NULL; in mlx5_resize_cq()
599 mlx5_free_cq_buf(mctx, cq->resize_buf); in mlx5_resize_cq()
600 cq->resize_buf = NULL; in mlx5_resize_cq()
603 mlx5_spin_unlock(&cq->lock); in mlx5_resize_cq()
617 mlx5_free_db(to_mctx(cq->context), to_mcq(cq)->dbrec); in mlx5_destroy_cq()
618 mlx5_free_cq_buf(to_mctx(cq->context), to_mcq(cq)->active_buf); in mlx5_destroy_cq()
619 mlx5_spinlock_destroy(&mcq->lock); in mlx5_destroy_cq()
628 struct mlx5_create_srq cmd; in mlx5_create_srq() local
636 ctx = to_mctx(pd->context); in mlx5_create_srq()
639 fprintf(stderr, "%s-%d:\n", __func__, __LINE__); in mlx5_create_srq()
642 ibsrq = &srq->vsrq.srq; in mlx5_create_srq()
644 memset(&cmd, 0, sizeof cmd); in mlx5_create_srq()
645 if (mlx5_spinlock_init(&srq->lock)) { in mlx5_create_srq()
646 fprintf(stderr, "%s-%d:\n", __func__, __LINE__); in mlx5_create_srq()
650 if (attr->attr.max_wr > ctx->max_srq_recv_wr) { in mlx5_create_srq()
651 fprintf(stderr, "%s-%d:max_wr %d, max_srq_recv_wr %d\n", __func__, __LINE__, in mlx5_create_srq()
652 attr->attr.max_wr, ctx->max_srq_recv_wr); in mlx5_create_srq()
662 max_sge = ctx->max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg); in mlx5_create_srq()
663 if (attr->attr.max_sge > max_sge) { in mlx5_create_srq()
664 fprintf(stderr, "%s-%d:max_wr %d, max_srq_recv_wr %d\n", __func__, __LINE__, in mlx5_create_srq()
665 attr->attr.max_wr, ctx->max_srq_recv_wr); in mlx5_create_srq()
670 srq->max = align_queue_size(attr->attr.max_wr + 1); in mlx5_create_srq()
671 srq->max_gs = attr->attr.max_sge; in mlx5_create_srq()
672 srq->counter = 0; in mlx5_create_srq()
674 if (mlx5_alloc_srq_buf(pd->context, srq)) { in mlx5_create_srq()
675 fprintf(stderr, "%s-%d:\n", __func__, __LINE__); in mlx5_create_srq()
679 srq->db = mlx5_alloc_dbrec(to_mctx(pd->context)); in mlx5_create_srq()
680 if (!srq->db) { in mlx5_create_srq()
681 fprintf(stderr, "%s-%d:\n", __func__, __LINE__); in mlx5_create_srq()
685 *srq->db = 0; in mlx5_create_srq()
687 cmd.buf_addr = (uintptr_t) srq->buf.buf; in mlx5_create_srq()
688 cmd.db_addr = (uintptr_t) srq->db; in mlx5_create_srq()
689 srq->wq_sig = srq_sig_enabled(); in mlx5_create_srq()
690 if (srq->wq_sig) in mlx5_create_srq()
691 cmd.flags = MLX5_SRQ_FLAG_SIGNATURE; in mlx5_create_srq()
693 attr->attr.max_sge = srq->max_gs; in mlx5_create_srq()
694 pthread_mutex_lock(&ctx->srq_table_mutex); in mlx5_create_srq()
695 ret = ibv_cmd_create_srq(pd, ibsrq, attr, &cmd.ibv_cmd, sizeof(cmd), in mlx5_create_srq()
704 pthread_mutex_unlock(&ctx->srq_table_mutex); in mlx5_create_srq()
706 srq->srqn = resp.srqn; in mlx5_create_srq()
707 srq->rsc.rsn = resp.srqn; in mlx5_create_srq()
708 srq->rsc.type = MLX5_RSC_TYPE_SRQ; in mlx5_create_srq()
716 pthread_mutex_unlock(&ctx->srq_table_mutex); in mlx5_create_srq()
717 mlx5_free_db(to_mctx(pd->context), srq->db); in mlx5_create_srq()
720 free(srq->wrid); in mlx5_create_srq()
721 mlx5_free_buf(&srq->buf); in mlx5_create_srq()
724 mlx5_spinlock_destroy(&srq->lock); in mlx5_create_srq()
736 struct ibv_modify_srq cmd; in mlx5_modify_srq() local
738 return ibv_cmd_modify_srq(srq, attr, attr_mask, &cmd, sizeof cmd); in mlx5_modify_srq()
744 struct ibv_query_srq cmd; in mlx5_query_srq() local
746 return ibv_cmd_query_srq(srq, attr, &cmd, sizeof cmd); in mlx5_query_srq()
753 struct mlx5_context *ctx = to_mctx(srq->context); in mlx5_destroy_srq()
759 if (ctx->cqe_version && msrq->rsc.type == MLX5_RSC_TYPE_XSRQ) in mlx5_destroy_srq()
760 mlx5_clear_uidx(ctx, msrq->rsc.rsn); in mlx5_destroy_srq()
762 mlx5_clear_srq(ctx, msrq->srqn); in mlx5_destroy_srq()
764 mlx5_free_db(ctx, msrq->db); in mlx5_destroy_srq()
765 mlx5_free_buf(&msrq->buf); in mlx5_destroy_srq()
766 free(msrq->wrid); in mlx5_destroy_srq()
767 mlx5_spinlock_destroy(&msrq->lock); in mlx5_destroy_srq()
816 return -EINVAL; in sq_overhead()
831 size = sq_overhead(attr->qp_type); in mlx5_calc_send_wqe()
835 if (attr->cap.max_inline_data) { in mlx5_calc_send_wqe()
837 attr->cap.max_inline_data, 16); in mlx5_calc_send_wqe()
840 if (attr->comp_mask & IBV_QP_INIT_ATTR_MAX_TSO_HEADER) { in mlx5_calc_send_wqe()
841 size += align(attr->max_tso_header, 16); in mlx5_calc_send_wqe()
842 qp->max_tso_header = attr->max_tso_header; in mlx5_calc_send_wqe()
845 max_gather = (ctx->max_sq_desc_sz - size) / in mlx5_calc_send_wqe()
847 if (attr->cap.max_send_sge > max_gather) in mlx5_calc_send_wqe()
848 return -EINVAL; in mlx5_calc_send_wqe()
850 size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg); in mlx5_calc_send_wqe()
853 if (tot_size > ctx->max_sq_desc_sz) in mlx5_calc_send_wqe()
854 return -EINVAL; in mlx5_calc_send_wqe()
866 if (attr->srq) in mlx5_calc_rcv_wqe()
869 num_scatter = max_t(uint32_t, attr->cap.max_recv_sge, 1); in mlx5_calc_rcv_wqe()
871 if (qp->wq_sig) in mlx5_calc_rcv_wqe()
874 if (size > ctx->max_rq_desc_sz) in mlx5_calc_rcv_wqe()
875 return -EINVAL; in mlx5_calc_rcv_wqe()
888 FILE *fp = ctx->dbg_fp; in mlx5_calc_sq_size()
890 if (!attr->cap.max_send_wr) in mlx5_calc_sq_size()
899 if (wqe_size > ctx->max_sq_desc_sz) { in mlx5_calc_sq_size()
901 return -EINVAL; in mlx5_calc_sq_size()
904 qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) - in mlx5_calc_sq_size()
906 attr->cap.max_inline_data = qp->max_inline_data; in mlx5_calc_sq_size()
912 if (attr->cap.max_send_wr > 0x7fffffff / ctx->max_sq_desc_sz) { in mlx5_calc_sq_size()
914 return -EINVAL; in mlx5_calc_sq_size()
917 wq_size = mlx5_round_up_power_of_two(attr->cap.max_send_wr * wqe_size); in mlx5_calc_sq_size()
918 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; in mlx5_calc_sq_size()
919 if (qp->sq.wqe_cnt > ctx->max_send_wqebb) { in mlx5_calc_sq_size()
921 return -EINVAL; in mlx5_calc_sq_size()
924 qp->sq.wqe_shift = mlx5_ilog2(MLX5_SEND_WQE_BB); in mlx5_calc_sq_size()
925 qp->sq.max_gs = attr->cap.max_send_sge; in mlx5_calc_sq_size()
926 qp->sq.max_post = wq_size / wqe_size; in mlx5_calc_sq_size()
940 if (!attr->max_wr) in mlx5_calc_rwq_size()
941 return -EINVAL; in mlx5_calc_rwq_size()
944 num_scatter = max_t(uint32_t, attr->max_sge, 1); in mlx5_calc_rwq_size()
947 if (rwq->wq_sig) in mlx5_calc_rwq_size()
950 if (wqe_size <= 0 || wqe_size > ctx->max_rq_desc_sz) in mlx5_calc_rwq_size()
951 return -EINVAL; in mlx5_calc_rwq_size()
954 wq_size = mlx5_round_up_power_of_two(attr->max_wr) * wqe_size; in mlx5_calc_rwq_size()
956 rwq->rq.wqe_cnt = wq_size / wqe_size; in mlx5_calc_rwq_size()
957 rwq->rq.wqe_shift = mlx5_ilog2(wqe_size); in mlx5_calc_rwq_size()
958 rwq->rq.max_post = 1 << mlx5_ilog2(wq_size / wqe_size); in mlx5_calc_rwq_size()
959 scat_spc = wqe_size - in mlx5_calc_rwq_size()
960 ((rwq->wq_sig) ? sizeof(struct mlx5_rwqe_sig) : 0); in mlx5_calc_rwq_size()
961 rwq->rq.max_gs = scat_spc / sizeof(struct mlx5_wqe_data_seg); in mlx5_calc_rwq_size()
972 FILE *fp = ctx->dbg_fp; in mlx5_calc_rq_size()
974 if (!attr->cap.max_recv_wr) in mlx5_calc_rq_size()
977 if (attr->cap.max_recv_wr > ctx->max_recv_wr) { in mlx5_calc_rq_size()
979 return -EINVAL; in mlx5_calc_rq_size()
983 if (wqe_size < 0 || wqe_size > ctx->max_rq_desc_sz) { in mlx5_calc_rq_size()
985 return -EINVAL; in mlx5_calc_rq_size()
988 wq_size = mlx5_round_up_power_of_two(attr->cap.max_recv_wr) * wqe_size; in mlx5_calc_rq_size()
991 qp->rq.wqe_cnt = wq_size / wqe_size; in mlx5_calc_rq_size()
992 qp->rq.wqe_shift = mlx5_ilog2(wqe_size); in mlx5_calc_rq_size()
993 qp->rq.max_post = 1 << mlx5_ilog2(wq_size / wqe_size); in mlx5_calc_rq_size()
994 scat_spc = wqe_size - in mlx5_calc_rq_size()
995 (qp->wq_sig ? sizeof(struct mlx5_rwqe_sig) : 0); in mlx5_calc_rq_size()
996 qp->rq.max_gs = scat_spc / sizeof(struct mlx5_wqe_data_seg); in mlx5_calc_rq_size()
998 qp->rq.wqe_cnt = 0; in mlx5_calc_rq_size()
999 qp->rq.wqe_shift = 0; in mlx5_calc_rq_size()
1000 qp->rq.max_post = 0; in mlx5_calc_rq_size()
1001 qp->rq.max_gs = 0; in mlx5_calc_rq_size()
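
Lines 940-1001 perform the same receive-queue sizing twice (once for a plain WQ, once for a QP's RQ): the per-WQE size is the scatter-entry count times sizeof(struct mlx5_wqe_data_seg), plus an optional signature segment, checked against max_rq_desc_sz and then rounded up to a power of two; the WQE count, max_post and max_gs fall out of that. A self-contained sketch of the arithmetic, assuming 16-byte data-segment and signature sizes (the sizes are assumptions, not shown in the listing):

#include <errno.h>

#define DATA_SEG_SZ 16  /* assumed sizeof(struct mlx5_wqe_data_seg) */
#define RWQE_SIG_SZ 16  /* assumed sizeof(struct mlx5_rwqe_sig) */

struct rq_geom {
        int wqe_cnt;
        int max_post;
        int max_gs;     /* scatter entries that still fit after the signature */
};

static long long pow2_roundup(long long v)
{
        long long r;

        for (r = 1; r < v; r <<= 1)
                ;
        return r;
}

static int calc_rq_size_sketch(int max_recv_wr, int max_recv_sge, int wq_sig,
                               int max_rq_desc_sz, struct rq_geom *rq)
{
        int num_scatter = max_recv_sge > 1 ? max_recv_sge : 1;
        long long wqe_size = (long long)num_scatter * DATA_SEG_SZ +
                             (wq_sig ? RWQE_SIG_SZ : 0);
        long long wq_size;

        if (wqe_size <= 0 || wqe_size > max_rq_desc_sz)
                return -EINVAL;                 /* lines 950 and 983 */

        wqe_size = pow2_roundup(wqe_size);
        wq_size  = pow2_roundup(max_recv_wr) * wqe_size;
        rq->wqe_cnt  = wq_size / wqe_size;      /* a power of two by construction */
        rq->max_post = rq->wqe_cnt;
        rq->max_gs   = (wqe_size - (wq_sig ? RWQE_SIG_SZ : 0)) / DATA_SEG_SZ;
        return 0;
}
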
1024 qp->sq.offset = ret; in mlx5_calc_wq_size()
1025 qp->rq.offset = 0; in mlx5_calc_wq_size()
1035 qp->bf = &ctx->bfs[uuar_index]; in map_uuar()
1059 if (qp->sq.wqe_cnt) { in mlx5_alloc_qp_buf()
1060 qp->sq.wrid = malloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid)); in mlx5_alloc_qp_buf()
1061 if (!qp->sq.wrid) { in mlx5_alloc_qp_buf()
1063 err = -1; in mlx5_alloc_qp_buf()
1067 qp->sq.wr_data = malloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data)); in mlx5_alloc_qp_buf()
1068 if (!qp->sq.wr_data) { in mlx5_alloc_qp_buf()
1070 err = -1; in mlx5_alloc_qp_buf()
1075 qp->sq.wqe_head = malloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head)); in mlx5_alloc_qp_buf()
1076 if (!qp->sq.wqe_head) { in mlx5_alloc_qp_buf()
1078 err = -1; in mlx5_alloc_qp_buf()
1082 if (qp->rq.wqe_cnt) { in mlx5_alloc_qp_buf()
1083 qp->rq.wrid = malloc(qp->rq.wqe_cnt * sizeof(uint64_t)); in mlx5_alloc_qp_buf()
1084 if (!qp->rq.wrid) { in mlx5_alloc_qp_buf()
1086 err = -1; in mlx5_alloc_qp_buf()
1092 qp_huge_key = qptype2key(qp->ibv_qp->qp_type); in mlx5_alloc_qp_buf()
1099 err = mlx5_alloc_prefered_buf(to_mctx(context), &qp->buf, in mlx5_alloc_qp_buf()
1100 align(qp->buf_size, to_mdev in mlx5_alloc_qp_buf()
1101 (context->device)->page_size), in mlx5_alloc_qp_buf()
1102 to_mdev(context->device)->page_size, in mlx5_alloc_qp_buf()
1107 err = -ENOMEM; in mlx5_alloc_qp_buf()
1111 memset(qp->buf.buf, 0, qp->buf_size); in mlx5_alloc_qp_buf()
1113 if (attr->qp_type == IBV_QPT_RAW_PACKET) { in mlx5_alloc_qp_buf()
1114 size_t aligned_sq_buf_size = align(qp->sq_buf_size, in mlx5_alloc_qp_buf()
1115 to_mdev(context->device)->page_size); in mlx5_alloc_qp_buf()
1117 err = mlx5_alloc_prefered_buf(to_mctx(context), &qp->sq_buf, in mlx5_alloc_qp_buf()
1119 to_mdev(context->device)->page_size, in mlx5_alloc_qp_buf()
1123 err = -ENOMEM; in mlx5_alloc_qp_buf()
1127 memset(qp->sq_buf.buf, 0, aligned_sq_buf_size); in mlx5_alloc_qp_buf()
1132 mlx5_free_actual_buf(to_mctx(qp->verbs_qp.qp.context), &qp->buf); in mlx5_alloc_qp_buf()
1134 if (qp->rq.wrid) in mlx5_alloc_qp_buf()
1135 free(qp->rq.wrid); in mlx5_alloc_qp_buf()
1137 if (qp->sq.wqe_head) in mlx5_alloc_qp_buf()
1138 free(qp->sq.wqe_head); in mlx5_alloc_qp_buf()
1140 if (qp->sq.wr_data) in mlx5_alloc_qp_buf()
1141 free(qp->sq.wr_data); in mlx5_alloc_qp_buf()
1142 if (qp->sq.wrid) in mlx5_alloc_qp_buf()
1143 free(qp->sq.wrid); in mlx5_alloc_qp_buf()
1150 struct mlx5_context *ctx = to_mctx(qp->ibv_qp->context); in mlx5_free_qp_buf()
1152 mlx5_free_actual_buf(ctx, &qp->buf); in mlx5_free_qp_buf()
1154 if (qp->sq_buf.buf) in mlx5_free_qp_buf()
1155 mlx5_free_actual_buf(ctx, &qp->sq_buf); in mlx5_free_qp_buf()
1157 if (qp->rq.wrid) in mlx5_free_qp_buf()
1158 free(qp->rq.wrid); in mlx5_free_qp_buf()
1160 if (qp->sq.wqe_head) in mlx5_free_qp_buf()
1161 free(qp->sq.wqe_head); in mlx5_free_qp_buf()
1163 if (qp->sq.wrid) in mlx5_free_qp_buf()
1164 free(qp->sq.wrid); in mlx5_free_qp_buf()
1166 if (qp->sq.wr_data) in mlx5_free_qp_buf()
1167 free(qp->sq.wr_data); in mlx5_free_qp_buf()
1178 if (attr->rx_hash_conf.rx_hash_key_len > sizeof(cmd_ex_rss.rx_hash_key)) { in mlx5_cmd_create_rss_qp()
1183 cmd_ex_rss.rx_hash_fields_mask = attr->rx_hash_conf.rx_hash_fields_mask; in mlx5_cmd_create_rss_qp()
1184 cmd_ex_rss.rx_hash_function = attr->rx_hash_conf.rx_hash_function; in mlx5_cmd_create_rss_qp()
1185 cmd_ex_rss.rx_key_len = attr->rx_hash_conf.rx_hash_key_len; in mlx5_cmd_create_rss_qp()
1186 memcpy(cmd_ex_rss.rx_hash_key, attr->rx_hash_conf.rx_hash_key, in mlx5_cmd_create_rss_qp()
1187 attr->rx_hash_conf.rx_hash_key_len); in mlx5_cmd_create_rss_qp()
1189 ret = ibv_cmd_create_qp_ex2(context, &qp->verbs_qp, in mlx5_cmd_create_rss_qp()
1190 sizeof(qp->verbs_qp), attr, in mlx5_cmd_create_rss_qp()
1197 qp->rss_qp = 1; in mlx5_cmd_create_rss_qp()
1203 struct mlx5_create_qp *cmd, in mlx5_cmd_create_qp_ex() argument
1211 memcpy(&cmd_ex.ibv_cmd.base, &cmd->ibv_cmd.user_handle, in mlx5_cmd_create_qp_ex()
1212 offsetof(typeof(cmd->ibv_cmd), is_srq) + in mlx5_cmd_create_qp_ex()
1213 sizeof(cmd->ibv_cmd.is_srq) - in mlx5_cmd_create_qp_ex()
1214 offsetof(typeof(cmd->ibv_cmd), user_handle)); in mlx5_cmd_create_qp_ex()
1216 memcpy(&cmd_ex.drv_ex, &cmd->buf_addr, in mlx5_cmd_create_qp_ex()
1217 offsetof(typeof(*cmd), sq_buf_addr) + in mlx5_cmd_create_qp_ex()
1218 sizeof(cmd->sq_buf_addr) - sizeof(cmd->ibv_cmd)); in mlx5_cmd_create_qp_ex()
1220 ret = ibv_cmd_create_qp_ex2(context, &qp->verbs_qp, in mlx5_cmd_create_qp_ex()
1221 sizeof(qp->verbs_qp), attr, in mlx5_cmd_create_qp_ex()
1223 sizeof(cmd_ex), &resp->ibv_resp, in mlx5_cmd_create_qp_ex()
1224 sizeof(resp->ibv_resp), sizeof(*resp)); in mlx5_cmd_create_qp_ex()
1248 struct mlx5_create_qp cmd; in create_qp() local
1257 FILE *fp = ctx->dbg_fp; in create_qp()
1259 if (attr->comp_mask & ~MLX5_CREATE_QP_SUP_COMP_MASK) in create_qp()
1262 if ((attr->comp_mask & IBV_QP_INIT_ATTR_MAX_TSO_HEADER) && in create_qp()
1263 (attr->qp_type != IBV_QPT_RAW_PACKET)) in create_qp()
1271 ibqp = (struct ibv_qp *)&qp->verbs_qp; in create_qp()
1272 qp->ibv_qp = ibqp; in create_qp()
1274 memset(&cmd, 0, sizeof(cmd)); in create_qp()
1278 if (attr->comp_mask & IBV_QP_INIT_ATTR_RX_HASH) { in create_qp()
1286 qp->wq_sig = qp_sig_enabled(); in create_qp()
1287 if (qp->wq_sig) in create_qp()
1288 cmd.flags |= MLX5_QP_FLAG_SIGNATURE; in create_qp()
1291 cmd.flags |= MLX5_QP_FLAG_SCATTER_CQE; in create_qp()
1295 errno = -ret; in create_qp()
1299 if (attr->qp_type == IBV_QPT_RAW_PACKET) { in create_qp()
1300 qp->buf_size = qp->sq.offset; in create_qp()
1301 qp->sq_buf_size = ret - qp->buf_size; in create_qp()
1302 qp->sq.offset = 0; in create_qp()
1304 qp->buf_size = ret; in create_qp()
1305 qp->sq_buf_size = 0; in create_qp()
1313 if (attr->qp_type == IBV_QPT_RAW_PACKET) { in create_qp()
1314 qp->sq_start = qp->sq_buf.buf; in create_qp()
1315 qp->sq.qend = qp->sq_buf.buf + in create_qp()
1316 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in create_qp()
1318 qp->sq_start = qp->buf.buf + qp->sq.offset; in create_qp()
1319 qp->sq.qend = qp->buf.buf + qp->sq.offset + in create_qp()
1320 (qp->sq.wqe_cnt << qp->sq.wqe_shift); in create_qp()
1325 if (mlx5_spinlock_init(&qp->sq.lock)) in create_qp()
1328 if (mlx5_spinlock_init(&qp->rq.lock)) in create_qp()
1331 qp->db = mlx5_alloc_dbrec(ctx); in create_qp()
1332 if (!qp->db) { in create_qp()
1337 qp->db[MLX5_RCV_DBR] = 0; in create_qp()
1338 qp->db[MLX5_SND_DBR] = 0; in create_qp()
1340 cmd.buf_addr = (uintptr_t) qp->buf.buf; in create_qp()
1341 cmd.sq_buf_addr = (attr->qp_type == IBV_QPT_RAW_PACKET) ? in create_qp()
1342 (uintptr_t) qp->sq_buf.buf : 0; in create_qp()
1343 cmd.db_addr = (uintptr_t) qp->db; in create_qp()
1344 cmd.sq_wqe_count = qp->sq.wqe_cnt; in create_qp()
1345 cmd.rq_wqe_count = qp->rq.wqe_cnt; in create_qp()
1346 cmd.rq_wqe_shift = qp->rq.wqe_shift; in create_qp()
1348 if (ctx->atomic_cap == IBV_ATOMIC_HCA) in create_qp()
1349 qp->atomics_enabled = 1; in create_qp()
1351 if (!ctx->cqe_version) { in create_qp()
1352 cmd.uidx = 0xffffff; in create_qp()
1353 pthread_mutex_lock(&ctx->qp_table_mutex); in create_qp()
1354 } else if (!is_xrc_tgt(attr->qp_type)) { in create_qp()
1361 cmd.uidx = usr_idx; in create_qp()
1364 if (attr->comp_mask & MLX5_CREATE_QP_EX2_COMP_MASK) in create_qp()
1365 ret = mlx5_cmd_create_qp_ex(context, attr, &cmd, qp, &resp_ex); in create_qp()
1367 ret = ibv_cmd_create_qp_ex(context, &qp->verbs_qp, sizeof(qp->verbs_qp), in create_qp()
1368 attr, &cmd.ibv_cmd, sizeof(cmd), in create_qp()
1375 uuar_index = (attr->comp_mask & MLX5_CREATE_QP_EX2_COMP_MASK) ? in create_qp()
1377 if (!ctx->cqe_version) { in create_qp()
1378 if (qp->sq.wqe_cnt || qp->rq.wqe_cnt) { in create_qp()
1379 ret = mlx5_store_qp(ctx, ibqp->qp_num, qp); in create_qp()
1386 pthread_mutex_unlock(&ctx->qp_table_mutex); in create_qp()
1391 qp->rq.max_post = qp->rq.wqe_cnt; in create_qp()
1392 if (attr->sq_sig_all) in create_qp()
1393 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; in create_qp()
1395 qp->sq_signal_bits = 0; in create_qp()
1397 attr->cap.max_send_wr = qp->sq.max_post; in create_qp()
1398 attr->cap.max_recv_wr = qp->rq.max_post; in create_qp()
1399 attr->cap.max_recv_sge = qp->rq.max_gs; in create_qp()
1401 qp->rsc.type = MLX5_RSC_TYPE_QP; in create_qp()
1402 qp->rsc.rsn = (ctx->cqe_version && !is_xrc_tgt(attr->qp_type)) ? in create_qp()
1403 usr_idx : ibqp->qp_num; in create_qp()
1411 if (!ctx->cqe_version) in create_qp()
1412 pthread_mutex_unlock(&to_mctx(context)->qp_table_mutex); in create_qp()
1413 else if (!is_xrc_tgt(attr->qp_type)) in create_qp()
1417 mlx5_free_db(to_mctx(context), qp->db); in create_qp()
1420 mlx5_spinlock_destroy(&qp->rq.lock); in create_qp()
1423 mlx5_spinlock_destroy(&qp->sq.lock); in create_qp()
1444 qp = create_qp(pd->context, &attrx); in mlx5_create_qp()
1453 struct mlx5_cq *send_cq = to_mcq(qp->send_cq); in mlx5_lock_cqs()
1454 struct mlx5_cq *recv_cq = to_mcq(qp->recv_cq); in mlx5_lock_cqs()
1458 mlx5_spin_lock(&send_cq->lock); in mlx5_lock_cqs()
1459 } else if (send_cq->cqn < recv_cq->cqn) { in mlx5_lock_cqs()
1460 mlx5_spin_lock(&send_cq->lock); in mlx5_lock_cqs()
1461 mlx5_spin_lock(&recv_cq->lock); in mlx5_lock_cqs()
1463 mlx5_spin_lock(&recv_cq->lock); in mlx5_lock_cqs()
1464 mlx5_spin_lock(&send_cq->lock); in mlx5_lock_cqs()
1467 mlx5_spin_lock(&send_cq->lock); in mlx5_lock_cqs()
1469 mlx5_spin_lock(&recv_cq->lock); in mlx5_lock_cqs()
1475 struct mlx5_cq *send_cq = to_mcq(qp->send_cq); in mlx5_unlock_cqs()
1476 struct mlx5_cq *recv_cq = to_mcq(qp->recv_cq); in mlx5_unlock_cqs()
1480 mlx5_spin_unlock(&send_cq->lock); in mlx5_unlock_cqs()
1481 } else if (send_cq->cqn < recv_cq->cqn) { in mlx5_unlock_cqs()
1482 mlx5_spin_unlock(&recv_cq->lock); in mlx5_unlock_cqs()
1483 mlx5_spin_unlock(&send_cq->lock); in mlx5_unlock_cqs()
1485 mlx5_spin_unlock(&send_cq->lock); in mlx5_unlock_cqs()
1486 mlx5_spin_unlock(&recv_cq->lock); in mlx5_unlock_cqs()
1489 mlx5_spin_unlock(&send_cq->lock); in mlx5_unlock_cqs()
1491 mlx5_spin_unlock(&recv_cq->lock); in mlx5_unlock_cqs()
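
Lines 1453-1491 pair up: when a QP posts to two distinct CQs, both CQ locks are always taken in ascending CQN order and released in the reverse order, so two threads working on the same pair of CQs can never deadlock. A self-contained sketch of that ordering rule (the struct and pthread spinlock stand in for mlx5_cq and mlx5_spinlock; the real code also handles the case where one of the CQs is absent):

#include <pthread.h>
#include <stdint.h>

struct cq_sketch {
        uint32_t cqn;
        pthread_spinlock_t lock;
};

static void lock_cq_pair(struct cq_sketch *send_cq, struct cq_sketch *recv_cq)
{
        if (send_cq == recv_cq) {
                pthread_spin_lock(&send_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                pthread_spin_lock(&send_cq->lock);
                pthread_spin_lock(&recv_cq->lock);
        } else {
                pthread_spin_lock(&recv_cq->lock);
                pthread_spin_lock(&send_cq->lock);
        }
}

static void unlock_cq_pair(struct cq_sketch *send_cq, struct cq_sketch *recv_cq)
{
        if (send_cq == recv_cq) {
                pthread_spin_unlock(&send_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                pthread_spin_unlock(&recv_cq->lock);
                pthread_spin_unlock(&send_cq->lock);
        } else {
                pthread_spin_unlock(&send_cq->lock);
                pthread_spin_unlock(&recv_cq->lock);
        }
}
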
1498 struct mlx5_context *ctx = to_mctx(ibqp->context); in mlx5_destroy_qp()
1501 if (qp->rss_qp) { in mlx5_destroy_qp()
1508 if (!ctx->cqe_version) in mlx5_destroy_qp()
1509 pthread_mutex_lock(&ctx->qp_table_mutex); in mlx5_destroy_qp()
1513 if (!ctx->cqe_version) in mlx5_destroy_qp()
1514 pthread_mutex_unlock(&ctx->qp_table_mutex); in mlx5_destroy_qp()
1520 __mlx5_cq_clean(to_mcq(ibqp->recv_cq), qp->rsc.rsn, in mlx5_destroy_qp()
1521 ibqp->srq ? to_msrq(ibqp->srq) : NULL); in mlx5_destroy_qp()
1522 if (ibqp->send_cq != ibqp->recv_cq) in mlx5_destroy_qp()
1523 __mlx5_cq_clean(to_mcq(ibqp->send_cq), qp->rsc.rsn, NULL); in mlx5_destroy_qp()
1525 if (!ctx->cqe_version) { in mlx5_destroy_qp()
1526 if (qp->sq.wqe_cnt || qp->rq.wqe_cnt) in mlx5_destroy_qp()
1527 mlx5_clear_qp(ctx, ibqp->qp_num); in mlx5_destroy_qp()
1531 if (!ctx->cqe_version) in mlx5_destroy_qp()
1532 pthread_mutex_unlock(&ctx->qp_table_mutex); in mlx5_destroy_qp()
1533 else if (!is_xrc_tgt(ibqp->qp_type)) in mlx5_destroy_qp()
1534 mlx5_clear_uidx(ctx, qp->rsc.rsn); in mlx5_destroy_qp()
1536 mlx5_free_db(ctx, qp->db); in mlx5_destroy_qp()
1537 mlx5_spinlock_destroy(&qp->rq.lock); in mlx5_destroy_qp()
1538 mlx5_spinlock_destroy(&qp->sq.lock); in mlx5_destroy_qp()
1549 struct ibv_query_qp cmd; in mlx5_query_qp() local
1553 if (qp->rss_qp) in mlx5_query_qp()
1556 ret = ibv_cmd_query_qp(ibqp, attr, attr_mask, init_attr, &cmd, sizeof(cmd)); in mlx5_query_qp()
1560 init_attr->cap.max_send_wr = qp->sq.max_post; in mlx5_query_qp()
1561 init_attr->cap.max_send_sge = qp->sq.max_gs; in mlx5_query_qp()
1562 init_attr->cap.max_inline_data = qp->max_inline_data; in mlx5_query_qp()
1564 attr->cap = init_attr->cap; in mlx5_query_qp()
1576 struct ibv_modify_qp cmd = {}; in mlx5_modify_qp() local
1580 struct mlx5_context *context = to_mctx(qp->context); in mlx5_modify_qp()
1582 uint32_t *db; in mlx5_modify_qp() local
1584 if (mqp->rss_qp) in mlx5_modify_qp()
1588 switch (qp->qp_type) { in mlx5_modify_qp()
1590 if (context->cached_link_layer[attr->port_num - 1] == in mlx5_modify_qp()
1592 if (context->cached_device_cap_flags & in mlx5_modify_qp()
1594 mqp->qp_cap_cache |= in mlx5_modify_qp()
1599 context->cached_tso_caps.supported_qpts, in mlx5_modify_qp()
1601 mqp->max_tso = in mlx5_modify_qp()
1602 context->cached_tso_caps.max_tso; in mlx5_modify_qp()
1618 &cmd, sizeof(cmd)); in mlx5_modify_qp()
1622 attr->qp_state == IBV_QPS_RESET) { in mlx5_modify_qp()
1623 if (qp->recv_cq) { in mlx5_modify_qp()
1624 mlx5_cq_clean(to_mcq(qp->recv_cq), mqp->rsc.rsn, in mlx5_modify_qp()
1625 qp->srq ? to_msrq(qp->srq) : NULL); in mlx5_modify_qp()
1627 if (qp->send_cq != qp->recv_cq && qp->send_cq) in mlx5_modify_qp()
1628 mlx5_cq_clean(to_mcq(qp->send_cq), in mlx5_modify_qp()
1629 to_mqp(qp)->rsc.rsn, NULL); in mlx5_modify_qp()
1632 db = mqp->db; in mlx5_modify_qp()
1633 db[MLX5_RCV_DBR] = 0; in mlx5_modify_qp()
1634 db[MLX5_SND_DBR] = 0; in mlx5_modify_qp()
1647 attr->qp_state == IBV_QPS_RTR && in mlx5_modify_qp()
1648 qp->qp_type == IBV_QPT_RAW_PACKET) { in mlx5_modify_qp()
1649 mlx5_spin_lock(&mqp->rq.lock); in mlx5_modify_qp()
1650 mqp->db[MLX5_RCV_DBR] = htobe32(mqp->rq.head & 0xffff); in mlx5_modify_qp()
1651 mlx5_spin_unlock(&mqp->rq.lock); in mlx5_modify_qp()
1661 struct mlx5_context *ctx = to_mctx(pd->context); in mlx5_create_ah()
1669 if (attr->port_num < 1 || attr->port_num > ctx->num_ports) in mlx5_create_ah()
1672 if (ctx->cached_link_layer[attr->port_num - 1]) { in mlx5_create_ah()
1673 is_eth = ctx->cached_link_layer[attr->port_num - 1] == in mlx5_create_ah()
1676 if (ibv_query_port(pd->context, attr->port_num, &port_attr)) in mlx5_create_ah()
1682 if (unlikely((!attr->is_global) && is_eth)) { in mlx5_create_ah()
1692 if (ibv_query_gid_type(pd->context, attr->port_num, in mlx5_create_ah()
1693 attr->grh.sgid_index, &gid_type)) in mlx5_create_ah()
1697 ah->av.rlid = htobe16(rand() % (RROCE_UDP_SPORT_MAX + 1 in mlx5_create_ah()
1698 - RROCE_UDP_SPORT_MIN) in mlx5_create_ah()
1705 ah->av.fl_mlid = attr->src_path_bits & 0x7f; in mlx5_create_ah()
1706 ah->av.rlid = htobe16(attr->dlid); in mlx5_create_ah()
1709 ah->av.stat_rate_sl = (attr->static_rate << 4) | attr->sl; in mlx5_create_ah()
1710 if (attr->is_global) { in mlx5_create_ah()
1711 ah->av.tclass = attr->grh.traffic_class; in mlx5_create_ah()
1712 ah->av.hop_limit = attr->grh.hop_limit; in mlx5_create_ah()
1714 ((attr->grh.sgid_index & 0xff) << 20) | in mlx5_create_ah()
1715 (attr->grh.flow_label & 0xfffff)); in mlx5_create_ah()
1716 ah->av.grh_gid_fl = tmp; in mlx5_create_ah()
1717 memcpy(ah->av.rgid, attr->grh.dgid.raw, 16); in mlx5_create_ah()
1721 if (ctx->cmds_supp_uhw & MLX5_USER_CMDS_SUPP_UHW_CREATE_AH) { in mlx5_create_ah()
1724 if (ibv_cmd_create_ah(pd, &ah->ibv_ah, attr, &resp.ibv_resp, sizeof(resp))) in mlx5_create_ah()
1727 ah->kern_ah = true; in mlx5_create_ah()
1728 memcpy(ah->av.rmac, resp.dmac, ETHERNET_LL_SIZE); in mlx5_create_ah()
1732 if (ibv_resolve_eth_l2_from_gid(pd->context, attr, in mlx5_create_ah()
1733 ah->av.rmac, &vid)) in mlx5_create_ah()
1738 return &ah->ibv_ah; in mlx5_create_ah()
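
Lines 1705-1717 pack the address handle's address vector: the source path bits land in the low 7 bits of fl_mlid, the DLID is stored big-endian, static rate and SL share one byte, and for a global route the SGID index and flow label share a single 32-bit word (index in bits 20-27, flow label in bits 0-19). A trimmed sketch of that packing; the struct is an illustrative stand-in for the mlx5 AV, and the real code ORs additional control bits into grh_gid_fl:

#include <endian.h>
#include <stdint.h>
#include <string.h>

struct av_sketch {
        uint8_t  fl_mlid;
        uint16_t rlid;          /* big-endian */
        uint8_t  stat_rate_sl;
        uint8_t  tclass;
        uint8_t  hop_limit;
        uint32_t grh_gid_fl;    /* SGID index in bits 20-27, flow label in 0-19 */
        uint8_t  rgid[16];
};

static void pack_av(struct av_sketch *av, uint8_t src_path_bits, uint16_t dlid,
                    uint8_t static_rate, uint8_t sl, int is_global,
                    uint8_t tclass, uint8_t hop_limit, uint8_t sgid_index,
                    uint32_t flow_label, const uint8_t dgid[16])
{
        av->fl_mlid = src_path_bits & 0x7f;             /* line 1705 */
        av->rlid = htobe16(dlid);                       /* line 1706 */
        av->stat_rate_sl = (static_rate << 4) | sl;     /* line 1709 */

        if (is_global) {
                av->tclass = tclass;
                av->hop_limit = hop_limit;
                av->grh_gid_fl = htobe32(((uint32_t)(sgid_index & 0xff) << 20) |
                                         (flow_label & 0xfffff)); /* lines 1714-1715 */
                memcpy(av->rgid, dgid, 16);             /* line 1717 */
        }
}
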
1749 if (mah->kern_ah) { in mlx5_destroy_ah()
1779 *srq_num = msrq->srqn; in mlx5_get_srq_num()
1790 struct ibv_open_xrcd cmd = {}; in mlx5_open_xrcd() local
1798 &cmd, sizeof(cmd), &resp, sizeof(resp)); in mlx5_open_xrcd()
1804 return &xrcd->xrcd; in mlx5_open_xrcd()
1824 struct mlx5_create_srq_ex cmd; in mlx5_create_xrc_srq() local
1831 FILE *fp = ctx->dbg_fp; in mlx5_create_xrc_srq()
1837 ibsrq = (struct ibv_srq *)&msrq->vsrq; in mlx5_create_xrc_srq()
1839 memset(&cmd, 0, sizeof(cmd)); in mlx5_create_xrc_srq()
1842 if (mlx5_spinlock_init(&msrq->lock)) { in mlx5_create_xrc_srq()
1843 fprintf(stderr, "%s-%d:\n", __func__, __LINE__); in mlx5_create_xrc_srq()
1847 if (attr->attr.max_wr > ctx->max_srq_recv_wr) { in mlx5_create_xrc_srq()
1848 fprintf(stderr, "%s-%d:max_wr %d, max_srq_recv_wr %d\n", in mlx5_create_xrc_srq()
1849 __func__, __LINE__, attr->attr.max_wr, in mlx5_create_xrc_srq()
1850 ctx->max_srq_recv_wr); in mlx5_create_xrc_srq()
1860 max_sge = ctx->max_recv_wr / sizeof(struct mlx5_wqe_data_seg); in mlx5_create_xrc_srq()
1861 if (attr->attr.max_sge > max_sge) { in mlx5_create_xrc_srq()
1862 fprintf(stderr, "%s-%d:max_wr %d, max_srq_recv_wr %d\n", in mlx5_create_xrc_srq()
1863 __func__, __LINE__, attr->attr.max_wr, in mlx5_create_xrc_srq()
1864 ctx->max_srq_recv_wr); in mlx5_create_xrc_srq()
1869 msrq->max = align_queue_size(attr->attr.max_wr + 1); in mlx5_create_xrc_srq()
1870 msrq->max_gs = attr->attr.max_sge; in mlx5_create_xrc_srq()
1871 msrq->counter = 0; in mlx5_create_xrc_srq()
1874 fprintf(stderr, "%s-%d:\n", __func__, __LINE__); in mlx5_create_xrc_srq()
1878 msrq->db = mlx5_alloc_dbrec(ctx); in mlx5_create_xrc_srq()
1879 if (!msrq->db) { in mlx5_create_xrc_srq()
1880 fprintf(stderr, "%s-%d:\n", __func__, __LINE__); in mlx5_create_xrc_srq()
1884 *msrq->db = 0; in mlx5_create_xrc_srq()
1886 cmd.buf_addr = (uintptr_t)msrq->buf.buf; in mlx5_create_xrc_srq()
1887 cmd.db_addr = (uintptr_t)msrq->db; in mlx5_create_xrc_srq()
1888 msrq->wq_sig = srq_sig_enabled(); in mlx5_create_xrc_srq()
1889 if (msrq->wq_sig) in mlx5_create_xrc_srq()
1890 cmd.flags = MLX5_SRQ_FLAG_SIGNATURE; in mlx5_create_xrc_srq()
1892 attr->attr.max_sge = msrq->max_gs; in mlx5_create_xrc_srq()
1893 if (ctx->cqe_version) { in mlx5_create_xrc_srq()
1899 cmd.uidx = uidx; in mlx5_create_xrc_srq()
1901 cmd.uidx = 0xffffff; in mlx5_create_xrc_srq()
1902 pthread_mutex_lock(&ctx->srq_table_mutex); in mlx5_create_xrc_srq()
1905 err = ibv_cmd_create_srq_ex(context, &msrq->vsrq, sizeof(msrq->vsrq), in mlx5_create_xrc_srq()
1906 attr, &cmd.ibv_cmd, sizeof(cmd), in mlx5_create_xrc_srq()
1911 if (!ctx->cqe_version) { in mlx5_create_xrc_srq()
1916 pthread_mutex_unlock(&ctx->srq_table_mutex); in mlx5_create_xrc_srq()
1919 msrq->srqn = resp.srqn; in mlx5_create_xrc_srq()
1920 msrq->rsc.type = MLX5_RSC_TYPE_XSRQ; in mlx5_create_xrc_srq()
1921 msrq->rsc.rsn = ctx->cqe_version ? cmd.uidx : resp.srqn; in mlx5_create_xrc_srq()
1929 if (ctx->cqe_version) in mlx5_create_xrc_srq()
1930 mlx5_clear_uidx(ctx, cmd.uidx); in mlx5_create_xrc_srq()
1932 pthread_mutex_unlock(&ctx->srq_table_mutex); in mlx5_create_xrc_srq()
1935 mlx5_free_db(ctx, msrq->db); in mlx5_create_xrc_srq()
1938 free(msrq->wrid); in mlx5_create_xrc_srq()
1939 mlx5_free_buf(&msrq->buf); in mlx5_create_xrc_srq()
1942 mlx5_spinlock_destroy(&msrq->lock); in mlx5_create_xrc_srq()
1953 if (!(attr->comp_mask & IBV_SRQ_INIT_ATTR_TYPE) || in mlx5_create_srq_ex()
1954 (attr->srq_type == IBV_SRQT_BASIC)) in mlx5_create_srq_ex()
1955 return mlx5_create_srq(attr->pd, in mlx5_create_srq_ex()
1957 else if (attr->srq_type == IBV_SRQT_XRC) in mlx5_create_srq_ex()
1970 struct mlx5_query_device_ex cmd; in mlx5_query_device_ex() local
1977 int cmd_supp_uhw = mctx->cmds_supp_uhw & in mlx5_query_device_ex()
1980 memset(&cmd, 0, sizeof(cmd)); in mlx5_query_device_ex()
1984 &cmd.ibv_cmd, sizeof(cmd.ibv_cmd), sizeof(cmd), in mlx5_query_device_ex()
1990 attr->tso_caps = resp.tso_caps; in mlx5_query_device_ex()
1991 attr->rss_caps.rx_hash_fields_mask = resp.rss_caps.rx_hash_fields_mask; in mlx5_query_device_ex()
1992 attr->rss_caps.rx_hash_function = resp.rss_caps.rx_hash_function; in mlx5_query_device_ex()
1993 attr->packet_pacing_caps = resp.packet_pacing_caps.caps; in mlx5_query_device_ex()
1996 mctx->vendor_cap_flags |= MLX5_VENDOR_CAP_FLAGS_MPW; in mlx5_query_device_ex()
1998 mctx->cqe_comp_caps = resp.cqe_comp_caps; in mlx5_query_device_ex()
2003 a = &attr->orig_attr; in mlx5_query_device_ex()
2004 snprintf(a->fw_ver, sizeof(a->fw_ver), "%d.%d.%04d", in mlx5_query_device_ex()
2025 mlx5_free_actual_buf(ctx, &rwq->buf); in mlx5_free_rwq_buf()
2026 free(rwq->rq.wrid); in mlx5_free_rwq_buf()
2036 rwq->rq.wrid = malloc(rwq->rq.wqe_cnt * sizeof(uint64_t)); in mlx5_alloc_rwq_buf()
2037 if (!rwq->rq.wrid) { in mlx5_alloc_rwq_buf()
2039 return -1; in mlx5_alloc_rwq_buf()
2042 err = mlx5_alloc_prefered_buf(to_mctx(context), &rwq->buf, in mlx5_alloc_rwq_buf()
2043 align(rwq->buf_size, to_mdev in mlx5_alloc_rwq_buf()
2044 (context->device)->page_size), in mlx5_alloc_rwq_buf()
2045 to_mdev(context->device)->page_size, in mlx5_alloc_rwq_buf()
2050 free(rwq->rq.wrid); in mlx5_alloc_rwq_buf()
2052 return -1; in mlx5_alloc_rwq_buf()
2061 struct mlx5_create_wq cmd; in mlx5_create_wq() local
2068 FILE *fp = ctx->dbg_fp; in mlx5_create_wq()
2070 if (attr->wq_type != IBV_WQT_RQ) in mlx5_create_wq()
2073 memset(&cmd, 0, sizeof(cmd)); in mlx5_create_wq()
2080 ret = ibv_init_wq(&rwq->wq); in mlx5_create_wq()
2084 rwq->wq_sig = rwq_sig_enabled(context); in mlx5_create_wq()
2085 if (rwq->wq_sig) in mlx5_create_wq()
2086 cmd.drv.flags = MLX5_RWQ_FLAG_SIGNATURE; in mlx5_create_wq()
2090 errno = -ret; in mlx5_create_wq()
2094 rwq->buf_size = ret; in mlx5_create_wq()
2100 if (mlx5_spinlock_init(&rwq->rq.lock)) in mlx5_create_wq()
2103 rwq->db = mlx5_alloc_dbrec(ctx); in mlx5_create_wq()
2104 if (!rwq->db) in mlx5_create_wq()
2107 rwq->db[MLX5_RCV_DBR] = 0; in mlx5_create_wq()
2108 rwq->db[MLX5_SND_DBR] = 0; in mlx5_create_wq()
2109 rwq->pbuff = rwq->buf.buf + rwq->rq.offset; in mlx5_create_wq()
2110 rwq->recv_db = &rwq->db[MLX5_RCV_DBR]; in mlx5_create_wq()
2111 cmd.drv.buf_addr = (uintptr_t)rwq->buf.buf; in mlx5_create_wq()
2112 cmd.drv.db_addr = (uintptr_t)rwq->db; in mlx5_create_wq()
2113 cmd.drv.rq_wqe_count = rwq->rq.wqe_cnt; in mlx5_create_wq()
2114 cmd.drv.rq_wqe_shift = rwq->rq.wqe_shift; in mlx5_create_wq()
2121 cmd.drv.user_index = usr_idx; in mlx5_create_wq()
2122 err = ibv_cmd_create_wq(context, attr, &rwq->wq, &cmd.ibv_cmd, in mlx5_create_wq()
2123 sizeof(cmd.ibv_cmd), in mlx5_create_wq()
2124 sizeof(cmd), in mlx5_create_wq()
2130 rwq->rsc.type = MLX5_RSC_TYPE_RWQ; in mlx5_create_wq()
2131 rwq->rsc.rsn = cmd.drv.user_index; in mlx5_create_wq()
2133 rwq->wq.post_recv = mlx5_post_wq_recv; in mlx5_create_wq()
2134 return &rwq->wq; in mlx5_create_wq()
2137 mlx5_clear_uidx(ctx, cmd.drv.user_index); in mlx5_create_wq()
2139 mlx5_free_db(to_mctx(context), rwq->db); in mlx5_create_wq()
2141 mlx5_spinlock_destroy(&rwq->rq.lock); in mlx5_create_wq()
2145 ibv_cleanup_wq(&rwq->wq); in mlx5_create_wq()
2153 struct mlx5_modify_wq cmd = {}; in mlx5_modify_wq() local
2156 if ((attr->attr_mask & IBV_WQ_ATTR_STATE) && in mlx5_modify_wq()
2157 attr->wq_state == IBV_WQS_RDY) { in mlx5_modify_wq()
2158 if ((attr->attr_mask & IBV_WQ_ATTR_CURR_STATE) && in mlx5_modify_wq()
2159 attr->curr_wq_state != wq->state) in mlx5_modify_wq()
2160 return -EINVAL; in mlx5_modify_wq()
2162 if (wq->state == IBV_WQS_RESET) { in mlx5_modify_wq()
2163 mlx5_spin_lock(&to_mcq(wq->cq)->lock); in mlx5_modify_wq()
2164 __mlx5_cq_clean(to_mcq(wq->cq), in mlx5_modify_wq()
2165 rwq->rsc.rsn, NULL); in mlx5_modify_wq()
2166 mlx5_spin_unlock(&to_mcq(wq->cq)->lock); in mlx5_modify_wq()
2168 rwq->db[MLX5_RCV_DBR] = 0; in mlx5_modify_wq()
2169 rwq->db[MLX5_SND_DBR] = 0; in mlx5_modify_wq()
2173 return ibv_cmd_modify_wq(wq, attr, &cmd.ibv_cmd, sizeof(cmd.ibv_cmd), sizeof(cmd)); in mlx5_modify_wq()
2185 mlx5_spin_lock(&to_mcq(wq->cq)->lock); in mlx5_destroy_wq()
2186 __mlx5_cq_clean(to_mcq(wq->cq), rwq->rsc.rsn, NULL); in mlx5_destroy_wq()
2187 mlx5_spin_unlock(&to_mcq(wq->cq)->lock); in mlx5_destroy_wq()
2188 mlx5_clear_uidx(to_mctx(wq->context), rwq->rsc.rsn); in mlx5_destroy_wq()
2189 mlx5_free_db(to_mctx(wq->context), rwq->db); in mlx5_destroy_wq()
2190 mlx5_spinlock_destroy(&rwq->rq.lock); in mlx5_destroy_wq()
2191 mlx5_free_rwq_buf(rwq, wq->context); in mlx5_destroy_wq()
2192 ibv_cleanup_wq(&rwq->wq); in mlx5_destroy_wq()
2201 struct ibv_create_rwq_ind_table *cmd; in mlx5_create_rwq_ind_table() local
2209 num_tbl_entries = 1 << init_attr->log_ind_tbl_size; in mlx5_create_rwq_ind_table()
2214 cmd_size = required_tbl_size + sizeof(*cmd); in mlx5_create_rwq_ind_table()
2215 cmd = calloc(1, cmd_size); in mlx5_create_rwq_ind_table()
2216 if (!cmd) in mlx5_create_rwq_ind_table()
2224 err = ibv_cmd_create_rwq_ind_table(context, init_attr, ind_table, cmd, in mlx5_create_rwq_ind_table()
2230 free(cmd); in mlx5_create_rwq_ind_table()
2236 free(cmd); in mlx5_create_rwq_ind_table()
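
Finally, lines 2209-2236 size a variable-length command: the indirection table has 1 << log_ind_tbl_size entries appended after the fixed header, and the allocation is freed on both the success and error paths once ibv_cmd_create_rwq_ind_table() returns. A hedged sketch of that sizing, assuming 32-bit table entries padded up to 64-bit alignment (the alignment rule is an assumption):

#include <stdint.h>
#include <stdlib.h>

struct ind_tbl_cmd_hdr {        /* illustrative stand-in for the fixed part */
        uint64_t reserved;
};

static void *alloc_ind_tbl_cmd(uint32_t log_ind_tbl_size, size_t *cmd_size)
{
        size_t num_tbl_entries = (size_t)1 << log_ind_tbl_size;
        size_t tbl_size = num_tbl_entries * sizeof(uint32_t);

        if (tbl_size < sizeof(uint64_t))        /* keep the trailing table 64-bit aligned */
                tbl_size = sizeof(uint64_t);

        *cmd_size = sizeof(struct ind_tbl_cmd_hdr) + tbl_size;
        return calloc(1, *cmd_size);    /* caller frees after the kernel call, lines 2230/2236 */
}
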