Lines matching "x", "-", "rc"
2 * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
42 *nmap = umem->nmap;
43 return umem->sg_head.sgl;
54 int rc = -1;
60 rdev = dmac_work->rdev;
61 ah_attr = dmac_work->ah_attr;
62 rc = ib_resolve_eth_dmac(&rdev->ibdev, ah_attr);
63 if (rc)
64 dev_err(rdev_to_dev(dmac_work->rdev),
65 "Failed to resolve dest mac rc = %d\n", rc);
66 atomic_set(&dmac_work->status_wait, rc << 8);
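/*
 * Note on the handshake: status_wait is seeded with 1 by the caller (see the
 * polling loop further down) and the worker above stores rc << 8, so the low
 * byte clearing signals completion while the upper bits keep any
 * ib_resolve_eth_dmac() error visible to the waiter.
 */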
114 int rc;
116 rc = ib_copy_to_udata(udata, data, len);
117 if (rc)
119 "ucontext copy failed from %ps rc %d\n",
120 __builtin_return_address(0), rc);
122 return rc;
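/* The helper above is a thin wrapper around ib_copy_to_udata() that logs the
 * failing caller via %ps; the create/modify verbs further down use it when
 * copying responses to user space. */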
133 if (!rdev || !rdev->netdev)
136 netdev = rdev->netdev;
138 /* In case of active-backup bond mode, return active slave */
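/*
 * Device attribute query: the ib_device_attr fields below are filled from the
 * cached qplib dev_attr limits reported by firmware, plus PCI IDs taken from
 * the parent Ethernet device (en_dev).
 */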
152 struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
156 memcpy(&ib_attr->fw_ver, dev_attr->fw_ver, 4);
157 bnxt_qplib_get_guid(rdev->dev_addr, (u8 *)&ib_attr->sys_image_guid);
158 ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
159 ib_attr->page_size_cap = dev_attr->page_size_cap;
160 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
161 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
162 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
163 ib_attr->max_qp = dev_attr->max_qp;
164 ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
169 if (rdev->min_tx_depth == 1 &&
170 min_tx_depth < dev_attr->max_qp_wqes)
171 rdev->min_tx_depth = min_tx_depth;
172 ib_attr->device_cap_flags =
184 ib_attr->max_send_sge = dev_attr->max_qp_sges;
185 ib_attr->max_recv_sge = dev_attr->max_qp_sges;
186 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
187 ib_attr->max_cq = dev_attr->max_cq;
188 ib_attr->max_cqe = dev_attr->max_cq_wqes;
189 ib_attr->max_mr = dev_attr->max_mr;
190 ib_attr->max_pd = dev_attr->max_pd;
191 ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
192 ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
193 if (dev_attr->is_atomic) {
194 ib_attr->atomic_cap = IB_ATOMIC_GLOB;
195 ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
197 ib_attr->max_ee_rd_atom = 0;
198 ib_attr->max_res_rd_atom = 0;
199 ib_attr->max_ee_init_rd_atom = 0;
200 ib_attr->max_ee = 0;
201 ib_attr->max_rdd = 0;
202 ib_attr->max_mw = dev_attr->max_mw;
203 ib_attr->max_raw_ipv6_qp = 0;
204 ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
205 ib_attr->max_mcast_grp = 0;
206 ib_attr->max_mcast_qp_attach = 0;
207 ib_attr->max_total_mcast_qp_attach = 0;
208 ib_attr->max_ah = dev_attr->max_ah;
209 ib_attr->max_srq = dev_attr->max_srq;
210 ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
211 ib_attr->max_srq_sge = dev_attr->max_srq_sges;
213 ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
214 ib_attr->max_pkeys = 1;
215 ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
216 ib_attr->sig_prot_cap = 0;
217 ib_attr->sig_guard_cap = 0;
218 ib_attr->odp_caps.general_caps = 0;
227 dev_dbg(rdev_to_dev(rdev), "Modify device with mask 0x%x\n",
233 /* GUID should be made as READ-ONLY */
236 /* Node Desc should be made as READ-ONLY */
291 struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
294 dev_dbg(rdev_to_dev(rdev), "QUERY PORT with port_num 0x%x\n", port_num);
297 port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
298 port_attr->state = bnxt_re_get_link_state(rdev);
299 if (port_attr->state == IB_PORT_ACTIVE)
300 port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
301 port_attr->max_mtu = IB_MTU_4096;
302 port_attr->active_mtu = iboe_get_mtu(if_getmtu(rdev->netdev));
303 port_attr->gid_tbl_len = dev_attr->max_sgid;
304 port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
309 port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
310 port_attr->bad_pkey_cntr = 0;
311 port_attr->qkey_viol_cntr = 0;
312 port_attr->pkey_tbl_len = dev_attr->max_pkey;
313 port_attr->lid = 0;
314 port_attr->sm_lid = 0;
315 port_attr->lmc = 0;
316 port_attr->max_vl_num = 4;
317 port_attr->sm_sl = 0;
318 port_attr->subnet_timeout = 0;
319 port_attr->init_type_reply = 0;
320 rdev->espeed = rdev->en_dev->espeed;
322 if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
323 __to_ib_speed_width(rdev->espeed, &active_speed,
326 port_attr->active_speed = active_speed;
327 port_attr->active_width = active_width;
336 dev_dbg(rdev_to_dev(rdev), "Modify port with mask 0x%x\n",
359 return -EINVAL;
361 immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
362 immutable->gid_tbl_len = port_attr.gid_tbl_len;
363 if (rdev->roce_mode == BNXT_RE_FLAG_ROCEV1_CAP)
364 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
365 else if (rdev->roce_mode == BNXT_RE_FLAG_ROCEV2_CAP)
366 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
368 immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
370 immutable->max_mad_size = IB_MGMT_MAD_SIZE;
378 sprintf(str, "%d.%d.%d.%d", rdev->dev_attr->fw_ver[0],
379 rdev->dev_attr->fw_ver[1], rdev->dev_attr->fw_ver[2],
380 rdev->dev_attr->fw_ver[3]);
387 return -EINVAL;
398 int rc = 0;
402 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
403 &rdev->qplib_res.sgid_tbl, index,
405 return rc;
411 int rc = 0;
414 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
422 return -EINVAL;
424 if (sgid_tbl && sgid_tbl->active) {
425 if (ctx->idx >= sgid_tbl->max) {
427 return -EINVAL;
429 gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
430 vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
431 ctx->refcnt--;
438 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
440 if (ctx->idx == 0 &&
442 (rdev->gsi_ctx.gsi_sqp ||
443 rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD)) {
446 if (!ctx->refcnt) {
447 rdev->gid_map[index] = -1;
448 ctx_tbl = sgid_tbl->ctx;
449 ctx_tbl[ctx->idx] = NULL;
454 rdev->gid_map[index] = -1;
455 if (!ctx->refcnt) {
456 rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
458 if (!rc) {
460 ctx_tbl = sgid_tbl->ctx;
461 ctx_tbl[ctx->idx] = NULL;
465 "Remove GID failed rc = 0x%x\n", rc);
470 return -EINVAL;
472 return rc;
479 int rc;
484 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
485 if ((attr->ndev) && is_vlan_dev(attr->ndev))
486 vlan_id = vlan_dev_vlan_id(attr->ndev);
488 rc = bnxt_qplib_add_sgid(sgid_tbl, gid,
489 rdev->dev_addr,
491 if (rc == -EALREADY) {
493 ctx_tbl = sgid_tbl->ctx;
497 return -ENOMEM;
498 ctx->idx = tbl_idx;
499 ctx->refcnt = 1;
502 ctx_tbl[tbl_idx]->refcnt++;
506 rdev->gid_map[index] = tbl_idx;
508 } else if (rc < 0) {
509 dev_err(rdev_to_dev(rdev), "Add GID failed rc = 0x%x\n", rc);
510 return rc;
515 return -ENOMEM;
517 ctx_tbl = sgid_tbl->ctx;
518 ctx->idx = tbl_idx;
519 ctx->refcnt = 1;
522 rdev->gid_map[index] = tbl_idx;
525 return rc;
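/*
 * GID table handling above: the stack's GID index is mapped to a hardware
 * SGID slot through rdev->gid_map, and each slot keeps a small context with a
 * refcount, so duplicate adds (-EALREADY) only bump the count and the
 * hardware entry is removed only when the last reference goes away.
 */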
536 struct bnxt_re_legacy_fence_data *fence = &pd->fence;
537 struct ib_mr *ib_mr = &fence->mr->ib_mr;
538 struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
539 struct bnxt_re_dev *rdev = pd->rdev;
541 if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
545 wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
546 wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
547 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
548 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
549 wqe->bind.zero_based = false;
550 wqe->bind.parent_l_key = ib_mr->lkey;
551 wqe->bind.va = (u64)fence->va;
552 wqe->bind.length = fence->size;
553 wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
554 wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
557 * wqe->bind.r_key will be set at (re)bind time.
559 fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
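/*
 * Legacy fence emulation: on chips older than Gen P5/P7 a dummy buffer is
 * DMA-mapped, registered as an MR and covered by a type-1 memory window; the
 * bind WQE prepared above (carrying the UC_FENCE flag) is posted by the
 * helper just below when fence semantics are required, with the rkey bumped
 * on every bind.  Gen P5/P7 devices skip all of this via the early returns.
 */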
566 struct ib_pd *ib_pd = qp->ib_qp.pd;
568 struct bnxt_re_legacy_fence_data *fence = &pd->fence;
569 struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
571 int rc;
577 wqe.bind.r_key = fence->bind_rkey;
578 fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
580 dev_dbg(rdev_to_dev(qp->rdev),
581 "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
582 wqe.bind.r_key, qp->qplib_qp.id, pd);
583 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
584 if (rc) {
585 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
586 return rc;
588 bnxt_qplib_post_send_db(&qp->qplib_qp);
590 return rc;
596 struct bnxt_re_legacy_fence_data *fence = &pd->fence;
597 struct bnxt_re_dev *rdev = pd->rdev;
604 int rc;
606 if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
610 /* Allocate a small chunk of memory and dma-map it */
611 fence->va = kzalloc(BNXT_RE_LEGACY_FENCE_BYTES, GFP_KERNEL);
612 if (!fence->va)
613 return -ENOMEM;
614 dma_addr = ib_dma_map_single(&rdev->ibdev, fence->va,
617 rc = ib_dma_mapping_error(&rdev->ibdev, dma_addr);
618 if (rc) {
619 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
620 rc = -EIO;
621 fence->dma_addr = 0;
624 fence->dma_addr = dma_addr;
630 fence->mr = mr;
631 mr->rdev = rdev;
632 mr->qplib_mr.pd = &pd->qplib_pd;
633 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
634 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
635 if (!_is_alloc_mr_unified(rdev->qplib_res.dattr)) {
636 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
637 if (rc) {
638 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
642 mr->ib_mr.lkey = mr->qplib_mr.lkey;
644 mr->qplib_mr.va = (u64)fence->va;
645 mr->qplib_mr.total_size = BNXT_RE_LEGACY_FENCE_BYTES;
648 mrinfo.mrw = &mr->qplib_mr;
656 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
657 if (rc) {
658 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
661 mr->ib_mr.lkey = mr->qplib_mr.lkey;
662 mr->ib_mr.rkey = mr->qplib_mr.rkey;
663 atomic_inc(&rdev->stats.rsors.mr_count);
664 max_mr_count = atomic_read(&rdev->stats.rsors.mr_count);
665 if (max_mr_count > (atomic_read(&rdev->stats.rsors.max_mr_count)))
666 atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);
668 ib_mw = bnxt_re_alloc_mw(&pd->ibpd, IB_MW_TYPE_1, NULL);
672 "Failed to create fence-MW for PD: %p\n", pd);
673 rc = -EINVAL;
676 fence->mw = ib_mw;
682 if (mr->ib_mr.lkey) {
683 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
684 atomic_dec(&rdev->stats.rsors.mr_count);
687 fence->mr = NULL;
690 ib_dma_unmap_single(&rdev->ibdev, fence->dma_addr,
692 fence->dma_addr = 0;
695 kfree(fence->va);
696 fence->va = NULL;
697 return rc;
702 struct bnxt_re_legacy_fence_data *fence = &pd->fence;
703 struct bnxt_re_dev *rdev = pd->rdev;
704 struct bnxt_re_mr *mr = fence->mr;
706 if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
709 if (fence->mw) {
710 bnxt_re_dealloc_mw(fence->mw);
711 fence->mw = NULL;
714 if (mr->ib_mr.rkey)
715 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
717 if (mr->ib_mr.lkey)
718 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
720 fence->mr = NULL;
721 atomic_dec(&rdev->stats.rsors.mr_count);
723 if (fence->dma_addr) {
724 ib_dma_unmap_single(&rdev->ibdev, fence->dma_addr,
727 fence->dma_addr = 0;
729 kfree(fence->va);
730 fence->va = NULL;
737 struct bnxt_qplib_chip_ctx *cctx = rdev->chip_ctx;
744 ret = bnxt_qplib_alloc_dpi(&rdev->qplib_res, &cntx->dpi, cntx, type);
750 if (cctx->modes.db_push) {
752 ret = bnxt_qplib_alloc_dpi(&rdev->qplib_res, &cntx->wcdpi,
765 struct bnxt_re_dev *rdev = pd->rdev;
766 int rc;
770 rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
771 &rdev->qplib_res.pd_tbl,
772 &pd->qplib_pd);
773 if (rc)
775 "%s failed rc = %d\n", __func__, rc);
776 atomic_dec(&rdev->stats.rsors.pd_count);
785 struct ib_device *ibdev = ibpd->device;
791 int rc;
794 pd->rdev = rdev;
795 if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
798 rc = -ENOMEM;
805 if (!ucntx->dpi.dbr) {
806 rc = bnxt_re_get_user_dpi(rdev, ucntx);
807 if (rc)
811 resp.pdid = pd->qplib_pd.id;
813 resp.dpi = ucntx->dpi.dpi;
814 resp.dbr = (u64)ucntx->dpi.umdbr;
816 if (ucntx->wcdpi.dpi) {
817 resp.wcdpi = ucntx->wcdpi.dpi;
820 if (rdev->dbr_pacing) {
821 WARN_ON(!rdev->dbr_bar_addr);
822 resp.dbr_bar_addr = (u64)rdev->dbr_bar_addr;
826 rc = bnxt_re_copy_to_udata(rdev, &resp,
827 min(udata->outlen, sizeof(resp)),
829 if (rc)
836 "Failed to create Fence-MR\n");
838 atomic_inc(&rdev->stats.rsors.pd_count);
839 max_pd_count = atomic_read(&rdev->stats.rsors.pd_count);
840 if (max_pd_count > atomic_read(&rdev->stats.rsors.max_pd_count))
841 atomic_set(&rdev->stats.rsors.max_pd_count, max_pd_count);
845 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
846 &pd->qplib_pd);
848 return rc;
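/* The PD allocation above replies to user space with the PD id, the doorbell
 * page (dpi/dbr), an optional write-combining push DPI, and, when doorbell
 * pacing is active, the pacing BAR address. */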
855 struct bnxt_re_dev *rdev = ah->rdev;
856 int rc = 0;
861 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
862 if (rc)
864 "%s id = %d blocking %d failed rc = %d\n",
865 __func__, ah->qplib_ah.id, block, rc);
866 atomic_dec(&rdev->stats.rsors.ah_count);
903 return ib_gid_to_network_type(sgid_attr->gid_type, sgid);
914 int rc = 0;
916 gid = &ah_info->sgid;
917 gattr = &ah_info->sgid_attr;
919 rc = bnxt_re_get_cached_gid(&rdev->ibdev, 1, ah_attr->grh.sgid_index,
920 gid, &gattr, &ah_attr->grh, NULL);
921 if (rc)
922 return rc;
925 if (gattr->ndev) {
926 if (is_vlan_dev(gattr->ndev))
927 ah_info->vlan_tag = vlan_dev_vlan_id(gattr->ndev);
928 if_rele(gattr->ndev);
935 ah_info->nw_type = ntype;
937 return rc;
942 gindx = rdev->gid_map[gindx];
950 int rc = 0;
954 ah_attr->grh.dgid.raw) &&
955 !rdma_link_local_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
963 resolve_dmac_work->rdev = rdev;
964 resolve_dmac_work->ah_attr = ah_attr;
965 resolve_dmac_work->ah_info = ah_info;
967 atomic_set(&resolve_dmac_work->status_wait, 1);
968 INIT_WORK(&resolve_dmac_work->work, bnxt_re_resolve_dmac_task);
969 queue_work(rdev->resolve_wq, &resolve_dmac_work->work);
972 rc = atomic_read(&resolve_dmac_work->status_wait) & 0xFF;
973 if (!rc)
976 } while (--retry_count);
977 if (atomic_read(&resolve_dmac_work->status_wait)) {
978 INIT_LIST_HEAD(&resolve_dmac_work->list);
979 list_add_tail(&resolve_dmac_work->list,
980 &rdev->mac_wq_list);
981 return -EFAULT;
987 memcpy(ah->qplib_ah.dmac, dmac, ETH_ALEN);
988 return rc;
996 struct ib_pd *ib_pd = ib_ah->pd;
999 struct bnxt_re_dev *rdev = pd->rdev;
1003 int rc;
1008 if (!(ah_attr->ah_flags & IB_AH_GRH))
1009 dev_err(rdev_to_dev(rdev), "ah_attr->ah_flags GRH is not set\n");
1011 ah->rdev = rdev;
1012 ah->qplib_ah.pd = &pd->qplib_pd;
1013 is_user = ib_pd->uobject ? true : false;
1016 memcpy(ah->qplib_ah.dgid.data, ah_attr->grh.dgid.raw,
1018 ah->qplib_ah.sgid_index = _get_sgid_index(rdev, ah_attr->grh.sgid_index);
1019 if (ah->qplib_ah.sgid_index == 0xFF) {
1021 rc = -EINVAL;
1024 ah->qplib_ah.host_sgid_index = ah_attr->grh.sgid_index;
1025 ah->qplib_ah.traffic_class = ah_attr->grh.traffic_class;
1026 ah->qplib_ah.flow_label = ah_attr->grh.flow_label;
1027 ah->qplib_ah.hop_limit = ah_attr->grh.hop_limit;
1028 ah->qplib_ah.sl = ah_attr->sl;
1029 rc = bnxt_re_get_ah_info(rdev, ah_attr, &ah_info);
1030 if (rc)
1032 ah->qplib_ah.nw_type = ah_info.nw_type;
1034 rc = bnxt_re_init_dmac(rdev, ah_attr, &ah_info, is_user, ah);
1035 if (rc)
1038 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, block);
1039 if (rc) {
1046 if (ib_pd->uobject) {
1047 struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
1053 spin_lock_irqsave(&uctx->sh_lock, flag);
1054 wrptr = (u32 *)((u8 *)uctx->shpg + BNXT_RE_AVID_OFFT);
1055 *wrptr = ah->qplib_ah.id;
1057 spin_unlock_irqrestore(&uctx->sh_lock, flag);
1059 atomic_inc(&rdev->stats.rsors.ah_count);
1060 max_ah_count = atomic_read(&rdev->stats.rsors.ah_count);
1061 if (max_ah_count > atomic_read(&rdev->stats.rsors.max_ah_count))
1062 atomic_set(&rdev->stats.rsors.max_ah_count, max_ah_count);
1066 return rc;
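/* For AHs created from user space (above), the new AH id is also written into
 * the per-context shared page at BNXT_RE_AVID_OFFT under sh_lock, presumably
 * so the user library can read it back without a separate udata response. */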
1078 memcpy(ah_attr->grh.dgid.raw, ah->qplib_ah.dgid.data,
1080 ah_attr->grh.sgid_index = ah->qplib_ah.host_sgid_index;
1081 ah_attr->grh.traffic_class = ah->qplib_ah.traffic_class;
1082 ah_attr->sl = ah->qplib_ah.sl;
1083 memcpy(ROCE_DMAC(ah_attr), ah->qplib_ah.dmac, ETH_ALEN);
1084 ah_attr->ah_flags = IB_AH_GRH;
1085 ah_attr->port_num = 1;
1086 ah_attr->static_rate = 0;
1096 struct bnxt_re_dev *rdev = srq->rdev;
1097 struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
1098 int rc = 0;
1101 rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1102 if (rc)
1104 "%s id = %d failed rc = %d\n",
1105 __func__, qplib_srq->id, rc);
1107 if (srq->umem && !IS_ERR(srq->umem))
1108 ib_umem_release(srq->umem);
1110 atomic_dec(&rdev->stats.rsors.srq_count);
1123 if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
1166 int rc, bytes = 0;
1168 context = pd->ibpd.uobject->context;
1170 qplib_srq = &srq->qplib_srq;
1171 sginfo = &qplib_srq->sginfo;
1173 if (udata->inlen < sizeof(ureq))
1176 (unsigned int)udata->inlen,
1179 rc = ib_copy_from_udata(&ureq, udata,
1180 min(udata->inlen, sizeof(ureq)));
1181 if (rc)
1182 return rc;
1184 bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
1194 srq->umem = umem;
1195 sginfo->sghead = get_ib_umem_sgl(umem, &sginfo->nmap);
1196 sginfo->npages = ib_umem_num_pages_compat(umem);
1197 qplib_srq->srq_handle = ureq.srq_handle;
1198 qplib_srq->dpi = &cntx->dpi;
1199 qplib_srq->is_user = true;
1212 int rc, entries;
1214 struct ib_pd *ib_pd = ib_srq->pd;
1220 rdev = pd->rdev;
1221 dev_attr = rdev->dev_attr;
1223 if (rdev->mod_exit) {
1225 rc = -EIO;
1229 if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
1231 rc = -ENOTSUPP;
1236 context = pd->ibpd.uobject->context;
1240 if (atomic_read(&rdev->stats.rsors.srq_count) >= dev_attr->max_srq) {
1241 dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded(SRQs)\n");
1242 rc = -EINVAL;
1246 if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
1247 dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded(SRQ_WQs)\n");
1248 rc = -EINVAL;
1252 srq->rdev = rdev;
1253 srq->qplib_srq.pd = &pd->qplib_pd;
1254 srq->qplib_srq.dpi = &rdev->dpi_privileged;
1258 entries = srq_init_attr->attr.max_wr + 1;
1260 if (entries > dev_attr->max_srq_wqes + 1)
1261 entries = dev_attr->max_srq_wqes + 1;
1263 srq->qplib_srq.wqe_size = _max_rwqe_sz(6); /* 128 byte wqe size */
1264 srq->qplib_srq.max_wqe = entries;
1265 srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
1266 srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
1267 srq->srq_limit = srq_init_attr->attr.srq_limit;
1268 srq->qplib_srq.eventq_hw_ring_id = rdev->nqr.nq[0].ring_id;
1269 srq->qplib_srq.sginfo.pgsize = PAGE_SIZE;
1270 srq->qplib_srq.sginfo.pgshft = PAGE_SHIFT;
1273 rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1274 if (rc)
1278 rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1279 if (rc) {
1287 resp.srqid = srq->qplib_srq.id;
1288 rc = bnxt_re_copy_to_udata(rdev, &resp,
1289 min(udata->outlen, sizeof(resp)),
1291 if (rc) {
1292 bnxt_qplib_destroy_srq(&rdev->qplib_res, &srq->qplib_srq);
1296 atomic_inc(&rdev->stats.rsors.srq_count);
1297 max_srq_count = atomic_read(&rdev->stats.rsors.srq_count);
1298 if (max_srq_count > atomic_read(&rdev->stats.rsors.max_srq_count))
1299 atomic_set(&rdev->stats.rsors.max_srq_count, max_srq_count);
1300 spin_lock_init(&srq->lock);
1304 if (udata && srq->umem && !IS_ERR(srq->umem)) {
1305 ib_umem_release(srq->umem);
1306 srq->umem = NULL;
1309 return rc;
1318 struct bnxt_re_dev *rdev = srq->rdev;
1319 int rc;
1327 if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
1328 return -EINVAL;
1330 srq->qplib_srq.threshold = srq_attr->srq_limit;
1331 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1332 if (rc) {
1334 return rc;
1337 srq->srq_limit = srq_attr->srq_limit;
1341 rc = bnxt_re_copy_to_udata(rdev, srq, 0, udata);
1342 if (rc)
1343 return rc;
1348 "Unsupported srq_attr_mask 0x%x\n", srq_attr_mask);
1349 return -EINVAL;
1358 struct bnxt_re_dev *rdev = srq->rdev;
1359 int rc;
1361 rc = bnxt_qplib_query_srq(&rdev->qplib_res, &srq->qplib_srq);
1362 if (rc) {
1363 dev_err(rdev_to_dev(rdev), "Query HW SRQ (0x%x) failed! rc = %d\n",
1364 srq->qplib_srq.id, rc);
1365 return rc;
1367 srq_attr->max_wr = srq->qplib_srq.max_wqe;
1368 srq_attr->max_sge = srq->qplib_srq.max_sge;
1369 srq_attr->srq_limit = srq->qplib_srq.threshold;
1381 int rc = 0;
1383 spin_lock_irqsave(&srq->lock, flags);
1386 wqe.num_sge = wr->num_sge;
1387 wqe.sg_list = (struct bnxt_qplib_sge *)wr->sg_list;
1388 wqe.wr_id = wr->wr_id;
1390 rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
1391 if (rc) {
1395 wr = wr->next;
1397 spin_unlock_irqrestore(&srq->lock, flags);
1399 return rc;
1406 spin_lock_irqsave(&qp->scq->cq_lock, flags);
1407 if (qp->rcq && qp->rcq != qp->scq)
1408 spin_lock(&qp->rcq->cq_lock);
1416 if (qp->rcq && qp->rcq != qp->scq)
1417 spin_unlock(&qp->rcq->cq_lock);
1418 spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
1428 int rc = 0;
1430 rdev = qp->rdev;
1431 gsi_sqp = rdev->gsi_ctx.gsi_sqp;
1432 gsi_sah = rdev->gsi_ctx.gsi_sah;
1435 mutex_lock(&rdev->qp_lock);
1436 list_del(&gsi_sqp->list);
1437 mutex_unlock(&rdev->qp_lock);
1441 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &gsi_sah->qplib_ah,
1443 if (rc)
1446 atomic_dec(&rdev->stats.rsors.ah_count);
1450 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
1451 if (rc)
1456 bnxt_qplib_clean_qp(&gsi_sqp->qplib_qp);
1459 bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
1460 bnxt_qplib_free_hdr_buf(&rdev->qplib_res, &gsi_sqp->qplib_qp);
1461 kfree(rdev->gsi_ctx.sqp_tbl);
1464 rdev->gsi_ctx.gsi_sqp = NULL;
1465 rdev->gsi_ctx.gsi_sah = NULL;
1466 rdev->gsi_ctx.sqp_tbl = NULL;
1467 atomic_dec(&rdev->stats.rsors.qp_count);
1478 if (!rdev->rcfw.sp_perf_stats_enabled)
1485 if (rdev->rcfw.qp_destroy_stats[i]) {
1487 avg_time += rdev->rcfw.qp_destroy_stats[i];
1510 atomic_read(&rdev->stats.rsors.max_qp_count));
1518 struct bnxt_re_dev *rdev = qp->rdev;
1521 int rc;
1523 mutex_lock(&rdev->qp_lock);
1524 list_del(&qp->list);
1525 active_qps = atomic_dec_return(&rdev->stats.rsors.qp_count);
1526 if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC)
1527 atomic_dec(&rdev->stats.rsors.rc_qp_count);
1528 else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
1529 atomic_dec(&rdev->stats.rsors.ud_qp_count);
1530 mutex_unlock(&rdev->qp_lock);
1532 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1533 if (rc)
1535 "%s id = %d failed rc = %d\n",
1536 __func__, qp->qplib_qp.id, rc);
1538 if (!ib_qp->uobject) {
1540 bnxt_qplib_clean_qp(&qp->qplib_qp);
1544 bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
1545 if (ib_qp->qp_type == IB_QPT_GSI &&
1546 rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
1547 if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL &&
1548 rdev->gsi_ctx.gsi_sqp) {
1551 bnxt_qplib_free_hdr_buf(&rdev->qplib_res, &qp->qplib_qp);
1554 if (qp->rumem && !IS_ERR(qp->rumem))
1555 ib_umem_release(qp->rumem);
1556 if (qp->sumem && !IS_ERR(qp->sumem))
1557 ib_umem_release(qp->sumem);
1608 rdev = qp->rdev;
1609 qplqp = &qp->qplib_qp;
1610 sq = &qplqp->sq;
1611 dev_attr = rdev->dev_attr;
1614 ilsize = ALIGN(init_attr->cap.max_inline_data, align);
1616 sq->wqe_size = bnxt_re_get_swqe_size(ilsize, sq->max_sge);
1617 if (sq->wqe_size > _get_swqe_sz(dev_attr->max_qp_sges))
1618 return -EINVAL;
1622 if (sq->wqe_size < _get_swqe_sz(dev_attr->max_qp_sges) &&
1623 qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
1624 sq->wqe_size = _get_swqe_sz(dev_attr->max_qp_sges);
1626 if (init_attr->cap.max_inline_data) {
1627 qplqp->max_inline_data = sq->wqe_size -
1629 init_attr->cap.max_inline_data = qplqp->max_inline_data;
1630 if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
1631 sq->max_sge = qplqp->max_inline_data /
1648 int rc, bytes = 0;
1652 qplib_qp = &qp->qplib_qp;
1653 context = pd->ibpd.uobject->context;
1655 sginfo = &qplib_qp->sq.sginfo;
1657 if (udata->inlen < sizeof(ureq))
1660 (unsigned int)udata->inlen,
1663 rc = ib_copy_from_udata(&ureq, udata,
1664 min(udata->inlen, sizeof(ureq)));
1665 if (rc)
1666 return rc;
1668 bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
1669 /* Consider mapping PSN search memory only for RC QPs. */
1670 if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
1671 psn_sz = _is_chip_gen_p5_p7(rdev->chip_ctx) ?
1674 if (rdev->dev_attr && BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags))
1676 psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
1677 qplib_qp->sq.max_wqe :
1678 ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
1680 if (BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags))
1694 qp->sumem = umem;
1696 sginfo->sghead = get_ib_umem_sgl(umem, &sginfo->nmap);
1697 sginfo->npages = ib_umem_num_pages_compat(umem);
1698 qplib_qp->qp_handle = ureq.qp_handle;
1700 if (!qp->qplib_qp.srq) {
1701 sginfo = &qplib_qp->rq.sginfo;
1702 bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
1713 qp->rumem = umem;
1715 sginfo->sghead = get_ib_umem_sgl(umem, &sginfo->nmap);
1716 sginfo->npages = ib_umem_num_pages_compat(umem);
1719 qplib_qp->dpi = &cntx->dpi;
1720 qplib_qp->is_user = true;
1724 ib_umem_release(qp->sumem);
1725 qp->sumem = NULL;
1726 qplib_qp->sq.sginfo.sghead = NULL;
1727 qplib_qp->sq.sginfo.nmap = 0;
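/*
 * User QP setup above: the SQ and RQ rings are pinned from user memory, and
 * for RC QPs the SQ mapping is grown by a PSN-search area whose entry size
 * and count depend on the chip generation and on the HW retransmission
 * (BNXT_RE_HW_RETX) capability.
 */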
1736 struct bnxt_re_dev *rdev = pd->rdev;
1739 int rc;
1747 ah->rdev = rdev;
1748 ah->qplib_ah.pd = &pd->qplib_pd;
1750 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
1751 if (rc)
1755 memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
1757 ah->qplib_ah.sgid_index = 0;
1759 ah->qplib_ah.traffic_class = 0;
1760 ah->qplib_ah.flow_label = 0;
1761 ah->qplib_ah.hop_limit = 1;
1762 ah->qplib_ah.sl = 0;
1764 ether_addr_copy(ah->qplib_ah.dmac, rdev->dev_addr);
1765 dev_dbg(rdev_to_dev(rdev), "ah->qplib_ah.dmac = %x:%x:%x:%x:%x:%x\n",
1766 ah->qplib_ah.dmac[0], ah->qplib_ah.dmac[1], ah->qplib_ah.dmac[2],
1767 ah->qplib_ah.dmac[3], ah->qplib_ah.dmac[4], ah->qplib_ah.dmac[5]);
1769 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, true);
1770 if (rc) {
1775 dev_dbg(rdev_to_dev(rdev), "AH ID = %d\n", ah->qplib_ah.id);
1776 atomic_inc(&rdev->stats.rsors.ah_count);
1790 int rc;
1795 sah = rdev->gsi_ctx.gsi_sah;
1800 if (!compare_ether_header(sah->qplib_ah.dmac, rdev->dev_addr)) {
1806 gsi_qp = rdev->gsi_ctx.gsi_qp;
1807 ib_pd = gsi_qp->ib_qp.pd;
1809 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
1810 &sah->qplib_ah, false);
1811 if (rc) {
1816 atomic_dec(&rdev->stats.rsors.ah_count);
1818 rdev->gsi_ctx.gsi_sah = NULL;
1820 sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1821 &gsi_qp->qplib_qp);
1827 rdev->gsi_ctx.gsi_sah = sah;
1828 atomic_inc(&rdev->stats.rsors.ah_count);
1836 struct bnxt_re_dev *rdev = pd->rdev;
1838 int rc;
1846 qp->rdev = rdev;
1849 ether_addr_copy(qp->qplib_qp.smac, rdev->dev_addr);
1850 qp->qplib_qp.pd = &pd->qplib_pd;
1851 qp->qplib_qp.qp_handle = (u64)&qp->qplib_qp;
1852 qp->qplib_qp.type = IB_QPT_UD;
1854 qp->qplib_qp.max_inline_data = 0;
1855 qp->qplib_qp.sig_type = true;
1858 qp->qplib_qp.sq.wqe_size = bnxt_re_get_swqe_size(0, 6);
1859 qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
1860 qp->qplib_qp.sq.max_sge = 2;
1862 qp->qplib_qp.sq.q_full_delta = 1;
1863 qp->qplib_qp.sq.sginfo.pgsize = PAGE_SIZE;
1864 qp->qplib_qp.sq.sginfo.pgshft = PAGE_SHIFT;
1866 qp->qplib_qp.scq = qp1_qp->scq;
1867 qp->qplib_qp.rcq = qp1_qp->rcq;
1869 qp->qplib_qp.rq.wqe_size = _max_rwqe_sz(6); /* 128 Byte wqe size */
1870 qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
1871 qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
1872 qp->qplib_qp.rq.sginfo.pgsize = PAGE_SIZE;
1873 qp->qplib_qp.rq.sginfo.pgshft = PAGE_SHIFT;
1875 qp->qplib_qp.rq.q_full_delta = 1;
1876 qp->qplib_qp.mtu = qp1_qp->mtu;
1877 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1879 rc = bnxt_qplib_alloc_hdr_buf(qp1_res, &qp->qplib_qp, 0,
1881 if (rc)
1884 rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
1885 if (rc) {
1891 qp->qplib_qp.id);
1892 spin_lock_init(&qp->sq_lock);
1893 INIT_LIST_HEAD(&qp->list);
1894 mutex_lock(&rdev->qp_lock);
1895 list_add_tail(&qp->list, &rdev->qp_list);
1896 atomic_inc(&rdev->stats.rsors.qp_count);
1897 mutex_unlock(&rdev->qp_lock);
1900 bnxt_qplib_free_hdr_buf(qp1_res, &qp->qplib_qp);
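/*
 * Shadow GSI QP: in BNXT_RE_GSI_MODE_ALL (pre Gen-P5/P7 chips) a companion UD
 * QP is created that shares QP1's CQs and mirrors its RQ sizing; QP1 traffic
 * appears to be re-posted through it, see the shadow-QP receive and send
 * helpers further down.
 */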
1915 rdev = qp->rdev;
1916 qplqp = &qp->qplib_qp;
1917 rq = &qplqp->rq;
1918 dev_attr = rdev->dev_attr;
1920 if (init_attr->srq) {
1923 srq = to_bnxt_re(init_attr->srq, struct bnxt_re_srq, ibsrq);
1926 return -EINVAL;
1928 qplqp->srq = &srq->qplib_srq;
1929 rq->max_wqe = 0;
1931 rq->max_sge = init_attr->cap.max_recv_sge;
1932 if (rq->max_sge > dev_attr->max_qp_sges)
1933 rq->max_sge = dev_attr->max_qp_sges;
1934 init_attr->cap.max_recv_sge = rq->max_sge;
1935 rq->wqe_size = bnxt_re_get_rwqe_size(qplqp, rq->max_sge,
1936 dev_attr->max_qp_sges);
1940 entries = init_attr->cap.max_recv_wr + 1;
1942 rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
1943 rq->q_full_delta = 0;
1944 rq->sginfo.pgsize = PAGE_SIZE;
1945 rq->sginfo.pgshft = PAGE_SHIFT;
1957 rdev = qp->rdev;
1958 qplqp = &qp->qplib_qp;
1959 dev_attr = rdev->dev_attr;
1961 if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD)
1962 qplqp->rq.max_sge = dev_attr->max_qp_sges;
1975 int rc;
1977 rdev = qp->rdev;
1978 qplqp = &qp->qplib_qp;
1979 sq = &qplqp->sq;
1980 dev_attr = rdev->dev_attr;
1982 sq->max_sge = init_attr->cap.max_send_sge;
1983 if (sq->max_sge > dev_attr->max_qp_sges) {
1984 sq->max_sge = dev_attr->max_qp_sges;
1985 init_attr->cap.max_send_sge = sq->max_sge;
1987 rc = bnxt_re_setup_swqe_size(qp, init_attr);
1988 if (rc)
1989 return rc;
1996 entries = init_attr->cap.max_send_wr;
1997 if (!cntx && rdev->min_tx_depth && init_attr->qp_type != IB_QPT_GSI) {
2003 if (rdev->min_tx_depth > 1 && entries < rdev->min_tx_depth)
2004 entries = rdev->min_tx_depth;
2008 diff = bnxt_re_get_diff(cntx, rdev->chip_ctx);
2010 sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
2011 sq->q_full_delta = diff + 1;
2017 sq->q_full_delta -= 1; /* becomes 0 for gen-p5 */
2018 sq->sginfo.pgsize = PAGE_SIZE;
2019 sq->sginfo.pgshft = PAGE_SHIFT;
2032 rdev = qp->rdev;
2033 qplqp = &qp->qplib_qp;
2034 dev_attr = rdev->dev_attr;
2036 if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
2037 entries = init_attr->cap.max_send_wr + 1;
2039 qplqp->sq.max_wqe = min_t(u32, entries,
2040 dev_attr->max_qp_wqes + 1);
2041 qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
2042 init_attr->cap.max_send_wr;
2043 qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
2044 if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
2045 qplqp->sq.max_sge = dev_attr->max_qp_sges;
2056 chip_ctx = rdev->chip_ctx;
2057 gsi_ctx = &rdev->gsi_ctx;
2059 qptype = __from_ib_qp_type(init_attr->qp_type);
2061 dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported\n",
2063 qptype = -EINVAL;
2067 if (_is_chip_gen_p5_p7(chip_ctx) && init_attr->qp_type == IB_QPT_GSI) {
2070 gsi_ctx->gsi_qp_mode = BNXT_RE_GSI_MODE_UD;
2078 return rdev->chip_ctx->modes.wqe_mode;
2091 int rc = 0, qptype;
2093 rdev = qp->rdev;
2094 qplqp = &qp->qplib_qp;
2095 dev_attr = rdev->dev_attr;
2098 context = pd->ibpd.uobject->context;
2103 qplqp->is_user = false;
2104 qplqp->pd = &pd->qplib_pd;
2105 qplqp->qp_handle = (u64)qplqp;
2106 qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
2110 rc = qptype;
2113 qplqp->type = (u8)qptype;
2114 qplqp->wqe_mode = bnxt_re_init_qp_wqe_mode(rdev);
2115 ether_addr_copy(qplqp->smac, rdev->dev_addr);
2117 if (init_attr->qp_type == IB_QPT_RC) {
2118 qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
2119 qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
2121 qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(if_getmtu(rdev->netdev)));
2122 qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
2123 if (init_attr->create_flags) {
2125 "QP create flags 0x%x not supported\n",
2126 init_attr->create_flags);
2127 return -EOPNOTSUPP;
2131 if (init_attr->send_cq) {
2132 cq = to_bnxt_re(init_attr->send_cq, struct bnxt_re_cq, ibcq);
2135 rc = -EINVAL;
2138 qplqp->scq = &cq->qplib_cq;
2139 qp->scq = cq;
2142 if (init_attr->recv_cq) {
2143 cq = to_bnxt_re(init_attr->recv_cq, struct bnxt_re_cq, ibcq);
2146 rc = -EINVAL;
2149 qplqp->rcq = &cq->qplib_cq;
2150 qp->rcq = cq;
2154 rc = bnxt_re_init_rq_attr(qp, init_attr, cntx);
2155 if (rc)
2157 if (init_attr->qp_type == IB_QPT_GSI)
2161 rc = bnxt_re_init_sq_attr(qp, init_attr, cntx);
2162 if (rc)
2164 if (init_attr->qp_type == IB_QPT_GSI)
2168 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
2170 return rc;
2180 int rc = 0;
2182 rdev = qp->rdev;
2187 return -ENOMEM;
2188 rdev->gsi_ctx.sqp_tbl = sqp_tbl;
2190 sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
2192 rc = -ENODEV;
2197 rdev->gsi_ctx.gsi_sqp = sqp;
2199 sqp->rcq = qp->rcq;
2200 sqp->scq = qp->scq;
2201 sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
2202 &qp->qplib_qp);
2204 bnxt_qplib_destroy_qp(&rdev->qplib_res,
2205 &sqp->qplib_qp);
2206 rc = -ENODEV;
2211 rdev->gsi_ctx.gsi_sah = sah;
2216 return rc;
2240 int rc = 0;
2242 rdev = qp->rdev;
2243 qplqp = &qp->qplib_qp;
2244 res = &rdev->qplib_res;
2245 gsi_mode = rdev->gsi_ctx.gsi_qp_mode;
2249 rc = bnxt_qplib_alloc_hdr_buf(res, qplqp, sstep, rstep);
2250 if (rc)
2253 rc = bnxt_qplib_create_qp1(res, qplqp);
2254 if (rc) {
2260 rc = bnxt_re_create_shadow_gsi(qp, pd);
2262 return rc;
2269 bool rc = true;
2272 ilsize = ALIGN(init_attr->cap.max_inline_data, sizeof(struct sq_sge));
2273 if ((init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
2274 (init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
2275 (init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
2276 (init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
2277 (ilsize > dev_attr->max_inline_data)) {
2278 dev_err(rdev_to_dev(rdev), "Create QP failed - max exceeded! "
2279 "0x%x/0x%x 0x%x/0x%x 0x%x/0x%x "
2280 "0x%x/0x%x 0x%x/0x%x\n",
2281 init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
2282 init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
2283 init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
2284 init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
2285 init_attr->cap.max_inline_data,
2286 dev_attr->max_inline_data);
2287 rc = false;
2289 return rc;
2314 int rc;
2317 rdev = pd->rdev;
2318 dev_attr = rdev->dev_attr;
2319 if (rdev->mod_exit) {
2320 rc = -EIO;
2325 if (atomic_read(&rdev->stats.rsors.qp_count) >= dev_attr->max_qp) {
2326 dev_err(rdev_to_dev(rdev), "Create QP failed - max exceeded(QPs Alloc'd %u of max %u)\n",
2327 atomic_read(&rdev->stats.rsors.qp_count), dev_attr->max_qp);
2328 rc = -EINVAL;
2332 rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
2333 if (!rc) {
2334 rc = -EINVAL;
2339 rc = -ENOMEM;
2342 qp->rdev = rdev;
2344 rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
2345 if (rc)
2348 if (qp_init_attr->qp_type == IB_QPT_GSI &&
2349 !_is_chip_gen_p5_p7(rdev->chip_ctx)) {
2350 rc = bnxt_re_create_gsi_qp(qp, pd);
2351 if (rc == -ENODEV)
2353 if (rc)
2356 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
2357 if (rc) {
2365 resp.qpid = qp->qplib_qp.id;
2366 rc = bnxt_re_copy_to_udata(rdev, &resp,
2367 min(udata->outlen, sizeof(resp)),
2369 if (rc)
2374 qp->ib_qp.qp_num = qp->qplib_qp.id;
2375 if (qp_init_attr->qp_type == IB_QPT_GSI)
2376 rdev->gsi_ctx.gsi_qp = qp;
2377 spin_lock_init(&qp->sq_lock);
2378 spin_lock_init(&qp->rq_lock);
2379 INIT_LIST_HEAD(&qp->list);
2380 mutex_lock(&rdev->qp_lock);
2381 list_add_tail(&qp->list, &rdev->qp_list);
2382 mutex_unlock(&rdev->qp_lock);
2383 atomic_inc(&rdev->stats.rsors.qp_count);
2384 active_qps = atomic_read(&rdev->stats.rsors.qp_count);
2385 if (active_qps > atomic_read(&rdev->stats.rsors.max_qp_count))
2386 atomic_set(&rdev->stats.rsors.max_qp_count, active_qps);
2390 /* Get the counters for RC QPs and UD QPs */
2391 if (qp_init_attr->qp_type == IB_QPT_RC) {
2392 tmp_qps = atomic_inc_return(&rdev->stats.rsors.rc_qp_count);
2393 if (tmp_qps > atomic_read(&rdev->stats.rsors.max_rc_qp_count))
2394 atomic_set(&rdev->stats.rsors.max_rc_qp_count, tmp_qps);
2395 } else if (qp_init_attr->qp_type == IB_QPT_UD) {
2396 tmp_qps = atomic_inc_return(&rdev->stats.rsors.ud_qp_count);
2397 if (tmp_qps > atomic_read(&rdev->stats.rsors.max_ud_qp_count))
2398 atomic_set(&rdev->stats.rsors.max_ud_qp_count, tmp_qps);
2401 return &qp->ib_qp;
2404 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
2407 if (qp->rumem && !IS_ERR(qp->rumem))
2408 ib_umem_release(qp->rumem);
2409 if (qp->sumem && !IS_ERR(qp->sumem))
2410 ib_umem_release(qp->sumem);
2415 return ERR_PTR(rc);
2422 struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
2423 int rc = 0;
2426 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
2427 qp->qplib_qp.state = qp1_qp->qplib_qp.state;
2430 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
2431 qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
2435 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
2437 qp->qplib_qp.qkey = BNXT_RE_QP_RANDOM_QKEY;
2440 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
2441 qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
2444 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2445 if (rc)
2447 return rc;
2463 if (qp->qplib_qp.nw_type == CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6) {
2471 memcpy(smac, qp->qplib_qp.smac, ETH_ALEN);
2474 memcpy(data, qp->qplib_qp.ah.dmac, ETH_ALEN);
2480 memcpy(data + buf_len, qp->qplib_qp.ah.dgid.data + ip_off, addr_len);
2483 memcpy(data + buf_len, qp->qp_info_entry.sgid.raw + ip_off, addr_len);
2486 qpn = htonl(qp->qplib_qp.dest_qpn);
2500 type = __from_hw_to_ib_qp_type(qp->qplib_qp.type);
2502 /* User-space can extract ip address with sgid_index. */
2503 if (ipv6_addr_v4mapped((struct in6_addr *)&qp->qplib_qp.ah.dgid)) {
2504 qp->qp_info_entry.s_ip.ipv4_addr = ipv4_from_gid(qp->qp_info_entry.sgid.raw);
2505 qp->qp_info_entry.d_ip.ipv4_addr = ipv4_from_gid(qp->qplib_qp.ah.dgid.data);
2507 memcpy(&qp->qp_info_entry.s_ip.ipv6_addr, qp->qp_info_entry.sgid.raw,
2508 sizeof(qp->qp_info_entry.s_ip.ipv6_addr));
2509 memcpy(&qp->qp_info_entry.d_ip.ipv6_addr, qp->qplib_qp.ah.dgid.data,
2510 sizeof(qp->qp_info_entry.d_ip.ipv6_addr));
2514 (qp->qplib_qp.nw_type == CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4 ||
2515 qp->qplib_qp.nw_type == CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6)) {
2516 qp->qp_info_entry.s_port = get_source_port(rdev, qp);
2518 qp->qp_info_entry.d_port = BNXT_RE_QP_DEST_PORT;
2526 if (qp->sumem)
2529 if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
2530 rq = &qp->qplib_qp.rq;
2531 sq = &qp->qplib_qp.sq;
2533 dev_dbg(rdev_to_dev(qp->rdev),
2536 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
2539 if (sq->hwq.prod != sq->hwq.cons)
2540 bnxt_re_handle_cqn(&qp->scq->qplib_cq);
2542 if (qp->rcq && (qp->rcq != qp->scq) &&
2543 (rq->hwq.prod != rq->hwq.cons))
2544 bnxt_re_handle_cqn(&qp->rcq->qplib_cq);
2547 if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
2548 dev_dbg(rdev_to_dev(qp->rdev),
2551 bnxt_qplib_clean_qp(&qp->qplib_qp);
2579 int rc, entries, status;
2584 rdev = qp->rdev;
2585 dev_attr = rdev->dev_attr;
2587 qp->qplib_qp.modify_flags = 0;
2588 ppp = &qp->qplib_qp.ppp;
2590 curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
2591 new_qp_state = qp_attr->qp_state;
2593 ib_qp->qp_type, qp_attr_mask)) {
2594 dev_err(rdev_to_dev(rdev),"invalid attribute mask=0x%x"
2595 " specified for qpn=0x%x of type=0x%x"
2596 " current_qp_state=0x%x, new_qp_state=0x%x\n",
2597 qp_attr_mask, ib_qp->qp_num, ib_qp->qp_type,
2599 return -EINVAL;
2601 dev_dbg(rdev_to_dev(rdev), "%s:%d INFO attribute mask=0x%x qpn=0x%x "
2602 "of type=0x%x current_qp_state=0x%x, new_qp_state=0x%x\n",
2603 __func__, __LINE__, qp_attr_mask, ib_qp->qp_num,
2604 ib_qp->qp_type, curr_qp_state, new_qp_state);
2605 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
2606 qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
2613 ppp->req = BNXT_QPLIB_PPP_REQ;
2614 ppp->dpi = ureq.dpi;
2620 qp->qplib_qp.modify_flags |=
2622 qp->qplib_qp.en_sqd_async_notify = true;
2625 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
2626 qp->qplib_qp.access =
2627 __from_ib_access_flags(qp_attr->qp_access_flags);
2628 /* LOCAL_WRITE access must be set to allow RC receive */
2629 qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
2630 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
2631 qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
2634 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
2635 qp->qplib_qp.pkey_index = qp_attr->pkey_index;
2638 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
2639 qp->qplib_qp.qkey = qp_attr->qkey;
2642 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
2649 memcpy(qp->qplib_qp.ah.dgid.data, qp_attr->ah_attr.grh.dgid.raw,
2650 sizeof(qp->qplib_qp.ah.dgid.data));
2651 qp->qplib_qp.ah.flow_label = qp_attr->ah_attr.grh.flow_label;
2652 qp->qplib_qp.ah.sgid_index = _get_sgid_index(rdev,
2653 qp_attr->ah_attr.grh.sgid_index);
2654 qp->qplib_qp.ah.host_sgid_index = qp_attr->ah_attr.grh.sgid_index;
2655 qp->qplib_qp.ah.hop_limit = qp_attr->ah_attr.grh.hop_limit;
2656 qp->qplib_qp.ah.traffic_class =
2657 qp_attr->ah_attr.grh.traffic_class;
2658 qp->qplib_qp.ah.sl = qp_attr->ah_attr.sl;
2659 ether_addr_copy(qp->qplib_qp.ah.dmac, ROCE_DMAC(&qp_attr->ah_attr));
2661 status = bnxt_re_get_cached_gid(&rdev->ibdev, 1,
2662 qp_attr->ah_attr.grh.sgid_index,
2664 &qp_attr->ah_attr.grh, NULL);
2666 if_rele(sgid_attr->ndev);
2668 if (sgid_attr->ndev) {
2669 memcpy(qp->qplib_qp.smac, rdev->dev_addr,
2676 qp->qplib_qp.nw_type =
2680 qp->qplib_qp.nw_type =
2684 qp->qplib_qp.nw_type =
2689 memcpy(&qp->qp_info_entry.sgid, gid_ptr, sizeof(qp->qp_info_entry.sgid));
2692 /* MTU settings allowed only during INIT -> RTR */
2693 if (qp_attr->qp_state == IB_QPS_RTR) {
2694 bnxt_re_init_qpmtu(qp, if_getmtu(rdev->netdev), qp_attr_mask, qp_attr,
2699 resp.path_mtu = qp->qplib_qp.mtu;
2702 dev_err(rdev_to_dev(rdev), "qp %#x invalid mtu\n",
2703 qp->qplib_qp.id);
2704 return -EINVAL;
2710 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
2711 qp->qplib_qp.timeout = qp_attr->timeout;
2714 qp->qplib_qp.modify_flags |=
2716 qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
2719 qp->qplib_qp.modify_flags |=
2721 qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
2724 qp->qplib_qp.modify_flags |=
2726 qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
2729 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
2730 qp->qplib_qp.rq.psn = qp_attr->rq_psn;
2733 qp->qplib_qp.modify_flags |=
2736 if (qp_attr->max_rd_atomic > dev_attr->max_qp_rd_atom)
2739 qp_attr->max_rd_atomic,
2740 dev_attr->max_qp_rd_atom);
2741 qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
2742 dev_attr->max_qp_rd_atom);
2745 qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
2746 qp->qplib_qp.sq.psn = qp_attr->sq_psn;
2749 if (qp_attr->max_dest_rd_atomic >
2750 dev_attr->max_qp_init_rd_atom) {
2753 qp_attr->max_dest_rd_atomic,
2754 dev_attr->max_qp_init_rd_atom);
2755 return -EINVAL;
2757 qp->qplib_qp.modify_flags |=
2759 qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
2762 qp->qplib_qp.modify_flags |=
2768 if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
2769 (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
2770 (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
2771 (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
2772 (qp_attr->cap.max_inline_data >=
2773 dev_attr->max_inline_data)) {
2775 "Create QP failed - max exceeded\n");
2776 return -EINVAL;
2778 entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
2779 if (entries > dev_attr->max_qp_wqes)
2780 entries = dev_attr->max_qp_wqes;
2781 entries = min_t(u32, entries, dev_attr->max_qp_wqes);
2782 qp->qplib_qp.sq.max_wqe = entries;
2783 qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
2784 qp_attr->cap.max_send_wr;
2790 qp->qplib_qp.sq.q_full_delta -= 1;
2791 qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
2792 if (qp->qplib_qp.rq.max_wqe) {
2793 entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
2794 if (entries > dev_attr->max_qp_wqes)
2795 entries = dev_attr->max_qp_wqes;
2796 qp->qplib_qp.rq.max_wqe = entries;
2797 qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
2798 qp_attr->cap.max_recv_wr;
2799 qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
2805 qp->qplib_qp.modify_flags |=
2807 qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
2810 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2811 if (rc) {
2813 return rc;
2818 ppp->st_idx_en & CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_ENABLED) {
2820 resp.ppp_st_idx = ppp->st_idx_en >>
2826 rc = bnxt_re_copy_to_udata(rdev, &resp,
2827 min(udata->outlen, sizeof(resp)),
2829 if (rc)
2830 return rc;
2833 if (ib_qp->qp_type == IB_QPT_GSI &&
2834 rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL &&
2835 rdev->gsi_ctx.gsi_sqp)
2836 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
2841 return rc;
2848 struct bnxt_re_dev *rdev = qp->rdev;
2850 int rc;
2854 return -ENOMEM;
2856 qplib_qp->id = qp->qplib_qp.id;
2857 qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
2859 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
2860 if (rc) {
2861 dev_err(rdev_to_dev(rdev), "Query HW QP (0x%x) failed! rc = %d\n",
2862 qplib_qp->id, rc);
2865 qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
2866 qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
2867 qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
2868 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
2869 qp_attr->pkey_index = qplib_qp->pkey_index;
2870 qp_attr->qkey = qplib_qp->qkey;
2871 memcpy(qp_attr->ah_attr.grh.dgid.raw, qplib_qp->ah.dgid.data,
2872 sizeof(qplib_qp->ah.dgid.data));
2873 qp_attr->ah_attr.grh.flow_label = qplib_qp->ah.flow_label;
2874 qp_attr->ah_attr.grh.sgid_index = qplib_qp->ah.host_sgid_index;
2875 qp_attr->ah_attr.grh.hop_limit = qplib_qp->ah.hop_limit;
2876 qp_attr->ah_attr.grh.traffic_class = qplib_qp->ah.traffic_class;
2877 qp_attr->ah_attr.sl = qplib_qp->ah.sl;
2878 ether_addr_copy(ROCE_DMAC(&qp_attr->ah_attr), qplib_qp->ah.dmac);
2879 qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
2880 qp_attr->timeout = qplib_qp->timeout;
2881 qp_attr->retry_cnt = qplib_qp->retry_cnt;
2882 qp_attr->rnr_retry = qplib_qp->rnr_retry;
2883 qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
2884 qp_attr->rq_psn = qplib_qp->rq.psn;
2885 qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
2886 qp_attr->sq_psn = qplib_qp->sq.psn;
2887 qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
2888 qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
2890 qp_attr->dest_qp_num = qplib_qp->dest_qpn;
2892 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
2893 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
2894 qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
2895 qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
2896 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
2897 qp_init_attr->cap = qp_attr->cap;
2901 return rc;
2910 switch (wr->send_flags) {
2912 wqe->rawqp1.lflags |= SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
2916 wqe->rawqp1.lflags |= SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
2927 struct bnxt_re_ah *ah = to_bnxt_re(ud_wr(wr)->ah, struct bnxt_re_ah,
2929 struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
2931 int i, rc = 0;
2937 memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
2940 rc = bnxt_re_query_gid(&qp->rdev->ibdev, 1, qplib_ah->sgid_index, &sgid);
2941 if (rc)
2942 return rc;
2945 qp->qp1_hdr.eth_present = 1;
2946 ptmac = ah->qplib_ah.dmac;
2947 memcpy(qp->qp1_hdr.eth.dmac_h, ptmac, 4);
2949 memcpy(qp->qp1_hdr.eth.dmac_l, ptmac, 2);
2951 ptmac = qp->qplib_qp.smac;
2952 memcpy(qp->qp1_hdr.eth.smac_h, ptmac, 2);
2954 memcpy(qp->qp1_hdr.eth.smac_l, ptmac, 4);
2956 qp->qp1_hdr.eth.type = cpu_to_be16(BNXT_QPLIB_ETHTYPE_ROCEV1);
2961 qp->qp1_hdr.vlan_present = 1;
2962 qp->qp1_hdr.eth.type = cpu_to_be16(ETH_P_8021Q);
2965 qp->qp1_hdr.grh_present = 1;
2966 qp->qp1_hdr.grh.ip_version = 6;
2967 qp->qp1_hdr.grh.payload_length =
2970 qp->qp1_hdr.grh.next_header = 0x1b;
2971 memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
2972 memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
2976 if (wr->opcode == IB_WR_SEND_WITH_IMM) {
2977 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
2978 qp->qp1_hdr.immediate_present = 1;
2980 qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
2982 if (wr->send_flags & IB_SEND_SOLICITED)
2983 qp->qp1_hdr.bth.solicited_event = 1;
2984 qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
2986 qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
2987 qp->qp1_hdr.bth.destination_qpn = IB_QP1;
2988 qp->qp1_hdr.bth.ack_req = 0;
2989 qp->send_psn++;
2990 qp->send_psn &= BTH_PSN_MASK;
2991 qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
2994 qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
2995 qp->qp1_hdr.deth.source_qpn = IB_QP1;
2998 buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
3000 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!\n");
3001 rc = -ENOMEM;
3003 for (i = wqe->num_sge; i; i--) {
3004 wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
3005 wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
3006 wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
3008 wqe->sg_list[0].addr = sge.addr;
3009 wqe->sg_list[0].lkey = sge.lkey;
3010 wqe->sg_list[0].size = sge.size;
3011 wqe->num_sge++;
3013 return rc;
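/*
 * QP1 (GSI) send path above: the RoCE v1 headers (Ethernet, optional VLAN,
 * GRH, BTH, DETH) are built in software into the per-QP header buffer, which
 * is then prepended as sg_list[0] after shifting the caller's SGEs up by one.
 */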
3021 int rc, indx, len = 0;
3023 rdev = qp->rdev;
3026 if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD)
3029 for (indx = 0; indx < wr->num_sge; indx++) {
3030 wqe->sg_list[indx].addr = wr->sg_list[indx].addr;
3031 wqe->sg_list[indx].lkey = wr->sg_list[indx].lkey;
3032 wqe->sg_list[indx].size = wr->sg_list[indx].length;
3033 len += wr->sg_list[indx].length;
3035 rc = bnxt_re_build_qp1_send(qp, wr, wqe, len);
3036 wqe->rawqp1.lflags |= SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
3038 return rc;
3052 struct bnxt_re_dev *rdev = qp->rdev;
3056 int rc = 0;
3059 if (bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge)) {
3062 * ib_grh (40) - as provided from the wr
3064 * MAD (256) - as provided from the wr
3069 if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ROCE_V2_IPV4)
3071 if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_ROCE_V1)
3075 ref.addr = wr->sg_list[0].addr;
3076 ref.lkey = wr->sg_list[0].lkey;
3077 ref.size = wr->sg_list[0].length;
3081 wqe->sg_list[0].addr = sge.addr;
3082 wqe->sg_list[0].lkey = sge.lkey;
3083 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE;
3084 size -= wqe->sg_list[0].size;
3086 dev_err(rdev_to_dev(qp->rdev),"QP1 rq buffer is empty!\n");
3087 rc = -ENOMEM;
3091 sge.addr += wqe->sg_list[0].size;
3095 wqe->sg_list[1].addr = ref.addr + ip_hdr_size;
3096 wqe->sg_list[1].lkey = ref.lkey;
3097 wqe->sg_list[1].size = sizeof(struct ib_grh) - ip_hdr_size;
3098 ref.size -= wqe->sg_list[1].size;
3100 dev_err(rdev_to_dev(qp->rdev),
3102 rc = -ENOMEM;
3105 ref.addr += wqe->sg_list[1].size + ip_hdr_size;
3108 wqe->sg_list[2].addr = sge.addr;
3109 wqe->sg_list[2].lkey = sge.lkey;
3110 wqe->sg_list[2].size = BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE +
3112 size -= wqe->sg_list[2].size;
3114 dev_err(rdev_to_dev(qp->rdev),
3116 rc = -ENOMEM;
3120 sge.addr += wqe->sg_list[2].size;
3123 wqe->sg_list[3].addr = ref.addr;
3124 wqe->sg_list[3].lkey = ref.lkey;
3125 wqe->sg_list[3].size = ref.size;
3126 ref.size -= wqe->sg_list[3].size;
3128 dev_err(rdev_to_dev(qp->rdev),
3130 rc = -ENOMEM;
3134 wqe->sg_list[4].addr = sge.addr;
3135 wqe->sg_list[4].lkey = sge.lkey;
3136 wqe->sg_list[4].size = sge.size;
3137 size -= wqe->sg_list[4].size;
3139 dev_err(rdev_to_dev(qp->rdev),
3141 rc = -ENOMEM;
3145 wqe->num_sge = 5;
3147 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!\n");
3148 rc = -ENOMEM;
3151 return rc;
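/*
 * QP1 receive path above: the incoming packet is scattered across up to five
 * SGEs so that the raw Ethernet/IP header portions land in driver-owned
 * header-buffer space while the GRH and the MAD payload land in the
 * consumer's buffer (ref.*), with the remaining sizes checked at each step.
 */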
3162 int rc = 0;
3164 rdev = qp->rdev;
3166 rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
3168 if (bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge)) {
3173 wqe->sg_list[0].addr = sge.addr;
3175 wqe->sg_list[0].lkey = sge.lkey;
3176 wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
3177 if (sge.size < wqe->sg_list[0].size) {
3178 dev_err(rdev_to_dev(qp->rdev),
3180 rc = -ENOMEM;
3184 sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
3185 sqp_entry->sge.addr = wr->sg_list[0].addr;
3186 sqp_entry->sge.lkey = wr->sg_list[0].lkey;
3187 sqp_entry->sge.size = wr->sg_list[0].length;
3189 sqp_entry->wrid = wqe->wr_id;
3190 /* change the wqe->wrid to table index */
3191 wqe->wr_id = rq_prod_index;
3194 return rc;
3199 return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
3200 qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
3210 ah = to_bnxt_re(ud_wr(wr)->ah, struct bnxt_re_ah, ibah);
3211 wqe->send.q_key = ud_wr(wr)->remote_qkey;
3212 wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
3213 wqe->send.avid = ah->qplib_ah.id;
3215 switch (wr->opcode) {
3217 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
3220 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
3221 wqe->send.imm_data = wr->ex.imm_data;
3224 wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
3225 wqe->send.inv_key = wr->ex.invalidate_rkey;
3228 dev_err(rdev_to_dev(qp->rdev), "%s Invalid opcode %d!\n",
3229 __func__, wr->opcode);
3230 return -EINVAL;
3232 if (wr->send_flags & IB_SEND_SIGNALED)
3233 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
3234 if (wr->send_flags & IB_SEND_FENCE)
3235 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
3236 if (wr->send_flags & IB_SEND_SOLICITED)
3237 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
3238 if (wr->send_flags & IB_SEND_INLINE)
3239 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
3247 switch (wr->opcode) {
3249 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
3252 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
3253 wqe->rdma.imm_data = wr->ex.imm_data;
3256 wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
3257 wqe->rdma.inv_key = wr->ex.invalidate_rkey;
3260 return -EINVAL;
3262 wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
3263 wqe->rdma.r_key = rdma_wr(wr)->rkey;
3264 if (wr->send_flags & IB_SEND_SIGNALED)
3265 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
3266 if (wr->send_flags & IB_SEND_FENCE)
3267 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
3268 if (wr->send_flags & IB_SEND_SOLICITED)
3269 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
3270 if (wr->send_flags & IB_SEND_INLINE)
3271 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
3279 switch (wr->opcode) {
3281 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
3282 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
3283 wqe->atomic.swap_data = atomic_wr(wr)->swap;
3286 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
3287 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
3290 return -EINVAL;
3292 wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
3293 wqe->atomic.r_key = atomic_wr(wr)->rkey;
3294 if (wr->send_flags & IB_SEND_SIGNALED)
3295 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
3296 if (wr->send_flags & IB_SEND_FENCE)
3297 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
3298 if (wr->send_flags & IB_SEND_SOLICITED)
3299 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
3306 wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
3307 wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
3308 if (wr->send_flags & IB_SEND_SIGNALED)
3309 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
3310 if (wr->send_flags & IB_SEND_FENCE)
3311 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
3312 if (wr->send_flags & IB_SEND_SOLICITED)
3313 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
3321 struct bnxt_re_mr *mr = to_bnxt_re(wr->mr, struct bnxt_re_mr, ib_mr);
3322 struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
3323 int reg_len, i, access = wr->access;
3325 if (mr->npages > qplib_frpl->max_pg_ptrs) {
3326 dev_err_ratelimited(rdev_to_dev(mr->rdev),
3328 mr->npages, qplib_frpl->max_pg_ptrs);
3329 return -EINVAL;
3332 wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
3333 wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
3334 wqe->frmr.levels = qplib_frpl->hwq.level;
3335 wqe->frmr.page_list = mr->pages;
3336 wqe->frmr.page_list_len = mr->npages;
3337 wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
3339 if (wr->wr.send_flags & IB_SEND_SIGNALED)
3340 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
3342 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
3344 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
3346 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
3348 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
3350 wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
3353 wqe->frmr.l_key = wr->key;
3354 wqe->frmr.length = wr->mr->length;
3355 wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K);
3356 wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K);
3357 wqe->frmr.va = wr->mr->iova;
3358 reg_len = wqe->frmr.page_list_len * wr->mr->page_size;
3360 if (wqe->frmr.length > reg_len) {
3361 dev_err_ratelimited(rdev_to_dev(mr->rdev),
3362 "%s: bnxt_re_mr 0x%px len (%d > %d)\n",
3363 __func__, (void *)mr, wqe->frmr.length,
3366 for (i = 0; i < mr->npages; i++)
3367 dev_dbg(rdev_to_dev(mr->rdev),
3368 "%s: build_reg_wqe page[%d] = 0x%llx\n",
3369 __func__, i, mr->pages[i]);
3371 return -EINVAL;
3380 wqe->sg_list = (struct bnxt_qplib_sge *)wr->sg_list;
3381 wqe->num_sge = wr->num_sge;
3386 if ((qp->ib_qp.qp_type == IB_QPT_UD || qp->ib_qp.qp_type == IB_QPT_GSI ||
3387 qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
3388 qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
3394 bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
3395 qp->qplib_qp.wqe_cnt = 0;
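/* HW stall workaround above: once BNXT_RE_UD_QP_HW_STALL WQEs have been
 * posted on a UD/GSI/raw-Ethertype QP, the QP is modified via
 * bnxt_re_modify_qp() and the WQE counter reset, which appears to nudge the
 * hardware out of the stalled state. */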
3405 int rc = 0;
3407 spin_lock_irqsave(&qp->sq_lock, flags);
3412 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
3415 rc = -EINVAL;
3420 wqe.wr_id = wr->wr_id;
3422 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
3423 if (rc)
3426 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
3427 if (rc) {
3429 "bad_wr seen with opcode = 0x%x rc = %d\n",
3430 wr->opcode, rc);
3433 wr = wr->next;
3435 bnxt_qplib_post_send_db(&qp->qplib_qp);
3437 spin_unlock_irqrestore(&qp->sq_lock, flags);
3438 return rc;
3443 /* Need unconditional fence for non-wire memory opcode
3446 if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
3447 wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
3448 wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
3449 wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
3450 wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
3461 int rc = 0;
3463 rdev = qp->rdev;
3464 spin_lock_irqsave(&qp->sq_lock, flags);
3469 if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
3472 rc = -EINVAL;
3477 wqe.wr_id = wr->wr_id;
3479 switch (wr->opcode) {
3482 if (ib_qp->qp_type == IB_QPT_GSI &&
3483 rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
3486 rc = bnxt_re_build_gsi_send(qp, wr, &wqe);
3487 if (rc)
3489 } else if (ib_qp->qp_type == IB_QPT_RAW_ETHERTYPE) {
3492 switch (wr->send_flags) {
3502 rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
3507 rc = bnxt_re_build_rdma_wqe(wr, &wqe);
3511 rc = bnxt_re_build_atomic_wqe(wr, &wqe);
3516 rc = -EINVAL;
3519 rc = bnxt_re_build_inv_wqe(wr, &wqe);
3522 rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
3527 "WR (0x%x) is not supported\n", wr->opcode);
3528 rc = -EINVAL;
3532 if (likely(!rc)) {
3533 if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
3535 rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
3538 if (unlikely(rc)) {
3540 "bad_wr seen with opcode = 0x%x\n", wr->opcode);
3544 wr = wr->next;
3546 bnxt_qplib_post_send_db(&qp->qplib_qp);
3547 if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
3549 spin_unlock_irqrestore(&qp->sq_lock, flags);
3551 return rc;
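/*
 * Illustrative sketch, not part of this driver: the user-space counterpart
 * of the post_send verb above, using libibverbs.  qp, mr, buf and len are
 * hypothetical and must come from a previously created QP and MR.
 */
#if 0	/* illustrative only */
#include <infiniband/verbs.h>

static int post_one_send(struct ibv_qp *qp, struct ibv_mr *mr,
			 void *buf, uint32_t len)
{
	struct ibv_sge sge = {
		.addr   = (uintptr_t)buf,
		.length = len,
		.lkey   = mr->lkey,
	};
	struct ibv_send_wr wr = {
		.wr_id      = 1,
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode     = IBV_WR_SEND,
		.send_flags = IBV_SEND_SIGNALED,
	};
	struct ibv_send_wr *bad_wr = NULL;

	/* On failure, bad_wr points at the first WR that was not posted. */
	return ibv_post_send(qp, &wr, &bad_wr);
}
#endif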
3559 int rc = 0;
3566 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
3569 rc = -EINVAL;
3573 wqe.sg_list = (struct bnxt_qplib_sge *)wr->sg_list;
3574 wqe.num_sge = wr->num_sge;
3575 wqe.wr_id = wr->wr_id;
3577 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
3579 if (rc) {
3584 wr = wr->next;
3586 bnxt_qplib_post_recv_db(&qp->qplib_qp);
3587 return rc;
3594 struct bnxt_re_dev *rdev = qp->rdev;
3595 int rc = 0;
3597 if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL)
3598 rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, wqe);
3600 rc = bnxt_re_build_qp1_recv(qp, wr, wqe);
3602 return rc;
3613 int rc = 0;
3615 spin_lock_irqsave(&qp->rq_lock, flags);
3618 if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
3619 dev_err(rdev_to_dev(qp->rdev),
3621 rc = -EINVAL;
3624 wqe.num_sge = wr->num_sge;
3625 wqe.sg_list = (struct bnxt_qplib_sge *)wr->sg_list;
3626 wqe.wr_id = wr->wr_id;
3629 if (ib_qp->qp_type == IB_QPT_GSI &&
3630 qp->rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
3633 rc = bnxt_re_build_gsi_recv(qp, wr, &wqe);
3634 if (rc)
3637 rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
3639 if (rc) {
3640 dev_err(rdev_to_dev(qp->rdev),
3647 bnxt_qplib_post_recv_db(&qp->qplib_qp);
3650 wr = wr->next;
3654 bnxt_qplib_post_recv_db(&qp->qplib_qp);
3655 spin_unlock_irqrestore(&qp->rq_lock, flags);
3657 return rc;
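/*
 * Illustrative sketch, not part of this driver: the user-space counterpart
 * of the post_recv verb above; qp, mr and buf are hypothetical.  Receive WRs
 * must respect the rq.max_sge limit checked by the driver.
 */
#if 0	/* illustrative only */
#include <infiniband/verbs.h>

static int post_one_recv(struct ibv_qp *qp, struct ibv_mr *mr,
			 void *buf, uint32_t len)
{
	struct ibv_sge sge = {
		.addr   = (uintptr_t)buf,
		.length = len,
		.lkey   = mr->lkey,
	};
	struct ibv_recv_wr wr = {
		.wr_id   = 2,
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ibv_recv_wr *bad_wr = NULL;

	return ibv_post_recv(qp, &wr, &bad_wr);
}
#endif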
3664 struct bnxt_re_dev *rdev = cq->rdev;
3665 int rc = 0;
3667 if (cq->uctx_cq_page) {
3668 BNXT_RE_CQ_PAGE_LIST_DEL(cq->uctx, cq);
3669 free_page((u64)cq->uctx_cq_page);
3670 cq->uctx_cq_page = NULL;
3672 if (cq->is_dbr_soft_cq && cq->uctx) {
3675 if (cq->uctx->dbr_recov_cq) {
3676 dbr_page = cq->uctx->dbr_recov_cq_page;
3677 cq->uctx->dbr_recov_cq_page = NULL;
3678 cq->uctx->dbr_recov_cq = NULL;
3684 spin_lock_bh(&cq->qplib_cq.compl_lock);
3685 cq->qplib_cq.destroyed = true;
3686 spin_unlock_bh(&cq->qplib_cq.compl_lock);
3687 if (ib_cq->poll_ctx == IB_POLL_WORKQUEUE ||
3688 ib_cq->poll_ctx == IB_POLL_UNBOUND_WORKQUEUE)
3689 cancel_work_sync(&ib_cq->work);
3691 rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
3692 if (rc)
3694 "%s id = %d failed rc = %d\n",
3695 __func__, cq->qplib_cq.id, rc);
3697 bnxt_re_put_nq(rdev, cq->qplib_cq.nq);
3698 if (cq->umem && !IS_ERR(cq->umem))
3699 ib_umem_release(cq->umem);
3701 kfree(cq->cql);
3702 atomic_dec(&rdev->stats.rsors.cq_count);
3726 int rc, entries;
3729 int cqe = attr->cqe;
3731 if (attr->flags)
3732 return -EOPNOTSUPP;
3735 if (rdev->mod_exit) {
3736 rc = -EIO;
3744 context = &uctx->ibucontext;
3746 dev_attr = rdev->dev_attr;
3748 if (atomic_read(&rdev->stats.rsors.cq_count) >= dev_attr->max_cq) {
3749 dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded(CQs)\n");
3750 rc = -EINVAL;
3754 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
3755 dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded(CQ_WQs)\n");
3756 rc = -EINVAL;
3762 rc = -ENOMEM;
3765 cq->rdev = rdev;
3766 cq->uctx = uctx;
3767 qplcq = &cq->qplib_cq;
3768 qplcq->cq_handle = (u64)qplcq;
3775 if (!udata && !rdev->gsi_ctx.first_cq_created &&
3776 rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL) {
3777 rdev->gsi_ctx.first_cq_created = true;
3787 if (entries > dev_attr->max_cq_wqes + 1)
3788 entries = dev_attr->max_cq_wqes + 1;
3790 qplcq->sginfo.pgshft = PAGE_SHIFT;
3791 qplcq->sginfo.pgsize = PAGE_SIZE;
3793 if (udata->inlen < sizeof(ureq))
3796 (unsigned int)udata->inlen,
3799 rc = ib_copy_from_udata(&ureq, udata,
3800 min(udata->inlen, sizeof(ureq)));
3801 if (rc)
3805 cq->is_dbr_soft_cq = true;
3817 rc = -ENOMEM;
3826 uctx->dbr_recov_cq = cq;
3827 uctx->dbr_recov_cq_page = dbr_page;
3829 cq->is_dbr_soft_cq = true;
3833 cq->umem = ib_umem_get_compat
3837 if (IS_ERR(cq->umem)) {
3838 rc = PTR_ERR(cq->umem);
3840 "%s: ib_umem_get failed! rc = %d\n",
3841 __func__, rc);
3844 qplcq->sginfo.sghead = get_ib_umem_sgl(cq->umem,
3845 &qplcq->sginfo.nmap);
3846 qplcq->sginfo.npages = ib_umem_num_pages_compat(cq->umem);
3847 if (!uctx->dpi.dbr) {
3848 rc = bnxt_re_get_user_dpi(rdev, uctx);
3849 if (rc)
3852 qplcq->dpi = &uctx->dpi;
3854 cq->max_cql = entries > MAX_CQL_PER_POLL ? MAX_CQL_PER_POLL : entries;
3855 cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
3857 if (!cq->cql) {
3859 "Allocate CQL for %d failed!\n", cq->max_cql);
3860 rc = -ENOMEM;
3863 qplcq->dpi = &rdev->dpi_privileged;
3869 qplcq->max_wqe = entries;
3870 qplcq->nq = bnxt_re_get_nq(rdev);
3871 qplcq->cnq_hw_ring_id = qplcq->nq->ring_id;
3873 rc = bnxt_qplib_create_cq(&rdev->qplib_res, qplcq);
3874 if (rc) {
3879 INIT_LIST_HEAD(&cq->cq_list);
3880 cq->ibcq.cqe = entries;
3881 cq->cq_period = qplcq->period;
3883 atomic_inc(&rdev->stats.rsors.cq_count);
3884 max_active_cqs = atomic_read(&rdev->stats.rsors.cq_count);
3885 if (max_active_cqs > atomic_read(&rdev->stats.rsors.max_cq_count))
3886 atomic_set(&rdev->stats.rsors.max_cq_count, max_active_cqs);
3887 spin_lock_init(&cq->cq_lock);
3892 resp.cqid = qplcq->id;
3893 resp.tail = qplcq->hwq.cons;
3894 resp.phase = qplcq->period;
3896 resp.dbr = (u64)uctx->dpi.umdbr;
3897 resp.dpi = uctx->dpi.dpi;
3900 if (uctx->wcdpi.dpi) {
3901 resp.wcdpi = uctx->wcdpi.dpi;
3905 if (_is_chip_p7(rdev->chip_ctx)) {
3906 cq->uctx_cq_page = (void *)__get_free_page(GFP_KERNEL);
3908 if (!cq->uctx_cq_page) {
3911 bnxt_qplib_destroy_cq(&rdev->qplib_res, qplcq);
3912 rc = -ENOMEM;
3916 resp.uctx_cq_page = (u64)cq->uctx_cq_page;
3920 rc = bnxt_re_copy_to_udata(rdev, &resp,
3921 min(udata->outlen, sizeof(resp)),
3923 if (rc) {
3924 free_page((u64)cq->uctx_cq_page);
3925 cq->uctx_cq_page = NULL;
3926 bnxt_qplib_destroy_cq(&rdev->qplib_res, qplcq);
3930 if (cq->uctx_cq_page)
3937 if (udata && cq->umem && !IS_ERR(cq->umem))
3938 ib_umem_release(cq->umem);
3941 if (cq->cql)
3942 kfree(cq->cql);
3945 return rc;
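/*
 * Illustrative sketch, not part of this driver: the create_cq verb above is
 * reached from user space through ibv_create_cq(), and the matching teardown
 * is ibv_destroy_cq().  ctx is a hypothetical ibv_context; the requested cqe
 * count may be rounded up by the provider, as done above when sizing the ring.
 */
#if 0	/* illustrative only */
#include <infiniband/verbs.h>

static struct ibv_cq *make_cq(struct ibv_context *ctx, int cqe)
{
	/* No completion channel, completion vector 0. */
	return ibv_create_cq(ctx, cqe, NULL, NULL, 0);
}
#endif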
3951 struct bnxt_re_dev *rdev = cq->rdev;
3952 int rc;
3954 if ((cq->cq_count != cq_count) || (cq->cq_period != cq_period)) {
3955 cq->qplib_cq.count = cq_count;
3956 cq->qplib_cq.period = cq_period;
3957 rc = bnxt_qplib_modify_cq(&rdev->qplib_res, &cq->qplib_cq);
3958 if (rc) {
3959 dev_err(rdev_to_dev(rdev), "Modify HW CQ %#x failed!\n",
3960 cq->qplib_cq.id);
3961 return rc;
3964 cq->cq_count = cq_count;
3965 cq->cq_period = cq_period;
3972 struct bnxt_re_dev *rdev = cq->rdev;
3974 bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
3976 cq->qplib_cq.max_wqe = cq->resize_cqe;
3977 if (cq->resize_umem) {
3978 ib_umem_release(cq->umem);
3979 cq->umem = cq->resize_umem;
3980 cq->resize_umem = NULL;
3981 cq->resize_cqe = 0;
3995 int rc, entries;
4001 rdev = cq->rdev;
4002 dev_attr = rdev->dev_attr;
4003 if (ib_cq->uobject) {
4007 context = &uctx->ibucontext;
4010 if (cq->resize_umem) {
4011 dev_err(rdev_to_dev(rdev), "Resize CQ %#x failed - Busy\n",
4012 cq->qplib_cq.id);
4013 return -EBUSY;
4017 if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
4018 dev_err(rdev_to_dev(rdev), "Resize CQ %#x failed - max exceeded\n",
4019 cq->qplib_cq.id);
4020 return -EINVAL;
4024 entries = min_t(u32, (u32)entries, dev_attr->max_cq_wqes + 1);
4029 if (entries == cq->ibcq.cqe) {
4034 if (ib_cq->uobject && udata) {
4035 if (udata->inlen < sizeof(ureq))
4038 (unsigned int)udata->inlen,
4041 rc = ib_copy_from_udata(&ureq, udata,
4042 min(udata->inlen, sizeof(ureq)));
4043 if (rc)
4048 cq->resize_umem = ib_umem_get_compat
4053 if (IS_ERR(cq->resize_umem)) {
4054 rc = PTR_ERR(cq->resize_umem);
4055 cq->resize_umem = NULL;
4056 dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed! rc = %d\n",
4057 __func__, rc);
4060 cq->resize_cqe = entries;
4063 memcpy(&sginfo, &cq->qplib_cq.sginfo, sizeof(sginfo));
4064 orig_dpi = cq->qplib_cq.dpi;
4066 cq->qplib_cq.sginfo.sghead = get_ib_umem_sgl(cq->resize_umem,
4067 &cq->qplib_cq.sginfo.nmap);
4068 cq->qplib_cq.sginfo.npages =
4069 ib_umem_num_pages_compat(cq->resize_umem);
4070 cq->qplib_cq.sginfo.pgsize = PAGE_SIZE;
4071 cq->qplib_cq.sginfo.pgshft = PAGE_SHIFT;
4072 cq->qplib_cq.dpi = &uctx->dpi;
4077 rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
4078 if (rc) {
4079 dev_err(rdev_to_dev(rdev), "Resize HW CQ %#x failed!\n",
4080 cq->qplib_cq.id);
4084 cq->ibcq.cqe = cq->resize_cqe;
4088 if (!cq->resize_umem)
4089 bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
4091 atomic_inc(&rdev->stats.rsors.resize_count);
4095 if (cq->resize_umem) {
4096 ib_umem_release(cq->resize_umem);
4097 cq->resize_umem = NULL;
4098 cq->resize_cqe = 0;
4099 memcpy(&cq->qplib_cq.sginfo, &sginfo, sizeof(sginfo));
4100 cq->qplib_cq.dpi = orig_dpi;
4102 return rc;
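/*
 * Illustrative note, hedged: user space drives this path with
 * ibv_resize_cq(cq, cqe).  The driver pins the new buffer in resize_umem,
 * swaps it in once the firmware command completes, and restores the saved
 * sginfo/dpi on failure, as shown above.
 */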
4190 switch (cqe->type) {
4192 wc->opcode = IB_WC_SEND;
4195 wc->opcode = IB_WC_SEND;
4196 wc->wc_flags |= IB_WC_WITH_IMM;
4199 wc->opcode = IB_WC_SEND;
4200 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
4203 wc->opcode = IB_WC_RDMA_WRITE;
4206 wc->opcode = IB_WC_RDMA_WRITE;
4207 wc->wc_flags |= IB_WC_WITH_IMM;
4210 wc->opcode = IB_WC_RDMA_READ;
4213 wc->opcode = IB_WC_COMP_SWAP;
4216 wc->opcode = IB_WC_FETCH_ADD;
4219 wc->opcode = IB_WC_LOCAL_INV;
4222 wc->opcode = IB_WC_REG_MR;
4225 wc->opcode = IB_WC_SEND;
4229 wc->status = __req_to_ib_wc_status(cqe->status);
4236 /* raweth_qp1_flags bits 9:6 indicate itype */
4240 return -1;
4246 /* raweth_qp1_flags2 bit 8 indicates ip_type: 0 = IPv4, 1 = IPv6 */
4264 bool rc = false;
4272 if (!ether_addr_equal(tmp_buf, rdev->dev_addr)) {
4276 eth_type = ntohs(eth_hdr->h_proto);
4279 rc = true;
4286 return rc;
4301 eth_type = ntohs(eth_hdr->h_proto);
4305 cqe->raweth_qp1_metadata =
4306 ntohs(vlan_hdr->h_vlan_TCI) |
4309 cqe->raweth_qp1_flags2 |=
4336 int rc;
4340 rdev = gsi_qp->rdev;
4341 gsi_sqp = rdev->gsi_ctx.gsi_sqp;
4342 tbl_idx = cqe->wr_id;
4344 hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf;
4345 rq_hdr_buf = (u8 *) hdr_buf->va + tbl_idx * hdr_buf->step;
4346 rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
4349 shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_sqp->qplib_qp,
4351 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
4354 pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
4355 cqe->raweth_qp1_flags2);
4358 return -EINVAL;
4377 memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
4378 sqp_entry->qp1_qp = gsi_qp;
4400 r_sge[1].addr = sqp_entry->sge.addr + offset;
4401 r_sge[1].lkey = sqp_entry->sge.lkey;
4402 r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
4410 rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
4411 if (rc) {
4414 return -ENOMEM;
4417 swr->num_sge = 2;
4418 swr->sg_list = s_sge;
4419 swr->wr_id = tbl_idx;
4420 swr->opcode = IB_WR_SEND;
4421 swr->next = NULL;
4423 gsi_sah = rdev->gsi_ctx.gsi_sah;
4424 udwr.ah = &gsi_sah->ibah;
4425 udwr.remote_qpn = gsi_sqp->qplib_qp.id;
4426 udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
4428 rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
4430 return rc;
4436 wc->opcode = IB_WC_RECV;
4437 wc->status = __rawqp1_to_ib_wc_status(cqe->status);
4438 wc->wc_flags |= IB_WC_GRH;
4444 wc->opcode = IB_WC_RECV;
4445 wc->status = __rc_to_ib_wc_status(cqe->status);
4447 if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
4448 wc->wc_flags |= IB_WC_WITH_IMM;
4449 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
4450 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
4451 if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
4453 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
4456 /* Returns TRUE if pkt has valid VLAN and if VLAN id is non-zero */
4463 metadata = orig_cqe->raweth_qp1_metadata;
4464 if (orig_cqe->raweth_qp1_flags2 &
4487 struct bnxt_re_dev *rdev = gsi_sqp->rdev;
4495 tbl_idx = cqe->wr_id;
4497 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
4498 gsi_qp = sqp_entry->qp1_qp;
4499 orig_cqe = &sqp_entry->cqe;
4501 wc->wr_id = sqp_entry->wrid;
4502 wc->byte_len = orig_cqe->length;
4503 wc->qp = &gsi_qp->ib_qp;
4505 wc->ex.imm_data = orig_cqe->immdata;
4506 wc->src_qp = orig_cqe->src_qp;
4507 memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
4510 wc->sl = sl;
4511 wc->vlan_id = vlan_id;
4512 wc->wc_flags |= IB_WC_WITH_VLAN;
4515 wc->port_num = 1;
4516 wc->vendor_err = orig_cqe->status;
4518 wc->opcode = IB_WC_RECV;
4519 wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
4520 wc->wc_flags |= IB_WC_GRH;
4522 nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
4523 orig_cqe->raweth_qp1_flags2);
4534 wc->opcode = IB_WC_RECV;
4535 wc->status = __rc_to_ib_wc_status(cqe->status);
4536 if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
4537 wc->wc_flags |= IB_WC_WITH_IMM;
4538 if (cqe->flags & CQ_RES_RC_FLAGS_INV)
4539 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
4541 if (rdev->gsi_ctx.gsi_qp->qplib_qp.id == qp->qplib_qp.id &&
4542 rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD) {
4543 wc->wc_flags |= IB_WC_GRH;
4544 memcpy(wc->smac, cqe->smac, ETH_ALEN);
4545 wc->wc_flags |= IB_WC_WITH_SMAC;
4546 if (_is_cqe_v2_supported(rdev->dev_attr->dev_cap_flags)) {
4547 if (cqe->flags & CQ_RES_UD_V2_FLAGS_META_FORMAT_MASK) {
4548 if (cqe->cfa_meta &
4550 vlan_id = (cqe->cfa_meta & 0xFFF);
4552 } else if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
4553 vlan_id = (cqe->cfa_meta & 0xFFF);
4557 wc->vlan_id = vlan_id;
4558 wc->wc_flags |= IB_WC_WITH_VLAN;
4565 struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
4567 int rc = 0;
4569 spin_lock_irqsave(&qp->sq_lock, flags);
4571 rc = bnxt_re_legacy_bind_fence_mw(lib_qp);
4572 if (!rc) {
4573 lib_qp->sq.phantom_wqe_cnt++;
4574 dev_dbg(&lib_qp->sq.hwq.pdev->dev,
4575 "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
4576 lib_qp->id, lib_qp->sq.hwq.prod,
4577 HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
4578 lib_qp->sq.phantom_wqe_cnt);
4581 spin_unlock_irqrestore(&qp->sq_lock, flags);
4582 return rc;
4588 struct bnxt_re_dev *rdev = cq->rdev;
4603 if (cq->is_dbr_soft_cq) {
4611 if (cq->umem) {
4612 if (cq->resize_umem)
4617 spin_lock_irqsave(&cq->cq_lock, flags);
4619 budget = min_t(u32, num_entries, cq->max_cql);
4621 if (!cq->cql) {
4625 cqe = &cq->cql[0];
4626 gsi_mode = rdev->gsi_ctx.gsi_qp_mode;
4629 ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
4631 sq = &lib_qp->sq;
4632 if (sq->legacy_send_phantom) {
4634 if (bnxt_re_legacy_send_phantom_wqe(qp) == -ENOMEM)
4638 sq->legacy_send_phantom = false;
4642 ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
4644 budget - ncqe);
4653 wc->wr_id = cqe->wr_id;
4654 wc->byte_len = cqe->length;
4655 qp = to_bnxt_re((struct bnxt_qplib_qp *)cqe->qp_handle,
4662 wc->qp = &qp->ib_qp;
4663 wc->ex.imm_data = cqe->immdata;
4664 wc->src_qp = cqe->src_qp;
4665 memcpy(wc->smac, cqe->smac, ETH_ALEN);
4666 wc->port_num = 1;
4667 wc->vendor_err = cqe->status;
4669 switch (cqe->opcode) {
4672 qp->qplib_qp.id ==
4673 rdev->gsi_ctx.gsi_sqp->qplib_qp.id) {
4685 if (!cqe->status) {
4686 int rc = 0;
4687 rc = bnxt_re_process_raw_qp_packet_receive(qp, cqe);
4688 if (!rc) {
4693 cqe->status = -1;
4699 tbl_idx = cqe->wr_id;
4700 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
4701 wc->wr_id = sqp_entry->wrid;
4711 qp->qplib_qp.id ==
4712 rdev->gsi_ctx.gsi_sqp->qplib_qp.id) {
4718 if (cqe->status) {
4731 dev_err(rdev_to_dev(cq->rdev),
4732 "POLL CQ type 0x%x not handled, skip!\n",
4733 cqe->opcode);
4737 budget--;
4741 spin_unlock_irqrestore(&cq->cq_lock, flags);
4742 return init_budget - budget;
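/*
 * Illustrative sketch, not part of this driver: a user-space polling loop
 * matching the poll_cq verb above; cq is hypothetical.  ibv_poll_cq()
 * returns the number of completions written to wc, 0 when the CQ is empty,
 * or a negative value on error.
 */
#if 0	/* illustrative only */
#include <infiniband/verbs.h>

static int drain_cq(struct ibv_cq *cq)
{
	struct ibv_wc wc[16];
	int n, i;

	while ((n = ibv_poll_cq(cq, 16, wc)) > 0) {
		for (i = 0; i < n; i++) {
			if (wc[i].status != IBV_WC_SUCCESS)
				return -1;	/* wc[i].vendor_err holds details */
		}
	}
	return n;	/* 0 when drained, < 0 on poll error */
}
#endif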
4749 int type = 0, rc = 0;
4752 spin_lock_irqsave(&cq->cq_lock, flags);
4760 bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
4764 !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
4765 rc = 1;
4767 spin_unlock_irqrestore(&cq->cq_lock, flags);
4769 return rc;
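/*
 * Illustrative note, hedged: the rc = 1 return above reports completions
 * that were already queued when the CQ was re-armed (the
 * IB_CQ_REPORT_MISSED_EVENTS semantic), so the consumer polls again instead
 * of sleeping.  A user-space arm/wait/ack loop is sketched below; channel is
 * hypothetical.
 */
#if 0	/* illustrative only */
#include <infiniband/verbs.h>

static int wait_for_completion_event(struct ibv_comp_channel *channel)
{
	struct ibv_cq *ev_cq;
	void *ev_ctx;

	if (ibv_get_cq_event(channel, &ev_cq, &ev_ctx))
		return -1;
	ibv_ack_cq_events(ev_cq, 1);
	/* Re-arm before polling to avoid losing the next event. */
	if (ibv_req_notify_cq(ev_cq, 0))
		return -1;
	return 0;	/* caller should now drain the CQ */
}
#endif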
4781 int rc;
4785 rdev = pd->rdev;
4791 return ERR_PTR(-ENOMEM);
4793 mr->rdev = rdev;
4794 mr->qplib_mr.pd = &pd->qplib_pd;
4795 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
4796 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
4799 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4800 if (rc) {
4804 mr->qplib_mr.total_size = -1; /* Infinite length */
4810 mrinfo.mrw = &mr->qplib_mr;
4812 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
4813 if (rc) {
4817 mr->ib_mr.lkey = mr->qplib_mr.lkey;
4820 mr->ib_mr.rkey = mr->ib_mr.lkey;
4821 atomic_inc(&rdev->stats.rsors.mr_count);
4822 max_mr_count = atomic_read(&rdev->stats.rsors.mr_count);
4823 if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count))
4824 atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);
4826 return &mr->ib_mr;
4829 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4832 return ERR_PTR(rc);
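/*
 * Illustrative note, hedged: this verb registers an all-of-physical-memory
 * MR (total_size = -1 above, "infinite length") and is typically only
 * exercised when a kernel consumer asks for a DMA MR rather than using the
 * PD's local DMA lkey; access flags are translated by
 * __from_ib_access_flags() just as for regular MRs.
 */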
4838 struct bnxt_re_dev *rdev = mr->rdev;
4839 int rc = 0;
4841 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4842 if (rc)
4843 dev_err(rdev_to_dev(rdev), "Dereg MR failed, lkey = %#x, rc = %d\n",
4844 mr->qplib_mr.lkey, rc);
4846 if (mr->pages) {
4847 bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
4848 &mr->qplib_frpl);
4849 kfree(mr->pages);
4850 mr->npages = 0;
4851 mr->pages = NULL;
4853 if (!IS_ERR(mr->ib_umem) && mr->ib_umem) {
4854 mr->is_invalcb_active = false;
4855 bnxt_re_peer_mem_release(mr->ib_umem);
4858 atomic_dec(&rdev->stats.rsors.mr_count);
4866 if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
4867 return -ENOMEM;
4869 mr->pages[mr->npages++] = addr;
4870 dev_dbg(NULL, "%s: ibdev %p Set MR pages[%d] = 0x%lx\n",
4871 ROCE_DRV_MODULE_NAME, ib_mr->device, mr->npages - 1,
4872 mr->pages[mr->npages - 1]);
4881 mr->npages = 0;
4890 struct bnxt_re_dev *rdev = pd->rdev;
4893 int rc;
4897 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported\n", type);
4898 return ERR_PTR(-EINVAL);
4902 return ERR_PTR(-EINVAL);
4907 return ERR_PTR(-ENOMEM);
4909 mr->rdev = rdev;
4910 mr->qplib_mr.pd = &pd->qplib_pd;
4911 mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
4912 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
4914 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4915 if (rc) {
4919 mr->ib_mr.lkey = mr->qplib_mr.lkey;
4920 mr->ib_mr.rkey = mr->ib_mr.lkey;
4921 mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
4922 if (!mr->pages) {
4925 rc = -ENOMEM;
4928 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
4929 &mr->qplib_frpl, max_num_sg);
4930 if (rc) {
4935 dev_dbg(rdev_to_dev(rdev), "Alloc MR pages = 0x%p\n", mr->pages);
4937 atomic_inc(&rdev->stats.rsors.mr_count);
4938 max_mr_count = atomic_read(&rdev->stats.rsors.mr_count);
4939 if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count))
4940 atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);
4941 return &mr->ib_mr;
4944 kfree(mr->pages);
4946 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4949 return ERR_PTR(rc);
4957 struct bnxt_re_dev *rdev = pd->rdev;
4960 int rc;
4965 rc = -ENOMEM;
4968 mw->rdev = rdev;
4969 mw->qplib_mw.pd = &pd->qplib_pd;
4971 mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
4974 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
4975 if (rc) {
4979 mw->ib_mw.rkey = mw->qplib_mw.rkey;
4980 atomic_inc(&rdev->stats.rsors.mw_count);
4981 max_mw_count = atomic_read(&rdev->stats.rsors.mw_count);
4982 if (max_mw_count > atomic_read(&rdev->stats.rsors.max_mw_count))
4983 atomic_set(&rdev->stats.rsors.max_mw_count, max_mw_count);
4985 return &mw->ib_mw;
4989 return ERR_PTR(rc);
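/*
 * Illustrative sketch, not part of this driver: the user-space counterpart
 * of the alloc_mw verb above; pd is hypothetical.  Type-1 windows are bound
 * with ibv_bind_mw(), type-2 windows are bound through a work request.
 */
#if 0	/* illustrative only */
#include <infiniband/verbs.h>

static struct ibv_mw *make_window(struct ibv_pd *pd)
{
	return ibv_alloc_mw(pd, IBV_MW_TYPE_1);
}
#endif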
4995 struct bnxt_re_dev *rdev = mw->rdev;
4996 int rc;
4998 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
4999 if (rc) {
5000 dev_err(rdev_to_dev(rdev), "Free MW failed: rc = %d\n", rc);
5001 return rc;
5005 atomic_dec(&rdev->stats.rsors.mw_count);
5006 return rc;
5032 pgshft = ilog2(umem->page_size);
5057 struct bnxt_re_dev *rdev = pd->rdev;
5059 int umem_pgs, page_shift, rc;
5067 if (bnxt_re_get_total_mr_mw_count(rdev) >= rdev->dev_attr->max_mr)
5068 return ERR_PTR(-ENOMEM);
5070 if (rdev->mod_exit) {
5072 return ERR_PTR(-EIO);
5078 return ERR_PTR(-ENOMEM);
5083 return ERR_PTR(-ENOMEM);
5085 mr->rdev = rdev;
5086 mr->qplib_mr.pd = &pd->qplib_pd;
5087 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
5088 mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
5090 if (!_is_alloc_mr_unified(rdev->qplib_res.dattr)) {
5091 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
5092 if (rc) {
5097 mr->ib_mr.rkey = mr->qplib_mr.rkey;
5100 umem = ib_umem_get_flags_compat(rdev, ib_pd->uobject->context,
5104 rc = PTR_ERR(umem);
5105 dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed! rc = %d\n",
5106 __func__, rc);
5109 mr->ib_umem = umem;
5111 mr->qplib_mr.va = virt_addr;
5115 rc = -EINVAL;
5118 mr->qplib_mr.total_size = length;
5120 rdev->dev_attr->page_size_cap);
5123 rc = -EFAULT;
5134 mrinfo.mrw = &mr->qplib_mr;
5136 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
5137 if (rc) {
5142 mr->ib_mr.lkey = mr->ib_mr.rkey = mr->qplib_mr.lkey;
5143 atomic_inc(&rdev->stats.rsors.mr_count);
5144 max_mr_count = atomic_read(&rdev->stats.rsors.mr_count);
5145 if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count))
5146 atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);
5148 return &mr->ib_mr;
5151 bnxt_re_peer_mem_release(mr->ib_umem);
5153 if (!_is_alloc_mr_unified(rdev->qplib_res.dattr))
5154 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
5157 return ERR_PTR(rc);
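/*
 * Illustrative sketch, not part of this driver: the user-space counterpart
 * of the reg_user_mr verb above; pd, buf and len are hypothetical.  The umem
 * pinned by this call is what the driver walks to build the MR's page list.
 */
#if 0	/* illustrative only */
#include <infiniband/verbs.h>

static struct ibv_mr *register_buffer(struct ibv_pd *pd, void *buf, size_t len)
{
	return ibv_reg_mr(pd, buf, len,
			  IBV_ACCESS_LOCAL_WRITE |
			  IBV_ACCESS_REMOTE_READ |
			  IBV_ACCESS_REMOTE_WRITE);
}
#endif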
5167 int umem_pgs = 0, page_shift = PAGE_SHIFT, rc;
5168 struct bnxt_re_dev *rdev = mr->rdev;
5176 umem = ib_umem_get_flags_compat(rdev, ib_pd->uobject->context,
5180 rc = PTR_ERR(umem);
5183 __func__, rc);
5186 mr->ib_umem = umem;
5188 mr->qplib_mr.va = virt_addr;
5192 rc = -EINVAL;
5195 mr->qplib_mr.total_size = length;
5197 rdev->dev_attr->page_size_cap);
5201 rc = -EFAULT;
5212 mrinfo.mrw = &mr->qplib_mr;
5214 mr->qplib_mr.pd = &pd->qplib_pd;
5217 mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
5219 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
5220 if (rc) {
5224 mr->ib_mr.rkey = mr->qplib_mr.rkey;
5229 bnxt_re_peer_mem_release(mr->ib_umem);
5231 return rc;
5236 struct ib_device *ibdev = &rdev->ibdev;
5245 return -EPERM;
5254 struct ib_device *ibdev = ctx->device;
5259 struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
5265 int rc;
5267 cctx = rdev->chip_ctx;
5268 rc = bnxt_re_check_abi_version(rdev);
5269 if (rc)
5272 uctx->rdev = rdev;
5273 uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
5274 if (!uctx->shpg) {
5276 rc = -ENOMEM;
5279 spin_lock_init(&uctx->sh_lock);
5281 chip_met_rev_num = cctx->chip_num;
5282 chip_met_rev_num |= ((u32)cctx->chip_rev & 0xFF) <<
5284 chip_met_rev_num |= ((u32)cctx->chip_metal & 0xFF) <<
5292 resp.dev_id = rdev->en_dev->pdev->devfn;
5293 resp.max_qp = rdev->qplib_res.hctx->qp_ctx.max;
5298 resp.modes = genp5 ? cctx->modes.wqe_mode : 0;
5299 if (rdev->dev_attr && BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags))
5305 resp.max_cqd = dev_attr->max_cq_wqes;
5306 if (genp5 && cctx->modes.db_push) {
5309 !(dev_attr->dev_cap_flags &
5317 if (rdev->dbr_pacing)
5320 if (rdev->dbr_drop_recov && rdev->user_dbr_drop_recov)
5323 if (udata->inlen >= sizeof(ureq)) {
5324 rc = ib_copy_from_udata(&ureq, udata,
5325 min(udata->inlen, sizeof(ureq)));
5326 if (rc)
5344 uctx->cmask = (uint64_t)resp.comp_mask;
5345 rc = bnxt_re_copy_to_udata(rdev, &resp,
5346 min(udata->outlen, sizeof(resp)),
5348 if (rc)
5351 INIT_LIST_HEAD(&uctx->cq_list);
5352 mutex_init(&uctx->cq_lock);
5356 free_page((u64)uctx->shpg);
5357 uctx->shpg = NULL;
5359 return rc;
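/*
 * Illustrative sketch, not part of this driver: alloc_ucontext above runs
 * when user space opens the device.  Device selection is simplified and
 * error handling abbreviated.
 */
#if 0	/* illustrative only */
#include <infiniband/verbs.h>

static struct ibv_context *open_first_device(void)
{
	struct ibv_device **list;
	struct ibv_context *ctx = NULL;
	int num;

	list = ibv_get_device_list(&num);
	if (!list)
		return NULL;
	if (num > 0)
		ctx = ibv_open_device(list[0]);	/* triggers alloc_ucontext */
	ibv_free_device_list(list);
	return ctx;
}
#endif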
5367 struct bnxt_re_dev *rdev = uctx->rdev;
5368 int rc = 0;
5370 if (uctx->shpg)
5371 free_page((u64)uctx->shpg);
5373 if (uctx->dpi.dbr) {
5377 if (_is_chip_gen_p5_p7(rdev->chip_ctx) && uctx->wcdpi.dbr) {
5378 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
5379 &uctx->wcdpi);
5380 if (rc)
5383 uctx->wcdpi.dbr = NULL;
5386 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
5387 &uctx->dpi);
5388 if (rc)
5391 uctx->dpi.dbr = NULL;
5401 if (!_is_chip_p7(uctx->rdev->chip_ctx))
5404 mutex_lock(&uctx->cq_lock);
5405 list_for_each_entry(tmp_cq, &uctx->cq_list, cq_list) {
5406 if (((u64)tmp_cq->uctx_cq_page >> PAGE_SHIFT) == pg_off) {
5411 mutex_unlock(&uctx->cq_lock);
5421 struct bnxt_re_dev *rdev = uctx->rdev;
5423 int rc = 0;
5426 switch (vma->vm_pgoff) {
5428 pfn = vtophys(uctx->shpg) >> PAGE_SHIFT;
5429 rc = rdma_user_mmap_io(&uctx->ibucontext, vma, pfn, PAGE_SIZE, vma->vm_page_prot, NULL);
5430 dev_dbg(rdev_to_dev(rdev), "%s:%d uctx->shpg 0x%lx, vtophys(uctx->shpg) 0x%lx, pfn = 0x%lx \n",
5431 __func__, __LINE__, (u64) uctx->shpg, vtophys(uctx->shpg), pfn);
5432 if (rc) {
5434 rc = -EAGAIN;
5436 return rc;
5438 vma->vm_page_prot =
5439 pgprot_writecombine(vma->vm_page_prot);
5440 pfn = (uctx->wcdpi.umdbr >> PAGE_SHIFT);
5442 return -EFAULT;
5446 if (vma->vm_flags & VM_WRITE)
5447 return -EFAULT;
5449 pfn = vtophys(rdev->dbr_page) >> PAGE_SHIFT;
5451 return -EFAULT;
5454 pfn = vtophys(uctx->dbr_recov_cq_page) >> PAGE_SHIFT;
5456 return -EFAULT;
5459 cq = is_bnxt_re_cq_page(uctx, vma->vm_pgoff);
5461 pfn = vtophys((void *)cq->uctx_cq_page) >> PAGE_SHIFT;
5462 rc = rdma_user_mmap_io(&uctx->ibucontext, vma, pfn, PAGE_SIZE, vma->vm_page_prot, NULL);
5463 if (rc) {
5466 rc = -EAGAIN;
5470 vma->vm_page_prot =
5471 pgprot_noncached(vma->vm_page_prot);
5472 pfn = vma->vm_pgoff;
5477 rc = rdma_user_mmap_io(&uctx->ibucontext, vma, pfn, PAGE_SIZE, vma->vm_page_prot, NULL);
5478 if (rc) {
5480 return -EAGAIN;
5482 rc = __bnxt_re_set_vma_data(uctx, vma);
5484 return rc;
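/*
 * Illustrative note, hedged: the mmap verb above dispatches on
 * vma->vm_pgoff.  The shared page, the write-combined push doorbell, the
 * read-only doorbell pacing page, the doorbell-recovery CQ page and per-CQ
 * pages are each mapped with rdma_user_mmap_io(); anything else falls
 * through to a plain doorbell mapping with non-cached page protection.  The
 * symbolic offset values are in lines elided from this listing.
 */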