Lines Matching defs:rdev

55 struct bnxt_re_dev *rdev;
60 rdev = dmac_work->rdev;
62 rc = ib_resolve_eth_dmac(&rdev->ibdev, ah_attr);
64 dev_err(rdev_to_dev(dmac_work->rdev),
111 static int bnxt_re_copy_to_udata(struct bnxt_re_dev *rdev, void *data,
118 dev_err(rdev_to_dev(rdev),
128 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
133 if (!rdev || !rdev->netdev)
136 netdev = rdev->netdev;
151 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
152 struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
157 bnxt_qplib_get_guid(rdev->dev_addr, (u8 *)&ib_attr->sys_image_guid);
160 ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
161 ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
162 ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
169 if (rdev->min_tx_depth == 1 &&
171 rdev->min_tx_depth = min_tx_depth;
227 dev_dbg(rdev_to_dev(rdev), "Modify device with mask 0x%x\n",
290 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
291 struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
294 dev_dbg(rdev_to_dev(rdev), "QUERY PORT with port_num 0x%x\n", port_num);
298 port_attr->state = bnxt_re_get_link_state(rdev);
302 port_attr->active_mtu = iboe_get_mtu(if_getmtu(rdev->netdev));
320 rdev->espeed = rdev->en_dev->espeed;
322 if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
323 __to_ib_speed_width(rdev->espeed, &active_speed,
336 dev_dbg(rdev_to_dev(rdev), "Modify port with mask 0x%x\n",
355 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
363 if (rdev->roce_mode == BNXT_RE_FLAG_ROCEV1_CAP)
365 else if (rdev->roce_mode == BNXT_RE_FLAG_ROCEV2_CAP)
376 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
378 sprintf(str, "%d.%d.%d.%d", rdev->dev_attr->fw_ver[0],
379 rdev->dev_attr->fw_ver[1], rdev->dev_attr->fw_ver[2],
380 rdev->dev_attr->fw_ver[3]);
397 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
402 rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
403 &rdev->qplib_res.sgid_tbl, index,
413 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
414 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
421 dev_err(rdev_to_dev(rdev), "GID entry has no ctx?!\n");
426 dev_dbg(rdev_to_dev(rdev), "GID index out of range?!\n");
442 (rdev->gsi_ctx.gsi_sqp ||
443 rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD)) {
444 dev_dbg(rdev_to_dev(rdev),
447 rdev->gid_map[index] = -1;
454 rdev->gid_map[index] = -1;
459 dev_dbg(rdev_to_dev(rdev), "GID remove success\n");
464 dev_err(rdev_to_dev(rdev),
469 dev_dbg(rdev_to_dev(rdev), "GID sgid_tbl does not exist!\n");
483 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
484 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
489 rdev->dev_addr,
492 dev_dbg(rdev_to_dev(rdev), "GID %pI6 is already present\n", gid);
506 rdev->gid_map[index] = tbl_idx;
509 dev_err(rdev_to_dev(rdev), "Add GID failed rc = 0x%x\n", rc);
514 dev_err(rdev_to_dev(rdev), "Add GID ctx failed\n");
522 rdev->gid_map[index] = tbl_idx;
539 struct bnxt_re_dev *rdev = pd->rdev;
541 if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
580 dev_dbg(rdev_to_dev(qp->rdev),
585 dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
597 struct bnxt_re_dev *rdev = pd->rdev;
606 if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
614 dma_addr = ib_dma_map_single(&rdev->ibdev, fence->va,
617 rc = ib_dma_mapping_error(&rdev->ibdev, dma_addr);
619 dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
631 mr->rdev = rdev;
635 if (!_is_alloc_mr_unified(rdev->qplib_res.dattr)) {
636 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
638 dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
656 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
658 dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
663 atomic_inc(&rdev->stats.rsors.mr_count);
664 max_mr_count = atomic_read(&rdev->stats.rsors.mr_count);
665 if (max_mr_count > (atomic_read(&rdev->stats.rsors.max_mr_count)))
666 atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);
671 dev_err(rdev_to_dev(rdev),
683 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
684 atomic_dec(&rdev->stats.rsors.mr_count);
690 ib_dma_unmap_single(&rdev->ibdev, fence->dma_addr,
703 struct bnxt_re_dev *rdev = pd->rdev;
706 if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
715 bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
718 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
721 atomic_dec(&rdev->stats.rsors.mr_count);
724 ib_dma_unmap_single(&rdev->ibdev, fence->dma_addr,
734 static int bnxt_re_get_user_dpi(struct bnxt_re_dev *rdev,
737 struct bnxt_qplib_chip_ctx *cctx = rdev->chip_ctx;
744 ret = bnxt_qplib_alloc_dpi(&rdev->qplib_res, &cntx->dpi, cntx, type);
746 dev_err(rdev_to_dev(rdev), "Alloc doorbell page failed!\n");
752 ret = bnxt_qplib_alloc_dpi(&rdev->qplib_res, &cntx->wcdpi,
755 dev_err(rdev_to_dev(rdev), "push dp alloc failed\n");
765 struct bnxt_re_dev *rdev = pd->rdev;
770 rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
771 &rdev->qplib_res.pd_tbl,
774 dev_err_ratelimited(rdev_to_dev(rdev),
776 atomic_dec(&rdev->stats.rsors.pd_count);
786 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
794 pd->rdev = rdev;
795 if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
796 dev_err(rdev_to_dev(rdev),
806 rc = bnxt_re_get_user_dpi(rdev, ucntx);
820 if (rdev->dbr_pacing) {
821 WARN_ON(!rdev->dbr_bar_addr);
822 resp.dbr_bar_addr = (u64)rdev->dbr_bar_addr;
826 rc = bnxt_re_copy_to_udata(rdev, &resp,
835 dev_warn(rdev_to_dev(rdev),
838 atomic_inc(&rdev->stats.rsors.pd_count);
839 max_pd_count = atomic_read(&rdev->stats.rsors.pd_count);
840 if (max_pd_count > atomic_read(&rdev->stats.rsors.max_pd_count))
841 atomic_set(&rdev->stats.rsors.max_pd_count, max_pd_count);
845 (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
855 struct bnxt_re_dev *rdev = ah->rdev;
861 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
863 dev_err_ratelimited(rdev_to_dev(rdev),
866 atomic_dec(&rdev->stats.rsors.ah_count);
906 static int bnxt_re_get_ah_info(struct bnxt_re_dev *rdev,
919 rc = bnxt_re_get_cached_gid(&rdev->ibdev, 1, ah_attr->grh.sgid_index,
940 static u8 _get_sgid_index(struct bnxt_re_dev *rdev, u8 gindx)
942 gindx = rdev->gid_map[gindx];
946 static int bnxt_re_init_dmac(struct bnxt_re_dev *rdev, struct ib_ah_attr *ah_attr,
963 resolve_dmac_work->rdev = rdev;
969 queue_work(rdev->resolve_wq, &resolve_dmac_work->work);
980 &rdev->mac_wq_list);
999 struct bnxt_re_dev *rdev = pd->rdev;
1009 dev_err(rdev_to_dev(rdev), "ah_attr->ah_flags GRH is not set\n");
1011 ah->rdev = rdev;
1018 ah->qplib_ah.sgid_index = _get_sgid_index(rdev, ah_attr->grh.sgid_index);
1020 dev_err(rdev_to_dev(rdev), "invalid sgid_index!\n");
1029 rc = bnxt_re_get_ah_info(rdev, ah_attr, &ah_info);
1034 rc = bnxt_re_init_dmac(rdev, ah_attr, &ah_info, is_user, ah);
1038 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, block);
1040 dev_err(rdev_to_dev(rdev),
1059 atomic_inc(&rdev->stats.rsors.ah_count);
1060 max_ah_count = atomic_read(&rdev->stats.rsors.ah_count);
1061 if (max_ah_count > atomic_read(&rdev->stats.rsors.max_ah_count))
1062 atomic_set(&rdev->stats.rsors.max_ah_count, max_ah_count);
1096 struct bnxt_re_dev *rdev = srq->rdev;
1101 rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
1103 dev_err_ratelimited(rdev_to_dev(rdev),
1110 atomic_dec(&rdev->stats.rsors.srq_count);
1130 struct ib_umem *ib_umem_get_compat(struct bnxt_re_dev *rdev,
1140 struct ib_umem *ib_umem_get_flags_compat(struct bnxt_re_dev *rdev,
1146 return ib_umem_get_compat(rdev, ucontext, udata, addr, size,
1155 static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
1174 dev_warn(rdev_to_dev(rdev),
1186 umem = ib_umem_get_compat(rdev, context, udata, ureq.srqva, bytes,
1189 dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed with %ld\n",
1210 struct bnxt_re_dev *rdev;
1220 rdev = pd->rdev;
1221 dev_attr = rdev->dev_attr;
1223 if (rdev->mod_exit) {
1224 dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__);
1230 dev_err(rdev_to_dev(rdev), "SRQ type not supported\n");
1240 if (atomic_read(&rdev->stats.rsors.srq_count) >= dev_attr->max_srq) {
1241 dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded(SRQs)\n");
1247 dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded(SRQ_WQs)\n");
1252 srq->rdev = rdev;
1254 srq->qplib_srq.dpi = &rdev->dpi_privileged;
1268 srq->qplib_srq.eventq_hw_ring_id = rdev->nqr.nq[0].ring_id;
1273 rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
1278 rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
1280 dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!\n");
1288 rc = bnxt_re_copy_to_udata(rdev, &resp,
1292 bnxt_qplib_destroy_srq(&rdev->qplib_res, &srq->qplib_srq);
1296 atomic_inc(&rdev->stats.rsors.srq_count);
1297 max_srq_count = atomic_read(&rdev->stats.rsors.srq_count);
1298 if (max_srq_count > atomic_read(&rdev->stats.rsors.max_srq_count))
1299 atomic_set(&rdev->stats.rsors.max_srq_count, max_srq_count);
1318 struct bnxt_re_dev *rdev = srq->rdev;
1331 rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
1333 dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!\n");
1341 rc = bnxt_re_copy_to_udata(rdev, srq, 0, udata);
1347 dev_err(rdev_to_dev(rdev),
1358 struct bnxt_re_dev *rdev = srq->rdev;
1361 rc = bnxt_qplib_query_srq(&rdev->qplib_res, &srq->qplib_srq);
1363 dev_err(rdev_to_dev(rdev), "Query HW SRQ (0x%x) failed! rc = %d\n",
1426 struct bnxt_re_dev *rdev;
1430 rdev = qp->rdev;
1431 gsi_sqp = rdev->gsi_ctx.gsi_sqp;
1432 gsi_sah = rdev->gsi_ctx.gsi_sah;
1435 mutex_lock(&rdev->qp_lock);
1437 mutex_unlock(&rdev->qp_lock);
1440 dev_dbg(rdev_to_dev(rdev), "Destroy the shadow AH\n");
1441 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &gsi_sah->qplib_ah,
1444 dev_err(rdev_to_dev(rdev),
1446 atomic_dec(&rdev->stats.rsors.ah_count);
1449 dev_dbg(rdev_to_dev(rdev), "Destroy the shadow QP\n");
1450 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
1452 dev_err(rdev_to_dev(rdev), "Destroy Shadow QP failed\n");
1459 bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
1460 bnxt_qplib_free_hdr_buf(&rdev->qplib_res, &gsi_sqp->qplib_qp);
1461 kfree(rdev->gsi_ctx.sqp_tbl);
1464 rdev->gsi_ctx.gsi_sqp = NULL;
1465 rdev->gsi_ctx.gsi_sah = NULL;
1466 rdev->gsi_ctx.sqp_tbl = NULL;
1467 atomic_dec(&rdev->stats.rsors.qp_count);
1472 static void bnxt_re_dump_debug_stats(struct bnxt_re_dev *rdev, u32 active_qps)
1478 if (!rdev->rcfw.sp_perf_stats_enabled)
1485 if (rdev->rcfw.qp_destroy_stats[i]) {
1487 avg_time += rdev->rcfw.qp_destroy_stats[i];
1491 dev_dbg(rdev_to_dev(rdev),
1498 dev_dbg(rdev_to_dev(rdev),
1507 dev_dbg(rdev_to_dev(rdev),
1510 atomic_read(&rdev->stats.rsors.max_qp_count));
1518 struct bnxt_re_dev *rdev = qp->rdev;
1523 mutex_lock(&rdev->qp_lock);
1525 active_qps = atomic_dec_return(&rdev->stats.rsors.qp_count);
1527 atomic_dec(&rdev->stats.rsors.rc_qp_count);
1529 atomic_dec(&rdev->stats.rsors.ud_qp_count);
1530 mutex_unlock(&rdev->qp_lock);
1532 rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
1534 dev_err_ratelimited(rdev_to_dev(rdev),
1544 bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
1546 rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
1547 if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL &&
1548 rdev->gsi_ctx.gsi_sqp) {
1551 bnxt_qplib_free_hdr_buf(&rdev->qplib_res, &qp->qplib_qp);
1560 bnxt_re_dump_debug_stats(rdev, active_qps);
1604 struct bnxt_re_dev *rdev;
1608 rdev = qp->rdev;
1611 dev_attr = rdev->dev_attr;
1638 static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev,
1658 dev_warn(rdev_to_dev(rdev),
1671 psn_sz = _is_chip_gen_p5_p7(rdev->chip_ctx) ?
1674 if (rdev->dev_attr && BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags))
1680 if (BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags))
1686 umem = ib_umem_get_compat(rdev, context, udata, ureq.qpsva, bytes,
1689 dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed with %ld\n",
1704 umem = ib_umem_get_compat(rdev,
1708 dev_err(rdev_to_dev(rdev),
1736 struct bnxt_re_dev *rdev = pd->rdev;
1743 dev_err(rdev_to_dev(rdev), "Allocate Address Handle failed!\n");
1747 ah->rdev = rdev;
1750 rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
1764 ether_addr_copy(ah->qplib_ah.dmac, rdev->dev_addr);
1765 dev_dbg(rdev_to_dev(rdev), "ah->qplib_ah.dmac = %x:%x:%x:%x:%x:%x\n",
1769 rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, true);
1771 dev_err(rdev_to_dev(rdev),
1775 dev_dbg(rdev_to_dev(rdev), "AH ID = %d\n", ah->qplib_ah.id);
1776 atomic_inc(&rdev->stats.rsors.ah_count);
1784 void bnxt_re_update_shadow_ah(struct bnxt_re_dev *rdev)
1792 if (!rdev)
1795 sah = rdev->gsi_ctx.gsi_sah;
1797 dev_dbg(rdev_to_dev(rdev), "Updating the AH\n");
1800 if (!compare_ether_header(sah->qplib_ah.dmac, rdev->dev_addr)) {
1801 dev_dbg(rdev_to_dev(rdev),
1806 gsi_qp = rdev->gsi_ctx.gsi_qp;
1809 rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
1812 dev_err(rdev_to_dev(rdev),
1816 atomic_dec(&rdev->stats.rsors.ah_count);
1818 rdev->gsi_ctx.gsi_sah = NULL;
1820 sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
1823 dev_err(rdev_to_dev(rdev),
1827 rdev->gsi_ctx.gsi_sah = sah;
1828 atomic_inc(&rdev->stats.rsors.ah_count);
1836 struct bnxt_re_dev *rdev = pd->rdev;
1842 dev_err(rdev_to_dev(rdev), "Allocate internal UD QP failed!\n");
1846 qp->rdev = rdev;
1849 ether_addr_copy(qp->qplib_qp.smac, rdev->dev_addr);
1877 qp->qplib_qp.dpi = &rdev->dpi_privileged;
1886 dev_err(rdev_to_dev(rdev), "create HW QP failed!\n");
1890 dev_dbg(rdev_to_dev(rdev), "Created shadow QP with ID = %d\n",
1894 mutex_lock(&rdev->qp_lock);
1895 list_add_tail(&qp->list, &rdev->qp_list);
1896 atomic_inc(&rdev->stats.rsors.qp_count);
1897 mutex_unlock(&rdev->qp_lock);
1911 struct bnxt_re_dev *rdev;
1915 rdev = qp->rdev;
1918 dev_attr = rdev->dev_attr;
1925 dev_err(rdev_to_dev(rdev), "SRQ not found\n");
1955 struct bnxt_re_dev *rdev;
1957 rdev = qp->rdev;
1959 dev_attr = rdev->dev_attr;
1961 if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD)
1971 struct bnxt_re_dev *rdev;
1977 rdev = qp->rdev;
1980 dev_attr = rdev->dev_attr;
1997 if (!cntx && rdev->min_tx_depth && init_attr->qp_type != IB_QPT_GSI) {
2003 if (rdev->min_tx_depth > 1 && entries < rdev->min_tx_depth)
2004 entries = rdev->min_tx_depth;
2008 diff = bnxt_re_get_diff(cntx, rdev->chip_ctx);
2029 struct bnxt_re_dev *rdev;
2032 rdev = qp->rdev;
2034 dev_attr = rdev->dev_attr;
2036 if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
2049 static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
2056 chip_ctx = rdev->chip_ctx;
2057 gsi_ctx = &rdev->gsi_ctx;
2061 dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported\n",
2076 static int bnxt_re_init_qp_wqe_mode(struct bnxt_re_dev *rdev)
2078 return rdev->chip_ctx->modes.wqe_mode;
2089 struct bnxt_re_dev *rdev;
2093 rdev = qp->rdev;
2095 dev_attr = rdev->dev_attr;
2108 qptype = bnxt_re_init_qp_type(rdev, init_attr);
2114 qplqp->wqe_mode = bnxt_re_init_qp_wqe_mode(rdev);
2115 ether_addr_copy(qplqp->smac, rdev->dev_addr);
2121 qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(if_getmtu(rdev->netdev)));
2122 qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
2124 dev_dbg(rdev_to_dev(rdev),
2134 dev_err(rdev_to_dev(rdev), "Send CQ not found\n");
2145 dev_err(rdev_to_dev(rdev), "Receive CQ not found\n");
2168 rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
2177 struct bnxt_re_dev *rdev;
2182 rdev = qp->rdev;
2188 rdev->gsi_ctx.sqp_tbl = sqp_tbl;
2190 sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
2193 dev_err(rdev_to_dev(rdev),
2197 rdev->gsi_ctx.gsi_sqp = sqp;
2201 sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
2204 bnxt_qplib_destroy_qp(&rdev->qplib_res,
2207 dev_err(rdev_to_dev(rdev),
2211 rdev->gsi_ctx.gsi_sah = sah;
2237 struct bnxt_re_dev *rdev;
2242 rdev = qp->rdev;
2244 res = &rdev->qplib_res;
2245 gsi_mode = rdev->gsi_ctx.gsi_qp_mode;
2255 dev_err(rdev_to_dev(rdev), "create HW QP1 failed!\n");
2265 static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
2278 dev_err(rdev_to_dev(rdev), "Create QP failed - max exceeded! "
2294 struct bnxt_re_dev *rdev)
2300 dev_err(rdev_to_dev(rdev), "Allocate QP failed!\n");
2311 struct bnxt_re_dev *rdev;
2317 rdev = pd->rdev;
2318 dev_attr = rdev->dev_attr;
2319 if (rdev->mod_exit) {
2321 dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__);
2325 if (atomic_read(&rdev->stats.rsors.qp_count) >= dev_attr->max_qp) {
2326 dev_err(rdev_to_dev(rdev), "Create QP failed - max exceeded(QPs Alloc'd %u of max %u)\n",
2327 atomic_read(&rdev->stats.rsors.qp_count), dev_attr->max_qp);
2332 rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
2337 qp = __get_qp_from_qp_in(qp_in, rdev);
2342 qp->rdev = rdev;
2349 !_is_chip_gen_p5_p7(rdev->chip_ctx)) {
2356 rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
2358 dev_err(rdev_to_dev(rdev), "create HW QP failed!\n");
2366 rc = bnxt_re_copy_to_udata(rdev, &resp,
2376 rdev->gsi_ctx.gsi_qp = qp;
2380 mutex_lock(&rdev->qp_lock);
2381 list_add_tail(&qp->list, &rdev->qp_list);
2382 mutex_unlock(&rdev->qp_lock);
2383 atomic_inc(&rdev->stats.rsors.qp_count);
2384 active_qps = atomic_read(&rdev->stats.rsors.qp_count);
2385 if (active_qps > atomic_read(&rdev->stats.rsors.max_qp_count))
2386 atomic_set(&rdev->stats.rsors.max_qp_count, active_qps);
2388 bnxt_re_dump_debug_stats(rdev, active_qps);
2392 tmp_qps = atomic_inc_return(&rdev->stats.rsors.rc_qp_count);
2393 if (tmp_qps > atomic_read(&rdev->stats.rsors.max_rc_qp_count))
2394 atomic_set(&rdev->stats.rsors.max_rc_qp_count, tmp_qps);
2396 tmp_qps = atomic_inc_return(&rdev->stats.rsors.ud_qp_count);
2397 if (tmp_qps > atomic_read(&rdev->stats.rsors.max_ud_qp_count))
2398 atomic_set(&rdev->stats.rsors.max_ud_qp_count, tmp_qps);
2404 bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
2418 static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
2422 struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
2444 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2446 dev_err(rdev_to_dev(rdev), "Modify Shadow QP for QP1 failed\n");
2455 static u16 get_source_port(struct bnxt_re_dev *rdev,
2496 static void bnxt_re_update_qp_info(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp)
2516 qp->qp_info_entry.s_port = get_source_port(rdev, qp);
2533 dev_dbg(rdev_to_dev(qp->rdev),
2548 dev_dbg(rdev_to_dev(qp->rdev),
2573 struct bnxt_re_dev *rdev;
2584 rdev = qp->rdev;
2585 dev_attr = rdev->dev_attr;
2594 dev_err(rdev_to_dev(rdev),"invalid attribute mask=0x%x"
2601 dev_dbg(rdev_to_dev(rdev), "%s:%d INFO attribute mask=0x%x qpn=0x%x "
2652 qp->qplib_qp.ah.sgid_index = _get_sgid_index(rdev,
2661 status = bnxt_re_get_cached_gid(&rdev->ibdev, 1,
2669 memcpy(qp->qplib_qp.smac, rdev->dev_addr,
2672 dev_dbg(rdev_to_dev(rdev),
2694 bnxt_re_init_qpmtu(qp, if_getmtu(rdev->netdev), qp_attr_mask, qp_attr,
2702 dev_err(rdev_to_dev(rdev), "qp %#x invalid mtu\n",
2737 dev_dbg(rdev_to_dev(rdev),
2751 dev_err(rdev_to_dev(rdev),
2774 dev_err(rdev_to_dev(rdev),
2810 rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
2812 dev_err(rdev_to_dev(rdev), "Modify HW QP failed!\n");
2826 rc = bnxt_re_copy_to_udata(rdev, &resp,
2834 rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL &&
2835 rdev->gsi_ctx.gsi_sqp)
2836 rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
2840 bnxt_re_update_qp_info(rdev, qp);
2848 struct bnxt_re_dev *rdev = qp->rdev;
2859 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
2861 dev_err(rdev_to_dev(rdev), "Query HW QP (0x%x) failed! rc = %d\n",
2940 rc = bnxt_re_query_gid(&qp->rdev->ibdev, 1, qplib_ah->sgid_index, &sgid);
3000 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!\n");
3020 struct bnxt_re_dev *rdev;
3023 rdev = qp->rdev;
3026 if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD)
3052 struct bnxt_re_dev *rdev = qp->rdev;
3069 if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ROCE_V2_IPV4)
3071 if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_ROCE_V1)
3086 dev_err(rdev_to_dev(qp->rdev),"QP1 rq buffer is empty!\n");
3100 dev_err(rdev_to_dev(qp->rdev),
3114 dev_err(rdev_to_dev(qp->rdev),
3128 dev_err(rdev_to_dev(qp->rdev),
3139 dev_err(rdev_to_dev(qp->rdev),
3147 dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!\n");
3160 struct bnxt_re_dev *rdev;
3164 rdev = qp->rdev;
3178 dev_err(rdev_to_dev(qp->rdev),
3184 sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
3228 dev_err(rdev_to_dev(qp->rdev), "%s Invalid opcode %d!\n",
3326 dev_err_ratelimited(rdev_to_dev(mr->rdev),
3361 dev_err_ratelimited(rdev_to_dev(mr->rdev),
3367 dev_dbg(rdev_to_dev(mr->rdev),
3399 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
3413 dev_err(rdev_to_dev(rdev),
3428 dev_err(rdev_to_dev(rdev),
3459 struct bnxt_re_dev *rdev;
3463 rdev = qp->rdev;
3470 dev_err(rdev_to_dev(rdev),
3483 rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
3514 dev_err(rdev_to_dev(rdev),
3526 dev_err(rdev_to_dev(rdev),
3533 if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
3539 dev_err(rdev_to_dev(rdev),
3547 if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
3554 static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
3567 dev_err(rdev_to_dev(rdev),
3580 dev_err(rdev_to_dev(rdev),
3594 struct bnxt_re_dev *rdev = qp->rdev;
3597 if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL)
3619 dev_err(rdev_to_dev(qp->rdev),
3630 qp->rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
3640 dev_err(rdev_to_dev(qp->rdev),
3664 struct bnxt_re_dev *rdev = cq->rdev;
3691 rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
3693 dev_err_ratelimited(rdev_to_dev(rdev),
3697 bnxt_re_put_nq(rdev, cq->qplib_cq.nq);
3702 atomic_dec(&rdev->stats.rsors.cq_count);
3709 struct bnxt_re_dev *rdev)
3725 struct bnxt_re_dev *rdev;
3734 rdev = rdev_from_cq_in(cq_in);
3735 if (rdev->mod_exit) {
3737 dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__);
3746 dev_attr = rdev->dev_attr;
3748 if (atomic_read(&rdev->stats.rsors.cq_count) >= dev_attr->max_cq) {
3749 dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded(CQs)\n");
3755 dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded(CQ_WQs)\n");
3760 cq = __get_cq_from_cq_in(cq_in, rdev);
3765 cq->rdev = rdev;
3775 if (!udata && !rdev->gsi_ctx.first_cq_created &&
3776 rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL) {
3777 rdev->gsi_ctx.first_cq_created = true;
3794 dev_warn(rdev_to_dev(rdev),
3815 dev_err(rdev_to_dev(rdev),
3834 (rdev, context, udata, ureq.cq_va,
3839 dev_err(rdev_to_dev(rdev),
3848 rc = bnxt_re_get_user_dpi(rdev, uctx);
3858 dev_err(rdev_to_dev(rdev),
3863 qplcq->dpi = &rdev->dpi_privileged;
3870 qplcq->nq = bnxt_re_get_nq(rdev);
3873 rc = bnxt_qplib_create_cq(&rdev->qplib_res, qplcq);
3875 dev_err(rdev_to_dev(rdev), "Create HW CQ failed!\n");
3883 atomic_inc(&rdev->stats.rsors.cq_count);
3884 max_active_cqs = atomic_read(&rdev->stats.rsors.cq_count);
3885 if (max_active_cqs > atomic_read(&rdev->stats.rsors.max_cq_count))
3886 atomic_set(&rdev->stats.rsors.max_cq_count, max_active_cqs);
3905 if (_is_chip_p7(rdev->chip_ctx)) {
3909 dev_err(rdev_to_dev(rdev),
3911 bnxt_qplib_destroy_cq(&rdev->qplib_res, qplcq);
3920 rc = bnxt_re_copy_to_udata(rdev, &resp,
3926 bnxt_qplib_destroy_cq(&rdev->qplib_res, qplcq);
3951 struct bnxt_re_dev *rdev = cq->rdev;
3957 rc = bnxt_qplib_modify_cq(&rdev->qplib_res, &cq->qplib_cq);
3959 dev_err(rdev_to_dev(rdev), "Modify HW CQ %#x failed!\n",
3972 struct bnxt_re_dev *rdev = cq->rdev;
3974 bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
3993 struct bnxt_re_dev *rdev;
4001 rdev = cq->rdev;
4002 dev_attr = rdev->dev_attr;
4011 dev_err(rdev_to_dev(rdev), "Resize CQ %#x failed - Busy\n",
4018 dev_err(rdev_to_dev(rdev), "Resize CQ %#x failed - max exceeded\n",
4030 dev_info(rdev_to_dev(rdev), "CQ is already at size %d\n", cqe);
4036 dev_warn(rdev_to_dev(rdev),
4046 dev_dbg(rdev_to_dev(rdev), "%s: va %p\n", __func__,
4049 (rdev,
4056 dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed! rc = %d\n",
4061 dev_dbg(rdev_to_dev(rdev), "%s: ib_umem_get() success\n",
4077 rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
4079 dev_err(rdev_to_dev(rdev), "Resize HW CQ %#x failed!\n",
4089 bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
4091 atomic_inc(&rdev->stats.rsors.resize_count);
4258 static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
4272 if (!ether_addr_equal(tmp_buf, rdev->dev_addr)) {
4289 static bool bnxt_re_is_vlan_in_packet(struct bnxt_re_dev *rdev,
4329 struct bnxt_re_dev *rdev;
4340 rdev = gsi_qp->rdev;
4341 gsi_sqp = rdev->gsi_ctx.gsi_sqp;
4351 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
4357 dev_err(rdev_to_dev(rdev), "Not handling this packet\n");
4370 if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
4373 if (bnxt_re_is_vlan_in_packet(rdev, rq_hdr_buf, cqe))
4410 rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
4412 dev_err(rdev_to_dev(rdev),
4423 gsi_sah = rdev->gsi_ctx.gsi_sah;
4428 rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
4487 struct bnxt_re_dev *rdev = gsi_sqp->rdev;
4497 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
4509 if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
4525 dev_dbg(rdev_to_dev(rdev), "%s nw_type = %d\n", __func__, nw_type);
4528 static void bnxt_re_process_res_ud_wc(struct bnxt_re_dev *rdev,
4541 if (rdev->gsi_ctx.gsi_qp->qplib_qp.id == qp->qplib_qp.id &&
4542 rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD) {
4546 if (_is_cqe_v2_supported(rdev->dev_attr->dev_cap_flags)) {
4556 if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
4588 struct bnxt_re_dev *rdev = cq->rdev;
4604 bnxt_re_pacing_alert(rdev);
4622 dev_err(rdev_to_dev(rdev), "POLL CQ no CQL to use\n");
4626 gsi_mode = rdev->gsi_ctx.gsi_qp_mode;
4635 dev_err(rdev_to_dev(rdev),
4658 dev_err(rdev_to_dev(rdev),
4673 rdev->gsi_ctx.gsi_sqp->qplib_qp.id) {
4676 dev_dbg(rdev_to_dev(rdev),
4700 sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
4712 rdev->gsi_ctx.gsi_sqp->qplib_qp.id) {
4716 dev_dbg(rdev_to_dev(rdev),
4728 bnxt_re_process_res_ud_wc(rdev, qp, wc, cqe);
4731 dev_err(rdev_to_dev(cq->rdev),
4776 struct bnxt_re_dev *rdev;
4785 rdev = pd->rdev;
4789 dev_err(rdev_to_dev(rdev),
4793 mr->rdev = rdev;
4799 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4801 dev_err(rdev_to_dev(rdev), "Allocate DMA MR failed!\n");
4812 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
4814 dev_err(rdev_to_dev(rdev), "Register DMA MR failed!\n");
4821 atomic_inc(&rdev->stats.rsors.mr_count);
4822 max_mr_count = atomic_read(&rdev->stats.rsors.mr_count);
4823 if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count))
4824 atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);
4829 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4838 struct bnxt_re_dev *rdev = mr->rdev;
4841 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4843 dev_err(rdev_to_dev(rdev), "Dereg MR failed (%d): rc - %#x\n",
4847 bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
4858 atomic_dec(&rdev->stats.rsors.mr_count);
4890 struct bnxt_re_dev *rdev = pd->rdev;
4895 dev_dbg(rdev_to_dev(rdev), "Alloc MR\n");
4897 dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported\n", type);
4901 dev_dbg(rdev_to_dev(rdev), "Max SG exceeded\n");
4906 dev_err(rdev_to_dev(rdev), "Allocate MR mem failed!\n");
4909 mr->rdev = rdev;
4914 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
4916 dev_err(rdev_to_dev(rdev), "Allocate MR failed!\n");
4923 dev_err(rdev_to_dev(rdev),
4928 rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
4931 dev_err(rdev_to_dev(rdev),
4935 dev_dbg(rdev_to_dev(rdev), "Alloc MR pages = 0x%p\n", mr->pages);
4937 atomic_inc(&rdev->stats.rsors.mr_count);
4938 max_mr_count = atomic_read(&rdev->stats.rsors.mr_count);
4939 if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count))
4940 atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);
4946 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
4957 struct bnxt_re_dev *rdev = pd->rdev;
4964 dev_err(rdev_to_dev(rdev), "Allocate MW failed!\n");
4968 mw->rdev = rdev;
4974 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
4976 dev_err(rdev_to_dev(rdev), "Allocate MW failed!\n");
4980 atomic_inc(&rdev->stats.rsors.mw_count);
4981 max_mw_count = atomic_read(&rdev->stats.rsors.mw_count);
4982 if (max_mw_count > atomic_read(&rdev->stats.rsors.max_mw_count))
4983 atomic_set(&rdev->stats.rsors.max_mw_count, max_mw_count);
4995 struct bnxt_re_dev *rdev = mw->rdev;
4998 rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
5000 dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
5005 atomic_dec(&rdev->stats.rsors.mw_count);
5057 struct bnxt_re_dev *rdev = pd->rdev;
5065 dev_dbg(rdev_to_dev(rdev), "Reg user MR\n");
5067 if (bnxt_re_get_total_mr_mw_count(rdev) >= rdev->dev_attr->max_mr)
5070 if (rdev->mod_exit) {
5071 dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__);
5076 dev_err(rdev_to_dev(rdev), "Requested MR Size: %lu "
5082 dev_err(rdev_to_dev(rdev), "Allocate MR failed!\n");
5085 mr->rdev = rdev;
5090 if (!_is_alloc_mr_unified(rdev->qplib_res.dattr)) {
5091 rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
5093 dev_err(rdev_to_dev(rdev), "Alloc MR failed!\n");
5100 umem = ib_umem_get_flags_compat(rdev, ib_pd->uobject->context,
5105 dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed! rc = %d\n",
5114 dev_err(rdev_to_dev(rdev), "umem is invalid!\n");
5120 rdev->dev_attr->page_size_cap);
5122 dev_err(rdev_to_dev(rdev), "umem page size unsupported!\n");
5136 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
5138 dev_err(rdev_to_dev(rdev), "Reg user MR failed!\n");
5143 atomic_inc(&rdev->stats.rsors.mr_count);
5144 max_mr_count = atomic_read(&rdev->stats.rsors.mr_count);
5145 if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count))
5146 atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);
5153 if (!_is_alloc_mr_unified(rdev->qplib_res.dattr))
5154 bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
5168 struct bnxt_re_dev *rdev = mr->rdev;
5176 umem = ib_umem_get_flags_compat(rdev, ib_pd->uobject->context,
5181 dev_err(rdev_to_dev(rdev),
5191 dev_err(rdev_to_dev(rdev), "umem is invalid!\n");
5197 rdev->dev_attr->page_size_cap);
5199 dev_err(rdev_to_dev(rdev),
5219 rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
5221 dev_err(rdev_to_dev(rdev), "Rereg user MR failed!\n");
5234 static int bnxt_re_check_abi_version(struct bnxt_re_dev *rdev)
5236 struct ib_device *ibdev = &rdev->ibdev;
5240 dev_dbg(rdev_to_dev(rdev), "ABI version requested %d\n",
5243 dev_dbg(rdev_to_dev(rdev), " is different from the device %d \n",
5258 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
5259 struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
5267 cctx = rdev->chip_ctx;
5268 rc = bnxt_re_check_abi_version(rdev);
5272 uctx->rdev = rdev;
5275 dev_err(rdev_to_dev(rdev), "shared memory allocation failed!\n");
5292 resp.dev_id = rdev->en_dev->pdev->devfn;
5293 resp.max_qp = rdev->qplib_res.hctx->qp_ctx.max;
5299 if (rdev->dev_attr && BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags))
5317 if (rdev->dbr_pacing)
5320 if (rdev->dbr_drop_recov && rdev->user_dbr_drop_recov)
5329 dev_warn(rdev_to_dev(rdev),
5332 dev_warn(rdev_to_dev(rdev),
5335 dev_warn(rdev_to_dev(rdev),
5339 dev_warn(rdev_to_dev(rdev),
5345 rc = bnxt_re_copy_to_udata(rdev, &resp,
5367 struct bnxt_re_dev *rdev = uctx->rdev;
5377 if (_is_chip_gen_p5_p7(rdev->chip_ctx) && uctx->wcdpi.dbr) {
5378 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
5381 dev_err(rdev_to_dev(rdev),
5386 rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
5389 dev_err(rdev_to_dev(rdev), "Deallocte HW DPI failed!\n");
5401 if (!_is_chip_p7(uctx->rdev->chip_ctx))
5421 struct bnxt_re_dev *rdev = uctx->rdev;
5430 dev_dbg(rdev_to_dev(rdev), "%s:%d uctx->shpg 0x%lx, vtophys(uctx->shpg) 0x%lx, pfn = 0x%lx \n",
5433 dev_err(rdev_to_dev(rdev), "Shared page mapping failed!\n");
5449 pfn = vtophys(rdev->dbr_page) >> PAGE_SHIFT;
5464 dev_err(rdev_to_dev(rdev),
5479 dev_err(rdev_to_dev(rdev), "DPI mapping failed!\n");