Lines Matching defs:rdev

109 static int bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev);
110 static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
112 static int bnxt_re_ib_init(struct bnxt_re_dev *rdev);
113 static void bnxt_re_ib_init_2(struct bnxt_re_dev *rdev);
117 static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev);
133 void bnxt_re_set_dma_device(struct ib_device *ibdev, struct bnxt_re_dev *rdev)
135 ibdev->dma_device = &rdev->en_dev->pdev->dev;
138 void bnxt_re_init_resolve_wq(struct bnxt_re_dev *rdev)
140 rdev->resolve_wq = create_singlethread_workqueue("bnxt_re_resolve_wq");
141 INIT_LIST_HEAD(&rdev->mac_wq_list);
144 void bnxt_re_uninit_resolve_wq(struct bnxt_re_dev *rdev)
147 if (!rdev->resolve_wq)
149 flush_workqueue(rdev->resolve_wq);
150 list_for_each_entry_safe(tmp_work, tmp_st, &rdev->mac_wq_list, list) {
154 destroy_workqueue(rdev->resolve_wq);
155 rdev->resolve_wq = NULL;
175 static void bnxt_re_update_fifo_occup_slabs(struct bnxt_re_dev *rdev,
178 if (fifo_occup > rdev->dbg_stats->dbq.fifo_occup_water_mark)
179 rdev->dbg_stats->dbq.fifo_occup_water_mark = fifo_occup;
181 if (fifo_occup > 8 * rdev->pacing_algo_th)
182 rdev->dbg_stats->dbq.fifo_occup_slab_4++;
183 else if (fifo_occup > 4 * rdev->pacing_algo_th)
184 rdev->dbg_stats->dbq.fifo_occup_slab_3++;
185 else if (fifo_occup > 2 * rdev->pacing_algo_th)
186 rdev->dbg_stats->dbq.fifo_occup_slab_2++;
187 else if (fifo_occup > rdev->pacing_algo_th)
188 rdev->dbg_stats->dbq.fifo_occup_slab_1++;
191 static void bnxt_re_update_do_pacing_slabs(struct bnxt_re_dev *rdev)
193 struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
195 if (pacing_data->do_pacing > rdev->dbg_stats->dbq.do_pacing_water_mark)
196 rdev->dbg_stats->dbq.do_pacing_water_mark = pacing_data->do_pacing;
198 if (pacing_data->do_pacing > 16 * rdev->dbr_def_do_pacing)
199 rdev->dbg_stats->dbq.do_pacing_slab_5++;
200 else if (pacing_data->do_pacing > 8 * rdev->dbr_def_do_pacing)
201 rdev->dbg_stats->dbq.do_pacing_slab_4++;
202 else if (pacing_data->do_pacing > 4 * rdev->dbr_def_do_pacing)
203 rdev->dbg_stats->dbq.do_pacing_slab_3++;
204 else if (pacing_data->do_pacing > 2 * rdev->dbr_def_do_pacing)
205 rdev->dbg_stats->dbq.do_pacing_slab_2++;
206 else if (pacing_data->do_pacing > rdev->dbr_def_do_pacing)
207 rdev->dbg_stats->dbq.do_pacing_slab_1++;
215 static struct bnxt_re_qp *bnxt_re_get_qp1_qp(struct bnxt_re_dev *rdev)
219 mutex_lock(&rdev->qp_lock);
220 list_for_each_entry(qp, &rdev->qp_list, list) {
222 mutex_unlock(&rdev->qp_lock);
226 mutex_unlock(&rdev->qp_lock);
235 static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
243 attr = rdev->dev_attr;
244 hctx = rdev->qplib_res.hctx;
245 cctx = rdev->chip_ctx;
247 bnxt_qplib_max_res_supported(cctx, &rdev->qplib_res, &dev_res, false);
267 static void bnxt_re_limit_vf_res(struct bnxt_re_dev *rdev,
271 struct bnxt_qplib_chip_ctx *cctx = rdev->chip_ctx;
274 bnxt_qplib_max_res_supported(cctx, &rdev->qplib_res, &dev_res, true);
286 static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
290 hctx = rdev->qplib_res.hctx;
292 bnxt_re_limit_pf_res(rdev);
294 if (rdev->num_vfs)
295 bnxt_re_limit_vf_res(rdev, &hctx->vf_res, rdev->num_vfs);
298 static void bnxt_re_dettach_irq(struct bnxt_re_dev *rdev)
304 rcfw = &rdev->rcfw;
305 for (indx = 0; indx < rdev->nqr.max_init; indx++) {
306 nq = &rdev->nqr.nq[indx];
315 static void bnxt_re_detach_err_device(struct bnxt_re_dev *rdev)
318 bnxt_re_dettach_irq(rdev);
321 set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
322 set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
323 wake_up_all(&rdev->rcfw.cmdq.waitq);
330 struct bnxt_re_dev *rdev;
334 static void bnxt_re_init_dcb_wq(struct bnxt_re_dev *rdev)
336 rdev->dcb_wq = create_singlethread_workqueue("bnxt_re_dcb_wq");
339 static void bnxt_re_uninit_dcb_wq(struct bnxt_re_dev *rdev)
341 if (!rdev->dcb_wq)
343 flush_workqueue(rdev->dcb_wq);
344 destroy_workqueue(rdev->dcb_wq);
345 rdev->dcb_wq = NULL;
348 static void bnxt_re_init_aer_wq(struct bnxt_re_dev *rdev)
350 rdev->aer_wq = create_singlethread_workqueue("bnxt_re_aer_wq");
353 static void bnxt_re_uninit_aer_wq(struct bnxt_re_dev *rdev)
355 if (!rdev->aer_wq)
357 flush_workqueue(rdev->aer_wq);
358 destroy_workqueue(rdev->aer_wq);
359 rdev->aer_wq = NULL;
362 static int bnxt_re_update_qp1_tos_dscp(struct bnxt_re_dev *rdev)
366 if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
369 qp = bnxt_re_get_qp1_qp(rdev);
374 qp->qplib_qp.tos_dscp = rdev->cc_param.qp1_tos_dscp;
376 return bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
379 static void bnxt_re_reconfigure_dscp(struct bnxt_re_dev *rdev)
387 cc_param = &rdev->cc_param;
388 tc_rec = &rdev->tc_rec[0];
414 rc = bnxt_qplib_modify_cc(&rdev->qplib_res, cc_param);
416 dev_err(rdev_to_dev(rdev), "Failed to apply cc settings\n");
424 struct bnxt_re_dev *rdev;
429 rdev = dcb_work->rdev;
430 if (!rdev)
433 mutex_lock(&rdev->cc_lock);
435 cc_param = &rdev->cc_param;
436 rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, cc_param);
438 dev_err(rdev_to_dev(rdev), "Failed to query ccparam rc:%d", rc);
441 tc_rec = &rdev->tc_rec[0];
447 bnxt_re_reconfigure_dscp(rdev);
452 rc = bnxt_re_update_qp1_tos_dscp(rdev);
454 dev_err(rdev_to_dev(rdev), "%s: Failed to modify QP1 rc:%d",
461 mutex_unlock(&rdev->cc_lock);
466 static int bnxt_re_hwrm_dbr_pacing_broadcast_event(struct bnxt_re_dev *rdev)
470 struct bnxt_en_dev *en_dev = rdev->en_dev;
475 bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
478 sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
481 dev_dbg(rdev_to_dev(rdev),
488 static int bnxt_re_hwrm_dbr_pacing_nqlist_query(struct bnxt_re_dev *rdev)
492 struct bnxt_dbq_nq_list *nq_list = &rdev->nq_list;
493 struct bnxt_en_dev *en_dev = rdev->en_dev;
500 nq = &rdev->nqr.nq[0];
503 bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
506 sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
509 dev_err(rdev_to_dev(rdev), "Failed to send dbr pacing nq list query rc:%d", rc);
522 dev_dbg(rdev_to_dev(rdev),
534 bnxt_qplib_dbr_pacing_set_primary_pf(rdev->chip_ctx, 1);
539 static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev)
541 struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
549 read_val = readl_fbsd(rdev->en_dev->softc, rdev->dbr_db_fifo_reg_off, 0);
558 bnxt_re_update_fifo_occup_slabs(rdev, fifo_occup);
566 static void bnxt_re_set_default_pacing_data(struct bnxt_re_dev *rdev)
568 struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
570 pacing_data->do_pacing = rdev->dbr_def_do_pacing;
571 pacing_data->pacing_th = rdev->pacing_algo_th;
573 pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE(rdev->chip_ctx);
581 static bool bnxt_re_check_if_dbq_intr_triggered(struct bnxt_re_dev *rdev)
587 read_val = readl_fbsd(rdev->en_dev->softc, rdev->dbr_aeq_arm_reg_off, 0);
588 dev_dbg(rdev_to_dev(rdev), "AEQ ARM status = 0x%x\n",
596 int bnxt_re_set_dbq_throttling_reg(struct bnxt_re_dev *rdev, u16 nq_id, u32 throttle)
602 throttle_val = (rdev->qplib_res.pacing_data->fifo_max_depth * throttle) / 100;
604 if (bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx)) {
607 writel_fbsd(rdev->en_dev->softc, rdev->dbr_throttling_reg_off, 0, cag_ring_water_mark);
608 read_val = readl_fbsd(rdev->en_dev->softc, rdev->dbr_throttling_reg_off, 0);
609 dev_dbg(rdev_to_dev(rdev),
613 dev_dbg(rdev_to_dev(rdev),
619 writel_fbsd(rdev->en_dev->softc, rdev->dbr_aeq_arm_reg_off, 0, 1);
623 static void bnxt_re_set_dbq_throttling_for_non_primary(struct bnxt_re_dev *rdev)
629 nq_list = &rdev->nq_list;
631 if (bnxt_qplib_dbr_pacing_is_primary_pf(rdev->chip_ctx)) {
632 dev_dbg(rdev_to_dev(rdev), "%s: nq_list->num_nql_entries= %d\n",
634 nq = &rdev->nqr.nq[0];
638 dev_dbg(rdev_to_dev(rdev),
642 (rdev, nq_id, 0))
644 bnxt_re_check_if_dbq_intr_triggered(rdev);
649 static void bnxt_re_handle_dbr_nq_pacing_notification(struct bnxt_re_dev *rdev)
654 nq = &rdev->nqr.nq[0];
657 rc = bnxt_re_hwrm_dbr_pacing_nqlist_query(rdev);
659 dev_err(rdev_to_dev(rdev),
664 writel_fbsd(rdev->en_dev->softc, BNXT_GRCPF_REG_WINDOW_BASE_OUT + 28, 0,
665 rdev->chip_ctx->dbr_aeq_arm_reg & BNXT_GRC_BASE_MASK);
667 rdev->dbr_throttling_reg_off =
668 (rdev->chip_ctx->dbr_throttling_reg &
670 rdev->dbr_aeq_arm_reg_off =
671 (rdev->chip_ctx->dbr_aeq_arm_reg &
674 bnxt_re_set_dbq_throttling_reg(rdev, nq->ring_id, rdev->dbq_watermark);
681 struct bnxt_re_dev *rdev;
683 rdev = dbq_work->rdev;
685 if (!rdev)
689 dev_dbg(rdev_to_dev(rdev), "%s: Handle DBQ Pacing event\n",
691 if (!bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx))
692 bnxt_re_hwrm_dbr_pacing_broadcast_event(rdev);
694 bnxt_re_pacing_alert(rdev);
697 dev_dbg(rdev_to_dev(rdev), "%s: Sched interrupt/pacing worker\n",
699 if (_is_chip_p7(rdev->chip_ctx))
700 bnxt_re_pacing_alert(rdev);
701 else if (!rdev->chip_ctx->modes.dbr_pacing_v0)
702 bnxt_re_hwrm_dbr_pacing_qcfg(rdev);
705 bnxt_re_handle_dbr_nq_pacing_notification(rdev);
709 bnxt_re_hwrm_dbr_pacing_broadcast_event(rdev);
721 struct bnxt_re_dev *rdev;
735 rdev = en_info->rdev;
741 if (!rdev || !rdev_to_dev(rdev)) {
742 dev_dbg(NULL, "Async event, bad rdev or netdev\n");
746 if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags) ||
747 !test_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
752 dev_dbg(rdev_to_dev(rdev), "Async event_id = %d data1 = %d data2 = %d",
758 if (!is_qport_service_type_supported(rdev))
760 if (!rdev->dcb_wq)
766 dcb_work->rdev = rdev;
769 queue_work(rdev->dcb_wq, &dcb_work->work);
774 set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
776 set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
777 wake_up_all(&rdev->rcfw.cmdq.waitq);
781 if (!rdev->dbr_pacing)
786 dbq_work->rdev = rdev;
789 queue_work(rdev->dbq_wq, &dbq_work->work);
790 rdev->dbr_sw_stats->dbq_int_recv++;
793 if (!rdev->dbr_pacing)
799 dbq_work->rdev = rdev;
802 queue_work(rdev->dbq_wq, &dbq_work->work);
814 struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
819 if (!mutex_trylock(&rdev->dbq_lock))
821 pacing_data = rdev->qplib_res.pacing_data;
822 pacing_save = rdev->do_pacing_save;
823 __wait_for_fifo_occupancy_below_th(rdev);
824 cancel_delayed_work_sync(&rdev->dbq_pacing_work);
825 if (rdev->dbr_recovery_on)
827 if (pacing_save > rdev->dbr_def_do_pacing) {
839 pacing_data->pacing_th = rdev->pacing_algo_th * 4;
846 rdev->do_pacing_save = pacing_data->do_pacing;
848 pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE(rdev->chip_ctx);
850 schedule_delayed_work(&rdev->dbq_pacing_work,
851 msecs_to_jiffies(rdev->dbq_pacing_time));
852 rdev->dbr_sw_stats->dbq_pacing_alerts++;
853 mutex_unlock(&rdev->dbq_lock);
858 struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
864 if (!mutex_trylock(&rdev->dbq_lock))
867 pacing_data = rdev->qplib_res.pacing_data;
868 read_val = readl_fbsd(rdev->en_dev->softc, rdev->dbr_db_fifo_reg_off, 0);
881 pacing_data->do_pacing = max_t(u32, rdev->dbr_def_do_pacing, pacing_data->do_pacing);
886 if (rdev->dbq_int_disable && fifo_occup < rdev->pacing_en_int_th) {
887 if (bnxt_qplib_dbr_pacing_is_primary_pf(rdev->chip_ctx)) {
888 if (!rdev->chip_ctx->modes.dbr_pacing_v0) {
889 nq = &rdev->nqr.nq[0];
890 bnxt_re_set_dbq_throttling_reg(rdev, nq->ring_id,
891 rdev->dbq_watermark);
892 rdev->dbr_sw_stats->dbq_int_en++;
893 rdev->dbq_int_disable = false;
897 if (pacing_data->do_pacing <= rdev->dbr_def_do_pacing) {
898 bnxt_re_set_default_pacing_data(rdev);
899 rdev->dbr_sw_stats->dbq_pacing_complete++;
903 schedule_delayed_work(&rdev->dbq_pacing_work,
904 msecs_to_jiffies(rdev->dbq_pacing_time));
905 bnxt_re_update_do_pacing_slabs(rdev);
906 rdev->dbr_sw_stats->dbq_pacing_resched++;
908 rdev->do_pacing_save = pacing_data->do_pacing;
909 mutex_unlock(&rdev->dbq_lock);
912 void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev)
916 if (!rdev->dbr_pacing)
918 mutex_lock(&rdev->dbq_lock);
919 pacing_data = rdev->qplib_res.pacing_data;
927 cancel_work_sync(&rdev->dbq_fifo_check_work);
928 schedule_work(&rdev->dbq_fifo_check_work);
929 mutex_unlock(&rdev->dbq_lock);
935 struct bnxt_re_dev *rdev;
937 rdev = container_of(res, struct bnxt_re_dev, qplib_res);
939 atomic_set(&rdev->dbq_intr_running, 1);
941 if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
946 if (bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx) &&
947 !rdev->chip_ctx->modes.dbr_pacing_v0)
948 bnxt_re_set_dbq_throttling_for_non_primary(rdev);
953 dbq_work->rdev = rdev;
956 queue_work(rdev->dbq_wq, &dbq_work->work);
957 rdev->dbr_sw_stats->dbq_int_recv++;
958 rdev->dbq_int_disable = true;
960 atomic_set(&rdev->dbq_intr_running, 0);
963 static void bnxt_re_free_msix(struct bnxt_re_dev *rdev)
965 struct bnxt_en_dev *en_dev = rdev->en_dev;
968 rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);
970 dev_err(rdev_to_dev(rdev), "netdev %p free_msix failed! rc = 0x%x",
971 rdev->netdev, rc);
974 static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
976 struct bnxt_en_dev *en_dev = rdev->en_dev;
989 if (rdev->is_virtfn)
993 else if (_is_chip_gen_p5_p7(rdev->chip_ctx))
994 num_msix_want = rdev->num_msix_requested ?: BNXT_RE_MAX_MSIX_GEN_P5_PF;
1008 entry = rdev->nqr.msix_entries;
1017 dev_warn(rdev_to_dev(rdev),
1021 rdev->nqr.num_msix = num_msix_got;
1025 bnxt_re_free_msix(rdev);
1029 static int __wait_for_ib_unregister(struct bnxt_re_dev *rdev,
1036 cur_prod = rdev->rcfw.cmdq.hwq.prod;
1037 cur_cons = rdev->rcfw.cmdq.hwq.cons;
1043 if (rdev->mod_exit)
1056 if (!bnxt_re_is_rdev_valid(rdev))
1062 if (!RCFW_NO_FW_ACCESS(&rdev->rcfw)) {
1066 if (!rdev->mod_exit)
1067 ret = __check_cmdq_stall(&rdev->rcfw, &cur_prod, &cur_cons);
1079 struct bnxt_re_dev *rdev = NULL;
1090 if (en_info->rdev) {
1091 dev_info(rdev_to_dev(en_info->rdev),
1092 "%s: Device is already added adev %p rdev: %p\n",
1093 __func__, adev, en_info->rdev);
1101 rc = bnxt_re_add_device(&rdev, real_dev,
1119 rdev->adev = adev;
1121 bnxt_re_get_link_speed(rdev);
1123 rc = bnxt_re_ib_init(rdev);
1125 dev_err(rdev_to_dev(rdev), "Failed ib_init\n");
1128 bnxt_re_ib_init_2(rdev);
1137 struct bnxt_re_dev *rdev;
1148 rdev = en_info->rdev;
1149 if (!rdev)
1152 if (!bnxt_re_is_rdev_valid(rdev))
1159 en_dev = rdev->en_dev;
1163 set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
1165 set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
1166 wake_up_all(&rdev->rcfw.cmdq.waitq);
1169 if (test_bit(BNXT_RE_FLAG_STOP_IN_PROGRESS, &rdev->flags))
1171 set_bit(BNXT_RE_FLAG_STOP_IN_PROGRESS, &rdev->flags);
1173 en_info->wqe_mode = rdev->chip_ctx->modes.wqe_mode;
1174 en_info->gsi_mode = rdev->gsi_ctx.gsi_qp_mode;
1175 en_info->num_msix_requested = rdev->num_msix_requested;
1178 if (rdev->dbr_pacing)
1179 bnxt_re_set_pacing_dev_state(rdev);
1181 dev_info(rdev_to_dev(rdev), "%s: L2 driver notified to stop."
1186 bnxt_re_schedule_work(rdev, NETDEV_UNREGISTER,
1187 NULL, netdev, rdev->adev);
1188 rc = __wait_for_ib_unregister(rdev, en_info);
1189 if (!bnxt_re_is_rdev_valid(rdev))
1192 dev_info(rdev_to_dev(rdev), "%s: Attempt to stop failed\n",
1194 bnxt_re_detach_err_device(rdev);
1197 bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, rdev->adev);
1221 struct bnxt_re_dev *rdev;
1229 rdev = en_info->rdev;
1230 if (!rdev || !bnxt_re_is_rdev_valid(rdev))
1234 bnxt_re_stopqps_and_ib_uninit(rdev);
1235 bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, rdev->adev);
1246 struct bnxt_re_dev *rdev;
1254 rdev = en_info->rdev;
1256 if (!rdev)
1259 rcfw = &rdev->rcfw;
1260 for (indx = 0; indx < rdev->nqr.max_init; indx++) {
1261 nq = &rdev->nqr.nq[indx];
1267 if (test_bit(BNXT_RE_FLAG_ALLOC_RCFW, &rdev->flags))
1276 struct bnxt_re_dev *rdev;
1284 rdev = en_info->rdev;
1285 if (!rdev)
1287 if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
1289 msix_ent = rdev->nqr.msix_entries;
1290 rcfw = &rdev->rcfw;
1298 dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n");
1305 for (indx = 0; indx < rdev->nqr.num_msix; indx++)
1306 rdev->nqr.msix_entries[indx].vector = ent[indx].vector;
1308 if (test_bit(BNXT_RE_FLAG_ALLOC_RCFW, &rdev->flags)) {
1312 dev_warn(rdev_to_dev(rdev),
1317 for (indx = 0 ; indx < rdev->nqr.max_init; indx++) {
1318 nq = &rdev->nqr.nq[indx];
1323 dev_warn(rdev_to_dev(rdev),
1358 static void bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
1360 struct bnxt_en_dev *en_dev = rdev->en_dev;
1364 rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
1368 dev_err(rdev_to_dev(rdev), "netdev %p unregister failed! rc = 0x%x",
1369 rdev->en_dev->net, rc);
1371 clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
1374 static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
1376 struct bnxt_en_dev *en_dev = rdev->en_dev;
1383 rdev->adev);
1386 dev_err(rdev_to_dev(rdev), "netdev %p register failed! rc = 0x%x",
1387 rdev->netdev, rc);
1394 static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
1404 res = &rdev->qplib_res;
1405 en_dev = rdev->en_dev;
1406 cctx = rdev->chip_ctx;
1409 rc = bnxt_re_hwrm_qcfg(rdev, &l2db_len, &offset);
1411 dev_info(rdev_to_dev(rdev),
1429 dev_info(rdev_to_dev(rdev),
1436 static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
1441 en_dev = rdev->en_dev;
1442 cctx = rdev->chip_ctx;
1443 cctx->modes.wqe_mode = _is_chip_gen_p5_p7(rdev->chip_ctx) ?
1446 if (bnxt_re_hwrm_qcaps(rdev))
1447 dev_err(rdev_to_dev(rdev),
1454 if (_is_chip_p7(rdev->chip_ctx) && cctx->modes.db_push) {
1455 if (rdev->is_virtfn || BNXT_EN_NPAR(en_dev))
1459 rdev->roce_mode = en_dev->flags & BNXT_EN_FLAG_ROCE_CAP;
1460 dev_dbg(rdev_to_dev(rdev),
1462 rdev->roce_mode);
1463 if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
1464 rdev->roce_mode = BNXT_RE_FLAG_ROCEV2_CAP;
1468 static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
1473 if (!rdev->chip_ctx)
1476 res = &rdev->qplib_res;
1481 kfree(rdev->dev_attr);
1482 rdev->dev_attr = NULL;
1484 chip_ctx = rdev->chip_ctx;
1485 rdev->chip_ctx = NULL;
1493 static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
1499 en_dev = rdev->en_dev;
1501 rdev->qplib_res.pdev = en_dev->pdev;
1502 rdev->qplib_res.netdev = rdev->netdev;
1503 rdev->qplib_res.en_dev = en_dev;
1508 rdev->chip_ctx = chip_ctx;
1509 rdev->qplib_res.cctx = chip_ctx;
1510 rc = bnxt_re_query_hwrm_intf_version(rdev);
1513 rdev->dev_attr = kzalloc(sizeof(*rdev->dev_attr), GFP_KERNEL);
1514 if (!rdev->dev_attr) {
1518 rdev->qplib_res.dattr = rdev->dev_attr;
1519 rdev->qplib_res.rcfw = &rdev->rcfw;
1520 rdev->qplib_res.is_vf = rdev->is_virtfn;
1522 rdev->qplib_res.hctx = kzalloc(sizeof(*rdev->qplib_res.hctx),
1524 if (!rdev->qplib_res.hctx) {
1528 bnxt_re_set_drv_mode(rdev, wqe_mode);
1530 bnxt_re_set_db_offset(rdev);
1531 rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
1537 dev_dbg(rdev_to_dev(rdev),
1542 kfree(rdev->chip_ctx);
1543 rdev->chip_ctx = NULL;
1545 kfree(rdev->dev_attr);
1546 rdev->dev_attr = NULL;
1548 kfree(rdev->qplib_res.hctx);
1549 rdev->qplib_res.hctx = NULL;
1553 static u16 bnxt_re_get_rtype(struct bnxt_re_dev *rdev) {
1554 return _is_chip_gen_p5_p7(rdev->chip_ctx) ?
1559 static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id)
1564 struct bnxt_en_dev *en_dev = rdev->en_dev;
1572 if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
1581 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
1582 req.ring_type = bnxt_re_get_rtype(rdev);
1588 dev_err(rdev_to_dev(rdev),
1592 dev_dbg(rdev_to_dev(rdev), "HW ring freed with id = 0x%x\n",
1598 static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
1605 struct bnxt_en_dev *en_dev = rdev->en_dev;
1612 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
1635 dev_err(rdev_to_dev(rdev),
1640 dev_dbg(rdev_to_dev(rdev),
1647 static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
1650 struct bnxt_en_dev *en_dev = rdev->en_dev;
1661 if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
1664 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, tid);
1670 dev_err(rdev_to_dev(rdev),
1674 dev_dbg(rdev_to_dev(rdev),
1680 static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev, u16 tid)
1684 struct bnxt_en_dev *en_dev = rdev->en_dev;
1690 hctx = rdev->qplib_res.hctx;
1698 bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
1701 req.stats_dma_length = rdev->chip_ctx->hw_stats_size;
1708 dev_err(rdev_to_dev(rdev),
1713 dev_dbg(rdev_to_dev(rdev), "HW stats ctx allocated with id = 0x%x",
1719 static void bnxt_re_net_unregister_async_event(struct bnxt_re_dev *rdev)
1723 if (rdev->is_virtfn ||
1724 test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
1727 memset(rdev->event_bitmap, 0, sizeof(rdev->event_bitmap));
1728 en_ops = rdev->en_dev->en_ops;
1730 (rdev->en_dev, BNXT_ROCE_ULP,
1731 (unsigned long *)rdev->event_bitmap,
1733 dev_err(rdev_to_dev(rdev),
1737 static void bnxt_re_net_register_async_event(struct bnxt_re_dev *rdev)
1741 if (rdev->is_virtfn)
1744 rdev->event_bitmap[0] |=
1748 rdev->event_bitmap[2] |=
1750 rdev->event_bitmap[2] |=
1753 en_ops = rdev->en_dev->en_ops;
1755 (rdev->en_dev, BNXT_ROCE_ULP,
1756 (unsigned long *)rdev->event_bitmap,
1758 dev_err(rdev_to_dev(rdev),
1762 static int bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
1764 struct bnxt_en_dev *en_dev = rdev->en_dev;
1772 bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
1781 dev_err(rdev_to_dev(rdev),
1785 cctx = rdev->chip_ctx;
1803 static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
1806 struct bnxt_en_dev *en_dev = rdev->en_dev;
1813 bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
1820 dev_err(rdev_to_dev(rdev),
1831 int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
1833 struct bnxt_en_dev *en_dev = rdev->en_dev;
1841 cctx = rdev->chip_ctx;
1843 bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
1850 dev_err(rdev_to_dev(rdev),
1854 if (_is_chip_p7(rdev->chip_ctx))
1880 dev_dbg(rdev_to_dev(rdev),
1887 static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev)
1889 struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
1892 struct bnxt_en_dev *en_dev = rdev->en_dev;
1898 cctx = rdev->chip_ctx;
1900 bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
1903 sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
1906 dev_dbg(rdev_to_dev(rdev),
1913 !bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx)) {
1914 dev_err(rdev_to_dev(rdev), "%s:%d Invoke bnxt_qplib_dbr_pacing_set_primary_pf with 1\n",
1916 bnxt_qplib_dbr_pacing_set_primary_pf(rdev->chip_ctx, 1);
1919 if (bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx)) {
1922 nq = &rdev->nqr.nq[0];
1925 bnxt_qplib_dbr_pacing_set_primary_pf(rdev->chip_ctx, 0);
1947 dev_dbg(rdev_to_dev(rdev),
1957 static int bnxt_re_hwrm_dbr_pacing_cfg(struct bnxt_re_dev *rdev, bool enable)
1961 struct bnxt_en_dev *en_dev = rdev->en_dev;
1965 if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
1969 bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
1979 req.primary_nq_id = cpu_to_le32(rdev->dbq_nq_id);
1980 req.pacing_threshold = cpu_to_le32(rdev->dbq_watermark);
1981 dev_dbg(rdev_to_dev(rdev), "%s: nq_id = 0x%x pacing_threshold = 0x%x",
1984 sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
1987 dev_dbg(rdev_to_dev(rdev),
1999 struct bnxt_re_dev *rdev;
2002 list_for_each_entry_rcu(rdev, &bnxt_re_dev_list, list) {
2003 if (rdev->netdev == netdev) {
2005 dev_dbg(rdev_to_dev(rdev),
2007 netdev, atomic_read(&rdev->ref_count));
2008 return rdev;
2018 struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
2020 return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
2027 struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
2029 return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
2039 int ib_register_device_compat(struct bnxt_re_dev *rdev)
2041 struct ib_device *ibdev = &rdev->ibdev;
2052 static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
2054 struct ib_device *ibdev = &rdev->ibdev;
2065 bnxt_qplib_get_guid(rdev->dev_addr, (u8 *)&ibdev->node_guid);
2068 ibdev->num_comp_vectors = rdev->nqr.num_msix - 1;
2069 bnxt_re_set_dma_device(ibdev, rdev);
2161 ret = ib_register_device_compat(rdev);
2165 static void bnxt_re_dev_dealloc(struct bnxt_re_dev *rdev)
2169 dev_dbg(rdev_to_dev(rdev), "%s:Remove the device %p\n", __func__, rdev);
2170 /* Wait for rdev refcount to come down */
2171 while ((atomic_read(&rdev->ref_count) > 1) && i--)
2174 if (atomic_read(&rdev->ref_count) > 1)
2175 dev_err(rdev_to_dev(rdev),
2177 atomic_read(&rdev->ref_count));
2179 atomic_set(&rdev->ref_count, 0);
2180 if_rele(rdev->netdev);
2181 rdev->netdev = NULL;
2184 kfree(rdev->gid_map);
2185 kfree(rdev->dbg_stats);
2186 ib_dealloc_device(&rdev->ibdev);
2192 struct bnxt_re_dev *rdev;
2196 rdev = (struct bnxt_re_dev *)compat_ib_alloc_device(sizeof(*rdev));
2197 if (!rdev) {
2203 atomic_set(&rdev->ref_count, 0);
2204 rdev->netdev = netdev;
2205 dev_hold(rdev->netdev);
2206 rdev->en_dev = en_dev;
2207 rdev->id = rdev->en_dev->pdev->devfn;
2208 INIT_LIST_HEAD(&rdev->qp_list);
2209 mutex_init(&rdev->qp_lock);
2210 mutex_init(&rdev->cc_lock);
2211 mutex_init(&rdev->dbq_lock);
2212 bnxt_re_clear_rsors_stat(&rdev->stats.rsors);
2213 rdev->cosq[0] = rdev->cosq[1] = 0xFFFF;
2214 rdev->min_tx_depth = 1;
2215 rdev->stats.stats_query_sec = 1;
2217 rdev->cc_param.disable_prio_vlan_tx = 1;
2220 INIT_WORK(&rdev->dbq_fifo_check_work, bnxt_re_db_fifo_check);
2221 INIT_DELAYED_WORK(&rdev->dbq_pacing_work, bnxt_re_pacing_timer_exp);
2222 rdev->gid_map = kzalloc(sizeof(*(rdev->gid_map)) *
2225 if (!rdev->gid_map) {
2226 ib_dealloc_device(&rdev->ibdev);
2230 rdev->gid_map[count] = -1;
2232 rdev->dbg_stats = kzalloc(sizeof(*rdev->dbg_stats), GFP_KERNEL);
2233 if (!rdev->dbg_stats) {
2234 ib_dealloc_device(&rdev->ibdev);
2238 return rdev;
2276 event.device = &qp->rdev->ibdev;
2321 dev_err(rdev_to_dev(qp->rdev),
2361 ibevent.device = &cq->rdev->ibdev;
2363 dev_err(rdev_to_dev(cq->rdev),
2437 ib_event.device = &srq->rdev->ibdev;
2484 struct bnxt_qplib_nq *bnxt_re_get_nq(struct bnxt_re_dev *rdev)
2488 mutex_lock(&rdev->nqr.load_lock);
2489 for (indx = 0, min = 0; indx < (rdev->nqr.num_msix - 1); indx++) {
2490 if (rdev->nqr.nq[min].load > rdev->nqr.nq[indx].load)
2493 rdev->nqr.nq[min].load++;
2494 mutex_unlock(&rdev->nqr.load_lock);
2496 return &rdev->nqr.nq[min];
2499 void bnxt_re_put_nq(struct bnxt_re_dev *rdev, struct bnxt_qplib_nq *nq)
2501 mutex_lock(&rdev->nqr.load_lock);
2503 mutex_unlock(&rdev->nqr.load_lock);
2506 static bool bnxt_re_check_min_attr(struct bnxt_re_dev *rdev)
2511 attr = rdev->dev_attr;
2515 dev_err(rdev_to_dev(rdev), "Insufficient RoCE resources");
2516 dev_dbg(rdev_to_dev(rdev),
2546 static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
2549 if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL)
2551 (qp == rdev->gsi_ctx.gsi_sqp);
2556 static void bnxt_re_stop_all_nonqp1_nonshadow_qps(struct bnxt_re_dev *rdev)
2566 if (!rdev)
2570 if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
2574 mutex_lock(&rdev->qp_lock);
2575 list_for_each_entry(qp, &rdev->qp_list, list) {
2577 if (dev_detached || !bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
2603 bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
2612 mutex_unlock(&rdev->qp_lock);
2619 mutex_unlock(&rdev->qp_lock);
2622 static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
2624 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
2629 if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
2633 dev_err(rdev_to_dev(rdev), "QPLIB: SGID table not allocated");
2655 rdev->dev_addr);
2661 static void bnxt_re_clear_cc(struct bnxt_re_dev *rdev)
2663 struct bnxt_qplib_cc_param *cc_param = &rdev->cc_param;
2665 if (_is_chip_p7(rdev->chip_ctx)) {
2672 if (!is_qport_service_type_supported(rdev))
2681 if (bnxt_qplib_modify_cc(&rdev->qplib_res, cc_param))
2682 dev_err(rdev_to_dev(rdev), "Failed to modify cc\n");
2685 static int bnxt_re_setup_cc(struct bnxt_re_dev *rdev)
2687 struct bnxt_qplib_cc_param *cc_param = &rdev->cc_param;
2690 if (_is_chip_p7(rdev->chip_ctx)) {
2699 if (!is_qport_service_type_supported(rdev))
2708 rc = bnxt_qplib_modify_cc(&rdev->qplib_res, cc_param);
2710 dev_err(rdev_to_dev(rdev), "Failed to modify cc\n");
2717 rc = bnxt_re_update_qp1_tos_dscp(rdev);
2719 dev_err(rdev_to_dev(rdev), "%s: Failed to modify QP1:%d",
2727 bnxt_re_clear_cc(rdev);
2731 int bnxt_re_query_hwrm_dscp2pri(struct bnxt_re_dev *rdev,
2735 struct bnxt_en_dev *en_dev = rdev->en_dev;
2749 bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
2756 dev_err(rdev_to_dev(rdev),
2783 int bnxt_re_prio_vlan_tx_update(struct bnxt_re_dev *rdev)
2786 if (rdev->cc_param.disable_prio_vlan_tx)
2787 rdev->qplib_res.prio = false;
2789 rdev->qplib_res.prio = true;
2791 return bnxt_re_update_gid(rdev);
2794 int bnxt_re_set_hwrm_dscp2pri(struct bnxt_re_dev *rdev,
2798 struct bnxt_en_dev *en_dev = rdev->en_dev;
2809 bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
2816 dev_err(rdev_to_dev(rdev),
2840 int bnxt_re_query_hwrm_qportcfg(struct bnxt_re_dev *rdev,
2846 struct bnxt_en_dev *en_dev = rdev->en_dev;
2854 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_QUEUE_QPORTCFG,
2885 if (is_bnxt_roce_queue(rdev, *qptr, *tmp_type)) {
2888 } else if (is_bnxt_cnp_queue(rdev, *qptr, *tmp_type)) {
2902 int bnxt_re_hwrm_cos2bw_qcfg(struct bnxt_re_dev *rdev, u16 target_id,
2905 struct bnxt_en_dev *en_dev = rdev->en_dev;
2913 bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
2934 int bnxt_re_hwrm_cos2bw_cfg(struct bnxt_re_dev *rdev, u16 target_id,
2937 struct bnxt_en_dev *en_dev = rdev->en_dev;
2946 bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
2975 int bnxt_re_host_pf_id_query(struct bnxt_re_dev *rdev,
2981 struct bnxt_en_dev *en_dev = rdev->en_dev;
2987 bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
3018 static void bnxt_re_put_stats_ctx(struct bnxt_re_dev *rdev)
3024 res = &rdev->qplib_res;
3027 if (test_and_clear_bit(BNXT_RE_FLAG_STATS_CTX_ALLOC, &rdev->flags)) {
3028 bnxt_re_net_stats_ctx_free(rdev, hctx->stats.fw_id, tid);
3033 static void bnxt_re_put_stats2_ctx(struct bnxt_re_dev *rdev)
3035 test_and_clear_bit(BNXT_RE_FLAG_STATS_CTX2_ALLOC, &rdev->flags);
3038 static int bnxt_re_get_stats_ctx(struct bnxt_re_dev *rdev)
3045 res = &rdev->qplib_res;
3048 rc = bnxt_qplib_alloc_stat_mem(res->pdev, rdev->chip_ctx, &hctx->stats);
3051 rc = bnxt_re_net_stats_ctx_alloc(rdev, tid);
3054 set_bit(BNXT_RE_FLAG_STATS_CTX_ALLOC, &rdev->flags);
3064 static int bnxt_re_update_dev_attr(struct bnxt_re_dev *rdev)
3068 rc = bnxt_qplib_get_dev_attr(&rdev->rcfw);
3071 if (!bnxt_re_check_min_attr(rdev))
3076 static void bnxt_re_free_tbls(struct bnxt_re_dev *rdev)
3078 bnxt_qplib_clear_tbls(&rdev->qplib_res);
3079 bnxt_qplib_free_tbls(&rdev->qplib_res);
3082 static int bnxt_re_alloc_init_tbls(struct bnxt_re_dev *rdev)
3084 struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
3094 pppp_factor = rdev->en_dev->port_count;
3095 rc = bnxt_qplib_alloc_tbls(&rdev->qplib_res, pppp_factor);
3098 bnxt_qplib_init_tbls(&rdev->qplib_res);
3099 set_bit(BNXT_RE_FLAG_TBLS_ALLOCINIT, &rdev->flags);
3104 static void bnxt_re_clean_nqs(struct bnxt_re_dev *rdev)
3109 if (!rdev->nqr.max_init)
3112 for (i = (rdev->nqr.max_init - 1) ; i >= 0; i--) {
3113 nq = &rdev->nqr.nq[i];
3115 bnxt_re_net_ring_free(rdev, nq->ring_id);
3118 rdev->nqr.max_init = 0;
3121 static int bnxt_re_setup_nqs(struct bnxt_re_dev *rdev)
3130 mutex_init(&rdev->nqr.load_lock);
3136 for (i = 0; i < rdev->nqr.num_msix - 1; i++) {
3137 nq = &rdev->nqr.nq[i];
3138 vec = rdev->nqr.msix_entries[i + 1].vector;
3139 offt = rdev->nqr.msix_entries[i + 1].db_offset;
3141 rc = bnxt_qplib_alloc_nq_mem(&rdev->qplib_res, nq);
3143 dev_err(rdev_to_dev(rdev),
3150 rattr.pages = nq->hwq.pbl[rdev->nqr.nq[i].hwq.level].pg_count;
3151 rattr.type = bnxt_re_get_rtype(rdev);
3154 rattr.lrid = rdev->nqr.msix_entries[i + 1].ring_idx;
3157 if (!i && bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx))
3162 rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
3165 dev_err(rdev_to_dev(rdev),
3175 dev_err(rdev_to_dev(rdev),
3181 rdev->nqr.max_init = i;
3185 bnxt_re_net_ring_free(rdev, nq->ring_id);
3189 rdev->nqr.max_init = i;
3193 static void bnxt_re_sysfs_destroy_file(struct bnxt_re_dev *rdev)
3198 device_remove_file(&rdev->ibdev.dev, bnxt_re_attributes[i]);
3201 static int bnxt_re_sysfs_create_file(struct bnxt_re_dev *rdev)
3206 rc = device_create_file(&rdev->ibdev.dev,
3209 dev_err(rdev_to_dev(rdev),
3213 device_remove_file(&rdev->ibdev.dev,
3215 clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
3216 ib_unregister_device(&rdev->ibdev);
3226 struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
3231 if (!rdev->is_virtfn && !rdev->worker_30s--)
3232 rdev->worker_30s = 30;
3242 if (!rdev->stats.stats_query_sec)
3245 if (test_bit(BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS, &rdev->flags) &&
3246 (rdev->is_virtfn ||
3247 !_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags))) {
3248 if (!(rdev->stats.stats_query_counter++ %
3249 rdev->stats.stats_query_sec)) {
3250 rc = bnxt_re_get_qos_stats(rdev);
3253 &rdev->flags);
3258 schedule_delayed_work(&rdev->worker, msecs_to_jiffies(1000));
3261 static int bnxt_re_alloc_dbr_sw_stats_mem(struct bnxt_re_dev *rdev)
3263 if (!(rdev->dbr_drop_recov || rdev->dbr_pacing))
3266 rdev->dbr_sw_stats = kzalloc(sizeof(*rdev->dbr_sw_stats), GFP_KERNEL);
3267 if (!rdev->dbr_sw_stats)
3273 static void bnxt_re_free_dbr_sw_stats_mem(struct bnxt_re_dev *rdev)
3275 kfree(rdev->dbr_sw_stats);
3276 rdev->dbr_sw_stats = NULL;
3279 static int bnxt_re_initialize_dbr_drop_recov(struct bnxt_re_dev *rdev)
3281 rdev->dbr_drop_recov_wq =
3283 if (!rdev->dbr_drop_recov_wq) {
3284 dev_err(rdev_to_dev(rdev), "DBR Drop Recov wq alloc failed!");
3287 rdev->dbr_drop_recov = true;
3290 rdev->user_dbr_drop_recov = true;
3292 rdev->user_dbr_drop_recov_timeout = BNXT_RE_DBR_RECOV_USERLAND_TIMEOUT;
3296 static void bnxt_re_deinitialize_dbr_drop_recov(struct bnxt_re_dev *rdev)
3298 if (rdev->dbr_drop_recov_wq) {
3299 flush_workqueue(rdev->dbr_drop_recov_wq);
3300 destroy_workqueue(rdev->dbr_drop_recov_wq);
3301 rdev->dbr_drop_recov_wq = NULL;
3303 rdev->dbr_drop_recov = false;
3306 static int bnxt_re_initialize_dbr_pacing(struct bnxt_re_dev *rdev)
3311 rdev->dbr_page = (void *)__get_free_page(GFP_KERNEL);
3312 if (!rdev->dbr_page) {
3313 dev_err(rdev_to_dev(rdev), "DBR page allocation failed!");
3316 memset((u8 *)rdev->dbr_page, 0, PAGE_SIZE);
3317 rdev->qplib_res.pacing_data = (struct bnxt_qplib_db_pacing_data *)rdev->dbr_page;
3318 rc = bnxt_re_hwrm_dbr_pacing_qcfg(rdev);
3320 dev_err(rdev_to_dev(rdev),
3325 rdev->dbq_wq = create_singlethread_workqueue("bnxt_re_dbq");
3326 if (!rdev->dbq_wq) {
3327 dev_err(rdev_to_dev(rdev), "DBQ wq alloc failed!");
3332 writel_fbsd(rdev->en_dev->softc, BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4, 0,
3333 rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_BASE_MASK);
3334 rdev->dbr_db_fifo_reg_off =
3335 (rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_OFFSET_MASK) +
3337 rdev->qplib_res.pacing_data->grc_reg_offset = rdev->dbr_db_fifo_reg_off;
3339 rdev->dbr_bar_addr =
3340 pci_resource_start(rdev->qplib_res.pdev, 0) +
3341 rdev->dbr_db_fifo_reg_off;
3344 rdev->dbq_watermark = BNXT_RE_PACING_DBQ_THRESHOLD;
3345 rdev->pacing_en_int_th = BNXT_RE_PACING_EN_INT_THRESHOLD;
3346 rdev->pacing_algo_th = BNXT_RE_PACING_ALGO_THRESHOLD;
3347 rdev->dbq_pacing_time = BNXT_RE_DBR_INT_TIME;
3348 rdev->dbr_def_do_pacing = BNXT_RE_DBR_DO_PACING_NO_CONGESTION;
3349 rdev->do_pacing_save = rdev->dbr_def_do_pacing;
3350 bnxt_re_set_default_pacing_data(rdev);
3351 dev_dbg(rdev_to_dev(rdev), "Initialized db pacing\n");
3355 free_page((u64)rdev->dbr_page);
3356 rdev->dbr_page = NULL;
3360 static void bnxt_re_deinitialize_dbr_pacing(struct bnxt_re_dev *rdev)
3362 if (rdev->dbq_wq)
3363 flush_workqueue(rdev->dbq_wq);
3365 cancel_work_sync(&rdev->dbq_fifo_check_work);
3366 cancel_delayed_work_sync(&rdev->dbq_pacing_work);
3368 if (rdev->dbq_wq) {
3369 destroy_workqueue(rdev->dbq_wq);
3370 rdev->dbq_wq = NULL;
3373 if (rdev->dbr_page)
3374 free_page((u64)rdev->dbr_page);
3375 rdev->dbr_page = NULL;
3376 rdev->dbr_pacing = false;
3383 int bnxt_re_enable_dbr_pacing(struct bnxt_re_dev *rdev)
3387 nq = &rdev->nqr.nq[0];
3388 rdev->dbq_nq_id = nq->ring_id;
3390 if (!bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx) &&
3391 bnxt_qplib_dbr_pacing_is_primary_pf(rdev->chip_ctx)) {
3392 if (bnxt_re_hwrm_dbr_pacing_cfg(rdev, true)) {
3393 dev_err(rdev_to_dev(rdev),
3398 writel_fbsd(rdev->en_dev->softc, BNXT_GRCPF_REG_WINDOW_BASE_OUT + 28, 0,
3399 rdev->chip_ctx->dbr_aeq_arm_reg & BNXT_GRC_BASE_MASK);
3400 rdev->dbr_aeq_arm_reg_off =
3401 (rdev->chip_ctx->dbr_aeq_arm_reg &
3403 writel_fbsd(rdev->en_dev->softc, rdev->dbr_aeq_arm_reg_off, 0, 1);
3414 int bnxt_re_disable_dbr_pacing(struct bnxt_re_dev *rdev)
3418 if (!bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx) &&
3419 bnxt_qplib_dbr_pacing_is_primary_pf(rdev->chip_ctx))
3420 rc = bnxt_re_hwrm_dbr_pacing_cfg(rdev, false);
3425 static void bnxt_re_ib_uninit(struct bnxt_re_dev *rdev)
3427 if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
3428 bnxt_re_sysfs_destroy_file(rdev);
3430 ib_unregister_device(&rdev->ibdev);
3431 clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
3436 static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
3441 bnxt_re_net_unregister_async_event(rdev);
3443 bnxt_re_put_stats2_ctx(rdev);
3445 &rdev->flags)) {
3448 list_del_rcu(&rdev->list);
3452 bnxt_re_uninit_resolve_wq(rdev);
3453 bnxt_re_uninit_dcb_wq(rdev);
3454 bnxt_re_uninit_aer_wq(rdev);
3456 bnxt_re_deinitialize_dbr_drop_recov(rdev);
3458 if (bnxt_qplib_dbr_pacing_en(rdev->chip_ctx))
3459 (void)bnxt_re_disable_dbr_pacing(rdev);
3461 if (test_and_clear_bit(BNXT_RE_FLAG_WORKER_REG, &rdev->flags)) {
3462 cancel_delayed_work_sync(&rdev->worker);
3466 while (atomic_read(&rdev->stats.rsors.cq_count) && --wait_count)
3469 dev_err(rdev_to_dev(rdev),
3471 atomic_read(&rdev->stats.rsors.cq_count));
3473 kdpi = &rdev->dpi_privileged;
3475 (void)bnxt_qplib_dealloc_dpi(&rdev->qplib_res, kdpi);
3487 if (test_and_clear_bit(BNXT_RE_FLAG_SETUP_NQ, &rdev->flags))
3488 bnxt_re_clean_nqs(rdev);
3491 if (test_and_clear_bit(BNXT_RE_FLAG_TBLS_ALLOCINIT, &rdev->flags))
3492 bnxt_re_free_tbls(rdev);
3493 if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_INIT, &rdev->flags)) {
3494 rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
3496 dev_warn(rdev_to_dev(rdev),
3500 bnxt_re_put_stats_ctx(rdev);
3502 if (test_and_clear_bit(BNXT_RE_FLAG_ALLOC_CTX, &rdev->flags))
3503 bnxt_qplib_free_hwctx(&rdev->qplib_res);
3506 if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags))
3507 bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
3509 if (rdev->dbr_pacing)
3510 bnxt_re_deinitialize_dbr_pacing(rdev);
3512 bnxt_re_free_dbr_sw_stats_mem(rdev);
3514 if (test_and_clear_bit(BNXT_RE_FLAG_NET_RING_ALLOC, &rdev->flags))
3515 bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id);
3517 if (test_and_clear_bit(BNXT_RE_FLAG_ALLOC_RCFW, &rdev->flags))
3518 bnxt_qplib_free_rcfw_channel(&rdev->qplib_res);
3520 if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags))
3521 bnxt_re_free_msix(rdev);
3524 bnxt_re_destroy_chip_ctx(rdev);
3528 &rdev->flags))
3529 bnxt_re_unregister_netdev(rdev);
3533 static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type, u8 wqe_mode)
3542 rc = bnxt_re_register_netdev(rdev);
3546 set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
3548 rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode);
3550 dev_err(rdev_to_dev(rdev), "Failed to get chip context rc 0x%x", rc);
3551 bnxt_re_unregister_netdev(rdev);
3552 clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
3561 rc = bnxt_re_request_msix(rdev);
3563 dev_err(rdev_to_dev(rdev),
3568 set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);
3572 rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res);
3574 dev_err(rdev_to_dev(rdev),
3578 set_bit(BNXT_RE_FLAG_ALLOC_RCFW, &rdev->flags);
3580 creq = &rdev->rcfw.creq;
3583 rattr.type = bnxt_re_get_rtype(rdev);
3586 rattr.lrid = rdev->nqr.msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
3587 rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
3590 dev_err(rdev_to_dev(rdev),
3595 if (!rdev->chip_ctx)
3598 if (rdev->chip_ctx->modes.dbr_pacing_v0 ||
3599 bnxt_qplib_dbr_pacing_en(rdev->chip_ctx) ||
3600 bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx)) {
3601 rc = bnxt_re_initialize_dbr_pacing(rdev);
3603 rdev->dbr_pacing = true;
3605 rdev->dbr_pacing = false;
3606 dev_dbg(rdev_to_dev(rdev), "%s: initialize db pacing ret %d\n",
3610 vec = rdev->nqr.msix_entries[BNXT_RE_AEQ_IDX].vector;
3611 offset = rdev->nqr.msix_entries[BNXT_RE_AEQ_IDX].db_offset;
3612 rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw, vec, offset,
3615 dev_err(rdev_to_dev(rdev),
3619 set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);
3621 rc = bnxt_re_update_dev_attr(rdev);
3624 bnxt_re_set_resource_limits(rdev);
3625 if (!rdev->is_virtfn && !_is_chip_gen_p5_p7(rdev->chip_ctx)) {
3626 rc = bnxt_qplib_alloc_hwctx(&rdev->qplib_res);
3628 dev_err(rdev_to_dev(rdev),
3632 set_bit(BNXT_RE_FLAG_ALLOC_CTX, &rdev->flags);
3635 rc = bnxt_re_get_stats_ctx(rdev);
3639 rc = bnxt_qplib_init_rcfw(&rdev->rcfw, rdev->is_virtfn);
3641 dev_err(rdev_to_dev(rdev),
3645 set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_INIT, &rdev->flags);
3648 rc = bnxt_re_update_dev_attr(rdev);
3651 rc = bnxt_re_alloc_init_tbls(rdev);
3653 dev_err(rdev_to_dev(rdev), "tbls alloc-init failed rc = %#x",
3657 rc = bnxt_re_setup_nqs(rdev);
3659 dev_err(rdev_to_dev(rdev), "NQs alloc-init failed rc = %#x\n",
3661 if (rdev->nqr.max_init == 0)
3664 dev_warn(rdev_to_dev(rdev),
3666 rdev->nqr.num_msix, rdev->nqr.max_init);
3668 set_bit(BNXT_RE_FLAG_SETUP_NQ, &rdev->flags);
3671 rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res, &rdev->dpi_privileged,
3672 rdev, BNXT_QPLIB_DPI_TYPE_KERNEL);
3676 if (rdev->dbr_pacing)
3677 bnxt_re_enable_dbr_pacing(rdev);
3679 if (rdev->chip_ctx->modes.dbr_drop_recov)
3680 bnxt_re_initialize_dbr_drop_recov(rdev);
3682 rc = bnxt_re_alloc_dbr_sw_stats_mem(rdev);
3687 if (!rdev->is_virtfn) {
3690 tc_rec = &rdev->tc_rec[0];
3691 rc = bnxt_re_query_hwrm_qportcfg(rdev, tc_rec, 0xFFFF);
3693 dev_err(rdev_to_dev(rdev),
3699 rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, &rdev->cc_param);
3701 dev_warn(rdev_to_dev(rdev),
3704 rdev->num_vfs = pci_num_vf(rdev->en_dev->pdev);
3705 if (rdev->num_vfs) {
3706 bnxt_re_set_resource_limits(rdev);
3707 bnxt_qplib_set_func_resources(&rdev->qplib_res);
3711 INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
3712 set_bit(BNXT_RE_FLAG_WORKER_REG, &rdev->flags);
3713 schedule_delayed_work(&rdev->worker, msecs_to_jiffies(1000));
3715 bnxt_re_init_dcb_wq(rdev);
3716 bnxt_re_init_aer_wq(rdev);
3717 bnxt_re_init_resolve_wq(rdev);
3719 list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list);
3722 set_bit(BNXT_RE_FLAG_DEV_LIST_INITIALIZED, &rdev->flags);
3730 bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
3735 static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
3739 rc = bnxt_re_register_ib(rdev);
3741 dev_err(rdev_to_dev(rdev),
3745 if (bnxt_re_sysfs_create_file(rdev)) {
3746 bnxt_re_stopqps_and_ib_uninit(rdev);
3750 set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
3751 set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
3752 set_bit(BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS, &rdev->flags);
3753 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
3754 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);
3758 bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
3763 int _bnxt_re_ib_init(struct bnxt_re_dev *rdev)
3765 return bnxt_re_ib_init(rdev);
3769 int _bnxt_re_ib_init2(struct bnxt_re_dev *rdev)
3771 bnxt_re_ib_init_2(rdev);
3775 static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
3777 bnxt_re_dev_dealloc(rdev);
3781 static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct ifnet *netdev,
3800 *rdev = bnxt_re_dev_alloc(netdev, en_dev);
3801 if (!*rdev) {
3806 bnxt_re_hold(*rdev);
3811 void bnxt_re_get_link_speed(struct bnxt_re_dev *rdev)
3813 rdev->espeed = rdev->en_dev->espeed;
3817 void bnxt_re_stopqps_and_ib_uninit(struct bnxt_re_dev *rdev)
3819 dev_dbg(rdev_to_dev(rdev), "%s: Stopping QPs, IB uninit on rdev: %p\n",
3820 __func__, rdev);
3821 bnxt_re_stop_all_nonqp1_nonshadow_qps(rdev);
3822 bnxt_re_ib_uninit(rdev);
3825 void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type,
3832 rcfw = &rdev->rcfw;
3835 set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
3837 dev_dbg(rdev_to_dev(rdev), "%s: Removing rdev: %p\n", __func__, rdev);
3838 bnxt_re_dev_uninit(rdev, op_type);
3842 en_info->rdev = NULL;
3850 bnxt_re_dev_unreg(rdev);
3853 int bnxt_re_add_device(struct bnxt_re_dev **rdev,
3876 rc = bnxt_re_dev_reg(rdev, netdev, en_dev);
3916 (*rdev)->num_msix_requested = num_msix_requested;
3917 (*rdev)->gsi_ctx.gsi_qp_mode = qp_mode;
3918 (*rdev)->adev = aux_dev;
3919 (*rdev)->dev_addr = en_dev->softc->func.mac_addr;
3920 /* Before updating the rdev pointer in bnxt_re_en_dev_info structure,
3921 * take the rtnl lock to avoid accessing invalid rdev pointer from
3922 * L2 ULP callbacks. This is applicable in all the places where rdev
3926 en_info->rdev = *rdev;
3928 rc = bnxt_re_dev_init(*rdev, op_type, wqe_mode);
3931 bnxt_re_dev_unreg(*rdev);
3932 *rdev = NULL;
3947 dev_dbg(rdev_to_dev(*rdev), "%s: Adding rdev: %p\n", __func__, *rdev);
3954 struct bnxt_re_dev *bnxt_re_get_peer_pf(struct bnxt_re_dev *rdev)
3956 struct pci_dev *pdev_in = rdev->en_dev->pdev;
3979 int bnxt_re_schedule_work(struct bnxt_re_dev *rdev, unsigned long event,
3991 re_work->rdev = rdev;
3996 if (rdev)
3997 atomic_inc(&rdev->sched_count);
4005 int bnxt_re_get_slot_pf_count(struct bnxt_re_dev *rdev)
4007 struct pci_dev *pdev_in = rdev->en_dev->pdev;
4031 struct bnxt_re_dev *rdev;
4036 rdev = re_work->rdev;
4039 * If the previous rdev is deleted due to bond creation
4042 if (!bnxt_re_is_rdev_valid(rdev))
4048 if (rdev && !test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
4049 dev_dbg(rdev_to_dev(rdev), "%s: Ignoring netdev event 0x%lx",
4055 * when rdev is NULL.
4057 if (!rdev)
4060 dev_dbg(rdev_to_dev(rdev), "Scheduled work for event 0x%lx",
4065 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
4067 bnxt_re_net_register_async_event(rdev);
4071 bnxt_qplib_dbr_pacing_set_primary_pf(rdev->chip_ctx, 0);
4072 bnxt_re_stop_all_nonqp1_nonshadow_qps(rdev);
4073 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
4078 if (bnxt_re_get_link_state(rdev) == IB_PORT_DOWN) {
4079 bnxt_re_stop_all_nonqp1_nonshadow_qps(rdev);
4080 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
4083 } else if (bnxt_re_get_link_state(rdev) == IB_PORT_ACTIVE) {
4084 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
4089 if (!bnxt_qplib_query_cc_param(&rdev->qplib_res,
4090 &rdev->cc_param) &&
4091 !_is_chip_p7(rdev->chip_ctx)) {
4096 if (rdev->sl_espeed == SPEED_10000 &&
4097 !_is_chip_gen_p5_p7(rdev->chip_ctx)) {
4098 if (rdev->cc_param.enable)
4099 bnxt_re_clear_cc(rdev);
4101 if (!rdev->cc_param.enable &&
4102 rdev->cc_param.admin_enable)
4103 bnxt_re_setup_cc(rdev);
4109 bnxt_re_stopqps_and_ib_uninit(rdev);
4110 aux_dev = rdev->adev;
4114 bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, aux_dev);
4122 if (rdev) {
4127 atomic_dec(&rdev->sched_count);
4157 struct bnxt_re_dev *rdev = NULL;
4163 /* In case of bonding, this will be bond's rdev */
4164 rdev = bnxt_re_from_netdev(real_dev);
4166 if (!rdev)
4169 dev_info(rdev_to_dev(rdev), "%s: Event = %s (0x%lx), rdev %s (real_dev %s)\n",
4171 rdev ? rdev->netdev ? if_getdname(rdev->netdev) : "->netdev = NULL" : "= NULL",
4174 if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
4177 bnxt_re_hold(rdev);
4182 bnxt_re_schedule_work(rdev, event, netdev,
4195 if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
4196 bnxt_re_update_shadow_ah(rdev);
4197 bnxt_qplib_get_guid(rdev->dev_addr,
4198 (u8 *)&rdev->ibdev.node_guid);
4202 bnxt_re_get_link_speed(rdev);
4203 bnxt_re_schedule_work(rdev, event, NULL, NULL, NULL);
4216 if (rdev) {
4217 dev_info(rdev_to_dev(rdev),
4219 dev_info(rdev_to_dev(rdev),
4221 dev_info(rdev_to_dev(rdev),
4223 dev_info(rdev_to_dev(rdev),
4228 if (atomic_read(&rdev->sched_count) > 0)
4230 if (!rdev->unreg_sched) {
4231 bnxt_re_schedule_work(rdev, NETDEV_UNREGISTER,
4233 rdev->unreg_sched = true;
4242 if (rdev)
4243 bnxt_re_put(rdev);
4252 static void bnxt_re_remove_base_interface(struct bnxt_re_dev *rdev,
4255 bnxt_re_stopqps_and_ib_uninit(rdev);
4256 bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, adev);
4266 * If the rdev is a bond interface, destroys the lag
4276 struct bnxt_re_dev *rdev;
4286 rdev = en_info->rdev;
4288 if (rdev && bnxt_re_is_rdev_valid(rdev)) {
4289 if (pci_channel_offline(rdev->rcfw.pdev))
4290 set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
4299 * same rdev pointer when LAG is configured. This rdev pointer
4300 * is rdev of bond interface.
4304 bnxt_re_remove_base_interface(rdev, adev);
4308 * case bond device is already removed, so rdev->binfo
4332 static void bnxt_re_ib_init_2(struct bnxt_re_dev *rdev)
4336 rc = bnxt_re_get_device_stats(rdev);
4338 dev_err(rdev_to_dev(rdev),
4341 bnxt_re_net_register_async_event(rdev);
4351 struct bnxt_re_dev *rdev;
4374 rc = bnxt_re_add_device(&rdev, en_dev->net,
4384 rc = bnxt_re_ib_init(rdev);
4388 bnxt_re_ib_init_2(rdev);
4390 dev_dbg(rdev_to_dev(rdev), "%s: adev: %p\n", __func__, adev);
4391 rdev->adev = adev;