Lines Matching defs:ha

82 static void qlnx_add_sysctls(qlnx_host_t *ha);
87 static void qlnx_release(qlnx_host_t *ha);
89 static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
91 static void qlnx_init_locked(qlnx_host_t *ha);
92 static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
93 static int qlnx_set_promisc(qlnx_host_t *ha, int enabled);
94 static int qlnx_set_allmulti(qlnx_host_t *ha, int enabled);
98 static void qlnx_stop(qlnx_host_t *ha);
99 static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
101 static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
102 static uint32_t qlnx_get_optics(qlnx_host_t *ha,
109 static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
110 static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
111 static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
112 static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
113 static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
114 static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);
116 static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
117 static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);
122 static int qlnx_slowpath_start(qlnx_host_t *ha);
123 static int qlnx_slowpath_stop(qlnx_host_t *ha);
124 static int qlnx_init_hw(qlnx_host_t *ha);
127 static void qlnx_unload(qlnx_host_t *ha);
128 static int qlnx_load(qlnx_host_t *ha);
129 static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
131 static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
133 static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
137 static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
138 static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
140 static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
143 static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
144 static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
145 static void qlnx_trigger_dump(qlnx_host_t *ha);
146 static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
148 static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
150 static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
153 static void qlnx_sample_storm_stats(qlnx_host_t *ha);
154 static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
156 static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);
174 static void qlnx_initialize_sriov(qlnx_host_t *ha);
176 static int qlnx_create_pf_taskqueues(qlnx_host_t *ha);
177 static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha);
178 static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha);
305 qlnx_vf_device(qlnx_host_t *ha)
309 device_id = ha->device_id;
318 qlnx_valid_device(qlnx_host_t *ha)
322 device_id = ha->device_id;
341 qlnx_rdma_supported(struct qlnx_host *ha)
345 device_id = pci_get_device(ha->pci_dev);
434 qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
451 qlnx_host_t *ha;
461 ha = (qlnx_host_t *)p_hwfn->p_dev;
463 QL_DPRINT2(ha, "enter\n");
465 for (i = 0; i < ha->cdev.num_hwfns; i++) {
466 if (&ha->cdev.hwfns[i] == p_hwfn) {
467 taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
471 QL_DPRINT2(ha, "exit\n");
490 qlnx_create_sp_taskqueues(qlnx_host_t *ha)
495 for (i = 0; i < ha->cdev.num_hwfns; i++) {
496 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
501 TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);
503 ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT,
504 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);
506 if (ha->sp_taskqueue[i] == NULL)
509 taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
512 QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
519 qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
523 for (i = 0; i < ha->cdev.num_hwfns; i++) {
524 if (ha->sp_taskqueue[i] != NULL) {
525 taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
526 taskqueue_free(ha->sp_taskqueue[i]);
536 qlnx_host_t *ha;
544 ha = (qlnx_host_t *)fp->edev;
546 ifp = ha->ifp;
569 QL_DPRINT2(ha, "exit \n");
574 qlnx_create_fp_taskqueues(qlnx_host_t *ha)
580 for (i = 0; i < ha->num_rss; i++) {
581 fp = &ha->fp_array[i];
598 QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue);
605 qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
610 for (i = 0; i < ha->num_rss; i++) {
611 fp = &ha->fp_array[i];
623 qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
628 for (i = 0; i < ha->num_rss; i++) {
629 fp = &ha->fp_array[i];
632 QLNX_UNLOCK(ha);
634 QLNX_LOCK(ha);
641 qlnx_get_params(qlnx_host_t *ha)
644 device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
654 qlnx_host_t *ha;
656 ha = context;
658 QL_DPRINT2(ha, "enter\n");
660 QLNX_LOCK(ha);
661 qlnx_stop(ha);
662 QLNX_UNLOCK(ha);
665 qlnx_rdma_dev_remove(ha);
668 qlnx_slowpath_stop(ha);
669 qlnx_slowpath_start(ha);
672 qlnx_rdma_dev_add(ha);
675 qlnx_init(ha);
677 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
679 QL_DPRINT2(ha, "exit\n");
685 qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha)
692 TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha);
694 ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
695 taskqueue_thread_enqueue, &ha->err_taskqueue);
697 if (ha->err_taskqueue == NULL)
700 taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name);
702 QL_DPRINT1(ha, "%p\n",ha->err_taskqueue);
708 qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha)
710 if (ha->err_taskqueue != NULL) {
711 taskqueue_drain(ha->err_taskqueue, &ha->err_task);
712 taskqueue_free(ha->err_taskqueue);
715 ha->err_taskqueue = NULL;
727 qlnx_host_t *ha = NULL;
736 if ((ha = device_get_softc(dev)) == NULL) {
741 memset(ha, 0, sizeof (qlnx_host_t));
743 ha->device_id = pci_get_device(dev);
745 if (qlnx_valid_device(ha) != 0) {
749 ha->pci_func = pci_get_function(dev);
751 ha->pci_dev = dev;
753 sx_init(&ha->hw_lock, "qlnx_hw_lock");
755 ha->flags.lock_init = 1;
763 ha->reg_rid = PCIR_BAR(0);
764 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
767 if (ha->pci_reg == NULL) {
773 ha->reg_rid);
775 ha->dbells_rid = PCIR_BAR(2);
778 ha->dbells_rid);
780 ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
781 &ha->dbells_rid, RF_ACTIVE);
783 if (ha->pci_dbells == NULL) {
787 ha->dbells_phys_addr = (uint64_t)
788 bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);
790 ha->dbells_size = rsrc_len_dbells;
792 if (qlnx_vf_device(ha) != 0) {
798 ha->msix_rid = PCIR_BAR(4);
799 ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
800 &ha->msix_rid, RF_ACTIVE);
802 if (ha->msix_bar == NULL) {
808 ha->msix_rid);
810 ha->dbg_level = 0x0000;
812 QL_DPRINT1(ha, "\n\t\t\t"
818 ha->pci_dev, ha->pci_reg, rsrc_len_reg,
819 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
820 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
826 if (qlnx_alloc_parent_dma_tag(ha))
829 if (qlnx_alloc_tx_dma_tag(ha))
832 if (qlnx_alloc_rx_dma_tag(ha))
836 if (qlnx_init_hw(ha) != 0)
839 ha->flags.hw_init = 1;
841 qlnx_get_params(ha);
851 if (qlnx_vf_device(ha) != 0) {
853 ha->num_rss = QLNX_DEFAULT_RSS;
855 ha->num_rss = qlnxe_queue_count;
857 num_sp_msix = ha->cdev.num_hwfns;
862 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq);
863 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq);
866 ha->num_rss = max_rxq;
868 ha->num_rss = max_txq;
870 if (ha->num_rss > QLNX_MAX_VF_RSS)
871 ha->num_rss = QLNX_MAX_VF_RSS;
876 if (ha->num_rss > mp_ncpus)
877 ha->num_rss = mp_ncpus;
879 ha->num_tc = QLNX_MAX_TC;
881 ha->msix_count = pci_msix_count(dev);
885 num_rdma_irqs = qlnx_rdma_get_num_irqs(ha);
889 if (!ha->msix_count ||
890 (ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) {
892 ha->msix_count);
896 if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs))
897 ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs;
899 ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs);
901 QL_DPRINT1(ha, "\n\t\t\t"
907 ha->pci_reg, rsrc_len_reg,
908 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
909 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
910 ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);
912 if (pci_alloc_msix(dev, &ha->msix_count)) {
914 ha->msix_count);
915 ha->msix_count = 0;
924 if (qlnx_create_sp_taskqueues(ha) != 0)
927 for (i = 0; i < ha->cdev.num_hwfns; i++) {
928 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
930 ha->sp_irq_rid[i] = i + 1;
931 ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
932 &ha->sp_irq_rid[i],
934 if (ha->sp_irq[i] == NULL) {
940 if (bus_setup_intr(dev, ha->sp_irq[i],
942 qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
948 QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
950 ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
957 if (qlnx_create_fp_taskqueues(ha) != 0)
960 for (i = 0; i < ha->num_rss; i++) {
961 ha->irq_vec[i].rss_idx = i;
962 ha->irq_vec[i].ha = ha;
963 ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i;
965 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
966 &ha->irq_vec[i].irq_rid,
969 if (ha->irq_vec[i].irq == NULL) {
972 i, ha->irq_vec[i].irq_rid);
976 if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
982 if (qlnx_vf_device(ha) != 0) {
983 callout_init(&ha->qlnx_callout, 1);
984 ha->flags.callout_init = 1;
986 for (i = 0; i < ha->cdev.num_hwfns; i++) {
987 if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
989 if (ha->grcdump_size[i] == 0)
992 ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
993 QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
994 i, ha->grcdump_size[i]);
996 ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
997 if (ha->grcdump[i] == NULL) {
1002 if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
1004 if (ha->idle_chk_size[i] == 0)
1007 ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
1008 QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
1009 i, ha->idle_chk_size[i]);
1011 ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);
1013 if (ha->idle_chk[i] == NULL) {
1019 if (qlnx_create_error_recovery_taskqueue(ha) != 0)
1023 if (qlnx_slowpath_start(ha) != 0)
1026 ha->flags.slowpath_start = 1;
1028 if (qlnx_vf_device(ha) != 0) {
1029 if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
1031 qlnx_trigger_dump(ha);
1036 if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
1038 qlnx_trigger_dump(ha);
1043 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
1047 snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
1050 snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
1054 QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
1055 ha->stormfw_ver, ha->mfw_ver);
1057 qlnx_init_ifnet(dev, ha);
1062 qlnx_add_sysctls(ha);
1068 if (qlnx_vf_device(ha) != 0) {
1069 if (qlnx_make_cdev(ha)) {
1075 qlnx_rdma_dev_add(ha);
1082 if (qlnx_vf_device(ha) != 0)
1083 qlnx_initialize_sriov(ha);
1088 QL_DPRINT2(ha, "success\n");
1094 qlnx_release(ha);
1106 qlnx_host_t *ha = NULL;
1108 if ((ha = device_get_softc(dev)) == NULL) {
1113 if (qlnx_vf_device(ha) != 0) {
1126 if (qlnx_rdma_dev_remove(ha) != 0)
1131 QLNX_LOCK(ha);
1132 qlnx_stop(ha);
1133 QLNX_UNLOCK(ha);
1135 qlnx_release(ha);
1154 qlnx_set_personality(qlnx_host_t *ha)
1158 personality = qlnx_get_personality(ha->pci_func);
1162 device_printf(ha->pci_dev, "%s: DEFAULT\n",
1164 ha->personality = ECORE_PCI_DEFAULT;
1168 device_printf(ha->pci_dev, "%s: ETH_ONLY\n",
1170 ha->personality = ECORE_PCI_ETH;
1174 device_printf(ha->pci_dev, "%s: ETH_IWARP\n",
1176 ha->personality = ECORE_PCI_ETH_IWARP;
1180 device_printf(ha->pci_dev, "%s: ETH_ROCE\n",
1182 ha->personality = ECORE_PCI_ETH_ROCE;
1192 qlnx_init_hw(qlnx_host_t *ha)
1197 ha->cdev.ha = ha;
1198 ecore_init_struct(&ha->cdev);
1200 /* ha->dp_module = ECORE_MSG_PROBE |
1206 ha->dp_level = ECORE_LEVEL_VERBOSE;*/
1207 //ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2;
1208 ha->dp_level = ECORE_LEVEL_NOTICE;
1209 //ha->dp_level = ECORE_LEVEL_VERBOSE;
1211 ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);
1213 ha->cdev.regview = ha->pci_reg;
1215 ha->personality = ECORE_PCI_DEFAULT;
1217 if (qlnx_vf_device(ha) == 0) {
1218 ha->cdev.b_is_vf = true;
1220 if (ha->pci_dbells != NULL) {
1221 ha->cdev.doorbells = ha->pci_dbells;
1222 ha->cdev.db_phys_addr = ha->dbells_phys_addr;
1223 ha->cdev.db_size = ha->dbells_size;
1225 ha->pci_dbells = ha->pci_reg;
1228 ha->cdev.doorbells = ha->pci_dbells;
1229 ha->cdev.db_phys_addr = ha->dbells_phys_addr;
1230 ha->cdev.db_size = ha->dbells_size;
1234 if (qlnx_rdma_supported(ha) == 0)
1235 qlnx_set_personality(ha);
1239 QL_DPRINT2(ha, "%s: %s\n", __func__,
1240 (ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp": "ethernet"));
1244 params.personality = ha->personality;
1251 ecore_hw_prepare(&ha->cdev, &params);
1253 qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);
1255 QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n",
1256 ha, &ha->cdev, &ha->cdev.hwfns[0]);
1262 qlnx_release(qlnx_host_t *ha)
1267 dev = ha->pci_dev;
1269 QL_DPRINT2(ha, "enter\n");
1272 if (ha->idle_chk[i] != NULL) {
1273 free(ha->idle_chk[i], M_QLNXBUF);
1274 ha->idle_chk[i] = NULL;
1277 if (ha->grcdump[i] != NULL) {
1278 free(ha->grcdump[i], M_QLNXBUF);
1279 ha->grcdump[i] = NULL;
1283 if (ha->flags.callout_init)
1284 callout_drain(&ha->qlnx_callout);
1286 if (ha->flags.slowpath_start) {
1287 qlnx_slowpath_stop(ha);
1290 if (ha->flags.hw_init)
1291 ecore_hw_remove(&ha->cdev);
1293 qlnx_del_cdev(ha);
1295 if (ha->ifp != NULL)
1296 ether_ifdetach(ha->ifp);
1298 qlnx_free_tx_dma_tag(ha);
1300 qlnx_free_rx_dma_tag(ha);
1302 qlnx_free_parent_dma_tag(ha);
1304 if (qlnx_vf_device(ha) != 0) {
1305 qlnx_destroy_error_recovery_taskqueue(ha);
1308 for (i = 0; i < ha->num_rss; i++) {
1309 struct qlnx_fastpath *fp = &ha->fp_array[i];
1311 if (ha->irq_vec[i].handle) {
1312 (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
1313 ha->irq_vec[i].handle);
1316 if (ha->irq_vec[i].irq) {
1318 ha->irq_vec[i].irq_rid,
1319 ha->irq_vec[i].irq);
1322 qlnx_free_tx_br(ha, fp);
1324 qlnx_destroy_fp_taskqueues(ha);
1326 for (i = 0; i < ha->cdev.num_hwfns; i++) {
1327 if (ha->sp_handle[i])
1328 (void)bus_teardown_intr(dev, ha->sp_irq[i],
1329 ha->sp_handle[i]);
1331 if (ha->sp_irq[i])
1333 ha->sp_irq_rid[i], ha->sp_irq[i]);
1336 qlnx_destroy_sp_taskqueues(ha);
1338 if (ha->msix_count)
1341 if (ha->flags.lock_init) {
1342 sx_destroy(&ha->hw_lock);
1345 if (ha->pci_reg)
1346 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
1347 ha->pci_reg);
1349 if (ha->dbells_size && ha->pci_dbells)
1350 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
1351 ha->pci_dbells);
1353 if (ha->msix_bar)
1354 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
1355 ha->msix_bar);
1357 QL_DPRINT2(ha, "exit\n");
1362 qlnx_trigger_dump(qlnx_host_t *ha)
1366 if (ha->ifp != NULL)
1367 if_setdrvflagbits(ha->ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));
1369 QL_DPRINT2(ha, "enter\n");
1371 if (qlnx_vf_device(ha) == 0)
1374 ha->error_recovery = 1;
1376 for (i = 0; i < ha->cdev.num_hwfns; i++) {
1377 qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
1378 qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
1381 QL_DPRINT2(ha, "exit\n");
1390 qlnx_host_t *ha;
1398 ha = (qlnx_host_t *)arg1;
1399 qlnx_trigger_dump(ha);
1408 qlnx_host_t *ha;
1417 ha = (qlnx_host_t *)arg1;
1419 if (qlnx_vf_device(ha) == 0)
1422 for (i = 0; i < ha->num_rss; i++) {
1423 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1425 fp = &ha->fp_array[i];
1434 ha->tx_coalesce_usecs = (uint8_t)usecs;
1443 qlnx_host_t *ha;
1452 ha = (qlnx_host_t *)arg1;
1454 if (qlnx_vf_device(ha) == 0)
1457 for (i = 0; i < ha->num_rss; i++) {
1458 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1460 fp = &ha->fp_array[i];
1469 ha->rx_coalesce_usecs = (uint8_t)usecs;
1475 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
1481 ctx = device_get_sysctl_ctx(ha->pci_dev);
1482 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1490 CTLFLAG_RD, &ha->sp_interrupts,
1497 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
1506 ctx = device_get_sysctl_ctx(ha->pci_dev);
1507 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1513 for (i = 0; i < ha->num_rss; i++) {
1525 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
1530 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
1535 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
1540 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
1545 CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts,
1552 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx,
1557 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx,
1562 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp,
1567 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp,
1572 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr,
1578 CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts,
1583 CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
1588 CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
1593 CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
1598 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
1599 ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");
1603 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
1604 ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");
1608 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
1609 ha->fp_array[i].tx_tso_max_pkt_len,
1614 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
1615 ha->fp_array[i].tx_tso_min_pkt_len,
1625 &ha->fp_array[i].tx_pkts[j], name_str);
1636 &ha->fp_array[i].tx_pkts_hist[j], name_str);
1645 &ha->fp_array[i].tx_comInt[j], name_str);
1654 &ha->fp_array[i].tx_pkts_q[j], name_str);
1660 CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
1665 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
1670 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
1675 CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
1680 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
1685 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
1690 CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
1695 CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
1700 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
1705 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
1710 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
1715 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
1720 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
1727 CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
1732 CTLFLAG_RD, &ha->fp_array[i].tpa_start,
1737 CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
1742 CTLFLAG_RD, &ha->fp_array[i].tpa_end,
1747 CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
1752 CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
1757 CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
1762 CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
1770 qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
1776 ctx = device_get_sysctl_ctx(ha->pci_dev);
1777 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1785 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
1790 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
1795 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
1800 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
1805 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
1810 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
1815 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
1820 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
1825 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
1830 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
1835 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
1840 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
1845 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
1850 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
1855 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
1860 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
1865 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
1870 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
1875 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
1880 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
1885 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
1890 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
1895 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
1900 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
1905 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
1910 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
1915 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
1920 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
1925 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
1930 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
1935 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
1940 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
1945 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
1950 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
1955 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
1960 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
1965 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
1970 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
1975 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
1980 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
1985 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
1990 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
1995 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
2000 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
2005 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
2010 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
2015 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
2020 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
2025 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
2030 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
2035 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
2040 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
2045 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
2050 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
2055 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
2060 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
2065 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
2070 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
2075 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
2080 CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
2085 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
2090 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
2095 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
2100 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
2105 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
2110 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
2115 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
2120 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
2125 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
2130 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
2136 qlnx_add_sysctls(qlnx_host_t *ha)
2138 device_t dev = ha->pci_dev;
2145 qlnx_add_fp_stats_sysctls(ha);
2146 qlnx_add_sp_stats_sysctls(ha);
2148 if (qlnx_vf_device(ha) != 0)
2149 qlnx_add_hw_stats_sysctls(ha);
2156 CTLFLAG_RD, ha->stormfw_ver, 0,
2160 CTLFLAG_RD, ha->mfw_ver, 0,
2165 &ha->personality, ha->personality,
2171 ha->dbg_level = 0;
2174 &ha->dbg_level, ha->dbg_level, "Debug Level");
2176 ha->dp_level = 0x01;
2179 &ha->dp_level, ha->dp_level, "DP Level");
2181 ha->dbg_trace_lro_cnt = 0;
2184 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
2187 ha->dbg_trace_tso_pkt_len = 0;
2190 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
2193 ha->dp_module = 0;
2196 &ha->dp_module, ha->dp_module, "DP Module");
2198 ha->err_inject = 0;
2202 &ha->err_inject, ha->err_inject, "Error Inject");
2204 ha->storm_stats_enable = 0;
2208 &ha->storm_stats_enable, ha->storm_stats_enable,
2211 ha->storm_stats_index = 0;
2215 &ha->storm_stats_index, ha->storm_stats_index,
2218 ha->grcdump_taken = 0;
2221 &ha->grcdump_taken, ha->grcdump_taken,
2224 ha->idle_chk_taken = 0;
2227 &ha->idle_chk_taken, ha->idle_chk_taken,
2232 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
2237 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
2243 (void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump");
2248 (void *)ha, 0, qlnx_set_rx_coalesce, "I",
2254 (void *)ha, 0, qlnx_set_tx_coalesce, "I",
2257 ha->rx_pkt_threshold = 128;
2260 &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
2263 ha->rx_jumbo_buf_eq_mtu = 0;
2266 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
2272 &ha->err_illegal_intr, "err_illegal_intr");
2276 &ha->err_fp_null, "err_fp_null");
2280 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
2289 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
2294 ifp = ha->ifp = if_alloc(IFT_ETHER);
2297 device_id = pci_get_device(ha->pci_dev);
2312 if_setsoftc(ifp, ha);
2318 if_setsendqlen(ifp, qlnx_get_ifq_snd_maxlen(ha));
2323 ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
2325 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
2327 if (!ha->primary_mac[0] && !ha->primary_mac[1] &&
2328 !ha->primary_mac[2] && !ha->primary_mac[3] &&
2329 !ha->primary_mac[4] && !ha->primary_mac[5]) {
2334 ha->primary_mac[0] = 0x00;
2335 ha->primary_mac[1] = 0x0e;
2336 ha->primary_mac[2] = 0x1e;
2337 ha->primary_mac[3] = rnd & 0xFF;
2338 ha->primary_mac[4] = (rnd >> 8) & 0xFF;
2339 ha->primary_mac[5] = (rnd >> 16) & 0xFF;
2342 ether_ifattach(ifp, ha->primary_mac);
2343 bcopy(if_getlladdr(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
2371 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\
2375 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
2376 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
2377 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
2380 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
2381 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
2383 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
2384 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
2386 ifmedia_add(&ha->media,
2388 ifmedia_add(&ha->media,
2390 ifmedia_add(&ha->media,
2394 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
2395 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
2397 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
2399 QL_DPRINT2(ha, "exit\n");
2405 qlnx_init_locked(qlnx_host_t *ha)
2407 if_t ifp = ha->ifp;
2409 QL_DPRINT1(ha, "Driver Initialization start \n");
2411 qlnx_stop(ha);
2413 if (qlnx_load(ha) == 0) {
2418 if (qlnx_vf_device(ha) != 0) {
2419 qlnx_rdma_dev_open(ha);
2430 qlnx_host_t *ha;
2432 ha = (qlnx_host_t *)arg;
2434 QL_DPRINT2(ha, "enter\n");
2436 QLNX_LOCK(ha);
2437 qlnx_init_locked(ha);
2438 QLNX_UNLOCK(ha);
2440 QL_DPRINT2(ha, "exit\n");
2446 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
2452 cdev = &ha->cdev;
2454 mcast = &ha->ecore_mcast;
2471 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta)
2476 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2481 if ((ha->mcast[i].addr[0] == 0) &&
2482 (ha->mcast[i].addr[1] == 0) &&
2483 (ha->mcast[i].addr[2] == 0) &&
2484 (ha->mcast[i].addr[3] == 0) &&
2485 (ha->mcast[i].addr[4] == 0) &&
2486 (ha->mcast[i].addr[5] == 0)) {
2487 if (qlnx_config_mcast_mac_addr(ha, mta, 1))
2490 bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
2491 ha->nmcast++;
2500 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta)
2505 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
2506 if (qlnx_config_mcast_mac_addr(ha, mta, 0))
2509 ha->mcast[i].addr[0] = 0;
2510 ha->mcast[i].addr[1] = 0;
2511 ha->mcast[i].addr[2] = 0;
2512 ha->mcast[i].addr[3] = 0;
2513 ha->mcast[i].addr[4] = 0;
2514 ha->mcast[i].addr[5] = 0;
2516 ha->nmcast--;
2530 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
2537 if (qlnx_hw_add_mcast(ha, mta))
2540 if (qlnx_hw_del_mcast(ha, mta))
2563 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
2566 if_t ifp = ha->ifp;
2569 if (qlnx_vf_device(ha) == 0)
2574 QLNX_LOCK(ha);
2575 qlnx_hw_set_multi(ha, mta, mcnt, add_multi);
2576 QLNX_UNLOCK(ha);
2582 qlnx_set_promisc(qlnx_host_t *ha, int enabled)
2587 if (qlnx_vf_device(ha) == 0)
2590 filter = ha->filter;
2599 rc = qlnx_set_rx_accept_filter(ha, filter);
2604 qlnx_set_allmulti(qlnx_host_t *ha, int enabled)
2609 if (qlnx_vf_device(ha) == 0)
2612 filter = ha->filter;
2618 rc = qlnx_set_rx_accept_filter(ha, filter);
2632 qlnx_host_t *ha;
2634 ha = (qlnx_host_t *)if_getsoftc(ifp);
2638 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd);
2644 QLNX_LOCK(ha);
2645 qlnx_init_locked(ha);
2646 QLNX_UNLOCK(ha);
2648 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n",
2659 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd);
2664 QLNX_LOCK(ha);
2666 ha->max_frame_size =
2669 qlnx_init_locked(ha);
2672 QLNX_UNLOCK(ha);
2678 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
2680 QLNX_LOCK(ha);
2685 if ((flags ^ ha->if_flags) &
2687 ret = qlnx_set_promisc(ha, flags & IFF_PROMISC);
2688 } else if ((if_getflags(ifp) ^ ha->if_flags) &
2690 ret = qlnx_set_allmulti(ha, flags & IFF_ALLMULTI);
2693 ha->max_frame_size = if_getmtu(ifp) +
2695 qlnx_init_locked(ha);
2699 qlnx_stop(ha);
2702 ha->if_flags = if_getflags(ifp);
2703 QLNX_UNLOCK(ha);
2707 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd);
2710 if (qlnx_set_multi(ha, 1))
2716 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd);
2719 if (qlnx_set_multi(ha, 0))
2726 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
2728 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2735 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd);
2750 QLNX_LOCK(ha);
2753 qlnx_init_locked(ha);
2755 QLNX_UNLOCK(ha);
2763 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
2780 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
2786 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2798 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \
2809 QL_DPRINT4(ha, "default (0x%lx)\n", cmd);
2820 qlnx_host_t *ha;
2824 ha = (qlnx_host_t *)if_getsoftc(ifp);
2826 QL_DPRINT2(ha, "enter\n");
2828 ifm = &ha->media;
2833 QL_DPRINT2(ha, "exit\n");
2841 qlnx_host_t *ha;
2843 ha = (qlnx_host_t *)if_getsoftc(ifp);
2845 QL_DPRINT2(ha, "enter\n");
2850 if (ha->link_up) {
2853 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2855 if (ha->if_link.link_partner_caps &
2861 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2867 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2882 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){
2883 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL);
2885 QL_DPRINT1(ha, "(mp == NULL) "
2902 qlnx_trigger_dump(ha);
2906 QLNX_INC_OPACKETS((ha->ifp));
2907 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
2909 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
2910 bus_dmamap_unload(ha->tx_tag, map);
2935 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
2949 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){
2950 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF);
2952 QL_DPRINT1(ha, "(diff = 0x%x) "
2970 qlnx_trigger_dump(ha);
2978 qlnx_free_tx_pkt(ha, fp, txq);
2990 qlnx_host_t * ha;
2994 ha = (qlnx_host_t *)fp->edev;
2996 if ((!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) || (!ha->link_up)) {
3008 if (qlnx_send(ha, fp, &mp)) {
3027 if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) ||
3030 (void)qlnx_tx_int(ha, fp, fp->txq[0]);
3032 QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret);
3039 qlnx_host_t *ha = (qlnx_host_t *)if_getsoftc(ifp);
3047 QL_DPRINT2(ha, "enter\n");
3051 ha->num_rss;
3053 fp = &ha->fp_array[rss_id];
3082 QL_DPRINT2(ha, "exit ret = %d\n", ret);
3092 qlnx_host_t *ha;
3094 ha = (qlnx_host_t *)if_getsoftc(ifp);
3096 QL_DPRINT2(ha, "enter\n");
3098 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
3099 fp = &ha->fp_array[rss_id];
3114 QL_DPRINT2(ha, "exit\n");
3120 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value)
3124 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells);
3126 bus_write_4(ha->pci_dbells, offset, value);
3127 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ);
3128 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ);
3134 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp)
3239 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp)
3265 QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id);
3267 if (!ha->link_up)
3290 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
3293 if (ha->dbg_trace_tso_pkt_len) {
3310 offset = qlnx_tcp_offset(ha, m_head);
3319 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
3329 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret);
3336 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
3340 QL_DPRINT1(ha,
3355 QL_DPRINT1(ha,
3366 offset = qlnx_tcp_offset(ha, m_head);
3371 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n",
3379 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet"));
3381 if (ha->dbg_trace_tso_pkt_len) {
3488 QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs"
3494 (void)qlnx_tx_int(ha, fp, txq);
3498 QL_DPRINT1(ha,
3504 if (ha->storm_stats_enable)
3505 ha->storm_stats_gather = 1;
3510 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
3713 if (ha->dbg_trace_tso_pkt_len) {
3727 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3729 QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id);
3734 qlnx_stop(qlnx_host_t *ha)
3736 if_t ifp = ha->ifp;
3746 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state);
3748 if (ha->state == QLNX_STATE_OPEN) {
3749 for (i = 0; i < ha->num_rss; i++) {
3750 struct qlnx_fastpath *fp = &ha->fp_array[i];
3761 if (qlnx_vf_device(ha) != 0) {
3762 qlnx_rdma_dev_close(ha);
3766 qlnx_unload(ha);
3772 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha)
3778 qlnx_get_mac_addr(qlnx_host_t *ha)
3784 p_hwfn = &ha->cdev.hwfns[0];
3786 if (qlnx_vf_device(ha) != 0)
3792 device_printf(ha->pci_dev, "%s: p_is_forced = %d"
3795 memcpy(ha->primary_mac, mac, ETH_ALEN);
3798 return (ha->primary_mac);
3802 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
3846 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp,
3864 QL_DPRINT1(ha, "mp = NULL\n");
3874 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3877 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3878 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
3917 qlnx_tpa_start(qlnx_host_t *ha,
3923 if_t ifp = ha->ifp;
3935 QL_DPRINT7(ha, "[rss_id = %d]: enter\n \
3965 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
3968 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
3971 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
3979 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error,"
3992 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3993 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4024 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4045 QL_DPRINT7(ha, "[%d]: invalid aggregation state,"
4082 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
4088 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4094 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4107 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4108 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4135 mpl->m_len = ha->rx_buf_size;
4146 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
4161 mp->m_len = ha->rx_buf_size;
4221 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
4229 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4239 QL_DPRINT7(ha, "[%d]: enter\n \
4255 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4261 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4273 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4279 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4293 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4294 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4321 mpl->m_len = ha->rx_buf_size;
4331 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n",
4336 mp->m_len = ha->rx_buf_size;
4345 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4355 if_t ifp = ha->ifp;
4357 QL_DPRINT7(ha, "[%d]: enter\n \
4378 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4385 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4392 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
4399 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4405 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4419 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4420 QL_DPRINT7(ha, "[%d]: New buffer allocation failed,"
4447 mpl->m_len = ha->rx_buf_size;
4457 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
4460 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
4463 mp->m_len = ha->rx_buf_size;
4468 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
4501 QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIu64 "\n \
4516 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
4522 if_t ifp = ha->ifp;
4523 struct ecore_dev *cdev = &ha->cdev;
4535 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
4560 QL_DPRINT3(ha, "Got a slowath CQE\n");
4570 qlnx_tpa_start(ha, fp, rxq,
4576 qlnx_tpa_cont(ha, fp, rxq,
4582 rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4599 QL_DPRINT1(ha, "mp = NULL\n");
4605 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4613 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4623 qlnx_dump_buf8(ha, __func__, data, len);
4634 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4646 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4647 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4667 if (qlnx_rx_jumbo_chain(ha, fp, mp,
4779 qlnx_host_t *ha;
4783 ha = ivec->ha;
4785 if (ha->state != QLNX_STATE_OPEN) {
4791 if ((idx = ivec->rss_idx) >= ha->num_rss) {
4792 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx);
4793 ha->err_illegal_intr++;
4796 fp = &ha->fp_array[idx];
4799 ha->err_fp_null++;
4809 lro_enable = if_getcapenable(ha->ifp) & IFCAP_LRO;
4814 for (tc = 0; tc < ha->num_tc; tc++) {
4825 qlnx_tx_int(ha, fp, fp->txq[tc]);
4845 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
4899 qlnx_host_t *ha;
4903 ha = (qlnx_host_t *)p_hwfn->p_dev;
4905 ha->sp_interrupts++;
4907 QL_DPRINT2(ha, "enter\n");
4911 QL_DPRINT2(ha, "exit\n");
4936 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
4942 ha->parent_tag,/* parent */
4956 QL_DPRINT1(ha, "could not create dma tag\n");
4965 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n");
4992 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf)
5005 qlnx_host_t *ha __unused;
5007 ha = (qlnx_host_t *)ecore_dev;
5026 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5038 qlnx_host_t *ha;
5040 ha = (qlnx_host_t *)ecore_dev;
5049 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n",
5055 if (!ha->qlnxr_debug)
5061 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha)
5066 dev = ha->pci_dev;
5082 &ha->parent_tag);
5085 QL_DPRINT1(ha, "could not create parent dma tag\n");
5089 ha->flags.parent_tag = 1;
5095 qlnx_free_parent_dma_tag(qlnx_host_t *ha)
5097 if (ha->parent_tag != NULL) {
5098 bus_dma_tag_destroy(ha->parent_tag);
5099 ha->parent_tag = NULL;
5105 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha)
5118 &ha->tx_tag)) {
5119 QL_DPRINT1(ha, "tx_tag alloc failed\n");
5127 qlnx_free_tx_dma_tag(qlnx_host_t *ha)
5129 if (ha->tx_tag != NULL) {
5130 bus_dma_tag_destroy(ha->tx_tag);
5131 ha->tx_tag = NULL;
5137 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha)
5150 &ha->rx_tag)) {
5151 QL_DPRINT1(ha, " rx_tag alloc failed\n");
5159 qlnx_free_rx_dma_tag(qlnx_host_t *ha)
5161 if (ha->rx_tag != NULL) {
5162 bus_dma_tag_destroy(ha->rx_tag);
5163 ha->rx_tag = NULL;
5241 qlnx_host_t *ha;
5243 ha = ecore_dev;
5245 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0)
5248 QL_DPRINT1(ha, "failed\n");
5257 qlnx_host_t *ha;
5259 ha = ecore_dev;
5261 if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0)
5264 QL_DPRINT1(ha, "failed\n");
5387 qlnx_host_t *ha;
5389 ha = ((struct ecore_dev *) p_dev)->ha;
5390 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE);
5396 qlnx_host_t *ha;
5399 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5401 qlnx_fill_link(ha, p_hwfn, &ha->if_link);
5403 prev_link_state = ha->link_up;
5404 ha->link_up = ha->if_link.link_up;
5406 if (prev_link_state != ha->link_up) {
5407 if (ha->link_up) {
5408 if_link_state_change(ha->ifp, LINK_STATE_UP);
5410 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
5416 if (qlnx_vf_device(ha) != 0) {
5417 if (ha->sriov_initialized)
5418 qlnx_inform_vf_link_state(p_hwfn, ha);
5449 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn,
5461 ha = (qlnx_host_t *)hwfn->p_dev;
5465 if (qlnx_vf_device(ha) != 0) {
5469 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5577 qlnx_host_t *ha;
5579 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5581 if (qlnx_vf_device(ha) != 0) {
5582 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
5639 qlnx_slowpath_start(qlnx_host_t *ha)
5647 (ha->num_rss) * (ha->num_tc + 1);
5650 if (qlnx_vf_device(ha) != 0) {
5651 if(ha->personality == ECORE_PCI_ETH_IWARP) {
5652 device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n");
5657 } else if(ha->personality == ECORE_PCI_ETH_ROCE) {
5658 device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n");
5670 cdev = &ha->cdev;
5686 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
5687 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
5690 (void)qlnx_set_lldp_tlvx(ha, NULL);
5699 qlnx_slowpath_stop(qlnx_host_t *ha)
5702 device_t dev = ha->pci_dev;
5705 cdev = &ha->cdev;
5709 for (i = 0; i < ha->cdev.num_hwfns; i++) {
5710 if (ha->sp_handle[i])
5711 (void)bus_teardown_intr(dev, ha->sp_irq[i],
5712 ha->sp_handle[i]);
5714 ha->sp_handle[i] = NULL;
5716 if (ha->sp_irq[i])
5718 ha->sp_irq_rid[i], ha->sp_irq[i]);
5719 ha->sp_irq[i] = NULL;
5750 qlnx_host_t *ha;
5752 ha = cdev;
5765 ha->err_get_proto_invalid_type++;
5767 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type);
5774 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
5779 p_hwfn = &ha->cdev.hwfns[0];
5783 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
5794 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
5799 p_hwfn = &ha->cdev.hwfns[0];
5803 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n");
5814 qlnx_alloc_mem_arrays(qlnx_host_t *ha)
5816 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
5817 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
5818 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
5824 qlnx_init_fp(qlnx_host_t *ha)
5828 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5829 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5832 fp->edev = ha;
5833 fp->sb_info = &ha->sb_array[rss_id];
5834 fp->rxq = &ha->rxq_array[rss_id];
5837 for (tc = 0; tc < ha->num_tc; tc++) {
5838 txq_array_index = tc * ha->num_rss + rss_id;
5839 fp->txq[tc] = &ha->txq_array[txq_array_index];
5884 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info)
5888 cdev = &ha->cdev;
5923 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id)
5931 cdev = &ha->cdev;
5937 QL_DPRINT1(ha, "Status block allocation failed\n");
5950 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5960 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5961 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5972 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5977 cdev = &ha->cdev;
5979 qlnx_free_rx_buffers(ha, rxq);
5982 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
6017 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6034 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6042 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6048 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6062 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6070 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
6084 QL_DPRINT1(ha, "Failed to allocate Rx data\n");
6092 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6098 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n",
6111 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6117 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa)
6125 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
6126 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6137 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6142 cdev = &ha->cdev;
6146 rxq->rx_buf_size = ha->rx_buf_size;
6180 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
6187 rc = qlnx_alloc_rx_buffer(ha, rxq);
6193 QL_DPRINT1(ha, "Rx buffers allocation failed\n");
6196 QL_DPRINT1(ha, "Allocated less buffers than"
6208 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n",
6213 lro->ifp = ha->ifp;
6219 qlnx_free_mem_rxq(ha, rxq);
6224 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6229 cdev = &ha->cdev;
6244 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6251 cdev = &ha->cdev;
6274 qlnx_free_mem_txq(ha, fp, txq);
6279 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6282 if_t ifp = ha->ifp;
6304 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6308 qlnx_free_mem_sb(ha, fp->sb_info);
6310 qlnx_free_mem_rxq(ha, fp->rxq);
6312 for (tc = 0; tc < ha->num_tc; tc++)
6313 qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
6319 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6322 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
6329 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n",
6330 ha->dev_unit, fp->rss_id);
6337 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp)
6341 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
6345 if (ha->rx_jumbo_buf_eq_mtu) {
6346 if (ha->max_frame_size <= MCLBYTES)
6347 ha->rx_buf_size = MCLBYTES;
6348 else if (ha->max_frame_size <= MJUMPAGESIZE)
6349 ha->rx_buf_size = MJUMPAGESIZE;
6350 else if (ha->max_frame_size <= MJUM9BYTES)
6351 ha->rx_buf_size = MJUM9BYTES;
6352 else if (ha->max_frame_size <= MJUM16BYTES)
6353 ha->rx_buf_size = MJUM16BYTES;
6355 if (ha->max_frame_size <= MCLBYTES)
6356 ha->rx_buf_size = MCLBYTES;
6358 ha->rx_buf_size = MJUMPAGESIZE;
6361 rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
6365 for (tc = 0; tc < ha->num_tc; tc++) {
6366 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
6374 qlnx_free_mem_fp(ha, fp);
6379 qlnx_free_mem_load(qlnx_host_t *ha)
6383 for (i = 0; i < ha->num_rss; i++) {
6384 struct qlnx_fastpath *fp = &ha->fp_array[i];
6386 qlnx_free_mem_fp(ha, fp);
6392 qlnx_alloc_mem_load(qlnx_host_t *ha)
6396 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
6397 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
6399 rc = qlnx_alloc_mem_fp(ha, fp);
6417 qlnx_host_t *ha __unused;
6419 ha = (qlnx_host_t *)cdev;
6434 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id);
6445 QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d"
6452 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
6466 qlnx_host_t *ha = (qlnx_host_t *)cdev;
6505 ha->num_rss;
6507 fp = &ha->fp_array[fp_index];
6512 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n",
6527 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
6532 QL_DPRINT1(ha, "Failed to update VPORT\n");
6536 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \
6624 qlnx_start_queues(qlnx_host_t *ha)
6629 struct ecore_dev *cdev = &ha->cdev;
6630 struct ecore_rss_params *rss_params = &ha->rss_params;
6638 ifp = ha->ifp;
6640 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
6642 if (!ha->num_rss) {
6643 QL_DPRINT1(ha, "Cannot update V-VPORT as active as there"
6656 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
6660 QL_DPRINT2(ha, "Start vport ramrod passed, "
6668 fp = &ha->fp_array[i];
6695 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
6706 for (tc = 0; tc < ha->num_tc; tc++) {
6729 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n",
6752 if (ha->num_rss > 1) {
6763 fp = &ha->fp_array[(i % ha->num_rss)];
6808 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
6816 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp,
6822 QL_DPRINT2(ha, "enter\n");
6830 (void)qlnx_tx_int(ha, fp, txq);
6839 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
6845 qlnx_stop_queues(qlnx_host_t *ha)
6852 cdev = &ha->cdev;
6863 vport_update_params.rss_params = &ha->rss_params;
6869 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id);
6873 QL_DPRINT1(ha, "Failed to update vport\n");
6879 fp = &ha->fp_array[i];
6881 for (tc = 0; tc < ha->num_tc; tc++) {
6884 rc = qlnx_drain_txq(ha, fp, txq);
6891 for (i = ha->num_rss - 1; i >= 0; i--) {
6894 fp = &ha->fp_array[i];
6897 for (tc = 0; tc < ha->num_tc; tc++) {
6900 tx_queue_id = tc * ha->num_rss + i;
6905 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n",
6915 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
6927 QL_DPRINT1(ha, "Failed to stop VPORT\n");
6936 qlnx_set_ucast_rx_mac(qlnx_host_t *ha,
6944 cdev = &ha->cdev;
6960 qlnx_remove_all_ucast_mac(qlnx_host_t *ha)
6972 cdev = &ha->cdev;
6980 qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
6986 cdev = &ha->cdev;
6988 mcast = &ha->ecore_mcast;
6994 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
6995 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
6996 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
6997 memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN);
7001 mcast = &ha->ecore_mcast;
7005 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
7006 ha->nmcast = 0;
7012 qlnx_clean_filters(qlnx_host_t *ha)
7017 rc = qlnx_remove_all_ucast_mac(ha);
7022 rc = qlnx_remove_all_mcast_mac(ha);
7026 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
7032 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
7038 cdev = &ha->cdev;
7056 qlnx_set_rx_mode(qlnx_host_t *ha)
7060 const if_t ifp = ha->ifp;
7069 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, LLADDR(sdl));
7071 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
7076 rc = qlnx_remove_all_mcast_mac(ha);
7084 if (qlnx_vf_device(ha) == 0 || (if_getflags(ha->ifp) & IFF_PROMISC)) {
7087 } else if (if_getflags(ha->ifp) & IFF_ALLMULTI) {
7090 ha->filter = filter;
7092 rc = qlnx_set_rx_accept_filter(ha, filter);
7098 qlnx_set_link(qlnx_host_t *ha, bool link_up)
7105 if (qlnx_vf_device(ha) == 0)
7108 cdev = &ha->cdev;
7130 qlnx_host_t *ha;
7133 ha = (qlnx_host_t *)if_getsoftc(ifp);
7137 count = ha->hw_stats.common.rx_ucast_pkts +
7138 ha->hw_stats.common.rx_mcast_pkts +
7139 ha->hw_stats.common.rx_bcast_pkts;
7143 count = ha->hw_stats.common.rx_crc_errors +
7144 ha->hw_stats.common.rx_align_errors +
7145 ha->hw_stats.common.rx_oversize_packets +
7146 ha->hw_stats.common.rx_undersize_packets;
7150 count = ha->hw_stats.common.tx_ucast_pkts +
7151 ha->hw_stats.common.tx_mcast_pkts +
7152 ha->hw_stats.common.tx_bcast_pkts;
7156 count = ha->hw_stats.common.tx_err_drop_pkts;
7163 count = ha->hw_stats.common.rx_ucast_bytes +
7164 ha->hw_stats.common.rx_mcast_bytes +
7165 ha->hw_stats.common.rx_bcast_bytes;
7169 count = ha->hw_stats.common.tx_ucast_bytes +
7170 ha->hw_stats.common.tx_mcast_bytes +
7171 ha->hw_stats.common.tx_bcast_bytes;
7175 count = ha->hw_stats.common.rx_mcast_bytes;
7179 count = ha->hw_stats.common.tx_mcast_bytes;
7195 qlnx_host_t *ha;
7197 ha = (qlnx_host_t *)arg;
7199 if (ha->error_recovery) {
7200 ha->error_recovery = 0;
7201 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
7205 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
7207 if (ha->storm_stats_gather)
7208 qlnx_sample_storm_stats(ha);
7210 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7216 qlnx_load(qlnx_host_t *ha)
7222 dev = ha->pci_dev;
7224 QL_DPRINT2(ha, "enter\n");
7226 rc = qlnx_alloc_mem_arrays(ha);
7230 qlnx_init_fp(ha);
7232 rc = qlnx_alloc_mem_load(ha);
7236 QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
7237 ha->num_rss, ha->num_tc);
7239 for (i = 0; i < ha->num_rss; i++) {
7240 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
7242 NULL, qlnx_fp_isr, &ha->irq_vec[i],
7243 &ha->irq_vec[i].handle))) {
7244 QL_DPRINT1(ha, "could not setup interrupt\n");
7248 QL_DPRINT2(ha, "rss_id = %d irq_rid %d \
7250 ha->irq_vec[i].irq_rid,
7251 ha->irq_vec[i].irq, ha->irq_vec[i].handle);
7253 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
7256 rc = qlnx_start_queues(ha);
7260 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
7263 rc = qlnx_set_rx_mode(ha);
7268 qlnx_set_link(ha, true);
7270 if (qlnx_vf_device(ha) == 0)
7271 qlnx_link_update(&ha->cdev.hwfns[0]);
7273 ha->state = QLNX_STATE_OPEN;
7275 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
7277 if (ha->flags.callout_init)
7278 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7283 qlnx_free_mem_load(ha);
7286 ha->num_rss = 0;
7289 QL_DPRINT2(ha, "exit [%d]\n", rc);
7294 qlnx_drain_soft_lro(qlnx_host_t *ha)
7301 ifp = ha->ifp;
7304 for (i = 0; i < ha->num_rss; i++) {
7305 struct qlnx_fastpath *fp = &ha->fp_array[i];
7320 qlnx_unload(qlnx_host_t *ha)
7326 cdev = &ha->cdev;
7327 dev = ha->pci_dev;
7329 QL_DPRINT2(ha, "enter\n");
7330 QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state);
7332 if (ha->state == QLNX_STATE_OPEN) {
7333 qlnx_set_link(ha, false);
7334 qlnx_clean_filters(ha);
7335 qlnx_stop_queues(ha);
7338 for (i = 0; i < ha->num_rss; i++) {
7339 if (ha->irq_vec[i].handle) {
7341 ha->irq_vec[i].irq,
7342 ha->irq_vec[i].handle);
7343 ha->irq_vec[i].handle = NULL;
7347 qlnx_drain_fp_taskqueues(ha);
7348 qlnx_drain_soft_lro(ha);
7349 qlnx_free_mem_load(ha);
7352 if (ha->flags.callout_init)
7353 callout_drain(&ha->qlnx_callout);
7357 ha->state = QLNX_STATE_CLOSED;
7359 QL_DPRINT2(ha, "exit\n");
7364 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7372 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7376 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7385 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed"
7395 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
7403 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7407 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
7416 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed"
7426 qlnx_sample_storm_stats(qlnx_host_t *ha)
7435 if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
7436 ha->storm_stats_gather = 0;
7440 cdev = &ha->cdev;
7449 index = ha->storm_stats_index +
7452 s_stats = &ha->storm_stats[index];
7559 ha->storm_stats_index++;
7569 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
7575 dev = ha->pci_dev;
7793 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7796 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n",
7797 ha, p_hwfn->p_dev, p_hwfn, rel_vf_id);
7802 if (ha->sriov_task[i].pf_taskqueue != NULL) {
7803 atomic_testandset_32(&ha->sriov_task[i].flags,
7806 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7807 &ha->sriov_task[i].pf_task);
7822 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7825 if (!ha->sriov_initialized)
7828 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
7829 ha, p_hwfn->p_dev, p_hwfn);
7834 if (ha->sriov_task[i].pf_taskqueue != NULL) {
7835 atomic_testandset_32(&ha->sriov_task[i].flags,
7838 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7839 &ha->sriov_task[i].pf_task);
7858 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7861 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
7862 ha, p_hwfn->p_dev, p_hwfn);
7867 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n",
7868 ha, p_hwfn->p_dev, p_hwfn, i);
7870 if (ha->sriov_task[i].pf_taskqueue != NULL) {
7871 atomic_testandset_32(&ha->sriov_task[i].flags,
7874 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7875 &ha->sriov_task[i].pf_task);
7880 qlnx_initialize_sriov(qlnx_host_t *ha)
7886 dev = ha->pci_dev;
7902 ha->sriov_initialized = 0;
7905 ha->sriov_initialized = 1;
7912 qlnx_sriov_disable(qlnx_host_t *ha)
7917 cdev = &ha->cdev;
7926 QL_DPRINT1(ha, "Failed to acquire ptt\n");
7953 QL_DPRINT1(ha,
7994 qlnx_host_t *ha;
8000 if ((ha = device_get_softc(dev)) == NULL) {
8005 if (qlnx_create_pf_taskqueues(ha) != 0)
8008 cdev = &ha->cdev;
8012 QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n",
8016 QL_DPRINT1(ha, "Can start at most %d VFs\n",
8021 ha->vf_attr = malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF,
8024 if (ha->vf_attr == NULL)
8040 QL_DPRINT1(ha, "Failed to acquire ptt\n");
8053 QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i);
8062 ha->num_vfs = num_vfs;
8063 qlnx_inform_vf_link_state(&cdev->hwfns[0], ha);
8065 QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs);
8070 qlnx_sriov_disable(ha);
8073 qlnx_destroy_pf_taskqueues(ha);
8074 ha->num_vfs = 0;
8082 qlnx_host_t *ha;
8084 if ((ha = device_get_softc(dev)) == NULL) {
8089 QL_DPRINT2(ha," dev = %p enter\n", dev);
8091 qlnx_sriov_disable(ha);
8092 qlnx_destroy_pf_taskqueues(ha);
8094 free(ha->vf_attr, M_QLNXBUF);
8095 ha->vf_attr = NULL;
8097 ha->num_vfs = 0;
8099 QL_DPRINT2(ha," dev = %p exit\n", dev);
8106 qlnx_host_t *ha;
8112 if ((ha = device_get_softc(dev)) == NULL) {
8117 QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum);
8119 if (vfnum > (ha->num_vfs - 1)) {
8120 QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n",
8121 vfnum, (ha->num_vfs - 1));
8124 vf_attr = &ha->vf_attr[vfnum];
8135 p_hwfn = &ha->cdev.hwfns[0];
8140 QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum);
8145 qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8153 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8160 QL_DPRINT2(ha, "Event mask of VF events:"
8169 QL_DPRINT2(ha,
8186 qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8194 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8202 QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n");
8211 qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn)
8219 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8225 QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n",
8239 qlnx_host_t *ha;
8247 ha = (qlnx_host_t *)(p_hwfn->p_dev);
8252 if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8254 qlnx_handle_vf_msg(ha, p_hwfn);
8256 if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8258 qlnx_handle_vf_flr_update(ha, p_hwfn);
8260 if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8262 qlnx_handle_bulletin_update(ha, p_hwfn);
8268 qlnx_create_pf_taskqueues(qlnx_host_t *ha)
8273 for (i = 0; i < ha->cdev.num_hwfns; i++) {
8274 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
8279 TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue, p_hwfn);
8281 ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
8283 &ha->sriov_task[i].pf_taskqueue);
8285 if (ha->sriov_task[i].pf_taskqueue == NULL)
8288 taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1,
8291 QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue);
8298 qlnx_destroy_pf_taskqueues(qlnx_host_t *ha)
8302 for (i = 0; i < ha->cdev.num_hwfns; i++) {
8303 if (ha->sriov_task[i].pf_taskqueue != NULL) {
8304 taskqueue_drain(ha->sriov_task[i].pf_taskqueue,
8305 &ha->sriov_task[i].pf_task);
8306 taskqueue_free(ha->sriov_task[i].pf_taskqueue);
8307 ha->sriov_task[i].pf_taskqueue = NULL;
8314 qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha)
8332 QL_DPRINT2(ha, "called\n");
8340 if (ha->link_up) {
8349 QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up);