Lines Matching +full:num +full:- +full:rxq
2 * Copyright (c) 2017-2018 Cavium, Inc.
133 static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
134 static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
136 struct qlnx_rx_queue *rxq);
278 &qlnxe_queue_count, 0, "Multi-Queue queue count");
309 device_id = ha->device_id;
314 return -1;
322 device_id = ha->device_id;
336 return -1;
345 device_id = pci_get_device(ha->pci_dev);
353 return (-1);
377 "Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
384 "Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
391 "Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
398 "Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
406 " Adapter-Ethernet Function",
415 "Adapter-Ethernet Function",
440 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
442 ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl);
444 return (hw_bd_cons - ecore_cons_idx);
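For context, a minimal standalone sketch of the pending-completion arithmetic shown at lines 440-444 (hypothetical helper, not the driver's own function): with 16-bit producer/consumer indices, unsigned subtraction gives the number of outstanding TX buffer descriptors even across index wrap.

#include <stdint.h>

/* Gap between the hardware BD consumer index (written back by the NIC)
 * and the software chain consumer index; uint16_t arithmetic handles
 * the 0xffff -> 0 wrap. */
static inline uint16_t
tx_bd_completions_pending(uint16_t hw_bd_cons, uint16_t sw_cons_idx)
{
	return ((uint16_t)(hw_bd_cons - sw_cons_idx));
}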
461 ha = (qlnx_host_t *)p_hwfn->p_dev;
465 for (i = 0; i < ha->cdev.num_hwfns; i++) {
466 if (&ha->cdev.hwfns[i] == p_hwfn) {
467 taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
495 for (i = 0; i < ha->cdev.num_hwfns; i++) {
496 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
501 TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);
503 ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT,
504 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);
506 if (ha->sp_taskqueue[i] == NULL)
507 return (-1);
509 taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
512 QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
523 for (i = 0; i < ha->cdev.num_hwfns; i++) {
524 if (ha->sp_taskqueue[i] != NULL) {
525 taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
526 taskqueue_free(ha->sp_taskqueue[i]);
544 ha = (qlnx_host_t *)fp->edev;
546 ifp = ha->ifp;
549 if (!drbr_empty(ifp, fp->tx_br)) {
550 if(mtx_trylock(&fp->tx_mtx)) {
552 tx_pkts = fp->tx_pkts_transmitted;
553 tx_compl = fp->tx_pkts_completed;
559 fp->tx_pkts_trans_fp +=
560 (fp->tx_pkts_transmitted - tx_pkts);
561 fp->tx_pkts_compl_fp +=
562 (fp->tx_pkts_completed - tx_compl);
564 mtx_unlock(&fp->tx_mtx);
580 for (i = 0; i < ha->num_rss; i++) {
581 fp = &ha->fp_array[i];
586 TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);
588 fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
590 &fp->fp_taskqueue);
592 if (fp->fp_taskqueue == NULL)
593 return (-1);
595 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
598 QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue);
610 for (i = 0; i < ha->num_rss; i++) {
611 fp = &ha->fp_array[i];
613 if (fp->fp_taskqueue != NULL) {
614 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
615 taskqueue_free(fp->fp_taskqueue);
616 fp->fp_taskqueue = NULL;
628 for (i = 0; i < ha->num_rss; i++) {
629 fp = &ha->fp_array[i];
631 if (fp->fp_taskqueue != NULL) {
633 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
644 device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
677 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
692 TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha);
694 ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
695 taskqueue_thread_enqueue, &ha->err_taskqueue);
697 if (ha->err_taskqueue == NULL)
698 return (-1);
700 taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name);
702 QL_DPRINT1(ha, "%p\n",ha->err_taskqueue);
710 if (ha->err_taskqueue != NULL) {
711 taskqueue_drain(ha->err_taskqueue, &ha->err_task);
712 taskqueue_free(ha->err_taskqueue);
715 ha->err_taskqueue = NULL;
743 ha->device_id = pci_get_device(dev);
749 ha->pci_func = pci_get_function(dev);
751 ha->pci_dev = dev;
753 sx_init(&ha->hw_lock, "qlnx_hw_lock");
755 ha->flags.lock_init = 1;
763 ha->reg_rid = PCIR_BAR(0);
764 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
767 if (ha->pci_reg == NULL) {
773 ha->reg_rid);
775 ha->dbells_rid = PCIR_BAR(2);
778 ha->dbells_rid);
780 ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
781 &ha->dbells_rid, RF_ACTIVE);
783 if (ha->pci_dbells == NULL) {
787 ha->dbells_phys_addr = (uint64_t)
788 bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);
790 ha->dbells_size = rsrc_len_dbells;
798 ha->msix_rid = PCIR_BAR(4);
799 ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
800 &ha->msix_rid, RF_ACTIVE);
802 if (ha->msix_bar == NULL) {
808 ha->msix_rid);
810 ha->dbg_level = 0x0000;
818 ha->pci_dev, ha->pci_reg, rsrc_len_reg,
819 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
820 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
839 ha->flags.hw_init = 1;
849 * Allocate MSI-x vectors
853 ha->num_rss = QLNX_DEFAULT_RSS;
855 ha->num_rss = qlnxe_queue_count;
857 num_sp_msix = ha->cdev.num_hwfns;
862 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq);
863 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq);
866 ha->num_rss = max_rxq;
868 ha->num_rss = max_txq;
870 if (ha->num_rss > QLNX_MAX_VF_RSS)
871 ha->num_rss = QLNX_MAX_VF_RSS;
876 if (ha->num_rss > mp_ncpus)
877 ha->num_rss = mp_ncpus;
879 ha->num_tc = QLNX_MAX_TC;
881 ha->msix_count = pci_msix_count(dev);
889 if (!ha->msix_count ||
890 (ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) {
892 ha->msix_count);
896 if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs))
897 ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs;
899 ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs);
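A rough sketch, using the names visible in lines 889-899, of how the MSI-X vector budget constrains the RSS queue count: the slowpath vectors (one per hwfn) and any RDMA vectors come off the top, and whatever remains is handed to the fast-path queues. The helper below is purely illustrative.

/* Hypothetical helper mirroring the clamping at lines 889-899. Returns the
 * number of RSS queues that fit in the MSI-X budget, or -1 if the device
 * does not expose enough vectors to run at all. */
static int
qlnx_budget_rss(int msix_count, int num_sp_msix, int num_rdma_irqs, int want_rss)
{
	if (!msix_count || (msix_count < (num_sp_msix + 1 + num_rdma_irqs)))
		return (-1);
	if (msix_count > (want_rss + num_sp_msix + num_rdma_irqs))
		msix_count = want_rss + num_sp_msix + num_rdma_irqs;
	return (msix_count - (num_sp_msix + num_rdma_irqs));
}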
907 ha->pci_reg, rsrc_len_reg,
908 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
909 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
910 ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);
912 if (pci_alloc_msix(dev, &ha->msix_count)) {
914 ha->msix_count);
915 ha->msix_count = 0;
927 for (i = 0; i < ha->cdev.num_hwfns; i++) {
928 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
930 ha->sp_irq_rid[i] = i + 1;
931 ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
932 &ha->sp_irq_rid[i],
934 if (ha->sp_irq[i] == NULL) {
940 if (bus_setup_intr(dev, ha->sp_irq[i],
942 qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
950 ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
960 for (i = 0; i < ha->num_rss; i++) {
961 ha->irq_vec[i].rss_idx = i;
962 ha->irq_vec[i].ha = ha;
963 ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i;
965 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
966 &ha->irq_vec[i].irq_rid,
969 if (ha->irq_vec[i].irq == NULL) {
972 i, ha->irq_vec[i].irq_rid);
976 if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
983 callout_init(&ha->qlnx_callout, 1);
984 ha->flags.callout_init = 1;
986 for (i = 0; i < ha->cdev.num_hwfns; i++) {
987 if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
989 if (ha->grcdump_size[i] == 0)
992 ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
994 i, ha->grcdump_size[i]);
996 ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
997 if (ha->grcdump[i] == NULL) {
1002 if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
1004 if (ha->idle_chk_size[i] == 0)
1007 ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
1009 i, ha->idle_chk_size[i]);
1011 ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);
1013 if (ha->idle_chk[i] == NULL) {
1026 ha->flags.slowpath_start = 1;
1029 if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
1043 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
1047 snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
1050 snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
1055 ha->stormfw_ver, ha->mfw_ver);
1158 personality = qlnx_get_personality(ha->pci_func);
1162 device_printf(ha->pci_dev, "%s: DEFAULT\n",
1164 ha->personality = ECORE_PCI_DEFAULT;
1168 device_printf(ha->pci_dev, "%s: ETH_ONLY\n",
1170 ha->personality = ECORE_PCI_ETH;
1174 device_printf(ha->pci_dev, "%s: ETH_IWARP\n",
1176 ha->personality = ECORE_PCI_ETH_IWARP;
1180 device_printf(ha->pci_dev, "%s: ETH_ROCE\n",
1182 ha->personality = ECORE_PCI_ETH_ROCE;
1197 ha->cdev.ha = ha;
1198 ecore_init_struct(&ha->cdev);
1200 /* ha->dp_module = ECORE_MSG_PROBE |
1206 ha->dp_level = ECORE_LEVEL_VERBOSE;*/
1207 //ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2;
1208 ha->dp_level = ECORE_LEVEL_NOTICE;
1209 //ha->dp_level = ECORE_LEVEL_VERBOSE;
1211 ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);
1213 ha->cdev.regview = ha->pci_reg;
1215 ha->personality = ECORE_PCI_DEFAULT;
1218 ha->cdev.b_is_vf = true;
1220 if (ha->pci_dbells != NULL) {
1221 ha->cdev.doorbells = ha->pci_dbells;
1222 ha->cdev.db_phys_addr = ha->dbells_phys_addr;
1223 ha->cdev.db_size = ha->dbells_size;
1225 ha->pci_dbells = ha->pci_reg;
1228 ha->cdev.doorbells = ha->pci_dbells;
1229 ha->cdev.db_phys_addr = ha->dbells_phys_addr;
1230 ha->cdev.db_size = ha->dbells_size;
1240 (ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp": "ethernet"));
1244 params.personality = ha->personality;
1251 ecore_hw_prepare(&ha->cdev, &params);
1253 qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);
1256 ha, &ha->cdev, &ha->cdev.hwfns[0]);
1267 dev = ha->pci_dev;
1272 if (ha->idle_chk[i] != NULL) {
1273 free(ha->idle_chk[i], M_QLNXBUF);
1274 ha->idle_chk[i] = NULL;
1277 if (ha->grcdump[i] != NULL) {
1278 free(ha->grcdump[i], M_QLNXBUF);
1279 ha->grcdump[i] = NULL;
1283 if (ha->flags.callout_init)
1284 callout_drain(&ha->qlnx_callout);
1286 if (ha->flags.slowpath_start) {
1290 if (ha->flags.hw_init)
1291 ecore_hw_remove(&ha->cdev);
1295 if (ha->ifp != NULL)
1296 ether_ifdetach(ha->ifp);
1308 for (i = 0; i < ha->num_rss; i++) {
1309 struct qlnx_fastpath *fp = &ha->fp_array[i];
1311 if (ha->irq_vec[i].handle) {
1312 (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
1313 ha->irq_vec[i].handle);
1316 if (ha->irq_vec[i].irq) {
1318 ha->irq_vec[i].irq_rid,
1319 ha->irq_vec[i].irq);
1326 for (i = 0; i < ha->cdev.num_hwfns; i++) {
1327 if (ha->sp_handle[i])
1328 (void)bus_teardown_intr(dev, ha->sp_irq[i],
1329 ha->sp_handle[i]);
1331 if (ha->sp_irq[i])
1333 ha->sp_irq_rid[i], ha->sp_irq[i]);
1338 if (ha->msix_count)
1341 if (ha->flags.lock_init) {
1342 sx_destroy(&ha->hw_lock);
1345 if (ha->pci_reg)
1346 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
1347 ha->pci_reg);
1349 if (ha->dbells_size && ha->pci_dbells)
1350 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
1351 ha->pci_dbells);
1353 if (ha->msix_bar)
1354 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
1355 ha->msix_bar);
1366 if (ha->ifp != NULL)
1367 if_setdrvflagbits(ha->ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING));
1374 ha->error_recovery = 1;
1376 for (i = 0; i < ha->cdev.num_hwfns; i++) {
1377 qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
1378 qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
1394 if (err || !req->newptr)
1414 if (err || !req->newptr || !usecs || (usecs > 255))
1420 return (-1);
1422 for (i = 0; i < ha->num_rss; i++) {
1423 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1425 fp = &ha->fp_array[i];
1427 if (fp->txq[0]->handle != NULL) {
1429 (uint16_t)usecs, fp->txq[0]->handle);
1434 ha->tx_coalesce_usecs = (uint8_t)usecs;
1449 if (err || !req->newptr || !usecs || (usecs > 255))
1455 return (-1);
1457 for (i = 0; i < ha->num_rss; i++) {
1458 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];
1460 fp = &ha->fp_array[i];
1462 if (fp->rxq->handle != NULL) {
1464 0, fp->rxq->handle);
1469 ha->rx_coalesce_usecs = (uint8_t)usecs;
1481 ctx = device_get_sysctl_ctx(ha->pci_dev);
1482 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1490 CTLFLAG_RD, &ha->sp_interrupts,
1506 ctx = device_get_sysctl_ctx(ha->pci_dev);
1507 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1513 for (i = 0; i < ha->num_rss; i++) {
1525 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
1530 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
1535 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
1540 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
1545 CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts,
1552 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx,
1557 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx,
1562 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp,
1567 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp,
1572 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr,
1578 CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts,
1583 CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
1588 CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
1593 CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
1598 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
1599 ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");
1603 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
1604 ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");
1608 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
1609 ha->fp_array[i].tx_tso_max_pkt_len,
1614 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
1615 ha->fp_array[i].tx_tso_min_pkt_len,
1625 &ha->fp_array[i].tx_pkts[j], name_str);
1636 &ha->fp_array[i].tx_pkts_hist[j], name_str);
1645 &ha->fp_array[i].tx_comInt[j], name_str);
1654 &ha->fp_array[i].tx_pkts_q[j], name_str);
1660 CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
1665 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
1670 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
1675 CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
1680 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
1685 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
1690 CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
1695 CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
1700 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
1705 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
1710 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
1715 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
1720 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
1727 CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
1732 CTLFLAG_RD, &ha->fp_array[i].tpa_start,
1737 CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
1742 CTLFLAG_RD, &ha->fp_array[i].tpa_end,
1747 CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
1752 CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
1757 CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
1762 CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
1776 ctx = device_get_sysctl_ctx(ha->pci_dev);
1777 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));
1785 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
1790 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
1795 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
1800 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
1805 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
1810 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
1815 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
1820 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
1825 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
1830 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
1835 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
1840 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
1845 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
1850 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
1855 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
1860 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
1865 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
1870 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
1875 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
1880 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
1885 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
1890 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
1895 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
1900 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
1905 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets,
1910 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets,
1915 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets,
1920 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets,
1925 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets,
1930 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets,
1935 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets,
1940 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets,
1945 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets,
1950 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets,
1955 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors,
1960 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames,
1965 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames,
1970 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames,
1975 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors,
1980 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors,
1985 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets,
1990 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers,
1995 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets,
2000 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments,
2005 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets,
2010 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets,
2015 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets,
2020 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets,
2025 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets,
2030 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets,
2035 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets,
2040 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets,
2045 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets,
2050 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets,
2055 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames,
2060 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames,
2065 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count,
2070 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions,
2075 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates,
2080 CTLFLAG_RD, &ha->hw_stats.common.brb_discards,
2085 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes,
2090 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets,
2095 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets,
2100 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets,
2105 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok,
2110 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
2115 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
2120 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
2125 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
2130 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
2138 device_t dev = ha->pci_dev;
2156 CTLFLAG_RD, ha->stormfw_ver, 0,
2160 CTLFLAG_RD, ha->mfw_ver, 0,
2165 &ha->personality, ha->personality,
2171 ha->dbg_level = 0;
2174 &ha->dbg_level, ha->dbg_level, "Debug Level");
2176 ha->dp_level = 0x01;
2179 &ha->dp_level, ha->dp_level, "DP Level");
2181 ha->dbg_trace_lro_cnt = 0;
2184 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
2187 ha->dbg_trace_tso_pkt_len = 0;
2190 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
2193 ha->dp_module = 0;
2196 &ha->dp_module, ha->dp_module, "DP Module");
2198 ha->err_inject = 0;
2202 &ha->err_inject, ha->err_inject, "Error Inject");
2204 ha->storm_stats_enable = 0;
2208 &ha->storm_stats_enable, ha->storm_stats_enable,
2211 ha->storm_stats_index = 0;
2215 &ha->storm_stats_index, ha->storm_stats_index,
2218 ha->grcdump_taken = 0;
2221 &ha->grcdump_taken, ha->grcdump_taken,
2224 ha->idle_chk_taken = 0;
2227 &ha->idle_chk_taken, ha->idle_chk_taken,
2232 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
2237 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
2257 ha->rx_pkt_threshold = 128;
2260 &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
2263 ha->rx_jumbo_buf_eq_mtu = 0;
2266 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
2272 &ha->err_illegal_intr, "err_illegal_intr");
2276 &ha->err_fp_null, "err_fp_null");
2280 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
2294 ifp = ha->ifp = if_alloc(IFT_ETHER);
2297 device_id = pci_get_device(ha->pci_dev);
2323 ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
2325 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
2327 if (!ha->primary_mac[0] && !ha->primary_mac[1] &&
2328 !ha->primary_mac[2] && !ha->primary_mac[3] &&
2329 !ha->primary_mac[4] && !ha->primary_mac[5]) {
2334 ha->primary_mac[0] = 0x00;
2335 ha->primary_mac[1] = 0x0e;
2336 ha->primary_mac[2] = 0x1e;
2337 ha->primary_mac[3] = rnd & 0xFF;
2338 ha->primary_mac[4] = (rnd >> 8) & 0xFF;
2339 ha->primary_mac[5] = (rnd >> 16) & 0xFF;
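When the NVRAM MAC reads back as all zeros (lines 2327-2329), a fallback address is synthesized. A sketch of the same idea, assuming arc4random() as the entropy source and the 00:0e:1e prefix shown above:

#include <stdint.h>
#include <stdlib.h>	/* arc4random() on FreeBSD */

/* Hypothetical fallback-MAC generator: fixed vendor-style prefix plus a
 * random low three bytes, as in lines 2334-2339. */
static void
make_fallback_mac(uint8_t mac[6])
{
	uint32_t rnd = arc4random();

	mac[0] = 0x00;
	mac[1] = 0x0e;
	mac[2] = 0x1e;
	mac[3] = rnd & 0xFF;
	mac[4] = (rnd >> 8) & 0xFF;
	mac[5] = (rnd >> 16) & 0xFF;
}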
2342 ether_ifattach(ifp, ha->primary_mac);
2343 bcopy(if_getlladdr(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
2357 if_sethwtsomax(ifp, QLNX_MAX_TSO_FRAME_SIZE -
2359 if_sethwtsomaxsegcount(ifp, QLNX_MAX_SEGMENTS - 1); /* hdr */
2371 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\
2375 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
2376 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
2377 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
2380 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
2381 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
2383 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
2384 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
2386 ifmedia_add(&ha->media,
2388 ifmedia_add(&ha->media,
2390 ifmedia_add(&ha->media,
2394 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
2395 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);
2397 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
2407 if_t ifp = ha->ifp;
2452 cdev = &ha->cdev;
2454 mcast = &ha->ecore_mcast;
2458 mcast->opcode = ECORE_FILTER_ADD;
2460 mcast->opcode = ECORE_FILTER_REMOVE;
2462 mcast->num_mc_addrs = 1;
2463 memcpy(mcast->mac, mac_addr, ETH_ALEN);
2476 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0)
2481 if ((ha->mcast[i].addr[0] == 0) &&
2482 (ha->mcast[i].addr[1] == 0) &&
2483 (ha->mcast[i].addr[2] == 0) &&
2484 (ha->mcast[i].addr[3] == 0) &&
2485 (ha->mcast[i].addr[4] == 0) &&
2486 (ha->mcast[i].addr[5] == 0)) {
2488 return (-1);
2490 bcopy(mta, ha->mcast[i].addr, ETH_ALEN);
2491 ha->nmcast++;
2505 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) {
2507 return (-1);
2509 ha->mcast[i].addr[0] = 0;
2510 ha->mcast[i].addr[1] = 0;
2511 ha->mcast[i].addr[2] = 0;
2512 ha->mcast[i].addr[3] = 0;
2513 ha->mcast[i].addr[4] = 0;
2514 ha->mcast[i].addr[5] = 0;
2516 ha->nmcast--;
2566 if_t ifp = ha->ifp;
2590 filter = ha->filter;
2612 filter = ha->filter;
2641 if (ifa->ifa_addr->sa_family == AF_INET) {
2649 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr));
2661 if (ifr->ifr_mtu > QLNX_MAX_MTU) {
2665 if_setmtu(ifp, ifr->ifr_mtu);
2666 ha->max_frame_size =
2685 if ((flags ^ ha->if_flags) &
2688 } else if ((if_getflags(ifp) ^ ha->if_flags) &
2693 ha->max_frame_size = if_getmtu(ifp) +
2702 ha->if_flags = if_getflags(ifp);
2728 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
2733 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
2763 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
2781 ret = -1;
2786 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset,
2792 ret = -1;
2828 ifm = &ha->media;
2830 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2847 ifmr->ifm_status = IFM_AVALID;
2848 ifmr->ifm_active = IFM_ETHER;
2850 if (ha->link_up) {
2851 ifmr->ifm_status |= IFM_ACTIVE;
2852 ifmr->ifm_active |=
2853 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link));
2855 if (ha->if_link.link_partner_caps &
2857 ifmr->ifm_active |=
2861 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? "link_up" : "link_down"));
2878 idx = txq->sw_tx_cons;
2879 mp = txq->sw_tx_ring[idx].mp;
2880 map = txq->sw_tx_ring[idx].map;
2892 fp->rss_id,
2893 ecore_chain_get_prod_idx(&txq->tx_pbl),
2894 ecore_chain_get_cons_idx(&txq->tx_pbl),
2895 le16toh(*txq->hw_cons_ptr),
2896 txq->tx_db.raw,
2897 ecore_chain_get_elem_left(&txq->tx_pbl));
2899 fp->err_tx_free_pkt_null++;
2906 QLNX_INC_OPACKETS((ha->ifp));
2907 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len));
2909 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE);
2910 bus_dmamap_unload(ha->tx_tag, map);
2912 fp->tx_pkts_freed++;
2913 fp->tx_pkts_completed++;
2918 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl);
2919 nbds = first_bd->data.nbds;
2924 /* tx_data_bd = */ ecore_chain_consume(&txq->tx_pbl);
2927 txq->sw_tx_ring[idx].flags = 0;
2928 txq->sw_tx_ring[idx].mp = NULL;
2929 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0;
2943 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
2946 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
2947 diff = hw_bd_cons - ecore_cons_idx;
2960 fp->rss_id,
2961 ecore_chain_get_prod_idx(&txq->tx_pbl),
2962 ecore_chain_get_cons_idx(&txq->tx_pbl),
2963 le16toh(*txq->hw_cons_ptr),
2964 txq->tx_db.raw,
2965 ecore_chain_get_elem_left(&txq->tx_pbl));
2967 fp->err_tx_cons_idx_conflict++;
2973 idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
2974 idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1);
2975 prefetch(txq->sw_tx_ring[idx].mp);
2976 prefetch(txq->sw_tx_ring[idx2].mp);
2980 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1);
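The consumer advance at line 2980 (and the prefetch indices at 2973-2974) rely on TX_RING_SIZE being a power of two, so a mask replaces a modulo. A minimal illustration of that ring-index style (constants hypothetical):

#include <stdint.h>

#define RING_SIZE 8192	/* must be a power of two for the mask to work */

/* Advance a producer/consumer index with wraparound, as at lines 2973-2980. */
static inline uint16_t
ring_advance(uint16_t idx)
{
	return ((idx + 1) & (RING_SIZE - 1));
}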
2993 txq = fp->txq[0];
2994 ha = (qlnx_host_t *)fp->edev;
2996 if ((!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) || (!ha->link_up)) {
2998 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3003 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3005 mp = drbr_peek(ifp, fp->tx_br);
3010 drbr_putback(ifp, fp->tx_br, mp);
3012 fp->tx_pkts_processed++;
3013 drbr_advance(ifp, fp->tx_br);
3018 drbr_advance(ifp, fp->tx_br);
3019 fp->tx_pkts_transmitted++;
3020 fp->tx_pkts_processed++;
3023 mp = drbr_peek(ifp, fp->tx_br);
3027 if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) ||
3028 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))
3030 (void)qlnx_tx_int(ha, fp, fp->txq[0]);
3050 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) %
3051 ha->num_rss;
3053 fp = &ha->fp_array[rss_id];
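A small sketch of the queue selection at lines 3050-3053: the mbuf flow id is reduced modulo the RSS indirection-table size and then modulo the number of active queues (the constant name is taken from the listing; the helper itself is hypothetical).

#include <stdint.h>

/* Standalone version of the fast-path queue pick used before indexing
 * fp_array[]. */
static inline uint32_t
pick_tx_queue(uint32_t flowid, uint32_t ind_table_size, uint32_t num_rss)
{
	return ((flowid % ind_table_size) % num_rss);
}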
3055 if (fp->tx_br == NULL) {
3060 if (mtx_trylock(&fp->tx_mtx)) {
3062 tx_pkts = fp->tx_pkts_transmitted;
3063 tx_compl = fp->tx_pkts_completed;
3069 fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts);
3070 fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl);
3072 mtx_unlock(&fp->tx_mtx);
3074 if (mp != NULL && (fp->fp_taskqueue != NULL)) {
3075 ret = drbr_enqueue(ifp, fp->tx_br, mp);
3076 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
3098 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
3099 fp = &ha->fp_array[rss_id];
3104 if (fp->tx_br) {
3105 mtx_lock(&fp->tx_mtx);
3107 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
3108 fp->tx_pkts_freed++;
3111 mtx_unlock(&fp->tx_mtx);
3124 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells);
3126 bus_write_4(ha->pci_dbells, offset, value);
3127 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ);
3128 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ);
3146 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3148 etype = ntohs(eh->evl_proto);
3151 etype = ntohs(eh->evl_encap_proto);
3156 ip = (struct ip *)(mp->m_data + ehdrlen);
3160 if (mp->m_len < (ehdrlen + ip_hlen)) {
3166 offset = ip_hlen + ehdrlen + (th->th_off << 2);
3170 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
3174 if (mp->m_len < (ehdrlen + ip_hlen)) {
3180 offset = ip_hlen + ehdrlen + (th->th_off << 2);
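Lines 3146-3180 locate the start of the TCP payload so the LSO window check can count header bytes. A condensed sketch of the same offset arithmetic for the untagged IPv4 case, using the standard header structures (the helper name and the no-VLAN assumption are illustrative):

#include <sys/types.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

/* Total L2+L3+L4 header length, mirroring "ip_hlen + ehdrlen + (th_off << 2)". */
static int
tcp_payload_offset(const struct ip *ip, const struct tcphdr *th)
{
	int ehdrlen = ETHER_HDR_LEN;	/* assumes no 802.1Q tag */
	int ip_hlen = ip->ip_hl << 2;	/* IHL is in 32-bit words */

	return (ehdrlen + ip_hlen + (th->th_off << 2));
}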
3206 while ((i < nsegs) && (offset >= segs->ds_len)) {
3207 offset = offset - segs->ds_len;
3213 window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr;
3215 nsegs = nsegs - i;
3222 sum += s_seg->ds_len;
3227 fp->tx_lso_wnd_min_len++;
3228 return (-1);
3231 nsegs = nsegs - 1;
3244 int ret = -1;
3265 QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id);
3267 if (!ha->link_up)
3268 return (-1);
3275 txq = fp->txq[0];
3277 if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) <
3279 fp->tx_nsegs_gt_elem_left++;
3280 fp->err_tx_nsegs_gt_elem_left++;
3285 idx = txq->sw_tx_prod;
3287 map = txq->sw_tx_ring[idx].map;
3288 segs = txq->segs;
3290 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs,
3293 if (ha->dbg_trace_tso_pkt_len) {
3294 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3295 if (!fp->tx_tso_min_pkt_len) {
3296 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len;
3297 fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len;
3299 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len)
3300 fp->tx_tso_min_pkt_len =
3301 m_head->m_pkthdr.len;
3302 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len)
3303 fp->tx_tso_max_pkt_len =
3304 m_head->m_pkthdr.len;
3309 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3314 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) ||
3315 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) &&
3319 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len);
3321 fp->tx_defrag++;
3325 fp->err_tx_defrag++;
3326 fp->tx_pkts_freed++;
3336 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head,
3338 fp->err_tx_defrag_dmamap_load++;
3342 ret, m_head->m_pkthdr.len);
3344 fp->tx_pkts_freed++;
3352 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) {
3353 fp->err_tx_non_tso_max_seg++;
3356 "(%d) nsegs too many for non-TSO [%d, %d]\n",
3357 ret, nsegs, m_head->m_pkthdr.len);
3359 fp->tx_pkts_freed++;
3365 if (m_head->m_pkthdr.csum_flags & CSUM_TSO)
3369 fp->err_tx_dmamap_load++;
3372 ret, m_head->m_pkthdr.len);
3373 fp->tx_pkts_freed++;
3381 if (ha->dbg_trace_tso_pkt_len) {
3383 fp->tx_pkts[(nsegs - 1)]++;
3385 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++;
3389 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3390 if(m_head->m_pkthdr.len <= 2048)
3391 fp->tx_pkts_hist[0]++;
3392 else if((m_head->m_pkthdr.len > 2048) &&
3393 (m_head->m_pkthdr.len <= 4096))
3394 fp->tx_pkts_hist[1]++;
3395 else if((m_head->m_pkthdr.len > 4096) &&
3396 (m_head->m_pkthdr.len <= 8192))
3397 fp->tx_pkts_hist[2]++;
3398 else if((m_head->m_pkthdr.len > 8192) &&
3399 (m_head->m_pkthdr.len <= 12288 ))
3400 fp->tx_pkts_hist[3]++;
3401 else if((m_head->m_pkthdr.len > 12288) &&
3402 (m_head->m_pkthdr.len <= 16384))
3403 fp->tx_pkts_hist[4]++;
3404 else if((m_head->m_pkthdr.len > 16384) &&
3405 (m_head->m_pkthdr.len <= 20480))
3406 fp->tx_pkts_hist[5]++;
3407 else if((m_head->m_pkthdr.len > 20480) &&
3408 (m_head->m_pkthdr.len <= 24576))
3409 fp->tx_pkts_hist[6]++;
3410 else if((m_head->m_pkthdr.len > 24576) &&
3411 (m_head->m_pkthdr.len <= 28672))
3412 fp->tx_pkts_hist[7]++;
3413 else if((m_head->m_pkthdr.len > 28672) &&
3414 (m_head->m_pkthdr.len <= 32768))
3415 fp->tx_pkts_hist[8]++;
3416 else if((m_head->m_pkthdr.len > 32768) &&
3417 (m_head->m_pkthdr.len <= 36864))
3418 fp->tx_pkts_hist[9]++;
3419 else if((m_head->m_pkthdr.len > 36864) &&
3420 (m_head->m_pkthdr.len <= 40960))
3421 fp->tx_pkts_hist[10]++;
3422 else if((m_head->m_pkthdr.len > 40960) &&
3423 (m_head->m_pkthdr.len <= 45056))
3424 fp->tx_pkts_hist[11]++;
3425 else if((m_head->m_pkthdr.len > 45056) &&
3426 (m_head->m_pkthdr.len <= 49152))
3427 fp->tx_pkts_hist[12]++;
3428 else if((m_head->m_pkthdr.len > 49152) &&
3429 (m_head->m_pkthdr.len <= 53248))
3430 fp->tx_pkts_hist[13]++;
3431 else if((m_head->m_pkthdr.len > 53248) &&
3432 (m_head->m_pkthdr.len <= 57344))
3433 fp->tx_pkts_hist[14]++;
3434 else if((m_head->m_pkthdr.len > 53248) &&
3435 (m_head->m_pkthdr.len <= 57344))
3436 fp->tx_pkts_hist[15]++;
3437 else if((m_head->m_pkthdr.len > 57344) &&
3438 (m_head->m_pkthdr.len <= 61440))
3439 fp->tx_pkts_hist[16]++;
3441 fp->tx_pkts_hist[17]++;
3444 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3445 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl);
3446 bd_used = TX_RING_SIZE - elem_left;
3449 fp->tx_pkts_q[0]++;
3451 fp->tx_pkts_q[1]++;
3453 fp->tx_pkts_q[2]++;
3455 fp->tx_pkts_q[3]++;
3457 fp->tx_pkts_q[4]++;
3459 fp->tx_pkts_q[5]++;
3461 fp->tx_pkts_q[6]++;
3463 fp->tx_pkts_q[7]++;
3465 fp->tx_pkts_q[8]++;
3467 fp->tx_pkts_q[9]++;
3469 fp->tx_pkts_q[10]++;
3471 fp->tx_pkts_q[11]++;
3473 fp->tx_pkts_q[12]++;
3475 fp->tx_pkts_q[13]++;
3477 fp->tx_pkts_q[14]++;
3479 fp->tx_pkts_q[15]++;
3481 fp->tx_pkts_q[16]++;
3487 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
3490 nsegs, elem_left, fp->rss_id);
3492 fp->tx_nsegs_gt_elem_left++;
3497 ecore_chain_get_elem_left(&txq->tx_pbl))) {
3500 nsegs, elem_left, fp->rss_id);
3502 fp->err_tx_nsegs_gt_elem_left++;
3503 fp->tx_ring_full = 1;
3504 if (ha->storm_stats_enable)
3505 ha->storm_stats_gather = 1;
3510 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE);
3512 txq->sw_tx_ring[idx].mp = m_head;
3514 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
3518 first_bd->data.bd_flags.bitfields =
3521 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len);
3525 if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
3526 first_bd->data.bd_flags.bitfields |=
3530 if (m_head->m_pkthdr.csum_flags &
3532 first_bd->data.bd_flags.bitfields |=
3536 if (m_head->m_flags & M_VLANTAG) {
3537 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag;
3538 first_bd->data.bd_flags.bitfields |=
3542 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3543 first_bd->data.bd_flags.bitfields |=
3545 first_bd->data.bd_flags.bitfields |=
3550 if (offset == segs->ds_len) {
3551 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3556 ecore_chain_produce(&txq->tx_pbl);
3562 (segs->ds_addr), (segs->ds_len));
3568 ecore_chain_produce(&txq->tx_pbl);
3570 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3571 third_bd->data.bitfields |=
3577 (segs->ds_addr), (segs->ds_len));
3584 ecore_chain_produce(&txq->tx_pbl);
3587 segs->ds_addr,\
3588 segs->ds_len);
3593 } else if (offset < segs->ds_len) {
3594 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset);
3597 ecore_chain_produce(&txq->tx_pbl);
3600 (segs->ds_addr + offset),\
3601 (segs->ds_len - offset));
3606 ecore_chain_produce(&txq->tx_pbl);
3610 segs->ds_addr,\
3611 segs->ds_len);
3612 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3613 third_bd->data.bitfields |=
3620 ecore_chain_produce(&txq->tx_pbl);
3623 segs->ds_addr,\
3624 segs->ds_len);
3630 offset = offset - segs->ds_len;
3638 ecore_chain_produce(&txq->tx_pbl);
3649 if (offset && (offset < segs->ds_len)) {
3651 segs->ds_addr, offset);
3654 ecore_chain_produce(&txq->tx_pbl);
3667 (segs->ds_addr + offset), \
3668 (segs->ds_len - offset));
3673 offset = offset - segs->ds_len;
3675 segs->ds_addr, segs->ds_len);
3683 ecore_chain_produce(&txq->tx_pbl);
3687 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz;
3688 third_bd->data.bitfields |=
3691 fp->tx_tso_pkts++;
3696 ecore_chain_produce(&txq->tx_pbl);
3698 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\
3699 segs->ds_len);
3703 first_bd->data.bitfields =
3704 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
3706 first_bd->data.bitfields =
3707 htole16(first_bd->data.bitfields);
3708 fp->tx_non_tso_pkts++;
3711 first_bd->data.nbds = nbd;
3713 if (ha->dbg_trace_tso_pkt_len) {
3714 if (fp->tx_tso_max_nsegs < nsegs)
3715 fp->tx_tso_max_nsegs = nsegs;
3717 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs))
3718 fp->tx_tso_min_nsegs = nsegs;
3721 txq->sw_tx_ring[idx].nsegs = nsegs;
3722 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
3724 txq->tx_db.data.bd_prod =
3725 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl));
3727 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw);
3729 QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id);
3736 if_t ifp = ha->ifp;
3742 * We simply lock and unlock each fp->tx_mtx to
3746 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state);
3748 if (ha->state == QLNX_STATE_OPEN) {
3749 for (i = 0; i < ha->num_rss; i++) {
3750 struct qlnx_fastpath *fp = &ha->fp_array[i];
3752 mtx_lock(&fp->tx_mtx);
3753 mtx_unlock(&fp->tx_mtx);
3755 if (fp->fp_taskqueue != NULL)
3756 taskqueue_enqueue(fp->fp_taskqueue,
3757 &fp->fp_task);
3774 return(TX_RING_SIZE - 1);
3784 p_hwfn = &ha->cdev.hwfns[0];
3787 return (p_hwfn->hw_info.hw_mac_addr);
3792 device_printf(ha->pci_dev, "%s: p_is_forced = %d"
3795 memcpy(ha->primary_mac, mac, ETH_ALEN);
3798 return (ha->primary_mac);
3806 switch (if_link->media_type) {
3809 if (if_link->speed == (100 * 1000))
3811 else if (if_link->speed == (40 * 1000))
3813 else if (if_link->speed == (25 * 1000))
3815 else if (if_link->speed == (10 * 1000))
3817 else if (if_link->speed == (1 * 1000))
3823 if (if_link->speed == (100 * 1000))
3825 else if (if_link->speed == (40 * 1000))
3827 else if (if_link->speed == (25 * 1000))
3829 else if (if_link->speed == (10 * 1000))
3851 struct qlnx_rx_queue *rxq;
3854 rxq = fp->rxq;
3858 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3860 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3861 mp = sw_rx_data->data;
3865 fp->err_rx_mp_null++;
3866 rxq->sw_rx_cons =
3867 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3872 return (-1);
3874 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
3877 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3881 qlnx_reuse_rx_data(rxq);
3882 fp->err_rx_alloc_errors++;
3887 return (-1);
3889 ecore_chain_consume(&rxq->rx_bd_ring);
3891 if (len > rxq->rx_buf_size)
3892 len_in_buffer = rxq->rx_buf_size;
3896 len = len - len_in_buffer;
3898 mp->m_flags &= ~M_PKTHDR;
3899 mp->m_next = NULL;
3900 mp->m_len = len_in_buffer;
3905 mpl->m_next = mp;
3911 mp_head->m_next = mpf;
3919 struct qlnx_rx_queue *rxq,
3923 if_t ifp = ha->ifp;
3933 agg_index = cqe->tpa_agg_index;
3951 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len,
3952 cqe->pars_flags.flags, cqe->vlan_tag,
3953 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset,
3954 cqe->tpa_agg_index, cqe->header_len,
3955 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1],
3956 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3],
3957 cqe->ext_bd_len_list[4]);
3960 fp->err_rx_tpa_invalid_agg_num++;
3964 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
3965 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD);
3966 mp = sw_rx_data->data;
3968 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp);
3971 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id);
3972 fp->err_rx_mp_null++;
3973 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
3978 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) {
3980 " flags = %x, dropping incoming packet\n", fp->rss_id,
3981 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags));
3983 fp->err_rx_hw_errors++;
3985 qlnx_reuse_rx_data(rxq);
3992 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
3995 fp->rss_id);
3997 fp->err_rx_alloc_errors++;
4005 map = sw_rx_data->map;
4006 addr = sw_rx_data->dma_addr;
4008 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
4010 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data;
4011 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr;
4012 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map;
4014 rxq->tpa_info[agg_index].rx_buf.data = mp;
4015 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr;
4016 rxq->tpa_info[agg_index].rx_buf.map = map;
4019 ecore_chain_produce(&rxq->rx_bd_ring);
4021 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr));
4022 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr));
4024 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4027 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
4028 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4030 ecore_chain_consume(&rxq->rx_bd_ring);
4034 if (cqe->ext_bd_len_list[i] == 0)
4037 qlnx_reuse_rx_data(rxq);
4040 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4044 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4047 fp->rss_id);
4052 if (rxq->tpa_info[agg_index].mpf) {
4053 m_freem(rxq->tpa_info[agg_index].mpf);
4054 rxq->tpa_info[agg_index].mpl = NULL;
4056 rxq->tpa_info[agg_index].mpf = mp;
4057 rxq->tpa_info[agg_index].mpl = NULL;
4059 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4060 ecore_chain_consume(&rxq->rx_bd_ring);
4064 if (cqe->ext_bd_len_list[i] == 0)
4067 qlnx_reuse_rx_data(rxq);
4069 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR;
4078 ecore_chain_consume(&rxq->rx_bd_ring);
4079 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4082 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id);
4084 if (cqe->ext_bd_len_list[i] == 0)
4087 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4088 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4091 mpc = sw_rx_data->data;
4094 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4095 fp->err_rx_mp_null++;
4099 rxq->tpa_info[agg_index].agg_state =
4101 ecore_chain_consume(&rxq->rx_bd_ring);
4102 rxq->sw_rx_cons =
4103 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4107 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4110 " buffer\n", fp->rss_id);
4112 qlnx_reuse_rx_data(rxq);
4118 rxq->tpa_info[agg_index].agg_state =
4121 ecore_chain_consume(&rxq->rx_bd_ring);
4122 rxq->sw_rx_cons =
4123 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4128 mpc->m_flags &= ~M_PKTHDR;
4129 mpc->m_next = NULL;
4130 mpc->m_len = cqe->ext_bd_len_list[i];
4135 mpl->m_len = ha->rx_buf_size;
4136 mpl->m_next = mpc;
4140 ecore_chain_consume(&rxq->rx_bd_ring);
4141 rxq->sw_rx_cons =
4142 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4145 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4148 fp->rss_id);
4152 rxq->tpa_info[agg_index].mpf = mp;
4153 rxq->tpa_info[agg_index].mpl = NULL;
4158 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
4161 mp->m_len = ha->rx_buf_size;
4162 mp->m_next = mpf;
4163 rxq->tpa_info[agg_index].mpf = mp;
4164 rxq->tpa_info[agg_index].mpl = mpl;
4166 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
4167 rxq->tpa_info[agg_index].mpf = mp;
4168 rxq->tpa_info[agg_index].mpl = mp;
4169 mp->m_next = NULL;
4172 mp->m_flags |= M_PKTHDR;
4175 mp->m_pkthdr.rcvif = ifp;
4178 mp->m_pkthdr.csum_flags = 0;
4180 //mp->m_pkthdr.flowid = fp->rss_id;
4181 mp->m_pkthdr.flowid = cqe->rss_hash;
4183 hash_type = cqe->bitfields &
4209 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
4212 mp->m_pkthdr.csum_data = 0xFFFF;
4214 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
4215 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
4216 mp->m_flags |= M_VLANTAG;
4219 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
4222 fp->rss_id, rxq->tpa_info[agg_index].agg_state,
4223 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
4230 struct qlnx_rx_queue *rxq,
4248 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4249 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4250 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
4252 agg_index = cqe->tpa_agg_index;
4255 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4256 fp->err_rx_tpa_invalid_agg_num++;
4261 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4263 if (cqe->len_list[i] == 0)
4266 if (rxq->tpa_info[agg_index].agg_state !=
4268 qlnx_reuse_rx_data(rxq);
4272 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4273 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4276 mpc = sw_rx_data->data;
4279 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4281 fp->err_rx_mp_null++;
4285 rxq->tpa_info[agg_index].agg_state =
4287 ecore_chain_consume(&rxq->rx_bd_ring);
4288 rxq->sw_rx_cons =
4289 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4293 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4296 " buffer\n", fp->rss_id);
4298 qlnx_reuse_rx_data(rxq);
4304 rxq->tpa_info[agg_index].agg_state =
4307 ecore_chain_consume(&rxq->rx_bd_ring);
4308 rxq->sw_rx_cons =
4309 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4314 mpc->m_flags &= ~M_PKTHDR;
4315 mpc->m_next = NULL;
4316 mpc->m_len = cqe->len_list[i];
4321 mpl->m_len = ha->rx_buf_size;
4322 mpl->m_next = mpc;
4326 ecore_chain_consume(&rxq->rx_bd_ring);
4327 rxq->sw_rx_cons =
4328 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4332 fp->rss_id, mpf, mpl);
4335 mp = rxq->tpa_info[agg_index].mpl;
4336 mp->m_len = ha->rx_buf_size;
4337 mp->m_next = mpf;
4338 rxq->tpa_info[agg_index].mpl = mpl;
4346 struct qlnx_rx_queue *rxq,
4355 if_t ifp = ha->ifp;
4369 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4370 cqe->total_packet_len, cqe->num_of_bds,
4371 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta,
4372 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4373 cqe->len_list[3]);
4375 agg_index = cqe->tpa_agg_index;
4378 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4380 fp->err_rx_tpa_invalid_agg_num++;
4385 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id);
4387 if (cqe->len_list[i] == 0)
4390 if (rxq->tpa_info[agg_index].agg_state !=
4392 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id);
4394 qlnx_reuse_rx_data(rxq);
4398 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4399 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4402 mpc = sw_rx_data->data;
4405 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id);
4407 fp->err_rx_mp_null++;
4411 rxq->tpa_info[agg_index].agg_state =
4413 ecore_chain_consume(&rxq->rx_bd_ring);
4414 rxq->sw_rx_cons =
4415 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4419 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4422 " buffer\n", fp->rss_id);
4424 qlnx_reuse_rx_data(rxq);
4430 rxq->tpa_info[agg_index].agg_state =
4433 ecore_chain_consume(&rxq->rx_bd_ring);
4434 rxq->sw_rx_cons =
4435 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4440 mpc->m_flags &= ~M_PKTHDR;
4441 mpc->m_next = NULL;
4442 mpc->m_len = cqe->len_list[i];
4447 mpl->m_len = ha->rx_buf_size;
4448 mpl->m_next = mpc;
4452 ecore_chain_consume(&rxq->rx_bd_ring);
4453 rxq->sw_rx_cons =
4454 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4457 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id);
4460 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id);
4462 mp = rxq->tpa_info[agg_index].mpl;
4463 mp->m_len = ha->rx_buf_size;
4464 mp->m_next = mpf;
4467 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) {
4468 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id);
4470 if (rxq->tpa_info[agg_index].mpf != NULL)
4471 m_freem(rxq->tpa_info[agg_index].mpf);
4472 rxq->tpa_info[agg_index].mpf = NULL;
4473 rxq->tpa_info[agg_index].mpl = NULL;
4474 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4478 mp = rxq->tpa_info[agg_index].mpf;
4479 m_adj(mp, rxq->tpa_info[agg_index].placement_offset);
4480 mp->m_pkthdr.len = cqe->total_packet_len;
4482 if (mp->m_next == NULL)
4483 mp->m_len = mp->m_pkthdr.len;
4488 len += mpf->m_len;
4489 mpf = mpf->m_next;
4492 if (cqe->total_packet_len > len) {
4493 mpl = rxq->tpa_info[agg_index].mpl;
4494 mpl->m_len += (cqe->total_packet_len - len);
4499 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len));
4503 fp->rss_id, mp->m_pkthdr.csum_data,
4504 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len);
4508 rxq->tpa_info[agg_index].mpf = NULL;
4509 rxq->tpa_info[agg_index].mpl = NULL;
4510 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE;
4512 return (cqe->num_of_coalesced_segs);
4521 struct qlnx_rx_queue *rxq = fp->rxq;
4522 if_t ifp = ha->ifp;
4523 struct ecore_dev *cdev = &ha->cdev;
4529 lro = &rxq->lro;
4532 hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
4533 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4535 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
4538 * / BD in the while-loop before reading hw_comp_cons. If the CQE is
4556 ecore_chain_consume(&rxq->rx_comp_ring);
4557 cqe_type = cqe->fast_path_regular.type;
4570 qlnx_tpa_start(ha, fp, rxq,
4571 &cqe->fast_path_tpa_start);
4572 fp->tpa_start++;
4576 qlnx_tpa_cont(ha, fp, rxq,
4577 &cqe->fast_path_tpa_cont);
4578 fp->tpa_cont++;
4582 rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4583 &cqe->fast_path_tpa_end);
4584 fp->tpa_end++;
4595 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4596 mp = sw_rx_data->data;
4600 fp->err_rx_mp_null++;
4601 rxq->sw_rx_cons =
4602 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4605 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4609 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4610 len = le16toh(fp_cqe->pkt_len);
4611 pad = fp_cqe->placement_offset;
4615 cqe_type, fp_cqe->bitfields,
4616 le16toh(fp_cqe->vlan_tag),
4617 len, le16toh(fp_cqe->pars_flags.flags), pad);
4632 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4636 le16toh(cqe->fast_path_regular.pars_flags.flags));
4637 fp->err_rx_hw_errors++;
4639 qlnx_reuse_rx_data(rxq);
4646 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4649 qlnx_reuse_rx_data(rxq);
4651 fp->err_rx_alloc_errors++;
4658 ecore_chain_consume(&rxq->rx_bd_ring);
4660 len_on_first_bd = fp_cqe->len_on_first_bd;
4662 mp->m_pkthdr.len = len;
4665 mp->m_len = len_on_first_bd;
4668 (len - len_on_first_bd)) != 0) {
4677 fp->err_rx_jumbo_chain_pkts++;
4679 mp->m_len = len;
4682 mp->m_flags |= M_PKTHDR;
4685 mp->m_pkthdr.rcvif = ifp;
4688 mp->m_pkthdr.csum_flags = 0;
4690 mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4692 hash_type = fp_cqe->bitfields &
4718 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4719 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4722 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4723 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4726 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4727 mp->m_pkthdr.csum_data = 0xFFFF;
4728 mp->m_pkthdr.csum_flags |=
4732 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4733 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4734 mp->m_flags |= M_VLANTAG;
4753 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4756 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
4757 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4759 /* CR TPA - revisit how to handle budget in TPA perhaps
4766 qlnx_update_rx_prod(p_hwfn, rxq);
4783 ha = ivec->ha;
4785 if (ha->state != QLNX_STATE_OPEN) {
4789 idx = ivec->rss_idx;
4791 if ((idx = ivec->rss_idx) >= ha->num_rss) {
4793 ha->err_illegal_intr++;
4796 fp = &ha->fp_array[idx];
4799 ha->err_fp_null++;
4809 lro_enable = if_getcapenable(ha->ifp) & IFCAP_LRO;
4811 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
4814 for (tc = 0; tc < ha->num_tc; tc++) {
4815 txq = fp->txq[tc];
4818 ecore_chain_get_elem_left(&txq->tx_pbl)) <
4820 if (mtx_trylock(&fp->tx_mtx)) {
4822 tx_compl = fp->tx_pkts_completed;
4825 qlnx_tx_int(ha, fp, fp->txq[tc]);
4827 fp->tx_pkts_compl_intr +=
4828 (fp->tx_pkts_completed - tx_compl);
4829 if ((fp->tx_pkts_completed - tx_compl) <= 32)
4830 fp->tx_comInt[0]++;
4831 else if (((fp->tx_pkts_completed - tx_compl) > 32) &&
4832 ((fp->tx_pkts_completed - tx_compl) <= 64))
4833 fp->tx_comInt[1]++;
4834 else if(((fp->tx_pkts_completed - tx_compl) > 64) &&
4835 ((fp->tx_pkts_completed - tx_compl) <= 128))
4836 fp->tx_comInt[2]++;
4837 else if(((fp->tx_pkts_completed - tx_compl) > 128))
4838 fp->tx_comInt[3]++;
4840 mtx_unlock(&fp->tx_mtx);
4845 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold,
4849 fp->rx_pkts += rx_int;
4861 lro = &fp->rxq->lro;
4866 if (lro->lro_mbuf_count & ~1023)
4867 fp->lro_cnt_1024++;
4868 else if (lro->lro_mbuf_count & ~511)
4869 fp->lro_cnt_512++;
4870 else if (lro->lro_mbuf_count & ~255)
4871 fp->lro_cnt_256++;
4872 else if (lro->lro_mbuf_count & ~127)
4873 fp->lro_cnt_128++;
4874 else if (lro->lro_mbuf_count & ~63)
4875 fp->lro_cnt_64++;
4883 ecore_sb_update_sb_idx(fp->sb_info);
4885 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
4903 ha = (qlnx_host_t *)p_hwfn->p_dev;
4905 ha->sp_interrupts++;
4942 ha->parent_tag,/* parent */
4943 dma_buf->alignment,
4948 dma_buf->size, /* maxsize */
4950 dma_buf->size, /* maxsegsize */
4953 &dma_buf->dma_tag);
4959 ret = bus_dmamem_alloc(dma_buf->dma_tag,
4960 (void **)&dma_buf->dma_b,
4962 &dma_buf->dma_map);
4964 bus_dma_tag_destroy(dma_buf->dma_tag);
4969 ret = bus_dmamap_load(dma_buf->dma_tag,
4970 dma_buf->dma_map,
4971 dma_buf->dma_b,
4972 dma_buf->size,
4977 bus_dma_tag_destroy(dma_buf->dma_tag);
4978 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b,
4979 dma_buf->dma_map);
4980 ret = -1;
4984 dma_buf->dma_addr = b_addr;
4994 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map);
4995 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map);
4996 bus_dma_tag_destroy(dma_buf->dma_tag);
5009 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
5045 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
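Both allocation paths (lines 5009 and 5045) round the requested size up to a whole page before mapping it. This is the usual align-up idiom, valid because the page size is a power of two; for example, with a 4 KiB page, 5000 rounds up to 8192 and 4096 stays 4096.

#include <stddef.h>

/* Hypothetical illustration of the rounding at lines 5009/5045. */
static inline size_t
round_up_to_page(size_t size, size_t page_size)
{
	return ((size + (page_size - 1)) & ~(page_size - 1));
}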
5050 (void *)dma_p->dma_map, (void *)dma_p->dma_tag,
5051 dma_p->dma_b, (void *)dma_p->dma_addr, size);
5055 if (!ha->qlnxr_debug)
5066 dev = ha->pci_dev;
5082 &ha->parent_tag);
5086 return (-1);
5089 ha->flags.parent_tag = 1;
5097 if (ha->parent_tag != NULL) {
5098 bus_dma_tag_destroy(ha->parent_tag);
5099 ha->parent_tag = NULL;
5118 &ha->tx_tag)) {
5120 return (-1);
5129 if (ha->tx_tag != NULL) {
5130 bus_dma_tag_destroy(ha->tx_tag);
5131 ha->tx_tag = NULL;
5150 &ha->rx_tag)) {
5153 return (-1);
5161 if (ha->rx_tag != NULL) {
5162 bus_dma_tag_destroy(ha->rx_tag);
5163 ha->rx_tag = NULL;
5178 bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
5188 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5197 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5206 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5214 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5223 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5232 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
5245 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0)
5261 if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0)
5277 data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5278 (bus_size_t)(p_hwfn->reg_offset + reg_addr));
5288 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5289 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5299 bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \
5300 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value);
5313 cdev = p_hwfn->p_dev;
5315 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells));
5316 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value);
5326 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \
5327 (bus_size_t)(p_hwfn->db_offset + reg_addr), value);
5339 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5340 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5342 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset);
5353 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5354 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5356 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5367 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev;
5368 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview));
5370 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value);
5389 ha = ((struct ecore_dev *) p_dev)->ha;
5390 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE);
5399 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5401 qlnx_fill_link(ha, p_hwfn, &ha->if_link);
5403 prev_link_state = ha->link_up;
5404 ha->link_up = ha->if_link.link_up;
5406 if (prev_link_state != ha->link_up) {
5407 if (ha->link_up) {
5408 if_link_state_change(ha->ifp, LINK_STATE_UP);
5410 if_link_state_change(ha->ifp, LINK_STATE_DOWN);
5417 if (ha->sriov_initialized)
5431 p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) |
5434 p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD;
5461 ha = (qlnx_host_t *)hwfn->p_dev;
5473 ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type);
5481 ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type);
5489 if_link->link_up = true;
5490 if_link->speed = link_state.speed;
5493 if_link->supported_caps = QLNX_LINK_CAP_FIBRE;
5496 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg;
5500 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause;
5504 if_link->supported_caps |= QLNX_LINK_CAP_Pause;
5508 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half |
5513 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5517 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5521 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5525 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5529 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5531 if_link->advertised_caps = if_link->supported_caps;
5533 if_link->autoneg = link_params.speed.autoneg;
5534 if_link->duplex = QLNX_LINK_DUPLEX;
5539 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;
5542 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;
5545 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;
5548 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;
5551 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;
5554 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;
5557 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;
5560 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;
5563 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;
5569 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;
5579 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
5582 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
5593 for (i = 0; i < cdev->num_hwfns; i++) {
5594 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
5595 p_hwfn->pf_params = *func_params;
5599 p_hwfn->using_ll2 = true;
5625 params.int_mode = cdev->int_mode;
5647 (ha->num_rss) * (ha->num_tc + 1);
5651 if(ha->personality == ECORE_PCI_ETH_IWARP) {
5652 device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n");
5657 } else if(ha->personality == ECORE_PCI_ETH_ROCE) {
5658 device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n");
5670 cdev = &ha->cdev;
5676 cdev->int_mode = ECORE_INT_MODE_MSIX;
5677 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
5680 cdev->rx_coalesce_usecs = 255;
5681 cdev->tx_coalesce_usecs = 255;
5686 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
5687 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;
5702 device_t dev = ha->pci_dev;
5705 cdev = &ha->cdev;
5709 for (i = 0; i < ha->cdev.num_hwfns; i++) {
5710 if (ha->sp_handle[i])
5711 (void)bus_teardown_intr(dev, ha->sp_irq[i],
5712 ha->sp_handle[i]);
5714 ha->sp_handle[i] = NULL;
5716 if (ha->sp_irq[i])
5718 ha->sp_irq_rid[i], ha->sp_irq[i]);
5719 ha->sp_irq[i] = NULL;
5733 memcpy(cdev->name, name, NAME_SIZE);
5736 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
5739 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;
5759 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
5760 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
5761 stats->lan_stats.fcs_err = -1;
5765 ha->err_get_proto_invalid_type++;
5779 p_hwfn = &ha->cdev.hwfns[0];
5784 return (-1);
5799 p_hwfn = &ha->cdev.hwfns[0];
5804 return (-1);
5816 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS));
5817 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS));
5818 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS));
5828 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
5829 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
5831 fp->rss_id = rss_id;
5832 fp->edev = ha;
5833 fp->sb_info = &ha->sb_array[rss_id];
5834 fp->rxq = &ha->rxq_array[rss_id];
5835 fp->rxq->rxq_id = rss_id;
5837 for (tc = 0; tc < ha->num_tc; tc++) {
5838 txq_array_index = tc * ha->num_rss + rss_id;
5839 fp->txq[tc] = &ha->txq_array[txq_array_index];
5840 fp->txq[tc]->index = txq_array_index;
5843 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str,
5846 fp->tx_ring_full = 0;
5850 fp->tx_pkts_processed = 0;
5851 fp->tx_pkts_freed = 0;
5852 fp->tx_pkts_transmitted = 0;
5853 fp->tx_pkts_completed = 0;
5856 fp->tx_pkts_trans_ctx = 0;
5857 fp->tx_pkts_compl_ctx = 0;
5858 fp->tx_pkts_trans_fp = 0;
5859 fp->tx_pkts_compl_fp = 0;
5860 fp->tx_pkts_compl_intr = 0;
5862 fp->tx_lso_wnd_min_len = 0;
5863 fp->tx_defrag = 0;
5864 fp->tx_nsegs_gt_elem_left = 0;
5865 fp->tx_tso_max_nsegs = 0;
5866 fp->tx_tso_min_nsegs = 0;
5867 fp->err_tx_nsegs_gt_elem_left = 0;
5868 fp->err_tx_dmamap_create = 0;
5869 fp->err_tx_defrag_dmamap_load = 0;
5870 fp->err_tx_non_tso_max_seg = 0;
5871 fp->err_tx_dmamap_load = 0;
5872 fp->err_tx_defrag = 0;
5873 fp->err_tx_free_pkt_null = 0;
5874 fp->err_tx_cons_idx_conflict = 0;
5876 fp->rx_pkts = 0;
5877 fp->err_m_getcl = 0;
5878 fp->err_m_getjcl = 0;
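Each fastpath (one per RSS queue) gets one RX queue but num_tc TX queues, and all TX queues sit in a single flat txq_array indexed as tc * num_rss + rss_id. A small sketch of that 2-D to 1-D mapping, with illustrative queue and traffic-class counts:

#include <stdio.h>

int main(void)
{
    const int num_rss = 4;   /* illustrative RSS queue count     */
    const int num_tc  = 2;   /* illustrative traffic-class count */

    for (int tc = 0; tc < num_tc; tc++)
        for (int rss_id = 0; rss_id < num_rss; rss_id++)
            printf("tc=%d rss=%d -> txq_array[%d]\n",
                   tc, rss_id, tc * num_rss + rss_id);
    return 0;
}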
5888 cdev = &ha->cdev;
5890 if (sb_info->sb_virt) {
5891 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt),
5892 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt)));
5893 sb_info->sb_virt = NULL;
5905 hwfn_index = sb_id % cdev->num_hwfns;
5906 p_hwfn = &cdev->hwfns[hwfn_index];
5907 rel_sb_id = sb_id / cdev->num_hwfns;
5915 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
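On CMT (dual-engine) parts the global status-block id is striped across the hardware functions: sb_id % num_hwfns picks the engine and sb_id / num_hwfns is the id relative to that engine. The same mod/div split reappears further down when the driver picks the hwfn for a queue (rss_id % num_hwfns) and derives the per-engine TX queue id (index / num_hwfns). A quick sketch, assuming two engines:

#include <stdio.h>

int main(void)
{
    const int num_hwfns = 2;   /* illustrative dual-engine (CMT) device */

    for (int sb_id = 0; sb_id < 8; sb_id++) {
        int hwfn_index = sb_id % num_hwfns;   /* which engine             */
        int rel_sb_id  = sb_id / num_hwfns;   /* SB id within that engine */
        printf("sb_id=%d -> hwfn=%d rel_sb_id=%d\n",
               sb_id, hwfn_index, rel_sb_id);
    }
    return 0;
}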
5921 /* This function allocates fast-path status block memory */
5931 cdev = &ha->cdev;
5938 return -ENOMEM;
5950 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5955 for (i = 0; i < rxq->num_rx_buffers; i++) {
5956 rx_buf = &rxq->sw_rx_ring[i];
5958 if (rx_buf->data != NULL) {
5959 if (rx_buf->map != NULL) {
5960 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
5961 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
5962 rx_buf->map = NULL;
5964 m_freem(rx_buf->data);
5965 rx_buf->data = NULL;
5972 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
5977 cdev = &ha->cdev;
5979 qlnx_free_rx_buffers(ha, rxq);
5982 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]);
5983 if (rxq->tpa_info[i].mpf != NULL)
5984 m_freem(rxq->tpa_info[i].mpf);
5987 bzero((void *)&rxq->sw_rx_ring[0],
5991 if (rxq->rx_bd_ring.p_virt_addr) {
5992 ecore_chain_free(cdev, &rxq->rx_bd_ring);
5993 rxq->rx_bd_ring.p_virt_addr = NULL;
5997 if (rxq->rx_comp_ring.p_virt_addr &&
5998 rxq->rx_comp_ring.pbl_sp.p_virt_table) {
5999 ecore_chain_free(cdev, &rxq->rx_comp_ring);
6000 rxq->rx_comp_ring.p_virt_addr = NULL;
6001 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL;
6008 lro = &rxq->lro;
6017 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6029 rx_buf_size = rxq->rx_buf_size;
6035 return -ENOMEM;
6038 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6042 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6050 return -ENOMEM;
6053 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod];
6054 sw_rx_data->data = mp;
6055 sw_rx_data->dma_addr = dma_addr;
6056 sw_rx_data->map = map;
6059 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
6060 rx_bd->addr.hi = htole32(U64_HI(dma_addr));
6061 rx_bd->addr.lo = htole32(U64_LO(dma_addr));
6062 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6064 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
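The software RX ring indices advance with (idx + 1) & (RX_RING_SIZE - 1), here for the producer and again for both consumer and producer in qlnx_reuse_rx_data() below. The mask only behaves like a modulo because RX_RING_SIZE is a power of two. A sketch with an illustrative ring size of 8:

#include <stdio.h>

#define RING_SIZE 8   /* must be a power of two for the mask trick to work */

int main(void)
{
    unsigned prod = 0;

    for (int i = 0; i < 12; i++) {
        printf("step %2d: prod=%u\n", i, prod);
        prod = (prod + 1) & (RING_SIZE - 1);   /* wraps 7 -> 0 */
    }
    return 0;
}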
6085 return -ENOMEM;
6088 mp->m_len = mp->m_pkthdr.len = rx_buf_size;
6092 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs,
6100 return -ENOMEM;
6103 rx_buf = &tpa->rx_buf;
6107 rx_buf->data = mp;
6108 rx_buf->dma_addr = dma_addr;
6109 rx_buf->map = map;
6111 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD);
6121 rx_buf = &tpa->rx_buf;
6123 if (rx_buf->data != NULL) {
6124 if (rx_buf->map != NULL) {
6125 bus_dmamap_unload(ha->rx_tag, rx_buf->map);
6126 bus_dmamap_destroy(ha->rx_tag, rx_buf->map);
6127 rx_buf->map = NULL;
6129 m_freem(rx_buf->data);
6130 rx_buf->data = NULL;
6137 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq)
6142 cdev = &ha->cdev;
6144 rxq->num_rx_buffers = RX_RING_SIZE;
6146 rxq->rx_buf_size = ha->rx_buf_size;
6149 bzero((void *)&rxq->sw_rx_ring[0],
6160 &rxq->rx_bd_ring, NULL);
6172 &rxq->rx_comp_ring, NULL);
6180 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size,
6181 &rxq->tpa_info[i]);
6186 for (i = 0; i < rxq->num_rx_buffers; i++) {
6187 rc = qlnx_alloc_rx_buffer(ha, rxq);
6195 } else if (num_allocated < rxq->num_rx_buffers) {
6205 lro = &rxq->lro;
6207 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) {
6209 rxq->rxq_id);
6213 lro->ifp = ha->ifp;
6219 qlnx_free_mem_rxq(ha, rxq);
6220 return -ENOMEM;
6229 cdev = &ha->cdev;
6231 bzero((void *)&txq->sw_tx_ring[0],
6235 if (txq->tx_pbl.p_virt_addr) {
6236 ecore_chain_free(cdev, &txq->tx_pbl);
6237 txq->tx_pbl.p_virt_addr = NULL;
6251 cdev = &ha->cdev;
6253 bzero((void *)&txq->sw_tx_ring[0],
6263 &txq->tx_pbl, NULL);
6269 txq->num_tx_buffers = TX_RING_SIZE;
6275 return -ENOMEM;
6282 if_t ifp = ha->ifp;
6284 if (mtx_initialized(&fp->tx_mtx)) {
6285 if (fp->tx_br != NULL) {
6286 mtx_lock(&fp->tx_mtx);
6288 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) {
6289 fp->tx_pkts_freed++;
6293 mtx_unlock(&fp->tx_mtx);
6295 buf_ring_free(fp->tx_br, M_DEVBUF);
6296 fp->tx_br = NULL;
6298 mtx_destroy(&fp->tx_mtx);
6308 qlnx_free_mem_sb(ha, fp->sb_info);
6310 qlnx_free_mem_rxq(ha, fp->rxq);
6312 for (tc = 0; tc < ha->num_tc; tc++)
6313 qlnx_free_mem_txq(ha, fp, fp->txq[tc]);
6321 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
6322 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id);
6324 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
6326 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF,
6327 M_NOWAIT, &fp->tx_mtx);
6328 if (fp->tx_br == NULL) {
6330 ha->dev_unit, fp->rss_id);
6331 return -ENOMEM;
6341 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id);
6345 if (ha->rx_jumbo_buf_eq_mtu) {
6346 if (ha->max_frame_size <= MCLBYTES)
6347 ha->rx_buf_size = MCLBYTES;
6348 else if (ha->max_frame_size <= MJUMPAGESIZE)
6349 ha->rx_buf_size = MJUMPAGESIZE;
6350 else if (ha->max_frame_size <= MJUM9BYTES)
6351 ha->rx_buf_size = MJUM9BYTES;
6352 else if (ha->max_frame_size <= MJUM16BYTES)
6353 ha->rx_buf_size = MJUM16BYTES;
6355 if (ha->max_frame_size <= MCLBYTES)
6356 ha->rx_buf_size = MCLBYTES;
6358 ha->rx_buf_size = MJUMPAGESIZE;
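rx_buf_size is chosen as the smallest mbuf cluster that holds a full frame when rx_jumbo_buf_eq_mtu is set; otherwise anything larger than a 2 KB cluster falls back to a page-sized cluster. A userspace sketch with the usual FreeBSD cluster sizes hard-coded (MJUMPAGESIZE assumed to be 4096 here):

#include <stdio.h>

/* Illustrative stand-ins for the FreeBSD mbuf cluster sizes. */
#define MCLBYTES_     2048
#define MJUMPAGESIZE_ 4096    /* PAGE_SIZE on most platforms */
#define MJUM9BYTES_   9216
#define MJUM16BYTES_  16384

static int rx_buf_size(int max_frame_size, int jumbo_buf_eq_mtu)
{
    if (jumbo_buf_eq_mtu) {
        if (max_frame_size <= MCLBYTES_)     return MCLBYTES_;
        if (max_frame_size <= MJUMPAGESIZE_) return MJUMPAGESIZE_;
        if (max_frame_size <= MJUM9BYTES_)   return MJUM9BYTES_;
        return MJUM16BYTES_;   /* simplification: treat 16K as the ceiling */
    }
    /* default: small frames use 2K clusters, everything else a page */
    return (max_frame_size <= MCLBYTES_) ? MCLBYTES_ : MJUMPAGESIZE_;
}

int main(void)
{
    int frames[] = { 1518, 4000, 9000, 9600 };

    for (int i = 0; i < 4; i++)
        printf("frame %5d -> %5d (jumbo_eq_mtu) / %5d (default)\n",
               frames[i], rx_buf_size(frames[i], 1), rx_buf_size(frames[i], 0));
    return 0;
}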
6361 rc = qlnx_alloc_mem_rxq(ha, fp->rxq);
6365 for (tc = 0; tc < ha->num_tc; tc++) {
6366 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]);
6375 return -ENOMEM;
6383 for (i = 0; i < ha->num_rss; i++) {
6384 struct qlnx_fastpath *fp = &ha->fp_array[i];
6396 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) {
6397 struct qlnx_fastpath *fp = &ha->fp_array[rss_id];
6437 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6439 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid;
6440 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6445 QL_DPRINT1(ha, "Failed to start V-PORT %d"
6447 return -ENOMEM;
6452 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n",
6471 sp_params.vport_id = params->vport_id;
6474 params->update_vport_active_rx_flg;
6475 sp_params.vport_active_rx_flg = params->vport_active_rx_flg;
6478 params->update_vport_active_tx_flg;
6479 sp_params.vport_active_tx_flg = params->vport_active_tx_flg;
6482 params->update_inner_vlan_removal_flg;
6483 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
6485 sp_params.sge_tpa_params = params->sge_tpa_params;
6487 /* RSS is a bit tricky, since the upper layer isn't familiar with hwfns.
6488 * We need to re-fix the RSS values per engine for CMT.
6490 if (params->rss_params->update_rss_config)
6491 sp_params.rss_params = params->rss_params;
6496 p_hwfn = &cdev->hwfns[i];
6498 if ((cdev->num_hwfns > 1) &&
6499 params->rss_params->update_rss_config &&
6500 params->rss_params->rss_enable) {
6501 rss = params->rss_params;
6504 fp_index = ((cdev->num_hwfns * j) + i) %
6505 ha->num_rss;
6507 fp = &ha->fp_array[fp_index];
6508 rss->rss_ind_table[j] = fp->rxq->handle;
6513 rss->rss_ind_table[j],
6514 rss->rss_ind_table[j+1],
6515 rss->rss_ind_table[j+2],
6516 rss->rss_ind_table[j+3],
6517 rss->rss_ind_table[j+4],
6518 rss->rss_ind_table[j+5],
6519 rss->rss_ind_table[j+6],
6520 rss->rss_ind_table[j+7]);
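The comment above explains why the indirection table has to be rebuilt per engine on CMT devices: the upper layer fills one table over all num_rss queues, but each engine only owns every num_hwfns-th queue, so entry j of engine i is redirected to fastpath ((num_hwfns * j) + i) % num_rss. A sketch showing that each engine then references only its own queues (counts are illustrative):

#include <stdio.h>

int main(void)
{
    const int num_hwfns = 2;   /* illustrative CMT device              */
    const int num_rss   = 4;   /* illustrative RSS queue count         */
    const int tbl_size  = 8;   /* shortened indirection table for demo */

    for (int i = 0; i < num_hwfns; i++) {
        printf("hwfn %d ind_table:", i);
        for (int j = 0; j < tbl_size; j++) {
            int fp_index = ((num_hwfns * j) + i) % num_rss;
            printf(" %d", fp_index);   /* fastpath/rxq used by slot j */
        }
        printf("\n");
    }
    return 0;
}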
6525 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
6527 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id);
6536 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \
6538 params->vport_id, params->vport_active_tx_flg,
6539 params->vport_active_rx_flg,
6540 params->update_vport_active_tx_flg,
6541 params->update_vport_active_rx_flg);
6548 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
6551 ecore_chain_consume(&rxq->rx_bd_ring);
6553 ecore_chain_produce(&rxq->rx_bd_ring);
6555 &rxq->sw_rx_ring[rxq->sw_rx_cons];
6557 &rxq->sw_rx_ring[rxq->sw_rx_prod];
6559 sw_rx_data_prod->data = sw_rx_data_cons->data;
6562 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
6563 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);
6569 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
6579 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
6580 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
6593 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
6596 internal_ram_wr(rxq->hw_rxq_prod_addr,
6629 struct ecore_dev *cdev = &ha->cdev;
6630 struct ecore_rss_params *rss_params = &ha->rss_params;
6638 ifp = ha->ifp;
6640 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss);
6642 if (!ha->num_rss) {
6643 QL_DPRINT1(ha, "Cannot update V-PORT as active as there"
6645 return -EINVAL;
6656 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc);
6668 fp = &ha->fp_array[i];
6669 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)];
6678 qparams.p_sb = fp->sb_info;
6683 p_hwfn->hw_info.opaque_fid,
6685 fp->rxq->rx_buf_size, /* bd_max_bytes */
6687 fp->rxq->rx_bd_ring.p_phys_addr,
6689 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring),
6691 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring),
6695 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc);
6699 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod;
6700 fp->rxq->handle = rx_ret_params.p_handle;
6701 fp->rxq->hw_cons_ptr =
6702 &fp->sb_info->sb_virt->pi_array[RX_PI];
6704 qlnx_update_rx_prod(p_hwfn, fp->rxq);
6706 for (tc = 0; tc < ha->num_tc; tc++) {
6707 struct qlnx_tx_queue *txq = fp->txq[tc];
6714 qparams.queue_id = txq->index / cdev->num_hwfns;
6717 qparams.p_sb = fp->sb_info;
6721 p_hwfn->hw_info.opaque_fid,
6724 ecore_chain_get_pbl_phys(&txq->tx_pbl),
6725 ecore_chain_get_page_cnt(&txq->tx_pbl),
6730 txq->index, rc);
6734 txq->doorbell_addr = tx_ret_params.p_doorbell;
6735 txq->handle = tx_ret_params.p_handle;
6737 txq->hw_cons_ptr =
6738 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
6739 SET_FIELD(txq->tx_db.data.params,
6741 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
6743 SET_FIELD(txq->tx_db.data.params,
6747 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
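The SET_FIELD() calls pack the doorbell destination, aggregation command and value selector into the bit-fields of tx_db.data.params. SET_FIELD in the ecore headers is essentially a mask-and-shift helper keyed off NAME_MASK/NAME_SHIFT pairs; the sketch below uses a simplified equivalent with hypothetical field definitions, not the real ETH_DB_DATA_* layout:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for ecore's SET_FIELD(): clear the field, then OR in
 * the new value, using NAME_MASK / NAME_SHIFT pairs. */
#define SET_FIELD(value, name, flag)                                    \
    do {                                                                \
        (value) &= ~((uint32_t)name##_MASK << name##_SHIFT);            \
        (value) |= ((uint32_t)(flag) & name##_MASK) << name##_SHIFT;    \
    } while (0)

/* Hypothetical field layout, for illustration only. */
#define DB_DEST_MASK     0x3
#define DB_DEST_SHIFT    0
#define DB_AGG_CMD_MASK  0x3
#define DB_AGG_CMD_SHIFT 2
#define DB_VAL_SEL_MASK  0xf
#define DB_VAL_SEL_SHIFT 4

int main(void)
{
    uint32_t params = 0;

    SET_FIELD(params, DB_DEST, 1);      /* e.g. "xcm" destination     */
    SET_FIELD(params, DB_AGG_CMD, 2);   /* e.g. "set" aggregation cmd */
    SET_FIELD(params, DB_VAL_SEL, 5);   /* e.g. producer value select */

    printf("doorbell params = 0x%02x\n", (unsigned)params);   /* 0x59 */
    return 0;
}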
6752 if (ha->num_rss > 1) {
6753 rss_params->update_rss_config = 1;
6754 rss_params->rss_enable = 1;
6755 rss_params->update_rss_capabilities = 1;
6756 rss_params->update_rss_ind_table = 1;
6757 rss_params->update_rss_key = 1;
6758 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
6760 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */
6763 fp = &ha->fp_array[(i % ha->num_rss)];
6764 rss_params->rss_ind_table[i] = fp->rxq->handle;
6768 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i];
6799 tpa_params.tpa_max_size = (uint16_t)(-1);
6808 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc);
6824 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6827 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) {
6828 mtx_lock(&fp->tx_mtx);
6832 mtx_unlock(&fp->tx_mtx);
6836 hw_bd_cons = le16toh(*txq->hw_cons_ptr);
6839 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index);
6852 cdev = &ha->cdev;
6863 vport_update_params.rss_params = &ha->rss_params;
6864 vport_update_params.rss_params->update_rss_config = 0;
6865 vport_update_params.rss_params->rss_enable = 0;
6879 fp = &ha->fp_array[i];
6881 for (tc = 0; tc < ha->num_tc; tc++) {
6882 struct qlnx_tx_queue *txq = fp->txq[tc];
6891 for (i = ha->num_rss - 1; i >= 0; i--) {
6892 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)];
6894 fp = &ha->fp_array[i];
6897 for (tc = 0; tc < ha->num_tc; tc++) {
6900 tx_queue_id = tc * ha->num_rss + i;
6902 fp->txq[tc]->handle);
6912 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false,
6915 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i);
6922 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
6924 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0);
6944 cdev = &ha->cdev;
6972 cdev = &ha->cdev;
6986 cdev = &ha->cdev;
6988 mcast = &ha->ecore_mcast;
6991 mcast->opcode = ECORE_FILTER_REMOVE;
6994 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
6995 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
6996 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
6997 memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN);
6998 mcast->num_mc_addrs++;
7001 mcast = &ha->ecore_mcast;
7005 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
7006 ha->nmcast = 0;
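A multicast slot in ha->mcast[] counts as occupied when any of its six address bytes is non-zero; only those entries are copied into the filter command and reflected in num_mc_addrs before the table is cleared. A small sketch of that "non-zero MAC means in use" idiom (array sizes and addresses are illustrative):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN  6
#define MAX_MCAST 4   /* the driver uses QLNX_MAX_NUM_MULTICAST_ADDRS */

/* true if any byte of the address is set, i.e. the slot is occupied */
static int mac_in_use(const unsigned char *mac)
{
    for (int i = 0; i < ETH_ALEN; i++)
        if (mac[i])
            return 1;
    return 0;
}

int main(void)
{
    unsigned char mcast[MAX_MCAST][ETH_ALEN] = {
        { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
        { 0 },                                   /* empty slot, skipped */
        { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
    };
    unsigned char cmd[MAX_MCAST][ETH_ALEN];
    int num_mc_addrs = 0;

    memset(cmd, 0, sizeof(cmd));
    for (int i = 0; i < MAX_MCAST; i++) {
        if (mac_in_use(mcast[i])) {
            memcpy(cmd[i], mcast[i], ETH_ALEN);
            num_mc_addrs++;
        }
    }
    printf("%d multicast addresses queued in the filter command\n", num_mc_addrs);
    return 0;
}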
7026 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
7038 cdev = &ha->cdev;
7060 const if_t ifp = ha->ifp;
7066 ifa->ifa_addr != NULL) {
7067 sdl = (struct sockaddr_dl *) ifa->ifa_addr;
7071 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
7084 if (qlnx_vf_device(ha) == 0 || (if_getflags(ha->ifp) & IFF_PROMISC)) {
7087 } else if (if_getflags(ha->ifp) & IFF_ALLMULTI) {
7090 ha->filter = filter;
7108 cdev = &ha->cdev;
7111 hwfn = &cdev->hwfns[i];
7115 return -EBUSY;
7137 count = ha->hw_stats.common.rx_ucast_pkts +
7138 ha->hw_stats.common.rx_mcast_pkts +
7139 ha->hw_stats.common.rx_bcast_pkts;
7143 count = ha->hw_stats.common.rx_crc_errors +
7144 ha->hw_stats.common.rx_align_errors +
7145 ha->hw_stats.common.rx_oversize_packets +
7146 ha->hw_stats.common.rx_undersize_packets;
7150 count = ha->hw_stats.common.tx_ucast_pkts +
7151 ha->hw_stats.common.tx_mcast_pkts +
7152 ha->hw_stats.common.tx_bcast_pkts;
7156 count = ha->hw_stats.common.tx_err_drop_pkts;
7163 count = ha->hw_stats.common.rx_ucast_bytes +
7164 ha->hw_stats.common.rx_mcast_bytes +
7165 ha->hw_stats.common.rx_bcast_bytes;
7169 count = ha->hw_stats.common.tx_ucast_bytes +
7170 ha->hw_stats.common.tx_mcast_bytes +
7171 ha->hw_stats.common.tx_bcast_bytes;
7175 count = ha->hw_stats.common.rx_mcast_bytes;
7179 count = ha->hw_stats.common.tx_mcast_bytes;
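The counter lookups above are the body of the driver's if_get_counter handler: each generic ifnet counter is a sum of the matching fields in the hardware statistics block (unicast + multicast + broadcast for packets and bytes, the various receive error counters for input errors, and so on). A trimmed-down sketch of that mapping; the struct below is an illustration, not the real ecore_eth_stats layout:

#include <stdint.h>
#include <stdio.h>

/* Trimmed-down stand-in for the hardware statistics block. */
struct hw_stats {
    uint64_t rx_ucast_pkts, rx_mcast_pkts, rx_bcast_pkts;
    uint64_t tx_ucast_pkts, tx_mcast_pkts, tx_bcast_pkts;
    uint64_t rx_crc_errors, rx_align_errors;
    uint64_t rx_oversize_packets, rx_undersize_packets;
};

enum counter { IPACKETS, OPACKETS, IERRORS };

static uint64_t get_counter(const struct hw_stats *s, enum counter c)
{
    switch (c) {
    case IPACKETS:
        return s->rx_ucast_pkts + s->rx_mcast_pkts + s->rx_bcast_pkts;
    case OPACKETS:
        return s->tx_ucast_pkts + s->tx_mcast_pkts + s->tx_bcast_pkts;
    case IERRORS:
        return s->rx_crc_errors + s->rx_align_errors +
               s->rx_oversize_packets + s->rx_undersize_packets;
    }
    return 0;
}

int main(void)
{
    struct hw_stats s = { .rx_ucast_pkts = 100, .rx_mcast_pkts = 5,
                          .rx_bcast_pkts = 2, .rx_crc_errors = 1 };

    printf("ipackets=%llu ierrors=%llu\n",
           (unsigned long long)get_counter(&s, IPACKETS),
           (unsigned long long)get_counter(&s, IERRORS));
    return 0;
}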
7199 if (ha->error_recovery) {
7200 ha->error_recovery = 0;
7201 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
7205 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
7207 if (ha->storm_stats_gather)
7210 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7222 dev = ha->pci_dev;
7237 ha->num_rss, ha->num_tc);
7239 for (i = 0; i < ha->num_rss; i++) {
7240 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
7242 NULL, qlnx_fp_isr, &ha->irq_vec[i],
7243 &ha->irq_vec[i].handle))) {
7250 ha->irq_vec[i].irq_rid,
7251 ha->irq_vec[i].irq, ha->irq_vec[i].handle);
7253 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
7260 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
7267 /* Ask for link-up using current configuration */
7271 qlnx_link_update(&ha->cdev.hwfns[0]);
7273 ha->state = QLNX_STATE_OPEN;
7275 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
7277 if (ha->flags.callout_init)
7278 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7286 ha->num_rss = 0;
7301 ifp = ha->ifp;
7304 for (i = 0; i < ha->num_rss; i++) {
7305 struct qlnx_fastpath *fp = &ha->fp_array[i];
7308 lro = &fp->rxq->lro;
7326 cdev = &ha->cdev;
7327 dev = ha->pci_dev;
7330 QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state);
7332 if (ha->state == QLNX_STATE_OPEN) {
7338 for (i = 0; i < ha->num_rss; i++) {
7339 if (ha->irq_vec[i].handle) {
7341 ha->irq_vec[i].irq,
7342 ha->irq_vec[i].handle);
7343 ha->irq_vec[i].handle = NULL;
7352 if (ha->flags.callout_init)
7353 callout_drain(&ha->qlnx_callout);
7357 ha->state = QLNX_STATE_CLOSED;
7366 int rval = -1;
7372 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7397 int rval = -1;
7403 p_hwfn = &ha->cdev.hwfns[hwfn_index];
7435 if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
7436 ha->storm_stats_gather = 0;
7440 cdev = &ha->cdev;
7443 hwfn = &cdev->hwfns[i];
7449 index = ha->storm_stats_index +
7452 s_stats = &ha->storm_stats[index];
7457 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7461 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7465 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7469 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7474 s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7478 s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7482 s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7486 s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7491 s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7495 s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7499 s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7503 s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7508 s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7512 s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7516 s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7520 s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7525 s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7529 s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7533 s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7537 s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7542 s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);
7546 s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);
7550 s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);
7554 s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);
7559 ha->storm_stats_index++;
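Storm statistics are sampled into one flat array holding QLNX_STORM_STATS_SAMPLES_PER_HWFN slots per engine: the slot for engine i on the current pass is storm_stats_index + i * SAMPLES_PER_HWFN, the index advances once per pass, and gathering stops when the per-engine budget is exhausted. A sketch of that indexing with illustrative sizes:

#include <stdio.h>

#define SAMPLES_PER_HWFN 4   /* illustrative; QLNX_STORM_STATS_SAMPLES_PER_HWFN */
#define NUM_HWFNS        2

int main(void)
{
    int storm_stats_index  = 0;
    int storm_stats_gather = 1;

    while (storm_stats_gather) {
        for (int i = 0; i < NUM_HWFNS; i++) {
            int slot = storm_stats_index + (i * SAMPLES_PER_HWFN);
            printf("pass %d, hwfn %d -> storm_stats[%d]\n",
                   storm_stats_index, i, slot);
        }
        storm_stats_index++;
        if (storm_stats_index >= SAMPLES_PER_HWFN)
            storm_stats_gather = 0;   /* stop once every slot is filled */
    }
    return 0;
}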
7575 dev = ha->pci_dev;
7589 len -= 16;
7701 memset(vf_info->forced_mac, 0, ETH_ALEN);
7703 vf_info->forced_vlan = 0;
7722 QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev),
7729 return -EINVAL;
7732 if (params->type == ECORE_FILTER_MAC ||
7733 params->type == ECORE_FILTER_MAC_VLAN)
7734 memcpy(params->mac, vf->forced_mac, ETH_ALEN);
7750 QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev),
7777 cdev = p_hwfn->p_dev;
7779 for (i = 0; i < cdev->num_hwfns; i++) {
7780 if (&cdev->hwfns[i] == p_hwfn)
7784 if (i >= cdev->num_hwfns)
7785 return (-1);
7793 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7797 ha, p_hwfn->p_dev, p_hwfn, rel_vf_id);
7799 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7800 return (-1);
7802 if (ha->sriov_task[i].pf_taskqueue != NULL) {
7803 atomic_testandset_32(&ha->sriov_task[i].flags,
7806 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7807 &ha->sriov_task[i].pf_task);
7822 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7825 if (!ha->sriov_initialized)
7829 ha, p_hwfn->p_dev, p_hwfn);
7831 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7834 if (ha->sriov_task[i].pf_taskqueue != NULL) {
7835 atomic_testandset_32(&ha->sriov_task[i].flags,
7838 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7839 &ha->sriov_task[i].pf_task);
7858 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
7862 ha, p_hwfn->p_dev, p_hwfn);
7864 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
7868 ha, p_hwfn->p_dev, p_hwfn, i);
7870 if (ha->sriov_task[i].pf_taskqueue != NULL) {
7871 atomic_testandset_32(&ha->sriov_task[i].flags,
7874 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
7875 &ha->sriov_task[i].pf_task);
7886 dev = ha->pci_dev;
7891 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
7892 pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
7894 pci_iov_schema_add_bool(vf_schema, "allow-promisc",
7896 pci_iov_schema_add_uint16(vf_schema, "num-queues",
7902 ha->sriov_initialized = 0;
7905 ha->sriov_initialized = 1;
7917 cdev = &ha->cdev;
7922 struct ecore_hwfn *hwfn = &cdev->hwfns[i];
7950 ecore_iov_release_hw_for_vf(&cdev->hwfns[i],
7971 /* Since we have an equal resource distribution per-VF, and we assume
7975 base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues;
7977 params->rel_vf_id = vfid;
7979 for (i = 0; i < params->num_queues; i++) {
7980 params->req_rx_queue[i] = base + i;
7981 params->req_tx_queue[i] = base + i;
7985 params->vport_id = vfid + 1;
7986 params->rss_eng_id = vfid + 1;
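Each VF receives an equal, contiguous slice of L2 queues placed after the PF's own queues: the base is the PF queue count plus vfid * num_queues, and the VF's vport and RSS engine ids are simply vfid + 1, leaving id 0 to the PF. A sketch of that layout with illustrative counts:

#include <stdio.h>

int main(void)
{
    const int pf_l2_queues  = 8;   /* illustrative FEAT_NUM(hwfn, ECORE_PF_L2_QUE) */
    const int queues_per_vf = 2;   /* illustrative params->num_queues              */
    const int num_vfs       = 3;

    for (int vfid = 0; vfid < num_vfs; vfid++) {
        int base = pf_l2_queues + vfid * queues_per_vf;

        printf("VF %d: vport=%d rss_eng=%d queues:", vfid, vfid + 1, vfid + 1);
        for (int i = 0; i < queues_per_vf; i++)
            printf(" %d", base + i);
        printf("\n");
    }
    return 0;
}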
8002 return (-1);
8008 cdev = &ha->cdev;
8010 max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT);
8017 (RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1));
8021 ha->vf_attr = malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF,
8024 if (ha->vf_attr == NULL)
8031 struct ecore_hwfn *hwfn = &cdev->hwfns[j];
8062 ha->num_vfs = num_vfs;
8063 qlnx_inform_vf_link_state(&cdev->hwfns[0], ha);
8074 ha->num_vfs = 0;
8076 return (-1);
8094 free(ha->vf_attr, M_QLNXBUF);
8095 ha->vf_attr = NULL;
8097 ha->num_vfs = 0;
8114 return (-1);
8119 if (vfnum > (ha->num_vfs - 1)) {
8121 vfnum, (ha->num_vfs - 1));
8124 vf_attr = &ha->vf_attr[vfnum];
8126 if (nvlist_exists_binary(params, "mac-addr")) {
8127 mac = nvlist_get_binary(params, "mac-addr", &size);
8128 bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN);
8131 __func__, vf_attr->mac_addr[0],
8132 vf_attr->mac_addr[1], vf_attr->mac_addr[2],
8133 vf_attr->mac_addr[3], vf_attr->mac_addr[4],
8134 vf_attr->mac_addr[5]);
8135 p_hwfn = &ha->cdev.hwfns[0];
8136 ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr,
8153 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8171 i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
8194 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8202 QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n");
8219 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n");
8247 ha = (qlnx_host_t *)(p_hwfn->p_dev);
8249 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
8252 if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8256 if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8260 if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8273 for (i = 0; i < ha->cdev.num_hwfns; i++) {
8274 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
8279 TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue, p_hwfn);
8281 ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
8283 &ha->sriov_task[i].pf_taskqueue);
8285 if (ha->sriov_task[i].pf_taskqueue == NULL)
8286 return (-1);
8288 taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1,
8291 QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue);
8302 for (i = 0; i < ha->cdev.num_hwfns; i++) {
8303 if (ha->sriov_task[i].pf_taskqueue != NULL) {
8304 taskqueue_drain(ha->sriov_task[i].pf_taskqueue,
8305 &ha->sriov_task[i].pf_task);
8306 taskqueue_free(ha->sriov_task[i].pf_taskqueue);
8307 ha->sriov_task[i].pf_taskqueue = NULL;
8321 if (!p_hwfn->pf_iov_info)
8335 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
8340 if (ha->link_up) {
8346 link.speed = (p_hwfn->p_dev->num_hwfns > 1) ?