Lines Matching +full:gpio +full:- +full:7 +full:- +full:segment

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2007-2014 QLogic Corporation. All rights reserved.
64 * FLR Support - bxe_pf_flr_clnup() is called during nic_load in the per
241 static int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
253 /* Interrupt Mode: 0 (IRQ), 1 (MSI/IRQ), and 2 (MSI-X/MSI/IRQ) */
256 &bxe_interrupt_mode, 0, "Interrupt (MSI-X/MSI/INTx) mode");
261 &bxe_queue_count, 0, "Multi-Queue queue count");
288 /* PCI MRRS: -1 (Auto), 0 (128B), 1 (256B), 2 (512B), 3 (1KB) */
289 static int bxe_mrrs = -1;
298 /* 4-tuple RSS support for UDP: 0 (disabled), 1 (enabled) */
763 shft = sizeof(crc32_result) * 8 - 1;
769 shft-- ;
772 /* temp[31-bit] = crc32_result[bit] */
776 /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
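A minimal standalone sketch of the reflection and byte swap those two comments describe (illustrative only, not the driver's own routine; the function name is made up):

static uint32_t
crc32_reflect_and_swap(uint32_t crc32_result)
{
    uint32_t temp = 0;
    int bit;

    /* temp[31 - bit] = crc32_result[bit] */
    for (bit = 0; bit < 32; bit++) {
        if (crc32_result & (1U << bit)) {
            temp |= (1U << (31 - bit));
        }
    }

    /* crc32_result = {temp[7:0], temp[15:8], temp[23:16], temp[31:24]} */
    return (((temp & 0x000000ff) << 24) |
            ((temp & 0x0000ff00) <<  8) |
            ((temp & 0x00ff0000) >>  8) |
            ((temp & 0xff000000) >> 24));
}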
860 * (bus_dmamap_load) so we use the value of map_arg->maxsegs to pass any
872 dma->paddr = 0;
873 dma->nseg = 0;
874 BLOGE(dma->sc, "Failed DMA alloc '%s' (%d)!\n", dma->msg, error);
876 dma->paddr = segs->ds_addr;
877 dma->nseg = nseg;
897 if (dma->size > 0) {
899 (unsigned long)dma->size);
904 dma->sc = sc;
905 dma->size = size;
906 snprintf(dma->msg, sizeof(dma->msg), "%s", msg);
908 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
921 &dma->tag); /* returned dma tag */
928 rc = bus_dmamem_alloc(dma->tag,
929 (void **)&dma->vaddr,
931 &dma->map);
934 bus_dma_tag_destroy(dma->tag);
939 rc = bus_dmamap_load(dma->tag,
940 dma->map,
941 dma->vaddr,
948 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
949 bus_dma_tag_destroy(dma->tag);
961 if (dma->size > 0) {
962 DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL"));
964 bus_dmamap_sync(dma->tag, dma->map,
966 bus_dmamap_unload(dma->tag, dma->map);
967 bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
968 bus_dma_tag_destroy(dma->tag);
984 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
985 pci_write_config(sc->dev, PCICFG_GRC_DATA, val, 4);
986 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
995 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, addr, 4);
996 val = pci_read_config(sc->dev, PCICFG_GRC_DATA, 4);
997 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
1016 return (-1);
1023 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1031 return (-1);
1046 return (-1);
1062 return (-1);
1069 (MISC_REG_DRIVER_CONTROL_7 + ((func - 6) * 8));
1077 return (-1);
1141 return (-1);
1177 return (-1);
1241 rc = -1;
1250 * converting to big-endian will do the work
1258 if (rc == -1) {
1280 return (-1);
1283 if ((offset + buf_size) > sc->devinfo.flash_size) {
1286 offset, buf_size, sc->devinfo.flash_size);
1287 return (-1);
1308 buf_size -= sizeof(uint32_t);
1356 rc = -1;
1366 if (rc == -1) {
1388 if ((offset + buf_size) > sc->devinfo.flash_size) {
1391 offset, buf_size, sc->devinfo.flash_size);
1392 return (-1);
1445 return (-1);
1452 if ((offset + buf_size) > sc->devinfo.flash_size) {
1455 offset, buf_size, sc->devinfo.flash_size);
1456 return (-1);
1471 if (written_so_far == (buf_size - sizeof(uint32_t))) {
1571 dmae->opcode = bxe_dmae_opcode(sc, src_type, dst_type,
1575 dmae->comp_addr_lo = U64_LO(BXE_SP_MAPPING(sc, wb_comp));
1576 dmae->comp_addr_hi = U64_HI(BXE_SP_MAPPING(sc, wb_comp));
1577 dmae->comp_val = DMAE_COMP_VAL;
1601 (sc->recovery_state != BXE_RECOVERY_DONE &&
1602 sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) {
1604 *wb_comp, sc->recovery_state);
1609 timeout--;
1615 *wb_comp, sc->recovery_state);
1635 if (!sc->dmae_ready) {
1672 if (!sc->dmae_ready) {
1715 len -= dmae_wr_max;
1730 cxt->ustorm_ag_context.cdu_usage =
1734 cxt->xstorm_ag_context.cdu_reserved =
1863 return (-1);
1873 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output low\n", spio);
1880 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> output high\n", spio);
1887 BLOGD(sc, DBG_LOAD, "Set SPIO 0x%x -> input\n", spio);
1907 /* The GPIO should be swapped if swap register is set and active */
1916 BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d"
1919 return (-1);
1922 /* read GPIO value */
1935 /* The GPIO should be swapped if swap register is set and active */
1944 BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
1947 return (-1);
1952 /* read GPIO and mask except the float bits */
1958 "Set GPIO %d (shift %d) -> output low\n",
1967 "Set GPIO %d (shift %d) -> output high\n",
1976 "Set GPIO %d (shift %d) -> input\n",
2003 /* read GPIO and mask except the float bits */
2011 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output low\n", pins);
2017 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> output high\n", pins);
2023 BLOGD(sc, DBG_PHY, "Set GPIO 0x%x -> input\n", pins);
2029 BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x"
2032 return (-1);
2047 /* The GPIO should be swapped if swap register is set and active */
2056 BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d"
2059 return (-1);
2064 /* read GPIO int */
2070 "Clear GPIO INT %d (shift %d) -> output low\n",
2079 "Set GPIO INT %d (shift %d) -> output high\n",
2151 seq = ++sc->fw_seq;
2173 /* Ruh-roh! */
2271 * RCQ of the multi-queue/RSS connection being initialized.
2287 * on the RCQ of the multi-queue/RSS connection being torn down. Don't
2305 * Used for connection offload. Completes on the RCQ of the multi-queue
2319 struct eth_spe *next_spe = sc->spq_prod_bd;
2321 if (sc->spq_prod_bd == sc->spq_last_bd) {
2323 sc->spq_prod_bd = sc->spq;
2324 sc->spq_prod_idx = 0;
2326 sc->spq_prod_bd++;
2327 sc->spq_prod_idx++;
2347 sc->spq_prod_idx);
2349 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
2354 * bxe_is_contextless_ramrod - check if the current command ends on EQ
2377 * bxe_sp_post - place a single command on an SP ring
2407 if (!atomic_load_acq_long(&sc->eq_spq_left)) {
2410 return (-1);
2413 if (!atomic_load_acq_long(&sc->cq_spq_left)) {
2416 return (-1);
2423 spe->hdr.conn_and_cmd_data =
2432 spe->hdr.type = htole16(type);
2434 spe->data.update_data_addr.hi = htole32(data_hi);
2435 spe->data.update_data_addr.lo = htole32(data_lo);
2443 atomic_subtract_acq_long(&sc->eq_spq_left, 1);
2445 atomic_subtract_acq_long(&sc->cq_spq_left, 1);
2448 BLOGD(sc, DBG_SP, "SPQE -> %#jx\n", (uintmax_t)sc->spq_dma.paddr);
2449 BLOGD(sc, DBG_SP, "FUNC_RDATA -> %p / %#jx\n",
2453 sc->spq_prod_idx,
2454 (uint32_t)U64_HI(sc->spq_dma.paddr),
2455 (uint32_t)(U64_LO(sc->spq_dma.paddr) + (uint8_t *)sc->spq_prod_bd - (uint8_t *)sc->spq),
2462 atomic_load_acq_long(&sc->cq_spq_left),
2463 atomic_load_acq_long(&sc->eq_spq_left));
2472 * bxe_debug_print_ind_table - prints the indirection table configuration.
2504 while (t->bxe_name != NULL) {
2505 if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
2506 ((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
2507 ((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
2509 "%s (%c%d) BXE v:%s", t->bxe_name,
2526 snprintf(sc->core_sx_name, sizeof(sc->core_sx_name),
2527 "bxe%d_core_lock", sc->unit);
2528 sx_init(&sc->core_sx, sc->core_sx_name);
2530 snprintf(sc->core_mtx_name, sizeof(sc->core_mtx_name),
2531 "bxe%d_core_lock", sc->unit);
2532 mtx_init(&sc->core_mtx, sc->core_mtx_name, NULL, MTX_DEF);
2535 snprintf(sc->sp_mtx_name, sizeof(sc->sp_mtx_name),
2536 "bxe%d_sp_lock", sc->unit);
2537 mtx_init(&sc->sp_mtx, sc->sp_mtx_name, NULL, MTX_DEF);
2539 snprintf(sc->dmae_mtx_name, sizeof(sc->dmae_mtx_name),
2540 "bxe%d_dmae_lock", sc->unit);
2541 mtx_init(&sc->dmae_mtx, sc->dmae_mtx_name, NULL, MTX_DEF);
2543 snprintf(sc->port.phy_mtx_name, sizeof(sc->port.phy_mtx_name),
2544 "bxe%d_phy_lock", sc->unit);
2545 mtx_init(&sc->port.phy_mtx, sc->port.phy_mtx_name, NULL, MTX_DEF);
2547 snprintf(sc->fwmb_mtx_name, sizeof(sc->fwmb_mtx_name),
2548 "bxe%d_fwmb_lock", sc->unit);
2549 mtx_init(&sc->fwmb_mtx, sc->fwmb_mtx_name, NULL, MTX_DEF);
2551 snprintf(sc->print_mtx_name, sizeof(sc->print_mtx_name),
2552 "bxe%d_print_lock", sc->unit);
2553 mtx_init(&(sc->print_mtx), sc->print_mtx_name, NULL, MTX_DEF);
2555 snprintf(sc->stats_mtx_name, sizeof(sc->stats_mtx_name),
2556 "bxe%d_stats_lock", sc->unit);
2557 mtx_init(&(sc->stats_mtx), sc->stats_mtx_name, NULL, MTX_DEF);
2559 snprintf(sc->mcast_mtx_name, sizeof(sc->mcast_mtx_name),
2560 "bxe%d_mcast_lock", sc->unit);
2561 mtx_init(&(sc->mcast_mtx), sc->mcast_mtx_name, NULL, MTX_DEF);
2568 sx_destroy(&sc->core_sx);
2570 if (mtx_initialized(&sc->core_mtx)) {
2571 mtx_destroy(&sc->core_mtx);
2575 if (mtx_initialized(&sc->sp_mtx)) {
2576 mtx_destroy(&sc->sp_mtx);
2579 if (mtx_initialized(&sc->dmae_mtx)) {
2580 mtx_destroy(&sc->dmae_mtx);
2583 if (mtx_initialized(&sc->port.phy_mtx)) {
2584 mtx_destroy(&sc->port.phy_mtx);
2587 if (mtx_initialized(&sc->fwmb_mtx)) {
2588 mtx_destroy(&sc->fwmb_mtx);
2591 if (mtx_initialized(&sc->print_mtx)) {
2592 mtx_destroy(&sc->print_mtx);
2595 if (mtx_initialized(&sc->stats_mtx)) {
2596 mtx_destroy(&sc->stats_mtx);
2599 if (mtx_initialized(&sc->mcast_mtx)) {
2600 mtx_destroy(&sc->mcast_mtx);
2607 if_t ifp = sc->ifp;
2619 sc->fw_drv_pulse_wr_seq);
2630 prod = fp->tx_bd_prod;
2631 cons = fp->tx_bd_cons;
2635 return (int16_t)(sc->tx_ring_size) - used;
2644 hw_cons = le16toh(*fp->tx_cons_sb);
2645 return (hw_cons != fp->tx_pkt_cons);
2651 /* expand this for multi-cos if ever supported */
2661 rx_cq_cons_sb = le16toh(*fp->rx_cq_cons_sb);
2664 return (fp->rx_cq_cons != rx_cq_cons_sb);
2672 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2673 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
2678 fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type);
2692 BLOGD(sc, DBG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
2713 command, fp->index);
2718 q_obj->complete_cmd(sc, q_obj, drv_cmd)) {
2720 * q_obj->complete_cmd() failure means that this was
2723 * In this case we don't want to increase the sc->spq_left
2731 atomic_add_acq_long(&sc->cq_spq_left, 1);
2733 BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n",
2734 atomic_load_acq_long(&sc->cq_spq_left));
2740 * the current aggregation queue as in-progress.
2754 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
2759 fp->index, queue, cons, prod);
2765 fp->index, queue, max_agg_queues));
2767 KASSERT((tpa_info->state == BXE_TPA_STATE_STOP),
2769 fp->index, queue));
2772 tmp_bd = tpa_info->bd;
2780 fp->index, queue, cons, prod);
2782 *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2789 tpa_info->state = BXE_TPA_STATE_START;
2790 tpa_info->placement_offset = cqe->placement_offset;
2791 tpa_info->parsing_flags = le16toh(cqe->pars_flags.flags);
2792 tpa_info->vlan_tag = le16toh(cqe->vlan_tag);
2793 tpa_info->len_on_bd = le16toh(cqe->len_on_bd);
2795 fp->rx_tpa_queue_used |= (1 << queue);
2802 index = (sc->max_rx_bufs != RX_BD_USABLE) ?
2806 tpa_info->bd = fp->rx_mbuf_chain[cons];
2810 rx_buf = &fp->rx_mbuf_chain[cons];
2812 if (rx_buf->m_map != NULL) {
2813 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
2815 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
2823 fp->rx_mbuf_chain[cons].m = NULL;
2827 fp->rx_mbuf_chain[index] = tmp_bd;
2830 rx_bd = &fp->rx_chain[index];
2831 rx_bd->addr_hi = htole32(U64_HI(tpa_info->seg.ds_addr));
2832 rx_bd->addr_lo = htole32(U64_LO(tpa_info->seg.ds_addr));
2856 frag_size = le16toh(cqe->pkt_len) - tpa_info->len_on_bd;
2860 fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
2869 fp->index, cqe_idx, pages, le16toh(cqe->pkt_len),
2870 tpa_info->len_on_bd, frag_size);
2873 *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7));
2884 sge_idx = RX_SGE(le16toh(cqe->sgl_or_raw_data.sgl[j]));
2894 fp->index, queue, i, j, sge_idx, frag_size, frag_len);
2896 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
2906 m_frag->m_len = frag_len;
2910 fp->eth_q_stats.mbuf_alloc_sge--;
2913 m->m_pkthdr.len += frag_len;
2914 frag_size -= frag_len;
2919 fp->index, queue, frag_size);
2930 int idx = RX_SGE_TOTAL_PER_PAGE * i - 1;
2933 BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
2934 idx--;
2943 memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
2957 uint16_t last_max = fp->last_max_sge;
2960 fp->last_max_sge = idx;
2980 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
2981 RX_SGE(le16toh(cqe->sgl[i])));
2985 "fp[%02d] fp_cqe->sgl[%d] = %d\n",
2986 fp->index, sge_len - 1,
2987 le16toh(cqe->sgl[sge_len - 1]));
2991 le16toh(cqe->sgl[sge_len - 1]));
2993 last_max = RX_SGE(fp->last_max_sge);
2995 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
3004 if (__predict_true(fp->sge_mask[i])) {
3008 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
3013 fp->rx_sge_prod += delta;
3014 /* clear page-end entries */
3019 "fp[%02d] fp->last_max_sge=%d fp->rx_sge_prod=%d\n",
3020 fp->index, fp->last_max_sge, fp->rx_sge_prod);
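The sge_mask being manipulated here is a plain bit vector stored as 64-bit elements; the BIT_VEC64-style accesses behave roughly like this sketch (names and the element size are assumptions for illustration):

#define SGE_ELEM_BITS_SKETCH 64   /* assumed: bits per vector element */

static inline void
bit_vec64_clear_sketch(uint64_t *vec, unsigned int idx)
{
    vec[idx / SGE_ELEM_BITS_SKETCH] &= ~(1ULL << (idx % SGE_ELEM_BITS_SKETCH));
}

static inline int
bit_vec64_test_sketch(const uint64_t *vec, unsigned int idx)
{
    return ((vec[idx / SGE_ELEM_BITS_SKETCH] >> (idx % SGE_ELEM_BITS_SKETCH)) & 1);
}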
3037 if_t ifp = sc->ifp;
3043 fp->index, queue, tpa_info->placement_offset,
3044 le16toh(cqe->pkt_len), pages, tpa_info->vlan_tag);
3046 m = tpa_info->bd.m;
3052 fp->eth_q_stats.rx_soft_errors++;
3057 m_adj(m, tpa_info->placement_offset);
3058 m->m_pkthdr.len = m->m_len = tpa_info->len_on_bd;
3061 fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3062 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3063 m->m_pkthdr.csum_data = 0xffff;
3064 m->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED |
3073 fp->eth_q_stats.rx_soft_errors++;
3076 if (tpa_info->parsing_flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3077 m->m_pkthdr.ether_vtag = tpa_info->vlan_tag;
3078 m->m_flags |= M_VLANTAG;
3085 m->m_pkthdr.flowid = fp->index;
3089 fp->eth_q_stats.rx_tpa_pkts++;
3096 fp->eth_q_stats.mbuf_alloc_tpa--;
3100 fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
3101 fp->rx_tpa_queue_used &= ~(1 << queue);
3120 m->m_len = lenonbd;
3122 frag_size = len - lenonbd;
3126 sge_idx = RX_SGE(le16toh(cqe_fp->sgl_or_raw_data.sgl[j]));
3128 m_frag = fp->rx_sge_mbuf_chain[sge_idx].m;
3130 m_frag->m_len = frag_len;
3138 fp->eth_q_stats.mbuf_alloc_sge--;
3143 frag_size -= frag_len;
3146 bxe_update_sge_prod(fp->sc, fp, frags, &cqe_fp->sgl_or_raw_data);
3155 if_t ifp = sc->ifp;
3164 hw_cq_cons = le16toh(*fp->rx_cq_cons_sb);
3169 bd_cons = fp->rx_bd_cons;
3170 bd_prod = fp->rx_bd_prod;
3172 sw_cq_cons = fp->rx_cq_cons;
3173 sw_cq_prod = fp->rx_cq_prod;
3183 fp->index, hw_cq_cons, sw_cq_cons);
3198 cqe = &fp->rcq_chain[comp_ring_cons];
3199 cqe_fp = &cqe->fast_path_cqe;
3200 cqe_fp_flags = cqe_fp->type_error_flags;
3207 fp->index,
3214 cqe_fp->status_flags,
3215 le32toh(cqe_fp->rss_hash_result),
3216 le16toh(cqe_fp->vlan_tag),
3217 le16toh(cqe_fp->pkt_len_or_gro_seg_len),
3218 le16toh(cqe_fp->len_on_bd));
3226 rx_buf = &fp->rx_mbuf_chain[bd_cons];
3234 bxe_tpa_start(sc, fp, cqe_fp->queue_index,
3243 queue = cqe->end_agg_cqe.queue_index;
3244 tpa_info = &fp->rx_tpa_info[queue];
3247 fp->index, queue);
3249 frag_size = (le16toh(cqe->end_agg_cqe.pkt_len) -
3250 tpa_info->len_on_bd);
3254 &cqe->end_agg_cqe, comp_ring_cons);
3256 bxe_update_sge_prod(sc, fp, pages, &cqe->end_agg_cqe.sgl_or_raw_data);
3267 fp->eth_q_stats.rx_soft_errors++;
3271 len = le16toh(cqe_fp->pkt_len_or_gro_seg_len);
3272 lenonbd = le16toh(cqe_fp->len_on_bd);
3273 pad = cqe_fp->placement_offset;
3275 m = rx_buf->m;
3279 bd_cons, fp->index);
3291 (sc->max_rx_bufs != RX_BD_USABLE) ?
3299 fp->index, rc);
3300 fp->eth_q_stats.rx_soft_errors++;
3302 if (sc->max_rx_bufs != RX_BD_USABLE) {
3304 memcpy(&fp->rx_mbuf_chain[bd_prod], rx_buf,
3313 fp->eth_q_stats.mbuf_alloc_rx--;
3317 m->m_pkthdr.len = m->m_len = len;
3320 fp->eth_q_stats.rx_bxe_service_rxsgl++;
3324 fp->eth_q_stats.rx_jumbo_sge_pkts++;
3326 fp->eth_q_stats.rx_erroneous_jumbo_sge_pkts++;
3333 m->m_pkthdr.csum_flags = 0;
3338 if (!(cqe->fast_path_cqe.status_flags &
3340 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
3343 fp->eth_q_stats.rx_hw_csum_errors++;
3345 fp->eth_q_stats.rx_ofld_frames_csum_ip++;
3346 m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
3351 if (!(cqe->fast_path_cqe.status_flags &
3355 fp->eth_q_stats.rx_hw_csum_errors++;
3357 fp->eth_q_stats.rx_ofld_frames_csum_tcp_udp++;
3358 m->m_pkthdr.csum_data = 0xFFFF;
3359 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID |
3366 if (cqe->fast_path_cqe.pars_flags.flags & PARSING_FLAGS_INNER_VLAN_EXIST) {
3367 m->m_pkthdr.ether_vtag = cqe->fast_path_cqe.vlan_tag;
3368 m->m_flags |= M_VLANTAG;
3372 m->m_pkthdr.flowid = fp->index;
3397 if (rx_pkts == sc->rx_budget) {
3398 fp->eth_q_stats.rx_budget_reached++;
3403 fp->rx_bd_cons = bd_cons;
3404 fp->rx_bd_prod = bd_prod_fw;
3405 fp->rx_cq_cons = sw_cq_cons;
3406 fp->rx_cq_prod = sw_cq_prod;
3409 bxe_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod, fp->rx_sge_prod);
3411 fp->eth_q_stats.rx_pkts += rx_pkts;
3412 fp->eth_q_stats.rx_calls++;
3424 struct bxe_sw_tx_bd *tx_buf = &fp->tx_mbuf_chain[idx];
3426 uint16_t bd_idx = TX_BD(tx_buf->first_bd);
3430 /* unmap the mbuf from non-paged memory */
3431 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
3433 tx_start_bd = &fp->tx_chain[bd_idx].start_bd;
3434 nbd = le16toh(tx_start_bd->nbd) - 1;
3436 new_cons = (tx_buf->first_bd + nbd);
3439 if (__predict_true(tx_buf->m != NULL)) {
3440 m_freem(tx_buf->m);
3441 fp->eth_q_stats.mbuf_alloc_tx--;
3443 fp->eth_q_stats.tx_chain_lost_mbuf++;
3446 tx_buf->m = NULL;
3447 tx_buf->first_bd = 0;
3459 if ((fp->watchdog_timer == 0) || (--fp->watchdog_timer)) {
3464 BLOGE(sc, "TX watchdog timeout on fp[%02d], resetting!\n", fp->index);
3469 &sc->sp_err_timeout_task, hz/10);
3471 return (-1);
3479 if_t ifp = sc->ifp;
3485 bd_cons = fp->tx_bd_cons;
3486 hw_cons = le16toh(*fp->tx_cons_sb);
3487 sw_cons = fp->tx_pkt_cons;
3494 fp->index, hw_cons, sw_cons, pkt_cons);
3501 fp->tx_pkt_cons = sw_cons;
3502 fp->tx_bd_cons = bd_cons;
3506 fp->index, hw_cons, fp->tx_pkt_cons, fp->tx_pkt_prod);
3518 if (fp->tx_pkt_prod != fp->tx_pkt_cons) {
3520 fp->watchdog_timer = BXE_TX_TIMEOUT;
3524 fp->watchdog_timer = 0;
3536 for (i = 0; i < sc->num_queues; i++) {
3537 fp = &sc->fp[i];
3554 count--;
3580 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
3656 return (-1);
3684 ramrod_param.rx_mode_obj = &sc->rx_mode_obj;
3687 ramrod_param.pstate = &sc->sp_state;
3693 bxe_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
3705 "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id,
3721 rc = bxe_fill_accept_flags(sc, sc->rx_mode, &rx_accept_flags,
3731 return (bxe_set_q_rx_mode(sc, sc->fp[0].cl_id, rx_mode_flags,
3743 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
3748 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
3767 BLOGI(sc, "NO MCP - load counts[%d] %d, %d, %d\n",
3770 load_count[path][0]--;
3771 load_count[path][1 + port]--;
3772 BLOGI(sc, "NO MCP - new load counts[%d] %d, %d, %d\n",
3827 if (!sc->port.pmf) {
3835 * 2. Sync SP queue - this guarantees us that attention handling started
3839 * pending bit of transaction from STARTED-->TX_STOPPED, if we already
3841 * State will return to STARTED after completion of TX_STOPPED-->STARTED
3850 while (ecore_func_get_state(sc, &sc->func_obj) !=
3851 ECORE_F_STATE_STARTED && tout--) {
3855 if (ecore_func_get_state(sc, &sc->func_obj) != ECORE_F_STATE_STARTED) {
3863 "Forcing STARTED-->TX_STOPPED-->STARTED\n");
3865 func_params.f_obj = &sc->func_obj;
3868 /* STARTED-->TX_STOPPED */
3872 /* TX_STOPPED-->STARTED */
3884 struct bxe_fastpath *fp = &sc->fp[index];
3888 BLOGD(sc, DBG_LOAD, "stopping queue %d cid %d\n", index, fp->index);
3890 q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
3927 while (tout--) {
3929 if (!(atomic_load_acq_long(&sc->sp_state) & mask)) {
3938 tmp = atomic_load_acq_long(&sc->sp_state);
3957 func_params.f_obj = &sc->func_obj;
3986 func_params.f_obj = &sc->func_obj;
4026 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC, FALSE);
4032 rc = bxe_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC, TRUE);
4050 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
4051 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
4057 rparam.mcast_obj = &sc->mcast_obj;
4087 for (i = 0; i < sc->num_queues; i++) {
4094 	 * If SP settings haven't completed so far - something
4156 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
4168 rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
4177 rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags,
4185 rparam.mcast_obj = &sc->mcast_obj;
4219 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
4221 for (i = 0; i < sc->num_queues; i++) {
4224 fp = &sc->fp[i];
4225 fp->watchdog_timer = 0;
4239 if (IS_PF(sc) && sc->recovery_state != BXE_RECOVERY_DONE &&
4240 (sc->state == BXE_STATE_CLOSED || sc->state == BXE_STATE_ERROR)) {
4251 sc->recovery_state = BXE_RECOVERY_DONE;
4252 sc->is_leader = 0;
4258 " state = 0x%x\n", sc->recovery_state, sc->state);
4259 return (-1);
4264 	 * did not complete successfully - all resources are released.
4266 if ((sc->state == BXE_STATE_CLOSED) ||
4267 (sc->state == BXE_STATE_ERROR)) {
4271 sc->state = BXE_STATE_CLOSING_WAITING_HALT;
4277 sc->rx_mode = BXE_RX_MODE_NONE;
4280 if (IS_PF(sc) && !sc->grcdump_done) {
4282 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
4300 if (!sc->grcdump_done)
4336 sc->sp_state = 0;
4338 sc->port.pmf = 0;
4348 sc->state = BXE_STATE_CLOSED;
4351 * Check if there are pending parity attentions. If there are - set
4389 ifm = &sc->ifmedia;
4392 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
4396 switch (IFM_SUBTYPE(ifm->ifm_media)) {
4406 IFM_SUBTYPE(ifm->ifm_media));
4427 ifmr->ifm_status = IFM_AVALID;
4430 ifmr->ifm_active = IFM_ETHER;
4434 ifmr->ifm_active |= IFM_NONE;
4437 __func__, sc->link_vars.link_up);
4442 if (sc->link_vars.link_up) {
4443 ifmr->ifm_status |= IFM_ACTIVE;
4444 ifmr->ifm_active |= IFM_FDX;
4446 ifmr->ifm_active |= IFM_NONE;
4452 ifmr->ifm_active |= sc->media;
4461 long work = atomic_load_acq_long(&sc->chip_tq_flags);
4467 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
4500 int mtu_min = (ETH_MIN_PACKET_SIZE - ETH_HLEN);
4501 int mtu_max = (MJUM9BYTES - ETH_OVERHEAD - IP_HEADER_ALIGNMENT_PADDING);
4507 ifr->ifr_mtu);
4509 if (sc->mtu == ifr->ifr_mtu) {
4514 if ((ifr->ifr_mtu < mtu_min) || (ifr->ifr_mtu > mtu_max)) {
4515 BLOGE(sc, "Unsupported MTU size %d (range is %d-%d)\n",
4516 ifr->ifr_mtu, mtu_min, mtu_max);
4521 atomic_store_rel_int((volatile unsigned int *)&sc->mtu,
4522 (unsigned long)ifr->ifr_mtu);
4525 (unsigned long)ifr->ifr_mtu);
4526 XXX - Not sure why it needs to be atomic
4528 if_setmtu(ifp, ifr->ifr_mtu);
4542 } else if(sc->state != BXE_STATE_DISABLED) {
4572 mask = (ifr->ifr_reqcap ^ if_getcapenable(ifp));
4681 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
4691 if (reinit && (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
4693 "Re-initializing hardware from IOCTL change\n");
4712 if (!(sc->debug & DBG_MBUF)) {
4725 i, m, m->m_len, m->m_flags, M_FLAG_BITS, m->m_data);
4727 if (m->m_flags & M_PKTHDR) {
4729 "%02d: - m_pkthdr: tot_len=%d flags=0x%b csum_flags=%b\n",
4730 i, m->m_pkthdr.len, m->m_flags, M_FLAG_BITS,
4731 (int)m->m_pkthdr.csum_flags, CSUM_BITS);
4734 if (m->m_flags & M_EXT) {
4735 switch (m->m_ext.ext_type) {
4751 "%02d: - m_ext: %p ext_size=%d type=%s\n",
4752 i, m->m_ext.ext_buf, m->m_ext.ext_size, type);
4759 m = m->m_next;
4766 * Check that (13 total bds - 3 bds) = 10 bd window >= MSS.
4768  * The headers come in a separate bd in FreeBSD so 13-3=10.
4783 num_wnds = nsegs - wnd_size;
4784 lso_mss = htole16(m->m_pkthdr.tso_segsz);
4802 /* subtract the first mbuf->m_len of the last wndw(-header) */
4803 wnd_sum -= htole16(segs[wnd_idx+1].ds_len);
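Pulled together, the window check these fragments come from looks roughly like the following simplified sketch (illustrative only; the driver's actual check also accounts for the header segment separately):

static int
tso_window_ok_sketch(const bus_dma_segment_t *segs, int nsegs,
                     uint16_t lso_mss, int wnd_size /* e.g. 13 - 3 = 10 */)
{
    uint32_t wnd_sum = 0;
    int wnd_idx;

    if (nsegs <= wnd_size) {
        return (1);                 /* fewer segments than one window */
    }

    /* sum of the first window of segments */
    for (wnd_idx = 0; wnd_idx < wnd_size; wnd_idx++) {
        wnd_sum += segs[wnd_idx].ds_len;
    }

    /* every window of wnd_size consecutive segments must cover >= MSS */
    for (wnd_idx = 0; wnd_idx <= (nsegs - wnd_size); wnd_idx++) {
        if (wnd_sum < lso_mss) {
            return (0);             /* window violation, defrag required */
        }
        if ((wnd_idx + wnd_size) < nsegs) {
            wnd_sum -= segs[wnd_idx].ds_len;
            wnd_sum += segs[wnd_idx + wnd_size].ds_len;
        }
    }

    return (1);
}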
4827 if (m->m_pkthdr.csum_flags == CSUM_IP) {
4836 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4838 proto = ntohs(eh->evl_proto);
4841 proto = ntohs(eh->evl_encap_proto);
4847 ip4 = (m->m_len < sizeof(struct ip)) ?
4848 (struct ip *)m->m_next->m_data :
4849 (struct ip *)(m->m_data + e_hlen);
4850 /* ip_hl is number of 32-bit words */
4851 ip_hlen = (ip4->ip_hl << 2);
4856 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4857 (struct ip6_hdr *)m->m_next->m_data :
4858 (struct ip6_hdr *)(m->m_data + e_hlen);
4876 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4879 fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4881 /* th_off is number of 32-bit words */
4882 *parsing_data |= ((th->th_off <<
4885 return (l4_off + (th->th_off << 2)); /* entire header length */
4886 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4888 fp->eth_q_stats.tx_ofld_frames_csum_udp++;
4917 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
4919 proto = ntohs(eh->evl_proto);
4922 proto = ntohs(eh->evl_encap_proto);
4928 ip4 = (m->m_len < sizeof(struct ip)) ?
4929 (struct ip *)m->m_next->m_data :
4930 (struct ip *)(m->m_data + e_hlen);
4931 /* ip_hl is number of 32-bit words */
4932 ip_hlen = (ip4->ip_hl << 1);
4937 ip6 = (m->m_len < sizeof(struct ip6_hdr)) ?
4938 (struct ip6_hdr *)m->m_next->m_data :
4939 (struct ip6_hdr *)(m->m_data + e_hlen);
4953 if (m->m_flags & M_VLANTAG) {
4954 pbd->global_data =
4957 pbd->global_data = htole16(hlen);
4960 pbd->ip_hlen_w = ip_hlen;
4962 hlen += pbd->ip_hlen_w;
4966 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4970 /* th_off is number of 32-bit words */
4971 hlen += (uint16_t)(th->th_off << 1);
4972 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4981 pbd->total_hlen_w = htole16(hlen);
4983 if (m->m_pkthdr.csum_flags & (CSUM_TCP |
4986 fp->eth_q_stats.tx_ofld_frames_csum_tcp++;
4987 pbd->tcp_pseudo_csum = ntohs(th->th_sum);
4988 } else if (m->m_pkthdr.csum_flags & (CSUM_UDP |
4990 fp->eth_q_stats.tx_ofld_frames_csum_udp++;
5009 tmp_uh = (uint32_t *)((uint8_t *)uh - 10);
5016 pbd->tcp_pseudo_csum = ntohs(in_addword(uh->uh_sum, ~tmp_csum));
5026 *parsing_data |= ((m->m_pkthdr.tso_segsz <<
5046 e_hlen = (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) ?
5051 ip = (struct ip *)(m->m_data + e_hlen);
5052 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2));
5054 pbd->lso_mss = htole16(m->m_pkthdr.tso_segsz);
5055 pbd->tcp_send_seq = ntohl(th->th_seq);
5056 pbd->tcp_flags = ((ntohl(((uint32_t *)th)[3]) >> 16) & 0xff);
5060 pbd->ip_id = ntohs(ip->ip_id);
5061 pbd->tcp_pseudo_csum =
5062 ntohs(in_pseudo(ip->ip_src.s_addr,
5063 ip->ip_dst.s_addr,
5067 pbd->tcp_pseudo_csum =
5068 ntohs(in_pseudo(&ip6->ip6_src,
5069 &ip6->ip6_dst,
5073 pbd->global_data |=
5112 sc = fp->sc;
5123 pkt_prod = fp->tx_pkt_prod;
5124 bd_prod = fp->tx_bd_prod;
5129 tx_buf = &fp->tx_mbuf_chain[TX_BD(pkt_prod)];
5130 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5131 tx_buf->m_map, m0,
5136 fp->eth_q_stats.tx_dma_mapping_failure++;
5142 fp->eth_q_stats.mbuf_defrag_attempts++;
5145 fp->eth_q_stats.mbuf_defrag_failures++;
5150 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5151 tx_buf->m_map, m0,
5154 fp->eth_q_stats.tx_dma_mapping_failure++;
5173 fp->eth_q_stats.tx_hw_queue_full++;
5174 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5180 if (__predict_false(fp->eth_q_stats.tx_hw_max_queue_depth <
5181 (TX_BD_USABLE - tx_bd_avail))) {
5182 fp->eth_q_stats.tx_hw_max_queue_depth = (TX_BD_USABLE - tx_bd_avail);
5191 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5195 fp->eth_q_stats.tx_window_violation_tso++;
5198 fp->eth_q_stats.tx_window_violation_std++;
5202 fp->eth_q_stats.mbuf_defrag_attempts++;
5203 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5207 fp->eth_q_stats.mbuf_defrag_failures++;
5213 error = bus_dmamap_load_mbuf_sg(fp->tx_mbuf_tag,
5214 tx_buf->m_map, m0,
5217 fp->eth_q_stats.tx_dma_mapping_failure++;
5222 if(m0->m_pkthdr.csum_flags & CSUM_TSO) {
5228 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5229 fp->eth_q_stats.nsegs_path1_errors++;
5234 bus_dmamap_unload(fp->tx_mbuf_tag, tx_buf->m_map);
5235 fp->eth_q_stats.nsegs_path2_errors++;
5250 fp->eth_q_stats.tx_soft_errors++;
5251 fp->eth_q_stats.mbuf_alloc_tx--;
5260 if (m0->m_flags & M_BCAST) {
5262 } else if (m0->m_flags & M_MCAST) {
5267 tx_buf->m = m0;
5268 tx_buf->first_bd = fp->tx_bd_prod;
5269 tx_buf->flags = 0;
5272 tx_start_bd = &fp->tx_chain[TX_BD(bd_prod)].start_bd;
5276 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
5278 tx_start_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
5279 tx_start_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
5280 tx_start_bd->nbytes = htole16(segs[0].ds_len);
5281 total_pkt_size += tx_start_bd->nbytes;
5282 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
5284 tx_start_bd->general_data = (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
5288 tx_start_bd->nbd = htole16(nbds);
5290 if (m0->m_flags & M_VLANTAG) {
5291 tx_start_bd->vlan_or_ethertype = htole16(m0->m_pkthdr.ether_vtag);
5292 tx_start_bd->bd_flags.as_bitfield |=
5299 tx_start_bd->vlan_or_ethertype = eh->evl_encap_proto;
5302 tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod);
5312 if (m0->m_pkthdr.csum_flags) {
5313 if (m0->m_pkthdr.csum_flags & CSUM_IP) {
5314 fp->eth_q_stats.tx_ofld_frames_csum_ip++;
5315 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
5318 if (m0->m_pkthdr.csum_flags & CSUM_TCP_IPV6) {
5319 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5321 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP_IPV6) {
5322 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_IPV6 |
5325 } else if ((m0->m_pkthdr.csum_flags & CSUM_TCP) ||
5326 (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
5327 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
5328 } else if (m0->m_pkthdr.csum_flags & CSUM_UDP) {
5329 tx_start_bd->bd_flags.as_bitfield |= (ETH_TX_BD_FLAGS_L4_CSUM |
5335 pbd_e2 = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e2;
5338 if (m0->m_pkthdr.csum_flags) {
5347 pbd_e1x = &fp->tx_chain[TX_BD(bd_prod)].parse_bd_e1x;
5350 if (m0->m_pkthdr.csum_flags) {
5356 pbd_e1x->global_data |= htole16(global_data);
5360 if (m0->m_pkthdr.csum_flags & CSUM_TSO) {
5361 fp->eth_q_stats.tx_ofld_frames_lso++;
5362 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
5364 if (__predict_false(tx_start_bd->nbytes > hlen)) {
5365 fp->eth_q_stats.tx_ofld_frames_lso_hdr_splits++;
5369 tx_start_bd->nbd = htole16(nbds);
5370 tx_start_bd->nbytes = htole16(hlen);
5375 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5376 tx_data_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr + hlen));
5377 tx_data_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr + hlen));
5378 tx_data_bd->nbytes = htole16(segs[0].ds_len - hlen);
5385 le16toh(tx_start_bd->nbytes),
5386 le32toh(tx_start_bd->addr_hi),
5387 le32toh(tx_start_bd->addr_lo),
5399 pbd_e2->parsing_data = htole32(pbd_e2_parsing_data);
5405 tx_data_bd = &fp->tx_chain[TX_BD(bd_prod)].reg_bd;
5406 tx_data_bd->addr_lo = htole32(U64_LO(segs[i].ds_addr));
5407 tx_data_bd->addr_hi = htole32(U64_HI(segs[i].ds_addr));
5408 tx_data_bd->nbytes = htole16(segs[i].ds_len);
5412 total_pkt_size += tx_data_bd->nbytes;
5418 tx_total_pkt_size_bd->total_pkt_bytes = total_pkt_size;
5421 if (__predict_false(sc->debug & DBG_TX)) {
5422 tmp_bd = tx_buf->first_bd;
5431 le16toh(tx_start_bd->nbd),
5432 le16toh(tx_start_bd->vlan_or_ethertype),
5433 tx_start_bd->bd_flags.as_bitfield,
5434 (tx_start_bd->general_data & ETH_TX_START_BD_HDR_NBDS));
5438 "-> Prse: %p bd=%d global=0x%x ip_hlen_w=%u "
5443 pbd_e1x->global_data,
5444 pbd_e1x->ip_hlen_w,
5445 pbd_e1x->ip_id,
5446 pbd_e1x->lso_mss,
5447 pbd_e1x->tcp_flags,
5448 pbd_e1x->tcp_pseudo_csum,
5449 pbd_e1x->tcp_send_seq,
5450 le16toh(pbd_e1x->total_hlen_w));
5453 "-> Parse: %p bd=%d dst=%02x:%02x:%02x "
5457 pbd_e2->data.mac_addr.dst_hi,
5458 pbd_e2->data.mac_addr.dst_mid,
5459 pbd_e2->data.mac_addr.dst_lo,
5460 pbd_e2->data.mac_addr.src_hi,
5461 pbd_e2->data.mac_addr.src_mid,
5462 pbd_e2->data.mac_addr.src_lo,
5463 pbd_e2->parsing_data);
5468 tx_data_bd = &fp->tx_chain[TX_BD(tmp_bd)].reg_bd;
5470 "-> Frag: %p bd=%d nbytes=%d hi=0x%x lo: 0x%x\n",
5473 le16toh(tx_data_bd->nbytes),
5474 le32toh(tx_data_bd->addr_hi),
5475 le32toh(tx_data_bd->addr_lo));
5498 fp->tx_db.data.prod += nbds;
5501 fp->tx_pkt_prod++;
5502 fp->tx_bd_prod = bd_prod;
5504 DOORBELL(sc, fp->index, fp->tx_db.raw);
5506 fp->eth_q_stats.tx_pkts++;
5509 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle,
5513 bus_space_barrier(sc->bar[BAR2].tag, sc->bar[BAR2].handle,
5543 fp->eth_q_stats.mbuf_alloc_tx++;
5551 fp->eth_q_stats.tx_encap_failures++;
5556 fp->eth_q_stats.mbuf_alloc_tx--;
5557 fp->eth_q_stats.tx_queue_xoff++;
5585 fp->watchdog_timer = BXE_TX_TIMEOUT;
5589 /* Legacy (non-RSS) dispatch routine */
5603 if (!sc->link_vars.link_up) {
5608 fp = &sc->fp[0];
5611 fp->eth_q_stats.tx_queue_full_return++;
5626 struct buf_ring *tx_br = fp->tx_br;
5635 if (sc->state != BXE_STATE_OPEN) {
5636 fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5648 fp->eth_q_stats.tx_soft_errors++;
5653 if (!sc->link_vars.link_up || !(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
5654 fp->eth_q_stats.tx_request_link_down_failures++;
5660 if (depth > fp->eth_q_stats.tx_max_drbr_queue_depth) {
5661 fp->eth_q_stats.tx_max_drbr_queue_depth = depth;
5673 fp->eth_q_stats.bd_avail_too_less_failures++;
5682 fp->eth_q_stats.mbuf_alloc_tx++;
5691 fp->eth_q_stats.tx_encap_failures++;
5696 fp->eth_q_stats.mbuf_alloc_tx--;
5697 fp->eth_q_stats.tx_frames_deferred++;
5717 fp->watchdog_timer = BXE_TX_TIMEOUT;
5723 fp->eth_q_stats.tx_mq_not_empty++;
5724 taskqueue_enqueue_timeout(fp->tq, &fp->tx_timeout_task, 1);
5735 struct bxe_softc *sc = fp->sc;
5736 if_t ifp = sc->ifp;
5757 fp_index = (m->m_pkthdr.flowid % sc->num_queues);
5759 fp = &sc->fp[fp_index];
5761 if (sc->state != BXE_STATE_OPEN) {
5762 fp->eth_q_stats.bxe_tx_mq_sc_state_failures++;
5770 rc = drbr_enqueue(ifp, fp->tx_br, m);
5771 taskqueue_enqueue(fp->tq, &fp->tx_task);
5785 for (i = 0; i < sc->num_queues; i++) {
5786 fp = &sc->fp[i];
5788 if (fp->state != BXE_FP_STATE_IRQ) {
5790 fp->index, fp->state);
5794 if (fp->tx_br != NULL) {
5795 BLOGD(sc, DBG_LOAD, "Clearing fp[%02d] buf_ring\n", fp->index);
5797 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL) {
5820 struct ecore_ilt *ilt = sc->ilt;
5823 ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
5824 BLOGD(sc, DBG_LOAD, "ilt starts at line %d\n", ilt->start_line);
5827 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5828 ilt_client->client_num = ILT_CLIENT_CDU;
5829 ilt_client->page_size = CDU_ILT_PAGE_SZ;
5830 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5831 ilt_client->start = line;
5838 ilt_client->end = (line - 1);
5843 ilt_client->start, ilt_client->end,
5844 ilt_client->page_size,
5845 ilt_client->flags,
5846 ilog2(ilt_client->page_size >> 12));
5849 if (QM_INIT(sc->qm_cid_count)) {
5850 ilt_client = &ilt->clients[ILT_CLIENT_QM];
5851 ilt_client->client_num = ILT_CLIENT_QM;
5852 ilt_client->page_size = QM_ILT_PAGE_SZ;
5853 ilt_client->flags = 0;
5854 ilt_client->start = line;
5857 line += DIV_ROUND_UP(sc->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5860 ilt_client->end = (line - 1);
5865 ilt_client->start, ilt_client->end,
5866 ilt_client->page_size, ilt_client->flags,
5867 ilog2(ilt_client->page_size >> 12));
5872 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5873 ilt_client->client_num = ILT_CLIENT_SRC;
5874 ilt_client->page_size = SRC_ILT_PAGE_SZ;
5875 ilt_client->flags = 0;
5876 ilt_client->start = line;
5878 ilt_client->end = (line - 1);
5883 ilt_client->start, ilt_client->end,
5884 ilt_client->page_size, ilt_client->flags,
5885 ilog2(ilt_client->page_size >> 12));
5888 ilt_client = &ilt->clients[ILT_CLIENT_TM];
5889 ilt_client->client_num = ILT_CLIENT_TM;
5890 ilt_client->page_size = TM_ILT_PAGE_SZ;
5891 ilt_client->flags = 0;
5892 ilt_client->start = line;
5894 ilt_client->end = (line - 1);
5899 ilt_client->start, ilt_client->end,
5900 ilt_client->page_size, ilt_client->flags,
5901 ilog2(ilt_client->page_size >> 12));
5913 rx_buf_size = (IP_HEADER_ALIGNMENT_PADDING + ETH_OVERHEAD + sc->mtu);
5915 for (i = 0; i < sc->num_queues; i++) {
5917 sc->fp[i].rx_buf_size = rx_buf_size;
5918 sc->fp[i].mbuf_alloc_size = MCLBYTES;
5920 sc->fp[i].rx_buf_size = rx_buf_size;
5921 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5923 sc->fp[i].rx_buf_size = MCLBYTES;
5924 sc->fp[i].mbuf_alloc_size = MCLBYTES;
5926 sc->fp[i].rx_buf_size = MJUMPAGESIZE;
5927 sc->fp[i].mbuf_alloc_size = MJUMPAGESIZE;
5929 sc->fp[i].rx_buf_size = MCLBYTES;
5930 sc->fp[i].mbuf_alloc_size = MCLBYTES;
5940 if ((sc->ilt =
5955 if ((sc->ilt->lines =
5968 if (sc->ilt != NULL) {
5969 free(sc->ilt, M_BXE_ILT);
5970 sc->ilt = NULL;
5977 if (sc->ilt->lines != NULL) {
5978 free(sc->ilt->lines, M_BXE_ILT);
5979 sc->ilt->lines = NULL;
5989 bxe_dma_free(sc, &sc->context[i].vcxt_dma);
5990 sc->context[i].vcxt = NULL;
5991 sc->context[i].size = 0;
6012 * 1. There can be multiple entities allocating memory for context -
6015 * 2. Since CDU page-size is not a single 4KB page (which is the case
6017 * allocation of sub-page-size in the last entry.
6024 sc->context[i].size = min(CDU_ILT_PAGE_SZ,
6025 (context_size - allocated));
6027 if (bxe_dma_alloc(sc, sc->context[i].size,
6028 &sc->context[i].vcxt_dma,
6031 return (-1);
6034 sc->context[i].vcxt =
6035 (union cdu_context *)sc->context[i].vcxt_dma.vaddr;
6037 allocated += sc->context[i].size;
6043 sc->ilt, sc->ilt->start_line, sc->ilt->lines);
6049 sc->ilt->clients[i].page_size,
6050 sc->ilt->clients[i].start,
6051 sc->ilt->clients[i].end,
6052 sc->ilt->clients[i].client_num,
6053 sc->ilt->clients[i].flags);
6059 return (-1);
6070 if (fp->rx_mbuf_tag == NULL) {
6076 if (fp->rx_mbuf_chain[i].m_map != NULL) {
6077 bus_dmamap_sync(fp->rx_mbuf_tag,
6078 fp->rx_mbuf_chain[i].m_map,
6080 bus_dmamap_unload(fp->rx_mbuf_tag,
6081 fp->rx_mbuf_chain[i].m_map);
6084 if (fp->rx_mbuf_chain[i].m != NULL) {
6085 m_freem(fp->rx_mbuf_chain[i].m);
6086 fp->rx_mbuf_chain[i].m = NULL;
6087 fp->eth_q_stats.mbuf_alloc_rx--;
6098 sc = fp->sc;
6100 if (fp->rx_mbuf_tag == NULL) {
6108 if (fp->rx_tpa_info[i].bd.m_map != NULL) {
6109 bus_dmamap_sync(fp->rx_mbuf_tag,
6110 fp->rx_tpa_info[i].bd.m_map,
6112 bus_dmamap_unload(fp->rx_mbuf_tag,
6113 fp->rx_tpa_info[i].bd.m_map);
6116 if (fp->rx_tpa_info[i].bd.m != NULL) {
6117 m_freem(fp->rx_tpa_info[i].bd.m);
6118 fp->rx_tpa_info[i].bd.m = NULL;
6119 fp->eth_q_stats.mbuf_alloc_tpa--;
6129 if (fp->rx_sge_mbuf_tag == NULL) {
6135 if (fp->rx_sge_mbuf_chain[i].m_map != NULL) {
6136 bus_dmamap_sync(fp->rx_sge_mbuf_tag,
6137 fp->rx_sge_mbuf_chain[i].m_map,
6139 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
6140 fp->rx_sge_mbuf_chain[i].m_map);
6143 if (fp->rx_sge_mbuf_chain[i].m != NULL) {
6144 m_freem(fp->rx_sge_mbuf_chain[i].m);
6145 fp->rx_sge_mbuf_chain[i].m = NULL;
6146 fp->eth_q_stats.mbuf_alloc_sge--;
6157 for (i = 0; i < sc->num_queues; i++) {
6158 fp = &sc->fp[i];
6160 if (fp->tx_br != NULL) {
6162 if (mtx_initialized(&fp->tx_mtx)) {
6166 while ((m = buf_ring_dequeue_sc(fp->tx_br)) != NULL)
6177 if (fp->eth_q_stats.mbuf_alloc_rx != 0) {
6179 fp->eth_q_stats.mbuf_alloc_rx);
6182 if (fp->eth_q_stats.mbuf_alloc_sge != 0) {
6184 fp->eth_q_stats.mbuf_alloc_sge);
6187 if (fp->eth_q_stats.mbuf_alloc_tpa != 0) {
6189 fp->eth_q_stats.mbuf_alloc_tpa);
6192 if (fp->eth_q_stats.mbuf_alloc_tx != 0) {
6194 fp->eth_q_stats.mbuf_alloc_tx);
6216 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6218 fp->eth_q_stats.mbuf_rx_bd_alloc_failed++;
6222 fp->eth_q_stats.mbuf_alloc_rx++;
6225 m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6227 /* map the mbuf into non-paged pool */
6228 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6229 fp->rx_mbuf_spare_map,
6232 fp->eth_q_stats.mbuf_rx_bd_mapping_failed++;
6234 fp->eth_q_stats.mbuf_alloc_rx--;
6238 /* all mbufs must map to a single segment */
6244 rx_buf = &fp->rx_mbuf_chain[prev_index];
6246 if (rx_buf->m_map != NULL) {
6247 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6249 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6258 fp->rx_mbuf_chain[prev_index].m = NULL;
6261 rx_buf = &fp->rx_mbuf_chain[index];
6263 if (rx_buf->m_map != NULL) {
6264 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6266 bus_dmamap_unload(fp->rx_mbuf_tag, rx_buf->m_map);
6271 fp->rx_mbuf_chain[prev_index].m_map : rx_buf->m_map;
6272 rx_buf->m_map = fp->rx_mbuf_spare_map;
6273 fp->rx_mbuf_spare_map = map;
6274 bus_dmamap_sync(fp->rx_mbuf_tag, rx_buf->m_map,
6276 rx_buf->m = m;
6278 rx_bd = &fp->rx_chain[index];
6279 rx_bd->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6280 rx_bd->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6289 struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
6297 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, fp->mbuf_alloc_size);
6299 fp->eth_q_stats.mbuf_rx_tpa_alloc_failed++;
6303 fp->eth_q_stats.mbuf_alloc_tpa++;
6306 m->m_pkthdr.len = m->m_len = fp->rx_buf_size;
6308 /* map the mbuf into non-paged pool */
6309 rc = bus_dmamap_load_mbuf_sg(fp->rx_mbuf_tag,
6310 fp->rx_tpa_info_mbuf_spare_map,
6313 fp->eth_q_stats.mbuf_rx_tpa_mapping_failed++;
6315 fp->eth_q_stats.mbuf_alloc_tpa--;
6319 /* all mbufs must map to a single segment */
6323 if (tpa_info->bd.m_map != NULL) {
6324 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6326 bus_dmamap_unload(fp->rx_mbuf_tag, tpa_info->bd.m_map);
6330 map = tpa_info->bd.m_map;
6331 tpa_info->bd.m_map = fp->rx_tpa_info_mbuf_spare_map;
6332 fp->rx_tpa_info_mbuf_spare_map = map;
6333 bus_dmamap_sync(fp->rx_mbuf_tag, tpa_info->bd.m_map,
6335 tpa_info->bd.m = m;
6336 tpa_info->seg = segs[0];
6361 fp->eth_q_stats.mbuf_rx_sge_alloc_failed++;
6365 fp->eth_q_stats.mbuf_alloc_sge++;
6368 m->m_pkthdr.len = m->m_len = SGE_PAGE_SIZE;
6370 /* map the SGE mbuf into non-paged pool */
6371 rc = bus_dmamap_load_mbuf_sg(fp->rx_sge_mbuf_tag,
6372 fp->rx_sge_mbuf_spare_map,
6375 fp->eth_q_stats.mbuf_rx_sge_mapping_failed++;
6377 fp->eth_q_stats.mbuf_alloc_sge--;
6381 /* all mbufs must map to a single segment */
6384 sge_buf = &fp->rx_sge_mbuf_chain[index];
6387 if (sge_buf->m_map != NULL) {
6388 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6390 bus_dmamap_unload(fp->rx_sge_mbuf_tag, sge_buf->m_map);
6394 map = sge_buf->m_map;
6395 sge_buf->m_map = fp->rx_sge_mbuf_spare_map;
6396 fp->rx_sge_mbuf_spare_map = map;
6397 bus_dmamap_sync(fp->rx_sge_mbuf_tag, sge_buf->m_map,
6399 sge_buf->m = m;
6401 sge = &fp->rx_sge_chain[index];
6402 sge->addr_hi = htole32(U64_HI(segs[0].ds_addr));
6403 sge->addr_lo = htole32(U64_LO(segs[0].ds_addr));
6416 for (i = 0; i < sc->num_queues; i++) {
6417 fp = &sc->fp[i];
6420 fp->rx_bd_cons = 0;
6421 fp->rx_cq_cons = 0;
6424 for (j = 0; j < sc->max_rx_bufs; j++) {
6436 fp->rx_bd_prod = ring_prod;
6437 fp->rx_cq_prod = cqe_ring_prod;
6438 fp->eth_q_stats.rx_calls = fp->eth_q_stats.rx_pkts = 0;
6442 fp->tpa_enable = TRUE;
6450 fp->tpa_enable = FALSE;
6454 fp->rx_tpa_info[j].state = BXE_TPA_STATE_STOP;
6457 if (fp->tpa_enable) {
6465 fp->tpa_enable = FALSE;
6473 fp->rx_sge_prod = ring_prod;
6492 bxe_dma_free(sc, &sc->fw_stats_dma);
6494 sc->fw_stats_num = 0;
6496 sc->fw_stats_req_size = 0;
6497 sc->fw_stats_req = NULL;
6498 sc->fw_stats_req_mapping = 0;
6500 sc->fw_stats_data_size = 0;
6501 sc->fw_stats_data = NULL;
6502 sc->fw_stats_data_mapping = 0;
6518 sc->fw_stats_num = (2 + num_queue_stats);
6527 ((sc->fw_stats_num / STATS_QUERY_CMD_COUNT) +
6528 ((sc->fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0));
6531 sc->fw_stats_num, num_groups);
6533 sc->fw_stats_req_size =
6539 * stats_counter holds per-STORM counters that are incremented when
6545 sc->fw_stats_data_size =
6552 if (bxe_dma_alloc(sc, (sc->fw_stats_req_size + sc->fw_stats_data_size),
6553 &sc->fw_stats_dma, "fw stats") != 0) {
6555 return (-1);
6560 sc->fw_stats_req =
6561 (struct bxe_fw_stats_req *)sc->fw_stats_dma.vaddr;
6562 sc->fw_stats_req_mapping = sc->fw_stats_dma.paddr;
6564 sc->fw_stats_data =
6565 (struct bxe_fw_stats_data *)((uint8_t *)sc->fw_stats_dma.vaddr +
6566 sc->fw_stats_req_size);
6567 sc->fw_stats_data_mapping = (sc->fw_stats_dma.paddr +
6568 sc->fw_stats_req_size);
6571 (uintmax_t)sc->fw_stats_req_mapping);
6574 (uintmax_t)sc->fw_stats_data_mapping);
6581 * 0-7 - Engine0 load counter.
6582 * 8-15 - Engine1 load counter.
6583 * 16 - Engine0 RESET_IN_PROGRESS bit.
6584 * 17 - Engine1 RESET_IN_PROGRESS bit.
6585 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active
6587 * 19 - Engine1 ONE_IS_LOADED.
6588  * 20 - Chip reset flow bit. When set, the non-leader must wait for both engines
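Expressed as masks, the bit layout described by this comment corresponds to the following (names are made up for the sketch; the driver carries its own definitions):

#define SK_PATH0_LOAD_CNT_MASK   0x000000ff   /* bits  0-7:  Engine0 load counter */
#define SK_PATH1_LOAD_CNT_MASK   0x0000ff00   /* bits  8-15: Engine1 load counter */
#define SK_PATH0_RST_IN_PROG_BIT 0x00010000   /* bit  16:    Engine0 RESET_IN_PROGRESS */
#define SK_PATH1_RST_IN_PROG_BIT 0x00020000   /* bit  17:    Engine1 RESET_IN_PROGRESS */
#define SK_PATH0_ONE_IS_LOADED   0x00040000   /* bit  18:    Engine0 ONE_IS_LOADED */
#define SK_PATH1_ONE_IS_LOADED   0x00080000   /* bit  19:    Engine1 ONE_IS_LOADED */
#define SK_GLOBAL_RESET_BIT      0x00100000   /* bit  20:    chip reset flow */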
6773 sc->fw_seq =
6777 BLOGD(sc, DBG_LOAD, "initial fw_seq 0x%04x\n", sc->fw_seq);
6780 sc->fw_drv_pulse_wr_seq =
6785 sc->fw_drv_pulse_wr_seq);
6794 return (-1);
6800 return (-1);
6835 return (-1);
6853 * Barrier here for ordering between the writing to sc->port.pmf here
6856 sc->port.pmf = 1;
6859 sc->port.pmf = 0;
6862 BLOGD(sc, DBG_LOAD, "pmf %d\n", sc->port.pmf);
6901 sc->devinfo.mf_info.mf_config[vn] =
6905 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
6908 sc->flags |= BXE_MF_FUNC_DIS;
6911 sc->flags &= ~BXE_MF_FUNC_DIS;
6932 return (-1);
6973 bxe_set_bit(BXE_SP_RTNL_FAN_FAILURE, &sc->sp_rtnl_state);
6974 schedule_delayed_work(&sc->sp_rtnl_task, 0);
6990 BLOGD(sc, DBG_LOAD, "link_vars phy_flags : %x\n", sc->link_vars.phy_flags);
6991 elink_link_update(&sc->link_params, &sc->link_vars);
6993 if (sc->link_vars.link_up) {
6996 if (!CHIP_IS_E1(sc) && sc->dropless_fc) {
6999 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
7009 if (sc->link_vars.mac_type != ELINK_MAC_TYPE_EMAC) {
7012 memset(&(pstats->mac_stx[0]), 0, sizeof(struct mac_stx));
7015 if (sc->state == BXE_STATE_OPEN) {
7019 fp = &sc->fp[i];
7020 taskqueue_enqueue(fp->tq, &fp->tx_task);
7026 if (sc->link_vars.link_up && sc->link_vars.line_speed) {
7031 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7060 if (sc->attn_state & asserted) {
7079 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
7080 sc->attn_state |= asserted;
7081 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
7145 if (sc->devinfo.int_block == INT_BLOCK_HC) {
7153 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
7160 * NIG mask. This loop should exit after 2-3 iterations max.
7162 if (sc->devinfo.int_block != INT_BLOCK_HC) {
7529 if(sc->state != BXE_STATE_OPEN)
7612 &sc->sp_err_timeout_task, hz/10);
7646 if (sc->link_vars.link_up) {
7651 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
7664 BLOGD(sc, DBG_INTR, "EEE - LLDP event\n");
7674 &sc->sp->drv_info_to_mcp.ether_stat;
7676 strlcpy(ether_stat->version, BXE_DRIVER_VERSION,
7680 sc->sp_objs[0].mac_obj.get_n_elements(sc, &sc->sp_objs[0].mac_obj,
7682 ether_stat->mac_local + MAC_PAD,
7685 ether_stat->mtu_size = sc->mtu;
7687 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
7688 if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
7689 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
7692 // XXX ether_stat->feature_flags |= ???;
7694 ether_stat->promiscuous_mode = 0; // (flags & PROMISC) ? 1 : 0;
7696 ether_stat->txq_size = sc->tx_ring_size;
7697 ether_stat->rxq_size = sc->rx_ring_size;
7706 /* if drv_info version supported by MFW doesn't match - send NACK */
7715 memset(&sc->sp->drv_info_to_mcp, 0, sizeof(union drv_info_to_mcp));
7724 /* if op code isn't supported - send NACK */
7750 * where the sc->flags can change so it is done without any
7753 if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
7755 sc->flags |= BXE_MF_FUNC_DIS;
7759 sc->flags &= ~BXE_MF_FUNC_DIS;
7783 sc->port.pmf = 1;
7784 BLOGD(sc, DBG_INTR, "pmf %d\n", sc->port.pmf);
7788 * sc->port.pmf here and reading it from the bxe_periodic_task().
7799 if (sc->devinfo.int_block == INT_BLOCK_HC) {
7924 sc->devinfo.mf_info.mf_config[SC_VN(sc)] =
7937 if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF))
7943 if (sc->link_vars.periodic_flags &
7947 sc->link_vars.periodic_flags &=
7972 &sc->sp_err_timeout_task, hz/10);
7980 &sc->sp_err_timeout_task, hz/10);
7993 BLOGE(sc, "GRC time-out 0x%08x\n", val);
8025 BLOGE(sc, "PXP hw attention-0 0x%08x\n", val);
8034 BLOGE(sc, "PXP hw attention-1 0x%08x\n", val);
8053 * STS0 and STS1 - clear it
8101 &sc->sp_err_timeout_task, hz/10);
8141 &sc->sp_err_timeout_task, hz/10);
8165 elink_hw_reset_phy(&sc->link_params);
8169 if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
8171 elink_handle_module_detect_int(&sc->link_params);
8183 &sc->sp_err_timeout_task, hz/10);
8219 &sc->sp_err_timeout_task, hz/10);
8239 group_mask = &sc->attn_group[index];
8243 group_mask->sig[0], group_mask->sig[1],
8244 group_mask->sig[2], group_mask->sig[3],
8245 group_mask->sig[4]);
8247 bxe_attn_int_deasserted4(sc, attn.sig[4] & group_mask->sig[4]);
8248 bxe_attn_int_deasserted3(sc, attn.sig[3] & group_mask->sig[3]);
8249 bxe_attn_int_deasserted1(sc, attn.sig[1] & group_mask->sig[1]);
8250 bxe_attn_int_deasserted2(sc, attn.sig[2] & group_mask->sig[2]);
8251 bxe_attn_int_deasserted0(sc, attn.sig[0] & group_mask->sig[0]);
8257 if (sc->devinfo.int_block == INT_BLOCK_HC) {
8267 (sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
8270 if (~sc->attn_state & deasserted) {
8289 BLOGD(sc, DBG_INTR, "attn_state 0x%08x\n", sc->attn_state);
8290 sc->attn_state &= ~deasserted;
8291 BLOGD(sc, DBG_INTR, "new state 0x%08x\n", sc->attn_state);
8298 uint32_t attn_bits = le32toh(sc->def_sb->atten_status_block.attn_bits);
8299 uint32_t attn_ack = le32toh(sc->def_sb->atten_status_block.attn_bits_ack);
8300 uint32_t attn_state = sc->attn_state;
8327 struct host_sp_status_block *def_sb = sc->def_sb;
8332 if (sc->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
8333 sc->def_att_idx = def_sb->atten_status_block.attn_bits_index;
8337 if (sc->def_idx != def_sb->sp_sb.running_index) {
8338 sc->def_idx = def_sb->sp_sb.running_index;
8352 return (&sc->sp_objs[CID_TO_FP(cid, sc)].q_obj);
8363 rparam.mcast_obj = &sc->mcast_obj;
8368 sc->mcast_obj.raw.clear_pending(&sc->mcast_obj.raw);
8370 /* if there are pending mcast commands - send them */
8371 if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
8388 uint32_t cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8394 switch (le32toh(elem->message.data.eth_event.echo) >> BXE_SWCID_SHIFT) {
8397 vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
8411 elem->message.data.eth_event.echo);
8415 rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
8428 bxe_clear_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
8432 &sc->sp_state)) {
8455 struct ecore_func_sp_obj *f_obj = &sc->func_obj;
8456 struct ecore_raw_obj *rss_raw = &sc->rss_conf_obj.raw;
8458 hw_cons = le16toh(*sc->eq_cons_sb);
8461 * The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
8462 * when we get to the next-page we need to adjust so the loop
8474 sw_cons = sc->eq_cons;
8475 sw_prod = sc->eq_prod;
8478 hw_cons, sw_cons, atomic_load_acq_long(&sc->eq_spq_left));
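The 1-255, 257 versus 0-254, 256 ranges come from the last descriptor of each EQ page being reserved for the next-page pointer; the adjustment the comment alludes to is roughly the sketch below (illustrative, with an assumed 256-entry page):

#define EQ_DESC_MAX_PAGE_SKETCH 255   /* assumed: last index within one EQ page */

static inline uint16_t
eq_hw_cons_adjust_sketch(uint16_t hw_cons)
{
    /* step over the slot reserved for the next-page pointer */
    if ((hw_cons & EQ_DESC_MAX_PAGE_SKETCH) == EQ_DESC_MAX_PAGE_SKETCH) {
        hw_cons++;
    }

    return (hw_cons);
}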
8484 elem = &sc->eq[EQ_DESC(sw_cons)];
8487 cid = SW_CID(elem->message.data.cfc_del_event.cid);
8488 opcode = elem->message.opcode;
8495 sc->stats_comp++);
8504 if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
8511 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
8519 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_START)) {
8526 echo = elem->message.data.function_update_event.echo;
8529 if (f_obj->complete_cmd(sc, f_obj,
8542 if (q_obj->complete_cmd(sc, q_obj,
8550 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
8557 if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
8563 switch (opcode | sc->state) {
8566 cid = elem->message.data.eth_event.echo & BXE_SWCID_MASK;
8568 rss_raw->clear_pending(rss_raw);
8597 BLOGE(sc, "Unknown EQ event %d, sc->state 0x%x\n",
8598 elem->message.opcode, sc->state);
8606 atomic_add_acq_long(&sc->eq_spq_left, spqe_cnt);
8608 sc->eq_cons = sw_cons;
8609 sc->eq_prod = sw_prod;
8615 bxe_update_eq_prod(sc, sc->eq_prod);
8625 BLOGD(sc, DBG_SP, "---> SP TASK <---\n");
8634 BLOGD(sc, DBG_SP, "---> ATTN INTR <---\n");
8642 BLOGD(sc, DBG_SP, "---> EQ INTR <---\n");
8644 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
8645 le16toh(sc->def_idx), IGU_INT_NOP, 1);
8655 bxe_ack_sb(sc, sc->igu_dsb_id, ATTENTION_ID,
8656 le16toh(sc->def_att_idx), IGU_INT_ENABLE, 1);
8673 struct bxe_softc *sc = fp->sc;
8677 BLOGD(sc, DBG_INTR, "---> FP TASK QUEUE (%d) <---\n", fp->index);
8686 if (!(if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING)) {
8695 /* fp->txdata[cos] */
8708 taskqueue_enqueue(fp->tq, &fp->tq_task);
8712 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8713 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8719 struct bxe_softc *sc = fp->sc;
8723 BLOGD(sc, DBG_INTR, "---> FP TASK ISR (%d) <---\n", fp->index);
8729 /* fp->txdata[cos] */
8742 taskqueue_enqueue(fp->tq, &fp->tq_task);
8753 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
8754 le16toh(fp->fp_hc_idx), IGU_INT_ENABLE, 1);
8772 BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n");
8776 * the bits returned from ack_int() are 0-15
8794 fp = &sc->fp[i];
8795 mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
8798 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8806 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8809 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8825 BLOGD(sc, (DBG_INTR | DBG_SP), "---> SP INTR <---\n");
8828 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8831 taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task);
8839 struct bxe_softc *sc = fp->sc;
8841 BLOGD(sc, DBG_INTR, "---> FP INTR %d <---\n", fp->index);
8844 "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n",
8845 curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id);
8848 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
8859 switch (sc->interrupt_mode) {
8862 if (sc->intr[0].resource != NULL) {
8863 bus_release_resource(sc->dev,
8865 sc->intr[0].rid,
8866 sc->intr[0].resource);
8870 for (i = 0; i < sc->intr_count; i++) {
8872 if (sc->intr[i].resource && sc->intr[i].rid) {
8873 bus_release_resource(sc->dev,
8875 sc->intr[i].rid,
8876 sc->intr[i].resource);
8879 pci_release_msi(sc->dev);
8882 for (i = 0; i < sc->intr_count; i++) {
8883 BLOGD(sc, DBG_LOAD, "Releasing MSI-X vector %d\n", i);
8884 if (sc->intr[i].resource && sc->intr[i].rid) {
8885 bus_release_resource(sc->dev,
8887 sc->intr[i].rid,
8888 sc->intr[i].resource);
8891 pci_release_msi(sc->dev);
8926 /* get the number of available MSI/MSI-X interrupts from the OS */
8927 if (sc->interrupt_mode > 0) {
8928 if (sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) {
8929 msix_count = pci_msix_count(sc->dev);
8932 if (sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) {
8933 msi_count = pci_msi_count(sc->dev);
8936 BLOGD(sc, DBG_LOAD, "%d MSI and %d MSI-X vectors available\n",
8940 do { /* try allocating MSI-X interrupt resources (at least 2) */
8941 if (sc->interrupt_mode != INTR_MODE_MSIX) {
8945 if (((sc->devinfo.pcie_cap_flags & BXE_MSIX_CAPABLE_FLAG) == 0) ||
8947 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8951 /* ask for the necessary number of MSI-X vectors */
8952 num_requested = min((sc->num_queues + 1), msix_count);
8954 BLOGD(sc, DBG_LOAD, "Requesting %d MSI-X vectors\n", num_requested);
8957 if ((rc = pci_alloc_msix(sc->dev, &num_allocated)) != 0) {
8958 BLOGE(sc, "MSI-X alloc failed! (%d)\n", rc);
8959 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8964 BLOGE(sc, "MSI-X allocation less than 2!\n");
8965 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
8966 pci_release_msi(sc->dev);
8970 BLOGI(sc, "MSI-X vectors Requested %d and Allocated %d\n",
8974 sc->intr_count = num_allocated;
8975 sc->num_queues = num_allocated - 1;
8979 /* allocate the MSI-X vectors */
8981 sc->intr[i].rid = (rid + i);
8983 if ((sc->intr[i].resource =
8984 bus_alloc_resource_any(sc->dev,
8986 &sc->intr[i].rid,
8988 BLOGE(sc, "Failed to map MSI-X[%d] (rid=%d)!\n",
8991 for (j = (i - 1); j >= 0; j--) {
8992 bus_release_resource(sc->dev,
8994 sc->intr[j].rid,
8995 sc->intr[j].resource);
8998 sc->intr_count = 0;
8999 sc->num_queues = 0;
9000 sc->interrupt_mode = INTR_MODE_MSI; /* try MSI next */
9001 pci_release_msi(sc->dev);
9005 BLOGD(sc, DBG_LOAD, "Mapped MSI-X[%d] (rid=%d)\n", i, (rid + i));
9010 if (sc->interrupt_mode != INTR_MODE_MSI) {
9014 if (((sc->devinfo.pcie_cap_flags & BXE_MSI_CAPABLE_FLAG) == 0) ||
9016 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9026 if ((rc = pci_alloc_msi(sc->dev, &num_allocated)) != 0) {
9028 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9034 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9035 pci_release_msi(sc->dev);
9043 sc->intr_count = num_allocated;
9044 sc->num_queues = num_allocated;
9048 sc->intr[0].rid = rid;
9050 if ((sc->intr[0].resource =
9051 bus_alloc_resource_any(sc->dev,
9053 &sc->intr[0].rid,
9056 sc->intr_count = 0;
9057 sc->num_queues = 0;
9058 sc->interrupt_mode = INTR_MODE_INTX; /* try INTx next */
9059 pci_release_msi(sc->dev);
9067 if (sc->interrupt_mode != INTR_MODE_INTX) {
9074 sc->intr_count = 1;
9075 sc->num_queues = 1;
9079 sc->intr[0].rid = rid;
9081 if ((sc->intr[0].resource =
9082 bus_alloc_resource_any(sc->dev,
9084 &sc->intr[0].rid,
9087 sc->intr_count = 0;
9088 sc->num_queues = 0;
9089 sc->interrupt_mode = -1; /* Failed! */
9096 if (sc->interrupt_mode == -1) {
9102 sc->interrupt_mode, sc->num_queues);
9116 for (i = 0; i < sc->intr_count; i++) {
9117 if (sc->intr[i].resource && sc->intr[i].tag) {
9119 bus_teardown_intr(sc->dev, sc->intr[i].resource, sc->intr[i].tag);
9123 for (i = 0; i < sc->num_queues; i++) {
9124 fp = &sc->fp[i];
9125 if (fp->tq) {
9126 taskqueue_drain(fp->tq, &fp->tq_task);
9127 taskqueue_drain(fp->tq, &fp->tx_task);
9128 while (taskqueue_cancel_timeout(fp->tq, &fp->tx_timeout_task,
9130 taskqueue_drain_timeout(fp->tq, &fp->tx_timeout_task);
9133 for (i = 0; i < sc->num_queues; i++) {
9134 fp = &sc->fp[i];
9135 if (fp->tq != NULL) {
9136 taskqueue_free(fp->tq);
9137 fp->tq = NULL;
9142 if (sc->sp_tq) {
9143 taskqueue_drain(sc->sp_tq, &sc->sp_tq_task);
9144 taskqueue_free(sc->sp_tq);
9145 sc->sp_tq = NULL;
9152 * When using multiple MSI/MSI-X vectors the first vector
9155 * single MSI/MSI-X vector is used (SINGLE_ISR) then the
9165 snprintf(sc->sp_tq_name, sizeof(sc->sp_tq_name),
9166 "bxe%d_sp_tq", sc->unit);
9167 TASK_INIT(&sc->sp_tq_task, 0, bxe_handle_sp_tq, sc);
9168 sc->sp_tq = taskqueue_create(sc->sp_tq_name, M_NOWAIT,
9170 &sc->sp_tq);
9171 taskqueue_start_threads(&sc->sp_tq, 1, PWAIT, /* lower priority */
9172 "%s", sc->sp_tq_name);
9175 for (i = 0; i < sc->num_queues; i++) {
9176 fp = &sc->fp[i];
9177 snprintf(fp->tq_name, sizeof(fp->tq_name),
9178 "bxe%d_fp%d_tq", sc->unit, i);
9179 NET_TASK_INIT(&fp->tq_task, 0, bxe_handle_fp_tq, fp);
9180 TASK_INIT(&fp->tx_task, 0, bxe_tx_mq_start_deferred, fp);
9181 fp->tq = taskqueue_create(fp->tq_name, M_NOWAIT,
9183 &fp->tq);
9184 TIMEOUT_TASK_INIT(fp->tq, &fp->tx_timeout_task, 0,
9186 taskqueue_start_threads(&fp->tq, 1, PI_NET, /* higher priority */
9187 "%s", fp->tq_name);
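/*
 * Note the thread priorities: the slowpath taskqueue is started at PWAIT
 * (lower priority) while each fastpath taskqueue runs at PI_NET, so per-queue
 * RX/TX work is preferred over slowpath housekeeping.
 */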
9191 if (sc->interrupt_mode == INTR_MODE_MSIX) {
9192 BLOGD(sc, DBG_LOAD, "Enabling slowpath MSI-X[0] vector\n");
9198 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9201 &sc->intr[0].tag)) != 0) {
9202 BLOGE(sc, "Failed to allocate MSI-X[0] vector (%d)\n", rc);
9206 bus_describe_intr(sc->dev, sc->intr[0].resource,
9207 sc->intr[0].tag, "sp");
9209 /* bus_bind_intr(sc->dev, sc->intr[0].resource, 0); */
9212 for (i = 0; i < sc->num_queues; i++) {
9213 fp = &sc->fp[i];
9214 BLOGD(sc, DBG_LOAD, "Enabling MSI-X[%d] vector\n", (i + 1));
9221 if ((rc = bus_setup_intr(sc->dev, sc->intr[i + 1].resource,
9224 &sc->intr[i + 1].tag)) != 0) {
9225 BLOGE(sc, "Failed to allocate MSI-X[%d] vector (%d)\n",
9230 bus_describe_intr(sc->dev, sc->intr[i + 1].resource,
9231 sc->intr[i + 1].tag, "fp%02d", i);
9234 if (sc->num_queues > 1) {
9235 bus_bind_intr(sc->dev, sc->intr[i + 1].resource, i);
9238 fp->state = BXE_FP_STATE_IRQ;
9240 } else if (sc->interrupt_mode == INTR_MODE_MSI) {
9248 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9251 &sc->intr[0].tag)) != 0) {
9256 } else { /* (sc->interrupt_mode == INTR_MODE_INTX) */
9264 if ((rc = bus_setup_intr(sc->dev, sc->intr[0].resource,
9267 &sc->intr[0].tag)) != 0) {
9311 sc->dmae_ready = 0;
9314 &sc->func_obj,
9332 func_params.f_obj = &sc->func_obj;
9365 /* writes FP SP data to FW - data_size in dwords */
9460 hc_sm->igu_sb_id = igu_sb_id;
9461 hc_sm->igu_seg_id = igu_seg_id;
9462 hc_sm->timer_value = 0xFF;
9463 hc_sm->time_to_expire = 0xFFFFFFFF;
9557 /* write indices to HW - PCI guarantees endianity of regpairs */
9564 if (CHIP_IS_E1x(fp->sc)) {
9565 return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
9567 return (fp->cl_id);
9578 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
9580 offset += USTORM_RX_PRODS_E1X_OFFSET(SC_PORT(sc), fp->cl_id);
9590 struct bxe_fastpath *fp = &sc->fp[idx];
9595 fp->sc = sc;
9596 fp->index = idx;
9598 fp->igu_sb_id = (sc->igu_base_sb + idx + CNIC_SUPPORT(sc));
9599 fp->fw_sb_id = (sc->base_fw_ndsb + idx + CNIC_SUPPORT(sc));
9601 fp->cl_id = (CHIP_IS_E1x(sc)) ?
9603 /* want client ID same as IGU SB ID for non-E1 */
9604 fp->igu_sb_id;
9605 fp->cl_qzone_id = bxe_fp_qzone_id(fp);
9609 fp->sb_index_values = fp->status_block.e2_sb->sb.index_values;
9610 fp->sb_running_index = fp->status_block.e2_sb->sb.running_index;
9612 fp->sb_index_values = fp->status_block.e1x_sb->sb.index_values;
9613 fp->sb_running_index = fp->status_block.e1x_sb->sb.running_index;
9617 fp->ustorm_rx_prods_offset = bxe_rx_ustorm_prods_offset(sc, fp);
9619 fp->rx_cq_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS];
9625 for (cos = 0; cos < sc->max_cos; cos++) {
9628 fp->tx_cons_sb = &fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0];
9635 bxe_init_sb(sc, fp->sb_dma.paddr, BXE_VF_ID_INVALID, FALSE,
9636 fp->fw_sb_id, fp->igu_sb_id);
9645 &sc->sp_objs[idx].q_obj,
9646 fp->cl_id,
9648 sc->max_cos,
9656 &sc->sp_objs[idx].mac_obj,
9657 fp->cl_id,
9663 &sc->sp_state,
9665 &sc->macs_pool);
9668 idx, fp->status_block.e2_sb, fp->cl_id, fp->fw_sb_id, fp->igu_sb_id);
9690 * This is only applicable for weak-ordered memory model archs such
9691 * as IA-64. The following barrier is also mandatory since FW will
9698 (fp->ustorm_rx_prods_offset + (i * 4)),
9706 fp->index, rx_bd_prod, rx_cq_prod, rx_sge_prod);
9715 for (i = 0; i < sc->num_queues; i++) {
9716 fp = &sc->fp[i];
9718 fp->rx_bd_cons = 0;
9726 fp->rx_bd_prod,
9727 fp->rx_cq_prod,
9728 fp->rx_sge_prod);
9738 U64_LO(fp->rcq_dma.paddr));
9742 U64_HI(fp->rcq_dma.paddr));
9750 SET_FLAG(fp->tx_db.data.header.data, DOORBELL_HDR_T_DB_TYPE, 1);
9751 fp->tx_db.data.zero_fill1 = 0;
9752 fp->tx_db.data.prod = 0;
9754 fp->tx_pkt_prod = 0;
9755 fp->tx_pkt_cons = 0;
9756 fp->tx_bd_prod = 0;
9757 fp->tx_bd_cons = 0;
9758 fp->eth_q_stats.tx_pkts = 0;
9766 for (i = 0; i < sc->num_queues; i++) {
9767 bxe_init_tx_ring_one(&sc->fp[i]);
9774 struct host_sp_status_block *def_sb = sc->def_sb;
9775 bus_addr_t mapping = sc->def_sb_dma.paddr;
9791 igu_sp_sb_index = sc->igu_dsb_id;
9798 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
9799 sc->attn_state = 0;
9811 sc->attn_group[index].sig[sindex] =
9821 sc->attn_group[index].sig[4] =
9824 sc->attn_group[index].sig[4] = 0;
9828 if (sc->devinfo.int_block == INT_BLOCK_HC) {
9856 bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
9862 atomic_store_rel_long(&sc->cq_spq_left, MAX_SPQ_PENDING);
9863 sc->spq_prod_idx = 0;
9864 sc->dsb_sp_prod = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_ETH_DEF_CONS];
9865 sc->spq_prod_bd = sc->spq;
9866 sc->spq_last_bd = (sc->spq_prod_bd + MAX_SP_DESC_CNT);
9876 elem = &sc->eq[EQ_DESC_CNT_PAGE * i - 1];
9878 elem->next_page.addr.hi = htole32(U64_HI(sc->eq_dma.paddr +
9881 elem->next_page.addr.lo = htole32(U64_LO(sc->eq_dma.paddr +
9886 sc->eq_cons = 0;
9887 sc->eq_prod = NUM_EQ_DESC;
9888 sc->eq_cons_sb = &sc->def_sb->sp_sb.index_values[HC_SP_INDEX_EQ_CONS];
9890 atomic_store_rel_long(&sc->eq_spq_left,
9891 (min((MAX_SP_DESC_CNT - MAX_SPQ_PENDING),
9892 NUM_EQ_DESC) - 1));
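/*
 * eq_spq_left appears to act as the shared slowpath credit: it starts at one
 * less than the smaller of the SPQ slots not held back for pending entries
 * (MAX_SP_DESC_CNT - MAX_SPQ_PENDING) and the total number of EQ descriptors.
 */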
9961 storm_memset_func_cfg(sc, &tcfg, p->func_id);
9965 storm_memset_vf_to_pf(sc, p->func_id, p->pf_id);
9966 storm_memset_func_en(sc, p->func_id, 1);
9969 if (p->func_flgs & FUNC_FLG_SPQ) {
9970 storm_memset_spq_addr(sc, p->spq_map, p->func_id);
9972 (XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(p->func_id)),
9973 p->spq_prod);
9983 * 0 - if all the min_rates are 0.
9997 vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10005 /* If min rate is zero - set it to 100 */
10011 input->vnic_min_rate[vn] = vn_min_rate;
10014 /* if ETS or all min rates are zeros - disable fairness */
10016 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10019 input->flags.cmng_enables &= ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10023 input->flags.cmng_enables |= CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
10035 BLOGD(sc, DBG_LOAD, "Max BW configured to 0 - using 100 instead\n");
10048 uint32_t vn_cfg = sc->devinfo.mf_info.mf_config[vn];
10058 vn_max_rate = ((sc->link_vars.line_speed * max_cfg) / 100);
10067 input->vnic_max_rate[vn] = vn_max_rate;
10080 input.port_rate = sc->link_vars.line_speed;
10092 if (sc->port.pmf) {
10101 ecore_init_cmng(&input, &sc->cmng);
10136 ecore_storm_memset_struct(sc, addr, size, (uint32_t *)&cmng->port);
10145 (uint32_t *)&cmng->vnic.vnic_max_rate[vn]);
10151 (uint32_t *)&cmng->vnic.vnic_min_rate[vn]);
10186 flags |= (if_getcapenable(sc->ifp) & IFCAP_LRO) ? FUNC_FLG_TPA : 0;
10191 func_init.spq_map = sc->spq_dma.paddr;
10192 func_init.spq_prod = sc->spq_prod_idx;
10196 memset(&sc->cmng, 0, sizeof(struct cmng_struct_per_port));
10202 * re-calculated according to the actual link rate.
10204 sc->link_vars.line_speed = SPEED_10000;
10208 if (sc->port.pmf) {
10209 storm_memset_cmng(sc, &sc->cmng, SC_PORT(sc));
10212 /* init Event Queue - PCI bus guarantees correct endianness */
10213 eq_data.base_addr.hi = U64_HI(sc->eq_dma.paddr);
10214 eq_data.base_addr.lo = U64_LO(sc->eq_dma.paddr);
10215 eq_data.producer = sc->eq_prod;
10227 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10228 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10229 (sc->intr_count == 1)) ? TRUE : FALSE;
10230 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10266 val, port, addr, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10277 if (sc->port.pmf) {
10297 uint8_t msix = (sc->interrupt_mode == INTR_MODE_MSIX) ? TRUE : FALSE;
10298 uint8_t single_msix = ((sc->interrupt_mode == INTR_MODE_MSIX) &&
10299 (sc->intr_count == 1)) ? TRUE : FALSE;
10300 uint8_t msi = (sc->interrupt_mode == INTR_MODE_MSI) ? TRUE : FALSE;
10324 /* clean previous status - need to configure igu prior to ack */
10333 val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
10342 if (sc->port.pmf) {
10360 if (sc->devinfo.int_block == INT_BLOCK_HC) {
10430 if (sc->devinfo.int_block == INT_BLOCK_HC) {
10443 for (i = 0; i < sc->num_queues; i++) {
10457 elink_init_mod_abs_int(sc, &sc->link_vars,
10458 sc->devinfo.chip_id,
10459 sc->devinfo.shmem_base,
10460 sc->devinfo.shmem2_base,
10489 (sc->flags & BXE_TX_SWITCHING) ? ECORE_OBJ_TYPE_RX_TX :
10493 ecore_init_rx_mode_obj(sc, &sc->rx_mode_obj);
10497 &sc->mcast_obj,
10498 sc->fp[0].cl_id,
10499 sc->fp[0].index,
10505 &sc->sp_state,
10510 &sc->macs_pool,
10516 &sc->vlans_pool,
10523 &sc->rss_conf_obj,
10524 sc->fp[0].cl_id,
10525 sc->fp[0].index,
10531 &sc->sp_state, ECORE_OBJ_TYPE_RX);
10547 func_params.f_obj = &sc->func_obj;
10551 start_params->mf_mode = sc->devinfo.mf_info.mf_mode;
10552 start_params->sd_vlan_tag = OVLAN(sc);
10555 start_params->network_cos_mode = STATIC_COS;
10557 start_params->network_cos_mode = FW_WRR;
10560 //start_params->gre_tunnel_mode = 0;
10561 //start_params->gre_tunnel_rss = 0;
10573 if (!(sc->devinfo.pcie_cap_flags & BXE_PM_CAPABLE_FLAG)) {
10578 pmcsr = pci_read_config(sc->dev,
10579 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10584 pci_write_config(sc->dev,
10585 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10606 if (sc->wol) {
10610 pci_write_config(sc->dev,
10611 (sc->devinfo.pcie_pm_cap_reg + PCIR_POWER_STATUS),
10623 return (-1);
10653 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
10755 } while (--cnt > 0);
10759 return (-1);
10834 sc->devinfo.shmem_base =
10835 sc->link_params.shmem_base =
10838 if (sc->devinfo.shmem_base) {
10850 return (-1);
10879 * - PCIE core
10880 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
10881 * - IGU
10882 * - MISC (including AEU)
10883 * - GRC
10884 * - RBCN, RBCP
10932 * - all xxMACs are handled by the elink code.
11014 } while (cnt-- > 0);
11023 return (-1);
11033 return (-1);
11046 * Wait for 1ms to empty GLUE and PCI-E core queues,
11072 return (-1);
11082 * re-enable attentions
11104 rc = -1;
11111 rc = -1;
11118 rc = -1;
11126 rc = -1;
11149 sc->is_leader = 0;
11158 * - HC configuration
11159 * - Queue's CDU context
11169 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->rx.flags);
11170 bxe_set_bit(ECORE_Q_FLG_HC, &init_params->tx.flags);
11172 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->rx.flags);
11173 bxe_set_bit(ECORE_Q_FLG_HC_EN, &init_params->tx.flags);
11176 init_params->rx.hc_rate =
11177 sc->hc_rx_ticks ? (1000000 / sc->hc_rx_ticks) : 0;
11178 init_params->tx.hc_rate =
11179 sc->hc_tx_ticks ? (1000000 / sc->hc_tx_ticks) : 0;
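/*
 * hc_rx_ticks/hc_tx_ticks are coalescing intervals (presumably microseconds);
 * hc_rate is the corresponding events-per-second value (1000000 / ticks),
 * with 0 ticks yielding a rate of 0.
 */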
11182 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id = fp->fw_sb_id;
11185 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11186 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
11189 init_params->max_cos = sc->max_cos;
11192 fp->index, init_params->max_cos);
11195 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
11197 /* fp->txdata[cos]->cid */
11198 cxt_index = fp->index / ILT_PAGE_CIDS;
11199 cxt_offset = fp->index - (cxt_index * ILT_PAGE_CIDS);
11200 init_params->cxts[cos] = &sc->context[cxt_index].vcxt[cxt_offset].eth;
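/*
 * Each CID's context sits in an ILT page of ILT_PAGE_CIDS entries: cxt_index
 * selects the page (index / ILT_PAGE_CIDS) and cxt_offset the slot within it,
 * i.e. index % ILT_PAGE_CIDS written as a subtraction.
 */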
11204 /* set flags that are common for the Tx-only and not normal connections */
11227 * tx only connections can support tx-switching, though their
11228 * CoS-ness doesn't survive the loopback
11230 if (sc->flags & BXE_TX_SWITCHING) {
11250 if (if_getcapenable(sc->ifp) & IFCAP_LRO) {
11272 gen_init->stat_id = bxe_stats_id(fp);
11273 gen_init->spcl_id = fp->cl_id;
11274 gen_init->mtu = sc->mtu;
11275 gen_init->cos = cos;
11288 pause->sge_th_lo = SGE_TH_LO(sc);
11289 pause->sge_th_hi = SGE_TH_HI(sc);
11292 if (sc->dropless_fc &&
11293 (pause->sge_th_hi + FW_PREFETCH_CNT) >
11299 tpa_agg_size = (2 * sc->mtu);
11300 if (tpa_agg_size < sc->max_aggregation_size) {
11301 tpa_agg_size = sc->max_aggregation_size;
11304 max_sge = SGE_PAGE_ALIGN(sc->mtu) >> SGE_PAGE_SHIFT;
11305 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
11306 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
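/*
 * max_sge starts as the number of pages needed for one MTU-sized buffer; the
 * statement above rounds that up to a multiple of PAGES_PER_SGE and shifts it
 * down, giving the number of SGE entries (each spanning PAGES_PER_SGE pages)
 * that a single packet may consume.
 */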
11309 /* pause - not for e1 */
11311 pause->bd_th_lo = BD_TH_LO(sc);
11312 pause->bd_th_hi = BD_TH_HI(sc);
11314 pause->rcq_th_lo = RCQ_TH_LO(sc);
11315 pause->rcq_th_hi = RCQ_TH_HI(sc);
11318 if (sc->dropless_fc &&
11319 pause->bd_th_hi + FW_PREFETCH_CNT >
11320 sc->rx_ring_size) {
11324 if (sc->dropless_fc &&
11325 pause->rcq_th_hi + FW_PREFETCH_CNT >
11330 pause->pri_map = 1;
11334 rxq_init->dscr_map = fp->rx_dma.paddr;
11335 rxq_init->sge_map = fp->rx_sge_dma.paddr;
11336 rxq_init->rcq_map = fp->rcq_dma.paddr;
11337 rxq_init->rcq_np_map = (fp->rcq_dma.paddr + BCM_PAGE_SIZE);
11343 rxq_init->buf_sz = (fp->rx_buf_size -
11346 rxq_init->cl_qzone_id = fp->cl_qzone_id;
11347 rxq_init->tpa_agg_sz = tpa_agg_size;
11348 rxq_init->sge_buf_sz = sge_sz;
11349 rxq_init->max_sges_pkt = max_sge;
11350 rxq_init->rss_engine_id = SC_FUNC(sc);
11351 rxq_init->mcast_engine_id = SC_FUNC(sc);
11358 rxq_init->max_tpa_queues = MAX_AGG_QS(sc);
11360 rxq_init->cache_line_log = BXE_RX_ALIGN_SHIFT;
11361 rxq_init->fw_sb_id = fp->fw_sb_id;
11363 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
11370 rxq_init->silent_removal_value =
11371 sc->devinfo.mf_info.afex_def_vlan_tag;
11372 rxq_init->silent_removal_mask = EVL_VLID_MASK;
11385 * fp->txdata[cos]->tx_dma.paddr;
11387 txq_init->dscr_map = fp->tx_dma.paddr;
11388 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
11389 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
11390 txq_init->fw_sb_id = fp->fw_sb_id;
11396 txq_init->tss_leading_cl_id = BXE_FP(sc, 0, cl_id);
11401 * 1) RESET->INIT
11402 * 2) INIT->SETUP
11414 BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index);
11416 bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
11432 BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc);
11442 setup_params->flags = bxe_get_q_flags(sc, fp, leading);
11445 bxe_pf_q_prep_general(sc, fp, &setup_params->gen_params,
11449 &setup_params->pause_params,
11450 &setup_params->rxq_params);
11453 &setup_params->txq_params,
11462 BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc);
11472 return (bxe_setup_queue(sc, &sc->fp[0], TRUE));
11499 if (rss_obj->udp_rss_v4) {
11502 if (rss_obj->udp_rss_v6) {
11509 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
11527 return (bxe_config_rss_pf(sc, &sc->rss_conf_obj, config_hash));
11540 for (i = 0; i < sizeof(sc->rss_conf_obj.ind_table); i++) {
11541 sc->rss_conf_obj.ind_table[i] =
11542 (sc->fp->cl_id + (i % num_eth_queues));
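/*
 * The RSS indirection table is filled round-robin: entry i points at the
 * client ID of ethernet queue (i % num_eth_queues), spreading flows evenly
 * across the configured queues.
 */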
11545 if (sc->udp_rss) {
11546 sc->rss_conf_obj.udp_rss_v4 = sc->rss_conf_obj.udp_rss_v6 = 1;
11551 * per-port, so if explicit configuration is needed, do it only
11554 * For 57712 and newer it's a per-function configuration.
11556 return (bxe_config_rss_eth(sc, sc->port.pmf || !CHIP_IS_E1x(sc)));
11611 return (bxe_set_mac_one(sc, sc->link_params.mac_addr,
11612 &sc->sp_objs->mac_obj,
11621 if (sc->link_params.num_phys <= 1) {
11625 if (sc->link_vars.link_up) {
11628 if ((sc->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
11629 (sc->link_params.phy[ELINK_EXT_PHY2].supported &
11633 switch (elink_phy_selection(&sc->link_params)) {
11660 if (sc->link_params.multi_phy_config & PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11678 if (CHIP_IS_E1x(sc) && (sc->mtu > 5000)) {
11679 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_TX;
11681 sc->link_params.req_fc_auto_adv = ELINK_FLOW_CTRL_BOTH;
11691 sc->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
11694 switch (sc->link_vars.ieee_fc &
11698 sc->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
11703 sc->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
11715 uint16_t line_speed = sc->link_vars.line_speed;
11718 bxe_extract_max_cfg(sc, sc->devinfo.mf_info.mf_config[SC_VN(sc)]);
11744 data->line_speed = line_speed;
11747 if (!sc->link_vars.link_up || (sc->flags & BXE_MF_FUNC_DIS)) {
11748 bxe_set_bit(BXE_LINK_REPORT_LINK_DOWN, &data->link_report_flags);
11752 if (sc->link_vars.duplex == DUPLEX_FULL) {
11753 bxe_set_bit(BXE_LINK_REPORT_FULL_DUPLEX, &data->link_report_flags);
11757 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_RX) {
11758 bxe_set_bit(BXE_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
11762 if (sc->link_vars.flow_ctrl & ELINK_FLOW_CTRL_TX) {
11763 bxe_set_bit(BXE_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
11782 if (!memcmp(&cur_data, &sc->last_reported_link, sizeof(cur_data)) ||
11784 &sc->last_reported_link.link_report_flags) &&
11791 cur_data.link_report_flags, sc->last_reported_link.link_report_flags);
11792 sc->link_cnt++;
11794 ELINK_DEBUG_P1(sc, "link status change count = %x\n", sc->link_cnt);
11796 memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
11800 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
11824 flow = "ON - receive & transmit";
11829 flow = "ON - receive";
11834 flow = "ON - transmit";
11842 if_link_state_change(sc->ifp, LINK_STATE_UP);
11859 if (sc->state != BXE_STATE_OPEN) {
11864 elink_link_status_update(&sc->link_params, &sc->link_vars);
11866 sc->port.supported[0] |= (ELINK_SUPPORTED_10baseT_Half |
11878 sc->port.advertising[0] = sc->port.supported[0];
11880 sc->link_params.sc = sc;
11881 sc->link_params.port = SC_PORT(sc);
11882 sc->link_params.req_duplex[0] = DUPLEX_FULL;
11883 sc->link_params.req_flow_ctrl[0] = ELINK_FLOW_CTRL_NONE;
11884 sc->link_params.req_line_speed[0] = SPEED_10000;
11885 sc->link_params.speed_cap_mask[0] = 0x7f0000;
11886 sc->link_params.switch_cfg = ELINK_SWITCH_CFG_10G;
11889 sc->link_vars.mac_type = ELINK_MAC_TYPE_EMAC;
11890 sc->link_vars.line_speed = ELINK_SPEED_1000;
11891 sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11894 sc->link_vars.mac_type = ELINK_MAC_TYPE_BMAC;
11895 sc->link_vars.line_speed = ELINK_SPEED_10000;
11896 sc->link_vars.link_status = (LINK_STATUS_LINK_UP |
11900 sc->link_vars.link_up = 1;
11902 sc->link_vars.duplex = DUPLEX_FULL;
11903 sc->link_vars.flow_ctrl = ELINK_FLOW_CTRL_NONE;
11906 REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + sc->link_params.port*4, 0);
11913 if (sc->link_vars.link_up) {
11930 uint16_t req_line_speed = sc->link_params.req_line_speed[cfg_idx];
11931 struct elink_params *lp = &sc->link_params;
11960 sc->link_params.feature_config_flags |= feat;
11966 lp->loopback_mode = ELINK_LOOPBACK_XGXS;
11968 if (lp->req_line_speed[cfg_idx] < ELINK_SPEED_10000) {
11969 if (lp->speed_cap_mask[cfg_idx] &
11971 lp->req_line_speed[cfg_idx] = ELINK_SPEED_10000;
11973 lp->req_line_speed[cfg_idx] = ELINK_SPEED_1000;
11979 lp->loopback_mode = ELINK_LOOPBACK_EXT;
11982 rc = elink_phy_init(&sc->link_params, &sc->link_vars);
11988 if (sc->link_vars.link_up) {
11997 sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
12007 mc_mac->mac = (uint8_t *)LLADDR(sdl);
12016 if_t ifp = sc->ifp;
12020 ECORE_LIST_INIT(&p->mcast_list);
12021 p->mcast_list_len = 0;
12034 return (-1);
12040 ECORE_LIST_PUSH_TAIL(&mc_mac[i].link, &p->mcast_list);
12048 p->mcast_list_len = mc_count;
12057 ECORE_LIST_FIRST_ENTRY(&p->mcast_list,
12072 rparam.mcast_obj = &sc->mcast_obj;
12116 struct ecore_vlan_mac_obj *mac_obj = &ctx->sc->sp_objs->mac_obj;
12119 if (ctx->rc < 0)
12122 rc = bxe_set_mac_one(ctx->sc, (uint8_t *)LLADDR(sdl), mac_obj, TRUE,
12123 ECORE_UC_LIST_MAC, &ctx->ramrod_flags);
12126 if (rc == -EEXIST)
12127 BLOGD(ctx->sc, DBG_SP, "Failed to schedule ADD operations (EEXIST)\n");
12129 BLOGE(ctx->sc, "Failed to schedule ADD operations (%d)\n", rc);
12130 ctx->rc = rc;
12139 if_t ifp = sc->ifp;
12140 struct ecore_vlan_mac_obj *mac_obj = &sc->sp_objs->mac_obj;
12164 if_t ifp = sc->ifp;
12167 if (sc->state != BXE_STATE_OPEN) {
12168 BLOGD(sc, DBG_SP, "state is %x, returning\n", sc->state);
12172 BLOGD(sc, DBG_SP, "if_flags(ifp)=0x%x\n", if_getflags(sc->ifp));
12192 sc->rx_mode = rx_mode;
12195 if (bxe_test_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state)) {
12197 bxe_set_bit(ECORE_FILTER_RX_MODE_SCHED, &sc->sp_state);
12243 if ((sc->state == BXE_STATE_OPEN) &&
12244 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12246 callout_reset(&sc->periodic_callout, hz,
12253 if ((sc->state != BXE_STATE_OPEN) ||
12254 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
12255 BLOGW(sc, "periodic callout exit (state=0x%x)\n", sc->state);
12263 if (bxe_watchdog(sc, &sc->fp[i]) != 0) {
12264 /* Ruh-Roh, chip was reset! */
12272 * to the sc->port.pmf in the bxe_nic_load() or bxe_pmf_update() and
12276 if (sc->port.pmf) {
12278 elink_period_func(&sc->link_params, &sc->link_vars);
12283 if (IS_PF(sc) && !(sc->flags & BXE_NO_PULSE)) {
12288 ++sc->fw_drv_pulse_wr_seq;
12289 sc->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
12291 drv_pulse = sc->fw_drv_pulse_wr_seq;
12314 if ((sc->state == BXE_STATE_OPEN) &&
12315 (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_GO)) {
12317 callout_reset(&sc->periodic_callout, hz,
12325 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
12326 callout_reset(&sc->periodic_callout, hz, bxe_periodic_callout_func, sc);
12332 atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
12333 callout_drain(&sc->periodic_callout);
12343 if ((sc->recovery_state == BXE_RECOVERY_FAILED) &&
12344 (sc->state == BXE_STATE_ERROR)) {
12354 __func__, sc, sc->state, sc->recovery_state, sc->error_status);
12356 switch(sc->recovery_state) {
12362 (sc->error_status & BXE_ERR_MCP_ASSERT) ||
12363 (sc->error_status & BXE_ERR_GLOBAL)) {
12366 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12370 sc->state = BXE_STATE_ERROR;
12371 sc->recovery_state = BXE_RECOVERY_FAILED;
12375 sc->error_status);
12393 sc->is_leader = 1;
12396 /* If interface has been removed - break */
12398 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12404 sc->recovery_state = BXE_RECOVERY_WAIT;
12416 if (sc->is_leader) {
12439 &sc->sp_err_timeout_task, hz/10);
12451 sc->recovery_state = BXE_RECOVERY_FAILED;
12452 sc->state = BXE_STATE_ERROR;
12461 * to continue as a non-leader.
12466 } else { /* non-leader */
12480 sc->is_leader = 1;
12485 &sc->sp_err_timeout_task, hz/10);
12495 &sc->sp_err_timeout_task, hz/10);
12500 sc->eth_stats.recoverable_error;
12502 sc->eth_stats.unrecoverable_error;
12504 sc->recovery_state =
12508 sc->recovery_state = BXE_RECOVERY_FAILED;
12509 sc->state = BXE_STATE_ERROR;
12512 sc->state, sc->recovery_state, sc->error_status);
12513 sc->error_status = 0;
12515 sc->recovery_state =
12520 " recovery_state=0x%x \n", sc->error_status,
12521 sc->state, sc->recovery_state);
12524 sc->error_status = 0;
12526 sc->eth_stats.recoverable_error =
12528 sc->eth_stats.unrecoverable_error =
12543 if(sc->recovery_state == BXE_RECOVERY_WAIT) {
12546 if(sc->error_status) {
12547 if (sc->state == BXE_STATE_OPEN) {
12550 if (sc->link_vars.link_up) {
12551 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
12553 sc->recovery_state = BXE_RECOVERY_INIT;
12555 sc->unit, sc->error_status, sc->recovery_state);
12568 __func__, sc->state, sc->recovery_state, sc->error_status);
12570 if((sc->recovery_state == BXE_RECOVERY_FAILED) &&
12571 (sc->state == BXE_STATE_ERROR)) {
12575 if ((sc->error_status) && (sc->trigger_grcdump)) {
12578 if (sc->recovery_state != BXE_RECOVERY_DONE) {
12581 } else if (sc->error_status) {
12601 sc->state = BXE_STATE_OPENING_WAITING_LOAD;
12608 sc->last_reported_link_state = LINK_STATE_UNKNOWN;
12614 sc->state = BXE_STATE_CLOSED;
12620 sc->state = BXE_STATE_CLOSED;
12626 sc->state = BXE_STATE_CLOSED;
12639 sc->state = BXE_STATE_CLOSED;
12647 sc->state = BXE_STATE_CLOSED;
12666 sc->state = BXE_STATE_CLOSED;
12673 sc->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
12675 sc->flags |= BXE_NO_PULSE;
12679 sc->state = BXE_STATE_CLOSED;
12686 /* Init per-function objects */
12692 sc->devinfo.mf_info.afex_def_vlan_tag = -1;
12695 sc->state = BXE_STATE_OPENING_WAITING_PORT;
12700 sc->state = BXE_STATE_ERROR;
12709 sc->state = BXE_STATE_ERROR;
12718 sc->state = BXE_STATE_ERROR;
12723 rc = bxe_setup_queue(sc, &sc->fp[i], FALSE);
12726 sc->state = BXE_STATE_ERROR;
12734 sc->state = BXE_STATE_ERROR;
12741 sc->state = BXE_STATE_OPEN;
12749 sc->state = BXE_STATE_ERROR;
12753 if (sc->port.pmf) {
12756 sc->state = BXE_STATE_ERROR;
12761 sc->link_params.feature_config_flags &=
12777 sc->state = BXE_STATE_DIAG;
12784 if (sc->port.pmf) {
12811 if_setdrvflags(sc->ifp, IFF_DRV_RUNNING);
12835 sc->port.pmf = 0;
12864 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
12869 if((sc->state == BXE_STATE_ERROR) &&
12870 (sc->recovery_state == BXE_RECOVERY_FAILED)) {
12873 "Reboot/Power-cycle the system\n" );
12915 sc->recovery_state = BXE_RECOVERY_FAILED;
12928 sc->recovery_state = BXE_RECOVERY_DONE;
12938 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING);
12975 ifmedia_init(&sc->ifmedia, IFM_IMASK,
12980 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_FDX | sc->media), 0, NULL);
12981 ifmedia_add(&sc->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
12982 ifmedia_set(&sc->ifmedia, (IFM_ETHER | IFM_AUTO));
12984 sc->ifmedia.ifm_media = sc->ifmedia.ifm_cur->ifm_media; /* XXX ? */
12985 BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media);
12991 if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
12999 if_setmtu(ifp, sc->mtu);
13023 if_setsendqlen(ifp, sc->tx_ring_size);
13027 sc->ifp = ifp;
13030 ether_ifattach(ifp, sc->link_params.mac_addr);
13042 if (sc->bar[i].resource != NULL) {
13043 bus_release_resource(sc->dev,
13045 sc->bar[i].rid,
13046 sc->bar[i].resource);
13059 memset(sc->bar, 0, sizeof(sc->bar));
13064 /* Run `pciconf -lb` to see mappings */
13069 sc->bar[i].rid = PCIR_BAR(i);
13076 if ((sc->bar[i].resource =
13077 bus_alloc_resource_any(sc->dev,
13079 &sc->bar[i].rid,
13084 sc->bar[i].tag = rman_get_bustag(sc->bar[i].resource);
13085 sc->bar[i].handle = rman_get_bushandle(sc->bar[i].resource);
13086 sc->bar[i].kva = (vm_offset_t)rman_get_virtual(sc->bar[i].resource);
13088 BLOGI(sc, "PCI BAR%d [%02x] memory allocated: %#jx-%#jx (%jd) -> %#jx\n",
13090 rman_get_start(sc->bar[i].resource),
13091 rman_get_end(sc->bar[i].resource),
13092 rman_get_size(sc->bar[i].resource),
13093 (uintmax_t)sc->bar[i].kva);
13106 * holds the relative-function number and absolute-function number. The
13107 * absolute-function number appears only in E2 and above. Before that
13113 sc->pfunc_rel =
13115 sc->path_id =
13119 sc->pfunc_abs = ((sc->pfunc_rel << 1) | sc->path_id);
13121 sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
13126 sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
13141 if (sc->devinfo.shmem2_base != 0) {
13162 if (pci_find_cap(sc->dev, PCIY_EXPRESS, &pcie_reg) == 0) {
13165 return (pci_read_config(sc->dev, (pcie_reg + reg), width));
13193 if (pci_find_cap(sc->dev, PCIY_PMG, &reg) == 0) {
13197 sc->devinfo.pcie_cap_flags |= BXE_PM_CAPABLE_FLAG;
13198 sc->devinfo.pcie_pm_cap_reg = (uint16_t)reg;
13207 sc->devinfo.pcie_link_speed =
13211 sc->devinfo.pcie_link_width =
13213 if (sc->devinfo.pcie_link_speed > 1) {
13214 sc->devinfo.pcie_link_width =
13218 sc->devinfo.pcie_link_speed =
13220 sc->devinfo.pcie_link_width =
13225 sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
13227 sc->devinfo.pcie_cap_flags |= BXE_PCIE_CAPABLE_FLAG;
13228 sc->devinfo.pcie_pcie_cap_reg = (uint16_t)reg;
13231 if (pci_find_cap(sc->dev, PCIY_MSI, &reg) == 0) {
13235 sc->devinfo.pcie_cap_flags |= BXE_MSI_CAPABLE_FLAG;
13236 sc->devinfo.pcie_msi_cap_reg = (uint16_t)reg;
13240 /* check if MSI-X capability is enabled */
13241 if (pci_find_cap(sc->dev, PCIY_MSIX, &reg) == 0) {
13243 BLOGD(sc, DBG_LOAD, "Found MSI-X capability at 0x%04x\n", reg);
13245 sc->devinfo.pcie_cap_flags |= BXE_MSIX_CAPABLE_FLAG;
13246 sc->devinfo.pcie_msix_cap_reg = (uint16_t)reg;
13254 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13257 /* get the outer vlan if we're in switch-dependent mode */
13260 mf_info->ext_id = (uint16_t)val;
13262 mf_info->multi_vnics_mode = 1;
13264 if (!VALID_OVLAN(mf_info->ext_id)) {
13265 BLOGE(sc, "Invalid VLAN (%d)\n", mf_info->ext_id);
13270 if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13272 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ISCSI;
13273 } else if ((mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_PROTOCOL_MASK) ==
13275 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_FCOE;
13277 mf_info->mf_protos_supported |= MF_PROTO_SUPPORT_ETHERNET;
13280 mf_info->vnics_per_port =
13312 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13316 * There is no outer vlan if we're in switch-independent mode.
13317 * If the mac is valid then assume multi-function.
13322 mf_info->multi_vnics_mode = ((val & MACP_FUNC_CFG_FLAGS_MASK) != 0);
13324 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13326 mf_info->vnics_per_port =
13335 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13340 mf_info->multi_vnics_mode = 1;
13346 mf_info->ext_id =
13350 mf_info->default_vlan =
13354 mf_info->niv_allowed_priorities =
13358 mf_info->niv_default_cos =
13362 mf_info->afex_vlan_mode =
13366 mf_info->niv_mba_enabled =
13370 mf_info->mf_protos_supported = bxe_get_shmem_ext_proto_support_flags(sc);
13372 mf_info->vnics_per_port =
13381 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13391 mf_info->mf_config[SC_VN(sc)]);
13393 mf_info->multi_vnics_mode);
13395 mf_info->vnics_per_port);
13397 mf_info->ext_id);
13399 mf_info->min_bw[0], mf_info->min_bw[1],
13400 mf_info->min_bw[2], mf_info->min_bw[3]);
13402 mf_info->max_bw[0], mf_info->max_bw[1],
13403 mf_info->max_bw[2], mf_info->max_bw[3]);
13405 sc->mac_addr_str);
13409 if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
13415 if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
13417 mf_info->vnics_per_port, mf_info->multi_vnics_mode);
13421 if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13422 /* vnic id > 0 must have valid ovlan in switch-dependent mode */
13429 if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
13431 mf_info->multi_vnics_mode, OVLAN(sc));
13437 * sure that all non-hidden functions have a valid ovlan. If SF,
13438 * make sure that all non-hidden functions have an invalid ovlan.
13444 (((mf_info->multi_vnics_mode) && !VALID_OVLAN(ovlan1)) ||
13445 ((!mf_info->multi_vnics_mode) && VALID_OVLAN(ovlan1)))) {
13448 i, mf_info->multi_vnics_mode, ovlan1);
13481 struct bxe_mf_info *mf_info = &sc->devinfo.mf_info;
13486 mf_info->vnics_per_port = 1;
13487 mf_info->multi_vnics_mode = FALSE;
13488 mf_info->path_has_ovlan = FALSE;
13489 mf_info->mf_mode = SINGLE_FUNCTION;
13495 if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
13500 /* get the MF mode (switch dependent / independent / single-function) */
13512 mf_info->mf_mode = MULTI_FUNCTION_SI;
13527 mf_info->mf_mode = MULTI_FUNCTION_SD;
13542 * Mark MF mode as NIV if MCP version includes NPAR-SD support
13549 mf_info->mf_mode = MULTI_FUNCTION_AFEX;
13565 if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
13566 mf_info->path_has_ovlan = TRUE;
13567 } else if (mf_info->mf_mode == SINGLE_FUNCTION) {
13570 * 4-port mode, this is good enough to check vnic-0 of the other port
13579 mf_info->path_has_ovlan = VALID_OVLAN((uint16_t)val) ? 1 : 0;
13583 if (mf_info->mf_mode == SINGLE_FUNCTION) {
13594 mf_info->mf_config[SC_VN(sc)] =
13597 switch(mf_info->mf_mode)
13617 mf_info->mf_mode);
13627 mf_info->min_bw[vnic] =
13629 mf_info->max_bw[vnic] =
13646 sc->link_params.sc = sc;
13647 sc->link_params.port = port;
13650 sc->devinfo.hw_config =
13652 sc->devinfo.hw_config2 =
13655 sc->link_params.hw_led_mode =
13656 ((sc->devinfo.hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
13660 sc->port.config =
13664 sc->link_params.speed_cap_mask[0] =
13666 sc->link_params.speed_cap_mask[1] =
13670 sc->link_params.lane_config =
13675 sc->port.link_config[ELINK_INT_PHY] = val;
13676 sc->link_params.switch_cfg = (val & PORT_FEATURE_CONNECTED_SWITCH_MASK);
13677 sc->port.link_config[ELINK_EXT_PHY1] =
13683 sc->link_params.feature_config_flags |=
13686 sc->link_params.feature_config_flags &=
13691 sc->link_params.multi_phy_config =
13695 sc->port.ext_phy_config =
13711 *sc->mac_addr_str = 0;
13714 sc->link_params.mac_addr[0] = (uint8_t)(mac_hi >> 8);
13715 sc->link_params.mac_addr[1] = (uint8_t)(mac_hi);
13716 sc->link_params.mac_addr[2] = (uint8_t)(mac_lo >> 24);
13717 sc->link_params.mac_addr[3] = (uint8_t)(mac_lo >> 16);
13718 sc->link_params.mac_addr[4] = (uint8_t)(mac_lo >> 8);
13719 sc->link_params.mac_addr[5] = (uint8_t)(mac_lo);
13720 snprintf(sc->mac_addr_str, sizeof(sc->mac_addr_str),
13722 sc->link_params.mac_addr[0], sc->link_params.mac_addr[1],
13723 sc->link_params.mac_addr[2], sc->link_params.mac_addr[3],
13724 sc->link_params.mac_addr[4], sc->link_params.mac_addr[5]);
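/*
 * The MAC address is delivered (presumably from shared memory) as two 32-bit
 * words: mac_hi carries bytes 0-1 in its low 16 bits and mac_lo bytes 2-5, so
 * the shifts above peel the bytes off most-significant first.
 */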
13725 BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str);
13777 if ((bxe_mrrs < -1) || (bxe_mrrs > 3)) {
13779 bxe_mrrs = -1;
13794 sc->interrupt_mode = bxe_interrupt_mode;
13795 sc->max_rx_bufs = bxe_max_rx_bufs;
13796 sc->hc_rx_ticks = bxe_hc_rx_ticks;
13797 sc->hc_tx_ticks = bxe_hc_tx_ticks;
13798 sc->max_aggregation_size = bxe_max_aggregation_size;
13799 sc->mrrs = bxe_mrrs;
13800 sc->autogreeen = bxe_autogreeen;
13801 sc->udp_rss = bxe_udp_rss;
13804 sc->num_queues = 1;
13806 sc->num_queues =
13809 if (sc->num_queues > mp_ncpus) {
13810 sc->num_queues = mp_ncpus;
13827 sc->interrupt_mode,
13828 sc->num_queues,
13829 sc->hc_rx_ticks,
13830 sc->hc_tx_ticks,
13832 sc->max_aggregation_size,
13833 sc->mrrs,
13834 sc->autogreeen,
13835 sc->udp_rss);
13844 switch (sc->link_params.phy[phy_idx].media_type) {
13848 sc->media = IFM_10G_SR;
13853 sc->media = IFM_1000_SX;
13858 BLOGI(sc, "Found 10GBase-CX4 media.\n");
13859 sc->media = IFM_10G_CX4;
13864 sc->media = IFM_10G_TWINAX;
13868 if (sc->link_params.speed_cap_mask[0] &
13870 BLOGI(sc, "Found 10GBase-T media.\n");
13871 sc->media = IFM_10G_T;
13874 BLOGI(sc, "Found 1000Base-T media.\n");
13875 sc->media = IFM_1000_T;
13881 sc->media = 0;
13887 sc->media = 0;
13907 sc->igu_base_sb = 0xff;
13911 igu_sb_cnt = sc->igu_sb_cnt;
13912 sc->igu_base_sb = ((CHIP_IS_MODE_4_PORT(sc) ? pfid : vn) *
13914 sc->igu_dsb_id = (E1HVN_MAX * FP_SB_MAX_E1x +
13919 /* IGU in normal mode - read CAM */
13934 sc->igu_dsb_id = igu_sb_id;
13936 if (sc->igu_base_sb == 0xff) {
13937 sc->igu_base_sb = igu_sb_id;
13950 sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
13954 return (-1);
13971 sc->devinfo.vendor_id = pci_get_vendor(sc->dev);
13972 sc->devinfo.device_id = pci_get_device(sc->dev);
13973 sc->devinfo.subvendor_id = pci_get_subvendor(sc->dev);
13974 sc->devinfo.subdevice_id = pci_get_subdevice(sc->dev);
13977 sc->devinfo.chip_id =
13978 sc->link_params.chip_id =
13987 sc->devinfo.chip_id = ((CHIP_NUM_57811 << 16) |
13988 (sc->devinfo.chip_id & 0x0000ffff));
13990 sc->devinfo.chip_id = ((CHIP_NUM_57811_MF << 16) |
13991 (sc->devinfo.chip_id & 0x0000ffff));
13993 sc->devinfo.chip_id |= 0x1;
13998 sc->devinfo.chip_id,
13999 ((sc->devinfo.chip_id >> 16) & 0xffff),
14000 ((sc->devinfo.chip_id >> 12) & 0xf),
14001 ((sc->devinfo.chip_id >> 4) & 0xff),
14002 ((sc->devinfo.chip_id >> 0) & 0xf));
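/*
 * chip_id layout as decoded in the message above: bits 31:16 chip number,
 * 15:12 silicon revision, 11:4 metal revision, 3:0 bond id (whose low bit is
 * also used below to flag single-port devices).
 */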
14005 if ((sc->devinfo.chip_id & 0x1) ||
14008 sc->flags |= BXE_ONE_PORT_FLAG;
14013 sc->doorbell_size = (1 << BXE_DB_SHIFT);
14016 sc->devinfo.chip_port_mode = CHIP_PORT_MODE_NONE; /* E1 & E1h*/
14030 sc->devinfo.chip_port_mode =
14040 sc->devinfo.shmem_base =
14041 sc->link_params.shmem_base =
14043 sc->devinfo.shmem2_base =
14048 sc->devinfo.shmem_base, sc->devinfo.shmem2_base);
14050 if (!sc->devinfo.shmem_base) {
14053 sc->flags |= BXE_NO_MCP_FLAG;
14067 sc->devinfo.bc_ver = SHMEM_RD(sc, dev_info.bc_rev);
14068 snprintf(sc->devinfo.bc_ver_str,
14069 sizeof(sc->devinfo.bc_ver_str),
14071 ((sc->devinfo.bc_ver >> 24) & 0xff),
14072 ((sc->devinfo.bc_ver >> 16) & 0xff),
14073 ((sc->devinfo.bc_ver >> 8) & 0xff));
14074 BLOGD(sc, DBG_LOAD, "Bootcode version: %s\n", sc->devinfo.bc_ver_str);
14077 sc->devinfo.mf_cfg_base = bxe_get_shmem_mf_cfg_base(sc);
14078 BLOGD(sc, DBG_LOAD, "mf_cfg_base=0x%08x\n", sc->devinfo.mf_cfg_base);
14081 pci_write_config(sc->dev, PCICFG_GRC_ADDRESS, 0, 4);
14095 * Enable internal target-read (in case we are probed after PF
14106 sc->devinfo.flash_size =
14108 BLOGD(sc, DBG_LOAD, "nvram flash size: %d\n", sc->devinfo.flash_size);
14118 if (sc->devinfo.pcie_msix_cap_reg != 0) {
14119 val = pci_read_config(sc->dev,
14120 (sc->devinfo.pcie_msix_cap_reg +
14123 sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
14125 sc->igu_sb_cnt = 1;
14128 sc->igu_base_addr = BAR_IGU_INTMEM;
14132 sc->devinfo.int_block = INT_BLOCK_HC;
14133 sc->igu_dsb_id = DEF_SB_IGU_ID;
14134 sc->igu_base_sb = 0;
14136 sc->devinfo.int_block = INT_BLOCK_IGU;
14153 tout--;
14160 return (-1);
14166 sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
14181 * Get base FW non-default (fast path) status block ID. This value is
14186 sc->base_fw_ndsb = ((SC_PORT(sc) * FP_SB_MAX_E1x) + SC_L_ID(sc));
14189 * 57712+ - We currently use one FW SB per IGU SB (Rx and Tx of
14193 sc->base_fw_ndsb = sc->igu_base_sb;
14198 sc->igu_dsb_id, sc->igu_base_sb,
14199 sc->igu_sb_cnt, sc->base_fw_ndsb);
14201 elink_phy_probe(&sc->link_params);
14215 sc->port.supported[0] = 0;
14216 sc->port.supported[1] = 0;
14218 switch (sc->link_params.num_phys) {
14220 sc->port.supported[0] = sc->link_params.phy[ELINK_INT_PHY].supported;
14224 sc->port.supported[0] = sc->link_params.phy[ELINK_EXT_PHY1].supported;
14228 if (sc->link_params.multi_phy_config &
14230 sc->port.supported[1] =
14231 sc->link_params.phy[ELINK_EXT_PHY1].supported;
14232 sc->port.supported[0] =
14233 sc->link_params.phy[ELINK_EXT_PHY2].supported;
14235 sc->port.supported[0] =
14236 sc->link_params.phy[ELINK_EXT_PHY1].supported;
14237 sc->port.supported[1] =
14238 sc->link_params.phy[ELINK_EXT_PHY2].supported;
14244 if (!(sc->port.supported[0] || sc->port.supported[1])) {
14254 sc->port.phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
14258 sc->port.phy_addr =
14262 sc->port.phy_addr =
14267 sc->port.link_config[0]);
14272 BLOGD(sc, DBG_LOAD, "PHY addr 0x%08x\n", sc->port.phy_addr);
14276 if (!(sc->link_params.speed_cap_mask[idx] &
14278 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Half;
14281 if (!(sc->link_params.speed_cap_mask[idx] &
14283 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10baseT_Full;
14286 if (!(sc->link_params.speed_cap_mask[idx] &
14288 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Half;
14291 if (!(sc->link_params.speed_cap_mask[idx] &
14293 sc->port.supported[idx] &= ~ELINK_SUPPORTED_100baseT_Full;
14296 if (!(sc->link_params.speed_cap_mask[idx] &
14298 sc->port.supported[idx] &= ~ELINK_SUPPORTED_1000baseT_Full;
14301 if (!(sc->link_params.speed_cap_mask[idx] &
14303 sc->port.supported[idx] &= ~ELINK_SUPPORTED_2500baseX_Full;
14306 if (!(sc->link_params.speed_cap_mask[idx] &
14308 sc->port.supported[idx] &= ~ELINK_SUPPORTED_10000baseT_Full;
14311 if (!(sc->link_params.speed_cap_mask[idx] &
14313 sc->port.supported[idx] &= ~ELINK_SUPPORTED_20000baseKR2_Full;
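/*
 * Each speed the PHY claims to support is kept only if the matching bit is
 * also set in the shared-memory speed_cap_mask for this configuration index;
 * anything the board configuration disallows is stripped from
 * port.supported[idx].
 */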
14318 sc->port.supported[0], sc->port.supported[1]);
14320 sc->port.supported[0], sc->port.supported[1]);
14330 sc->port.advertising[0] = 0;
14331 sc->port.advertising[1] = 0;
14333 switch (sc->link_params.num_phys) {
14344 sc->link_params.req_duplex[idx] = DUPLEX_FULL;
14345 link_config = sc->port.link_config[idx];
14349 if (sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg) {
14350 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14351 sc->port.advertising[idx] |= sc->port.supported[idx];
14352 if (sc->link_params.phy[ELINK_EXT_PHY1].type ==
14354 sc->port.advertising[idx] |=
14359 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14360 sc->port.advertising[idx] |=
14367 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Full) {
14368 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14369 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Full |
14374 link_config, sc->link_params.speed_cap_mask[idx]);
14380 if (sc->port.supported[idx] & ELINK_SUPPORTED_10baseT_Half) {
14381 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10;
14382 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14383 sc->port.advertising[idx] |= (ADVERTISED_10baseT_Half |
14386 sc->link_params.req_duplex[idx]);
14390 link_config, sc->link_params.speed_cap_mask[idx]);
14396 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Full) {
14397 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14398 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Full |
14403 link_config, sc->link_params.speed_cap_mask[idx]);
14409 if (sc->port.supported[idx] & ELINK_SUPPORTED_100baseT_Half) {
14410 sc->link_params.req_line_speed[idx] = ELINK_SPEED_100;
14411 sc->link_params.req_duplex[idx] = DUPLEX_HALF;
14412 sc->port.advertising[idx] |= (ADVERTISED_100baseT_Half |
14417 link_config, sc->link_params.speed_cap_mask[idx]);
14423 if (sc->port.supported[idx] & ELINK_SUPPORTED_1000baseT_Full) {
14424 sc->link_params.req_line_speed[idx] = ELINK_SPEED_1000;
14425 sc->port.advertising[idx] |= (ADVERTISED_1000baseT_Full |
14430 link_config, sc->link_params.speed_cap_mask[idx]);
14436 if (sc->port.supported[idx] & ELINK_SUPPORTED_2500baseX_Full) {
14437 sc->link_params.req_line_speed[idx] = ELINK_SPEED_2500;
14438 sc->port.advertising[idx] |= (ADVERTISED_2500baseX_Full |
14443 link_config, sc->link_params.speed_cap_mask[idx]);
14449 if (sc->port.supported[idx] & ELINK_SUPPORTED_10000baseT_Full) {
14450 sc->link_params.req_line_speed[idx] = ELINK_SPEED_10000;
14451 sc->port.advertising[idx] |= (ADVERTISED_10000baseT_Full |
14456 link_config, sc->link_params.speed_cap_mask[idx]);
14462 sc->link_params.req_line_speed[idx] = ELINK_SPEED_20000;
14468 link_config, sc->link_params.speed_cap_mask[idx]);
14469 sc->link_params.req_line_speed[idx] = ELINK_SPEED_AUTO_NEG;
14470 sc->port.advertising[idx] = sc->port.supported[idx];
14474 sc->link_params.req_flow_ctrl[idx] =
14477 if (sc->link_params.req_flow_ctrl[idx] == ELINK_FLOW_CTRL_AUTO) {
14478 if (!(sc->port.supported[idx] & ELINK_SUPPORTED_Autoneg)) {
14479 sc->link_params.req_flow_ctrl[idx] = ELINK_FLOW_CTRL_NONE;
14487 sc->link_params.req_line_speed[idx],
14488 sc->link_params.req_duplex[idx],
14489 sc->link_params.req_flow_ctrl[idx],
14490 sc->port.advertising[idx]);
14493 sc->link_params.req_line_speed[idx],
14494 sc->link_params.req_duplex[idx],
14495 sc->port.advertising[idx]);
14503 uint32_t config = sc->port.config;
14510 sc->link_params.lane_config,
14511 sc->link_params.speed_cap_mask[0],
14512 sc->port.link_config[0]);
14515 bxe_link_settings_supported(sc, sc->link_params.switch_cfg);
14518 if (sc->autogreeen == AUTO_GREEN_FORCE_ON) {
14519 sc->link_params.feature_config_flags |=
14521 } else if (sc->autogreeen == AUTO_GREEN_FORCE_OFF) {
14522 sc->link_params.feature_config_flags &=
14525 sc->link_params.feature_config_flags |=
14535 sc->link_params.eee_mode = (ELINK_EEE_MODE_ADV_LPI |
14539 sc->link_params.eee_mode = 0;
14544 ELINK_DEBUG_P1(sc, "detected media type 0x%x\n", sc->media);
14554 sc->tx_ring_size = TX_BD_USABLE;
14555 sc->rx_ring_size = RX_BD_USABLE;
14558 sc->wol = 0;
14593 switch (sc->devinfo.mf_info.mf_mode) {
14633 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent tag */
14646 &sc->parent_dma_tag); /* returned dma tag */
14657 &sc->def_sb_dma, "default status block") != 0) {
14659 bus_dma_tag_destroy(sc->parent_dma_tag);
14663 sc->def_sb = (struct host_sp_status_block *)sc->def_sb_dma.vaddr;
14670 &sc->eq_dma, "event queue") != 0) {
14672 bxe_dma_free(sc, &sc->def_sb_dma);
14673 sc->def_sb = NULL;
14674 bus_dma_tag_destroy(sc->parent_dma_tag);
14678 sc->eq = (union event_ring_elem * )sc->eq_dma.vaddr;
14685 &sc->sp_dma, "slow path") != 0) {
14687 bxe_dma_free(sc, &sc->eq_dma);
14688 sc->eq = NULL;
14689 bxe_dma_free(sc, &sc->def_sb_dma);
14690 sc->def_sb = NULL;
14691 bus_dma_tag_destroy(sc->parent_dma_tag);
14695 sc->sp = (struct bxe_slowpath *)sc->sp_dma.vaddr;
14702 &sc->spq_dma, "slow path queue") != 0) {
14704 bxe_dma_free(sc, &sc->sp_dma);
14705 sc->sp = NULL;
14706 bxe_dma_free(sc, &sc->eq_dma);
14707 sc->eq = NULL;
14708 bxe_dma_free(sc, &sc->def_sb_dma);
14709 sc->def_sb = NULL;
14710 bus_dma_tag_destroy(sc->parent_dma_tag);
14714 sc->spq = (struct eth_spe *)sc->spq_dma.vaddr;
14720 if (bxe_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
14723 bxe_dma_free(sc, &sc->spq_dma);
14724 sc->spq = NULL;
14725 bxe_dma_free(sc, &sc->sp_dma);
14726 sc->sp = NULL;
14727 bxe_dma_free(sc, &sc->eq_dma);
14728 sc->eq = NULL;
14729 bxe_dma_free(sc, &sc->def_sb_dma);
14730 sc->def_sb = NULL;
14731 bus_dma_tag_destroy(sc->parent_dma_tag);
14735 sc->gz_buf = (void *)sc->gz_buf_dma.vaddr;
14737 if ((sc->gz_strm =
14738 malloc(sizeof(*sc->gz_strm), M_DEVBUF, M_NOWAIT)) == NULL) {
14740 bxe_dma_free(sc, &sc->gz_buf_dma);
14741 sc->gz_buf = NULL;
14742 bxe_dma_free(sc, &sc->spq_dma);
14743 sc->spq = NULL;
14744 bxe_dma_free(sc, &sc->sp_dma);
14745 sc->sp = NULL;
14746 bxe_dma_free(sc, &sc->eq_dma);
14747 sc->eq = NULL;
14748 bxe_dma_free(sc, &sc->def_sb_dma);
14749 sc->def_sb = NULL;
14750 bus_dma_tag_destroy(sc->parent_dma_tag);
14759 for (i = 0; i < sc->num_queues; i++) {
14760 fp = &sc->fp[i];
14761 fp->sc = sc;
14762 fp->index = i;
14770 &fp->sb_dma, buf) != 0) {
14776 fp->status_block.e2_sb =
14777 (struct host_hc_status_block_e2 *)fp->sb_dma.vaddr;
14779 fp->status_block.e1x_sb =
14780 (struct host_hc_status_block_e1x *)fp->sb_dma.vaddr;
14790 &fp->tx_dma, buf) != 0) {
14795 fp->tx_chain = (union eth_tx_bd_types *)fp->tx_dma.vaddr;
14802 &fp->tx_chain[TX_BD_TOTAL_PER_PAGE * j - 1].next_bd;
14804 busaddr = (fp->tx_dma.paddr +
14806 tx_next_bd->addr_hi = htole32(U64_HI(busaddr));
14807 tx_next_bd->addr_lo = htole32(U64_LO(busaddr));
14816 &fp->rx_dma, buf) != 0) {
14821 fp->rx_chain = (struct eth_rx_bd *)fp->rx_dma.vaddr;
14828 &fp->rx_chain[RX_BD_TOTAL_PER_PAGE * j - 2];
14830 busaddr = (fp->rx_dma.paddr +
14832 rx_bd->addr_hi = htole32(U64_HI(busaddr));
14833 rx_bd->addr_lo = htole32(U64_LO(busaddr));
14842 &fp->rcq_dma, buf) != 0) {
14847 fp->rcq_chain = (union eth_rx_cqe *)fp->rcq_dma.vaddr;
14855 &fp->rcq_chain[RCQ_TOTAL_PER_PAGE * j - 1];
14857 busaddr = (fp->rcq_dma.paddr +
14859 rx_cqe_next->addr_hi = htole32(U64_HI(busaddr));
14860 rx_cqe_next->addr_lo = htole32(U64_LO(busaddr));
14869 &fp->rx_sge_dma, buf) != 0) {
14874 fp->rx_sge_chain = (struct eth_rx_sge *)fp->rx_sge_dma.vaddr;
14881 &fp->rx_sge_chain[RX_SGE_TOTAL_PER_PAGE * j - 2];
14883 busaddr = (fp->rx_sge_dma.paddr +
14885 rx_sge->addr_hi = htole32(U64_HI(busaddr));
14886 rx_sge->addr_lo = htole32(U64_LO(busaddr));
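/*
 * As with the TX, RX and RCQ chains set up above, the last entries of each
 * SGE page are turned into next-page pointers so the hardware can follow the
 * ring across physically discontiguous DMA pages.
 */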
14894 if (if_getcapenable(sc->ifp) & (IFCAP_TSO4 | IFCAP_TSO6)) {
14905 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14918 &fp->tx_mbuf_tag); /* returned dma tag */
14928 if (bus_dmamap_create(fp->tx_mbuf_tag,
14930 &fp->tx_mbuf_chain[j].m_map)) {
14943 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
14956 &fp->rx_mbuf_tag); /* returned dma tag */
14966 if (bus_dmamap_create(fp->rx_mbuf_tag,
14968 &fp->rx_mbuf_chain[j].m_map)) {
14977 if (bus_dmamap_create(fp->rx_mbuf_tag,
14979 &fp->rx_mbuf_spare_map)) {
14991 rc = bus_dma_tag_create(sc->parent_dma_tag, /* parent tag */
15004 &fp->rx_sge_mbuf_tag); /* returned dma tag */
15014 if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15016 &fp->rx_sge_mbuf_chain[j].m_map)) {
15025 if (bus_dmamap_create(fp->rx_sge_mbuf_tag,
15027 &fp->rx_sge_mbuf_spare_map)) {
15042 if (bus_dmamap_create(fp->rx_mbuf_tag,
15044 &fp->rx_tpa_info[j].bd.m_map)) {
15053 if (bus_dmamap_create(fp->rx_mbuf_tag,
15055 &fp->rx_tpa_info_mbuf_spare_map)) {
15075 if (sc->parent_dma_tag == NULL) {
15079 for (i = 0; i < sc->num_queues; i++) {
15080 fp = &sc->fp[i];
15086 bxe_dma_free(sc, &fp->sb_dma);
15087 memset(&fp->status_block, 0, sizeof(fp->status_block));
15093 bxe_dma_free(sc, &fp->tx_dma);
15094 fp->tx_chain = NULL;
15100 bxe_dma_free(sc, &fp->rx_dma);
15101 fp->rx_chain = NULL;
15107 bxe_dma_free(sc, &fp->rcq_dma);
15108 fp->rcq_chain = NULL;
15114 bxe_dma_free(sc, &fp->rx_sge_dma);
15115 fp->rx_sge_chain = NULL;
15121 if (fp->tx_mbuf_tag != NULL) {
15123 if (fp->tx_mbuf_chain[j].m_map != NULL) {
15124 bus_dmamap_unload(fp->tx_mbuf_tag,
15125 fp->tx_mbuf_chain[j].m_map);
15126 bus_dmamap_destroy(fp->tx_mbuf_tag,
15127 fp->tx_mbuf_chain[j].m_map);
15131 bus_dma_tag_destroy(fp->tx_mbuf_tag);
15132 fp->tx_mbuf_tag = NULL;
15139 if (fp->rx_mbuf_tag != NULL) {
15141 if (fp->rx_mbuf_chain[j].m_map != NULL) {
15142 bus_dmamap_unload(fp->rx_mbuf_tag,
15143 fp->rx_mbuf_chain[j].m_map);
15144 bus_dmamap_destroy(fp->rx_mbuf_tag,
15145 fp->rx_mbuf_chain[j].m_map);
15149 if (fp->rx_mbuf_spare_map != NULL) {
15150 bus_dmamap_unload(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15151 bus_dmamap_destroy(fp->rx_mbuf_tag, fp->rx_mbuf_spare_map);
15161 if (fp->rx_tpa_info[j].bd.m_map != NULL) {
15162 bus_dmamap_unload(fp->rx_mbuf_tag,
15163 fp->rx_tpa_info[j].bd.m_map);
15164 bus_dmamap_destroy(fp->rx_mbuf_tag,
15165 fp->rx_tpa_info[j].bd.m_map);
15169 if (fp->rx_tpa_info_mbuf_spare_map != NULL) {
15170 bus_dmamap_unload(fp->rx_mbuf_tag,
15171 fp->rx_tpa_info_mbuf_spare_map);
15172 bus_dmamap_destroy(fp->rx_mbuf_tag,
15173 fp->rx_tpa_info_mbuf_spare_map);
15176 bus_dma_tag_destroy(fp->rx_mbuf_tag);
15177 fp->rx_mbuf_tag = NULL;
15184 if (fp->rx_sge_mbuf_tag != NULL) {
15186 if (fp->rx_sge_mbuf_chain[j].m_map != NULL) {
15187 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15188 fp->rx_sge_mbuf_chain[j].m_map);
15189 bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15190 fp->rx_sge_mbuf_chain[j].m_map);
15194 if (fp->rx_sge_mbuf_spare_map != NULL) {
15195 bus_dmamap_unload(fp->rx_sge_mbuf_tag,
15196 fp->rx_sge_mbuf_spare_map);
15197 bus_dmamap_destroy(fp->rx_sge_mbuf_tag,
15198 fp->rx_sge_mbuf_spare_map);
15201 bus_dma_tag_destroy(fp->rx_sge_mbuf_tag);
15202 fp->rx_sge_mbuf_tag = NULL;
15210 bxe_dma_free(sc, &sc->gz_buf_dma);
15211 sc->gz_buf = NULL;
15212 free(sc->gz_strm, M_DEVBUF);
15213 sc->gz_strm = NULL;
15219 bxe_dma_free(sc, &sc->spq_dma);
15220 sc->spq = NULL;
15226 bxe_dma_free(sc, &sc->sp_dma);
15227 sc->sp = NULL;
15233 bxe_dma_free(sc, &sc->eq_dma);
15234 sc->eq = NULL;
15240 bxe_dma_free(sc, &sc->def_sb_dma);
15241 sc->def_sb = NULL;
15243 bus_dma_tag_destroy(sc->parent_dma_tag);
15244 sc->parent_dma_tag = NULL;
15248 * Previous driver DMAE transaction may have occurred when pre-boot stage
15250 * transaction, resulting in was-error bit set in the PCI causing all
15251 * hw-to-host PCIe transactions to timeout. If this happened we want to clear
15252 * the interrupt which detected this from the pglueb and the was-done bit
15263 "Clearing 'was-error' bit that was set in pglueb");
15276 return (-1);
15288 if ((sc->pcie_bus == tmp->bus) &&
15289 (sc->pcie_device == tmp->slot) &&
15290 (SC_PATH(sc) == tmp->path)) {
15308 if (tmp->aer) {
15311 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15316 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15336 if (!tmp->aer) {
15338 "Re-marking AER in path %d/%d/%d\n",
15339 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15343 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15344 tmp->aer = 0;
15358 return (-1);
15361 tmp->bus = sc->pcie_bus;
15362 tmp->slot = sc->pcie_device;
15363 tmp->path = SC_PATH(sc);
15364 tmp->aer = 0;
15365 tmp->undi = after_undi ? (1 << SC_PORT(sc)) : 0;
15370 "Marked path %d/%d/%d - finished previous unload\n",
15371 sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
15387 return (-1);
15391 if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
15393 sc->devinfo.bc_ver);
15394 return (-1);
15400 DELAY(((1 << (i - 1)) * 100) * 1000);
15440 vals->bmac_addr = 0;
15441 vals->umac_addr = 0;
15442 vals->xmac_addr = 0;
15443 vals->emac_addr = 0;
15465 vals->bmac_addr = base_addr + offset;
15466 vals->bmac_val[0] = wb_data[0];
15467 vals->bmac_val[1] = wb_data[1];
15469 REG_WR(sc, vals->bmac_addr, wb_data[0]);
15470 REG_WR(sc, vals->bmac_addr + 0x4, wb_data[1]);
15474 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + SC_PORT(sc)*4;
15475 vals->emac_val = REG_RD(sc, vals->emac_addr);
15476 REG_WR(sc, vals->emac_addr, 0);
15485 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
15486 vals->xmac_val = REG_RD(sc, vals->xmac_addr);
15487 REG_WR(sc, vals->xmac_addr, 0);
15495 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
15496 vals->umac_val = REG_RD(sc, vals->umac_addr);
15497 REG_WR(sc, vals->umac_addr, 0);
15527 "UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
15561 elink_set_rx_filter(&sc->link_params, 0);
15595 timer_count--;
15697 (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
15722 rc = -1;
15731 /* non-common reply from MCP might require looping */
15738 } while (--time_counter);
15743 rc = -1;
15755 sc->dcb_state = dcb_on;
15756 sc->dcbx_enabled = dcbx_enabled;
15758 sc->dcb_state = FALSE;
15759 sc->dcbx_enabled = BXE_DCBX_ENABLED_INVALID;
15764 (dcbx_enabled == BXE_DCBX_ENABLED_OFF) ? "user-mode" :
15765 (dcbx_enabled == BXE_DCBX_ENABLED_ON_NEG_OFF) ? "on-chip static" :
15767 "on-chip with negotiation" : "invalid");
15770 /* must be called after sriov-enable */
15796 if (cos < sc->max_cos) {
15797 sc->prio_to_cos[pri] = cos;
15801 cos, pri, (sc->max_cos - 1));
15802 sc->prio_to_cos[pri] = 0;
15816 if (error || !req->newptr) {
15836 uint32_t *eth_stats = (uint32_t *)&sc->eth_stats;
15843 return (-1);
15858 return (-1);
15875 eth_stats = (uint32_t *)&sc->fp[fp_index].eth_q_stats;
15879 return (-1);
15894 return (-1);
15904 elink_link_reset(&sc->link_params, &sc->link_vars, 1);
15918 error = sysctl_handle_int(oidp, &sc->bxe_pause_param, 0, req);
15920 if (error || !req->newptr) {
15923 if ((sc->bxe_pause_param < 0) || (sc->bxe_pause_param > 8)) {
15924 BLOGW(sc, "invalid pause param (%d) - use integers between 0 & 8\n", sc->bxe_pause_param);
15925 sc->bxe_pause_param = 8;
15928 result = (sc->bxe_pause_param << PORT_FEATURE_FLOW_CONTROL_SHIFT);
15931 if((result & 0x400) && !(sc->port.supported[cfg_idx] & ELINK_SUPPORTED_Autoneg)) {
15932 BLOGW(sc, "Does not support Autoneg pause_param %d\n", sc->bxe_pause_param);
15933 return (-EINVAL);
15938 sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_AUTO;
15940 sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_RX;
15943 sc->link_params.req_flow_ctrl[cfg_idx] |= ELINK_FLOW_CTRL_TX;
15944 if(sc->link_params.req_flow_ctrl[cfg_idx] == ELINK_FLOW_CTRL_AUTO)
15945 sc->link_params.req_flow_ctrl[cfg_idx] = ELINK_FLOW_CTRL_NONE;
15948 if (sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG) {
15949 sc->link_params.req_flow_ctrl[cfg_idx] =
15952 sc->link_params.req_fc_auto_adv = 0;
15954 sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_RX;
15957 sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_TX;
15958 if (!sc->link_params.req_fc_auto_adv)
15959 sc->link_params.req_fc_auto_adv |= ELINK_FLOW_CTRL_NONE;
15962 if (sc->link_vars.link_up) {
15965 if (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) {
15969 rc = elink_phy_init(&sc->link_params, &sc->link_vars);
15991 ctx = device_get_sysctl_ctx(sc->dev);
15992 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
15998 snprintf(sc->fw_ver_str, sizeof(sc->fw_ver_str), "%d.%d.%d.%d",
16004 snprintf(sc->mf_mode_str, sizeof(sc->mf_mode_str), "%s",
16005 ((sc->devinfo.mf_info.mf_mode == SINGLE_FUNCTION) ? "Single" :
16006 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SD) ? "MF-SD" :
16007 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_SI) ? "MF-SI" :
16008 (sc->devinfo.mf_info.mf_mode == MULTI_FUNCTION_AFEX) ? "MF-AFEX" :
16011 CTLFLAG_RD, &sc->devinfo.mf_info.vnics_per_port, 0,
16014 snprintf(sc->pci_link_str, sizeof(sc->pci_link_str), "%s x%d",
16015 ((sc->devinfo.pcie_link_speed == 1) ? "2.5GT/s" :
16016 (sc->devinfo.pcie_link_speed == 2) ? "5.0GT/s" :
16017 (sc->devinfo.pcie_link_speed == 4) ? "8.0GT/s" :
16019 sc->devinfo.pcie_link_width);
16021 sc->debug = bxe_debug;
16024 CTLFLAG_RD, sc->devinfo.bc_ver_str, 0,
16027 CTLFLAG_RD, sc->fw_ver_str, 0,
16030 CTLFLAG_RD, sc->mf_mode_str, 0,
16033 CTLFLAG_RD, sc->mac_addr_str, 0,
16036 CTLFLAG_RD, sc->pci_link_str, 0,
16039 CTLFLAG_RW, &sc->debug,
16042 sc->trigger_grcdump = 0;
16044 CTLFLAG_RW, &sc->trigger_grcdump, 0,
16048 sc->grcdump_started = 0;
16049 sc->grcdump_done = 0;
16051 CTLFLAG_RD, &sc->grcdump_done, 0,
16054 sc->rx_budget = bxe_rx_budget;
16056 CTLFLAG_RW, &sc->rx_budget, 0,
16062 "need pause frames- DEF:0/TX:1/RX:2/BOTH:3/AUTO:4/AUTOTX:5/AUTORX:6/AUTORXTX:7/NONE:8");
16081 for (i = 0; i < sc->num_queues; i++) {
16104 for (i = 0; i < sc->num_queues; i++) {
16106 fp = &sc->fp[i];
16108 fp->tx_br = buf_ring_alloc(BXE_BR_SIZE, M_DEVBUF,
16109 M_NOWAIT, &fp->tx_mtx);
16110 if (fp->tx_br == NULL)
16111 return (-1);
16123 for (i = 0; i < sc->num_queues; i++) {
16125 fp = &sc->fp[i];
16127 if (fp->tx_br) {
16128 buf_ring_free(fp->tx_br, M_DEVBUF);
16129 fp->tx_br = NULL;
16140 for (i = 0; i < sc->num_queues; i++) {
16142 fp = &sc->fp[i];
16144 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name),
16145 "bxe%d_fp%d_tx_lock", sc->unit, i);
16146 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF);
16148 snprintf(fp->rx_mtx_name, sizeof(fp->rx_mtx_name),
16149 "bxe%d_fp%d_rx_lock", sc->unit, i);
16150 mtx_init(&fp->rx_mtx, fp->rx_mtx_name, NULL, MTX_DEF);
16160 for (i = 0; i < sc->num_queues; i++) {
16162 fp = &sc->fp[i];
16164 if (mtx_initialized(&fp->tx_mtx)) {
16165 mtx_destroy(&fp->tx_mtx);
16168 if (mtx_initialized(&fp->rx_mtx)) {
16169 mtx_destroy(&fp->rx_mtx);
16194 sc->state = BXE_STATE_CLOSED;
16196 sc->dev = dev;
16197 sc->unit = device_get_unit(dev);
16201 sc->pcie_bus = pci_get_bus(dev);
16202 sc->pcie_device = pci_get_slot(dev);
16203 sc->pcie_func = pci_get_function(dev);
16217 callout_init(&sc->periodic_callout, 1);
16220 sc->chip_tq_flags = CHIP_TQ_NONE;
16221 snprintf(sc->chip_tq_name, sizeof(sc->chip_tq_name),
16222 "bxe%d_chip_tq", sc->unit);
16223 TASK_INIT(&sc->chip_tq_task, 0, bxe_handle_chip_tq, sc);
16224 sc->chip_tq = taskqueue_create(sc->chip_tq_name, M_NOWAIT,
16226 &sc->chip_tq);
16227 taskqueue_start_threads(&sc->chip_tq, 1, PWAIT, /* lower priority */
16228 "%s", sc->chip_tq_name);
16231 &sc->sp_err_timeout_task, 0, bxe_sp_err_timeout_task, sc);
16246 sc->mtu = ETHERMTU;
16262 if (sc->ifp != NULL) {
16263 ether_ifdetach(sc->ifp);
16265 ifmedia_removeall(&sc->ifmedia);
16275 if (sc->ifp != NULL) {
16276 ether_ifdetach(sc->ifp);
16278 ifmedia_removeall(&sc->ifmedia);
16291 if (sc->ifp != NULL) {
16292 ether_ifdetach(sc->ifp);
16294 ifmedia_removeall(&sc->ifmedia);
16306 if (sc->ifp != NULL) {
16307 ether_ifdetach(sc->ifp);
16309 ifmedia_removeall(&sc->ifmedia);
16322 if (sc->ifp != NULL) {
16323 ether_ifdetach(sc->ifp);
16325 ifmedia_removeall(&sc->ifmedia);
16335 sc->fw_seq =
16338 BLOGD(sc, DBG_LOAD, "prev unload fw_seq 0x%04x\n", sc->fw_seq);
16358 sc->qm_cid_count = bxe_set_qm_cid_count(sc);
16359 BLOGD(sc, DBG_LOAD, "qm_cid_count=%d\n", sc->qm_cid_count);
16361 sc->max_cos = 1;
16387 ifp = sc->ifp;
16399 atomic_store_rel_long(&sc->chip_tq_flags, CHIP_TQ_NONE);
16400 if (sc->chip_tq) {
16401 taskqueue_drain(sc->chip_tq, &sc->chip_tq_task);
16402 taskqueue_free(sc->chip_tq);
16403 sc->chip_tq = NULL;
16405 &sc->sp_err_timeout_task);
16409 if (sc->state != BXE_STATE_CLOSED) {
16412 sc->state = BXE_STATE_DISABLED;
16420 ifmedia_removeall(&sc->ifmedia);
16444 if (sc->ifp != NULL) {
16445 if_free(sc->ifp);
16473 if (sc->state != BXE_STATE_CLOSED) {
16485 uint8_t segment,
16490 uint32_t igu_addr = sc->igu_base_addr;
16492 bxe_igu_ack_sb_gen(sc, igu_sb_id, segment, index, op, update, igu_addr);
16527 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16535 bus_space_barrier(sc->bar[BAR0].tag, sc->bar[BAR0].handle, 0, 0,
16540 while (!(REG_RD(sc, igu_addr_ack) & sb_bit) && --cnt) {
16598 shmem_base[0] = sc->devinfo.shmem_base;
16599 shmem2_base[0] = sc->devinfo.shmem2_base;
16608 sc->devinfo.chip_id, 0);
16636 if (sc->mrrs == -1) {
16639 BLOGD(sc, DBG_LOAD, "forcing read order to %d\n", sc->mrrs);
16640 r_order = sc->mrrs;
16650 uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
16656 * When pretending to be PF, the pretend value is the function number 0..7.
16657 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
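/*
 * Illustrative sketch only: composing the pretend values the comment above
 * describes.  The field positions used here (PF number in bits 0-2, a
 * VF-valid flag in bit 3, the absolute VFID above it) are an assumption for
 * illustration, not taken from the hardware definition.
 */
#include <stdint.h>

static inline uint16_t
sketch_pretend_pf(uint8_t pf_num)              /* pretend to be PF 0..7 */
{
    return ((uint16_t)(pf_num & 0x7));
}

static inline uint16_t
sketch_pretend_vf(uint8_t pf_num, uint8_t abs_vfid)
{
    /* assumed layout: [ABS-VFID | VF-valid | PF-num] */
    return ((uint16_t)(pf_num & 0x7) | (uint16_t)(1 << 3) |
            (uint16_t)((uint16_t)abs_vfid << 4));
}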
16667 return (-1);
16701 /* NON-IP protocol */
16750 count--;
16755 return (-1);
16767 count--;
16772 return (-2);
16807 count--;
16812 return (-3);
16841 return (-4);
16887 sc->devinfo.shmem_base,
16888 sc->devinfo.shmem2_base,
16978 * bxe_init_hw_common - initialize the HW at the COMMON phase.
17016 * 4-port mode or 2-port mode we need to turn off master-enable for
17018 * multi-function, and always disable all functions on the given path,
17019 * this means 0,2,4,6 for path 0 and 1,3,5,7 for path 1
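/*
 * Illustrative sketch: on the dual-path (non-E1x) controllers the PCI
 * function number selects its path by the low bit, which gives exactly the
 * split described above - functions 0,2,4,6 on path 0 and 1,3,5,7 on path 1.
 */
static inline int
sketch_func_to_path(int func)
{
    return (func & 1);  /* even functions -> path 0, odd functions -> path 1 */
}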
17083 return (-1);
17088 return (-1);
17100 * In E2 there is a bug in the timers block that can cause function 6 / 7
17101 * (i.e. vnic3) to start even if it is marked as "scan-off".
17103 * as "scan-off". Real-life scenario for example: if a driver is being
17104 * load-unloaded while func6,7 are down. This will cause the timer to access
17119 * dmae-operations (writing to pram for example.)
17120 * note: can be done for only function 6,7 but cleaner this
17129 * b. Wait 20msec. - note that this timeout is needed to make
17153 * there is no Timer disable during Func6/7 enable. This is because the
17155 * Step 2.d can be done only for PF6/7 and the driver can also check if
17158 * All ILT entries are written by zero+valid and not just PF6/7
17160 * PF-s might be dynamic.
17170 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
17209 } while (factor-- && (val != 1));
17213 return (-1);
17224 sc->dmae_ready = 1;
17243 ecore_qm_init_ptr_table(sc, sc->qm_cid_count, INITOP_SET);
17265 REG_WR(sc, PRS_REG_E1HOV_MODE, sc->devinfo.mf_info.path_has_ovlan);
17281 * Bit-map indicating which L2 hdrs may appear
17285 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17334 sc->devinfo.mf_info.path_has_ovlan ? 7 : 6);
17404 /* in E3 this is done in the per-port section */
17422 return (-1);
17427 return (-1);
17432 return (-1);
17444 return (-1);
17469 * bxe_init_hw_common_chip - init HW at the COMMON_CHIP phase.
17483 /* In E2 2-PORT mode, same ext phy is used for the two paths */
17510 * attempted. Therefore we manually added the enable-master to the
17528 ecore_qm_init_cid_count(sc, sc->qm_cid_count, INITOP_SET);
17543 } else if (sc->mtu > 4096) {
17547 val = sc->mtu;
17579 /* Ovlan exists only if we are in multi-function +
17580 * switch-dependent mode, in switch-independent there
17586 (sc->devinfo.mf_info.path_has_ovlan ? 7 : 6));
17612 REG_WR(sc, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
17637 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
17638 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
17639 * bits 4-7 are used for "per vn group attention" */
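/*
 * Illustrative sketch of the attention-bit usage described above: in
 * single-function mode only bits 0-2 are left enabled, while in
 * multi-function mode bit 3 stays masked and bits 4-7 carry the per-VN
 * group attentions.
 */
static inline unsigned int
sketch_aeu_enabled_bits(int is_mf)
{
    return (is_mf ? 0xf7u : 0x07u);  /* MF: bits 0-2 and 4-7; SF: bits 0-2 */
}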
17648 /* Bit-map indicating which L2 hdrs may appear after the
17659 IS_MF_SD(sc) ? 7 : 6);
17679 switch (sc->devinfo.mf_info.mf_mode) {
17719 while ((val = REG_RD(sc, reg)) != expected && cur_cnt--) {
17762 /* wait for CFC PF usage-counter to zero (includes all the VFs) */
17770 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
17778 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
17786 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
17864 crd_freed = crd_freed_start = REG_RD(sc, regs->crd_freed);
17865 crd = crd_start = REG_RD(sc, regs->crd);
17866 init_crd = REG_RD(sc, regs->init_crd);
17868 BLOGD(sc, DBG_LOAD, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
17869 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : s:%x\n", regs->pN, crd);
17870 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
17873 ((uint32_t)((int32_t)crd_freed - (int32_t)crd_freed_start) <
17874 (init_crd - crd_start))) {
17875 if (cur_cnt--) {
17877 crd = REG_RD(sc, regs->crd);
17878 crd_freed = REG_RD(sc, regs->crd_freed);
17880 BLOGD(sc, DBG_LOAD, "PBF tx buffer[%d] timed out\n", regs->pN);
17881 BLOGD(sc, DBG_LOAD, "CREDIT[%d] : c:%x\n", regs->pN, crd);
17882 BLOGD(sc, DBG_LOAD, "CREDIT_FREED[%d]: c:%x\n", regs->pN, crd_freed);
17888 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
17899 occup = to_free = REG_RD(sc, regs->lines_occup);
17900 freed = freed_start = REG_RD(sc, regs->lines_freed);
17902 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
17903 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17906 ((uint32_t)((int32_t)freed - (int32_t)freed_start) < to_free)) {
17907 if (cur_cnt--) {
17909 occup = REG_RD(sc, regs->lines_occup);
17910 freed = REG_RD(sc, regs->lines_freed);
17912 BLOGD(sc, DBG_LOAD, "PBF cmd queue[%d] timed out\n", regs->pN);
17913 BLOGD(sc, DBG_LOAD, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
17914 BLOGD(sc, DBG_LOAD, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
17920 poll_count - cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
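/*
 * Illustrative sketch of the comparison used by the two PBF polling loops
 * above: the "freed" counters can wrap, so the loops look at the 32-bit
 * delta since the poll started rather than at the raw counter values.
 */
#include <stdint.h>

static inline int
sketch_counter_drained(uint32_t now, uint32_t start, uint32_t needed)
{
    /* wrap-safe delta: how much has been freed since polling began */
    return ((uint32_t)((int32_t)now - (int32_t)start) >= needed);
}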
18027 /* Re-enable PF target read access */
18033 return (-1);
18040 return (-1);
18060 * Master enable - Due to WB DMAE writes performed before this
18061 * register is re-initialized as part of the regular function init
18074 struct ecore_ilt *ilt = sc->ilt;
18094 if (sc->devinfo.int_block == INT_BLOCK_HC) {
18104 ilt = sc->ilt;
18105 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
18108 ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
18109 ilt->lines[cdu_ilt_start + i].page_mapping =
18110 sc->context[i].vcxt_dma.paddr;
18111 ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
18125 if (sc->interrupt_mode != INTR_MODE_MSIX) {
18138 * Master enable - Due to WB DMAE writes performed before this
18139 * register is re-initialized as part of the regular function
18147 sc->dmae_ready = 1;
18211 if (sc->devinfo.int_block == INT_BLOCK_HC) {
18236 * E2 mode: address 0-135 match to the mapping memory;
18237 * 136 - PF0 default prod; 137 - PF1 default prod;
18238 * 138 - PF2 default prod; 139 - PF3 default prod;
18239 * 140 - PF0 attn prod; 141 - PF1 attn prod;
18240 * 142 - PF2 attn prod; 143 - PF3 attn prod;
18241 * 144-147 reserved.
18243 * E1.5 mode - In backward compatible mode;
18247 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
18250 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
18251 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
18252 * 144-147 attn prods;
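/*
 * Illustrative sketch taken from the E2 layout described above: entries
 * 0-135 shadow the mapping memory, 136-139 hold the per-PF default
 * producers, 140-143 the per-PF attention producers, 144-147 are reserved.
 */
static inline int
sketch_e2_default_prod_slot(int pf_num)   /* pf_num 0..3 */
{
    return (136 + pf_num);
}

static inline int
sketch_e2_attn_prod_slot(int pf_num)      /* pf_num 0..3 */
{
    return (140 + pf_num);
}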
18254 /* non-default-status-blocks */
18257 for (sb_idx = 0; sb_idx < sc->igu_sb_cnt; sb_idx++) {
18258 prod_offset = (sc->igu_base_sb + sb_idx) *
18267 bxe_ack_sb(sc, sc->igu_base_sb + sb_idx,
18269 bxe_igu_clear_sb(sc, sc->igu_base_sb + sb_idx);
18272 /* default-status-blocks */
18286 * igu prods come in chunks of E1HVN_MAX (4) -
18297 bxe_ack_sb(sc, sc->igu_dsb_id,
18299 bxe_ack_sb(sc, sc->igu_dsb_id,
18301 bxe_ack_sb(sc, sc->igu_dsb_id,
18303 bxe_ack_sb(sc, sc->igu_dsb_id,
18305 bxe_ack_sb(sc, sc->igu_dsb_id,
18308 bxe_ack_sb(sc, sc->igu_dsb_id,
18310 bxe_ack_sb(sc, sc->igu_dsb_id,
18313 bxe_igu_clear_sb(sc, sc->igu_dsb_id);
18316 rf-tool supports split-68 const */
18344 /* Clear "false" parity errors in MSI-X table */
18368 elink_phy_probe(&sc->link_params);
18378 elink_lfa_reset(&sc->link_params, &sc->link_vars);
18382 BLOGW(sc, "Bootcode is missing - cannot reset link\n");
18465 fp = &sc->fp[i];
18467 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
18481 if (sc->devinfo.int_block == INT_BLOCK_HC) {
18507 * Timers workaround bug for E2: if this is vnic-3,
18515 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
18526 sc->dmae_ready = 0;
18546 sc->iro_array = e1_iro_arr;
18549 sc->iro_array = e1h_iro_arr;
18552 sc->iro_array = e2_iro_arr;
18555 return (-1);
18609 * character device - ioctl interface definitions
18626 #define BXE_PATH(sc) (CHIP_IS_E1x(sc) ? 0 : (sc->pcie_func & 1))
18638 ((presets & (1 << (idx-1))) == (1 << (idx-1)))
18645 return dump_num_registers[0][preset-1];
18647 return dump_num_registers[1][preset-1];
18649 return dump_num_registers[2][preset-1];
18651 return dump_num_registers[3][preset-1];
18653 return dump_num_registers[4][preset-1];
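/*
 * Illustrative sketch of the preset test used above: preset indices are
 * 1-based, so preset 'idx' is a member of a presets bitmask when bit
 * (idx - 1) is set.
 */
static inline int
sketch_reg_in_preset(unsigned int presets, int idx)
{
    return ((presets & (1u << (idx - 1))) != 0);
}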
18743 return IS_E1_REG(reg_info->chips);
18745 return IS_E1H_REG(reg_info->chips);
18747 return IS_E2_REG(reg_info->chips);
18749 return IS_E3A0_REG(reg_info->chips);
18751 return IS_E3B0_REG(reg_info->chips);
18760 return IS_E1_REG(wreg_info->chips);
18762 return IS_E1H_REG(wreg_info->chips);
18764 return IS_E2_REG(wreg_info->chips);
18766 return IS_E3A0_REG(wreg_info->chips);
18768 return IS_E3B0_REG(wreg_info->chips);
18774 * bxe_read_pages_regs - read "paged" registers
18839 return (-1);
18861 IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
18862 for (i = 0; i < wreg_addr_p->size; i++) {
18863 *p++ = REG_RD(sc, wreg_addr_p->addr + i*4);
18868 for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
18869 addr = *(wreg_addr_p->read_regs);
18902 if (sc->grcdump_done || sc->grcdump_started)
18905 sc->grcdump_started = 1;
18911 sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT);
18913 if (sc->grc_dump == NULL) {
18922 * will re-enable parity attentions right after the dump.
18937 buf = sc->grc_dump;
18938 d_hdr = sc->grc_dump;
18940 d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1;
18941 d_hdr->version = BNX2X_DUMP_VERSION;
18942 d_hdr->preset = DUMP_ALL_PRESETS;
18945 d_hdr->dump_meta_data = DUMP_CHIP_E1;
18947 d_hdr->dump_meta_data = DUMP_CHIP_E1H;
18949 d_hdr->dump_meta_data = DUMP_CHIP_E2 |
18952 d_hdr->dump_meta_data = DUMP_CHIP_E3A0 |
18955 d_hdr->dump_meta_data = DUMP_CHIP_E3B0 |
18991 if(sc->state == BXE_STATE_OPEN) {
18992 if(sc->fw_stats_req != NULL) {
18994 (uintmax_t)sc->fw_stats_req_mapping,
18995 (uintmax_t)sc->fw_stats_data_mapping,
18996 sc->fw_stats_req, (sc->fw_stats_req_size + sc->fw_stats_data_size));
18998 if(sc->def_sb != NULL) {
19000 (void *)sc->def_sb_dma.paddr, sc->def_sb,
19003 if(sc->eq_dma.vaddr != NULL) {
19005 (uintmax_t)sc->eq_dma.paddr, sc->eq_dma.vaddr, BCM_PAGE_SIZE);
19007 if(sc->sp_dma.vaddr != NULL) {
19009 (uintmax_t)sc->sp_dma.paddr, sc->sp_dma.vaddr,
19012 if(sc->spq_dma.vaddr != NULL) {
19014 (uintmax_t)sc->spq_dma.paddr, sc->spq_dma.vaddr, BCM_PAGE_SIZE);
19016 if(sc->gz_buf_dma.vaddr != NULL) {
19018 (uintmax_t)sc->gz_buf_dma.paddr, sc->gz_buf_dma.vaddr,
19021 for (i = 0; i < sc->num_queues; i++) {
19022 fp = &sc->fp[i];
19023 if(fp->sb_dma.vaddr != NULL && fp->tx_dma.vaddr != NULL &&
19024 fp->rx_dma.vaddr != NULL && fp->rcq_dma.vaddr != NULL &&
19025 fp->rx_sge_dma.vaddr != NULL) {
19028 (uintmax_t)fp->sb_dma.paddr, fp->sb_dma.vaddr,
19031 (uintmax_t)fp->tx_dma.paddr, fp->tx_dma.vaddr,
19034 (uintmax_t)fp->rx_dma.paddr, fp->rx_dma.vaddr,
19037 (uintmax_t)fp->rcq_dma.paddr, fp->rcq_dma.vaddr,
19040 (uintmax_t)fp->rx_sge_dma.paddr, fp->rx_sge_dma.vaddr,
19045 ilt_cli = &ilt->clients[1];
19046 if(ilt->lines != NULL) {
19047 for (i = ilt_cli->start; i <= ilt_cli->end; i++) {
19049 (uintmax_t)(((struct bxe_dma *)((&ilt->lines[i])->page))->paddr),
19050 ((struct bxe_dma *)((&ilt->lines[i])->page))->vaddr, BCM_PAGE_SIZE);
19066 sc->grcdump_done = 1;
19073 sc->eeprom = malloc(BXE_EEPROM_MAX_DATA_LEN, M_DEVBUF, M_NOWAIT);
19075 if (sc->eeprom == NULL) {
19077 return (-1);
19080 sc->ioctl_dev = make_dev(&bxe_cdevsw,
19081 if_getdunit(sc->ifp),
19086 if_name(sc->ifp));
19088 if (sc->ioctl_dev == NULL) {
19089 free(sc->eeprom, M_DEVBUF);
19090 sc->eeprom = NULL;
19091 return (-1);
19094 sc->ioctl_dev->si_drv1 = sc;
19102 if (sc->ioctl_dev != NULL)
19103 destroy_dev(sc->ioctl_dev);
19105 if (sc->eeprom != NULL) {
19106 free(sc->eeprom, M_DEVBUF);
19107 sc->eeprom = NULL;
19109 sc->ioctl_dev = NULL;
19117 if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
19131 return (-EAGAIN);
19146 return (-EAGAIN);
19158 switch (eeprom->eeprom_cmd) {
19162 rval = copyin(eeprom->eeprom_data, sc->eeprom,
19163 eeprom->eeprom_data_len);
19168 rval = bxe_wr_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
19169 eeprom->eeprom_data_len);
19174 rval = bxe_rd_eeprom(sc, sc->eeprom, eeprom->eeprom_offset,
19175 eeprom->eeprom_data_len);
19181 rval = copyout(sc->eeprom, eeprom->eeprom_data,
19182 eeprom->eeprom_data_len);
19191 BLOGW(sc, "ioctl cmd %d failed rval %d\n", eeprom->eeprom_cmd, rval);
19204 dev_p->supported = sc->port.supported[cfg_idx] |
19205 (sc->port.supported[cfg_idx ^ 1] &
19207 dev_p->advertising = sc->port.advertising[cfg_idx];
19208 if(sc->link_params.phy[bxe_get_cur_phy_idx(sc)].media_type ==
19210 dev_p->supported &= ~(ELINK_SUPPORTED_10000baseT_Full);
19211 dev_p->advertising &= ~(ADVERTISED_10000baseT_Full);
19213 if ((sc->state == BXE_STATE_OPEN) && sc->link_vars.link_up &&
19214 !(sc->flags & BXE_MF_FUNC_DIS)) {
19215 dev_p->duplex = sc->link_vars.duplex;
19217 dev_p->speed = bxe_get_mf_speed(sc);
19219 dev_p->speed = sc->link_vars.line_speed;
19221 dev_p->duplex = DUPLEX_UNKNOWN;
19222 dev_p->speed = SPEED_UNKNOWN;
19225 dev_p->port = bxe_media_detect(sc);
19231 dev_p->phy_address = sc->port.phy_addr;
19236 dev_p->phy_address = ELINK_XGXS_EXT_PHY_ADDR(ext_phy_config);
19238 dev_p->phy_address = 0;
19240 if(sc->link_params.req_line_speed[cfg_idx] == ELINK_SPEED_AUTO_NEG)
19241 dev_p->autoneg = AUTONEG_ENABLE;
19243 dev_p->autoneg = AUTONEG_DISABLE;
19266 if ((sc = (struct bxe_softc *)dev->si_drv1) == NULL)
19274 dump->pci_func = sc->pcie_func;
19275 dump->grcdump_size =
19284 if ((!sc->trigger_grcdump) || (dump->grcdump == NULL) ||
19285 (dump->grcdump_size < grc_dump_size)) {
19290 if((sc->trigger_grcdump) && (!sc->grcdump_done) &&
19291 (!sc->grcdump_started)) {
19295 if((!rval) && (sc->grcdump_done) && (sc->grcdump_started) &&
19296 (sc->grc_dump != NULL)) {
19297 dump->grcdump_dwords = grc_dump_size >> 2;
19298 rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size);
19299 free(sc->grc_dump, M_DEVBUF);
19300 sc->grc_dump = NULL;
19301 sc->grcdump_started = 0;
19302 sc->grcdump_done = 0;
19309 snprintf(drv_infop->drv_name, BXE_DRV_NAME_LENGTH, "%s", "bxe");
19310 snprintf(drv_infop->drv_version, BXE_DRV_VERSION_LENGTH, "v:%s",
19312 snprintf(drv_infop->mfw_version, BXE_MFW_VERSION_LENGTH, "%s",
19313 sc->devinfo.bc_ver_str);
19314 snprintf(drv_infop->stormfw_version, BXE_STORMFW_VERSION_LENGTH,
19315 "%s", sc->fw_ver_str);
19316 drv_infop->eeprom_dump_len = sc->devinfo.flash_size;
19317 drv_infop->reg_dump_len =
19320 snprintf(drv_infop->bus_info, BXE_BUS_INFO_LENGTH, "%d:%d:%d",
19321 sc->pcie_bus, sc->pcie_device, sc->pcie_func);
19327 dev_p->supported = dev_set.supported;
19328 dev_p->advertising = dev_set.advertising;
19329 dev_p->speed = dev_set.speed;
19330 dev_p->duplex = dev_set.duplex;
19331 dev_p->port = dev_set.port;
19332 dev_p->phy_address = dev_set.phy_address;
19333 dev_p->autoneg = dev_set.autoneg;
19340 grc_dump_size = reg_p->reg_buf_len;
19342 if((!sc->grcdump_done) && (!sc->grcdump_started)) {
19345 if((sc->grcdump_done) && (sc->grcdump_started) &&
19346 (sc->grc_dump != NULL)) {
19347 rval = copyout(sc->grc_dump, reg_p->reg_buf, grc_dump_size);
19348 free(sc->grc_dump, M_DEVBUF);
19349 sc->grc_dump = NULL;
19350 sc->grcdump_started = 0;
19351 sc->grcdump_done = 0;
19358 if((reg_rdw_p->reg_cmd == BXE_READ_REG_CMD) &&
19359 (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19360 reg_rdw_p->reg_val = REG_RD(sc, reg_rdw_p->reg_id);
19362 if((reg_rdw_p->reg_cmd == BXE_WRITE_REG_CMD) &&
19363 (reg_rdw_p->reg_access_type == BXE_REG_ACCESS_DIRECT))
19364 REG_WR(sc, reg_rdw_p->reg_id, reg_rdw_p->reg_val);
19370 if(cfg_rdw_p->cfg_cmd == BXE_READ_PCICFG) {
19372 cfg_rdw_p->cfg_val = pci_read_config(sc->dev, cfg_rdw_p->cfg_id,
19373 cfg_rdw_p->cfg_width);
19375 } else if(cfg_rdw_p->cfg_cmd == BXE_WRITE_PCICFG) {
19376 pci_write_config(sc->dev, cfg_rdw_p->cfg_id, cfg_rdw_p->cfg_val,
19377 cfg_rdw_p->cfg_width);
19385 snprintf(mac_addr_p->mac_addr_str, sizeof(sc->mac_addr_str), "%s",
19386 sc->mac_addr_str);
19409 *nrxr = sc->num_queues;
19411 *clsize = sc->fp[0].mbuf_alloc_size;
19428 IFF_DRV_RUNNING || !sc->link_vars.link_up)
19431 error = bxe_tx_encap(&sc->fp[0], &m);
19445 !sc->link_vars.link_up)
19448 for (i = 0; i < sc->num_queues; i++)
19449 (void)bxe_rxeof(sc, &sc->fp[i]);
19450 (void)bxe_txeof(sc, &sc->fp[0]);