/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2012-2019 Solarflare Communications Inc.
 */

#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_MON_MCDI
#include "mcdi_mon.h"
#endif

#if EFSYS_OPT_HUNTINGTON

#include "ef10_tlv_layout.h"

static	__checkReturn	efx_rc_t
hunt_nic_get_required_pcie_bandwidth(
	__in		efx_nic_t *enp,
	__out		uint32_t *bandwidth_mbpsp)
{
	uint32_t port_modes;
	uint32_t bandwidth;
	efx_rc_t rc;

	/*
	 * On Huntington, the firmware may not give us the current port mode,
	 * so we need to go by the set of available port modes and assume the
	 * most capable mode is in use.
	 */

	if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
		    NULL, NULL)) != 0) {
		/* No port mode info available */
		bandwidth = 0;
		goto out;
	}

	if (port_modes & (1U << TLV_PORT_MODE_40G_40G)) {
		/*
		 * This needs the full PCIe bandwidth (and could use
		 * more) - roughly 64 Gbit/s for 8 lanes of Gen3.
		 */
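		/*
		 * (Gen3 runs at 8 GT/s per lane with 128b/130b encoding,
		 * so eight lanes carry about 8 * 8 * 128 / 130 ~= 63 Gbit/s.)
		 */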
		if ((rc = efx_nic_calculate_pcie_link_bandwidth(8,
			    EFX_PCIE_LINK_SPEED_GEN3, &bandwidth)) != 0)
			goto fail1;
	} else {
		if (port_modes & (1U << TLV_PORT_MODE_40G)) {
			bandwidth = 40000;
		} else if (port_modes & (1U << TLV_PORT_MODE_10G_10G_10G_10G)) {
			bandwidth = 4 * 10000;
		} else {
			/* Assume two 10G ports */
			bandwidth = 2 * 10000;
		}
	}

out:
	*bandwidth_mbpsp = bandwidth;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
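
/*
 * Illustrative usage of the helper above (a minimal sketch; this mirrors
 * what hunt_board_cfg() below actually does):
 *
 *	uint32_t bandwidth;
 *
 *	if ((rc = hunt_nic_get_required_pcie_bandwidth(enp, &bandwidth)) != 0)
 *		goto fail;
 *	encp->enc_required_pcie_bandwidth_mbps = bandwidth;
 *
 * Note that a result of 0 Mbps means no port mode information was available.
 */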

	__checkReturn	efx_rc_t
hunt_board_cfg(
	__in		efx_nic_t *enp)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_port_t *epp = &(enp->en_port);
	uint32_t sysclk, dpcpu_clk;
	uint32_t bandwidth;
	efx_rc_t rc;

	/*
	 * Event queue creation is complete when an
	 * EVQ_INIT_DONE_EV event is received.
	 */
	encp->enc_evq_init_done_ev_supported = B_TRUE;

	/*
	 * Enable firmware workarounds for hardware errata.
	 * Expected responses are:
	 *  - 0 (zero):
	 *	Success: workaround enabled or disabled as requested.
	 *  - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
	 *	Firmware does not support the MC_CMD_WORKAROUND request.
	 *	(assume that the workaround is not supported).
	 *  - MC_CMD_ERR_ENOENT (reported as ENOENT):
	 *	Firmware does not support the requested workaround.
	 *  - MC_CMD_ERR_EPERM (reported as EACCES):
	 *	Unprivileged function cannot enable/disable workarounds.
	 *
	 * See efx_mcdi_request_errcode() for MCDI error translations.
	 */

	/*
	 * If the bug35388 workaround is enabled, then use an indirect access
	 * method to avoid unsafe EVQ writes.
	 */
	rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG35388, B_TRUE,
	    NULL);
	if ((rc == 0) || (rc == EACCES))
		encp->enc_bug35388_workaround = B_TRUE;
	else if ((rc == ENOTSUP) || (rc == ENOENT))
		encp->enc_bug35388_workaround = B_FALSE;
	else
		goto fail1;

	/*
	 * If the bug41750 workaround is enabled, then do not test interrupts,
	 * as the test will fail (seen with Greenport controllers).
	 */
	rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG41750, B_TRUE,
	    NULL);
	if (rc == 0) {
		encp->enc_bug41750_workaround = B_TRUE;
	} else if (rc == EACCES) {
		/* Assume a controller with 40G ports needs the workaround. */
		if (epp->ep_default_adv_cap_mask & EFX_PHY_CAP_40000FDX)
			encp->enc_bug41750_workaround = B_TRUE;
		else
			encp->enc_bug41750_workaround = B_FALSE;
	} else if ((rc == ENOTSUP) || (rc == ENOENT)) {
		encp->enc_bug41750_workaround = B_FALSE;
	} else {
		goto fail2;
	}
	if (EFX_PCI_FUNCTION_IS_VF(encp)) {
		/* Interrupt testing does not work for VFs. See bug50084. */
		encp->enc_bug41750_workaround = B_TRUE;
	}

	/* Get clock frequencies (in MHz). */
	if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
		goto fail3;

	/*
	 * The Huntington timer quantum is 1536 sysclk cycles, documented for
	 * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
	 */
	encp->enc_evq_timer_quantum_ns = 1536000UL / sysclk; /* 1536 cycles */
	if (encp->enc_bug35388_workaround) {
		encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
		    ERF_DD_EVQ_IND_TIMER_VAL_WIDTH) / 1000;
	} else {
		encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
		    FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
	}
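
	/*
	 * Worked example with hypothetical numbers: for sysclk = 800 MHz the
	 * quantum is 1536000 / 800 = 1920 ns, and a 16-bit timer value field
	 * would then allow up to (1920 << 16) / 1000 ~= 125829 us.
	 */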

	encp->enc_bug61265_workaround = B_FALSE; /* Medford only */

	/* Checksums for TSO sends can be incorrect on Huntington. */
	encp->enc_bug61297_workaround = B_TRUE;

	encp->enc_ev_desc_size = EF10_EVQ_DESC_SIZE;
	encp->enc_rx_desc_size = EF10_RXQ_DESC_SIZE;
	encp->enc_tx_desc_size = EF10_TXQ_DESC_SIZE;

	/* Alignment for receive packet DMA buffers */
	encp->enc_rx_buf_align_start = 1;
	encp->enc_rx_buf_align_end = 64; /* RX DMA end padding */

	encp->enc_evq_max_nevs = EF10_EVQ_MAXNEVS;
	encp->enc_evq_min_nevs = EF10_EVQ_MINNEVS;

	encp->enc_rxq_max_ndescs = EF10_RXQ_MAXNDESCS;
	encp->enc_rxq_min_ndescs = EF10_RXQ_MINNDESCS;

	/*
	 * The workaround for bug35388 uses the top bit of transmit queue
	 * descriptor writes, preventing the use of 4096 descriptor TXQs.
	 */
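	/*
	 * (Reserving the top bit halves the usable descriptor count, so the
	 * workaround limit is half of HUNT_TXQ_MAXNDESCS.)
	 */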
	encp->enc_txq_max_ndescs = encp->enc_bug35388_workaround ?
	    HUNT_TXQ_MAXNDESCS_BUG35388_WORKAROUND :
	    HUNT_TXQ_MAXNDESCS;
	encp->enc_txq_min_ndescs = EF10_TXQ_MINNDESCS;

	EFX_STATIC_ASSERT(HUNT_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS);
	encp->enc_piobuf_limit = HUNT_PIOBUF_NBUFS;
	encp->enc_piobuf_size = HUNT_PIOBUF_SIZE;
	encp->enc_piobuf_min_alloc_size = HUNT_MIN_PIO_ALLOC_SIZE;

	if ((rc = hunt_nic_get_required_pcie_bandwidth(enp, &bandwidth)) != 0)
		goto fail4;
	encp->enc_required_pcie_bandwidth_mbps = bandwidth;

	/* All Huntington devices have a PCIe Gen3, 8 lane connector */
	encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;

	encp->enc_table_api_supported = B_FALSE;

	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


#endif /* EFSYS_OPT_HUNTINGTON */