/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2015-2019 Solarflare Communications Inc.
 */

#include "efx.h"
#include "efx_impl.h"


#if EFSYS_OPT_MEDFORD

static	__checkReturn	efx_rc_t
medford_nic_get_required_pcie_bandwidth(
	__in		efx_nic_t *enp,
	__out		uint32_t *bandwidth_mbpsp)
{
	uint32_t bandwidth;
	efx_rc_t rc;

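	/* The required PCIe bandwidth depends on the configured port mode. */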
	if ((rc = ef10_nic_get_port_mode_bandwidth(enp,
						    &bandwidth)) != 0)
		goto fail1;

	*bandwidth_mbpsp = bandwidth;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
medford_board_cfg(
	__in		efx_nic_t *enp)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t sysclk, dpcpu_clk;
	uint32_t end_padding;
	uint32_t bandwidth;
	efx_rc_t rc;

	/*
	 * Event queue creation is complete when an
	 * EVQ_INIT_DONE_EV event is received.
	 */
	encp->enc_evq_init_done_ev_supported = B_TRUE;

	/*
	 * Enable firmware workarounds for hardware errata.
	 * Expected responses are:
	 *  - 0 (zero):
	 *	Success: workaround enabled or disabled as requested.
	 *  - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
	 *	Firmware does not support the MC_CMD_WORKAROUND request.
	 *	(assume that the workaround is not supported).
	 *  - MC_CMD_ERR_ENOENT (reported as ENOENT):
	 *	Firmware does not support the requested workaround.
	 *  - MC_CMD_ERR_EPERM (reported as EACCES):
	 *	Unprivileged function cannot enable/disable workarounds.
	 *
	 * See efx_mcdi_request_errcode() for MCDI error translations.
	 */


	if (EFX_PCI_FUNCTION_IS_VF(encp)) {
		/*
		 * Interrupt testing does not work for VFs. See bug50084 and
		 * bug71432 comment 21.
		 */
		encp->enc_bug41750_workaround = B_TRUE;
	}

	/*
	 * If the bug61265 workaround is enabled, then interrupt holdoff timers
	 * cannot be controlled by timer table writes, so MCDI must be used
	 * (timer table writes can still be used for wakeup timers).
	 */
	rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG61265, B_TRUE,
	    NULL);
	if ((rc == 0) || (rc == EACCES))
		encp->enc_bug61265_workaround = B_TRUE;
	else if ((rc == ENOTSUP) || (rc == ENOENT))
		encp->enc_bug61265_workaround = B_FALSE;
	else
		goto fail1;

	/* Checksums for TSO sends can be incorrect on Medford. */
	encp->enc_bug61297_workaround = B_TRUE;

	/* Get clock frequencies (in MHz). */
	if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
		goto fail2;

	/*
	 * The Medford timer quantum is 1536 dpcpu_clk cycles, documented for
	 * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
	 */
	encp->enc_evq_timer_quantum_ns = 1536000UL / dpcpu_clk; /* 1536 cycles */
	encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
	    FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
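	/*
	 * Illustrative arithmetic only: a 1000 MHz DPCPU clock would give a
	 * quantum of 1536000 / 1000 = 1536 ns; the maximum timeout follows
	 * from the width of the timer value field. The actual clock
	 * frequency is reported by firmware above.
	 */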

	encp->enc_ev_desc_size = EF10_EVQ_DESC_SIZE;
	encp->enc_rx_desc_size = EF10_RXQ_DESC_SIZE;
	encp->enc_tx_desc_size = EF10_TXQ_DESC_SIZE;

	/* Alignment for receive packet DMA buffers */
	encp->enc_rx_buf_align_start = 1;

	/* Get the RX DMA end padding alignment configuration */
	if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) {
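		/*
		 * If reading the RXDP configuration is not permitted
		 * (EACCES), fall back to a safe default below.
		 */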
		if (rc != EACCES)
			goto fail3;

		/* Assume largest tail padding size supported by hardware */
		end_padding = 256;
	}
	encp->enc_rx_buf_align_end = end_padding;

	encp->enc_evq_max_nevs = EF10_EVQ_MAXNEVS;
	encp->enc_evq_min_nevs = EF10_EVQ_MINNEVS;

	encp->enc_rxq_max_ndescs = EF10_RXQ_MAXNDESCS;
	encp->enc_rxq_min_ndescs = EF10_RXQ_MINNDESCS;

	/*
	 * The maximum supported transmit queue size is 2048. TXQs with 4096
	 * descriptors are not supported as the top bit is used for vfifo
	 * stuffing.
	 */
	encp->enc_txq_max_ndescs = MEDFORD_TXQ_MAXNDESCS;
	encp->enc_txq_min_ndescs = EF10_TXQ_MINNDESCS;

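	/*
	 * PIO buffer limits: PIO buffers are used for low-latency transmit
	 * via programmed I/O writes.
	 */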
	EFX_STATIC_ASSERT(MEDFORD_PIOBUF_NBUFS <= EF10_MAX_PIOBUF_NBUFS);
	encp->enc_piobuf_limit = MEDFORD_PIOBUF_NBUFS;
	encp->enc_piobuf_size = MEDFORD_PIOBUF_SIZE;
	encp->enc_piobuf_min_alloc_size = MEDFORD_MIN_PIO_ALLOC_SIZE;

	/*
	 * Medford stores a single global copy of VPD, not per-PF as on
	 * Huntington.
	 */
	encp->enc_vpd_is_global = B_TRUE;

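	/*
	 * Report the PCIe bandwidth required by the current port mode and
	 * the maximum supported PCIe link generation.
	 */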
	rc = medford_nic_get_required_pcie_bandwidth(enp, &bandwidth);
	if (rc != 0)
		goto fail4;
	encp->enc_required_pcie_bandwidth_mbps = bandwidth;
	encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;

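	/* The hardware table access API is not supported on Medford. */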
	encp->enc_table_api_supported = B_FALSE;

	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

#endif	/* EFSYS_OPT_MEDFORD */