1298d969cSNavdeep Parhar /*- 2298d969cSNavdeep Parhar * Copyright (c) 2014 Chelsio Communications, Inc. 3298d969cSNavdeep Parhar * All rights reserved. 4298d969cSNavdeep Parhar * Written by: Navdeep Parhar <np@FreeBSD.org> 5298d969cSNavdeep Parhar * 6298d969cSNavdeep Parhar * Redistribution and use in source and binary forms, with or without 7298d969cSNavdeep Parhar * modification, are permitted provided that the following conditions 8298d969cSNavdeep Parhar * are met: 9298d969cSNavdeep Parhar * 1. Redistributions of source code must retain the above copyright 10298d969cSNavdeep Parhar * notice, this list of conditions and the following disclaimer. 11298d969cSNavdeep Parhar * 2. Redistributions in binary form must reproduce the above copyright 12298d969cSNavdeep Parhar * notice, this list of conditions and the following disclaimer in the 13298d969cSNavdeep Parhar * documentation and/or other materials provided with the distribution. 14298d969cSNavdeep Parhar * 15298d969cSNavdeep Parhar * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16298d969cSNavdeep Parhar * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17298d969cSNavdeep Parhar * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18298d969cSNavdeep Parhar * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19298d969cSNavdeep Parhar * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20298d969cSNavdeep Parhar * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21298d969cSNavdeep Parhar * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22298d969cSNavdeep Parhar * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23298d969cSNavdeep Parhar * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24298d969cSNavdeep Parhar * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25298d969cSNavdeep Parhar * SUCH DAMAGE. 26298d969cSNavdeep Parhar */ 27298d969cSNavdeep Parhar 28298d969cSNavdeep Parhar #include <sys/cdefs.h> 29298d969cSNavdeep Parhar #include "opt_inet.h" 30298d969cSNavdeep Parhar #include "opt_inet6.h" 31298d969cSNavdeep Parhar 32298d969cSNavdeep Parhar #ifdef DEV_NETMAP 33298d969cSNavdeep Parhar #include <sys/param.h> 34fe2ebb76SJohn Baldwin #include <sys/bus.h> 35298d969cSNavdeep Parhar #include <sys/eventhandler.h> 36298d969cSNavdeep Parhar #include <sys/lock.h> 37298d969cSNavdeep Parhar #include <sys/mbuf.h> 38fe2ebb76SJohn Baldwin #include <sys/module.h> 39298d969cSNavdeep Parhar #include <sys/selinfo.h> 40298d969cSNavdeep Parhar #include <sys/socket.h> 41298d969cSNavdeep Parhar #include <sys/sockio.h> 42298d969cSNavdeep Parhar #include <machine/bus.h> 43298d969cSNavdeep Parhar #include <net/ethernet.h> 44298d969cSNavdeep Parhar #include <net/if.h> 45298d969cSNavdeep Parhar #include <net/if_media.h> 46298d969cSNavdeep Parhar #include <net/if_var.h> 47298d969cSNavdeep Parhar #include <net/if_clone.h> 48298d969cSNavdeep Parhar #include <net/if_types.h> 49298d969cSNavdeep Parhar #include <net/netmap.h> 50298d969cSNavdeep Parhar #include <dev/netmap/netmap_kern.h> 51298d969cSNavdeep Parhar 52298d969cSNavdeep Parhar #include "common/common.h" 53298d969cSNavdeep Parhar #include 
"common/t4_regs.h" 54298d969cSNavdeep Parhar #include "common/t4_regs_values.h" 55298d969cSNavdeep Parhar 56298d969cSNavdeep Parhar extern int fl_pad; /* XXXNM */ 57298d969cSNavdeep Parhar 5808aeb151SNavdeep Parhar /* 5908aeb151SNavdeep Parhar * 0 = normal netmap rx 6008aeb151SNavdeep Parhar * 1 = black hole 6108aeb151SNavdeep Parhar * 2 = supermassive black hole (buffer packing enabled) 6208aeb151SNavdeep Parhar */ 6308aeb151SNavdeep Parhar int black_hole = 0; 64ba8b75aeSNavdeep Parhar SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_black_hole, CTLFLAG_RWTUN, &black_hole, 0, 6508aeb151SNavdeep Parhar "Sink incoming packets."); 6608aeb151SNavdeep Parhar 671cdfce07SNavdeep Parhar int rx_ndesc = 256; 681cdfce07SNavdeep Parhar SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_ndesc, CTLFLAG_RWTUN, 691cdfce07SNavdeep Parhar &rx_ndesc, 0, "# of rx descriptors after which the hw cidx is updated."); 701cdfce07SNavdeep Parhar 7131f494cdSNavdeep Parhar int rx_nframes = 64; 7231f494cdSNavdeep Parhar SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_rx_nframes, CTLFLAG_RWTUN, 7331f494cdSNavdeep Parhar &rx_nframes, 0, "max # of frames received before waking up netmap rx."); 7431f494cdSNavdeep Parhar 751cdfce07SNavdeep Parhar int holdoff_tmr_idx = 2; 761cdfce07SNavdeep Parhar SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_holdoff_tmr_idx, CTLFLAG_RWTUN, 771cdfce07SNavdeep Parhar &holdoff_tmr_idx, 0, "Holdoff timer index for netmap rx queues."); 781cdfce07SNavdeep Parhar 799af71ab3SNavdeep Parhar /* 809af71ab3SNavdeep Parhar * Congestion drops. 819af71ab3SNavdeep Parhar * -1: no congestion feedback (not recommended). 829af71ab3SNavdeep Parhar * 0: backpressure the channel instead of dropping packets right away. 839af71ab3SNavdeep Parhar * 1: no backpressure, drop packets for the congested queue immediately. 
849af71ab3SNavdeep Parhar */ 859af71ab3SNavdeep Parhar static int nm_cong_drop = 1; 86ba8b75aeSNavdeep Parhar SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_cong_drop, CTLFLAG_RWTUN, 872d714dbcSJohn Baldwin &nm_cong_drop, 0, 882d714dbcSJohn Baldwin "Congestion control for netmap rx queues (0 = backpressure, 1 = drop"); 899af71ab3SNavdeep Parhar 900afe96c7SNavdeep Parhar int starve_fl = 0; 910afe96c7SNavdeep Parhar SYSCTL_INT(_hw_cxgbe, OID_AUTO, starve_fl, CTLFLAG_RWTUN, 920afe96c7SNavdeep Parhar &starve_fl, 0, "Don't ring fl db for netmap rx queues."); 930afe96c7SNavdeep Parhar 942d73ac5eSNavdeep Parhar /* 952d73ac5eSNavdeep Parhar * Try to process tx credits in bulk. This may cause a delay in the return of 962d73ac5eSNavdeep Parhar * tx credits and is suitable for bursty or non-stop tx only. 972d73ac5eSNavdeep Parhar */ 982d73ac5eSNavdeep Parhar int lazy_tx_credit_flush = 1; 992d73ac5eSNavdeep Parhar SYSCTL_INT(_hw_cxgbe, OID_AUTO, lazy_tx_credit_flush, CTLFLAG_RWTUN, 1002d73ac5eSNavdeep Parhar &lazy_tx_credit_flush, 0, "lazy credit flush for netmap tx queues."); 1012d73ac5eSNavdeep Parhar 102f02c9e69SNavdeep Parhar /* 103f02c9e69SNavdeep Parhar * Split the netmap rx queues into two groups that populate separate halves of 104f02c9e69SNavdeep Parhar * the RSS indirection table. This allows filters with hashmask to steer to a 105f02c9e69SNavdeep Parhar * particular group of queues. 106f02c9e69SNavdeep Parhar */ 107f02c9e69SNavdeep Parhar static int nm_split_rss = 0; 108f02c9e69SNavdeep Parhar SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_split_rss, CTLFLAG_RWTUN, 109f02c9e69SNavdeep Parhar &nm_split_rss, 0, "Split the netmap rx queues into two groups."); 110f02c9e69SNavdeep Parhar 111f4220a70SNavdeep Parhar /* 112f4220a70SNavdeep Parhar * netmap(4) says "netmap does not use features such as checksum offloading, TCP 113f4220a70SNavdeep Parhar * segmentation offloading, encryption, VLAN encapsulation/decapsulation, etc." 
114f4220a70SNavdeep Parhar * but this knob can be used to get the hardware to checksum all tx traffic 115f4220a70SNavdeep Parhar * anyway. 116f4220a70SNavdeep Parhar */ 117f4220a70SNavdeep Parhar static int nm_txcsum = 0; 118f4220a70SNavdeep Parhar SYSCTL_INT(_hw_cxgbe, OID_AUTO, nm_txcsum, CTLFLAG_RWTUN, 119f4220a70SNavdeep Parhar &nm_txcsum, 0, "Enable transmit checksum offloading."); 120f4220a70SNavdeep Parhar 1218eba75edSNavdeep Parhar static int free_nm_rxq_hwq(struct vi_info *, struct sge_nm_rxq *); 1228eba75edSNavdeep Parhar static int free_nm_txq_hwq(struct vi_info *, struct sge_nm_txq *); 1238eba75edSNavdeep Parhar 1248eba75edSNavdeep Parhar int 1258eba75edSNavdeep Parhar alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx, 12643bbae19SNavdeep Parhar int idx) 1278eba75edSNavdeep Parhar { 1288eba75edSNavdeep Parhar int rc; 12943bbae19SNavdeep Parhar struct sysctl_oid *oid; 1308eba75edSNavdeep Parhar struct sysctl_oid_list *children; 1318eba75edSNavdeep Parhar struct sysctl_ctx_list *ctx; 1328eba75edSNavdeep Parhar char name[16]; 1338eba75edSNavdeep Parhar size_t len; 1348eba75edSNavdeep Parhar struct adapter *sc = vi->adapter; 1358eba75edSNavdeep Parhar struct netmap_adapter *na = NA(vi->ifp); 1368eba75edSNavdeep Parhar 1378eba75edSNavdeep Parhar MPASS(na != NULL); 1388eba75edSNavdeep Parhar 1398eba75edSNavdeep Parhar len = vi->qsize_rxq * IQ_ESIZE; 1408eba75edSNavdeep Parhar rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map, 1418eba75edSNavdeep Parhar &nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc); 1428eba75edSNavdeep Parhar if (rc != 0) 1438eba75edSNavdeep Parhar return (rc); 1448eba75edSNavdeep Parhar 1458eba75edSNavdeep Parhar len = na->num_rx_desc * EQ_ESIZE + sc->params.sge.spg_len; 1468eba75edSNavdeep Parhar rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map, 1478eba75edSNavdeep Parhar &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc); 1488eba75edSNavdeep Parhar if (rc != 0) 1498eba75edSNavdeep Parhar 
return (rc); 1508eba75edSNavdeep Parhar 1518eba75edSNavdeep Parhar nm_rxq->vi = vi; 1528eba75edSNavdeep Parhar nm_rxq->nid = idx; 1538eba75edSNavdeep Parhar nm_rxq->iq_cidx = 0; 1548eba75edSNavdeep Parhar nm_rxq->iq_sidx = vi->qsize_rxq - sc->params.sge.spg_len / IQ_ESIZE; 1558eba75edSNavdeep Parhar nm_rxq->iq_gen = F_RSPD_GEN; 1568eba75edSNavdeep Parhar nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; 1578eba75edSNavdeep Parhar nm_rxq->fl_sidx = na->num_rx_desc; 1588eba75edSNavdeep Parhar nm_rxq->fl_sidx2 = nm_rxq->fl_sidx; /* copy for rxsync cacheline */ 1598eba75edSNavdeep Parhar nm_rxq->intr_idx = intr_idx; 1608eba75edSNavdeep Parhar nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID; 1618eba75edSNavdeep Parhar 1628eba75edSNavdeep Parhar ctx = &vi->ctx; 16343bbae19SNavdeep Parhar children = SYSCTL_CHILDREN(vi->nm_rxq_oid); 1648eba75edSNavdeep Parhar 1658eba75edSNavdeep Parhar snprintf(name, sizeof(name), "%d", idx); 1668eba75edSNavdeep Parhar oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name, 1678eba75edSNavdeep Parhar CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "rx queue"); 1688eba75edSNavdeep Parhar children = SYSCTL_CHILDREN(oid); 1698eba75edSNavdeep Parhar 170473f6163SNavdeep Parhar SYSCTL_ADD_U16(ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD, 171473f6163SNavdeep Parhar &nm_rxq->iq_abs_id, 0, "absolute id of the queue"); 172473f6163SNavdeep Parhar SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 173473f6163SNavdeep Parhar &nm_rxq->iq_cntxt_id, 0, "SGE context id of the queue"); 174473f6163SNavdeep Parhar SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 175473f6163SNavdeep Parhar &nm_rxq->iq_cidx, 0, "consumer index"); 1768eba75edSNavdeep Parhar 1778eba75edSNavdeep Parhar children = SYSCTL_CHILDREN(oid); 1788eba75edSNavdeep Parhar oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", 1798eba75edSNavdeep Parhar CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "freelist"); 1808eba75edSNavdeep Parhar children = SYSCTL_CHILDREN(oid); 1818eba75edSNavdeep Parhar 
182473f6163SNavdeep Parhar SYSCTL_ADD_U16(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 183473f6163SNavdeep Parhar &nm_rxq->fl_cntxt_id, 0, "SGE context id of the freelist"); 1848eba75edSNavdeep Parhar SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 1858eba75edSNavdeep Parhar &nm_rxq->fl_cidx, 0, "consumer index"); 1868eba75edSNavdeep Parhar SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, 1878eba75edSNavdeep Parhar &nm_rxq->fl_pidx, 0, "producer index"); 1888eba75edSNavdeep Parhar 1898eba75edSNavdeep Parhar return (rc); 1908eba75edSNavdeep Parhar } 1918eba75edSNavdeep Parhar 1928eba75edSNavdeep Parhar int 1938eba75edSNavdeep Parhar free_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq) 1948eba75edSNavdeep Parhar { 1958eba75edSNavdeep Parhar struct adapter *sc = vi->adapter; 1968eba75edSNavdeep Parhar 1978eba75edSNavdeep Parhar if (!(vi->flags & VI_INIT_DONE)) 1988eba75edSNavdeep Parhar return (0); 1998eba75edSNavdeep Parhar 2008eba75edSNavdeep Parhar if (nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID) 2018eba75edSNavdeep Parhar free_nm_rxq_hwq(vi, nm_rxq); 2028eba75edSNavdeep Parhar MPASS(nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID); 2038eba75edSNavdeep Parhar 2048eba75edSNavdeep Parhar free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba, 2058eba75edSNavdeep Parhar nm_rxq->iq_desc); 2068eba75edSNavdeep Parhar free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba, 2078eba75edSNavdeep Parhar nm_rxq->fl_desc); 2088eba75edSNavdeep Parhar 2098eba75edSNavdeep Parhar return (0); 2108eba75edSNavdeep Parhar } 2118eba75edSNavdeep Parhar 2128eba75edSNavdeep Parhar int 21343bbae19SNavdeep Parhar alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx) 2148eba75edSNavdeep Parhar { 2158eba75edSNavdeep Parhar int rc; 2168eba75edSNavdeep Parhar size_t len; 2178eba75edSNavdeep Parhar struct port_info *pi = vi->pi; 2188eba75edSNavdeep Parhar struct adapter *sc = pi->adapter; 
2198eba75edSNavdeep Parhar struct netmap_adapter *na = NA(vi->ifp); 2208eba75edSNavdeep Parhar char name[16]; 22143bbae19SNavdeep Parhar struct sysctl_oid *oid; 22243bbae19SNavdeep Parhar struct sysctl_oid_list *children = SYSCTL_CHILDREN(vi->nm_txq_oid); 2238eba75edSNavdeep Parhar 2248eba75edSNavdeep Parhar len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len; 2258eba75edSNavdeep Parhar rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map, 2268eba75edSNavdeep Parhar &nm_txq->ba, (void **)&nm_txq->desc); 2278eba75edSNavdeep Parhar if (rc) 2288eba75edSNavdeep Parhar return (rc); 2298eba75edSNavdeep Parhar 2308eba75edSNavdeep Parhar nm_txq->pidx = nm_txq->cidx = 0; 2318eba75edSNavdeep Parhar nm_txq->sidx = na->num_tx_desc; 2328eba75edSNavdeep Parhar nm_txq->nid = idx; 2338eba75edSNavdeep Parhar nm_txq->iqidx = iqidx; 2348eba75edSNavdeep Parhar nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 2358eba75edSNavdeep Parhar V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | 2368eba75edSNavdeep Parhar V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); 2378eba75edSNavdeep Parhar if (sc->params.fw_vers >= FW_VERSION32(1, 24, 11, 0)) 2388eba75edSNavdeep Parhar nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR)); 2398eba75edSNavdeep Parhar else 2408eba75edSNavdeep Parhar nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); 2418eba75edSNavdeep Parhar nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID; 2428eba75edSNavdeep Parhar 2438eba75edSNavdeep Parhar snprintf(name, sizeof(name), "%d", idx); 2448eba75edSNavdeep Parhar oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, 2458eba75edSNavdeep Parhar CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "netmap tx queue"); 2468eba75edSNavdeep Parhar children = SYSCTL_CHILDREN(oid); 2478eba75edSNavdeep Parhar 2488eba75edSNavdeep Parhar SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 2498eba75edSNavdeep Parhar &nm_txq->cntxt_id, 0, "SGE context id of the queue"); 250473f6163SNavdeep Parhar 
SYSCTL_ADD_U16(&vi->ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 251473f6163SNavdeep Parhar &nm_txq->cidx, 0, "consumer index"); 252473f6163SNavdeep Parhar SYSCTL_ADD_U16(&vi->ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, 253473f6163SNavdeep Parhar &nm_txq->pidx, 0, "producer index"); 2548eba75edSNavdeep Parhar 2558eba75edSNavdeep Parhar return (rc); 2568eba75edSNavdeep Parhar } 2578eba75edSNavdeep Parhar 2588eba75edSNavdeep Parhar int 2598eba75edSNavdeep Parhar free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq) 2608eba75edSNavdeep Parhar { 2618eba75edSNavdeep Parhar struct adapter *sc = vi->adapter; 2628eba75edSNavdeep Parhar 2638eba75edSNavdeep Parhar if (!(vi->flags & VI_INIT_DONE)) 2648eba75edSNavdeep Parhar return (0); 2658eba75edSNavdeep Parhar 2668eba75edSNavdeep Parhar if (nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID) 2678eba75edSNavdeep Parhar free_nm_txq_hwq(vi, nm_txq); 2688eba75edSNavdeep Parhar MPASS(nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID); 2698eba75edSNavdeep Parhar 2708eba75edSNavdeep Parhar free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba, 2718eba75edSNavdeep Parhar nm_txq->desc); 2728eba75edSNavdeep Parhar 2738eba75edSNavdeep Parhar return (0); 2748eba75edSNavdeep Parhar } 2758eba75edSNavdeep Parhar 276298d969cSNavdeep Parhar static int 277df275ae5SNavdeep Parhar alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq) 278298d969cSNavdeep Parhar { 279df275ae5SNavdeep Parhar int rc, cntxt_id; 280298d969cSNavdeep Parhar __be32 v; 2817c228be3SNavdeep Parhar struct adapter *sc = vi->adapter; 282df275ae5SNavdeep Parhar struct port_info *pi = vi->pi; 28390e7434aSNavdeep Parhar struct sge_params *sp = &sc->params.sge; 284fe2ebb76SJohn Baldwin struct netmap_adapter *na = NA(vi->ifp); 285298d969cSNavdeep Parhar struct fw_iq_cmd c; 286df275ae5SNavdeep Parhar const int cong_drop = nm_cong_drop; 287df275ae5SNavdeep Parhar const int cong_map = pi->rx_e_chan_map; 288298d969cSNavdeep Parhar 289298d969cSNavdeep Parhar MPASS(na != 
NULL); 290298d969cSNavdeep Parhar MPASS(nm_rxq->iq_desc != NULL); 291298d969cSNavdeep Parhar MPASS(nm_rxq->fl_desc != NULL); 292298d969cSNavdeep Parhar 293fe2ebb76SJohn Baldwin bzero(nm_rxq->iq_desc, vi->qsize_rxq * IQ_ESIZE); 29490e7434aSNavdeep Parhar bzero(nm_rxq->fl_desc, na->num_rx_desc * EQ_ESIZE + sp->spg_len); 295298d969cSNavdeep Parhar 296298d969cSNavdeep Parhar bzero(&c, sizeof(c)); 297298d969cSNavdeep Parhar c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 298298d969cSNavdeep Parhar F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | 299298d969cSNavdeep Parhar V_FW_IQ_CMD_VFN(0)); 3008eba75edSNavdeep Parhar c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_IQSTART | FW_LEN16(c)); 3018eba75edSNavdeep Parhar if (nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID) 3028eba75edSNavdeep Parhar c.alloc_to_len16 |= htobe32(F_FW_IQ_CMD_ALLOC); 3038eba75edSNavdeep Parhar else { 3048eba75edSNavdeep Parhar c.iqid = htobe16(nm_rxq->iq_cntxt_id); 3058eba75edSNavdeep Parhar c.fl0id = htobe16(nm_rxq->fl_cntxt_id); 3068eba75edSNavdeep Parhar c.fl1id = htobe16(0xffff); 3078eba75edSNavdeep Parhar c.physiqid = htobe16(nm_rxq->iq_abs_id); 3088eba75edSNavdeep Parhar } 309f549e352SNavdeep Parhar MPASS(!forwarding_intr_to_fwq(sc)); 310298d969cSNavdeep Parhar KASSERT(nm_rxq->intr_idx < sc->intr_count, 311f549e352SNavdeep Parhar ("%s: invalid direct intr_idx %d", __func__, nm_rxq->intr_idx)); 312298d969cSNavdeep Parhar v = V_FW_IQ_CMD_IQANDSTINDEX(nm_rxq->intr_idx); 313298d969cSNavdeep Parhar c.type_to_iqandstindex = htobe32(v | 314298d969cSNavdeep Parhar V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 315fe2ebb76SJohn Baldwin V_FW_IQ_CMD_VIID(vi->viid) | 316298d969cSNavdeep Parhar V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); 317df275ae5SNavdeep Parhar c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | 318298d969cSNavdeep Parhar F_FW_IQ_CMD_IQGTSMODE | 319298d969cSNavdeep Parhar V_FW_IQ_CMD_IQINTCNTTHRESH(0) | 320b2daa9a9SNavdeep Parhar 
V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); 321fe2ebb76SJohn Baldwin c.iqsize = htobe16(vi->qsize_rxq); 322298d969cSNavdeep Parhar c.iqaddr = htobe64(nm_rxq->iq_ba); 323df275ae5SNavdeep Parhar if (cong_drop != -1) { 3241605bac6SNavdeep Parhar c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN | 325df275ae5SNavdeep Parhar V_FW_IQ_CMD_FL0CNGCHMAP(cong_map) | F_FW_IQ_CMD_FL0CONGCIF | 3261605bac6SNavdeep Parhar F_FW_IQ_CMD_FL0CONGEN); 3271605bac6SNavdeep Parhar } 328298d969cSNavdeep Parhar c.iqns_to_fl0congen |= 329298d969cSNavdeep Parhar htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | 330c387ff00SNavdeep Parhar V_FW_IQ_CMD_IQTYPE(FW_IQ_IQTYPE_NIC) | 331298d969cSNavdeep Parhar F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | 33208aeb151SNavdeep Parhar (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | 33308aeb151SNavdeep Parhar (black_hole == 2 ? F_FW_IQ_CMD_FL0PACKEN : 0)); 334298d969cSNavdeep Parhar c.fl0dcaen_to_fl0cidxfthresh = 335ed7e5640SNavdeep Parhar htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ? 336adb0cd84SNavdeep Parhar X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B_T6) | 337ed7e5640SNavdeep Parhar V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ? 
338ed7e5640SNavdeep Parhar X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B)); 33990e7434aSNavdeep Parhar c.fl0size = htobe16(na->num_rx_desc / 8 + sp->spg_len / EQ_ESIZE); 340298d969cSNavdeep Parhar c.fl0addr = htobe64(nm_rxq->fl_ba); 341298d969cSNavdeep Parhar 342298d969cSNavdeep Parhar rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 343298d969cSNavdeep Parhar if (rc != 0) { 344298d969cSNavdeep Parhar device_printf(sc->dev, 345298d969cSNavdeep Parhar "failed to create netmap ingress queue: %d\n", rc); 346298d969cSNavdeep Parhar return (rc); 347298d969cSNavdeep Parhar } 348298d969cSNavdeep Parhar 349298d969cSNavdeep Parhar nm_rxq->iq_cidx = 0; 35090e7434aSNavdeep Parhar MPASS(nm_rxq->iq_sidx == vi->qsize_rxq - sp->spg_len / IQ_ESIZE); 351298d969cSNavdeep Parhar nm_rxq->iq_gen = F_RSPD_GEN; 352298d969cSNavdeep Parhar nm_rxq->iq_cntxt_id = be16toh(c.iqid); 353298d969cSNavdeep Parhar nm_rxq->iq_abs_id = be16toh(c.physiqid); 354298d969cSNavdeep Parhar cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start; 355b20b25e7SNavdeep Parhar if (cntxt_id >= sc->sge.iqmap_sz) { 356298d969cSNavdeep Parhar panic ("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)", 357b20b25e7SNavdeep Parhar __func__, cntxt_id, sc->sge.iqmap_sz - 1); 358298d969cSNavdeep Parhar } 359298d969cSNavdeep Parhar sc->sge.iqmap[cntxt_id] = (void *)nm_rxq; 360298d969cSNavdeep Parhar 361298d969cSNavdeep Parhar nm_rxq->fl_cntxt_id = be16toh(c.fl0id); 362298d969cSNavdeep Parhar nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; 36315ca0766SNavdeep Parhar nm_rxq->fl_db_saved = 0; 36415ca0766SNavdeep Parhar /* matches the X_FETCHBURSTMAX_512B or X_FETCHBURSTMAX_256B above. */ 36515ca0766SNavdeep Parhar nm_rxq->fl_db_threshold = chip_id(sc) <= CHELSIO_T5 ? 
8 : 4; 366298d969cSNavdeep Parhar MPASS(nm_rxq->fl_sidx == na->num_rx_desc); 367298d969cSNavdeep Parhar cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start; 368b20b25e7SNavdeep Parhar if (cntxt_id >= sc->sge.eqmap_sz) { 369298d969cSNavdeep Parhar panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)", 370b20b25e7SNavdeep Parhar __func__, cntxt_id, sc->sge.eqmap_sz - 1); 371298d969cSNavdeep Parhar } 372298d969cSNavdeep Parhar sc->sge.eqmap[cntxt_id] = (void *)nm_rxq; 373298d969cSNavdeep Parhar 374d1205d09SNavdeep Parhar nm_rxq->fl_db_val = V_QID(nm_rxq->fl_cntxt_id) | 375d1205d09SNavdeep Parhar sc->chip_params->sge_fl_db; 376298d969cSNavdeep Parhar 377df275ae5SNavdeep Parhar if (chip_id(sc) >= CHELSIO_T5 && cong_drop != -1) { 378df275ae5SNavdeep Parhar t4_sge_set_conm_context(sc, nm_rxq->iq_cntxt_id, cong_drop, 379df275ae5SNavdeep Parhar cong_map); 3801605bac6SNavdeep Parhar } 3811605bac6SNavdeep Parhar 382315048f2SJohn Baldwin t4_write_reg(sc, sc->sge_gts_reg, 3831cdfce07SNavdeep Parhar V_INGRESSQID(nm_rxq->iq_cntxt_id) | 3841cdfce07SNavdeep Parhar V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx))); 385298d969cSNavdeep Parhar 386298d969cSNavdeep Parhar return (rc); 387298d969cSNavdeep Parhar } 388298d969cSNavdeep Parhar 389298d969cSNavdeep Parhar static int 390fe2ebb76SJohn Baldwin free_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq) 391298d969cSNavdeep Parhar { 3927c228be3SNavdeep Parhar struct adapter *sc = vi->adapter; 393298d969cSNavdeep Parhar int rc; 394298d969cSNavdeep Parhar 395298d969cSNavdeep Parhar rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP, 396298d969cSNavdeep Parhar nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff); 397298d969cSNavdeep Parhar if (rc != 0) 398298d969cSNavdeep Parhar device_printf(sc->dev, "%s: failed for iq %d, fl %d: %d\n", 399298d969cSNavdeep Parhar __func__, nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, rc); 400a8c4fcb9SNavdeep Parhar nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID; 401298d969cSNavdeep Parhar 
return (rc); 402298d969cSNavdeep Parhar } 403298d969cSNavdeep Parhar 404298d969cSNavdeep Parhar static int 405fe2ebb76SJohn Baldwin alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq) 406298d969cSNavdeep Parhar { 407298d969cSNavdeep Parhar int rc, cntxt_id; 408298d969cSNavdeep Parhar size_t len; 4097c228be3SNavdeep Parhar struct adapter *sc = vi->adapter; 410fe2ebb76SJohn Baldwin struct netmap_adapter *na = NA(vi->ifp); 411298d969cSNavdeep Parhar struct fw_eq_eth_cmd c; 412298d969cSNavdeep Parhar 413298d969cSNavdeep Parhar MPASS(na != NULL); 414298d969cSNavdeep Parhar MPASS(nm_txq->desc != NULL); 415298d969cSNavdeep Parhar 41690e7434aSNavdeep Parhar len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len; 417298d969cSNavdeep Parhar bzero(nm_txq->desc, len); 418298d969cSNavdeep Parhar 419298d969cSNavdeep Parhar bzero(&c, sizeof(c)); 420298d969cSNavdeep Parhar c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 421298d969cSNavdeep Parhar F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | 422298d969cSNavdeep Parhar V_FW_EQ_ETH_CMD_VFN(0)); 4238eba75edSNavdeep Parhar c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); 4248eba75edSNavdeep Parhar if (nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID) 4258eba75edSNavdeep Parhar c.alloc_to_len16 |= htobe32(F_FW_EQ_ETH_CMD_ALLOC); 4268eba75edSNavdeep Parhar else 4278eba75edSNavdeep Parhar c.eqid_pkd = htobe32(V_FW_EQ_ETH_CMD_EQID(nm_txq->cntxt_id)); 428fd215e45SNavdeep Parhar c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE | 429fe2ebb76SJohn Baldwin F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid)); 430298d969cSNavdeep Parhar c.fetchszm_to_iqid = 431298d969cSNavdeep Parhar htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 432fe2ebb76SJohn Baldwin V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | 433298d969cSNavdeep Parhar V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id)); 434adb0cd84SNavdeep Parhar 
c.dcaen_to_eqsize = 435adb0cd84SNavdeep Parhar htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ? 436adb0cd84SNavdeep Parhar X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) | 437298d969cSNavdeep Parhar V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 438298d969cSNavdeep Parhar V_FW_EQ_ETH_CMD_EQSIZE(len / EQ_ESIZE)); 439298d969cSNavdeep Parhar c.eqaddr = htobe64(nm_txq->ba); 440298d969cSNavdeep Parhar 441298d969cSNavdeep Parhar rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 442298d969cSNavdeep Parhar if (rc != 0) { 443fe2ebb76SJohn Baldwin device_printf(vi->dev, 444298d969cSNavdeep Parhar "failed to create netmap egress queue: %d\n", rc); 445298d969cSNavdeep Parhar return (rc); 446298d969cSNavdeep Parhar } 447298d969cSNavdeep Parhar 448298d969cSNavdeep Parhar nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); 449298d969cSNavdeep Parhar cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start; 450b20b25e7SNavdeep Parhar if (cntxt_id >= sc->sge.eqmap_sz) 451298d969cSNavdeep Parhar panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)", __func__, 452b20b25e7SNavdeep Parhar cntxt_id, sc->sge.eqmap_sz - 1); 453298d969cSNavdeep Parhar sc->sge.eqmap[cntxt_id] = (void *)nm_txq; 454298d969cSNavdeep Parhar 455298d969cSNavdeep Parhar nm_txq->pidx = nm_txq->cidx = 0; 456298d969cSNavdeep Parhar MPASS(nm_txq->sidx == na->num_tx_desc); 457298d969cSNavdeep Parhar nm_txq->equiqidx = nm_txq->equeqidx = nm_txq->dbidx = 0; 458298d969cSNavdeep Parhar 459298d969cSNavdeep Parhar nm_txq->doorbells = sc->doorbells; 460298d969cSNavdeep Parhar if (isset(&nm_txq->doorbells, DOORBELL_UDB) || 461298d969cSNavdeep Parhar isset(&nm_txq->doorbells, DOORBELL_UDBWC) || 462298d969cSNavdeep Parhar isset(&nm_txq->doorbells, DOORBELL_WCWR)) { 46390e7434aSNavdeep Parhar uint32_t s_qpp = sc->params.sge.eq_s_qpp; 464298d969cSNavdeep Parhar uint32_t mask = (1 << s_qpp) - 1; 465298d969cSNavdeep Parhar volatile uint8_t *udb; 466298d969cSNavdeep Parhar 467298d969cSNavdeep Parhar udb = sc->udbs_base + 
UDBS_DB_OFFSET; 468298d969cSNavdeep Parhar udb += (nm_txq->cntxt_id >> s_qpp) << PAGE_SHIFT; 469298d969cSNavdeep Parhar nm_txq->udb_qid = nm_txq->cntxt_id & mask; 470f10405b3SNavdeep Parhar if (nm_txq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) 471298d969cSNavdeep Parhar clrbit(&nm_txq->doorbells, DOORBELL_WCWR); 472298d969cSNavdeep Parhar else { 473298d969cSNavdeep Parhar udb += nm_txq->udb_qid << UDBS_SEG_SHIFT; 474298d969cSNavdeep Parhar nm_txq->udb_qid = 0; 475298d969cSNavdeep Parhar } 476298d969cSNavdeep Parhar nm_txq->udb = (volatile void *)udb; 477298d969cSNavdeep Parhar } 478298d969cSNavdeep Parhar 479822967e7SNavdeep Parhar if (sc->params.fw_vers < FW_VERSION32(1, 25, 1, 0)) { 480822967e7SNavdeep Parhar uint32_t param, val; 481822967e7SNavdeep Parhar 482822967e7SNavdeep Parhar param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 483822967e7SNavdeep Parhar V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH) | 484822967e7SNavdeep Parhar V_FW_PARAMS_PARAM_YZ(nm_txq->cntxt_id); 485822967e7SNavdeep Parhar val = 0xff; 486822967e7SNavdeep Parhar rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 487822967e7SNavdeep Parhar if (rc != 0) { 488822967e7SNavdeep Parhar device_printf(vi->dev, 489822967e7SNavdeep Parhar "failed to bind netmap txq %d to class 0xff: %d\n", 490822967e7SNavdeep Parhar nm_txq->cntxt_id, rc); 491822967e7SNavdeep Parhar rc = 0; 492822967e7SNavdeep Parhar } 493822967e7SNavdeep Parhar } 494822967e7SNavdeep Parhar 495298d969cSNavdeep Parhar return (rc); 496298d969cSNavdeep Parhar } 497298d969cSNavdeep Parhar 498298d969cSNavdeep Parhar static int 499fe2ebb76SJohn Baldwin free_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq) 500298d969cSNavdeep Parhar { 5017c228be3SNavdeep Parhar struct adapter *sc = vi->adapter; 502298d969cSNavdeep Parhar int rc; 503298d969cSNavdeep Parhar 504298d969cSNavdeep Parhar rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id); 505298d969cSNavdeep Parhar if (rc != 0) 506298d969cSNavdeep Parhar 
device_printf(sc->dev, "%s: failed for eq %d: %d\n", __func__, 507298d969cSNavdeep Parhar nm_txq->cntxt_id, rc); 508a8c4fcb9SNavdeep Parhar nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID; 509298d969cSNavdeep Parhar return (rc); 510298d969cSNavdeep Parhar } 511298d969cSNavdeep Parhar 512298d969cSNavdeep Parhar static int 513a9f47658SNavdeep Parhar cxgbe_netmap_simple_rss(struct adapter *sc, struct vi_info *vi, 514954712e8SJustin Hibbits if_t ifp, struct netmap_adapter *na) 515a9f47658SNavdeep Parhar { 516a9f47658SNavdeep Parhar struct netmap_kring *kring; 517a9f47658SNavdeep Parhar struct sge_nm_rxq *nm_rxq; 518a9f47658SNavdeep Parhar int rc, i, j, nm_state, defq; 519a9f47658SNavdeep Parhar uint16_t *rss; 520a9f47658SNavdeep Parhar 521a9f47658SNavdeep Parhar /* 522a9f47658SNavdeep Parhar * Check if there's at least one active (or about to go active) netmap 523a9f47658SNavdeep Parhar * rx queue. 524a9f47658SNavdeep Parhar */ 525a9f47658SNavdeep Parhar defq = -1; 526a9f47658SNavdeep Parhar for_each_nm_rxq(vi, j, nm_rxq) { 527a9f47658SNavdeep Parhar nm_state = atomic_load_int(&nm_rxq->nm_state); 528a9f47658SNavdeep Parhar kring = na->rx_rings[nm_rxq->nid]; 529a9f47658SNavdeep Parhar if ((nm_state != NM_OFF && !nm_kring_pending_off(kring)) || 530a9f47658SNavdeep Parhar (nm_state == NM_OFF && nm_kring_pending_on(kring))) { 531a9f47658SNavdeep Parhar MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID); 532a9f47658SNavdeep Parhar if (defq == -1) { 533a9f47658SNavdeep Parhar defq = nm_rxq->iq_abs_id; 534a9f47658SNavdeep Parhar break; 535a9f47658SNavdeep Parhar } 536a9f47658SNavdeep Parhar } 537a9f47658SNavdeep Parhar } 538a9f47658SNavdeep Parhar 539a9f47658SNavdeep Parhar if (defq == -1) { 540a9f47658SNavdeep Parhar /* No active netmap queues. Switch back to NIC queues. 
*/ 541a9f47658SNavdeep Parhar rss = vi->rss; 542a9f47658SNavdeep Parhar defq = vi->rss[0]; 543a9f47658SNavdeep Parhar } else { 544a9f47658SNavdeep Parhar for (i = 0; i < vi->rss_size;) { 545a9f47658SNavdeep Parhar for_each_nm_rxq(vi, j, nm_rxq) { 546a9f47658SNavdeep Parhar nm_state = atomic_load_int(&nm_rxq->nm_state); 547a9f47658SNavdeep Parhar kring = na->rx_rings[nm_rxq->nid]; 548a9f47658SNavdeep Parhar if ((nm_state != NM_OFF && 549a9f47658SNavdeep Parhar !nm_kring_pending_off(kring)) || 550a9f47658SNavdeep Parhar (nm_state == NM_OFF && 551a9f47658SNavdeep Parhar nm_kring_pending_on(kring))) { 552a9f47658SNavdeep Parhar MPASS(nm_rxq->iq_cntxt_id != 553a9f47658SNavdeep Parhar INVALID_NM_RXQ_CNTXT_ID); 554a9f47658SNavdeep Parhar vi->nm_rss[i++] = nm_rxq->iq_abs_id; 555a9f47658SNavdeep Parhar if (i == vi->rss_size) 556a9f47658SNavdeep Parhar break; 557a9f47658SNavdeep Parhar } 558a9f47658SNavdeep Parhar } 559a9f47658SNavdeep Parhar } 560a9f47658SNavdeep Parhar rss = vi->nm_rss; 561a9f47658SNavdeep Parhar } 562a9f47658SNavdeep Parhar 563a9f47658SNavdeep Parhar rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, rss, 564a9f47658SNavdeep Parhar vi->rss_size); 565a9f47658SNavdeep Parhar if (rc != 0) 566a9f47658SNavdeep Parhar if_printf(ifp, "netmap rss_config failed: %d\n", rc); 567a9f47658SNavdeep Parhar 568a9f47658SNavdeep Parhar rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, defq, 0, 0); 569a9f47658SNavdeep Parhar if (rc != 0) { 570a9f47658SNavdeep Parhar if_printf(ifp, "netmap defaultq config failed: %d\n", rc); 571a9f47658SNavdeep Parhar } 572a9f47658SNavdeep Parhar 573a9f47658SNavdeep Parhar return (rc); 574a9f47658SNavdeep Parhar } 575a9f47658SNavdeep Parhar 576a9f47658SNavdeep Parhar /* 577a9f47658SNavdeep Parhar * Odd number of rx queues work best for split RSS mode as the first queue can 578a9f47658SNavdeep Parhar * be dedicated for non-RSS traffic and the rest divided into two equal halves. 
579a9f47658SNavdeep Parhar */ 580a9f47658SNavdeep Parhar static int 581a9f47658SNavdeep Parhar cxgbe_netmap_split_rss(struct adapter *sc, struct vi_info *vi, 582954712e8SJustin Hibbits if_t ifp, struct netmap_adapter *na) 583a9f47658SNavdeep Parhar { 584a9f47658SNavdeep Parhar struct netmap_kring *kring; 585a9f47658SNavdeep Parhar struct sge_nm_rxq *nm_rxq; 586a9f47658SNavdeep Parhar int rc, i, j, nm_state, defq; 587a9f47658SNavdeep Parhar int nactive[2] = {0, 0}; 588a9f47658SNavdeep Parhar int dq[2] = {-1, -1}; 589a9f47658SNavdeep Parhar bool dq_norss; /* default queue should not be in RSS table. */ 590a9f47658SNavdeep Parhar 591a9f47658SNavdeep Parhar MPASS(nm_split_rss != 0); 592a9f47658SNavdeep Parhar MPASS(vi->nnmrxq > 1); 593a9f47658SNavdeep Parhar 594a9f47658SNavdeep Parhar for_each_nm_rxq(vi, i, nm_rxq) { 595a9f47658SNavdeep Parhar j = i / ((vi->nnmrxq + 1) / 2); 596a9f47658SNavdeep Parhar nm_state = atomic_load_int(&nm_rxq->nm_state); 597a9f47658SNavdeep Parhar kring = na->rx_rings[nm_rxq->nid]; 598a9f47658SNavdeep Parhar if ((nm_state != NM_OFF && !nm_kring_pending_off(kring)) || 599a9f47658SNavdeep Parhar (nm_state == NM_OFF && nm_kring_pending_on(kring))) { 600a9f47658SNavdeep Parhar MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID); 601a9f47658SNavdeep Parhar nactive[j]++; 602a9f47658SNavdeep Parhar if (dq[j] == -1) { 603a9f47658SNavdeep Parhar dq[j] = nm_rxq->iq_abs_id; 604a9f47658SNavdeep Parhar break; 605a9f47658SNavdeep Parhar } 606a9f47658SNavdeep Parhar } 607a9f47658SNavdeep Parhar } 608a9f47658SNavdeep Parhar 609a9f47658SNavdeep Parhar if (nactive[0] == 0 || nactive[1] == 0) 610a9f47658SNavdeep Parhar return (cxgbe_netmap_simple_rss(sc, vi, ifp, na)); 611a9f47658SNavdeep Parhar 612a9f47658SNavdeep Parhar MPASS(dq[0] != -1 && dq[1] != -1); 613a9f47658SNavdeep Parhar if (nactive[0] > nactive[1]) { 614a9f47658SNavdeep Parhar defq = dq[0]; 615a9f47658SNavdeep Parhar dq_norss = true; 616a9f47658SNavdeep Parhar } else if (nactive[0] < nactive[1]) 
{ 617a9f47658SNavdeep Parhar defq = dq[1]; 618a9f47658SNavdeep Parhar dq_norss = true; 619a9f47658SNavdeep Parhar } else { 620a9f47658SNavdeep Parhar defq = dq[0]; 621a9f47658SNavdeep Parhar dq_norss = false; 622a9f47658SNavdeep Parhar } 623a9f47658SNavdeep Parhar 624a9f47658SNavdeep Parhar i = 0; 625a9f47658SNavdeep Parhar nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq]; 626a9f47658SNavdeep Parhar while (i < vi->rss_size / 2) { 627a9f47658SNavdeep Parhar for (j = 0; j < (vi->nnmrxq + 1) / 2; j++) { 628a9f47658SNavdeep Parhar nm_state = atomic_load_int(&nm_rxq[j].nm_state); 629a9f47658SNavdeep Parhar kring = na->rx_rings[nm_rxq[j].nid]; 630a9f47658SNavdeep Parhar if ((nm_state == NM_OFF && 631a9f47658SNavdeep Parhar !nm_kring_pending_on(kring)) || 632a9f47658SNavdeep Parhar (nm_state == NM_ON && 633a9f47658SNavdeep Parhar nm_kring_pending_off(kring))) { 634a9f47658SNavdeep Parhar continue; 635a9f47658SNavdeep Parhar } 636a9f47658SNavdeep Parhar MPASS(nm_rxq[j].iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID); 637a9f47658SNavdeep Parhar if (dq_norss && defq == nm_rxq[j].iq_abs_id) 638a9f47658SNavdeep Parhar continue; 639a9f47658SNavdeep Parhar vi->nm_rss[i++] = nm_rxq[j].iq_abs_id; 640a9f47658SNavdeep Parhar if (i == vi->rss_size / 2) 641a9f47658SNavdeep Parhar break; 642a9f47658SNavdeep Parhar } 643a9f47658SNavdeep Parhar } 644a9f47658SNavdeep Parhar while (i < vi->rss_size) { 645a9f47658SNavdeep Parhar for (j = (vi->nnmrxq + 1) / 2; j < vi->nnmrxq; j++) { 646a9f47658SNavdeep Parhar nm_state = atomic_load_int(&nm_rxq[j].nm_state); 647a9f47658SNavdeep Parhar kring = na->rx_rings[nm_rxq[j].nid]; 648a9f47658SNavdeep Parhar if ((nm_state == NM_OFF && 649a9f47658SNavdeep Parhar !nm_kring_pending_on(kring)) || 650a9f47658SNavdeep Parhar (nm_state == NM_ON && 651a9f47658SNavdeep Parhar nm_kring_pending_off(kring))) { 652a9f47658SNavdeep Parhar continue; 653a9f47658SNavdeep Parhar } 654a9f47658SNavdeep Parhar MPASS(nm_rxq[j].iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID); 655a9f47658SNavdeep 
Parhar if (dq_norss && defq == nm_rxq[j].iq_abs_id) 656a9f47658SNavdeep Parhar continue; 657a9f47658SNavdeep Parhar vi->nm_rss[i++] = nm_rxq[j].iq_abs_id; 658a9f47658SNavdeep Parhar if (i == vi->rss_size) 659a9f47658SNavdeep Parhar break; 660a9f47658SNavdeep Parhar } 661a9f47658SNavdeep Parhar } 662a9f47658SNavdeep Parhar 663a9f47658SNavdeep Parhar rc = -t4_config_rss_range(sc, sc->mbox, vi->viid, 0, vi->rss_size, 664a9f47658SNavdeep Parhar vi->nm_rss, vi->rss_size); 665a9f47658SNavdeep Parhar if (rc != 0) 666a9f47658SNavdeep Parhar if_printf(ifp, "netmap split_rss_config failed: %d\n", rc); 667a9f47658SNavdeep Parhar 668a9f47658SNavdeep Parhar rc = -t4_config_vi_rss(sc, sc->mbox, vi->viid, vi->hashen, defq, 0, 0); 669a9f47658SNavdeep Parhar if (rc != 0) 670a9f47658SNavdeep Parhar if_printf(ifp, "netmap defaultq config failed: %d\n", rc); 671a9f47658SNavdeep Parhar 672a9f47658SNavdeep Parhar return (rc); 673a9f47658SNavdeep Parhar } 674a9f47658SNavdeep Parhar 675a9f47658SNavdeep Parhar static inline int 676954712e8SJustin Hibbits cxgbe_netmap_rss(struct adapter *sc, struct vi_info *vi, if_t ifp, 677a9f47658SNavdeep Parhar struct netmap_adapter *na) 678a9f47658SNavdeep Parhar { 679a9f47658SNavdeep Parhar 680a9f47658SNavdeep Parhar if (nm_split_rss == 0 || vi->nnmrxq == 1) 681a9f47658SNavdeep Parhar return (cxgbe_netmap_simple_rss(sc, vi, ifp, na)); 682a9f47658SNavdeep Parhar else 683a9f47658SNavdeep Parhar return (cxgbe_netmap_split_rss(sc, vi, ifp, na)); 684a9f47658SNavdeep Parhar } 685a9f47658SNavdeep Parhar 686a9f47658SNavdeep Parhar static int 687954712e8SJustin Hibbits cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, if_t ifp, 688298d969cSNavdeep Parhar struct netmap_adapter *na) 689298d969cSNavdeep Parhar { 690298d969cSNavdeep Parhar struct netmap_slot *slot; 691a8c4fcb9SNavdeep Parhar struct netmap_kring *kring; 692298d969cSNavdeep Parhar struct sge_nm_rxq *nm_rxq; 693298d969cSNavdeep Parhar struct sge_nm_txq *nm_txq; 694a9f47658SNavdeep Parhar int i, 
	    j, hwidx;
	struct rx_buf_info *rxb;

	ASSERT_SYNCHRONIZED_OP(sc);
	MPASS(vi->nnmrxq > 0);
	MPASS(vi->nnmtxq > 0);

	/* Netmap mode requires the interface to be fully up first. */
	if ((vi->flags & VI_INIT_DONE) == 0 ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		if_printf(ifp, "cannot enable netmap operation because "
		    "interface is not UP.\n");
		return (EAGAIN);
	}

	/* Find the hw buffer size index that matches netmap's buffer size. */
	rxb = &sc->sge.rx_buf_info[0];
	for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
		if (rxb->size1 == NETMAP_BUF_SIZE(na)) {
			hwidx = rxb->hwidx1;
			break;
		}
		if (rxb->size2 == NETMAP_BUF_SIZE(na)) {
			hwidx = rxb->hwidx2;
			break;
		}
	}
	if (i >= SW_ZONE_SIZES) {
		if_printf(ifp, "no hwidx for netmap buffer size %d.\n",
		    NETMAP_BUF_SIZE(na));
		return (ENXIO);
	}

	/* Must set caps before calling netmap_reset */
	nm_set_native_flags(na);

	for_each_nm_rxq(vi, i, nm_rxq) {
		kring = na->rx_rings[nm_rxq->nid];
		if (!nm_kring_pending_on(kring))
			continue;

		alloc_nm_rxq_hwq(vi, nm_rxq);
		nm_rxq->fl_hwidx = hwidx;
		slot = netmap_reset(na, NR_RX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */

		/* We deal with 8 bufs at a time */
		MPASS((na->num_rx_desc & 7) == 0);
		MPASS(na->num_rx_desc == nm_rxq->fl_sidx);
		/* Seed the entire freelist with netmap buffer addresses. */
		for (j = 0; j < nm_rxq->fl_sidx; j++) {
			uint64_t ba;

			PNMB(na, &slot[j], &ba);
			MPASS(ba != 0);
			nm_rxq->fl_desc[j] = htobe64(ba | hwidx);
		}
		/* Publish all but the last 8 buffers to the hardware. */
		j = nm_rxq->fl_pidx = nm_rxq->fl_sidx - 8;
		MPASS((j & 7) == 0);
		j /= 8;	/* driver pidx to hardware pidx */
		wmb();
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    nm_rxq->fl_db_val | V_PIDX(j));

		(void) atomic_cmpset_int(&nm_rxq->nm_state, NM_OFF, NM_ON);
	}

	for_each_nm_txq(vi, i, nm_txq) {
		kring = na->tx_rings[nm_txq->nid];
		if (!nm_kring_pending_on(kring))
			continue;

		alloc_nm_txq_hwq(vi, nm_txq);
		slot = netmap_reset(na, NR_TX, i, 0);
		MPASS(slot != NULL);	/* XXXNM: error check, not assert */
	}

	/* Lazily allocated, sized for the VI's full RSS table. */
	if (vi->nm_rss == NULL) {
		vi->nm_rss = malloc(vi->rss_size * sizeof(uint16_t), M_CXGBE,
		    M_ZERO | M_WAITOK);
	}

	return (cxgbe_netmap_rss(sc, vi, ifp, na));
}

static int
cxgbe_netmap_off(struct adapter *sc, struct vi_info *vi, if_t ifp,
    struct netmap_adapter *na)
{
	struct netmap_kring *kring;
	int rc, i, nm_state, nactive;
	struct sge_nm_txq *nm_txq;
	struct sge_nm_rxq *nm_rxq;

	ASSERT_SYNCHRONIZED_OP(sc);
	MPASS(vi->nnmrxq > 0);
	MPASS(vi->nnmtxq > 0);

	if (!nm_netmap_on(na))
		return (0);

	if ((vi->flags & VI_INIT_DONE) == 0)
		return (0);

	/* First remove the queues that are stopping from the RSS table. */
	rc = cxgbe_netmap_rss(sc, vi, ifp, na);
	if (rc != 0)
		return (rc);	/* error message logged already.
 */

	/* Stop the tx queues that are transitioning out of netmap mode. */
	for_each_nm_txq(vi, i, nm_txq) {
		kring = na->tx_rings[nm_txq->nid];
		if (!nm_kring_pending_off(kring))
			continue;
		MPASS(nm_txq->cntxt_id != INVALID_NM_TXQ_CNTXT_ID);

		rc = -t4_eth_eq_stop(sc, sc->mbox, sc->pf, 0, nm_txq->cntxt_id);
		if (rc != 0) {
			device_printf(vi->dev,
			    "failed to stop nm_txq[%d]: %d.\n", i, rc);
			return (rc);
		}

		/* XXX: netmap, not the driver, should do this. */
		kring->rhead = kring->rcur = kring->nr_hwcur = 0;
		kring->rtail = kring->nr_hwtail = kring->nkr_num_slots - 1;
	}
	/* Stop the rx queues going off; count the ones that remain active. */
	nactive = 0;
	for_each_nm_rxq(vi, i, nm_rxq) {
		nm_state = atomic_load_int(&nm_rxq->nm_state);
		kring = na->rx_rings[nm_rxq->nid];
		if (nm_state != NM_OFF && !nm_kring_pending_off(kring))
			nactive++;
		if (!nm_kring_pending_off(kring))
			continue;
		MPASS(nm_state != NM_OFF);
		MPASS(nm_rxq->iq_cntxt_id != INVALID_NM_RXQ_CNTXT_ID);

		rc = -t4_iq_stop(sc, sc->mbox, sc->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
		    nm_rxq->iq_cntxt_id, nm_rxq->fl_cntxt_id, 0xffff);
		if (rc != 0) {
			device_printf(vi->dev,
			    "failed to stop nm_rxq[%d]: %d.\n", i, rc);
			return (rc);
		}

		/* Wait for the interrupt handler to finish with this queue. */
		while (!atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_OFF))
			pause("nmst", 1);

		/* XXX: netmap, not the driver, should do this. */
		kring->rhead = kring->rcur = kring->nr_hwcur = 0;
		kring->rtail = kring->nr_hwtail = 0;
	}
	netmap_krings_mode_commit(na, 0);
	/* Leave native mode only when no netmap rx queue is left active. */
	if (nactive == 0)
		nm_clear_native_flags(na);

	return (rc);
}

/* netmap nm_register callback: toggles netmap mode under the driver lock. */
static int
cxgbe_netmap_reg(struct netmap_adapter *na, int on)
{
	if_t ifp = na->ifp;
	struct vi_info *vi = if_getsoftc(ifp);
	struct adapter *sc = vi->adapter;
	int rc;

	rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4nmreg");
	if (rc != 0)
		return (rc);
	if (on)
		rc = cxgbe_netmap_on(sc, vi, ifp, na);
	else
		rc = cxgbe_netmap_off(sc, vi, ifp, na);
	end_synchronized_op(sc, 0);

	return (rc);
}

/* How many packets can a single type1 WR carry in n descriptors */
static inline int
ndesc_to_npkt(const int n)
{

	MPASS(n > 0 && n <= SGE_MAX_WR_NDESC);

	return (n * 2 - 1);
}
#define MAX_NPKT_IN_TYPE1_WR	(ndesc_to_npkt(SGE_MAX_WR_NDESC))

/*
 * Space (in descriptors) needed for a type1 WR (TX_PKTS or TX_PKTS2) that
 * carries n packets
 */
static inline int
npkt_to_ndesc(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return ((n + 2) / 2);
}

/*
 * Space (in 16B units) needed for a type1 WR (TX_PKTS or TX_PKTS2) that
 * carries n packets
 */
static inline int
npkt_to_len16(const int n)
{

	MPASS(n > 0 && n <= MAX_NPKT_IN_TYPE1_WR);

	return (n * 2 + 1);
}

/* Distance from the queue's pidx to the named index, modulo ring size. */
#define NMIDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->sidx)

/*
 * Notify the hardware of new tx descriptors (dbidx..pidx) using the best
 * doorbell mechanism the queue supports, then record pidx as doorbelled.
 */
static void
ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq)
{
	int n;
	u_int db = nm_txq->doorbells;

	MPASS(nm_txq->pidx != nm_txq->dbidx);

	n = NMIDXDIFF(nm_txq, dbidx);
	/* WCWR can only push a single descriptor at a time. */
	if (n > 1)
		clrbit(&db, DOORBELL_WCWR);
	wmb();

	switch (ffs(db) - 1) {
	case DOORBELL_UDB:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		break;

	case DOORBELL_WCWR: {
		volatile uint64_t *dst, *src;

		/*
		 * Queues whose 128B doorbell segment fits in the page do not
		 * use relative qid (udb_qid is always 0). Only queues with
		 * doorbell segments can do WCWR.
		 */
		KASSERT(nm_txq->udb_qid == 0 && n == 1,
		    ("%s: inappropriate doorbell (0x%x, %d, %d) for nm_txq %p",
		    __func__, nm_txq->doorbells, n, nm_txq->pidx, nm_txq));

		/* Copy the whole descriptor through the write-combined window. */
		dst = (volatile void *)((uintptr_t)nm_txq->udb +
		    UDBS_WR_OFFSET - UDBS_DB_OFFSET);
		src = (void *)&nm_txq->desc[nm_txq->dbidx];
		while (src != (void *)&nm_txq->desc[nm_txq->dbidx + 1])
			*dst++ = *src++;
		wmb();
		break;
	}

	case DOORBELL_UDBWC:
		*nm_txq->udb = htole32(V_QID(nm_txq->udb_qid) | V_PIDX(n));
		wmb();
		break;

	case DOORBELL_KDB:
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    V_QID(nm_txq->cntxt_id) | V_PIDX(n));
		break;
	}
	nm_txq->dbidx = nm_txq->pidx;
}

/*
 * Write work requests to send 'npkt' frames and ring the doorbell to send them
 * on their way. No need to check for wraparound.
 */
static void
cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq,
    struct netmap_kring *kring, int npkt, int npkt_remaining)
{
	struct netmap_ring *ring = kring->ring;
	struct netmap_slot *slot;
	const u_int lim = kring->nkr_num_slots - 1;
	struct fw_eth_tx_pkts_wr *wr = (void *)&nm_txq->desc[nm_txq->pidx];
	uint16_t len;
	uint64_t ba;
	struct cpl_tx_pkt_core *cpl;
	struct ulptx_sgl *usgl;
	int i, n;

	while (npkt) {
		/* One type1 WR carries at most MAX_NPKT_IN_TYPE1_WR frames. */
		n = min(npkt, MAX_NPKT_IN_TYPE1_WR);
		len = 0;

		wr = (void *)&nm_txq->desc[nm_txq->pidx];
		wr->op_pkd = nm_txq->op_pkd;
		wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(npkt_to_len16(n)));
		wr->npkt = n;
		wr->r3 = 0;
		wr->type = 1;
		cpl = (void *)(wr + 1);

		/* Each frame: a CPL header followed by a single-SGE list. */
		for (i = 0; i < n; i++) {
			slot = &ring->slot[kring->nr_hwcur];
			PNMB(kring->na, slot, &ba);
			MPASS(ba != 0);

			cpl->ctrl0 = nm_txq->cpl_ctrl0;
			cpl->pack = 0;
			cpl->len = htobe16(slot->len);
			cpl->ctrl1 = nm_txcsum ? 0 :
			    htobe64(F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS);

			usgl = (void *)(cpl + 1);
			usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
			    V_ULPTX_NSGE(1));
			usgl->len0 = htobe32(slot->len);
			usgl->addr0 = htobe64(ba + nm_get_offset(kring, slot));

			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			cpl = (void *)(usgl + 1);
			MPASS(slot->len + len <= UINT16_MAX);
			len += slot->len;
			kring->nr_hwcur = nm_next(kring->nr_hwcur, lim);
		}
		wr->plen = htobe16(len);

		npkt -= n;
		nm_txq->pidx += npkt_to_ndesc(n);
		MPASS(nm_txq->pidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->pidx == nm_txq->sidx)) {
			/*
			 * This routine doesn't know how to write WRs that wrap
			 * around. Make sure it wasn't asked to.
			 */
			MPASS(npkt == 0);
			nm_txq->pidx = 0;
		}

		if (npkt + npkt_remaining == 0) {
			/* All done. */
			if (lazy_tx_credit_flush == 0 ||
			    NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) {
				/* Request both an egress and ingress update. */
				wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
				    F_FW_WR_EQUIQ);
				nm_txq->equeqidx = nm_txq->pidx;
				nm_txq->equiqidx = nm_txq->pidx;
			} else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
				wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
				nm_txq->equeqidx = nm_txq->pidx;
			}
			ring_nm_txq_db(sc, nm_txq);
			return;
		}
		/* Ring the doorbell periodically while more WRs are coming. */
		if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC) {
			if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
				wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
				nm_txq->equeqidx = nm_txq->pidx;
			}
			ring_nm_txq_db(sc, nm_txq);
		}
	}

	/* Will get called again. */
	MPASS(npkt_remaining);
}

/* How many contiguous free descriptors starting at pidx */
static inline int
contiguous_ndesc_available(struct sge_nm_txq *nm_txq)
{

	if (nm_txq->cidx > nm_txq->pidx)
		return (nm_txq->cidx - nm_txq->pidx - 1);
	else if (nm_txq->cidx > 0)
		return (nm_txq->sidx - nm_txq->pidx);
	else
		return (nm_txq->sidx - nm_txq->pidx - 1);
}

/*
 * Walk completed WRs (up to the hardware cidx in the status page) and return
 * the number of frames whose buffers the hardware is done with.
 */
static int
reclaim_nm_tx_desc(struct sge_nm_txq *nm_txq)
{
	struct sge_qstat *spg = (void *)&nm_txq->desc[nm_txq->sidx];
	uint16_t hw_cidx = spg->cidx;	/* snapshot */
	struct fw_eth_tx_pkts_wr *wr;
	int n = 0;

	hw_cidx = be16toh(hw_cidx);

	while (nm_txq->cidx != hw_cidx) {
		wr = (void *)&nm_txq->desc[nm_txq->cidx];

		MPASS(wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)) ||
		    wr->op_pkd == htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR)));
		MPASS(wr->type == 1);
		MPASS(wr->npkt > 0 && wr->npkt <= MAX_NPKT_IN_TYPE1_WR);

		n += wr->npkt;
		nm_txq->cidx += npkt_to_ndesc(wr->npkt);

		/*
		 * We never sent a WR that wrapped around so the credits coming
		 * back, WR by WR, should never cause the cidx to wrap around
		 * either.
		 */
		MPASS(nm_txq->cidx <= nm_txq->sidx);
		if (__predict_false(nm_txq->cidx == nm_txq->sidx))
			nm_txq->cidx = 0;
	}

	return (n);
}

/* netmap nm_txsync callback: transmit new slots, then reclaim completions. */
static int
cxgbe_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	if_t ifp = na->ifp;
	struct vi_info *vi = if_getsoftc(ifp);
	struct adapter *sc = vi->adapter;
	struct sge_nm_txq *nm_txq = &sc->sge.nm_txq[vi->first_nm_txq + kring->ring_id];
	const u_int head = kring->rhead;
	u_int reclaimed = 0;
	int n, d, npkt_remaining, ndesc_remaining;

	/*
	 * Tx was at kring->nr_hwcur last time around and now we need to advance
	 * to kring->rhead. Note that the driver's pidx moves independent of
	 * netmap's kring->nr_hwcur (pidx counts descriptors and the relation
	 * between descriptors and frames isn't 1:1).
	 */

	npkt_remaining = head >= kring->nr_hwcur ? head - kring->nr_hwcur :
	    kring->nkr_num_slots - kring->nr_hwcur + head;
	while (npkt_remaining) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		ndesc_remaining = contiguous_ndesc_available(nm_txq);
		/* Can't run out of descriptors with packets still remaining */
		MPASS(ndesc_remaining > 0);

		/* # of desc needed to tx all remaining packets */
		d = (npkt_remaining / MAX_NPKT_IN_TYPE1_WR) * SGE_MAX_WR_NDESC;
		if (npkt_remaining % MAX_NPKT_IN_TYPE1_WR)
			d += npkt_to_ndesc(npkt_remaining % MAX_NPKT_IN_TYPE1_WR);

		if (d <= ndesc_remaining)
			n = npkt_remaining;
		else {
			/* Can't send all, calculate how many can be sent */
			n = (ndesc_remaining / SGE_MAX_WR_NDESC) *
			    MAX_NPKT_IN_TYPE1_WR;
			if (ndesc_remaining % SGE_MAX_WR_NDESC)
				n += ndesc_to_npkt(ndesc_remaining % SGE_MAX_WR_NDESC);
		}

		/* Send n packets and update nm_txq->pidx and kring->nr_hwcur */
		npkt_remaining -= n;
		cxgbe_nm_tx(sc, nm_txq, kring, n, npkt_remaining);
	}
	MPASS(npkt_remaining == 0);
	MPASS(kring->nr_hwcur == head);
	MPASS(nm_txq->dbidx == nm_txq->pidx);

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	if (reclaimed || flags & NAF_FORCE_RECLAIM || nm_kr_txempty(kring)) {
		reclaimed += reclaim_nm_tx_desc(nm_txq);
		kring->nr_hwtail += reclaimed;
		if (kring->nr_hwtail >= kring->nkr_num_slots)
			kring->nr_hwtail -= kring->nkr_num_slots;
	}

	return (0);
}

/* netmap nm_rxsync callback: report received frames, recycle consumed bufs. */
static int
cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	if_t ifp = na->ifp;
	struct vi_info *vi = if_getsoftc(ifp);
	struct adapter *sc = vi->adapter;
	struct sge_nm_rxq *nm_rxq = &sc->sge.nm_rxq[vi->first_nm_rxq + kring->ring_id];
	u_int const head = kring->rhead;
	u_int n;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;

	if (black_hole)
		return (0);	/* No updates ever.
*/ 118108aeb151SNavdeep Parhar 1182298d969cSNavdeep Parhar if (netmap_no_pendintr || force_update) { 1183298d969cSNavdeep Parhar kring->nr_hwtail = atomic_load_acq_32(&nm_rxq->fl_cidx); 1184298d969cSNavdeep Parhar kring->nr_kflags &= ~NKR_PENDINTR; 1185298d969cSNavdeep Parhar } 1186298d969cSNavdeep Parhar 11870afe96c7SNavdeep Parhar if (nm_rxq->fl_db_saved > 0 && starve_fl == 0) { 11880afe96c7SNavdeep Parhar wmb(); 11890afe96c7SNavdeep Parhar t4_write_reg(sc, sc->sge_kdoorbell_reg, 11900afe96c7SNavdeep Parhar nm_rxq->fl_db_val | V_PIDX(nm_rxq->fl_db_saved)); 11910afe96c7SNavdeep Parhar nm_rxq->fl_db_saved = 0; 11920afe96c7SNavdeep Parhar } 11930afe96c7SNavdeep Parhar 1194298d969cSNavdeep Parhar /* Userspace done with buffers from kring->nr_hwcur to head */ 1195298d969cSNavdeep Parhar n = head >= kring->nr_hwcur ? head - kring->nr_hwcur : 1196298d969cSNavdeep Parhar kring->nkr_num_slots - kring->nr_hwcur + head; 1197298d969cSNavdeep Parhar n &= ~7U; 1198298d969cSNavdeep Parhar if (n > 0) { 1199298d969cSNavdeep Parhar u_int fl_pidx = nm_rxq->fl_pidx; 1200298d969cSNavdeep Parhar struct netmap_slot *slot = &ring->slot[fl_pidx]; 1201298d969cSNavdeep Parhar uint64_t ba; 1202298d969cSNavdeep Parhar int i, dbinc = 0, hwidx = nm_rxq->fl_hwidx; 1203298d969cSNavdeep Parhar 1204298d969cSNavdeep Parhar /* 1205298d969cSNavdeep Parhar * We always deal with 8 buffers at a time. We must have 1206298d969cSNavdeep Parhar * stopped at an 8B boundary (fl_pidx) last time around and we 1207298d969cSNavdeep Parhar * must have a multiple of 8B buffers to give to the freelist. 
1208298d969cSNavdeep Parhar */ 1209298d969cSNavdeep Parhar MPASS((fl_pidx & 7) == 0); 1210298d969cSNavdeep Parhar MPASS((n & 7) == 0); 1211298d969cSNavdeep Parhar 1212b2daa9a9SNavdeep Parhar IDXINCR(kring->nr_hwcur, n, kring->nkr_num_slots); 1213aa301e5fSNavdeep Parhar IDXINCR(nm_rxq->fl_pidx, n, nm_rxq->fl_sidx2); 1214298d969cSNavdeep Parhar 1215298d969cSNavdeep Parhar while (n > 0) { 1216298d969cSNavdeep Parhar for (i = 0; i < 8; i++, fl_pidx++, slot++) { 12174bf50f18SLuigi Rizzo PNMB(na, slot, &ba); 12181cdfce07SNavdeep Parhar MPASS(ba != 0); 1219298d969cSNavdeep Parhar nm_rxq->fl_desc[fl_pidx] = htobe64(ba | hwidx); 1220298d969cSNavdeep Parhar slot->flags &= ~NS_BUF_CHANGED; 1221aa301e5fSNavdeep Parhar MPASS(fl_pidx <= nm_rxq->fl_sidx2); 1222298d969cSNavdeep Parhar } 1223298d969cSNavdeep Parhar n -= 8; 1224aa301e5fSNavdeep Parhar if (fl_pidx == nm_rxq->fl_sidx2) { 1225298d969cSNavdeep Parhar fl_pidx = 0; 1226298d969cSNavdeep Parhar slot = &ring->slot[0]; 1227298d969cSNavdeep Parhar } 122815ca0766SNavdeep Parhar if (++dbinc == nm_rxq->fl_db_threshold) { 1229298d969cSNavdeep Parhar wmb(); 12300afe96c7SNavdeep Parhar if (starve_fl) 12310afe96c7SNavdeep Parhar nm_rxq->fl_db_saved += dbinc; 12320afe96c7SNavdeep Parhar else { 1233315048f2SJohn Baldwin t4_write_reg(sc, sc->sge_kdoorbell_reg, 1234298d969cSNavdeep Parhar nm_rxq->fl_db_val | V_PIDX(dbinc)); 12350afe96c7SNavdeep Parhar } 1236298d969cSNavdeep Parhar dbinc = 0; 1237298d969cSNavdeep Parhar } 1238298d969cSNavdeep Parhar } 1239298d969cSNavdeep Parhar MPASS(nm_rxq->fl_pidx == fl_pidx); 1240298d969cSNavdeep Parhar 1241298d969cSNavdeep Parhar if (dbinc > 0) { 1242298d969cSNavdeep Parhar wmb(); 12430afe96c7SNavdeep Parhar if (starve_fl) 12440afe96c7SNavdeep Parhar nm_rxq->fl_db_saved += dbinc; 12450afe96c7SNavdeep Parhar else { 1246315048f2SJohn Baldwin t4_write_reg(sc, sc->sge_kdoorbell_reg, 1247298d969cSNavdeep Parhar nm_rxq->fl_db_val | V_PIDX(dbinc)); 1248298d969cSNavdeep Parhar } 1249298d969cSNavdeep Parhar } 
12500afe96c7SNavdeep Parhar } 1251298d969cSNavdeep Parhar 1252298d969cSNavdeep Parhar return (0); 1253298d969cSNavdeep Parhar } 1254298d969cSNavdeep Parhar 125562291463SNavdeep Parhar void 125662291463SNavdeep Parhar cxgbe_nm_attach(struct vi_info *vi) 1257298d969cSNavdeep Parhar { 1258fe2ebb76SJohn Baldwin struct port_info *pi; 1259fe2ebb76SJohn Baldwin struct adapter *sc; 1260298d969cSNavdeep Parhar struct netmap_adapter na; 1261298d969cSNavdeep Parhar 126262291463SNavdeep Parhar MPASS(vi->nnmrxq > 0); 126362291463SNavdeep Parhar MPASS(vi->ifp != NULL); 126462291463SNavdeep Parhar 1265fe2ebb76SJohn Baldwin pi = vi->pi; 1266fe2ebb76SJohn Baldwin sc = pi->adapter; 1267298d969cSNavdeep Parhar 1268298d969cSNavdeep Parhar bzero(&na, sizeof(na)); 1269298d969cSNavdeep Parhar 127062291463SNavdeep Parhar na.ifp = vi->ifp; 1271df8a58b1SNavdeep Parhar na.na_flags = NAF_BDG_MAYSLEEP | NAF_OFFSETS; 1272298d969cSNavdeep Parhar 1273298d969cSNavdeep Parhar /* Netmap doesn't know about the space reserved for the status page. */ 127490e7434aSNavdeep Parhar na.num_tx_desc = vi->qsize_txq - sc->params.sge.spg_len / EQ_ESIZE; 1275298d969cSNavdeep Parhar 1276298d969cSNavdeep Parhar /* 1277298d969cSNavdeep Parhar * The freelist's cidx/pidx drives netmap's rx cidx/pidx. So 1278298d969cSNavdeep Parhar * num_rx_desc is based on the number of buffers that can be held in the 1279298d969cSNavdeep Parhar * freelist, and not the number of entries in the iq. (These two are 1280298d969cSNavdeep Parhar * not exactly the same due to the space taken up by the status page). 1281298d969cSNavdeep Parhar */ 12824ed3c0e7SPedro F. 
	/* Freelist entries are managed 8 at a time; round down to a multiple. */
	na.num_rx_desc = rounddown(vi->qsize_rxq, 8);
	na.nm_txsync = cxgbe_netmap_txsync;
	na.nm_rxsync = cxgbe_netmap_rxsync;
	na.nm_register = cxgbe_netmap_reg;
	na.num_tx_rings = vi->nnmtxq;
	na.num_rx_rings = vi->nnmrxq;
	na.rx_buf_maxsize = MAX_MTU + sc->params.sge.fl_pktshift;
	netmap_attach(&na);	/* This adds IFCAP_NETMAP to if_capabilities */
}

/*
 * Detach the vi's ifp from the netmap framework.  Counterpart of
 * cxgbe_nm_attach.
 */
void
cxgbe_nm_detach(struct vi_info *vi)
{

	MPASS(vi->nnmrxq > 0);
	MPASS(vi->ifp != NULL);

	netmap_detach(vi->ifp);
}

/*
 * Return a pointer to the CPL payload embedded in a FW6 message.
 */
static inline const void *
unwrap_nm_fw6_msg(const struct cpl_fw6_msg *cpl)
{

	MPASS(cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL);

	/* data[0] is RSS header */
	return (&cpl->data[1]);
}

/*
 * Handle a CPL_SGE_EGR_UPDATE: look up the netmap tx queue for the egress
 * qid in the message and wake up its txsync.  (Continues onto the next span.)
 */
static void
handle_nm_sge_egr_update(struct adapter *sc, if_t ifp,
    const struct cpl_sge_egr_update *egr)
{
	uint32_t oq;
	struct sge_nm_txq *nm_txq;

	oq = be32toh(egr->opcode_qid);
	MPASS(G_CPL_OPCODE(oq) == CPL_SGE_EGR_UPDATE);
	nm_txq = (void
	    *)sc->sge.eqmap[G_EGR_QID(oq) - sc->sge.eq_start];

	/* Let netmap know there may be reclaimable tx descriptors. */
	netmap_tx_irq(ifp, nm_txq->nid);
}

/*
 * Service routine for a netmap rx queue's ingress queue.
 *
 * Drains response-queue descriptors until the generation bit no longer
 * matches (iq_gen is toggled on every wrap).  Received frames are written
 * into the corresponding netmap slots; CPL firmware messages and egress
 * updates are dispatched to their handlers.  Periodically (every rx_nframes
 * frames) the freelist cidx is published and netmap is woken up, and (every
 * rx_ndesc descriptors) the queue's cidx update is written to the GTS
 * register.  In black_hole mode buffers are recycled straight back to the
 * freelist here instead of being handed to userspace.
 */
void
service_nm_rxq(struct sge_nm_rxq *nm_rxq)
{
	struct vi_info *vi = nm_rxq->vi;
	struct adapter *sc = vi->adapter;
	if_t ifp = vi->ifp;
	struct netmap_adapter *na = NA(ifp);
	struct netmap_kring *kring = na->rx_rings[nm_rxq->nid];
	struct netmap_ring *ring = kring->ring;
	struct iq_desc *d = &nm_rxq->iq_desc[nm_rxq->iq_cidx];
	const void *cpl;
	uint32_t lq;
	u_int work = 0;
	uint8_t opcode;
	uint32_t fl_cidx = atomic_load_acq_32(&nm_rxq->fl_cidx);
	u_int fl_credits = fl_cidx & 7;	/* partial group of 8 carried over */
	u_int ndesc = 0;	/* desc processed since last cidx update */
	u_int nframes = 0;	/* frames processed since last netmap wakeup */

	while ((d->rsp.u.type_gen & F_RSPD_GEN) == nm_rxq->iq_gen) {

		/* Read the descriptor body only after the gen bit check. */
		rmb();

		lq = be32toh(d->rsp.pldbuflen_qid);
		opcode = d->rss.opcode;
		cpl = &d->cpl[0];

		switch (G_RSPD_TYPE(d->rsp.u.type_gen)) {
		case X_RSPD_TYPE_FLBUF:

			/* fall through */

		case X_RSPD_TYPE_CPL:
			MPASS(opcode < NUM_CPL_CMDS);

			switch (opcode) {
			case CPL_FW4_MSG:
			case CPL_FW6_MSG:
				/* Unwrap to the embedded CPL first. */
				cpl = unwrap_nm_fw6_msg(cpl);
				/* fall through */
			case CPL_SGE_EGR_UPDATE:
				handle_nm_sge_egr_update(sc, ifp, cpl);
				break;
			case CPL_RX_PKT:
				/*
				 * Note that the application must have netmap
				 * offsets (NETMAP_REQ_OPT_OFFSETS) enabled on
				 * the ring or its rx will not work correctly
				 * when fl_pktshift > 0.
				 */
				nm_write_offset(kring, &ring->slot[fl_cidx],
				    sc->params.sge.fl_pktshift);
				ring->slot[fl_cidx].len = G_RSPD_LEN(lq) -
				    sc->params.sge.fl_pktshift;
				ring->slot[fl_cidx].flags = 0;
				nframes++;
				/*
				 * No new freelist buffer consumed: frame was
				 * delivered in-place (packed mode).
				 */
				if (!(lq & F_RSPD_NEWBUF)) {
					MPASS(black_hole == 2);
					break;
				}
				fl_credits++;
				if (__predict_false(++fl_cidx == nm_rxq->fl_sidx))
					fl_cidx = 0;
				break;
			default:
				panic("%s: unexpected opcode 0x%x on nm_rxq %p",
				    __func__, opcode, nm_rxq);
			}
			break;

		case X_RSPD_TYPE_INTR:
			/* Not equipped to handle forwarded interrupts. */
			panic("%s: netmap queue received interrupt for iq %u\n",
			    __func__, lq);

		default:
			panic("%s: illegal response type %d on nm_rxq %p",
			    __func__, G_RSPD_TYPE(d->rsp.u.type_gen), nm_rxq);
		}

		/* Advance to the next iq descriptor, toggling gen on wrap. */
		d++;
		if (__predict_false(++nm_rxq->iq_cidx == nm_rxq->iq_sidx)) {
			nm_rxq->iq_cidx = 0;
			d = &nm_rxq->iq_desc[0];
			nm_rxq->iq_gen ^= F_RSPD_GEN;
		}

		/*
		 * NOTE(review): nframes is also incremented in the CPL_RX_PKT
		 * case above, so it advances twice per received frame here —
		 * confirm this double increment is intended.
		 */
		if (__predict_false(++nframes == rx_nframes) && !black_hole) {
			/* Publish freelist cidx before waking netmap up. */
			atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
			netmap_rx_irq(ifp, nm_rxq->nid, &work);
			nframes = 0;
		}

		if (__predict_false(++ndesc == rx_ndesc)) {
			/* black_hole: recycle complete groups of 8 buffers. */
			if (black_hole && fl_credits >= 8) {
				fl_credits /= 8;
				IDXINCR(nm_rxq->fl_pidx, fl_credits * 8,
				    nm_rxq->fl_sidx);
				t4_write_reg(sc, sc->sge_kdoorbell_reg,
				    nm_rxq->fl_db_val | V_PIDX(fl_credits));
				fl_credits = fl_cidx & 7;
			}
			/* Update the queue's cidx without rearming interrupts. */
			t4_write_reg(sc, sc->sge_gts_reg,
			    V_CIDXINC(ndesc) |
			    V_INGRESSQID(nm_rxq->iq_cntxt_id) |
			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
			ndesc = 0;
		}
	}

	/* Final publish of the freelist cidx and netmap wakeup. */
	atomic_store_rel_32(&nm_rxq->fl_cidx, fl_cidx);
	if (black_hole) {
		fl_credits /= 8;
		IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx);
		t4_write_reg(sc, sc->sge_kdoorbell_reg,
		    nm_rxq->fl_db_val | V_PIDX(fl_credits));
	} else if (nframes > 0)
		netmap_rx_irq(ifp, nm_rxq->nid, &work);

	/* Final cidx update; rearm with the configured holdoff timer. */
	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndesc) |
	    V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) |
	    V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));
}
#endif	/* DEV_NETMAP */