Lines Matching +full:rx +full:tx
111 /* Rx/Tx Queue Control */
159 /* Tx Subroutines */
166 /* Rx Subroutines */
191 #define mvneta_sc_lock(sc) mtx_lock(&sc->mtx)
192 #define mvneta_sc_unlock(sc) mtx_unlock(&sc->mtx)
270 "rx_good_oct", "Good Octets Rx"},
272 "rx_bad_oct", "Bad Octets Rx"},
276 "rx_good_frame", "Good Frames Rx"},
278 "rx_bad_frame", "Bad Frames Rx"},
280 "rx_bcast_frame", "Broadcast Frames Rx"},
282 "rx_mcast_frame", "Multicast Frames Rx"},
284 "rx_frame_1_64", "Frame Size 1 - 64"},
286 "rx_frame_65_127", "Frame Size 65 - 127"},
288 "rx_frame_128_255", "Frame Size 128 - 255"},
290 "rx_frame_256_511", "Frame Size 256 - 511"},
292 "rx_frame_512_1023", "Frame Size 512 - 1023"},
294 "rx_fame_1024_max", "Frame Size 1024 - Max"},
296 "tx_good_oct", "Good Octets Tx"},
298 "tx_good_frame", "Good Frames Tx"},
302 "tx_mcast_frame", "Multicast Frames Tx"},
304 "tx_bcast_frame", "Broadcast Frames Tx"},
308 "fc_tx", "Flow Control Tx"},
310 "fc_rx_good", "Good Flow Control Rx"},
312 "fc_rx_bad", "Bad Flow Control Rx"},
314 "pkt_undersize", "Undersized Packets Rx"},
316 "pkt_fragment", "Fragmented Packets Rx"},
318 "pkt_oversize", "Oversized Packets Rx"},
320 "pkt_jabber", "Jabber Packets Rx"},
322 "mac_rx_err", "MAC Rx Errors"},
334 { -1, 0}
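
The rows above are the trailing halves of a counter-description table; only the name/description strings fall inside the match. A minimal sketch of what a complete entry looks like is below. The struct layout and the MVNETA_MIB_RX_GOOD_OCT macro are assumptions; the regnum/reg64 fields are only implied by the MVNETA_READ_MIB() lines near the end of this listing (3513-3515).

    struct mvneta_mib_def {
            uint32_t         regnum;        /* offset of the MIB counter register */
            int              reg64;         /* counter spans two 32-bit registers */
            const char      *sysctl_name;   /* sysctl leaf name, e.g. "rx_good_oct" */
            const char      *desc;          /* human-readable description */
    };

    static const struct mvneta_mib_def mvneta_mib_list[] = {
            { MVNETA_MIB_RX_GOOD_OCT, 1, "rx_good_oct", "Good Octets Rx" },
            /* remaining rows follow the same pattern as the lines above */
            { -1, 0 }       /* table terminator, as shown above */
    };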
369 * Fall back -- use the currently programmed address.
375 * Generate pseudo-random MAC.
379 mac_l |= device_get_unit(sc->dev) & 0xff;
383 device_printf(sc->dev,
416 * Create Tx DMA
421 bus_get_dma_tag(sc->dev), /* parent */
431 &sc->tx_dtag); /* dmat */
433 device_printf(sc->dev,
434 "Failed to create DMA tag for Tx descriptors.\n");
438 bus_get_dma_tag(sc->dev), /* parent */
448 &sc->txmbuf_dtag);
450 device_printf(sc->dev,
451 "Failed to create DMA tag for Tx mbufs.\n");
458 device_printf(sc->dev,
465 * Create Rx DMA.
467 /* Create tag for Rx descriptors */
469 bus_get_dma_tag(sc->dev), /* parent */
479 &sc->rx_dtag); /* dmat */
481 device_printf(sc->dev,
482 "Failed to create DMA tag for Rx descriptors.\n");
486 /* Create tag for Rx buffers */
488 bus_get_dma_tag(sc->dev), /* parent */
497 &sc->rxbuf_dtag); /* dmat */
499 device_printf(sc->dev,
500 "Failed to create DMA tag for Rx buffers.\n");
506 device_printf(sc->dev,
514 mvneta_detach(sc->dev);
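
The bus_dma_tag_create() calls above show only their first and last arguments; the middle argument lines do not contain the search terms. A hedged reconstruction of the Rx-descriptor tag creation follows, using the standard FreeBSD bus_dma_tag_create() argument order. The alignment, size, and segment values are illustrative assumptions; only the parent tag, the error message, and &sc->rx_dtag appear in the listing.

    error = bus_dma_tag_create(
        bus_get_dma_tag(sc->dev),           /* parent */
        16, 0,                              /* alignment, boundary (assumed) */
        BUS_SPACE_MAXADDR,                  /* lowaddr */
        BUS_SPACE_MAXADDR,                  /* highaddr */
        NULL, NULL,                         /* filter, filterarg */
        sizeof(struct mvneta_rx_desc) *
            MVNETA_RX_RING_CNT,             /* maxsize (descriptor type assumed) */
        1,                                  /* nsegments */
        sizeof(struct mvneta_rx_desc) *
            MVNETA_RX_RING_CNT,             /* maxsegsz */
        0,                                  /* flags */
        NULL, NULL,                         /* lockfunc, lockarg */
        &sc->rx_dtag);                      /* dmat */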
534 sc->dev = self;
536 mtx_init(&sc->mtx, "mvneta_sc", NULL, MTX_DEF);
538 error = bus_alloc_resources(self, res_spec, sc->res);
544 sc->version = MVNETA_READ(sc, MVNETA_PV);
545 device_printf(self, "version is %x\n", sc->version);
546 callout_init(&sc->tick_ch, 0);
554 error = clk_get_by_ofw_index(sc->dev, ofw_bus_get_node(sc->dev), 0,
558 device_printf(sc->dev,
561 sc->clk_freq = A3700_TCLK_250MHZ;
563 device_printf(sc->dev,
565 sc->clk_freq = get_tclk();
568 error = clk_get_freq(clk, &sc->clk_freq);
570 device_printf(sc->dev,
572 bus_release_resources(sc->dev, res_spec, sc->res);
592 error = bus_setup_intr(self, sc->res[1],
594 &sc->ih_cookie[0]);
605 if (mvneta_get_mac_address(sc, sc->enaddr)) {
609 mvneta_set_mac_address(sc, sc->enaddr);
614 ifp = sc->ifp = if_alloc(IFT_ETHER);
618 * We can support 802.1Q VLAN-sized frames and jumbo
630 if_setsendqlen(ifp, MVNETA_TX_RING_CNT - 1);
656 * - Support for Large Receive Offload
662 sc->rx_frame_size = MCLBYTES; /* ether_ifattach() always sets normal mtu */
715 if ((sc->phy_addr != MII_PHY_ANY) && (!sc->use_inband_status)) {
716 error = mii_attach(self, &sc->miibus, ifp, mvneta_mediachange,
717 mvneta_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr,
722 ether_ifdetach(sc->ifp);
726 sc->mii = device_get_softc(sc->miibus);
727 sc->phy_attached = 1;
729 /* Disable auto-negotiation in MAC - rely on PHY layer */
731 } else if (sc->use_inband_status == TRUE) {
732 /* In-band link status */
733 ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
737 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX,
739 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
740 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX,
742 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
743 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX,
745 ifmedia_add(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
746 ifmedia_set(&sc->mvneta_ifmedia, IFM_ETHER | IFM_AUTO);
748 /* Enable auto-negotiation */
759 /* Fixed-link, use predefined values */
761 ifmedia_init(&sc->mvneta_ifmedia, 0, mvneta_mediachange,
765 switch (sc->phy_speed) {
767 if (sc->phy_mode != MVNETA_PHY_SGMII &&
768 sc->phy_mode != MVNETA_PHY_QSGMII) {
771 ether_ifdetach(sc->ifp);
787 ether_ifdetach(sc->ifp);
792 if (sc->phy_fdx)
797 ifmedia_add(&sc->mvneta_ifmedia, ifm_target, 0, NULL);
798 ifmedia_set(&sc->mvneta_ifmedia, ifm_target);
799 if_link_state_change(sc->ifp, LINK_STATE_UP);
804 child = device_add_child(sc->dev, "mdio", DEVICE_UNIT_ANY);
806 ether_ifdetach(sc->ifp);
810 bus_attach_children(sc->dev);
818 ether_ifattach(ifp, sc->enaddr);
820 callout_reset(&sc->tick_ch, 0, mvneta_tick, sc);
837 callout_drain(&sc->tick_ch);
838 ether_ifdetach(sc->ifp);
848 if (sc->ih_cookie[0] != NULL)
849 bus_teardown_intr(dev, sc->res[1], sc->ih_cookie[0]);
851 if (sc->tx_dtag != NULL)
852 bus_dma_tag_destroy(sc->tx_dtag);
853 if (sc->rx_dtag != NULL)
854 bus_dma_tag_destroy(sc->rx_dtag);
855 if (sc->txmbuf_dtag != NULL)
856 bus_dma_tag_destroy(sc->txmbuf_dtag);
857 if (sc->rxbuf_dtag != NULL)
858 bus_dma_tag_destroy(sc->rxbuf_dtag);
860 bus_release_resources(dev, res_spec, sc->res);
862 if (sc->ifp)
863 if_free(sc->ifp);
865 if (mtx_initialized(&sc->mtx))
866 mtx_destroy(&sc->mtx);
883 ifp = sc->ifp;
895 return (-1);
911 return (-1);
923 return (-1);
951 ifp = sc->ifp;
995 mvneta_rx_queue_enable(sc->ifp, q);
1001 mvneta_tx_queue_enable(sc->ifp, q);
1010 struct mvneta_rx_ring *rx;
1011 struct mvneta_tx_ring *tx;
1016 rx = MVNETA_RX_RING(sc, q);
1018 rx->queue_status = MVNETA_QUEUE_DISABLED;
1023 tx = MVNETA_TX_RING(sc, q);
1025 tx->queue_status = MVNETA_QUEUE_DISABLED;
1029 /* Wait for all Rx activity to terminate. */
1036 if_printf(sc->ifp,
1037 "timeout for RX stopped. rqc 0x%x\n", reg);
1044 /* Wait for all Tx activity to terminate. */
1059 if_printf(sc->ifp,
1060 "timeout for TX stopped. tqc 0x%x\n", reg);
1067 /* Wait until the Tx FIFO is empty */
1071 if_printf(sc->ifp,
1072 "timeout for TX FIFO drained. ps0 0x%x\n", reg);
1103 /* Init TX/RX Queue Registers */
1107 device_printf(sc->dev,
1117 device_printf(sc->dev,
1126 * Ethernet Unit Control - disable automatic PHY management by HW.
1127 * In case the port uses SMI-controlled PHY, poll its status with
1153 switch (sc->phy_mode) {
1171 /* Port Configuration Extended: enable Tx CRC generation */
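
A minimal sketch of the "disable automatic PHY management" step described by the comment at 1126-1127. MVNETA_EUC and the MVNETA_EUC_POLLINGMODE bit are assumptions here, not taken from this listing; only the MVNETA_READ()/MVNETA_WRITE() accessors appear elsewhere in it.

    reg = MVNETA_READ(sc, MVNETA_EUC);
    reg &= ~MVNETA_EUC_POLLINGMODE;     /* let the MII layer poll the PHY over SMI */
    MVNETA_WRITE(sc, MVNETA_EUC, reg);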
1197 *(bus_addr_t *)arg = segs->ds_addr;
1203 struct mvneta_rx_ring *rx;
1211 rx = MVNETA_RX_RING(sc, q);
1212 mtx_init(&rx->ring_mtx, "mvneta_rx", NULL, MTX_DEF);
1213 /* Allocate DMA memory for Rx descriptors */
1214 error = bus_dmamem_alloc(sc->rx_dtag,
1215 (void**)&(rx->desc),
1217 &rx->desc_map);
1218 if (error != 0 || rx->desc == NULL)
1220 error = bus_dmamap_load(sc->rx_dtag, rx->desc_map,
1221 rx->desc,
1223 mvneta_dmamap_cb, &rx->desc_pa, BUS_DMA_NOWAIT);
1228 error = bus_dmamap_create(sc->rxbuf_dtag, 0, &dmap);
1230 device_printf(sc->dev,
1231 "Failed to create DMA map for Rx buffer num: %d\n", i);
1234 rxbuf = &rx->rxbuf[i];
1235 rxbuf->dmap = dmap;
1236 rxbuf->m = NULL;
1245 device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
1252 struct mvneta_tx_ring *tx;
1257 tx = MVNETA_TX_RING(sc, q);
1258 mtx_init(&tx->ring_mtx, "mvneta_tx", NULL, MTX_DEF);
1259 error = bus_dmamem_alloc(sc->tx_dtag,
1260 (void**)&(tx->desc),
1262 &tx->desc_map);
1263 if (error != 0 || tx->desc == NULL)
1265 error = bus_dmamap_load(sc->tx_dtag, tx->desc_map,
1266 tx->desc,
1268 mvneta_dmamap_cb, &tx->desc_pa, BUS_DMA_NOWAIT);
1273 tx->br = buf_ring_alloc(MVNETA_BUFRING_SIZE, M_DEVBUF, M_NOWAIT,
1274 &tx->ring_mtx);
1275 if (tx->br == NULL) {
1276 device_printf(sc->dev,
1289 device_printf(sc->dev, "DMA Ring buffer allocation failure.\n");
1296 struct mvneta_tx_ring *tx;
1304 tx = MVNETA_TX_RING(sc, q);
1306 if (tx->taskq != NULL) {
1308 while (taskqueue_cancel(tx->taskq, &tx->task, NULL) != 0)
1309 taskqueue_drain(tx->taskq, &tx->task);
1312 if (tx->br != NULL)
1313 drbr_free(tx->br, M_DEVBUF);
1316 if (sc->txmbuf_dtag != NULL) {
1318 txbuf = &tx->txbuf[i];
1319 if (txbuf->dmap != NULL) {
1320 error = bus_dmamap_destroy(sc->txmbuf_dtag,
1321 txbuf->dmap);
1323 panic("%s: map busy for Tx descriptor (Q%d, %d)",
1330 if (tx->desc_pa != 0)
1331 bus_dmamap_unload(sc->tx_dtag, tx->desc_map);
1333 kva = (void *)tx->desc;
1335 bus_dmamem_free(sc->tx_dtag, tx->desc, tx->desc_map);
1337 if (mtx_name(&tx->ring_mtx) != NULL)
1338 mtx_destroy(&tx->ring_mtx);
1340 memset(tx, 0, sizeof(*tx));
1346 struct mvneta_rx_ring *rx;
1353 rx = MVNETA_RX_RING(sc, q);
1355 if (rx->desc_pa != 0)
1356 bus_dmamap_unload(sc->rx_dtag, rx->desc_map);
1358 kva = (void *)rx->desc;
1360 bus_dmamem_free(sc->rx_dtag, rx->desc, rx->desc_map);
1362 lro = &rx->lro;
1365 if (mtx_name(&rx->ring_mtx) != NULL)
1366 mtx_destroy(&rx->ring_mtx);
1368 memset(rx, 0, sizeof(*rx));
1374 struct mvneta_rx_ring *rx;
1381 rx = MVNETA_RX_RING(sc, q);
1382 rx->dma = rx->cpu = 0;
1383 rx->queue_th_received = MVNETA_RXTH_COUNT;
1384 rx->queue_th_time = (sc->clk_freq / 1000) / 10; /* 0.1 [ms] */
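
A quick worked example of the default threshold computed above, assuming the 250 MHz Armada 3700 TCLK named at line 561 of this listing (A3700_TCLK_250MHZ):

    /*
     * queue_th_time = (250000000 / 1000) / 10 = 25000 TCLK cycles
     * 25000 cycles / 250 MHz = 100 us = 0.1 ms, matching the comment above.
     */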
1387 rx->lro_enabled = FALSE;
1388 if ((if_getcapenable(sc->ifp) & IFCAP_LRO) != 0) {
1389 lro = &rx->lro;
1392 device_printf(sc->dev, "LRO Initialization failed!\n");
1394 rx->lro_enabled = TRUE;
1395 lro->ifp = sc->ifp;
1405 struct mvneta_tx_ring *tx;
1412 tx = MVNETA_TX_RING(sc, q);
1414 /* Tx handle */
1416 txbuf = &tx->txbuf[i];
1417 txbuf->m = NULL;
1418 /* Tx handle needs DMA map for busdma_load_mbuf() */
1419 error = bus_dmamap_create(sc->txmbuf_dtag, 0,
1420 &txbuf->dmap);
1422 device_printf(sc->dev,
1423 "can't create dma map (tx ring %d)\n", i);
1427 tx->dma = tx->cpu = 0;
1428 tx->used = 0;
1429 tx->drv_error = 0;
1430 tx->queue_status = MVNETA_QUEUE_DISABLED;
1431 tx->queue_hung = FALSE;
1433 tx->ifp = sc->ifp;
1434 tx->qidx = q;
1435 TASK_INIT(&tx->task, 0, mvneta_tx_task, tx);
1436 tx->taskq = taskqueue_create_fast("mvneta_tx_taskq", M_WAITOK,
1437 taskqueue_thread_enqueue, &tx->taskq);
1438 taskqueue_start_threads(&tx->taskq, 1, PI_NET, "%s: tx_taskq(%d)",
1439 device_get_nameunit(sc->dev), q);
1447 struct mvneta_tx_ring *tx;
1451 tx = MVNETA_TX_RING(sc, q);
1454 /* Tx handle */
1456 txbuf = &tx->txbuf[i];
1457 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
1458 if (txbuf->m != NULL) {
1459 m_freem(txbuf->m);
1460 txbuf->m = NULL;
1463 tx->dma = tx->cpu = 0;
1464 tx->used = 0;
1470 struct mvneta_rx_ring *rx;
1474 rx = MVNETA_RX_RING(sc, q);
1477 /* Rx handle */
1479 rxbuf = &rx->rxbuf[i];
1482 rx->dma = rx->cpu = 0;
1486 * Rx/Tx Queue Control
1492 struct mvneta_rx_ring *rx;
1497 rx = MVNETA_RX_RING(sc, q);
1498 DASSERT(rx->desc_pa != 0);
1501 MVNETA_WRITE(sc, MVNETA_PRXDQA(q), rx->desc_pa);
1503 /* Rx buffer size and descriptor ring size */
1504 reg = MVNETA_PRXDQS_BUFFERSIZE(sc->rx_frame_size >> 3);
1511 /* Rx packet offset address */
1520 DASSERT(MVNETA_READ(sc, MVNETA_PRXDQA(q)) == rx->desc_pa);
1528 struct mvneta_tx_ring *tx;
1533 tx = MVNETA_TX_RING(sc, q);
1534 DASSERT(tx->desc_pa != 0);
1537 MVNETA_WRITE(sc, MVNETA_PTXDQA(q), tx->desc_pa);
1544 DASSERT(MVNETA_READ(sc, MVNETA_PTXDQA(q)) == tx->desc_pa);
1552 struct mvneta_rx_ring *rx;
1556 rx = MVNETA_RX_RING(sc, q);
1559 /* Set Rx interrupt threshold */
1560 reg = MVNETA_PRXDQTH_ODT(rx->queue_th_received);
1563 reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
1568 reg |= MVNETA_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */
1571 /* Enable Rx queue */
1576 rx->queue_status = MVNETA_QUEUE_WORKING;
1584 struct mvneta_tx_ring *tx;
1587 tx = MVNETA_TX_RING(sc, q);
1590 /* Enable Tx queue */
1593 tx->queue_status = MVNETA_QUEUE_IDLE;
1594 tx->queue_hung = FALSE;
1604 mtx_lock(&sc->rx_ring[q].ring_mtx);
1613 mtx_unlock(&sc->rx_ring[q].ring_mtx);
1622 return (mtx_trylock(&sc->tx_ring[q].ring_mtx));
1631 mtx_lock(&sc->tx_ring[q].ring_mtx);
1640 mtx_unlock(&sc->tx_ring[q].ring_mtx);
1671 if (!sc->phy_attached || sc->use_inband_status) {
1692 ifp = sc->ifp;
1703 (!sc->phy_attached || sc->use_inband_status))) {
1716 /* At the moment the driver supports only one RX queue. */
1729 CTR1(KTR_SPARE2, "%s got MISC_INTR", if_name(sc->ifp));
1752 struct mvneta_tx_ring *tx;
1753 struct mvneta_rx_ring *rx;
1765 /* Extract previous flow-control frame received counter. */
1766 fc_prev = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
1769 /* Extract current flow-control frame received counter. */
1770 fc_curr = sc->sysctl_mib[MVNETA_MIB_FC_GOOD_IDX].counter;
1773 if (sc->phy_attached && if_getflags(sc->ifp) & IFF_UP) {
1775 mii_tick(sc->mii);
1783 * We were unable to refill the rx queue and left the rx func, leaving
1787 rx = MVNETA_RX_RING(sc, q);
1788 if (rx->needs_refill == TRUE) {
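
The body of this check is not part of the match. A minimal sketch of the retry it performs, using the mvneta_rx_lockq()/mvneta_rx_unlockq() helpers that appear later in this listing; the refill routine's name is an assumption.

            mvneta_rx_lockq(sc, q);
            mvneta_rx_queue_refill(sc, q);  /* routine name assumed */
            mvneta_rx_unlockq(sc, q);
    }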
1797 * - check if the queue is marked as hung.
1798 * - ignore the hung status if we received some pause frames
1807 tx = MVNETA_TX_RING(sc, q);
1809 if (tx->queue_hung && (fc_curr - fc_prev) == 0)
1813 callout_schedule(&sc->tick_ch, hz);
1817 if_printf(sc->ifp, "watchdog timeout\n");
1820 sc->counter_watchdog++;
1821 sc->counter_watchdog_mib++;
1833 struct mvneta_tx_ring *tx;
1840 tx = MVNETA_TX_RING(sc, q);
1842 while ((m = buf_ring_dequeue_sc(tx->br)) != NULL)
1854 struct mvneta_tx_ring *tx;
1858 tx = arg;
1859 ifp = tx->ifp;
1862 mvneta_tx_lockq(sc, tx->qidx);
1863 error = mvneta_xmit_locked(sc, tx->qidx);
1864 mvneta_tx_unlockq(sc, tx->qidx);
1869 taskqueue_enqueue(tx->taskq, &tx->task);
1876 struct mvneta_tx_ring *tx;
1881 tx = MVNETA_TX_RING(sc, q);
1884 ifp = sc->ifp;
1887 if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED)) {
1894 if (__predict_true(tx->used > MVNETA_TX_RECLAIM_COUNT))
1898 if (__predict_false(tx->used >
1899 MVNETA_TX_RING_CNT - MVNETA_TX_SEGLIMIT))
1910 tx->watchdog_time = ticks;
1911 tx->queue_status = MVNETA_QUEUE_WORKING;
1921 struct mvneta_tx_ring *tx;
1929 q = m->m_pkthdr.flowid % MVNETA_TX_QNUM_MAX;
1933 tx = MVNETA_TX_RING(sc, q);
1936 if (buf_ring_full(tx->br)) {
1946 if (buf_ring_empty(tx->br) && mvneta_tx_trylockq(sc, q) != 0) {
1958 error = drbr_enqueue(ifp, tx->br, m);
1962 taskqueue_enqueue(tx->taskq, &tx->task);
1970 struct mvneta_tx_ring *tx;
1975 ifp = sc->ifp;
1976 tx = MVNETA_TX_RING(sc, q);
1979 while ((m = drbr_peek(ifp, tx->br)) != NULL) {
1983 drbr_putback(ifp, tx->br, m);
1985 drbr_advance(ifp, tx->br);
1988 drbr_advance(ifp, tx->br);
1998 struct mvneta_tx_ring *tx;
2002 tx = MVNETA_TX_RING(sc, 0);
2009 taskqueue_enqueue(tx->taskq, &tx->task);
2020 ifp = sc->ifp;
2044 struct mvneta_rx_ring *rx;
2060 flags = if_getflags(ifp) ^ sc->mvneta_if_flags;
2063 sc->mvneta_if_flags = if_getflags(ifp);
2069 sc->mvneta_if_flags = if_getflags(ifp);
2070 if (sc->phy_attached)
2071 mii_mediachg(sc->mii);
2078 sc->mvneta_if_flags = if_getflags(ifp);
2082 if (if_getmtu(ifp) > sc->tx_csum_limit &&
2083 ifr->ifr_reqcap & IFCAP_TXCSUM)
2084 ifr->ifr_reqcap &= ~IFCAP_TXCSUM;
2085 mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
2087 if_setcapenablebit(ifp, IFCAP_HWCSUM & ifr->ifr_reqcap,
2100 rx = MVNETA_RX_RING(sc, q);
2101 rx->lro_enabled = !rx->lro_enabled;
2109 if ((IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ||
2110 IFM_SUBTYPE(ifr->ifr_media) == IFM_2500_T) &&
2111 (ifr->ifr_media & IFM_FDX) == 0) {
2112 device_printf(sc->dev,
2113 "%s half-duplex unsupported\n",
2114 IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T ?
2115 "1000Base-T" :
2116 "2500Base-T");
2122 if (!sc->phy_attached)
2123 error = ifmedia_ioctl(ifp, ifr, &sc->mvneta_ifmedia,
2126 error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media,
2130 if (ifr->ifr_mtu < 68 || ifr->ifr_mtu > MVNETA_MAX_FRAME -
2134 if_setmtu(ifp, ifr->ifr_mtu);
2137 sc->rx_frame_size = MCLBYTES;
2139 sc->rx_frame_size = MJUM9BYTES;
2141 if (if_getmtu(ifp) > sc->tx_csum_limit) {
2150 * Reinitialize RX queues.
2151 * We need to update RX descriptor size.
2161 device_printf(sc->dev,
2194 ifp = sc->ifp;
2196 if (!device_is_attached(sc->dev) ||
2201 callout_stop(&sc->tick_ch);
2204 bcopy(if_getlladdr(ifp), sc->enaddr, ETHER_ADDR_LEN);
2205 mvneta_set_mac_address(sc, sc->enaddr);
2231 if (!sc->phy_attached)
2238 callout_schedule(&sc->tick_ch, hz);
2251 if (sc->phy_attached)
2252 mii_mediachg(sc->mii);
2264 ifp = sc->ifp;
2270 callout_stop(&sc->tick_ch);
2275 if (sc->linkup == TRUE)
2320 if (!sc->phy_attached && !sc->use_inband_status) {
2322 if_printf(ifp, "Cannot change media in fixed-link mode!\n");
2326 if (sc->use_inband_status) {
2327 mvneta_update_media(sc, sc->mvneta_ifmedia.ifm_media);
2334 mii_mediachg(sc->mii);
2350 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_1000_T);
2352 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_100_TX);
2354 ifmr->ifm_active = IFM_ETHER_SUBTYPE_SET(IFM_10_T);
2358 ifmr->ifm_active |= IFM_FDX;
2361 ifmr->ifm_status = IFM_AVALID;
2363 ifmr->ifm_status |= IFM_ACTIVE;
2374 if (!sc->phy_attached && !sc->use_inband_status) {
2375 ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
2381 if (sc->use_inband_status) {
2387 mii = sc->mii;
2390 ifmr->ifm_active = mii->mii_media_active;
2391 ifmr->ifm_status = mii->mii_media_status;
2448 running = (if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0;
2452 sc->autoneg = (IFM_SUBTYPE(media) == IFM_AUTO);
2454 if (!sc->phy_attached || sc->use_inband_status)
2468 device_printf(sc->dev,
2469 "%s half-duplex unsupported\n",
2471 "1000Base-T" :
2472 "2500Base-T");
2503 phy_linkup = (sc->mii->mii_media_status &
2506 if (sc->linkup != phy_linkup)
2514 if (sc->mvneta_media != sc->mii->mii_media_active) {
2515 sc->mvneta_media = sc->mii->mii_media_active;
2521 if (IFM_SUBTYPE(sc->mvneta_media) == IFM_1000_T ||
2522 IFM_SUBTYPE(sc->mvneta_media) == IFM_2500_T) {
2524 } else if (IFM_SUBTYPE(sc->mvneta_media) == IFM_100_TX)
2527 if (sc->mvneta_media & IFM_FDX)
2542 if (sc->linkup == linkup)
2551 device_printf(sc->dev,
2552 "%s: link %s\n", if_name(sc->ifp), linkup ? "up" : "down");
2568 device_printf(sc->dev,
2569 "%s: link %s\n", if_name(sc->ifp), linkup ? "up" : "down");
2582 if (sc->cf_lpi)
2597 if (sc->cf_fc) {
2617 if (!sc->phy_attached || !sc->use_inband_status) {
2624 mvneta_qflush(sc->ifp);
2626 sc->linkup = TRUE;
2627 if_link_state_change(sc->ifp, LINK_STATE_UP);
2637 if (!sc->phy_attached || !sc->use_inband_status) {
2645 mvneta_qflush(sc->ifp);
2646 sc->linkup = FALSE;
2647 if_link_state_change(sc->ifp, LINK_STATE_DOWN);
2655 if (sc->phy_attached) {
2657 mii = LIST_FIRST(&sc->mii->mii_phys);
2664 * Tx Subroutines
2672 struct mvneta_tx_ring *tx;
2679 tx = MVNETA_TX_RING(sc, q);
2680 DASSERT(tx->used >= 0);
2681 DASSERT(tx->used <= MVNETA_TX_RING_CNT);
2683 ifp = sc->ifp;
2685 if (__predict_false(mbuf->m_flags & M_VLANTAG)) {
2686 mbuf = ether_vlanencap(mbuf, mbuf->m_pkthdr.ether_vtag);
2688 tx->drv_error++;
2692 mbuf->m_flags &= ~M_VLANTAG;
2696 if (__predict_false(mbuf->m_next != NULL &&
2697 (mbuf->m_pkthdr.csum_flags &
2703 tx->drv_error++;
2712 txbuf = &tx->txbuf[tx->cpu];
2713 error = bus_dmamap_load_mbuf_sg(sc->txmbuf_dtag,
2714 txbuf->dmap, mbuf, txsegs, &txnsegs,
2722 tx->drv_error++;
2731 || (txnsegs + tx->used) > MVNETA_TX_RING_CNT)) {
2737 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
2740 DASSERT(txbuf->m == NULL);
2743 txbuf->m = mbuf;
2744 bus_dmamap_sync(sc->txmbuf_dtag, txbuf->dmap,
2747 /* load to tx descriptors */
2750 t = &tx->desc[tx->cpu];
2751 t->command = 0;
2752 t->l4ichk = 0;
2753 t->flags = 0;
2756 t->command |= MVNETA_TX_CMD_W_PACKET_OFFSET(0);
2757 t->command |= MVNETA_TX_CMD_F;
2760 t->bufptr_pa = txsegs[i].ds_addr;
2761 t->bytecnt = txsegs[i].ds_len;
2762 tx->cpu = tx_counter_adv(tx->cpu, 1);
2764 tx->used++;
2769 t->command |= MVNETA_TX_CMD_L|MVNETA_TX_CMD_PADDING;
2771 bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
2777 used -= 255;
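
This "used -= 255" (and the similar ndesc/processed/npkt decrements later in the listing) suggests the hardware's pending-descriptor-count fields only accept up to 255 per write, so updates are pushed in chunks; that limit is an inference from the chunking, not stated in the listing. A hedged sketch of such a chunked update, with MVNETA_PTXSU and its field macro as assumed names:

    while (used > 255) {
            MVNETA_WRITE(sc, MVNETA_PTXSU(q), MVNETA_PTXSU_NOWD(255));
            used -= 255;
    }
    if (used > 0)
            MVNETA_WRITE(sc, MVNETA_PTXSU(q), MVNETA_PTXSU_NOWD(used));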
2797 csum_flags = if_gethwassist(ifp) & m->m_pkthdr.csum_flags;
2800 switch (ntohs(eh->ether_type)) {
2807 if (ntohs(evh->evl_proto) == ETHERTYPE_VLAN)
2815 ip = (struct ip *)(m->m_data + ipoff);
2816 iphl = ip->ip_hl<<2;
2817 t->command |= MVNETA_TX_CMD_L3_IP4;
2819 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
2826 t->command |= MVNETA_TX_CMD_IP4_CHECKSUM;
2831 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
2832 t->command |= MVNETA_TX_CMD_L4_TCP;
2834 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NOFRAG;
2835 t->command |= MVNETA_TX_CMD_L4_UDP;
2837 t->command |= MVNETA_TX_CMD_L4_CHECKSUM_NONE;
2839 t->l4ichk = 0;
2840 t->command |= MVNETA_TX_CMD_IP_HEADER_LEN(iphl >> 2);
2841 t->command |= MVNETA_TX_CMD_L3_OFFSET(ipoff);
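
The lines above come from the checksum-offload helper; the match skips the part that computes ipoff before the IP header is read at 2815. A hedged sketch of that elided derivation (the exact control flow is an assumption; VLAN handling is implied by line 2807, and eh/evh are set in the elided lines):

    ipoff = ETHER_HDR_LEN;
    if (ntohs(eh->ether_type) == ETHERTYPE_VLAN) {
            ipoff += ETHER_VLAN_ENCAP_LEN;
            if (ntohs(evh->evl_proto) == ETHERTYPE_VLAN)
                    ipoff += ETHER_VLAN_ENCAP_LEN;  /* double-tagged frame */
    }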
2847 struct mvneta_tx_ring *tx;
2855 tx = MVNETA_TX_RING(sc, q);
2856 if (__predict_false(tx->queue_status == MVNETA_QUEUE_DISABLED))
2863 if (tx->used == 0)
2864 tx->queue_status = MVNETA_QUEUE_IDLE;
2865 else if (tx->queue_status == MVNETA_QUEUE_WORKING &&
2866 ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG))
2867 tx->queue_hung = TRUE;
2873 if_name(sc->ifp), q, ndesc);
2876 bus_dmamap_sync(sc->tx_dtag, tx->desc_map,
2880 t = &tx->desc[tx->dma];
2882 if (t->flags & MVNETA_TX_F_ES)
2883 CTR3(KTR_SPARE2, "%s tx error queue %d desc %d",
2884 if_name(sc->ifp), q, tx->dma);
2886 txbuf = &tx->txbuf[tx->dma];
2887 if (__predict_true(txbuf->m != NULL)) {
2888 DASSERT((t->command & MVNETA_TX_CMD_F) != 0);
2889 bus_dmamap_unload(sc->txmbuf_dtag, txbuf->dmap);
2890 m_freem(txbuf->m);
2891 txbuf->m = NULL;
2894 DASSERT((t->flags & MVNETA_TX_CMD_F) == 0);
2895 tx->dma = tx_counter_adv(tx->dma, 1);
2896 tx->used--;
2898 DASSERT(tx->used >= 0);
2899 DASSERT(tx->used <= MVNETA_TX_RING_CNT);
2903 ndesc -= 255;
2911 if_name(sc->ifp), q, tx->cpu, tx->dma, tx->used);
2914 tx->watchdog_time = ticks;
2916 if (tx->used == 0)
2917 tx->queue_status = MVNETA_QUEUE_IDLE;
2921 * Do a final TX complete when TX is idle.
2926 struct mvneta_tx_ring *tx;
2930 * Handle trailing mbuf on TX queue.
2931 * Check is done locklessly to avoid TX path contention.
2934 tx = MVNETA_TX_RING(sc, q);
2935 if ((ticks - tx->watchdog_time) > MVNETA_WATCHDOG_TXCOMP &&
2936 tx->used > 0) {
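
The body of this block is not part of the match. A minimal sketch of the deferred completion it performs, using the mvneta_tx_lockq()/mvneta_tx_unlockq() helpers shown elsewhere in this listing; the completion routine's name is an assumption.

            mvneta_tx_lockq(sc, q);
            mvneta_tx_queue_complete(sc, q);        /* routine name assumed */
            mvneta_tx_unlockq(sc, q);
    }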
2945 * Rx Subroutines
2982 processed -= 255;
2999 struct mvneta_rx_ring *rx;
3008 ifp = sc->ifp;
3009 rx = MVNETA_RX_RING(sc, q);
3012 if (__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
3015 bus_dmamap_sync(sc->rx_dtag, rx->desc_map,
3020 ndma = rx_counter_adv(rx->dma, 1);
3021 mvneta_prefetch(&rx->desc[ndma]);
3022 mvneta_prefetch(&rx->rxbuf[ndma]);
3025 r = &rx->desc[rx->dma];
3026 rxbuf = &rx->rxbuf[rx->dma];
3027 m = rxbuf->m;
3028 rxbuf->m = NULL;
3030 bus_dmamap_sync(sc->rxbuf_dtag, rxbuf->dmap,
3032 bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
3038 DASSERT((r->status & (MVNETA_RX_F|MVNETA_RX_L)) ==
3040 if (__predict_false((r->status & MVNETA_RX_ES) ||
3041 (r->status & (MVNETA_RX_F|MVNETA_RX_L)) !=
3049 pktlen = r->bytecnt - ETHER_CRC_LEN - MVNETA_HWHEADER_SIZE;
3050 pktbuf = (uint8_t *)rx->rxbuf_virt_addr[rx->dma] + MVNETA_PACKET_OFFSET +
3057 m->m_data = pktbuf;
3058 m->m_len = m->m_pkthdr.len = pktlen;
3059 m->m_pkthdr.rcvif = ifp;
3063 rx->dma = ndma;
3065 if (__predict_false(rx->lro_enabled &&
3066 ((r->status & MVNETA_RX_L3_IP) != 0) &&
3067 ((r->status & MVNETA_RX_L4_MASK) == MVNETA_RX_L4_TCP) &&
3068 (m->m_pkthdr.csum_flags &
3071 if (rx->lro.lro_cnt != 0) {
3072 if (tcp_lro_rx(&rx->lro, m, 0) == 0)
3084 if(__predict_false(rx->queue_status == MVNETA_QUEUE_DISABLED))
3096 rx->dma = ndma;
3116 tcp_lro_flush_all(&rx->lro);
3123 bus_dmamap_unload(sc->rxbuf_dtag, rxbuf->dmap);
3125 m_freem(rxbuf->m);
3131 struct mvneta_rx_ring *rx;
3141 rx = MVNETA_RX_RING(sc, q);
3144 refill = MVNETA_RX_RING_CNT - ndesc;
3146 CTR3(KTR_SPARE2, "%s:%u refill %u packets", if_name(sc->ifp), q,
3153 rxbuf = &rx->rxbuf[rx->cpu];
3154 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->rx_frame_size);
3159 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3161 error = bus_dmamap_load_mbuf_sg(sc->rxbuf_dtag, rxbuf->dmap,
3164 KASSERT(1, ("Failed to load Rx mbuf DMA map"));
3170 rxbuf->m = m;
3171 r = &rx->desc[rx->cpu];
3172 r->bufptr_pa = segs.ds_addr;
3173 rx->rxbuf_virt_addr[rx->cpu] = m->m_data;
3175 rx->cpu = rx_counter_adv(rx->cpu, 1);
3179 rx->needs_refill = TRUE;
3183 rx->needs_refill = FALSE;
3184 bus_dmamap_sync(sc->rx_dtag, rx->desc_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3189 npkt -= 255;
3204 if (__predict_false((r->status &
3209 if (__predict_true((r->status & MVNETA_RX_IP_HEADER_OK) ==
3213 if (__predict_true((r->status & (MVNETA_RX_IP_HEADER_OK|MVNETA_RX_L3_IP)) ==
3216 switch (r->status & MVNETA_RX_L4_MASK) {
3220 if (__predict_true((r->status &
3223 m->m_pkthdr.csum_data = htons(0xffff);
3231 m->m_pkthdr.csum_flags = csum_flags;
3251 ifp = sc->ifp;
3266 pxc |= MVNETA_PXC_RXQ(MVNETA_RX_QNUM_MAX-1);
3267 pxc |= MVNETA_PXC_RXQARP(MVNETA_RX_QNUM_MAX-1);
3268 pxc |= MVNETA_PXC_TCPQ(MVNETA_RX_QNUM_MAX-1);
3269 pxc |= MVNETA_PXC_UDPQ(MVNETA_RX_QNUM_MAX-1);
3270 pxc |= MVNETA_PXC_BPDUQ(MVNETA_RX_QNUM_MAX-1);
3291 i = sc->enaddr[5] & 0xf; /* last nibble */
3315 sc = arg->sc;
3318 if (arg->index < 0 || arg->index > MVNETA_PORTMIB_NOCOUNTER)
3322 val = arg->counter;
3359 struct mvneta_rx_ring *rx;
3364 rx = NULL;
3368 if (arg->queue < 0 || arg->queue > MVNETA_RX_RING_CNT)
3370 if (arg->rxtx != MVNETA_SYSCTL_RX)
3373 sc = arg->sc;
3379 mvneta_rx_lockq(sc, arg->queue);
3380 rx = MVNETA_RX_RING(sc, arg->queue);
3381 time_mvtclk = rx->queue_th_time;
3382 time_us = ((uint64_t)time_mvtclk * 1000ULL * 1000ULL) / sc->clk_freq;
3383 mvneta_rx_unlockq(sc, arg->queue);
3391 mvneta_rx_lockq(sc, arg->queue);
3393 /* update the queue threshold timer (range: 0 [sec] - 1 [sec]) */
3395 mvneta_rx_unlockq(sc, arg->queue);
3399 time_mvtclk = sc->clk_freq * (uint64_t)time_us / (1000ULL * 1000ULL);
3400 rx->queue_th_time = time_mvtclk;
3401 reg = MVNETA_PRXITTH_RITT(rx->queue_th_time);
3402 MVNETA_WRITE(sc, MVNETA_PRXITTH(arg->queue), reg);
3403 mvneta_rx_unlockq(sc, arg->queue);
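
A short worked example of the two conversions shown above (3382 and 3399), again assuming a 250 MHz TCLK; note that both directions use integer division, so values round down to whole clock cycles:

    /*
     * write: time_us = 500  ->  time_mvtclk = 250000000 * 500 / 1000000 = 125000
     * read:  time_mvtclk = 125000  ->  time_us = 125000 * 1000000 / 250000000 = 500
     */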
3440 ctx = device_get_sysctl_ctx(sc->dev);
3441 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3443 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "rx",
3444 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "NETA RX");
3452 CTLFLAG_RW, &sc->cf_fc, 0, "flow control");
3454 CTLFLAG_RW, &sc->cf_lpi, 0, "Low Power Idle");
3461 struct mvneta_sysctl_mib *mib_arg = &sc->sysctl_mib[i];
3463 mib_arg->sc = sc;
3464 mib_arg->index = i;
3472 CTLFLAG_RD, &sc->counter_pdfc, "Port Rx Discard Frame Counter");
3474 CTLFLAG_RD, &sc->counter_pofc, "Port Overrun Frame Counter");
3476 CTLFLAG_RD, &sc->counter_watchdog, 0, "TX Watchdog Counter");
3483 rxarg = &sc->sysctl_rx_queue[q];
3485 rxarg->sc = sc;
3486 rxarg->queue = q;
3487 rxarg->rxtx = MVNETA_SYSCTL_RX;
3489 /* hw.mvneta.mvneta[unit].rx.[queue] */
3495 /* hw.mvneta.mvneta[unit].rx.[queue].threshold_timer_us */
3513 val = MVNETA_READ_MIB(sc, mib->regnum);
3514 if (mib->reg64)
3515 val |= (uint64_t)MVNETA_READ_MIB(sc, mib->regnum + 4) << 32;
3528 sc->sysctl_mib[i].counter = 0;
3531 sc->counter_pdfc = 0;
3533 sc->counter_pofc = 0;
3534 sc->counter_watchdog = 0;
3540 struct mvneta_tx_ring *tx;
3551 sc->sysctl_mib[i].counter += val;
3554 if_inc_counter(sc->ifp, IFCOUNTER_IBYTES, val);
3557 if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, val);
3560 if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, val);
3563 if_inc_counter(sc->ifp, IFCOUNTER_IMCASTS, val);
3566 if_inc_counter(sc->ifp, IFCOUNTER_OBYTES, val);
3569 if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, val);
3572 if_inc_counter(sc->ifp, IFCOUNTER_OMCASTS, val);
3575 if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, val);
3580 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, val);
3586 sc->counter_pdfc += reg;
3587 if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
3589 sc->counter_pofc += reg;
3590 if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, reg);
3592 /* TX watchdog. */
3593 if (sc->counter_watchdog_mib > 0) {
3594 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, sc->counter_watchdog_mib);
3595 sc->counter_watchdog_mib = 0;
3598 * TX driver errors:
3599 * We do not take queue locks, so as not to disrupt the TX path.
3601 * next mib update. We may also clear counter when TX path
3606 tx = MVNETA_TX_RING(sc, i);
3608 if (tx->drv_error > 0) {
3609 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, tx->drv_error);
3610 tx->drv_error = 0;