Lines matching defs:q (definitions and uses of the per-queue index q in the mvneta driver)
412 size_t q;
455 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
456 error = mvneta_ring_alloc_tx_queue(sc, q);
459 "Failed to allocate DMA safe memory for TxQ: %zu\n", q);
504 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
505 if (mvneta_ring_alloc_rx_queue(sc, q) != 0) {
507 "Failed to allocate DMA safe memory for RxQ: %zu\n", q);
527 int q, error;
675 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
676 error = mvneta_ring_init_tx_queue(sc, q);
683 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
684 error = mvneta_ring_init_rx_queue(sc, q);
831 int q;
841 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++)
842 mvneta_ring_dealloc_rx_queue(sc, q);
843 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++)
844 mvneta_ring_dealloc_tx_queue(sc, q);
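The matches above (lines 455-507 and 841-844) show the ring life cycle: TX and RX queues are allocated in fixed-bound loops over MVNETA_TX_QNUM_MAX / MVNETA_RX_QNUM_MAX and torn down the same way. Below is a minimal standalone sketch of that loop-with-unwind pattern; userspace calloc stands in for the driver's DMA-safe allocations, and every name is hypothetical:

    #include <stdio.h>
    #include <stdlib.h>

    #define QNUM_MAX 8                     /* stands in for MVNETA_TX_QNUM_MAX */

    struct ring {
        void *desc;                        /* simplified: one allocation per queue */
    };

    static struct ring rings[QNUM_MAX];

    static int
    ring_alloc_queue(int q)
    {
        if (q >= QNUM_MAX)                 /* bounds check, as in the driver */
            return (-1);
        rings[q].desc = calloc(256, 16);   /* stand-in for DMA-safe memory */
        return (rings[q].desc == NULL ? -1 : 0);
    }

    static void
    ring_dealloc_queue(int q)
    {
        free(rings[q].desc);
        rings[q].desc = NULL;
    }

    int
    main(void)
    {
        int q;

        for (q = 0; q < QNUM_MAX; q++) {
            if (ring_alloc_queue(q) != 0) {
                fprintf(stderr, "Failed to allocate memory for queue %d\n", q);
                while (q-- > 0)            /* unwind queues allocated so far */
                    ring_dealloc_queue(q);
                return (1);
            }
        }
        for (q = 0; q < QNUM_MAX; q++)
            ring_dealloc_queue(q);
        return (0);
    }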
991 int q;
993 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
994 mvneta_rx_lockq(sc, q);
995 mvneta_rx_queue_enable(sc->ifp, q);
996 mvneta_rx_unlockq(sc, q);
999 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1000 mvneta_tx_lockq(sc, q);
1001 mvneta_tx_queue_enable(sc->ifp, q);
1002 mvneta_tx_unlockq(sc, q);
1012 int q, cnt;
1015 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
1016 rx = MVNETA_RX_RING(sc, q);
1017 mvneta_rx_lockq(sc, q);
1019 mvneta_rx_unlockq(sc, q);
1022 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1023 tx = MVNETA_TX_RING(sc, q);
1024 mvneta_tx_lockq(sc, q);
1026 mvneta_tx_unlockq(sc, q);
1090 int q;
1104 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
1105 mvneta_rx_lockq(sc, q);
1106 if (mvneta_rx_queue_init(ifp, q) != 0) {
1109 mvneta_rx_unlockq(sc, q);
1112 mvneta_rx_unlockq(sc, q);
1114 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1115 mvneta_tx_lockq(sc, q);
1116 if (mvneta_tx_queue_init(ifp, q) != 0) {
1119 mvneta_tx_unlockq(sc, q);
1122 mvneta_tx_unlockq(sc, q);
1201 mvneta_ring_alloc_rx_queue(struct mvneta_softc *sc, int q)
1208 if (q >= MVNETA_RX_QNUM_MAX)
1211 rx = MVNETA_RX_RING(sc, q);
1241 mvneta_rx_lockq(sc, q);
1242 mvneta_ring_flush_rx_queue(sc, q);
1243 mvneta_rx_unlockq(sc, q);
1244 mvneta_ring_dealloc_rx_queue(sc, q);
1250 mvneta_ring_alloc_tx_queue(struct mvneta_softc *sc, int q)
1255 if (q >= MVNETA_TX_QNUM_MAX)
1257 tx = MVNETA_TX_RING(sc, q);
1277 "Could not setup buffer ring for TxQ(%d)\n", q);
1285 mvneta_tx_lockq(sc, q);
1286 mvneta_ring_flush_tx_queue(sc, q);
1287 mvneta_tx_unlockq(sc, q);
1288 mvneta_ring_dealloc_tx_queue(sc, q);
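mvneta_ring_alloc_rx_queue and mvneta_ring_alloc_tx_queue bounds-check q on entry and, when setup fails partway (lines 1241-1244 and 1285-1288), flush the ring under its own lock before deallocating it. A sketch of that goto-style failure path, with a pthread mutex standing in for mtx(9); the names here are hypothetical:

    #include <errno.h>
    #include <pthread.h>
    #include <stdlib.h>

    struct ring {
        pthread_mutex_t mtx;
        void *desc;
        int cnt;
    };

    static int
    ring_alloc_queue(struct ring *r)
    {
        pthread_mutex_init(&r->mtx, NULL);
        r->desc = malloc(4096);
        if (r->desc == NULL)
            goto fail;
        /* ... further setup steps would also 'goto fail' on error ... */
        return (0);
    fail:
        /* Same ordering as the driver: flush under the lock, then dealloc. */
        pthread_mutex_lock(&r->mtx);
        r->cnt = 0;                        /* stand-in for flushing queued buffers */
        pthread_mutex_unlock(&r->mtx);
        free(r->desc);
        r->desc = NULL;
        return (ENOMEM);
    }

    int
    main(void)
    {
        struct ring r;

        return (ring_alloc_queue(&r) == 0 ? 0 : 1);
    }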
1294 mvneta_ring_dealloc_tx_queue(struct mvneta_softc *sc, int q)
1302 if (q >= MVNETA_TX_QNUM_MAX)
1304 tx = MVNETA_TX_RING(sc, q);
1324 __func__, q, i);
1344 mvneta_ring_dealloc_rx_queue(struct mvneta_softc *sc, int q)
1350 if (q >= MVNETA_RX_QNUM_MAX)
1353 rx = MVNETA_RX_RING(sc, q);
1372 mvneta_ring_init_rx_queue(struct mvneta_softc *sc, int q)
1378 if (q >= MVNETA_RX_QNUM_MAX)
1381 rx = MVNETA_RX_RING(sc, q);
1403 mvneta_ring_init_tx_queue(struct mvneta_softc *sc, int q)
1409 if (q >= MVNETA_TX_QNUM_MAX)
1412 tx = MVNETA_TX_RING(sc, q);
1434 tx->qidx = q;
1439 device_get_nameunit(sc->dev), q);
1445 mvneta_ring_flush_tx_queue(struct mvneta_softc *sc, int q)
1451 tx = MVNETA_TX_RING(sc, q);
1452 KASSERT_TX_MTX(sc, q);
1468 mvneta_ring_flush_rx_queue(struct mvneta_softc *sc, int q)
1474 rx = MVNETA_RX_RING(sc, q);
1475 KASSERT_RX_MTX(sc, q);
1489 mvneta_rx_queue_init(if_t ifp, int q)
1496 KASSERT_RX_MTX(sc, q);
1497 rx = MVNETA_RX_RING(sc, q);
1501 MVNETA_WRITE(sc, MVNETA_PRXDQA(q), rx->desc_pa);
1506 MVNETA_WRITE(sc, MVNETA_PRXDQS(q), reg);
1508 CTR3(KTR_SPARE2, "%s PRXDQS(%d): %#x", if_name(ifp), q,
1509 MVNETA_READ(sc, MVNETA_PRXDQS(q)));
1513 MVNETA_WRITE(sc, MVNETA_PRXC(q), reg);
1515 CTR3(KTR_SPARE2, "%s PRXC(%d): %#x", if_name(ifp), q,
1516 MVNETA_READ(sc, MVNETA_PRXC(q)));
1520 DASSERT(MVNETA_READ(sc, MVNETA_PRXDQA(q)) == rx->desc_pa);
1525 mvneta_tx_queue_init(if_t ifp, int q)
1532 KASSERT_TX_MTX(sc, q);
1533 tx = MVNETA_TX_RING(sc, q);
1537 MVNETA_WRITE(sc, MVNETA_PTXDQA(q), tx->desc_pa);
1541 MVNETA_WRITE(sc, MVNETA_PTXDQS(q), reg);
1544 DASSERT(MVNETA_READ(sc, MVNETA_PTXDQA(q)) == tx->desc_pa);
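mvneta_rx_queue_init and mvneta_tx_queue_init program each queue's descriptor ring physical address and size into per-queue registers (MVNETA_PRXDQA/PRXDQS and MVNETA_PTXDQA/PTXDQS) and read the address back under DASSERT. A standalone model of that write-then-verify pattern; the register map and helpers below are invented for illustration:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical register file standing in for the per-queue MMIO registers. */
    static uint32_t regfile[64];

    #define REG_RXDQA(q)    (0 + (q))      /* descriptor queue base address */
    #define REG_RXDQS(q)    (16 + (q))     /* descriptor queue size */

    static void
    reg_write(int reg, uint32_t val)
    {
        regfile[reg] = val;
    }

    static uint32_t
    reg_read(int reg)
    {
        return (regfile[reg]);
    }

    static void
    rx_queue_init(int q, uint32_t desc_pa, uint32_t ndescs)
    {
        reg_write(REG_RXDQA(q), desc_pa);  /* ring base (physical address) */
        reg_write(REG_RXDQS(q), ndescs);   /* ring size in descriptors */
        /* Read back and verify, mirroring the driver's DASSERT. */
        assert(reg_read(REG_RXDQA(q)) == desc_pa);
    }

    int
    main(void)
    {
        rx_queue_init(0, 0x10000000u, 256);
        printf("RXDQS(0) = %u\n", reg_read(REG_RXDQS(0)));
        return (0);
    }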
1549 mvneta_rx_queue_enable(if_t ifp, int q)
1556 rx = MVNETA_RX_RING(sc, q);
1557 KASSERT_RX_MTX(sc, q);
1561 MVNETA_WRITE(sc, MVNETA_PRXDQTH(q), reg);
1564 MVNETA_WRITE(sc, MVNETA_PRXITTH(q), reg);
1568 reg |= MVNETA_PRXTXTI_RBICTAPQ(q); /* Rx Buffer Interrupt Coalesce */
1573 reg |= MVNETA_RQC_ENQ(q);
1581 mvneta_tx_queue_enable(if_t ifp, int q)
1587 tx = MVNETA_TX_RING(sc, q);
1588 KASSERT_TX_MTX(sc, q);
1591 MVNETA_WRITE(sc, MVNETA_TQC, MVNETA_TQC_ENQ(q));
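mvneta_rx_queue_enable and mvneta_tx_queue_enable turn a queue on by writing MVNETA_RQC_ENQ(q) or MVNETA_TQC_ENQ(q). A sketch assuming, without confirmation from this listing, that ENQ(q) encodes one enable bit per queue in a shared control register:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed encoding: one enable bit per queue in a shared register. */
    #define RQC_ENQ(q)      (1u << (q))

    static uint32_t rqc;                   /* stands in for the RX queue command register */

    static void
    rx_queue_enable(int q)
    {
        rqc |= RQC_ENQ(q);                 /* set this queue's enable bit */
    }

    int
    main(void)
    {
        rx_queue_enable(0);
        rx_queue_enable(2);
        printf("RQC = %#x\n", rqc);        /* 0x5: queues 0 and 2 enabled */
        return (0);
    }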
1599 mvneta_rx_lockq(struct mvneta_softc *sc, int q)
1602 DASSERT(q >= 0);
1603 DASSERT(q < MVNETA_RX_QNUM_MAX);
1604 mtx_lock(&sc->rx_ring[q].ring_mtx);
1608 mvneta_rx_unlockq(struct mvneta_softc *sc, int q)
1611 DASSERT(q >= 0);
1612 DASSERT(q < MVNETA_RX_QNUM_MAX);
1613 mtx_unlock(&sc->rx_ring[q].ring_mtx);
1617 mvneta_tx_trylockq(struct mvneta_softc *sc, int q)
1620 DASSERT(q >= 0);
1621 DASSERT(q < MVNETA_TX_QNUM_MAX);
1622 return (mtx_trylock(&sc->tx_ring[q].ring_mtx));
1626 mvneta_tx_lockq(struct mvneta_softc *sc, int q)
1629 DASSERT(q >= 0);
1630 DASSERT(q < MVNETA_TX_QNUM_MAX);
1631 mtx_lock(&sc->tx_ring[q].ring_mtx);
1635 mvneta_tx_unlockq(struct mvneta_softc *sc, int q)
1638 DASSERT(q >= 0);
1639 DASSERT(q < MVNETA_TX_QNUM_MAX);
1640 mtx_unlock(&sc->tx_ring[q].ring_mtx);
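The lockq/unlockq/trylockq helpers above all follow one shape: DASSERT that q is in range, then take or drop the per-ring ring_mtx. A userspace model of the same discipline, with pthread mutexes and assert() standing in for mtx(9) and DASSERT:

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    #define RX_QNUM_MAX 8                  /* stands in for MVNETA_RX_QNUM_MAX */

    struct rx_ring {
        pthread_mutex_t ring_mtx;          /* mtx(9) in the real driver */
        int cnt;
    };

    static struct rx_ring rx_ring[RX_QNUM_MAX];

    static void
    rx_lockq(int q)
    {
        assert(q >= 0 && q < RX_QNUM_MAX); /* DASSERT in the driver */
        pthread_mutex_lock(&rx_ring[q].ring_mtx);
    }

    static void
    rx_unlockq(int q)
    {
        assert(q >= 0 && q < RX_QNUM_MAX);
        pthread_mutex_unlock(&rx_ring[q].ring_mtx);
    }

    int
    main(void)
    {
        int q;

        for (q = 0; q < RX_QNUM_MAX; q++)
            pthread_mutex_init(&rx_ring[q].ring_mtx, NULL);
        /* Every per-queue operation happens between lockq/unlockq. */
        rx_lockq(3);
        rx_ring[3].cnt++;
        rx_unlockq(3);
        printf("queue 3 count: %d\n", rx_ring[3].cnt);
        return (0);
    }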
1754 int q;
1786 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
1787 rx = MVNETA_RX_RING(sc, q);
1789 mvneta_rx_lockq(sc, q);
1790 mvneta_rx_queue_refill(sc, q);
1791 mvneta_rx_unlockq(sc, q);
1801 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1807 tx = MVNETA_TX_RING(sc, q);
1835 size_t q;
1839 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
1840 tx = MVNETA_TX_RING(sc, q);
1841 mvneta_tx_lockq(sc, q);
1844 mvneta_tx_unlockq(sc, q);
1874 mvneta_xmitfast_locked(struct mvneta_softc *sc, int q, struct mbuf **m)
1880 KASSERT_TX_MTX(sc, q);
1881 tx = MVNETA_TX_RING(sc, q);
1895 mvneta_tx_queue_complete(sc, q);
1902 error = mvneta_tx_queue(sc, m, q);
1923 int q;
1929 q = m->m_pkthdr.flowid % MVNETA_TX_QNUM_MAX;
1931 q = 0;
1933 tx = MVNETA_TX_RING(sc, q);
1937 mvneta_tx_lockq(sc, q);
1938 mvneta_xmit_locked(sc, q);
1939 mvneta_tx_unlockq(sc, q);
1946 if (buf_ring_empty(tx->br) && mvneta_tx_trylockq(sc, q) != 0) {
1947 error = mvneta_xmitfast_locked(sc, q, &m);
1948 mvneta_tx_unlockq(sc, q);
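The transmit entry point (lines 1923-1948) picks a TX queue by hashing m_pkthdr.flowid modulo MVNETA_TX_QNUM_MAX, falling back to queue 0, then tries a fast path: if the queue's buf_ring is empty and its lock can be taken without blocking, the packet goes out inline via mvneta_xmitfast_locked; otherwise it is deferred. A simplified standalone sketch of that selection and trylock logic, with a counter standing in for the mbufs and buf_ring:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define TX_QNUM_MAX 8                  /* stands in for MVNETA_TX_QNUM_MAX */

    struct tx_ring {
        pthread_mutex_t ring_mtx;
        int pending;                       /* stands in for buf_ring occupancy */
    };

    static struct tx_ring tx_ring[TX_QNUM_MAX];

    /* Hash the flow id onto a queue if the packet has one, else use queue 0. */
    static int
    select_txq(bool has_flowid, unsigned int flowid)
    {
        return (has_flowid ? (int)(flowid % TX_QNUM_MAX) : 0);
    }

    static void
    transmit(bool has_flowid, unsigned int flowid)
    {
        struct tx_ring *tx;
        int q;

        q = select_txq(has_flowid, flowid);
        tx = &tx_ring[q];
        /*
         * Fast path: nothing queued ahead of us and the queue lock is
         * uncontended, so send inline; otherwise defer (the driver
         * enqueues on a buf_ring and lets a task drain it later).
         */
        if (tx->pending == 0 && pthread_mutex_trylock(&tx->ring_mtx) == 0) {
            printf("inline transmit on txq %d\n", q);
            pthread_mutex_unlock(&tx->ring_mtx);
            return;
        }
        tx->pending++;
        printf("deferred on txq %d (%d pending)\n", q, tx->pending);
    }

    int
    main(void)
    {
        int q;

        for (q = 0; q < TX_QNUM_MAX; q++)
            pthread_mutex_init(&tx_ring[q].ring_mtx, NULL);
        transmit(true, 0xdeadbeefu);       /* hashed onto some queue */
        tx_ring[0].pending = 1;            /* simulate a backlog on queue 0 */
        transmit(false, 0);                /* no flowid: queue 0, deferred */
        return (0);
    }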
1967 mvneta_xmit_locked(struct mvneta_softc *sc, int q)
1974 KASSERT_TX_MTX(sc, q);
1976 tx = MVNETA_TX_RING(sc, q);
1980 error = mvneta_xmitfast_locked(sc, q, &m);
2013 mvneta_xmit_locked(struct mvneta_softc *sc, int q)
2019 KASSERT_TX_MTX(sc, q);
2028 error = mvneta_xmitfast_locked(sc, q, &m);
2049 int q;
2099 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2100 rx = MVNETA_RX_RING(sc, q);
2158 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2159 mvneta_rx_lockq(sc, q);
2160 if (mvneta_rx_queue_init(ifp, q) != 0) {
2164 mvneta_rx_unlockq(sc, q);
2168 mvneta_rx_unlockq(sc, q);
2191 int q, cpu;
2225 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2226 mvneta_rx_lockq(sc, q);
2227 mvneta_rx_queue_refill(sc, q);
2228 mvneta_rx_unlockq(sc, q);
2262 int q;
2284 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
2285 mvneta_rx_lockq(sc, q);
2286 mvneta_ring_flush_rx_queue(sc, q);
2287 mvneta_rx_unlockq(sc, q);
2297 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
2298 mvneta_tx_lockq(sc, q);
2299 mvneta_ring_flush_tx_queue(sc, q);
2300 mvneta_tx_unlockq(sc, q);
2667 mvneta_tx_queue(struct mvneta_softc *sc, struct mbuf **mbufp, int q)
2679 tx = MVNETA_TX_RING(sc, q);
2718 CTR3(KTR_SPARE2, "%s:%u bus_dmamap_load_mbuf_sg error=%d", if_name(ifp), q, error);
2735 if_name(ifp), q, txnsegs);
2776 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2781 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2845 mvneta_tx_queue_complete(struct mvneta_softc *sc, int q)
2853 KASSERT_TX_MTX(sc, q);
2855 tx = MVNETA_TX_RING(sc, q);
2859 ptxs = MVNETA_READ(sc, MVNETA_PTXS(q));
2873 if_name(sc->ifp), q, ndesc);
2884 if_name(sc->ifp), q, tx->dma);
2902 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2907 MVNETA_WRITE(sc, MVNETA_PTXSU(q), ptxsu);
2911 if_name(sc->ifp), q, tx->cpu, tx->dma, tx->used);
2927 int q;
2933 for (q = 0; q < MVNETA_TX_QNUM_MAX; q++) {
2934 tx = MVNETA_TX_RING(sc, q);
2937 mvneta_tx_lockq(sc, q);
2938 mvneta_tx_queue_complete(sc, q);
2939 mvneta_tx_unlockq(sc, q);
2948 mvneta_rx(struct mvneta_softc *sc, int q, int count)
2954 mvneta_rx_lockq(sc, q);
2955 prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
2964 mvneta_rx_queue(sc, q, npkt);
2966 mvneta_rx_unlockq(sc, q);
2975 mvneta_prxsu_update(struct mvneta_softc *sc, int q, int processed)
2981 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
2985 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
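mvneta_prxsu_update, like the paired MVNETA_PTXSU writes at lines 2776/2781 and 2902/2907, issues the status-update write more than once, which suggests the hardware's processed-descriptor count field is too narrow to report a large batch in a single register write. A sketch of chunked updates, assuming a field limit of 255 descriptors per write:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Model of chunked status-register updates: the field reporting
     * "descriptors processed" is assumed to hold at most 255, so larger
     * counts are split across several writes.
     */
    #define NOOFDESC_MAX 255               /* assumed field limit */

    static void
    reg_write_prxsu(int q, uint32_t processed)
    {
        printf("PRXSU(%d) <- %u\n", q, processed);
    }

    static void
    prxsu_update(int q, int processed)
    {
        while (processed > NOOFDESC_MAX) {
            reg_write_prxsu(q, NOOFDESC_MAX);
            processed -= NOOFDESC_MAX;
        }
        reg_write_prxsu(q, (uint32_t)processed);
    }

    int
    main(void)
    {
        prxsu_update(0, 600);              /* emits 255, 255, 90 */
        return (0);
    }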
2996 mvneta_rx_queue(struct mvneta_softc *sc, int q, int npkt)
3006 KASSERT_RX_MTX(sc, q);
3009 rx = MVNETA_RX_RING(sc, q);
3077 mvneta_rx_unlockq(sc, q);
3079 mvneta_rx_lockq(sc, q);
3089 mvneta_prxsu_update(sc, q, processed);
3090 mvneta_rx_queue_refill(sc, q);
3099 mvneta_prxsu_update(sc, q, processed);
3100 mvneta_rx_queue_refill(sc, q);
3105 CTR3(KTR_SPARE2, "%s:%u %u packets received", if_name(ifp), q, npkt);
3108 mvneta_prxsu_update(sc, q, processed);
3110 mvneta_rx_queue_refill(sc, q);
3129 mvneta_rx_queue_refill(struct mvneta_softc *sc, int q)
3139 KASSERT_RX_MTX(sc, q);
3141 rx = MVNETA_RX_RING(sc, q);
3142 prxs = MVNETA_READ(sc, MVNETA_PRXS(q));
3146 CTR3(KTR_SPARE2, "%s:%u refill %u packets", if_name(sc->ifp), q,
3188 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
3193 MVNETA_WRITE(sc, MVNETA_PRXSU(q), prxsu);
3417 int i, q;
3482 for (q = 0; q < MVNETA_RX_QNUM_MAX; q++) {
3483 rxarg = &sc->sysctl_rx_queue[q];
3486 rxarg->queue = q;
3491 sysctl_queue_names[q], CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
3492 sysctl_queue_descrs[q]);
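Attach ends by creating one sysctl node per RX queue, naming it from sysctl_queue_names[q] and recording the queue index in sc->sysctl_rx_queue[q] so each handler knows which queue it serves. A minimal model of that bind-an-index-to-a-handler-argument pattern; the types and handler below are hypothetical:

    #include <stdio.h>

    #define RX_QNUM_MAX 8                  /* stands in for MVNETA_RX_QNUM_MAX */

    /* Per-queue handler argument, as sc->sysctl_rx_queue[q] in the driver. */
    struct queue_arg {
        int queue;
    };

    static struct queue_arg rx_args[RX_QNUM_MAX];

    /* Stand-in for a sysctl handler receiving its per-queue argument. */
    static int
    queue_handler(struct queue_arg *arg)
    {
        printf("handler for rxq %d\n", arg->queue);
        return (0);
    }

    int
    main(void)
    {
        int q;

        for (q = 0; q < RX_QNUM_MAX; q++) {
            rx_args[q].queue = q;          /* bind the queue index to its node */
            queue_handler(&rx_args[q]);
        }
        return (0);
    }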