Lines Matching defs:txq

325 		aprint_error(": failed to init txq: %d\n", error);
614 struct bcmeth_txqueue * const txq = &sc->sc_txq;
627 bcmeth_write_4(sc, txq->txq_reg_xmtctl,
628 bcmeth_read_4(sc, txq->txq_reg_xmtctl) & ~XMTCTL_ENABLE);
634 uint32_t tx0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
725 struct bcmeth_txqueue *txq,
729 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
730 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
737 struct bcmeth_txqueue *txq,
741 bus_dmamap_sync(sc->sc_dmat, txq->txq_descmap,
742 (txdb - txq->txq_first) * sizeof(*txdb), count * sizeof(*txdb),
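The two sync helpers above (lines 725-742) convert a descriptor pointer and a count into a byte offset and length within the descriptor DMA map. A minimal user-space sketch of that arithmetic follows, with a stand-in descriptor layout; the real helpers pass these values to bus_dmamap_sync(), whose sync flags are not visible in the matched lines.

/*
 * Sketch of the offset math in bcmeth_txq_desc_presync()/..._postsync().
 * "struct gmac_txdb" is a dummy 16-byte layout here; only the pointer
 * arithmetic matters.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct gmac_txdb {		/* stand-in hardware descriptor */
	uint32_t txdb_flags;
	uint32_t txdb_pad;
	uint32_t txdb_buflo;
	uint32_t txdb_bufhi;
};

/* Byte offset of descriptor "txdb" within the ring starting at "first". */
static size_t
txq_desc_offset(const struct gmac_txdb *first, const struct gmac_txdb *txdb)
{
	return (size_t)(txdb - first) * sizeof(*txdb);
}

/* Byte length covering "count" consecutive descriptors. */
static size_t
txq_desc_length(size_t count)
{
	return count * sizeof(struct gmac_txdb);
}

int
main(void)
{
	struct gmac_txdb ring[256];

	/* Descriptor 3, four descriptors long: offset 48, length 64. */
	assert(txq_desc_offset(ring, &ring[3]) == 3 * sizeof(ring[0]));
	assert(txq_desc_length(4) == 4 * sizeof(ring[0]));
	return 0;
}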
1304 struct bcmeth_txqueue *txq)
1306 return !IF_IS_EMPTY(&txq->txq_mbufs);
1312 struct bcmeth_txqueue *txq)
1314 return txq->txq_free >= txq->txq_threshold;
1320 struct bcmeth_txqueue *txq,
1323 size_t desc_count = BCMETH_RINGSIZE / sizeof(txq->txq_first[0]);
1330 &txq->txq_descmap_seg, &txq->txq_descmap, &descs);
1335 txq->txq_first = descs;
1336 txq->txq_last = txq->txq_first + desc_count;
1337 txq->txq_consumer = descs;
1338 txq->txq_producer = descs;
1340 IFQ_SET_MAXLEN(&txq->txq_mbufs, BCMETH_MAXTXMBUFS);
1342 txq->txq_reg_xmtaddrlo = GMAC_XMTADDR_LOW;
1343 txq->txq_reg_xmtctl = GMAC_XMTCONTROL;
1344 txq->txq_reg_xmtptr = GMAC_XMTPTR;
1345 txq->txq_reg_xmtsts0 = GMAC_XMTSTATUS0;
1346 txq->txq_reg_xmtsts1 = GMAC_XMTSTATUS1;
1348 bcmeth_txq_reset(sc, txq);
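The attach path (lines 1320-1348) sizes the ring from BCMETH_RINGSIZE, allocates the descriptor array, and points txq_first/txq_last/txq_consumer/txq_producer at it before handing off to bcmeth_txq_reset(). A user-space sketch of that layout, with malloc standing in for the bus_dmamem allocation and an assumed ring size:

/*
 * Sketch of the ring layout set up by bcmeth_txq_attach().  The value
 * of BCMETH_RINGSIZE is not shown in the matched lines, so 4096 here
 * is an assumption; register offsets and the mbuf queue are omitted.
 */
#include <stdint.h>
#include <stdlib.h>

#define BCMETH_RINGSIZE	4096	/* assumption: one page of descriptors */

struct gmac_txdb {		/* stand-in 16-byte descriptor */
	uint32_t txdb_flags, txdb_pad, txdb_buflo, txdb_bufhi;
};

struct bcmeth_txqueue {		/* only the fields used here */
	struct gmac_txdb *txq_first, *txq_last;
	struct gmac_txdb *txq_consumer, *txq_producer;
};

int
txq_attach(struct bcmeth_txqueue *txq)
{
	const size_t desc_count = BCMETH_RINGSIZE / sizeof(txq->txq_first[0]);
	struct gmac_txdb *descs = calloc(desc_count, sizeof(*descs));

	if (descs == NULL)
		return -1;

	/* Both ring pointers start at the beginning of the new ring. */
	txq->txq_first = descs;
	txq->txq_last = txq->txq_first + desc_count;
	txq->txq_consumer = descs;
	txq->txq_producer = descs;
	return 0;
}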
1356 struct bcmeth_txqueue *txq,
1384 struct bcmeth_txqueue *txq,
1398 struct bcmeth_txqueue *txq,
1403 if (map->dm_nsegs > txq->txq_free)
1409 struct gmac_txdb *producer = txq->txq_producer;
1417 txq->txq_lastintr += map->dm_nsegs;
1418 if (txq->txq_lastintr >= txq->txq_threshold
1419 || txq->txq_mbufs.ifq_len + 1 == txq->txq_mbufs.ifq_maxlen) {
1420 txq->txq_lastintr = 0;
1424 KASSERT(producer != txq->txq_last);
1433 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
1439 if (__predict_false(++producer == txq->txq_last)) {
1440 bcmeth_txq_desc_presync(sc, txq, start,
1441 txq->txq_last - start);
1442 count -= txq->txq_last - start;
1443 producer = txq->txq_first;
1444 start = txq->txq_first;
1451 printf("[%zu]: %#x/%#x/%#x/%#x\n", producer - txq->txq_first,
1456 bcmeth_txq_desc_presync(sc, txq, start, count);
1461 txq->txq_free -= map->dm_nsegs;
1462 KASSERT(map->dm_nsegs == 1 || txq->txq_producer != producer);
1464 || (txq->txq_producer->txdb_flags & htole32(TXDB_FLAG_EF)) == 0);
1470 txq->txq_producer - txq->txq_first, producer - txq->txq_first);
1473 if (producer + 1 == txq->txq_last)
1474 txq->txq_producer = txq->txq_first;
1476 txq->txq_producer = producer + 1;
1477 IF_ENQUEUE(&txq->txq_mbufs, m);
1482 bcmeth_write_4(sc, txq->txq_reg_xmtptr,
1483 txq->txq_descmap->dm_segs[0].ds_addr
1484 + ((uintptr_t)txq->txq_producer & XMT_LASTDSCR));
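The produce path (lines 1398-1484) refuses a packet whose DMA map needs more descriptors than txq_free, decides whether the packet should request a completion interrupt (txq_lastintr against txq_threshold, or a nearly full mbuf queue), and advances the producer with wrap-around at txq_last before kicking XMTPTR. A simplified sketch of that bookkeeping, omitting descriptor contents, DMA syncs, and the register write:

/*
 * Sketch of the producer-side bookkeeping in bcmeth_txq_produce().
 * "want_intr" stands in for setting an interrupt-request flag in the
 * last descriptor; txq_len/txq_maxlen stand in for the ifq_len and
 * ifq_maxlen fields of txq_mbufs.
 */
#include <stdbool.h>
#include <stddef.h>

struct gmac_txdb { unsigned txdb_flags; };

struct bcmeth_txqueue {
	struct gmac_txdb *txq_first, *txq_last, *txq_producer;
	size_t txq_free, txq_threshold, txq_lastintr;
	size_t txq_len, txq_maxlen;
};

bool
txq_produce(struct bcmeth_txqueue *txq, size_t nsegs)
{
	if (nsegs > txq->txq_free)
		return false;		/* not enough free descriptors */

	/*
	 * Ask for a TX-done interrupt once enough descriptors have been
	 * queued since the last request, or when the mbuf queue is about
	 * to fill up (mirrors lines 1417-1420).
	 */
	bool want_intr = false;
	txq->txq_lastintr += nsegs;
	if (txq->txq_lastintr >= txq->txq_threshold
	    || txq->txq_len + 1 == txq->txq_maxlen) {
		txq->txq_lastintr = 0;
		want_intr = true;
	}
	(void)want_intr;

	/* Advance the producer one descriptor per segment, wrapping. */
	struct gmac_txdb *producer = txq->txq_producer;
	for (size_t i = 0; i < nsegs; i++) {
		if (++producer == txq->txq_last)
			producer = txq->txq_first;
	}

	txq->txq_free -= nsegs;
	txq->txq_producer = producer;
	txq->txq_len++;
	return true;
}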
1551 struct bcmeth_txqueue *txq)
1554 if (IF_QFULL(&txq->txq_mbufs))
1556 struct mbuf *m = txq->txq_next;
1565 txq->txq_next = NULL;
1574 txq->txq_next = m;
1579 int error = bcmeth_txq_map_load(sc, txq, m);
1587 KASSERT(txq->txq_next == NULL);
1588 if (!bcmeth_txq_produce(sc, txq, m)) {
1589 txq->txq_next = m;
1592 KASSERT(txq->txq_next == NULL);
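The enqueue path (lines 1551-1592) parks a packet that cannot be loaded or produced right now in txq_next and retries it before dequeuing anything new. A sketch of that deferral pattern, with hypothetical stand-ins (pkt_t, dequeue_pkt, map_load, ring_produce) for the mbuf queue and the driver helpers:

/*
 * Sketch of the txq_next deferral in bcmeth_txq_enqueue().  The stub
 * helpers below always "succeed" or "run dry" so the sketch compiles
 * on its own; they are not the driver's functions.
 */
#include <stdbool.h>
#include <stddef.h>

typedef struct pkt pkt_t;

struct txq {
	pkt_t *txq_next;	/* packet deferred from a previous attempt */
};

static pkt_t *dequeue_pkt(void) { return NULL; }
static bool map_load(pkt_t *m) { (void)m; return true; }
static bool ring_produce(pkt_t *m) { (void)m; return true; }

void
txq_enqueue(struct txq *txq)
{
	for (;;) {
		/* Retry the deferred packet first, then pull a new one. */
		pkt_t *m = txq->txq_next;
		if (m != NULL)
			txq->txq_next = NULL;
		else if ((m = dequeue_pkt()) == NULL)
			break;

		if (!map_load(m) || !ring_produce(m)) {
			txq->txq_next = m;	/* park it for next time */
			break;
		}
	}
}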
1599 struct bcmeth_txqueue *txq)
1602 struct gmac_txdb *consumer = txq->txq_consumer;
1606 printf("%s: entry: free=%zu\n", __func__, txq->txq_free);
1610 if (consumer == txq->txq_producer) {
1611 txq->txq_consumer = consumer;
1612 txq->txq_free += txfree;
1613 txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree);
1617 txq->txq_free - txfree, txq->txq_free);
1619 KASSERT(txq->txq_lastintr == 0);
1620 KASSERT(txq->txq_free
1621 == txq->txq_last - txq->txq_first - 1);
1624 bcmeth_txq_desc_postsync(sc, txq, consumer, 1);
1625 uint32_t s0 = bcmeth_read_4(sc, txq->txq_reg_xmtsts0);
1626 if (consumer == txq->txq_first + __SHIFTOUT(s0, XMT_CURRDSCR)) {
1627 txq->txq_consumer = consumer;
1628 txq->txq_free += txfree;
1629 txq->txq_lastintr -= uimin(txq->txq_lastintr, txfree);
1634 return bcmeth_txq_fillable_p(sc, txq);
1645 IF_DEQUEUE(&txq->txq_mbufs, m);
1647 bcmeth_txq_map_unload(sc, txq, m);
1670 KASSERT(consumer + 1 == txq->txq_last);
1671 consumer = txq->txq_first;
1675 KASSERT(consumer < txq->txq_last);
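The consume path (lines 1599-1675) walks the consumer forward until it meets the producer (ring drained) or the descriptor the hardware is still transmitting (XMT_CURRDSCR from XMTSTATUS0), then folds the reclaimed count back into txq_free and txq_lastintr. A simplified sketch, with the hardware's current-descriptor index passed in as a plain parameter and mbuf freeing, DMA syncs, and end-of-frame checks omitted:

/*
 * Sketch of the reclaim loop in bcmeth_txq_consume().  "hw_curr" stands
 * in for the descriptor index the hardware reports via XMTSTATUS0.
 */
#include <stdbool.h>
#include <stddef.h>

struct gmac_txdb { unsigned txdb_flags; };

struct bcmeth_txqueue {
	struct gmac_txdb *txq_first, *txq_last;
	struct gmac_txdb *txq_consumer, *txq_producer;
	size_t txq_free, txq_threshold, txq_lastintr;
};

static size_t
size_min(size_t a, size_t b)
{
	return a < b ? a : b;	/* mirrors uimin() in the listing */
}

bool
txq_consume(struct bcmeth_txqueue *txq, size_t hw_curr)
{
	struct gmac_txdb *consumer = txq->txq_consumer;
	size_t txfree = 0;

	for (;;) {
		if (consumer == txq->txq_producer)
			break;			/* everything reclaimed */
		if (consumer == txq->txq_first + hw_curr)
			break;			/* hardware not done yet */

		/* This descriptor is finished; count it and advance. */
		txfree++;
		if (++consumer == txq->txq_last)
			consumer = txq->txq_first;
	}

	txq->txq_consumer = consumer;
	txq->txq_free += txfree;
	txq->txq_lastintr -= size_min(txq->txq_lastintr, txfree);

	/* Report whether the ring is fillable again (cf. line 1314). */
	return txq->txq_free >= txq->txq_threshold;
}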
1683 struct bcmeth_txqueue *txq)
1689 IF_DEQUEUE(&txq->txq_mbufs, m);
1692 bcmeth_txq_map_unload(sc, txq, m);
1695 if ((m = txq->txq_next) != NULL) {
1696 txq->txq_next = NULL;
1697 bcmeth_txq_map_unload(sc, txq, m);
1705 struct bcmeth_txqueue *txq)
1710 bcmeth_txq_desc_postsync(sc, txq, txq->txq_first,
1711 txq->txq_last - txq->txq_first);
1717 for (txdb = txq->txq_first; txdb < txq->txq_last - 1; txdb++) {
1729 txq->txq_consumer = txq->txq_first;
1730 txq->txq_producer = txq->txq_first;
1731 txq->txq_free = txq->txq_last - txq->txq_first - 1;
1732 txq->txq_threshold = txq->txq_free / 2;
1733 txq->txq_lastintr = 0;
1743 bcmeth_write_4(sc, txq->txq_reg_xmtaddrlo,
1744 txq->txq_descmap->dm_segs->ds_addr);
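The reset path (lines 1705-1744) clears the descriptors, rewinds consumer and producer to txq_first, and recomputes txq_free and txq_threshold before programming XMTADDR_LOW with the ring's DMA address. A sketch of that bookkeeping, with the DMA syncs and the register write omitted; the matched lines set txq_free to one less than the ring size and stop the clearing loop at txq_last - 1, so the final slot appears to be reserved, though the reason is not visible here:

/*
 * Sketch of the software state reset in bcmeth_txq_reset().
 */
#include <stddef.h>
#include <string.h>

struct gmac_txdb { unsigned txdb_flags, txdb_pad, txdb_buflo, txdb_bufhi; };

struct bcmeth_txqueue {
	struct gmac_txdb *txq_first, *txq_last;
	struct gmac_txdb *txq_consumer, *txq_producer;
	size_t txq_free, txq_threshold, txq_lastintr;
};

void
txq_reset(struct bcmeth_txqueue *txq)
{
	/* Clear every descriptor except the apparently reserved final one. */
	memset(txq->txq_first, 0,
	    (size_t)(txq->txq_last - 1 - txq->txq_first) *
	    sizeof(*txq->txq_first));

	txq->txq_consumer = txq->txq_first;
	txq->txq_producer = txq->txq_first;
	txq->txq_free = (size_t)(txq->txq_last - txq->txq_first) - 1;
	txq->txq_threshold = txq->txq_free / 2;	/* interrupt after half the ring */
	txq->txq_lastintr = 0;
}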