Lines Matching defs:txq
214 #define WM_TXQUEUELEN(txq) ((txq)->txq_num)
215 #define WM_TXQUEUELEN_MASK(txq) (WM_TXQUEUELEN(txq) - 1)
216 #define WM_TXQUEUE_GC(txq) (WM_TXQUEUELEN(txq) / 8)
219 #define WM_NTXDESC(txq) ((txq)->txq_ndesc)
220 #define WM_NTXDESC_MASK(txq) (WM_NTXDESC(txq) - 1)
221 #define WM_TXDESCS_SIZE(txq) (WM_NTXDESC(txq) * (txq)->txq_descsize)
222 #define WM_NEXTTX(txq, x) (((x) + 1) & WM_NTXDESC_MASK(txq))
223 #define WM_NEXTTXS(txq, x) (((x) + 1) & WM_TXQUEUELEN_MASK(txq))
265 #define WM_CDTXOFF(txq, x) ((txq)->txq_descsize * (x))
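
The mask macros above rely on txq_num and txq_ndesc being powers of two, so the ring index wraps with a single AND instead of a modulo. A minimal user-space model of WM_NEXTTX()'s arithmetic (standalone C; the descriptor count is a hypothetical value):

#include <assert.h>
#include <stdint.h>

/* Advance a ring index using a power-of-two mask, as WM_NEXTTX() does. */
static inline uint32_t
ring_next(uint32_t x, uint32_t ndesc)
{
	return (x + 1) & (ndesc - 1);	/* ndesc must be a power of two */
}

int
main(void)
{
	assert(ring_next(0, 256) == 1);
	assert(ring_next(255, 256) == 0);	/* wraps back to slot 0 */
	return 0;
}
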
377 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags
399 WM_Q_EVCNT_DEFINE(txq, txsstall); /* Stalled due to no txs */
400 WM_Q_EVCNT_DEFINE(txq, txdstall); /* Stalled due to no txd */
401 WM_Q_EVCNT_DEFINE(txq, fifo_stall); /* FIFO stalls (82547) */
402 WM_Q_EVCNT_DEFINE(txq, txdw); /* Tx descriptor interrupts */
403 WM_Q_EVCNT_DEFINE(txq, txqe); /* Tx queue empty interrupts */
406 WM_Q_EVCNT_DEFINE(txq, ipsum); /* IP checksums comp. */
407 WM_Q_EVCNT_DEFINE(txq, tusum); /* TCP/UDP cksums comp. */
408 WM_Q_EVCNT_DEFINE(txq, tusum6); /* TCP/UDP v6 cksums comp. */
409 WM_Q_EVCNT_DEFINE(txq, tso); /* TCP seg offload (IPv4) */
410 WM_Q_EVCNT_DEFINE(txq, tso6); /* TCP seg offload (IPv6) */
411 WM_Q_EVCNT_DEFINE(txq, tsopain); /* Painful header manip. for TSO */
412 WM_Q_EVCNT_DEFINE(txq, pcqdrop); /* Pkt dropped in pcq */
413 WM_Q_EVCNT_DEFINE(txq, descdrop); /* Pkt dropped in MAC desc ring */
416 WM_Q_EVCNT_DEFINE(txq, toomanyseg); /* Pkt dropped (too many DMA segs) */
417 WM_Q_EVCNT_DEFINE(txq, defrag); /* m_defrag() */
418 WM_Q_EVCNT_DEFINE(txq, underrun); /* Tx underrun */
419 WM_Q_EVCNT_DEFINE(txq, skipcontext); /* Tx skipped redundant cksum context */
812 #define WM_CDTXADDR(txq, x) ((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
815 #define WM_CDTXADDR_LO(txq, x) (WM_CDTXADDR((txq), (x)) & 0xffffffffU)
816 #define WM_CDTXADDR_HI(txq, x) \
818 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
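
WM_CDTXADDR() turns a descriptor index into a bus address, and the _LO/_HI variants split it into the 32-bit halves that the TDBAL/TDBAH register pair expects; the elided condition in the _HI macro presumably guards the shift for 64-bit bus addresses. A hedged standalone model of the split:

#include <assert.h>
#include <stdint.h>

static uint32_t addr_lo(uint64_t a) { return (uint32_t)(a & 0xffffffffU); }
static uint32_t addr_hi(uint64_t a) { return (uint32_t)(a >> 32); }

int
main(void)
{
	uint64_t dma = 0x123456789abcULL;	/* hypothetical ring base */

	assert(addr_lo(dma) == 0x56789abcU);	/* goes to TDBAL */
	assert(addr_hi(dma) == 0x1234U);	/* goes to TDBAH */
	return 0;
}
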
1895 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1897 struct wm_softc *sc = txq->txq_sc;
1900 if ((start + num) > WM_NTXDESC(txq)) {
1901 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1902 WM_CDTXOFF(txq, start), txq->txq_descsize *
1903 (WM_NTXDESC(txq) - start), ops);
1904 num -= (WM_NTXDESC(txq) - start);
1909 bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1910 WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
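
When the range [start, start + num) crosses the end of the ring, wm_cdtxsync() issues the bus_dmamap_sync() in two pieces: the tail of the ring first, then the remainder from slot 0 (the reset of start between the two calls is elided from the listing but implied). A standalone model of how the span is split:

#include <assert.h>

/* Split a wrapping descriptor range into (at most) two linear spans. */
static void
split_span(int start, int num, int ndesc, int span[2][2])
{
	span[0][0] = span[0][1] = -1;		/* no wrap by default */
	if (start + num > ndesc) {
		span[0][0] = start;
		span[0][1] = ndesc - start;	/* tail piece */
		num -= ndesc - start;
		start = 0;
	}
	span[1][0] = start;
	span[1][1] = num;			/* head (or only) piece */
}

int
main(void)
{
	int s[2][2];

	split_span(250, 10, 256, s);		/* 250..255, then 0..3 */
	assert(s[0][0] == 250 && s[0][1] == 6);
	assert(s[1][0] == 0 && s[1][1] == 4);
	return 0;
}
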
3813 struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
3815 wm_watchdog_txq(ifp, txq, &hang_queue);
3864 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3867 mutex_enter(txq->txq_lock);
3868 if (txq->txq_sending &&
3869 time_uptime - txq->txq_lastsent > wm_watchdog_timeout)
3870 wm_watchdog_txq_locked(ifp, txq, hang);
3872 mutex_exit(txq->txq_lock);
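
This is the driver's replacement for the old ifnet watchdog: txq_sending is armed when descriptors are handed to the MAC and cleared once the ring drains, so only a queue with work outstanding can be declared hung. A minimal model of the test (standalone C; second-granularity uptimes assumed, as with time_uptime):

#include <assert.h>
#include <stdbool.h>
#include <time.h>

/* Per-queue watchdog test, shaped like the check in wm_watchdog_txq(). */
static bool
txq_hung(bool sending, time_t lastsent, time_t now, time_t timeout)
{
	return sending && now - lastsent > timeout;
}

int
main(void)
{
	assert(!txq_hung(false, 0, 100, 5));	/* idle queue never hangs */
	assert(txq_hung(true, 0, 100, 5));	/* overdue with work pending */
	return 0;
}
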
3876 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3880 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3882 KASSERT(mutex_owned(txq->txq_lock));
3888 wm_txeof(txq, UINT_MAX);
3890 if (txq->txq_sending)
3893 if (txq->txq_free == WM_NTXDESC(txq)) {
3903 device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3904 txq->txq_next);
3907 for (i = txq->txq_sdirty; i != txq->txq_snext;
3908 i = WM_NEXTTXS(txq, i)) {
3909 txs = &txq->txq_soft[i];
3912 for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3915 txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3917 txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3918 txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3921 (uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3922 txq->txq_descs[j].wtx_addr.wa_low);
3924 txq->txq_descs[j].wtx_fields.wtxu_vlan,
3925 txq->txq_descs[j].wtx_fields.wtxu_options,
3926 txq->txq_descs[j].wtx_fields.wtxu_status,
3927 txq->txq_descs[j].wtx_cmdlen);
4068 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4070 mutex_enter(txq->txq_lock);
4071 txq->txq_flags |= WM_TXQ_LINKDOWN_DISCARD;
4072 mutex_exit(txq->txq_lock);
4081 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4083 mutex_enter(txq->txq_lock);
4084 txq->txq_flags &= ~WM_TXQ_LINKDOWN_DISCARD;
4085 mutex_exit(txq->txq_lock);
5404 struct wm_txqueue *txq;
5435 txq = &sc->sc_queue[0].wmq_txq;
5436 nexttx = txq->txq_next;
5437 txd = &txq->txq_descs[nexttx];
5438 wm_set_dma_addr(&txd->wtx_addr, txq->txq_desc_dma);
5444 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5447 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
5448 CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
5515 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5516 txq->txq_fifo_head = 0;
5517 txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
5518 txq->txq_fifo_size =
5520 txq->txq_fifo_stall = 0;
6355 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6358 mutex_enter(txq->txq_lock);
6359 txq->txq_stopping = false;
6360 mutex_exit(txq->txq_lock);
6382 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6388 mutex_enter(txq->txq_lock);
6389 txq->txq_stopping = true;
6390 mutex_exit(txq->txq_lock);
6448 struct wm_txqueue *txq = &wmq->wmq_txq;
6454 if (txq->txq_packets)
6455 avg_size = uimax(avg_size, txq->txq_bytes / txq->txq_packets);
6490 txq->txq_packets = 0;
6491 txq->txq_bytes = 0;
6522 struct wm_txqueue *txq = &wmq->wmq_txq;
6537 NULL, 0, &txq->txq_free,
6543 wm_sysctl_tdh_handler, 0, (void *)txq,
6549 wm_sysctl_tdt_handler, 0, (void *)txq,
6555 NULL, 0, &txq->txq_next,
6561 NULL, 0, &txq->txq_sfree,
6567 NULL, 0, &txq->txq_snext,
6573 NULL, 0, &txq->txq_sdirty,
6579 NULL, 0, &txq->txq_flags,
6585 NULL, 0, &txq->txq_stopping,
6591 NULL, 0, &txq->txq_sending,
6860 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6863 WM_Q_EVCNT_STORE(txq, txsstall, 0);
6864 WM_Q_EVCNT_STORE(txq, txdstall, 0);
6865 WM_Q_EVCNT_STORE(txq, fifo_stall, 0);
6866 WM_Q_EVCNT_STORE(txq, txdw, 0);
6867 WM_Q_EVCNT_STORE(txq, txqe, 0);
6868 WM_Q_EVCNT_STORE(txq, ipsum, 0);
6869 WM_Q_EVCNT_STORE(txq, tusum, 0);
6870 WM_Q_EVCNT_STORE(txq, tusum6, 0);
6871 WM_Q_EVCNT_STORE(txq, tso, 0);
6872 WM_Q_EVCNT_STORE(txq, tso6, 0);
6873 WM_Q_EVCNT_STORE(txq, tsopain, 0);
6876 WM_EVCNT_STORE(&txq->txq_ev_txseg[j], 0);
6878 WM_Q_EVCNT_STORE(txq, pcqdrop, 0);
6879 WM_Q_EVCNT_STORE(txq, descdrop, 0);
6880 WM_Q_EVCNT_STORE(txq, toomanyseg, 0);
6881 WM_Q_EVCNT_STORE(txq, defrag, 0);
6883 WM_Q_EVCNT_STORE(txq, underrun, 0);
6884 WM_Q_EVCNT_STORE(txq, skipcontext, 0);
7715 struct wm_txqueue *txq = &wmq->wmq_txq;
7718 mutex_enter(txq->txq_lock);
7719 txq->txq_sending = false; /* Ensure watchdog disabled */
7720 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
7721 txs = &txq->txq_soft[i];
7729 while ((m = pcq_get(txq->txq_interq)) != NULL)
7731 mutex_exit(txq->txq_lock);
7778 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7780 mutex_enter(txq->txq_lock);
7782 if (txq->txq_stopping)
7785 if (txq->txq_fifo_stall) {
7796 CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
7797 CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
7798 CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
7799 CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
7803 txq->txq_fifo_head = 0;
7804 txq->txq_fifo_stall = 0;
7816 mutex_exit(txq->txq_lock);
7837 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7838 int space = txq->txq_fifo_size - txq->txq_fifo_head;
7842 if (txq->txq_fifo_stall)
7851 txq->txq_fifo_stall = 1;
7857 txq->txq_fifo_head += len;
7858 if (txq->txq_fifo_head >= txq->txq_fifo_size)
7859 txq->txq_fifo_head -= txq->txq_fifo_size;
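
These fragments implement the 82547 Tx FIFO workaround: the FIFO is modeled as a circular byte counter, and a packet that would wrap the hardware FIFO boundary must not be queued while earlier packets are still draining. A standalone model under stated assumptions (the admission test is partly elided above, and the hw_idle flag stands in for the hardware-register comparison the driver actually performs):

#include <assert.h>
#include <stdbool.h>

struct txfifo { int head, size; bool stall; };

/* Admit a packet of len bytes, or stall until the FIFO drains. */
static bool
txfifo_admit(struct txfifo *f, int len, bool hw_idle)
{
	if (f->stall)
		return false;
	if (len >= f->size - f->head) {
		if (!hw_idle) {		/* would wrap while busy: stall */
			f->stall = true;
			return false;
		}
		f->head = 0;		/* FIFO drained: restart at 0 */
	}
	f->head += len;
	if (f->head >= f->size)		/* defensive wrap of the counter */
		f->head -= f->size;
	return true;
}

int
main(void)
{
	struct txfifo f = { .head = 0, .size = 4096, .stall = false };

	assert(txfifo_admit(&f, 1500, false));	/* fits; head = 1500 */
	assert(!txfifo_admit(&f, 4000, false));	/* would wrap: stalled */
	return 0;
}
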
7865 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7878 WM_NTXDESC(txq) = WM_NTXDESC_82542;
7880 WM_NTXDESC(txq) = WM_NTXDESC_82544;
7882 txq->txq_descsize = sizeof(nq_txdesc_t);
7884 txq->txq_descsize = sizeof(wiseman_txdesc_t);
7886 if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
7887 PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
7888 1, &txq->txq_desc_rseg, 0)) != 0) {
7895 if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
7896 txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
7897 (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
7903 if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
7904 WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
7911 if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
7912 txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
7922 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7924 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7925 WM_TXDESCS_SIZE(txq));
7927 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
7933 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
7936 bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
7937 bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
7938 bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
7939 WM_TXDESCS_SIZE(txq));
7940 bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
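
wm_alloc_tx_descs() is the canonical bus_dma(9) four-step sequence, and its error unwind releases resources in exactly the reverse order, as wm_free_tx_descs() does unconditionally. A schematic of the life cycle (names taken from the calls visible above):

/*
 * Setup, each step consuming the previous one's output:
 *
 *	bus_dmamem_alloc()  -> DMA-safe pages        (txq_desc_seg)
 *	bus_dmamem_map()    -> kernel virtual window (txq_descs_u)
 *	bus_dmamap_create() -> a DMA map object      (txq_desc_dmamap)
 *	bus_dmamap_load()   -> device-visible addrs  (txq_desc_dma)
 *
 * Teardown, mirrored in wm_free_tx_descs():
 *
 *	bus_dmamap_unload(); bus_dmamap_destroy();
 *	bus_dmamem_unmap();  bus_dmamem_free();
 */
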
8025 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
8030 WM_TXQUEUELEN(txq) =
8033 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
8036 &txq->txq_soft[i].txs_dmamap)) != 0) {
8047 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
8048 if (txq->txq_soft[i].txs_dmamap != NULL)
8050 txq->txq_soft[i].txs_dmamap);
8056 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
8060 for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
8061 if (txq->txq_soft[i].txs_dmamap != NULL)
8063 txq->txq_soft[i].txs_dmamap);
8133 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
8134 txq->txq_sc = sc;
8135 txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
8137 error = wm_alloc_tx_descs(sc, txq);
8140 error = wm_alloc_tx_buffer(sc, txq);
8142 wm_free_tx_descs(sc, txq);
8145 txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
8146 if (txq->txq_interq == NULL) {
8147 wm_free_tx_descs(sc, txq);
8148 wm_free_tx_buffer(sc, txq);
8156 WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
8157 WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
8158 WM_Q_MISC_EVCNT_ATTACH(txq, fifo_stall, txq, i, xname);
8159 WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
8160 WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
8161 WM_Q_MISC_EVCNT_ATTACH(txq, ipsum, txq, i, xname);
8162 WM_Q_MISC_EVCNT_ATTACH(txq, tusum, txq, i, xname);
8163 WM_Q_MISC_EVCNT_ATTACH(txq, tusum6, txq, i, xname);
8164 WM_Q_MISC_EVCNT_ATTACH(txq, tso, txq, i, xname);
8165 WM_Q_MISC_EVCNT_ATTACH(txq, tso6, txq, i, xname);
8166 WM_Q_MISC_EVCNT_ATTACH(txq, tsopain, txq, i, xname);
8169 snprintf(txq->txq_txseg_evcnt_names[j],
8170 sizeof(txq->txq_txseg_evcnt_names[j]),
8171 "txq%02dtxseg%d", i, j);
8172 evcnt_attach_dynamic(&txq->txq_ev_txseg[j],
8174 NULL, xname, txq->txq_txseg_evcnt_names[j]);
8177 WM_Q_MISC_EVCNT_ATTACH(txq, pcqdrop, txq, i, xname);
8178 WM_Q_MISC_EVCNT_ATTACH(txq, descdrop, txq, i, xname);
8179 WM_Q_MISC_EVCNT_ATTACH(txq, toomanyseg, txq, i, xname);
8180 WM_Q_MISC_EVCNT_ATTACH(txq, defrag, txq, i, xname);
8183 WM_Q_MISC_EVCNT_ATTACH(txq, underrun, txq, i, xname);
8184 WM_Q_MISC_EVCNT_ATTACH(txq, skipcontext, txq, i, xname);
8241 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
8242 pcq_destroy(txq->txq_interq);
8243 wm_free_tx_buffer(sc, txq);
8244 wm_free_tx_descs(sc, txq);
8245 if (txq->txq_lock)
8246 mutex_obj_free(txq->txq_lock);
8283 struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
8288 WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
8289 WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
8290 WM_Q_EVCNT_DETACH(txq, fifo_stall, txq, i);
8291 WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
8292 WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
8293 WM_Q_EVCNT_DETACH(txq, ipsum, txq, i);
8294 WM_Q_EVCNT_DETACH(txq, tusum, txq, i);
8295 WM_Q_EVCNT_DETACH(txq, tusum6, txq, i);
8296 WM_Q_EVCNT_DETACH(txq, tso, txq, i);
8297 WM_Q_EVCNT_DETACH(txq, tso6, txq, i);
8298 WM_Q_EVCNT_DETACH(txq, tsopain, txq, i);
8301 evcnt_detach(&txq->txq_ev_txseg[j]);
8303 WM_Q_EVCNT_DETACH(txq, pcqdrop, txq, i);
8304 WM_Q_EVCNT_DETACH(txq, descdrop, txq, i);
8305 WM_Q_EVCNT_DETACH(txq, toomanyseg, txq, i);
8306 WM_Q_EVCNT_DETACH(txq, defrag, txq, i);
8308 WM_Q_EVCNT_DETACH(txq, underrun, txq, i);
8309 WM_Q_EVCNT_DETACH(txq, skipcontext, txq, i);
8313 while ((m = pcq_get(txq->txq_interq)) != NULL)
8315 pcq_destroy(txq->txq_interq);
8317 wm_free_tx_buffer(sc, txq);
8318 wm_free_tx_descs(sc, txq);
8319 if (txq->txq_lock)
8320 mutex_obj_free(txq->txq_lock);
8327 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
8330 KASSERT(mutex_owned(txq->txq_lock));
8333 memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
8334 wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
8336 txq->txq_free = WM_NTXDESC(txq);
8337 txq->txq_next = 0;
8342 struct wm_txqueue *txq)
8347 KASSERT(mutex_owned(txq->txq_lock));
8350 CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
8351 CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
8352 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
8359 CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
8360 CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
8361 CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
8388 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
8392 KASSERT(mutex_owned(txq->txq_lock));
8395 for (i = 0; i < WM_TXQUEUELEN(txq); i++)
8396 txq->txq_soft[i].txs_mbuf = NULL;
8397 txq->txq_sfree = WM_TXQUEUELEN(txq);
8398 txq->txq_snext = 0;
8399 txq->txq_sdirty = 0;
8404 struct wm_txqueue *txq)
8407 KASSERT(mutex_owned(txq->txq_lock));
8414 txq->txq_tdt_reg = WMREG_OLD_TDT;
8416 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
8418 wm_init_tx_descs(sc, txq);
8419 wm_init_tx_regs(sc, wmq, txq);
8420 wm_init_tx_buffer(sc, txq);
8423 txq->txq_flags &= WM_TXQ_LINKDOWN_DISCARD; /* keep only the discard bit */
8425 txq->txq_sending = false;
8576 struct wm_txqueue *txq = &wmq->wmq_txq;
8592 mutex_enter(txq->txq_lock);
8593 wm_init_tx_queue(sc, wmq, txq);
8594 mutex_exit(txq->txq_lock);
8613 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
8642 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
8643 txq->txq_last_hw_ipcs = 0;
8644 txq->txq_last_hw_tucs = 0;
8676 WM_Q_EVCNT_INCR(txq, tsopain);
8732 WM_Q_EVCNT_INCR(txq, tso);
8735 WM_Q_EVCNT_INCR(txq, tso6);
8755 WM_Q_EVCNT_INCR(txq, ipsum);
8763 WM_Q_EVCNT_INCR(txq, tusum);
8771 WM_Q_EVCNT_INCR(txq, tusum6);
8813 if (txq->txq_last_hw_cmd == cmd &&
8814 txq->txq_last_hw_fields == fields &&
8815 txq->txq_last_hw_ipcs == (ipcs & 0xffff) &&
8816 txq->txq_last_hw_tucs == (tucs & 0xffff)) {
8817 WM_Q_EVCNT_INCR(txq, skipcontext);
8822 txq->txq_last_hw_cmd = cmd;
8823 txq->txq_last_hw_fields = fields;
8824 txq->txq_last_hw_ipcs = (ipcs & 0xffff);
8825 txq->txq_last_hw_tucs = (tucs & 0xffff);
8830 &txq->txq_descs[txq->txq_next];
8835 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
8837 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
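
For the legacy descriptor format a checksum context occupies its own ring slot, so caching the last cmd/fields/ipcs/tucs values and skipping an identical context saves one descriptor per packet (counted by skipcontext). A minimal model of the comparison (standalone C; the field widths are assumptions):

#include <stdbool.h>
#include <stdint.h>

struct hw_ctx { uint32_t cmd; uint8_t fields; uint16_t ipcs, tucs; };

/* True if the context already programmed into the MAC can be reused. */
static bool
ctx_reusable(const struct hw_ctx *last, const struct hw_ctx *want)
{
	return last->cmd == want->cmd && last->fields == want->fields &&
	    last->ipcs == want->ipcs && last->tucs == want->tucs;
}

int
main(void)
{
	struct hw_ctx a = { 0x10, 0x3, 0x1111, 0x2222 }, b = a;

	return ctx_reusable(&a, &b) ? 0 : 1;	/* identical: skip rewrite */
}
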
8856 wm_linkdown_discard(struct wm_txqueue *txq)
8859 if ((txq->txq_flags & WM_TXQ_LINKDOWN_DISCARD) != 0)
8874 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8881 mutex_enter(txq->txq_lock);
8882 if (!txq->txq_stopping)
8884 mutex_exit(txq->txq_lock);
8891 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
8893 wm_send_common_locked(ifp, txq, false);
8901 struct wm_txqueue *txq;
8904 txq = &sc->sc_queue[qid].wmq_txq;
8906 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
8908 WM_Q_EVCNT_INCR(txq, pcqdrop);
8918 if (mutex_tryenter(txq->txq_lock)) {
8919 if (!txq->txq_stopping)
8920 wm_transmit_locked(ifp, txq);
8921 mutex_exit(txq->txq_lock);
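
wm_transmit() never blocks on the Tx path: the mbuf is first published through the lock-free pcq, and the queue is drained only if the lock is immediately available; otherwise the current lock holder will pick the packet up. A condensed sketch of the pattern (identifiers as in the listing; the ENOBUFS return and m_freem() call are assumptions from common ifnet conventions):

static int
wm_transmit_sketch(struct ifnet *ifp, struct wm_txqueue *txq, struct mbuf *m)
{
	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
		m_freem(m);			/* full: counted as pcqdrop */
		return ENOBUFS;
	}
	if (mutex_tryenter(txq->txq_lock)) {	/* drain only if uncontended */
		if (!txq->txq_stopping)
			wm_transmit_locked(ifp, txq);
		mutex_exit(txq->txq_lock);
	}
	return 0;
}
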
8928 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
8931 wm_send_common_locked(ifp, txq, true);
8935 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
8949 KASSERT(mutex_owned(txq->txq_lock));
8950 KASSERT(!txq->txq_stopping);
8952 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
8955 if (__predict_false(wm_linkdown_discard(txq))) {
8958 m0 = pcq_get(txq->txq_interq);
8974 ofree = txq->txq_free;
8985 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
8986 wm_txeof(txq, UINT_MAX);
8987 if (txq->txq_sfree == 0) {
8991 WM_Q_EVCNT_INCR(txq, txsstall);
8998 m0 = pcq_get(txq->txq_interq);
9008 txs = &txq->txq_soft[txq->txq_snext];
9047 WM_Q_EVCNT_INCR(txq, defrag);
9052 WM_Q_EVCNT_INCR(txq, toomanyseg);
9080 if (segs_needed > txq->txq_free - 2) {
9091 segs_needed, txq->txq_free - 1));
9092 txq->txq_flags |= WM_TXQ_NO_SPACE;
9094 WM_Q_EVCNT_INCR(txq, txdstall);
9108 txq->txq_flags |= WM_TXQ_NO_SPACE;
9110 WM_Q_EVCNT_INCR(txq, fifo_stall);
9120 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
9132 txs->txs_firstdesc = txq->txq_next;
9140 wm_tx_offload(sc, txq, txs, &cksumcmd, &cksumfields);
9142 txq->txq_last_hw_cmd = txq->txq_last_hw_fields = 0;
9143 txq->txq_last_hw_ipcs = txq->txq_last_hw_tucs = 0;
9155 for (nexttx = txq->txq_next, seg = 0;
9161 nexttx = WM_NEXTTX(txq, nexttx)) {
9175 &txq->txq_descs[nexttx].wtx_addr, curaddr);
9176 txq->txq_descs[nexttx].wtx_cmdlen
9178 txq->txq_descs[nexttx].wtx_fields.wtxu_status
9180 txq->txq_descs[nexttx].wtx_fields.wtxu_options
9182 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
9200 txq->txq_descs[lasttx].wtx_cmdlen |=
9210 txq->txq_descs[lasttx].wtx_cmdlen |=
9212 txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
9221 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
9224 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
9228 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
9235 device_xname(sc->sc_dev), txq->txq_snext));
9238 txq->txq_free -= txs->txs_ndesc;
9239 txq->txq_next = nexttx;
9241 txq->txq_sfree--;
9242 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
9249 txq->txq_flags |= WM_TXQ_NO_SPACE;
9250 WM_Q_EVCNT_INCR(txq, descdrop);
9256 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
9258 txq->txq_flags |= WM_TXQ_NO_SPACE;
9261 if (txq->txq_free != ofree) {
9263 txq->txq_lastsent = time_uptime;
9264 txq->txq_sending = true;
9275 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
9344 WM_Q_EVCNT_INCR(txq, tsopain);
9402 WM_Q_EVCNT_INCR(txq, tso);
9405 WM_Q_EVCNT_INCR(txq, tso6);
9426 WM_Q_EVCNT_INCR(txq, tusum);
9437 WM_Q_EVCNT_INCR(txq, tusum6);
9456 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_vl_len =
9458 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_sn = 0;
9459 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_cmd =
9461 txq->txq_nq_descs[txq->txq_next].nqtx_ctx.nqtxc_mssidx =
9463 wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
9466 txq->txq_next, 0, vl_len));
9468 txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
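
On NEWQUEUE hardware the offload parameters travel in an advanced context descriptor instead: nqtxc_vl_len packs the VLAN tag and header lengths, and nqtxc_mssidx carries the TSO MSS. A hypothetical packing model (the bit layout vlan[31:16] | maclen[15:9] | iplen[8:0] is an assumption, not quoted from the driver headers):

#include <assert.h>
#include <stdint.h>

/* Hypothetical packing of a context descriptor's vl_len word. */
static uint32_t
pack_vl_len(uint32_t vlan, uint32_t maclen, uint32_t iplen)
{
	return (vlan << 16) | (maclen << 9) | iplen;
}

int
main(void)
{
	/* 14-byte Ethernet header, 20-byte IPv4 header, no VLAN tag. */
	assert(pack_vl_len(0, 14, 20) == ((14u << 9) | 20u));
	return 0;
}
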
9481 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
9488 mutex_enter(txq->txq_lock);
9489 if (!txq->txq_stopping)
9491 mutex_exit(txq->txq_lock);
9498 struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
9500 wm_nq_send_common_locked(ifp, txq, false);
9508 struct wm_txqueue *txq;
9511 txq = &sc->sc_queue[qid].wmq_txq;
9513 if (__predict_false(!pcq_put(txq->txq_interq, m))) {
9515 WM_Q_EVCNT_INCR(txq, pcqdrop);
9530 * In the case of (1), the last packet enqueued to txq->txq_interq is
9532 * In the case of (2), the last packet enqueued to txq->txq_interq is
9536 if (mutex_tryenter(txq->txq_lock)) {
9537 if (!txq->txq_stopping)
9538 wm_nq_transmit_locked(ifp, txq);
9539 mutex_exit(txq->txq_lock);
9546 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
9549 wm_nq_send_common_locked(ifp, txq, true);
9553 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
9564 KASSERT(mutex_owned(txq->txq_lock));
9565 KASSERT(!txq->txq_stopping);
9567 if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
9570 if (__predict_false(wm_linkdown_discard(txq))) {
9573 m0 = pcq_get(txq->txq_interq);
9599 if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
9600 wm_txeof(txq, UINT_MAX);
9601 if (txq->txq_sfree == 0) {
9605 WM_Q_EVCNT_INCR(txq, txsstall);
9612 m0 = pcq_get(txq->txq_interq);
9622 txs = &txq->txq_soft[txq->txq_snext];
9644 WM_Q_EVCNT_INCR(txq, defrag);
9649 WM_Q_EVCNT_INCR(txq, toomanyseg);
9673 if (segs_needed > txq->txq_free - 2) {
9684 segs_needed, txq->txq_free - 1));
9685 txq->txq_flags |= WM_TXQ_NO_SPACE;
9687 WM_Q_EVCNT_INCR(txq, txdstall);
9697 WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
9709 txs->txs_firstdesc = txq->txq_next;
9718 wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
9731 nexttx = txq->txq_next;
9734 wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
9736 txq->txq_descs[nexttx].wtx_cmdlen =
9738 txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
9739 txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
9741 txq->txq_descs[nexttx].wtx_cmdlen |=
9743 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
9746 txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
9751 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9754 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9756 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
9769 nexttx = WM_NEXTTX(txq, nexttx);
9775 seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
9776 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
9778 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
9781 txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
9800 txq->txq_descs[lasttx].wtx_cmdlen |=
9807 lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
9810 wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
9814 CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
9822 device_xname(sc->sc_dev), txq->txq_snext));
9825 txq->txq_free -= txs->txs_ndesc;
9826 txq->txq_next = nexttx;
9828 txq->txq_sfree--;
9829 txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
9836 txq->txq_flags |= WM_TXQ_NO_SPACE;
9837 WM_Q_EVCNT_INCR(txq, descdrop);
9843 if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
9845 txq->txq_flags |= WM_TXQ_NO_SPACE;
9850 txq->txq_lastsent = time_uptime;
9851 txq->txq_sending = true;
9856 wm_deferred_start_locked(struct wm_txqueue *txq)
9858 struct wm_softc *sc = txq->txq_sc;
9860 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
9863 KASSERT(mutex_owned(txq->txq_lock));
9864 KASSERT(!txq->txq_stopping);
9870 wm_nq_transmit_locked(ifp, txq);
9875 wm_transmit_locked(ifp, txq);
9887 wm_txeof(struct wm_txqueue *txq, u_int limit)
9889 struct wm_softc *sc = txq->txq_sc;
9897 KASSERT(mutex_owned(txq->txq_lock));
9899 if (txq->txq_stopping)
9902 txq->txq_flags &= ~WM_TXQ_NO_SPACE;
9908 for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
9909 i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
9910 txs = &txq->txq_soft[i];
9915 wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
9919 txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
9921 wm_cdtxsync(txq, txs->txs_lastdesc, 1,
9942 WM_Q_EVCNT_INCR(txq, underrun);
9972 txq->txq_packets++;
9973 txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
9975 txq->txq_free += txs->txs_ndesc;
9984 txq->txq_sdirty = i;
9995 if (txq->txq_sfree == WM_TXQUEUELEN(txq))
9996 txq->txq_sending = false;
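
wm_txeof() reclaims from txq_sdirty forward, syncing each job's descriptors POSTREAD and stopping at the first job whose last descriptor the MAC has not completed; the limit argument (elided above) bounds the work done per call. A condensed sketch of the loop's shape (the WTX_ST_DD "descriptor done" test is an assumption; the listing elides the status check):

for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
    i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
	txs = &txq->txq_soft[i];
	wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
	    BUS_DMASYNC_POSTREAD);
	status = txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
	if ((status & WTX_ST_DD) == 0) {
		/* Not done: hand the descriptor back to the MAC unread. */
		wm_cdtxsync(txq, txs->txs_lastdesc, 1, BUS_DMASYNC_PREREAD);
		break;
	}
	txq->txq_free += txs->txs_ndesc;	/* slots reusable again */
	/* ...unload the dmamap, free txs_mbuf, count packets/bytes... */
}
txq->txq_sdirty = i;				/* resume point next time */
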
10905 struct wm_txqueue *txq = &wmq->wmq_txq;
10921 mutex_enter(txq->txq_lock);
10923 if (txq->txq_stopping) {
10924 mutex_exit(txq->txq_lock);
10933 WM_Q_EVCNT_INCR(txq, txdw);
10937 more |= wm_txeof(txq, txlimit);
10942 mutex_exit(txq->txq_lock);
11059 struct wm_txqueue *txq = &wmq->wmq_txq;
11061 struct wm_softc *sc = txq->txq_sc;
11074 mutex_enter(txq->txq_lock);
11076 if (txq->txq_stopping) {
11077 mutex_exit(txq->txq_lock);
11081 WM_Q_EVCNT_INCR(txq, txdw);
11083 txmore = wm_txeof(txq, txlimit);
11087 mutex_exit(txq->txq_lock);
11120 struct wm_txqueue *txq = &wmq->wmq_txq;
11122 struct wm_softc *sc = txq->txq_sc;
11128 mutex_enter(txq->txq_lock);
11129 if (txq->txq_stopping) {
11130 mutex_exit(txq->txq_lock);
11133 txmore = wm_txeof(txq, txlimit);
11134 wm_deferred_start_locked(txq);
11135 mutex_exit(txq->txq_lock);
18257 struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
18258 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
18259 struct wm_softc *sc = txq->txq_sc;
18271 struct wm_txqueue *txq = (struct wm_txqueue *)node.sysctl_data;
18272 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
18273 struct wm_softc *sc = txq->txq_sc;