Lines Matching defs:wmq

3880 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3891 *hang |= __BIT(wmq->wmq_id);
6178 struct wm_queue *wmq = &sc->sc_queue[qidx];
6180 wmq->wmq_id = qidx;
6181 wmq->wmq_intr_idx = intr_idx;
6182 wmq->wmq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
6183 wm_handle_queue, wmq);
6184 if (wmq->wmq_si != NULL)
6188 wmq->wmq_id);
6189 pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
6190 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6269 struct wm_queue *wmq = &sc->sc_queue[qidx];
6280 IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
6337 struct wm_queue *wmq = &sc->sc_queue[qidx];
6338 pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
6339 sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
6398 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
6401 if (!wmq->wmq_set_itr)
6405 uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
6412 eitr |= __SHIFTIN(wmq->wmq_itr,
6417 CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
6423 CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
6424 wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
6426 KASSERT(wmq->wmq_id == 0);
6427 CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
6430 wmq->wmq_set_itr = false;
6444 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
6447 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6448 struct wm_txqueue *txq = &wmq->wmq_txq;
6482 if (new_itr != wmq->wmq_itr) {
6483 wmq->wmq_itr = new_itr;
6484 wmq->wmq_set_itr = true;
6486 wmq->wmq_set_itr = false;
6521 struct wm_queue *wmq = &sc->sc_queue[i];
6522 struct wm_txqueue *txq = &wmq->wmq_txq;
6523 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
7272 struct wm_queue *wmq;
7283 wmq = &sc->sc_queue[i];
7284 CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
7285 EITR_TX_QUEUE(wmq->wmq_id)
7286 | EITR_RX_QUEUE(wmq->wmq_id));
7310 wmq = &sc->sc_queue[i];
7311 qid = wmq->wmq_id;
7312 qintr_idx = wmq->wmq_intr_idx;
7336 wmq = &sc->sc_queue[i];
7337 qid = wmq->wmq_id;
7338 qintr_idx = wmq->wmq_intr_idx;
7355 wmq = &sc->sc_queue[i];
7356 qid = wmq->wmq_id;
7357 qintr_idx = wmq->wmq_intr_idx;
7410 struct wm_queue *wmq;
7416 wmq = &sc->sc_queue[i];
7417 mask |= ICR_TXQ(wmq->wmq_id);
7418 mask |= ICR_RXQ(wmq->wmq_id);
7428 wmq = &sc->sc_queue[i];
7429 mask |= EITR_TX_QUEUE(wmq->wmq_id);
7430 mask |= EITR_RX_QUEUE(wmq->wmq_id);
7436 wmq = &sc->sc_queue[i];
7437 mask |= 1 << wmq->wmq_intr_idx;
7460 struct wm_queue *wmq = &sc->sc_queue[qidx];
7461 wm_itrs_writereg(sc, wmq);
7714 struct wm_queue *wmq = &sc->sc_queue[qidx];
7715 struct wm_txqueue *txq = &wmq->wmq_txq;
8341 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
8357 int qid = wmq->wmq_id;
8374 CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
8377 CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
8403 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
8416 txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
8419 wm_init_tx_regs(sc, wmq, txq);
8429 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
8455 int qid = wmq->wmq_id;
8494 (wmq->wmq_itr / 4) | RDTR_FPD);
8496 CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
8543 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
8556 rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
8558 wm_init_rx_regs(sc, wmq, rxq);
8575 struct wm_queue *wmq = &sc->sc_queue[i];
8576 struct wm_txqueue *txq = &wmq->wmq_txq;
8577 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8587 wmq->wmq_itr = 50;
8589 wmq->wmq_itr = sc->sc_itr_init;
8590 wmq->wmq_set_itr = true;
8593 wm_init_tx_queue(sc, wmq, txq);
8597 error = wm_init_rx_queue(sc, wmq, rxq);
9860 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
9861 int qid = wmq->wmq_id;
10867 wm_sched_handle_queue(struct wm_softc *sc, struct wm_queue *wmq)
10870 if (wmq->wmq_txrx_use_workqueue) {
10871 if (!wmq->wmq_wq_enqueued) {
10872 wmq->wmq_wq_enqueued = true;
10873 workqueue_enqueue(sc->sc_queue_wq, &wmq->wmq_cookie,
10877 softint_schedule(wmq->wmq_si);
10904 struct wm_queue *wmq = &sc->sc_queue[0];
10905 struct wm_txqueue *txq = &wmq->wmq_txq;
10906 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
11000 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
11001 wm_sched_handle_queue(sc, wmq);
11008 wm_txrxintr_disable(struct wm_queue *wmq)
11010 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
11019 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
11022 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
11024 CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
11028 wm_txrxintr_enable(struct wm_queue *wmq)
11030 struct wm_softc *sc = wmq->wmq_txq.txq_sc;
11032 wm_itrs_calculate(sc, wmq);
11043 * while each wm_handle_queue(wmq) is running.
11047 ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
11050 EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
11052 CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
11058 struct wm_queue *wmq = arg;
11059 struct wm_txqueue *txq = &wmq->wmq_txq;
11060 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
11067 KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
11072 wm_txrxintr_disable(wmq);
11105 wm_itrs_writereg(sc, wmq);
11108 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
11109 wm_sched_handle_queue(sc, wmq);
11111 wm_txrxintr_enable(wmq);
11119 struct wm_queue *wmq = arg;
11120 struct wm_txqueue *txq = &wmq->wmq_txq;
11121 struct wm_rxqueue *rxq = &wmq->wmq_rxq;
11147 wmq->wmq_txrx_use_workqueue = sc->sc_txrx_use_workqueue;
11148 wm_sched_handle_queue(sc, wmq);
11150 wm_txrxintr_enable(wmq);
11156 struct wm_queue *wmq = container_of(wk, struct wm_queue, wmq_cookie);
11162 wmq->wmq_wq_enqueued = false;
11163 wm_handle_queue(wmq);
18258 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
18262 reg = CSR_READ(sc, WMREG_TDH(wmq->wmq_id));
18272 struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
18276 reg = CSR_READ(sc, WMREG_TDT(wmq->wmq_id));