Lines matching defs:iq in the igc(4) driver. In most hits `iq` is the per-queue loop index; in the interrupt, softint, and workqueue paths (source lines 1297 onward) it is instead a struct igc_queue pointer.
658 for (int iq = 0; iq < sc->sc_nqueues; iq++, txconf++) {
659 struct tx_ring *txr = &sc->tx_rings[iq];
665 txr->txr_igcq = &sc->queues[iq];
666 txr->me = iq;
683 for (int iq = 0; iq < sc->sc_nqueues; iq++, rxconf++) {
684 struct rx_ring *rxr = &sc->rx_rings[iq];
690 rxr->rxr_igcq = &sc->queues[iq];
691 rxr->me = iq;
713 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
714 struct igc_queue *q = &sc->queues[iq];
717 q->txr = &sc->tx_rings[iq];
718 q->rxr = &sc->rx_rings[iq];
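The three allocation loops above (source lines 658-718) build one tx_ring and one rx_ring per queue and cross-link them with the matching igc_queue. A minimal sketch of that wiring, using only field names visible in the matches (the q->sc back pointer is inferred from the iq->sc uses further down):

    /* Per-queue wiring, reconstructed from the matches above. */
    for (int iq = 0; iq < sc->sc_nqueues; iq++) {
            struct igc_queue *q = &sc->queues[iq];
            struct tx_ring *txr = &sc->tx_rings[iq];
            struct rx_ring *rxr = &sc->rx_rings[iq];

            txr->txr_igcq = q;      /* ring -> queue back pointer */
            txr->me = iq;           /* each ring records its own index */
            rxr->rxr_igcq = q;
            rxr->me = iq;

            q->sc = sc;             /* inferred from iq->sc at line 2758 */
            q->txr = txr;           /* queue -> rings */
            q->rxr = rxr;
    }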
769 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
770 struct rx_ring *rxr = &sc->rx_rings[iq];
776 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
777 struct tx_ring *txr = &sc->tx_rings[iq];
990 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
991 struct igc_queue *q = &sc->queues[iq];
999 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1000 struct igc_queue *q = &sc->queues[iq];
1003 "%s q%d", device_xname(sc->sc_dev), iq);
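The loops at source lines 990-1003 name each queue after the device ("igc0 q0", "igc0 q1", ...); given the IGC_QUEUE_EVENT() uses further down, the name is almost certainly the evcnt(9) group for per-queue counters. A hedged sketch; the igcq_queue_name and igcq_irqs fields are assumptions:

    for (int iq = 0; iq < sc->sc_nqueues; iq++) {
            struct igc_queue *q = &sc->queues[iq];

            /* "igc0 q0", "igc0 q1", ... (format from line 1003) */
            snprintf(q->igcq_queue_name, sizeof(q->igcq_queue_name),
                "%s q%d", device_xname(sc->sc_dev), iq);
            evcnt_attach_dynamic(&q->igcq_irqs, EVCNT_TYPE_INTR, NULL,
                q->igcq_queue_name, "irqs");
    }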
1043 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1044 struct igc_queue *q = &sc->queues[iq];
1057 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1058 struct igc_queue *q = &sc->queues[iq];
1103 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1104 struct igc_queue *q = &sc->queues[iq];
1134 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1138 val = IGC_READ_REG(hw, IGC_RQDPC(iq));
1142 IGC_WRITE_REG(hw, IGC_RQDPC(iq), 0);
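RQDPC is the per-queue receive drop counter; the loop at 1134-1142 reads it and clears it by writing zero so the next read starts fresh. A sketch of the accumulation, assuming the total feeds an if_iqdrops-style statistic:

    uint64_t iqdrops = 0;
    for (int iq = 0; iq < sc->sc_nqueues; iq++) {
            /* Receive Queue Drop Packet Count, cleared by writing 0. */
            uint32_t val = IGC_READ_REG(hw, IGC_RQDPC(iq));
            if (val != 0) {
                    iqdrops += val;
                    IGC_WRITE_REG(hw, IGC_RQDPC(iq), 0);
            }
    }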
1161 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1162 struct igc_queue *q = &sc->queues[iq];
1172 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1173 struct igc_queue *q = &sc->queues[iq];
1200 int iq, error;
1202 for (iq = 0, intrs = sc->sc_intrs, ihs = sc->sc_ihs;
1203 iq < sc->sc_nqueues; iq++, intrs++, ihs++) {
1204 struct igc_queue *q = &sc->queues[iq];
1207 device_xname(dev), iq);
1224 kcpuset_set(affinity, iq % ncpu);
1241 q->msix = iq;
1242 q->eims = 1 << iq;
1267 sc->linkvec = iq;
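Source lines 1200-1267 establish one MSI-X vector per queue, pin it round-robin across the CPUs, and record the vector's EIMS bit; because `iq` survives the loop, the link interrupt simply takes the next vector (line 1267). A sketch under those assumptions (pc is the pci chipset tag; error unwinding, the printed intrstr, and the exact vector name format are omitted or assumed):

    kcpuset_t *affinity;
    int iq;

    kcpuset_create(&affinity, true);
    for (iq = 0; iq < sc->sc_nqueues; iq++) {
            struct igc_queue *q = &sc->queues[iq];
            char xname[32];

            snprintf(xname, sizeof(xname), "%s q%d",
                device_xname(dev), iq);     /* name format assumed */
            sc->sc_ihs[iq] = pci_intr_establish_xname(pc,
                sc->sc_intrs[iq], IPL_NET, igc_intr_queue, q, xname);

            /* Spread the queue interrupts over the CPUs. */
            kcpuset_zero(affinity);
            kcpuset_set(affinity, iq % ncpu);
            interrupt_distribute(sc->sc_ihs[iq], affinity, NULL);

            q->msix = iq;           /* this queue's vector... */
            q->eims = 1 << iq;      /* ...and its EIMS/EICR bit */
    }
    kcpuset_destroy(affinity);

    sc->linkvec = iq;               /* link interrupt takes the next vector */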
1297 struct igc_queue *iq = sc->queues;
1298 iq->igcq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
1299 igc_handle_queue, iq);
1300 if (iq->igcq_si == NULL) {
1346 struct igc_queue *iq = sc->queues;
1347 iq->igcq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
1348 igc_handle_queue, iq);
1349 if (iq->igcq_si == NULL) {
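The identical block at 1297-1299 and 1346-1348 appears once in each single-vector attach path (presumably MSI and INTx), where sc->queues points at the one and only queue; note `iq` here is a struct igc_queue pointer, not an index. Since igc_sched_handle_queue() below can also dispatch to a workqueue, attach presumably creates sc_queue_wq as well; a sketch, with the workqueue name and arguments assumed:

    struct igc_queue *iq = sc->queues;
    int error;

    iq->igcq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
        igc_handle_queue, iq);
    if (iq->igcq_si == NULL)
            goto fail;              /* unwinding not shown */

    error = workqueue_create(&sc->sc_queue_wq, "igcwq", /* name assumed */
        igc_handle_queue_work, sc, PRI_SOFTNET, IPL_NET,
        WQ_PERCPU | WQ_MPSAFE);
    if (error)
            goto fail;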
1564 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1565 struct rx_ring *rxr = &sc->rx_rings[iq];
1914 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1915 struct tx_ring *txr = &sc->tx_rings[iq];
1920 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1921 struct rx_ring *rxr = &sc->rx_rings[iq];
2014 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
2015 struct rx_ring *rxr = &sc->rx_rings[iq];
2017 ifr[iq].ifr_size = MCLBYTES;
2018 snprintf(ifr[iq].ifr_name, sizeof(ifr[iq].ifr_name), "%d", iq);
2019 ifr[iq].ifr_info = rxr->rx_ring;
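Lines 2014-2019 fill a per-ring report: buffer size (MCLBYTES), a name that is just the queue index, and the ring's fill state. This mirrors the SIOCGIFRXR/if_rxring_info code in the OpenBSD driver this one derives from; a sketch, with the array allocation assumed:

    struct if_rxring_info *ifr =
        kmem_zalloc(sc->sc_nqueues * sizeof(*ifr), KM_SLEEP);

    for (int iq = 0; iq < sc->sc_nqueues; iq++) {
            struct rx_ring *rxr = &sc->rx_rings[iq];

            ifr[iq].ifr_size = MCLBYTES;    /* rx buffers are mbuf clusters */
            snprintf(ifr[iq].ifr_name, sizeof(ifr[iq].ifr_name), "%d", iq);
            ifr[iq].ifr_info = rxr->rx_ring; /* per-ring if_rxring state */
    }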
2644 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
2645 struct igc_queue *q = &sc->queues[iq];
2648 igc_set_queues(sc, iq, q->msix, 0);
2650 igc_set_queues(sc, iq, q->msix, 1);
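At 2644-2650 each queue's rx and tx interrupt causes are both routed to that queue's MSI-X vector; judging by the igb-family IVAR convention, the last argument to igc_set_queues() selects which half of the queue's IVAR entry is written (0 for rx, 1 for tx). Sketch:

    for (int iq = 0; iq < sc->sc_nqueues; iq++) {
            struct igc_queue *q = &sc->queues[iq];

            igc_set_queues(sc, iq, q->msix, 0);     /* rx cause -> vector */
            igc_set_queues(sc, iq, q->msix, 1);     /* tx cause -> vector */
    }

Routing both causes of a queue to one vector keeps that queue's tx and rx processing on a single CPU.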
2757 struct igc_queue *iq = arg;
2758 struct igc_softc *sc = iq->sc;
2760 struct rx_ring *rxr = iq->rxr;
2761 struct tx_ring *txr = iq->txr;
2766 IGC_QUEUE_EVENT(iq, irqs, 1);
2779 IGC_QUEUE_EVENT(iq, req, 1);
2780 igc_sched_handle_queue(sc, iq);
2782 igc_enable_queue(sc, iq->eims);
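igc_intr_queue() (2757-2782) is the per-queue MSI-X handler: count the interrupt, reap completed tx and received packets, then either hand remaining work to the deferred path or re-enable this queue's interrupt via its EIMS bit. A reconstruction; the igc_txeof/igc_rxeof names, their limit handling, and ring locking are assumptions:

    static int
    igc_intr_queue(void *arg)
    {
            struct igc_queue *iq = arg;
            struct igc_softc *sc = iq->sc;
            struct ifnet *ifp = &sc->sc_ec.ec_if;
            struct rx_ring *rxr = iq->rxr;
            struct tx_ring *txr = iq->txr;
            bool txmore, rxmore;

            IGC_QUEUE_EVENT(iq, irqs, 1);

            if (!ISSET(ifp->if_flags, IFF_RUNNING))
                    return 0;

            txmore = igc_txeof(txr);        /* assumed helpers */
            rxmore = igc_rxeof(rxr);

            if (txmore || rxmore) {
                    /* Work left over: defer to softint/workqueue. */
                    IGC_QUEUE_EVENT(iq, req, 1);
                    igc_sched_handle_queue(sc, iq);
            } else {
                    /* Done: let the hardware interrupt us again. */
                    igc_enable_queue(sc, iq->eims);
            }
            return 1;
    }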
2793 struct igc_queue *iq = &sc->queues[0];
2794 struct rx_ring *rxr = iq->rxr;
2795 struct tx_ring *txr = iq->txr;
2812 IGC_QUEUE_EVENT(iq, irqs, 1);
2851 IGC_QUEUE_EVENT(iq, req, 1);
2852 igc_sched_handle_queue(sc, iq);
2863 struct igc_queue *iq = arg;
2864 struct igc_softc *sc = iq->sc;
2865 struct tx_ring *txr = iq->txr;
2866 struct rx_ring *rxr = iq->rxr;
2871 IGC_QUEUE_EVENT(iq, handleq, 1);
2888 igc_sched_handle_queue(sc, iq);
2891 igc_enable_queue(sc, iq->eims);
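igc_handle_queue() (2863-2891) is the deferred half: it re-runs the same reap loops outside hard interrupt context, reschedules itself while work remains, and only then re-arms the queue interrupt. Sketch under the same assumptions as above:

    static void
    igc_handle_queue(void *arg)
    {
            struct igc_queue *iq = arg;
            struct igc_softc *sc = iq->sc;
            struct tx_ring *txr = iq->txr;
            struct rx_ring *rxr = iq->rxr;
            bool txmore, rxmore;

            IGC_QUEUE_EVENT(iq, handleq, 1);

            txmore = igc_txeof(txr);
            rxmore = igc_rxeof(rxr);

            if (txmore || rxmore)
                    igc_sched_handle_queue(sc, iq);     /* keep going */
            else
                    igc_enable_queue(sc, iq->eims);     /* re-arm */
    }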
2900 struct igc_queue *iq =
2903 igc_handle_queue(iq);
2907 igc_sched_handle_queue(struct igc_softc *sc, struct igc_queue *iq)
2910 if (iq->igcq_workqueue) {
2912 workqueue_enqueue(sc->sc_queue_wq, &iq->igcq_wq_cookie,
2915 softint_schedule(iq->igcq_si);
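2900-2915 complete the dispatch plumbing: the workqueue wrapper recovers the queue from its embedded work cookie, and igc_sched_handle_queue() picks workqueue or softint per the igcq_workqueue knob. Reassembled from the fragments above (the workqueue_enqueue third argument and any guard on line 2911 are not visible in the matches):

    static void
    igc_handle_queue_work(struct work *wk, void *context)
    {
            struct igc_queue *iq =
                container_of(wk, struct igc_queue, igcq_wq_cookie);

            igc_handle_queue(iq);
    }

    static void
    igc_sched_handle_queue(struct igc_softc *sc, struct igc_queue *iq)
    {

            if (iq->igcq_workqueue) {
                    workqueue_enqueue(sc->sc_queue_wq, &iq->igcq_wq_cookie,
                        NULL);
            } else {
                    softint_schedule(iq->igcq_si);
            }
    }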
2924 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
2925 struct igc_queue *q = &sc->queues[iq];
2981 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
2982 struct tx_ring *txr = &sc->tx_rings[iq];
3039 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3040 struct tx_ring *txr = &sc->tx_rings[iq];
3045 IGC_WRITE_REG(hw, IGC_TDLEN(iq),
3047 IGC_WRITE_REG(hw, IGC_TDBAH(iq), (uint32_t)(bus_addr >> 32));
3048 IGC_WRITE_REG(hw, IGC_TDBAL(iq), (uint32_t)bus_addr);
3051 IGC_WRITE_REG(hw, IGC_TDT(iq), 0 /* XXX txr->next_avail_desc */);
3052 IGC_WRITE_REG(hw, IGC_TDH(iq), 0);
3064 IGC_WRITE_REG(hw, IGC_TXDCTL(iq), txdctl);
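The transmit-unit setup (3039-3064) gives the hardware each ring's DMA address and size, zeroes head and tail, then enables the queue through TXDCTL. Sketch; the DMA handle layout and the TXDCTL threshold bits are assumptions:

    for (int iq = 0; iq < sc->sc_nqueues; iq++) {
            struct tx_ring *txr = &sc->tx_rings[iq];
            const bus_addr_t bus_addr =
                txr->txdma.dma_map->dm_segs[0].ds_addr;     /* assumed */

            IGC_WRITE_REG(hw, IGC_TDLEN(iq),
                sc->num_tx_desc * sizeof(union igc_adv_tx_desc));
            IGC_WRITE_REG(hw, IGC_TDBAH(iq), (uint32_t)(bus_addr >> 32));
            IGC_WRITE_REG(hw, IGC_TDBAL(iq), (uint32_t)bus_addr);

            /* Ring starts empty: head == tail == 0. */
            IGC_WRITE_REG(hw, IGC_TDT(iq), 0);
            IGC_WRITE_REG(hw, IGC_TDH(iq), 0);

            uint32_t txdctl = IGC_TXDCTL_QUEUE_ENABLE;
            /* prefetch/host thresholds would be OR'ed in here */
            IGC_WRITE_REG(hw, IGC_TXDCTL(iq), txdctl);
    }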
3087 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3088 struct tx_ring *txr = &sc->tx_rings[iq];
3502 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3503 struct rx_ring *rxr = &sc->rx_rings[iq];
3628 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3629 struct rx_ring *rxr = &sc->rx_rings[iq];
3633 IGC_WRITE_REG(hw, IGC_RXDCTL(iq), 0);
3637 IGC_WRITE_REG(hw, IGC_RDLEN(iq),
3639 IGC_WRITE_REG(hw, IGC_RDBAH(iq), (uint32_t)(bus_addr >> 32));
3640 IGC_WRITE_REG(hw, IGC_RDBAL(iq), (uint32_t)bus_addr);
3641 IGC_WRITE_REG(hw, IGC_SRRCTL(iq), srrctl);
3644 IGC_WRITE_REG(hw, IGC_RDH(iq), 0);
3645 IGC_WRITE_REG(hw, IGC_RDT(iq), 0 /* XXX rxr->last_desc_filled */);
3648 uint32_t rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(iq));
3654 IGC_WRITE_REG(hw, IGC_RXDCTL(iq), rxdctl);
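The receive side (3628-3654) follows the same pattern with one twist: RXDCTL is first cleared to stop the queue, and at the end read back and OR'ed so the enable bit is set without clobbering its other fields. Sketch; the DMA handle layout and SRRCTL constants are assumptions:

    for (int iq = 0; iq < sc->sc_nqueues; iq++) {
            struct rx_ring *rxr = &sc->rx_rings[iq];
            const bus_addr_t bus_addr =
                rxr->rxdma.dma_map->dm_segs[0].ds_addr;     /* assumed */
            const uint32_t srrctl =
                IGC_SRRCTL_DESCTYPE_ADV_ONEBUF |            /* assumed */
                (MCLBYTES >> IGC_SRRCTL_BSIZEPKT_SHIFT);

            /* Quiesce the queue before reprogramming it. */
            IGC_WRITE_REG(hw, IGC_RXDCTL(iq), 0);

            IGC_WRITE_REG(hw, IGC_RDLEN(iq),
                sc->num_rx_desc * sizeof(union igc_adv_rx_desc));
            IGC_WRITE_REG(hw, IGC_RDBAH(iq), (uint32_t)(bus_addr >> 32));
            IGC_WRITE_REG(hw, IGC_RDBAL(iq), (uint32_t)bus_addr);
            IGC_WRITE_REG(hw, IGC_SRRCTL(iq), srrctl);

            IGC_WRITE_REG(hw, IGC_RDH(iq), 0);
            IGC_WRITE_REG(hw, IGC_RDT(iq), 0);

            /* Enable without clobbering the other RXDCTL fields. */
            uint32_t rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(iq));
            rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
            IGC_WRITE_REG(hw, IGC_RXDCTL(iq), rxdctl);
    }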
3673 for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3674 struct rx_ring *rxr = &sc->rx_rings[iq];