Lines Matching defs:netq

705 struct vioif_netqueue *netq;
720 netq = &sc->sc_netqs[VIOIF_NETQ_RXQID(i)];
722 mutex_enter(&netq->netq_lock);
723 vioif_populate_rx_mbufs_locked(sc, netq);
724 mutex_exit(&netq->netq_lock);
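
The cluster above (source lines 705-724) appears to be the driver's init path: each RX queue is refilled with mbufs while that queue's own lock is held. A minimal userspace sketch of the pattern, using a pthread mutex in place of the kernel mutex(9) netq_lock; the struct and function names are hypothetical stand-ins, not the driver's API:

	#include <pthread.h>

	/* Hypothetical stand-in for struct vioif_netqueue: one lock per queue. */
	struct netq {
		pthread_mutex_t lock;
		int free_slots;
	};

	/* Init-path pattern from the matches above: ring state is only
	 * touched while the queue's own lock is held. */
	static void
	refill_rx_ring(struct netq *q)
	{
		pthread_mutex_lock(&q->lock);
		while (q->free_slots > 0)
			q->free_slots--;	/* enqueue one RX buffer per free slot */
		pthread_mutex_unlock(&q->lock);
	}
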
753 struct vioif_netqueue *netq;
761 netq = &sc->sc_netqs[i];
763 mutex_enter(&netq->netq_lock);
764 netq->netq_stopping = true;
765 mutex_exit(&netq->netq_lock);
784 netq = &sc->sc_netqs[i];
785 vioif_work_wait(sc->sc_txrx_workqueue, &netq->netq_work);
789 netq = &sc->sc_netqs[VIOIF_NETQ_RXQID(i)];
790 vioif_rx_queue_clear(sc, vsc, netq);
792 netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];
793 vioif_tx_queue_clear(sc, vsc, netq);
798 netq = &sc->sc_netqs[i];
800 mutex_enter(&netq->netq_lock);
801 netq->netq_stopping = false;
802 mutex_exit(&netq->netq_lock);
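
Source lines 753-802 look like the stop path, and its ordering is the interesting part: raise netq_stopping under the lock, drain the per-queue work, clear the rings, and only then drop the flag. A userspace model of that handshake, assuming pthread_join as a stand-in for vioif_work_wait(); all names are hypothetical:

	#include <pthread.h>
	#include <stdbool.h>

	struct netq {
		pthread_mutex_t lock;
		bool stopping;
	};

	/* Worker side: bail out once stopping is observed under the lock. */
	static void
	queue_work(struct netq *q)
	{
		pthread_mutex_lock(&q->lock);
		if (!q->stopping) {
			/* ... process the ring ... */
		}
		pthread_mutex_unlock(&q->lock);
	}

	/* Stop side, mirroring the ordering in the matches above: raise the
	 * flag, wait for in-flight work to finish, then reset state. */
	static void
	queue_stop(struct netq *q, pthread_t worker)
	{
		pthread_mutex_lock(&q->lock);
		q->stopping = true;
		pthread_mutex_unlock(&q->lock);

		pthread_join(worker, NULL);	/* models vioif_work_wait() */

		/* ... clear RX/TX rings; no new work can start now ... */

		pthread_mutex_lock(&q->lock);
		q->stopping = false;
		pthread_mutex_unlock(&q->lock);
	}
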
834 struct vioif_netqueue *netq;
839 netq = &sc->sc_netqs[qid];
840 txc = netq->netq_ctx;
853 if (mutex_tryenter(&netq->netq_lock)) {
854 vioif_transmit_locked(ifp, netq);
855 mutex_exit(&netq->netq_lock);
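
Lines 834-855 show the if_transmit path using mutex_tryenter rather than mutex_enter: on contention the packet stays on the send queue and whoever holds the lock (or a deferred softint) drains it. A sketch with pthread_mutex_trylock; names are hypothetical:

	#include <pthread.h>

	struct netq { pthread_mutex_t lock; };

	/* Model of the if_transmit pattern above: try the queue lock and,
	 * on contention, leave the packet queued instead of spinning (the
	 * driver defers to a softint in that case). */
	static void
	transmit(struct netq *q)
	{
		if (pthread_mutex_trylock(&q->lock) == 0) {
			/* ... drain the send queue onto the virtqueue ... */
			pthread_mutex_unlock(&q->lock);
		}
		/* else: another context holds the lock and will drain for us */
	}
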
865 struct vioif_netqueue *netq;
875 netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];
877 mutex_enter(&netq->netq_lock);
878 if (!netq->netq_running_handle) {
879 netq->netq_running_handle = true;
880 vioif_net_sched_handle(sc, netq);
882 mutex_exit(&netq->netq_lock);
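
Lines 865-882, apparently a watchdog-style path, show the claim-then-schedule idiom: a queue's handler is scheduled only if no instance is already running or pending, so at most one handler exists per queue. A sketch, with hypothetical names:

	#include <pthread.h>
	#include <stdbool.h>

	struct netq {
		pthread_mutex_t lock;
		bool running_handle;
	};

	static void sched_handle(struct netq *q) { (void)q; /* defer work */ }

	/* Claim-then-schedule: netq_running_handle is both the claim and
	 * the guard, always tested and set under the queue lock. */
	static void
	poke_queue(struct netq *q)
	{
		pthread_mutex_lock(&q->lock);
		if (!q->running_handle) {
			q->running_handle = true;
			sched_handle(q);
		}
		pthread_mutex_unlock(&q->lock);
	}
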
1034 struct vioif_netqueue *netq;
1041 netq = &sc->sc_netqs[i];
1042 evcnt_attach_dynamic(&netq->netq_mbuf_load_failed, EVCNT_TYPE_MISC,
1043 NULL, netq->netq_evgroup, "failed to load mbuf to DMA");
1044 evcnt_attach_dynamic(&netq->netq_enqueue_failed,
1045 EVCNT_TYPE_MISC, NULL, netq->netq_evgroup,
1050 rxc = netq->netq_ctx;
1052 EVCNT_TYPE_MISC, NULL, netq->netq_evgroup,
1056 txc = netq->netq_ctx;
1058 EVCNT_TYPE_MISC, NULL, netq->netq_evgroup,
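
Lines 1034-1058 attach per-queue evcnt(9) counters, each labeled with the queue's event group name (netq_evgroup), so failures can be attributed to a specific RX or TX queue. A plain-C model of the same bookkeeping; the struct and field names are hypothetical, not the evcnt API:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical model of the per-queue counters attached above. */
	struct netq_stats {
		char group[16];			/* models netq_evgroup */
		uint64_t mbuf_load_failed;	/* DMA load failures */
		uint64_t enqueue_failed;	/* virtqueue enqueue failures */
	};

	static void
	stats_dump(const struct netq_stats *st)
	{
		printf("%s: mbuf_load_failed=%llu enqueue_failed=%llu\n",
		    st->group,
		    (unsigned long long)st->mbuf_load_failed,
		    (unsigned long long)st->enqueue_failed);
	}
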
1151 struct vioif_netqueue *netq;
1206 netq = &sc->sc_netqs[qid];
1207 maps = netq->netq_maps;
1208 vq_num = netq->netq_vq->vq_num;
1210 netq->netq_maps_kva = vioif_assign_mem(&p,
1236 netq = &sc->sc_netqs[qid];
1237 vq_num = netq->netq_vq->vq_num;
1239 kmemsize += sizeof(netq->netq_maps[0]) * vq_num;
1249 netq = &sc->sc_netqs[qid];
1250 vq_num = netq->netq_vq->vq_num;
1252 netq->netq_maps = vioif_assign_mem(&p,
1253 sizeof(netq->netq_maps[0]) * vq_num);
1286 netq = &sc->sc_netqs[qid];
1287 vq_num = netq->netq_vq->vq_num;
1288 maps = netq->netq_maps;
1289 hdrs = netq->netq_maps_kva;
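
Lines 1151-1289 are the memory setup: everything is sized first (note the kmemsize accumulation at 1239), allocated as one block, then carved into per-queue map arrays and header KVA with vioif_assign_mem(), which is a simple bump pointer. A runnable userspace sketch of that carve-up; the sizes and field names are made up for illustration:

	#include <stdint.h>
	#include <stdlib.h>

	/* Bump allocator matching the shape of the driver's
	 * vioif_assign_mem(): hand out a slice, advance the pointer. */
	static void *
	assign_mem(intptr_t *p, size_t size)
	{
		intptr_t rv = *p;
		*p += size;
		return (void *)rv;
	}

	int
	main(void)
	{
		/* Hypothetical sizes: one map entry and one 32-byte header
		 * per virtqueue slot, as the matches above suggest. */
		size_t vq_num = 256;
		size_t total = vq_num * sizeof(int) + vq_num * 32;
		void *base = malloc(total);
		intptr_t p = (intptr_t)base;

		int *maps = assign_mem(&p, vq_num * sizeof(int));
		char *hdrs = assign_mem(&p, vq_num * 32);

		(void)maps; (void)hdrs;
		free(base);
		return 0;
	}
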
1456 struct vioif_netqueue *netq;
1464 netq = &sc->sc_netqs[qid];
1468 netq->netq_vq = &sc->sc_vqs[qid];
1469 netq->netq_stopping = false;
1470 netq->netq_running_handle = false;
1474 snprintf(netq->netq_evgroup, sizeof(netq->netq_evgroup),
1477 mutex_init(&netq->netq_lock, MUTEX_DEFAULT, IPL_NET);
1478 virtio_init_vq(vsc, vq, qid, params[dir].intrhand, netq);
1485 netq->netq_vq = vq;
1487 netq->netq_softint = softint_establish(softint_flags,
1488 params[dir].sihand, netq);
1489 if (netq->netq_softint == NULL) {
1495 vioif_work_set(&netq->netq_work, params[dir].sihand, netq);
1500 netq->netq_ctx = rxc;
1505 netq->netq_ctx = (void *)txc;
1507 vioif_deferred_transmit, netq);
1523 netq->netq_ctx = NULL;
1537 vioif_work_set(&netq->netq_work, NULL, NULL);
1538 if (netq->netq_softint != NULL) {
1539 softint_disestablish(netq->netq_softint);
1540 netq->netq_softint = NULL;
1544 mutex_destroy(&netq->netq_lock);
1545 netq->netq_vq = NULL;
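
Lines 1456-1545 are vioif_netqueue_init() setup and its failure path: resources (lock, virtqueue, softint, RX/TX context) are acquired in order and, on error, released in exact reverse order, ending with mutex_destroy and netq_vq = NULL. A compressed model of that unwind discipline using goto labels; malloc stands in for the kernel allocations and all names are hypothetical:

	#include <pthread.h>
	#include <stdlib.h>

	struct netq {
		pthread_mutex_t lock;
		void *softint;
		void *ctx;
	};

	/* Acquire in order; on failure, release in reverse order. */
	static int
	netq_init(struct netq *q)
	{
		pthread_mutex_init(&q->lock, NULL);

		q->softint = malloc(1);		/* models softint_establish() */
		if (q->softint == NULL)
			goto err_lock;

		q->ctx = malloc(64);		/* models RX/TX context alloc */
		if (q->ctx == NULL)
			goto err_softint;

		return 0;

	err_softint:
		free(q->softint);
		q->softint = NULL;
	err_lock:
		pthread_mutex_destroy(&q->lock);
		return -1;
	}
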
1554 struct vioif_netqueue *netq;
1559 netq = &sc->sc_netqs[qid];
1561 if (netq->netq_vq == NULL)
1564 netq = &sc->sc_netqs[qid];
1568 rxc = netq->netq_ctx;
1569 netq->netq_ctx = NULL;
1573 txc = netq->netq_ctx;
1574 netq->netq_ctx = NULL;
1581 softint_disestablish(netq->netq_softint);
1582 virtio_free_vq(vsc, netq->netq_vq);
1583 mutex_destroy(&netq->netq_lock);
1584 netq->netq_vq = NULL;
1588 vioif_net_sched_handle(struct vioif_softc *sc, struct vioif_netqueue *netq)
1591 KASSERT(mutex_owned(&netq->netq_lock));
1592 KASSERT(!netq->netq_stopping);
1594 if (netq->netq_workqueue) {
1595 vioif_work_add(sc->sc_txrx_workqueue, &netq->netq_work);
1597 softint_schedule(netq->netq_softint);
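
vioif_net_sched_handle() (lines 1588-1597) is the single dispatch point: with the queue lock held and the queue not stopping, the handler is deferred either to the workqueue or to a softint, selected per queue. A sketch of the same dispatch; the stub functions are hypothetical placeholders for the two deferral mechanisms:

	#include <assert.h>
	#include <stdbool.h>

	struct netq {
		bool use_workqueue;	/* models netq_workqueue */
		bool stopping;
		bool lock_held;		/* stand-in for mutex_owned() */
	};

	static void enqueue_work(struct netq *q) { (void)q; /* worker thread */ }
	static void schedule_softint(struct netq *q) { (void)q; /* soft IRQ */ }

	/* Same preconditions as the KASSERTs above: lock held, not stopping. */
	static void
	sched_handle(struct netq *q)
	{
		assert(q->lock_held);
		assert(!q->stopping);

		if (q->use_workqueue)
			enqueue_work(q);
		else
			schedule_softint(q);
	}
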
1697 struct vioif_netqueue *netq;
1703 netq = &sc->sc_netqs[i];
1705 KASSERT(!netq->netq_stopping);
1706 KASSERT(!netq->netq_running_handle);
1708 enqueued = virtio_start_vq_intr(vsc, netq->netq_vq);
1710 virtio_stop_vq_intr(vsc, netq->netq_vq);
1712 mutex_enter(&netq->netq_lock);
1713 netq->netq_running_handle = true;
1714 vioif_net_sched_handle(sc, netq);
1715 mutex_exit(&netq->netq_lock);
1723 struct vioif_netqueue *netq;
1728 netq = &sc->sc_netqs[i];
1730 virtio_stop_vq_intr(vsc, netq->netq_vq);
1739 vioif_populate_rx_mbufs_locked(struct vioif_softc *sc, struct vioif_netqueue *netq)
1741 struct virtqueue *vq = netq->netq_vq;
1748 KASSERT(mutex_owned(&netq->netq_lock));
1750 rxc = netq->netq_ctx;
1778 map = &netq->netq_maps[slot];
1783 netq->netq_mbuf_load_failed.ev_count++;
1790 netq->netq_enqueue_failed.ev_count++;
1806 struct vioif_netqueue *netq, u_int limit, size_t *ndeqp)
1808 struct virtqueue *vq = netq->netq_vq;
1816 KASSERT(mutex_owned(&netq->netq_lock));
1833 map = &netq->netq_maps[slot];
1853 struct vioif_netqueue *netq)
1860 mutex_enter(&netq->netq_lock);
1862 vq_num = netq->netq_vq->vq_num;
1864 more = vioif_rx_deq_locked(sc, vsc, netq, vq_num, NULL);
1870 map = &netq->netq_maps[i];
1879 mutex_exit(&netq->netq_lock);
1885 struct vioif_netqueue *netq = xnetq;
1886 struct virtqueue *vq = netq->netq_vq;
1893 KASSERT(mutex_owned(&netq->netq_lock));
1894 KASSERT(!netq->netq_stopping);
1896 more = vioif_rx_deq_locked(sc, vsc, netq, limit, &ndeq);
1898 vioif_populate_rx_mbufs_locked(sc, netq);
1901 vioif_net_sched_handle(sc, netq);
1905 enqueued = virtio_start_vq_intr(vsc, netq->netq_vq);
1907 virtio_stop_vq_intr(vsc, netq->netq_vq);
1908 vioif_net_sched_handle(sc, netq);
1912 netq->netq_running_handle = false;
1918 struct vioif_netqueue *netq = arg;
1919 struct virtqueue *vq = netq->netq_vq;
1924 mutex_enter(&netq->netq_lock);
1927 if (netq->netq_running_handle)
1930 if (netq->netq_stopping)
1933 netq->netq_running_handle = true;
1937 vioif_rx_handle_locked(netq, limit);
1940 mutex_exit(&netq->netq_lock);
1947 struct vioif_netqueue *netq = xnetq;
1948 struct virtqueue *vq = netq->netq_vq;
1953 mutex_enter(&netq->netq_lock);
1955 KASSERT(netq->netq_running_handle);
1957 if (netq->netq_stopping) {
1958 netq->netq_running_handle = false;
1963 vioif_rx_handle_locked(netq, limit);
1966 mutex_exit(&netq->netq_lock);
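
Lines 1885-1966 are the RX interrupt/handler pair, and the netq_running_handle lifecycle is the core of it: the interrupt claims the queue and masks the virtqueue interrupt; the handler dequeues up to a limit, reschedules itself if the limit was hit, and otherwise re-enables the interrupt, re-checking for work that raced in while interrupts were off. A userspace model of that state machine; every function here is a hypothetical stub for the corresponding virtio/vioif call:

	#include <pthread.h>
	#include <stdbool.h>

	struct netq {
		pthread_mutex_t lock;
		bool running_handle;	/* handler active or scheduled */
		bool stopping;
	};

	/* Returns true if the limit was hit and more work remains. */
	static bool dequeue_batch(struct netq *q, unsigned limit)
	{ (void)q; (void)limit; return false; }
	static void sched_handle(struct netq *q) { (void)q; }
	/* Returns true if work arrived while interrupts were masked,
	 * as virtio_start_vq_intr() reports in the matches above. */
	static bool enable_intr(struct netq *q) { (void)q; return false; }
	static void disable_intr(struct netq *q) { (void)q; }

	static void
	handle_locked(struct netq *q, unsigned limit)
	{
		if (dequeue_batch(q, limit)) {
			/* (the RX side also refills the ring here) */
			sched_handle(q);	/* limit hit: run again later */
			return;
		}
		if (enable_intr(q)) {		/* raced: more work queued */
			disable_intr(q);
			sched_handle(q);
			return;
		}
		q->running_handle = false;	/* done; next intr may claim */
	}

	static void
	intr(struct netq *q)
	{
		pthread_mutex_lock(&q->lock);
		if (!q->running_handle && !q->stopping) {
			q->running_handle = true;
			disable_intr(q);
			handle_locked(q, 64);	/* hypothetical batch limit */
		}
		pthread_mutex_unlock(&q->lock);
	}
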
1974 vioif_send_common_locked(struct ifnet *ifp, struct vioif_netqueue *netq,
1979 struct virtqueue *vq = netq->netq_vq;
1985 KASSERT(mutex_owned(&netq->netq_lock));
1987 if (netq->netq_stopping ||
1991 txc = netq->netq_ctx;
2017 map = &netq->netq_maps[slot];
2036 netq->netq_mbuf_load_failed.ev_count++;
2048 netq->netq_enqueue_failed.ev_count++;
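
vioif_send_common_locked() (lines 1974-2048) is the TX loop: each packet is loaded into a DMA map and enqueued on the virtqueue, and a packet that fails either step bumps the matching per-queue counter. A simplified model of that error accounting; in this sketch a failing packet is simply counted and dropped (the real driver also tries defragmenting the mbuf first), and all names are hypothetical:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	struct pkt;
	struct netq {
		uint64_t mbuf_load_failed;
		uint64_t enqueue_failed;
	};

	static struct pkt *stack_dequeue(void) { return NULL; }
	static bool dma_load(struct pkt *p) { (void)p; return true; }
	static bool vq_enqueue(struct pkt *p) { (void)p; return true; }
	static void pkt_drop(struct pkt *p) { (void)p; }

	/* A bad packet is counted and discarded so the ring never wedges. */
	static void
	send_all(struct netq *q)
	{
		struct pkt *p;

		while ((p = stack_dequeue()) != NULL) {
			if (!dma_load(p)) {
				q->mbuf_load_failed++;
				pkt_drop(p);
				continue;
			}
			if (!vq_enqueue(p)) {
				q->enqueue_failed++;
				pkt_drop(p);
				continue;
			}
		}
	}
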
2070 struct vioif_netqueue *netq, u_int limit, size_t *ndeqp)
2072 struct virtqueue *vq = netq->netq_vq;
2080 KASSERT(mutex_owned(&netq->netq_lock));
2097 map = &netq->netq_maps[slot];
2115 struct vioif_netqueue *netq)
2123 mutex_enter(&netq->netq_lock);
2125 txc = netq->netq_ctx;
2126 vq_num = netq->netq_vq->vq_num;
2129 more = vioif_tx_deq_locked(sc, vsc, netq, vq_num, NULL);
2135 map = &netq->netq_maps[i];
2147 mutex_exit(&netq->netq_lock);
2151 vioif_start_locked(struct ifnet *ifp, struct vioif_netqueue *netq)
2157 vioif_send_common_locked(ifp, netq, false);
2162 vioif_transmit_locked(struct ifnet *ifp, struct vioif_netqueue *netq)
2165 vioif_send_common_locked(ifp, netq, true);
2171 struct vioif_netqueue *netq = arg;
2172 struct virtio_softc *vsc = netq->netq_vq->vq_owner;
2176 mutex_enter(&netq->netq_lock);
2177 vioif_send_common_locked(ifp, netq, true);
2178 mutex_exit(&netq->netq_lock);
2182 vioif_tx_handle_locked(struct vioif_netqueue *netq, u_int limit)
2184 struct virtqueue *vq = netq->netq_vq;
2185 struct vioif_tx_context *txc = netq->netq_ctx;
2193 KASSERT(mutex_owned(&netq->netq_lock));
2194 KASSERT(!netq->netq_stopping);
2196 more = vioif_tx_deq_locked(sc, vsc, netq, limit, &ndeq);
2203 vioif_net_sched_handle(sc, netq);
2212 vioif_net_sched_handle(sc, netq);
2216 netq->netq_running_handle = false;
2219 if (netq == &sc->sc_netqs[VIOIF_NETQ_TXQID(0)])
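
The tail of vioif_tx_handle_locked() (line 2219) has one asymmetry worth noting: after TX descriptors are reclaimed, only the first TX queue pokes the stack's deferred start, since the legacy if_start path feeds a single send queue. A trivial sketch, with a hypothetical stand-in for if_schedule_deferred_start():

	struct netq { int qid; };

	static void kick_if_start(void) { /* models deferred if_start */ }

	/* Only TX queue 0 restarts the single-queue if_start path. */
	static void
	tx_reclaim_done(struct netq *q)
	{
		if (q->qid == 0)
			kick_if_start();
	}
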
2228 struct vioif_netqueue *netq = arg;
2229 struct virtqueue *vq = netq->netq_vq;
2234 mutex_enter(&netq->netq_lock);
2237 if (netq->netq_running_handle)
2240 if (netq->netq_stopping)
2243 netq->netq_running_handle = true;
2246 netq->netq_workqueue = sc->sc_txrx_workqueue_sysctl;
2248 vioif_tx_handle_locked(netq, limit);
2251 mutex_exit(&netq->netq_lock);
2258 struct vioif_netqueue *netq = xnetq;
2259 struct virtqueue *vq = netq->netq_vq;
2264 mutex_enter(&netq->netq_lock);
2266 KASSERT(netq->netq_running_handle);
2268 if (netq->netq_stopping) {
2269 netq->netq_running_handle = false;
2274 vioif_tx_handle_locked(netq, limit);
2277 mutex_exit(&netq->netq_lock);
2724 struct vioif_netqueue *netq;
2740 netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];
2742 mutex_enter(&netq->netq_lock);
2743 txc = netq->netq_ctx;
2745 mutex_exit(&netq->netq_lock);
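
The final cluster (lines 2724-2745) is a slow-path reader: it takes each TX queue's lock just long enough to reach the queue's context, so it never races the hot TX path. A sketch of that snapshot idiom; the field name is a hypothetical stand-in, not a real vioif statistic:

	#include <pthread.h>
	#include <stdint.h>

	struct txq_ctx { uint64_t some_stat; };	/* hypothetical field */
	struct netq {
		pthread_mutex_t lock;
		struct txq_ctx *ctx;
	};

	/* Hold the queue lock only for the read itself. */
	static uint64_t
	snapshot_tx_stat(struct netq *q)
	{
		uint64_t v;

		pthread_mutex_lock(&q->lock);
		v = q->ctx->some_stat;
		pthread_mutex_unlock(&q->lock);
		return v;
	}
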