Lines matching defs:txq — definitions and uses of the per-queue transmit structure (struct netfront_txq) in the FreeBSD Xen netfront driver; the number on each line is the source line in netfront.c.

206 		struct netfront_txq *txq;
222 struct netfront_txq *txq;
357 SLIST_INSERT_HEAD(&ref->txq->tags, ref, next);
480 XN_TX_LOCK(&np->txq[i]);
485 XN_TX_UNLOCK(&np->txq[i]);
505 XN_TX_LOCK(&info->txq[i]);
510 XN_TX_UNLOCK(&info->txq[i]);
522 struct netfront_txq *txq,
531 KASSERT(rxq->id == txq->id, ("Mismatch between RX and TX queue ids"));
533 KASSERT(rxq->xen_intr_handle == txq->xen_intr_handle,
546 err = xs_printf(*xst, path, "tx-ring-ref","%u", txq->ring_ref);
614 &info->txq[0], &xst, false);
627 &info->txq[i], &xst, true);
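
Lines 522-627 belong to the xenstore handshake (write_queue_xenstore_keys and its callers), where each queue pair publishes its ring grant references and event channel to the backend. A minimal sketch of the per-queue keys, assuming the single shared event channel per TX/RX pair this driver uses (error handling and the multiqueue "queue-%u" path construction omitted):

	/* Sketch: publish one queue pair's resources under `path`.
	 * xs_printf() and xen_intr_port() are existing FreeBSD APIs;
	 * the key names follow the Xen netif protocol. */
	err = xs_printf(*xst, path, "tx-ring-ref", "%u", txq->ring_ref);
	if (err == 0)
		err = xs_printf(*xst, path, "rx-ring-ref", "%u", rxq->ring_ref);
	if (err == 0)
		err = xs_printf(*xst, path, "event-channel", "%u",
		    xen_intr_port(txq->xen_intr_handle));
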
693 xn_txq_start(struct netfront_txq *txq)
695 struct netfront_info *np = txq->info;
698 XN_TX_LOCK_ASSERT(txq);
699 if (!drbr_empty(ifp, txq->br))
700 xn_txq_mq_start_locked(txq, NULL);
704 xn_txq_intr(struct netfront_txq *txq)
707 XN_TX_LOCK(txq);
708 if (RING_HAS_UNCONSUMED_RESPONSES(&txq->ring))
709 xn_txeof(txq);
710 xn_txq_start(txq);
711 XN_TX_UNLOCK(txq);
717 struct netfront_txq *txq = xtxq;
719 XN_TX_LOCK(txq);
720 xn_txq_start(txq);
721 XN_TX_UNLOCK(txq);
833 disconnect_txq(struct netfront_txq *txq)
836 xn_release_tx_bufs(txq);
837 gnttab_free_grant_references(txq->gref_head);
838 if (txq->ring_ref != GRANT_REF_INVALID) {
839 gnttab_end_foreign_access(txq->ring_ref, NULL);
840 txq->ring_ref = GRANT_REF_INVALID;
842 xen_intr_unbind(&txq->xen_intr_handle);
846 destroy_txq(struct netfront_txq *txq)
850 free(txq->ring.sring, M_DEVBUF);
851 txq->ring.sring = NULL;
852 buf_ring_free(txq->br, M_DEVBUF);
853 txq->br = NULL;
854 if (txq->tq) {
855 taskqueue_drain_all(txq->tq);
856 taskqueue_free(txq->tq);
857 txq->tq = NULL;
861 bus_dmamap_destroy(txq->info->dma_tag,
862 txq->xennet_tag[i].dma_map);
863 txq->xennet_tag[i].dma_map = NULL;
873 destroy_txq(&np->txq[i]);
875 free(np->txq, M_DEVBUF);
876 np->txq = NULL;
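
Note the teardown split visible above: disconnect_txq() (lines 833-842) quiesces the I/O resources, while destroy_txq() (lines 846-863) releases memory, draining the taskqueue before freeing it so that no queued xn_txq_tq_deferred() call can still dereference the queue. A sketch of the intended per-queue ordering:

	/* Sketch of per-queue teardown order (error paths omitted). */
	disconnect_txq(txq);	/* release bufs, end grants, unbind intr */
	destroy_txq(txq);	/* drain+free taskqueue, free ring and br */
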
886 struct netfront_txq *txq;
888 info->txq = malloc(sizeof(struct netfront_txq) * num_queues,
892 txq = &info->txq[q];
894 txq->id = q;
895 txq->info = info;
897 txq->gref_head = GNTTAB_LIST_END;
898 txq->ring_ref = GRANT_REF_INVALID;
899 txq->ring.sring = NULL;
901 snprintf(txq->name, XN_QUEUE_NAME_LEN, "xntx_%u", q);
903 mtx_init(&txq->lock, txq->name, "netfront transmit lock",
905 SLIST_INIT(&txq->tags);
908 txq->mbufs[i] = (void *) ((u_long) i+1);
909 txq->grant_ref[i] = GRANT_REF_INVALID;
910 txq->xennet_tag[i].txq = txq;
911 txq->xennet_tag[i].dma_tag = info->dma_tag;
913 &txq->xennet_tag[i].dma_map);
919 m_tag_setup(&txq->xennet_tag[i].tag,
921 sizeof(txq->xennet_tag[i]) -
922 sizeof(txq->xennet_tag[i].tag));
923 txq->xennet_tag[i].tag.m_tag_free = &tag_free;
924 SLIST_INSERT_HEAD(&txq->tags, &txq->xennet_tag[i],
927 txq->mbufs[NET_TX_RING_SIZE] = (void *)0;
932 &txq->gref_head) != 0) {
941 FRONT_RING_INIT(&txq->ring, txs, PAGE_SIZE);
944 &txq->ring_ref);
950 txq->br = buf_ring_alloc(NET_TX_RING_SIZE, M_DEVBUF,
951 M_WAITOK, &txq->lock);
952 TASK_INIT(&txq->defrtask, 0, xn_txq_tq_deferred, txq);
954 txq->tq = taskqueue_create(txq->name, M_WAITOK,
955 taskqueue_thread_enqueue, &txq->tq);
957 error = taskqueue_start_threads(&txq->tq, 1, PI_NET,
958 "%s txq %d", device_get_nameunit(dev), txq->id);
961 txq->id);
967 &info->txq[q], INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY,
968 &txq->xen_intr_handle);
979 taskqueue_drain_all(txq->tq);
981 buf_ring_free(txq->br, M_DEVBUF);
982 taskqueue_free(txq->tq);
983 gnttab_end_foreign_access(txq->ring_ref, NULL);
985 gnttab_free_grant_references(txq->gref_head);
986 free(txq->ring.sring, M_DEVBUF);
989 disconnect_txq(&info->txq[q]);
990 destroy_txq(&info->txq[q]);
993 free(info->txq, M_DEVBUF);
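
The mbufs[] initialization at lines 908 and 927 doubles the pointer array as a free list of ring ids: slot i initially holds the integer i+1 cast to a pointer, slot 0 serves as the list head, and the value 0 terminates the chain. The add_id_to_freelist()/get_id_from_freelist() helpers invoked at lines 1121, 1394, and 1688 then push and pop ids in O(1). A paraphrased sketch of those helpers (assertion messages abbreviated):

	static inline void
	add_id_to_freelist(struct mbuf **list, uintptr_t id)
	{
		KASSERT(id != 0, ("slot 0 is reserved as the list head"));
		list[id] = list[0];		/* old head linked behind id */
		list[0] = (struct mbuf *)id;	/* id becomes the new head  */
	}

	static inline uintptr_t
	get_id_from_freelist(struct mbuf **list)
	{
		uintptr_t id = (uintptr_t)list[0];

		KASSERT(id != 0, ("free list exhausted"));
		list[0] = list[id];		/* unlink the head entry */
		return (id);
	}
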
1004 if (info->txq)
1023 info->rxq[q].xen_intr_handle = info->txq[q].xen_intr_handle;
1093 xn_tx_slot_available(struct netfront_txq *txq)
1096 return (RING_FREE_REQUESTS(&txq->ring) > (MAX_TX_REQ_FRAGS + 2));
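
The headroom test at line 1096 uses the standard Xen shared-ring accounting: a slot is in flight from local production (req_prod_pvt) until its response is consumed (rsp_cons). Requiring more than MAX_TX_REQ_FRAGS + 2 free slots presumably reserves room for a maximally fragmented packet plus the TSO extra-info request, with a slot of slack. Paraphrased from xen/interface/io/ring.h:

	#define RING_SIZE(_r)		((_r)->nr_ents)
	#define RING_FREE_REQUESTS(_r)	\
	    (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
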
1100 xn_release_tx_bufs(struct netfront_txq *txq)
1107 m = txq->mbufs[i];
1117 gnttab_end_foreign_access_ref(txq->grant_ref[i]);
1118 gnttab_release_grant_reference(&txq->gref_head,
1119 txq->grant_ref[i]);
1120 txq->grant_ref[i] = GRANT_REF_INVALID;
1121 add_id_to_freelist(txq->mbufs, i);
1122 txq->mbufs_cnt--;
1123 if (txq->mbufs_cnt < 0) {
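
xn_release_tx_bufs() (lines 1100-1123) reclaims buffers left in flight when the ring is torn down. Because free slots hold small integer free-list links rather than real pointers (see line 908), the scan distinguishes the two with a magnitude test. A sketch of the loop around the matched lines, assuming that encoding:

	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
		m = txq->mbufs[i];

		/* Free-list links are the integers 0..NET_TX_RING_SIZE
		 * cast to pointers; anything larger is a live mbuf. */
		if (((u_long)m) <= NET_TX_RING_SIZE)
			continue;

		gnttab_end_foreign_access_ref(txq->grant_ref[i]);
		gnttab_release_grant_reference(&txq->gref_head,
		    txq->grant_ref[i]);
		txq->grant_ref[i] = GRANT_REF_INVALID;
		add_id_to_freelist(txq->mbufs, i);
		txq->mbufs_cnt--;
		if (txq->mbufs_cnt < 0)
			panic("%s: mbufs_cnt must be >= 0", __func__);
		m_freem(m);
	}
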
1346 xn_txeof(struct netfront_txq *txq)
1353 struct netfront_info *np = txq->info;
1355 XN_TX_LOCK_ASSERT(txq);
1363 prod = txq->ring.sring->rsp_prod;
1366 for (i = txq->ring.rsp_cons; i != prod; i++) {
1367 txr = RING_GET_RESPONSE(&txq->ring, i);
1376 m = txq->mbufs[id];
1384 txq->grant_ref[id]) != 0)) {
1388 gnttab_end_foreign_access_ref(txq->grant_ref[id]);
1390 &txq->gref_head, txq->grant_ref[id]);
1391 txq->grant_ref[id] = GRANT_REF_INVALID;
1393 txq->mbufs[id] = NULL;
1394 add_id_to_freelist(txq->mbufs, id);
1395 txq->mbufs_cnt--;
1397 /* Only mark the txq active if we've freed up at least one slot to try */
1400 txq->ring.rsp_cons = prod;
1411 txq->ring.sring->rsp_event =
1412 prod + ((txq->ring.sring->req_prod - prod) >> 1) + 1;
1415 } while (prod != txq->ring.sring->rsp_prod);
1417 if (txq->full &&
1418 ((txq->ring.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
1419 txq->full = false;
1420 xn_txq_start(txq);
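
The re-arm at lines 1411-1412 is interrupt mitigation: rather than requesting an event on the very next response, the frontend sets rsp_event halfway between what the backend has produced (prod) and what is still outstanding (req_prod), so TX-completion interrupts arrive in batches. A worked example of the formula:

	/* With responses produced up to prod = 100 and requests
	 * submitted up to req_prod = 108 (8 in flight):
	 *
	 *	rsp_event = prod + ((req_prod - prod) >> 1) + 1
	 *		  = 100 + (8 >> 1) + 1 = 105
	 *
	 * so the next event fires after five more completions, not
	 * after each one.  The enclosing do/while at line 1415
	 * re-checks rsp_prod to close the race where responses land
	 * between the reap loop and the re-arm. */
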
1427 struct netfront_txq *txq = xsc;
1428 struct netfront_info *np = txq->info;
1429 struct netfront_rxq *rxq = &np->rxq[txq->id];
1433 xn_txq_intr(txq);
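
xn_intr() (lines 1427-1433) is the single upcall for a queue pair: as line 1023 shows, the RX queue inherits the TX queue's event-channel handle, so one handler services both directions. A sketch of its shape, assuming an xn_rxq_intr() counterpart that this listing (filtered on txq) does not show:

	static void
	xn_intr(void *xsc)
	{
		struct netfront_txq *txq = xsc;
		struct netfront_info *np = txq->info;
		struct netfront_rxq *rxq = &np->rxq[txq->id];

		/* One event channel per queue pair: kick both rings. */
		xn_rxq_intr(rxq);
		xn_txq_intr(txq);
	}
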
1597 xn_assemble_tx_request(struct netfront_txq *txq, struct mbuf *m_head)
1599 struct netfront_info *np = txq->info;
1602 bus_dma_segment_t *segs = txq->segs;
1607 KASSERT(!SLIST_EMPTY(&txq->tags), ("no tags available"));
1608 tag = SLIST_FIRST(&txq->tags);
1609 SLIST_REMOVE_HEAD(&txq->tags, next);
1624 SLIST_INSERT_HEAD(&txq->tags, tag, next);
1633 SLIST_INSERT_HEAD(&txq->tags, tag, next);
1638 SLIST_INSERT_HEAD(&txq->tags, tag, next);
1656 SLIST_INSERT_HEAD(&txq->tags, tag, next);
1669 KASSERT((txq->mbufs_cnt + nfrags) <= NET_TX_RING_SIZE,
1671 "(%d)!", __func__, (int) txq->mbufs_cnt,
1687 tx = RING_GET_REQUEST(&txq->ring, txq->ring.req_prod_pvt);
1688 id = get_id_from_freelist(txq->mbufs);
1692 txq->mbufs_cnt++;
1693 if (txq->mbufs_cnt > NET_TX_RING_SIZE)
1697 txq->mbufs[id] = m_head;
1699 ref = gnttab_claim_grant_reference(&txq->gref_head);
1704 tx->gref = txq->grant_ref[id] = ref;
1740 RING_GET_REQUEST(&txq->ring,
1741 ++txq->ring.req_prod_pvt);
1760 txq->ring.req_prod_pvt++;
1770 xn_txeof(txq);
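
xn_assemble_tx_request() (lines 1597-1770) turns a DMA-loaded mbuf chain into a chain of netif TX request slots: the first slot advertises the whole packet length plus checksum/TSO flags, each further fragment is linked via NETTXF_more_data, and req_prod_pvt advances once per slot (the ++req_prod_pvt at line 1741 grabs the extra slot used for TSO metadata). A condensed sketch of the fragment loop, with mbuf/DMA bookkeeping, error checks, and the extra-info slot omitted; otherend_id (the backend domain) and nfrags/segs[] (from bus_dmamap_load_mbuf_sg) are assumed:

	for (i = 0; i < nfrags; i++) {
		tx = RING_GET_REQUEST(&txq->ring, txq->ring.req_prod_pvt);
		id = get_id_from_freelist(txq->mbufs);
		ref = gnttab_claim_grant_reference(&txq->gref_head);
		/* Grant the backend read-only access to this page. */
		gnttab_grant_foreign_access_ref(ref, otherend_id,
		    atop(segs[i].ds_addr), 1 /* read-only */);

		tx->id = id;
		tx->gref = txq->grant_ref[id] = ref;
		tx->offset = segs[i].ds_addr & PAGE_MASK;
		/* First slot carries the total packet length. */
		tx->size = (i == 0) ? m_head->m_pkthdr.len : segs[i].ds_len;
		tx->flags = (i != nfrags - 1) ? NETTXF_more_data : 0;

		txq->ring.req_prod_pvt++;
	}
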
2004 struct netfront_txq *txq;
2026 txq = &np->txq[i];
2027 xn_release_tx_bufs(txq);
2051 struct netfront_txq *txq;
2055 txq = &np->txq[i];
2057 xen_intr_signal(txq->xen_intr_handle);
2058 XN_TX_LOCK(txq);
2059 xn_txeof(txq);
2060 XN_TX_UNLOCK(txq);
2167 xn_txq_mq_start_locked(struct netfront_txq *txq, struct mbuf *m)
2174 np = txq->info;
2175 br = txq->br;
2179 XN_TX_LOCK_ASSERT(txq);
2195 if (!xn_tx_slot_available(txq)) {
2200 error = xn_assemble_tx_request(txq, m);
2207 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&txq->ring, notify);
2209 xen_intr_signal(txq->xen_intr_handle);
2214 if (RING_FULL(&txq->ring))
2215 txq->full = true;
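
Once slots are filled, line 2207 publishes them and line 2209 signals the backend only when it asked to be notified, avoiding a hypercall per packet when the backend is already polling. The check, paraphrased from the standard xen/interface/io/ring.h macro:

	#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {	\
		RING_IDX __old = (_r)->sring->req_prod;			\
		RING_IDX __new = (_r)->req_prod_pvt;			\
		wmb();	/* requests visible before producer index */	\
		(_r)->sring->req_prod = __new;				\
		mb();	/* producer index visible before event read */	\
		/* Notify only if the peer's req_event marker lies	\
		 * within the window of requests just pushed. */	\
		(_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
		    (RING_IDX)(__new - __old));				\
	} while (0)
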
2224 struct netfront_txq *txq;
2241 txq = &np->txq[i];
2243 if (XN_TX_TRYLOCK(txq) != 0) {
2244 error = xn_txq_mq_start_locked(txq, m);
2245 XN_TX_UNLOCK(txq);
2247 error = drbr_enqueue(ifp, txq->br, m);
2248 taskqueue_enqueue(txq->tq, &txq->defrtask);
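
The if_transmit entry at lines 2224-2248 shows the deferral pattern that pairs with xn_txq_tq_deferred() (lines 717-721): transmit directly when the queue's TX lock is uncontended, otherwise park the mbuf in the buf_ring and let the taskqueue retry, so callers never sleep on a busy ring. A condensed sketch, assuming a flowid-based queue pick (the selection logic is not among the matched lines):

	/* Pick a queue: hash-distributed if the mbuf carries a flowid. */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		i = m->m_pkthdr.flowid % np->num_queues;
	else
		i = curcpu % np->num_queues;
	txq = &np->txq[i];

	if (XN_TX_TRYLOCK(txq) != 0) {
		error = xn_txq_mq_start_locked(txq, m);	/* fast path */
		XN_TX_UNLOCK(txq);
	} else {
		/* Lock contended: enqueue and defer the kick. */
		error = drbr_enqueue(ifp, txq->br, m);
		taskqueue_enqueue(txq->tq, &txq->defrtask);
	}
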
2258 struct netfront_txq *txq;
2265 txq = &np->txq[i];
2267 XN_TX_LOCK(txq);
2268 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
2270 XN_TX_UNLOCK(txq);
2371 free(np->txq, M_DEVBUF);
2385 XN_TX_LOCK(&np->txq[i]);
2390 XN_TX_UNLOCK(&np->txq[i]);
2395 disconnect_txq(&np->txq[i]);