Lines Matching +full:num +full:- +full:txq

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2004-2006 Kip Macy
64 #include <xen/xen-os.h>
206 struct netfront_txq *txq;
222 struct netfront_txq *txq;
241 struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
244 #define XN_RX_LOCK(_q) mtx_lock(&(_q)->lock)
245 #define XN_RX_UNLOCK(_q) mtx_unlock(&(_q)->lock)
247 #define XN_TX_LOCK(_q) mtx_lock(&(_q)->lock)
248 #define XN_TX_TRYLOCK(_q) mtx_trylock(&(_q)->lock)
249 #define XN_TX_UNLOCK(_q) mtx_unlock(&(_q)->lock)
251 #define XN_LOCK(_sc) mtx_lock(&(_sc)->sc_lock);
252 #define XN_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_lock);
254 #define XN_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_lock, MA_OWNED);
255 #define XN_RX_LOCK_ASSERT(_q) mtx_assert(&(_q)->lock, MA_OWNED);
256 #define XN_TX_LOCK_ASSERT(_q) mtx_assert(&(_q)->lock, MA_OWNED);
258 #define netfront_carrier_on(netif) ((netif)->carrier = 1)
259 #define netfront_carrier_off(netif) ((netif)->carrier = 0)
260 #define netfront_carrier_ok(netif) ((netif)->carrier)
290 return idx & (NET_RX_RING_SIZE - 1);
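
NET_RX_RING_SIZE is a power of two, so the xn_rxidx() fragment above can wrap a free-running ring index with a mask instead of a modulo. A minimal standalone sketch of the same trick, using a made-up ring size:

    #include <assert.h>
    #include <stdio.h>

    #define RING_SIZE 256                   /* must be a power of two */

    /* Wrap a free-running index into a ring slot, as xn_rxidx() does. */
    static unsigned int
    ring_idx(unsigned int idx)
    {
        return (idx & (RING_SIZE - 1));     /* same value as idx % RING_SIZE */
    }

    int
    main(void)
    {
        assert(ring_idx(RING_SIZE + 3) == 3);
        printf("index 259 maps to slot %u\n", ring_idx(259));
        return (0);
    }

The mask is only equivalent to a modulo because the ring size is a power of two; the ring indices themselves stay free-running and are reduced only when a slot is actually touched.
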
300 m = rxq->mbufs[i];
301 rxq->mbufs[i] = NULL;
309 grant_ref_t ref = rxq->grant_ref[i];
312 rxq->grant_ref[i] = GRANT_REF_INVALID;
326 ref->count++;
336 KASSERT(ref->count > 0, ("Invalid reference count"));
338 if (--ref->count == 0) {
345 m_tag_delete(m, &ref->tag);
354 KASSERT(ref->count == 0, ("Free mbuf tag with pending refcnt"));
355 bus_dmamap_sync(ref->dma_tag, ref->dma_map, BUS_DMASYNC_POSTWRITE);
356 bus_dmamap_destroy(ref->dma_tag, ref->dma_map);
357 SLIST_INSERT_HEAD(&ref->txq->tags, ref, next);
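
The lines above come from the transmit tag teardown path: each tag carries a reference count, and when the last reference is dropped the m_tag is deleted, its DMA map is synced and destroyed, and the tag is pushed back onto the queue's tags free list. A simplified standalone sketch of that recycle-on-last-release pattern; struct xtag, tag_release() and free_tags are hypothetical stand-ins rather than driver symbols:

    #include <assert.h>
    #include <stdio.h>
    #include <sys/queue.h>

    /* Hypothetical stand-in for the driver's per-packet transmit tag. */
    struct xtag {
        int                     count;  /* outstanding users of the tag */
        SLIST_ENTRY(xtag)       next;   /* linkage on the free list */
    };

    static SLIST_HEAD(, xtag) free_tags = SLIST_HEAD_INITIALIZER(free_tags);

    /* Drop one reference; recycle the tag onto the free list at zero. */
    static void
    tag_release(struct xtag *t)
    {
        assert(t->count > 0);
        if (--t->count == 0)
            SLIST_INSERT_HEAD(&free_tags, t, next);
    }

    int
    main(void)
    {
        struct xtag t = { .count = 2 };

        tag_release(&t);                /* still referenced elsewhere */
        assert(SLIST_EMPTY(&free_tags));
        tag_release(&t);                /* last reference: tag recycled */
        assert(SLIST_FIRST(&free_tags) == &t);
        printf("tag recycled onto the free list\n");
        return (0);
    }
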
377  * as colon-separated octets, placing the result in the given mac array. mac must be
400 * front-side mac node, even when operating in Dom0.
478 for (i = 0; i < np->num_queues; i++) {
479 XN_RX_LOCK(&np->rxq[i]);
480 XN_TX_LOCK(&np->txq[i]);
483 for (i = 0; i < np->num_queues; i++) {
484 XN_RX_UNLOCK(&np->rxq[i]);
485 XN_TX_UNLOCK(&np->txq[i]);
493 * leave the device-layer structures intact so that this is transparent to the
503 for (i = 0; i < info->num_queues; i++) {
504 XN_RX_LOCK(&info->rxq[i]);
505 XN_TX_LOCK(&info->txq[i]);
508 for (i = 0; i < info->num_queues; i++) {
509 XN_RX_UNLOCK(&info->rxq[i]);
510 XN_TX_UNLOCK(&info->txq[i]);
522 struct netfront_txq *txq,
531 KASSERT(rxq->id == txq->id, ("Mismatch between RX and TX queue ids"));
533 KASSERT(rxq->xen_intr_handle == txq->xen_intr_handle,
539 snprintf(path, path_size, "%s/queue-%u", node, rxq->id);
546 	err = xs_printf(*xst, path, "tx-ring-ref", "%u", txq->ring_ref);
548 message = "writing tx ring-ref";
551 	err = xs_printf(*xst, path, "rx-ring-ref", "%u", rxq->ring_ref);
553 message = "writing rx ring-ref";
556 err = xs_printf(*xst, path, "event-channel", "%u",
557 xen_intr_port(rxq->xen_intr_handle));
559 message = "writing event-channel";
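
write_queue_xenstore_keys() publishes each queue's shared-ring grant references and its event channel under a per-queue path built from the front-end node and the queue id, and the KASSERTs above confirm that one event channel is shared by a queue's RX and TX rings. The toy program below merely prints what that layout might look like for queue 0; the node name and the placeholder values are assumptions, not read from a real XenStore:

    #include <stdio.h>

    int
    main(void)
    {
        char path[64];
        unsigned int queue = 0;

        /* Hypothetical front-end node; the driver gets it from xenbus. */
        snprintf(path, sizeof(path), "%s/queue-%u", "device/vif/0", queue);

        printf("%s/tx-ring-ref   = <TX shared-ring grant reference>\n", path);
        printf("%s/rx-ring-ref   = <RX shared-ring grant reference>\n", path);
        printf("%s/event-channel = <port shared by the RX and TX rings>\n", path);
        return (0);
    }
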
585 err = xen_net_read_mac(dev, info->mac);
591 err = xs_scanf(XST_NIL, xenbus_get_otherend_path(info->xbdev),
592 "multi-queue-max-queues", NULL, "%lu", &max_queues);
612 if (info->num_queues == 1) {
613 err = write_queue_xenstore_keys(dev, &info->rxq[0],
614 &info->txq[0], &xst, false);
618 err = xs_printf(xst, node, "multi-queue-num-queues",
619 "%u", info->num_queues);
621 message = "writing multi-queue-num-queues";
625 for (i = 0; i < info->num_queues; i++) {
626 err = write_queue_xenstore_keys(dev, &info->rxq[i],
627 &info->txq[i], &xst, true);
633 err = xs_printf(xst, node, "request-rx-copy", "%u", 1);
635 message = "writing request-rx-copy";
638 err = xs_printf(xst, node, "feature-rx-notify", "%d", 1);
640 message = "writing feature-rx-notify";
643 err = xs_printf(xst, node, "feature-sg", "%d", 1);
645 message = "writing feature-sg";
648 if ((if_getcapenable(info->xn_ifp) & IFCAP_LRO) != 0) {
649 err = xs_printf(xst, node, "feature-gso-tcpv4", "%d", 1);
651 message = "writing feature-gso-tcpv4";
655 if ((if_getcapenable(info->xn_ifp) & IFCAP_RXCSUM) == 0) {
656 err = xs_printf(xst, node, "feature-no-csum-offload", "%d", 1);
658 message = "writing feature-no-csum-offload";
693 xn_txq_start(struct netfront_txq *txq)
695 struct netfront_info *np = txq->info;
696 if_t ifp = np->xn_ifp;
698 XN_TX_LOCK_ASSERT(txq);
699 if (!drbr_empty(ifp, txq->br))
700 xn_txq_mq_start_locked(txq, NULL);
704 xn_txq_intr(struct netfront_txq *txq)
707 XN_TX_LOCK(txq);
708 if (RING_HAS_UNCONSUMED_RESPONSES(&txq->ring))
709 xn_txeof(txq);
710 xn_txq_start(txq);
711 XN_TX_UNLOCK(txq);
717 struct netfront_txq *txq = xtxq;
719 XN_TX_LOCK(txq);
720 xn_txq_start(txq);
721 XN_TX_UNLOCK(txq);
729 gnttab_free_grant_references(rxq->gref_head);
730 if (rxq->ring_ref != GRANT_REF_INVALID) {
731 gnttab_end_foreign_access(rxq->ring_ref, NULL);
732 rxq->ring_ref = GRANT_REF_INVALID;
739 rxq->xen_intr_handle = 0;
746 callout_drain(&rxq->rx_refill);
747 free(rxq->ring.sring, M_DEVBUF);
748 rxq->ring.sring = NULL;
756 for (i = 0; i < np->num_queues; i++)
757 destroy_rxq(&np->rxq[i]);
759 free(np->rxq, M_DEVBUF);
760 np->rxq = NULL;
772 info->rxq = malloc(sizeof(struct netfront_rxq) * num_queues,
776 rxq = &info->rxq[q];
778 rxq->id = q;
779 rxq->info = info;
781 rxq->gref_head = GNTTAB_LIST_END;
782 rxq->ring_ref = GRANT_REF_INVALID;
783 rxq->ring.sring = NULL;
784 snprintf(rxq->name, XN_QUEUE_NAME_LEN, "xnrx_%u", q);
785 mtx_init(&rxq->lock, rxq->name, "netfront receive lock",
789 rxq->mbufs[i] = NULL;
790 rxq->grant_ref[i] = GRANT_REF_INVALID;
796 &rxq->gref_head) != 0) {
805 FRONT_RING_INIT(&rxq->ring, rxs, PAGE_SIZE);
808 &rxq->ring_ref);
814 callout_init(&rxq->rx_refill, 1);
820 gnttab_free_grant_references(rxq->gref_head);
821 free(rxq->ring.sring, M_DEVBUF);
823 for (; q >= 0; q--) {
824 disconnect_rxq(&info->rxq[q]);
825 destroy_rxq(&info->rxq[q]);
828 free(info->rxq, M_DEVBUF);
833 disconnect_txq(struct netfront_txq *txq)
836 xn_release_tx_bufs(txq);
837 gnttab_free_grant_references(txq->gref_head);
838 if (txq->ring_ref != GRANT_REF_INVALID) {
839 gnttab_end_foreign_access(txq->ring_ref, NULL);
840 txq->ring_ref = GRANT_REF_INVALID;
842 xen_intr_unbind(&txq->xen_intr_handle);
846 destroy_txq(struct netfront_txq *txq)
850 free(txq->ring.sring, M_DEVBUF);
851 txq->ring.sring = NULL;
852 buf_ring_free(txq->br, M_DEVBUF);
853 txq->br = NULL;
854 if (txq->tq) {
855 taskqueue_drain_all(txq->tq);
856 taskqueue_free(txq->tq);
857 txq->tq = NULL;
861 bus_dmamap_destroy(txq->info->dma_tag,
862 txq->xennet_tag[i].dma_map);
863 txq->xennet_tag[i].dma_map = NULL;
872 for (i = 0; i < np->num_queues; i++)
873 destroy_txq(&np->txq[i]);
875 free(np->txq, M_DEVBUF);
876 np->txq = NULL;
886 struct netfront_txq *txq;
888 info->txq = malloc(sizeof(struct netfront_txq) * num_queues,
892 txq = &info->txq[q];
894 txq->id = q;
895 txq->info = info;
897 txq->gref_head = GNTTAB_LIST_END;
898 txq->ring_ref = GRANT_REF_INVALID;
899 txq->ring.sring = NULL;
901 snprintf(txq->name, XN_QUEUE_NAME_LEN, "xntx_%u", q);
903 mtx_init(&txq->lock, txq->name, "netfront transmit lock",
905 SLIST_INIT(&txq->tags);
908 txq->mbufs[i] = (void *) ((u_long) i+1);
909 txq->grant_ref[i] = GRANT_REF_INVALID;
910 txq->xennet_tag[i].txq = txq;
911 txq->xennet_tag[i].dma_tag = info->dma_tag;
912 error = bus_dmamap_create(info->dma_tag, 0,
913 &txq->xennet_tag[i].dma_map);
919 m_tag_setup(&txq->xennet_tag[i].tag,
921 sizeof(txq->xennet_tag[i]) -
922 sizeof(txq->xennet_tag[i].tag));
923 txq->xennet_tag[i].tag.m_tag_free = &tag_free;
924 SLIST_INSERT_HEAD(&txq->tags, &txq->xennet_tag[i],
927 txq->mbufs[NET_TX_RING_SIZE] = (void *)0;
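
While a transmit slot is unused, txq->mbufs[] doubles as a free list: each free entry stores the index of the next free slot cast to a pointer, slot 0 acts as the head of the list, and the final entry is zeroed to terminate the chain, so ids run from 1 to NET_TX_RING_SIZE. A standalone sketch of that encoding, with simplified get_id()/put_id() helpers standing in for the driver's get_id_from_freelist()/add_id_to_freelist():

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 4         /* toy size; the driver uses NET_TX_RING_SIZE */

    /* Free slots hold the next free index as a pointer; slot 0 is the head. */
    static void *slots[RING_SIZE + 1];

    static void
    freelist_init(void)
    {
        uintptr_t i;

        for (i = 0; i <= RING_SIZE; i++)
            slots[i] = (void *)(i + 1);
        slots[RING_SIZE] = (void *)0;   /* end of the chain */
    }

    static uintptr_t
    get_id(void)
    {
        uintptr_t id = (uintptr_t)slots[0];

        assert(id != 0);                /* free list must not be empty */
        slots[0] = slots[id];
        return (id);
    }

    static void
    put_id(uintptr_t id)
    {
        slots[id] = slots[0];
        slots[0] = (void *)id;
    }

    int
    main(void)
    {
        uintptr_t a, b;

        freelist_init();
        a = get_id();
        b = get_id();
        printf("allocated ids %lu and %lu\n", (unsigned long)a, (unsigned long)b);
        put_id(a);
        assert(get_id() == a);          /* LIFO reuse of the returned id */
        return (0);
    }
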
932 &txq->gref_head) != 0) {
941 FRONT_RING_INIT(&txq->ring, txs, PAGE_SIZE);
944 &txq->ring_ref);
950 txq->br = buf_ring_alloc(NET_TX_RING_SIZE, M_DEVBUF,
951 M_WAITOK, &txq->lock);
952 TASK_INIT(&txq->defrtask, 0, xn_txq_tq_deferred, txq);
954 txq->tq = taskqueue_create(txq->name, M_WAITOK,
955 taskqueue_thread_enqueue, &txq->tq);
957 error = taskqueue_start_threads(&txq->tq, 1, PI_NET,
958 "%s txq %d", device_get_nameunit(dev), txq->id);
961 txq->id);
967 &info->txq[q], INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY,
968 &txq->xen_intr_handle);
979 taskqueue_drain_all(txq->tq);
981 buf_ring_free(txq->br, M_DEVBUF);
982 taskqueue_free(txq->tq);
983 gnttab_end_foreign_access(txq->ring_ref, NULL);
985 gnttab_free_grant_references(txq->gref_head);
986 free(txq->ring.sring, M_DEVBUF);
988 for (; q >= 0; q--) {
989 disconnect_txq(&info->txq[q]);
990 destroy_txq(&info->txq[q]);
993 free(info->txq, M_DEVBUF);
1004 if (info->txq)
1007 if (info->rxq)
1010 info->num_queues = 0;
1019 info->num_queues = num_queues;
1023 info->rxq[q].xen_intr_handle = info->txq[q].xen_intr_handle;
1042 CURVNET_SET(if_getvnet(sc->xn_ifp));
1057 xenbus_set_state(sc->xbdev, XenbusStateConnected);
1064 if (sc->xn_reset) {
1067 sc->xn_reset = false;
1077 EVENTHANDLER_INVOKE(iflladdr_event, sc->xn_ifp);
1093 xn_tx_slot_available(struct netfront_txq *txq)
1096 return (RING_FREE_REQUESTS(&txq->ring) > (MAX_TX_REQ_FRAGS + 2));
1100 xn_release_tx_bufs(struct netfront_txq *txq)
1107 m = txq->mbufs[i];
1113 * must be an index from free-list tracking.
1117 gnttab_end_foreign_access_ref(txq->grant_ref[i]);
1118 gnttab_release_grant_reference(&txq->gref_head,
1119 txq->grant_ref[i]);
1120 txq->grant_ref[i] = GRANT_REF_INVALID;
1121 add_id_to_freelist(txq->mbufs, i);
1122 txq->mbufs_cnt--;
1123 if (txq->mbufs_cnt < 0) {
1138 m->m_len = m->m_pkthdr.len = MJUMPAGESIZE;
1151 if (__predict_false(rxq->info->carrier == 0))
1154 for (req_prod = rxq->ring.req_prod_pvt;
1155 req_prod - rxq->ring.rsp_cons < NET_RX_RING_SIZE;
1169 KASSERT(rxq->mbufs[id] == NULL, ("non-NULL xn_rx_chain"));
1170 rxq->mbufs[id] = m;
1172 ref = gnttab_claim_grant_reference(&rxq->gref_head);
1175 rxq->grant_ref[id] = ref;
1178 req = RING_GET_REQUEST(&rxq->ring, req_prod);
1181 xenbus_get_otherend_id(rxq->info->xbdev), pfn, 0);
1182 req->id = id;
1183 req->gref = ref;
1186 rxq->ring.req_prod_pvt = req_prod;
1189 if (req_prod - rxq->ring.rsp_cons < NET_RX_SLOTS_MIN) {
1190 callout_reset_curcpu(&rxq->rx_refill, hz/10,
1197 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rxq->ring, notify);
1199 xen_intr_signal(rxq->xen_intr_handle);
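
xn_alloc_rx_buffers() batches RX requests onto the shared ring; if it ends up with fewer than NET_RX_SLOTS_MIN requests outstanding it arms the rx_refill callout to retry later, and otherwise it publishes the new producer index and signals the backend only when the backend asked to be woken inside the newly published range. The standalone model below captures just that notification check; struct ring and push_and_check_notify() are simplified stand-ins for the shared ring and RING_PUSH_REQUESTS_AND_CHECK_NOTIFY():

    #include <stdio.h>

    /* Minimal model of the producer-side shared-ring bookkeeping. */
    struct ring {
        unsigned int req_prod;      /* last published producer index */
        unsigned int req_event;     /* backend wants a kick at this index */
    };

    /*
     * Publish new requests and decide whether to send an event: notify only
     * if the backend's req_event falls inside the newly published range.
     */
    static int
    push_and_check_notify(struct ring *r, unsigned int new_prod)
    {
        unsigned int old = r->req_prod;

        r->req_prod = new_prod;
        return ((unsigned int)(new_prod - r->req_event) <
            (unsigned int)(new_prod - old));
    }

    int
    main(void)
    {
        struct ring r = { .req_prod = 10, .req_event = 11 };

        printf("notify: %d\n", push_and_check_notify(&r, 14));  /* 1: kick */
        r.req_event = 20;
        printf("notify: %d\n", push_and_check_notify(&r, 16));  /* 0: stay quiet */
        return (0);
    }
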
1219 m = rxq->mbufs[i];
1224 ref = rxq->grant_ref[i];
1229 gnttab_release_grant_reference(&rxq->gref_head, ref);
1230 rxq->mbufs[i] = NULL;
1231 rxq->grant_ref[i] = GRANT_REF_INVALID;
1240 struct netfront_info *np = rxq->info;
1242 struct lro_ctrl *lro = &rxq->lro;
1261 ifp = np->xn_ifp;
1264 rp = rxq->ring.sring->rsp_prod;
1267 i = rxq->ring.rsp_cons;
1269 memcpy(rx, RING_GET_RESPONSE(&rxq->ring, i), sizeof(*rx));
1282 m->m_pkthdr.rcvif = ifp;
1283 if (rx->flags & NETRXF_data_validated) {
1293 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID
1295 m->m_pkthdr.csum_data = 0xffff;
1297 if ((rx->flags & NETRXF_extra_info) != 0 &&
1298 (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type ==
1300 m->m_pkthdr.tso_segsz =
1301 extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].u.gso.size;
1302 m->m_pkthdr.csum_flags |= CSUM_TSO;
1308 rxq->ring.rsp_cons = i;
1312 RING_FINAL_CHECK_FOR_RESPONSES(&rxq->ring, work_to_do);
1325 lro->lro_cnt == 0 || tcp_lro_rx(lro, m, 0)) {
1346 xn_txeof(struct netfront_txq *txq)
1353 struct netfront_info *np = txq->info;
1355 XN_TX_LOCK_ASSERT(txq);
1360 ifp = np->xn_ifp;
1363 prod = txq->ring.sring->rsp_prod;
1366 for (i = txq->ring.rsp_cons; i != prod; i++) {
1367 txr = RING_GET_RESPONSE(&txq->ring, i);
1368 if (txr->status == NETIF_RSP_NULL)
1371 if (txr->status != NETIF_RSP_OKAY) {
1373 __func__, txr->status);
1375 id = txr->id;
1376 m = txq->mbufs[id];
1384 txq->grant_ref[id]) != 0)) {
1388 gnttab_end_foreign_access_ref(txq->grant_ref[id]);
1390 &txq->gref_head, txq->grant_ref[id]);
1391 txq->grant_ref[id] = GRANT_REF_INVALID;
1393 txq->mbufs[id] = NULL;
1394 add_id_to_freelist(txq->mbufs, id);
1395 txq->mbufs_cnt--;
1397 /* Only mark the txq active if we've freed up at least one slot to try */
1400 txq->ring.rsp_cons = prod;
1411 txq->ring.sring->rsp_event =
1412 prod + ((txq->ring.sring->req_prod - prod) >> 1) + 1;
1415 } while (prod != txq->ring.sring->rsp_prod);
1417 if (txq->full &&
1418 ((txq->ring.sring->req_prod - prod) < NET_TX_RING_SIZE)) {
1419 txq->full = false;
1420 xn_txq_start(txq);
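
After reaping completions, xn_txeof() throttles further TX interrupts by setting rsp_event so the backend raises the next event only once roughly half of the still-outstanding requests have produced responses. A small worked example with made-up ring indices:

    #include <stdio.h>

    int
    main(void)
    {
        unsigned int prod = 100;        /* responses consumed so far */
        unsigned int req_prod = 140;    /* requests submitted so far */
        unsigned int rsp_event = prod + ((req_prod - prod) >> 1) + 1;

        /* 40 requests are in flight, so ask for an event at response 121. */
        printf("next TX event requested at response %u\n", rsp_event);
        return (0);
    }
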
1427 struct netfront_txq *txq = xsc;
1428 struct netfront_info *np = txq->info;
1429 struct netfront_rxq *rxq = &np->rxq[txq->id];
1433 xn_txq_intr(txq);
1440 int new = xn_rxidx(rxq->ring.req_prod_pvt);
1442 KASSERT(rxq->mbufs[new] == NULL, ("mbufs != NULL"));
1443 rxq->mbufs[new] = m;
1444 rxq->grant_ref[new] = ref;
1445 RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->id = new;
1446 RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->gref = ref;
1447 rxq->ring.req_prod_pvt++;
1468 RING_GET_RESPONSE(&rxq->ring, ++(*cons));
1470 if (__predict_false(!extra->type ||
1471 extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1474 memcpy(&extras[extra->type - 1], extra, sizeof(*extra));
1480 } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
1490 struct netif_rx_response *rx = &rinfo->rx;
1491 struct netif_extra_info *extras = rinfo->extras;
1500 if (rx->flags & NETRXF_extra_info) {
1505 m0->m_pkthdr.len = 0;
1506 m0->m_next = NULL;
1511 DPRINTK("rx->status=%hd rx->offset=%hu frags=%u\n",
1512 rx->status, rx->offset, frags);
1514 if (__predict_false(rx->status < 0 ||
1515 rx->offset + rx->status > PAGE_SIZE)) {
1530 printf("%s: Bad rx response id %d.\n", __func__, rx->id);
1538 gnttab_release_grant_reference(&rxq->gref_head, ref);
1544 m->m_len = rx->status;
1545 m->m_data += rx->offset;
1546 m0->m_pkthdr.len += rx->status;
1549 if (!(rx->flags & NETRXF_more_data))
1561 * Note that m can be NULL, if rx->status < 0 or if
1562 * rx->offset + rx->status > PAGE_SIZE above.
1566 rx = RING_GET_RESPONSE(&rxq->ring, *cons + frags);
1570 * m_prev == NULL can happen if rx->status < 0 or if
1571 			 * rx->offset + rx->status > PAGE_SIZE above.
1574 m_prev->m_next = m;
1577 			 * m0 can be NULL if rx->status < 0 or if rx->offset +
1578 * rx->status > PAGE_SIZE above.
1582 m->m_next = NULL;
1597 xn_assemble_tx_request(struct netfront_txq *txq, struct mbuf *m_head)
1599 struct netfront_info *np = txq->info;
1600 if_t ifp = np->xn_ifp;
1602 bus_dma_segment_t *segs = txq->segs;
1607 KASSERT(!SLIST_EMPTY(&txq->tags), ("no tags available"));
1608 tag = SLIST_FIRST(&txq->tags);
1609 SLIST_REMOVE_HEAD(&txq->tags, next);
1610 KASSERT(tag->count == 0, ("tag already in-use"));
1611 map = tag->dma_map;
1612 error = bus_dmamap_load_mbuf_sg(np->dma_tag, map, m_head, segs,
1614 if (error == EFBIG || nfrags > np->maxfrags) {
1617 bus_dmamap_unload(np->dma_tag, map);
1624 SLIST_INSERT_HEAD(&txq->tags, tag, next);
1629 error = bus_dmamap_load_mbuf_sg(np->dma_tag, map, m_head, segs,
1631 if (error != 0 || nfrags > np->maxfrags) {
1632 bus_dmamap_unload(np->dma_tag, map);
1633 SLIST_INSERT_HEAD(&txq->tags, tag, next);
1638 SLIST_INSERT_HEAD(&txq->tags, tag, next);
1646 * pass a too-long chain over to the other side by dropping the
1656 SLIST_INSERT_HEAD(&txq->tags, tag, next);
1657 bus_dmamap_unload(np->dma_tag, map);
1669 KASSERT((txq->mbufs_cnt + nfrags) <= NET_TX_RING_SIZE,
1671 "(%d)!", __func__, (int) txq->mbufs_cnt,
1679 otherend_id = xenbus_get_otherend_id(np->xbdev);
1680 m_tag_prepend(m_head, &tag->tag);
1687 tx = RING_GET_REQUEST(&txq->ring, txq->ring.req_prod_pvt);
1688 id = get_id_from_freelist(txq->mbufs);
1692 txq->mbufs_cnt++;
1693 if (txq->mbufs_cnt > NET_TX_RING_SIZE)
1697 txq->mbufs[id] = m_head;
1698 tx->id = id;
1699 ref = gnttab_claim_grant_reference(&txq->gref_head);
1704 tx->gref = txq->grant_ref[id] = ref;
1705 tx->offset = segs[i].ds_addr & PAGE_MASK;
1706 KASSERT(tx->offset + segs[i].ds_len <= PAGE_SIZE,
1708 tx->flags = 0;
1718 tx->size = m_head->m_pkthdr.len;
1732 if (m_head->m_pkthdr.csum_flags
1734 tx->flags |= (NETTXF_csum_blank
1737 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
1740 RING_GET_REQUEST(&txq->ring,
1741 ++txq->ring.req_prod_pvt);
1743 tx->flags |= NETTXF_extra_info;
1745 gso->u.gso.size = m_head->m_pkthdr.tso_segsz;
1746 gso->u.gso.type =
1748 gso->u.gso.pad = 0;
1749 gso->u.gso.features = 0;
1751 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
1752 gso->flags = 0;
1755 tx->size = segs[i].ds_len;
1757 if (i != nfrags - 1)
1758 tx->flags |= NETTXF_more_data;
1760 txq->ring.req_prod_pvt++;
1762 bus_dmamap_sync(np->dma_tag, map, BUS_DMASYNC_PREWRITE);
1766 if_inc_counter(ifp, IFCOUNTER_OBYTES, m_head->m_pkthdr.len);
1767 if (m_head->m_flags & M_MCAST)
1770 xn_txeof(txq);
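
xn_assemble_tx_request() turns one DMA-mapped packet into a chain of ring slots: the first slot's size field carries the whole packet length, the remaining slots carry their own segment length, and more_data is set on every slot except the last. The sketch below only prints that layout for an assumed three-segment packet; the segment sizes are made up:

    #include <stdio.h>

    int
    main(void)
    {
        unsigned int seg_len[] = { 1448, 1448, 600 };   /* hypothetical segments */
        unsigned int nsegs = 3, pkt_len = 0, i;

        for (i = 0; i < nsegs; i++)
            pkt_len += seg_len[i];
        for (i = 0; i < nsegs; i++)
            printf("slot %u: size=%u more_data=%d\n", i,
                i == 0 ? pkt_len : seg_len[i], i != nsegs - 1);
        return (0);
    }
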
1785 ifp = np->xn_ifp;
1792 for (i = 0; i < np->num_queues; i++) {
1793 rxq = &np->rxq[i];
1796 rxq->ring.sring->rsp_event = rxq->ring.rsp_cons + 1;
1797 if (RING_HAS_UNCONSUMED_RESPONSES(&rxq->ring))
1828 dev = sc->xbdev;
1834 if (ifa->ifa_addr->sa_family == AF_INET) {
1849 if (if_getmtu(ifp) == ifr->ifr_mtu)
1852 if_setmtu(ifp, ifr->ifr_mtu);
1863 * a full re-init means reloading the firmware and
1873 sc->xn_if_flags = if_getflags(ifp);
1877 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
1906 device_printf(sc->xbdev,
1910 sc->xn_reset = true;
1926 xs_rm(XST_NIL, xenbus_get_node(dev), "feature-gso-tcpv4");
1927 xs_rm(XST_NIL, xenbus_get_node(dev), "feature-no-csum-offload");
1942 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1958 ifp = sc->xn_ifp;
1975 if (rxq->mbufs[i] == NULL)
1978 m = rxq->mbufs[requeue_idx] = xn_get_rx_mbuf(rxq, i);
1979 ref = rxq->grant_ref[requeue_idx] = xn_get_rx_ref(rxq, i);
1981 req = RING_GET_REQUEST(&rxq->ring, requeue_idx);
1985 xenbus_get_otherend_id(rxq->info->xbdev),
1988 req->gref = ref;
1989 req->id = requeue_idx;
1994 rxq->ring.req_prod_pvt = requeue_idx;
2004 struct netfront_txq *txq;
2006 error = xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
2007 "feature-rx-copy", NULL, "%u", &feature_rx_copy);
2016 error = talk_to_backend(np->xbdev, np);
2025 for (i = 0; i < np->num_queues; i++) {
2026 txq = &np->txq[i];
2027 xn_release_tx_bufs(txq);
2031 for (i = 0; i < np->num_queues; i++) {
2032 rxq = &np->rxq[i];
2051 struct netfront_txq *txq;
2054 for (i = 0; i < np->num_queues; i++) {
2055 txq = &np->txq[i];
2056 rxq = &np->rxq[i];
2057 xen_intr_signal(txq->xen_intr_handle);
2058 XN_TX_LOCK(txq);
2059 xn_txeof(txq);
2060 XN_TX_UNLOCK(txq);
2072 device_printf(np->xbdev, "backend features:");
2074 if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
2075 "feature-sg", NULL, "%d", &val) != 0)
2078 np->maxfrags = 1;
2080 np->maxfrags = MAX_TX_REQ_FRAGS;
2081 printf(" feature-sg");
2084 if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
2085 "feature-gso-tcpv4", NULL, "%d", &val) != 0)
2088 if_setcapabilitiesbit(np->xn_ifp, 0, IFCAP_TSO4 | IFCAP_LRO);
2090 if_setcapabilitiesbit(np->xn_ifp, IFCAP_TSO4 | IFCAP_LRO, 0);
2091 printf(" feature-gso-tcp4");
2096 * feature-no-csum-offload is set in xenstore.
2098 if (xs_scanf(XST_NIL, xenbus_get_otherend_path(np->xbdev),
2099 "feature-no-csum-offload", NULL, "%d", &val) != 0)
2102 if_setcapabilitiesbit(np->xn_ifp, IFCAP_HWCSUM, 0);
2104 if_setcapabilitiesbit(np->xn_ifp, 0, IFCAP_HWCSUM);
2105 printf(" feature-no-csum-offload");
2120 ifp = np->xn_ifp;
2135 for (i = 0; i < np->num_queues; i++)
2136 tcp_lro_free(&np->rxq[i].lro);
2140 for (i = 0; i < np->num_queues; i++) {
2141 err = tcp_lro_init(&np->rxq[i].lro);
2143 device_printf(np->xbdev,
2148 np->rxq[i].lro.ifp = ifp;
2167 xn_txq_mq_start_locked(struct netfront_txq *txq, struct mbuf *m)
2174 np = txq->info;
2175 br = txq->br;
2176 ifp = np->xn_ifp;
2179 XN_TX_LOCK_ASSERT(txq);
2195 if (!xn_tx_slot_available(txq)) {
2200 error = xn_assemble_tx_request(txq, m);
2207 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&txq->ring, notify);
2209 xen_intr_signal(txq->xen_intr_handle);
2214 if (RING_FULL(&txq->ring))
2215 txq->full = true;
2224 struct netfront_txq *txq;
2228 npairs = np->num_queues;
2237 i = m->m_pkthdr.flowid % npairs;
2241 txq = &np->txq[i];
2243 if (XN_TX_TRYLOCK(txq) != 0) {
2244 error = xn_txq_mq_start_locked(txq, m);
2245 XN_TX_UNLOCK(txq);
2247 error = drbr_enqueue(ifp, txq->br, m);
2248 taskqueue_enqueue(txq->tq, &txq->defrtask);
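
xn_txq_mq_start() uses the packet's flow id to pick a transmit queue (flowid % num_queues) and then tries that queue's lock: if the lock is free the packet is handled inline, otherwise it is enqueued on the queue's buf_ring and the per-queue deferred task drains it later. A standalone sketch of that try-or-defer policy, using a pthread mutex in place of the driver's mtx (compile with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t txq_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Transmit inline when the queue lock is free, otherwise defer. */
    static void
    dispatch(const char *pkt)
    {
        if (pthread_mutex_trylock(&txq_lock) == 0) {
            printf("direct transmit of %s\n", pkt);
            pthread_mutex_unlock(&txq_lock);
        } else {
            /* In the driver: drbr_enqueue() plus taskqueue_enqueue(). */
            printf("deferred %s to the per-queue task\n", pkt);
        }
    }

    int
    main(void)
    {
        dispatch("pkt0");               /* lock free: sent inline */
        pthread_mutex_lock(&txq_lock);
        dispatch("pkt1");               /* lock contended: deferred */
        pthread_mutex_unlock(&txq_lock);
        return (0);
    }
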
2258 struct netfront_txq *txq;
2264 for (i = 0; i < np->num_queues; i++) {
2265 txq = &np->txq[i];
2267 XN_TX_LOCK(txq);
2268 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL)
2270 XN_TX_UNLOCK(txq);
2289 np->xbdev = dev;
2291 mtx_init(&np->sc_lock, "xnsc", "netfront softc lock", MTX_DEF);
2293 ifmedia_init(&np->sc_media, 0, xn_ifmedia_upd, xn_ifmedia_sts);
2294 ifmedia_add(&np->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
2295 ifmedia_set(&np->sc_media, IFM_ETHER|IFM_MANUAL);
2297 err = xen_net_read_mac(dev, np->mac);
2302 ifp = np->xn_ifp = if_alloc(IFT_ETHER);
2322 if_sethwtsomax(ifp, 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
2326 ether_ifattach(ifp, np->mac);
2340 &np->dma_tag);
2369 ether_ifdetach(np->xn_ifp);
2370 free(np->rxq, M_DEVBUF);
2371 free(np->txq, M_DEVBUF);
2372 if_free(np->xn_ifp);
2373 np->xn_ifp = NULL;
2374 ifmedia_removeall(&np->sc_media);
2375 bus_dma_tag_destroy(np->dma_tag);
2383 for (i = 0; i < np->num_queues; i++) {
2384 XN_RX_LOCK(&np->rxq[i]);
2385 XN_TX_LOCK(&np->txq[i]);
2388 for (i = 0; i < np->num_queues; i++) {
2389 XN_RX_UNLOCK(&np->rxq[i]);
2390 XN_TX_UNLOCK(&np->txq[i]);
2393 for (i = 0; i < np->num_queues; i++) {
2394 disconnect_rxq(&np->rxq[i]);
2395 disconnect_txq(&np->txq[i]);
2410 ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE;
2411 ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;