Lines matching "queue", "pkt", "rx"

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2009-2011 Spectra Logic Corporation
80 #include <xen/xen-os.h>
86 /*--------------------------- Compile-time Tunables --------------------------*/
88 /*---------------------------------- Macros ----------------------------------*/
94 #define XNB_SG 1 /* netback driver supports feature-sg */
95 #define XNB_GSO_TCPV4 0 /* netback driver supports feature-gso-tcpv4 */
96 #define XNB_RX_COPY 1 /* netback driver supports feature-rx-copy */
97 #define XNB_RX_FLIP 0 /* netback driver does not support feature-rx-flip */
109 /* Default length for stack-allocated grant tables */
123 unsigned int req = (_r)->sring->req_prod - cons; \
124 unsigned int rsp = RING_SIZE(_r) - \
125 (cons - (_r)->rsp_prod_pvt); \
130 #define virt_to_offset(x) ((x) & (PAGE_SIZE - 1))
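/*
 * Editor's sketch (not part of the driver): the macro body excerpted above
 * bounds the amount of work by both the number of unconsumed requests and the
 * number of free response slots, and presumably evaluates to min(req, rsp).
 * A plain function with the same shape, under that assumption:
 */
static inline unsigned int
example_unconsumed(unsigned int req_prod, unsigned int req_cons,
    unsigned int rsp_prod_pvt, unsigned int ring_size)
{
	unsigned int req = req_prod - req_cons;		/* requests not yet read */
	unsigned int rsp = ring_size -
	    (req_cons - rsp_prod_pvt);			/* free response slots */

	return (req < rsp ? req : rsp);
}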
138 /*--------------------------- Forward Declarations ---------------------------*/
156 static int xnb_ring2pkt(struct xnb_pkt *pkt,
159 static void xnb_txpkt2rsp(const struct xnb_pkt *pkt,
161 static struct mbuf *xnb_pkt2mbufc(const struct xnb_pkt *pkt, if_t ifp);
162 static int xnb_txpkt2gnttab(const struct xnb_pkt *pkt,
170 struct xnb_pkt *pkt,
172 static int xnb_rxpkt2gnttab(const struct xnb_pkt *pkt,
177 static int xnb_rxpkt2rsp(const struct xnb_pkt *pkt,
193 /*------------------------------ Data Structures -----------------------------*/
197 * stored in the Xen tx ring. Applicable to both RX and TX packets
201 * Array index of the first data-bearing (e.g., not extra info) entry
207 * Array index of the second data-bearing entry for this packet.
208 * Invalid if the packet has only one data-bearing entry. If the
209 * packet has more than two data-bearing entries, then the second
230 * not the same for TX and RX packets
235 * The number of valid data-bearing entries (either netif_tx_request's
256 pxnb->error = 0; in xnb_pkt_validate()
263 pxnb->error = 1; in xnb_pkt_invalidate()
270 return (! pxnb->error); in xnb_pkt_is_valid()
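/*
 * Editor's illustration (not from the source): for a TX packet built from
 * three data-bearing netif_tx_request slots at ring indices 10..12 and no
 * extra-info slot, the fields would read roughly
 *
 *	pkt.car = 10, pkt.cdr = 11, pkt.list_len = 3,
 *
 * with the third request implicitly at pkt.cdr + 1.  pkt.car_size covers the
 * bytes described by the first request; pkt.size covers the whole packet.
 */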
274 /** xnb_pkt method: print the packet's contents in human-readable format */
276 xnb_dump_pkt(const struct xnb_pkt *pkt) { in xnb_dump_pkt() argument
277 if (pkt == NULL) { in xnb_dump_pkt()
281 DPRINTF("pkt address= %p\n", pkt); in xnb_dump_pkt()
282 DPRINTF("pkt->size=%d\n", pkt->size); in xnb_dump_pkt()
283 DPRINTF("pkt->car_size=%d\n", pkt->car_size); in xnb_dump_pkt()
284 DPRINTF("pkt->flags=0x%04x\n", pkt->flags); in xnb_dump_pkt()
285 DPRINTF("pkt->list_len=%d\n", pkt->list_len); in xnb_dump_pkt()
286 /* DPRINTF("pkt->extra"); TODO */ in xnb_dump_pkt()
287 DPRINTF("pkt->car=%d\n", pkt->car); in xnb_dump_pkt()
288 DPRINTF("pkt->cdr=%d\n", pkt->cdr); in xnb_dump_pkt()
289 DPRINTF("pkt->error=%d\n", pkt->error); in xnb_dump_pkt()
298 DPRINTF("netif_tx_request.gref =%u\n", txreq->gref); in xnb_dump_txreq()
299 DPRINTF("netif_tx_request.offset=%hu\n", txreq->offset); in xnb_dump_txreq()
300 DPRINTF("netif_tx_request.flags =%hu\n", txreq->flags); in xnb_dump_txreq()
301 DPRINTF("netif_tx_request.id =%hu\n", txreq->id); in xnb_dump_txreq()
302 DPRINTF("netif_tx_request.size =%hu\n", txreq->size); in xnb_dump_txreq()
308 * used to communicate with the front-end client of this
313 * Runtime structures for ring access. Unfortunately, TX and RX rings
329 /** The pseudo-physical address where ring memory is mapped.*/
336 * Grant table handles, one per-ring page, returned by the
346 * The grant references, one per-ring page, supplied by the
347 * front-end, allowing us to reference the ring pages in the
348 * front-end's domain and to map these pages into our own domain.
354 * Per-instance connection state flags.
358 /** Communication with the front-end has been established. */
362 * Front-end requests exist in the ring and are waiting for
380 XNB_RING_TYPE_RX = 1, /* ID of RX rings, used for array indices */
385 * Per-instance configuration data.
412 * 32bit x86 domains on the same machine). The back-end
413 * always accommodates the front-end's native abi. That
435 * \brief Cached value of the front-end's domain id.
457 * Preallocated grant table copy descriptor for RX operations.
470 * associated with our per-instance kva region.
486 /** Pseudo-physical address corresponding to kva. */
492 /** Mutex protecting per-instance data in the receive path. */
495 /** Mutex protecting per-instance data in the softc structure. */
498 /** Mutex protecting per-instance data in the transmit path. */
508 /*---------------------------- Debugging functions ---------------------------*/
518 if (entry->flags & GNTCOPY_dest_gref) in xnb_dump_gnttab_copy()
519 printf("gnttab dest ref=\t%u\n", entry->dest.u.ref); in xnb_dump_gnttab_copy()
522 entry->dest.u.gmfn); in xnb_dump_gnttab_copy()
523 printf("gnttab dest offset=\t%hu\n", entry->dest.offset); in xnb_dump_gnttab_copy()
524 printf("gnttab dest domid=\t%hu\n", entry->dest.domid); in xnb_dump_gnttab_copy()
525 if (entry->flags & GNTCOPY_source_gref) in xnb_dump_gnttab_copy()
526 printf("gnttab source ref=\t%u\n", entry->source.u.ref); in xnb_dump_gnttab_copy()
529 entry->source.u.gmfn); in xnb_dump_gnttab_copy()
530 printf("gnttab source offset=\t%hu\n", entry->source.offset); in xnb_dump_gnttab_copy()
531 printf("gnttab source domid=\t%hu\n", entry->source.domid); in xnb_dump_gnttab_copy()
532 printf("gnttab len=\t%hu\n", entry->len); in xnb_dump_gnttab_copy()
533 printf("gnttab flags=\t%hu\n", entry->flags); in xnb_dump_gnttab_copy()
534 printf("gnttab status=\t%hd\n", entry->status); in xnb_dump_gnttab_copy()
543 &xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring; in xnb_dump_rings()
545 &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring; in xnb_dump_rings()
550 if ( !txb || !txb->sring || !rxb || !rxb->sring ) in xnb_dump_rings()
554 "\n\t%35s %18s\n" /* TX, RX */ in xnb_dump_rings()
563 "TX", "RX", in xnb_dump_rings()
564 "req_cons", txb->req_cons, rxb->req_cons, in xnb_dump_rings()
565 "nr_ents", txb->nr_ents, rxb->nr_ents, in xnb_dump_rings()
566 "rsp_prod_pvt", txb->rsp_prod_pvt, rxb->rsp_prod_pvt, in xnb_dump_rings()
567 "sring", txb->sring, rxb->sring, in xnb_dump_rings()
568 "sring->req_prod", txb->sring->req_prod, rxb->sring->req_prod, in xnb_dump_rings()
569 "sring->req_event", txb->sring->req_event, rxb->sring->req_event, in xnb_dump_rings()
570 "sring->rsp_prod", txb->sring->rsp_prod, rxb->sring->rsp_prod, in xnb_dump_rings()
571 "sring->rsp_event", txb->sring->rsp_event, rxb->sring->rsp_event); in xnb_dump_rings()
585 if (m->m_flags & M_PKTHDR) { in xnb_dump_mbuf()
588 m->m_pkthdr.flowid, (int)m->m_pkthdr.csum_flags, in xnb_dump_mbuf()
589 m->m_pkthdr.csum_data, m->m_pkthdr.tso_segsz); in xnb_dump_mbuf()
591 m->m_pkthdr.rcvif, m->m_pkthdr.len); in xnb_dump_mbuf()
594 m->m_next, m->m_nextpkt, m->m_data); in xnb_dump_mbuf()
596 m->m_len, m->m_flags, m->m_type); in xnb_dump_mbuf()
598 len = m->m_len; in xnb_dump_mbuf()
603 for (i = 0; (i < 16) && (len > 0); i++, len--) { in xnb_dump_mbuf()
611 /*------------------------ Inter-Domain Communication ------------------------*/
613 * Free dynamically allocated KVA or pseudo-physical address allocations.
615 * \param xnb Per-instance xnb configuration structure.
620 if (xnb->kva != 0) { in xnb_free_communication_mem()
621 if (xnb->pseudo_phys_res != NULL) { in xnb_free_communication_mem()
622 xenmem_free(xnb->dev, xnb->pseudo_phys_res_id, in xnb_free_communication_mem()
623 xnb->pseudo_phys_res); in xnb_free_communication_mem()
624 xnb->pseudo_phys_res = NULL; in xnb_free_communication_mem()
627 xnb->kva = 0; in xnb_free_communication_mem()
628 xnb->gnt_base_addr = 0; in xnb_free_communication_mem()
632 * Cleanup all inter-domain communication mechanisms.
634 * \param xnb Per-instance xnb configuration structure.
643 if (xnb->xen_intr_handle != NULL) in xnb_disconnect()
644 xen_intr_unbind(&xnb->xen_intr_handle); in xnb_disconnect()
648 * must acquire the rx and tx locks to make sure those threads are done, in xnb_disconnect()
652 mtx_lock(&xnb->tx_lock); in xnb_disconnect()
653 mtx_unlock(&xnb->tx_lock); in xnb_disconnect()
654 mtx_lock(&xnb->rx_lock); in xnb_disconnect()
655 mtx_unlock(&xnb->rx_lock); in xnb_disconnect()
657 mtx_lock(&xnb->sc_lock); in xnb_disconnect()
659 if (xnb->bridge != NULL) { in xnb_disconnect()
660 free(xnb->bridge, M_XENSTORE); in xnb_disconnect()
661 xnb->bridge = NULL; in xnb_disconnect()
666 gnts[i].host_addr = xnb->ring_configs[i].gnt_addr; in xnb_disconnect()
667 gnts[i].dev_bus_addr = xnb->ring_configs[i].bus_addr; in xnb_disconnect()
668 gnts[i].handle = xnb->ring_configs[i].handle; in xnb_disconnect()
679 bzero(&xnb->ring_configs[XNB_RING_TYPE_TX], in xnb_disconnect()
681 bzero(&xnb->ring_configs[XNB_RING_TYPE_RX], in xnb_disconnect()
684 xnb->flags &= ~XNBF_RING_CONNECTED; in xnb_disconnect()
685 mtx_unlock(&xnb->sc_lock); in xnb_disconnect()
694 * \param xnb Per-instance xnb configuration structure
702 struct xnb_ring_config *ring = &xnb->ring_configs[ring_type]; in xnb_connect_ring()
705 /* TX ring type = 0, RX = 1 */ in xnb_connect_ring()
706 ring->va = xnb->kva + ring_type * PAGE_SIZE; in xnb_connect_ring()
707 ring->gnt_addr = xnb->gnt_base_addr + ring_type * PAGE_SIZE; in xnb_connect_ring()
709 gnt.host_addr = ring->gnt_addr; in xnb_connect_ring()
711 gnt.ref = ring->ring_ref; in xnb_connect_ring()
712 gnt.dom = xnb->otherend_id; in xnb_connect_ring()
719 ring->va = 0; in xnb_connect_ring()
721 xenbus_dev_fatal(xnb->dev, error, in xnb_connect_ring()
725 ring->handle = gnt.handle; in xnb_connect_ring()
726 ring->bus_addr = gnt.dev_bus_addr; in xnb_connect_ring()
729 BACK_RING_INIT(&ring->back_ring.tx_ring, in xnb_connect_ring()
730 (netif_tx_sring_t*)ring->va, in xnb_connect_ring()
731 ring->ring_pages * PAGE_SIZE); in xnb_connect_ring()
733 BACK_RING_INIT(&ring->back_ring.rx_ring, in xnb_connect_ring()
734 (netif_rx_sring_t*)ring->va, in xnb_connect_ring()
735 ring->ring_pages * PAGE_SIZE); in xnb_connect_ring()
737 xenbus_dev_fatal(xnb->dev, error, in xnb_connect_ring()
749 * \param xnb Per-instance xnb configuration structure.
757 if ((xnb->flags & XNBF_RING_CONNECTED) != 0) in xnb_connect_comms()
770 xnb->flags |= XNBF_RING_CONNECTED; in xnb_connect_comms()
772 error = xen_intr_bind_remote_port(xnb->dev, in xnb_connect_comms()
773 xnb->otherend_id, in xnb_connect_comms()
774 xnb->evtchn, in xnb_connect_comms()
778 &xnb->xen_intr_handle); in xnb_connect_comms()
781 xenbus_dev_fatal(xnb->dev, error, "binding event channel"); in xnb_connect_comms()
791 * Size KVA and pseudo-physical address allocations based on negotiated
795 * \param xnb Per-instance xnb configuration structure.
798 * front-end's domain into our own.
805 xnb->kva_size = 0; in xnb_alloc_communication_mem()
807 xnb->kva_size += xnb->ring_configs[i].ring_pages * PAGE_SIZE; in xnb_alloc_communication_mem()
813 * pages ("real memory") during the lifetime of front-end requests in xnb_alloc_communication_mem()
814 * via grant table operations. We will map the netif tx and rx rings in xnb_alloc_communication_mem()
817 xnb->pseudo_phys_res_id = 0; in xnb_alloc_communication_mem()
818 xnb->pseudo_phys_res = xenmem_alloc(xnb->dev, &xnb->pseudo_phys_res_id, in xnb_alloc_communication_mem()
819 xnb->kva_size); in xnb_alloc_communication_mem()
820 if (xnb->pseudo_phys_res == NULL) { in xnb_alloc_communication_mem()
821 xnb->kva = 0; in xnb_alloc_communication_mem()
824 xnb->kva = (vm_offset_t)rman_get_virtual(xnb->pseudo_phys_res); in xnb_alloc_communication_mem()
825 xnb->gnt_base_addr = rman_get_start(xnb->pseudo_phys_res); in xnb_alloc_communication_mem()
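/*
 * Editor's note (worked example, assuming the defaults set in xnb_attach,
 * where each ring uses a single page): the loop above sums
 * ring_configs[i].ring_pages * PAGE_SIZE over the TX and RX rings, so with
 * 4 KiB pages kva_size = 2 * 4096 = 8192 bytes of pseudo-physical address
 * space, one page per ring.
 */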
832 * \param xnb Per-instance xnb configuration structure.
840 * "feature-rx-notify" in xnb_collect_xenstore_info()
848 otherend_path = xenbus_get_otherend_path(xnb->dev); in xnb_collect_xenstore_info()
849 our_path = xenbus_get_node(xnb->dev); in xnb_collect_xenstore_info()
853 "tx-ring-ref", "%l" PRIu32, in xnb_collect_xenstore_info()
854 &xnb->ring_configs[XNB_RING_TYPE_TX].ring_ref, in xnb_collect_xenstore_info()
855 "rx-ring-ref", "%l" PRIu32, in xnb_collect_xenstore_info()
856 &xnb->ring_configs[XNB_RING_TYPE_RX].ring_ref, in xnb_collect_xenstore_info()
857 "event-channel", "%" PRIu32, &xnb->evtchn, in xnb_collect_xenstore_info()
860 xenbus_dev_fatal(xnb->dev, err, in xnb_collect_xenstore_info()
868 err = xs_scanf(XST_NIL, our_path, "handle", NULL, "%li", &xnb->handle); in xnb_collect_xenstore_info()
870 xenbus_dev_fatal(xnb->dev, err, in xnb_collect_xenstore_info()
880 (void**)&xnb->bridge); in xnb_collect_xenstore_info()
882 xnb->bridge = NULL; in xnb_collect_xenstore_info()
885 * Does the frontend request that we use rx copy? If not, return an in xnb_collect_xenstore_info()
886 * error because this driver only supports rx copy. in xnb_collect_xenstore_info()
888 err = xs_scanf(XST_NIL, otherend_path, "request-rx-copy", NULL, in xnb_collect_xenstore_info()
895 xenbus_dev_fatal(xnb->dev, err, "reading %s/request-rx-copy", in xnb_collect_xenstore_info()
907 /** \todo Collect the rx notify feature */ in xnb_collect_xenstore_info()
909 /* Collect the feature-sg. */ in xnb_collect_xenstore_info()
910 if (xs_scanf(XST_NIL, otherend_path, "feature-sg", NULL, in xnb_collect_xenstore_info()
911 "%hhu", &xnb->can_sg) < 0) in xnb_collect_xenstore_info()
912 xnb->can_sg = 0; in xnb_collect_xenstore_info()
915 if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4", NULL, in xnb_collect_xenstore_info()
916 "%hhu", &xnb->gso) < 0) in xnb_collect_xenstore_info()
917 xnb->gso = 0; in xnb_collect_xenstore_info()
919 if (xs_scanf(XST_NIL, otherend_path, "feature-gso-tcpv4-prefix", NULL, in xnb_collect_xenstore_info()
920 "%hhu", &xnb->gso_prefix) < 0) in xnb_collect_xenstore_info()
921 xnb->gso_prefix = 0; in xnb_collect_xenstore_info()
923 if (xs_scanf(XST_NIL, otherend_path, "feature-no-csum-offload", NULL, in xnb_collect_xenstore_info()
926 xnb->ip_csum = (no_csum_offload == 0); in xnb_collect_xenstore_info()
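/*
 * Editor's sketch of the XenStore nodes this routine consumes.  The key names
 * come from the calls above; the values and the "bridge" node name are
 * illustrative only:
 *
 *	<frontend>/tx-ring-ref			= "8"	grant ref of TX ring page
 *	<frontend>/rx-ring-ref			= "9"	grant ref of RX ring page
 *	<frontend>/event-channel		= "12"
 *	<frontend>/request-rx-copy		= "1"	mandatory for this driver
 *	<frontend>/feature-sg			= "1"
 *	<frontend>/feature-gso-tcpv4		= "1"
 *	<frontend>/feature-gso-tcpv4-prefix	= "0"
 *	<frontend>/feature-no-csum-offload	= "0"	=> xnb->ip_csum = 1
 *	<backend>/handle			= "0"
 *	<backend>/bridge			= "bridge0"	(optional)
 */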
935 * \param xnb Per-instance xnb configuration structure.
944 our_path = xenbus_get_node(xnb->dev); in xnb_publish_backend_info()
949 xenbus_dev_fatal(xnb->dev, error, in xnb_publish_backend_info()
955 error = xs_printf(xst, our_path, "feature-sg", in xnb_publish_backend_info()
960 error = xs_printf(xst, our_path, "feature-gso-tcpv4", in xnb_publish_backend_info()
965 error = xs_printf(xst, our_path, "feature-rx-copy", in xnb_publish_backend_info()
970 error = xs_printf(xst, our_path, "feature-rx-flip", in xnb_publish_backend_info()
977 xenbus_dev_fatal(xnb->dev, error, "ending transaction"); in xnb_publish_backend_info()
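/*
 * Editor's sketch (assuming the xs_printf calls above publish the compile-time
 * tunables defined near the top of the file: XNB_SG = 1, XNB_GSO_TCPV4 = 0,
 * XNB_RX_COPY = 1, XNB_RX_FLIP = 0), the transaction would leave:
 *
 *	<backend>/feature-sg		= "1"
 *	<backend>/feature-gso-tcpv4	= "0"
 *	<backend>/feature-rx-copy	= "1"
 *	<backend>/feature-rx-flip	= "0"
 */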
990 * \param xnb Per-instance xnb configuration structure.
997 if (xenbus_get_state(xnb->dev) == XenbusStateConnected) in xnb_connect()
1003 xnb->flags &= ~XNBF_SHUTDOWN; in xnb_connect()
1007 /* Allocate resources whose size depends on front-end configuration. */ in xnb_connect()
1010 xenbus_dev_fatal(xnb->dev, error, in xnb_connect()
1023 xnb->carrier = 1; in xnb_connect()
1026 xenbus_set_state(xnb->dev, XenbusStateConnected); in xnb_connect()
1029 /*-------------------------- Device Teardown Support -------------------------*/
1033 * \param xnb Per-instance xnb configuration structure.
1036 * to drain, disconnect from the front-end, and notify any waiters (e.g.
1049 if ((xnb->flags & XNBF_IN_SHUTDOWN) != 0) in xnb_shutdown()
1052 xnb->flags |= XNBF_SHUTDOWN; in xnb_shutdown()
1054 xnb->flags |= XNBF_IN_SHUTDOWN; in xnb_shutdown()
1056 mtx_unlock(&xnb->sc_lock); in xnb_shutdown()
1058 xnb->carrier = 0; in xnb_shutdown()
1059 if (xnb->xnb_ifp != NULL) { in xnb_shutdown()
1060 ether_ifdetach(xnb->xnb_ifp); in xnb_shutdown()
1061 if_free(xnb->xnb_ifp); in xnb_shutdown()
1062 xnb->xnb_ifp = NULL; in xnb_shutdown()
1067 if (xenbus_get_state(xnb->dev) < XenbusStateClosing) in xnb_shutdown()
1068 xenbus_set_state(xnb->dev, XenbusStateClosing); in xnb_shutdown()
1069 mtx_lock(&xnb->sc_lock); in xnb_shutdown()
1071 xnb->flags &= ~XNBF_IN_SHUTDOWN; in xnb_shutdown()
1083 * \param xnb Per-instance xnb configuration structure.
1095 xs_vprintf(XST_NIL, xenbus_get_node(xnb->dev), in xnb_attach_failed()
1096 "hotplug-error", fmt, ap_hotplug); in xnb_attach_failed()
1098 (void)xs_printf(XST_NIL, xenbus_get_node(xnb->dev), in xnb_attach_failed()
1099 "hotplug-status", "error"); in xnb_attach_failed()
1101 xenbus_dev_vfatal(xnb->dev, err, fmt, ap); in xnb_attach_failed()
1104 (void)xs_printf(XST_NIL, xenbus_get_node(xnb->dev), "online", "0"); in xnb_attach_failed()
1105 xnb_detach(xnb->dev); in xnb_attach_failed()
1108 /*---------------------------- NewBus Entrypoints ----------------------------*/
1141 sysctl_ctx = device_get_sysctl_ctx(xnb->dev); in xnb_setup_sysctl()
1145 sysctl_tree = device_get_sysctl_tree(xnb->dev); in xnb_setup_sysctl()
1187 mtx_init(&xnb->sc_lock, "xnb_softc", "xen netback softc lock", MTX_DEF); in create_netdev()
1188 mtx_init(&xnb->tx_lock, "xnb_tx", "xen netback tx lock", MTX_DEF); in create_netdev()
1189 mtx_init(&xnb->rx_lock, "xnb_rx", "xen netback rx lock", MTX_DEF); in create_netdev()
1191 xnb->dev = dev; in create_netdev()
1193 ifmedia_init(&xnb->sc_media, 0, xnb_ifmedia_upd, xnb_ifmedia_sts); in create_netdev()
1194 ifmedia_add(&xnb->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); in create_netdev()
1195 ifmedia_set(&xnb->sc_media, IFM_ETHER|IFM_MANUAL); in create_netdev()
1199 * if the MAC address of the host-facing interface is set in create_netdev()
1200 * to the same as the guest-facing one (the value found in in create_netdev()
1207 bzero(&xnb->mac[0], sizeof(xnb->mac)); in create_netdev()
1215 err = xs_scanf(XST_NIL, xenbus_get_node(xnb->dev), "handle", NULL, in create_netdev()
1219 snprintf(xnb->if_name, IFNAMSIZ, "xnb%" PRIu16 ".%" PRIu32, in create_netdev()
1224 ifp = xnb->xnb_ifp = if_alloc(IFT_ETHER); in create_netdev()
1226 if_initname(ifp, xnb->if_name, IF_DUNIT_NONE); in create_netdev()
1232 if_setsendqlen(ifp, NET_RX_RING_SIZE - 1); in create_netdev()
1238 ether_ifattach(ifp, xnb->mac); in create_netdev()
1239 xnb->carrier = 0; in create_netdev()
1273 xnb->otherend_id = xenbus_get_otherend_id(dev); in xnb_attach()
1275 xnb->ring_configs[i].ring_pages = 1; in xnb_attach()
1283 /* Update hot-plug status to satisfy xend. */ in xnb_attach()
1284 error = xs_printf(XST_NIL, xenbus_get_node(xnb->dev), in xnb_attach()
1285 "hotplug-status", "connected"); in xnb_attach()
1287 xnb_attach_failed(xnb, error, "writing %s/hotplug-status", in xnb_attach()
1288 xenbus_get_node(xnb->dev)); in xnb_attach()
1295 * in this connection, and waiting for a front-end state in xnb_attach()
1300 xenbus_get_node(xnb->dev)); in xnb_attach()
1317 * \note A net back device may be detached at any time in its life-cycle,
1331 mtx_lock(&xnb->sc_lock); in xnb_detach()
1333 msleep(xnb, &xnb->sc_lock, /*wakeup prio unchanged*/0, in xnb_detach()
1336 mtx_unlock(&xnb->sc_lock); in xnb_detach()
1339 mtx_destroy(&xnb->tx_lock); in xnb_detach()
1340 mtx_destroy(&xnb->rx_lock); in xnb_detach()
1341 mtx_destroy(&xnb->sc_lock); in xnb_detach()
1372 * Handle state changes expressed via the XenStore by our front-end peer.
1376 * \param frontend_state The new state of the front-end.
1389 xenbus_strstate(xenbus_get_state(xnb->dev))); in xnb_frontend_changed()
1400 mtx_lock(&xnb->sc_lock); in xnb_frontend_changed()
1402 mtx_unlock(&xnb->sc_lock); in xnb_frontend_changed()
1404 xenbus_set_state(xnb->dev, XenbusStateClosed); in xnb_frontend_changed()
1407 xenbus_dev_fatal(xnb->dev, EINVAL, "saw state %d at frontend", in xnb_frontend_changed()
1413 /*---------------------------- Request Processing ----------------------------*/
1420 * binding - the xnb_softc for this instance.
1431 ifp = xnb->xnb_ifp; in xnb_intr()
1432 txb = &xnb->ring_configs[XNB_RING_TYPE_TX].back_ring.tx_ring; in xnb_intr()
1434 mtx_lock(&xnb->tx_lock); in xnb_intr()
1437 req_prod_local = txb->sring->req_prod; in xnb_intr()
1444 err = xnb_recv(txb, xnb->otherend_id, &mbufc, ifp, in xnb_intr()
1445 xnb->tx_gnttab); in xnb_intr()
1450 if_input(xnb->xnb_ifp, mbufc); in xnb_intr()
1455 xen_intr_signal(xnb->xen_intr_handle); in xnb_intr()
1457 txb->sring->req_event = txb->req_cons + 1; in xnb_intr()
1459 } while (txb->sring->req_prod != req_prod_local) ; in xnb_intr()
1460 mtx_unlock(&xnb->tx_lock); in xnb_intr()
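/*
 * Editor's summary of the guest-transmit path driven by the interrupt loop
 * above (function names are from this file; the composition is a sketch):
 *
 *	xnb_recv()
 *	  -> xnb_ring2pkt()	gather one packet's tx requests into an xnb_pkt
 *	  -> xnb_pkt2mbufc()	allocate an mbuf chain sized for the packet
 *	  -> xnb_txpkt2gnttab()	build grant-copy entries (guest pages -> mbufs)
 *	  -> grant copy, then xnb_update_mbufc() fixes up mbuf lengths
 *	  -> xnb_txpkt2rsp()	queue responses back to the frontend
 *
 * and xnb_intr() finally hands the chain to the stack via if_input().
 */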
1468 * \param[out] pkt The returned packet. If there is an error building
1469 * the packet, pkt.list_len will be set to 0.
1475 xnb_ring2pkt(struct xnb_pkt *pkt, const netif_tx_back_ring_t *tx_ring, in xnb_ring2pkt() argument
1480 * 1) Initialize pkt in xnb_ring2pkt()
1485 * 6) Finalize pkt (stuff like car_size and list_len) in xnb_ring2pkt()
1492 xnb_pkt_initialize(pkt); in xnb_ring2pkt()
1497 pkt->size = tx->size; in xnb_ring2pkt()
1498 pkt->flags = tx->flags & ~NETTXF_more_data; in xnb_ring2pkt()
1499 more_data = tx->flags & NETTXF_more_data; in xnb_ring2pkt()
1500 pkt->list_len++; in xnb_ring2pkt()
1501 pkt->car = idx; in xnb_ring2pkt()
1506 if ((pkt->flags & NETTXF_extra_info) && in xnb_ring2pkt()
1510 pkt->extra.type = ext->type; in xnb_ring2pkt()
1511 switch (pkt->extra.type) { in xnb_ring2pkt()
1513 pkt->extra.u.gso = ext->u.gso; in xnb_ring2pkt()
1524 __func__, __LINE__, pkt->extra.type); in xnb_ring2pkt()
1533 pkt->extra.flags = ext->flags; in xnb_ring2pkt()
1534 if (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE) { in xnb_ring2pkt()
1552 pkt->cdr = idx; in xnb_ring2pkt()
1557 pkt->list_len++; in xnb_ring2pkt()
1558 cdr_size += tx->size; in xnb_ring2pkt()
1559 if (tx->flags & ~NETTXF_more_data) { in xnb_ring2pkt()
1563 __func__, __LINE__, tx->flags); in xnb_ring2pkt()
1568 more_data = tx->flags & NETTXF_more_data; in xnb_ring2pkt()
1575 xnb_pkt_invalidate(pkt); in xnb_ring2pkt()
1579 pkt->car_size = pkt->size - cdr_size; in xnb_ring2pkt()
1582 xnb_pkt_invalidate(pkt); in xnb_ring2pkt()
1585 return idx - start; in xnb_ring2pkt()
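/*
 * Editor's worked example: a TX packet carried in one data request, one
 * extra-info slot (NETTXF_extra_info set) and one chained data request
 * occupies three ring slots.  pkt->list_len ends up 2 (only data-bearing
 * entries are counted) and the function returns idx - start = 3, which the
 * caller adds to req_cons.
 */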
1589 * Respond to all the requests that constituted pkt. Builds the responses and
1591 * \param[in] pkt the packet that needs a response
1597 xnb_txpkt2rsp(const struct xnb_pkt *pkt, netif_tx_back_ring_t *ring, in xnb_txpkt2rsp() argument
1613 status = (xnb_pkt_is_valid(pkt) == 0) || error ? in xnb_txpkt2rsp()
1615 KASSERT((pkt->list_len == 0) || (ring->rsp_prod_pvt == pkt->car), in xnb_txpkt2rsp()
1618 if (pkt->list_len >= 1) { in xnb_txpkt2rsp()
1620 tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt); in xnb_txpkt2rsp()
1621 id = tx->id; in xnb_txpkt2rsp()
1622 rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt); in xnb_txpkt2rsp()
1623 rsp->id = id; in xnb_txpkt2rsp()
1624 rsp->status = status; in xnb_txpkt2rsp()
1625 ring->rsp_prod_pvt++; in xnb_txpkt2rsp()
1627 if (pkt->flags & NETRXF_extra_info) { in xnb_txpkt2rsp()
1628 rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt); in xnb_txpkt2rsp()
1629 rsp->status = NETIF_RSP_NULL; in xnb_txpkt2rsp()
1630 ring->rsp_prod_pvt++; in xnb_txpkt2rsp()
1634 for (i=0; i < pkt->list_len - 1; i++) { in xnb_txpkt2rsp()
1636 tx = RING_GET_REQUEST(ring, ring->rsp_prod_pvt); in xnb_txpkt2rsp()
1637 id = tx->id; in xnb_txpkt2rsp()
1638 rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt); in xnb_txpkt2rsp()
1639 rsp->id = id; in xnb_txpkt2rsp()
1640 rsp->status = status; in xnb_txpkt2rsp()
1641 ring->rsp_prod_pvt++; in xnb_txpkt2rsp()
1649 * \param[in] pkt A packet to model the mbuf chain after
1654 xnb_pkt2mbufc(const struct xnb_pkt *pkt, if_t ifp) in xnb_pkt2mbufc() argument
1663 m = m_getm(NULL, pkt->size, M_NOWAIT, MT_DATA); in xnb_pkt2mbufc()
1666 m->m_pkthdr.rcvif = ifp; in xnb_pkt2mbufc()
1667 if (pkt->flags & NETTXF_data_validated) { in xnb_pkt2mbufc()
1673 m->m_pkthdr.csum_flags = ( in xnb_pkt2mbufc()
1679 m->m_pkthdr.csum_data = 0xffff; in xnb_pkt2mbufc()
1686 * Build a gnttab_copy table that can be used to copy data from a pkt
1689 * \param[in] pkt pkt's associated requests form the src for
1698 xnb_txpkt2gnttab(const struct xnb_pkt *pkt, struct mbuf *mbufc, in xnb_txpkt2gnttab() argument
1705 RING_IDX r_idx = pkt->car; /* index into tx ring buffer */ in xnb_txpkt2gnttab()
1709 uint16_t size_remaining = pkt->size; in xnb_txpkt2gnttab()
1713 const size_t mbuf_space = M_TRAILINGSPACE(mbuf) - m_ofs; in xnb_txpkt2gnttab()
1715 r_idx == pkt->car ? pkt->car_size : txq->size; in xnb_txpkt2gnttab()
1716 const size_t pkt_space = req_size - r_ofs; in xnb_txpkt2gnttab()
1726 gnttab[gnt_idx].source.u.ref = txq->gref; in xnb_txpkt2gnttab()
1728 gnttab[gnt_idx].source.offset = txq->offset + r_ofs; in xnb_txpkt2gnttab()
1740 size_remaining -= space; in xnb_txpkt2gnttab()
1741 if (req_size - r_ofs <= 0) { in xnb_txpkt2gnttab()
1744 r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1; in xnb_txpkt2gnttab()
1746 if (M_TRAILINGSPACE(mbuf) - m_ofs <= 0) { in xnb_txpkt2gnttab()
1749 mbuf = mbuf->m_next; in xnb_txpkt2gnttab()
1758 * non-data fields to reflect the data present.
1777 mbuf->m_len += gnttab[i].len; in xnb_update_mbufc()
1780 mbuf = mbuf->m_next; in xnb_update_mbufc()
1783 mbufc->m_pkthdr.len = total_size; in xnb_update_mbufc()
1808 struct xnb_pkt pkt; in xnb_recv() local
1814 num_consumed = xnb_ring2pkt(&pkt, txb, txb->req_cons); in xnb_recv()
1825 if (xnb_pkt_is_valid(&pkt) == 0) { in xnb_recv()
1827 xnb_txpkt2rsp(&pkt, txb, 1); in xnb_recv()
1828 txb->req_cons += num_consumed; in xnb_recv()
1835 *mbufc = xnb_pkt2mbufc(&pkt, ifnet); in xnb_recv()
1842 xnb_txpkt2rsp(&pkt, txb, 1); in xnb_recv()
1849 nr_ents = xnb_txpkt2gnttab(&pkt, *mbufc, gnttab, txb, otherend); in xnb_recv()
1859 xnb_txpkt2rsp(&pkt, txb, 0); in xnb_recv()
1860 txb->req_cons += num_consumed; in xnb_recv()
1867 * \param[out] pkt Storage for the newly generated xnb_pkt
1868 * \param[in] start The ring index of the first available slot in the rx
1870 * \param[in] space The number of free slots in the rx ring
1872 * \retval EINVAL mbufc was corrupt or not convertible into a pkt
1873 * \retval EAGAIN There was not enough space in the ring to queue the
1877 xnb_mbufc2pkt(const struct mbuf *mbufc, struct xnb_pkt *pkt, in xnb_mbufc2pkt() argument
1884 ( (mbufc->m_flags & M_PKTHDR) == 0) || in xnb_mbufc2pkt()
1885 (mbufc->m_pkthdr.len == 0)) { in xnb_mbufc2pkt()
1886 xnb_pkt_invalidate(pkt); in xnb_mbufc2pkt()
1891 xnb_pkt_validate(pkt); in xnb_mbufc2pkt()
1892 pkt->flags = 0; in xnb_mbufc2pkt()
1893 pkt->size = mbufc->m_pkthdr.len; in xnb_mbufc2pkt()
1894 pkt->car = start; in xnb_mbufc2pkt()
1895 pkt->car_size = mbufc->m_len; in xnb_mbufc2pkt()
1897 if (mbufc->m_pkthdr.csum_flags & CSUM_TSO) { in xnb_mbufc2pkt()
1898 pkt->flags |= NETRXF_extra_info; in xnb_mbufc2pkt()
1899 pkt->extra.u.gso.size = mbufc->m_pkthdr.tso_segsz; in xnb_mbufc2pkt()
1900 pkt->extra.u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; in xnb_mbufc2pkt()
1901 pkt->extra.u.gso.pad = 0; in xnb_mbufc2pkt()
1902 pkt->extra.u.gso.features = 0; in xnb_mbufc2pkt()
1903 pkt->extra.type = XEN_NETIF_EXTRA_TYPE_GSO; in xnb_mbufc2pkt()
1904 pkt->extra.flags = 0; in xnb_mbufc2pkt()
1905 pkt->cdr = start + 2; in xnb_mbufc2pkt()
1907 pkt->cdr = start + 1; in xnb_mbufc2pkt()
1909 if (mbufc->m_pkthdr.csum_flags & (CSUM_TSO | CSUM_DELAY_DATA)) { in xnb_mbufc2pkt()
1910 pkt->flags |= in xnb_mbufc2pkt()
1920 pkt->list_len = howmany(pkt->size, PAGE_SIZE); in xnb_mbufc2pkt()
1922 if (pkt->list_len > 1) { in xnb_mbufc2pkt()
1923 pkt->flags |= NETRXF_more_data; in xnb_mbufc2pkt()
1926 slots_required = pkt->list_len + in xnb_mbufc2pkt()
1927 (pkt->flags & NETRXF_extra_info ? 1 : 0); in xnb_mbufc2pkt()
1929 xnb_pkt_invalidate(pkt); in xnb_mbufc2pkt()
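/*
 * Editor's worked example (assuming PAGE_SIZE = 4096): a 9000-byte TSO mbuf
 * chain yields pkt->list_len = howmany(9000, 4096) = 3 and, because
 * NETRXF_extra_info is set for TSO, slots_required = 3 + 1 = 4.  If fewer
 * than four RX ring slots are free, the packet is invalidated and EAGAIN is
 * returned as documented above.
 */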
1941 * \param[in] pkt pkt's associated responses form the dest for the copy
1950 xnb_rxpkt2gnttab(const struct xnb_pkt *pkt, const struct mbuf *mbufc, in xnb_rxpkt2gnttab() argument
1957 RING_IDX r_idx = pkt->car; /* index into rx ring buffer */ in xnb_rxpkt2gnttab()
1958 int r_ofs = 0; /* offset of next data within rx request's data area */ in xnb_rxpkt2gnttab()
1963 size_remaining = (xnb_pkt_is_valid(pkt) != 0) ? pkt->size : 0; in xnb_rxpkt2gnttab()
1967 const size_t mbuf_space = mbuf->m_len - m_ofs; in xnb_rxpkt2gnttab()
1970 const size_t pkt_space = req_size - r_ofs; in xnb_rxpkt2gnttab()
1980 gnttab[gnt_idx].dest.u.ref = rxq->gref; in xnb_rxpkt2gnttab()
1995 size_remaining -= space; in xnb_rxpkt2gnttab()
1996 if (req_size - r_ofs <= 0) { in xnb_rxpkt2gnttab()
1997 /* Must move to the next rx request */ in xnb_rxpkt2gnttab()
1999 r_idx = (r_idx == pkt->car) ? pkt->cdr : r_idx + 1; in xnb_rxpkt2gnttab()
2001 if (mbuf->m_len - m_ofs <= 0) { in xnb_rxpkt2gnttab()
2004 mbuf = mbuf->m_next; in xnb_rxpkt2gnttab()
2012 * Generates responses for all the requests that constituted pkt. Builds
2015 * \param[in] pkt the packet that needs a response
2017 * Used to determine how many netif_rx_response_t's to in xnb_rxpkt2rsp()
2021 * \return The number of RX requests that were consumed to generate
2025 xnb_rxpkt2rsp(const struct xnb_pkt *pkt, const gnttab_copy_table gnttab, in xnb_rxpkt2rsp() argument
2071 id = RING_GET_REQUEST(ring, ring->rsp_prod_pvt)->id; in xnb_rxpkt2rsp()
2072 rsp = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt); in xnb_rxpkt2rsp()
2073 rsp->id = id; in xnb_rxpkt2rsp()
2074 rsp->status = NETIF_RSP_ERROR; in xnb_rxpkt2rsp()
2078 const int has_extra = pkt->flags & NETRXF_extra_info; in xnb_rxpkt2rsp()
2086 r_idx = ring->rsp_prod_pvt + i; in xnb_rxpkt2rsp()
2096 ext->type = XEN_NETIF_EXTRA_TYPE_GSO; in xnb_rxpkt2rsp()
2097 ext->flags = 0; in xnb_rxpkt2rsp()
2098 ext->u.gso.size = pkt->extra.u.gso.size; in xnb_rxpkt2rsp()
2099 ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; in xnb_rxpkt2rsp()
2100 ext->u.gso.pad = 0; in xnb_rxpkt2rsp()
2101 ext->u.gso.features = 0; in xnb_rxpkt2rsp()
2103 rsp->id = rxq.id; in xnb_rxpkt2rsp()
2104 rsp->status = GNTST_okay; in xnb_rxpkt2rsp()
2105 rsp->offset = 0; in xnb_rxpkt2rsp()
2106 rsp->flags = 0; in xnb_rxpkt2rsp()
2107 if (i < pkt->list_len - 1) in xnb_rxpkt2rsp()
2108 rsp->flags |= NETRXF_more_data; in xnb_rxpkt2rsp()
2110 rsp->flags |= NETRXF_extra_info; in xnb_rxpkt2rsp()
2112 (pkt->flags & NETRXF_data_validated)) { in xnb_rxpkt2rsp()
2113 rsp->flags |= NETRXF_data_validated; in xnb_rxpkt2rsp()
2114 rsp->flags |= NETRXF_csum_blank; in xnb_rxpkt2rsp()
2116 rsp->status = 0; in xnb_rxpkt2rsp()
2119 rsp->status += gnttab[gnt_idx].len; in xnb_rxpkt2rsp()
2125 ring->req_cons += n_responses; in xnb_rxpkt2rsp()
2126 ring->rsp_prod_pvt += n_responses; in xnb_rxpkt2rsp()
2146 ether_type = ntohs(eh->ether_type); in xnb_add_mbuf_cksum()
2153 if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) { in xnb_add_mbuf_cksum()
2154 iph->ip_sum = 0; in xnb_add_mbuf_cksum()
2155 iph->ip_sum = in_cksum_hdr(iph); in xnb_add_mbuf_cksum()
2158 switch (iph->ip_p) { in xnb_add_mbuf_cksum()
2160 if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) { in xnb_add_mbuf_cksum()
2161 size_t tcplen = ntohs(iph->ip_len) - sizeof(struct ip); in xnb_add_mbuf_cksum()
2163 th->th_sum = in_pseudo(iph->ip_src.s_addr, in xnb_add_mbuf_cksum()
2164 iph->ip_dst.s_addr, htons(IPPROTO_TCP + tcplen)); in xnb_add_mbuf_cksum()
2165 th->th_sum = in_cksum_skip(mbufc, in xnb_add_mbuf_cksum()
2166 sizeof(struct ether_header) + ntohs(iph->ip_len), in xnb_add_mbuf_cksum()
2167 sizeof(struct ether_header) + (iph->ip_hl << 2)); in xnb_add_mbuf_cksum()
2171 if (mbufc->m_pkthdr.csum_flags & CSUM_IP_VALID) { in xnb_add_mbuf_cksum()
2172 size_t udplen = ntohs(iph->ip_len) - sizeof(struct ip); in xnb_add_mbuf_cksum()
2174 uh->uh_sum = in_pseudo(iph->ip_src.s_addr, in xnb_add_mbuf_cksum()
2175 iph->ip_dst.s_addr, htons(IPPROTO_UDP + udplen)); in xnb_add_mbuf_cksum()
2176 uh->uh_sum = in_cksum_skip(mbufc, in xnb_add_mbuf_cksum()
2177 sizeof(struct ether_header) + ntohs(iph->ip_len), in xnb_add_mbuf_cksum()
2178 sizeof(struct ether_header) + (iph->ip_hl << 2)); in xnb_add_mbuf_cksum()
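/*
 * Editor's note on the arithmetic above: in_pseudo() seeds the checksum with
 * the pseudo-header (source address, destination address, and
 * htons(protocol + L4 length)), and in_cksum_skip() then sums the mbuf chain
 * from the end of the IP header (skip = Ethernet + IP header length) up to
 * Ethernet header + ip_len bytes.  For a 1500-byte IP datagram with a 20-byte
 * header, tcplen = 1480 and the skip offset is 14 + 20 = 34 bytes.
 */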
2192 mtx_assert(&xnb->sc_lock, MA_OWNED); in xnb_stop()
2193 ifp = xnb->xnb_ifp; in xnb_stop()
2210 mtx_lock(&xnb->sc_lock); in xnb_ioctl()
2222 mtx_unlock(&xnb->sc_lock); in xnb_ioctl()
2226 mtx_lock(&xnb->sc_lock); in xnb_ioctl()
2227 if (ifa->ifa_addr->sa_family == AF_INET) { in xnb_ioctl()
2240 mtx_unlock(&xnb->sc_lock); in xnb_ioctl()
2242 mtx_unlock(&xnb->sc_lock); in xnb_ioctl()
2250 mtx_lock(&xnb->sc_lock); in xnb_ioctl()
2251 if (ifr->ifr_reqcap & IFCAP_TXCSUM) { in xnb_ioctl()
2258 if ((ifr->ifr_reqcap & IFCAP_RXCSUM)) { in xnb_ioctl()
2268 if (ifr->ifr_reqcap & IFCAP_TSO4) { in xnb_ioctl()
2282 if (ifr->ifr_reqcap & IFCAP_LRO) { in xnb_ioctl()
2288 mtx_unlock(&xnb->sc_lock); in xnb_ioctl()
2291 if_setmtu(ifp, ifr->ifr_mtu); in xnb_ioctl()
2300 error = ifmedia_ioctl(ifp, ifr, &xnb->sc_media, cmd); in xnb_ioctl()
2318 rxb = &xnb->ring_configs[XNB_RING_TYPE_RX].back_ring.rx_ring; in xnb_start_locked()
2320 if (!xnb->carrier) in xnb_start_locked()
2326 req_prod_local = rxb->sring->req_prod; in xnb_start_locked()
2334 error = xnb_send(rxb, xnb->otherend_id, mbufc, in xnb_start_locked()
2335 xnb->rx_gnttab); in xnb_start_locked()
2340 * Requeue pkt and send when space is in xnb_start_locked()
2370 xen_intr_signal(xnb->xen_intr_handle); in xnb_start_locked()
2371 rxb->sring->req_event = req_prod_local + 1; in xnb_start_locked()
2373 } while (rxb->sring->req_prod != req_prod_local) ; in xnb_start_locked()
2387 * \retval EINVAL mbufc was corrupt or not convertible into a pkt
2393 struct xnb_pkt pkt; in xnb_send() local
2397 space = ring->sring->req_prod - ring->req_cons; in xnb_send()
2398 error = xnb_mbufc2pkt(mbufc, &pkt, ring->rsp_prod_pvt, space); in xnb_send()
2401 n_entries = xnb_rxpkt2gnttab(&pkt, mbufc, gnttab, ring, otherend); in xnb_send()
2409 xnb_rxpkt2rsp(&pkt, gnttab, n_entries, ring); in xnb_send()
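/*
 * Editor's summary of the host-to-guest path (a sketch of how the calls above
 * compose): xnb_start_locked() dequeues an mbuf chain and calls xnb_send(),
 * which runs
 *
 *	xnb_mbufc2pkt()		describe the chain as an xnb_pkt sized to ring slots
 *	xnb_rxpkt2gnttab()	build grant-copy entries (mbufs -> guest buffers)
 *	grant copy hypercall
 *	xnb_rxpkt2rsp()		write netif_rx responses and advance the ring
 *
 * after which the caller signals the event channel if responses were produced.
 */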
2420 mtx_lock(&xnb->rx_lock); in xnb_start()
2422 mtx_unlock(&xnb->rx_lock); in xnb_start()
2431 ifp = xnb->xnb_ifp; in xnb_ifinit_locked()
2433 mtx_assert(&xnb->sc_lock, MA_OWNED); in xnb_ifinit_locked()
2450 mtx_lock(&xnb->sc_lock); in xnb_ifinit()
2452 mtx_unlock(&xnb->sc_lock); in xnb_ifinit()
2472 ifmr->ifm_status = IFM_AVALID|IFM_ACTIVE; in xnb_ifmedia_sts()
2473 ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; in xnb_ifmedia_sts()
2476 /*---------------------------- NewBus Registration ---------------------------*/
2500 /*-------------------------- Unit Tests -------------------------------------*/