xref: /netbsd-src/sys/arch/xen/xen/if_xennet_xenbus.c (revision 404fbe5fb94ca1e054339640cabb2801ce52dd30)
1 /*      $NetBSD: if_xennet_xenbus.c,v 1.30 2008/11/13 18:44:51 cegger Exp $      */
2 
3 /*
4  * Copyright (c) 2006 Manuel Bouyer.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *      This product includes software developed by Manuel Bouyer.
17  * 4. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  *
31  */
32 
33 /*
34  * Copyright (c) 2004 Christian Limpach.
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. All advertising materials mentioning features or use of this software
46  *    must display the following acknowledgement:
47  *      This product includes software developed by Christian Limpach.
48  * 4. The name of the author may not be used to endorse or promote products
49  *    derived from this software without specific prior written permission.
50  *
51  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
52  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
53  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
54  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
55  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
56  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
60  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61  */
62 
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.30 2008/11/13 18:44:51 cegger Exp $");
65 
66 #include "opt_xen.h"
67 #include "opt_nfs_boot.h"
68 #include "rnd.h"
69 #include "bpfilter.h"
70 
71 #include <sys/param.h>
72 #include <sys/device.h>
73 #include <sys/conf.h>
74 #include <sys/kernel.h>
75 #include <sys/proc.h>
76 #include <sys/systm.h>
77 #include <sys/intr.h>
78 #if NRND > 0
79 #include <sys/rnd.h>
80 #endif
81 
82 #include <net/if.h>
83 #include <net/if_dl.h>
84 #include <net/if_ether.h>
85 #if NBPFILTER > 0
86 #include <net/bpf.h>
87 #include <net/bpfdesc.h>
88 #endif
89 
90 #if defined(NFS_BOOT_BOOTSTATIC)
91 #include <sys/fstypes.h>
92 #include <sys/mount.h>
93 #include <sys/statvfs.h>
94 #include <netinet/in.h>
95 #include <nfs/rpcv2.h>
96 #include <nfs/nfsproto.h>
97 #include <nfs/nfs.h>
98 #include <nfs/nfsmount.h>
99 #include <nfs/nfsdiskless.h>
100 #include <xen/if_xennetvar.h>
101 #endif /* defined(NFS_BOOT_BOOTSTATIC) */
102 
103 #include <xen/xennet_checksum.h>
104 
105 #include <uvm/uvm.h>
106 
107 #include <xen/hypervisor.h>
108 #include <xen/evtchn.h>
109 #include <xen/granttables.h>
110 #include <xen/xen3-public/io/netif.h>
111 #include <xen/xenpmap.h>
112 
113 #include <xen/xenbus.h>
114 #include "locators.h"
115 
116 #undef XENNET_DEBUG_DUMP
117 #undef XENNET_DEBUG
118 #ifdef XENNET_DEBUG
119 #define XEDB_FOLLOW     0x01
120 #define XEDB_INIT       0x02
121 #define XEDB_EVENT      0x04
122 #define XEDB_MBUF       0x08
123 #define XEDB_MEM        0x10
124 int xennet_debug = 0xff;
125 #define DPRINTF(x) if (xennet_debug) printf x;
126 #define DPRINTFN(n,x) if (xennet_debug & (n)) printf x;
127 #else
128 #define DPRINTF(x)
129 #define DPRINTFN(n,x)
130 #endif
131 
132 #define GRANT_INVALID_REF -1 /* entry is free */
133 #define GRANT_STACK_REF   -2 /* entry owned by the network stack */
134 
135 #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
136 #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
137 
138 struct xennet_txreq {
139 	SLIST_ENTRY(xennet_txreq) txreq_next;
140 	uint16_t txreq_id; /* ID passed to backend */
141 	grant_ref_t txreq_gntref; /* grant ref of this request */
142 	struct mbuf *txreq_m; /* mbuf being transmitted */
143 };
144 
145 struct xennet_rxreq {
146 	SLIST_ENTRY(xennet_rxreq) rxreq_next;
147 	uint16_t rxreq_id; /* ID passed to backend */
148 	grant_ref_t rxreq_gntref; /* grant ref of this request */
149 /* va/pa for this receive buf. ma will be provided by backend */
150 	paddr_t rxreq_pa;
151 	vaddr_t rxreq_va;
152 	struct xennet_xenbus_softc *rxreq_sc; /* pointer to our interface */
153 };
154 
155 struct xennet_xenbus_softc {
156 	device_t sc_dev;
157 	struct ethercom sc_ethercom;
158 	uint8_t sc_enaddr[6];
159 	struct xenbus_device *sc_xbusd;
160 
161 	netif_tx_front_ring_t sc_tx_ring;
162 	netif_rx_front_ring_t sc_rx_ring;
163 
164 	unsigned int sc_evtchn;
165 	void *sc_softintr;
166 
167 	grant_ref_t sc_tx_ring_gntref;
168 	grant_ref_t sc_rx_ring_gntref;
169 
170 	struct xennet_txreq sc_txreqs[NET_TX_RING_SIZE];
171 	struct xennet_rxreq sc_rxreqs[NET_RX_RING_SIZE];
172 	SLIST_HEAD(,xennet_txreq) sc_txreq_head; /* list of free TX requests */
173 	SLIST_HEAD(,xennet_rxreq) sc_rxreq_head; /* list of free RX requests */
174 	int sc_free_rxreql; /* number of free receive request structs */
175 
176 	int sc_backend_status; /* our status with backend */
177 #define BEST_CLOSED		0
178 #define BEST_DISCONNECTED	1
179 #define BEST_CONNECTED		2
180 #define BEST_SUSPENDED		3
181 #if NRND > 0
182 	rndsource_element_t     sc_rnd_source;
183 #endif
184 };
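/*
 * Number of RX requests currently outstanding with the backend: requests
 * we have produced (including not yet pushed ones) minus the responses
 * the backend has already returned.
 */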
185 #define SC_NLIVEREQ(sc) ((sc)->sc_rx_ring.req_prod_pvt - \
186 			    (sc)->sc_rx_ring.sring->rsp_prod)
187 
188 /* too big to be on stack */
189 static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
190 static u_long xennet_pages[NET_RX_RING_SIZE];
191 
192 static int  xennet_xenbus_match(device_t, cfdata_t, void *);
193 static void xennet_xenbus_attach(device_t, device_t, void *);
194 static int  xennet_xenbus_detach(device_t, int);
195 static void xennet_backend_changed(void *, XenbusState);
196 
197 static int  xennet_xenbus_resume(void *);
198 static void xennet_alloc_rx_buffer(struct xennet_xenbus_softc *);
199 static void xennet_free_rx_buffer(struct xennet_xenbus_softc *);
200 static void xennet_tx_complete(struct xennet_xenbus_softc *);
201 static void xennet_rx_mbuf_free(struct mbuf *, void *, size_t, void *);
202 static int  xennet_handler(void *);
203 #ifdef XENNET_DEBUG_DUMP
204 static void xennet_hex_dump(const unsigned char *, size_t, const char *, int);
205 #endif
206 
207 static int  xennet_init(struct ifnet *);
208 static void xennet_stop(struct ifnet *, int);
209 static void xennet_reset(struct xennet_xenbus_softc *);
210 static void xennet_softstart(void *);
211 static void xennet_start(struct ifnet *);
212 static int  xennet_ioctl(struct ifnet *, u_long, void *);
213 static void xennet_watchdog(struct ifnet *);
214 
215 CFATTACH_DECL_NEW(xennet_xenbus, sizeof(struct xennet_xenbus_softc),
216    xennet_xenbus_match, xennet_xenbus_attach, xennet_xenbus_detach, NULL);
217 
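/*
 * Autoconf match: accept xenbus devices of type "vif" whose id matches
 * the locator from the kernel configuration (or the wildcard default).
 */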
218 static int
219 xennet_xenbus_match(device_t parent, cfdata_t match, void *aux)
220 {
221 	struct xenbusdev_attach_args *xa = aux;
222 
223 	if (strcmp(xa->xa_type, "vif") != 0)
224 		return 0;
225 
226 	if (match->cf_loc[XENBUSCF_ID] != XENBUSCF_ID_DEFAULT &&
227 	    match->cf_loc[XENBUSCF_ID] != xa->xa_id)
228 		return 0;
229 
230 	return 1;
231 }
232 
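/*
 * Attach: initialize the free TX/RX request lists, allocate one wired page
 * per RX request, read the MAC address from xenstore, attach the ifnet and
 * ethernet layers, establish the transmit soft interrupt and finally
 * connect to the backend through xennet_xenbus_resume().
 */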
233 static void
234 xennet_xenbus_attach(device_t parent, device_t self, void *aux)
235 {
236 	struct xennet_xenbus_softc *sc = device_private(self);
237 	struct xenbusdev_attach_args *xa = aux;
238 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
239 	int err;
240 	RING_IDX i;
241 	char *val, *e, *p;
242 	int s;
243 	extern int ifqmaxlen; /* XXX */
244 #ifdef XENNET_DEBUG
245 	char **dir;
246 	int dir_n = 0;
247 	char id_str[20];
248 #endif
249 
250 	aprint_normal(": Xen Virtual Network Interface\n");
251 	sc->sc_dev = self;
252 
253 #ifdef XENNET_DEBUG
254 	printf("path: %s\n", xa->xa_xbusd->xbusd_path);
255 	snprintf(id_str, sizeof(id_str), "%d", xa->xa_id);
256 	err = xenbus_directory(NULL, "device/vif", id_str, &dir_n, &dir);
257 	if (err) {
258 		aprint_error_dev(self, "xenbus_directory err %d\n", err);
259 	} else {
260 		printf("%s/\n", xa->xa_xbusd->xbusd_path);
261 		for (i = 0; i < dir_n; i++) {
262 			printf("\t/%s", dir[i]);
263 			err = xenbus_read(NULL, xa->xa_xbusd->xbusd_path, dir[i],
264 			    NULL, &val);
265 			if (err) {
266 				aprint_error_dev(self, "xenbus_read err %d\n", err);
267 			} else {
268 				printf(" = %s\n", val);
269 				free(val, M_DEVBUF);
270 			}
271 		}
272 	}
273 #endif /* XENNET_DEBUG */
274 	sc->sc_xbusd = xa->xa_xbusd;
275 	sc->sc_xbusd->xbusd_otherend_changed = xennet_backend_changed;
276 
277 	/* initialize free TX and RX request lists */
278 	SLIST_INIT(&sc->sc_txreq_head);
279 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
280 		sc->sc_txreqs[i].txreq_id = i;
281 		SLIST_INSERT_HEAD(&sc->sc_txreq_head, &sc->sc_txreqs[i],
282 		    txreq_next);
283 	}
284 	SLIST_INIT(&sc->sc_rxreq_head);
285 	s = splvm();
286 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
287 		struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
288 		rxreq->rxreq_id = i;
289 		rxreq->rxreq_sc = sc;
290 		rxreq->rxreq_va = uvm_km_alloc(kernel_map,
291 		    PAGE_SIZE, PAGE_SIZE, UVM_KMF_WIRED | UVM_KMF_ZERO);
292 		if (rxreq->rxreq_va == 0)
293 			break;
294 		if (!pmap_extract(pmap_kernel(), rxreq->rxreq_va,
295 		    &rxreq->rxreq_pa))
296 			panic("%s: no pa for mapped va ?", device_xname(self));
297 		rxreq->rxreq_gntref = GRANT_INVALID_REF;
298 		SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq, rxreq_next);
299 	}
300 	splx(s);
301 	sc->sc_free_rxreql = i;
302 	if (sc->sc_free_rxreql == 0) {
303 		aprint_error_dev(self, "failed to allocate rx memory\n");
304 		return;
305 	}
306 
307 	/* read mac address */
308 	err = xenbus_read(NULL, xa->xa_xbusd->xbusd_path, "mac", NULL, &val);
309 	if (err) {
310 		aprint_error_dev(self, "can't read mac address, err %d\n", err);
311 		return;
312 	}
313 	/* parse mac address */
314 	for (i = 0, p = val; i < 6; i++) {
315 		sc->sc_enaddr[i] = strtoul(p, &e, 16);
316 		if ((i != 5 && e[0] != ':') || (i == 5 && e[0] != '\0')) {
317 			aprint_error_dev(self, "%s is not a valid mac address\n", val);
318 			free(val, M_DEVBUF);
319 			return;
320 		}
321 		p = &e[1];
322 	}
323 	free(val, M_DEVBUF);
324 	aprint_normal_dev(self, "MAC address %s\n",
325 	    ether_sprintf(sc->sc_enaddr));
326 	/* Initialize ifnet structure and attach interface */
327 	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
328 	ifp->if_softc = sc;
329 	ifp->if_start = xennet_start;
330 	ifp->if_ioctl = xennet_ioctl;
331 	ifp->if_watchdog = xennet_watchdog;
332 	ifp->if_init = xennet_init;
333 	ifp->if_stop = xennet_stop;
334 	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;
335 	ifp->if_timer = 0;
336 	ifp->if_snd.ifq_maxlen = max(ifqmaxlen, NET_TX_RING_SIZE * 2);
337 	ifp->if_capabilities = IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx;
338 	IFQ_SET_READY(&ifp->if_snd);
339 	if_attach(ifp);
340 	ether_ifattach(ifp, sc->sc_enaddr);
341 	sc->sc_softintr = softint_establish(SOFTINT_NET, xennet_softstart, sc);
342 	if (sc->sc_softintr == NULL)
343 		panic("%s: can't establish soft interrupt",
344 			device_xname(self));
345 
346 	/* initialise shared structures and tell backend that we are ready */
347 	xennet_xenbus_resume(sc);
348 }
349 
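/*
 * Detach: stop the interface, drain pending TX responses, reclaim and free
 * the RX pages, detach the ifnet, wait for the backend to release the ring
 * grants before revoking them and freeing the shared rings, then tear down
 * the soft interrupt and the event channel handler.
 */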
350 static int
351 xennet_xenbus_detach(device_t self, int flags)
352 {
353 	struct xennet_xenbus_softc *sc = device_private(self);
354 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
355 	int s0, s1;
356 	RING_IDX i;
357 
358 	DPRINTF(("%s: xennet_xenbus_detach\n", device_xname(self)));
359 	s0 = splnet();
360 	xennet_stop(ifp, 1);
361 	/* wait for pending TX to complete, and collect pending RX packets */
362 	xennet_handler(sc);
363 	while (sc->sc_tx_ring.sring->rsp_prod != sc->sc_tx_ring.rsp_cons) {
364 		tsleep(xennet_xenbus_detach, PRIBIO, "xnet_detach", hz/2);
365 		xennet_handler(sc);
366 	}
367 	xennet_free_rx_buffer(sc);
368 
369 	s1 = splvm();
370 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
371 		struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
372 		uvm_km_free(kernel_map, rxreq->rxreq_va, PAGE_SIZE,
373 		    UVM_KMF_WIRED);
374 	}
375 	splx(s1);
376 
377 	ether_ifdetach(ifp);
378 	if_detach(ifp);
379 	while (xengnt_status(sc->sc_tx_ring_gntref)) {
380 		tsleep(xennet_xenbus_detach, PRIBIO, "xnet_txref", hz/2);
381 	}
382 	xengnt_revoke_access(sc->sc_tx_ring_gntref);
383 	uvm_km_free(kernel_map, (vaddr_t)sc->sc_tx_ring.sring, PAGE_SIZE,
384 	    UVM_KMF_WIRED);
385 	while (xengnt_status(sc->sc_rx_ring_gntref)) {
386 		tsleep(xennet_xenbus_detach, PRIBIO, "xnet_rxref", hz/2);
387 	}
388 	xengnt_revoke_access(sc->sc_rx_ring_gntref);
389 	uvm_km_free(kernel_map, (vaddr_t)sc->sc_rx_ring.sring, PAGE_SIZE,
390 	    UVM_KMF_WIRED);
391 	softint_disestablish(sc->sc_softintr);
392 	event_remove_handler(sc->sc_evtchn, &xennet_handler, sc);
393 	splx(s0);
394 	DPRINTF(("%s: xennet_xenbus_detach done\n", device_xname(self)));
395 	return 0;
396 }
397 
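/*
 * (Re)connect to the backend: allocate and grant the shared TX/RX rings,
 * allocate an event channel and register its handler, publish the ring
 * references, event channel and XenbusStateConnected through a xenstore
 * transaction (retrying on EAGAIN), and finally post the RX buffers.
 */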
398 static int
399 xennet_xenbus_resume(void *p)
400 {
401 	struct xennet_xenbus_softc *sc = p;
402 	struct xenbus_transaction *xbt;
403 	int error;
404 	netif_tx_sring_t *tx_ring;
405 	netif_rx_sring_t *rx_ring;
406 	paddr_t ma;
407 	const char *errmsg;
408 
409 	sc->sc_tx_ring_gntref = GRANT_INVALID_REF;
410 	sc->sc_rx_ring_gntref = GRANT_INVALID_REF;
411 
412 
413 	/* setup device: alloc event channel and shared rings */
414 	tx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
415 	     UVM_KMF_WIRED | UVM_KMF_ZERO);
416 	rx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
417 	    UVM_KMF_WIRED | UVM_KMF_ZERO);
418 	if (tx_ring == NULL || rx_ring == NULL)
419 		panic("xennet_xenbus_resume: can't alloc rings");
420 
421 	SHARED_RING_INIT(tx_ring);
422 	FRONT_RING_INIT(&sc->sc_tx_ring, tx_ring, PAGE_SIZE);
423 	SHARED_RING_INIT(rx_ring);
424 	FRONT_RING_INIT(&sc->sc_rx_ring, rx_ring, PAGE_SIZE);
425 
426 	(void)pmap_extract_ma(pmap_kernel(), (vaddr_t)tx_ring, &ma);
427 	error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_tx_ring_gntref);
428 	if (error)
429 		return error;
430 	(void)pmap_extract_ma(pmap_kernel(), (vaddr_t)rx_ring, &ma);
431 	error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_rx_ring_gntref);
432 	if (error)
433 		return error;
434 	error = xenbus_alloc_evtchn(sc->sc_xbusd, &sc->sc_evtchn);
435 	if (error)
436 		return error;
437 	aprint_verbose_dev(sc->sc_dev, "using event channel %d\n",
438 	    sc->sc_evtchn);
439 	event_set_handler(sc->sc_evtchn, &xennet_handler, sc,
440 	    IPL_NET, device_xname(sc->sc_dev));
441 
442 again:
443 	xbt = xenbus_transaction_start();
444 	if (xbt == NULL)
445 		return ENOMEM;
446 	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
447 	    "tx-ring-ref","%u", sc->sc_tx_ring_gntref);
448 	if (error) {
449 		errmsg = "writing tx ring-ref";
450 		goto abort_transaction;
451 	}
452 	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
453 	    "rx-ring-ref","%u", sc->sc_rx_ring_gntref);
454 	if (error) {
455 		errmsg = "writing rx ring-ref";
456 		goto abort_transaction;
457 	}
458 	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
459 	    "event-channel", "%u", sc->sc_evtchn);
460 	if (error) {
461 		errmsg = "writing event channel";
462 		goto abort_transaction;
463 	}
464 	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
465 	    "state", "%d", XenbusStateConnected);
466 	if (error) {
467 		errmsg = "writing frontend XenbusStateConnected";
468 		goto abort_transaction;
469 	}
470 	error = xenbus_transaction_end(xbt, 0);
471 	if (error == EAGAIN)
472 		goto again;
473 	if (error) {
474 		xenbus_dev_fatal(sc->sc_xbusd, error, "completing transaction");
475 		return -1;
476 	}
477 	xennet_alloc_rx_buffer(sc);
478 	sc->sc_backend_status = BEST_CONNECTED;
479 	return 0;
480 
481 abort_transaction:
482 	xenbus_transaction_end(xbt, 1);
483 	xenbus_dev_fatal(sc->sc_xbusd, error, "%s", errmsg);
484 	return error;
485 }
486 
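/*
 * Xenbus otherend-changed callback: when the backend starts closing,
 * record the disconnection and switch our frontend state to Closed.
 */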
487 static void xennet_backend_changed(void *arg, XenbusState new_state)
488 {
489 	struct xennet_xenbus_softc *sc = device_private((device_t)arg);
490 	DPRINTF(("%s: new backend state %d\n", device_xname(sc->sc_dev), new_state));
491 
492 	switch (new_state) {
493 	case XenbusStateInitialising:
494 	case XenbusStateInitWait:
495 	case XenbusStateInitialised:
496 		break;
497 	case XenbusStateClosing:
498 		sc->sc_backend_status = BEST_CLOSED;
499 		xenbus_switch_state(sc->sc_xbusd, NULL, XenbusStateClosed);
500 		break;
501 	case XenbusStateConnected:
502 		break;
503 	case XenbusStateUnknown:
504 	default:
505 		panic("bad backend state %d", new_state);
506 	}
507 }
508 
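/*
 * Post free RX requests to the backend. Each free page is offered to the
 * backend with a grant-transfer reference, unmapped from the kernel and
 * removed from the P2M, and the underlying machine pages are then returned
 * to the hypervisor with XENMEM_decrease_reservation, all batched into a
 * single multicall.
 */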
509 static void
510 xennet_alloc_rx_buffer(struct xennet_xenbus_softc *sc)
511 {
512 	RING_IDX req_prod = sc->sc_rx_ring.req_prod_pvt;
513 	RING_IDX i;
514 	struct xennet_rxreq *req;
515 	struct xen_memory_reservation reservation;
516 	int s1, s2;
517 	paddr_t pfn;
518 
519 	s1 = splnet();
520 	for (i = 0; sc->sc_free_rxreql != 0; i++) {
521 		req  = SLIST_FIRST(&sc->sc_rxreq_head);
522 		KASSERT(req != NULL);
523 		KASSERT(req == &sc->sc_rxreqs[req->rxreq_id]);
524 		RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->id =
525 		    req->rxreq_id;
526 		if (xengnt_grant_transfer(sc->sc_xbusd->xbusd_otherend_id,
527 		    &req->rxreq_gntref) != 0) {
528 			break;
529 		}
530 		RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->gref =
531 		    req->rxreq_gntref;
532 
533 		SLIST_REMOVE_HEAD(&sc->sc_rxreq_head, rxreq_next);
534 		sc->sc_free_rxreql--;
535 
536 		/* unmap the page */
537 		MULTI_update_va_mapping(&rx_mcl[i], req->rxreq_va, 0, 0);
538 		/*
539 		 * Remove this page from pseudo phys map before
540 		 * passing back to Xen.
541 		 */
542 		pfn = (req->rxreq_pa - XPMAP_OFFSET) >> PAGE_SHIFT;
543 		xennet_pages[i] = xpmap_phys_to_machine_mapping[pfn];
544 		xpmap_phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
545 	}
546 	if (i == 0) {
547 		splx(s1);
548 		return;
549 	}
550 	/* also make sure to flush all TLB entries */
551 	rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
552 	/*
553 	 * We may have allocated buffers which have entries
554 	 * outstanding in the page update queue -- make sure we flush
555 	 * those first!
556 	 */
557 	s2 = splvm();
558 	xpq_flush_queue();
559 	splx(s2);
560 	/* now decrease reservation */
561 	xenguest_handle(reservation.extent_start) = xennet_pages;
562 	reservation.nr_extents = i;
563 	reservation.extent_order = 0;
564 	reservation.address_bits = 0;
565 	reservation.domid = DOMID_SELF;
566 	rx_mcl[i].op = __HYPERVISOR_memory_op;
567 	rx_mcl[i].args[0] = XENMEM_decrease_reservation;
568 	rx_mcl[i].args[1] = (unsigned long)&reservation;
569 	HYPERVISOR_multicall(rx_mcl, i+1);
570 	if (__predict_false(rx_mcl[i].result != i)) {
571 		panic("xennet_alloc_rx_buffer: XENMEM_decrease_reservation");
572 	}
573 	sc->sc_rx_ring.req_prod_pvt = req_prod + i;
574 	RING_PUSH_REQUESTS(&sc->sc_rx_ring);
575 
576 	splx(s1);
577 	return;
578 }
579 
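/*
 * Reclaim, at detach time, the RX pages still granted to the backend.
 * If a transfer never completed the original page is lost and a replacement
 * is obtained from the hypervisor; either way the page is re-entered into
 * the P2M and remapped at the request's va so it can be freed later.
 */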
580 static void
581 xennet_free_rx_buffer(struct xennet_xenbus_softc *sc)
582 {
583 	paddr_t ma, pa;
584 	vaddr_t va;
585 	RING_IDX i;
586 	mmu_update_t mmu[1];
587 	multicall_entry_t mcl[2];
588 
589 	int s = splbio();
590 
591 	DPRINTF(("%s: xennet_free_rx_buffer\n", device_xname(sc->sc_dev)));
592 	/* get back memory from RX ring */
593 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
594 		struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
595 
596 		/*
597 		 * if the buffer is in transit in the network stack, wait for
598 		 * the network stack to free it.
599 		 */
600 		while (*(volatile grant_ref_t *)&rxreq->rxreq_gntref ==
601 		    GRANT_STACK_REF)
602 			tsleep(xennet_xenbus_detach, PRIBIO, "xnet_free", hz/2);
603 
604 		if (rxreq->rxreq_gntref != GRANT_INVALID_REF) {
605 			/*
606 			 * this req is still granted. Get back the page or
607 			 * allocate a new one, and remap it.
608 			 */
609 			SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq,
610 			    rxreq_next);
611 			sc->sc_free_rxreql++;
612 			ma = xengnt_revoke_transfer(rxreq->rxreq_gntref);
613 			rxreq->rxreq_gntref = GRANT_INVALID_REF;
614 			if (ma == 0) {
615 				u_long pfn;
616 				struct xen_memory_reservation xenres;
617 				/*
618 				 * the transfer did not complete and we lost
619 				 * the page. Get a replacement from the hypervisor.
620 				 */
621 				xenguest_handle(xenres.extent_start) = &pfn;
622 				xenres.nr_extents = 1;
623 				xenres.extent_order = 0;
624 				xenres.address_bits = 31;
625 				xenres.domid = DOMID_SELF;
626 				if (HYPERVISOR_memory_op(
627 				    XENMEM_increase_reservation, &xenres) < 0) {
628 					panic("xennet_free_rx_buffer: "
629 					    "can't get memory back");
630 				}
631 				ma = pfn;
632 				KASSERT(ma != 0);
633 			}
634 			pa = rxreq->rxreq_pa;
635 			va = rxreq->rxreq_va;
636 			/* remap the page */
637 			mmu[0].ptr = (ma << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
638 			mmu[0].val = ((pa - XPMAP_OFFSET) >> PAGE_SHIFT);
639 			MULTI_update_va_mapping(&mcl[0], va,
640 			    (ma << PAGE_SHIFT) | PG_V | PG_KW,
641 			    UVMF_TLB_FLUSH|UVMF_ALL);
642 			xpmap_phys_to_machine_mapping[
643 			    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = ma;
644 			mcl[1].op = __HYPERVISOR_mmu_update;
645 			mcl[1].args[0] = (unsigned long)mmu;
646 			mcl[1].args[1] = 1;
647 			mcl[1].args[2] = 0;
648 			mcl[1].args[3] = DOMID_SELF;
649 			HYPERVISOR_multicall(mcl, 2);
650 		}
651 
652 	}
653 	splx(s);
654 	DPRINTF(("%s: xennet_free_rx_buffer done\n", device_xname(sc->sc_dev)));
655 }
656 
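/*
 * External-storage free callback (also called directly when a packet is
 * dropped): return the RX request to the free list and replenish the RX
 * ring once enough requests are free and the backend is still connected.
 */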
657 static void
658 xennet_rx_mbuf_free(struct mbuf *m, void *buf, size_t size, void *arg)
659 {
660 	struct xennet_rxreq *req = arg;
661 	struct xennet_xenbus_softc *sc = req->rxreq_sc;
662 
663 	int s = splnet();
664 
665 	SLIST_INSERT_HEAD(&sc->sc_rxreq_head, req, rxreq_next);
666 	sc->sc_free_rxreql++;
667 
668 	req->rxreq_gntref = GRANT_INVALID_REF;
669 	if (sc->sc_free_rxreql >= SC_NLIVEREQ(sc) &&
670 	    __predict_true(sc->sc_backend_status == BEST_CONNECTED)) {
671 		xennet_alloc_rx_buffer(sc);
672 	}
673 
674 	if (m)
675 		pool_cache_put(mb_cache, m);
676 	splx(s);
677 }
678 
679 
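/*
 * Harvest TX responses: revoke the grant, free the transmitted mbuf, update
 * the interface statistics and return the request to the free list; clear
 * IFF_OACTIVE and restart transmission if the ring had filled up.
 */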
680 static void
681 xennet_tx_complete(struct xennet_xenbus_softc *sc)
682 {
683 	struct xennet_txreq *req;
684 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
685 	RING_IDX resp_prod, i;
686 
687 	DPRINTFN(XEDB_EVENT, ("xennet_tx_complete prod %d cons %d\n",
688 	    sc->sc_tx_ring.sring->rsp_prod, sc->sc_tx_ring.rsp_cons));
689 
690 again:
691 	resp_prod = sc->sc_tx_ring.sring->rsp_prod;
692 	x86_lfence();
693 	for (i = sc->sc_tx_ring.rsp_cons; i != resp_prod; i++) {
694 		req = &sc->sc_txreqs[RING_GET_RESPONSE(&sc->sc_tx_ring, i)->id];
695 		KASSERT(req->txreq_id ==
696 		    RING_GET_RESPONSE(&sc->sc_tx_ring, i)->id);
697 		if (__predict_false(xengnt_status(req->txreq_gntref))) {
698 			aprint_verbose_dev(sc->sc_dev,
699 					   "grant still used by backend\n");
700 			sc->sc_tx_ring.rsp_cons = i;
701 			goto end;
702 		}
703 		if (__predict_false(
704 		    RING_GET_RESPONSE(&sc->sc_tx_ring, i)->status !=
705 		    NETIF_RSP_OKAY))
706 			ifp->if_oerrors++;
707 		else
708 			ifp->if_opackets++;
709 		xengnt_revoke_access(req->txreq_gntref);
710 		m_freem(req->txreq_m);
711 		SLIST_INSERT_HEAD(&sc->sc_txreq_head, req, txreq_next);
712 	}
713 	sc->sc_tx_ring.rsp_cons = resp_prod;
714 	/* set new event and check for race with rsp_cons update */
715 	sc->sc_tx_ring.sring->rsp_event =
716 	    resp_prod + ((sc->sc_tx_ring.sring->req_prod - resp_prod) >> 1) + 1;
717 	ifp->if_timer = 0;
718 	x86_sfence();
719 	if (resp_prod != sc->sc_tx_ring.sring->rsp_prod)
720 		goto again;
721 end:
722 	if (ifp->if_flags & IFF_OACTIVE) {
723 		ifp->if_flags &= ~IFF_OACTIVE;
724 		xennet_softstart(sc);
725 	}
726 }
727 
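/*
 * Event channel interrupt handler. Complete pending transmissions first,
 * then process RX responses: take ownership of each transferred machine
 * page, map it back into the P2M and at the request's va, discard packets
 * not addressed to us, and pass the rest up the stack as external-storage
 * mbufs (copying into a fresh mbuf when this was the last free RX buffer).
 */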
728 static int
729 xennet_handler(void *arg)
730 {
731 	struct xennet_xenbus_softc *sc = arg;
732 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
733 	RING_IDX resp_prod, i;
734 	struct xennet_rxreq *req;
735 	paddr_t ma, pa;
736 	vaddr_t va;
737 	mmu_update_t mmu[1];
738 	multicall_entry_t mcl[2];
739 	struct mbuf *m;
740 	void *pktp;
741 	int more_to_do;
742 
743 	if (sc->sc_backend_status != BEST_CONNECTED)
744 		return 1;
745 
746 	xennet_tx_complete(sc);
747 
748 again:
749 	DPRINTFN(XEDB_EVENT, ("xennet_handler prod %d cons %d\n",
750 	    sc->sc_rx_ring.sring->rsp_prod, sc->sc_rx_ring.rsp_cons));
751 
752 	resp_prod = sc->sc_rx_ring.sring->rsp_prod;
753 	x86_lfence(); /* ensure we see replies up to resp_prod */
754 	for (i = sc->sc_rx_ring.rsp_cons; i != resp_prod; i++) {
755 		netif_rx_response_t *rx = RING_GET_RESPONSE(&sc->sc_rx_ring, i);
756 		req = &sc->sc_rxreqs[rx->id];
757 		KASSERT(req->rxreq_gntref != GRANT_INVALID_REF);
758 		KASSERT(req->rxreq_id == rx->id);
759 		ma = xengnt_revoke_transfer(req->rxreq_gntref);
760 		if (ma == 0) {
761 			DPRINTFN(XEDB_EVENT, ("xennet_handler ma == 0\n"));
762 			/*
763 			 * the remote end couldn't send us a packet.
764 			 * we can't free this rxreq as no page will be mapped
765 			 * here. Instead give it back immediately to the backend.
766 			 */
767 			ifp->if_ierrors++;
768 			RING_GET_REQUEST(&sc->sc_rx_ring,
769 			    sc->sc_rx_ring.req_prod_pvt)->id = req->rxreq_id;
770 			RING_GET_REQUEST(&sc->sc_rx_ring,
771 			    sc->sc_rx_ring.req_prod_pvt)->gref =
772 				req->rxreq_gntref;
773 			sc->sc_rx_ring.req_prod_pvt++;
774 			RING_PUSH_REQUESTS(&sc->sc_rx_ring);
775 			continue;
776 		}
777 		req->rxreq_gntref = GRANT_INVALID_REF;
778 
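		/*
		 * The backend transferred ownership of a machine page to us;
		 * plug it back into the P2M and map it at this request's va
		 * before looking at the packet.
		 */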
779 		pa = req->rxreq_pa;
780 		va = req->rxreq_va;
781 		/* remap the page */
782 		mmu[0].ptr = (ma << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
783 		mmu[0].val = ((pa - XPMAP_OFFSET) >> PAGE_SHIFT);
784 		MULTI_update_va_mapping(&mcl[0], va,
785 		    (ma << PAGE_SHIFT) | PG_V | PG_KW, UVMF_TLB_FLUSH|UVMF_ALL);
786 		xpmap_phys_to_machine_mapping[
787 		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = ma;
788 		mcl[1].op = __HYPERVISOR_mmu_update;
789 		mcl[1].args[0] = (unsigned long)mmu;
790 		mcl[1].args[1] = 1;
791 		mcl[1].args[2] = 0;
792 		mcl[1].args[3] = DOMID_SELF;
793 		HYPERVISOR_multicall(mcl, 2);
794 		pktp = (void *)(va + rx->offset);
795 #ifdef XENNET_DEBUG_DUMP
796 		xennet_hex_dump(pktp, rx->status, "r", rx->id);
797 #endif
798 		if ((ifp->if_flags & IFF_PROMISC) == 0) {
799 			struct ether_header *eh = pktp;
800 			if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0 &&
801 			    memcmp(CLLADDR(ifp->if_sadl), eh->ether_dhost,
802 			    ETHER_ADDR_LEN) != 0) {
803 				DPRINTFN(XEDB_EVENT,
804 				    ("xennet_handler bad dest\n"));
805 				/* packet not for us */
806 				xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE,
807 				    req);
808 				continue;
809 			}
810 		}
811 		MGETHDR(m, M_DONTWAIT, MT_DATA);
812 		if (__predict_false(m == NULL)) {
813 			printf("xennet: rx no mbuf\n");
814 			ifp->if_ierrors++;
815 			xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE, req);
816 			continue;
817 		}
818 		MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);
819 
820 		m->m_pkthdr.rcvif = ifp;
821 		if (__predict_true(sc->sc_rx_ring.req_prod_pvt !=
822 		    sc->sc_rx_ring.sring->rsp_prod)) {
823 			m->m_len = m->m_pkthdr.len = rx->status;
824 			MEXTADD(m, pktp, rx->status,
825 			    M_DEVBUF, xennet_rx_mbuf_free, req);
826 			m->m_flags |= M_EXT_RW; /* we own the buffer */
827 			req->rxreq_gntref = GRANT_STACK_REF;
828 		} else {
829 			/*
830 			 * This was our last receive buffer, allocate
831 			 * memory, copy data and push the receive
832 			 * buffer back to the hypervisor.
833 			 */
834 			m->m_len = min(MHLEN, rx->status);
835 			m->m_pkthdr.len = 0;
836 			m_copyback(m, 0, rx->status, pktp);
837 			xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE, req);
838 			if (m->m_pkthdr.len < rx->status) {
839 				/* out of memory, just drop packets */
840 				ifp->if_ierrors++;
841 				m_freem(m);
842 				continue;
843 			}
844 		}
845 		if ((rx->flags & NETRXF_csum_blank) != 0) {
846 			xennet_checksum_fill(&m);
847 			if (m == NULL) {
848 				ifp->if_ierrors++;
849 				continue;
850 			}
851 		}
852 #if NBPFILTER > 0
853 		/*
854 		 * Pass packet to bpf if there is a listener.
855 		 */
856 		if (ifp->if_bpf)
857 			bpf_mtap(ifp->if_bpf, m);
858 #endif
859 
860 		ifp->if_ipackets++;
861 
862 		/* Pass the packet up. */
863 		(*ifp->if_input)(ifp, m);
864 	}
865 	x86_lfence();
866 	sc->sc_rx_ring.rsp_cons = i;
867 	RING_FINAL_CHECK_FOR_RESPONSES(&sc->sc_rx_ring, more_to_do);
868 	if (more_to_do)
869 		goto again;
870 	return 1;
871 }
872 
873 /*
874  * Called at splnet.
875  */
876 void
877 xennet_start(struct ifnet *ifp)
878 {
879 	struct xennet_xenbus_softc *sc = ifp->if_softc;
880 
881 	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_start()\n", device_xname(sc->sc_dev)));
882 
883 #if NRND > 0
884 	rnd_add_uint32(&sc->sc_rnd_source, sc->sc_tx_ring.req_prod_pvt);
885 #endif
886 
887 	xennet_tx_complete(sc);
888 
889 	if (__predict_false(
890 	    (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING))
891 		return;
892 
893 	/*
894 	 * The Xen communication channel is much more efficient if we can
895 	 * schedule a batch of packets for domain0. To achieve this, we
896 	 * schedule a soft interrupt, and just return. This way, the network
897 	 * stack will enqueue all pending mbufs in the interface's send queue
898 	 * before they are processed by xennet_softstart().
899 	 */
900 	softint_schedule(sc->sc_softintr);
901 	return;
902 }
903 
904 /*
905  * called at splsoftnet
906  */
907 void
908 xennet_softstart(void *arg)
909 {
910 	struct xennet_xenbus_softc *sc = arg;
911 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
912 	struct mbuf *m, *new_m;
913 	netif_tx_request_t *txreq;
914 	RING_IDX req_prod;
915 	paddr_t pa, pa2;
916 	struct xennet_txreq *req;
917 	int notify;
918 	int do_notify = 0;
919 	int s;
920 
921 	s = splnet();
922 	if (__predict_false(
923 	    (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)) {
924 		splx(s);
925 		return;
926 	}
927 
928 	req_prod = sc->sc_tx_ring.req_prod_pvt;
929 	while (/*CONSTCOND*/1) {
930 		uint16_t txflags;
931 
932 		req = SLIST_FIRST(&sc->sc_txreq_head);
933 		if (__predict_false(req == NULL)) {
934 			ifp->if_flags |= IFF_OACTIVE;
935 			break;
936 		}
937 		IFQ_POLL(&ifp->if_snd, m);
938 		if (m == NULL)
939 			break;
940 
941 		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
942 		case M_EXT|M_EXT_CLUSTER:
943 			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
944 			pa = m->m_ext.ext_paddr +
945 				(m->m_data - m->m_ext.ext_buf);
946 			break;
947 		case 0:
948 			KASSERT(m->m_paddr != M_PADDR_INVALID);
949 			pa = m->m_paddr + M_BUFOFFSET(m) +
950 				(m->m_data - M_BUFADDR(m));
951 			break;
952 		default:
953 			if (__predict_false(
954 			    !pmap_extract(pmap_kernel(), (vaddr_t)m->m_data,
955 			    &pa))) {
956 				panic("xennet_start: no pa");
957 			}
958 			break;
959 		}
960 
961 		if ((m->m_pkthdr.csum_flags &
962 		    (M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
963 			txflags = NETTXF_csum_blank;
964 		} else {
965 			txflags = 0;
966 		}
967 
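		/*
		 * The backend expects a single buffer that does not cross a
		 * page boundary; if the packet is split across several mbufs
		 * or spans two pages, copy it into a fresh mbuf (cluster)
		 * first.
		 */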
968 		if (m->m_pkthdr.len != m->m_len ||
969 		    (pa ^ (pa + m->m_pkthdr.len - 1)) & PG_FRAME) {
970 
971 			MGETHDR(new_m, M_DONTWAIT, MT_DATA);
972 			if (__predict_false(new_m == NULL)) {
973 				printf("xennet: no mbuf\n");
974 				break;
975 			}
976 			if (m->m_pkthdr.len > MHLEN) {
977 				MCLGET(new_m, M_DONTWAIT);
978 				if (__predict_false(
979 				    (new_m->m_flags & M_EXT) == 0)) {
980 					DPRINTF(("xennet: no mbuf cluster\n"));
981 					m_freem(new_m);
982 					break;
983 				}
984 			}
985 
986 			m_copydata(m, 0, m->m_pkthdr.len, mtod(new_m, void *));
987 			new_m->m_len = new_m->m_pkthdr.len = m->m_pkthdr.len;
988 
989 			if ((new_m->m_flags & M_EXT) != 0) {
990 				pa = new_m->m_ext.ext_paddr;
991 				KASSERT(new_m->m_data == new_m->m_ext.ext_buf);
992 				KASSERT(pa != M_PADDR_INVALID);
993 			} else {
994 				pa = new_m->m_paddr;
995 				KASSERT(pa != M_PADDR_INVALID);
996 				KASSERT(new_m->m_data == M_BUFADDR(new_m));
997 				pa += M_BUFOFFSET(new_m);
998 			}
999 			if (__predict_false(xengnt_grant_access(
1000 			    sc->sc_xbusd->xbusd_otherend_id,
1001 			    xpmap_ptom_masked(pa),
1002 			    GNTMAP_readonly, &req->txreq_gntref) != 0)) {
1003 				m_freem(new_m);
1004 				ifp->if_flags |= IFF_OACTIVE;
1005 				break;
1006 			}
1007 			/* we will be able to send new_m */
1008 			IFQ_DEQUEUE(&ifp->if_snd, m);
1009 			m_freem(m);
1010 			m = new_m;
1011 		} else {
1012 			if (__predict_false(xengnt_grant_access(
1013 			    sc->sc_xbusd->xbusd_otherend_id,
1014 			    xpmap_ptom_masked(pa),
1015 			    GNTMAP_readonly, &req->txreq_gntref) != 0)) {
1016 				ifp->if_flags |= IFF_OACTIVE;
1017 				break;
1018 			}
1019 			/* we will be able to send m */
1020 			IFQ_DEQUEUE(&ifp->if_snd, m);
1021 		}
1022 		MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
1023 
1024 		KASSERT(((pa ^ (pa + m->m_pkthdr.len - 1)) & PG_FRAME) == 0);
1025 
1026 		SLIST_REMOVE_HEAD(&sc->sc_txreq_head, txreq_next);
1027 		req->txreq_m = m;
1028 
1029 		DPRINTFN(XEDB_MBUF, ("xennet_start id %d, "
1030 		    "mbuf %p, buf %p/%p/%p, size %d\n",
1031 		    req->txreq_id, m, mtod(m, void *), (void *)pa,
1032 		    (void *)xpmap_ptom_masked(pa), m->m_pkthdr.len));
1033 		pmap_extract_ma(pmap_kernel(), mtod(m, vaddr_t), &pa2);
1034 		DPRINTFN(XEDB_MBUF, ("xennet_start pa %p ma %p/%p\n",
1035 		    (void *)pa, (void *)xpmap_ptom_masked(pa), (void *)pa2));
1036 #ifdef XENNET_DEBUG_DUMP
1037 		xennet_hex_dump(mtod(m, u_char *), m->m_pkthdr.len, "s", req->txreq_id);
1038 #endif
1039 
1040 		txreq = RING_GET_REQUEST(&sc->sc_tx_ring, req_prod);
1041 		txreq->id = req->txreq_id;
1042 		txreq->gref = req->txreq_gntref;
1043 		txreq->offset = pa & ~PG_FRAME;
1044 		txreq->size = m->m_pkthdr.len;
1045 		txreq->flags = txflags;
1046 
1047 		req_prod++;
1048 		sc->sc_tx_ring.req_prod_pvt = req_prod;
1049 		RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->sc_tx_ring, notify);
1050 		if (notify)
1051 			do_notify = 1;
1052 
1053 #ifdef XENNET_DEBUG
1054 		DPRINTFN(XEDB_MEM, ("packet addr %p/%p, physical %p/%p, "
1055 		    "m_paddr %p, len %d/%d\n", M_BUFADDR(m), mtod(m, void *),
1056 		    (void *)*kvtopte(mtod(m, vaddr_t)),
1057 		    (void *)xpmap_mtop(*kvtopte(mtod(m, vaddr_t))),
1058 		    (void *)m->m_paddr, m->m_pkthdr.len, m->m_len));
1059 		DPRINTFN(XEDB_MEM, ("id %d gref %d offset %d size %d flags %d"
1060 		    " prod %d\n",
1061 		    txreq->id, txreq->gref, txreq->offset, txreq->size,
1062 		    txreq->flags, req_prod));
1063 #endif
1064 
1065 #if NBPFILTER > 0
1066 		/*
1067 		 * Pass packet to bpf if there is a listener.
1068 		 */
1069 		if (ifp->if_bpf) {
1070 			bpf_mtap(ifp->if_bpf, m);
1071 		}
1072 #endif
1073 	}
1074 
1075 	x86_lfence();
1076 	if (do_notify) {
1077 		hypervisor_notify_via_evtchn(sc->sc_evtchn);
1078 		ifp->if_timer = 5;
1079 	}
1080 	splx(s);
1081 
1082 	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_start() done\n",
1083 	    device_xname(sc->sc_dev)));
1084 }
1085 
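/*
 * Standard ioctl handler; everything is delegated to ether_ioctl() and
 * ENETRESET (multicast list changed) is ignored since there is no
 * hardware filter to reprogram.
 */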
1086 int
1087 xennet_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1088 {
1089 #ifdef XENNET_DEBUG
1090 	struct xennet_xenbus_softc *sc = ifp->if_softc;
1091 #endif
1092 	int s, error = 0;
1093 
1094 	s = splnet();
1095 
1096 	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl()\n", device_xname(sc->sc_dev)));
1097 	error = ether_ioctl(ifp, cmd, data);
1098 	if (error == ENETRESET)
1099 		error = 0;
1100 	splx(s);
1101 
1102 	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl() returning %d\n",
1103 	    device_xname(sc->sc_dev), error));
1104 
1105 	return error;
1106 }
1107 
1108 void
1109 xennet_watchdog(struct ifnet *ifp)
1110 {
1111 	aprint_verbose_ifnet(ifp, "xennet_watchdog\n");
1112 }
1113 
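/*
 * if_init handler: if the interface was not already running, re-arm the
 * RX response event and unmask and kick the event channel, then mark the
 * interface as running.
 */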
1114 int
1115 xennet_init(struct ifnet *ifp)
1116 {
1117 	struct xennet_xenbus_softc *sc = ifp->if_softc;
1118 	int s = splnet();
1119 
1120 	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_init()\n", device_xname(sc->sc_dev)));
1121 
1122 	if ((ifp->if_flags & IFF_RUNNING) == 0) {
1123 		sc->sc_rx_ring.sring->rsp_event =
1124 		    sc->sc_rx_ring.rsp_cons + 1;
1125 		hypervisor_enable_event(sc->sc_evtchn);
1126 		hypervisor_notify_via_evtchn(sc->sc_evtchn);
1127 		xennet_reset(sc);
1128 	}
1129 	ifp->if_flags |= IFF_RUNNING;
1130 	ifp->if_flags &= ~IFF_OACTIVE;
1131 	ifp->if_timer = 0;
1132 	splx(s);
1133 	return 0;
1134 }
1135 
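/*
 * if_stop handler: mark the interface down and mask the event channel so
 * no further interrupts are delivered.
 */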
1136 void
1137 xennet_stop(struct ifnet *ifp, int disable)
1138 {
1139 	struct xennet_xenbus_softc *sc = ifp->if_softc;
1140 	int s = splnet();
1141 
1142 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1143 	hypervisor_mask_event(sc->sc_evtchn);
1144 	xennet_reset(sc);
1145 	splx(s);
1146 }
1147 
1148 void
1149 xennet_reset(struct xennet_xenbus_softc *sc)
1150 {
1151 
1152 	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_reset()\n", device_xname(sc->sc_dev)));
1153 }
1154 
1155 #if defined(NFS_BOOT_BOOTSTATIC)
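/*
 * NFS diskless boot helper: extract the network parameters passed on the
 * Xen kernel command line and fill in the nfs_diskless structure,
 * returning NFS_BOOT_HAS_* flags for the values that were found.
 */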
1156 int
1157 xennet_bootstatic_callback(struct nfs_diskless *nd)
1158 {
1159 #if 0
1160 	struct ifnet *ifp = nd->nd_ifp;
1161 	struct xennet_xenbus_softc *sc =
1162 	    (struct xennet_xenbus_softc *)ifp->if_softc;
1163 #endif
1164 	int flags = 0;
1165 	union xen_cmdline_parseinfo xcp;
1166 	struct sockaddr_in *sin;
1167 
1168 	memset(&xcp, 0, sizeof(xcp.xcp_netinfo));
1169 	xcp.xcp_netinfo.xi_ifno = /* XXX sc->sc_ifno */ 0;
1170 	xcp.xcp_netinfo.xi_root = nd->nd_root.ndm_host;
1171 	xen_parse_cmdline(XEN_PARSE_NETINFO, &xcp);
1172 
1173 	if (xcp.xcp_netinfo.xi_root[0] != '\0') {
1174 		flags |= NFS_BOOT_HAS_SERVER;
1175 		if (strchr(xcp.xcp_netinfo.xi_root, ':') != NULL)
1176 			flags |= NFS_BOOT_HAS_ROOTPATH;
1177 	}
1178 
1179 	nd->nd_myip.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[0]);
1180 	nd->nd_gwip.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[2]);
1181 	nd->nd_mask.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[3]);
1182 
1183 	sin = (struct sockaddr_in *) &nd->nd_root.ndm_saddr;
1184 	memset((void *)sin, 0, sizeof(*sin));
1185 	sin->sin_len = sizeof(*sin);
1186 	sin->sin_family = AF_INET;
1187 	sin->sin_addr.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[1]);
1188 
1189 	if (nd->nd_myip.s_addr)
1190 		flags |= NFS_BOOT_HAS_MYIP;
1191 	if (nd->nd_gwip.s_addr)
1192 		flags |= NFS_BOOT_HAS_GWIP;
1193 	if (nd->nd_mask.s_addr)
1194 		flags |= NFS_BOOT_HAS_MASK;
1195 	if (sin->sin_addr.s_addr)
1196 		flags |= NFS_BOOT_HAS_SERVADDR;
1197 
1198 	return flags;
1199 }
1200 #endif /* defined(NFS_BOOT_BOOTSTATIC) */
1201 
1202 #ifdef XENNET_DEBUG_DUMP
1203 #define XCHR(x) hexdigits[(x) & 0xf]
1204 static void
1205 xennet_hex_dump(const unsigned char *pkt, size_t len, const char *type, int id)
1206 {
1207 	size_t i, j;
1208 
1209 	printf("pkt %p len %zu/%zx type %s id %d\n", pkt, len, len, type, id);
1210 	printf("00000000  ");
1211 	for(i=0; i<len; i++) {
1212 		printf("%c%c ", XCHR(pkt[i]>>4), XCHR(pkt[i]));
1213 		if ((i+1) % 16 == 8)
1214 			printf(" ");
1215 		if ((i+1) % 16 == 0) {
1216 			printf(" %c", '|');
1217 			for(j=0; j<16; j++)
1218 				printf("%c", pkt[i-15+j]>=32 &&
1219 				    pkt[i-15+j]<127?pkt[i-15+j]:'.');
1220 			printf("%c\n%c%c%c%c%c%c%c%c  ", '|',
1221 			    XCHR((i+1)>>28), XCHR((i+1)>>24),
1222 			    XCHR((i+1)>>20), XCHR((i+1)>>16),
1223 			    XCHR((i+1)>>12), XCHR((i+1)>>8),
1224 			    XCHR((i+1)>>4), XCHR(i+1));
1225 		}
1226 	}
1227 	printf("\n");
1228 }
1229 #undef XCHR
1230 #endif
1231