xref: /netbsd-src/sys/arch/xen/xen/if_xennet_xenbus.c (revision fad4c9f71477ae11cea2ee75ec82151ac770a534)
1 /*      $NetBSD: if_xennet_xenbus.c,v 1.10 2006/05/27 19:54:59 bouyer Exp $      */
2 
3 /*
4  * Copyright (c) 2006 Manuel Bouyer.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *      This product includes software developed by Manuel Bouyer.
17  * 4. The name of the author may not be used to endorse or promote products
18  *    derived from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
29  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30  *
31  */
32 
33 /*
34  * Copyright (c) 2004 Christian Limpach.
35  * All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. All advertising materials mentioning features or use of this software
46  *    must display the following acknowledgement:
47  *      This product includes software developed by Christian Limpach.
48  * 4. The name of the author may not be used to endorse or promote products
49  *    derived from this software without specific prior written permission.
50  *
51  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
52  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
53  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
54  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
55  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
56  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
60  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61  */
62 
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.10 2006/05/27 19:54:59 bouyer Exp $");
65 
66 #include "opt_xen.h"
67 #include "opt_nfs_boot.h"
68 #include "rnd.h"
69 #include "bpfilter.h"
70 
71 #include <sys/param.h>
72 #include <sys/device.h>
73 #include <sys/conf.h>
74 #include <sys/kernel.h>
75 #include <sys/systm.h>
76 #if NRND > 0
77 #include <sys/rnd.h>
78 #endif
79 
80 #include <net/if.h>
81 #include <net/if_dl.h>
82 #include <net/if_ether.h>
83 #if NBPFILTER > 0
84 #include <net/bpf.h>
85 #include <net/bpfdesc.h>
86 #endif
87 
88 #if defined(NFS_BOOT_BOOTSTATIC)
89 #include <sys/fstypes.h>
90 #include <sys/mount.h>
91 #include <sys/statvfs.h>
92 #include <netinet/in.h>
93 #include <nfs/rpcv2.h>
94 #include <nfs/nfsproto.h>
95 #include <nfs/nfs.h>
96 #include <nfs/nfsmount.h>
97 #include <nfs/nfsdiskless.h>
98 #include <machine/if_xennetvar.h>
99 #endif /* defined(NFS_BOOT_BOOTSTATIC) */
100 
101 #include <uvm/uvm.h>
102 
103 #include <machine/xen3-public/io/ring.h>
104 
105 #include <machine/granttables.h>
106 #include <machine/xenbus.h>
107 #include "locators.h"
108 
109 #undef XENNET_DEBUG_DUMP
110 #undef XENNET_DEBUG
111 #ifdef XENNET_DEBUG
112 #define XEDB_FOLLOW     0x01
113 #define XEDB_INIT       0x02
114 #define XEDB_EVENT      0x04
115 #define XEDB_MBUF       0x08
116 #define XEDB_MEM        0x10
117 int xennet_debug = 0xff;
118 #define DPRINTF(x) if (xennet_debug) printf x;
119 #define DPRINTFN(n,x) if (xennet_debug & (n)) printf x;
120 #else
121 #define DPRINTF(x)
122 #define DPRINTFN(n,x)
123 #endif
124 
125 #define GRANT_INVALID_REF -1 /* entry is free */
126 #define GRANT_STACK_REF   -2 /* entry owned by the network stack */
127 
128 #define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
129 #define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
130 
131 struct xennet_txreq {
132 	SLIST_ENTRY(xennet_txreq) txreq_next;
133 	uint16_t txreq_id; /* ID passed to backend */
134 	grant_ref_t txreq_gntref; /* grant ref of this request */
135 	struct mbuf *txreq_m; /* mbuf being transmitted */
136 };
137 
138 struct xennet_rxreq {
139 	SLIST_ENTRY(xennet_rxreq) rxreq_next;
140 	uint16_t rxreq_id; /* ID passed to backend */
141 	grant_ref_t rxreq_gntref; /* grant ref of this request */
142 /* va/pa for this receive buf. ma will be provided by backend */
143 	paddr_t rxreq_pa;
144 	vaddr_t rxreq_va;
145 	struct xennet_xenbus_softc *rxreq_sc; /* pointer to our interface */
146 };
147 
148 struct xennet_xenbus_softc {
149 	struct device sc_dev;
150 	struct ethercom sc_ethercom;
151 	uint8_t sc_enaddr[6];
152 	struct xenbus_device *sc_xbusd;
153 
154 	netif_tx_front_ring_t sc_tx_ring;
155 	netif_rx_front_ring_t sc_rx_ring;
156 
157 	unsigned int sc_evtchn;
158 	void *sc_softintr;
159 
160 	grant_ref_t sc_tx_ring_gntref;
161 	grant_ref_t sc_rx_ring_gntref;
162 
163 	struct xennet_txreq sc_txreqs[NET_TX_RING_SIZE];
164 	struct xennet_rxreq sc_rxreqs[NET_RX_RING_SIZE];
165 	SLIST_HEAD(,xennet_txreq) sc_txreq_head; /* list of free TX requests */
166 	SLIST_HEAD(,xennet_rxreq) sc_rxreq_head; /* list of free RX requests */
167 	int sc_free_rxreql; /* number of free receive request structs */
168 
169 	int sc_backend_status; /* our status with backend */
170 #define BEST_CLOSED		0
171 #define BEST_DISCONNECTED	1
172 #define BEST_CONNECTED		2
173 #if NRND > 0
174 	rndsource_element_t     sc_rnd_source;
175 #endif
176 };
177 
178 /* too big to be on stack */
179 static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
180 static paddr_t xennet_pages[NET_RX_RING_SIZE];
181 
182 static int  xennet_xenbus_match(struct device *, struct cfdata *, void *);
183 static void xennet_xenbus_attach(struct device *, struct device *, void *);
184 static int  xennet_xenbus_detach(struct device *, int);
185 static void xennet_backend_changed(void *, XenbusState);
186 
187 static int  xennet_xenbus_resume(void *);
188 static void xennet_alloc_rx_buffer(struct xennet_xenbus_softc *);
189 static void xennet_free_rx_buffer(struct xennet_xenbus_softc *);
190 static void xennet_tx_complete(struct xennet_xenbus_softc *);
191 static void xennet_rx_mbuf_free(struct mbuf *, caddr_t, size_t, void *);
192 static int  xennet_handler(void *);
193 #ifdef XENNET_DEBUG_DUMP
194 static void xennet_hex_dump(const unsigned char *, size_t, const char *, int);
195 #endif
196 
197 static int  xennet_init(struct ifnet *);
198 static void xennet_stop(struct ifnet *, int);
199 static void xennet_reset(struct xennet_xenbus_softc *);
200 static void xennet_softstart(void *);
201 static void xennet_start(struct ifnet *);
202 static int  xennet_ioctl(struct ifnet *, u_long, caddr_t);
203 static void xennet_watchdog(struct ifnet *);
204 
205 CFATTACH_DECL(xennet_xenbus, sizeof(struct xennet_xenbus_softc),
206    xennet_xenbus_match, xennet_xenbus_attach, xennet_xenbus_detach, NULL);
207 
208 static int
209 xennet_xenbus_match(struct device *parent, struct cfdata *match, void *aux)
210 {
211 	struct xenbusdev_attach_args *xa = aux;
212 
213 	if (strcmp(xa->xa_type, "vif") != 0)
214 		return 0;
215 
216 	if (match->cf_loc[XENBUSCF_ID] != XENBUSCF_ID_DEFAULT &&
217 	    match->cf_loc[XENBUSCF_ID] != xa->xa_id)
218 		return 0;
219 
220 	return 1;
221 }
222 
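/*
 * Autoconfiguration attach: set up the free TX request list, allocate a
 * wired page for each RX request, read the MAC address from the xenstore,
 * attach the ethernet interface, establish the transmit soft interrupt,
 * then call xennet_xenbus_resume() to set up the shared rings and connect
 * to the backend.
 */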
223 static void
224 xennet_xenbus_attach(struct device *parent, struct device *self, void *aux)
225 {
226 	struct xennet_xenbus_softc *sc = (void *)self;
227 	struct xenbusdev_attach_args *xa = aux;
228 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
229 	int err;
230 	RING_IDX i;
231 	char *val, *e, *p;
232 	int s;
233 	extern int ifqmaxlen; /* XXX */
234 #ifdef XENNET_DEBUG
235 	char **dir;
236 	int dir_n = 0;
237 	char id_str[20];
238 #endif
239 
240 	aprint_normal(": Xen Virtual Network Interface\n");
241 #ifdef XENNET_DEBUG
242 	printf("path: %s\n", xa->xa_xbusd->xbusd_path);
243 	snprintf(id_str, sizeof(id_str), "%d", xa->xa_id);
244 	err = xenbus_directory(NULL, "device/vif", id_str, &dir_n, &dir);
245 	if (err) {
246 		printf("%s: xenbus_directory err %d\n",
247 		    sc->sc_dev.dv_xname, err);
248 	} else {
249 		printf("%s/\n", xa->xa_xbusd->xbusd_path);
250 		for (i = 0; i < dir_n; i++) {
251 			printf("\t/%s", dir[i]);
252 			err = xenbus_read(NULL, xa->xa_xbusd->xbusd_path, dir[i],
253 			    NULL, &val);
254 			if (err) {
255 				printf("%s: xenbus_read err %d\n",
256 				    sc->sc_dev.dv_xname, err);
257 			} else {
258 				printf(" = %s\n", val);
259 				free(val, M_DEVBUF);
260 			}
261 		}
262 	}
263 #endif /* XENNET_DEBUG */
264 	sc->sc_xbusd = xa->xa_xbusd;
265 	sc->sc_xbusd->xbusd_otherend_changed = xennet_backend_changed;
266 
267 	/* initialize free TX and RX request lists */
268 	SLIST_INIT(&sc->sc_txreq_head);
269 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
270 		sc->sc_txreqs[i].txreq_id = i;
271 		SLIST_INSERT_HEAD(&sc->sc_txreq_head, &sc->sc_txreqs[i],
272 		    txreq_next);
273 	}
274 	SLIST_INIT(&sc->sc_rxreq_head);
275 	s = splvm();
276 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
277 		struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
278 		rxreq->rxreq_id = i;
279 		rxreq->rxreq_sc = sc;
280 		rxreq->rxreq_va = uvm_km_alloc(kernel_map,
281 		    PAGE_SIZE, PAGE_SIZE, UVM_KMF_WIRED | UVM_KMF_ZERO);
282 		if (rxreq->rxreq_va == 0)
283 			break;
284 		if (!pmap_extract(pmap_kernel(), rxreq->rxreq_va,
285 		    &rxreq->rxreq_pa))
286 			panic("xennet: no pa for mapped va ?");
287 		rxreq->rxreq_gntref = GRANT_INVALID_REF;
288 		SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq, rxreq_next);
289 	}
290 	splx(s);
291 	sc->sc_free_rxreql = i;
292 	if (sc->sc_free_rxreql == 0) {
293 		aprint_error("%s: failed to allocate rx memory\n",
294 		    sc->sc_dev.dv_xname);
295 		return;
296 	}
297 
298 	/* read mac address */
299 	err = xenbus_read(NULL, xa->xa_xbusd->xbusd_path, "mac", NULL, &val);
300 	if (err) {
301 		aprint_error("%s: can't read mac address, err %d\n",
302 		    sc->sc_dev.dv_xname, err);
303 		return;
304 	}
305 	/* parse mac address */
306 	for (i = 0, p = val; i < 6; i++) {
307 		sc->sc_enaddr[i] = strtoul(p, &e, 16);
308 		if ((e[0] == '\0' && i != 5) && e[0] != ':') {
309 			aprint_error("%s: %s is not a valid mac address\n",
310 			    sc->sc_dev.dv_xname, val);
311 			free(val, M_DEVBUF);
312 			return;
313 		}
314 		p = &e[1];
315 	}
316 	free(val, M_DEVBUF);
317 	aprint_normal("%s: MAC address %s\n", sc->sc_dev.dv_xname,
318 	    ether_sprintf(sc->sc_enaddr));
319 	/* Initialize ifnet structure and attach interface */
320 	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
321 	ifp->if_softc = sc;
322 	ifp->if_start = xennet_start;
323 	ifp->if_ioctl = xennet_ioctl;
324 	ifp->if_watchdog = xennet_watchdog;
325 	ifp->if_init = xennet_init;
326 	ifp->if_stop = xennet_stop;
327 	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;
328 	ifp->if_timer = 0;
329 	ifp->if_snd.ifq_maxlen = max(ifqmaxlen, NET_TX_RING_SIZE * 2);
330 	IFQ_SET_READY(&ifp->if_snd);
331 	if_attach(ifp);
332 	ether_ifattach(ifp, sc->sc_enaddr);
333 	sc->sc_softintr = softintr_establish(IPL_SOFTNET, xennet_softstart, sc);
334 	if (sc->sc_softintr == NULL)
335 		panic("xennet: can't establish soft interrupt");
336 
337 	/* initialise shared structures and tell backend that we are ready */
338 	xennet_xenbus_resume(sc);
339 }
340 
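/*
 * Detach: stop the interface, wait for pending TX responses and collect
 * pending RX packets, take the RX pages back from the backend, detach the
 * ethernet interface, then wait for the backend to release the ring grants
 * before freeing the rings and removing the soft interrupt and event
 * channel handler.
 */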
341 static int
342 xennet_xenbus_detach(struct device *self, int flags)
343 {
344 	struct xennet_xenbus_softc *sc = (void *)self;
345 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
346 	int s0, s1;
347 	RING_IDX i;
348 
349 	DPRINTF(("%s: xennet_xenbus_detach\n", sc->sc_dev.dv_xname));
350 	s0 = splnet();
351 	xennet_stop(ifp, 1);
352 	/* wait for pending TX to complete, and collect pending RX packets */
353 	xennet_handler(sc);
354 	while (sc->sc_tx_ring.sring->rsp_prod != sc->sc_tx_ring.rsp_cons) {
355 		tsleep(xennet_xenbus_detach, PRIBIO, "xnet_detach", hz/2);
356 		xennet_handler(sc);
357 	}
358 	xennet_free_rx_buffer(sc);
359 
360 	s1 = splvm();
361 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
362 		struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
363 		uvm_km_free(kernel_map, rxreq->rxreq_va, PAGE_SIZE,
364 		    UVM_KMF_WIRED);
365 	}
366 	splx(s1);
367 
368 	ether_ifdetach(ifp);
369 	if_detach(ifp);
370 	while (xengnt_status(sc->sc_tx_ring_gntref)) {
371 		tsleep(xennet_xenbus_detach, PRIBIO, "xnet_txref", hz/2);
372 	}
373 	xengnt_revoke_access(sc->sc_tx_ring_gntref);
374 	uvm_km_free(kernel_map, (vaddr_t)sc->sc_tx_ring.sring, PAGE_SIZE,
375 	    UVM_KMF_WIRED);
376 	while (xengnt_status(sc->sc_rx_ring_gntref)) {
377 		tsleep(xennet_xenbus_detach, PRIBIO, "xnet_rxref", hz/2);
378 	}
379 	xengnt_revoke_access(sc->sc_rx_ring_gntref);
380 	uvm_km_free(kernel_map, (vaddr_t)sc->sc_rx_ring.sring, PAGE_SIZE,
381 	    UVM_KMF_WIRED);
382 	softintr_disestablish(sc->sc_softintr);
383 	event_remove_handler(sc->sc_evtchn, &xennet_handler, sc);
384 	splx(s0);
385 	DPRINTF(("%s: xennet_xenbus_detach done\n", sc->sc_dev.dv_xname));
386 	return 0;
387 }
388 
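/*
 * (Re)connect to the backend: allocate and grant the shared TX and RX
 * rings, allocate an event channel, and advertise the ring references and
 * event channel to the backend in a single xenbus transaction, switching
 * the frontend to XenbusStateConnected. Finally populate the RX ring with
 * buffers.
 */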
389 static int
390 xennet_xenbus_resume(void *p)
391 {
392 	struct xennet_xenbus_softc *sc = p;
393 	struct xenbus_transaction *xbt;
394 	int error;
395 	netif_tx_sring_t *tx_ring;
396 	netif_rx_sring_t *rx_ring;
397 	paddr_t ma;
398 	const char *errmsg;
399 
400 	sc->sc_tx_ring_gntref = GRANT_INVALID_REF;
401 	sc->sc_rx_ring_gntref = GRANT_INVALID_REF;
402 
403 
404 	/* setup device: alloc event channel and shared rings */
405 	tx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
406 	     UVM_KMF_WIRED | UVM_KMF_ZERO);
407 	rx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
408 	    UVM_KMF_WIRED | UVM_KMF_ZERO);
409 	if (tx_ring == NULL || rx_ring == NULL)
410 		panic("xennet_xenbus_resume: can't alloc rings");
411 
412 	SHARED_RING_INIT(tx_ring);
413 	FRONT_RING_INIT(&sc->sc_tx_ring, tx_ring, PAGE_SIZE);
414 	SHARED_RING_INIT(rx_ring);
415 	FRONT_RING_INIT(&sc->sc_rx_ring, rx_ring, PAGE_SIZE);
416 
417 	(void)pmap_extract_ma(pmap_kernel(), (vaddr_t)tx_ring, &ma);
418 	error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_tx_ring_gntref);
419 	if (error)
420 		return error;
421 	(void)pmap_extract_ma(pmap_kernel(), (vaddr_t)rx_ring, &ma);
422 	error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_rx_ring_gntref);
423 	if (error)
424 		return error;
425 	error = xenbus_alloc_evtchn(sc->sc_xbusd, &sc->sc_evtchn);
426 	if (error)
427 		return error;
428 	aprint_verbose("%s: using event channel %d\n",
429 	    sc->sc_dev.dv_xname, sc->sc_evtchn);
430 	event_set_handler(sc->sc_evtchn, &xennet_handler, sc,
431 	    IPL_NET, sc->sc_dev.dv_xname);
432 
433 again:
434 	xbt = xenbus_transaction_start();
435 	if (xbt == NULL)
436 		return ENOMEM;
437 	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
438 	    "tx-ring-ref","%u", sc->sc_tx_ring_gntref);
439 	if (error) {
440 		errmsg = "writing tx ring-ref";
441 		goto abort_transaction;
442 	}
443 	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
444 	    "rx-ring-ref","%u", sc->sc_rx_ring_gntref);
445 	if (error) {
446 		errmsg = "writing rx ring-ref";
447 		goto abort_transaction;
448 	}
449 	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
450 	    "event-channel", "%u", sc->sc_evtchn);
451 	if (error) {
452 		errmsg = "writing event channel";
453 		goto abort_transaction;
454 	}
455 	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
456 	    "state", "%d", XenbusStateConnected);
457 	if (error) {
458 		errmsg = "writing frontend XenbusStateConnected";
459 		goto abort_transaction;
460 	}
461 	error = xenbus_transaction_end(xbt, 0);
462 	if (error == EAGAIN)
463 		goto again;
464 	if (error) {
465 		xenbus_dev_fatal(sc->sc_xbusd, error, "completing transaction");
466 		return -1;
467 	}
468 	xennet_alloc_rx_buffer(sc);
469 	sc->sc_backend_status = BEST_CONNECTED;
470 	return 0;
471 
472 abort_transaction:
473 	xenbus_transaction_end(xbt, 1);
474 	xenbus_dev_fatal(sc->sc_xbusd, error, "%s", errmsg);
475 	return error;
476 }
477 
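/*
 * Xenbus callback invoked when the backend's state changes. On
 * XenbusStateClosing, mark the connection closed and acknowledge by
 * switching the frontend to XenbusStateClosed.
 */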
478 static void xennet_backend_changed(void *arg, XenbusState new_state)
479 {
480 	struct xennet_xenbus_softc *sc = arg;
481 	DPRINTF(("%s: new backend state %d\n", sc->sc_dev.dv_xname, new_state));
482 
483 	switch (new_state) {
484 	case XenbusStateInitialising:
485 	case XenbusStateInitWait:
486 	case XenbusStateInitialised:
487 		break;
488 	case XenbusStateClosing:
489 		sc->sc_backend_status = BEST_CLOSED;
490 		xenbus_switch_state(sc->sc_xbusd, NULL, XenbusStateClosed);
491 		break;
492 	case XenbusStateConnected:
493 		break;
494 	case XenbusStateUnknown:
495 	default:
496 		panic("bad backend state %d", new_state);
497 	}
498 }
499 
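/*
 * Hand free RX requests to the backend: for each free request set up a
 * grant-transfer reference, unmap the page, remove it from the
 * pseudo-physical map and return it to Xen with
 * XENMEM_decrease_reservation (all batched in one multicall), then push
 * the new requests on the RX ring.
 */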
500 static void
501 xennet_alloc_rx_buffer(struct xennet_xenbus_softc *sc)
502 {
503 	RING_IDX req_prod = sc->sc_rx_ring.req_prod_pvt;
504 	RING_IDX i;
505 	struct xennet_rxreq *req;
506 	struct xen_memory_reservation reservation;
507 	int s1, s2;
508 	paddr_t pfn;
509 
510 	s1 = splnet();
511 	for (i = 0; sc->sc_free_rxreql != 0; i++) {
512 		req  = SLIST_FIRST(&sc->sc_rxreq_head);
513 		KASSERT(req != NULL);
514 		RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->id =
515 		    req->rxreq_id;
516 		if (xengnt_grant_transfer(sc->sc_xbusd->xbusd_otherend_id,
517 		    &req->rxreq_gntref) != 0) {
518 			break;
519 		}
520 		RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->gref =
521 		    req->rxreq_gntref;
522 
523 		SLIST_REMOVE_HEAD(&sc->sc_rxreq_head, rxreq_next);
524 		sc->sc_free_rxreql--;
525 
526 		/* unmap the page */
527 		MULTI_update_va_mapping(&rx_mcl[i], req->rxreq_va, 0, 0);
528 		/*
529 		 * Remove this page from the pseudo-physical map before
530 		 * passing it back to Xen.
531 		 */
532 		pfn = (req->rxreq_pa - XPMAP_OFFSET) >> PAGE_SHIFT;
533 		xennet_pages[i] = xpmap_phys_to_machine_mapping[pfn];
534 		xpmap_phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
535 	}
536 	if (i == 0) {
537 		splx(s1);
538 		return;
539 	}
540 	/* also make sure to flush all TLB entries */
541 	rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
542 	/*
543 	 * We may have allocated buffers which have entries
544 	 * outstanding in the page update queue -- make sure we flush
545 	 * those first!
546 	 */
547 	s2 = splvm();
548 	xpq_flush_queue();
549 	splx(s2);
550 	/* now decrease reservation */
551 	reservation.extent_start = xennet_pages;
552 	reservation.nr_extents = i;
553 	reservation.extent_order = 0;
554 	reservation.address_bits = 0;
555 	reservation.domid = DOMID_SELF;
556 	rx_mcl[i].op = __HYPERVISOR_memory_op;
557 	rx_mcl[i].args[0] = XENMEM_decrease_reservation;
558 	rx_mcl[i].args[1] = (unsigned long)&reservation;
559 	HYPERVISOR_multicall(rx_mcl, i+1);
560 	if (__predict_false(rx_mcl[i].result != i)) {
561 		panic("xennet_alloc_rx_buffer: XENMEM_decrease_reservation");
562 	}
563 	sc->sc_rx_ring.req_prod_pvt = req_prod + i;
564 	RING_PUSH_REQUESTS(&sc->sc_rx_ring);
565 
566 	splx(s1);
567 	return;
568 }
569 
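/*
 * Take back the RX pages handed to the backend: wait for any buffer still
 * loaned to the network stack, revoke each outstanding grant transfer to
 * recover the machine page (or get a fresh page from the hypervisor if the
 * original one was lost), and remap it at the request's va/pa. Called at
 * detach time.
 */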
570 static void
571 xennet_free_rx_buffer(struct xennet_xenbus_softc *sc)
572 {
573 	paddr_t ma, pa;
574 	vaddr_t va;
575 	RING_IDX i;
576 	mmu_update_t mmu[1];
577 	multicall_entry_t mcl[2];
578 
579 	int s = splbio();
580 
581 	DPRINTF(("%s: xennet_free_rx_buffer\n", sc->sc_dev.dv_xname));
582 	/* get back memory from RX ring */
583 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
584 		struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
585 
586 		/*
587 		 * if the buffer is in transit in the network stack, wait for
588 		 * the network stack to free it.
589 		 */
590 		while ((volatile grant_ref_t)rxreq->rxreq_gntref ==
591 		    GRANT_STACK_REF)
592 			tsleep(xennet_xenbus_detach, PRIBIO, "xnet_free", hz/2);
593 
594 		if (rxreq->rxreq_gntref != GRANT_INVALID_REF) {
595 			/*
596 			 * this req is still granted. Get back the page or
597 			 * allocate a new one, and remap it.
598 			 */
599 			SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq,
600 			    rxreq_next);
601 			sc->sc_free_rxreql++;
602 			ma = xengnt_revoke_transfer(rxreq->rxreq_gntref);
603 			rxreq->rxreq_gntref = GRANT_INVALID_REF;
604 			if (ma == 0) {
605 				struct xen_memory_reservation xenres;
606 				/*
607 				 * transfer not complete, we lost the page.
608 				 * Get one from the hypervisor
609 				 */
610 				xenres.extent_start = &ma;
611 				xenres.nr_extents = 1;
612 				xenres.extent_order = 0;
613 				xenres.address_bits = 31;
614 				xenres.domid = DOMID_SELF;
615 				if (HYPERVISOR_memory_op(
616 				    XENMEM_increase_reservation, &xenres) < 0) {
617 					panic("xennet_free_rx_buffer: "
618 					    "can't get memory back");
619 				}
620 				KASSERT(ma != 0);
621 			}
622 			pa = rxreq->rxreq_pa;
623 			va = rxreq->rxreq_va;
624 			/* remap the page */
625 			mmu[0].ptr = (ma << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
626 			mmu[0].val = ((pa - XPMAP_OFFSET) >> PAGE_SHIFT);
627 			MULTI_update_va_mapping(&mcl[0], va,
628 			    (ma << PAGE_SHIFT) | PG_V | PG_KW,
629 			    UVMF_TLB_FLUSH|UVMF_ALL);
630 			xpmap_phys_to_machine_mapping[
631 			    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = ma;
632 			mcl[1].op = __HYPERVISOR_mmu_update;
633 			mcl[1].args[0] = (unsigned long)mmu;
634 			mcl[1].args[1] = 1;
635 			mcl[1].args[2] = 0;
636 			mcl[1].args[3] = DOMID_SELF;
637 			HYPERVISOR_multicall(mcl, 2);
638 		}
639 
640 	}
641 	splx(s);
642 	DPRINTF(("%s: xennet_free_rx_buffer done\n", sc->sc_dev.dv_xname));
643 }
644 
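/*
 * External-storage free callback for RX mbufs: return the rxreq to the
 * free list, replenish the backend with fresh RX buffers once at least
 * half of the ring's requests are free and the backend is connected, and
 * release the mbuf header back to the mbuf pool cache.
 */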
645 static void
646 xennet_rx_mbuf_free(struct mbuf *m, caddr_t buf, size_t size, void *arg)
647 {
648 	struct xennet_rxreq *req = arg;
649 	struct xennet_xenbus_softc *sc = req->rxreq_sc;
650 
651 	SLIST_INSERT_HEAD(&sc->sc_rxreq_head, req, rxreq_next);
652 	sc->sc_free_rxreql++;
653 
654 	req->rxreq_gntref = GRANT_INVALID_REF;
655 	if (sc->sc_free_rxreql >= NET_RX_RING_SIZE / 2 &&
656 	    __predict_true(sc->sc_backend_status == BEST_CONNECTED)) {
657 		xennet_alloc_rx_buffer(sc);
658 	}
659 
660 	if (m)
661 		pool_cache_put(&mbpool_cache, m);
662 }
663 
664 
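/*
 * Harvest TX responses: for each completed request revoke the grant, free
 * the transmitted mbuf, return the request to the free list and update the
 * packet/error counters, then re-arm rsp_event and restart the transmit
 * queue if it was flow-controlled.
 */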
665 static void
666 xennet_tx_complete(struct xennet_xenbus_softc *sc)
667 {
668 	struct xennet_txreq *req;
669 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
670 	RING_IDX resp_prod, i;
671 
672 	DPRINTFN(XEDB_EVENT, ("xennet_tx_complete prod %d cons %d\n",
673 	    sc->sc_tx_ring.sring->rsp_prod, sc->sc_tx_ring.rsp_cons));
674 
675 again:
676 	resp_prod = sc->sc_tx_ring.sring->rsp_prod;
677 	x86_lfence();
678 	for (i = sc->sc_tx_ring.rsp_cons; i != resp_prod; i++) {
679 		req = &sc->sc_txreqs[RING_GET_RESPONSE(&sc->sc_tx_ring, i)->id];
680 		KASSERT(req->txreq_id ==
681 		    RING_GET_RESPONSE(&sc->sc_tx_ring, i)->id);
682 		if (__predict_false(xengnt_status(req->txreq_gntref))) {
683 			printf("%s: grant still used by backend\n",
684 			    sc->sc_dev.dv_xname);
685 			sc->sc_tx_ring.rsp_cons = i;
686 			goto end;
687 		}
688 		if (__predict_false(
689 		    RING_GET_RESPONSE(&sc->sc_tx_ring, i)->status !=
690 		    NETIF_RSP_OKAY))
691 			ifp->if_oerrors++;
692 		else
693 			ifp->if_opackets++;
694 		xengnt_revoke_access(req->txreq_gntref);
695 		m_freem(req->txreq_m);
696 		SLIST_INSERT_HEAD(&sc->sc_txreq_head, req, txreq_next);
697 	}
698 	sc->sc_tx_ring.rsp_cons = resp_prod;
699 	/* set new event and check for race with rsp_cons update */
700 	sc->sc_tx_ring.sring->rsp_event =
701 	    resp_prod + ((sc->sc_tx_ring.sring->req_prod - resp_prod) >> 1) + 1;
702 	ifp->if_timer = 0;
703 	x86_sfence();
704 	if (resp_prod != sc->sc_tx_ring.sring->rsp_prod)
705 		goto again;
706 end:
707 	if (ifp->if_flags & IFF_OACTIVE) {
708 		ifp->if_flags &= ~IFF_OACTIVE;
709 		xennet_softstart(sc);
710 	}
711 }
712 
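/*
 * Event channel interrupt handler. Complete pending transmissions, then
 * walk the RX ring: take back the machine page transferred by the backend,
 * map it at the request's va, and hand the packet to the network stack
 * (loaning the page as external mbuf storage while more RX buffers remain,
 * copying the data otherwise).
 */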
713 static int
714 xennet_handler(void *arg)
715 {
716 	struct xennet_xenbus_softc *sc = arg;
717 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
718 	RING_IDX resp_prod, i;
719 	struct xennet_rxreq *req;
720 	paddr_t ma, pa;
721 	vaddr_t va;
722 	mmu_update_t mmu[1];
723 	multicall_entry_t mcl[2];
724 	struct mbuf *m;
725 	void *pktp;
726 	int more_to_do;
727 
728 	if (sc->sc_backend_status != BEST_CONNECTED)
729 		return 1;
730 
731 	xennet_tx_complete(sc);
732 
733 again:
734 	DPRINTFN(XEDB_EVENT, ("xennet_handler prod %d cons %d\n",
735 	    sc->sc_rx_ring.sring->rsp_prod, sc->sc_rx_ring.rsp_cons));
736 
737 	resp_prod = sc->sc_rx_ring.sring->rsp_prod;
738 	x86_lfence(); /* ensure we see replies up to resp_prod */
739 	for (i = sc->sc_rx_ring.rsp_cons; i != resp_prod; i++) {
740 		netif_rx_response_t *rx = RING_GET_RESPONSE(&sc->sc_rx_ring, i);
741 		req = &sc->sc_rxreqs[rx->id];
742 		KASSERT(req->rxreq_gntref != GRANT_INVALID_REF);
743 		ma = xengnt_revoke_transfer(req->rxreq_gntref);
744 		if (ma == 0) {
745 			DPRINTFN(XEDB_EVENT, ("xennet_handler ma == 0\n"));
746 			/*
747 			 * the remote couldn't send us a packet.
748 			 * we can't free this rxreq as no page will be mapped
749 			 * here. Instead give it back immediately to the backend.
750 			 */
751 			ifp->if_ierrors++;
752 			RING_GET_REQUEST(&sc->sc_rx_ring,
753 			    sc->sc_rx_ring.req_prod_pvt)->id = req->rxreq_id;
754 			RING_GET_REQUEST(&sc->sc_rx_ring,
755 			    sc->sc_rx_ring.req_prod_pvt)->gref =
756 				req->rxreq_gntref;
757 			sc->sc_rx_ring.req_prod_pvt++;
758 			RING_PUSH_REQUESTS(&sc->sc_rx_ring);
759 			continue;
760 		}
761 		req->rxreq_gntref = GRANT_INVALID_REF;
762 
763 		pa = req->rxreq_pa;
764 		va = req->rxreq_va;
765 		/* remap the page */
766 		mmu[0].ptr = (ma << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
767 		mmu[0].val = ((pa - XPMAP_OFFSET) >> PAGE_SHIFT);
768 		MULTI_update_va_mapping(&mcl[0], va,
769 		    (ma << PAGE_SHIFT) | PG_V | PG_KW, UVMF_TLB_FLUSH|UVMF_ALL);
770 		xpmap_phys_to_machine_mapping[
771 		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = ma;
772 		mcl[1].op = __HYPERVISOR_mmu_update;
773 		mcl[1].args[0] = (unsigned long)mmu;
774 		mcl[1].args[1] = 1;
775 		mcl[1].args[2] = 0;
776 		mcl[1].args[3] = DOMID_SELF;
777 		HYPERVISOR_multicall(mcl, 2);
778 		pktp = (void *)(va + rx->offset);
779 #ifdef XENNET_DEBUG_DUMP
780 		xennet_hex_dump(pktp, rx->status, "r", rx->id);
781 #endif
782 		if ((ifp->if_flags & IFF_PROMISC) == 0) {
783 			struct ether_header *eh = pktp;
784 			if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0 &&
785 			    memcmp(LLADDR(ifp->if_sadl), eh->ether_dhost,
786 			    ETHER_ADDR_LEN) != 0) {
787 				DPRINTFN(XEDB_EVENT,
788 				    ("xennet_handler bad dest\n"));
789 				/* packet not for us */
790 				xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE,
791 				    req);
792 				continue;
793 			}
794 		}
795 		MGETHDR(m, M_DONTWAIT, MT_DATA);
796 		if (__predict_false(m == NULL)) {
797 			printf("xennet: rx no mbuf\n");
798 			ifp->if_ierrors++;
799 			xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE, req);
800 			continue;
801 		}
802 
803 		m->m_pkthdr.rcvif = ifp;
804 		if (__predict_true(sc->sc_rx_ring.req_prod_pvt !=
805 		    sc->sc_rx_ring.sring->rsp_prod)) {
806 			m->m_len = m->m_pkthdr.len = rx->status;
807 			MEXTADD(m, pktp, rx->status,
808 			    M_DEVBUF, xennet_rx_mbuf_free, req);
809 			m->m_flags |= M_EXT_RW; /* we own the buffer */
810 			req->rxreq_gntref = GRANT_STACK_REF;
811 		} else {
812 			/*
813 			 * This was our last receive buffer, allocate
814 			 * memory, copy data and push the receive
815 			 * buffer back to the hypervisor.
816 			 */
817 			m->m_len = MHLEN;
818 			m->m_pkthdr.len = 0;
819 			m_copyback(m, 0, rx->status, pktp);
820 			xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE, req);
821 			if (m->m_pkthdr.len < rx->status) {
822 				/* out of memory, just drop packets */
823 				ifp->if_ierrors++;
824 				m_freem(m);
825 				continue;
826 			}
827 		}
828 #if NBPFILTER > 0
829 		/*
830 		 * Pass packet to bpf if there is a listener.
831 		 */
832 		if (ifp->if_bpf)
833 			bpf_mtap(ifp->if_bpf, m);
834 #endif
835 
836 		ifp->if_ipackets++;
837 
838 		/* Pass the packet up. */
839 		(*ifp->if_input)(ifp, m);
840 	}
841 	x86_lfence();
842 	sc->sc_rx_ring.rsp_cons = i;
843 	RING_FINAL_CHECK_FOR_RESPONSES(&sc->sc_rx_ring, more_to_do);
844 	if (more_to_do)
845 		goto again;
846 	return 1;
847 }
848 
849 /*
850  * Called at splnet.
851  */
852 void
853 xennet_start(struct ifnet *ifp)
854 {
855 	struct xennet_xenbus_softc *sc = ifp->if_softc;
856 
857 	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_start()\n", sc->sc_dev.dv_xname));
858 
859 #if NRND > 0
860 	rnd_add_uint32(&sc->sc_rnd_source, sc->sc_tx_ring.req_prod_pvt);
861 #endif
862 
863 	xennet_tx_complete(sc);
864 
865 	if (__predict_false(
866 	    (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING))
867 		return;
868 
869 	/*
870 	 * The Xen communication channel is much more efficient if we can
871 	 * schedule a batch of packets for domain0. To achieve this, we
872 	 * schedule a soft interrupt, and just return. This way, the network
873 	 * stack will enqueue all pending mbufs in the interface's send queue
874 	 * before it is processed by xennet_softstart().
875 	 */
876 	softintr_schedule(sc->sc_softintr);
877 	return;
878 }
879 
880 /*
881  * called at splsoftnet
882  */
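/*
 * Dequeue packets from the interface send queue and post them on the TX
 * ring. A packet must be contiguous and must not cross a page boundary;
 * otherwise it is copied into a freshly allocated mbuf (or cluster). The
 * page holding the data is granted read-only to the backend, and the
 * backend is notified once after the loop if needed.
 */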
883 void
884 xennet_softstart(void *arg)
885 {
886 	struct xennet_xenbus_softc *sc = arg;
887 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
888 	struct mbuf *m, *new_m;
889 	netif_tx_request_t *txreq;
890 	RING_IDX req_prod;
891 	paddr_t pa, pa2;
892 	struct xennet_txreq *req;
893 	int notify;
894 	int do_notify = 0;
895 	int s;
896 
897 	s = splnet();
898 	if (__predict_false(
899 	    (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)) {
900 		splx(s);
901 		return;
902 	}
903 
904 	req_prod = sc->sc_tx_ring.req_prod_pvt;
905 	while (/*CONSTCOND*/1) {
906 		req = SLIST_FIRST(&sc->sc_txreq_head);
907 		if (__predict_false(req == NULL)) {
908 			ifp->if_flags |= IFF_OACTIVE;
909 			break;
910 		}
911 		IFQ_POLL(&ifp->if_snd, m);
912 		if (m == NULL)
913 			break;
914 
915 		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
916 		case M_EXT|M_EXT_CLUSTER:
917 			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
918 			pa = m->m_ext.ext_paddr +
919 				(m->m_data - m->m_ext.ext_buf);
920 			break;
921 		case 0:
922 			KASSERT(m->m_paddr != M_PADDR_INVALID);
923 			pa = m->m_paddr + M_BUFOFFSET(m) +
924 				(m->m_data - M_BUFADDR(m));
925 			break;
926 		default:
927 			if (__predict_false(
928 			    !pmap_extract(pmap_kernel(), (vaddr_t)m->m_data,
929 			    &pa))) {
930 				panic("xennet_start: no pa");
931 			}
932 			break;
933 		}
934 
935 		if (m->m_pkthdr.len != m->m_len ||
936 		    (pa ^ (pa + m->m_pkthdr.len - 1)) & PG_FRAME) {
937 
938 			MGETHDR(new_m, M_DONTWAIT, MT_DATA);
939 			if (__predict_false(new_m == NULL)) {
940 				printf("xennet: no mbuf\n");
941 				break;
942 			}
943 			if (m->m_pkthdr.len > MHLEN) {
944 				MCLGET(new_m, M_DONTWAIT);
945 				if (__predict_false(
946 				    (new_m->m_flags & M_EXT) == 0)) {
947 					DPRINTF(("xennet: no mbuf cluster\n"));
948 					m_freem(new_m);
949 					break;
950 				}
951 			}
952 
953 			m_copydata(m, 0, m->m_pkthdr.len, mtod(new_m, caddr_t));
954 			new_m->m_len = new_m->m_pkthdr.len = m->m_pkthdr.len;
955 
956 			if ((new_m->m_flags & M_EXT) != 0) {
957 				pa = new_m->m_ext.ext_paddr;
958 				KASSERT(new_m->m_data == new_m->m_ext.ext_buf);
959 				KASSERT(pa != M_PADDR_INVALID);
960 			} else {
961 				pa = new_m->m_paddr;
962 				KASSERT(pa != M_PADDR_INVALID);
963 				KASSERT(new_m->m_data == M_BUFADDR(new_m));
964 				pa += M_BUFOFFSET(new_m);
965 			}
966 			if (__predict_false(xengnt_grant_access(
967 			    sc->sc_xbusd->xbusd_otherend_id,
968 			    xpmap_ptom_masked(pa),
969 			    GNTMAP_readonly, &req->txreq_gntref) != 0)) {
970 				m_freem(new_m);
971 				ifp->if_flags |= IFF_OACTIVE;
972 				break;
973 			}
974 			/* we will be able to send new_m */
975 			IFQ_DEQUEUE(&ifp->if_snd, m);
976 			m_freem(m);
977 			m = new_m;
978 		} else {
979 			if (__predict_false(xengnt_grant_access(
980 			    sc->sc_xbusd->xbusd_otherend_id,
981 			    xpmap_ptom_masked(pa),
982 			    GNTMAP_readonly, &req->txreq_gntref) != 0)) {
983 				ifp->if_flags |= IFF_OACTIVE;
984 				break;
985 			}
986 			/* we will be able to send m */
987 			IFQ_DEQUEUE(&ifp->if_snd, m);
988 		}
989 
990 		KASSERT(((pa ^ (pa + m->m_pkthdr.len -  1)) & PG_FRAME) == 0);
991 
992 		SLIST_REMOVE_HEAD(&sc->sc_txreq_head, txreq_next);
993 		req->txreq_m = m;
994 
995 		DPRINTFN(XEDB_MBUF, ("xennet_start id %d, "
996 		    "mbuf %p, buf %p/%p/%p, size %d\n",
997 		    req->txreq_id, m, mtod(m, void *), (void *)pa,
998 		    (void *)xpmap_ptom_masked(pa), m->m_pkthdr.len));
999 		pmap_extract_ma(pmap_kernel(), mtod(m, vaddr_t), &pa2);
1000 		DPRINTFN(XEDB_MBUF, ("xennet_start pa %p ma %p/%p\n",
1001 		    (void *)pa, (void *)xpmap_ptom_masked(pa), (void *)pa2));
1002 #ifdef XENNET_DEBUG_DUMP
1003 		xennet_hex_dump(mtod(m, u_char *), m->m_pkthdr.len, "s", req->txreq_id);
1004 #endif
1005 
1006 		txreq = RING_GET_REQUEST(&sc->sc_tx_ring, req_prod);
1007 		txreq->id = req->txreq_id;
1008 		txreq->gref = req->txreq_gntref;
1009 		txreq->offset = pa & ~PG_FRAME;
1010 		txreq->size = m->m_pkthdr.len;
1011 		txreq->flags = 0;
1012 
1013 		req_prod++;
1014 		sc->sc_tx_ring.req_prod_pvt = req_prod;
1015 		RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->sc_tx_ring, notify);
1016 		if (notify)
1017 			do_notify = 1;
1018 
1019 #ifdef XENNET_DEBUG
1020 		DPRINTFN(XEDB_MEM, ("packet addr %p/%p, physical %p/%p, "
1021 		    "m_paddr %p, len %d/%d\n", M_BUFADDR(m), mtod(m, void *),
1022 		    (void *)*kvtopte(mtod(m, vaddr_t)),
1023 		    (void *)xpmap_mtop(*kvtopte(mtod(m, vaddr_t))),
1024 		    (void *)m->m_paddr, m->m_pkthdr.len, m->m_len));
1025 		DPRINTFN(XEDB_MEM, ("id %d gref %d offset %d size %d flags %d"
1026 		    " prod %d\n",
1027 		    txreq->id, txreq->gref, txreq->offset, txreq->size,
1028 		    txreq->flags, req_prod));
1029 #endif
1030 
1031 #if NBPFILTER > 0
1032 		/*
1033 		 * Pass packet to bpf if there is a listener.
1034 		 */
1035 		if (ifp->if_bpf) {
1036 			bpf_mtap(ifp->if_bpf, m);
1037 		}
1038 #endif
1039 	}
1040 
1041 	x86_lfence();
1042 	if (do_notify) {
1043 		hypervisor_notify_via_evtchn(sc->sc_evtchn);
1044 		ifp->if_timer = 5;
1045 	}
1046 	splx(s);
1047 
1048 	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_start() done\n",
1049 	    sc->sc_dev.dv_xname));
1050 }
1051 
1052 int
1053 xennet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1054 {
1055 #ifdef XENNET_DEBUG
1056 	struct xennet_xenbus_softc *sc = ifp->if_softc;
1057 #endif
1058 	int s, error = 0;
1059 
1060 	s = splnet();
1061 
1062 	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl()\n", sc->sc_dev.dv_xname));
1063 	error = ether_ioctl(ifp, cmd, data);
1064 	if (error == ENETRESET)
1065 		error = 0;
1066 	splx(s);
1067 
1068 	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl() returning %d\n",
1069 	    sc->sc_dev.dv_xname, error));
1070 
1071 	return error;
1072 }
1073 
1074 void
1075 xennet_watchdog(struct ifnet *ifp)
1076 {
1077 	struct xennet_xenbus_softc *sc = ifp->if_softc;
1078 
1079 	printf("%s: xennet_watchdog\n", sc->sc_dev.dv_xname);
1080 }
1081 
1082 int
1083 xennet_init(struct ifnet *ifp)
1084 {
1085 	struct xennet_xenbus_softc *sc = ifp->if_softc;
1086 	int s = splnet();
1087 
1088 	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_init()\n", sc->sc_dev.dv_xname));
1089 
1090 	if ((ifp->if_flags & IFF_RUNNING) == 0) {
1091 		sc->sc_rx_ring.sring->rsp_event =
1092 		    sc->sc_rx_ring.rsp_cons + 1;
1093 		hypervisor_enable_event(sc->sc_evtchn);
1094 		hypervisor_notify_via_evtchn(sc->sc_evtchn);
1095 		xennet_reset(sc);
1096 	}
1097 	ifp->if_flags |= IFF_RUNNING;
1098 	ifp->if_flags &= ~IFF_OACTIVE;
1099 	ifp->if_timer = 0;
1100 	splx(s);
1101 	return 0;
1102 }
1103 
1104 void
1105 xennet_stop(struct ifnet *ifp, int disable)
1106 {
1107 	struct xennet_xenbus_softc *sc = ifp->if_softc;
1108 	int s = splnet();
1109 
1110 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1111 	hypervisor_mask_event(sc->sc_evtchn);
1112 	xennet_reset(sc);
1113 	splx(s);
1114 }
1115 
1116 void
1117 xennet_reset(struct xennet_xenbus_softc *sc)
1118 {
1119 
1120 	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_reset()\n", sc->sc_dev.dv_xname));
1121 }
1122 
1123 #if defined(NFS_BOOT_BOOTSTATIC)
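/*
 * NFS diskless boot callback: extract the local, gateway and server IP
 * addresses and the netmask that were passed on the Xen kernel command
 * line and fill in the nfs_diskless structure for NFS root boot.
 */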
1124 int
1125 xennet_bootstatic_callback(struct nfs_diskless *nd)
1126 {
1127 #if 0
1128 	struct ifnet *ifp = nd->nd_ifp;
1129 	struct xennet_xenbus_softc *sc =
1130 	    (struct xennet_xenbus_softc *)ifp->if_softc;
1131 #endif
1132 	union xen_cmdline_parseinfo xcp;
1133 	struct sockaddr_in *sin;
1134 
1135 	memset(&xcp, 0, sizeof(xcp.xcp_netinfo));
1136 	xcp.xcp_netinfo.xi_ifno = /* XXX sc->sc_ifno */ 0;
1137 	xcp.xcp_netinfo.xi_root = nd->nd_root.ndm_host;
1138 	xen_parse_cmdline(XEN_PARSE_NETINFO, &xcp);
1139 
1140 	nd->nd_myip.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[0]);
1141 	nd->nd_gwip.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[2]);
1142 	nd->nd_mask.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[3]);
1143 
1144 	sin = (struct sockaddr_in *) &nd->nd_root.ndm_saddr;
1145 	memset((caddr_t)sin, 0, sizeof(*sin));
1146 	sin->sin_len = sizeof(*sin);
1147 	sin->sin_family = AF_INET;
1148 	sin->sin_addr.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[1]);
1149 
1150 	return (NFS_BOOTSTATIC_HAS_MYIP|NFS_BOOTSTATIC_HAS_GWIP|
1151 	    NFS_BOOTSTATIC_HAS_MASK|NFS_BOOTSTATIC_HAS_SERVADDR|
1152 	    NFS_BOOTSTATIC_HAS_SERVER);
1153 }
1154 #endif /* defined(NFS_BOOT_BOOTSTATIC) */
1155 
1156 #ifdef XENNET_DEBUG_DUMP
1157 #define XCHR(x) hexdigits[(x) & 0xf]
1158 static void
1159 xennet_hex_dump(const unsigned char *pkt, size_t len, const char *type, int id)
1160 {
1161 	size_t i, j;
1162 
1163 	printf("pkt %p len %d/%x type %s id %d\n", pkt, len, len, type, id);
1164 	printf("00000000  ");
1165 	for(i=0; i<len; i++) {
1166 		printf("%c%c ", XCHR(pkt[i]>>4), XCHR(pkt[i]));
1167 		if ((i+1) % 16 == 8)
1168 			printf(" ");
1169 		if ((i+1) % 16 == 0) {
1170 			printf(" %c", '|');
1171 			for(j=0; j<16; j++)
1172 				printf("%c", pkt[i-15+j]>=32 &&
1173 				    pkt[i-15+j]<127?pkt[i-15+j]:'.');
1174 			printf("%c\n%c%c%c%c%c%c%c%c  ", '|',
1175 			    XCHR((i+1)>>28), XCHR((i+1)>>24),
1176 			    XCHR((i+1)>>20), XCHR((i+1)>>16),
1177 			    XCHR((i+1)>>12), XCHR((i+1)>>8),
1178 			    XCHR((i+1)>>4), XCHR(i+1));
1179 		}
1180 	}
1181 	printf("\n");
1182 }
1183 #undef XCHR
1184 #endif
1185