/*	$NetBSD: if_xennet_xenbus.c,v 1.20 2007/11/07 00:23:17 ad Exp $	*/

/*
 * Copyright (c) 2006 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Manuel Bouyer.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Christian Limpach.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
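
/*
 * Frontend ("netfront") driver for the Xen paravirtualised network
 * interface.  The frontend exchanges packets with the backend driver
 * through two shared rings (TX and RX) and an event channel, all of
 * which are negotiated over xenbus.  Receive buffers are handed to the
 * backend as grant-table page transfers; transmit buffers are granted
 * read-only for the lifetime of the request.
 */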

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.20 2007/11/07 00:23:17 ad Exp $");

#include "opt_xen.h"
#include "opt_nfs_boot.h"
#include "rnd.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/systm.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#if defined(NFS_BOOT_BOOTSTATIC)
#include <sys/fstypes.h>
#include <sys/mount.h>
#include <sys/statvfs.h>
#include <netinet/in.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsdiskless.h>
#include <machine/if_xennetvar.h>
#endif /* defined(NFS_BOOT_BOOTSTATIC) */

#include <machine/xennet_checksum.h>

#include <uvm/uvm.h>

#include <machine/xen3-public/io/ring.h>

#include <machine/granttables.h>
#include <machine/xenbus.h>
#include "locators.h"

#undef XENNET_DEBUG_DUMP
#undef XENNET_DEBUG
#ifdef XENNET_DEBUG
#define XEDB_FOLLOW	0x01
#define XEDB_INIT	0x02
#define XEDB_EVENT	0x04
#define XEDB_MBUF	0x08
#define XEDB_MEM	0x10
int xennet_debug = 0xff;
#define DPRINTF(x) if (xennet_debug) printf x;
#define DPRINTFN(n,x) if (xennet_debug & (n)) printf x;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define GRANT_INVALID_REF	-1 /* entry is free */
#define GRANT_STACK_REF		-2 /* entry owned by the network stack */

#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
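
/*
 * Per-slot bookkeeping for the shared rings: one xennet_txreq (resp.
 * xennet_rxreq) per entry of the TX (resp. RX) ring.  Free entries are
 * kept on the sc_txreq_head/sc_rxreq_head lists in the softc.
 */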

struct xennet_txreq {
	SLIST_ENTRY(xennet_txreq) txreq_next;
	uint16_t txreq_id; /* ID passed to backend */
	grant_ref_t txreq_gntref; /* grant ref of this request */
	struct mbuf *txreq_m; /* mbuf being transmitted */
};

struct xennet_rxreq {
	SLIST_ENTRY(xennet_rxreq) rxreq_next;
	uint16_t rxreq_id; /* ID passed to backend */
	grant_ref_t rxreq_gntref; /* grant ref of this request */
	/* va/pa for this receive buf. ma will be provided by backend */
	paddr_t rxreq_pa;
	vaddr_t rxreq_va;
	struct xennet_xenbus_softc *rxreq_sc; /* pointer to our interface */
};

struct xennet_xenbus_softc {
	struct device sc_dev;
	struct ethercom sc_ethercom;
	uint8_t sc_enaddr[6];
	struct xenbus_device *sc_xbusd;

	netif_tx_front_ring_t sc_tx_ring;
	netif_rx_front_ring_t sc_rx_ring;

	unsigned int sc_evtchn;
	void *sc_softintr;

	grant_ref_t sc_tx_ring_gntref;
	grant_ref_t sc_rx_ring_gntref;

	struct xennet_txreq sc_txreqs[NET_TX_RING_SIZE];
	struct xennet_rxreq sc_rxreqs[NET_RX_RING_SIZE];
	SLIST_HEAD(,xennet_txreq) sc_txreq_head; /* list of free TX requests */
	SLIST_HEAD(,xennet_rxreq) sc_rxreq_head; /* list of free RX requests */
	int sc_free_rxreql; /* number of free receive request structs */

	int sc_backend_status; /* our status with backend */
#define BEST_CLOSED		0
#define BEST_DISCONNECTED	1
#define BEST_CONNECTED		2
#if NRND > 0
	rndsource_element_t sc_rnd_source;
#endif
};
#define SC_NLIVEREQ(sc) ((sc)->sc_rx_ring.req_prod_pvt - \
			(sc)->sc_rx_ring.sring->rsp_prod)

/* too big to be on stack */
static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1];
static paddr_t xennet_pages[NET_RX_RING_SIZE];

static int  xennet_xenbus_match(struct device *, struct cfdata *, void *);
static void xennet_xenbus_attach(struct device *, struct device *, void *);
static int  xennet_xenbus_detach(struct device *, int);
static void xennet_backend_changed(void *, XenbusState);

static int  xennet_xenbus_resume(void *);
static void xennet_alloc_rx_buffer(struct xennet_xenbus_softc *);
static void xennet_free_rx_buffer(struct xennet_xenbus_softc *);
static void xennet_tx_complete(struct xennet_xenbus_softc *);
static void xennet_rx_mbuf_free(struct mbuf *, void *, size_t, void *);
static int  xennet_handler(void *);
#ifdef XENNET_DEBUG_DUMP
static void xennet_hex_dump(const unsigned char *, size_t, const char *, int);
#endif

static int  xennet_init(struct ifnet *);
static void xennet_stop(struct ifnet *, int);
static void xennet_reset(struct xennet_xenbus_softc *);
static void xennet_softstart(void *);
static void xennet_start(struct ifnet *);
static int  xennet_ioctl(struct ifnet *, u_long, void *);
static void xennet_watchdog(struct ifnet *);

CFATTACH_DECL(xennet_xenbus, sizeof(struct xennet_xenbus_softc),
   xennet_xenbus_match, xennet_xenbus_attach, xennet_xenbus_detach, NULL);

static int
xennet_xenbus_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct xenbusdev_attach_args *xa = aux;

	if (strcmp(xa->xa_type, "vif") != 0)
		return 0;

	if (match->cf_loc[XENBUSCF_ID] != XENBUSCF_ID_DEFAULT &&
	    match->cf_loc[XENBUSCF_ID] != xa->xa_id)
		return 0;

	return 1;
}
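
/*
 * Attach a frontend instance: allocate the per-slot receive buffers,
 * read the MAC address from the xenstore, attach the Ethernet
 * interface, and connect to the backend via xennet_xenbus_resume().
 */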

static void
xennet_xenbus_attach(struct device *parent, struct device *self, void *aux)
{
	struct xennet_xenbus_softc *sc = (void *)self;
	struct xenbusdev_attach_args *xa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int err;
	RING_IDX i;
	char *val, *e, *p;
	int s;
	extern int ifqmaxlen; /* XXX */
#ifdef XENNET_DEBUG
	char **dir;
	int dir_n = 0;
	char id_str[20];
#endif

	aprint_normal(": Xen Virtual Network Interface\n");
#ifdef XENNET_DEBUG
	printf("path: %s\n", xa->xa_xbusd->xbusd_path);
	snprintf(id_str, sizeof(id_str), "%d", xa->xa_id);
	err = xenbus_directory(NULL, "device/vif", id_str, &dir_n, &dir);
	if (err) {
		printf("%s: xenbus_directory err %d\n",
		    sc->sc_dev.dv_xname, err);
	} else {
		printf("%s/\n", xa->xa_xbusd->xbusd_path);
		for (i = 0; i < dir_n; i++) {
			printf("\t/%s", dir[i]);
			err = xenbus_read(NULL, xa->xa_xbusd->xbusd_path,
			    dir[i], NULL, &val);
			if (err) {
				printf("%s: xenbus_read err %d\n",
				    sc->sc_dev.dv_xname, err);
			} else {
				printf(" = %s\n", val);
				free(val, M_DEVBUF);
			}
		}
	}
#endif /* XENNET_DEBUG */
	sc->sc_xbusd = xa->xa_xbusd;
	sc->sc_xbusd->xbusd_otherend_changed = xennet_backend_changed;

	/* initialize free TX and RX request lists */
	SLIST_INIT(&sc->sc_txreq_head);
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		sc->sc_txreqs[i].txreq_id = i;
		SLIST_INSERT_HEAD(&sc->sc_txreq_head, &sc->sc_txreqs[i],
		    txreq_next);
	}
	SLIST_INIT(&sc->sc_rxreq_head);
	s = splvm();
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
		rxreq->rxreq_id = i;
		rxreq->rxreq_sc = sc;
		rxreq->rxreq_va = uvm_km_alloc(kernel_map,
		    PAGE_SIZE, PAGE_SIZE, UVM_KMF_WIRED | UVM_KMF_ZERO);
		if (rxreq->rxreq_va == 0)
			break;
		if (!pmap_extract(pmap_kernel(), rxreq->rxreq_va,
		    &rxreq->rxreq_pa))
			panic("xennet: no pa for mapped va ?");
		rxreq->rxreq_gntref = GRANT_INVALID_REF;
		SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq, rxreq_next);
	}
	splx(s);
	sc->sc_free_rxreql = i;
	if (sc->sc_free_rxreql == 0) {
		aprint_error("%s: failed to allocate rx memory\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* read the MAC address from the xenstore */
	err = xenbus_read(NULL, xa->xa_xbusd->xbusd_path, "mac", NULL, &val);
	if (err) {
		aprint_error("%s: can't read mac address, err %d\n",
		    sc->sc_dev.dv_xname, err);
		return;
	}
	/* parse the colon-separated MAC address */
	for (i = 0, p = val; i < 6; i++) {
		sc->sc_enaddr[i] = strtoul(p, &e, 16);
		if ((e[0] == '\0' && i != 5) && e[0] != ':') {
			aprint_error("%s: %s is not a valid mac address\n",
			    sc->sc_dev.dv_xname, val);
			free(val, M_DEVBUF);
			return;
		}
		p = &e[1];
	}
	free(val, M_DEVBUF);
	aprint_normal("%s: MAC address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));
	/* Initialize ifnet structure and attach interface */
	memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = xennet_start;
	ifp->if_ioctl = xennet_ioctl;
	ifp->if_watchdog = xennet_watchdog;
	ifp->if_init = xennet_init;
	ifp->if_stop = xennet_stop;
	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;
	ifp->if_timer = 0;
	ifp->if_snd.ifq_maxlen = max(ifqmaxlen, NET_TX_RING_SIZE * 2);
	ifp->if_capabilities = IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx;
	IFQ_SET_READY(&ifp->if_snd);
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);
	sc->sc_softintr = softintr_establish(IPL_SOFTNET, xennet_softstart, sc);
	if (sc->sc_softintr == NULL)
		panic("xennet: can't establish soft interrupt");

	/* initialise shared structures and tell backend that we are ready */
	xennet_xenbus_resume(sc);
}
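
/*
 * Detach: stop the interface, wait for pending transmissions and for
 * buffers loaned to the network stack, then release the receive pages,
 * the shared rings and the event channel handler.
 */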

static int
xennet_xenbus_detach(struct device *self, int flags)
{
	struct xennet_xenbus_softc *sc = (void *)self;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s0, s1;
	RING_IDX i;

	DPRINTF(("%s: xennet_xenbus_detach\n", sc->sc_dev.dv_xname));
	s0 = splnet();
	xennet_stop(ifp, 1);
	/* wait for pending TX to complete, and collect pending RX packets */
	xennet_handler(sc);
	while (sc->sc_tx_ring.sring->rsp_prod != sc->sc_tx_ring.rsp_cons) {
		tsleep(xennet_xenbus_detach, PRIBIO, "xnet_detach", hz/2);
		xennet_handler(sc);
	}
	xennet_free_rx_buffer(sc);

	s1 = splvm();
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
		uvm_km_free(kernel_map, rxreq->rxreq_va, PAGE_SIZE,
		    UVM_KMF_WIRED);
	}
	splx(s1);

	ether_ifdetach(ifp);
	if_detach(ifp);
	while (xengnt_status(sc->sc_tx_ring_gntref)) {
		tsleep(xennet_xenbus_detach, PRIBIO, "xnet_txref", hz/2);
	}
	xengnt_revoke_access(sc->sc_tx_ring_gntref);
	uvm_km_free(kernel_map, (vaddr_t)sc->sc_tx_ring.sring, PAGE_SIZE,
	    UVM_KMF_WIRED);
	while (xengnt_status(sc->sc_rx_ring_gntref)) {
		tsleep(xennet_xenbus_detach, PRIBIO, "xnet_rxref", hz/2);
	}
	xengnt_revoke_access(sc->sc_rx_ring_gntref);
	uvm_km_free(kernel_map, (vaddr_t)sc->sc_rx_ring.sring, PAGE_SIZE,
	    UVM_KMF_WIRED);
	softintr_disestablish(sc->sc_softintr);
	event_remove_handler(sc->sc_evtchn, &xennet_handler, sc);
	splx(s0);
	DPRINTF(("%s: xennet_xenbus_detach done\n", sc->sc_dev.dv_xname));
	return 0;
}
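
/*
 * (Re)connect to the backend: allocate and grant the shared TX/RX
 * rings, allocate the event channel, then publish the ring references
 * and event channel and switch to XenbusStateConnected in a single
 * xenbus transaction.
 */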

static int
xennet_xenbus_resume(void *p)
{
	struct xennet_xenbus_softc *sc = p;
	struct xenbus_transaction *xbt;
	int error;
	netif_tx_sring_t *tx_ring;
	netif_rx_sring_t *rx_ring;
	paddr_t ma;
	const char *errmsg;

	sc->sc_tx_ring_gntref = GRANT_INVALID_REF;
	sc->sc_rx_ring_gntref = GRANT_INVALID_REF;

	/* setup device: alloc event channel and shared rings */
	tx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	rx_ring = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	if (tx_ring == NULL || rx_ring == NULL)
		panic("xennet_xenbus_resume: can't alloc rings");

	SHARED_RING_INIT(tx_ring);
	FRONT_RING_INIT(&sc->sc_tx_ring, tx_ring, PAGE_SIZE);
	SHARED_RING_INIT(rx_ring);
	FRONT_RING_INIT(&sc->sc_rx_ring, rx_ring, PAGE_SIZE);

	(void)pmap_extract_ma(pmap_kernel(), (vaddr_t)tx_ring, &ma);
	error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_tx_ring_gntref);
	if (error)
		return error;
	(void)pmap_extract_ma(pmap_kernel(), (vaddr_t)rx_ring, &ma);
	error = xenbus_grant_ring(sc->sc_xbusd, ma, &sc->sc_rx_ring_gntref);
	if (error)
		return error;
	error = xenbus_alloc_evtchn(sc->sc_xbusd, &sc->sc_evtchn);
	if (error)
		return error;
	aprint_verbose("%s: using event channel %d\n",
	    sc->sc_dev.dv_xname, sc->sc_evtchn);
	event_set_handler(sc->sc_evtchn, &xennet_handler, sc,
	    IPL_NET, sc->sc_dev.dv_xname);

again:
	xbt = xenbus_transaction_start();
	if (xbt == NULL)
		return ENOMEM;
	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
	    "tx-ring-ref","%u", sc->sc_tx_ring_gntref);
	if (error) {
		errmsg = "writing tx ring-ref";
		goto abort_transaction;
	}
	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
	    "rx-ring-ref","%u", sc->sc_rx_ring_gntref);
	if (error) {
		errmsg = "writing rx ring-ref";
		goto abort_transaction;
	}
	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
	    "event-channel", "%u", sc->sc_evtchn);
	if (error) {
		errmsg = "writing event channel";
		goto abort_transaction;
	}
	error = xenbus_printf(xbt, sc->sc_xbusd->xbusd_path,
	    "state", "%d", XenbusStateConnected);
	if (error) {
		errmsg = "writing frontend XenbusStateConnected";
		goto abort_transaction;
	}
	error = xenbus_transaction_end(xbt, 0);
	if (error == EAGAIN)
		goto again;
	if (error) {
		xenbus_dev_fatal(sc->sc_xbusd, error, "completing transaction");
		return -1;
	}
	xennet_alloc_rx_buffer(sc);
	sc->sc_backend_status = BEST_CONNECTED;
	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(sc->sc_xbusd, error, "%s", errmsg);
	return error;
}

static void
xennet_backend_changed(void *arg, XenbusState new_state)
{
	struct xennet_xenbus_softc *sc = arg;
	DPRINTF(("%s: new backend state %d\n", sc->sc_dev.dv_xname, new_state));

	switch (new_state) {
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
		break;
	case XenbusStateClosing:
		sc->sc_backend_status = BEST_CLOSED;
		xenbus_switch_state(sc->sc_xbusd, NULL, XenbusStateClosed);
		break;
	case XenbusStateConnected:
		break;
	case XenbusStateUnknown:
	default:
		panic("bad backend state %d", new_state);
	}
}
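
/*
 * Hand free receive buffers to the backend.  Each page is unmapped,
 * removed from the pseudo-physical map and offered to the backend via
 * a grant-table transfer; the underlying machine pages are then
 * returned to Xen with XENMEM_decrease_reservation.  The backend later
 * transfers back a machine page containing the received packet, which
 * xennet_handler() maps at the buffer's virtual address.
 */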

static void
xennet_alloc_rx_buffer(struct xennet_xenbus_softc *sc)
{
	RING_IDX req_prod = sc->sc_rx_ring.req_prod_pvt;
	RING_IDX i;
	struct xennet_rxreq *req;
	struct xen_memory_reservation reservation;
	int s1, s2;
	paddr_t pfn;

	s1 = splnet();
	for (i = 0; sc->sc_free_rxreql != 0; i++) {
		req = SLIST_FIRST(&sc->sc_rxreq_head);
		KASSERT(req != NULL);
		KASSERT(req == &sc->sc_rxreqs[req->rxreq_id]);
		RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->id =
		    req->rxreq_id;
		if (xengnt_grant_transfer(sc->sc_xbusd->xbusd_otherend_id,
		    &req->rxreq_gntref) != 0) {
			break;
		}
		RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->gref =
		    req->rxreq_gntref;

		SLIST_REMOVE_HEAD(&sc->sc_rxreq_head, rxreq_next);
		sc->sc_free_rxreql--;

		/* unmap the page */
		MULTI_update_va_mapping(&rx_mcl[i], req->rxreq_va, 0, 0);
		/*
		 * Remove this page from pseudo phys map before
		 * passing back to Xen.
		 */
		pfn = (req->rxreq_pa - XPMAP_OFFSET) >> PAGE_SHIFT;
		xennet_pages[i] = xpmap_phys_to_machine_mapping[pfn];
		xpmap_phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
	}
	if (i == 0) {
		splx(s1);
		return;
	}
	/* also make sure to flush all TLB entries */
	rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
	/*
	 * We may have allocated buffers which have entries
	 * outstanding in the page update queue -- make sure we flush
	 * those first!
	 */
	s2 = splvm();
	xpq_flush_queue();
	splx(s2);
	/* now decrease reservation */
	reservation.extent_start = xennet_pages;
	reservation.nr_extents = i;
	reservation.extent_order = 0;
	reservation.address_bits = 0;
	reservation.domid = DOMID_SELF;
	rx_mcl[i].op = __HYPERVISOR_memory_op;
	rx_mcl[i].args[0] = XENMEM_decrease_reservation;
	rx_mcl[i].args[1] = (unsigned long)&reservation;
	HYPERVISOR_multicall(rx_mcl, i+1);
	if (__predict_false(rx_mcl[i].result != i)) {
		panic("xennet_alloc_rx_buffer: XENMEM_decrease_reservation");
	}
	sc->sc_rx_ring.req_prod_pvt = req_prod + i;
	RING_PUSH_REQUESTS(&sc->sc_rx_ring);

	splx(s1);
	return;
}

static void
xennet_free_rx_buffer(struct xennet_xenbus_softc *sc)
{
	paddr_t ma, pa;
	vaddr_t va;
	RING_IDX i;
	mmu_update_t mmu[1];
	multicall_entry_t mcl[2];

	int s = splbio();

	DPRINTF(("%s: xennet_free_rx_buffer\n", sc->sc_dev.dv_xname));
	/* get back memory from RX ring */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];

		/*
		 * if the buffer is in transit in the network stack, wait for
		 * the network stack to free it.
		 */
		while ((volatile grant_ref_t)rxreq->rxreq_gntref ==
		    GRANT_STACK_REF)
			tsleep(xennet_xenbus_detach, PRIBIO, "xnet_free", hz/2);

		if (rxreq->rxreq_gntref != GRANT_INVALID_REF) {
			/*
			 * this req is still granted. Get back the page or
			 * allocate a new one, and remap it.
			 */
			SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq,
			    rxreq_next);
			sc->sc_free_rxreql++;
			ma = xengnt_revoke_transfer(rxreq->rxreq_gntref);
			rxreq->rxreq_gntref = GRANT_INVALID_REF;
			if (ma == 0) {
				struct xen_memory_reservation xenres;
				/*
				 * transfer not complete, we lost the page.
				 * Get one from hypervisor
				 */
				xenres.extent_start = &ma;
				xenres.nr_extents = 1;
				xenres.extent_order = 0;
				xenres.address_bits = 31;
				xenres.domid = DOMID_SELF;
				if (HYPERVISOR_memory_op(
				    XENMEM_increase_reservation, &xenres) < 0) {
					panic("xennet_free_rx_buffer: "
					    "can't get memory back");
				}
				KASSERT(ma != 0);
			}
			pa = rxreq->rxreq_pa;
			va = rxreq->rxreq_va;
			/* remap the page */
			mmu[0].ptr = (ma << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
			mmu[0].val = ((pa - XPMAP_OFFSET) >> PAGE_SHIFT);
			MULTI_update_va_mapping(&mcl[0], va,
			    (ma << PAGE_SHIFT) | PG_V | PG_KW,
			    UVMF_TLB_FLUSH|UVMF_ALL);
			xpmap_phys_to_machine_mapping[
			    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = ma;
			mcl[1].op = __HYPERVISOR_mmu_update;
			mcl[1].args[0] = (unsigned long)mmu;
			mcl[1].args[1] = 1;
			mcl[1].args[2] = 0;
			mcl[1].args[3] = DOMID_SELF;
			HYPERVISOR_multicall(mcl, 2);
		}

	}
	splx(s);
	DPRINTF(("%s: xennet_free_rx_buffer done\n", sc->sc_dev.dv_xname));
}
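
/*
 * External-storage free callback for receive mbufs: return the receive
 * request to the free list and, once enough requests have been freed,
 * post fresh buffers to the backend.
 */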

static void
xennet_rx_mbuf_free(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct xennet_rxreq *req = arg;
	struct xennet_xenbus_softc *sc = req->rxreq_sc;

	int s = splnet();

	SLIST_INSERT_HEAD(&sc->sc_rxreq_head, req, rxreq_next);
	sc->sc_free_rxreql++;

	req->rxreq_gntref = GRANT_INVALID_REF;
	if (sc->sc_free_rxreql >= SC_NLIVEREQ(sc) &&
	    __predict_true(sc->sc_backend_status == BEST_CONNECTED)) {
		xennet_alloc_rx_buffer(sc);
	}

	if (m)
		pool_cache_put(mb_cache, m);
	splx(s);
}

/*
 * Reap completed transmit requests: release grants and mbufs and
 * return the request structures to the free list.
 */
static void
xennet_tx_complete(struct xennet_xenbus_softc *sc)
{
	struct xennet_txreq *req;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	RING_IDX resp_prod, i;

	DPRINTFN(XEDB_EVENT, ("xennet_tx_complete prod %d cons %d\n",
	    sc->sc_tx_ring.sring->rsp_prod, sc->sc_tx_ring.rsp_cons));

again:
	resp_prod = sc->sc_tx_ring.sring->rsp_prod;
	x86_lfence();
	for (i = sc->sc_tx_ring.rsp_cons; i != resp_prod; i++) {
		req = &sc->sc_txreqs[RING_GET_RESPONSE(&sc->sc_tx_ring, i)->id];
		KASSERT(req->txreq_id ==
		    RING_GET_RESPONSE(&sc->sc_tx_ring, i)->id);
		if (__predict_false(xengnt_status(req->txreq_gntref))) {
			printf("%s: grant still used by backend\n",
			    sc->sc_dev.dv_xname);
			sc->sc_tx_ring.rsp_cons = i;
			goto end;
		}
		if (__predict_false(
		    RING_GET_RESPONSE(&sc->sc_tx_ring, i)->status !=
		    NETIF_RSP_OKAY))
			ifp->if_oerrors++;
		else
			ifp->if_opackets++;
		xengnt_revoke_access(req->txreq_gntref);
		m_freem(req->txreq_m);
		SLIST_INSERT_HEAD(&sc->sc_txreq_head, req, txreq_next);
	}
	sc->sc_tx_ring.rsp_cons = resp_prod;
	/* set new event and check for race with rsp_cons update */
	sc->sc_tx_ring.sring->rsp_event =
	    resp_prod + ((sc->sc_tx_ring.sring->req_prod - resp_prod) >> 1) + 1;
	ifp->if_timer = 0;
	x86_sfence();
	if (resp_prod != sc->sc_tx_ring.sring->rsp_prod)
		goto again;
end:
	if (ifp->if_flags & IFF_OACTIVE) {
		ifp->if_flags &= ~IFF_OACTIVE;
		xennet_softstart(sc);
	}
}
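
/*
 * Event channel interrupt handler: reap completed transmissions, then
 * process receive responses.  For each response the transferred
 * machine page is reclaimed and mapped back at the receive buffer's
 * virtual address before the packet is passed up the stack.
 */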

static int
xennet_handler(void *arg)
{
	struct xennet_xenbus_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	RING_IDX resp_prod, i;
	struct xennet_rxreq *req;
	paddr_t ma, pa;
	vaddr_t va;
	mmu_update_t mmu[1];
	multicall_entry_t mcl[2];
	struct mbuf *m;
	void *pktp;
	int more_to_do;

	if (sc->sc_backend_status != BEST_CONNECTED)
		return 1;

	xennet_tx_complete(sc);

again:
	DPRINTFN(XEDB_EVENT, ("xennet_handler prod %d cons %d\n",
	    sc->sc_rx_ring.sring->rsp_prod, sc->sc_rx_ring.rsp_cons));

	resp_prod = sc->sc_rx_ring.sring->rsp_prod;
	x86_lfence(); /* ensure we see replies up to resp_prod */
	for (i = sc->sc_rx_ring.rsp_cons; i != resp_prod; i++) {
		netif_rx_response_t *rx = RING_GET_RESPONSE(&sc->sc_rx_ring, i);
		req = &sc->sc_rxreqs[rx->id];
		KASSERT(req->rxreq_gntref != GRANT_INVALID_REF);
		KASSERT(req->rxreq_id == rx->id);
		ma = xengnt_revoke_transfer(req->rxreq_gntref);
		if (ma == 0) {
			DPRINTFN(XEDB_EVENT, ("xennet_handler ma == 0\n"));
			/*
			 * The remote couldn't send us a packet.
			 * We can't free this rxreq as no page will be mapped
			 * here. Instead give it back immediately to backend.
			 */
			ifp->if_ierrors++;
			RING_GET_REQUEST(&sc->sc_rx_ring,
			    sc->sc_rx_ring.req_prod_pvt)->id = req->rxreq_id;
			RING_GET_REQUEST(&sc->sc_rx_ring,
			    sc->sc_rx_ring.req_prod_pvt)->gref =
			    req->rxreq_gntref;
			sc->sc_rx_ring.req_prod_pvt++;
			RING_PUSH_REQUESTS(&sc->sc_rx_ring);
			continue;
		}
		req->rxreq_gntref = GRANT_INVALID_REF;

		pa = req->rxreq_pa;
		va = req->rxreq_va;
		/* remap the page */
		mmu[0].ptr = (ma << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
		mmu[0].val = ((pa - XPMAP_OFFSET) >> PAGE_SHIFT);
		MULTI_update_va_mapping(&mcl[0], va,
		    (ma << PAGE_SHIFT) | PG_V | PG_KW, UVMF_TLB_FLUSH|UVMF_ALL);
		xpmap_phys_to_machine_mapping[
		    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = ma;
		mcl[1].op = __HYPERVISOR_mmu_update;
		mcl[1].args[0] = (unsigned long)mmu;
		mcl[1].args[1] = 1;
		mcl[1].args[2] = 0;
		mcl[1].args[3] = DOMID_SELF;
		HYPERVISOR_multicall(mcl, 2);
		pktp = (void *)(va + rx->offset);
#ifdef XENNET_DEBUG_DUMP
		xennet_hex_dump(pktp, rx->status, "r", rx->id);
#endif
		if ((ifp->if_flags & IFF_PROMISC) == 0) {
			struct ether_header *eh = pktp;
			if (ETHER_IS_MULTICAST(eh->ether_dhost) == 0 &&
			    memcmp(CLLADDR(ifp->if_sadl), eh->ether_dhost,
			    ETHER_ADDR_LEN) != 0) {
				DPRINTFN(XEDB_EVENT,
				    ("xennet_handler bad dest\n"));
				/* packet not for us */
				xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE,
				    req);
				continue;
			}
		}
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (__predict_false(m == NULL)) {
			printf("xennet: rx no mbuf\n");
			ifp->if_ierrors++;
			xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE, req);
			continue;
		}
		MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);

		m->m_pkthdr.rcvif = ifp;
		if (__predict_true(sc->sc_rx_ring.req_prod_pvt !=
		    sc->sc_rx_ring.sring->rsp_prod)) {
			m->m_len = m->m_pkthdr.len = rx->status;
			MEXTADD(m, pktp, rx->status,
			    M_DEVBUF, xennet_rx_mbuf_free, req);
			m->m_flags |= M_EXT_RW; /* we own the buffer */
			req->rxreq_gntref = GRANT_STACK_REF;
		} else {
			/*
			 * This was our last receive buffer, allocate
			 * memory, copy data and push the receive
			 * buffer back to the hypervisor.
			 */
			m->m_len = min(MHLEN, rx->status);
			m->m_pkthdr.len = 0;
			m_copyback(m, 0, rx->status, pktp);
			xennet_rx_mbuf_free(NULL, (void *)va, PAGE_SIZE, req);
			if (m->m_pkthdr.len < rx->status) {
				/* out of memory, just drop packets */
				ifp->if_ierrors++;
				m_freem(m);
				continue;
			}
		}
		if ((rx->flags & NETRXF_csum_blank) != 0) {
			xennet_checksum_fill(&m);
			if (m == NULL) {
				ifp->if_ierrors++;
				continue;
			}
		}
#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		ifp->if_ipackets++;

		/* Pass the packet up. */
		(*ifp->if_input)(ifp, m);
	}
	x86_lfence();
	sc->sc_rx_ring.rsp_cons = i;
	RING_FINAL_CHECK_FOR_RESPONSES(&sc->sc_rx_ring, more_to_do);
	if (more_to_do)
		goto again;
	return 1;
}

/*
 * Called at splnet.
 */
void
xennet_start(struct ifnet *ifp)
{
	struct xennet_xenbus_softc *sc = ifp->if_softc;

	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_start()\n", sc->sc_dev.dv_xname));

#if NRND > 0
	rnd_add_uint32(&sc->sc_rnd_source, sc->sc_tx_ring.req_prod_pvt);
#endif

	xennet_tx_complete(sc);

	if (__predict_false(
	    (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING))
		return;

	/*
	 * The Xen communication channel is much more efficient if we can
	 * schedule a batch of packets for domain0. To achieve this, we
	 * schedule a soft interrupt, and just return. This way, the network
	 * stack will enqueue all pending mbufs in the interface's send queue
	 * before it is processed by xennet_softstart().
	 */
	softintr_schedule(sc->sc_softintr);
	return;
}
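
/*
 * Queue pending packets on the TX ring.  A transmit request can only
 * describe a single buffer that does not cross a page boundary, so
 * fragmented packets, or packets straddling a page, are first copied
 * into a freshly allocated mbuf (cluster).
 */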

/*
 * called at splsoftnet
 */
void
xennet_softstart(void *arg)
{
	struct xennet_xenbus_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m, *new_m;
	netif_tx_request_t *txreq;
	RING_IDX req_prod;
	paddr_t pa, pa2;
	struct xennet_txreq *req;
	int notify;
	int do_notify = 0;
	int s;

	s = splnet();
	if (__predict_false(
	    (ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)) {
		splx(s);
		return;
	}

	req_prod = sc->sc_tx_ring.req_prod_pvt;
	while (/*CONSTCOND*/1) {
		uint16_t txflags;

		req = SLIST_FIRST(&sc->sc_txreq_head);
		if (__predict_false(req == NULL)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		switch (m->m_flags & (M_EXT|M_EXT_CLUSTER)) {
		case M_EXT|M_EXT_CLUSTER:
			KASSERT(m->m_ext.ext_paddr != M_PADDR_INVALID);
			pa = m->m_ext.ext_paddr +
			    (m->m_data - m->m_ext.ext_buf);
			break;
		case 0:
			KASSERT(m->m_paddr != M_PADDR_INVALID);
			pa = m->m_paddr + M_BUFOFFSET(m) +
			    (m->m_data - M_BUFADDR(m));
			break;
		default:
			if (__predict_false(
			    !pmap_extract(pmap_kernel(), (vaddr_t)m->m_data,
			    &pa))) {
				panic("xennet_start: no pa");
			}
			break;
		}

		if ((m->m_pkthdr.csum_flags &
		    (M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0) {
			txflags = NETTXF_csum_blank;
		} else {
			txflags = 0;
		}

		if (m->m_pkthdr.len != m->m_len ||
		    (pa ^ (pa + m->m_pkthdr.len - 1)) & PG_FRAME) {

			MGETHDR(new_m, M_DONTWAIT, MT_DATA);
			if (__predict_false(new_m == NULL)) {
				printf("xennet: no mbuf\n");
				break;
			}
			if (m->m_pkthdr.len > MHLEN) {
				MCLGET(new_m, M_DONTWAIT);
				if (__predict_false(
				    (new_m->m_flags & M_EXT) == 0)) {
					DPRINTF(("xennet: no mbuf cluster\n"));
					m_freem(new_m);
					break;
				}
			}

			m_copydata(m, 0, m->m_pkthdr.len, mtod(new_m, void *));
			new_m->m_len = new_m->m_pkthdr.len = m->m_pkthdr.len;

			if ((new_m->m_flags & M_EXT) != 0) {
				pa = new_m->m_ext.ext_paddr;
				KASSERT(new_m->m_data == new_m->m_ext.ext_buf);
				KASSERT(pa != M_PADDR_INVALID);
			} else {
				pa = new_m->m_paddr;
				KASSERT(pa != M_PADDR_INVALID);
				KASSERT(new_m->m_data == M_BUFADDR(new_m));
				pa += M_BUFOFFSET(new_m);
			}
			if (__predict_false(xengnt_grant_access(
			    sc->sc_xbusd->xbusd_otherend_id,
			    xpmap_ptom_masked(pa),
			    GNTMAP_readonly, &req->txreq_gntref) != 0)) {
				m_freem(new_m);
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
			/* we will be able to send new_m */
			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_freem(m);
			m = new_m;
		} else {
			if (__predict_false(xengnt_grant_access(
			    sc->sc_xbusd->xbusd_otherend_id,
			    xpmap_ptom_masked(pa),
			    GNTMAP_readonly, &req->txreq_gntref) != 0)) {
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
			/* we will be able to send m */
			IFQ_DEQUEUE(&ifp->if_snd, m);
		}
		MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);

		KASSERT(((pa ^ (pa + m->m_pkthdr.len - 1)) & PG_FRAME) == 0);

		SLIST_REMOVE_HEAD(&sc->sc_txreq_head, txreq_next);
		req->txreq_m = m;

		DPRINTFN(XEDB_MBUF, ("xennet_start id %d, "
		    "mbuf %p, buf %p/%p/%p, size %d\n",
		    req->txreq_id, m, mtod(m, void *), (void *)pa,
		    (void *)xpmap_ptom_masked(pa), m->m_pkthdr.len));
		pmap_extract_ma(pmap_kernel(), mtod(m, vaddr_t), &pa2);
		DPRINTFN(XEDB_MBUF, ("xennet_start pa %p ma %p/%p\n",
		    (void *)pa, (void *)xpmap_ptom_masked(pa), (void *)pa2));
#ifdef XENNET_DEBUG_DUMP
		xennet_hex_dump(mtod(m, u_char *), m->m_pkthdr.len, "s",
		    req->txreq_id);
#endif

		txreq = RING_GET_REQUEST(&sc->sc_tx_ring, req_prod);
		txreq->id = req->txreq_id;
		txreq->gref = req->txreq_gntref;
		txreq->offset = pa & ~PG_FRAME;
		txreq->size = m->m_pkthdr.len;
		txreq->flags = txflags;

		req_prod++;
		sc->sc_tx_ring.req_prod_pvt = req_prod;
		RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->sc_tx_ring, notify);
		if (notify)
			do_notify = 1;

#ifdef XENNET_DEBUG
		DPRINTFN(XEDB_MEM, ("packet addr %p/%p, physical %p/%p, "
		    "m_paddr %p, len %d/%d\n", M_BUFADDR(m), mtod(m, void *),
		    (void *)*kvtopte(mtod(m, vaddr_t)),
		    (void *)xpmap_mtop(*kvtopte(mtod(m, vaddr_t))),
		    (void *)m->m_paddr, m->m_pkthdr.len, m->m_len));
		DPRINTFN(XEDB_MEM, ("id %d gref %d offset %d size %d flags %d"
		    " prod %d\n",
		    txreq->id, txreq->gref, txreq->offset, txreq->size,
		    txreq->flags, req_prod));
#endif

#if NBPFILTER > 0
		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf) {
			bpf_mtap(ifp->if_bpf, m);
		}
#endif
	}

	x86_lfence();
	if (do_notify) {
		hypervisor_notify_via_evtchn(sc->sc_evtchn);
		ifp->if_timer = 5;
	}
	splx(s);

	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_start() done\n",
	    sc->sc_dev.dv_xname));
}

int
xennet_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
#ifdef XENNET_DEBUG
	struct xennet_xenbus_softc *sc = ifp->if_softc;
#endif
	int s, error = 0;

	s = splnet();

	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl()\n", sc->sc_dev.dv_xname));
	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET)
		error = 0;
	splx(s);

	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_ioctl() returning %d\n",
	    sc->sc_dev.dv_xname, error));

	return error;
}

void
xennet_watchdog(struct ifnet *ifp)
{
	struct xennet_xenbus_softc *sc = ifp->if_softc;

	printf("%s: xennet_watchdog\n", sc->sc_dev.dv_xname);
}

int
xennet_init(struct ifnet *ifp)
{
	struct xennet_xenbus_softc *sc = ifp->if_softc;
	int s = splnet();

	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_init()\n", sc->sc_dev.dv_xname));

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		sc->sc_rx_ring.sring->rsp_event =
		    sc->sc_rx_ring.rsp_cons + 1;
		hypervisor_enable_event(sc->sc_evtchn);
		hypervisor_notify_via_evtchn(sc->sc_evtchn);
		xennet_reset(sc);
	}
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	splx(s);
	return 0;
}

void
xennet_stop(struct ifnet *ifp, int disable)
{
	struct xennet_xenbus_softc *sc = ifp->if_softc;
	int s = splnet();

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	hypervisor_mask_event(sc->sc_evtchn);
	xennet_reset(sc);
	splx(s);
}

void
xennet_reset(struct xennet_xenbus_softc *sc)
{

	DPRINTFN(XEDB_FOLLOW, ("%s: xennet_reset()\n", sc->sc_dev.dv_xname));
}

#if defined(NFS_BOOT_BOOTSTATIC)
/* Fill in the NFS diskless boot parameters from the Xen kernel command line. */
int
xennet_bootstatic_callback(struct nfs_diskless *nd)
{
#if 0
	struct ifnet *ifp = nd->nd_ifp;
	struct xennet_xenbus_softc *sc =
	    (struct xennet_xenbus_softc *)ifp->if_softc;
#endif
	union xen_cmdline_parseinfo xcp;
	struct sockaddr_in *sin;

	memset(&xcp, 0, sizeof(xcp.xcp_netinfo));
	xcp.xcp_netinfo.xi_ifno = /* XXX sc->sc_ifno */ 0;
	xcp.xcp_netinfo.xi_root = nd->nd_root.ndm_host;
	xen_parse_cmdline(XEN_PARSE_NETINFO, &xcp);

	nd->nd_myip.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[0]);
	nd->nd_gwip.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[2]);
	nd->nd_mask.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[3]);

	sin = (struct sockaddr_in *) &nd->nd_root.ndm_saddr;
	memset((void *)sin, 0, sizeof(*sin));
	sin->sin_len = sizeof(*sin);
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = ntohl(xcp.xcp_netinfo.xi_ip[1]);
	if (nd->nd_myip.s_addr == 0)
		return NFS_BOOTSTATIC_NOSTATIC;
	else
		return (NFS_BOOTSTATIC_HAS_MYIP|NFS_BOOTSTATIC_HAS_GWIP|
		    NFS_BOOTSTATIC_HAS_MASK|NFS_BOOTSTATIC_HAS_SERVADDR|
		    NFS_BOOTSTATIC_HAS_SERVER);
}
#endif /* defined(NFS_BOOT_BOOTSTATIC) */
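
/*
 * Debug helper (XENNET_DEBUG_DUMP): dump a packet in hex, 16 bytes per
 * line with an ASCII column, prefixed by the request type and id.
 */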
printf("pkt %p len %d/%x type %s id %d\n", pkt, len, len, type, id); 1195 printf("00000000 "); 1196 for(i=0; i<len; i++) { 1197 printf("%c%c ", XCHR(pkt[i]>>4), XCHR(pkt[i])); 1198 if ((i+1) % 16 == 8) 1199 printf(" "); 1200 if ((i+1) % 16 == 0) { 1201 printf(" %c", '|'); 1202 for(j=0; j<16; j++) 1203 printf("%c", pkt[i-15+j]>=32 && 1204 pkt[i-15+j]<127?pkt[i-15+j]:'.'); 1205 printf("%c\n%c%c%c%c%c%c%c%c ", '|', 1206 XCHR((i+1)>>28), XCHR((i+1)>>24), 1207 XCHR((i+1)>>20), XCHR((i+1)>>16), 1208 XCHR((i+1)>>12), XCHR((i+1)>>8), 1209 XCHR((i+1)>>4), XCHR(i+1)); 1210 } 1211 } 1212 printf("\n"); 1213 } 1214 #undef XCHR 1215 #endif 1216