1 /* $NetBSD: if_fwip.c,v 1.25 2012/04/29 18:31:40 dsl Exp $ */ 2 /*- 3 * Copyright (c) 2004 4 * Doug Rabson 5 * Copyright (c) 2002-2003 6 * Hidetoshi Shimokawa. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * 19 * This product includes software developed by Hidetoshi Shimokawa. 20 * 21 * 4. Neither the name of the author nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 * 37 * $FreeBSD: src/sys/dev/firewire/if_fwip.c,v 1.18 2009/02/09 16:58:18 fjoe Exp $ 38 */ 39 40 #include <sys/cdefs.h> 41 __KERNEL_RCSID(0, "$NetBSD: if_fwip.c,v 1.25 2012/04/29 18:31:40 dsl Exp $"); 42 43 #include <sys/param.h> 44 #include <sys/bus.h> 45 #include <sys/device.h> 46 #include <sys/errno.h> 47 #include <sys/malloc.h> 48 #include <sys/mbuf.h> 49 #include <sys/mutex.h> 50 #include <sys/sysctl.h> 51 52 #include <net/bpf.h> 53 #include <net/if.h> 54 #include <net/if_ieee1394.h> 55 #include <net/if_types.h> 56 57 #include <dev/ieee1394/firewire.h> 58 #include <dev/ieee1394/firewirereg.h> 59 #include <dev/ieee1394/iec13213.h> 60 #include <dev/ieee1394/if_fwipvar.h> 61 62 /* 63 * We really need a mechanism for allocating regions in the FIFO 64 * address space. We pick a address in the OHCI controller's 'middle' 65 * address space. This means that the controller will automatically 66 * send responses for us, which is fine since we don't have any 67 * important information to put in the response anyway. 68 */ 69 #define INET_FIFO 0xfffe00000000LL 70 71 #define FWIPDEBUG if (fwipdebug) aprint_debug_ifnet 72 #define TX_MAX_QUEUE (FWMAXQUEUE - 1) 73 74 75 struct fw_hwaddr { 76 uint32_t sender_unique_ID_hi; 77 uint32_t sender_unique_ID_lo; 78 uint8_t sender_max_rec; 79 uint8_t sspd; 80 uint16_t sender_unicast_FIFO_hi; 81 uint32_t sender_unicast_FIFO_lo; 82 }; 83 84 85 static int fwipmatch(device_t, cfdata_t, void *); 86 static void fwipattach(device_t, device_t, void *); 87 static int fwipdetach(device_t, int); 88 static int fwipactivate(device_t, enum devact); 89 90 /* network interface */ 91 static void fwip_start(struct ifnet *); 92 static int fwip_ioctl(struct ifnet *, u_long, void *); 93 static int fwip_init(struct ifnet *); 94 static void fwip_stop(struct ifnet *, int); 95 96 static void fwip_post_busreset(void *); 97 static void fwip_output_callback(struct fw_xfer *); 98 static void fwip_async_output(struct fwip_softc *, struct ifnet *); 99 static void fwip_stream_input(struct fw_xferq *); 100 static void fwip_unicast_input(struct fw_xfer *); 101 102 static int fwipdebug = 0; 103 static int broadcast_channel = 0xc0 | 0x1f; /* tag | channel(XXX) */ 104 static int tx_speed = 2; 105 static int rx_queue_len = FWMAXQUEUE; 106 107 /* 108 * Setup sysctl(3) MIB, hw.fwip.* 109 * 110 * TBD condition CTLFLAG_PERMANENT on being a module or not 111 */ 112 SYSCTL_SETUP(sysctl_fwip, "sysctl fwip(4) subtree setup") 113 { 114 int rc, fwip_node_num; 115 const struct sysctlnode *node; 116 117 if ((rc = sysctl_createv(clog, 0, NULL, NULL, 118 CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL, 119 NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) { 120 goto err; 121 } 122 123 if ((rc = sysctl_createv(clog, 0, NULL, &node, 124 CTLFLAG_PERMANENT, CTLTYPE_NODE, "fwip", 125 SYSCTL_DESCR("fwip controls"), 126 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) { 127 goto err; 128 } 129 fwip_node_num = node->sysctl_num; 130 131 /* fwip RX queue length */ 132 if ((rc = sysctl_createv(clog, 0, NULL, &node, 133 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 134 "rx_queue_len", SYSCTL_DESCR("Length of the receive queue"), 135 NULL, 0, &rx_queue_len, 136 0, CTL_HW, fwip_node_num, CTL_CREATE, CTL_EOL)) != 0) { 137 goto err; 138 } 139 140 /* fwip RX queue length */ 141 if ((rc = sysctl_createv(clog, 0, NULL, &node, 142 CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT, 143 "if_fwip_debug", SYSCTL_DESCR("fwip driver debug flag"), 144 NULL, 0, &fwipdebug, 145 0, CTL_HW, fwip_node_num, CTL_CREATE, CTL_EOL)) != 0) { 146 goto err; 147 } 148 149 return; 150 151 err: 152 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc); 153 } 154 155 156 CFATTACH_DECL_NEW(fwip, sizeof(struct fwip_softc), 157 fwipmatch, fwipattach, fwipdetach, fwipactivate); 158 159 160 static int 161 fwipmatch(device_t parent, cfdata_t cf, void *aux) 162 { 163 struct fw_attach_args *fwa = aux; 164 165 if (strcmp(fwa->name, "fwip") == 0) 166 return 1; 167 return 0; 168 } 169 170 static void 171 fwipattach(device_t parent, device_t self, void *aux) 172 { 173 struct fwip_softc *sc = device_private(self); 174 struct fw_attach_args *fwa = (struct fw_attach_args *)aux; 175 struct fw_hwaddr *hwaddr; 176 struct ifnet *ifp; 177 178 aprint_naive("\n"); 179 aprint_normal(": IP over IEEE1394\n"); 180 181 sc->sc_fd.dev = self; 182 sc->sc_eth.fwip_ifp = &sc->sc_eth.fwcom.fc_if; 183 hwaddr = (struct fw_hwaddr *)&sc->sc_eth.fwcom.ic_hwaddr; 184 185 ifp = sc->sc_eth.fwip_ifp; 186 187 mutex_init(&sc->sc_fwb.fwb_mtx, MUTEX_DEFAULT, IPL_NET); 188 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET); 189 190 /* XXX */ 191 sc->sc_dma_ch = -1; 192 193 sc->sc_fd.fc = fwa->fc; 194 if (tx_speed < 0) 195 tx_speed = sc->sc_fd.fc->speed; 196 197 sc->sc_fd.post_explore = NULL; 198 sc->sc_fd.post_busreset = fwip_post_busreset; 199 sc->sc_eth.fwip = sc; 200 201 /* 202 * Encode our hardware the way that arp likes it. 203 */ 204 hwaddr->sender_unique_ID_hi = htonl(sc->sc_fd.fc->eui.hi); 205 hwaddr->sender_unique_ID_lo = htonl(sc->sc_fd.fc->eui.lo); 206 hwaddr->sender_max_rec = sc->sc_fd.fc->maxrec; 207 hwaddr->sspd = sc->sc_fd.fc->speed; 208 hwaddr->sender_unicast_FIFO_hi = htons((uint16_t)(INET_FIFO >> 32)); 209 hwaddr->sender_unicast_FIFO_lo = htonl((uint32_t)INET_FIFO); 210 211 /* fill the rest and attach interface */ 212 ifp->if_softc = &sc->sc_eth; 213 214 strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ); 215 ifp->if_start = fwip_start; 216 ifp->if_ioctl = fwip_ioctl; 217 ifp->if_init = fwip_init; 218 ifp->if_stop = fwip_stop; 219 ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST); 220 IFQ_SET_READY(&ifp->if_snd); 221 IFQ_SET_MAXLEN(&ifp->if_snd, TX_MAX_QUEUE); 222 223 if_attach(ifp); 224 ieee1394_ifattach(ifp, (const struct ieee1394_hwaddr *)hwaddr); 225 226 if (!pmf_device_register(self, NULL, NULL)) 227 aprint_error_dev(self, "couldn't establish power handler\n"); 228 else 229 pmf_class_network_register(self, ifp); 230 231 FWIPDEBUG(ifp, "interface created\n"); 232 return; 233 } 234 235 static int 236 fwipdetach(device_t self, int flags) 237 { 238 struct fwip_softc *sc = device_private(self); 239 struct ifnet *ifp = sc->sc_eth.fwip_ifp; 240 241 fwip_stop(sc->sc_eth.fwip_ifp, 1); 242 ieee1394_ifdetach(ifp); 243 if_detach(ifp); 244 mutex_destroy(&sc->sc_mtx); 245 mutex_destroy(&sc->sc_fwb.fwb_mtx); 246 return 0; 247 } 248 249 static int 250 fwipactivate(device_t self, enum devact act) 251 { 252 struct fwip_softc *sc = device_private(self); 253 254 switch (act) { 255 case DVACT_DEACTIVATE: 256 if_deactivate(sc->sc_eth.fwip_ifp); 257 return 0; 258 default: 259 return EOPNOTSUPP; 260 } 261 } 262 263 static void 264 fwip_start(struct ifnet *ifp) 265 { 266 struct fwip_softc *sc = ((struct fwip_eth_softc *)ifp->if_softc)->fwip; 267 268 FWIPDEBUG(ifp, "starting\n"); 269 270 if (sc->sc_dma_ch < 0) { 271 struct mbuf *m = NULL; 272 273 FWIPDEBUG(ifp, "not ready\n"); 274 275 do { 276 IF_DEQUEUE(&ifp->if_snd, m); 277 if (m != NULL) 278 m_freem(m); 279 ifp->if_oerrors++; 280 } while (m != NULL); 281 282 return; 283 } 284 285 ifp->if_flags |= IFF_OACTIVE; 286 287 if (ifp->if_snd.ifq_len != 0) 288 fwip_async_output(sc, ifp); 289 290 ifp->if_flags &= ~IFF_OACTIVE; 291 } 292 293 static int 294 fwip_ioctl(struct ifnet *ifp, u_long cmd, void *data) 295 { 296 int s, error = 0; 297 298 s = splnet(); 299 300 switch (cmd) { 301 case SIOCSIFFLAGS: 302 if ((error = ifioctl_common(ifp, cmd, data)) != 0) 303 break; 304 switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) { 305 case IFF_RUNNING: 306 fwip_stop(ifp, 0); 307 break; 308 case IFF_UP: 309 fwip_init(ifp); 310 break; 311 default: 312 break; 313 } 314 break; 315 316 case SIOCADDMULTI: 317 case SIOCDELMULTI: 318 break; 319 320 default: 321 error = ieee1394_ioctl(ifp, cmd, data); 322 if (error == ENETRESET) 323 error = 0; 324 break; 325 } 326 327 splx(s); 328 329 return error; 330 } 331 332 static int 333 fwip_init(struct ifnet *ifp) 334 { 335 struct fwip_softc *sc = ((struct fwip_eth_softc *)ifp->if_softc)->fwip; 336 struct firewire_comm *fc; 337 struct fw_xferq *xferq; 338 struct fw_xfer *xfer; 339 struct mbuf *m; 340 int i; 341 342 FWIPDEBUG(ifp, "initializing\n"); 343 344 fc = sc->sc_fd.fc; 345 if (sc->sc_dma_ch < 0) { 346 sc->sc_dma_ch = fw_open_isodma(fc, /* tx */0); 347 if (sc->sc_dma_ch < 0) 348 return ENXIO; 349 xferq = fc->ir[sc->sc_dma_ch]; 350 xferq->flag |= 351 FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_STREAM; 352 xferq->flag &= ~0xff; 353 xferq->flag |= broadcast_channel & 0xff; 354 /* register fwip_input handler */ 355 xferq->sc = (void *) sc; 356 xferq->hand = fwip_stream_input; 357 xferq->bnchunk = rx_queue_len; 358 xferq->bnpacket = 1; 359 xferq->psize = MCLBYTES; 360 xferq->queued = 0; 361 xferq->buf = NULL; 362 xferq->bulkxfer = (struct fw_bulkxfer *) malloc( 363 sizeof(struct fw_bulkxfer) * xferq->bnchunk, 364 M_FW, M_WAITOK); 365 if (xferq->bulkxfer == NULL) { 366 aprint_error_ifnet(ifp, "if_fwip: malloc failed\n"); 367 return ENOMEM; 368 } 369 STAILQ_INIT(&xferq->stvalid); 370 STAILQ_INIT(&xferq->stfree); 371 STAILQ_INIT(&xferq->stdma); 372 xferq->stproc = NULL; 373 for (i = 0; i < xferq->bnchunk; i++) { 374 m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR); 375 xferq->bulkxfer[i].mbuf = m; 376 if (m != NULL) { 377 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 378 STAILQ_INSERT_TAIL(&xferq->stfree, 379 &xferq->bulkxfer[i], link); 380 } else 381 aprint_error_ifnet(ifp, 382 "fwip_as_input: m_getcl failed\n"); 383 } 384 385 sc->sc_fwb.start = INET_FIFO; 386 sc->sc_fwb.end = INET_FIFO + 16384; /* S3200 packet size */ 387 388 /* pre-allocate xfer */ 389 STAILQ_INIT(&sc->sc_fwb.xferlist); 390 for (i = 0; i < rx_queue_len; i++) { 391 xfer = fw_xfer_alloc(M_FW); 392 if (xfer == NULL) 393 break; 394 m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR); 395 xfer->recv.payload = mtod(m, uint32_t *); 396 xfer->recv.pay_len = MCLBYTES; 397 xfer->hand = fwip_unicast_input; 398 xfer->fc = fc; 399 xfer->sc = (void *) sc; 400 xfer->mbuf = m; 401 STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link); 402 } 403 fw_bindadd(fc, &sc->sc_fwb); 404 405 STAILQ_INIT(&sc->sc_xferlist); 406 for (i = 0; i < TX_MAX_QUEUE; i++) { 407 xfer = fw_xfer_alloc(M_FW); 408 if (xfer == NULL) 409 break; 410 xfer->send.spd = tx_speed; 411 xfer->fc = sc->sc_fd.fc; 412 xfer->sc = (void *)sc; 413 xfer->hand = fwip_output_callback; 414 STAILQ_INSERT_TAIL(&sc->sc_xferlist, xfer, link); 415 } 416 } else 417 xferq = fc->ir[sc->sc_dma_ch]; 418 419 sc->sc_last_dest.hi = 0; 420 sc->sc_last_dest.lo = 0; 421 422 /* start dma */ 423 if ((xferq->flag & FWXFERQ_RUNNING) == 0) 424 fc->irx_enable(fc, sc->sc_dma_ch); 425 426 ifp->if_flags |= IFF_RUNNING; 427 ifp->if_flags &= ~IFF_OACTIVE; 428 429 #if 0 430 /* attempt to start output */ 431 fwip_start(ifp); 432 #endif 433 return 0; 434 } 435 436 static void 437 fwip_stop(struct ifnet *ifp, int disable) 438 { 439 struct fwip_softc *sc = ((struct fwip_eth_softc *)ifp->if_softc)->fwip; 440 struct firewire_comm *fc = sc->sc_fd.fc; 441 struct fw_xferq *xferq; 442 struct fw_xfer *xfer, *next; 443 int i; 444 445 if (sc->sc_dma_ch >= 0) { 446 xferq = fc->ir[sc->sc_dma_ch]; 447 448 if (xferq->flag & FWXFERQ_RUNNING) 449 fc->irx_disable(fc, sc->sc_dma_ch); 450 xferq->flag &= 451 ~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM | 452 FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK); 453 xferq->hand = NULL; 454 455 for (i = 0; i < xferq->bnchunk; i++) 456 m_freem(xferq->bulkxfer[i].mbuf); 457 free(xferq->bulkxfer, M_FW); 458 459 fw_bindremove(fc, &sc->sc_fwb); 460 for (xfer = STAILQ_FIRST(&sc->sc_fwb.xferlist); xfer != NULL; 461 xfer = next) { 462 next = STAILQ_NEXT(xfer, link); 463 fw_xfer_free(xfer); 464 } 465 466 for (xfer = STAILQ_FIRST(&sc->sc_xferlist); xfer != NULL; 467 xfer = next) { 468 next = STAILQ_NEXT(xfer, link); 469 fw_xfer_free(xfer); 470 } 471 472 xferq->bulkxfer = NULL; 473 sc->sc_dma_ch = -1; 474 } 475 476 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 477 } 478 479 static void 480 fwip_post_busreset(void *arg) 481 { 482 struct fwip_softc *sc = arg; 483 struct crom_src *src; 484 struct crom_chunk *root; 485 486 src = sc->sc_fd.fc->crom_src; 487 root = sc->sc_fd.fc->crom_root; 488 489 /* RFC2734 IPv4 over IEEE1394 */ 490 memset(&sc->sc_unit4, 0, sizeof(struct crom_chunk)); 491 crom_add_chunk(src, root, &sc->sc_unit4, CROM_UDIR); 492 crom_add_entry(&sc->sc_unit4, CSRKEY_SPEC, CSRVAL_IETF); 493 crom_add_simple_text(src, &sc->sc_unit4, &sc->sc_spec4, "IANA"); 494 crom_add_entry(&sc->sc_unit4, CSRKEY_VER, 1); 495 crom_add_simple_text(src, &sc->sc_unit4, &sc->sc_ver4, "IPv4"); 496 497 /* RFC3146 IPv6 over IEEE1394 */ 498 memset(&sc->sc_unit6, 0, sizeof(struct crom_chunk)); 499 crom_add_chunk(src, root, &sc->sc_unit6, CROM_UDIR); 500 crom_add_entry(&sc->sc_unit6, CSRKEY_SPEC, CSRVAL_IETF); 501 crom_add_simple_text(src, &sc->sc_unit6, &sc->sc_spec6, "IANA"); 502 crom_add_entry(&sc->sc_unit6, CSRKEY_VER, 2); 503 crom_add_simple_text(src, &sc->sc_unit6, &sc->sc_ver6, "IPv6"); 504 505 sc->sc_last_dest.hi = 0; 506 sc->sc_last_dest.lo = 0; 507 ieee1394_drain(sc->sc_eth.fwip_ifp); 508 } 509 510 static void 511 fwip_output_callback(struct fw_xfer *xfer) 512 { 513 struct fwip_softc *sc = (struct fwip_softc *)xfer->sc; 514 struct ifnet *ifp; 515 516 ifp = sc->sc_eth.fwip_ifp; 517 /* XXX error check */ 518 FWIPDEBUG(ifp, "resp = %d\n", xfer->resp); 519 if (xfer->resp != 0) 520 ifp->if_oerrors++; 521 522 m_freem(xfer->mbuf); 523 fw_xfer_unload(xfer); 524 525 mutex_enter(&sc->sc_mtx); 526 STAILQ_INSERT_TAIL(&sc->sc_xferlist, xfer, link); 527 mutex_exit(&sc->sc_mtx); 528 529 /* for queue full */ 530 if (ifp->if_snd.ifq_head != NULL) 531 fwip_start(ifp); 532 } 533 534 /* Async. stream output */ 535 static void 536 fwip_async_output(struct fwip_softc *sc, struct ifnet *ifp) 537 { 538 struct firewire_comm *fc = sc->sc_fd.fc; 539 struct mbuf *m; 540 struct m_tag *mtag; 541 struct fw_hwaddr *destfw; 542 struct fw_xfer *xfer; 543 struct fw_xferq *xferq; 544 struct fw_pkt *fp; 545 uint16_t nodeid; 546 int error; 547 int i = 0; 548 549 xfer = NULL; 550 xferq = fc->atq; 551 while ((xferq->queued < xferq->maxq - 1) && 552 (ifp->if_snd.ifq_head != NULL)) { 553 mutex_enter(&sc->sc_mtx); 554 if (STAILQ_EMPTY(&sc->sc_xferlist)) { 555 mutex_exit(&sc->sc_mtx); 556 #if 0 557 aprint_normal("if_fwip: lack of xfer\n"); 558 #endif 559 break; 560 } 561 IF_DEQUEUE(&ifp->if_snd, m); 562 if (m == NULL) { 563 mutex_exit(&sc->sc_mtx); 564 break; 565 } 566 xfer = STAILQ_FIRST(&sc->sc_xferlist); 567 STAILQ_REMOVE_HEAD(&sc->sc_xferlist, link); 568 mutex_exit(&sc->sc_mtx); 569 570 /* 571 * Dig out the link-level address which 572 * firewire_output got via arp or neighbour 573 * discovery. If we don't have a link-level address, 574 * just stick the thing on the broadcast channel. 575 */ 576 mtag = m_tag_find(m, MTAG_FIREWIRE_HWADDR, 0); 577 if (mtag == NULL) 578 destfw = 0; 579 else 580 destfw = (struct fw_hwaddr *) (mtag + 1); 581 582 /* 583 * Put the mbuf in the xfer early in case we hit an 584 * error case below - fwip_output_callback will free 585 * the mbuf. 586 */ 587 xfer->mbuf = m; 588 589 /* 590 * We use the arp result (if any) to add a suitable firewire 591 * packet header before handing off to the bus. 592 */ 593 fp = &xfer->send.hdr; 594 nodeid = FWLOCALBUS | fc->nodeid; 595 if ((m->m_flags & M_BCAST) || !destfw) { 596 /* 597 * Broadcast packets are sent as GASP packets with 598 * specifier ID 0x00005e, version 1 on the broadcast 599 * channel. To be conservative, we send at the 600 * slowest possible speed. 601 */ 602 uint32_t *p; 603 604 M_PREPEND(m, 2 * sizeof(uint32_t), M_DONTWAIT); 605 p = mtod(m, uint32_t *); 606 fp->mode.stream.len = m->m_pkthdr.len; 607 fp->mode.stream.chtag = broadcast_channel; 608 fp->mode.stream.tcode = FWTCODE_STREAM; 609 fp->mode.stream.sy = 0; 610 xfer->send.spd = 0; 611 p[0] = htonl(nodeid << 16); 612 p[1] = htonl((0x5e << 24) | 1); 613 } else { 614 /* 615 * Unicast packets are sent as block writes to the 616 * target's unicast fifo address. If we can't 617 * find the node address, we just give up. We 618 * could broadcast it but that might overflow 619 * the packet size limitations due to the 620 * extra GASP header. Note: the hardware 621 * address is stored in network byte order to 622 * make life easier for ARP. 623 */ 624 struct fw_device *fd; 625 struct fw_eui64 eui; 626 627 eui.hi = ntohl(destfw->sender_unique_ID_hi); 628 eui.lo = ntohl(destfw->sender_unique_ID_lo); 629 if (sc->sc_last_dest.hi != eui.hi || 630 sc->sc_last_dest.lo != eui.lo) { 631 fd = fw_noderesolve_eui64(fc, &eui); 632 if (!fd) { 633 /* error */ 634 ifp->if_oerrors++; 635 /* XXX set error code */ 636 fwip_output_callback(xfer); 637 continue; 638 639 } 640 sc->sc_last_hdr.mode.wreqb.dst = 641 FWLOCALBUS | fd->dst; 642 sc->sc_last_hdr.mode.wreqb.tlrt = 0; 643 sc->sc_last_hdr.mode.wreqb.tcode = 644 FWTCODE_WREQB; 645 sc->sc_last_hdr.mode.wreqb.pri = 0; 646 sc->sc_last_hdr.mode.wreqb.src = nodeid; 647 sc->sc_last_hdr.mode.wreqb.dest_hi = 648 ntohs(destfw->sender_unicast_FIFO_hi); 649 sc->sc_last_hdr.mode.wreqb.dest_lo = 650 ntohl(destfw->sender_unicast_FIFO_lo); 651 sc->sc_last_hdr.mode.wreqb.extcode = 0; 652 sc->sc_last_dest = eui; 653 } 654 655 fp->mode.wreqb = sc->sc_last_hdr.mode.wreqb; 656 fp->mode.wreqb.len = m->m_pkthdr.len; 657 xfer->send.spd = min(destfw->sspd, fc->speed); 658 } 659 660 xfer->send.pay_len = m->m_pkthdr.len; 661 662 error = fw_asyreq(fc, -1, xfer); 663 if (error == EAGAIN) { 664 /* 665 * We ran out of tlabels - requeue the packet 666 * for later transmission. 667 */ 668 xfer->mbuf = 0; 669 mutex_enter(&sc->sc_mtx); 670 STAILQ_INSERT_TAIL(&sc->sc_xferlist, xfer, link); 671 mutex_exit(&sc->sc_mtx); 672 IF_PREPEND(&ifp->if_snd, m); 673 break; 674 } 675 if (error) { 676 /* error */ 677 ifp->if_oerrors++; 678 /* XXX set error code */ 679 fwip_output_callback(xfer); 680 continue; 681 } else { 682 ifp->if_opackets++; 683 i++; 684 } 685 } 686 #if 0 687 if (i > 1) 688 aprint_normal("%d queued\n", i); 689 #endif 690 if (i > 0) 691 xferq->start(fc); 692 } 693 694 /* Async. stream output */ 695 static void 696 fwip_stream_input(struct fw_xferq *xferq) 697 { 698 struct mbuf *m, *m0; 699 struct m_tag *mtag; 700 struct ifnet *ifp; 701 struct fwip_softc *sc; 702 struct fw_bulkxfer *sxfer; 703 struct fw_pkt *fp; 704 uint16_t src; 705 uint32_t *p; 706 707 sc = (struct fwip_softc *)xferq->sc; 708 ifp = sc->sc_eth.fwip_ifp; 709 while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) { 710 STAILQ_REMOVE_HEAD(&xferq->stvalid, link); 711 fp = mtod(sxfer->mbuf, struct fw_pkt *); 712 if (sc->sc_fd.fc->irx_post != NULL) 713 sc->sc_fd.fc->irx_post(sc->sc_fd.fc, fp->mode.ld); 714 m = sxfer->mbuf; 715 716 /* insert new rbuf */ 717 sxfer->mbuf = m0 = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 718 if (m0 != NULL) { 719 m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size; 720 STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link); 721 } else 722 aprint_error_ifnet(ifp, 723 "fwip_as_input: m_getcl failed\n"); 724 725 /* 726 * We must have a GASP header - leave the 727 * encapsulation sanity checks to the generic 728 * code. Remeber that we also have the firewire async 729 * stream header even though that isn't accounted for 730 * in mode.stream.len. 731 */ 732 if (sxfer->resp != 0 || 733 fp->mode.stream.len < 2 * sizeof(uint32_t)) { 734 m_freem(m); 735 ifp->if_ierrors++; 736 continue; 737 } 738 m->m_len = m->m_pkthdr.len = fp->mode.stream.len 739 + sizeof(fp->mode.stream); 740 741 /* 742 * If we received the packet on the broadcast channel, 743 * mark it as broadcast, otherwise we assume it must 744 * be multicast. 745 */ 746 if (fp->mode.stream.chtag == broadcast_channel) 747 m->m_flags |= M_BCAST; 748 else 749 m->m_flags |= M_MCAST; 750 751 /* 752 * Make sure we recognise the GASP specifier and 753 * version. 754 */ 755 p = mtod(m, uint32_t *); 756 if ((((ntohl(p[1]) & 0xffff) << 8) | ntohl(p[2]) >> 24) != 757 0x00005e || 758 (ntohl(p[2]) & 0xffffff) != 1) { 759 FWIPDEBUG(ifp, "Unrecognised GASP header %#08x %#08x\n", 760 ntohl(p[1]), ntohl(p[2])); 761 m_freem(m); 762 ifp->if_ierrors++; 763 continue; 764 } 765 766 /* 767 * Record the sender ID for possible BPF usage. 768 */ 769 src = ntohl(p[1]) >> 16; 770 if (ifp->if_bpf) { 771 mtag = m_tag_get(MTAG_FIREWIRE_SENDER_EUID, 772 2 * sizeof(uint32_t), M_NOWAIT); 773 if (mtag) { 774 /* bpf wants it in network byte order */ 775 struct fw_device *fd; 776 uint32_t *p2 = (uint32_t *) (mtag + 1); 777 778 fd = fw_noderesolve_nodeid(sc->sc_fd.fc, 779 src & 0x3f); 780 if (fd) { 781 p2[0] = htonl(fd->eui.hi); 782 p2[1] = htonl(fd->eui.lo); 783 } else { 784 p2[0] = 0; 785 p2[1] = 0; 786 } 787 m_tag_prepend(m, mtag); 788 } 789 } 790 791 /* 792 * Trim off the GASP header 793 */ 794 m_adj(m, 3*sizeof(uint32_t)); 795 m->m_pkthdr.rcvif = ifp; 796 ieee1394_input(ifp, m, src); 797 ifp->if_ipackets++; 798 } 799 if (STAILQ_FIRST(&xferq->stfree) != NULL) 800 sc->sc_fd.fc->irx_enable(sc->sc_fd.fc, sc->sc_dma_ch); 801 } 802 803 static inline void 804 fwip_unicast_input_recycle(struct fwip_softc *sc, struct fw_xfer *xfer) 805 { 806 struct mbuf *m; 807 808 /* 809 * We have finished with a unicast xfer. Allocate a new 810 * cluster and stick it on the back of the input queue. 811 */ 812 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 813 if (m == NULL) 814 aprint_error_dev(sc->sc_fd.dev, 815 "fwip_unicast_input_recycle: m_getcl failed\n"); 816 xfer->recv.payload = mtod(m, uint32_t *); 817 xfer->recv.pay_len = MCLBYTES; 818 xfer->mbuf = m; 819 mutex_enter(&sc->sc_fwb.fwb_mtx); 820 STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link); 821 mutex_exit(&sc->sc_fwb.fwb_mtx); 822 } 823 824 static void 825 fwip_unicast_input(struct fw_xfer *xfer) 826 { 827 uint64_t address; 828 struct mbuf *m; 829 struct m_tag *mtag; 830 struct ifnet *ifp; 831 struct fwip_softc *sc; 832 struct fw_pkt *fp; 833 int rtcode; 834 835 sc = (struct fwip_softc *)xfer->sc; 836 ifp = sc->sc_eth.fwip_ifp; 837 m = xfer->mbuf; 838 xfer->mbuf = 0; 839 fp = &xfer->recv.hdr; 840 841 /* 842 * Check the fifo address - we only accept addresses of 843 * exactly INET_FIFO. 844 */ 845 address = ((uint64_t)fp->mode.wreqb.dest_hi << 32) 846 | fp->mode.wreqb.dest_lo; 847 if (fp->mode.wreqb.tcode != FWTCODE_WREQB) { 848 rtcode = FWRCODE_ER_TYPE; 849 } else if (address != INET_FIFO) { 850 rtcode = FWRCODE_ER_ADDR; 851 } else { 852 rtcode = FWRCODE_COMPLETE; 853 } 854 855 /* 856 * Pick up a new mbuf and stick it on the back of the receive 857 * queue. 858 */ 859 fwip_unicast_input_recycle(sc, xfer); 860 861 /* 862 * If we've already rejected the packet, give up now. 863 */ 864 if (rtcode != FWRCODE_COMPLETE) { 865 m_freem(m); 866 ifp->if_ierrors++; 867 return; 868 } 869 870 if (ifp->if_bpf) { 871 /* 872 * Record the sender ID for possible BPF usage. 873 */ 874 mtag = m_tag_get(MTAG_FIREWIRE_SENDER_EUID, 875 2 * sizeof(uint32_t), M_NOWAIT); 876 if (mtag) { 877 /* bpf wants it in network byte order */ 878 struct fw_device *fd; 879 uint32_t *p = (uint32_t *) (mtag + 1); 880 881 fd = fw_noderesolve_nodeid(sc->sc_fd.fc, 882 fp->mode.wreqb.src & 0x3f); 883 if (fd) { 884 p[0] = htonl(fd->eui.hi); 885 p[1] = htonl(fd->eui.lo); 886 } else { 887 p[0] = 0; 888 p[1] = 0; 889 } 890 m_tag_prepend(m, mtag); 891 } 892 } 893 894 /* 895 * Hand off to the generic encapsulation code. We don't use 896 * ifp->if_input so that we can pass the source nodeid as an 897 * argument to facilitate link-level fragment reassembly. 898 */ 899 m->m_len = m->m_pkthdr.len = fp->mode.wreqb.len; 900 m->m_pkthdr.rcvif = ifp; 901 ieee1394_input(ifp, m, fp->mode.wreqb.src); 902 ifp->if_ipackets++; 903 } 904