1 /*	$NetBSD: if_fwip.c,v 1.24 2010/05/23 18:56:59 christos Exp $	*/
2 /*-
3  * Copyright (c) 2004
4  *	Doug Rabson
5  * Copyright (c) 2002-2003
6  * 	Hidetoshi Shimokawa. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *
19  *	This product includes software developed by Hidetoshi Shimokawa.
20  *
21  * 4. Neither the name of the author nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  * $FreeBSD: src/sys/dev/firewire/if_fwip.c,v 1.18 2009/02/09 16:58:18 fjoe Exp $
38  */
39 
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: if_fwip.c,v 1.24 2010/05/23 18:56:59 christos Exp $");
42 
43 #include <sys/param.h>
44 #include <sys/bus.h>
45 #include <sys/device.h>
46 #include <sys/errno.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/mutex.h>
50 #include <sys/sysctl.h>
51 
52 #include <net/bpf.h>
53 #include <net/if.h>
54 #include <net/if_ieee1394.h>
55 #include <net/if_types.h>
56 
57 #include <dev/ieee1394/firewire.h>
58 #include <dev/ieee1394/firewirereg.h>
59 #include <dev/ieee1394/iec13213.h>
60 #include <dev/ieee1394/if_fwipvar.h>
61 
62 /*
63  * We really need a mechanism for allocating regions in the FIFO
64  * address space. We pick an address in the OHCI controller's 'middle'
65  * address space. This means that the controller will automatically
66  * send responses for us, which is fine since we don't have any
67  * important information to put in the response anyway.
68  */
69 #define INET_FIFO	0xfffe00000000LL
70 
71 #define FWIPDEBUG	if (fwipdebug) aprint_debug_ifnet
72 #define TX_MAX_QUEUE	(FWMAXQUEUE - 1)
73 
74 
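/*
 * A note on layout (assumption based on the RFC cited below in the CROM
 * setup): this matches the 16-byte IP-over-1394 hardware address of
 * RFC 2734/RFC 3146 - the node's EUI-64 plus its max_rec, speed code and
 * unicast FIFO address, kept in the byte order ARP/NDP put on the wire.
 */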
75 struct fw_hwaddr {
76 	uint32_t		sender_unique_ID_hi;
77 	uint32_t		sender_unique_ID_lo;
78 	uint8_t			sender_max_rec;
79 	uint8_t			sspd;
80 	uint16_t		sender_unicast_FIFO_hi;
81 	uint32_t		sender_unicast_FIFO_lo;
82 };
83 
84 
85 static int fwipmatch(device_t, cfdata_t, void *);
86 static void fwipattach(device_t, device_t, void *);
87 static int fwipdetach(device_t, int);
88 static int fwipactivate(device_t, enum devact);
89 
90 /* network interface */
91 static void fwip_start(struct ifnet *);
92 static int fwip_ioctl(struct ifnet *, u_long, void *);
93 static int fwip_init(struct ifnet *);
94 static void fwip_stop(struct ifnet *, int);
95 
96 static void fwip_post_busreset(void *);
97 static void fwip_output_callback(struct fw_xfer *);
98 static void fwip_async_output(struct fwip_softc *, struct ifnet *);
99 static void fwip_stream_input(struct fw_xferq *);
100 static void fwip_unicast_input(struct fw_xfer *);
101 
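/*
 * Tunables: rx_queue_len and fwipdebug are exported below as hw.fwip
 * sysctls; tx_speed 2 selects S400 (a negative value is replaced by the
 * controller's own speed at attach time).
 */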
102 static int fwipdebug = 0;
103 static int broadcast_channel = 0xc0 | 0x1f; /* GASP tag 3 (0xc0) | broadcast channel 31 (XXX) */
104 static int tx_speed = 2;
105 static int rx_queue_len = FWMAXQUEUE;
106 
107 MALLOC_DEFINE(M_FWIP, "if_fwip", "IP over IEEE1394 interface");
108 /*
109  * Set up the sysctl(3) MIB, hw.fwip.*
110  *
111  * TBD condition CTLFLAG_PERMANENT on being a module or not
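 *
 * The resulting read/write nodes are hw.fwip.rx_queue_len and
 * hw.fwip.if_fwip_debug, e.g. "sysctl -w hw.fwip.if_fwip_debug=1"
 * turns on debug output.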
112  */
113 SYSCTL_SETUP(sysctl_fwip, "sysctl fwip(4) subtree setup")
114 {
115 	int rc, fwip_node_num;
116 	const struct sysctlnode *node;
117 
118 	if ((rc = sysctl_createv(clog, 0, NULL, NULL,
119 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "hw", NULL,
120 	    NULL, 0, NULL, 0, CTL_HW, CTL_EOL)) != 0) {
121 		goto err;
122 	}
123 
124 	if ((rc = sysctl_createv(clog, 0, NULL, &node,
125 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "fwip",
126 	    SYSCTL_DESCR("fwip controls"),
127 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
128 		goto err;
129 	}
130 	fwip_node_num = node->sysctl_num;
131 
132 	/* fwip RX queue length */
133 	if ((rc = sysctl_createv(clog, 0, NULL, &node,
134 	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
135 	    "rx_queue_len", SYSCTL_DESCR("Length of the receive queue"),
136 	    NULL, 0, &rx_queue_len,
137 	    0, CTL_HW, fwip_node_num, CTL_CREATE, CTL_EOL)) != 0) {
138 		goto err;
139 	}
140 
141 	/* fwip debug flag */
142 	if ((rc = sysctl_createv(clog, 0, NULL, &node,
143 	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
144 	    "if_fwip_debug", SYSCTL_DESCR("fwip driver debug flag"),
145 	    NULL, 0, &fwipdebug,
146 	    0, CTL_HW, fwip_node_num, CTL_CREATE, CTL_EOL)) != 0) {
147 		goto err;
148 	}
149 
150 	return;
151 
152 err:
153 	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
154 }
155 
156 
157 CFATTACH_DECL_NEW(fwip, sizeof(struct fwip_softc),
158     fwipmatch, fwipattach, fwipdetach, fwipactivate);
159 
160 
161 static int
162 fwipmatch(device_t parent, cfdata_t cf, void *aux)
163 {
164 	struct fw_attach_args *fwa = aux;
165 
166 	if (strcmp(fwa->name, "fwip") == 0)
167 		return 1;
168 	return 0;
169 }
170 
171 static void
172 fwipattach(device_t parent, device_t self, void *aux)
173 {
174 	struct fwip_softc *sc = device_private(self);
175 	struct fw_attach_args *fwa = (struct fw_attach_args *)aux;
176 	struct fw_hwaddr *hwaddr;
177 	struct ifnet *ifp;
178 
179 	aprint_naive("\n");
180 	aprint_normal(": IP over IEEE1394\n");
181 
182 	sc->sc_fd.dev = self;
183 	sc->sc_eth.fwip_ifp = &sc->sc_eth.fwcom.fc_if;
184 	hwaddr = (struct fw_hwaddr *)&sc->sc_eth.fwcom.ic_hwaddr;
185 
186 	ifp = sc->sc_eth.fwip_ifp;
187 
188 	mutex_init(&sc->sc_fwb.fwb_mtx, MUTEX_DEFAULT, IPL_NET);
189 	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);
190 
191 	/* XXX */
192 	sc->sc_dma_ch = -1;
193 
194 	sc->sc_fd.fc = fwa->fc;
195 	if (tx_speed < 0)
196 		tx_speed = sc->sc_fd.fc->speed;
197 
198 	sc->sc_fd.post_explore = NULL;
199 	sc->sc_fd.post_busreset = fwip_post_busreset;
200 	sc->sc_eth.fwip = sc;
201 
202 	/*
203 	 * Encode our hardware address the way that arp likes it.
204 	 */
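	/*
	 * Only the low 16 bits of INET_FIFO's upper half fit in
	 * sender_unicast_FIFO_hi (the FIFO offset is 48 bits wide), and the
	 * whole address is kept in network byte order so it can be copied
	 * straight into ARP/NDP packets.
	 */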
205 	hwaddr->sender_unique_ID_hi = htonl(sc->sc_fd.fc->eui.hi);
206 	hwaddr->sender_unique_ID_lo = htonl(sc->sc_fd.fc->eui.lo);
207 	hwaddr->sender_max_rec = sc->sc_fd.fc->maxrec;
208 	hwaddr->sspd = sc->sc_fd.fc->speed;
209 	hwaddr->sender_unicast_FIFO_hi = htons((uint16_t)(INET_FIFO >> 32));
210 	hwaddr->sender_unicast_FIFO_lo = htonl((uint32_t)INET_FIFO);
211 
212 	/* fill the rest and attach interface */
213 	ifp->if_softc = &sc->sc_eth;
214 
215 	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
216 	ifp->if_start = fwip_start;
217 	ifp->if_ioctl = fwip_ioctl;
218 	ifp->if_init = fwip_init;
219 	ifp->if_stop = fwip_stop;
220 	ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST);
221 	IFQ_SET_READY(&ifp->if_snd);
222 	IFQ_SET_MAXLEN(&ifp->if_snd, TX_MAX_QUEUE);
223 
224 	if_attach(ifp);
225 	ieee1394_ifattach(ifp, (const struct ieee1394_hwaddr *)hwaddr);
226 
227 	if (!pmf_device_register(self, NULL, NULL))
228 		aprint_error_dev(self, "couldn't establish power handler\n");
229 	else
230 		pmf_class_network_register(self, ifp);
231 
232 	FWIPDEBUG(ifp, "interface created\n");
233 	return;
234 }
235 
236 static int
237 fwipdetach(device_t self, int flags)
238 {
239 	struct fwip_softc *sc = device_private(self);
240 	struct ifnet *ifp = sc->sc_eth.fwip_ifp;
241 
242 	fwip_stop(sc->sc_eth.fwip_ifp, 1);
243 	ieee1394_ifdetach(ifp);
244 	if_detach(ifp);
245 	mutex_destroy(&sc->sc_mtx);
246 	mutex_destroy(&sc->sc_fwb.fwb_mtx);
247 	return 0;
248 }
249 
250 static int
251 fwipactivate(device_t self, enum devact act)
252 {
253 	struct fwip_softc *sc = device_private(self);
254 
255 	switch (act) {
256 	case DVACT_DEACTIVATE:
257 		if_deactivate(sc->sc_eth.fwip_ifp);
258 		return 0;
259 	default:
260 		return EOPNOTSUPP;
261 	}
262 }
263 
264 static void
265 fwip_start(struct ifnet *ifp)
266 {
267 	struct fwip_softc *sc = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
268 
269 	FWIPDEBUG(ifp, "starting\n");
270 
271 	if (sc->sc_dma_ch < 0) {
272 		struct mbuf *m = NULL;
273 
274 		FWIPDEBUG(ifp, "not ready\n");
275 
276 		do {
277 			IF_DEQUEUE(&ifp->if_snd, m);
			if (m == NULL)
				break;
			m_freem(m);
			ifp->if_oerrors++;
281 		} while (m != NULL);
282 
283 		return;
284 	}
285 
286 	ifp->if_flags |= IFF_OACTIVE;
287 
288 	if (ifp->if_snd.ifq_len != 0)
289 		fwip_async_output(sc, ifp);
290 
291 	ifp->if_flags &= ~IFF_OACTIVE;
292 }
293 
294 static int
295 fwip_ioctl(struct ifnet *ifp, u_long cmd, void *data)
296 {
297 	int s, error = 0;
298 
299 	s = splnet();
300 
301 	switch (cmd) {
302 	case SIOCSIFFLAGS:
303 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
304 			break;
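		/*
		 * Start or stop the interface when exactly one of IFF_UP and
		 * IFF_RUNNING is set; if both or neither are set there is
		 * nothing to do.
		 */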
305 		switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
306 		case IFF_RUNNING:
307 			fwip_stop(ifp, 0);
308 			break;
309 		case IFF_UP:
310 			fwip_init(ifp);
311 			break;
312 		default:
313 			break;
314 		}
315 		break;
316 
317 	case SIOCADDMULTI:
318 	case SIOCDELMULTI:
319 		break;
320 
321 	default:
322 		error = ieee1394_ioctl(ifp, cmd, data);
323 		if (error == ENETRESET)
324 			error = 0;
325 		break;
326 	}
327 
328 	splx(s);
329 
330 	return error;
331 }
332 
333 static int
334 fwip_init(struct ifnet *ifp)
335 {
336 	struct fwip_softc *sc = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
337 	struct firewire_comm *fc;
338 	struct fw_xferq *xferq;
339 	struct fw_xfer *xfer;
340 	struct mbuf *m;
341 	int i;
342 
343 	FWIPDEBUG(ifp, "initializing\n");
344 
345 	fc = sc->sc_fd.fc;
346 	if (sc->sc_dma_ch < 0) {
347 		sc->sc_dma_ch = fw_open_isodma(fc, /* tx */0);
348 		if (sc->sc_dma_ch < 0)
349 			return ENXIO;
350 		xferq = fc->ir[sc->sc_dma_ch];
351 		xferq->flag |=
352 		    FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_STREAM;
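		/*
		 * The isochronous channel and tag to listen on live in the
		 * low byte of the queue flags (cf. FWXFERQ_CHTAGMASK in
		 * fwip_stop); select the broadcast channel here.
		 */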
353 		xferq->flag &= ~0xff;
354 		xferq->flag |= broadcast_channel & 0xff;
355 		/* register fwip_input handler */
356 		xferq->sc = (void *) sc;
357 		xferq->hand = fwip_stream_input;
358 		xferq->bnchunk = rx_queue_len;
359 		xferq->bnpacket = 1;
360 		xferq->psize = MCLBYTES;
361 		xferq->queued = 0;
362 		xferq->buf = NULL;
363 		xferq->bulkxfer = (struct fw_bulkxfer *) malloc(
364 			sizeof(struct fw_bulkxfer) * xferq->bnchunk,
365 							M_FWIP, M_WAITOK);
366 		if (xferq->bulkxfer == NULL) {
367 			aprint_error_ifnet(ifp, "if_fwip: malloc failed\n");
368 			return ENOMEM;
369 		}
370 		STAILQ_INIT(&xferq->stvalid);
371 		STAILQ_INIT(&xferq->stfree);
372 		STAILQ_INIT(&xferq->stdma);
373 		xferq->stproc = NULL;
374 		for (i = 0; i < xferq->bnchunk; i++) {
375 			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
376 			xferq->bulkxfer[i].mbuf = m;
377 			if (m != NULL) {
378 				m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
379 				STAILQ_INSERT_TAIL(&xferq->stfree,
380 						&xferq->bulkxfer[i], link);
381 			} else
382 				aprint_error_ifnet(ifp,
383 				    "fwip_as_input: m_getcl failed\n");
384 				    "fwip_init: m_getcl failed\n");
385 
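		/*
		 * Bind a unicast receive region at INET_FIFO; fw_bindadd()
		 * below attaches it so that incoming block writes are handed
		 * to fwip_unicast_input().
		 */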
386 		sc->sc_fwb.start = INET_FIFO;
387 		sc->sc_fwb.end = INET_FIFO + 16384; /* S3200 packet size */
388 
389 		/* pre-allocate xfer */
390 		STAILQ_INIT(&sc->sc_fwb.xferlist);
391 		for (i = 0; i < rx_queue_len; i++) {
392 			xfer = fw_xfer_alloc(M_FWIP);
393 			if (xfer == NULL)
394 				break;
395 			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
396 			xfer->recv.payload = mtod(m, uint32_t *);
397 			xfer->recv.pay_len = MCLBYTES;
398 			xfer->hand = fwip_unicast_input;
399 			xfer->fc = fc;
400 			xfer->sc = (void *) sc;
401 			xfer->mbuf = m;
402 			STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link);
403 		}
404 		fw_bindadd(fc, &sc->sc_fwb);
405 
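		/* Pre-allocate transmit xfers; fwip_output_callback() returns
		 * them to this list when a send completes. */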
406 		STAILQ_INIT(&sc->sc_xferlist);
407 		for (i = 0; i < TX_MAX_QUEUE; i++) {
408 			xfer = fw_xfer_alloc(M_FWIP);
409 			if (xfer == NULL)
410 				break;
411 			xfer->send.spd = tx_speed;
412 			xfer->fc = sc->sc_fd.fc;
413 			xfer->sc = (void *)sc;
414 			xfer->hand = fwip_output_callback;
415 			STAILQ_INSERT_TAIL(&sc->sc_xferlist, xfer, link);
416 		}
417 	} else
418 		xferq = fc->ir[sc->sc_dma_ch];
419 
420 	sc->sc_last_dest.hi = 0;
421 	sc->sc_last_dest.lo = 0;
422 
423 	/* start dma */
424 	if ((xferq->flag & FWXFERQ_RUNNING) == 0)
425 		fc->irx_enable(fc, sc->sc_dma_ch);
426 
427 	ifp->if_flags |= IFF_RUNNING;
428 	ifp->if_flags &= ~IFF_OACTIVE;
429 
430 #if 0
431 	/* attempt to start output */
432 	fwip_start(ifp);
433 #endif
434 	return 0;
435 }
436 
437 static void
438 fwip_stop(struct ifnet *ifp, int disable)
439 {
440 	struct fwip_softc *sc = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
441 	struct firewire_comm *fc = sc->sc_fd.fc;
442 	struct fw_xferq *xferq;
443 	struct fw_xfer *xfer, *next;
444 	int i;
445 
446 	if (sc->sc_dma_ch >= 0) {
447 		xferq = fc->ir[sc->sc_dma_ch];
448 
449 		if (xferq->flag & FWXFERQ_RUNNING)
450 			fc->irx_disable(fc, sc->sc_dma_ch);
451 		xferq->flag &=
452 			~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM |
453 			FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK);
454 		xferq->hand = NULL;
455 
456 		for (i = 0; i < xferq->bnchunk; i++)
457 			m_freem(xferq->bulkxfer[i].mbuf);
458 		free(xferq->bulkxfer, M_FWIP);
459 
460 		fw_bindremove(fc, &sc->sc_fwb);
461 		for (xfer = STAILQ_FIRST(&sc->sc_fwb.xferlist); xfer != NULL;
462 		    xfer = next) {
463 			next = STAILQ_NEXT(xfer, link);
464 			fw_xfer_free(xfer);
465 		}
466 
467 		for (xfer = STAILQ_FIRST(&sc->sc_xferlist); xfer != NULL;
468 		    xfer = next) {
469 			next = STAILQ_NEXT(xfer, link);
470 			fw_xfer_free(xfer);
471 		}
472 
473 		xferq->bulkxfer = NULL;
474 		sc->sc_dma_ch = -1;
475 	}
476 
477 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
478 }
479 
480 static void
481 fwip_post_busreset(void *arg)
482 {
483 	struct fwip_softc *sc = arg;
484 	struct crom_src *src;
485 	struct crom_chunk *root;
486 
487 	src = sc->sc_fd.fc->crom_src;
488 	root = sc->sc_fd.fc->crom_root;
489 
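	/*
	 * Re-export our unit directories in the configuration ROM after each
	 * bus reset: specifier CSRVAL_IETF ("IANA") with version 1 marks
	 * RFC 2734 (IPv4), version 2 marks RFC 3146 (IPv6).
	 */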
490 	/* RFC2734 IPv4 over IEEE1394 */
491 	memset(&sc->sc_unit4, 0, sizeof(struct crom_chunk));
492 	crom_add_chunk(src, root, &sc->sc_unit4, CROM_UDIR);
493 	crom_add_entry(&sc->sc_unit4, CSRKEY_SPEC, CSRVAL_IETF);
494 	crom_add_simple_text(src, &sc->sc_unit4, &sc->sc_spec4, "IANA");
495 	crom_add_entry(&sc->sc_unit4, CSRKEY_VER, 1);
496 	crom_add_simple_text(src, &sc->sc_unit4, &sc->sc_ver4, "IPv4");
497 
498 	/* RFC3146 IPv6 over IEEE1394 */
499 	memset(&sc->sc_unit6, 0, sizeof(struct crom_chunk));
500 	crom_add_chunk(src, root, &sc->sc_unit6, CROM_UDIR);
501 	crom_add_entry(&sc->sc_unit6, CSRKEY_SPEC, CSRVAL_IETF);
502 	crom_add_simple_text(src, &sc->sc_unit6, &sc->sc_spec6, "IANA");
503 	crom_add_entry(&sc->sc_unit6, CSRKEY_VER, 2);
504 	crom_add_simple_text(src, &sc->sc_unit6, &sc->sc_ver6, "IPv6");
505 
506 	sc->sc_last_dest.hi = 0;
507 	sc->sc_last_dest.lo = 0;
508 	ieee1394_drain(sc->sc_eth.fwip_ifp);
509 }
510 
511 static void
512 fwip_output_callback(struct fw_xfer *xfer)
513 {
514 	struct fwip_softc *sc = (struct fwip_softc *)xfer->sc;
515 	struct ifnet *ifp;
516 
517 	ifp = sc->sc_eth.fwip_ifp;
518 	/* XXX error check */
519 	FWIPDEBUG(ifp, "resp = %d\n", xfer->resp);
520 	if (xfer->resp != 0)
521 		ifp->if_oerrors++;
522 
523 	m_freem(xfer->mbuf);
524 	fw_xfer_unload(xfer);
525 
526 	mutex_enter(&sc->sc_mtx);
527 	STAILQ_INSERT_TAIL(&sc->sc_xferlist, xfer, link);
528 	mutex_exit(&sc->sc_mtx);
529 
530 	/* for queue full */
531 	if (ifp->if_snd.ifq_head != NULL)
532 		fwip_start(ifp);
533 }
534 
535 /* Async. stream output */
536 static void
537 fwip_async_output(struct fwip_softc *sc, struct ifnet *ifp)
538 {
539 	struct firewire_comm *fc = sc->sc_fd.fc;
540 	struct mbuf *m;
541 	struct m_tag *mtag;
542 	struct fw_hwaddr *destfw;
543 	struct fw_xfer *xfer;
544 	struct fw_xferq *xferq;
545 	struct fw_pkt *fp;
546 	uint16_t nodeid;
547 	int error;
548 	int i = 0;
549 
550 	xfer = NULL;
551 	xferq = fc->atq;
552 	while ((xferq->queued < xferq->maxq - 1) &&
553 	    (ifp->if_snd.ifq_head != NULL)) {
554 		mutex_enter(&sc->sc_mtx);
555 		if (STAILQ_EMPTY(&sc->sc_xferlist)) {
556 			mutex_exit(&sc->sc_mtx);
557 #if 0
558 			aprint_normal("if_fwip: lack of xfer\n");
559 #endif
560 			break;
561 		}
562 		IF_DEQUEUE(&ifp->if_snd, m);
563 		if (m == NULL) {
564 			mutex_exit(&sc->sc_mtx);
565 			break;
566 		}
567 		xfer = STAILQ_FIRST(&sc->sc_xferlist);
568 		STAILQ_REMOVE_HEAD(&sc->sc_xferlist, link);
569 		mutex_exit(&sc->sc_mtx);
570 
571 		/*
572 		 * Dig out the link-level address which
573 		 * firewire_output got via arp or neighbour
574 		 * discovery. If we don't have a link-level address,
575 		 * just stick the thing on the broadcast channel.
576 		 */
577 		mtag = m_tag_find(m, MTAG_FIREWIRE_HWADDR, 0);
578 		if (mtag == NULL)
579 			destfw = 0;
580 		else
581 			destfw = (struct fw_hwaddr *) (mtag + 1);
582 
583 		/*
584 		 * Put the mbuf in the xfer early in case we hit an
585 		 * error case below - fwip_output_callback will free
586 		 * the mbuf.
587 		 */
588 		xfer->mbuf = m;
589 
590 		/*
591 		 * We use the arp result (if any) to add a suitable firewire
592 		 * packet header before handing off to the bus.
593 		 */
594 		fp = &xfer->send.hdr;
595 		nodeid = FWLOCALBUS | fc->nodeid;
596 		if ((m->m_flags & M_BCAST) || !destfw) {
597 			/*
598 			 * Broadcast packets are sent as GASP packets with
599 			 * specifier ID 0x00005e, version 1 on the broadcast
600 			 * channel. To be conservative, we send at the
601 			 * slowest possible speed.
602 			 */
603 			uint32_t *p;
604 
605 			M_PREPEND(m, 2 * sizeof(uint32_t), M_DONTWAIT);
606 			p = mtod(m, uint32_t *);
607 			fp->mode.stream.len = m->m_pkthdr.len;
608 			fp->mode.stream.chtag = broadcast_channel;
609 			fp->mode.stream.tcode = FWTCODE_STREAM;
610 			fp->mode.stream.sy = 0;
611 			xfer->send.spd = 0;
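			/*
			 * GASP header: the first quadlet carries our source_ID
			 * in its upper 16 bits (the high bits of the specifier
			 * ID are zero), the second carries the low specifier
			 * byte 0x5e and version 1.
			 */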
612 			p[0] = htonl(nodeid << 16);
613 			p[1] = htonl((0x5e << 24) | 1);
614 		} else {
615 			/*
616 			 * Unicast packets are sent as block writes to the
617 			 * target's unicast fifo address. If we can't
618 			 * find the node address, we just give up. We
619 			 * could broadcast it but that might overflow
620 			 * the packet size limitations due to the
621 			 * extra GASP header. Note: the hardware
622 			 * address is stored in network byte order to
623 			 * make life easier for ARP.
624 			 */
625 			struct fw_device *fd;
626 			struct fw_eui64 eui;
627 
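			/*
			 * Cache the last destination: the node is re-resolved
			 * and the write-request header rebuilt only when the
			 * target EUI-64 changes.
			 */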
628 			eui.hi = ntohl(destfw->sender_unique_ID_hi);
629 			eui.lo = ntohl(destfw->sender_unique_ID_lo);
630 			if (sc->sc_last_dest.hi != eui.hi ||
631 			    sc->sc_last_dest.lo != eui.lo) {
632 				fd = fw_noderesolve_eui64(fc, &eui);
633 				if (!fd) {
634 					/* error */
635 					ifp->if_oerrors++;
636 					/* XXX set error code */
637 					fwip_output_callback(xfer);
638 					continue;
639 
640 				}
641 				sc->sc_last_hdr.mode.wreqb.dst =
642 				    FWLOCALBUS | fd->dst;
643 				sc->sc_last_hdr.mode.wreqb.tlrt = 0;
644 				sc->sc_last_hdr.mode.wreqb.tcode =
645 				    FWTCODE_WREQB;
646 				sc->sc_last_hdr.mode.wreqb.pri = 0;
647 				sc->sc_last_hdr.mode.wreqb.src = nodeid;
648 				sc->sc_last_hdr.mode.wreqb.dest_hi =
649 					ntohs(destfw->sender_unicast_FIFO_hi);
650 				sc->sc_last_hdr.mode.wreqb.dest_lo =
651 					ntohl(destfw->sender_unicast_FIFO_lo);
652 				sc->sc_last_hdr.mode.wreqb.extcode = 0;
653 				sc->sc_last_dest = eui;
654 			}
655 
656 			fp->mode.wreqb = sc->sc_last_hdr.mode.wreqb;
657 			fp->mode.wreqb.len = m->m_pkthdr.len;
658 			xfer->send.spd = min(destfw->sspd, fc->speed);
659 		}
660 
661 		xfer->send.pay_len = m->m_pkthdr.len;
662 
663 		error = fw_asyreq(fc, -1, xfer);
664 		if (error == EAGAIN) {
665 			/*
666 			 * We ran out of tlabels - requeue the packet
667 			 * for later transmission.
668 			 */
669 			xfer->mbuf = 0;
670 			mutex_enter(&sc->sc_mtx);
671 			STAILQ_INSERT_TAIL(&sc->sc_xferlist, xfer, link);
672 			mutex_exit(&sc->sc_mtx);
673 			IF_PREPEND(&ifp->if_snd, m);
674 			break;
675 		}
676 		if (error) {
677 			/* error */
678 			ifp->if_oerrors++;
679 			/* XXX set error code */
680 			fwip_output_callback(xfer);
681 			continue;
682 		} else {
683 			ifp->if_opackets++;
684 			i++;
685 		}
686 	}
687 #if 0
688 	if (i > 1)
689 		aprint_normal("%d queued\n", i);
690 #endif
691 	if (i > 0)
692 		xferq->start(fc);
693 }
694 
695 /* Async. stream input */
696 static void
697 fwip_stream_input(struct fw_xferq *xferq)
698 {
699 	struct mbuf *m, *m0;
700 	struct m_tag *mtag;
701 	struct ifnet *ifp;
702 	struct fwip_softc *sc;
703 	struct fw_bulkxfer *sxfer;
704 	struct fw_pkt *fp;
705 	uint16_t src;
706 	uint32_t *p;
707 
708 	sc = (struct fwip_softc *)xferq->sc;
709 	ifp = sc->sc_eth.fwip_ifp;
710 	while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
711 		STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
712 		fp = mtod(sxfer->mbuf, struct fw_pkt *);
713 		if (sc->sc_fd.fc->irx_post != NULL)
714 			sc->sc_fd.fc->irx_post(sc->sc_fd.fc, fp->mode.ld);
715 		m = sxfer->mbuf;
716 
717 		/* insert new rbuf */
718 		sxfer->mbuf = m0 = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
719 		if (m0 != NULL) {
720 			m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
721 			STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);
722 		} else
723 			aprint_error_ifnet(ifp,
724 			    "fwip_stream_input: m_getcl failed\n");
725 
726 		/*
727 		 * We must have a GASP header - leave the
728 		 * encapsulation sanity checks to the generic
729 		 * code. Remember that we also have the firewire async
730 		 * stream header even though that isn't accounted for
731 		 * in mode.stream.len.
732 		 */
733 		if (sxfer->resp != 0 ||
734 		    fp->mode.stream.len < 2 * sizeof(uint32_t)) {
735 			m_freem(m);
736 			ifp->if_ierrors++;
737 			continue;
738 		}
739 		m->m_len = m->m_pkthdr.len = fp->mode.stream.len
740 			+ sizeof(fp->mode.stream);
741 
742 		/*
743 		 * If we received the packet on the broadcast channel,
744 		 * mark it as broadcast, otherwise we assume it must
745 		 * be multicast.
746 		 */
747 		if (fp->mode.stream.chtag == broadcast_channel)
748 			m->m_flags |= M_BCAST;
749 		else
750 			m->m_flags |= M_MCAST;
751 
752 		/*
753 		 * Make sure we recognise the GASP specifier and
754 		 * version.
755 		 */
756 		p = mtod(m, uint32_t *);
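		/*
		 * p[0] is the async stream header; the 24-bit GASP specifier
		 * ID spans the low 16 bits of p[1] and the top 8 bits of p[2],
		 * and the version sits in the low 24 bits of p[2].
		 */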
757 		if ((((ntohl(p[1]) & 0xffff) << 8) | ntohl(p[2]) >> 24) !=
758 								0x00005e ||
759 		    (ntohl(p[2]) & 0xffffff) != 1) {
760 			FWIPDEBUG(ifp, "Unrecognised GASP header %#08x %#08x\n",
761 			    ntohl(p[1]), ntohl(p[2]));
762 			m_freem(m);
763 			ifp->if_ierrors++;
764 			continue;
765 		}
766 
767 		/*
768 		 * Record the sender ID for possible BPF usage.
769 		 */
770 		src = ntohl(p[1]) >> 16;
771 		if (ifp->if_bpf) {
772 			mtag = m_tag_get(MTAG_FIREWIRE_SENDER_EUID,
773 			    2 * sizeof(uint32_t), M_NOWAIT);
774 			if (mtag) {
775 				/* bpf wants it in network byte order */
776 				struct fw_device *fd;
777 				uint32_t *p2 = (uint32_t *) (mtag + 1);
778 
779 				fd = fw_noderesolve_nodeid(sc->sc_fd.fc,
780 				    src & 0x3f);
781 				if (fd) {
782 					p2[0] = htonl(fd->eui.hi);
783 					p2[1] = htonl(fd->eui.lo);
784 				} else {
785 					p2[0] = 0;
786 					p2[1] = 0;
787 				}
788 				m_tag_prepend(m, mtag);
789 			}
790 		}
791 
792 		/*
793 		 * Trim off the async stream header and the two GASP header quadlets
794 		 */
795 		m_adj(m, 3*sizeof(uint32_t));
796 		m->m_pkthdr.rcvif = ifp;
797 		ieee1394_input(ifp, m, src);
798 		ifp->if_ipackets++;
799 	}
800 	if (STAILQ_FIRST(&xferq->stfree) != NULL)
801 		sc->sc_fd.fc->irx_enable(sc->sc_fd.fc, sc->sc_dma_ch);
802 }
803 
804 static inline void
805 fwip_unicast_input_recycle(struct fwip_softc *sc, struct fw_xfer *xfer)
806 {
807 	struct mbuf *m;
808 
809 	/*
810 	 * We have finished with a unicast xfer. Allocate a new
811 	 * cluster and stick it on the back of the input queue.
812 	 */
813 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		aprint_error_dev(sc->sc_fd.dev,
		    "fwip_unicast_input_recycle: m_getcl failed\n");
		/* XXX drop this xfer rather than dereference a NULL mbuf */
		return;
	}
817 	xfer->recv.payload = mtod(m, uint32_t *);
818 	xfer->recv.pay_len = MCLBYTES;
819 	xfer->mbuf = m;
820 	mutex_enter(&sc->sc_fwb.fwb_mtx);
821 	STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link);
822 	mutex_exit(&sc->sc_fwb.fwb_mtx);
823 }
824 
825 static void
826 fwip_unicast_input(struct fw_xfer *xfer)
827 {
828 	uint64_t address;
829 	struct mbuf *m;
830 	struct m_tag *mtag;
831 	struct ifnet *ifp;
832 	struct fwip_softc *sc;
833 	struct fw_pkt *fp;
834 	int rtcode;
835 
836 	sc = (struct fwip_softc *)xfer->sc;
837 	ifp = sc->sc_eth.fwip_ifp;
838 	m = xfer->mbuf;
839 	xfer->mbuf = 0;
840 	fp = &xfer->recv.hdr;
841 
842 	/*
843 	 * Check the fifo address - we only accept addresses of
844 	 * exactly INET_FIFO.
845 	 */
846 	address = ((uint64_t)fp->mode.wreqb.dest_hi << 32)
847 		| fp->mode.wreqb.dest_lo;
848 	if (fp->mode.wreqb.tcode != FWTCODE_WREQB) {
849 		rtcode = FWRCODE_ER_TYPE;
850 	} else if (address != INET_FIFO) {
851 		rtcode = FWRCODE_ER_ADDR;
852 	} else {
853 		rtcode = FWRCODE_COMPLETE;
854 	}
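	/*
	 * The controller has already answered the request for us (see the
	 * INET_FIFO comment above); rtcode is only used locally to decide
	 * whether we keep the payload.
	 */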
855 
856 	/*
857 	 * Pick up a new mbuf and stick it on the back of the receive
858 	 * queue.
859 	 */
860 	fwip_unicast_input_recycle(sc, xfer);
861 
862 	/*
863 	 * If we've already rejected the packet, give up now.
864 	 */
865 	if (rtcode != FWRCODE_COMPLETE) {
866 		m_freem(m);
867 		ifp->if_ierrors++;
868 		return;
869 	}
870 
871 	if (ifp->if_bpf) {
872 		/*
873 		 * Record the sender ID for possible BPF usage.
874 		 */
875 		mtag = m_tag_get(MTAG_FIREWIRE_SENDER_EUID,
876 		    2 * sizeof(uint32_t), M_NOWAIT);
877 		if (mtag) {
878 			/* bpf wants it in network byte order */
879 			struct fw_device *fd;
880 			uint32_t *p = (uint32_t *) (mtag + 1);
881 
882 			fd = fw_noderesolve_nodeid(sc->sc_fd.fc,
883 			    fp->mode.wreqb.src & 0x3f);
884 			if (fd) {
885 				p[0] = htonl(fd->eui.hi);
886 				p[1] = htonl(fd->eui.lo);
887 			} else {
888 				p[0] = 0;
889 				p[1] = 0;
890 			}
891 			m_tag_prepend(m, mtag);
892 		}
893 	}
894 
895 	/*
896 	 * Hand off to the generic encapsulation code. We don't use
897 	 * ifp->if_input so that we can pass the source nodeid as an
898 	 * argument to facilitate link-level fragment reassembly.
899 	 */
900 	m->m_len = m->m_pkthdr.len = fp->mode.wreqb.len;
901 	m->m_pkthdr.rcvif = ifp;
902 	ieee1394_input(ifp, m, fp->mode.wreqb.src);
903 	ifp->if_ipackets++;
904 }
905