xref: /netbsd-src/sys/dev/ieee1394/if_fwip.c (revision 481d3881954fd794ca5f2d880b68c53a5db8620e)
1 /*	$NetBSD: if_fwip.c,v 1.32 2024/07/05 04:31:51 rin Exp $	*/
2 /*-
3  * Copyright (c) 2004
4  *	Doug Rabson
5  * Copyright (c) 2002-2003
6  * 	Hidetoshi Shimokawa. All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *
19  *	This product includes software developed by Hidetoshi Shimokawa.
20  *
21  * 4. Neither the name of the author nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  * $FreeBSD: src/sys/dev/firewire/if_fwip.c,v 1.18 2009/02/09 16:58:18 fjoe Exp $
38  */
39 
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: if_fwip.c,v 1.32 2024/07/05 04:31:51 rin Exp $");
42 
43 #include <sys/param.h>
44 #include <sys/bus.h>
45 #include <sys/device.h>
46 #include <sys/errno.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/mutex.h>
50 #include <sys/sysctl.h>
51 
52 #include <net/bpf.h>
53 #include <net/if.h>
54 #include <net/if_ieee1394.h>
55 #include <net/if_types.h>
56 
57 #include <dev/ieee1394/firewire.h>
58 #include <dev/ieee1394/firewirereg.h>
59 #include <dev/ieee1394/iec13213.h>
60 #include <dev/ieee1394/if_fwipvar.h>
61 
/*
 * We really need a mechanism for allocating regions in the FIFO
 * address space. We pick a address in the OHCI controller's 'middle'
 * address space. This means that the controller will automatically
 * send responses for us, which is fine since we don't have any
 * important information to put in the response anyway.
 */
#define INET_FIFO	0xfffe00000000LL

/* Debug printf, gated on the hw.fwip.if_fwip_debug sysctl below. */
#define FWIPDEBUG	if (fwipdebug) aprint_debug_ifnet
/* Transmit xfer pool size, one below the global firewire queue max. */
#define TX_MAX_QUEUE	(FWMAXQUEUE - 1)


/*
 * Link-level (ARP/NDP) hardware address for IP over IEEE1394:
 * the sender's EUI-64 plus its unicast FIFO address and capabilities.
 * Multi-byte fields are kept in network byte order (see fwipattach()
 * and the unicast output path) to make life easier for ARP.
 */
struct fw_hwaddr {
	uint32_t		sender_unique_ID_hi;	/* EUI-64 high 32 bits */
	uint32_t		sender_unique_ID_lo;	/* EUI-64 low 32 bits */
	uint8_t			sender_max_rec;		/* max_rec capability */
	uint8_t			sspd;			/* max speed */
	uint16_t		sender_unicast_FIFO_hi;	/* FIFO addr high 16 bits */
	uint32_t		sender_unicast_FIFO_lo;	/* FIFO addr low 32 bits */
};
83 
84 
/* autoconfiguration glue */
static int fwipmatch(device_t, cfdata_t, void *);
static void fwipattach(device_t, device_t, void *);
static int fwipdetach(device_t, int);
static int fwipactivate(device_t, enum devact);

/* network interface */
static void fwip_start(struct ifnet *);
static int fwip_ioctl(struct ifnet *, u_long, void *);
static int fwip_init(struct ifnet *);
static void fwip_stop(struct ifnet *, int);

/* firewire bus callbacks and transfer completion handlers */
static void fwip_post_busreset(void *);
static void fwip_output_callback(struct fw_xfer *);
static void fwip_async_output(struct fwip_softc *, struct ifnet *);
static void fwip_stream_input(struct fw_xferq *);
static void fwip_unicast_input(struct fw_xfer *);

/* Tunables; the first two are exported via the hw.fwip sysctl subtree. */
static int fwipdebug = 0;			/* enable FWIPDEBUG output */
static int broadcast_channel = 0xc0 | 0x1f; /*  tag | channel(XXX) */
static int tx_speed = 2;		/* TX speed; <0 = use controller speed */
static int rx_queue_len = FWMAXQUEUE;	/* RX buffer/xfer pool size */
106 
/*
 * Setup sysctl(3) MIB, hw.fwip.*
 *
 * TBD condition CTLFLAG_PERMANENT on being a module or not
 */
SYSCTL_SETUP(sysctl_fwip, "sysctl fwip(4) subtree setup")
{
	int rc, fwip_node_num;
	const struct sysctlnode *node;

	/* hw.fwip parent node */
	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "fwip",
	    SYSCTL_DESCR("fwip controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0) {
		goto err;
	}
	fwip_node_num = node->sysctl_num;

	/* fwip RX queue length */
	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
	    "rx_queue_len", SYSCTL_DESCR("Length of the receive queue"),
	    NULL, 0, &rx_queue_len,
	    0, CTL_HW, fwip_node_num, CTL_CREATE, CTL_EOL)) != 0) {
		goto err;
	}

	/* fwip driver debug flag */
	if ((rc = sysctl_createv(clog, 0, NULL, &node,
	    CTLFLAG_PERMANENT | CTLFLAG_READWRITE, CTLTYPE_INT,
	    "if_fwip_debug", SYSCTL_DESCR("fwip driver debug flag"),
	    NULL, 0, &fwipdebug,
	    0, CTL_HW, fwip_node_num, CTL_CREATE, CTL_EOL)) != 0) {
		goto err;
	}

	return;

err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}
148 
149 
/* Autoconfiguration attachment of fwip devices to the firewire bus. */
CFATTACH_DECL_NEW(fwip, sizeof(struct fwip_softc),
    fwipmatch, fwipattach, fwipdetach, fwipactivate);
152 
153 
154 static int
fwipmatch(device_t parent,cfdata_t cf,void * aux)155 fwipmatch(device_t parent, cfdata_t cf, void *aux)
156 {
157 	struct fw_attach_args *fwa = aux;
158 
159 	if (strcmp(fwa->name, "fwip") == 0)
160 		return 1;
161 	return 0;
162 }
163 
/*
 * Attach a fwip instance: initialize the softc and its locks, build
 * the IEEE1394 hardware address (EUI-64 + unicast FIFO address, in
 * network byte order), then attach and register the network interface.
 */
static void
fwipattach(device_t parent, device_t self, void *aux)
{
	struct fwip_softc *sc = device_private(self);
	struct fw_attach_args *fwa = (struct fw_attach_args *)aux;
	struct fw_hwaddr *hwaddr;
	struct ifnet *ifp;

	aprint_naive("\n");
	aprint_normal(": IP over IEEE1394\n");

	sc->sc_fd.dev = self;
	sc->sc_eth.fwip_ifp = &sc->sc_eth.fwcom.fc_if;
	hwaddr = (struct fw_hwaddr *)&sc->sc_eth.fwcom.ic_hwaddr;

	ifp = sc->sc_eth.fwip_ifp;

	mutex_init(&sc->sc_fwb.fwb_mtx, MUTEX_DEFAULT, IPL_NET);
	mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NET);

	/* XXX -- no RX DMA channel until fwip_init() opens one */
	sc->sc_dma_ch = -1;

	sc->sc_fd.fc = fwa->fc;
	/* A negative tx_speed tunable means "use the controller's speed". */
	if (tx_speed < 0)
		tx_speed = sc->sc_fd.fc->speed;

	sc->sc_fd.post_explore = NULL;
	sc->sc_fd.post_busreset = fwip_post_busreset;
	sc->sc_eth.fwip = sc;

	/*
	 * Encode our hardware the way that arp likes it.
	 */
	hwaddr->sender_unique_ID_hi = htonl(sc->sc_fd.fc->eui.hi);
	hwaddr->sender_unique_ID_lo = htonl(sc->sc_fd.fc->eui.lo);
	hwaddr->sender_max_rec = sc->sc_fd.fc->maxrec;
	hwaddr->sspd = sc->sc_fd.fc->speed;
	hwaddr->sender_unicast_FIFO_hi = htons((uint16_t)(INET_FIFO >> 32));
	hwaddr->sender_unicast_FIFO_lo = htonl((uint32_t)INET_FIFO);

	/* fill the rest and attach interface */
	ifp->if_softc = &sc->sc_eth;

	strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_start = fwip_start;
	ifp->if_ioctl = fwip_ioctl;
	ifp->if_init = fwip_init;
	ifp->if_stop = fwip_stop;
	ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST);
	IFQ_SET_READY(&ifp->if_snd);
	IFQ_SET_MAXLEN(&ifp->if_snd, TX_MAX_QUEUE);

	if_attach(ifp);
	ieee1394_ifattach(ifp, (const struct ieee1394_hwaddr *)hwaddr);

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
	else
		pmf_class_network_register(self, ifp);

	FWIPDEBUG(ifp, "interface created\n");
	return;
}
228 
/*
 * Detach: stop the interface (releasing DMA and xfer resources), tear
 * down the IEEE1394 and generic ifnet attachments, then destroy the
 * locks created at attach time.  Always succeeds.
 */
static int
fwipdetach(device_t self, int flags)
{
	struct fwip_softc *sc = device_private(self);
	struct ifnet *ifp = sc->sc_eth.fwip_ifp;

	fwip_stop(sc->sc_eth.fwip_ifp, 1);
	ieee1394_ifdetach(ifp);
	if_detach(ifp);
	mutex_destroy(&sc->sc_mtx);
	mutex_destroy(&sc->sc_fwb.fwb_mtx);
	return 0;
}
242 
243 static int
fwipactivate(device_t self,enum devact act)244 fwipactivate(device_t self, enum devact act)
245 {
246 	struct fwip_softc *sc = device_private(self);
247 
248 	switch (act) {
249 	case DVACT_DEACTIVATE:
250 		if_deactivate(sc->sc_eth.fwip_ifp);
251 		return 0;
252 	default:
253 		return EOPNOTSUPP;
254 	}
255 }
256 
257 static void
fwip_start(struct ifnet * ifp)258 fwip_start(struct ifnet *ifp)
259 {
260 	struct fwip_softc *sc = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
261 
262 	FWIPDEBUG(ifp, "starting\n");
263 
264 	if (sc->sc_dma_ch < 0) {
265 		struct mbuf *m = NULL;
266 
267 		FWIPDEBUG(ifp, "not ready\n");
268 
269 		do {
270 			IF_DEQUEUE(&ifp->if_snd, m);
271 			m_freem(m);
272 			if_statinc(ifp, if_oerrors);
273 		} while (m != NULL);
274 
275 		return;
276 	}
277 
278 	ifp->if_flags |= IFF_OACTIVE;
279 
280 	if (ifp->if_snd.ifq_len != 0)
281 		fwip_async_output(sc, ifp);
282 
283 	ifp->if_flags &= ~IFF_OACTIVE;
284 }
285 
/*
 * ifnet ioctl handler.  SIOCSIFFLAGS transitions the interface
 * between running and stopped; multicast list changes need no local
 * action; everything else is delegated to the generic IEEE1394
 * handler (ENETRESET from it is treated as success since there is no
 * filter state to reprogram here).
 */
static int
fwip_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
		case IFF_RUNNING:
			/* Running but no longer up: stop it. */
			fwip_stop(ifp, 0);
			break;
		case IFF_UP:
			/* Up but not yet running: initialize it. */
			fwip_init(ifp);
			break;
		default:
			break;
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Nothing to do. */
		break;

	default:
		error = ieee1394_ioctl(ifp, cmd, data);
		if (error == ENETRESET)
			error = 0;
		break;
	}

	splx(s);

	return error;
}
324 
/*
 * ifnet init routine.  On first initialization (sc_dma_ch < 0) this
 * opens an isochronous receive DMA channel for the broadcast/GASP
 * stream, sets up its buffer ring, binds the INET_FIFO address range
 * for unicast input, and pre-allocates receive and transmit fw_xfers.
 * On re-initialization those resources are reused.  Finally the cached
 * unicast destination is invalidated and receive DMA is (re)started.
 *
 * Returns 0 on success, ENXIO if no DMA channel is available, or
 * ENOMEM on allocation failure.
 */
static int
fwip_init(struct ifnet *ifp)
{
	struct fwip_softc *sc = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
	struct firewire_comm *fc;
	struct fw_xferq *xferq;
	struct fw_xfer *xfer;
	struct mbuf *m;
	int i;

	FWIPDEBUG(ifp, "initializing\n");

	fc = sc->sc_fd.fc;
	if (sc->sc_dma_ch < 0) {
		sc->sc_dma_ch = fw_open_isodma(fc, /* tx */0);
		if (sc->sc_dma_ch < 0)
			return ENXIO;
		xferq = fc->ir[sc->sc_dma_ch];
		xferq->flag |=
		    FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_STREAM;
		/* Low byte of the flags carries the channel/tag to listen on. */
		xferq->flag &= ~0xff;
		xferq->flag |= broadcast_channel & 0xff;
		/* register fwip_input handler */
		xferq->sc = (void *) sc;
		xferq->hand = fwip_stream_input;
		/* One cluster-sized mbuf per ring slot, one packet per chunk. */
		xferq->bnchunk = rx_queue_len;
		xferq->bnpacket = 1;
		xferq->psize = MCLBYTES;
		xferq->queued = 0;
		xferq->buf = NULL;
		xferq->bulkxfer = (struct fw_bulkxfer *) malloc(
			sizeof(struct fw_bulkxfer) * xferq->bnchunk,
							M_FW, M_WAITOK);
		/* Defensive: malloc(M_WAITOK) should not return NULL. */
		if (xferq->bulkxfer == NULL) {
			aprint_error_ifnet(ifp, "if_fwip: malloc failed\n");
			return ENOMEM;
		}
		STAILQ_INIT(&xferq->stvalid);
		STAILQ_INIT(&xferq->stfree);
		STAILQ_INIT(&xferq->stdma);
		xferq->stproc = NULL;
		/* Populate the stream receive ring with mbuf clusters. */
		for (i = 0; i < xferq->bnchunk; i++) {
			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
			xferq->bulkxfer[i].mbuf = m;
			if (m != NULL) {
				m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
				STAILQ_INSERT_TAIL(&xferq->stfree,
						&xferq->bulkxfer[i], link);
			} else
				aprint_error_ifnet(ifp,
				    "fwip_as_input: m_getcl failed\n");
		}

		/* Accept unicast block writes to exactly this FIFO window. */
		sc->sc_fwb.start = INET_FIFO;
		sc->sc_fwb.end = INET_FIFO + 16384; /* S3200 packet size */

		/* pre-allocate xfer */
		STAILQ_INIT(&sc->sc_fwb.xferlist);
		for (i = 0; i < rx_queue_len; i++) {
			xfer = fw_xfer_alloc(M_FW);
			if (xfer == NULL)
				break;
			m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
			xfer->recv.payload = mtod(m, uint32_t *);
			xfer->recv.pay_len = MCLBYTES;
			xfer->hand = fwip_unicast_input;
			xfer->fc = fc;
			xfer->sc = (void *) sc;
			xfer->mbuf = m;
			STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link);
		}
		fw_bindadd(fc, &sc->sc_fwb);

		/* Pre-allocate transmit xfers (recycled by the TX callback). */
		STAILQ_INIT(&sc->sc_xferlist);
		for (i = 0; i < TX_MAX_QUEUE; i++) {
			xfer = fw_xfer_alloc(M_FW);
			if (xfer == NULL)
				break;
			xfer->send.spd = tx_speed;
			xfer->fc = sc->sc_fd.fc;
			xfer->sc = (void *)sc;
			xfer->hand = fwip_output_callback;
			STAILQ_INSERT_TAIL(&sc->sc_xferlist, xfer, link);
		}
	} else
		xferq = fc->ir[sc->sc_dma_ch];

	/* Invalidate the cached unicast destination header. */
	sc->sc_last_dest.hi = 0;
	sc->sc_last_dest.lo = 0;

	/* start dma */
	if ((xferq->flag & FWXFERQ_RUNNING) == 0)
		fc->irx_enable(fc, sc->sc_dma_ch);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

#if 0
	/* attempt to start output */
	fwip_start(ifp);
#endif
	return 0;
}
428 
/*
 * ifnet stop routine.  Disables receive DMA, unbinds the unicast FIFO
 * handler and frees every buffer and fw_xfer allocated by fwip_init(),
 * then marks the interface down.  The `disable' argument is unused.
 */
static void
fwip_stop(struct ifnet *ifp, int disable)
{
	struct fwip_softc *sc = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
	struct firewire_comm *fc = sc->sc_fd.fc;
	struct fw_xferq *xferq;
	struct fw_xfer *xfer, *next;
	int i;

	if (sc->sc_dma_ch >= 0) {
		xferq = fc->ir[sc->sc_dma_ch];

		if (xferq->flag & FWXFERQ_RUNNING)
			fc->irx_disable(fc, sc->sc_dma_ch);
		/* Clear the mode/channel bits set up in fwip_init(). */
		xferq->flag &=
			~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM |
			FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK);
		xferq->hand = NULL;

		/* Release the stream receive ring. */
		for (i = 0; i < xferq->bnchunk; i++)
			m_freem(xferq->bulkxfer[i].mbuf);
		free(xferq->bulkxfer, M_FW);

		/* Unbind the unicast FIFO and free its pre-allocated xfers. */
		fw_bindremove(fc, &sc->sc_fwb);
		for (xfer = STAILQ_FIRST(&sc->sc_fwb.xferlist); xfer != NULL;
		    xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}

		/* Free the transmit xfer pool. */
		for (xfer = STAILQ_FIRST(&sc->sc_xferlist); xfer != NULL;
		    xfer = next) {
			next = STAILQ_NEXT(xfer, link);
			fw_xfer_free(xfer);
		}

		xferq->bulkxfer = NULL;
		sc->sc_dma_ch = -1;
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}
471 
/*
 * Bus-reset callback.  Re-advertises the RFC2734 (IPv4) and RFC3146
 * (IPv6) unit directories in our configuration ROM, invalidates the
 * cached unicast destination (node IDs may have changed), and drains
 * packets queued during the reset.
 */
static void
fwip_post_busreset(void *arg)
{
	struct fwip_softc *sc = arg;
	struct crom_src *src;
	struct crom_chunk *root;

	src = sc->sc_fd.fc->crom_src;
	root = sc->sc_fd.fc->crom_root;

	/* RFC2734 IPv4 over IEEE1394 */
	memset(&sc->sc_unit4, 0, sizeof(struct crom_chunk));
	crom_add_chunk(src, root, &sc->sc_unit4, CROM_UDIR);
	crom_add_entry(&sc->sc_unit4, CSRKEY_SPEC, CSRVAL_IETF);
	crom_add_simple_text(src, &sc->sc_unit4, &sc->sc_spec4, "IANA");
	crom_add_entry(&sc->sc_unit4, CSRKEY_VER, 1);
	crom_add_simple_text(src, &sc->sc_unit4, &sc->sc_ver4, "IPv4");

	/* RFC3146 IPv6 over IEEE1394 */
	memset(&sc->sc_unit6, 0, sizeof(struct crom_chunk));
	crom_add_chunk(src, root, &sc->sc_unit6, CROM_UDIR);
	crom_add_entry(&sc->sc_unit6, CSRKEY_SPEC, CSRVAL_IETF);
	crom_add_simple_text(src, &sc->sc_unit6, &sc->sc_spec6, "IANA");
	crom_add_entry(&sc->sc_unit6, CSRKEY_VER, 2);
	crom_add_simple_text(src, &sc->sc_unit6, &sc->sc_ver6, "IPv6");

	/* Cached destination header is stale after a reset. */
	sc->sc_last_dest.hi = 0;
	sc->sc_last_dest.lo = 0;
	ieee1394_drain(sc->sc_eth.fwip_ifp);
}
502 
/*
 * Completion handler for asynchronous transmits.  Counts an output
 * error on a non-zero response, frees the transmitted mbuf, returns
 * the fw_xfer to the free pool, and restarts output in case packets
 * queued up while all xfers were busy.
 */
static void
fwip_output_callback(struct fw_xfer *xfer)
{
	struct fwip_softc *sc = (struct fwip_softc *)xfer->sc;
	struct ifnet *ifp;

	ifp = sc->sc_eth.fwip_ifp;
	/* XXX error check */
	FWIPDEBUG(ifp, "resp = %d\n", xfer->resp);
	if (xfer->resp != 0)
		if_statinc(ifp, if_oerrors);

	m_freem(xfer->mbuf);
	fw_xfer_unload(xfer);

	mutex_enter(&sc->sc_mtx);
	STAILQ_INSERT_TAIL(&sc->sc_xferlist, xfer, link);
	mutex_exit(&sc->sc_mtx);

	/* for queue full */
	if (ifp->if_snd.ifq_head != NULL)
		fwip_start(ifp);
}
526 
/*
 * Async. stream output.
 *
 * Drains the interface send queue into the async transmit queue.
 * Broadcast packets go out as GASP packets on the broadcast channel;
 * unicast packets become block writes to the destination's unicast
 * FIFO, using a cached header when consecutive packets share a
 * destination.  Stops early when the transmit queue fills, the xfer
 * pool empties, or transaction labels run out (EAGAIN).
 */
static void
fwip_async_output(struct fwip_softc *sc, struct ifnet *ifp)
{
	struct firewire_comm *fc = sc->sc_fd.fc;
	struct mbuf *m;
	struct m_tag *mtag;
	struct fw_hwaddr *destfw;
	struct fw_xfer *xfer;
	struct fw_xferq *xferq;
	struct fw_pkt *fp;
	uint16_t nodeid;
	int error;
	int i = 0;		/* number of xfers queued this call */

	xfer = NULL;
	xferq = fc->atq;
	while ((xferq->queued < xferq->maxq - 1) &&
	    (ifp->if_snd.ifq_head != NULL)) {
		mutex_enter(&sc->sc_mtx);
		if (STAILQ_EMPTY(&sc->sc_xferlist)) {
			mutex_exit(&sc->sc_mtx);
#if 0
			aprint_normal("if_fwip: lack of xfer\n");
#endif
			break;
		}
		/* Peek only; the packet is dequeued after fw_asyreq succeeds. */
		IF_POLL(&ifp->if_snd, m);
		if (m == NULL) {
			mutex_exit(&sc->sc_mtx);
			break;
		}
		xfer = STAILQ_FIRST(&sc->sc_xferlist);
		STAILQ_REMOVE_HEAD(&sc->sc_xferlist, link);
		mutex_exit(&sc->sc_mtx);

		/*
		 * Dig out the link-level address which
		 * firewire_output got via arp or neighbour
		 * discovery. If we don't have a link-level address,
		 * just stick the thing on the broadcast channel.
		 */
		mtag = m_tag_find(m, MTAG_FIREWIRE_HWADDR);
		if (mtag == NULL)
			destfw = 0;
		else
			destfw = (struct fw_hwaddr *) (mtag + 1);

		/*
		 * Put the mbuf in the xfer early in case we hit an
		 * error case below - fwip_output_callback will free
		 * the mbuf.
		 */
		xfer->mbuf = m;

		/*
		 * We use the arp result (if any) to add a suitable firewire
		 * packet header before handing off to the bus.
		 */
		fp = &xfer->send.hdr;
		nodeid = FWLOCALBUS | fc->nodeid;
		if ((m->m_flags & M_BCAST) || !destfw) {
			/*
			 * Broadcast packets are sent as GASP packets with
			 * specifier ID 0x00005e, version 1 on the broadcast
			 * channel. To be conservative, we send at the
			 * slowest possible speed.
			 */
			uint32_t *p;

			M_PREPEND(m, 2 * sizeof(uint32_t), M_DONTWAIT);
			p = mtod(m, uint32_t *);
			fp->mode.stream.len = m->m_pkthdr.len;
			fp->mode.stream.chtag = broadcast_channel;
			fp->mode.stream.tcode = FWTCODE_STREAM;
			fp->mode.stream.sy = 0;
			xfer->send.spd = 0;
			/* GASP header: source node ID, then specifier/version. */
			p[0] = htonl(nodeid << 16);
			p[1] = htonl((0x5e << 24) | 1);
		} else {
			/*
			 * Unicast packets are sent as block writes to the
			 * target's unicast fifo address. If we can't
			 * find the node address, we just give up. We
			 * could broadcast it but that might overflow
			 * the packet size limitations due to the
			 * extra GASP header. Note: the hardware
			 * address is stored in network byte order to
			 * make life easier for ARP.
			 */
			struct fw_device *fd;
			struct fw_eui64 eui;

			eui.hi = ntohl(destfw->sender_unique_ID_hi);
			eui.lo = ntohl(destfw->sender_unique_ID_lo);
			/* Rebuild the write-request header on destination change. */
			if (sc->sc_last_dest.hi != eui.hi ||
			    sc->sc_last_dest.lo != eui.lo) {
				fd = fw_noderesolve_eui64(fc, &eui);
				if (!fd) {
					/* error */
					if_statinc(ifp, if_oerrors);
					/* XXX set error code */
					fwip_output_callback(xfer);
					continue;

				}
				sc->sc_last_hdr.mode.wreqb.dst =
				    FWLOCALBUS | fd->dst;
				sc->sc_last_hdr.mode.wreqb.tlrt = 0;
				sc->sc_last_hdr.mode.wreqb.tcode =
				    FWTCODE_WREQB;
				sc->sc_last_hdr.mode.wreqb.pri = 0;
				sc->sc_last_hdr.mode.wreqb.src = nodeid;
				sc->sc_last_hdr.mode.wreqb.dest_hi =
					ntohs(destfw->sender_unicast_FIFO_hi);
				sc->sc_last_hdr.mode.wreqb.dest_lo =
					ntohl(destfw->sender_unicast_FIFO_lo);
				sc->sc_last_hdr.mode.wreqb.extcode = 0;
				sc->sc_last_dest = eui;
			}

			fp->mode.wreqb = sc->sc_last_hdr.mode.wreqb;
			fp->mode.wreqb.len = m->m_pkthdr.len;
			xfer->send.spd = uimin(destfw->sspd, fc->speed);
		}

		xfer->send.pay_len = m->m_pkthdr.len;

		error = fw_asyreq(fc, -1, xfer);
		if (error == EAGAIN) {
			/*
			 * We ran out of tlabels - requeue the packet
			 * for later transmission.
			 */
			xfer->mbuf = 0;
			mutex_enter(&sc->sc_mtx);
			STAILQ_INSERT_TAIL(&sc->sc_xferlist, xfer, link);
			mutex_exit(&sc->sc_mtx);
			break;
		}
		/* Now the packet is committed (or failed): remove it. */
		IF_DEQUEUE(&ifp->if_snd, m);
		if (error) {
			/* error */
			if_statinc(ifp, if_oerrors);
			/* XXX set error code */
			fwip_output_callback(xfer);
			continue;
		} else {
			if_statinc(ifp, if_opackets);
			i++;
		}
	}
#if 0
	if (i > 1)
		aprint_normal("%d queued\n", i);
#endif
	if (i > 0)
		xferq->start(fc);
}
686 
/*
 * Async. stream input (broadcast/GASP receive handler).
 *
 * Runs over the valid chunks of the isochronous receive queue: each
 * received stream packet gets its buffer replaced, its GASP header
 * validated, an optional sender-EUI tag attached for BPF, and is then
 * handed to the generic IEEE1394 input path.
 */
static void
fwip_stream_input(struct fw_xferq *xferq)
{
	struct mbuf *m, *m0;
	struct m_tag *mtag;
	struct ifnet *ifp;
	struct fwip_softc *sc;
	struct fw_bulkxfer *sxfer;
	struct fw_pkt *fp;
	uint16_t src;
	uint32_t *p;

	sc = (struct fwip_softc *)xferq->sc;
	ifp = sc->sc_eth.fwip_ifp;
	while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
		STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
		fp = mtod(sxfer->mbuf, struct fw_pkt *);
		/* Give the controller driver a chance to post-process. */
		if (sc->sc_fd.fc->irx_post != NULL)
			sc->sc_fd.fc->irx_post(sc->sc_fd.fc, fp->mode.ld);
		m = sxfer->mbuf;

		/* insert new rbuf */
		sxfer->mbuf = m0 = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m0 != NULL) {
			m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
			STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);
		} else
			aprint_error_ifnet(ifp,
			    "fwip_as_input: m_getcl failed\n");

		/*
		 * We must have a GASP header - leave the
		 * encapsulation sanity checks to the generic
		 * code. Remember that we also have the firewire async
		 * stream header even though that isn't accounted for
		 * in mode.stream.len.
		 */
		if (sxfer->resp != 0 ||
		    fp->mode.stream.len < 2 * sizeof(uint32_t)) {
			m_freem(m);
			if_statinc(ifp, if_ierrors);
			continue;
		}
		m->m_len = m->m_pkthdr.len = fp->mode.stream.len
			+ sizeof(fp->mode.stream);

		/*
		 * If we received the packet on the broadcast channel,
		 * mark it as broadcast, otherwise we assume it must
		 * be multicast.
		 */
		if (fp->mode.stream.chtag == broadcast_channel)
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;

		/*
		 * Make sure we recognise the GASP specifier and
		 * version.
		 */
		p = mtod(m, uint32_t *);
		if ((((ntohl(p[1]) & 0xffff) << 8) | ntohl(p[2]) >> 24) !=
								0x00005e ||
		    (ntohl(p[2]) & 0xffffff) != 1) {
			FWIPDEBUG(ifp, "Unrecognised GASP header %#08x %#08x\n",
			    ntohl(p[1]), ntohl(p[2]));
			m_freem(m);
			if_statinc(ifp, if_ierrors);
			continue;
		}

		/*
		 * Record the sender ID for possible BPF usage.
		 */
		src = ntohl(p[1]) >> 16;
		if (ifp->if_bpf) {
			mtag = m_tag_get(MTAG_FIREWIRE_SENDER_EUID,
			    2 * sizeof(uint32_t), M_NOWAIT);
			if (mtag) {
				/* bpf wants it in network byte order */
				struct fw_device *fd;
				uint32_t *p2 = (uint32_t *) (mtag + 1);

				fd = fw_noderesolve_nodeid(sc->sc_fd.fc,
				    src & 0x3f);
				if (fd) {
					p2[0] = htonl(fd->eui.hi);
					p2[1] = htonl(fd->eui.lo);
				} else {
					p2[0] = 0;
					p2[1] = 0;
				}
				m_tag_prepend(m, mtag);
			}
		}

		/*
		 * Trim off the GASP header
		 */
		m_adj(m, 3*sizeof(uint32_t));
		m_set_rcvif(m, ifp);
		ieee1394_input(ifp, m, src);
		if_statinc(ifp, if_ipackets);
	}
	/* Re-enable DMA if we managed to refill at least one buffer. */
	if (STAILQ_FIRST(&xferq->stfree) != NULL)
		sc->sc_fd.fc->irx_enable(sc->sc_fd.fc, sc->sc_dma_ch);
}
795 
796 static inline void
fwip_unicast_input_recycle(struct fwip_softc * sc,struct fw_xfer * xfer)797 fwip_unicast_input_recycle(struct fwip_softc *sc, struct fw_xfer *xfer)
798 {
799 	struct mbuf *m;
800 
801 	/*
802 	 * We have finished with a unicast xfer. Allocate a new
803 	 * cluster and stick it on the back of the input queue.
804 	 */
805 	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
806 	if (m == NULL)
807 		aprint_error_dev(sc->sc_fd.dev,
808 		    "fwip_unicast_input_recycle: m_getcl failed\n");
809 	xfer->recv.payload = mtod(m, uint32_t *);
810 	xfer->recv.pay_len = MCLBYTES;
811 	xfer->mbuf = m;
812 	mutex_enter(&sc->sc_fwb.fwb_mtx);
813 	STAILQ_INSERT_TAIL(&sc->sc_fwb.xferlist, xfer, link);
814 	mutex_exit(&sc->sc_fwb.fwb_mtx);
815 }
816 
/*
 * Handler for unicast block writes to our INET_FIFO address window.
 * Validates the tcode and destination offset (the OHCI controller
 * sends the response for us - see the INET_FIFO comment at the top of
 * the file), recycles the receive xfer, optionally tags the packet
 * with the sender's EUI-64 for BPF, and hands accepted payloads to
 * the generic IEEE1394 input path.
 */
static void
fwip_unicast_input(struct fw_xfer *xfer)
{
	uint64_t address;
	struct mbuf *m;
	struct m_tag *mtag;
	struct ifnet *ifp;
	struct fwip_softc *sc;
	struct fw_pkt *fp;
	int rtcode;

	sc = (struct fwip_softc *)xfer->sc;
	ifp = sc->sc_eth.fwip_ifp;
	/* Take ownership of the mbuf before the xfer is recycled. */
	m = xfer->mbuf;
	xfer->mbuf = 0;
	fp = &xfer->recv.hdr;

	/*
	 * Check the fifo address - we only accept addresses of
	 * exactly INET_FIFO.
	 */
	address = ((uint64_t)fp->mode.wreqb.dest_hi << 32)
		| fp->mode.wreqb.dest_lo;
	if (fp->mode.wreqb.tcode != FWTCODE_WREQB) {
		rtcode = FWRCODE_ER_TYPE;
	} else if (address != INET_FIFO) {
		rtcode = FWRCODE_ER_ADDR;
	} else {
		rtcode = FWRCODE_COMPLETE;
	}

	/*
	 * Pick up a new mbuf and stick it on the back of the receive
	 * queue.
	 */
	fwip_unicast_input_recycle(sc, xfer);

	/*
	 * If we've already rejected the packet, give up now.
	 */
	if (rtcode != FWRCODE_COMPLETE) {
		m_freem(m);
		if_statinc(ifp, if_ierrors);
		return;
	}

	if (ifp->if_bpf) {
		/*
		 * Record the sender ID for possible BPF usage.
		 */
		mtag = m_tag_get(MTAG_FIREWIRE_SENDER_EUID,
		    2 * sizeof(uint32_t), M_NOWAIT);
		if (mtag) {
			/* bpf wants it in network byte order */
			struct fw_device *fd;
			uint32_t *p = (uint32_t *) (mtag + 1);

			fd = fw_noderesolve_nodeid(sc->sc_fd.fc,
			    fp->mode.wreqb.src & 0x3f);
			if (fd) {
				p[0] = htonl(fd->eui.hi);
				p[1] = htonl(fd->eui.lo);
			} else {
				p[0] = 0;
				p[1] = 0;
			}
			m_tag_prepend(m, mtag);
		}
	}

	/*
	 * Hand off to the generic encapsulation code. We don't use
	 * ifp->if_input so that we can pass the source nodeid as an
	 * argument to facilitate link-level fragment reassembly.
	 */
	m->m_len = m->m_pkthdr.len = fp->mode.wreqb.len;
	m_set_rcvif(m, ifp);
	ieee1394_input(ifp, m, fp->mode.wreqb.src);
	if_statinc(ifp, if_ipackets);
}
897