1 /*	$NetBSD: fwdev.c,v 1.28 2014/03/16 05:20:28 dholland Exp $	*/
2 /*-
3  * Copyright (c) 2003 Hidetoshi Shimokawa
4  * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the acknowledgement as bellow:
17  *
18  *    This product includes software developed by K. Kobayashi and H. Shimokawa
19  *
20  * 4. The name of the author may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
27  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
31  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  *
35  * $FreeBSD: src/sys/dev/firewire/fwdev.c,v 1.52 2007/06/06 14:31:36 simokawa Exp $
36  *
37  */
38 
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: fwdev.c,v 1.28 2014/03/16 05:20:28 dholland Exp $");
41 
42 #include <sys/param.h>
43 #include <sys/device.h>
44 #include <sys/errno.h>
45 #include <sys/buf.h>
46 #include <sys/bus.h>
47 #include <sys/conf.h>
48 #include <sys/kernel.h>
49 #include <sys/malloc.h>
50 #include <sys/mbuf.h>
51 #include <sys/poll.h>
52 #include <sys/proc.h>
53 #include <sys/select.h>
54 
55 #include <dev/ieee1394/firewire.h>
56 #include <dev/ieee1394/firewirereg.h>
57 #include <dev/ieee1394/fwdma.h>
58 #include <dev/ieee1394/fwmem.h>
59 #include <dev/ieee1394/iec68113.h>
60 
61 #include "ioconf.h"
62 
63 #define	FWNODE_INVAL 0xffff
64 
65 dev_type_open(fw_open);
66 dev_type_close(fw_close);
67 dev_type_read(fw_read);
68 dev_type_write(fw_write);
69 dev_type_ioctl(fw_ioctl);
70 dev_type_poll(fw_poll);
71 dev_type_mmap(fw_mmap);
72 dev_type_strategy(fw_strategy);
73 
74 const struct bdevsw fw_bdevsw = {
75 	.d_open = fw_open,
76 	.d_close = fw_close,
77 	.d_strategy = fw_strategy,
78 	.d_ioctl = fw_ioctl,
79 	.d_dump = nodump,
80 	.d_psize = nosize,
81 	.d_flag = D_OTHER
82 };
83 
84 const struct cdevsw fw_cdevsw = {
85 	.d_open = fw_open,
86 	.d_close = fw_close,
87 	.d_read = fw_read,
88 	.d_write = fw_write,
89 	.d_ioctl = fw_ioctl,
90 	.d_stop = nostop,
91 	.d_tty = notty,
92 	.d_poll = fw_poll,
93 	.d_mmap = fw_mmap,
94 	.d_kqfilter = nokqfilter,
95 	.d_flag = D_OTHER
96 };
97 
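/*
 * Per-open instance state, hung off sc->si_drv1: the attached bus (fc),
 * the isochronous receive/transmit queues claimed through the
 * FW_SRSTREAM/FW_STSTREAM ioctls, the requested buffer geometry, and
 * the lists of address bindings and queued asynchronous receptions.
 */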
98 struct fw_drv1 {
99 	struct firewire_comm *fc;
100 	struct fw_xferq *ir;
101 	struct fw_xferq *it;
102 	struct fw_isobufreq bufreq;
103 	STAILQ_HEAD(, fw_bind) binds;
104 	STAILQ_HEAD(, fw_xfer) rq;
105 };
106 
107 static int fwdev_allocbuf(struct firewire_comm *, struct fw_xferq *,
108 			  struct fw_bufspec *);
109 static int fwdev_freebuf(struct fw_xferq *);
110 static int fw_read_async(struct fw_drv1 *, struct uio *, int);
111 static int fw_write_async(struct fw_drv1 *, struct uio *, int);
112 static void fw_hand(struct fw_xfer *);
113 
114 
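/*
 * Open the character device.  The fwmem minor is handed off to
 * fwmem_open(); otherwise a single open per unit is enforced and a
 * zeroed fw_drv1 is allocated and attached to the softc.
 */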
115 int
116 fw_open(dev_t dev, int flags, int fmt, struct lwp *td)
117 {
118 	struct firewire_softc *sc;
119 	struct fw_drv1 *d;
120 	int err = 0;
121 
122 	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
123 	if (sc == NULL)
124 		return ENXIO;
125 
126 	if (DEV_FWMEM(dev))
127 		return fwmem_open(dev, flags, fmt, td);
128 
129 	mutex_enter(&sc->fc->fc_mtx);
130 	if (sc->si_drv1 != NULL) {
131 		mutex_exit(&sc->fc->fc_mtx);
132 		return EBUSY;
133 	}
134 	/* set dummy value for allocation */
135 	sc->si_drv1 = (void *)-1;
136 	mutex_exit(&sc->fc->fc_mtx);
137 
138 	sc->si_drv1 = malloc(sizeof(struct fw_drv1), M_FW, M_WAITOK | M_ZERO);
139 	if (sc->si_drv1 == NULL)
140 		return ENOMEM;
141 
142 	d = (struct fw_drv1 *)sc->si_drv1;
143 	d->fc = sc->fc;
144 	STAILQ_INIT(&d->binds);
145 	STAILQ_INIT(&d->rq);
146 
147 	return err;
148 }
149 
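/*
 * Close: tear down any address bindings, disable and drain the
 * isochronous receive/transmit queues and release their external
 * buffers, then free the per-open state.
 */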
150 int
151 fw_close(dev_t dev, int flags, int fmt, struct lwp *td)
152 {
153 	struct firewire_softc *sc;
154 	struct firewire_comm *fc;
155 	struct fw_drv1 *d;
156 	struct fw_xfer *xfer;
157 	struct fw_bind *fwb;
158 	int err = 0;
159 
160 	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
161 	if (sc == NULL)
162 		return ENXIO;
163 
164 	if (DEV_FWMEM(dev))
165 		return fwmem_close(dev, flags, fmt, td);
166 
167 	d = (struct fw_drv1 *)sc->si_drv1;
168 	fc = d->fc;
169 
170 	/* remove binding */
171 	for (fwb = STAILQ_FIRST(&d->binds); fwb != NULL;
172 	    fwb = STAILQ_FIRST(&d->binds)) {
173 		fw_bindremove(fc, fwb);
174 		STAILQ_REMOVE_HEAD(&d->binds, chlist);
175 		fw_xferlist_remove(&fwb->xferlist);
176 		free(fwb, M_FW);
177 	}
178 	if (d->ir != NULL) {
179 		struct fw_xferq *ir = d->ir;
180 
181 		if ((ir->flag & FWXFERQ_OPEN) == 0)
182 			return EINVAL;
183 		if (ir->flag & FWXFERQ_RUNNING) {
184 			ir->flag &= ~FWXFERQ_RUNNING;
185 			fc->irx_disable(fc, ir->dmach);
186 		}
187 		/* free extbuf */
188 		fwdev_freebuf(ir);
189 		/* drain receiving buffer */
190 		for (xfer = STAILQ_FIRST(&ir->q); xfer != NULL;
191 		    xfer = STAILQ_FIRST(&ir->q)) {
192 			ir->queued--;
193 			STAILQ_REMOVE_HEAD(&ir->q, link);
194 
195 			xfer->resp = 0;
196 			fw_xfer_done(xfer);
197 		}
198 		ir->flag &=
199 		    ~(FWXFERQ_OPEN | FWXFERQ_MODEMASK | FWXFERQ_CHTAGMASK);
200 		d->ir = NULL;
201 
202 	}
203 	if (d->it != NULL) {
204 		struct fw_xferq *it = d->it;
205 
206 		if ((it->flag & FWXFERQ_OPEN) == 0)
207 			return EINVAL;
208 		if (it->flag & FWXFERQ_RUNNING) {
209 			it->flag &= ~FWXFERQ_RUNNING;
210 			fc->itx_disable(fc, it->dmach);
211 		}
212 		/* free extbuf */
213 		fwdev_freebuf(it);
214 		it->flag &=
215 		    ~(FWXFERQ_OPEN | FWXFERQ_MODEMASK | FWXFERQ_CHTAGMASK);
216 		d->it = NULL;
217 	}
218 	free(sc->si_drv1, M_FW);
219 	sc->si_drv1 = NULL;
220 
221 	return err;
222 }
223 
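/*
 * Read.  The fwmem minor goes through physio(); if no isochronous
 * receive queue has been configured the request is served from the
 * asynchronous receive queue via fw_read_async().  Otherwise one
 * received isochronous packet (quadlet header plus payload) is copied
 * out per iteration, sleeping up to one second for data to arrive.
 */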
224 int
225 fw_read(dev_t dev, struct uio *uio, int ioflag)
226 {
227 	struct firewire_softc *sc;
228 	struct firewire_comm *fc;
229 	struct fw_drv1 *d;
230 	struct fw_xferq *ir;
231 	struct fw_pkt *fp;
232 	int err = 0, slept = 0;
233 
234 	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
235 	if (sc == NULL)
236 		return ENXIO;
237 
238 	if (DEV_FWMEM(dev))
239 		return physio(fw_strategy, NULL, dev, ioflag, minphys, uio);
240 
241 	d = (struct fw_drv1 *)sc->si_drv1;
242 	fc = d->fc;
243 	ir = d->ir;
244 
245 	if (ir == NULL)
246 		return fw_read_async(d, uio, ioflag);
247 
248 	if (ir->buf == NULL)
249 		return EIO;
250 
251 	mutex_enter(&fc->fc_mtx);
252 readloop:
253 	if (ir->stproc == NULL) {
254 		/* iso bulkxfer */
255 		ir->stproc = STAILQ_FIRST(&ir->stvalid);
256 		if (ir->stproc != NULL) {
257 			STAILQ_REMOVE_HEAD(&ir->stvalid, link);
258 			ir->queued = 0;
259 		}
260 	}
261 	if (ir->stproc == NULL) {
262 		/* no data available */
263 		if (slept == 0) {
264 			slept = 1;
265 			ir->flag |= FWXFERQ_WAKEUP;
266 			mutex_exit(&fc->fc_mtx);
267 			err = tsleep(ir, FWPRI, "fw_read", hz);
268 			mutex_enter(&fc->fc_mtx);
269 			ir->flag &= ~FWXFERQ_WAKEUP;
270 			if (err == 0)
271 				goto readloop;
272 		} else if (slept == 1)
273 			err = EIO;
274 		mutex_exit(&fc->fc_mtx);
275 		return err;
276 	} else if (ir->stproc != NULL) {
277 		/* iso bulkxfer */
278 		mutex_exit(&fc->fc_mtx);
279 		fp = (struct fw_pkt *)fwdma_v_addr(ir->buf,
280 		    ir->stproc->poffset + ir->queued);
281 		if (fc->irx_post != NULL)
282 			fc->irx_post(fc, fp->mode.ld);
283 		if (fp->mode.stream.len == 0)
284 			return EIO;
285 		err = uiomove((void *)fp,
286 		    fp->mode.stream.len + sizeof(uint32_t), uio);
287 		ir->queued++;
288 		if (ir->queued >= ir->bnpacket) {
289 			STAILQ_INSERT_TAIL(&ir->stfree, ir->stproc, link);
290 			fc->irx_enable(fc, ir->dmach);
291 			ir->stproc = NULL;
292 		}
293 		if (uio->uio_resid >= ir->psize) {
294 			slept = -1;
295 			mutex_enter(&fc->fc_mtx);
296 			goto readloop;
297 		}
298 	} else
299 		mutex_exit(&fc->fc_mtx);
300 	return err;
301 }
302 
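/*
 * Write.  The fwmem minor goes through physio(); without a configured
 * isochronous transmit queue the packet is sent as an asynchronous
 * request via fw_write_async().  Otherwise packets are copied into the
 * transmit chunks and the DMA engine is (re)enabled once a chunk fills.
 */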
303 int
304 fw_write(dev_t dev, struct uio *uio, int ioflag)
305 {
306 	struct firewire_softc *sc;
307 	struct firewire_comm *fc;
308 	struct fw_drv1 *d;
309 	struct fw_pkt *fp;
310 	struct fw_xferq *it;
311 	int slept = 0, err = 0;
312 
313 	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
314 	if (sc == NULL)
315 		return ENXIO;
316 
317 	if (DEV_FWMEM(dev))
318 		return physio(fw_strategy, NULL, dev, ioflag, minphys, uio);
319 
320 	d = (struct fw_drv1 *)sc->si_drv1;
321 	fc = d->fc;
322 	it = d->it;
323 
324 	if (it == NULL)
325 		return fw_write_async(d, uio, ioflag);
326 
327 	if (it->buf == NULL)
328 		return EIO;
329 
330 	mutex_enter(&fc->fc_mtx);
331 isoloop:
332 	if (it->stproc == NULL) {
333 		it->stproc = STAILQ_FIRST(&it->stfree);
334 		if (it->stproc != NULL) {
335 			STAILQ_REMOVE_HEAD(&it->stfree, link);
336 			it->queued = 0;
337 		} else if (slept == 0) {
338 			slept = 1;
339 #if 0   /* XXX to avoid lock recursion */
340 			err = fc->itx_enable(fc, it->dmach);
341 			if (err)
342 				goto out;
343 #endif
344 			mutex_exit(&fc->fc_mtx);
345 			err = tsleep(it, FWPRI, "fw_write", hz);
346 			mutex_enter(&fc->fc_mtx);
347 			if (err)
348 				goto out;
349 			goto isoloop;
350 		} else {
351 			err = EIO;
352 			goto out;
353 		}
354 	}
355 	mutex_exit(&fc->fc_mtx);
356 	fp = (struct fw_pkt *)fwdma_v_addr(it->buf,
357 	    it->stproc->poffset + it->queued);
358 	err = uiomove((void *)fp, sizeof(struct fw_isohdr), uio);
359 	if (err != 0)
360 		return err;
361 	err =
362 	    uiomove((void *)fp->mode.stream.payload, fp->mode.stream.len, uio);
363 	it->queued++;
364 	if (it->queued >= it->bnpacket) {
365 		STAILQ_INSERT_TAIL(&it->stvalid, it->stproc, link);
366 		it->stproc = NULL;
367 		err = fc->itx_enable(fc, it->dmach);
368 	}
369 	if (uio->uio_resid >= sizeof(struct fw_isohdr)) {
370 		slept = 0;
371 		mutex_enter(&fc->fc_mtx);
372 		goto isoloop;
373 	}
374 	return err;
375 
376 out:
377 	mutex_exit(&fc->fc_mtx);
378 	return err;
379 }
380 
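/*
 * Ioctl entry point.  Stream setup (FW_SSTBUF/FW_SRSTREAM/FW_STSTREAM),
 * asynchronous requests (FW_ASYREQ), address range binding
 * (FW_SBINDADDR/FW_CBINDADDR), bus reset and topology/CROM queries are
 * handled here; anything else falls through to the bus ioctl handler.
 *
 * Sketch of a typical userland sequence for isochronous reception on an
 * open fw device node (illustrative only; the sizes are arbitrary):
 *
 *	struct fw_isobufreq b;
 *	struct fw_isochreq r;
 *
 *	memset(&b, 0, sizeof(b));
 *	b.rx.nchunk = 8; b.rx.npacket = 256; b.rx.psize = 512;
 *	ioctl(fd, FW_SSTBUF, &b);	(set the buffer geometry)
 *	memset(&r, 0, sizeof(r));
 *	r.ch = 63; r.tag = 1;
 *	ioctl(fd, FW_SRSTREAM, &r);	(claim a receive DMA channel)
 *	read(fd, buf, len);		(then read packets)
 */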
381 int
382 fw_ioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *td)
383 {
384 	struct firewire_softc *sc;
385 	struct firewire_comm *fc;
386 	struct fw_drv1 *d;
387 	struct fw_device *fwdev;
388 	struct fw_bind *fwb;
389 	struct fw_xferq *ir, *it;
390 	struct fw_xfer *xfer;
391 	struct fw_pkt *fp;
392 	struct fw_devinfo *devinfo;
393 	struct fw_devlstreq *fwdevlst = (struct fw_devlstreq *)data;
394 	struct fw_asyreq *asyreq = (struct fw_asyreq *)data;
395 	struct fw_isochreq *ichreq = (struct fw_isochreq *)data;
396 	struct fw_isobufreq *ibufreq = (struct fw_isobufreq *)data;
397 	struct fw_asybindreq *bindreq = (struct fw_asybindreq *)data;
398 	struct fw_crom_buf *crom_buf = (struct fw_crom_buf *)data;
399 	int i, len, err = 0;
400 	void *ptr;
401 
402 	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
403 	if (sc == NULL)
404 		return ENXIO;
405 
406 	if (DEV_FWMEM(dev))
407 		return fwmem_ioctl(dev, cmd, data, flag, td);
408 
409 	if (!data)
410 		return EINVAL;
411 
412 	d = (struct fw_drv1 *)sc->si_drv1;
413 	fc = d->fc;
414 	ir = d->ir;
415 	it = d->it;
416 
417 	switch (cmd) {
418 	case FW_STSTREAM:
419 		if (it == NULL) {
420 			i = fw_open_isodma(fc, /* tx */1);
421 			if (i < 0) {
422 				err = EBUSY;
423 				break;
424 			}
425 			it = fc->it[i];
426 			err = fwdev_allocbuf(fc, it, &d->bufreq.tx);
427 			if (err) {
428 				it->flag &= ~FWXFERQ_OPEN;
429 				break;
430 			}
431 		}
432 		it->flag &= ~0xff;
433 		it->flag |= (0x3f & ichreq->ch);
434 		it->flag |= ((0x3 & ichreq->tag) << 6);
435 		d->it = it;
436 		break;
437 
438 	case FW_GTSTREAM:
439 		if (it != NULL) {
440 			ichreq->ch = it->flag & 0x3f;
441 			ichreq->tag = (it->flag >> 6) & 0x3;	/* tag is kept in bits 6-7 */
442 		} else
443 			err = EINVAL;
444 		break;
445 
446 	case FW_SRSTREAM:
447 		if (ir == NULL) {
448 			i = fw_open_isodma(fc, /* tx */0);
449 			if (i < 0) {
450 				err = EBUSY;
451 				break;
452 			}
453 			ir = fc->ir[i];
454 			err = fwdev_allocbuf(fc, ir, &d->bufreq.rx);
455 			if (err) {
456 				ir->flag &= ~FWXFERQ_OPEN;
457 				break;
458 			}
459 		}
460 		ir->flag &= ~0xff;
461 		ir->flag |= (0x3f & ichreq->ch);
462 		ir->flag |= ((0x3 & ichreq->tag) << 6);
463 		d->ir = ir;
464 		err = fc->irx_enable(fc, ir->dmach);
465 		break;
466 
467 	case FW_GRSTREAM:
468 		if (d->ir != NULL) {
469 			ichreq->ch = ir->flag & 0x3f;
470 			ichreq->tag = (ir->flag >> 6) & 0x3;	/* tag is kept in bits 6-7 */
471 		} else
472 			err = EINVAL;
473 		break;
474 
475 	case FW_SSTBUF:
476 		memcpy(&d->bufreq, ibufreq, sizeof(d->bufreq));
477 		break;
478 
479 	case FW_GSTBUF:
480 		memset(&ibufreq->rx, 0, sizeof(ibufreq->rx));
481 		if (ir != NULL) {
482 			ibufreq->rx.nchunk = ir->bnchunk;
483 			ibufreq->rx.npacket = ir->bnpacket;
484 			ibufreq->rx.psize = ir->psize;
485 		}
486 		memset(&ibufreq->tx, 0, sizeof(ibufreq->tx));
487 		if (it != NULL) {
488 			ibufreq->tx.nchunk = it->bnchunk;
489 			ibufreq->tx.npacket = it->bnpacket;
490 			ibufreq->tx.psize = it->psize;
491 		}
492 		break;
493 
494 	case FW_ASYREQ:
495 	{
496 		const struct tcode_info *tinfo;
497 		int pay_len = 0;
498 
499 		fp = &asyreq->pkt;
500 		tinfo = &fc->tcode[fp->mode.hdr.tcode];
501 
502 		if ((tinfo->flag & FWTI_BLOCK_ASY) != 0)
503 			pay_len = MAX(0, asyreq->req.len - tinfo->hdr_len);
504 
505 		xfer = fw_xfer_alloc_buf(M_FW, pay_len, PAGE_SIZE/*XXX*/);
506 		if (xfer == NULL)
507 			return ENOMEM;
508 
509 		switch (asyreq->req.type) {
510 		case FWASREQNODE:
511 			break;
512 
513 		case FWASREQEUI:
514 			fwdev = fw_noderesolve_eui64(fc, &asyreq->req.dst.eui);
515 			if (fwdev == NULL) {
516 				aprint_error_dev(fc->bdev,
517 				    "cannot find node\n");
518 				err = EINVAL;
519 				goto out;
520 			}
521 			fp->mode.hdr.dst = FWLOCALBUS | fwdev->dst;
522 			break;
523 
524 		case FWASRESTL:
525 			/* XXX what's this? */
526 			break;
527 
528 		case FWASREQSTREAM:
529 			/* nothing to do */
530 			break;
531 		}
532 
533 		memcpy(&xfer->send.hdr, fp, tinfo->hdr_len);
534 		if (pay_len > 0)
535 			memcpy(xfer->send.payload, (char *)fp + tinfo->hdr_len,
536 			    pay_len);
537 		xfer->send.spd = asyreq->req.sped;
538 		xfer->hand = fw_xferwake;
539 
540 		if ((err = fw_asyreq(fc, -1, xfer)) != 0)
541 			goto out;
542 		if ((err = fw_xferwait(xfer)) != 0)
543 			goto out;
544 		if (xfer->resp != 0) {
545 			err = EIO;
546 			goto out;
547 		}
548 		if ((tinfo->flag & FWTI_TLABEL) == 0)
549 			goto out;
550 
551 		/* copy response */
552 		tinfo = &fc->tcode[xfer->recv.hdr.mode.hdr.tcode];
553 		if (xfer->recv.hdr.mode.hdr.tcode == FWTCODE_RRESB ||
554 		    xfer->recv.hdr.mode.hdr.tcode == FWTCODE_LRES) {
555 			pay_len = xfer->recv.pay_len;
556 			if (asyreq->req.len >=
557 			    xfer->recv.pay_len + tinfo->hdr_len)
558 				asyreq->req.len =
559 				    xfer->recv.pay_len + tinfo->hdr_len;
560 			else {
561 				err = EINVAL;
562 				pay_len = 0;
563 			}
564 		} else
565 			pay_len = 0;
566 		memcpy(fp, &xfer->recv.hdr, tinfo->hdr_len);
567 		memcpy((char *)fp + tinfo->hdr_len, xfer->recv.payload,
568 		    pay_len);
569 out:
570 		fw_xfer_free_buf(xfer);
571 		break;
572 	}
573 
574 	case FW_IBUSRST:
575 		fc->ibr(fc);
576 		break;
577 
578 	case FW_CBINDADDR:
579 		fwb = fw_bindlookup(fc, bindreq->start.hi, bindreq->start.lo);
580 		if (fwb == NULL) {
581 			err = EINVAL;
582 			break;
583 		}
584 		fw_bindremove(fc, fwb);
585 		STAILQ_REMOVE(&d->binds, fwb, fw_bind, chlist);
586 		fw_xferlist_remove(&fwb->xferlist);
587 		free(fwb, M_FW);
588 		break;
589 
590 	case FW_SBINDADDR:
591 		if (bindreq->len <= 0 ) {
592 			err = EINVAL;
593 			break;
594 		}
595 		if (bindreq->start.hi > 0xffff ) {
596 			err = EINVAL;
597 			break;
598 		}
599 		fwb = (struct fw_bind *)malloc(sizeof(struct fw_bind),
600 		    M_FW, M_WAITOK);
601 		if (fwb == NULL) {
602 			err = ENOMEM;
603 			break;
604 		}
605 		fwb->start = ((u_int64_t)bindreq->start.hi << 32) |
606 		    bindreq->start.lo;
607 		fwb->end = fwb->start +  bindreq->len;
608 		fwb->sc = (void *)d;
609 		STAILQ_INIT(&fwb->xferlist);
610 		err = fw_bindadd(fc, fwb);
611 		if (err == 0) {
612 			fw_xferlist_add(&fwb->xferlist, M_FW,
613 			    /* XXX */
614 			    PAGE_SIZE, PAGE_SIZE, 5, fc, (void *)fwb, fw_hand);
615 			STAILQ_INSERT_TAIL(&d->binds, fwb, chlist);
616 		}
617 		break;
618 
619 	case FW_GDEVLST:
620 		i = len = 1;
621 		/* myself */
622 		devinfo = fwdevlst->dev;
623 		devinfo->dst = fc->nodeid;
624 		devinfo->status = 0;	/* XXX */
625 		devinfo->eui.hi = fc->eui.hi;
626 		devinfo->eui.lo = fc->eui.lo;
627 		STAILQ_FOREACH(fwdev, &fc->devices, link) {
628 			if (len < FW_MAX_DEVLST) {
629 				devinfo = &fwdevlst->dev[len++];
630 				devinfo->dst = fwdev->dst;
631 				devinfo->status =
632 				    (fwdev->status == FWDEVINVAL) ? 0 : 1;
633 				devinfo->eui.hi = fwdev->eui.hi;
634 				devinfo->eui.lo = fwdev->eui.lo;
635 			}
636 			i++;
637 		}
638 		fwdevlst->n = i;
639 		fwdevlst->info_len = len;
640 		break;
641 
642 	case FW_GTPMAP:
643 		memcpy(data, fc->topology_map,
644 		    (fc->topology_map->crc_len + 1) * 4);
645 		break;
646 
647 	case FW_GCROM:
648 		STAILQ_FOREACH(fwdev, &fc->devices, link)
649 			if (FW_EUI64_EQUAL(fwdev->eui, crom_buf->eui))
650 				break;
651 		if (fwdev == NULL) {
652 			if (!FW_EUI64_EQUAL(fc->eui, crom_buf->eui)) {
653 				err = FWNODE_INVAL;
654 				break;
655 			}
656 			/* myself */
657 			ptr = malloc(CROMSIZE, M_FW, M_WAITOK);
658 			len = CROMSIZE;
659 			for (i = 0; i < CROMSIZE/4; i++)
660 				((uint32_t *)ptr)[i] = ntohl(fc->config_rom[i]);
661 		} else {
662 			/* found */
663 			ptr = (void *)fwdev->csrrom;
664 			if (fwdev->rommax < CSRROMOFF)
665 				len = 0;
666 			else
667 				len = fwdev->rommax - CSRROMOFF + 4;
668 		}
669 		if (crom_buf->len < len)
670 			len = crom_buf->len;
671 		else
672 			crom_buf->len = len;
673 		err = copyout(ptr, crom_buf->ptr, len);
674 		if (fwdev == NULL)
675 			/* myself */
676 			free(ptr, M_FW);
677 		break;
678 
679 	default:
680 		err = fc->ioctl(dev, cmd, data, flag, td);
681 		break;
682 	}
683 	return err;
684 }
685 
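/*
 * Poll: readable when the isochronous receive queue has packets queued
 * (a receive queue is assumed to have been set up); writing is always
 * reported possible, cf. the XXX below.
 */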
686 int
687 fw_poll(dev_t dev, int events, struct lwp *td)
688 {
689 	struct firewire_softc *sc;
690 	struct fw_xferq *ir;
691 	int revents, tmp;
692 
693 	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
694 	if (sc == NULL)
695 		return ENXIO;
696 
697 	ir = ((struct fw_drv1 *)sc->si_drv1)->ir;
698 	revents = 0;
699 	tmp = POLLIN | POLLRDNORM;
700 	if (events & tmp) {
701 		if (STAILQ_FIRST(&ir->q) != NULL)
702 			revents |= tmp;
703 		else
704 			selrecord(td, &ir->rsel);
705 	}
706 	tmp = POLLOUT | POLLWRNORM;
707 	if (events & tmp)
708 		/* XXX should be fixed */
709 		revents |= tmp;
710 
711 	return revents;
712 }
713 
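/*
 * mmap is not supported; the request always fails.
 */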
714 paddr_t
715 fw_mmap(dev_t dev, off_t offset, int nproto)
716 {
717 	struct firewire_softc *sc;
718 
719 	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
720 	if (sc == NULL)
721 		return (paddr_t)-1;
722 
723 	return (paddr_t)-1;	/* mmap not supported */
724 }
725 
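/*
 * Block strategy routine; only meaningful for the fwmem minor, which is
 * passed to fwmem_strategy().  Other minors fail the request.
 */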
726 void
727 fw_strategy(struct bio *bp)
728 {
729 	struct firewire_softc *sc;
730 	dev_t dev = bp->bio_dev;
731 
732 	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
733 	if (sc == NULL)
734 		return;
735 
736 	if (DEV_FWMEM(dev)) {
737 		fwmem_strategy(bp);
738 		return;
739 	}
740 
741 	bp->bio_error = EOPNOTSUPP;
742 	bp->bio_resid = bp->bio_bcount;
743 	biodone(bp);
744 }
745 
746 
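/*
 * Allocate the external isochronous buffer for a transfer queue:
 * nchunk bulkxfer descriptors plus a multi-segment DMA buffer holding
 * nchunk * npacket packets of psize bytes (rounded up to a quadlet),
 * with every chunk initially on the free list.
 */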
747 static int
748 fwdev_allocbuf(struct firewire_comm *fc, struct fw_xferq *q,
749 	       struct fw_bufspec *b)
750 {
751 	int i;
752 
753 	if (q->flag & (FWXFERQ_RUNNING | FWXFERQ_EXTBUF))
754 		return EBUSY;
755 
756 	q->bulkxfer =
757 	    (struct fw_bulkxfer *)malloc(sizeof(struct fw_bulkxfer) * b->nchunk,
758 								M_FW, M_WAITOK);
759 	if (q->bulkxfer == NULL)
760 		return ENOMEM;
761 
762 	b->psize = roundup2(b->psize, sizeof(uint32_t));
763 	q->buf = fwdma_malloc_multiseg(fc, sizeof(uint32_t), b->psize,
764 	    b->nchunk * b->npacket, BUS_DMA_WAITOK);
765 
766 	if (q->buf == NULL) {
767 		free(q->bulkxfer, M_FW);
768 		q->bulkxfer = NULL;
769 		return ENOMEM;
770 	}
771 	q->bnchunk = b->nchunk;
772 	q->bnpacket = b->npacket;
773 	q->psize = (b->psize + 3) & ~3;
774 	q->queued = 0;
775 
776 	STAILQ_INIT(&q->stvalid);
777 	STAILQ_INIT(&q->stfree);
778 	STAILQ_INIT(&q->stdma);
779 	q->stproc = NULL;
780 
781 	for (i = 0 ; i < q->bnchunk; i++) {
782 		q->bulkxfer[i].poffset = i * q->bnpacket;
783 		q->bulkxfer[i].mbuf = NULL;
784 		STAILQ_INSERT_TAIL(&q->stfree, &q->bulkxfer[i], link);
785 	}
786 
787 	q->flag &= ~FWXFERQ_MODEMASK;
788 	q->flag |= FWXFERQ_STREAM;
789 	q->flag |= FWXFERQ_EXTBUF;
790 
791 	return 0;
792 }
793 
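/*
 * Release the external buffer allocated by fwdev_allocbuf().
 */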
794 static int
795 fwdev_freebuf(struct fw_xferq *q)
796 {
797 
798 	if (q->flag & FWXFERQ_EXTBUF) {
799 		if (q->buf != NULL)
800 			fwdma_free_multiseg(q->buf);
801 		q->buf = NULL;
802 		free(q->bulkxfer, M_FW);
803 		q->bulkxfer = NULL;
804 		q->flag &= ~FWXFERQ_EXTBUF;
805 		q->psize = 0;
806 		q->maxq = FWMAXQUEUE;
807 	}
808 	return 0;
809 }
810 
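/*
 * Asynchronous read path: sleep until a received transfer is queued on
 * d->rq, copy its header and payload out to userland, then recycle the
 * transfer onto its binding's free list.
 */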
811 static int
812 fw_read_async(struct fw_drv1 *d, struct uio *uio, int ioflag)
813 {
814 	struct fw_xfer *xfer;
815 	struct fw_bind *fwb;
816 	struct fw_pkt *fp;
817 	const struct tcode_info *tinfo;
818 	int err = 0;
819 
820 	mutex_enter(&d->fc->fc_mtx);
821 
822 	for (;;) {
823 		xfer = STAILQ_FIRST(&d->rq);
824 		if (xfer == NULL && err == 0) {
825 			mutex_exit(&d->fc->fc_mtx);
826 			err = tsleep(&d->rq, FWPRI, "fwra", 0);
827 			if (err != 0)
828 				return err;
829 			mutex_enter(&d->fc->fc_mtx);
830 			continue;
831 		}
832 		break;
833 	}
834 
835 	STAILQ_REMOVE_HEAD(&d->rq, link);
836 	mutex_exit(&d->fc->fc_mtx);
837 	fp = &xfer->recv.hdr;
838 #if 0 /* for GASP ?? */
839 	if (fc->irx_post != NULL)
840 		fc->irx_post(fc, fp->mode.ld);
841 #endif
842 	tinfo = &xfer->fc->tcode[fp->mode.hdr.tcode];
843 	err = uiomove((void *)fp, tinfo->hdr_len, uio);
844 	if (err)
845 		goto out;
846 	err = uiomove((void *)xfer->recv.payload, xfer->recv.pay_len, uio);
847 
848 out:
849 	/* recycle this xfer */
850 	fwb = (struct fw_bind *)xfer->sc;
851 	fw_xfer_unload(xfer);
852 	xfer->recv.pay_len = PAGE_SIZE;
853 	mutex_enter(&d->fc->fc_mtx);
854 	STAILQ_INSERT_TAIL(&fwb->xferlist, xfer, link);
855 	mutex_exit(&d->fc->fc_mtx);
856 	return err;
857 }
858 
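/*
 * Asynchronous write path: build a request packet from the user-supplied
 * header and payload, send it with fw_asyreq() and wait for completion.
 * If a response was received it is queued on d->rq for a later read.
 */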
859 static int
860 fw_write_async(struct fw_drv1 *d, struct uio *uio, int ioflag)
861 {
862 	struct fw_xfer *xfer;
863 	struct fw_pkt pkt;
864 	const struct tcode_info *tinfo;
865 	int err;
866 
867 	memset(&pkt, 0, sizeof(struct fw_pkt));
868 	if ((err = uiomove((void *)&pkt, sizeof(uint32_t), uio)))
869 		return err;
870 	tinfo = &d->fc->tcode[pkt.mode.hdr.tcode];
871 	if ((err = uiomove((char *)&pkt + sizeof(uint32_t),
872 	    tinfo->hdr_len - sizeof(uint32_t), uio)))
873 		return err;
874 
875 	if ((xfer = fw_xfer_alloc_buf(M_FW, uio->uio_resid,
876 	    PAGE_SIZE/*XXX*/)) == NULL)
877 		return ENOMEM;
878 
879 	memcpy(&xfer->send.hdr, &pkt, sizeof(struct fw_pkt));
880 	xfer->send.pay_len = uio->uio_resid;
881 	if (uio->uio_resid > 0) {
882 		if ((err =
883 		    uiomove((void *)xfer->send.payload, uio->uio_resid, uio)))
884 			goto out;
885 	}
886 
887 	xfer->fc = d->fc;
888 	xfer->sc = NULL;
889 	xfer->hand = fw_xferwake;
890 	xfer->send.spd = 2 /* XXX */;
891 
892 	if ((err = fw_asyreq(xfer->fc, -1, xfer)))
893 		goto out;
894 
895 	if ((err = fw_xferwait(xfer)))
896 		goto out;
897 
898 	if (xfer->resp != 0) {
899 		err = xfer->resp;
900 		goto out;
901 	}
902 
903 	if (xfer->flag == FWXF_RCVD) {
904 		mutex_enter(&xfer->fc->fc_mtx);
905 		STAILQ_INSERT_TAIL(&d->rq, xfer, link);
906 		mutex_exit(&xfer->fc->fc_mtx);
907 		return 0;
908 	}
909 
910 out:
911 	fw_xfer_free(xfer);
912 	return err;
913 }
914 
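/*
 * Receive handler for bound address ranges: queue the completed transfer
 * on the owner's read queue and wake up any waiting reader.
 */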
915 static void
916 fw_hand(struct fw_xfer *xfer)
917 {
918 	struct fw_bind *fwb;
919 	struct fw_drv1 *d;
920 
921 	fwb = (struct fw_bind *)xfer->sc;
922 	d = (struct fw_drv1 *)fwb->sc;
923 	mutex_enter(&xfer->fc->fc_mtx);
924 	STAILQ_INSERT_TAIL(&d->rq, xfer, link);
925 	mutex_exit(&xfer->fc->fc_mtx);
926 	wakeup(&d->rq);
927 }
928