xref: /netbsd-src/sys/dev/ieee1394/fwdev.c (revision 5c46dd73a9bcb28b2994504ea090f64066b17a77)
1 /*	$NetBSD: fwdev.c,v 1.22 2010/05/23 18:56:58 christos Exp $	*/
2 /*-
3  * Copyright (c) 2003 Hidetoshi Shimokawa
4  * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the acknowledgement as bellow:
17  *
18  *    This product includes software developed by K. Kobayashi and H. Shimokawa
19  *
20  * 4. The name of the author may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
27  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
29  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
31  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  *
35  * $FreeBSD: src/sys/dev/firewire/fwdev.c,v 1.52 2007/06/06 14:31:36 simokawa Exp $
36  *
37  */
38 
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: fwdev.c,v 1.22 2010/05/23 18:56:58 christos Exp $");
41 
42 #include <sys/param.h>
43 #include <sys/device.h>
44 #include <sys/errno.h>
45 #include <sys/buf.h>
46 #include <sys/bus.h>
47 #include <sys/conf.h>
48 #include <sys/kernel.h>
49 #include <sys/malloc.h>
50 #include <sys/mbuf.h>
51 #include <sys/poll.h>
52 #include <sys/proc.h>
53 #include <sys/select.h>
54 
55 #include <dev/ieee1394/firewire.h>
56 #include <dev/ieee1394/firewirereg.h>
57 #include <dev/ieee1394/fwdma.h>
58 #include <dev/ieee1394/fwmem.h>
59 #include <dev/ieee1394/iec68113.h>
60 
61 #include "ioconf.h"
62 
63 #define	FWNODE_INVAL 0xffff
64 
65 dev_type_open(fw_open);
66 dev_type_close(fw_close);
67 dev_type_read(fw_read);
68 dev_type_write(fw_write);
69 dev_type_ioctl(fw_ioctl);
70 dev_type_poll(fw_poll);
71 dev_type_mmap(fw_mmap);
72 dev_type_strategy(fw_strategy);
73 
74 const struct bdevsw fw_bdevsw = {
75 	fw_open, fw_close, fw_strategy, fw_ioctl, nodump, nosize, D_OTHER,
76 };
77 
78 const struct cdevsw fw_cdevsw = {
79 	fw_open, fw_close, fw_read, fw_write, fw_ioctl,
80 	nostop, notty, fw_poll, fw_mmap, nokqfilter, D_OTHER,
81 };
82 
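/*
 * Per-open ("handle") state: the owning firewire_comm, the optional
 * isochronous receive/transmit queues set up via FW_SRSTREAM/FW_STSTREAM,
 * the buffer geometry requested with FW_SSTBUF, the list of address
 * ranges bound with FW_SBINDADDR, and the queue of received
 * asynchronous packets waiting to be picked up by read(2).
 */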
83 struct fw_drv1 {
84 	struct firewire_comm *fc;
85 	struct fw_xferq *ir;
86 	struct fw_xferq *it;
87 	struct fw_isobufreq bufreq;
88 	STAILQ_HEAD(, fw_bind) binds;
89 	STAILQ_HEAD(, fw_xfer) rq;
90 };
91 
92 static int fwdev_allocbuf(struct firewire_comm *, struct fw_xferq *,
93 			  struct fw_bufspec *);
94 static int fwdev_freebuf(struct fw_xferq *);
95 static int fw_read_async(struct fw_drv1 *, struct uio *, int);
96 static int fw_write_async(struct fw_drv1 *, struct uio *, int);
97 static void fw_hand(struct fw_xfer *);
98 
99 
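/*
 * Open the character device.  fwmem minors are diverted to fwmem_open();
 * for the plain device only a single open is allowed at a time (EBUSY
 * otherwise), and a zeroed fw_drv1 is allocated and hung off the softc.
 */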
100 int
101 fw_open(dev_t dev, int flags, int fmt, struct lwp *td)
102 {
103 	struct firewire_softc *sc;
104 	struct fw_drv1 *d;
105 	int err = 0;
106 
107 	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
108 	if (sc == NULL)
109 		return ENXIO;
110 
111 	if (DEV_FWMEM(dev))
112 		return fwmem_open(dev, flags, fmt, td);
113 
114 	mutex_enter(&sc->fc->fc_mtx);
115 	if (sc->si_drv1 != NULL) {
116 		mutex_exit(&sc->fc->fc_mtx);
117 		return EBUSY;
118 	}
119 	/* set dummy value for allocation */
120 	sc->si_drv1 = (void *)-1;
121 	mutex_exit(&sc->fc->fc_mtx);
122 
123 	sc->si_drv1 = malloc(sizeof(struct fw_drv1), M_FW, M_WAITOK | M_ZERO);
124 	if (sc->si_drv1 == NULL)
125 		return ENOMEM;
126 
127 	d = (struct fw_drv1 *)sc->si_drv1;
128 	d->fc = sc->fc;
129 	STAILQ_INIT(&d->binds);
130 	STAILQ_INIT(&d->rq);
131 
132 	return err;
133 }
134 
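/*
 * Close the device: release all address bindings, stop any running
 * isochronous DMA, free the external buffers, drain pending receive
 * buffers, then free the per-open fw_drv1.
 */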
135 int
136 fw_close(dev_t dev, int flags, int fmt, struct lwp *td)
137 {
138 	struct firewire_softc *sc;
139 	struct firewire_comm *fc;
140 	struct fw_drv1 *d;
141 	struct fw_xfer *xfer;
142 	struct fw_bind *fwb;
143 	int err = 0;
144 
145 	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
146 	if (sc == NULL)
147 		return ENXIO;
148 
149 	if (DEV_FWMEM(dev))
150 		return fwmem_close(dev, flags, fmt, td);
151 
152 	d = (struct fw_drv1 *)sc->si_drv1;
153 	fc = d->fc;
154 
155 	/* remove binding */
156 	for (fwb = STAILQ_FIRST(&d->binds); fwb != NULL;
157 	    fwb = STAILQ_FIRST(&d->binds)) {
158 		fw_bindremove(fc, fwb);
159 		STAILQ_REMOVE_HEAD(&d->binds, chlist);
160 		fw_xferlist_remove(&fwb->xferlist);
161 		free(fwb, M_FW);
162 	}
163 	if (d->ir != NULL) {
164 		struct fw_xferq *ir = d->ir;
165 
166 		if ((ir->flag & FWXFERQ_OPEN) == 0)
167 			return EINVAL;
168 		if (ir->flag & FWXFERQ_RUNNING) {
169 			ir->flag &= ~FWXFERQ_RUNNING;
170 			fc->irx_disable(fc, ir->dmach);
171 		}
172 		/* free extbuf */
173 		fwdev_freebuf(ir);
174 		/* drain receiving buffer */
175 		for (xfer = STAILQ_FIRST(&ir->q); xfer != NULL;
176 		    xfer = STAILQ_FIRST(&ir->q)) {
177 			ir->queued--;
178 			STAILQ_REMOVE_HEAD(&ir->q, link);
179 
180 			xfer->resp = 0;
181 			fw_xfer_done(xfer);
182 		}
183 		ir->flag &=
184 		    ~(FWXFERQ_OPEN | FWXFERQ_MODEMASK | FWXFERQ_CHTAGMASK);
185 		d->ir = NULL;
186 
187 	}
188 	if (d->it != NULL) {
189 		struct fw_xferq *it = d->it;
190 
191 		if ((it->flag & FWXFERQ_OPEN) == 0)
192 			return EINVAL;
193 		if (it->flag & FWXFERQ_RUNNING) {
194 			it->flag &= ~FWXFERQ_RUNNING;
195 			fc->itx_disable(fc, it->dmach);
196 		}
197 		/* free extbuf */
198 		fwdev_freebuf(it);
199 		it->flag &=
200 		    ~(FWXFERQ_OPEN | FWXFERQ_MODEMASK | FWXFERQ_CHTAGMASK);
201 		d->it = NULL;
202 	}
203 	free(sc->si_drv1, M_FW);
204 	sc->si_drv1 = NULL;
205 
206 	return err;
207 }
208 
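/*
 * read(2) entry point.  With an isochronous receive queue configured
 * (FW_SRSTREAM) this copies received stream packets out of the DMA
 * buffer, recycling each chunk once all of its packets have been
 * consumed and sleeping up to a second when no data is available;
 * without one it falls back to fw_read_async().  fwmem minors are
 * handled through physio().
 */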
209 int
210 fw_read(dev_t dev, struct uio *uio, int ioflag)
211 {
212 	struct firewire_softc *sc;
213 	struct firewire_comm *fc;
214 	struct fw_drv1 *d;
215 	struct fw_xferq *ir;
216 	struct fw_pkt *fp;
217 	int err = 0, slept = 0;
218 
219 	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
220 	if (sc == NULL)
221 		return ENXIO;
222 
223 	if (DEV_FWMEM(dev))
224 		return physio(fw_strategy, NULL, dev, ioflag, minphys, uio);
225 
226 	d = (struct fw_drv1 *)sc->si_drv1;
227 	fc = d->fc;
228 	ir = d->ir;
229 
230 	if (ir == NULL)
231 		return fw_read_async(d, uio, ioflag);
232 
233 	if (ir->buf == NULL)
234 		return EIO;
235 
236 	mutex_enter(&fc->fc_mtx);
237 readloop:
238 	if (ir->stproc == NULL) {
239 		/* iso bulkxfer */
240 		ir->stproc = STAILQ_FIRST(&ir->stvalid);
241 		if (ir->stproc != NULL) {
242 			STAILQ_REMOVE_HEAD(&ir->stvalid, link);
243 			ir->queued = 0;
244 		}
245 	}
246 	if (ir->stproc == NULL) {
247 		/* no data available */
248 		if (slept == 0) {
249 			slept = 1;
250 			ir->flag |= FWXFERQ_WAKEUP;
251 			err = mtsleep(ir, FWPRI, "fw_read", hz, &fc->fc_mtx);
252 			ir->flag &= ~FWXFERQ_WAKEUP;
253 			if (err == 0)
254 				goto readloop;
255 		} else if (slept == 1)
256 			err = EIO;
257 		mutex_exit(&fc->fc_mtx);
258 		return err;
259 	} else if (ir->stproc != NULL) {
260 		/* iso bulkxfer */
261 		mutex_exit(&fc->fc_mtx);
262 		fp = (struct fw_pkt *)fwdma_v_addr(ir->buf,
263 		    ir->stproc->poffset + ir->queued);
264 		if (fc->irx_post != NULL)
265 			fc->irx_post(fc, fp->mode.ld);
266 		if (fp->mode.stream.len == 0)
267 			return EIO;
268 		err = uiomove((void *)fp,
269 		    fp->mode.stream.len + sizeof(uint32_t), uio);
270 		ir->queued++;
271 		if (ir->queued >= ir->bnpacket) {
272 			STAILQ_INSERT_TAIL(&ir->stfree, ir->stproc, link);
273 			fc->irx_enable(fc, ir->dmach);
274 			ir->stproc = NULL;
275 		}
276 		if (uio->uio_resid >= ir->psize) {
277 			slept = -1;
278 			mutex_enter(&fc->fc_mtx);
279 			goto readloop;
280 		}
281 	} else
282 		mutex_exit(&fc->fc_mtx);
283 	return err;
284 }
285 
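/*
 * write(2) entry point.  With an isochronous transmit queue configured
 * (FW_STSTREAM) this copies packets from userland into the DMA buffer
 * and kicks the transmit DMA whenever a chunk fills up; without one it
 * falls back to fw_write_async().  fwmem minors are handled through
 * physio().
 */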
286 int
287 fw_write(dev_t dev, struct uio *uio, int ioflag)
288 {
289 	struct firewire_softc *sc;
290 	struct firewire_comm *fc;
291 	struct fw_drv1 *d;
292 	struct fw_pkt *fp;
293 	struct fw_xferq *it;
294 	int slept = 0, err = 0;
295 
296 	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
297 	if (sc == NULL)
298 		return ENXIO;
299 
300 	if (DEV_FWMEM(dev))
301 		return physio(fw_strategy, NULL, dev, ioflag, minphys, uio);
302 
303 	d = (struct fw_drv1 *)sc->si_drv1;
304 	fc = d->fc;
305 	it = d->it;
306 
307 	if (it == NULL)
308 		return fw_write_async(d, uio, ioflag);
309 
310 	if (it->buf == NULL)
311 		return EIO;
312 
313 	mutex_enter(&fc->fc_mtx);
314 isoloop:
315 	if (it->stproc == NULL) {
316 		it->stproc = STAILQ_FIRST(&it->stfree);
317 		if (it->stproc != NULL) {
318 			STAILQ_REMOVE_HEAD(&it->stfree, link);
319 			it->queued = 0;
320 		} else if (slept == 0) {
321 			slept = 1;
322 #if 0   /* XXX to avoid lock recursion */
323 			err = fc->itx_enable(fc, it->dmach);
324 			if (err)
325 				goto out;
326 #endif
327 			err = mtsleep(it, FWPRI, "fw_write", hz, &fc->fc_mtx);
328 			if (err)
329 				goto out;
330 			goto isoloop;
331 		} else {
332 			err = EIO;
333 			goto out;
334 		}
335 	}
336 	mutex_exit(&fc->fc_mtx);
337 	fp = (struct fw_pkt *)fwdma_v_addr(it->buf,
338 	    it->stproc->poffset + it->queued);
339 	err = uiomove((void *)fp, sizeof(struct fw_isohdr), uio);
340 	if (err != 0)
341 		return err;
342 	err =
343 	    uiomove((void *)fp->mode.stream.payload, fp->mode.stream.len, uio);
344 	it->queued++;
345 	if (it->queued >= it->bnpacket) {
346 		STAILQ_INSERT_TAIL(&it->stvalid, it->stproc, link);
347 		it->stproc = NULL;
348 		err = fc->itx_enable(fc, it->dmach);
349 	}
350 	if (uio->uio_resid >= sizeof(struct fw_isohdr)) {
351 		slept = 0;
352 		mutex_enter(&fc->fc_mtx);
353 		goto isoloop;
354 	}
355 	return err;
356 
357 out:
358 	mutex_exit(&fc->fc_mtx);
359 	return err;
360 }
361 
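/*
 * ioctl(2) entry point: stream setup and query (FW_STSTREAM, FW_SRSTREAM,
 * FW_GTSTREAM, FW_GRSTREAM), isochronous buffer configuration (FW_SSTBUF,
 * FW_GSTBUF), asynchronous transactions (FW_ASYREQ), address bindings
 * (FW_SBINDADDR, FW_CBINDADDR), bus reset (FW_IBUSRST) and the "get"
 * commands for the device list, topology map and configuration ROMs.
 * Anything else is passed down to the bus driver via fc->ioctl.
 */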
362 int
363 fw_ioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *td)
364 {
365 	struct firewire_softc *sc;
366 	struct firewire_comm *fc;
367 	struct fw_drv1 *d;
368 	struct fw_device *fwdev;
369 	struct fw_bind *fwb;
370 	struct fw_xferq *ir, *it;
371 	struct fw_xfer *xfer;
372 	struct fw_pkt *fp;
373 	struct fw_devinfo *devinfo;
374 	struct fw_devlstreq *fwdevlst = (struct fw_devlstreq *)data;
375 	struct fw_asyreq *asyreq = (struct fw_asyreq *)data;
376 	struct fw_isochreq *ichreq = (struct fw_isochreq *)data;
377 	struct fw_isobufreq *ibufreq = (struct fw_isobufreq *)data;
378 	struct fw_asybindreq *bindreq = (struct fw_asybindreq *)data;
379 	struct fw_crom_buf *crom_buf = (struct fw_crom_buf *)data;
380 	int i, len, err = 0;
381 	void *ptr;
382 
383 	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
384 	if (sc == NULL)
385 		return ENXIO;
386 
387 	if (DEV_FWMEM(dev))
388 		return fwmem_ioctl(dev, cmd, data, flag, td);
389 
390 	if (!data)
391 		return EINVAL;
392 
393 	d = (struct fw_drv1 *)sc->si_drv1;
394 	fc = d->fc;
395 	ir = d->ir;
396 	it = d->it;
397 
398 	switch (cmd) {
399 	case FW_STSTREAM:
400 		if (it == NULL) {
401 			i = fw_open_isodma(fc, /* tx */1);
402 			if (i < 0) {
403 				err = EBUSY;
404 				break;
405 			}
406 			it = fc->it[i];
407 			err = fwdev_allocbuf(fc, it, &d->bufreq.tx);
408 			if (err) {
409 				it->flag &= ~FWXFERQ_OPEN;
410 				break;
411 			}
412 		}
413 		it->flag &= ~0xff;
414 		it->flag |= (0x3f & ichreq->ch);
415 		it->flag |= ((0x3 & ichreq->tag) << 6);
416 		d->it = it;
417 		break;
418 
419 	case FW_GTSTREAM:
420 		if (it != NULL) {
421 			ichreq->ch = it->flag & 0x3f;
422 			ichreq->tag = (it->flag >> 6) & 0x3;	/* bits 6-7 */
423 		} else
424 			err = EINVAL;
425 		break;
426 
427 	case FW_SRSTREAM:
428 		if (ir == NULL) {
429 			i = fw_open_isodma(fc, /* tx */0);
430 			if (i < 0) {
431 				err = EBUSY;
432 				break;
433 			}
434 			ir = fc->ir[i];
435 			err = fwdev_allocbuf(fc, ir, &d->bufreq.rx);
436 			if (err) {
437 				ir->flag &= ~FWXFERQ_OPEN;
438 				break;
439 			}
440 		}
441 		ir->flag &= ~0xff;
442 		ir->flag |= (0x3f & ichreq->ch);
443 		ir->flag |= ((0x3 & ichreq->tag) << 6);
444 		d->ir = ir;
445 		err = fc->irx_enable(fc, ir->dmach);
446 		break;
447 
448 	case FW_GRSTREAM:
449 		if (d->ir != NULL) {
450 			ichreq->ch = ir->flag & 0x3f;
451 			ichreq->tag = (ir->flag >> 6) & 0x3;	/* bits 6-7 */
452 		} else
453 			err = EINVAL;
454 		break;
455 
456 	case FW_SSTBUF:
457 		memcpy(&d->bufreq, ibufreq, sizeof(d->bufreq));
458 		break;
459 
460 	case FW_GSTBUF:
461 		memset(&ibufreq->rx, 0, sizeof(ibufreq->rx));
462 		if (ir != NULL) {
463 			ibufreq->rx.nchunk = ir->bnchunk;
464 			ibufreq->rx.npacket = ir->bnpacket;
465 			ibufreq->rx.psize = ir->psize;
466 		}
467 		memset(&ibufreq->tx, 0, sizeof(ibufreq->tx));
468 		if (it != NULL) {
469 			ibufreq->tx.nchunk = it->bnchunk;
470 			ibufreq->tx.npacket = it->bnpacket;
471 			ibufreq->tx.psize = it->psize;
472 		}
473 		break;
474 
475 	case FW_ASYREQ:
476 	{
477 		const struct tcode_info *tinfo;
478 		int pay_len = 0;
479 
480 		fp = &asyreq->pkt;
481 		tinfo = &fc->tcode[fp->mode.hdr.tcode];
482 
483 		if ((tinfo->flag & FWTI_BLOCK_ASY) != 0)
484 			pay_len = MAX(0, asyreq->req.len - tinfo->hdr_len);
485 
486 		xfer = fw_xfer_alloc_buf(M_FWXFER, pay_len, PAGE_SIZE/*XXX*/);
487 		if (xfer == NULL)
488 			return ENOMEM;
489 
490 		switch (asyreq->req.type) {
491 		case FWASREQNODE:
492 			break;
493 
494 		case FWASREQEUI:
495 			fwdev = fw_noderesolve_eui64(fc, &asyreq->req.dst.eui);
496 			if (fwdev == NULL) {
497 				aprint_error_dev(fc->bdev,
498 				    "cannot find node\n");
499 				err = EINVAL;
500 				goto out;
501 			}
502 			fp->mode.hdr.dst = FWLOCALBUS | fwdev->dst;
503 			break;
504 
505 		case FWASRESTL:
506 			/* XXX what's this? */
507 			break;
508 
509 		case FWASREQSTREAM:
510 			/* nothing to do */
511 			break;
512 		}
513 
514 		memcpy(&xfer->send.hdr, fp, tinfo->hdr_len);
515 		if (pay_len > 0)
516 			memcpy(xfer->send.payload, (char *)fp + tinfo->hdr_len,
517 			    pay_len);
518 		xfer->send.spd = asyreq->req.sped;
519 		xfer->hand = fw_xferwake;
520 
521 		if ((err = fw_asyreq(fc, -1, xfer)) != 0)
522 			goto out;
523 		if ((err = fw_xferwait(xfer)) != 0)
524 			goto out;
525 		if (xfer->resp != 0) {
526 			err = EIO;
527 			goto out;
528 		}
529 		if ((tinfo->flag & FWTI_TLABEL) == 0)
530 			goto out;
531 
532 		/* copy response */
533 		tinfo = &fc->tcode[xfer->recv.hdr.mode.hdr.tcode];
534 		if (xfer->recv.hdr.mode.hdr.tcode == FWTCODE_RRESB ||
535 		    xfer->recv.hdr.mode.hdr.tcode == FWTCODE_LRES) {
536 			pay_len = xfer->recv.pay_len;
537 			if (asyreq->req.len >=
538 			    xfer->recv.pay_len + tinfo->hdr_len)
539 				asyreq->req.len =
540 				    xfer->recv.pay_len + tinfo->hdr_len;
541 			else {
542 				err = EINVAL;
543 				pay_len = 0;
544 			}
545 		} else
546 			pay_len = 0;
547 		memcpy(fp, &xfer->recv.hdr, tinfo->hdr_len);
548 		memcpy((char *)fp + tinfo->hdr_len, xfer->recv.payload,
549 		    pay_len);
550 out:
551 		fw_xfer_free_buf(xfer);
552 		break;
553 	}
554 
555 	case FW_IBUSRST:
556 		fc->ibr(fc);
557 		break;
558 
559 	case FW_CBINDADDR:
560 		fwb = fw_bindlookup(fc, bindreq->start.hi, bindreq->start.lo);
561 		if (fwb == NULL) {
562 			err = EINVAL;
563 			break;
564 		}
565 		fw_bindremove(fc, fwb);
566 		STAILQ_REMOVE(&d->binds, fwb, fw_bind, chlist);
567 		fw_xferlist_remove(&fwb->xferlist);
568 		free(fwb, M_FW);
569 		break;
570 
571 	case FW_SBINDADDR:
572 		if (bindreq->len <= 0) {
573 			err = EINVAL;
574 			break;
575 		}
576 		if (bindreq->start.hi > 0xffff) {
577 			err = EINVAL;
578 			break;
579 		}
580 		fwb = (struct fw_bind *)malloc(sizeof(struct fw_bind),
581 		    M_FW, M_WAITOK);
582 		if (fwb == NULL) {
583 			err = ENOMEM;
584 			break;
585 		}
586 		fwb->start = ((u_int64_t)bindreq->start.hi << 32) |
587 		    bindreq->start.lo;
588 		fwb->end = fwb->start + bindreq->len;
589 		fwb->sc = (void *)d;
590 		STAILQ_INIT(&fwb->xferlist);
591 		err = fw_bindadd(fc, fwb);
592 		if (err == 0) {
593 			fw_xferlist_add(&fwb->xferlist, M_FWXFER,
594 			    /* XXX */
595 			    PAGE_SIZE, PAGE_SIZE, 5, fc, (void *)fwb, fw_hand);
596 			STAILQ_INSERT_TAIL(&d->binds, fwb, chlist);
597 		}
598 		break;
599 
600 	case FW_GDEVLST:
601 		i = len = 1;
602 		/* myself */
603 		devinfo = fwdevlst->dev;
604 		devinfo->dst = fc->nodeid;
605 		devinfo->status = 0;	/* XXX */
606 		devinfo->eui.hi = fc->eui.hi;
607 		devinfo->eui.lo = fc->eui.lo;
608 		STAILQ_FOREACH(fwdev, &fc->devices, link) {
609 			if (len < FW_MAX_DEVLST) {
610 				devinfo = &fwdevlst->dev[len++];
611 				devinfo->dst = fwdev->dst;
612 				devinfo->status =
613 				    (fwdev->status == FWDEVINVAL) ? 0 : 1;
614 				devinfo->eui.hi = fwdev->eui.hi;
615 				devinfo->eui.lo = fwdev->eui.lo;
616 			}
617 			i++;
618 		}
619 		fwdevlst->n = i;
620 		fwdevlst->info_len = len;
621 		break;
622 
623 	case FW_GTPMAP:
624 		memcpy(data, fc->topology_map,
625 		    (fc->topology_map->crc_len + 1) * 4);
626 		break;
627 
628 	case FW_GCROM:
629 		STAILQ_FOREACH(fwdev, &fc->devices, link)
630 			if (FW_EUI64_EQUAL(fwdev->eui, crom_buf->eui))
631 				break;
632 		if (fwdev == NULL) {
633 			if (!FW_EUI64_EQUAL(fc->eui, crom_buf->eui)) {
634 				err = FWNODE_INVAL;
635 				break;
636 			}
637 			/* myself */
638 			ptr = malloc(CROMSIZE, M_FW, M_WAITOK);
639 			len = CROMSIZE;
640 			for (i = 0; i < CROMSIZE/4; i++)
641 				((uint32_t *)ptr)[i] = ntohl(fc->config_rom[i]);
642 		} else {
643 			/* found */
644 			ptr = (void *)fwdev->csrrom;
645 			if (fwdev->rommax < CSRROMOFF)
646 				len = 0;
647 			else
648 				len = fwdev->rommax - CSRROMOFF + 4;
649 		}
650 		if (crom_buf->len < len)
651 			len = crom_buf->len;
652 		else
653 			crom_buf->len = len;
654 		err = copyout(ptr, crom_buf->ptr, len);
655 		if (fwdev == NULL)
656 			/* myself */
657 			free(ptr, M_FW);
658 		break;
659 
660 	default:
661 		err = fc->ioctl(dev, cmd, data, flag, td);
662 		break;
663 	}
664 	return err;
665 }
666 
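/*
 * poll(2) entry point: readable when packets are queued on the
 * isochronous receive queue; always reports writable (XXX).
 */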
667 int
668 fw_poll(dev_t dev, int events, struct lwp *td)
669 {
670 	struct firewire_softc *sc;
671 	struct fw_xferq *ir;
672 	int revents, tmp;
673 
674 	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
675 	if (sc == NULL)
676 		return ENXIO;
677 
678 	ir = ((struct fw_drv1 *)sc->si_drv1)->ir;
679 	revents = 0;
680 	tmp = POLLIN | POLLRDNORM;
681 	if ((events & tmp) && ir != NULL) {	/* no ir until FW_SRSTREAM */
682 		if (STAILQ_FIRST(&ir->q) != NULL)
683 			revents |= tmp;
684 		else
685 			selrecord(td, &ir->rsel);
686 	}
687 	tmp = POLLOUT | POLLWRNORM;
688 	if (events & tmp)
689 		/* XXX should be fixed */
690 		revents |= tmp;
691 
692 	return revents;
693 }
694 
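/*
 * mmap(2) is not supported.
 */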
695 paddr_t
696 fw_mmap(dev_t dev, off_t offset, int nproto)
697 {
698 	struct firewire_softc *sc;
699 
700 	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
701 	if (sc == NULL)
702 		return (paddr_t)-1;
703 
704 	return (paddr_t)-1;	/* mmap is not supported */
705 }
706 
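/*
 * Block-device strategy routine; only fwmem minors support block I/O,
 * anything else is failed with EOPNOTSUPP.
 */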
707 void
708 fw_strategy(struct bio *bp)
709 {
710 	struct firewire_softc *sc;
711 	dev_t dev = bp->bio_dev;
712 
713 	sc = device_lookup_private(&ieee1394if_cd, DEV2UNIT(dev));
714 	if (sc == NULL)
715 		return;
716 
717 	if (DEV_FWMEM(dev)) {
718 		fwmem_strategy(bp);
719 		return;
720 	}
721 
722 	bp->bio_error = EOPNOTSUPP;
723 	bp->bio_resid = bp->bio_bcount;
724 	biodone(bp);
725 }
726 
727 
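/*
 * Allocate the external isochronous buffer for a transmit or receive
 * queue: an array of fw_bulkxfer descriptors plus a multi-segment DMA
 * buffer of nchunk * npacket packets of psize bytes (rounded up to a
 * multiple of four), with all chunks initially on the free list.
 */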
728 static int
729 fwdev_allocbuf(struct firewire_comm *fc, struct fw_xferq *q,
730 	       struct fw_bufspec *b)
731 {
732 	int i;
733 
734 	if (q->flag & (FWXFERQ_RUNNING | FWXFERQ_EXTBUF))
735 		return EBUSY;
736 
737 	q->bulkxfer =
738 	    (struct fw_bulkxfer *)malloc(sizeof(struct fw_bulkxfer) * b->nchunk,
739 								M_FW, M_WAITOK);
740 	if (q->bulkxfer == NULL)
741 		return ENOMEM;
742 
743 	b->psize = roundup2(b->psize, sizeof(uint32_t));
744 	q->buf = fwdma_malloc_multiseg(fc, sizeof(uint32_t), b->psize,
745 	    b->nchunk * b->npacket, BUS_DMA_WAITOK);
746 
747 	if (q->buf == NULL) {
748 		free(q->bulkxfer, M_FW);
749 		q->bulkxfer = NULL;
750 		return ENOMEM;
751 	}
752 	q->bnchunk = b->nchunk;
753 	q->bnpacket = b->npacket;
754 	q->psize = (b->psize + 3) & ~3;
755 	q->queued = 0;
756 
757 	STAILQ_INIT(&q->stvalid);
758 	STAILQ_INIT(&q->stfree);
759 	STAILQ_INIT(&q->stdma);
760 	q->stproc = NULL;
761 
762 	for (i = 0 ; i < q->bnchunk; i++) {
763 		q->bulkxfer[i].poffset = i * q->bnpacket;
764 		q->bulkxfer[i].mbuf = NULL;
765 		STAILQ_INSERT_TAIL(&q->stfree, &q->bulkxfer[i], link);
766 	}
767 
768 	q->flag &= ~FWXFERQ_MODEMASK;
769 	q->flag |= FWXFERQ_STREAM;
770 	q->flag |= FWXFERQ_EXTBUF;
771 
772 	return 0;
773 }
774 
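/*
 * Release the external buffer and bulkxfer array set up by
 * fwdev_allocbuf() and restore the queue defaults.
 */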
775 static int
776 fwdev_freebuf(struct fw_xferq *q)
777 {
778 
779 	if (q->flag & FWXFERQ_EXTBUF) {
780 		if (q->buf != NULL)
781 			fwdma_free_multiseg(q->buf);
782 		q->buf = NULL;
783 		free(q->bulkxfer, M_FW);
784 		q->bulkxfer = NULL;
785 		q->flag &= ~FWXFERQ_EXTBUF;
786 		q->psize = 0;
787 		q->maxq = FWMAXQUEUE;
788 	}
789 	return 0;
790 }
791 
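/*
 * Read one asynchronous packet received on a bound address range:
 * sleep until an xfer is queued on d->rq, copy its header and payload
 * to userland, then recycle the xfer onto its binding's xferlist.
 */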
792 static int
793 fw_read_async(struct fw_drv1 *d, struct uio *uio, int ioflag)
794 {
795 	struct fw_xfer *xfer;
796 	struct fw_bind *fwb;
797 	struct fw_pkt *fp;
798 	const struct tcode_info *tinfo;
799 	int err = 0;
800 
801 	mutex_enter(&d->fc->fc_mtx);
802 	while ((xfer = STAILQ_FIRST(&d->rq)) == NULL && err == 0)
803 		err = mtsleep(&d->rq, FWPRI, "fwra", 0, &d->fc->fc_mtx);
804 
805 	if (err != 0) {
806 		mutex_exit(&d->fc->fc_mtx);
807 		return err;
808 	}
809 
810 	STAILQ_REMOVE_HEAD(&d->rq, link);
811 	mutex_exit(&d->fc->fc_mtx);
812 	fp = &xfer->recv.hdr;
813 #if 0 /* for GASP ?? */
814 	if (fc->irx_post != NULL)
815 		fc->irx_post(fc, fp->mode.ld);
816 #endif
817 	tinfo = &xfer->fc->tcode[fp->mode.hdr.tcode];
818 	err = uiomove((void *)fp, tinfo->hdr_len, uio);
819 	if (err)
820 		goto out;
821 	err = uiomove((void *)xfer->recv.payload, xfer->recv.pay_len, uio);
822 
823 out:
824 	/* recycle this xfer */
825 	fwb = (struct fw_bind *)xfer->sc;
826 	fw_xfer_unload(xfer);
827 	xfer->recv.pay_len = PAGE_SIZE;
828 	mutex_enter(&d->fc->fc_mtx);
829 	STAILQ_INSERT_TAIL(&fwb->xferlist, xfer, link);
830 	mutex_exit(&d->fc->fc_mtx);
831 	return err;
832 }
833 
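/*
 * Send one asynchronous packet assembled in userland: copy in the
 * header (the tcode in the first quadlet determines its length) and
 * payload, submit the transaction and wait for it to complete.  If a
 * response packet was received it is queued on d->rq so a subsequent
 * read can fetch it; otherwise the xfer is freed here.
 */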
834 static int
835 fw_write_async(struct fw_drv1 *d, struct uio *uio, int ioflag)
836 {
837 	struct fw_xfer *xfer;
838 	struct fw_pkt pkt;
839 	const struct tcode_info *tinfo;
840 	int err;
841 
842 	memset(&pkt, 0, sizeof(struct fw_pkt));
843 	if ((err = uiomove((void *)&pkt, sizeof(uint32_t), uio)))
844 		return err;
845 	tinfo = &d->fc->tcode[pkt.mode.hdr.tcode];
846 	if ((err = uiomove((char *)&pkt + sizeof(uint32_t),
847 	    tinfo->hdr_len - sizeof(uint32_t), uio)))
848 		return err;
849 
850 	if ((xfer = fw_xfer_alloc_buf(M_FWXFER, uio->uio_resid,
851 	    PAGE_SIZE/*XXX*/)) == NULL)
852 		return ENOMEM;
853 
854 	memcpy(&xfer->send.hdr, &pkt, sizeof(struct fw_pkt));
855 	xfer->send.pay_len = uio->uio_resid;
856 	if (uio->uio_resid > 0) {
857 		if ((err =
858 		    uiomove((void *)xfer->send.payload, uio->uio_resid, uio)))
859 			goto out;
860 	}
861 
862 	xfer->fc = d->fc;
863 	xfer->sc = NULL;
864 	xfer->hand = fw_xferwake;
865 	xfer->send.spd = 2 /* XXX */;
866 
867 	if ((err = fw_asyreq(xfer->fc, -1, xfer)))
868 		goto out;
869 
870 	if ((err = fw_xferwait(xfer)))
871 		goto out;
872 
873 	if (xfer->resp != 0) {
874 		err = xfer->resp;
875 		goto out;
876 	}
877 
878 	if (xfer->flag == FWXF_RCVD) {
879 		mutex_enter(&xfer->fc->fc_mtx);
880 		STAILQ_INSERT_TAIL(&d->rq, xfer, link);
881 		mutex_exit(&xfer->fc->fc_mtx);
882 		return 0;
883 	}
884 
885 out:
886 	fw_xfer_free(xfer);
887 	return err;
888 }
889 
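/*
 * Callback for packets arriving on an address range bound with
 * FW_SBINDADDR: queue the xfer on the owner's receive queue and wake
 * up any reader sleeping in fw_read_async().
 */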
890 static void
891 fw_hand(struct fw_xfer *xfer)
892 {
893 	struct fw_bind *fwb;
894 	struct fw_drv1 *d;
895 
896 	fwb = (struct fw_bind *)xfer->sc;
897 	d = (struct fw_drv1 *)fwb->sc;
898 	mutex_enter(&xfer->fc->fc_mtx);
899 	STAILQ_INSERT_TAIL(&d->rq, xfer, link);
900 	mutex_exit(&xfer->fc->fc_mtx);
901 	wakeup(&d->rq);
902 }
903