/*	$NetBSD: fwohci.c,v 1.88 2005/06/08 18:33:15 fair Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * IEEE1394 Open Host Controller Interface
 *	based on OHCI Specification 1.1 (January 6, 2000)
 * The first version to support the network interface part was written by
 * Atsushi Onoe <onoe@NetBSD.org>.
 */

/*
 * The first version to support the isochronous acquisition part was written
 * by HAYAKAWA Koichi <haya@NetBSD.org>.
49 */ 50 51 #include <sys/cdefs.h> 52 __KERNEL_RCSID(0, "$NetBSD: fwohci.c,v 1.88 2005/06/08 18:33:15 fair Exp $"); 53 54 #define FWOHCI_WAIT_DEBUG 1 55 56 #define FWOHCI_IT_BUFNUM 4 57 58 #include "opt_inet.h" 59 #include "fwiso.h" 60 61 #include <sys/param.h> 62 #include <sys/systm.h> 63 #include <sys/kthread.h> 64 #include <sys/socket.h> 65 #include <sys/callout.h> 66 #include <sys/device.h> 67 #include <sys/kernel.h> 68 #include <sys/malloc.h> 69 #include <sys/mbuf.h> 70 #include <sys/poll.h> 71 #include <sys/select.h> 72 73 #if __NetBSD_Version__ >= 105010000 74 #include <uvm/uvm_extern.h> 75 #else 76 #include <vm/vm.h> 77 #endif 78 79 #include <machine/bus.h> 80 #include <machine/intr.h> 81 82 #include <dev/ieee1394/ieee1394reg.h> 83 #include <dev/ieee1394/fwohcireg.h> 84 85 #include <dev/ieee1394/ieee1394var.h> 86 #include <dev/ieee1394/fwohcivar.h> 87 #include <dev/ieee1394/fwisovar.h> 88 89 static const char * const ieee1394_speeds[] = { IEEE1394_SPD_STRINGS }; 90 91 #if 0 92 static int fwohci_dnamem_alloc(struct fwohci_softc *sc, int size, 93 int alignment, bus_dmamap_t *mapp, caddr_t *kvap, int flags); 94 #endif 95 static void fwohci_create_event_thread(void *); 96 static void fwohci_thread_init(void *); 97 98 static void fwohci_event_thread(struct fwohci_softc *); 99 static void fwohci_hw_init(struct fwohci_softc *); 100 static void fwohci_power(int, void *); 101 static void fwohci_shutdown(void *); 102 103 static int fwohci_desc_alloc(struct fwohci_softc *); 104 static struct fwohci_desc *fwohci_desc_get(struct fwohci_softc *, int); 105 static void fwohci_desc_put(struct fwohci_softc *, struct fwohci_desc *, int); 106 107 static int fwohci_ctx_alloc(struct fwohci_softc *, struct fwohci_ctx **, 108 int, int, int); 109 static void fwohci_ctx_free(struct fwohci_softc *, struct fwohci_ctx *); 110 static void fwohci_ctx_init(struct fwohci_softc *, struct fwohci_ctx *); 111 112 static int fwohci_misc_dmabuf_alloc(bus_dma_tag_t, int, int, 113 bus_dma_segment_t *, bus_dmamap_t *, void **, const char *); 114 static void fwohci_misc_dmabuf_free(bus_dma_tag_t, int, int, 115 bus_dma_segment_t *, bus_dmamap_t *, caddr_t); 116 117 static struct fwohci_ir_ctx *fwohci_ir_ctx_construct(struct fwohci_softc *, 118 int, int, int, int, int, int); 119 static void fwohci_ir_ctx_destruct(struct fwohci_ir_ctx *); 120 121 static int fwohci_ir_buf_setup(struct fwohci_ir_ctx *); 122 static int fwohci_ir_init(struct fwohci_ir_ctx *); 123 static int fwohci_ir_start(struct fwohci_ir_ctx *); 124 static void fwohci_ir_intr(struct fwohci_softc *, struct fwohci_ir_ctx *); 125 static int fwohci_ir_stop(struct fwohci_ir_ctx *); 126 static int fwohci_ir_ctx_packetnum(struct fwohci_ir_ctx *); 127 #ifdef USEDRAIN 128 static int fwohci_ir_ctx_drain(struct fwohci_ir_ctx *); 129 #endif /* USEDRAIN */ 130 131 static int fwohci_it_desc_alloc(struct fwohci_it_ctx *); 132 static void fwohci_it_desc_free(struct fwohci_it_ctx *itc); 133 struct fwohci_it_ctx *fwohci_it_ctx_construct(struct fwohci_softc *, 134 int, int, int, int); 135 void fwohci_it_ctx_destruct(struct fwohci_it_ctx *); 136 int fwohci_it_ctx_writedata(ieee1394_it_tag_t, int, 137 struct ieee1394_it_datalist *, int); 138 static void fwohci_it_ctx_run(struct fwohci_it_ctx *); 139 int fwohci_it_ctx_flush(ieee1394_it_tag_t); 140 static void fwohci_it_intr(struct fwohci_softc *, struct fwohci_it_ctx *); 141 142 int fwohci_itd_construct(struct fwohci_it_ctx *, struct fwohci_it_dmabuf *, 143 int, struct fwohci_desc *, bus_addr_t, int, int, paddr_t); 144 void 
fwohci_itd_destruct(struct fwohci_it_dmabuf *); 145 static int fwohci_itd_dmabuf_alloc(struct fwohci_it_dmabuf *); 146 static void fwohci_itd_dmabuf_free(struct fwohci_it_dmabuf *); 147 int fwohci_itd_link(volatile struct fwohci_it_dmabuf *, 148 volatile struct fwohci_it_dmabuf *); 149 int fwohci_itd_unlink(volatile struct fwohci_it_dmabuf *); 150 int fwohci_itd_writedata(volatile struct fwohci_it_dmabuf *, int, 151 struct ieee1394_it_datalist *); 152 int fwohci_itd_isfilled(volatile struct fwohci_it_dmabuf *); 153 154 static int fwohci_buf_alloc(struct fwohci_softc *, struct fwohci_buf *); 155 static void fwohci_buf_free(struct fwohci_softc *, struct fwohci_buf *); 156 static void fwohci_buf_init_rx(struct fwohci_softc *); 157 static void fwohci_buf_start_rx(struct fwohci_softc *); 158 static void fwohci_buf_stop_tx(struct fwohci_softc *); 159 static void fwohci_buf_stop_rx(struct fwohci_softc *); 160 static void fwohci_buf_next(struct fwohci_softc *, struct fwohci_ctx *); 161 static int fwohci_buf_pktget(struct fwohci_softc *, struct fwohci_buf **, 162 caddr_t *, int); 163 static int fwohci_buf_input(struct fwohci_softc *, struct fwohci_ctx *, 164 struct fwohci_pkt *); 165 static int fwohci_buf_input_ppb(struct fwohci_softc *, struct fwohci_ctx *, 166 struct fwohci_pkt *); 167 168 static u_int8_t fwohci_phy_read(struct fwohci_softc *, u_int8_t); 169 static void fwohci_phy_write(struct fwohci_softc *, u_int8_t, u_int8_t); 170 static void fwohci_phy_busreset(struct fwohci_softc *); 171 static void fwohci_phy_input(struct fwohci_softc *, struct fwohci_pkt *); 172 173 static int fwohci_handler_set(struct fwohci_softc *, int, u_int32_t, u_int32_t, 174 u_int32_t, int (*)(struct fwohci_softc *, void *, struct fwohci_pkt *), 175 void *); 176 177 ieee1394_ir_tag_t fwohci_ir_ctx_set(struct device *, int, int, int, int, int); 178 int fwohci_ir_ctx_clear(struct device *, ieee1394_ir_tag_t); 179 int fwohci_ir_read(struct device *, ieee1394_ir_tag_t, struct uio *, 180 int, int); 181 int fwohci_ir_wait(struct device *, ieee1394_ir_tag_t, void *, char *name); 182 int fwohci_ir_select(struct device *, ieee1394_ir_tag_t, struct proc *); 183 184 185 186 ieee1394_it_tag_t fwohci_it_set(struct ieee1394_softc *, int, int); 187 static ieee1394_it_tag_t fwohci_it_ctx_set(struct fwohci_softc *, int, int, int); 188 int fwohci_it_ctx_clear(ieee1394_it_tag_t *); 189 190 static void fwohci_arrq_input(struct fwohci_softc *, struct fwohci_ctx *); 191 static void fwohci_arrs_input(struct fwohci_softc *, struct fwohci_ctx *); 192 static void fwohci_as_input(struct fwohci_softc *, struct fwohci_ctx *); 193 194 static int fwohci_at_output(struct fwohci_softc *, struct fwohci_ctx *, 195 struct fwohci_pkt *); 196 static void fwohci_at_done(struct fwohci_softc *, struct fwohci_ctx *, int); 197 static void fwohci_atrs_output(struct fwohci_softc *, int, struct fwohci_pkt *, 198 struct fwohci_pkt *); 199 200 static int fwohci_guidrom_init(struct fwohci_softc *); 201 static void fwohci_configrom_init(struct fwohci_softc *); 202 static int fwohci_configrom_input(struct fwohci_softc *, void *, 203 struct fwohci_pkt *); 204 static void fwohci_selfid_init(struct fwohci_softc *); 205 static int fwohci_selfid_input(struct fwohci_softc *); 206 207 static void fwohci_csr_init(struct fwohci_softc *); 208 static int fwohci_csr_input(struct fwohci_softc *, void *, 209 struct fwohci_pkt *); 210 211 static void fwohci_uid_collect(struct fwohci_softc *); 212 static void fwohci_uid_req(struct fwohci_softc *, int); 213 static int 
fwohci_uid_input(struct fwohci_softc *, void *, 214 struct fwohci_pkt *); 215 static int fwohci_uid_lookup(struct fwohci_softc *, const u_int8_t *); 216 static void fwohci_check_nodes(struct fwohci_softc *); 217 218 static int fwohci_if_inreg(struct device *, u_int32_t, u_int32_t, 219 void (*)(struct device *, struct mbuf *)); 220 static int fwohci_if_input(struct fwohci_softc *, void *, struct fwohci_pkt *); 221 static int fwohci_if_input_iso(struct fwohci_softc *, void *, struct fwohci_pkt *); 222 223 static int fwohci_if_output(struct device *, struct mbuf *, 224 void (*)(struct device *, struct mbuf *)); 225 static int fwohci_if_setiso(struct device *, u_int32_t, u_int32_t, u_int32_t, 226 void (*)(struct device *, struct mbuf *)); 227 static int fwohci_read(struct ieee1394_abuf *); 228 static int fwohci_write(struct ieee1394_abuf *); 229 static int fwohci_read_resp(struct fwohci_softc *, void *, struct fwohci_pkt *); 230 static int fwohci_write_ack(struct fwohci_softc *, void *, struct fwohci_pkt *); 231 static int fwohci_read_multi_resp(struct fwohci_softc *, void *, 232 struct fwohci_pkt *); 233 static int fwohci_inreg(struct ieee1394_abuf *, int); 234 static int fwohci_unreg(struct ieee1394_abuf *, int); 235 static int fwohci_parse_input(struct fwohci_softc *, void *, 236 struct fwohci_pkt *); 237 static int fwohci_submatch(struct device *, struct cfdata *, 238 const locdesc_t *, void *); 239 240 /* XXX */ 241 u_int16_t fwohci_cycletimer(struct fwohci_softc *); 242 u_int16_t fwohci_it_cycletimer(ieee1394_it_tag_t); 243 244 #ifdef FW_DEBUG 245 static void fwohci_show_intr(struct fwohci_softc *, u_int32_t); 246 static void fwohci_show_phypkt(struct fwohci_softc *, u_int32_t); 247 248 /* 1 is normal debug, 2 is verbose debug, 3 is complete (packet dumps). */ 249 250 #define DPRINTF(x) if (fwdebug) printf x 251 #define DPRINTFN(n,x) if (fwdebug>(n)) printf x 252 int fwdebug = 1; 253 #else 254 #define DPRINTF(x) 255 #define DPRINTFN(n,x) 256 #endif 257 258 #define OHCI_ITHEADER_SPD_MASK 0x00070000 259 #define OHCI_ITHEADER_SPD_BITPOS 16 260 #define OHCI_ITHEADER_TAG_MASK 0x0000c000 261 #define OHCI_ITHEADER_TAG_BITPOS 14 262 #define OHCI_ITHEADER_CHAN_MASK 0x00003f00 263 #define OHCI_ITHEADER_CHAN_BITPOS 8 264 #define OHCI_ITHEADER_TCODE_MASK 0x000000f0 265 #define OHCI_ITHEADER_TCODE_BITPOS 4 266 #define OHCI_ITHEADER_SY_MASK 0x0000000f 267 #define OHCI_ITHEADER_SY_BITPOS 0 268 269 #define OHCI_ITHEADER_VAL(fld, val) \ 270 (OHCI_ITHEADER_##fld##_MASK & ((val) << OHCI_ITHEADER_##fld##_BITPOS)) 271 272 int 273 fwohci_init(struct fwohci_softc *sc, const struct evcnt *ev) 274 { 275 int i; 276 u_int32_t val; 277 #if 0 278 int error; 279 #endif 280 281 evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ev, 282 sc->sc_sc1394.sc1394_dev.dv_xname, "intr"); 283 284 evcnt_attach_dynamic(&sc->sc_isocnt, EVCNT_TYPE_MISC, ev, 285 sc->sc_sc1394.sc1394_dev.dv_xname, "isorcvs"); 286 evcnt_attach_dynamic(&sc->sc_ascnt, EVCNT_TYPE_MISC, ev, 287 sc->sc_sc1394.sc1394_dev.dv_xname, "asrcvs"); 288 evcnt_attach_dynamic(&sc->sc_itintrcnt, EVCNT_TYPE_INTR, ev, 289 sc->sc_sc1394.sc1394_dev.dv_xname, "itintr"); 290 291 /* 292 * Wait for reset completion 293 */ 294 for (i = 0; i < OHCI_LOOP; i++) { 295 val = OHCI_CSR_READ(sc, OHCI_REG_HCControlClear); 296 if ((val & OHCI_HCControl_SoftReset) == 0) 297 break; 298 DELAY(10); 299 } 300 301 /* What dialect of OHCI is this device? 
302 */ 303 val = OHCI_CSR_READ(sc, OHCI_REG_Version); 304 aprint_normal("%s: OHCI %u.%u", sc->sc_sc1394.sc1394_dev.dv_xname, 305 OHCI_Version_GET_Version(val), OHCI_Version_GET_Revision(val)); 306 307 LIST_INIT(&sc->sc_nodelist); 308 309 if (fwohci_guidrom_init(sc) != 0) { 310 aprint_error("\n%s: fatal: no global UID ROM\n", 311 sc->sc_sc1394.sc1394_dev.dv_xname); 312 return -1; 313 } 314 315 aprint_normal(", %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", 316 sc->sc_sc1394.sc1394_guid[0], sc->sc_sc1394.sc1394_guid[1], 317 sc->sc_sc1394.sc1394_guid[2], sc->sc_sc1394.sc1394_guid[3], 318 sc->sc_sc1394.sc1394_guid[4], sc->sc_sc1394.sc1394_guid[5], 319 sc->sc_sc1394.sc1394_guid[6], sc->sc_sc1394.sc1394_guid[7]); 320 321 /* Get the maximum link speed and receive size 322 */ 323 val = OHCI_CSR_READ(sc, OHCI_REG_BusOptions); 324 sc->sc_sc1394.sc1394_link_speed = 325 OHCI_BITVAL(val, OHCI_BusOptions_LinkSpd); 326 if (sc->sc_sc1394.sc1394_link_speed < IEEE1394_SPD_MAX) { 327 aprint_normal(", %s", 328 ieee1394_speeds[sc->sc_sc1394.sc1394_link_speed]); 329 } else { 330 aprint_normal(", unknown speed %u", 331 sc->sc_sc1394.sc1394_link_speed); 332 } 333 334 /* MaxRec is encoded as log2(max_rec_octets)-1 335 */ 336 sc->sc_sc1394.sc1394_max_receive = 337 1 << (OHCI_BITVAL(val, OHCI_BusOptions_MaxRec) + 1); 338 aprint_normal(", %u max_rec", sc->sc_sc1394.sc1394_max_receive); 339 340 /* 341 * Count how many isochronous receive ctx we have. 342 */ 343 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntMaskSet, ~0); 344 val = OHCI_CSR_READ(sc, OHCI_REG_IsoRecvIntMaskClear); 345 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntMaskClear, ~0); 346 for (i = 0; val != 0; val >>= 1) { 347 if (val & 0x1) 348 i++; 349 } 350 sc->sc_isoctx = i; 351 aprint_normal(", %d ir_ctx", sc->sc_isoctx); 352 353 /* 354 * Count how many isochronous transmit ctx we have. 
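	 * As with the receive contexts above, the set of implemented
	 * contexts is probed by setting every bit in the interrupt mask
	 * and reading back which bits stick.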
	 */
	OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntMaskSet, ~0);
	val = OHCI_CSR_READ(sc, OHCI_REG_IsoXmitIntMaskClear);
	OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntMaskClear, ~0);
	for (i = 0; val != 0; val >>= 1) {
		if (val & 0x1) {
			i++;
			OHCI_SYNC_TX_DMA_WRITE(sc, i, OHCI_SUBREG_CommandPtr, 0);
		}
	}
	sc->sc_itctx = i;

	aprint_normal(", %d it_ctx", sc->sc_itctx);

	aprint_normal("\n");

#if 0
	error = fwohci_dnamem_alloc(sc, OHCI_CONFIG_SIZE,
	    OHCI_CONFIG_ALIGNMENT, &sc->sc_configrom_map,
	    (caddr_t *) &sc->sc_configrom, BUS_DMA_WAITOK|BUS_DMA_COHERENT);
	return error;
#endif

	sc->sc_dying = 0;
	sc->sc_nodeid = 0xffff;		/* invalid */

	sc->sc_sc1394.sc1394_callback.sc1394_read = fwohci_read;
	sc->sc_sc1394.sc1394_callback.sc1394_write = fwohci_write;
	sc->sc_sc1394.sc1394_callback.sc1394_inreg = fwohci_inreg;
	sc->sc_sc1394.sc1394_callback.sc1394_unreg = fwohci_unreg;

	kthread_create(fwohci_create_event_thread, sc);
	return 0;
}

static int
fwohci_if_setiso(struct device *self, u_int32_t channel, u_int32_t tag,
    u_int32_t direction, void (*handler)(struct device *, struct mbuf *))
{
	struct fwohci_softc *sc = (struct fwohci_softc *)self;
	int retval;
	int s;

	if (direction == 1) {
		return EIO;
	}

	s = splnet();
	retval = fwohci_handler_set(sc, IEEE1394_TCODE_STREAM_DATA,
	    channel, 1 << tag, 0, fwohci_if_input_iso, handler);
	splx(s);

	if (!retval) {
		printf("%s: dummy iso handler set\n",
		    sc->sc_sc1394.sc1394_dev.dv_xname);
	} else {
		printf("%s: cannot set dummy iso handler\n",
		    sc->sc_sc1394.sc1394_dev.dv_xname);
	}

	return retval;
}

int
fwohci_intr(void *arg)
{
	struct fwohci_softc * const sc = arg;
	int progress = 0;
	u_int32_t intmask, iso;

	for (;;) {
		intmask = OHCI_CSR_READ(sc, OHCI_REG_IntEventClear);

		/*
		 * On a bus reset, everything except bus reset gets
		 * cleared.  That can't get cleared until the selfid
		 * phase completes (which happens outside the
		 * interrupt routines).  So if just a bus reset is left
		 * in the mask and it's already in the sc_intmask,
		 * just return.
		 */

		if ((intmask == 0) ||
		    (progress && (intmask == OHCI_Int_BusReset) &&
		     (sc->sc_intmask & OHCI_Int_BusReset))) {
			if (progress)
				wakeup(fwohci_event_thread);
			return progress;
		}
		OHCI_CSR_WRITE(sc, OHCI_REG_IntEventClear,
		    intmask & ~OHCI_Int_BusReset);
#ifdef FW_DEBUG
		if (fwdebug > 1)
			fwohci_show_intr(sc, intmask);
#endif

		if (intmask & OHCI_Int_BusReset) {
			/*
			 * According to OHCI spec 6.1.1 "busReset",
			 * all asynchronous transmit must be stopped
			 * before clearing BusReset.  Moreover, the
			 * BusReset interrupt bit should not be cleared
			 * during the SelfID phase.  Thus we turn off
			 * the BusReset interrupt mask bit instead,
			 * until SelfID completion or SelfID timeout.
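			 * The event thread clears the latched BusReset
			 * event and sets the mask bit again once the
			 * SelfID packets have been processed (see
			 * fwohci_event_thread()).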
460 */ 461 intmask &= OHCI_Int_SelfIDComplete; 462 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskClear, 463 OHCI_Int_BusReset); 464 sc->sc_intmask = OHCI_Int_BusReset; 465 } 466 sc->sc_intmask |= intmask; 467 468 if (intmask & OHCI_Int_IsochTx) { 469 int i; 470 471 iso = OHCI_CSR_READ(sc, OHCI_REG_IsoXmitIntEventClear); 472 OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntEventClear, iso); 473 474 sc->sc_itintrcnt.ev_count++; 475 for (i = 0; i < sc->sc_itctx; ++i) { 476 if ((iso & (1<<i)) == 0 || 477 sc->sc_ctx_it[i] == NULL) { 478 continue; 479 } 480 481 fwohci_it_intr(sc, sc->sc_ctx_it[i]); 482 } 483 } 484 if (intmask & OHCI_Int_IsochRx) { 485 int i; 486 487 iso = OHCI_CSR_READ(sc, OHCI_REG_IsoRecvIntEventClear); 488 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntEventClear, iso); 489 490 for (i = 0; i < sc->sc_isoctx; i++) { 491 if ((iso & (1 << i)) 492 && sc->sc_ctx_ir[i] != NULL) { 493 iso &= ~(1 << i); 494 fwohci_ir_intr(sc, sc->sc_ctx_ir[i]); 495 } 496 } 497 498 if (iso == 0) { 499 sc->sc_intmask &= ~OHCI_Int_IsochRx; 500 } 501 sc->sc_iso |= iso; 502 } 503 504 if (!progress) { 505 sc->sc_intrcnt.ev_count++; 506 progress = 1; 507 } 508 } 509 } 510 511 static void 512 fwohci_create_event_thread(void *arg) 513 { 514 struct fwohci_softc *sc = arg; 515 516 if (kthread_create1(fwohci_thread_init, sc, &sc->sc_event_thread, "%s", 517 sc->sc_sc1394.sc1394_dev.dv_xname)) { 518 printf("%s: unable to create event thread\n", 519 sc->sc_sc1394.sc1394_dev.dv_xname); 520 panic("fwohci_create_event_thread"); 521 } 522 } 523 524 static void 525 fwohci_thread_init(void *arg) 526 { 527 struct fwohci_softc *sc = arg; 528 int i; 529 530 /* 531 * Allocate descriptors 532 */ 533 if (fwohci_desc_alloc(sc)) { 534 printf("%s: not enabling interrupts\n", 535 sc->sc_sc1394.sc1394_dev.dv_xname); 536 kthread_exit(1); 537 } 538 539 /* 540 * Enable Link Power 541 */ 542 543 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_LPS); 544 545 /* 546 * Allocate DMA Context 547 */ 548 fwohci_ctx_alloc(sc, &sc->sc_ctx_arrq, OHCI_BUF_ARRQ_CNT, 549 OHCI_CTX_ASYNC_RX_REQUEST, FWOHCI_CTX_ASYNC); 550 fwohci_ctx_alloc(sc, &sc->sc_ctx_arrs, OHCI_BUF_ARRS_CNT, 551 OHCI_CTX_ASYNC_RX_RESPONSE, FWOHCI_CTX_ASYNC); 552 fwohci_ctx_alloc(sc, &sc->sc_ctx_atrq, 0, OHCI_CTX_ASYNC_TX_REQUEST, 553 FWOHCI_CTX_ASYNC); 554 fwohci_ctx_alloc(sc, &sc->sc_ctx_atrs, 0, OHCI_CTX_ASYNC_TX_RESPONSE, 555 FWOHCI_CTX_ASYNC); 556 sc->sc_ctx_as = malloc(sizeof(sc->sc_ctx_as[0]) * sc->sc_isoctx, 557 M_DEVBUF, M_WAITOK); 558 if (sc->sc_ctx_as == NULL) { 559 printf("no asynchronous stream\n"); 560 } else { 561 for (i = 0; i < sc->sc_isoctx; i++) 562 sc->sc_ctx_as[i] = NULL; 563 } 564 sc->sc_ctx_ir = malloc(sizeof(sc->sc_ctx_ir[0]) * sc->sc_isoctx, 565 M_DEVBUF, M_WAITOK|M_ZERO); 566 sc->sc_ctx_it = malloc(sizeof(sc->sc_ctx_it[0]) * sc->sc_itctx, 567 M_DEVBUF, M_WAITOK|M_ZERO); 568 569 /* 570 * Allocate buffer for configuration ROM and SelfID buffer 571 */ 572 fwohci_buf_alloc(sc, &sc->sc_buf_cnfrom); 573 fwohci_buf_alloc(sc, &sc->sc_buf_selfid); 574 575 callout_init(&sc->sc_selfid_callout); 576 577 sc->sc_sc1394.sc1394_ifinreg = fwohci_if_inreg; 578 sc->sc_sc1394.sc1394_ifoutput = fwohci_if_output; 579 sc->sc_sc1394.sc1394_ifsetiso = fwohci_if_setiso; 580 581 sc->sc_sc1394.sc1394_ir_open = fwohci_ir_ctx_set; 582 sc->sc_sc1394.sc1394_ir_close = fwohci_ir_ctx_clear; 583 sc->sc_sc1394.sc1394_ir_read = fwohci_ir_read; 584 sc->sc_sc1394.sc1394_ir_wait = fwohci_ir_wait; 585 sc->sc_sc1394.sc1394_ir_select = fwohci_ir_select; 586 587 #if 0 588 sc->sc_sc1394.sc1394_it_open = fwohci_it_open; 589 
sc->sc_sc1394.sc1394_it_write = fwohci_it_write; 590 sc->sc_sc1394.sc1394_it_close = fwohci_it_close; 591 /* XXX: need fwohci_it_flush? */ 592 #endif 593 594 /* 595 * establish hooks for shutdown and suspend/resume 596 */ 597 sc->sc_shutdownhook = shutdownhook_establish(fwohci_shutdown, sc); 598 sc->sc_powerhook = powerhook_establish(fwohci_power, sc); 599 600 sc->sc_sc1394.sc1394_if = config_found(&sc->sc_sc1394.sc1394_dev, 601 /*XXXUNCONST*/ 602 __UNCONST("fw"), fwohci_print); 603 604 #if NFWISO > 0 605 fwiso_register_if(&sc->sc_sc1394); 606 #endif 607 608 /* Main loop. It's not coming back normally. */ 609 610 fwohci_event_thread(sc); 611 612 kthread_exit(0); 613 } 614 615 static void 616 fwohci_event_thread(struct fwohci_softc *sc) 617 { 618 int i, s; 619 u_int32_t intmask, iso; 620 621 s = splbio(); 622 623 /* 624 * Initialize hardware registers. 625 */ 626 627 fwohci_hw_init(sc); 628 629 /* Initial Bus Reset */ 630 fwohci_phy_busreset(sc); 631 splx(s); 632 633 while (!sc->sc_dying) { 634 s = splbio(); 635 intmask = sc->sc_intmask; 636 if (intmask == 0) { 637 tsleep(fwohci_event_thread, PZERO, "fwohciev", 0); 638 splx(s); 639 continue; 640 } 641 sc->sc_intmask = 0; 642 splx(s); 643 644 if (intmask & OHCI_Int_BusReset) { 645 fwohci_buf_stop_tx(sc); 646 if (sc->sc_uidtbl != NULL) { 647 free(sc->sc_uidtbl, M_DEVBUF); 648 sc->sc_uidtbl = NULL; 649 } 650 651 callout_reset(&sc->sc_selfid_callout, 652 OHCI_SELFID_TIMEOUT, 653 (void (*)(void *))fwohci_phy_busreset, sc); 654 sc->sc_nodeid = 0xffff; /* indicate invalid */ 655 sc->sc_rootid = 0; 656 sc->sc_irmid = IEEE1394_BCAST_PHY_ID; 657 } 658 if (intmask & OHCI_Int_SelfIDComplete) { 659 s = splbio(); 660 OHCI_CSR_WRITE(sc, OHCI_REG_IntEventClear, 661 OHCI_Int_BusReset); 662 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, 663 OHCI_Int_BusReset); 664 splx(s); 665 callout_stop(&sc->sc_selfid_callout); 666 if (fwohci_selfid_input(sc) == 0) { 667 fwohci_buf_start_rx(sc); 668 fwohci_uid_collect(sc); 669 } 670 } 671 if (intmask & OHCI_Int_ReqTxComplete) 672 fwohci_at_done(sc, sc->sc_ctx_atrq, 0); 673 if (intmask & OHCI_Int_RespTxComplete) 674 fwohci_at_done(sc, sc->sc_ctx_atrs, 0); 675 if (intmask & OHCI_Int_RQPkt) 676 fwohci_arrq_input(sc, sc->sc_ctx_arrq); 677 if (intmask & OHCI_Int_RSPkt) 678 fwohci_arrs_input(sc, sc->sc_ctx_arrs); 679 if (intmask & OHCI_Int_IsochRx) { 680 if (sc->sc_ctx_as == NULL) { 681 continue; 682 } 683 s = splbio(); 684 iso = sc->sc_iso; 685 sc->sc_iso = 0; 686 splx(s); 687 for (i = 0; i < sc->sc_isoctx; i++) { 688 if ((iso & (1 << i)) && 689 sc->sc_ctx_as[i] != NULL) { 690 fwohci_as_input(sc, sc->sc_ctx_as[i]); 691 sc->sc_ascnt.ev_count++; 692 } 693 } 694 } 695 } 696 } 697 698 #if 0 699 static int 700 fwohci_dnamem_alloc(struct fwohci_softc *sc, int size, int alignment, 701 bus_dmamap_t *mapp, caddr_t *kvap, int flags) 702 { 703 bus_dma_segment_t segs[1]; 704 int error, nsegs, steps; 705 706 steps = 0; 707 error = bus_dmamem_alloc(sc->sc_dmat, size, alignment, alignment, 708 segs, 1, &nsegs, flags); 709 if (error) 710 goto cleanup; 711 712 steps = 1; 713 error = bus_dmamem_map(sc->sc_dmat, segs, nsegs, segs[0].ds_len, 714 kvap, flags); 715 if (error) 716 goto cleanup; 717 718 if (error == 0) 719 error = bus_dmamap_create(sc->sc_dmat, size, 1, alignment, 720 size, flags, mapp); 721 if (error) 722 goto cleanup; 723 if (error == 0) 724 error = bus_dmamap_load(sc->sc_dmat, *mapp, *kvap, size, NULL, 725 flags); 726 if (error) 727 goto cleanup; 728 729 cleanup: 730 switch (steps) { 731 case 1: 732 bus_dmamem_free(sc->sc_dmat, segs, 
		    nsegs);
	}

	return error;
}
#endif

int
fwohci_print(void *aux, const char *pnp)
{
	char *name = aux;

	if (pnp)
		aprint_normal("%s at %s", name, pnp);

	return UNCONF;
}

static void
fwohci_hw_init(struct fwohci_softc *sc)
{
	int i;
	u_int32_t val;

	/*
	 * Software Reset.
	 */
	OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_SoftReset);
	for (i = 0; i < OHCI_LOOP; i++) {
		val = OHCI_CSR_READ(sc, OHCI_REG_HCControlClear);
		if ((val & OHCI_HCControl_SoftReset) == 0)
			break;
		DELAY(10);
	}

	OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_LPS);

	/*
	 * First, initialize CSRs whose values are undefined after reset
	 * to their default settings.
	 */
	val = OHCI_CSR_READ(sc, OHCI_REG_BusOptions);
	val |= OHCI_BusOptions_ISC | OHCI_BusOptions_CMC;
#if 0
	val |= OHCI_BusOptions_BMC | OHCI_BusOptions_IRMC;
#else
	val &= ~(OHCI_BusOptions_BMC | OHCI_BusOptions_IRMC);
#endif
	OHCI_CSR_WRITE(sc, OHCI_REG_BusOptions, val);
	for (i = 0; i < sc->sc_isoctx; i++) {
		OHCI_SYNC_RX_DMA_WRITE(sc, i, OHCI_SUBREG_ContextControlClear,
		    ~0);
	}
	for (i = 0; i < sc->sc_itctx; i++) {
		OHCI_SYNC_TX_DMA_WRITE(sc, i, OHCI_SUBREG_ContextControlClear,
		    ~0);
	}
	OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlClear, ~0);

	fwohci_configrom_init(sc);
	fwohci_selfid_init(sc);
	fwohci_buf_init_rx(sc);
	fwohci_csr_init(sc);

	/*
	 * Final CSR settings.
	 */
	OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlSet,
	    OHCI_LinkControl_CycleTimerEnable |
	    OHCI_LinkControl_RcvSelfID | OHCI_LinkControl_RcvPhyPkt);

	OHCI_CSR_WRITE(sc, OHCI_REG_ATRetries, 0x00000888);	/*XXX*/

	/* clear receive filter */
	OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskHiClear, ~0);
	OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskLoClear, ~0);
	OHCI_CSR_WRITE(sc, OHCI_REG_AsynchronousRequestFilterHiSet, 0x80000000);

	OHCI_CSR_WRITE(sc, OHCI_REG_HCControlClear,
	    OHCI_HCControl_NoByteSwapData | OHCI_HCControl_APhyEnhanceEnable);
#if BYTE_ORDER == BIG_ENDIAN
	OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet,
	    OHCI_HCControl_NoByteSwapData);
#endif

	OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskClear, ~0);
	OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_BusReset |
	    OHCI_Int_SelfIDComplete | OHCI_Int_IsochRx | OHCI_Int_IsochTx |
	    OHCI_Int_RSPkt | OHCI_Int_RQPkt | OHCI_Int_ARRS | OHCI_Int_ARRQ |
	    OHCI_Int_RespTxComplete | OHCI_Int_ReqTxComplete);
	OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_CycleTooLong |
	    OHCI_Int_UnrecoverableError | OHCI_Int_CycleInconsistent |
	    OHCI_Int_LockRespErr | OHCI_Int_PostedWriteErr);
	OHCI_CSR_WRITE(sc, OHCI_REG_IsoXmitIntMaskSet, ~0);
	OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntMaskSet, ~0);
	OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_MasterEnable);

	OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_LinkEnable);

	/*
	 * Start the receivers
	 */
	fwohci_buf_start_rx(sc);
}

static void
fwohci_power(int why, void *arg)
{
	struct fwohci_softc *sc = arg;
	int s;

	s = splbio();
	switch (why) {
	case PWR_SUSPEND:
	case PWR_STANDBY:
		fwohci_shutdown(sc);
		break;
	case PWR_RESUME:
		fwohci_hw_init(sc);
		fwohci_phy_busreset(sc);
		break;
	case PWR_SOFTSUSPEND:
	case PWR_SOFTSTANDBY:
	case PWR_SOFTRESUME:
		break;
	}
	splx(s);
}

static void
fwohci_shutdown(void *arg)
{
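	/*
	 * Quiesce the controller: disable interrupts, stop the transmit
	 * and receive DMA contexts, drop the bus manager and isochronous
	 * capability bits from BusOptions, force a bus reset, then
	 * disable the link and soft-reset the chip.
	 */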
struct fwohci_softc *sc = arg; 864 u_int32_t val; 865 866 callout_stop(&sc->sc_selfid_callout); 867 /* disable all interrupt */ 868 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskClear, OHCI_Int_MasterEnable); 869 fwohci_buf_stop_tx(sc); 870 fwohci_buf_stop_rx(sc); 871 val = OHCI_CSR_READ(sc, OHCI_REG_BusOptions); 872 val &= ~(OHCI_BusOptions_BMC | OHCI_BusOptions_ISC | 873 OHCI_BusOptions_CMC | OHCI_BusOptions_IRMC); 874 OHCI_CSR_WRITE(sc, OHCI_REG_BusOptions, val); 875 fwohci_phy_busreset(sc); 876 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlClear, OHCI_HCControl_LinkEnable); 877 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlClear, OHCI_HCControl_LPS); 878 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, OHCI_HCControl_SoftReset); 879 } 880 881 /* 882 * COMMON FUNCTIONS 883 */ 884 885 /* 886 * read the PHY Register. 887 */ 888 static u_int8_t 889 fwohci_phy_read(struct fwohci_softc *sc, u_int8_t reg) 890 { 891 int i; 892 u_int32_t val; 893 894 OHCI_CSR_WRITE(sc, OHCI_REG_PhyControl, 895 OHCI_PhyControl_RdReg | (reg << OHCI_PhyControl_RegAddr_BITPOS)); 896 for (i = 0; i < OHCI_LOOP; i++) { 897 if (OHCI_CSR_READ(sc, OHCI_REG_PhyControl) & 898 OHCI_PhyControl_RdDone) 899 break; 900 DELAY(10); 901 } 902 val = OHCI_CSR_READ(sc, OHCI_REG_PhyControl); 903 return (val & OHCI_PhyControl_RdData) >> OHCI_PhyControl_RdData_BITPOS; 904 } 905 906 /* 907 * write the PHY Register. 908 */ 909 static void 910 fwohci_phy_write(struct fwohci_softc *sc, u_int8_t reg, u_int8_t val) 911 { 912 int i; 913 914 OHCI_CSR_WRITE(sc, OHCI_REG_PhyControl, OHCI_PhyControl_WrReg | 915 (reg << OHCI_PhyControl_RegAddr_BITPOS) | 916 (val << OHCI_PhyControl_WrData_BITPOS)); 917 for (i = 0; i < OHCI_LOOP; i++) { 918 if (!(OHCI_CSR_READ(sc, OHCI_REG_PhyControl) & 919 OHCI_PhyControl_WrReg)) 920 break; 921 DELAY(10); 922 } 923 } 924 925 /* 926 * Initiate Bus Reset 927 */ 928 static void 929 fwohci_phy_busreset(struct fwohci_softc *sc) 930 { 931 int s; 932 u_int8_t val; 933 934 s = splbio(); 935 OHCI_CSR_WRITE(sc, OHCI_REG_IntEventClear, 936 OHCI_Int_BusReset | OHCI_Int_SelfIDComplete); 937 OHCI_CSR_WRITE(sc, OHCI_REG_IntMaskSet, OHCI_Int_BusReset); 938 callout_stop(&sc->sc_selfid_callout); 939 val = fwohci_phy_read(sc, 1); 940 val = (val & 0x80) | /* preserve RHB (force root) */ 941 0x40 | /* Initiate Bus Reset */ 942 0x3f; /* default GAP count */ 943 fwohci_phy_write(sc, 1, val); 944 splx(s); 945 } 946 947 /* 948 * PHY Packet 949 */ 950 static void 951 fwohci_phy_input(struct fwohci_softc *sc, struct fwohci_pkt *pkt) 952 { 953 u_int32_t val; 954 955 val = pkt->fp_hdr[1]; 956 if (val != ~pkt->fp_hdr[2]) { 957 if (val == 0 && ((*pkt->fp_trail & 0x001f0000) >> 16) == 958 OHCI_CTXCTL_EVENT_BUS_RESET) { 959 DPRINTFN(1, ("fwohci_phy_input: BusReset: 0x%08x\n", 960 pkt->fp_hdr[2])); 961 } else { 962 printf("%s: phy packet corrupted (0x%08x, 0x%08x)\n", 963 sc->sc_sc1394.sc1394_dev.dv_xname, val, 964 pkt->fp_hdr[2]); 965 } 966 return; 967 } 968 #ifdef FW_DEBUG 969 if (fwdebug > 1) 970 fwohci_show_phypkt(sc, val); 971 #endif 972 } 973 974 /* 975 * Descriptor for context DMA. 
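 * All descriptors are carved from a single DMA-mapped array (sc_desc);
 * sc_descmap is a bitmap from which fwohci_desc_get() allocates runs of
 * descriptors and to which fwohci_desc_put() returns them.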
976 */ 977 static int 978 fwohci_desc_alloc(struct fwohci_softc *sc) 979 { 980 int error, mapsize, dsize; 981 982 /* 983 * allocate descriptor buffer 984 */ 985 986 sc->sc_descsize = OHCI_BUF_ARRQ_CNT + OHCI_BUF_ARRS_CNT + 987 OHCI_BUF_ATRQ_CNT + OHCI_BUF_ATRS_CNT + 988 OHCI_BUF_IR_CNT * sc->sc_isoctx + 2; 989 dsize = sizeof(struct fwohci_desc) * sc->sc_descsize; 990 mapsize = howmany(sc->sc_descsize, NBBY); 991 sc->sc_descmap = malloc(mapsize, M_DEVBUF, M_WAITOK|M_ZERO); 992 993 if (sc->sc_descmap == NULL) { 994 printf("fwohci_desc_alloc: cannot get memory\n"); 995 return -1; 996 } 997 998 if ((error = bus_dmamem_alloc(sc->sc_dmat, dsize, PAGE_SIZE, 0, 999 &sc->sc_dseg, 1, &sc->sc_dnseg, 0)) != 0) { 1000 printf("%s: unable to allocate descriptor buffer, error = %d\n", 1001 sc->sc_sc1394.sc1394_dev.dv_xname, error); 1002 goto fail_0; 1003 } 1004 1005 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg, 1006 dsize, (caddr_t *)&sc->sc_desc, BUS_DMA_COHERENT | BUS_DMA_WAITOK)) 1007 != 0) { 1008 printf("%s: unable to map descriptor buffer, error = %d\n", 1009 sc->sc_sc1394.sc1394_dev.dv_xname, error); 1010 goto fail_1; 1011 } 1012 1013 if ((error = bus_dmamap_create(sc->sc_dmat, dsize, sc->sc_dnseg, 1014 dsize, 0, BUS_DMA_WAITOK, &sc->sc_ddmamap)) != 0) { 1015 printf("%s: unable to create descriptor buffer DMA map, " 1016 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname, error); 1017 goto fail_2; 1018 } 1019 1020 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_ddmamap, sc->sc_desc, 1021 dsize, NULL, BUS_DMA_WAITOK)) != 0) { 1022 printf("%s: unable to load descriptor buffer DMA map, " 1023 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname, error); 1024 goto fail_3; 1025 } 1026 1027 return 0; 1028 1029 fail_3: 1030 bus_dmamap_destroy(sc->sc_dmat, sc->sc_ddmamap); 1031 fail_2: 1032 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_desc, dsize); 1033 fail_1: 1034 bus_dmamem_free(sc->sc_dmat, &sc->sc_dseg, sc->sc_dnseg); 1035 fail_0: 1036 return error; 1037 } 1038 1039 static struct fwohci_desc * 1040 fwohci_desc_get(struct fwohci_softc *sc, int ndesc) 1041 { 1042 int i, n; 1043 1044 for (n = 0; n <= sc->sc_descsize - ndesc; n++) { 1045 for (i = 0; ; i++) { 1046 if (i == ndesc) { 1047 for (i = 0; i < ndesc; i++) 1048 setbit(sc->sc_descmap, n + i); 1049 return sc->sc_desc + n; 1050 } 1051 if (isset(sc->sc_descmap, n + i)) 1052 break; 1053 } 1054 } 1055 return NULL; 1056 } 1057 1058 static void 1059 fwohci_desc_put(struct fwohci_softc *sc, struct fwohci_desc *fd, int ndesc) 1060 { 1061 int i, n; 1062 1063 n = fd - sc->sc_desc; 1064 for (i = 0; i < ndesc; i++, n++) { 1065 #ifdef DIAGNOSTIC 1066 if (isclr(sc->sc_descmap, n)) 1067 panic("fwohci_desc_put: duplicated free"); 1068 #endif 1069 clrbit(sc->sc_descmap, n); 1070 } 1071 } 1072 1073 /* 1074 * Asynchronous/Isochronous Transmit/Receive Context 1075 */ 1076 static int 1077 fwohci_ctx_alloc(struct fwohci_softc *sc, struct fwohci_ctx **fcp, 1078 int bufcnt, int ctx, int ctxtype) 1079 { 1080 int i, error; 1081 struct fwohci_ctx *fc; 1082 struct fwohci_buf *fb; 1083 struct fwohci_desc *fd; 1084 #if DOUBLEBUF 1085 int buf2cnt; 1086 #endif 1087 1088 fc = malloc(sizeof(*fc), M_DEVBUF, M_WAITOK|M_ZERO); 1089 LIST_INIT(&fc->fc_handler); 1090 TAILQ_INIT(&fc->fc_buf); 1091 fc->fc_ctx = ctx; 1092 fc->fc_buffers = fb = malloc(sizeof(*fb) * bufcnt, M_DEVBUF, M_WAITOK|M_ZERO); 1093 fc->fc_bufcnt = bufcnt; 1094 #if DOUBLEBUF 1095 TAILQ_INIT(&fc->fc_buf2); /* for isochronous */ 1096 if (ctxtype == FWOHCI_CTX_ISO_MULTI) { 1097 buf2cnt = bufcnt/2; 1098 
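		/*
		 * Give half of the buffers to fc_buf and the other half
		 * to fc_buf2; fwohci_buf_next() swaps the two lists to
		 * implement double buffering for ISO_MULTI contexts.
		 */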
bufcnt -= buf2cnt; 1099 if (buf2cnt == 0) { 1100 panic("cannot allocate iso buffer"); 1101 } 1102 } 1103 #endif 1104 for (i = 0; i < bufcnt; i++, fb++) { 1105 if ((error = fwohci_buf_alloc(sc, fb)) != 0) 1106 goto fail; 1107 if ((fd = fwohci_desc_get(sc, 1)) == NULL) { 1108 error = ENOBUFS; 1109 goto fail; 1110 } 1111 fb->fb_desc = fd; 1112 fb->fb_daddr = sc->sc_ddmamap->dm_segs[0].ds_addr + 1113 ((caddr_t)fd - (caddr_t)sc->sc_desc); 1114 fd->fd_flags = OHCI_DESC_INPUT | OHCI_DESC_STATUS | 1115 OHCI_DESC_INTR_ALWAYS | OHCI_DESC_BRANCH; 1116 fd->fd_reqcount = fb->fb_dmamap->dm_segs[0].ds_len; 1117 fd->fd_data = fb->fb_dmamap->dm_segs[0].ds_addr; 1118 TAILQ_INSERT_TAIL(&fc->fc_buf, fb, fb_list); 1119 } 1120 #if DOUBLEBUF 1121 if (ctxtype == FWOHCI_CTX_ISO_MULTI) { 1122 for (i = bufcnt; i < bufcnt + buf2cnt; i++, fb++) { 1123 if ((error = fwohci_buf_alloc(sc, fb)) != 0) 1124 goto fail; 1125 if ((fd = fwohci_desc_get(sc, 1)) == NULL) { 1126 error = ENOBUFS; 1127 goto fail; 1128 } 1129 fb->fb_desc = fd; 1130 fb->fb_daddr = sc->sc_ddmamap->dm_segs[0].ds_addr + 1131 ((caddr_t)fd - (caddr_t)sc->sc_desc); 1132 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap, 1133 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc), 1134 BUS_DMASYNC_PREWRITE); 1135 fd->fd_flags = OHCI_DESC_INPUT | OHCI_DESC_STATUS | 1136 OHCI_DESC_INTR_ALWAYS | OHCI_DESC_BRANCH; 1137 fd->fd_reqcount = fb->fb_dmamap->dm_segs[0].ds_len; 1138 fd->fd_data = fb->fb_dmamap->dm_segs[0].ds_addr; 1139 TAILQ_INSERT_TAIL(&fc->fc_buf2, fb, fb_list); 1140 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap, 1141 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc), 1142 BUS_DMASYNC_POSTWRITE); 1143 } 1144 } 1145 #endif /* DOUBLEBUF */ 1146 fc->fc_type = ctxtype; 1147 *fcp = fc; 1148 return 0; 1149 1150 fail: 1151 while (i-- > 0) { 1152 fb--; 1153 if (fb->fb_desc) 1154 fwohci_desc_put(sc, fb->fb_desc, 1); 1155 fwohci_buf_free(sc, fb); 1156 } 1157 free(fc, M_DEVBUF); 1158 return error; 1159 } 1160 1161 static void 1162 fwohci_ctx_free(struct fwohci_softc *sc, struct fwohci_ctx *fc) 1163 { 1164 struct fwohci_buf *fb; 1165 struct fwohci_handler *fh; 1166 1167 #if DOUBLEBUF 1168 if ((fc->fc_type == FWOHCI_CTX_ISO_MULTI) && 1169 (TAILQ_FIRST(&fc->fc_buf) > TAILQ_FIRST(&fc->fc_buf2))) { 1170 struct fwohci_buf_s fctmp; 1171 1172 fctmp = fc->fc_buf; 1173 fc->fc_buf = fc->fc_buf2; 1174 fc->fc_buf2 = fctmp; 1175 } 1176 #endif 1177 while ((fh = LIST_FIRST(&fc->fc_handler)) != NULL) 1178 fwohci_handler_set(sc, fh->fh_tcode, fh->fh_key1, fh->fh_key2, 1179 fh->fh_key3, NULL, NULL); 1180 while ((fb = TAILQ_FIRST(&fc->fc_buf)) != NULL) { 1181 TAILQ_REMOVE(&fc->fc_buf, fb, fb_list); 1182 if (fb->fb_desc) 1183 fwohci_desc_put(sc, fb->fb_desc, 1); 1184 fwohci_buf_free(sc, fb); 1185 } 1186 #if DOUBLEBUF 1187 while ((fb = TAILQ_FIRST(&fc->fc_buf2)) != NULL) { 1188 TAILQ_REMOVE(&fc->fc_buf2, fb, fb_list); 1189 if (fb->fb_desc) 1190 fwohci_desc_put(sc, fb->fb_desc, 1); 1191 fwohci_buf_free(sc, fb); 1192 } 1193 #endif /* DOUBLEBUF */ 1194 free(fc->fc_buffers, M_DEVBUF); 1195 free(fc, M_DEVBUF); 1196 } 1197 1198 static void 1199 fwohci_ctx_init(struct fwohci_softc *sc, struct fwohci_ctx *fc) 1200 { 1201 struct fwohci_buf *fb, *nfb; 1202 struct fwohci_desc *fd; 1203 struct fwohci_handler *fh; 1204 int n; 1205 1206 for (fb = TAILQ_FIRST(&fc->fc_buf); fb != NULL; fb = nfb) { 1207 nfb = TAILQ_NEXT(fb, fb_list); 1208 fb->fb_off = 0; 1209 fd = fb->fb_desc; 1210 fd->fd_branch = (nfb != NULL) ? 
(nfb->fb_daddr | 1) : 0; 1211 fd->fd_rescount = fd->fd_reqcount; 1212 } 1213 1214 #if DOUBLEBUF 1215 for (fb = TAILQ_FIRST(&fc->fc_buf2); fb != NULL; fb = nfb) { 1216 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap, 1217 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc), 1218 BUS_DMASYNC_PREWRITE); 1219 nfb = TAILQ_NEXT(fb, fb_list); 1220 fb->fb_off = 0; 1221 fd = fb->fb_desc; 1222 fd->fd_branch = (nfb != NULL) ? (nfb->fb_daddr | 1) : 0; 1223 fd->fd_rescount = fd->fd_reqcount; 1224 bus_dmamap_sync(sc->sc_dmat, sc->sc_ddmamap, 1225 (caddr_t)fd - (caddr_t)sc->sc_desc, sizeof(struct fwohci_desc), 1226 BUS_DMASYNC_POSTWRITE); 1227 } 1228 #endif /* DOUBLEBUF */ 1229 1230 n = fc->fc_ctx; 1231 fb = TAILQ_FIRST(&fc->fc_buf); 1232 if (fc->fc_type != FWOHCI_CTX_ASYNC) { 1233 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_CommandPtr, 1234 fb->fb_daddr | 1); 1235 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlClear, 1236 OHCI_CTXCTL_RX_BUFFER_FILL | 1237 OHCI_CTXCTL_RX_CYCLE_MATCH_ENABLE | 1238 OHCI_CTXCTL_RX_MULTI_CHAN_MODE | 1239 OHCI_CTXCTL_RX_DUAL_BUFFER_MODE); 1240 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlSet, 1241 OHCI_CTXCTL_RX_ISOCH_HEADER); 1242 if (fc->fc_type == FWOHCI_CTX_ISO_MULTI) { 1243 OHCI_SYNC_RX_DMA_WRITE(sc, n, 1244 OHCI_SUBREG_ContextControlSet, 1245 OHCI_CTXCTL_RX_BUFFER_FILL); 1246 } 1247 fh = LIST_FIRST(&fc->fc_handler); 1248 1249 if (fh->fh_key1 == IEEE1394_ISO_CHANNEL_ANY) { 1250 OHCI_SYNC_RX_DMA_WRITE(sc, n, 1251 OHCI_SUBREG_ContextControlSet, 1252 OHCI_CTXCTL_RX_MULTI_CHAN_MODE); 1253 1254 /* Receive all the isochronous channels */ 1255 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskHiSet, 1256 0xffffffff); 1257 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskLoSet, 1258 0xffffffff); 1259 DPRINTF(("%s: CTXCTL 0x%08x\n", 1260 sc->sc_sc1394.sc1394_dev.dv_xname, 1261 OHCI_SYNC_RX_DMA_READ(sc, n, 1262 OHCI_SUBREG_ContextControlSet))); 1263 } 1264 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextMatch, 1265 (fh->fh_key2 << OHCI_CTXMATCH_TAG_BITPOS) | 1266 (fh->fh_key1 & IEEE1394_ISO_CHANNEL_MASK)); 1267 } else { 1268 OHCI_ASYNC_DMA_WRITE(sc, n, OHCI_SUBREG_CommandPtr, 1269 fb->fb_daddr | 1); 1270 } 1271 } 1272 1273 /* 1274 * DMA data buffer 1275 */ 1276 static int 1277 fwohci_buf_alloc(struct fwohci_softc *sc, struct fwohci_buf *fb) 1278 { 1279 int error; 1280 1281 if ((error = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 1282 PAGE_SIZE, &fb->fb_seg, 1, &fb->fb_nseg, BUS_DMA_WAITOK)) != 0) { 1283 printf("%s: unable to allocate buffer, error = %d\n", 1284 sc->sc_sc1394.sc1394_dev.dv_xname, error); 1285 goto fail_0; 1286 } 1287 1288 if ((error = bus_dmamem_map(sc->sc_dmat, &fb->fb_seg, 1289 fb->fb_nseg, PAGE_SIZE, &fb->fb_buf, BUS_DMA_WAITOK)) != 0) { 1290 printf("%s: unable to map buffer, error = %d\n", 1291 sc->sc_sc1394.sc1394_dev.dv_xname, error); 1292 goto fail_1; 1293 } 1294 1295 if ((error = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, fb->fb_nseg, 1296 PAGE_SIZE, 0, BUS_DMA_WAITOK, &fb->fb_dmamap)) != 0) { 1297 printf("%s: unable to create buffer DMA map, " 1298 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname, 1299 error); 1300 goto fail_2; 1301 } 1302 1303 if ((error = bus_dmamap_load(sc->sc_dmat, fb->fb_dmamap, 1304 fb->fb_buf, PAGE_SIZE, NULL, BUS_DMA_WAITOK)) != 0) { 1305 printf("%s: unable to load buffer DMA map, " 1306 "error = %d\n", sc->sc_sc1394.sc1394_dev.dv_xname, 1307 error); 1308 goto fail_3; 1309 } 1310 1311 return 0; 1312 1313 bus_dmamap_unload(sc->sc_dmat, fb->fb_dmamap); 1314 fail_3: 1315 bus_dmamap_destroy(sc->sc_dmat, 
fb->fb_dmamap); 1316 fail_2: 1317 bus_dmamem_unmap(sc->sc_dmat, fb->fb_buf, PAGE_SIZE); 1318 fail_1: 1319 bus_dmamem_free(sc->sc_dmat, &fb->fb_seg, fb->fb_nseg); 1320 fail_0: 1321 return error; 1322 } 1323 1324 static void 1325 fwohci_buf_free(struct fwohci_softc *sc, struct fwohci_buf *fb) 1326 { 1327 1328 bus_dmamap_unload(sc->sc_dmat, fb->fb_dmamap); 1329 bus_dmamap_destroy(sc->sc_dmat, fb->fb_dmamap); 1330 bus_dmamem_unmap(sc->sc_dmat, fb->fb_buf, PAGE_SIZE); 1331 bus_dmamem_free(sc->sc_dmat, &fb->fb_seg, fb->fb_nseg); 1332 } 1333 1334 static void 1335 fwohci_buf_init_rx(struct fwohci_softc *sc) 1336 { 1337 int i; 1338 1339 /* 1340 * Initialize for Asynchronous Receive Queue. 1341 */ 1342 fwohci_ctx_init(sc, sc->sc_ctx_arrq); 1343 fwohci_ctx_init(sc, sc->sc_ctx_arrs); 1344 1345 /* 1346 * Initialize for Isochronous Receive Queue. 1347 */ 1348 if (sc->sc_ctx_as != NULL) { 1349 for (i = 0; i < sc->sc_isoctx; i++) { 1350 if (sc->sc_ctx_as[i] != NULL) 1351 fwohci_ctx_init(sc, sc->sc_ctx_as[i]); 1352 } 1353 } 1354 } 1355 1356 static void 1357 fwohci_buf_start_rx(struct fwohci_softc *sc) 1358 { 1359 int i; 1360 1361 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_REQUEST, 1362 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN); 1363 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_RESPONSE, 1364 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN); 1365 if (sc->sc_ctx_as != NULL) { 1366 for (i = 0; i < sc->sc_isoctx; i++) { 1367 if (sc->sc_ctx_as[i] != NULL) 1368 OHCI_SYNC_RX_DMA_WRITE(sc, i, 1369 OHCI_SUBREG_ContextControlSet, 1370 OHCI_CTXCTL_RUN); 1371 } 1372 } 1373 } 1374 1375 static void 1376 fwohci_buf_stop_tx(struct fwohci_softc *sc) 1377 { 1378 int i; 1379 1380 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_TX_REQUEST, 1381 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN); 1382 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_TX_RESPONSE, 1383 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN); 1384 1385 /* 1386 * Make sure the transmitter is stopped. 1387 */ 1388 for (i = 0; i < OHCI_LOOP; i++) { 1389 DELAY(10); 1390 if (OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_REQUEST, 1391 OHCI_SUBREG_ContextControlClear) & OHCI_CTXCTL_ACTIVE) 1392 continue; 1393 if (OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_RESPONSE, 1394 OHCI_SUBREG_ContextControlClear) & OHCI_CTXCTL_ACTIVE) 1395 continue; 1396 break; 1397 } 1398 1399 /* 1400 * Initialize for Asynchronous Transmit Queue. 
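	 * (The non-zero final argument asks fwohci_at_done() to reclaim
	 * descriptors that are still pending on the stopped contexts.)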
1401 */ 1402 fwohci_at_done(sc, sc->sc_ctx_atrq, 1); 1403 fwohci_at_done(sc, sc->sc_ctx_atrs, 1); 1404 } 1405 1406 static void 1407 fwohci_buf_stop_rx(struct fwohci_softc *sc) 1408 { 1409 int i; 1410 1411 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_REQUEST, 1412 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN); 1413 OHCI_ASYNC_DMA_WRITE(sc, OHCI_CTX_ASYNC_RX_RESPONSE, 1414 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN); 1415 for (i = 0; i < sc->sc_isoctx; i++) { 1416 OHCI_SYNC_RX_DMA_WRITE(sc, i, 1417 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN); 1418 } 1419 } 1420 1421 static void 1422 fwohci_buf_next(struct fwohci_softc *sc, struct fwohci_ctx *fc) 1423 { 1424 struct fwohci_buf *fb, *tfb; 1425 1426 #if DOUBLEBUF 1427 if (fc->fc_type != FWOHCI_CTX_ISO_MULTI) { 1428 #endif 1429 while ((fb = TAILQ_FIRST(&fc->fc_buf)) != NULL) { 1430 if (fc->fc_type) { 1431 if (fb->fb_off == 0) 1432 break; 1433 } else { 1434 if (fb->fb_off != fb->fb_desc->fd_reqcount || 1435 fb->fb_desc->fd_rescount != 0) 1436 break; 1437 } 1438 TAILQ_REMOVE(&fc->fc_buf, fb, fb_list); 1439 fb->fb_desc->fd_rescount = fb->fb_desc->fd_reqcount; 1440 fb->fb_off = 0; 1441 fb->fb_desc->fd_branch = 0; 1442 tfb = TAILQ_LAST(&fc->fc_buf, fwohci_buf_s); 1443 tfb->fb_desc->fd_branch = fb->fb_daddr | 1; 1444 TAILQ_INSERT_TAIL(&fc->fc_buf, fb, fb_list); 1445 } 1446 #if DOUBLEBUF 1447 } else { 1448 struct fwohci_buf_s fctmp; 1449 1450 /* cleaning buffer */ 1451 for (fb = TAILQ_FIRST(&fc->fc_buf); fb != NULL; 1452 fb = TAILQ_NEXT(fb, fb_list)) { 1453 fb->fb_off = 0; 1454 fb->fb_desc->fd_rescount = fb->fb_desc->fd_reqcount; 1455 } 1456 1457 /* rotating buffer */ 1458 fctmp = fc->fc_buf; 1459 fc->fc_buf = fc->fc_buf2; 1460 fc->fc_buf2 = fctmp; 1461 } 1462 #endif 1463 } 1464 1465 static int 1466 fwohci_buf_pktget(struct fwohci_softc *sc, struct fwohci_buf **fbp, caddr_t *pp, 1467 int len) 1468 { 1469 struct fwohci_buf *fb; 1470 struct fwohci_desc *fd; 1471 int bufend; 1472 1473 fb = *fbp; 1474 again: 1475 fd = fb->fb_desc; 1476 DPRINTFN(1, ("fwohci_buf_pktget: desc %ld, off %d, req %d, res %d," 1477 " len %d, avail %d\n", (long)(fd - sc->sc_desc), fb->fb_off, 1478 fd->fd_reqcount, fd->fd_rescount, len, 1479 fd->fd_reqcount - fd->fd_rescount - fb->fb_off)); 1480 bufend = fd->fd_reqcount - fd->fd_rescount; 1481 if (fb->fb_off >= bufend) { 1482 DPRINTFN(5, ("buf %x finish req %d res %d off %d ", 1483 fb->fb_desc->fd_data, fd->fd_reqcount, fd->fd_rescount, 1484 fb->fb_off)); 1485 if (fd->fd_rescount == 0) { 1486 *fbp = fb = TAILQ_NEXT(fb, fb_list); 1487 if (fb != NULL) 1488 goto again; 1489 } 1490 return 0; 1491 } 1492 if (fb->fb_off + len > bufend) 1493 len = bufend - fb->fb_off; 1494 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, fb->fb_off, len, 1495 BUS_DMASYNC_POSTREAD); 1496 *pp = fb->fb_buf + fb->fb_off; 1497 fb->fb_off += roundup(len, 4); 1498 return len; 1499 } 1500 1501 static int 1502 fwohci_buf_input(struct fwohci_softc *sc, struct fwohci_ctx *fc, 1503 struct fwohci_pkt *pkt) 1504 { 1505 caddr_t p; 1506 struct fwohci_buf *fb; 1507 int len, count, i; 1508 #ifdef FW_DEBUG 1509 int tlabel; 1510 #endif 1511 1512 memset(pkt, 0, sizeof(*pkt)); 1513 pkt->fp_uio.uio_iov = pkt->fp_iov; 1514 pkt->fp_uio.uio_rw = UIO_WRITE; 1515 pkt->fp_uio.uio_segflg = UIO_SYSSPACE; 1516 1517 /* get first quadlet */ 1518 fb = TAILQ_FIRST(&fc->fc_buf); 1519 count = 4; 1520 len = fwohci_buf_pktget(sc, &fb, &p, count); 1521 if (len <= 0) { 1522 DPRINTFN(1, ("fwohci_buf_input: no input for %d\n", 1523 fc->fc_ctx)); 1524 return 0; 1525 } 1526 pkt->fp_hdr[0] = 
*(u_int32_t *)p; 1527 pkt->fp_tcode = (pkt->fp_hdr[0] & 0x000000f0) >> 4; 1528 switch (pkt->fp_tcode) { 1529 case IEEE1394_TCODE_WRITE_REQ_QUAD: 1530 case IEEE1394_TCODE_READ_RESP_QUAD: 1531 pkt->fp_hlen = 12; 1532 pkt->fp_dlen = 4; 1533 break; 1534 case IEEE1394_TCODE_READ_REQ_BLOCK: 1535 pkt->fp_hlen = 16; 1536 pkt->fp_dlen = 0; 1537 break; 1538 case IEEE1394_TCODE_WRITE_REQ_BLOCK: 1539 case IEEE1394_TCODE_READ_RESP_BLOCK: 1540 case IEEE1394_TCODE_LOCK_REQ: 1541 case IEEE1394_TCODE_LOCK_RESP: 1542 pkt->fp_hlen = 16; 1543 break; 1544 case IEEE1394_TCODE_STREAM_DATA: 1545 #ifdef DIAGNOSTIC 1546 if (fc->fc_type == FWOHCI_CTX_ISO_MULTI) 1547 #endif 1548 { 1549 pkt->fp_hlen = 4; 1550 pkt->fp_dlen = pkt->fp_hdr[0] >> 16; 1551 DPRINTFN(5, ("[%d]", pkt->fp_dlen)); 1552 break; 1553 } 1554 #ifdef DIAGNOSTIC 1555 else { 1556 printf("fwohci_buf_input: bad tcode: STREAM_DATA\n"); 1557 return 0; 1558 } 1559 #endif 1560 default: 1561 pkt->fp_hlen = 12; 1562 pkt->fp_dlen = 0; 1563 break; 1564 } 1565 1566 /* get header */ 1567 while (count < pkt->fp_hlen) { 1568 len = fwohci_buf_pktget(sc, &fb, &p, pkt->fp_hlen - count); 1569 if (len == 0) { 1570 printf("fwohci_buf_input: malformed input 1: %d\n", 1571 pkt->fp_hlen - count); 1572 return 0; 1573 } 1574 memcpy((caddr_t)pkt->fp_hdr + count, p, len); 1575 count += len; 1576 } 1577 if (pkt->fp_hlen == 16 && 1578 pkt->fp_tcode != IEEE1394_TCODE_READ_REQ_BLOCK) 1579 pkt->fp_dlen = pkt->fp_hdr[3] >> 16; 1580 #ifdef FW_DEBUG 1581 tlabel = (pkt->fp_hdr[0] & 0x0000fc00) >> 10; 1582 #endif 1583 DPRINTFN(1, ("fwohci_buf_input: tcode=0x%x, tlabel=0x%x, hlen=%d, " 1584 "dlen=%d\n", pkt->fp_tcode, tlabel, pkt->fp_hlen, pkt->fp_dlen)); 1585 1586 /* get data */ 1587 count = 0; 1588 i = 0; 1589 while (count < pkt->fp_dlen) { 1590 len = fwohci_buf_pktget(sc, &fb, 1591 (caddr_t *)&pkt->fp_iov[i].iov_base, 1592 pkt->fp_dlen - count); 1593 if (len == 0) { 1594 printf("fwohci_buf_input: malformed input 2: %d\n", 1595 pkt->fp_dlen - count); 1596 return 0; 1597 } 1598 pkt->fp_iov[i++].iov_len = len; 1599 count += len; 1600 } 1601 pkt->fp_uio.uio_iovcnt = i; 1602 pkt->fp_uio.uio_resid = count; 1603 1604 /* get trailer */ 1605 len = fwohci_buf_pktget(sc, &fb, (caddr_t *)&pkt->fp_trail, 1606 sizeof(*pkt->fp_trail)); 1607 if (len <= 0) { 1608 printf("fwohci_buf_input: malformed input 3: %d\n", 1609 pkt->fp_hlen - count); 1610 return 0; 1611 } 1612 return 1; 1613 } 1614 1615 static int 1616 fwohci_buf_input_ppb(struct fwohci_softc *sc, struct fwohci_ctx *fc, 1617 struct fwohci_pkt *pkt) 1618 { 1619 caddr_t p; 1620 int len; 1621 struct fwohci_buf *fb; 1622 struct fwohci_desc *fd; 1623 1624 if (fc->fc_type == FWOHCI_CTX_ISO_MULTI) { 1625 return fwohci_buf_input(sc, fc, pkt); 1626 } 1627 1628 memset(pkt, 0, sizeof(*pkt)); 1629 pkt->fp_uio.uio_iov = pkt->fp_iov; 1630 pkt->fp_uio.uio_rw = UIO_WRITE; 1631 pkt->fp_uio.uio_segflg = UIO_SYSSPACE; 1632 1633 for (fb = TAILQ_FIRST(&fc->fc_buf); ; fb = TAILQ_NEXT(fb, fb_list)) { 1634 if (fb == NULL) 1635 return 0; 1636 if (fb->fb_off == 0) 1637 break; 1638 } 1639 fd = fb->fb_desc; 1640 len = fd->fd_reqcount - fd->fd_rescount; 1641 if (len == 0) 1642 return 0; 1643 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, fb->fb_off, len, 1644 BUS_DMASYNC_POSTREAD); 1645 1646 p = fb->fb_buf; 1647 fb->fb_off += roundup(len, 4); 1648 if (len < 8) { 1649 printf("fwohci_buf_input_ppb: malformed input 1: %d\n", len); 1650 return 0; 1651 } 1652 1653 /* 1654 * get trailer first, may be bogus data unless status update 1655 * in descriptor is set. 
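	 * The xferStatus word saved in the descriptor is therefore merged
	 * into the upper 16 bits of the trailer quadlet just below.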
1656 */ 1657 pkt->fp_trail = (u_int32_t *)p; 1658 *pkt->fp_trail = (*pkt->fp_trail & 0xffff) | (fd->fd_status << 16); 1659 pkt->fp_hdr[0] = ((u_int32_t *)p)[1]; 1660 pkt->fp_tcode = (pkt->fp_hdr[0] & 0x000000f0) >> 4; 1661 #ifdef DIAGNOSTIC 1662 if (pkt->fp_tcode != IEEE1394_TCODE_STREAM_DATA) { 1663 printf("fwohci_buf_input_ppb: bad tcode: 0x%x\n", 1664 pkt->fp_tcode); 1665 return 0; 1666 } 1667 #endif 1668 pkt->fp_hlen = 4; 1669 pkt->fp_dlen = pkt->fp_hdr[0] >> 16; 1670 p += 8; 1671 len -= 8; 1672 if (pkt->fp_dlen != len) { 1673 printf("fwohci_buf_input_ppb: malformed input 2: %d != %d\n", 1674 pkt->fp_dlen, len); 1675 return 0; 1676 } 1677 DPRINTFN(1, ("fwohci_buf_input_ppb: tcode=0x%x, hlen=%d, dlen=%d\n", 1678 pkt->fp_tcode, pkt->fp_hlen, pkt->fp_dlen)); 1679 pkt->fp_iov[0].iov_base = p; 1680 pkt->fp_iov[0].iov_len = len; 1681 pkt->fp_uio.uio_iovcnt = 0; 1682 pkt->fp_uio.uio_resid = len; 1683 return 1; 1684 } 1685 1686 static int 1687 fwohci_handler_set(struct fwohci_softc *sc, 1688 int tcode, u_int32_t key1, u_int32_t key2, u_int32_t key3, 1689 int (*handler)(struct fwohci_softc *, void *, struct fwohci_pkt *), 1690 void *arg) 1691 { 1692 struct fwohci_ctx *fc; 1693 struct fwohci_handler *fh; 1694 u_int64_t addr, naddr; 1695 u_int32_t off; 1696 int i, j; 1697 1698 if (tcode == IEEE1394_TCODE_STREAM_DATA && 1699 (((key1 & OHCI_ASYNC_STREAM) && sc->sc_ctx_as != NULL) 1700 || (key1 & OHCI_ASYNC_STREAM) == 0)) { 1701 int isasync = key1 & OHCI_ASYNC_STREAM; 1702 1703 key1 = key1 & IEEE1394_ISO_CHANNEL_ANY ? 1704 IEEE1394_ISO_CHANNEL_ANY : (key1 & IEEE1394_ISOCH_MASK); 1705 if (key1 & IEEE1394_ISO_CHANNEL_ANY) { 1706 printf("%s: key changed to %x\n", 1707 sc->sc_sc1394.sc1394_dev.dv_xname, key1); 1708 } 1709 j = sc->sc_isoctx; 1710 fh = NULL; 1711 1712 for (i = 0; i < sc->sc_isoctx; i++) { 1713 if ((fc = sc->sc_ctx_as[i]) == NULL) { 1714 if (j == sc->sc_isoctx) 1715 j = i; 1716 continue; 1717 } 1718 fh = LIST_FIRST(&fc->fc_handler); 1719 if (fh->fh_tcode == tcode && 1720 fh->fh_key1 == key1 && fh->fh_key2 == key2) 1721 break; 1722 fh = NULL; 1723 } 1724 if (fh == NULL) { 1725 if (handler == NULL) 1726 return 0; 1727 if (j == sc->sc_isoctx) { 1728 DPRINTF(("fwohci_handler_set: no more free " 1729 "context\n")); 1730 return ENOMEM; 1731 } 1732 if ((fc = sc->sc_ctx_as[j]) == NULL) { 1733 fwohci_ctx_alloc(sc, &fc, OHCI_BUF_IR_CNT, j, 1734 isasync ? FWOHCI_CTX_ISO_SINGLE : 1735 FWOHCI_CTX_ISO_MULTI); 1736 sc->sc_ctx_as[j] = fc; 1737 } 1738 } 1739 #ifdef FW_DEBUG 1740 if (fh == NULL && handler != NULL) { 1741 printf("use ir context %d\n", j); 1742 } else if (fh != NULL && handler == NULL) { 1743 printf("remove ir context %d\n", i); 1744 } 1745 #endif 1746 } else { 1747 switch (tcode) { 1748 case IEEE1394_TCODE_WRITE_REQ_QUAD: 1749 case IEEE1394_TCODE_WRITE_REQ_BLOCK: 1750 case IEEE1394_TCODE_READ_REQ_QUAD: 1751 case IEEE1394_TCODE_READ_REQ_BLOCK: 1752 case IEEE1394_TCODE_LOCK_REQ: 1753 fc = sc->sc_ctx_arrq; 1754 break; 1755 case IEEE1394_TCODE_WRITE_RESP: 1756 case IEEE1394_TCODE_READ_RESP_QUAD: 1757 case IEEE1394_TCODE_READ_RESP_BLOCK: 1758 case IEEE1394_TCODE_LOCK_RESP: 1759 fc = sc->sc_ctx_arrs; 1760 break; 1761 default: 1762 return EIO; 1763 } 1764 naddr = ((u_int64_t)key1 << 32) + key2; 1765 1766 for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL; 1767 fh = LIST_NEXT(fh, fh_list)) { 1768 if (fh->fh_tcode == tcode) { 1769 if (fh->fh_key1 == key1 && 1770 fh->fh_key2 == key2 && fh->fh_key3 == key3) 1771 break; 1772 /* Make sure it's not within a current range. 
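				 * key1:key2 form the 48-bit base address
				 * and key3 the length of a registered
				 * region; the test below rejects a new
				 * handler whose window overlaps an
				 * existing one.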
				 */
				addr = ((u_int64_t)fh->fh_key1 << 32) +
				    fh->fh_key2;
				off = fh->fh_key3;
				if (key3 &&
				    (((naddr >= addr) &&
				     (naddr < (addr + off))) ||
				     (((naddr + key3) > addr) &&
				     ((naddr + key3) <= (addr + off))) ||
				     ((addr > naddr) &&
				      (addr < (naddr + key3)))))
					if (handler)
						return EEXIST;
			}
		}
	}
	if (handler == NULL) {
		if (fh != NULL) {
			LIST_REMOVE(fh, fh_list);
			free(fh, M_DEVBUF);
		}
		if (tcode == IEEE1394_TCODE_STREAM_DATA) {
			OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
			    OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN);
			sc->sc_ctx_as[fc->fc_ctx] = NULL;
			fwohci_ctx_free(sc, fc);
		}
		return 0;
	}
	if (fh == NULL) {
		fh = malloc(sizeof(*fh), M_DEVBUF, M_WAITOK);
		LIST_INSERT_HEAD(&fc->fc_handler, fh, fh_list);
	}
	fh->fh_tcode = tcode;
	fh->fh_key1 = key1;
	fh->fh_key2 = key2;
	fh->fh_key3 = key3;
	fh->fh_handler = handler;
	fh->fh_handarg = arg;
	DPRINTFN(1, ("fwohci_handler_set: ctx %d, tcode %x, key 0x%x, 0x%x, "
	    "0x%x\n", fc->fc_ctx, tcode, key1, key2, key3));

	if (tcode == IEEE1394_TCODE_STREAM_DATA) {
		fwohci_ctx_init(sc, fc);
		DPRINTFN(1, ("fwohci_handler_set: SYNC desc %ld\n",
		    (long)(TAILQ_FIRST(&fc->fc_buf)->fb_desc - sc->sc_desc)));
		OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx,
		    OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN);
	}
	return 0;
}

/*
 * ieee1394_ir_tag_t
 * fwohci_ir_ctx_set(struct device *dev, int channel, int tagbm,
 *	int bufnum, int maxsize, int flags)
 *
 *	This function returns a non-NULL value on success: a pointer to
 *	the isochronous receive context.  It returns NULL on failure.
 */
ieee1394_ir_tag_t
fwohci_ir_ctx_set(struct device *dev, int channel, int tagbm,
    int bufnum, int maxsize, int flags)
{
	int i, openctx;
	struct fwohci_ir_ctx *irc;
	struct fwohci_softc *sc = (struct fwohci_softc *)dev;
	const char *xname = sc->sc_sc1394.sc1394_dev.dv_xname;

	printf("%s: ir_ctx_set channel %d tagbm 0x%x maxsize %d bufnum %d\n",
	    xname, channel, tagbm, maxsize, bufnum);
	/*
	 * Find the vacant context with the smallest number and check
	 * whether another context already uses the requested channel.
	 */
	openctx = sc->sc_isoctx;
	for (i = 0; i < sc->sc_isoctx; ++i) {
		if (sc->sc_ctx_ir[i] == NULL) {
			/*
			 * Found a vacant context.  If it is the one
			 * with the smallest number so far, remember it.
			 */
			if (openctx == sc->sc_isoctx) {
				openctx = i;
			}
		} else {
			/*
			 * This context is used.  Check whether this
			 * context uses the same channel as ours.
			 */
			if (sc->sc_ctx_ir[i]->irc_channel == channel) {
				/* Using same channel. */
				printf("%s: channel %d occupied by ctx%d\n",
				    xname, channel, i);
				return NULL;
			}
		}
	}

	/*
	 * If there is a vacant context, allocate an isochronous receive
	 * context for it.
1876 */
1877 if (openctx != sc->sc_isoctx) {
1878 printf("%s: using ctx %d for iso receive\n", xname, openctx);
1879 if ((irc = fwohci_ir_ctx_construct(sc, openctx, channel,
1880 tagbm, bufnum, maxsize, flags)) == NULL) {
1881 return NULL;
1882 }
1883 #ifndef IR_CTX_OPENTEST
1884 sc->sc_ctx_ir[openctx] = irc;
1885 #else
1886 fwohci_ir_ctx_destruct(irc);
1887 irc = NULL;
1888 #endif
1889 } else {
1890 printf("%s: cannot find any vacant contexts\n", xname);
1891 irc = NULL;
1892 }
1893
1894 return (ieee1394_ir_tag_t)irc;
1895 }
1896
1897
1898 /*
1899 * int fwohci_ir_ctx_clear(struct device *dev, ieee1394_ir_tag_t ir)
1900 *
1901 * This function will return 0 if it succeeds. Otherwise it returns
1902 * a non-zero error value.
1903 */
1904 int
1905 fwohci_ir_ctx_clear(struct device *dev, ieee1394_ir_tag_t ir)
1906 {
1907 struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)ir;
1908 struct fwohci_softc *sc = irc->irc_sc;
1909 int i;
1910
1911 if (sc->sc_ctx_ir[irc->irc_num] != irc) {
1912 printf("fwohci_ir_ctx_clear: irc differs %p %p\n",
1913 sc->sc_ctx_ir[irc->irc_num], irc);
1914 return -1;
1915 }
1916
1917 i = 0;
1918 while (irc->irc_status & IRC_STATUS_RUN) {
1919 tsleep((void *)irc, PWAIT|PCATCH, "IEEE1394 iso receive", 100);
1920 if (irc->irc_status & IRC_STATUS_RUN) {
1921 if (fwohci_ir_stop(irc) == 0) {
1922 irc->irc_status &= ~IRC_STATUS_RUN;
1923 }
1924
1925 }
1926 if (++i > 20) {
1927 u_int32_t reg
1928 = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
1929 OHCI_SUBREG_ContextControlSet);
1930
1931 printf("fwohci_ir_ctx_clear: "
1932 "Cannot stop iso receive engine\n");
1933 printf("%s: intr IR_CommandPtr 0x%08x "
1934 "ContextCtrl 0x%08x%s%s%s%s\n",
1935 sc->sc_sc1394.sc1394_dev.dv_xname,
1936 OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num,
1937 OHCI_SUBREG_CommandPtr),
1938 reg,
1939 reg & OHCI_CTXCTL_RUN ? " run" : "",
1940 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
1941 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
1942 reg & OHCI_CTXCTL_ACTIVE ? " active" : "");
1943
1944 return EBUSY;
1945 }
1946 }
1947
1948 printf("fwohci_ir_ctx_clear: DMA engine is stopped. got %d frames, max queuelen %d, pos %d\n",
1949 irc->irc_pktcount, irc->irc_maxqueuelen, irc->irc_maxqueuepos);
1950
1951 fwohci_ir_ctx_destruct(irc);
1952
1953 sc->sc_ctx_ir[irc->irc_num] = NULL;
1954
1955 return 0;
1956 }
1957
1958
1959
1960
1961
1962
1963
1964
1965 ieee1394_it_tag_t
1966 fwohci_it_set(struct ieee1394_softc *isc, int channel, int tagbm)
1967 {
1968 ieee1394_it_tag_t rv;
1969 int tag;
1970
1971 for (tag = 0; tagbm != 0 && (tagbm & 0x01) == 0; tagbm >>= 1, ++tag);
1972
1973 rv = fwohci_it_ctx_set((struct fwohci_softc *)isc, channel, tag, 488);
1974
1975 return rv;
1976 }
1977
1978 /*
1979 * static ieee1394_it_tag_t
1980 * fwohci_it_ctx_set(struct fwohci_softc *sc,
1981 * int channel, int tag, int maxsize)
1982 *
1983 * This function will return a non-NULL value if it succeeds. The
1984 * return value is a pointer to the isochronous transmit context.
1985 * This function will return NULL if it
1986 * fails.
1987 */
1988 static ieee1394_it_tag_t
1989 fwohci_it_ctx_set(struct fwohci_softc *sc, int channel, int tag, int maxsize)
1990 {
1991 int i, openctx;
1992 struct fwohci_it_ctx *itc;
1993 const char *xname = sc->sc_sc1394.sc1394_dev.dv_xname;
1994 #ifdef TEST_CHAIN
1995 extern int fwohci_test_chain(struct fwohci_it_ctx *);
1996 #endif /* TEST_CHAIN */
1997 #ifdef TEST_WRITE
1998 extern void fwohci_test_write(struct fwohci_it_ctx *itc);
1999 #endif /* TEST_WRITE */
2000
2001 printf("%s: it_ctx_set channel %d tag %d maxsize %d\n",
2002 xname, channel, tag, maxsize);
2003
2004 /*
2005 * This loop finds the smallest-numbered vacant context and checks
2006 * whether another context already uses the same channel.
2007 */
2008 openctx = sc->sc_itctx;
2009 for (i = 0; i < sc->sc_itctx; ++i) {
2010 if (sc->sc_ctx_it[i] == NULL) {
2011 /*
2012 * Found a vacant context. If it has the
2013 * smallest context number, record it.
2014 */
2015 if (openctx == sc->sc_itctx) {
2016 openctx = i;
2017 }
2018 } else {
2019 /*
2020 * This context is in use. Check whether this
2021 * context uses the same channel as ours.
2022 */
2023 if (sc->sc_ctx_it[i]->itc_channel == channel) {
2024 /* Using same channel. */
2025 printf("%s: channel %d occupied by ctx%d\n",
2026 xname, channel, i);
2027 return NULL;
2028 }
2029 }
2030 }
2031
2032 /*
2033 * If there is a vacant context, allocate isochronous transmit
2034 * context for it.
2035 */
2036 if (openctx != sc->sc_itctx) {
2037 printf("%s: using ctx %d for iso transmit\n", xname, openctx);
2038 if ((itc = fwohci_it_ctx_construct(sc, openctx, channel,
2039 tag, maxsize)) == NULL) {
2040 return NULL;
2041 }
2042 sc->sc_ctx_it[openctx] = itc;
2043
2044 #ifdef TEST_CHAIN
2045 fwohci_test_chain(itc);
2046 #endif /* TEST_CHAIN */
2047 #ifdef TEST_WRITE
2048 fwohci_test_write(itc);
2049 itc = NULL;
2050 #endif /* TEST_WRITE */
2051
2052 } else {
2053 printf("%s: cannot find any vacant contexts\n", xname);
2054 itc = NULL;
2055 }
2056
2057 return (ieee1394_it_tag_t)itc;
2058 }
2059
2060
2061 /*
2062 * int fwohci_it_ctx_clear(ieee1394_it_tag_t *it)
2063 *
2064 * This function will return 0 if it succeeds. Otherwise it returns
2065 * a non-zero error value.
2066 */
2067 int
2068 fwohci_it_ctx_clear(ieee1394_it_tag_t *it)
2069 {
2070 struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it;
2071 struct fwohci_softc *sc = itc->itc_sc;
2072 int i;
2073
2074 if (sc->sc_ctx_it[itc->itc_num] != itc) {
2075 printf("fwohci_it_ctx_clear: itc differs %p %p\n",
2076 sc->sc_ctx_it[itc->itc_num], itc);
2077 return -1;
2078 }
2079
2080 fwohci_it_ctx_flush(it);
2081
2082 i = 0;
2083 while (itc->itc_flags & ITC_FLAGS_RUN) {
2084 tsleep((void *)itc, PWAIT|PCATCH, "IEEE1394 iso transmit", 100);
2085 if (itc->itc_flags & ITC_FLAGS_RUN) {
2086 u_int32_t reg;
2087
2088 reg = OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
2089 OHCI_SUBREG_ContextControlSet);
2090
2091 if ((reg & OHCI_CTXCTL_WAKE) == 0) {
2092 itc->itc_flags &= ~ITC_FLAGS_RUN;
2093 printf("fwohci_it_ctx_clear: "
2094 "DMA engine stopped without intr\n");
2095 }
2096 printf("%s: %d intr IT_CommandPtr 0x%08x "
2097 "ContextCtrl 0x%08x%s%s%s%s\n",
2098 sc->sc_sc1394.sc1394_dev.dv_xname, i,
2099 OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num,
2100 OHCI_SUBREG_CommandPtr),
2101 reg,
2102 reg & OHCI_CTXCTL_RUN ? " run" : "",
2103 reg & OHCI_CTXCTL_WAKE ? " wake" : "",
2104 reg & OHCI_CTXCTL_DEAD ? " dead" : "",
2105 reg & OHCI_CTXCTL_ACTIVE ? 
" active" : ""); 2106 2107 2108 } 2109 if (++i > 20) { 2110 u_int32_t reg 2111 = OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num, 2112 OHCI_SUBREG_ContextControlSet); 2113 2114 printf("fwochi_it_ctx_clear: " 2115 "Cannot stop iso transmit engine\n"); 2116 printf("%s: intr IT_CommandPtr 0x%08x " 2117 "ContextCtrl 0x%08x%s%s%s%s\n", 2118 sc->sc_sc1394.sc1394_dev.dv_xname, 2119 OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num, 2120 OHCI_SUBREG_CommandPtr), 2121 reg, 2122 reg & OHCI_CTXCTL_RUN ? " run" : "", 2123 reg & OHCI_CTXCTL_WAKE ? " wake" : "", 2124 reg & OHCI_CTXCTL_DEAD ? " dead" : "", 2125 reg & OHCI_CTXCTL_ACTIVE ? " active" : ""); 2126 2127 return EBUSY; 2128 } 2129 } 2130 2131 printf("fwohci_it_ctx_clear: DMA engine is stopped.\n"); 2132 2133 fwohci_it_ctx_destruct(itc); 2134 2135 sc->sc_ctx_it[itc->itc_num] = NULL; 2136 2137 2138 return 0; 2139 } 2140 2141 2142 2143 2144 2145 2146 /* 2147 * Asynchronous Receive Requests input frontend. 2148 */ 2149 static void 2150 fwohci_arrq_input(struct fwohci_softc *sc, struct fwohci_ctx *fc) 2151 { 2152 int rcode; 2153 u_int16_t len; 2154 u_int32_t key1, key2, off; 2155 u_int64_t addr, naddr; 2156 struct fwohci_handler *fh; 2157 struct fwohci_pkt pkt, res; 2158 2159 /* 2160 * Do not return if next packet is in the buffer, or the next 2161 * packet cannot be received until the next receive interrupt. 2162 */ 2163 while (fwohci_buf_input(sc, fc, &pkt)) { 2164 if (pkt.fp_tcode == OHCI_TCODE_PHY) { 2165 fwohci_phy_input(sc, &pkt); 2166 continue; 2167 } 2168 key1 = pkt.fp_hdr[1] & 0xffff; 2169 key2 = pkt.fp_hdr[2]; 2170 if ((pkt.fp_tcode == IEEE1394_TCODE_WRITE_REQ_BLOCK) || 2171 (pkt.fp_tcode == IEEE1394_TCODE_READ_REQ_BLOCK)) { 2172 len = (pkt.fp_hdr[3] & 0xffff0000) >> 16; 2173 naddr = ((u_int64_t)key1 << 32) + key2; 2174 } else { 2175 len = 0; 2176 naddr = 0; /* XXX: gcc */ 2177 } 2178 for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL; 2179 fh = LIST_NEXT(fh, fh_list)) { 2180 if (pkt.fp_tcode == fh->fh_tcode) { 2181 /* Assume length check happens in handler */ 2182 if (key1 == fh->fh_key1 && 2183 key2 == fh->fh_key2) { 2184 rcode = (*fh->fh_handler)(sc, 2185 fh->fh_handarg, &pkt); 2186 break; 2187 } 2188 addr = ((u_int64_t)fh->fh_key1 << 32) + 2189 fh->fh_key2; 2190 off = fh->fh_key3; 2191 /* Check for a range qualifier */ 2192 if (len && 2193 ((naddr >= addr) && (naddr < (addr + off)) 2194 && (naddr + len <= (addr + off)))) { 2195 rcode = (*fh->fh_handler)(sc, 2196 fh->fh_handarg, &pkt); 2197 break; 2198 } 2199 } 2200 } 2201 if (fh == NULL) { 2202 rcode = IEEE1394_RCODE_ADDRESS_ERROR; 2203 DPRINTFN(1, ("fwohci_arrq_input: no listener: tcode " 2204 "0x%x, addr=0x%04x %08x\n", pkt.fp_tcode, key1, 2205 key2)); 2206 DPRINTFN(2, ("fwohci_arrq_input: no listener: hdr[0]: " 2207 "0x%08x, hdr[1]: 0x%08x, hdr[2]: 0x%08x, hdr[3]: " 2208 "0x%08x\n", pkt.fp_hdr[0], pkt.fp_hdr[1], 2209 pkt.fp_hdr[2], pkt.fp_hdr[3])); 2210 } 2211 if (((*pkt.fp_trail & 0x001f0000) >> 16) != 2212 OHCI_CTXCTL_EVENT_ACK_PENDING) 2213 continue; 2214 if (rcode != -1) { 2215 memset(&res, 0, sizeof(res)); 2216 res.fp_uio.uio_rw = UIO_WRITE; 2217 res.fp_uio.uio_segflg = UIO_SYSSPACE; 2218 fwohci_atrs_output(sc, rcode, &pkt, &res); 2219 } 2220 } 2221 fwohci_buf_next(sc, fc); 2222 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx, 2223 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE); 2224 } 2225 2226 2227 /* 2228 * Asynchronous Receive Response input frontend. 
2229 */ 2230 static void 2231 fwohci_arrs_input(struct fwohci_softc *sc, struct fwohci_ctx *fc) 2232 { 2233 struct fwohci_pkt pkt; 2234 struct fwohci_handler *fh; 2235 u_int16_t srcid; 2236 int rcode, tlabel; 2237 2238 while (fwohci_buf_input(sc, fc, &pkt)) { 2239 srcid = pkt.fp_hdr[1] >> 16; 2240 rcode = (pkt.fp_hdr[1] & 0x0000f000) >> 12; 2241 tlabel = (pkt.fp_hdr[0] & 0x0000fc00) >> 10; 2242 DPRINTFN(1, ("fwohci_arrs_input: tcode 0x%x, from 0x%04x," 2243 " tlabel 0x%x, rcode 0x%x, hlen %d, dlen %d\n", 2244 pkt.fp_tcode, srcid, tlabel, rcode, pkt.fp_hlen, 2245 pkt.fp_dlen)); 2246 for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL; 2247 fh = LIST_NEXT(fh, fh_list)) { 2248 if (pkt.fp_tcode == fh->fh_tcode && 2249 (srcid & OHCI_NodeId_NodeNumber) == fh->fh_key1 && 2250 tlabel == fh->fh_key2) { 2251 (*fh->fh_handler)(sc, fh->fh_handarg, &pkt); 2252 LIST_REMOVE(fh, fh_list); 2253 free(fh, M_DEVBUF); 2254 break; 2255 } 2256 } 2257 if (fh == NULL) 2258 DPRINTFN(1, ("fwohci_arrs_input: no listner\n")); 2259 } 2260 fwohci_buf_next(sc, fc); 2261 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx, 2262 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE); 2263 } 2264 2265 /* 2266 * Isochronous Receive input frontend. 2267 */ 2268 static void 2269 fwohci_as_input(struct fwohci_softc *sc, struct fwohci_ctx *fc) 2270 { 2271 int rcode, chan, tag; 2272 struct iovec *iov; 2273 struct fwohci_handler *fh; 2274 struct fwohci_pkt pkt; 2275 2276 #if DOUBLEBUF 2277 if (fc->fc_type == FWOHCI_CTX_ISO_MULTI) { 2278 struct fwohci_buf *fb; 2279 int i; 2280 u_int32_t reg; 2281 2282 /* stop DMA engine before read buffer */ 2283 reg = OHCI_SYNC_RX_DMA_READ(sc, fc->fc_ctx, 2284 OHCI_SUBREG_ContextControlClear); 2285 DPRINTFN(5, ("ir_input %08x =>", reg)); 2286 if (reg & OHCI_CTXCTL_RUN) { 2287 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx, 2288 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN); 2289 } 2290 DPRINTFN(5, (" %08x\n", OHCI_SYNC_RX_DMA_READ(sc, fc->fc_ctx, OHCI_SUBREG_ContextControlClear))); 2291 2292 i = 0; 2293 while ((reg = OHCI_SYNC_RX_DMA_READ(sc, fc->fc_ctx, OHCI_SUBREG_ContextControlSet)) & OHCI_CTXCTL_ACTIVE) { 2294 delay(10); 2295 if (++i > 10000) { 2296 printf("cannot stop DMA engine 0x%08x\n", reg); 2297 return; 2298 } 2299 } 2300 2301 /* rotate DMA buffer */ 2302 fb = TAILQ_FIRST(&fc->fc_buf2); 2303 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx, OHCI_SUBREG_CommandPtr, 2304 fb->fb_daddr | 1); 2305 /* start DMA engine */ 2306 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx, 2307 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN); 2308 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntEventClear, 2309 (1 << fc->fc_ctx)); 2310 } 2311 #endif 2312 2313 while (fwohci_buf_input_ppb(sc, fc, &pkt)) { 2314 chan = (pkt.fp_hdr[0] & 0x00003f00) >> 8; 2315 tag = (pkt.fp_hdr[0] & 0x0000c000) >> 14; 2316 DPRINTFN(1, ("fwohci_as_input: hdr 0x%08x, tcode 0x%0x, hlen %d" 2317 ", dlen %d\n", pkt.fp_hdr[0], pkt.fp_tcode, pkt.fp_hlen, 2318 pkt.fp_dlen)); 2319 if (tag == IEEE1394_TAG_GASP && 2320 fc->fc_type == FWOHCI_CTX_ISO_SINGLE) { 2321 /* 2322 * The pkt with tag=3 is GASP format. 2323 * Move GASP header to header part. 
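 * The GASP header occupies the first two payload quadlets (the
 * sender's source_ID together with the specifier_ID/version fields);
 * they are converted to host order into fp_hdr[1]/fp_hdr[2] and the
 * iovec is advanced past them, so the handler sees them as header
 * rather than data.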
2324 */ 2325 if (pkt.fp_dlen < 8) 2326 continue; 2327 iov = pkt.fp_iov; 2328 /* assuming pkt per buffer mode */ 2329 pkt.fp_hdr[1] = ntohl(((u_int32_t *)iov->iov_base)[0]); 2330 pkt.fp_hdr[2] = ntohl(((u_int32_t *)iov->iov_base)[1]); 2331 iov->iov_base = (caddr_t)iov->iov_base + 8; 2332 iov->iov_len -= 8; 2333 pkt.fp_hlen += 8; 2334 pkt.fp_dlen -= 8; 2335 } 2336 for (fh = LIST_FIRST(&fc->fc_handler); fh != NULL; 2337 fh = LIST_NEXT(fh, fh_list)) { 2338 if (pkt.fp_tcode == fh->fh_tcode && 2339 (chan == fh->fh_key1 || 2340 fh->fh_key1 == IEEE1394_ISO_CHANNEL_ANY) && 2341 ((1 << tag) & fh->fh_key2) != 0) { 2342 rcode = (*fh->fh_handler)(sc, fh->fh_handarg, 2343 &pkt); 2344 break; 2345 } 2346 } 2347 #ifdef FW_DEBUG 2348 if (fh == NULL) { 2349 DPRINTFN(1, ("fwohci_as_input: no handler\n")); 2350 } else { 2351 DPRINTFN(1, ("fwohci_as_input: rcode %d\n", rcode)); 2352 } 2353 #endif 2354 } 2355 fwohci_buf_next(sc, fc); 2356 2357 if (fc->fc_type == FWOHCI_CTX_ISO_SINGLE) { 2358 OHCI_SYNC_RX_DMA_WRITE(sc, fc->fc_ctx, 2359 OHCI_SUBREG_ContextControlSet, 2360 OHCI_CTXCTL_WAKE); 2361 } 2362 } 2363 2364 /* 2365 * Asynchronous Transmit common routine. 2366 */ 2367 static int 2368 fwohci_at_output(struct fwohci_softc *sc, struct fwohci_ctx *fc, 2369 struct fwohci_pkt *pkt) 2370 { 2371 struct fwohci_buf *fb; 2372 struct fwohci_desc *fd; 2373 struct mbuf *m, *m0; 2374 int i, ndesc, error, off, len; 2375 u_int32_t val; 2376 #ifdef FW_DEBUG 2377 struct iovec *iov; 2378 int tlabel = (pkt->fp_hdr[0] & 0x0000fc00) >> 10; 2379 #endif 2380 2381 if ((sc->sc_nodeid & OHCI_NodeId_NodeNumber) == IEEE1394_BCAST_PHY_ID) 2382 /* We can't send anything during selfid duration */ 2383 return EAGAIN; 2384 2385 #ifdef FW_DEBUG 2386 DPRINTFN(1, ("fwohci_at_output: tcode 0x%x, tlabel 0x%x hlen %d, " 2387 "dlen %d", pkt->fp_tcode, tlabel, pkt->fp_hlen, pkt->fp_dlen)); 2388 for (i = 0; i < pkt->fp_hlen/4; i++) 2389 DPRINTFN(2, ("%s%08x", i?" 
":"\n ", pkt->fp_hdr[i])); 2390 DPRINTFN(2, ("$")); 2391 for (ndesc = 0, iov = pkt->fp_iov; 2392 ndesc < pkt->fp_uio.uio_iovcnt; ndesc++, iov++) { 2393 for (i = 0; i < iov->iov_len; i++) 2394 DPRINTFN(2, ("%s%02x", (i%32)?((i%4)?"":" "):"\n ", 2395 ((u_int8_t *)iov->iov_base)[i])); 2396 DPRINTFN(2, ("$")); 2397 } 2398 DPRINTFN(1, ("\n")); 2399 #endif 2400 2401 if ((m = pkt->fp_m) != NULL) { 2402 for (ndesc = 2; m != NULL; m = m->m_next) 2403 ndesc++; 2404 if (ndesc > OHCI_DESC_MAX) { 2405 m0 = NULL; 2406 ndesc = 2; 2407 for (off = 0; off < pkt->fp_dlen; off += len) { 2408 if (m0 == NULL) { 2409 MGETHDR(m0, M_DONTWAIT, MT_DATA); 2410 if (m0 != NULL) 2411 M_COPY_PKTHDR(m0, pkt->fp_m); 2412 m = m0; 2413 } else { 2414 MGET(m->m_next, M_DONTWAIT, MT_DATA); 2415 m = m->m_next; 2416 } 2417 if (m != NULL) 2418 MCLGET(m, M_DONTWAIT); 2419 if (m == NULL || (m->m_flags & M_EXT) == 0) { 2420 m_freem(m0); 2421 return ENOMEM; 2422 } 2423 len = pkt->fp_dlen - off; 2424 if (len > m->m_ext.ext_size) 2425 len = m->m_ext.ext_size; 2426 m_copydata(pkt->fp_m, off, len, 2427 mtod(m, caddr_t)); 2428 m->m_len = len; 2429 ndesc++; 2430 } 2431 m_freem(pkt->fp_m); 2432 pkt->fp_m = m0; 2433 } 2434 } else 2435 ndesc = 2 + pkt->fp_uio.uio_iovcnt; 2436 2437 if (ndesc > OHCI_DESC_MAX) 2438 return ENOBUFS; 2439 2440 fb = malloc(sizeof(*fb), M_DEVBUF, M_WAITOK); 2441 if (ndesc > 2) { 2442 if ((error = bus_dmamap_create(sc->sc_dmat, pkt->fp_dlen, 2443 OHCI_DESC_MAX - 2, pkt->fp_dlen, 0, BUS_DMA_WAITOK, 2444 &fb->fb_dmamap)) != 0) { 2445 fwohci_desc_put(sc, fb->fb_desc, ndesc); 2446 free(fb, M_DEVBUF); 2447 return error; 2448 } 2449 2450 if (pkt->fp_m != NULL) 2451 error = bus_dmamap_load_mbuf(sc->sc_dmat, fb->fb_dmamap, 2452 pkt->fp_m, BUS_DMA_WAITOK); 2453 else 2454 error = bus_dmamap_load_uio(sc->sc_dmat, fb->fb_dmamap, 2455 &pkt->fp_uio, BUS_DMA_WAITOK); 2456 if (error != 0) { 2457 DPRINTFN(1, ("Can't load DMA map: %d\n", error)); 2458 bus_dmamap_destroy(sc->sc_dmat, fb->fb_dmamap); 2459 fwohci_desc_put(sc, fb->fb_desc, ndesc); 2460 free(fb, M_DEVBUF); 2461 return error; 2462 } 2463 ndesc = fb->fb_dmamap->dm_nsegs + 2; 2464 2465 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, 0, pkt->fp_dlen, 2466 BUS_DMASYNC_PREWRITE); 2467 } 2468 2469 fb->fb_nseg = ndesc; 2470 fb->fb_desc = fwohci_desc_get(sc, ndesc); 2471 if (fb->fb_desc == NULL) { 2472 free(fb, M_DEVBUF); 2473 return ENOBUFS; 2474 } 2475 fb->fb_daddr = sc->sc_ddmamap->dm_segs[0].ds_addr + 2476 ((caddr_t)fb->fb_desc - (caddr_t)sc->sc_desc); 2477 fb->fb_m = pkt->fp_m; 2478 fb->fb_callback = pkt->fp_callback; 2479 fb->fb_statuscb = pkt->fp_statuscb; 2480 fb->fb_statusarg = pkt->fp_statusarg; 2481 2482 fd = fb->fb_desc; 2483 fd->fd_flags = OHCI_DESC_IMMED; 2484 fd->fd_reqcount = pkt->fp_hlen; 2485 fd->fd_data = 0; 2486 fd->fd_branch = 0; 2487 fd->fd_status = 0; 2488 if (fc->fc_ctx == OHCI_CTX_ASYNC_TX_RESPONSE) { 2489 i = 3; /* XXX: 3 sec */ 2490 val = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer); 2491 fd->fd_timestamp = ((val >> 12) & 0x1fff) | 2492 ((((val >> 25) + i) & 0x7) << 13); 2493 } else 2494 fd->fd_timestamp = 0; 2495 memcpy(fd + 1, pkt->fp_hdr, pkt->fp_hlen); 2496 for (i = 0; i < ndesc - 2; i++) { 2497 fd = fb->fb_desc + 2 + i; 2498 fd->fd_flags = 0; 2499 fd->fd_reqcount = fb->fb_dmamap->dm_segs[i].ds_len; 2500 fd->fd_data = fb->fb_dmamap->dm_segs[i].ds_addr; 2501 fd->fd_branch = 0; 2502 fd->fd_status = 0; 2503 fd->fd_timestamp = 0; 2504 } 2505 fd->fd_flags |= OHCI_DESC_LAST | OHCI_DESC_BRANCH; 2506 fd->fd_flags |= OHCI_DESC_INTR_ALWAYS; 2507 2508 #ifdef 
FW_DEBUG 2509 DPRINTFN(1, ("fwohci_at_output: desc %ld", 2510 (long)(fb->fb_desc - sc->sc_desc))); 2511 for (i = 0; i < ndesc * 4; i++) 2512 DPRINTFN(2, ("%s%08x", i&7?" ":"\n ", 2513 ((u_int32_t *)fb->fb_desc)[i])); 2514 DPRINTFN(1, ("\n")); 2515 #endif 2516 2517 val = OHCI_ASYNC_DMA_READ(sc, fc->fc_ctx, 2518 OHCI_SUBREG_ContextControlClear); 2519 2520 if (val & OHCI_CTXCTL_RUN) { 2521 if (fc->fc_branch == NULL) { 2522 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx, 2523 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN); 2524 goto run; 2525 } 2526 *fc->fc_branch = fb->fb_daddr | ndesc; 2527 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx, 2528 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE); 2529 } else { 2530 run: 2531 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx, 2532 OHCI_SUBREG_CommandPtr, fb->fb_daddr | ndesc); 2533 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx, 2534 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN); 2535 } 2536 fc->fc_branch = &fd->fd_branch; 2537 2538 fc->fc_bufcnt++; 2539 TAILQ_INSERT_TAIL(&fc->fc_buf, fb, fb_list); 2540 pkt->fp_m = NULL; 2541 return 0; 2542 } 2543 2544 static void 2545 fwohci_at_done(struct fwohci_softc *sc, struct fwohci_ctx *fc, int force) 2546 { 2547 struct fwohci_buf *fb; 2548 struct fwohci_desc *fd; 2549 struct fwohci_pkt pkt; 2550 int i; 2551 2552 while ((fb = TAILQ_FIRST(&fc->fc_buf)) != NULL) { 2553 fd = fb->fb_desc; 2554 #ifdef FW_DEBUG 2555 DPRINTFN(1, ("fwohci_at_done: %sdesc %ld (%d)", 2556 force ? "force " : "", (long)(fd - sc->sc_desc), 2557 fb->fb_nseg)); 2558 for (i = 0; i < fb->fb_nseg * 4; i++) 2559 DPRINTFN(2, ("%s%08x", i&7?" ":"\n ", 2560 ((u_int32_t *)fd)[i])); 2561 DPRINTFN(1, ("\n")); 2562 #endif 2563 if (fb->fb_nseg > 2) 2564 fd += fb->fb_nseg - 1; 2565 if (!force && !(fd->fd_status & OHCI_CTXCTL_ACTIVE)) 2566 break; 2567 TAILQ_REMOVE(&fc->fc_buf, fb, fb_list); 2568 if (fc->fc_branch == &fd->fd_branch) { 2569 OHCI_ASYNC_DMA_WRITE(sc, fc->fc_ctx, 2570 OHCI_SUBREG_ContextControlClear, OHCI_CTXCTL_RUN); 2571 fc->fc_branch = NULL; 2572 for (i = 0; i < OHCI_LOOP; i++) { 2573 if (!(OHCI_ASYNC_DMA_READ(sc, fc->fc_ctx, 2574 OHCI_SUBREG_ContextControlClear) & 2575 OHCI_CTXCTL_ACTIVE)) 2576 break; 2577 DELAY(10); 2578 } 2579 } 2580 2581 if (fb->fb_statuscb) { 2582 memset(&pkt, 0, sizeof(pkt)); 2583 pkt.fp_status = fd->fd_status; 2584 memcpy(pkt.fp_hdr, fd + 1, sizeof(pkt.fp_hdr[0])); 2585 2586 /* Indicate this is just returning the status bits. */ 2587 pkt.fp_tcode = -1; 2588 (*fb->fb_statuscb)(sc, fb->fb_statusarg, &pkt); 2589 fb->fb_statuscb = NULL; 2590 fb->fb_statusarg = NULL; 2591 } 2592 fwohci_desc_put(sc, fb->fb_desc, fb->fb_nseg); 2593 if (fb->fb_nseg > 2) 2594 bus_dmamap_destroy(sc->sc_dmat, fb->fb_dmamap); 2595 fc->fc_bufcnt--; 2596 if (fb->fb_callback) { 2597 (*fb->fb_callback)(sc->sc_sc1394.sc1394_if, fb->fb_m); 2598 fb->fb_callback = NULL; 2599 } else if (fb->fb_m != NULL) 2600 m_freem(fb->fb_m); 2601 free(fb, M_DEVBUF); 2602 } 2603 } 2604 2605 /* 2606 * Asynchronous Transmit Response -- in response of request packet. 
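 * Only requests that were acked as ack_pending get a response from
 * here; the response reuses the request's tlabel, is addressed back
 * to the requesting node and carries the rcode in its second header
 * quadlet.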
2607 */ 2608 static void 2609 fwohci_atrs_output(struct fwohci_softc *sc, int rcode, struct fwohci_pkt *req, 2610 struct fwohci_pkt *res) 2611 { 2612 2613 if (((*req->fp_trail & 0x001f0000) >> 16) != 2614 OHCI_CTXCTL_EVENT_ACK_PENDING) 2615 return; 2616 2617 res->fp_hdr[0] = (req->fp_hdr[0] & 0x0000fc00) | 0x00000100; 2618 res->fp_hdr[1] = (req->fp_hdr[1] & 0xffff0000) | (rcode << 12); 2619 switch (req->fp_tcode) { 2620 case IEEE1394_TCODE_WRITE_REQ_QUAD: 2621 case IEEE1394_TCODE_WRITE_REQ_BLOCK: 2622 res->fp_tcode = IEEE1394_TCODE_WRITE_RESP; 2623 res->fp_hlen = 12; 2624 break; 2625 case IEEE1394_TCODE_READ_REQ_QUAD: 2626 res->fp_tcode = IEEE1394_TCODE_READ_RESP_QUAD; 2627 res->fp_hlen = 16; 2628 res->fp_dlen = 0; 2629 if (res->fp_uio.uio_iovcnt == 1 && res->fp_iov[0].iov_len == 4) 2630 res->fp_hdr[3] = 2631 *(u_int32_t *)res->fp_iov[0].iov_base; 2632 res->fp_uio.uio_iovcnt = 0; 2633 break; 2634 case IEEE1394_TCODE_READ_REQ_BLOCK: 2635 case IEEE1394_TCODE_LOCK_REQ: 2636 if (req->fp_tcode == IEEE1394_TCODE_LOCK_REQ) 2637 res->fp_tcode = IEEE1394_TCODE_LOCK_RESP; 2638 else 2639 res->fp_tcode = IEEE1394_TCODE_READ_RESP_BLOCK; 2640 res->fp_hlen = 16; 2641 res->fp_dlen = res->fp_uio.uio_resid; 2642 res->fp_hdr[3] = res->fp_dlen << 16; 2643 break; 2644 } 2645 res->fp_hdr[0] |= (res->fp_tcode << 4); 2646 fwohci_at_output(sc, sc->sc_ctx_atrs, res); 2647 } 2648 2649 /* 2650 * APPLICATION LAYER SERVICES 2651 */ 2652 2653 /* 2654 * Retrieve Global UID from GUID ROM 2655 */ 2656 static int 2657 fwohci_guidrom_init(struct fwohci_softc *sc) 2658 { 2659 int i, n, off; 2660 u_int32_t val1, val2; 2661 2662 /* Extract the Global UID 2663 */ 2664 val1 = OHCI_CSR_READ(sc, OHCI_REG_GUIDHi); 2665 val2 = OHCI_CSR_READ(sc, OHCI_REG_GUIDLo); 2666 2667 if (val1 != 0 || val2 != 0) { 2668 sc->sc_sc1394.sc1394_guid[0] = (val1 >> 24) & 0xff; 2669 sc->sc_sc1394.sc1394_guid[1] = (val1 >> 16) & 0xff; 2670 sc->sc_sc1394.sc1394_guid[2] = (val1 >> 8) & 0xff; 2671 sc->sc_sc1394.sc1394_guid[3] = (val1 >> 0) & 0xff; 2672 sc->sc_sc1394.sc1394_guid[4] = (val2 >> 24) & 0xff; 2673 sc->sc_sc1394.sc1394_guid[5] = (val2 >> 16) & 0xff; 2674 sc->sc_sc1394.sc1394_guid[6] = (val2 >> 8) & 0xff; 2675 sc->sc_sc1394.sc1394_guid[7] = (val2 >> 0) & 0xff; 2676 } else { 2677 val1 = OHCI_CSR_READ(sc, OHCI_REG_Version); 2678 if ((val1 & OHCI_Version_GUID_ROM) == 0) 2679 return -1; 2680 OHCI_CSR_WRITE(sc, OHCI_REG_Guid_Rom, OHCI_Guid_AddrReset); 2681 for (i = 0; i < OHCI_LOOP; i++) { 2682 val1 = OHCI_CSR_READ(sc, OHCI_REG_Guid_Rom); 2683 if (!(val1 & OHCI_Guid_AddrReset)) 2684 break; 2685 DELAY(10); 2686 } 2687 off = OHCI_BITVAL(val1, OHCI_Guid_MiniROM) + 4; 2688 val2 = 0; 2689 for (n = 0; n < off + sizeof(sc->sc_sc1394.sc1394_guid); n++) { 2690 OHCI_CSR_WRITE(sc, OHCI_REG_Guid_Rom, 2691 OHCI_Guid_RdStart); 2692 for (i = 0; i < OHCI_LOOP; i++) { 2693 val1 = OHCI_CSR_READ(sc, OHCI_REG_Guid_Rom); 2694 if (!(val1 & OHCI_Guid_RdStart)) 2695 break; 2696 DELAY(10); 2697 } 2698 if (n < off) 2699 continue; 2700 val1 = OHCI_BITVAL(val1, OHCI_Guid_RdData); 2701 sc->sc_sc1394.sc1394_guid[n - off] = val1; 2702 val2 |= val1; 2703 } 2704 if (val2 == 0) 2705 return -1; 2706 } 2707 return 0; 2708 } 2709 2710 /* 2711 * Initialization for Configuration ROM (no DMA context) 2712 */ 2713 2714 #define CFR_MAXUNIT 20 2715 2716 struct configromctx { 2717 u_int32_t *ptr; 2718 int curunit; 2719 struct { 2720 u_int32_t *start; 2721 int length; 2722 u_int32_t *refer; 2723 int refunit; 2724 } unit[CFR_MAXUNIT]; 2725 }; 2726 2727 #define CFR_PUT_DATA4(cfr, d1, d2, d3, d4) \ 
2728 (*(cfr)->ptr++ = (((d1)<<24) | ((d2)<<16) | ((d3)<<8) | (d4))) 2729 2730 #define CFR_PUT_DATA1(cfr, d) (*(cfr)->ptr++ = (d)) 2731 2732 #define CFR_PUT_VALUE(cfr, key, d) (*(cfr)->ptr++ = ((key)<<24) | (d)) 2733 2734 #define CFR_PUT_CRC(cfr, n) \ 2735 (*(cfr)->unit[n].start = ((cfr)->unit[n].length << 16) | \ 2736 fwohci_crc16((cfr)->unit[n].start + 1, (cfr)->unit[n].length)) 2737 2738 #define CFR_START_UNIT(cfr, n) \ 2739 do { \ 2740 if ((cfr)->unit[n].refer != NULL) { \ 2741 *(cfr)->unit[n].refer |= \ 2742 (cfr)->ptr - (cfr)->unit[n].refer; \ 2743 CFR_PUT_CRC(cfr, (cfr)->unit[n].refunit); \ 2744 } \ 2745 (cfr)->curunit = (n); \ 2746 (cfr)->unit[n].start = (cfr)->ptr++; \ 2747 } while (0 /* CONSTCOND */) 2748 2749 #define CFR_PUT_REFER(cfr, key, n) \ 2750 do { \ 2751 (cfr)->unit[n].refer = (cfr)->ptr; \ 2752 (cfr)->unit[n].refunit = (cfr)->curunit; \ 2753 *(cfr)->ptr++ = (key) << 24; \ 2754 } while (0 /* CONSTCOND */) 2755 2756 #define CFR_END_UNIT(cfr) \ 2757 do { \ 2758 (cfr)->unit[(cfr)->curunit].length = (cfr)->ptr - \ 2759 ((cfr)->unit[(cfr)->curunit].start + 1); \ 2760 CFR_PUT_CRC(cfr, (cfr)->curunit); \ 2761 } while (0 /* CONSTCOND */) 2762 2763 static u_int16_t 2764 fwohci_crc16(u_int32_t *ptr, int len) 2765 { 2766 int shift; 2767 u_int32_t crc, sum, data; 2768 2769 crc = 0; 2770 while (len-- > 0) { 2771 data = *ptr++; 2772 for (shift = 28; shift >= 0; shift -= 4) { 2773 sum = ((crc >> 12) ^ (data >> shift)) & 0x000f; 2774 crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum; 2775 } 2776 crc &= 0xffff; 2777 } 2778 return crc; 2779 } 2780 2781 static void 2782 fwohci_configrom_init(struct fwohci_softc *sc) 2783 { 2784 int i, val; 2785 struct fwohci_buf *fb; 2786 u_int32_t *hdr; 2787 struct configromctx cfr; 2788 2789 fb = &sc->sc_buf_cnfrom; 2790 memset(&cfr, 0, sizeof(cfr)); 2791 cfr.ptr = hdr = (u_int32_t *)fb->fb_buf; 2792 2793 /* headers */ 2794 CFR_START_UNIT(&cfr, 0); 2795 CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_BusId)); 2796 CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_BusOptions)); 2797 CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_GUIDHi)); 2798 CFR_PUT_DATA1(&cfr, OHCI_CSR_READ(sc, OHCI_REG_GUIDLo)); 2799 CFR_END_UNIT(&cfr); 2800 /* copy info_length from crc_length */ 2801 *hdr |= (*hdr & 0x00ff0000) << 8; 2802 OHCI_CSR_WRITE(sc, OHCI_REG_ConfigROMhdr, *hdr); 2803 2804 /* root directory */ 2805 CFR_START_UNIT(&cfr, 1); 2806 CFR_PUT_VALUE(&cfr, 0x03, 0x00005e); /* vendor id */ 2807 CFR_PUT_REFER(&cfr, 0x81, 2); /* textual descriptor offset */ 2808 CFR_PUT_VALUE(&cfr, 0x0c, 0x0083c0); /* node capability */ 2809 /* spt,64,fix,lst,drq */ 2810 #ifdef INET 2811 CFR_PUT_REFER(&cfr, 0xd1, 3); /* IPv4 unit directory */ 2812 #endif /* INET */ 2813 #ifdef INET6 2814 CFR_PUT_REFER(&cfr, 0xd1, 4); /* IPv6 unit directory */ 2815 #endif /* INET6 */ 2816 CFR_END_UNIT(&cfr); 2817 2818 CFR_START_UNIT(&cfr, 2); 2819 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */ 2820 CFR_PUT_DATA1(&cfr, 0); /* minimal ASCII */ 2821 CFR_PUT_DATA4(&cfr, 'N', 'e', 't', 'B'); 2822 CFR_PUT_DATA4(&cfr, 'S', 'D', 0x00, 0x00); 2823 CFR_END_UNIT(&cfr); 2824 2825 #ifdef INET 2826 /* IPv4 unit directory */ 2827 CFR_START_UNIT(&cfr, 3); 2828 CFR_PUT_VALUE(&cfr, 0x12, 0x00005e); /* unit spec id */ 2829 CFR_PUT_REFER(&cfr, 0x81, 6); /* textual descriptor offset */ 2830 CFR_PUT_VALUE(&cfr, 0x13, 0x000001); /* unit sw version */ 2831 CFR_PUT_REFER(&cfr, 0x81, 7); /* textual descriptor offset */ 2832 CFR_PUT_REFER(&cfr, 0x95, 8); /* Unit location */ 2833 CFR_END_UNIT(&cfr); 2834 2835 CFR_START_UNIT(&cfr, 6); 
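/*
 * Unit 6 is the textual descriptor leaf referenced (key 0x81) from
 * the IPv4 unit directory above; it names the owner of the unit
 * spec id ("IANA").
 */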
2836 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */ 2837 CFR_PUT_DATA1(&cfr, 0); /* minimal ASCII */ 2838 CFR_PUT_DATA4(&cfr, 'I', 'A', 'N', 'A'); 2839 CFR_END_UNIT(&cfr); 2840 2841 CFR_START_UNIT(&cfr, 7); 2842 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */ 2843 CFR_PUT_DATA1(&cfr, 0); /* minimal ASCII */ 2844 CFR_PUT_DATA4(&cfr, 'I', 'P', 'v', '4'); 2845 CFR_END_UNIT(&cfr); 2846 2847 CFR_START_UNIT(&cfr, 8); /* Spec's valid addr range. */ 2848 CFR_PUT_DATA1(&cfr, FW_FIFO_HI); 2849 CFR_PUT_DATA1(&cfr, (FW_FIFO_LO | 0x1)); 2850 CFR_PUT_DATA1(&cfr, FW_FIFO_HI); 2851 CFR_PUT_DATA1(&cfr, FW_FIFO_LO); 2852 CFR_END_UNIT(&cfr); 2853 2854 #endif /* INET */ 2855 2856 #ifdef INET6 2857 /* IPv6 unit directory */ 2858 CFR_START_UNIT(&cfr, 4); 2859 CFR_PUT_VALUE(&cfr, 0x12, 0x00005e); /* unit spec id */ 2860 CFR_PUT_REFER(&cfr, 0x81, 9); /* textual descriptor offset */ 2861 CFR_PUT_VALUE(&cfr, 0x13, 0x000002); /* unit sw version */ 2862 /* XXX: TBA by IANA */ 2863 CFR_PUT_REFER(&cfr, 0x81, 10); /* textual descriptor offset */ 2864 CFR_PUT_REFER(&cfr, 0x95, 11); /* Unit location */ 2865 CFR_END_UNIT(&cfr); 2866 2867 CFR_START_UNIT(&cfr, 9); 2868 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */ 2869 CFR_PUT_DATA1(&cfr, 0); /* minimal ASCII */ 2870 CFR_PUT_DATA4(&cfr, 'I', 'A', 'N', 'A'); 2871 CFR_END_UNIT(&cfr); 2872 2873 CFR_START_UNIT(&cfr, 10); 2874 CFR_PUT_VALUE(&cfr, 0, 0); /* textual descriptor */ 2875 CFR_PUT_DATA1(&cfr, 0); 2876 CFR_PUT_DATA4(&cfr, 'I', 'P', 'v', '6'); 2877 CFR_END_UNIT(&cfr); 2878 2879 CFR_START_UNIT(&cfr, 11); /* Spec's valid addr range. */ 2880 CFR_PUT_DATA1(&cfr, FW_FIFO_HI); 2881 CFR_PUT_DATA1(&cfr, (FW_FIFO_LO | 0x1)); 2882 CFR_PUT_DATA1(&cfr, FW_FIFO_HI); 2883 CFR_PUT_DATA1(&cfr, FW_FIFO_LO); 2884 CFR_END_UNIT(&cfr); 2885 2886 #endif /* INET6 */ 2887 2888 fb->fb_off = cfr.ptr - hdr; 2889 #ifdef FW_DEBUG 2890 DPRINTF(("%s: Config ROM:", sc->sc_sc1394.sc1394_dev.dv_xname)); 2891 for (i = 0; i < fb->fb_off; i++) 2892 DPRINTF(("%s%08x", i&7?" ":"\n ", hdr[i])); 2893 DPRINTF(("\n")); 2894 #endif /* FW_DEBUG */ 2895 2896 /* 2897 * Make network byte order for DMA 2898 */ 2899 for (i = 0; i < fb->fb_off; i++) 2900 HTONL(hdr[i]); 2901 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, 0, 2902 (caddr_t)cfr.ptr - fb->fb_buf, BUS_DMASYNC_PREWRITE); 2903 2904 OHCI_CSR_WRITE(sc, OHCI_REG_ConfigROMmap, 2905 fb->fb_dmamap->dm_segs[0].ds_addr); 2906 2907 /* This register is only valid on OHCI 1.1. */ 2908 val = OHCI_CSR_READ(sc, OHCI_REG_Version); 2909 if ((OHCI_Version_GET_Version(val) == 1) && 2910 (OHCI_Version_GET_Revision(val) == 1)) 2911 OHCI_CSR_WRITE(sc, OHCI_REG_HCControlSet, 2912 OHCI_HCControl_BIBImageValid); 2913 2914 /* Only allow quad reads of the rom. */ 2915 for (i = 0; i < fb->fb_off; i++) 2916 fwohci_handler_set(sc, IEEE1394_TCODE_READ_REQ_QUAD, 2917 CSR_BASE_HI, CSR_BASE_LO + CSR_CONFIG_ROM + (i * 4), 0, 2918 fwohci_configrom_input, NULL); 2919 } 2920 2921 static int 2922 fwohci_configrom_input(struct fwohci_softc *sc, void *arg, 2923 struct fwohci_pkt *pkt) 2924 { 2925 struct fwohci_pkt res; 2926 u_int32_t loc, *rom; 2927 2928 /* This will be used as an array index so size accordingly. 
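 * loc starts out as the byte offset of the requested quadlet within
 * the config ROM (the request's low address minus the ROM base); it
 * must be quadlet aligned and is then divided by 4 to index rom[].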
*/ 2929 loc = pkt->fp_hdr[2] - (CSR_BASE_LO + CSR_CONFIG_ROM); 2930 if ((loc & 0x03) != 0) { 2931 /* alignment error */ 2932 return IEEE1394_RCODE_ADDRESS_ERROR; 2933 } 2934 else 2935 loc /= 4; 2936 rom = (u_int32_t *)sc->sc_buf_cnfrom.fb_buf; 2937 2938 DPRINTFN(1, ("fwohci_configrom_input: ConfigRom[0x%04x]: 0x%08x\n", loc, 2939 ntohl(rom[loc]))); 2940 2941 memset(&res, 0, sizeof(res)); 2942 res.fp_hdr[3] = rom[loc]; 2943 fwohci_atrs_output(sc, IEEE1394_RCODE_COMPLETE, pkt, &res); 2944 return -1; 2945 } 2946 2947 /* 2948 * SelfID buffer (no DMA context) 2949 */ 2950 static void 2951 fwohci_selfid_init(struct fwohci_softc *sc) 2952 { 2953 struct fwohci_buf *fb; 2954 2955 fb = &sc->sc_buf_selfid; 2956 #ifdef DIAGNOSTIC 2957 if ((fb->fb_dmamap->dm_segs[0].ds_addr & 0x7ff) != 0) 2958 panic("fwohci_selfid_init: not aligned: %ld (%ld) %p", 2959 (unsigned long)fb->fb_dmamap->dm_segs[0].ds_addr, 2960 (unsigned long)fb->fb_dmamap->dm_segs[0].ds_len, fb->fb_buf); 2961 #endif 2962 memset(fb->fb_buf, 0, fb->fb_dmamap->dm_segs[0].ds_len); 2963 bus_dmamap_sync(sc->sc_dmat, fb->fb_dmamap, 0, 2964 fb->fb_dmamap->dm_segs[0].ds_len, BUS_DMASYNC_PREREAD); 2965 2966 OHCI_CSR_WRITE(sc, OHCI_REG_SelfIDBuffer, 2967 fb->fb_dmamap->dm_segs[0].ds_addr); 2968 } 2969 2970 static int 2971 fwohci_selfid_input(struct fwohci_softc *sc) 2972 { 2973 int i; 2974 u_int32_t count, val, gen; 2975 u_int32_t *buf; 2976 2977 buf = (u_int32_t *)sc->sc_buf_selfid.fb_buf; 2978 val = OHCI_CSR_READ(sc, OHCI_REG_SelfIDCount); 2979 again: 2980 if (val & OHCI_SelfID_Error) { 2981 printf("%s: SelfID Error\n", sc->sc_sc1394.sc1394_dev.dv_xname); 2982 return -1; 2983 } 2984 count = OHCI_BITVAL(val, OHCI_SelfID_Size); 2985 2986 bus_dmamap_sync(sc->sc_dmat, sc->sc_buf_selfid.fb_dmamap, 2987 0, count << 2, BUS_DMASYNC_POSTREAD); 2988 gen = OHCI_BITVAL(buf[0], OHCI_SelfID_Gen); 2989 2990 #ifdef FW_DEBUG 2991 DPRINTFN(1, ("%s: SelfID: 0x%08x", sc->sc_sc1394.sc1394_dev.dv_xname, 2992 val)); 2993 for (i = 0; i < count; i++) 2994 DPRINTFN(2, ("%s%08x", i&7?" ":"\n ", buf[i])); 2995 DPRINTFN(1, ("\n")); 2996 #endif /* FW_DEBUG */ 2997 2998 for (i = 1; i < count; i += 2) { 2999 if (buf[i] != ~buf[i + 1]) 3000 break; 3001 if (buf[i] & 0x00000001) 3002 continue; /* more pkt */ 3003 if (buf[i] & 0x00800000) 3004 continue; /* external id */ 3005 sc->sc_rootid = (buf[i] & 0x3f000000) >> 24; 3006 if ((buf[i] & 0x00400800) == 0x00400800) 3007 sc->sc_irmid = sc->sc_rootid; 3008 } 3009 3010 val = OHCI_CSR_READ(sc, OHCI_REG_SelfIDCount); 3011 if (OHCI_BITVAL(val, OHCI_SelfID_Gen) != gen) { 3012 if (OHCI_BITVAL(val, OHCI_SelfID_Gen) != 3013 OHCI_BITVAL(buf[0], OHCI_SelfID_Gen)) 3014 goto again; 3015 DPRINTF(("%s: SelfID Gen mismatch (%d, %d)\n", 3016 sc->sc_sc1394.sc1394_dev.dv_xname, gen, 3017 OHCI_BITVAL(val, OHCI_SelfID_Gen))); 3018 return -1; 3019 } 3020 if (i != count) { 3021 printf("%s: SelfID corrupted (%d, 0x%08x, 0x%08x)\n", 3022 sc->sc_sc1394.sc1394_dev.dv_xname, i, buf[i], buf[i + 1]); 3023 #if 1 3024 if (i == 1 && buf[i] == 0 && buf[i + 1] == 0) { 3025 /* 3026 * XXX: CXD3222 sometimes fails to DMA 3027 * selfid packet?? 
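 * Fall back to deriving the root id from the SelfID quadlet count:
 * buf[0] is the generation header and each self-ID packet is stored
 * together with its inverted check copy, so (count - 1) / 2 is the
 * number of self-ID packets, assuming one packet per node.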
3028 */ 3029 sc->sc_rootid = (count - 1) / 2 - 1; 3030 sc->sc_irmid = sc->sc_rootid; 3031 } else 3032 #endif 3033 return -1; 3034 } 3035 3036 val = OHCI_CSR_READ(sc, OHCI_REG_NodeId); 3037 if ((val & OHCI_NodeId_IDValid) == 0) { 3038 sc->sc_nodeid = 0xffff; /* invalid */ 3039 printf("%s: nodeid is invalid\n", 3040 sc->sc_sc1394.sc1394_dev.dv_xname); 3041 return -1; 3042 } 3043 sc->sc_nodeid = val & 0xffff; 3044 sc->sc_sc1394.sc1394_node_id = sc->sc_nodeid & OHCI_NodeId_NodeNumber; 3045 3046 DPRINTF(("%s: nodeid=0x%04x(%d), rootid=%d, irmid=%d\n", 3047 sc->sc_sc1394.sc1394_dev.dv_xname, sc->sc_nodeid, 3048 sc->sc_nodeid & OHCI_NodeId_NodeNumber, sc->sc_rootid, 3049 sc->sc_irmid)); 3050 3051 if ((sc->sc_nodeid & OHCI_NodeId_NodeNumber) > sc->sc_rootid) 3052 return -1; 3053 3054 if ((sc->sc_nodeid & OHCI_NodeId_NodeNumber) == sc->sc_rootid) 3055 OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlSet, 3056 OHCI_LinkControl_CycleMaster); 3057 else 3058 OHCI_CSR_WRITE(sc, OHCI_REG_LinkControlClear, 3059 OHCI_LinkControl_CycleMaster); 3060 return 0; 3061 } 3062 3063 /* 3064 * some CSRs are handled by driver. 3065 */ 3066 static void 3067 fwohci_csr_init(struct fwohci_softc *sc) 3068 { 3069 int i; 3070 static u_int32_t csr[] = { 3071 CSR_STATE_CLEAR, CSR_STATE_SET, CSR_SB_CYCLE_TIME, 3072 CSR_SB_BUS_TIME, CSR_SB_BUSY_TIMEOUT, CSR_SB_BUS_MANAGER_ID, 3073 CSR_SB_CHANNEL_AVAILABLE_HI, CSR_SB_CHANNEL_AVAILABLE_LO, 3074 CSR_SB_BROADCAST_CHANNEL 3075 }; 3076 3077 for (i = 0; i < sizeof(csr) / sizeof(csr[0]); i++) { 3078 fwohci_handler_set(sc, IEEE1394_TCODE_WRITE_REQ_QUAD, 3079 CSR_BASE_HI, CSR_BASE_LO + csr[i], 0, fwohci_csr_input, 3080 NULL); 3081 fwohci_handler_set(sc, IEEE1394_TCODE_READ_REQ_QUAD, 3082 CSR_BASE_HI, CSR_BASE_LO + csr[i], 0, fwohci_csr_input, 3083 NULL); 3084 } 3085 sc->sc_csr[CSR_SB_BROADCAST_CHANNEL] = 31; /*XXX*/ 3086 } 3087 3088 static int 3089 fwohci_csr_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt) 3090 { 3091 struct fwohci_pkt res; 3092 u_int32_t reg; 3093 3094 /* 3095 * XXX need to do special functionality other than just r/w... 3096 */ 3097 reg = pkt->fp_hdr[2] - CSR_BASE_LO; 3098 3099 if ((reg & 0x03) != 0) { 3100 /* alignment error */ 3101 return IEEE1394_RCODE_ADDRESS_ERROR; 3102 } 3103 DPRINTFN(1, ("fwohci_csr_input: CSR[0x%04x]: 0x%08x", reg, 3104 *(u_int32_t *)(&sc->sc_csr[reg]))); 3105 if (pkt->fp_tcode == IEEE1394_TCODE_WRITE_REQ_QUAD) { 3106 DPRINTFN(1, (" -> 0x%08x\n", 3107 ntohl(*(u_int32_t *)pkt->fp_iov[0].iov_base))); 3108 *(u_int32_t *)&sc->sc_csr[reg] = 3109 ntohl(*(u_int32_t *)pkt->fp_iov[0].iov_base); 3110 } else { 3111 DPRINTFN(1, ("\n")); 3112 res.fp_hdr[3] = htonl(*(u_int32_t *)&sc->sc_csr[reg]); 3113 res.fp_iov[0].iov_base = &res.fp_hdr[3]; 3114 res.fp_iov[0].iov_len = 4; 3115 res.fp_uio.uio_resid = 4; 3116 res.fp_uio.uio_iovcnt = 1; 3117 fwohci_atrs_output(sc, IEEE1394_RCODE_COMPLETE, pkt, &res); 3118 return -1; 3119 } 3120 return IEEE1394_RCODE_COMPLETE; 3121 } 3122 3123 /* 3124 * Mapping between nodeid and unique ID (EUI-64). 3125 * 3126 * Track old mappings and simply update their devices with the new id's when 3127 * they match an existing EUI. This allows proper renumeration of the bus. 
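 * The table is rebuilt from scratch each time this runs: the local
 * node's entry is copied from sc_sc1394.sc1394_guid and every other
 * node is asked for its EUI-64 with a pair of quadlet reads of its
 * config ROM bus info block (fwohci_uid_req()).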
3128 */ 3129 static void 3130 fwohci_uid_collect(struct fwohci_softc *sc) 3131 { 3132 int i; 3133 struct fwohci_uidtbl *fu; 3134 struct ieee1394_softc *iea; 3135 3136 LIST_FOREACH(iea, &sc->sc_nodelist, sc1394_node) 3137 iea->sc1394_node_id = 0xffff; 3138 3139 if (sc->sc_uidtbl != NULL) 3140 free(sc->sc_uidtbl, M_DEVBUF); 3141 sc->sc_uidtbl = malloc(sizeof(*fu) * (sc->sc_rootid + 1), M_DEVBUF, 3142 M_NOWAIT|M_ZERO); /* XXX M_WAITOK requires locks */ 3143 if (sc->sc_uidtbl == NULL) 3144 return; 3145 3146 for (i = 0, fu = sc->sc_uidtbl; i <= sc->sc_rootid; i++, fu++) { 3147 if (i == (sc->sc_nodeid & OHCI_NodeId_NodeNumber)) { 3148 memcpy(fu->fu_uid, sc->sc_sc1394.sc1394_guid, 8); 3149 fu->fu_valid = 3; 3150 3151 iea = (struct ieee1394_softc *)sc->sc_sc1394.sc1394_if; 3152 if (iea) { 3153 iea->sc1394_node_id = i; 3154 DPRINTF(("%s: Updating nodeid to %d\n", 3155 iea->sc1394_dev.dv_xname, 3156 iea->sc1394_node_id)); 3157 } 3158 } else { 3159 fu->fu_valid = 0; 3160 fwohci_uid_req(sc, i); 3161 } 3162 } 3163 if (sc->sc_rootid == 0) 3164 fwohci_check_nodes(sc); 3165 } 3166 3167 static void 3168 fwohci_uid_req(struct fwohci_softc *sc, int phyid) 3169 { 3170 struct fwohci_pkt pkt; 3171 3172 memset(&pkt, 0, sizeof(pkt)); 3173 pkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD; 3174 pkt.fp_hlen = 12; 3175 pkt.fp_dlen = 0; 3176 pkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) | 3177 (pkt.fp_tcode << 4); 3178 pkt.fp_hdr[1] = ((0xffc0 | phyid) << 16) | CSR_BASE_HI; 3179 pkt.fp_hdr[2] = CSR_BASE_LO + CSR_CONFIG_ROM + 12; 3180 fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD, phyid, 3181 sc->sc_tlabel, 0, fwohci_uid_input, (void *)0); 3182 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f; 3183 fwohci_at_output(sc, sc->sc_ctx_atrq, &pkt); 3184 3185 pkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) | 3186 (pkt.fp_tcode << 4); 3187 pkt.fp_hdr[2] = CSR_BASE_LO + CSR_CONFIG_ROM + 16; 3188 fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD, phyid, 3189 sc->sc_tlabel, 0, fwohci_uid_input, (void *)1); 3190 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f; 3191 fwohci_at_output(sc, sc->sc_ctx_atrq, &pkt); 3192 } 3193 3194 static int 3195 fwohci_uid_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *res) 3196 { 3197 struct fwohci_uidtbl *fu; 3198 struct ieee1394_softc *iea; 3199 struct ieee1394_attach_args fwa; 3200 int i, n, done, rcode, found; 3201 3202 found = 0; 3203 3204 n = (res->fp_hdr[1] >> 16) & OHCI_NodeId_NodeNumber; 3205 rcode = (res->fp_hdr[1] & 0x0000f000) >> 12; 3206 if (rcode != IEEE1394_RCODE_COMPLETE || 3207 sc->sc_uidtbl == NULL || 3208 n > sc->sc_rootid) 3209 return 0; 3210 fu = &sc->sc_uidtbl[n]; 3211 if (arg == 0) { 3212 memcpy(fu->fu_uid, res->fp_iov[0].iov_base, 4); 3213 fu->fu_valid |= 0x1; 3214 } else { 3215 memcpy(fu->fu_uid + 4, res->fp_iov[0].iov_base, 4); 3216 fu->fu_valid |= 0x2; 3217 } 3218 #ifdef FW_DEBUG 3219 if (fu->fu_valid == 0x3) 3220 DPRINTFN(1, ("fwohci_uid_input: " 3221 "Node %d, UID %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", n, 3222 fu->fu_uid[0], fu->fu_uid[1], fu->fu_uid[2], fu->fu_uid[3], 3223 fu->fu_uid[4], fu->fu_uid[5], fu->fu_uid[6], fu->fu_uid[7])); 3224 #endif 3225 if (fu->fu_valid == 0x3) { 3226 LIST_FOREACH(iea, &sc->sc_nodelist, sc1394_node) 3227 if (memcmp(iea->sc1394_guid, fu->fu_uid, 8) == 0) { 3228 found = 1; 3229 iea->sc1394_node_id = n; 3230 DPRINTF(("%s: Updating nodeid to %d\n", 3231 iea->sc1394_dev.dv_xname, 3232 iea->sc1394_node_id)); 3233 if (iea->sc1394_callback.sc1394_reset) 3234 iea->sc1394_callback.sc1394_reset(iea, 3235 
iea->sc1394_callback.sc1394_resetarg); 3236 break; 3237 } 3238 if (!found) { 3239 strcpy(fwa.name, "fwnode"); 3240 memcpy(fwa.uid, fu->fu_uid, 8); 3241 fwa.nodeid = n; 3242 iea = (struct ieee1394_softc *) 3243 config_found_sm_loc(&sc->sc_sc1394.sc1394_dev, 3244 "fwbus", NULL, &fwa, 3245 fwohci_print, fwohci_submatch); 3246 if (iea != NULL) 3247 LIST_INSERT_HEAD(&sc->sc_nodelist, iea, 3248 sc1394_node); 3249 } 3250 } 3251 done = 1; 3252 3253 for (i = 0; i < sc->sc_rootid + 1; i++) { 3254 fu = &sc->sc_uidtbl[i]; 3255 if (fu->fu_valid != 0x3) { 3256 done = 0; 3257 break; 3258 } 3259 } 3260 if (done) 3261 fwohci_check_nodes(sc); 3262 3263 return 0; 3264 } 3265 3266 static void 3267 fwohci_check_nodes(struct fwohci_softc *sc) 3268 { 3269 struct device *detach = NULL; 3270 struct ieee1394_softc *iea; 3271 3272 LIST_FOREACH(iea, &sc->sc_nodelist, sc1394_node) { 3273 3274 /* 3275 * Have to defer detachment until the next 3276 * loop iteration since config_detach 3277 * free's the softc and the loop iterator 3278 * needs data from the softc to move 3279 * forward. 3280 */ 3281 3282 if (detach) { 3283 config_detach(detach, 0); 3284 detach = NULL; 3285 } 3286 if (iea->sc1394_node_id == 0xffff) { 3287 detach = (struct device *)iea; 3288 LIST_REMOVE(iea, sc1394_node); 3289 } 3290 } 3291 if (detach) 3292 config_detach(detach, 0); 3293 } 3294 3295 static int 3296 fwohci_uid_lookup(struct fwohci_softc *sc, const u_int8_t *uid) 3297 { 3298 struct fwohci_uidtbl *fu; 3299 int n; 3300 static const u_int8_t bcast[] = 3301 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 3302 3303 fu = sc->sc_uidtbl; 3304 if (fu == NULL) { 3305 if (memcmp(uid, bcast, sizeof(bcast)) == 0) 3306 return IEEE1394_BCAST_PHY_ID; 3307 fwohci_uid_collect(sc); /* try to get */ 3308 return -1; 3309 } 3310 for (n = 0; n <= sc->sc_rootid; n++, fu++) { 3311 if (fu->fu_valid == 0x3 && memcmp(fu->fu_uid, uid, 8) == 0) 3312 return n; 3313 } 3314 if (memcmp(uid, bcast, sizeof(bcast)) == 0) 3315 return IEEE1394_BCAST_PHY_ID; 3316 for (n = 0, fu = sc->sc_uidtbl; n <= sc->sc_rootid; n++, fu++) { 3317 if (fu->fu_valid != 0x3) { 3318 /* 3319 * XXX: need timer before retransmission 3320 */ 3321 fwohci_uid_req(sc, n); 3322 } 3323 } 3324 return -1; 3325 } 3326 3327 /* 3328 * functions to support network interface 3329 */ 3330 static int 3331 fwohci_if_inreg(struct device *self, u_int32_t offhi, u_int32_t offlo, 3332 void (*handler)(struct device *, struct mbuf *)) 3333 { 3334 struct fwohci_softc *sc = (struct fwohci_softc *)self; 3335 3336 fwohci_handler_set(sc, IEEE1394_TCODE_WRITE_REQ_BLOCK, offhi, offlo, 0, 3337 handler ? fwohci_if_input : NULL, handler); 3338 fwohci_handler_set(sc, IEEE1394_TCODE_STREAM_DATA, 3339 (sc->sc_csr[CSR_SB_BROADCAST_CHANNEL] & IEEE1394_ISOCH_MASK) | 3340 OHCI_ASYNC_STREAM, 3341 1 << IEEE1394_TAG_GASP, 0, 3342 handler ? fwohci_if_input : NULL, handler); 3343 return 0; 3344 } 3345 3346 static int 3347 fwohci_if_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt) 3348 { 3349 int n, len; 3350 struct mbuf *m; 3351 struct iovec *iov; 3352 void (*handler)(struct device *, struct mbuf *) = arg; 3353 3354 #ifdef FW_DEBUG 3355 int i; 3356 DPRINTFN(1, ("fwohci_if_input: tcode=0x%x, dlen=%d", pkt->fp_tcode, 3357 pkt->fp_dlen)); 3358 for (i = 0; i < pkt->fp_hlen/4; i++) 3359 DPRINTFN(2, ("%s%08x", i?" 
":"\n ", pkt->fp_hdr[i])); 3360 DPRINTFN(2, ("$")); 3361 for (n = 0, len = pkt->fp_dlen; len > 0; len -= i, n++){ 3362 iov = &pkt->fp_iov[n]; 3363 for (i = 0; i < iov->iov_len; i++) 3364 DPRINTFN(2, ("%s%02x", (i%32)?((i%4)?"":" "):"\n ", 3365 ((u_int8_t *)iov->iov_base)[i])); 3366 DPRINTFN(2, ("$")); 3367 } 3368 DPRINTFN(1, ("\n")); 3369 #endif /* FW_DEBUG */ 3370 len = pkt->fp_dlen; 3371 MGETHDR(m, M_DONTWAIT, MT_DATA); 3372 if (m == NULL) 3373 return IEEE1394_RCODE_COMPLETE; 3374 m->m_len = 16; 3375 if (len + m->m_len > MHLEN) { 3376 MCLGET(m, M_DONTWAIT); 3377 if ((m->m_flags & M_EXT) == 0) { 3378 m_freem(m); 3379 return IEEE1394_RCODE_COMPLETE; 3380 } 3381 } 3382 n = (pkt->fp_hdr[1] >> 16) & OHCI_NodeId_NodeNumber; 3383 if (sc->sc_uidtbl == NULL || n > sc->sc_rootid || 3384 sc->sc_uidtbl[n].fu_valid != 0x3) { 3385 printf("%s: packet from unknown node: phy id %d\n", 3386 sc->sc_sc1394.sc1394_dev.dv_xname, n); 3387 m_freem(m); 3388 fwohci_uid_req(sc, n); 3389 return IEEE1394_RCODE_COMPLETE; 3390 } 3391 memcpy(mtod(m, caddr_t), sc->sc_uidtbl[n].fu_uid, 8); 3392 if (pkt->fp_tcode == IEEE1394_TCODE_STREAM_DATA) { 3393 m->m_flags |= M_BCAST; 3394 mtod(m, u_int32_t *)[2] = mtod(m, u_int32_t *)[3] = 0; 3395 } else { 3396 mtod(m, u_int32_t *)[2] = htonl(pkt->fp_hdr[1]); 3397 mtod(m, u_int32_t *)[3] = htonl(pkt->fp_hdr[2]); 3398 } 3399 mtod(m, u_int8_t *)[8] = n; /*XXX: node id for debug */ 3400 mtod(m, u_int8_t *)[9] = 3401 (*pkt->fp_trail >> (16 + OHCI_CTXCTL_SPD_BITPOS)) & 3402 ((1 << OHCI_CTXCTL_SPD_BITLEN) - 1); 3403 3404 m->m_pkthdr.rcvif = NULL; /* set in child */ 3405 m->m_pkthdr.len = len + m->m_len; 3406 /* 3407 * We may use receive buffer by external mbuf instead of copy here. 3408 * But asynchronous receive buffer must be operate in buffer fill 3409 * mode, so that each receive buffer will shared by multiple mbufs. 3410 * If upper layer doesn't free mbuf soon, e.g. application program 3411 * is suspended, buffer must be reallocated. 3412 * Isochronous buffer must be operate in packet buffer mode, and 3413 * it is easy to map receive buffer to external mbuf. But it is 3414 * used for broadcast/multicast only, and is expected not so 3415 * performance sensitive for now. 3416 * XXX: The performance may be important for multicast case, 3417 * so we should revisit here later. 3418 * -- onoe 3419 */ 3420 n = 0; 3421 iov = pkt->fp_uio.uio_iov; 3422 while (len > 0) { 3423 memcpy(mtod(m, caddr_t) + m->m_len, iov->iov_base, 3424 iov->iov_len); 3425 m->m_len += iov->iov_len; 3426 len -= iov->iov_len; 3427 iov++; 3428 } 3429 (*handler)(sc->sc_sc1394.sc1394_if, m); 3430 return IEEE1394_RCODE_COMPLETE; 3431 } 3432 3433 static int 3434 fwohci_if_input_iso(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt) 3435 { 3436 int n, len; 3437 int chan, tag; 3438 struct mbuf *m; 3439 struct iovec *iov; 3440 void (*handler)(struct device *, struct mbuf *) = arg; 3441 #ifdef FW_DEBUG 3442 int i; 3443 #endif 3444 3445 chan = (pkt->fp_hdr[0] & 0x00003f00) >> 8; 3446 tag = (pkt->fp_hdr[0] & 0x0000c000) >> 14; 3447 #ifdef FW_DEBUG 3448 DPRINTFN(1, ("fwohci_if_input_iso: " 3449 "tcode=0x%x, chan=%d, tag=%x, dlen=%d", 3450 pkt->fp_tcode, chan, tag, pkt->fp_dlen)); 3451 for (i = 0; i < pkt->fp_hlen/4; i++) 3452 DPRINTFN(2, ("%s%08x", i?" 
":"\n\t", pkt->fp_hdr[i])); 3453 DPRINTFN(2, ("$")); 3454 for (n = 0, len = pkt->fp_dlen; len > 0; len -= i, n++){ 3455 iov = &pkt->fp_iov[n]; 3456 for (i = 0; i < iov->iov_len; i++) 3457 DPRINTFN(2, ("%s%02x", 3458 (i%32)?((i%4)?"":" "):"\n\t", 3459 ((u_int8_t *)iov->iov_base)[i])); 3460 DPRINTFN(2, ("$")); 3461 } 3462 DPRINTFN(2, ("\n")); 3463 #endif /* FW_DEBUG */ 3464 len = pkt->fp_dlen; 3465 MGETHDR(m, M_DONTWAIT, MT_DATA); 3466 if (m == NULL) 3467 return IEEE1394_RCODE_COMPLETE; 3468 m->m_len = 16; 3469 if (m->m_len + len > MHLEN) { 3470 MCLGET(m, M_DONTWAIT); 3471 if ((m->m_flags & M_EXT) == 0) { 3472 m_freem(m); 3473 return IEEE1394_RCODE_COMPLETE; 3474 } 3475 } 3476 3477 m->m_flags |= M_BCAST; 3478 3479 if (tag == IEEE1394_TAG_GASP) { 3480 n = (pkt->fp_hdr[1] >> 16) & OHCI_NodeId_NodeNumber; 3481 if (sc->sc_uidtbl == NULL || n > sc->sc_rootid || 3482 sc->sc_uidtbl[n].fu_valid != 0x3) { 3483 printf("%s: packet from unknown node: phy id %d\n", 3484 sc->sc_sc1394.sc1394_dev.dv_xname, n); 3485 m_freem(m); 3486 return IEEE1394_RCODE_COMPLETE; 3487 } 3488 memcpy(mtod(m, caddr_t), sc->sc_uidtbl[n].fu_uid, 8); 3489 mtod(m, u_int32_t *)[2] = htonl(pkt->fp_hdr[1]); 3490 mtod(m, u_int32_t *)[3] = htonl(pkt->fp_hdr[2]); 3491 mtod(m, u_int8_t *)[8] = n; /*XXX: node id for debug */ 3492 mtod(m, u_int8_t *)[9] = 3493 (*pkt->fp_trail >> (16 + OHCI_CTXCTL_SPD_BITPOS)) & 3494 ((1 << OHCI_CTXCTL_SPD_BITLEN) - 1); 3495 } 3496 mtod(m, u_int8_t *)[14] = chan; 3497 mtod(m, u_int8_t *)[15] = tag; 3498 3499 3500 m->m_pkthdr.rcvif = NULL; /* set in child */ 3501 m->m_pkthdr.len = len + m->m_len; 3502 /* 3503 * We may use receive buffer by external mbuf instead of copy here. 3504 * But asynchronous receive buffer must be operate in buffer fill 3505 * mode, so that each receive buffer will shared by multiple mbufs. 3506 * If upper layer doesn't free mbuf soon, e.g. application program 3507 * is suspended, buffer must be reallocated. 3508 * Isochronous buffer must be operate in packet buffer mode, and 3509 * it is easy to map receive buffer to external mbuf. But it is 3510 * used for broadcast/multicast only, and is expected not so 3511 * performance sensitive for now. 3512 * XXX: The performance may be important for multicast case, 3513 * so we should revisit here later. 
3514 * -- onoe 3515 */ 3516 n = 0; 3517 iov = pkt->fp_uio.uio_iov; 3518 while (len > 0) { 3519 memcpy(mtod(m, caddr_t) + m->m_len, iov->iov_base, 3520 iov->iov_len); 3521 m->m_len += iov->iov_len; 3522 len -= iov->iov_len; 3523 iov++; 3524 } 3525 (*handler)(sc->sc_sc1394.sc1394_if, m); 3526 return IEEE1394_RCODE_COMPLETE; 3527 } 3528 3529 3530 3531 static int 3532 fwohci_if_output(struct device *self, struct mbuf *m0, 3533 void (*callback)(struct device *, struct mbuf *)) 3534 { 3535 struct fwohci_softc *sc = (struct fwohci_softc *)self; 3536 struct fwohci_pkt pkt; 3537 u_int8_t *p; 3538 int n = 0, error, spd, hdrlen, maxrec; /* XXX: gcc */ 3539 #ifdef FW_DEBUG 3540 struct mbuf *m; 3541 #endif 3542 3543 p = mtod(m0, u_int8_t *); 3544 if (m0->m_flags & (M_BCAST | M_MCAST)) { 3545 spd = IEEE1394_SPD_S100; /*XXX*/ 3546 maxrec = 512; /*XXX*/ 3547 hdrlen = 8; 3548 } else { 3549 n = fwohci_uid_lookup(sc, p); 3550 if (n < 0) { 3551 printf("%s: nodeid unknown:" 3552 " %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", 3553 sc->sc_sc1394.sc1394_dev.dv_xname, 3554 p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]); 3555 error = EHOSTUNREACH; 3556 goto end; 3557 } 3558 if (n == IEEE1394_BCAST_PHY_ID) { 3559 printf("%s: broadcast with !M_MCAST\n", 3560 sc->sc_sc1394.sc1394_dev.dv_xname); 3561 #ifdef FW_DEBUG 3562 DPRINTFN(2, ("packet:")); 3563 for (m = m0; m != NULL; m = m->m_next) { 3564 for (n = 0; n < m->m_len; n++) 3565 DPRINTFN(2, ("%s%02x", (n%32)? 3566 ((n%4)?"":" "):"\n ", 3567 mtod(m, u_int8_t *)[n])); 3568 DPRINTFN(2, ("$")); 3569 } 3570 DPRINTFN(2, ("\n")); 3571 #endif 3572 error = EHOSTUNREACH; 3573 goto end; 3574 } 3575 maxrec = 2 << p[8]; 3576 spd = p[9]; 3577 hdrlen = 0; 3578 } 3579 if (spd > sc->sc_sc1394.sc1394_link_speed) { 3580 DPRINTF(("fwohci_if_output: spd (%d) is faster than %d\n", 3581 spd, sc->sc_sc1394.sc1394_link_speed)); 3582 spd = sc->sc_sc1394.sc1394_link_speed; 3583 } 3584 if (maxrec > (512 << spd)) { 3585 DPRINTF(("fwohci_if_output: maxrec (%d) is larger for spd (%d)" 3586 "\n", maxrec, spd)); 3587 maxrec = 512 << spd; 3588 } 3589 while (maxrec > sc->sc_sc1394.sc1394_max_receive) { 3590 DPRINTF(("fwohci_if_output: maxrec (%d) is larger than" 3591 " %d\n", maxrec, sc->sc_sc1394.sc1394_max_receive)); 3592 maxrec >>= 1; 3593 } 3594 if (maxrec < 512) { 3595 DPRINTF(("fwohci_if_output: maxrec (%d) is smaller than " 3596 "minimum\n", maxrec)); 3597 maxrec = 512; 3598 } 3599 3600 m_adj(m0, 16 - hdrlen); 3601 if (m0->m_pkthdr.len > maxrec) { 3602 DPRINTF(("fwohci_if_output: packet too big: hdr %d, pktlen " 3603 "%d, maxrec %d\n", hdrlen, m0->m_pkthdr.len, maxrec)); 3604 error = E2BIG; /*XXX*/ 3605 goto end; 3606 } 3607 3608 memset(&pkt, 0, sizeof(pkt)); 3609 pkt.fp_uio.uio_iov = pkt.fp_iov; 3610 pkt.fp_uio.uio_segflg = UIO_SYSSPACE; 3611 pkt.fp_uio.uio_rw = UIO_WRITE; 3612 if (m0->m_flags & (M_BCAST | M_MCAST)) { 3613 /* construct GASP header */ 3614 p = mtod(m0, u_int8_t *); 3615 p[0] = sc->sc_nodeid >> 8; 3616 p[1] = sc->sc_nodeid & 0xff; 3617 p[2] = 0x00; p[3] = 0x00; p[4] = 0x5e; 3618 p[5] = 0x00; p[6] = 0x00; p[7] = 0x01; 3619 pkt.fp_tcode = IEEE1394_TCODE_STREAM_DATA; 3620 pkt.fp_hlen = 8; 3621 pkt.fp_hdr[0] = (spd << 16) | (IEEE1394_TAG_GASP << 14) | 3622 ((sc->sc_csr[CSR_SB_BROADCAST_CHANNEL] & 3623 OHCI_NodeId_NodeNumber) << 8); 3624 pkt.fp_hdr[1] = m0->m_pkthdr.len << 16; 3625 } else { 3626 pkt.fp_tcode = IEEE1394_TCODE_WRITE_REQ_BLOCK; 3627 pkt.fp_hlen = 16; 3628 pkt.fp_hdr[0] = 0x00800100 | (sc->sc_tlabel << 10) | 3629 (spd << 16); 3630 pkt.fp_hdr[1] = 3631 (((sc->sc_nodeid & 
OHCI_NodeId_BusNumber) | n) << 16) | 3632 (p[10] << 8) | p[11]; 3633 pkt.fp_hdr[2] = (p[12]<<24) | (p[13]<<16) | (p[14]<<8) | p[15]; 3634 pkt.fp_hdr[3] = m0->m_pkthdr.len << 16; 3635 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f; 3636 } 3637 pkt.fp_hdr[0] |= (pkt.fp_tcode << 4); 3638 pkt.fp_dlen = m0->m_pkthdr.len; 3639 pkt.fp_m = m0; 3640 pkt.fp_callback = callback; 3641 error = fwohci_at_output(sc, sc->sc_ctx_atrq, &pkt); 3642 m0 = pkt.fp_m; 3643 end: 3644 if (m0 != NULL) { 3645 if (callback) 3646 (*callback)(sc->sc_sc1394.sc1394_if, m0); 3647 else 3648 m_freem(m0); 3649 } 3650 return error; 3651 } 3652 3653 /* 3654 * High level routines to provide abstraction to attaching layers to 3655 * send/receive data. 3656 */ 3657 3658 /* 3659 * These break down into 4 routines as follows: 3660 * 3661 * int fwohci_read(struct ieee1394_abuf *) 3662 * 3663 * This routine will attempt to read a region from the requested node. 3664 * A callback must be provided which will be called when either the completed 3665 * read is done or an unrecoverable error occurs. This is mainly a convenience 3666 * routine since it will encapsulate retrying a region as quadlet vs. block 3667 * reads and recombining all the returned data. This could also be done with a 3668 * series of write/inreg's for each packet sent. 3669 * 3670 * int fwohci_write(struct ieee1394_abuf *) 3671 * 3672 * The work horse main entry point for putting packets on the bus. This is the 3673 * generalized interface for fwnode/etc code to put packets out onto the bus. 3674 * It accepts all standard ieee1394 tcodes (XXX: only a few today) and 3675 * optionally will callback via a func pointer to the calling code with the 3676 * resulting ACK code from the packet. If the ACK code is to be ignored (i.e. 3677 * no cb) then the write routine will take care of free'ing the abuf since the 3678 * fwnode/etc code won't have any knowledge of when to do this. This allows for 3679 * simple one-off packets to be sent from the upper-level code without worrying 3680 * about a callback for cleanup. 3681 * 3682 * int fwohci_inreg(struct ieee1394_abuf *, int) 3683 * 3684 * This is very simple. It evals the abuf passed in and registers an internal 3685 * handler as the callback for packets received for that operation. 3686 * The integer argument specifies whether on a block read/write operation to 3687 * allow sub-regions to be read/written (in block form) as well. 3688 * 3689 * XXX: This whole structure needs to be redone as a list of regions and 3690 * operations allowed on those regions. 3691 * 3692 * int fwohci_unreg(struct ieee1394_abuf *, int) 3693 * 3694 * This simply unregisters the respective callback done via inreg for items 3695 * which only need to register an area for a one-time operation (like a status 3696 * buffer a remote node will write to when the current operation is done). The 3697 * int argument specifies the same behavior as inreg, except in reverse (i.e. 3698 * it unregisters). 3699 */ 3700 3701 static int 3702 fwohci_read(struct ieee1394_abuf *ab) 3703 { 3704 struct fwohci_pkt pkt; 3705 struct ieee1394_softc *sc = ab->ab_req; 3706 struct fwohci_softc *psc = 3707 (struct fwohci_softc *)sc->sc1394_dev.dv_parent; 3708 struct fwohci_cb *fcb; 3709 u_int32_t high, lo; 3710 int rv, tcode; 3711 3712 /* Have to have a callback when reading. 
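 * The read completes asynchronously: fwohci_read_resp() collects the
 * response (or retries the region as quadlet reads on certain
 * errors) and the result can only be handed back through ab_cb.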
*/ 3713 if (ab->ab_cb == NULL) 3714 return -1; 3715 3716 fcb = malloc(sizeof(struct fwohci_cb), M_DEVBUF, M_WAITOK); 3717 fcb->ab = ab; 3718 fcb->count = 0; 3719 fcb->abuf_valid = 1; 3720 3721 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32); 3722 lo = (ab->ab_addr & 0x00000000ffffffffULL); 3723 3724 memset(&pkt, 0, sizeof(pkt)); 3725 pkt.fp_hdr[1] = ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high; 3726 pkt.fp_hdr[2] = lo; 3727 pkt.fp_dlen = 0; 3728 3729 if (ab->ab_length == 4) { 3730 pkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD; 3731 tcode = IEEE1394_TCODE_READ_RESP_QUAD; 3732 pkt.fp_hlen = 12; 3733 } else { 3734 pkt.fp_tcode = IEEE1394_TCODE_READ_REQ_BLOCK; 3735 pkt.fp_hlen = 16; 3736 tcode = IEEE1394_TCODE_READ_RESP_BLOCK; 3737 pkt.fp_hdr[3] = (ab->ab_length << 16); 3738 } 3739 pkt.fp_hdr[0] = 0x00000100 | (sc->sc1394_link_speed << 16) | 3740 (psc->sc_tlabel << 10) | (pkt.fp_tcode << 4); 3741 3742 pkt.fp_statusarg = fcb; 3743 pkt.fp_statuscb = fwohci_read_resp; 3744 3745 rv = fwohci_handler_set(psc, tcode, ab->ab_req->sc1394_node_id, 3746 psc->sc_tlabel, 0, fwohci_read_resp, fcb); 3747 if (rv) 3748 return rv; 3749 rv = fwohci_at_output(psc, psc->sc_ctx_atrq, &pkt); 3750 if (rv) 3751 fwohci_handler_set(psc, tcode, ab->ab_req->sc1394_node_id, 3752 psc->sc_tlabel, 0, NULL, NULL); 3753 psc->sc_tlabel = (psc->sc_tlabel + 1) & 0x3f; 3754 fcb->count = 1; 3755 return rv; 3756 } 3757 3758 static int 3759 fwohci_write(struct ieee1394_abuf *ab) 3760 { 3761 struct fwohci_pkt pkt; 3762 struct ieee1394_softc *sc = ab->ab_req; 3763 struct fwohci_softc *psc = 3764 (struct fwohci_softc *)sc->sc1394_dev.dv_parent; 3765 u_int32_t high, lo; 3766 int rv; 3767 3768 if (ab->ab_tcode == IEEE1394_TCODE_WRITE_REQ_BLOCK) { 3769 if (ab->ab_length > IEEE1394_MAX_REC(sc->sc1394_max_receive)) { 3770 DPRINTF(("Packet too large: %d\n", ab->ab_length)); 3771 return E2BIG; 3772 } 3773 } 3774 3775 if (ab->ab_length > 3776 IEEE1394_MAX_ASYNCH_FOR_SPEED(sc->sc1394_link_speed)) { 3777 DPRINTF(("Packet too large: %d\n", ab->ab_length)); 3778 return E2BIG; 3779 } 3780 3781 if (ab->ab_data && ab->ab_uio) 3782 panic("Can't call with uio and data set"); 3783 if ((ab->ab_data == NULL) && (ab->ab_uio == NULL)) 3784 panic("One of either ab_data or ab_uio must be set"); 3785 3786 memset(&pkt, 0, sizeof(pkt)); 3787 3788 pkt.fp_tcode = ab->ab_tcode; 3789 if (ab->ab_data) { 3790 pkt.fp_uio.uio_iov = pkt.fp_iov; 3791 pkt.fp_uio.uio_segflg = UIO_SYSSPACE; 3792 pkt.fp_uio.uio_rw = UIO_WRITE; 3793 } else 3794 memcpy(&pkt.fp_uio, ab->ab_uio, sizeof(struct uio)); 3795 3796 pkt.fp_statusarg = ab; 3797 pkt.fp_statuscb = fwohci_write_ack; 3798 3799 switch (ab->ab_tcode) { 3800 case IEEE1394_TCODE_WRITE_RESP: 3801 pkt.fp_hlen = 12; 3802 case IEEE1394_TCODE_READ_RESP_QUAD: 3803 case IEEE1394_TCODE_READ_RESP_BLOCK: 3804 if (!pkt.fp_hlen) 3805 pkt.fp_hlen = 16; 3806 high = ab->ab_retlen; 3807 ab->ab_retlen = 0; 3808 lo = 0; 3809 pkt.fp_hdr[0] = 0x00000100 | (sc->sc1394_link_speed << 16) | 3810 (ab->ab_tlabel << 10) | (pkt.fp_tcode << 4); 3811 break; 3812 default: 3813 pkt.fp_hlen = 16; 3814 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32); 3815 lo = (ab->ab_addr & 0x00000000ffffffffULL); 3816 pkt.fp_hdr[0] = 0x00000100 | (sc->sc1394_link_speed << 16) | 3817 (psc->sc_tlabel << 10) | (pkt.fp_tcode << 4); 3818 psc->sc_tlabel = (psc->sc_tlabel + 1) & 0x3f; 3819 break; 3820 } 3821 3822 pkt.fp_hdr[1] = ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high; 3823 pkt.fp_hdr[2] = lo; 3824 if (pkt.fp_hlen == 16) { 3825 if (ab->ab_length == 4) { 
3826 pkt.fp_hdr[3] = ab->ab_data[0]; 3827 pkt.fp_dlen = 0; 3828 } else { 3829 pkt.fp_hdr[3] = (ab->ab_length << 16); 3830 pkt.fp_dlen = ab->ab_length; 3831 if (ab->ab_data) { 3832 pkt.fp_uio.uio_iovcnt = 1; 3833 pkt.fp_uio.uio_resid = ab->ab_length; 3834 pkt.fp_iov[0].iov_base = ab->ab_data; 3835 pkt.fp_iov[0].iov_len = ab->ab_length; 3836 } 3837 } 3838 } 3839 switch (ab->ab_tcode) { 3840 case IEEE1394_TCODE_WRITE_RESP: 3841 case IEEE1394_TCODE_READ_RESP_QUAD: 3842 case IEEE1394_TCODE_READ_RESP_BLOCK: 3843 rv = fwohci_at_output(psc, psc->sc_ctx_atrs, &pkt); 3844 break; 3845 default: 3846 rv = fwohci_at_output(psc, psc->sc_ctx_atrq, &pkt); 3847 break; 3848 } 3849 return rv; 3850 } 3851 3852 static int 3853 fwohci_read_resp(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt) 3854 { 3855 struct fwohci_cb *fcb = arg; 3856 struct ieee1394_abuf *ab = fcb->ab; 3857 struct fwohci_pkt newpkt; 3858 u_int32_t *cur, high, lo; 3859 int i, tcode, rcode, status, rv; 3860 3861 /* 3862 * Both the ACK handling and normal response callbacks are handled here. 3863 * The main reason for this is the various error conditions that can 3864 * occur trying to block read some areas and the ways that gets reported 3865 * back to calling station. This is a variety of ACK codes, responses, 3866 * etc which makes it much more difficult to process if both aren't 3867 * handled here. 3868 */ 3869 3870 /* Check for status packet. */ 3871 3872 if (pkt->fp_tcode == -1) { 3873 status = pkt->fp_status & OHCI_DESC_STATUS_ACK_MASK; 3874 rcode = -1; 3875 tcode = (pkt->fp_hdr[0] >> 4) & 0xf; 3876 if ((status != OHCI_CTXCTL_EVENT_ACK_COMPLETE) && 3877 (status != OHCI_CTXCTL_EVENT_ACK_PENDING)) 3878 DPRINTFN(2, ("Got status packet: 0x%02x\n", 3879 (unsigned int)status)); 3880 fcb->count--; 3881 3882 /* 3883 * Got all the ack's back and the buffer is invalid (i.e. the 3884 * callback has been called. Clean up. 3885 */ 3886 3887 if (fcb->abuf_valid == 0) { 3888 if (fcb->count == 0) 3889 free(fcb, M_DEVBUF); 3890 return IEEE1394_RCODE_COMPLETE; 3891 } 3892 } else { 3893 status = -1; 3894 tcode = pkt->fp_tcode; 3895 rcode = (pkt->fp_hdr[1] & 0x0000f000) >> 12; 3896 } 3897 3898 /* 3899 * Some area's (like the config rom want to be read as quadlets only. 3900 * 3901 * The current ideas to try are: 3902 * 3903 * Got an ACK_TYPE_ERROR on a block read. 3904 * 3905 * Got either RCODE_TYPE or RCODE_ADDRESS errors in a block read 3906 * response. 3907 * 3908 * In all cases construct a new packet for a quadlet read and let 3909 * mutli_resp handle the iteration over the space. 3910 */ 3911 3912 if (((status == OHCI_CTXCTL_EVENT_ACK_TYPE_ERROR) && 3913 (tcode == IEEE1394_TCODE_READ_REQ_BLOCK)) || 3914 (((rcode == IEEE1394_RCODE_TYPE_ERROR) || 3915 (rcode == IEEE1394_RCODE_ADDRESS_ERROR)) && 3916 (tcode == IEEE1394_TCODE_READ_RESP_BLOCK))) { 3917 3918 /* Read the area in quadlet chunks (internally track this). 
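 * As a worked illustration (sizes are only an example, not from the
 * spec): re-reading a 1024-byte region this way issues 1024 / 4 = 256
 * quadlet read requests, each one consuming a tlabel and completing
 * through fwohci_read_multi_resp() before the next request goes out.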
*/ 3919 3920 memset(&newpkt, 0, sizeof(newpkt)); 3921 3922 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32); 3923 lo = (ab->ab_addr & 0x00000000ffffffffULL); 3924 3925 newpkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD; 3926 newpkt.fp_hlen = 12; 3927 newpkt.fp_dlen = 0; 3928 newpkt.fp_hdr[1] = 3929 ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high; 3930 newpkt.fp_hdr[2] = lo; 3931 newpkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) | 3932 (newpkt.fp_tcode << 4); 3933 3934 rv = fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD, 3935 ab->ab_req->sc1394_node_id, sc->sc_tlabel, 0, 3936 fwohci_read_multi_resp, fcb); 3937 if (rv) { 3938 (*ab->ab_cb)(ab, -1); 3939 goto cleanup; 3940 } 3941 newpkt.fp_statusarg = fcb; 3942 newpkt.fp_statuscb = fwohci_read_resp; 3943 rv = fwohci_at_output(sc, sc->sc_ctx_atrq, &newpkt); 3944 if (rv) { 3945 fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD, 3946 ab->ab_req->sc1394_node_id, sc->sc_tlabel, 0, NULL, 3947 NULL); 3948 (*ab->ab_cb)(ab, -1); 3949 goto cleanup; 3950 } 3951 fcb->count++; 3952 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f; 3953 return IEEE1394_RCODE_COMPLETE; 3954 } else if ((rcode != -1) || ((status != -1) && 3955 (status != OHCI_CTXCTL_EVENT_ACK_COMPLETE) && 3956 (status != OHCI_CTXCTL_EVENT_ACK_PENDING))) { 3957 3958 /* 3959 * Recombine all the iov data into 1 chunk for higher 3960 * level code. 3961 */ 3962 3963 if (rcode != -1) { 3964 cur = ab->ab_data; 3965 for (i = 0; i < pkt->fp_uio.uio_iovcnt; i++) { 3966 /* 3967 * Make sure and don't exceed the buffer 3968 * allocated for return. 3969 */ 3970 if ((ab->ab_retlen + pkt->fp_iov[i].iov_len) > 3971 ab->ab_length) { 3972 memcpy(cur, pkt->fp_iov[i].iov_base, 3973 (ab->ab_length - ab->ab_retlen)); 3974 ab->ab_retlen = ab->ab_length; 3975 break; 3976 } 3977 memcpy(cur, pkt->fp_iov[i].iov_base, 3978 pkt->fp_iov[i].iov_len); 3979 cur += pkt->fp_iov[i].iov_len; 3980 ab->ab_retlen += pkt->fp_iov[i].iov_len; 3981 } 3982 } 3983 if (status != -1) 3984 /* XXX: Need a complete tlabel interface. */ 3985 for (i = 0; i < 64; i++) 3986 fwohci_handler_set(sc, 3987 IEEE1394_TCODE_READ_RESP_QUAD, 3988 ab->ab_req->sc1394_node_id, i, 0, NULL, 3989 NULL); 3990 (*ab->ab_cb)(ab, rcode); 3991 goto cleanup; 3992 } else 3993 /* Good ack packet. */ 3994 return IEEE1394_RCODE_COMPLETE; 3995 3996 /* Can't get here unless ab->ab_cb has been called. */ 3997 3998 cleanup: 3999 fcb->abuf_valid = 0; 4000 if (fcb->count == 0) 4001 free(fcb, M_DEVBUF); 4002 return IEEE1394_RCODE_COMPLETE; 4003 } 4004 4005 static int 4006 fwohci_read_multi_resp(struct fwohci_softc *sc, void *arg, 4007 struct fwohci_pkt *pkt) 4008 { 4009 struct fwohci_cb *fcb = arg; 4010 struct ieee1394_abuf *ab = fcb->ab; 4011 struct fwohci_pkt newpkt; 4012 u_int32_t high, lo; 4013 int rcode, rv; 4014 4015 /* 4016 * Bad return codes from the wire, just return what's already in the 4017 * buf. 4018 */ 4019 4020 /* Make sure a response packet didn't arrive after a bad ACK. 
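 * fcb->abuf_valid is cleared as soon as the callback has fired, so a
 * late response for the same transaction is silently completed here
 * instead of touching a buffer the caller may already have reused.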
*/ 4021 if (fcb->abuf_valid == 0) 4022 return IEEE1394_RCODE_COMPLETE; 4023 4024 rcode = (pkt->fp_hdr[1] & 0x0000f000) >> 12; 4025 4026 if (rcode) { 4027 (*ab->ab_cb)(ab, rcode); 4028 goto cleanup; 4029 } 4030 4031 if ((ab->ab_retlen + pkt->fp_iov[0].iov_len) > ab->ab_length) { 4032 memcpy(((char *)ab->ab_data + ab->ab_retlen), 4033 pkt->fp_iov[0].iov_base, (ab->ab_length - ab->ab_retlen)); 4034 ab->ab_retlen = ab->ab_length; 4035 } else { 4036 memcpy(((char *)ab->ab_data + ab->ab_retlen), 4037 pkt->fp_iov[0].iov_base, 4); 4038 ab->ab_retlen += 4; 4039 } 4040 /* Still more, loop and read 4 more bytes. */ 4041 if (ab->ab_retlen < ab->ab_length) { 4042 memset(&newpkt, 0, sizeof(newpkt)); 4043 4044 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32); 4045 lo = (ab->ab_addr & 0x00000000ffffffffULL) + ab->ab_retlen; 4046 4047 newpkt.fp_tcode = IEEE1394_TCODE_READ_REQ_QUAD; 4048 newpkt.fp_hlen = 12; 4049 newpkt.fp_dlen = 0; 4050 newpkt.fp_hdr[1] = 4051 ((0xffc0 | ab->ab_req->sc1394_node_id) << 16) | high; 4052 newpkt.fp_hdr[2] = lo; 4053 newpkt.fp_hdr[0] = 0x00000100 | (sc->sc_tlabel << 10) | 4054 (newpkt.fp_tcode << 4); 4055 4056 newpkt.fp_statusarg = fcb; 4057 newpkt.fp_statuscb = fwohci_read_resp; 4058 4059 /* 4060 * Bad return code. Just give up and return what's 4061 * come in now. 4062 */ 4063 rv = fwohci_handler_set(sc, IEEE1394_TCODE_READ_RESP_QUAD, 4064 ab->ab_req->sc1394_node_id, sc->sc_tlabel, 0, 4065 fwohci_read_multi_resp, fcb); 4066 if (rv) 4067 (*ab->ab_cb)(ab, -1); 4068 else { 4069 rv = fwohci_at_output(sc, sc->sc_ctx_atrq, &newpkt); 4070 if (rv) { 4071 fwohci_handler_set(sc, 4072 IEEE1394_TCODE_READ_RESP_QUAD, 4073 ab->ab_req->sc1394_node_id, sc->sc_tlabel, 4074 0, NULL, NULL); 4075 (*ab->ab_cb)(ab, -1); 4076 } else { 4077 sc->sc_tlabel = (sc->sc_tlabel + 1) & 0x3f; 4078 fcb->count++; 4079 return IEEE1394_RCODE_COMPLETE; 4080 } 4081 } 4082 } else 4083 (*ab->ab_cb)(ab, IEEE1394_RCODE_COMPLETE); 4084 4085 cleanup: 4086 /* Can't get here unless ab_cb has been called. */ 4087 fcb->abuf_valid = 0; 4088 if (fcb->count == 0) 4089 free(fcb, M_DEVBUF); 4090 return IEEE1394_RCODE_COMPLETE; 4091 } 4092 4093 static int 4094 fwohci_write_ack(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt) 4095 { 4096 struct ieee1394_abuf *ab = arg; 4097 u_int16_t status; 4098 4099 4100 status = pkt->fp_status & OHCI_DESC_STATUS_ACK_MASK; 4101 if ((status != OHCI_CTXCTL_EVENT_ACK_COMPLETE) && 4102 (status != OHCI_CTXCTL_EVENT_ACK_PENDING)) 4103 DPRINTF(("Got status packet: 0x%02x\n", 4104 (unsigned int)status)); 4105 4106 /* No callback means this level should free the buffers. 
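 * This mirrors the contract described above fwohci_write(): one-shot
 * writes submitted without a callback are cleaned up here, including
 * ab_data when it was supplied.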
*/ 4107 if (ab->ab_cb) 4108 (*ab->ab_cb)(ab, status); 4109 else { 4110 if (ab->ab_data) 4111 free(ab->ab_data, M_1394DATA); 4112 free(ab, M_1394DATA); 4113 } 4114 return IEEE1394_RCODE_COMPLETE; 4115 } 4116 4117 static int 4118 fwohci_inreg(struct ieee1394_abuf *ab, int allow) 4119 { 4120 struct ieee1394_softc *sc = ab->ab_req; 4121 struct fwohci_softc *psc = 4122 (struct fwohci_softc *)sc->sc1394_dev.dv_parent; 4123 u_int32_t high, lo; 4124 int rv; 4125 4126 high = ((ab->ab_addr & 0x0000ffff00000000ULL) >> 32); 4127 lo = (ab->ab_addr & 0x00000000ffffffffULL); 4128 4129 rv = 0; 4130 switch (ab->ab_tcode) { 4131 case IEEE1394_TCODE_READ_REQ_QUAD: 4132 case IEEE1394_TCODE_WRITE_REQ_QUAD: 4133 if (ab->ab_cb) 4134 rv = fwohci_handler_set(psc, ab->ab_tcode, high, lo, 0, 4135 fwohci_parse_input, ab); 4136 else 4137 fwohci_handler_set(psc, ab->ab_tcode, high, lo, 0, NULL, 4138 NULL); 4139 break; 4140 case IEEE1394_TCODE_READ_REQ_BLOCK: 4141 case IEEE1394_TCODE_WRITE_REQ_BLOCK: 4142 if (allow) { 4143 if (ab->ab_cb) { 4144 rv = fwohci_handler_set(psc, ab->ab_tcode, 4145 high, lo, ab->ab_length, 4146 fwohci_parse_input, ab); 4147 if (rv) 4148 fwohci_handler_set(psc, ab->ab_tcode, 4149 high, lo, ab->ab_length, NULL, 4150 NULL); 4151 ab->ab_subok = 1; 4152 } else 4153 fwohci_handler_set(psc, ab->ab_tcode, high, lo, 4154 ab->ab_length, NULL, NULL); 4155 } else { 4156 if (ab->ab_cb) 4157 rv = fwohci_handler_set(psc, ab->ab_tcode, high, 4158 lo, 0, fwohci_parse_input, ab); 4159 else 4160 fwohci_handler_set(psc, ab->ab_tcode, high, lo, 4161 0, NULL, NULL); 4162 } 4163 break; 4164 default: 4165 DPRINTF(("Invalid registration tcode: %d\n", ab->ab_tcode)); 4166 return -1; 4167 break; 4168 } 4169 return rv; 4170 } 4171 4172 static int 4173 fwohci_unreg(struct ieee1394_abuf *ab, int allow) 4174 { 4175 void *save; 4176 int rv; 4177 4178 save = ab->ab_cb; 4179 ab->ab_cb = NULL; 4180 rv = fwohci_inreg(ab, allow); 4181 ab->ab_cb = save; 4182 return rv; 4183 } 4184 4185 static int 4186 fwohci_parse_input(struct fwohci_softc *sc, void *arg, struct fwohci_pkt *pkt) 4187 { 4188 struct ieee1394_abuf *ab = (struct ieee1394_abuf *)arg; 4189 u_int64_t addr; 4190 u_int8_t *cur; 4191 int i, count, ret; 4192 4193 ab->ab_tcode = (pkt->fp_hdr[0] >> 4) & 0xf; 4194 ab->ab_tlabel = (pkt->fp_hdr[0] >> 10) & 0x3f; 4195 addr = (((u_int64_t)(pkt->fp_hdr[1] & 0xffff) << 32) | pkt->fp_hdr[2]); 4196 4197 /* Make sure it's always 0 in case this gets reused multiple times. */ 4198 ab->ab_retlen = 0; 4199 4200 switch (ab->ab_tcode) { 4201 case IEEE1394_TCODE_READ_REQ_QUAD: 4202 ab->ab_retlen = 4; 4203 /* Response's (if required) will come from callback code */ 4204 ret = -1; 4205 break; 4206 case IEEE1394_TCODE_READ_REQ_BLOCK: 4207 ab->ab_retlen = (pkt->fp_hdr[3] >> 16) & 0xffff; 4208 if (ab->ab_subok) { 4209 if ((addr + ab->ab_retlen) > 4210 (ab->ab_addr + ab->ab_length)) 4211 return IEEE1394_RCODE_ADDRESS_ERROR; 4212 } else 4213 if (ab->ab_retlen != ab->ab_length) 4214 return IEEE1394_RCODE_ADDRESS_ERROR; 4215 /* Response's (if required) will come from callback code */ 4216 ret = -1; 4217 break; 4218 case IEEE1394_TCODE_WRITE_REQ_QUAD: 4219 ab->ab_retlen = 4; 4220 /* Fall through. 
*/ 4221 4222 case IEEE1394_TCODE_WRITE_REQ_BLOCK: 4223 if (!ab->ab_retlen) 4224 ab->ab_retlen = (pkt->fp_hdr[3] >> 16) & 0xffff; 4225 if (ab->ab_subok) { 4226 if ((addr + ab->ab_retlen) > 4227 (ab->ab_addr + ab->ab_length)) 4228 return IEEE1394_RCODE_ADDRESS_ERROR; 4229 } else 4230 if (ab->ab_retlen > ab->ab_length) 4231 return IEEE1394_RCODE_ADDRESS_ERROR; 4232 4233 if (ab->ab_tcode == IEEE1394_TCODE_WRITE_REQ_QUAD) 4234 ab->ab_data[0] = pkt->fp_hdr[3]; 4235 else { 4236 count = 0; 4237 cur = (u_int8_t *)ab->ab_data + (addr - ab->ab_addr); 4238 for (i = 0; i < pkt->fp_uio.uio_iovcnt; i++) { 4239 memcpy(cur, pkt->fp_iov[i].iov_base, 4240 pkt->fp_iov[i].iov_len); 4241 cur += pkt->fp_iov[i].iov_len; 4242 count += pkt->fp_iov[i].iov_len; 4243 } 4244 if (ab->ab_retlen != count) 4245 panic("Packet claims %d length " 4246 "but only %d bytes returned\n", 4247 ab->ab_retlen, count); 4248 } 4249 ret = IEEE1394_RCODE_COMPLETE; 4250 break; 4251 default: 4252 panic("Got a callback for a tcode that wasn't requested: %d", 4253 ab->ab_tcode); 4254 break; 4255 } 4256 if (ab->ab_cb) { 4257 ab->ab_retaddr = addr; 4258 ab->ab_cb(ab, IEEE1394_RCODE_COMPLETE); 4259 } 4260 return ret; 4261 } 4262 4263 static int 4264 fwohci_submatch(struct device *parent, struct cfdata *cf, 4265 const locdesc_t *ldesc, void *aux) 4266 { 4267 struct ieee1394_attach_args *fwa = aux; 4268 4269 /* Both halves must be filled in for a match. */ 4270 if ((cf->fwbuscf_idhi == FWBUS_UNK_IDHI && 4271 cf->fwbuscf_idlo == FWBUS_UNK_IDLO) || 4272 (cf->fwbuscf_idhi == ntohl(*((u_int32_t *)&fwa->uid[0])) && 4273 cf->fwbuscf_idlo == ntohl(*((u_int32_t *)&fwa->uid[4])))) 4274 return (config_match(parent, cf, aux)); 4275 return 0; 4276 } 4277 4278 int 4279 fwohci_detach(struct fwohci_softc *sc, int flags) 4280 { 4281 int rv = 0; 4282 4283 if (sc->sc_sc1394.sc1394_if != NULL) 4284 rv = config_detach(sc->sc_sc1394.sc1394_if, flags); 4285 if (rv != 0) 4286 return (rv); 4287 4288 callout_stop(&sc->sc_selfid_callout); 4289 4290 if (sc->sc_powerhook != NULL) 4291 powerhook_disestablish(sc->sc_powerhook); 4292 if (sc->sc_shutdownhook != NULL) 4293 shutdownhook_disestablish(sc->sc_shutdownhook); 4294 4295 return (rv); 4296 } 4297 4298 int 4299 fwohci_activate(struct device *self, enum devact act) 4300 { 4301 struct fwohci_softc *sc = (struct fwohci_softc *)self; 4302 int s, rv = 0; 4303 4304 s = splhigh(); 4305 switch (act) { 4306 case DVACT_ACTIVATE: 4307 rv = EOPNOTSUPP; 4308 break; 4309 4310 case DVACT_DEACTIVATE: 4311 if (sc->sc_sc1394.sc1394_if != NULL) 4312 rv = config_deactivate(sc->sc_sc1394.sc1394_if); 4313 break; 4314 } 4315 splx(s); 4316 4317 return (rv); 4318 } 4319 4320 #ifdef FW_DEBUG 4321 static void 4322 fwohci_show_intr(struct fwohci_softc *sc, u_int32_t intmask) 4323 { 4324 4325 printf("%s: intmask=0x%08x:", sc->sc_sc1394.sc1394_dev.dv_xname, 4326 intmask); 4327 if (intmask & OHCI_Int_CycleTooLong) 4328 printf(" CycleTooLong"); 4329 if (intmask & OHCI_Int_UnrecoverableError) 4330 printf(" UnrecoverableError"); 4331 if (intmask & OHCI_Int_CycleInconsistent) 4332 printf(" CycleInconsistent"); 4333 if (intmask & OHCI_Int_BusReset) 4334 printf(" BusReset"); 4335 if (intmask & OHCI_Int_SelfIDComplete) 4336 printf(" SelfIDComplete"); 4337 if (intmask & OHCI_Int_LockRespErr) 4338 printf(" LockRespErr"); 4339 if (intmask & OHCI_Int_PostedWriteErr) 4340 printf(" PostedWriteErr"); 4341 if (intmask & OHCI_Int_ReqTxComplete) 4342 printf(" ReqTxComplete(0x%04x)", 4343 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_REQUEST, 4344 
OHCI_SUBREG_ContextControlClear)); 4345 if (intmask & OHCI_Int_RespTxComplete) 4346 printf(" RespTxComplete(0x%04x)", 4347 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_TX_RESPONSE, 4348 OHCI_SUBREG_ContextControlClear)); 4349 if (intmask & OHCI_Int_ARRS) 4350 printf(" ARRS(0x%04x)", 4351 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_RESPONSE, 4352 OHCI_SUBREG_ContextControlClear)); 4353 if (intmask & OHCI_Int_ARRQ) 4354 printf(" ARRQ(0x%04x)", 4355 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_REQUEST, 4356 OHCI_SUBREG_ContextControlClear)); 4357 if (intmask & OHCI_Int_IsochRx) 4358 printf(" IsochRx(0x%08x)", 4359 OHCI_CSR_READ(sc, OHCI_REG_IsoRecvIntEventClear)); 4360 if (intmask & OHCI_Int_IsochTx) 4361 printf(" IsochTx(0x%08x)", 4362 OHCI_CSR_READ(sc, OHCI_REG_IsoXmitIntEventClear)); 4363 if (intmask & OHCI_Int_RQPkt) 4364 printf(" RQPkt(0x%04x)", 4365 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_REQUEST, 4366 OHCI_SUBREG_ContextControlClear)); 4367 if (intmask & OHCI_Int_RSPkt) 4368 printf(" RSPkt(0x%04x)", 4369 OHCI_ASYNC_DMA_READ(sc, OHCI_CTX_ASYNC_RX_RESPONSE, 4370 OHCI_SUBREG_ContextControlClear)); 4371 printf("\n"); 4372 } 4373 4374 static void 4375 fwohci_show_phypkt(struct fwohci_softc *sc, u_int32_t val) 4376 { 4377 u_int8_t key, phyid; 4378 4379 key = (val & 0xc0000000) >> 30; 4380 phyid = (val & 0x3f000000) >> 24; 4381 printf("%s: PHY packet from %d: ", 4382 sc->sc_sc1394.sc1394_dev.dv_xname, phyid); 4383 switch (key) { 4384 case 0: 4385 printf("PHY Config:"); 4386 if (val & 0x00800000) 4387 printf(" ForceRoot"); 4388 if (val & 0x00400000) 4389 printf(" Gap=%x", (val & 0x003f0000) >> 16); 4390 printf("\n"); 4391 break; 4392 case 1: 4393 printf("Link-on\n"); 4394 break; 4395 case 2: 4396 printf("SelfID:"); 4397 if (val & 0x00800000) { 4398 printf(" #%d", (val & 0x00700000) >> 20); 4399 } else { 4400 if (val & 0x00400000) 4401 printf(" LinkActive"); 4402 printf(" Gap=%x", (val & 0x003f0000) >> 16); 4403 printf(" Spd=S%d", 100 << ((val & 0x0000c000) >> 14)); 4404 if (val & 0x00000800) 4405 printf(" Cont"); 4406 if (val & 0x00000002) 4407 printf(" InitiateBusReset"); 4408 } 4409 if (val & 0x00000001) 4410 printf(" +"); 4411 printf("\n"); 4412 break; 4413 default: 4414 printf("unknown: 0x%08x\n", val); 4415 break; 4416 } 4417 } 4418 #endif /* FW_DEBUG */ 4419 4420 #if 0 4421 void fwohci_dumpreg(struct ieee1394_softc *, struct fwiso_regdump *); 4422 4423 void 4424 fwohci_dumpreg(struct ieee1394_softc *isc, struct fwiso_regdump *fr) 4425 { 4426 struct fwohci_softc *sc = (struct fwohci_softc *)isc; 4427 #if 0 4428 u_int32_t val; 4429 4430 printf("%s: dump reg\n", isc->sc1394_dev.dv_xname); 4431 printf("\tNodeID reg 0x%08x\n", 4432 OHCI_CSR_READ(sc, OHCI_REG_NodeId)); 4433 val = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer); 4434 printf("\tIsoCounter 0x%08x, %d %d %d", val, 4435 (val >> 25) & 0xfe, (val >> 12) & 0x1fff, val & 0xfff); 4436 val = OHCI_CSR_READ(sc, OHCI_REG_IntMaskSet); 4437 printf(" IntMask 0x%08x, %s\n", val, 4438 val & OHCI_Int_IsochTx ? "isoTx" : ""); 4439 4440 val = OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_ContextControlSet); 4441 printf("\tIT_CommandPtr 0x%08x ContextCtrl 0x%08x%s%s%s%s\n", 4442 OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_CommandPtr), 4443 val, 4444 val & OHCI_CTXCTL_RUN ? " run" : "", 4445 val & OHCI_CTXCTL_WAKE ? " wake" : "", 4446 val & OHCI_CTXCTL_DEAD ? " dead" : "", 4447 val & OHCI_CTXCTL_ACTIVE ? 
" active" : ""); 4448 #endif 4449 4450 fr->fr_nodeid = OHCI_CSR_READ(sc, OHCI_REG_NodeId); 4451 fr->fr_isocounter = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer); 4452 fr->fr_intmask = OHCI_CSR_READ(sc, OHCI_REG_IntMaskSet); 4453 fr->fr_it0_commandptr = OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_CommandPtr); 4454 fr->fr_it0_contextctrl = OHCI_SYNC_TX_DMA_READ(sc, 0, OHCI_SUBREG_ContextControlSet); 4455 4456 4457 } 4458 #endif 4459 4460 4461 u_int16_t 4462 fwohci_cycletimer(struct fwohci_softc *sc) 4463 { 4464 u_int32_t reg; 4465 4466 reg = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer); 4467 4468 return (reg >> 12)&0xffff; 4469 } 4470 4471 4472 u_int16_t 4473 fwohci_it_cycletimer(ieee1394_it_tag_t it) 4474 { 4475 struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it; 4476 4477 return fwohci_cycletimer(itc->itc_sc); 4478 } 4479 4480 4481 4482 4483 4484 /* 4485 * return value: if positive value, number of DMA buffer segments. If 4486 * negative value, error happens. Never zero. 4487 */ 4488 static int 4489 fwohci_misc_dmabuf_alloc(bus_dma_tag_t dmat, int dsize, int segno, 4490 bus_dma_segment_t *segp, bus_dmamap_t *dmapp, void **mapp, 4491 const char *xname) 4492 { 4493 int nsegs; 4494 int error; 4495 4496 printf("fwohci_misc_desc_alloc: dsize %d segno %d\n", dsize, segno); 4497 4498 if ((error = bus_dmamem_alloc(dmat, dsize, PAGE_SIZE, 0, 4499 segp, segno, &nsegs, 0)) != 0) { 4500 printf("%s: unable to allocate descriptor buffer, error = %d\n", 4501 xname, error); 4502 goto fail_0; 4503 } 4504 4505 DPRINTF(("fwohci_misc_desc_alloc: %d segment[s]\n", nsegs)); 4506 4507 if ((error = bus_dmamem_map(dmat, segp, nsegs, dsize, (caddr_t *)mapp, 4508 BUS_DMA_COHERENT | BUS_DMA_WAITOK)) != 0) { 4509 printf("%s: unable to map descriptor buffer, error = %d\n", 4510 xname, error); 4511 goto fail_1; 4512 } 4513 4514 DPRINTF(("fwohci_misc_desc_alloc: %s map ok\n", xname)); 4515 4516 #ifdef FWOHCI_DEBUG 4517 { 4518 int loop; 4519 4520 for (loop = 0; loop < nsegs; ++loop) { 4521 printf("\t%.2d: 0x%lx - 0x%lx\n", loop, 4522 (long)segp[loop].ds_addr, 4523 (long)segp[loop].ds_addr + segp[loop].ds_len - 1); 4524 } 4525 } 4526 #endif /* FWOHCI_DEBUG */ 4527 4528 if ((error = bus_dmamap_create(dmat, dsize, nsegs, dsize, 4529 0, BUS_DMA_WAITOK, dmapp)) != 0) { 4530 printf("%s: unable to create descriptor buffer DMA map, " 4531 "error = %d\n", xname, error); 4532 goto fail_2; 4533 } 4534 4535 DPRINTF(("fwohci_misc_dmabuf_alloc: bus_dmamem_create success\n")); 4536 4537 if ((error = bus_dmamap_load(dmat, *dmapp, *mapp, dsize, NULL, 4538 BUS_DMA_WAITOK)) != 0) { 4539 printf("%s: unable to load descriptor buffer DMA map, " 4540 "error = %d\n", xname, error); 4541 goto fail_3; 4542 } 4543 4544 DPRINTF(("fwohci_it_desc_alloc: bus_dmamem_load success\n")); 4545 4546 return nsegs; 4547 4548 fail_3: 4549 bus_dmamap_destroy(dmat, *dmapp); 4550 fail_2: 4551 bus_dmamem_unmap(dmat, *mapp, dsize); 4552 fail_1: 4553 bus_dmamem_free(dmat, segp, nsegs); 4554 fail_0: 4555 return error; 4556 } 4557 4558 4559 static void 4560 fwohci_misc_dmabuf_free(bus_dma_tag_t dmat, int dsize, int nsegs, 4561 bus_dma_segment_t *segp, bus_dmamap_t *dmapp, caddr_t map) 4562 { 4563 bus_dmamap_destroy(dmat, *dmapp); 4564 bus_dmamem_unmap(dmat, map, dsize); 4565 bus_dmamem_free(dmat, segp, nsegs); 4566 } 4567 4568 4569 4570 4571 /* 4572 * Isochronous receive service 4573 */ 4574 4575 /* 4576 * static struct fwohci_ir_ctx * 4577 * fwohci_ir_ctx_construct(struct fwohci_softc *sc, int no, int ch, int tagbm, 4578 * int bufnum, int maxsize, int 
flags)
4579 */
4580 static struct fwohci_ir_ctx *
4581 fwohci_ir_ctx_construct(struct fwohci_softc *sc, int no, int ch, int tagbm,
4582 int bufnum, int maxsize, int flags)
4583 {
4584 struct fwohci_ir_ctx *irc;
4585 int i;
4586
4587 printf("fwohci_ir_ctx_construct(%s, %d, %d, %x, %d, %d)\n",
4588 sc->sc_sc1394.sc1394_dev.dv_xname, no, ch, tagbm, bufnum, maxsize);
4589
4590 if ((irc = malloc(sizeof(*irc), M_DEVBUF, M_WAITOK|M_ZERO)) == NULL) {
4591 return NULL;
4592 }
4593
4594 irc->irc_sc = sc;
4595
4596 irc->irc_num = no;
4597 irc->irc_status = 0;
4598
4599 irc->irc_channel = ch;
4600 irc->irc_tagbm = tagbm;
4601
4602 irc->irc_desc_num = bufnum;
4603
4604 irc->irc_flags = flags;
4605
4606 /* add header */
4607 maxsize += 8;
4608 /* rounding up */
4609 for (i = 32; i < maxsize; i <<= 1);
4610 printf("fwohci_ir_ctx_construct: maxsize %d => %d\n",
4611 maxsize, i);
4612
4613 maxsize = i;
4614
4615 irc->irc_maxsize = maxsize;
4616 irc->irc_buf_totalsize = bufnum * maxsize;
4617
4618 if (fwohci_ir_buf_setup(irc)) {
4619 /* cannot alloc descriptor; free irc to avoid a leak */
4620 free(irc, M_DEVBUF); return NULL;
4621 }
4622
4623 irc->irc_readtop = irc->irc_desc_map;
4624 irc->irc_writeend = irc->irc_desc_map + irc->irc_desc_num - 1;
4625 irc->irc_savedbranch = irc->irc_writeend->fd_branch;
4626 irc->irc_writeend->fd_branch = 0;
4627 /* sync */
4628
4629 if (fwohci_ir_stop(irc) || fwohci_ir_init(irc)) {
4630 fwohci_ir_ctx_destruct(irc); return NULL;
4631 }
4632
4633 irc->irc_status |= IRC_STATUS_READY;
4634
4635 return irc;
4636 }
4637
4638
4639
4640 /*
4641 * static void fwohci_ir_ctx_destruct(struct fwohci_ir_ctx *irc)
4642 *
4643 * This function releases all DMA buffers and the context itself.
4644 */
4645 static void
4646 fwohci_ir_ctx_destruct(struct fwohci_ir_ctx *irc)
4647 {
4648 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat, irc->irc_buf_totalsize,
4649 irc->irc_buf_nsegs, irc->irc_buf_segs,
4650 &irc->irc_buf_dmamap, (caddr_t)irc->irc_buf);
4651 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat,
4652 irc->irc_desc_size,
4653 irc->irc_desc_nsegs, &irc->irc_desc_seg,
4654 &irc->irc_desc_dmamap, (caddr_t)irc->irc_desc_map);
4655
4656 free(irc, M_DEVBUF);
4657 }
4658
4659
4660
4661
4662 /*
4663 * static int fwohci_ir_buf_setup(struct fwohci_ir_ctx *irc)
4664 *
4665 * Allocates the descriptors and data buffers for the context DMA
4666 * dedicated to isochronous receive.
4667 *
4668 * This function returns 0 (zero) if it succeeds. Otherwise, it
4669 * returns a negative value.
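 *
 * Rough sketch of the layout built below (derived from this code, not
 * from the OHCI specification): irc_desc_num INPUT_LAST descriptors
 * form a ring, descriptor i pointing at the i-th irc_maxsize slot of
 * irc_buf and branching (Z=1) to descriptor i+1, with the final
 * descriptor branching back to the first one:
 *
 *    desc[0] --> desc[1] --> ... --> desc[N-1]
 *       ^                               |
 *       +-------------------------------+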
4670 */ 4671 static int 4672 fwohci_ir_buf_setup(struct fwohci_ir_ctx *irc) 4673 { 4674 int nsegs; 4675 struct fwohci_desc *fd; 4676 u_int32_t branch; 4677 int bufno = 0; /* DMA segment */ 4678 bus_size_t bufused = 0; /* offset in a DMA segment */ 4679 4680 irc->irc_desc_size = irc->irc_desc_num * sizeof(struct fwohci_desc); 4681 4682 nsegs = fwohci_misc_dmabuf_alloc(irc->irc_sc->sc_dmat, 4683 irc->irc_desc_size, 1, &irc->irc_desc_seg, &irc->irc_desc_dmamap, 4684 (void **)&irc->irc_desc_map, 4685 irc->irc_sc->sc_sc1394.sc1394_dev.dv_xname); 4686 4687 if (nsegs < 0) { 4688 printf("fwohci_ir_buf_alloc: cannot get descriptor\n"); 4689 return -1; 4690 } 4691 irc->irc_desc_nsegs = nsegs; 4692 4693 nsegs = fwohci_misc_dmabuf_alloc(irc->irc_sc->sc_dmat, 4694 irc->irc_buf_totalsize, 16, irc->irc_buf_segs, 4695 &irc->irc_buf_dmamap, (void **)&irc->irc_buf, 4696 irc->irc_sc->sc_sc1394.sc1394_dev.dv_xname); 4697 4698 if (nsegs < 0) { 4699 printf("fwohci_ir_buf_alloc: cannot get DMA buffer\n"); 4700 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat, 4701 irc->irc_desc_size, 4702 irc->irc_desc_nsegs, &irc->irc_desc_seg, 4703 &irc->irc_desc_dmamap, (caddr_t)irc->irc_desc_map); 4704 return -1; 4705 } 4706 irc->irc_buf_nsegs = nsegs; 4707 4708 branch = irc->irc_desc_dmamap->dm_segs[0].ds_addr 4709 + sizeof(struct fwohci_desc); 4710 bufno = 0; 4711 bufused = 0; 4712 4713 for (fd = irc->irc_desc_map; 4714 fd < irc->irc_desc_map + irc->irc_desc_num; ++fd) { 4715 fd->fd_flags = OHCI_DESC_INPUT | OHCI_DESC_LAST 4716 | OHCI_DESC_STATUS | OHCI_DESC_BRANCH; 4717 if (irc->irc_flags & IEEE1394_IR_SHORTDELAY) { 4718 fd->fd_flags |= OHCI_DESC_INTR_ALWAYS; 4719 } 4720 #if 0 4721 if ((fd - irc->irc_desc_map) % 64 == 0) { 4722 fd->fd_flags |= OHCI_DESC_INTR_ALWAYS; 4723 } 4724 #endif 4725 fd->fd_reqcount = irc->irc_maxsize; 4726 fd->fd_status = fd->fd_rescount = 0; 4727 4728 fd->fd_branch = branch | 0x01; 4729 branch += sizeof(struct fwohci_desc); 4730 4731 /* physical addr to data? */ 4732 fd->fd_data = 4733 (u_int32_t)((irc->irc_buf_segs[bufno].ds_addr + bufused)); 4734 bufused += irc->irc_maxsize; 4735 if (bufused > irc->irc_buf_segs[bufno].ds_len) { 4736 bufused = 0; 4737 if (++bufno == irc->irc_buf_nsegs) { 4738 /* fail */ 4739 printf("fwohci_ir_buf_setup fail\n"); 4740 4741 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat, 4742 irc->irc_desc_size, 4743 irc->irc_desc_nsegs, &irc->irc_desc_seg, 4744 &irc->irc_desc_dmamap, 4745 (caddr_t)irc->irc_desc_map); 4746 fwohci_misc_dmabuf_free(irc->irc_sc->sc_dmat, 4747 irc->irc_buf_totalsize, 4748 irc->irc_buf_nsegs, irc->irc_buf_segs, 4749 &irc->irc_buf_dmamap, 4750 (caddr_t)irc->irc_buf); 4751 return -1; 4752 } 4753 } 4754 4755 #ifdef FWOHCI_DEBUG 4756 if (fd < irc->irc_desc_map + 4 4757 || (fd > irc->irc_desc_map + irc->irc_desc_num - 4)) { 4758 printf("fwohci_ir_buf_setup: desc %d %p buf %08x" 4759 " size %d branch %08x\n", 4760 fd - irc->irc_desc_map, fd, fd->fd_data, 4761 fd->fd_reqcount, fd->fd_branch); 4762 } 4763 #endif /* FWOHCI_DEBUG */ 4764 } 4765 4766 --fd; 4767 fd->fd_branch = irc->irc_desc_dmamap->dm_segs[0].ds_addr | 1; 4768 DPRINTF(("fwohci_ir_buf_setup: desc %d %p buf %08x size %d branch %08x\n", 4769 (int)(fd - irc->irc_desc_map), fd, fd->fd_data, fd->fd_reqcount, 4770 fd->fd_branch)); 4771 4772 return 0; 4773 } 4774 4775 4776 4777 /* 4778 * static void fwohci_ir_init(struct fwohci_ir_ctx *irc) 4779 * 4780 * This function initialise DMA engine. 
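 * It programs ContextMatch with the channel number and tag bitmap and
 * selects packet-per-buffer operation with the isochronous packet
 * header stored along with the data (see the 8-byte shim consumed in
 * fwohci_ir_read()).  For example (values illustrative only), channel
 * 63 with tag bitmap 0x1 yields
 * ctxmatch = (0x1 << OHCI_CTXMATCH_TAG_BITPOS) | 63, exactly as
 * computed below.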
4781 */ 4782 static int 4783 fwohci_ir_init(struct fwohci_ir_ctx *irc) 4784 { 4785 struct fwohci_softc *sc = irc->irc_sc; 4786 int n = irc->irc_num; 4787 u_int32_t ctxmatch; 4788 4789 ctxmatch = irc->irc_channel & IEEE1394_ISO_CHANNEL_MASK; 4790 4791 if (irc->irc_channel & IEEE1394_ISO_CHANNEL_ANY) { 4792 OHCI_SYNC_RX_DMA_WRITE(sc, n, 4793 OHCI_SUBREG_ContextControlSet, 4794 OHCI_CTXCTL_RX_MULTI_CHAN_MODE); 4795 4796 /* Receive all the isochronous channels */ 4797 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskHiSet, 0xffffffff); 4798 OHCI_CSR_WRITE(sc, OHCI_REG_IRMultiChanMaskLoSet, 0xffffffff); 4799 ctxmatch = 0; 4800 } 4801 4802 ctxmatch |= ((irc->irc_tagbm & 0x0f) << OHCI_CTXMATCH_TAG_BITPOS); 4803 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextMatch, ctxmatch); 4804 4805 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlClear, 4806 OHCI_CTXCTL_RX_BUFFER_FILL | OHCI_CTXCTL_RX_CYCLE_MATCH_ENABLE); 4807 OHCI_SYNC_RX_DMA_WRITE(sc, n, OHCI_SUBREG_ContextControlSet, 4808 OHCI_CTXCTL_RX_ISOCH_HEADER); 4809 4810 printf("fwohci_ir_init\n"); 4811 4812 return 0; 4813 } 4814 4815 4816 /* 4817 * static int fwohci_ir_start(struct fwohci_ir_ctx *irc) 4818 * 4819 * This function starts DMA engine. This function must call 4820 * after fwohci_ir_init() and active bit of context control 4821 * register negated. This function will not check it. 4822 */ 4823 static int 4824 fwohci_ir_start(struct fwohci_ir_ctx *irc) 4825 { 4826 struct fwohci_softc *sc = irc->irc_sc; 4827 int startidx = irc->irc_readtop - irc->irc_desc_map; 4828 u_int32_t startaddr; 4829 4830 startaddr = irc->irc_desc_dmamap->dm_segs[0].ds_addr 4831 + sizeof(struct fwohci_desc)*startidx; 4832 4833 OHCI_SYNC_RX_DMA_WRITE(sc, irc->irc_num, OHCI_SUBREG_CommandPtr, 4834 startaddr | 1); 4835 OHCI_CSR_WRITE(sc, OHCI_REG_IsoRecvIntEventClear, 4836 (1 << irc->irc_num)); 4837 OHCI_SYNC_RX_DMA_WRITE(sc, irc->irc_num, 4838 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN); 4839 4840 printf("fwohci_ir_start: CmdPtr %08x Ctx %08x startidx %d\n", 4841 OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num, OHCI_SUBREG_CommandPtr), 4842 OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num, OHCI_SUBREG_ContextControlSet), 4843 startidx); 4844 4845 irc->irc_status &= ~IRC_STATUS_READY; 4846 irc->irc_status |= IRC_STATUS_RUN; 4847 4848 if ((irc->irc_flags & IEEE1394_IR_TRIGGER_CIP_SYNC) == 0) { 4849 irc->irc_status |= IRC_STATUS_RECEIVE; 4850 } 4851 4852 return 0; 4853 } 4854 4855 4856 4857 /* 4858 * static int fwohci_ir_stop(struct fwohci_ir_ctx *irc) 4859 * 4860 * This function stops DMA engine. 4861 */ 4862 static int 4863 fwohci_ir_stop(struct fwohci_ir_ctx *irc) 4864 { 4865 struct fwohci_softc *sc = irc->irc_sc; 4866 int i; 4867 4868 printf("fwohci_ir_stop\n"); 4869 4870 OHCI_SYNC_RX_DMA_WRITE(sc, irc->irc_num, 4871 OHCI_SUBREG_ContextControlClear, 4872 OHCI_CTXCTL_RUN | OHCI_CTXCTL_DEAD); 4873 4874 i = 0; 4875 while (OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num, 4876 OHCI_SUBREG_ContextControlSet) & OHCI_CTXCTL_ACTIVE) { 4877 #if 0 4878 u_int32_t reg = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num, 4879 OHCI_SUBREG_ContextControlClear); 4880 4881 printf("%s: %d intr IR_CommandPtr 0x%08x " 4882 "ContextCtrl 0x%08x%s%s%s%s\n", 4883 sc->sc_sc1394.sc1394_dev.dv_xname, i, 4884 OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num, 4885 OHCI_SUBREG_CommandPtr), 4886 reg, 4887 reg & OHCI_CTXCTL_RUN ? " run" : "", 4888 reg & OHCI_CTXCTL_WAKE ? " wake" : "", 4889 reg & OHCI_CTXCTL_DEAD ? " dead" : "", 4890 reg & OHCI_CTXCTL_ACTIVE ? 
" active" : ""); 4891 #endif 4892 if (i > 20) { 4893 printf("fwohci_ir_stop: %s does not stop\n", 4894 sc->sc_sc1394.sc1394_dev.dv_xname); 4895 return 1; 4896 } 4897 DELAY(10); 4898 } 4899 4900 irc->irc_status &= ~IRC_STATUS_RUN; 4901 4902 return 0; 4903 } 4904 4905 4906 4907 4908 4909 4910 static void 4911 fwohci_ir_intr(struct fwohci_softc *sc, struct fwohci_ir_ctx *irc) 4912 { 4913 const char *xname = sc->sc_sc1394.sc1394_dev.dv_xname; 4914 u_int32_t cmd, ctx; 4915 int idx; 4916 struct fwohci_desc *fd; 4917 4918 sc->sc_isocnt.ev_count++; 4919 4920 if (!(irc->irc_status & IRC_STATUS_RUN)) { 4921 printf("fwohci_ir_intr: not running\n"); 4922 return; 4923 } 4924 4925 bus_dmamap_sync(sc->sc_dmat, irc->irc_desc_dmamap, 4926 0, irc->irc_desc_size, BUS_DMASYNC_PREREAD); 4927 4928 ctx = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num, 4929 OHCI_SUBREG_ContextControlSet); 4930 4931 cmd = OHCI_SYNC_RX_DMA_READ(sc, irc->irc_num, 4932 OHCI_SUBREG_CommandPtr); 4933 4934 #define OHCI_CTXCTL_RUNNING (OHCI_CTXCTL_RUN|OHCI_CTXCTL_ACTIVE) 4935 #define OHCI_CTXCTL_RUNNING_MASK (OHCI_CTXCTL_RUNNING|OHCI_CTXCTL_DEAD) 4936 4937 idx = (cmd & 0xfffffff8) - (u_int32_t)irc->irc_desc_dmamap->dm_segs[0].ds_addr; 4938 idx /= sizeof(struct fwohci_desc); 4939 4940 if ((ctx & OHCI_CTXCTL_RUNNING_MASK) == OHCI_CTXCTL_RUNNING) { 4941 if (irc->irc_waitchan != NULL) { 4942 DPRINTF(("fwohci_ir_intr: wakeup " 4943 "ctx %d CmdPtr %08x Ctxctl %08x idx %d\n", 4944 irc->irc_num, cmd, ctx, idx)); 4945 #ifdef FWOHCI_WAIT_DEBUG 4946 irc->irc_cycle[1] = fwohci_cycletimer(irc->irc_sc); 4947 #endif 4948 wakeup(irc->irc_waitchan); 4949 } 4950 selwakeup(&irc->irc_sel); 4951 return; 4952 } 4953 4954 fd = irc->irc_desc_map + idx; 4955 4956 printf("fwohci_ir_intr: %s error " 4957 "ctx %d CmdPtr %08x Ctxctl %08x idx %d\n", xname, 4958 irc->irc_num, cmd, ctx, idx); 4959 printf("\tfd flag %x branch %x stat %x rescnt %x total pkt %d\n", 4960 fd->fd_flags, fd->fd_branch, fd->fd_status,fd->fd_rescount, 4961 irc->irc_pktcount); 4962 } 4963 4964 4965 4966 4967 /* 4968 * static int fwohci_ir_ctx_packetnum(struct fwohci_ir_ctx *irc) 4969 * 4970 * This function obtains the lenth of descriptors with data. 4971 */ 4972 static int 4973 fwohci_ir_ctx_packetnum(struct fwohci_ir_ctx *irc) 4974 { 4975 struct fwohci_desc *fd = irc->irc_readtop; 4976 int i = 0; 4977 4978 /* XXX SYNC */ 4979 while (fd->fd_status != 0) { 4980 if (fd == irc->irc_readtop && i > 0) { 4981 printf("descriptor filled %d at %d\n", i, 4982 irc->irc_pktcount); 4983 #ifdef FWOHCI_WAIT_DEBUG 4984 irc->irc_cycle[2] = fwohci_cycletimer(irc->irc_sc); 4985 printf("cycletimer %d:%d %d:%d %d:%d\n", 4986 irc->irc_cycle[0]>>13, irc->irc_cycle[0]&0x1fff, 4987 irc->irc_cycle[1]>>13, irc->irc_cycle[1]&0x1fff, 4988 irc->irc_cycle[2]>>13, irc->irc_cycle[2]&0x1fff); 4989 #endif 4990 4991 break; 4992 } 4993 4994 ++i; 4995 ++fd; 4996 if (fd == irc->irc_desc_map + irc->irc_desc_num) { 4997 fd = irc->irc_desc_map; 4998 } 4999 5000 } 5001 5002 return i; 5003 } 5004 5005 5006 5007 5008 /* 5009 * int fwohci_ir_read(struct device *dev, ieee1394_ir_tag_t tag, 5010 * struct uio *uio, int headoffs, int flags) 5011 * 5012 * This function reads data from fwohci's isochronous receive 5013 * buffer. 
5014 */ 5015 int 5016 fwohci_ir_read(struct device *dev, ieee1394_ir_tag_t tag, struct uio *uio, 5017 int headoffs, int flags) 5018 { 5019 struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)tag; 5020 int packetnum; 5021 int copylen, hdrshim, fwisohdrsiz; 5022 struct fwohci_desc *fd, *fdprev = NULL; /* XXX fdprev use is suspect */ 5023 u_int8_t *data; 5024 int status = 0; 5025 u_int32_t tmpbranch; 5026 int pktcount_prev = irc->irc_pktcount; 5027 #ifdef FW_DEBUG 5028 int totalread = 0; 5029 #endif 5030 5031 if (irc->irc_status & IRC_STATUS_READY) { 5032 printf("fwohci_ir_read: starting iso read engine\n"); 5033 fwohci_ir_start(irc); 5034 } 5035 5036 packetnum = fwohci_ir_ctx_packetnum(irc); 5037 5038 DPRINTF(("fwohci_ir_read resid %lu DMA buf %d\n", 5039 (unsigned long)uio->uio_resid, packetnum)); 5040 5041 if (packetnum == 0) { 5042 return EAGAIN; 5043 } 5044 5045 #ifdef USEDRAIN 5046 if (packetnum > irc->irc_desc_num - irc->irc_desc_num/4) { 5047 packetnum -= fwohci_ir_ctx_drain(irc); 5048 if (irc->irc_pktcount != 0) { 5049 printf("fwohci_ir_read overrun %d\n", 5050 irc->irc_pktcount); 5051 } 5052 } 5053 #endif /* USEDRAIN */ 5054 5055 fdprev = fd = irc->irc_readtop; 5056 5057 #if 0 5058 if ((irc->irc_status & IRC_STATUS_RECEIVE) == 0 5059 && irc->irc_flags & IEEE1394_IR_TRIGGER_CIP_SYNC) { 5060 unsigned int s; 5061 int i = 0; 5062 5063 while (fd->fd_status != 0) { 5064 s = data[14] << 8; 5065 s |= data[15]; 5066 5067 if (s != 0x0000ffffu) { 5068 DPRINTF(("find header %x at %d\n", 5069 s, irc->irc_pktcount)); 5070 irc->irc_status |= IRC_STATUS_RECEIVE; 5071 break; 5072 } 5073 5074 fd->fd_rescount = 0; 5075 fd->fd_status = 0; 5076 5077 fdprev = fd; 5078 if (++fd == irc->irc_desc_map + irc->irc_desc_num) { 5079 fd = irc->irc_desc_map; 5080 data = irc->irc_buf; 5081 } 5082 ++i; 5083 } 5084 5085 /* XXX SYNC */ 5086 if (i > 0) { 5087 tmpbranch = fdprev->fd_branch; 5088 fdprev->fd_branch = 0; 5089 irc->irc_writeend->fd_branch = irc->irc_savedbranch; 5090 irc->irc_writeend = fdprev; 5091 irc->irc_savedbranch = tmpbranch; 5092 } 5093 /* XXX SYNC */ 5094 5095 if (fd->fd_status == 0) { 5096 return EAGAIN; 5097 } 5098 } 5099 #endif 5100 5101 hdrshim = 8; 5102 fwisohdrsiz = 0; 5103 data = irc->irc_buf + (fd - irc->irc_desc_map) * irc->irc_maxsize; 5104 if (irc->irc_flags & IEEE1394_IR_NEEDHEADER) { 5105 fwisohdrsiz = sizeof(struct fwiso_header); 5106 } 5107 5108 while (fd->fd_status != 0 && 5109 (copylen = fd->fd_reqcount - fd->fd_rescount - hdrshim - headoffs) 5110 + fwisohdrsiz <= uio->uio_resid) { 5111 5112 DPRINTF(("pkt %04x:%04x uiomove %p, %d\n", 5113 fd->fd_status, fd->fd_rescount, 5114 (void *)(data + 8 + headoffs), copylen)); 5115 if ((irc->irc_status & IRC_STATUS_RECEIVE) == 0) { 5116 DPRINTF(("[%d]", copylen)); 5117 if (irc->irc_pktcount > 1000) { 5118 printf("no header found\n"); 5119 status = EIO; 5120 break; /* XXX */ 5121 } 5122 } else { 5123 DPRINTF(("<%d>", copylen)); 5124 } 5125 5126 if ((irc->irc_status & IRC_STATUS_RECEIVE) == 0 5127 && irc->irc_flags & IEEE1394_IR_TRIGGER_CIP_SYNC 5128 && copylen > 0) { 5129 unsigned int s; 5130 5131 s = data[14] << 8; 5132 s |= data[15]; 5133 5134 if (s != 0x0000ffffu) { 5135 DPRINTF(("find header %x at %d\n", 5136 s, irc->irc_pktcount)); 5137 irc->irc_status |= IRC_STATUS_RECEIVE; 5138 } 5139 } 5140 5141 if (irc->irc_status & IRC_STATUS_RECEIVE) { 5142 if (copylen > 0) { 5143 if (irc->irc_flags & IEEE1394_IR_NEEDHEADER) { 5144 struct fwiso_header fh; 5145 5146 fh.fh_timestamp = htonl((*(u_int32_t *)data) & 0xffff); 5147 fh.fh_speed = 
htonl((fd->fd_status >> 5)& 0x00000007); 5148 fh.fh_capture_size = htonl(copylen + 4); 5149 fh.fh_iso_header = htonl(*(u_int32_t *)(data + 4)); 5150 status = uiomove((void *)&fh, 5151 sizeof(fh), uio); 5152 if (status != 0) { 5153 /* An error happens */ 5154 printf("uio error in hdr\n"); 5155 break; 5156 } 5157 } 5158 status = uiomove((void *)(data + 8 + headoffs), 5159 copylen, uio); 5160 if (status != 0) { 5161 /* An error happens */ 5162 printf("uio error\n"); 5163 break; 5164 } 5165 #ifdef FW_DEBUG 5166 totalread += copylen; 5167 #endif 5168 } 5169 } 5170 5171 fd->fd_rescount = 0; 5172 fd->fd_status = 0; 5173 5174 #if 0 5175 /* advance writeend pointer and fill branch */ 5176 5177 tmpbranch = fd->fd_branch; 5178 fd->fd_branch = 0; 5179 irc->irc_writeend->fd_branch = irc->irc_savedbranch; 5180 irc->irc_writeend = fd; 5181 irc->irc_savedbranch = tmpbranch; 5182 #endif 5183 fdprev = fd; 5184 5185 data += irc->irc_maxsize; 5186 if (++fd == irc->irc_desc_map + irc->irc_desc_num) { 5187 fd = irc->irc_desc_map; 5188 data = irc->irc_buf; 5189 } 5190 ++irc->irc_pktcount; 5191 } 5192 5193 #if 1 5194 if (irc->irc_pktcount != pktcount_prev) { 5195 /* XXX SYNC */ 5196 tmpbranch = fdprev->fd_branch; 5197 fdprev->fd_branch = 0; 5198 irc->irc_writeend->fd_branch = irc->irc_savedbranch; 5199 irc->irc_writeend = fdprev; 5200 irc->irc_savedbranch = tmpbranch; 5201 /* XXX SYNC */ 5202 } 5203 #endif 5204 5205 if (!(OHCI_SYNC_RX_DMA_READ(irc->irc_sc, irc->irc_num, 5206 OHCI_SUBREG_ContextControlClear) & OHCI_CTXCTL_ACTIVE)) { 5207 /* do wake */ 5208 OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num, 5209 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_WAKE); 5210 } 5211 5212 if (packetnum > irc->irc_maxqueuelen) { 5213 irc->irc_maxqueuelen = packetnum; 5214 irc->irc_maxqueuepos = irc->irc_pktcount; 5215 } 5216 5217 if (irc->irc_pktcount == pktcount_prev) { 5218 #if 0 5219 printf("fwohci_ir_read: process 0 packet, total %d\n", 5220 irc->irc_pktcount); 5221 if (++pktfail > 30) { 5222 return 0; 5223 } 5224 #endif 5225 return EAGAIN; 5226 } 5227 5228 irc->irc_readtop = fd; 5229 5230 DPRINTF(("fwochi_ir_read: process %d packet, total %d\n", 5231 totalread, irc->irc_pktcount)); 5232 5233 return status; 5234 } 5235 5236 5237 5238 5239 /* 5240 * int fwohci_ir_wait(struct device *dev, ieee1394_ir_tag_t tag, 5241 * void *wchan, char *name) 5242 * 5243 * This function waits till new data comes. 
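 * It returns 0 immediately when more than four packets are already
 * queued, EIO when the 10 second tsleep() times out and EINTR when the
 * sleep is interrupted by a signal.  A caller is expected to alternate
 * read and wait roughly like the following sketch (hypothetical caller
 * code, not taken from this driver):
 *
 *    while ((error = fwohci_ir_read(dev, tag, uio, 0, 0)) == EAGAIN) {
 *            error = fwohci_ir_wait(dev, tag, &wchan, "fwiso");
 *            if (error)
 *                    break;
 *    }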
5244 */ 5245 int 5246 fwohci_ir_wait(struct device *dev, ieee1394_ir_tag_t tag, void *wchan, char *name) 5247 { 5248 struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)tag; 5249 struct fwohci_desc *fd; 5250 int pktnum; 5251 int stat; 5252 5253 if ((pktnum = fwohci_ir_ctx_packetnum(irc)) > 4) { 5254 DPRINTF(("fwohci_ir_wait enough data %d\n", pktnum)); 5255 return 0; 5256 } 5257 5258 fd = irc->irc_readtop + 32; 5259 if (fd >= irc->irc_desc_map + irc->irc_desc_num) { 5260 fd -= irc->irc_desc_num; 5261 } 5262 5263 irc->irc_waitchan = wchan; 5264 if ((irc->irc_flags & IEEE1394_IR_SHORTDELAY) == 0) { 5265 fd->fd_flags |= OHCI_DESC_INTR_ALWAYS; 5266 DPRINTF(("fwohci_ir_wait stops %d set intr %d\n", 5267 (int)(irc->irc_readtop - irc->irc_desc_map), 5268 (int)(fd - irc->irc_desc_map))); 5269 /* XXX SYNC */ 5270 } 5271 5272 #ifdef FWOHCI_WAIT_DEBUG 5273 irc->irc_cycle[0] = fwohci_cycletimer(irc->irc_sc); 5274 #endif 5275 5276 irc->irc_status |= IRC_STATUS_SLEEPING; 5277 if ((stat = tsleep(wchan, PCATCH|PRIBIO, name, hz*10)) != 0) { 5278 irc->irc_waitchan = NULL; 5279 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS; 5280 if (stat == EWOULDBLOCK) { 5281 printf("fwohci_ir_wait: timeout\n"); 5282 return EIO; 5283 } else { 5284 return EINTR; 5285 } 5286 } 5287 5288 irc->irc_waitchan = NULL; 5289 if ((irc->irc_flags & IEEE1394_IR_SHORTDELAY) == 0) { 5290 fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS; 5291 /* XXX SYNC */ 5292 } 5293 5294 DPRINTF(("fwohci_ir_wait: wakeup\n")); 5295 5296 return 0; 5297 } 5298 5299 5300 5301 5302 /* 5303 * int fwohci_ir_select(struct device *dev, ieee1394_ir_tag_t tag, 5304 * struct proc *p) 5305 * 5306 * This function returns the number of packets in queue. 5307 */ 5308 int 5309 fwohci_ir_select(struct device *dev, ieee1394_ir_tag_t tag, struct proc *p) 5310 { 5311 struct fwohci_ir_ctx *irc = (struct fwohci_ir_ctx *)tag; 5312 int pktnum; 5313 5314 if (irc->irc_status & IRC_STATUS_READY) { 5315 printf("fwohci_ir_select: starting iso read engine\n"); 5316 fwohci_ir_start(irc); 5317 } 5318 5319 if ((pktnum = fwohci_ir_ctx_packetnum(irc)) == 0) { 5320 selrecord(p, &irc->irc_sel); 5321 } 5322 5323 return pktnum; 5324 } 5325 5326 5327 5328 #ifdef USEDRAIN 5329 /* 5330 * int fwohci_ir_ctx_drain(struct fwohci_ir_ctx *irc) 5331 * 5332 * This function will drain all the packets in receive DMA 5333 * buffer. 5334 */ 5335 static int 5336 fwohci_ir_ctx_drain(struct fwohci_ir_ctx *irc) 5337 { 5338 struct fwohci_desc *fd = irc->irc_readtop; 5339 u_int32_t reg; 5340 int count = 0; 5341 5342 reg = OHCI_SYNC_RX_DMA_READ(irc->irc_sc, irc->irc_num, 5343 OHCI_SUBREG_ContextControlClear); 5344 5345 printf("fwohci_ir_ctx_drain ctx%s%s%s%s\n", 5346 reg & OHCI_CTXCTL_RUN ? " run" : "", 5347 reg & OHCI_CTXCTL_WAKE ? " wake" : "", 5348 reg & OHCI_CTXCTL_DEAD ? " dead" : "", 5349 reg & OHCI_CTXCTL_ACTIVE ? 
" active" : ""); 5350 5351 if ((reg & OHCI_CTXCTL_RUNNING_MASK) == OHCI_CTXCTL_RUN) { 5352 /* DMA engine is stopped */ 5353 u_int32_t startadr; 5354 5355 for (fd = irc->irc_desc_map; 5356 fd < irc->irc_desc_map + irc->irc_desc_num; 5357 ++fd) { 5358 fd->fd_status = 0; 5359 } 5360 5361 /* Restore branch addr of the last descriptor */ 5362 irc->irc_writeend->fd_branch = irc->irc_savedbranch; 5363 5364 irc->irc_readtop = irc->irc_desc_map; 5365 irc->irc_writeend = irc->irc_desc_map + irc->irc_desc_num - 1; 5366 irc->irc_savedbranch = irc->irc_writeend->fd_branch; 5367 irc->irc_writeend->fd_branch = 0; 5368 5369 count = irc->irc_desc_num; 5370 5371 OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num, 5372 OHCI_SUBREG_ContextControlClear, 5373 OHCI_CTXCTL_RUN | OHCI_CTXCTL_DEAD); 5374 5375 startadr = (u_int32_t)irc->irc_desc_dmamap->dm_segs[0].ds_addr; 5376 5377 printf("fwohci_ir_ctx_drain: remove %d pkts\n", count); 5378 5379 OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num, 5380 OHCI_SUBREG_CommandPtr, startadr | 1); 5381 5382 OHCI_SYNC_RX_DMA_WRITE(irc->irc_sc, irc->irc_num, 5383 OHCI_SUBREG_ContextControlSet, OHCI_CTXCTL_RUN); 5384 } else { 5385 const int removecount = irc->irc_desc_num/2; 5386 u_int32_t tmpbranch; 5387 5388 for (count = 0; count < removecount; ++count) { 5389 if (fd->fd_status == 0) { 5390 break; 5391 } 5392 5393 fd->fd_status = 0; 5394 5395 tmpbranch = fd->fd_branch; 5396 fd->fd_branch = 0; 5397 irc->irc_writeend->fd_branch = irc->irc_savedbranch; 5398 irc->irc_writeend = fd; 5399 irc->irc_savedbranch = tmpbranch; 5400 5401 if (++fd == irc->irc_desc_map + irc->irc_desc_num) { 5402 fd = irc->irc_desc_map; 5403 } 5404 ++count; 5405 } 5406 5407 printf("fwohci_ir_ctx_drain: remove %d pkts\n", count); 5408 } 5409 5410 return count; 5411 } 5412 #endif /* USEDRAIN */ 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 /* 5423 * service routines for isochronous transmit 5424 */ 5425 5426 5427 struct fwohci_it_ctx * 5428 fwohci_it_ctx_construct(struct fwohci_softc *sc, int no, int ch, int tag, int maxsize) 5429 { 5430 struct fwohci_it_ctx *itc; 5431 size_t dmastrsize; 5432 struct fwohci_it_dmabuf *dmastr; 5433 struct fwohci_desc *desc; 5434 bus_addr_t descphys; 5435 int nodesc; 5436 int i, j; 5437 5438 if ((itc = malloc(sizeof(*itc), M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL) { 5439 return itc; 5440 } 5441 5442 itc->itc_num = no; 5443 itc->itc_flags = 0; 5444 itc->itc_sc = sc; 5445 itc->itc_bufnum = FWOHCI_IT_BUFNUM; 5446 5447 itc->itc_channel = ch; 5448 itc->itc_tag = tag; 5449 itc->itc_speed = OHCI_CTXCTL_SPD_100; /* XXX */ 5450 5451 itc->itc_outpkt = 0; 5452 5453 itc->itc_maxsize = maxsize; 5454 5455 dmastrsize = sizeof(struct fwohci_it_dmabuf)*itc->itc_bufnum; 5456 5457 if ((dmastr = malloc(dmastrsize, M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL) { 5458 goto error_1; 5459 } 5460 itc->itc_buf = dmastr; 5461 5462 /* 5463 * Get memory for descriptors. One buffer will have 256 5464 * packet entry and 1 trailing descriptor for writing scratch. 5465 * 4-byte space for scratch. 
5466 */ 5467 itc->itc_descsize = (256*3 + 1)*itc->itc_bufnum; 5468 5469 if (fwohci_it_desc_alloc(itc)) { 5470 printf("%s: cannot get enough memory for descriptor\n", 5471 sc->sc_sc1394.sc1394_dev.dv_xname); 5472 goto error_2; 5473 } 5474 5475 /* prepare DMA buffer */ 5476 nodesc = itc->itc_descsize/itc->itc_bufnum; 5477 desc = (struct fwohci_desc *)itc->itc_descmap; 5478 descphys = itc->itc_dseg.ds_addr; 5479 5480 for (i = 0; i < itc->itc_bufnum; ++i) { 5481 5482 if (fwohci_itd_construct(itc, &dmastr[i], i, desc, 5483 descphys, nodesc, 5484 itc->itc_maxsize, itc->itc_scratch_paddr)) { 5485 goto error_3; 5486 } 5487 desc += nodesc; 5488 descphys += sizeof(struct fwohci_desc)*nodesc; 5489 } 5490 5491 #if 1 5492 itc->itc_buf_start = itc->itc_buf; 5493 itc->itc_buf_end = itc->itc_buf; 5494 itc->itc_buf_linkend = itc->itc_buf; 5495 #else 5496 itc->itc_bufidx_start = 0; 5497 itc->itc_bufidx_end = 0; 5498 itc->itc_bufidx_linkend = 0; 5499 #endif 5500 itc->itc_buf_cnt = 0; 5501 itc->itc_waitchan = NULL; 5502 *itc->itc_scratch = 0xffffffff; 5503 5504 return itc; 5505 5506 error_3: 5507 for (j = 0; j < i; ++j) { 5508 fwohci_itd_destruct(&dmastr[j]); 5509 } 5510 fwohci_it_desc_free(itc); 5511 error_2: 5512 free(itc->itc_buf, M_DEVBUF); 5513 error_1: 5514 free(itc, M_DEVBUF); 5515 5516 return NULL; 5517 } 5518 5519 5520 5521 void 5522 fwohci_it_ctx_destruct(struct fwohci_it_ctx *itc) 5523 { 5524 int i; 5525 5526 for (i = 0; i < itc->itc_bufnum; ++i) { 5527 fwohci_itd_destruct(&itc->itc_buf[i]); 5528 } 5529 5530 fwohci_it_desc_free(itc); 5531 free(itc, M_DEVBUF); 5532 } 5533 5534 5535 /* 5536 * static int fwohci_it_desc_alloc(struct fwohci_it_ctx *itc) 5537 * 5538 * Allocates descriptors for context DMA dedicated for 5539 * isochronous transmit. 5540 * 5541 * This function returns 0 (zero) if it succeeds. Otherwise, 5542 * return negative value. 
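 * For example, a request for 3076 descriptors is first bumped by one
 * for the scratch slot, rounded up to the next multiple of 256
 * (3077 -> 3328) and then decremented again, leaving
 * itc_descsize = 3327; the 4-byte scratch word then lives just past
 * the last descriptor (see itc_scratch below).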
5543 */ 5544 static int 5545 fwohci_it_desc_alloc(struct fwohci_it_ctx *itc) 5546 { 5547 bus_dma_tag_t dmat = itc->itc_sc->sc_dmat; 5548 const char *xname = itc->itc_sc->sc_sc1394.sc1394_dev.dv_xname; 5549 int error, dsize; 5550 5551 /* add for scratch */ 5552 itc->itc_descsize++; 5553 5554 /* rounding up to 256 */ 5555 if ((itc->itc_descsize & 0x0ff) != 0) { 5556 itc->itc_descsize = 5557 (itc->itc_descsize & ~0x0ff) + 0x100; 5558 } 5559 /* remove for scratch */ 5560 5561 itc->itc_descsize--; 5562 printf("%s: fwohci_it_desc_alloc will allocate %d descs\n", 5563 xname, itc->itc_descsize); 5564 5565 /* 5566 * allocate descriptor buffer 5567 */ 5568 dsize = sizeof(struct fwohci_desc) * itc->itc_descsize; 5569 5570 printf("%s: fwohci_it_desc_alloc: descriptor %d, dsize %d\n", 5571 xname, itc->itc_descsize, dsize); 5572 5573 if ((error = bus_dmamem_alloc(dmat, dsize, PAGE_SIZE, 0, 5574 &itc->itc_dseg, 1, &itc->itc_dnsegs, 0)) != 0) { 5575 printf("%s: unable to allocate descriptor buffer, error = %d\n", 5576 xname, error); 5577 goto fail_0; 5578 } 5579 5580 printf("fwohci_it_desc_alloc: %d segment[s]\n", itc->itc_dnsegs); 5581 5582 if ((error = bus_dmamem_map(dmat, &itc->itc_dseg, 5583 itc->itc_dnsegs, dsize, (caddr_t *)&itc->itc_descmap, 5584 BUS_DMA_COHERENT | BUS_DMA_WAITOK)) != 0) { 5585 printf("%s: unable to map descriptor buffer, error = %d\n", 5586 xname, error); 5587 goto fail_1; 5588 } 5589 5590 printf("fwohci_it_desc_alloc: bus_dmamem_map success dseg %lx:%lx\n", 5591 (long)itc->itc_dseg.ds_addr, (long)itc->itc_dseg.ds_len); 5592 5593 if ((error = bus_dmamap_create(dmat, dsize, itc->itc_dnsegs, 5594 dsize, 0, BUS_DMA_WAITOK, &itc->itc_ddmamap)) != 0) { 5595 printf("%s: unable to create descriptor buffer DMA map, " 5596 "error = %d\n", xname, error); 5597 goto fail_2; 5598 } 5599 5600 printf("fwohci_it_desc_alloc: bus_dmamem_create success\n"); 5601 5602 { 5603 int loop; 5604 5605 for (loop = 0; loop < itc->itc_ddmamap->dm_nsegs; ++loop) { 5606 printf("\t%.2d: 0x%lx - 0x%lx\n", loop, 5607 (long)itc->itc_ddmamap->dm_segs[loop].ds_addr, 5608 (long)itc->itc_ddmamap->dm_segs[loop].ds_addr + 5609 (long)itc->itc_ddmamap->dm_segs[loop].ds_len - 1); 5610 } 5611 } 5612 5613 if ((error = bus_dmamap_load(dmat, itc->itc_ddmamap, 5614 itc->itc_descmap, dsize, NULL, BUS_DMA_WAITOK)) != 0) { 5615 printf("%s: unable to load descriptor buffer DMA map, " 5616 "error = %d\n", xname, error); 5617 goto fail_3; 5618 } 5619 5620 printf("%s: fwohci_it_desc_alloc: get DMA memory phys:0x%08x vm:%p\n", 5621 xname, (int)itc->itc_ddmamap->dm_segs[0].ds_addr, itc->itc_descmap); 5622 5623 itc->itc_scratch = (u_int32_t *)(itc->itc_descmap 5624 + (sizeof(struct fwohci_desc))*itc->itc_descsize); 5625 itc->itc_scratch_paddr = 5626 itc->itc_ddmamap->dm_segs[0].ds_addr 5627 + (sizeof(struct fwohci_desc))*itc->itc_descsize; 5628 5629 printf("%s: scratch %p, 0x%x\n", xname, itc->itc_scratch, 5630 (int)itc->itc_scratch_paddr); 5631 5632 /* itc->itc_scratch_paddr = vtophys(itc->itc_scratch); */ 5633 5634 return 0; 5635 5636 fail_3: 5637 bus_dmamap_destroy(dmat, itc->itc_ddmamap); 5638 fail_2: 5639 bus_dmamem_unmap(dmat, (caddr_t)itc->itc_descmap, dsize); 5640 fail_1: 5641 bus_dmamem_free(dmat, &itc->itc_dseg, itc->itc_dnsegs); 5642 fail_0: 5643 itc->itc_dnsegs = 0; 5644 itc->itc_descmap = NULL; 5645 return error; 5646 } 5647 5648 5649 static void 5650 fwohci_it_desc_free(struct fwohci_it_ctx *itc) 5651 { 5652 bus_dma_tag_t dmat = itc->itc_sc->sc_dmat; 5653 int dsize = sizeof(struct fwohci_desc) * itc->itc_descsize + 4; 5654 
5655 bus_dmamap_destroy(dmat, itc->itc_ddmamap);
5656 bus_dmamem_unmap(dmat, (caddr_t)itc->itc_descmap, dsize);
5657 bus_dmamem_free(dmat, &itc->itc_dseg, itc->itc_dnsegs);
5658
5659 itc->itc_dnsegs = 0;
5660 itc->itc_descmap = NULL;
5661 }
5662
5663
5664
5665 /*
5666 * int fwohci_it_ctx_writedata(ieee1394_it_tag_t it, int ndata,
5667 * struct ieee1394_it_datalist *itdata, int flags)
5668 *
5669 * This function writes packet data into the DMA buffers of the
5670 * context: it parses the ieee1394_it_datalist
5671 * commands and fills the DMA buffers. It returns the
5672 * number of packets written, or an error code if the return value
5673 * is negative.
5674 *
5675 * When this function returns a positive value smaller than
5676 * ndata, the end of the DMA buffer has been reached.
5677 */
5678 int
5679 fwohci_it_ctx_writedata(ieee1394_it_tag_t it, int ndata,
5680 struct ieee1394_it_datalist *itdata, int flags)
5681 {
5682 struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it;
5683 int rv;
5684 int writepkt = 0;
5685 volatile struct fwohci_it_dmabuf *itd;
5686 int i = 0;
5687
5688 itd = itc->itc_buf_end;
5689
5690 while (ndata > 0) {
5691 int s;
5692
5693 if (fwohci_itd_isfull(itd) || fwohci_itd_islocked(itd)) {
5694 if (itc->itc_buf_cnt == itc->itc_bufnum) {
5695 /* no space to write */
5696 printf("sleeping: start linkend end %d %d %d "
5697 "bufcnt %d\n",
5698 itc->itc_buf_start->itd_num,
5699 itc->itc_buf_linkend->itd_num,
5700 itc->itc_buf_end->itd_num,
5701 itc->itc_buf_cnt);
5702
5703 itc->itc_waitchan = itc;
5704 if (tsleep(itc->itc_waitchan,
5705 PCATCH, "fwohci it", 0) == EWOULDBLOCK) {
5706 itc->itc_waitchan = NULL;
5707 printf("fwohci0 signal\n");
5708 break;
5709 }
5710 printf("waking: start linkend end %d %d %d\n",
5711 itc->itc_buf_start->itd_num,
5712 itc->itc_buf_linkend->itd_num,
5713 itc->itc_buf_end->itd_num);
5714
5715 itc->itc_waitchan = itc;
5716 i = 0;
5717 } else {
5718 /*
5719 * Use next buffer. This DMA buffer is full
5720 * or locked.
int
fwohci_it_ctx_writedata(ieee1394_it_tag_t it, int ndata,
    struct ieee1394_it_datalist *itdata, int flags)
{
	struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it;
	int rv;
	int writepkt = 0;
	volatile struct fwohci_it_dmabuf *itd;
	int i = 0;

	itd = itc->itc_buf_end;

	while (ndata > 0) {
		int s;

		if (fwohci_itd_isfull(itd) || fwohci_itd_islocked(itd)) {
			if (itc->itc_buf_cnt == itc->itc_bufnum) {
				/* no space to write */
				printf("sleeping: start linkend end %d %d %d "
				    "bufcnt %d\n",
				    itc->itc_buf_start->itd_num,
				    itc->itc_buf_linkend->itd_num,
				    itc->itc_buf_end->itd_num,
				    itc->itc_buf_cnt);

				itc->itc_waitchan = itc;
				/* stop when interrupted by a signal */
				if (tsleep(itc->itc_waitchan,
				    PCATCH, "fwohci it", 0) != 0) {
					itc->itc_waitchan = NULL;
					printf("fwohci0 signal\n");
					break;
				}
				printf("waking: start linkend end %d %d %d\n",
				    itc->itc_buf_start->itd_num,
				    itc->itc_buf_linkend->itd_num,
				    itc->itc_buf_end->itd_num);

				itc->itc_waitchan = itc;
				i = 0;
			} else {
				/*
				 * Use next buffer.  This DMA buffer is full
				 * or locked.
				 */
				INC_BUF(itc, itd);
			}
		}

		if (++i > 10) {
			panic("why loop so much %d", itc->itc_buf_cnt);
			break;
		}

		s = splbio();

		if (fwohci_itd_hasdata(itd) == 0) {
			++itc->itc_buf_cnt;
			DPRINTF(("<buf cnt %d>\n", itc->itc_buf_cnt));
		}

		rv = fwohci_itd_writedata(itd, ndata, itdata);
		DPRINTF(("fwohci_it_ctx_writedata: buf %d ndata %d rv %d\n",
		    itd->itd_num, ndata, rv));

		if (itc->itc_buf_start == itc->itc_buf_linkend
		    && (itc->itc_flags & ITC_FLAGS_RUN) != 0) {

#ifdef DEBUG_USERADD
			printf("fwohci_it_ctx_writedata: emergency!\n");
#endif
			if (itc->itc_buf_linkend != itc->itc_buf_end
			    && fwohci_itd_hasdata(itc->itc_buf_end)) {
				volatile struct fwohci_it_dmabuf *itdn = itc->itc_buf_linkend;

				INC_BUF(itc, itdn);
				printf("connecting %d after %d\n",
				    itdn->itd_num,
				    itc->itc_buf_linkend->itd_num);
				if (fwohci_itd_link(itc->itc_buf_linkend, itdn)) {
					printf("fwohci_it_ctx_writedata:"
					    " cannot link correctly\n");
					splx(s);
					return -1;
				}
				itc->itc_buf_linkend = itdn;
			}
		}

		splx(s);

		if (rv < 0) {
			/* some error happened */
			break;
		}

		writepkt += rv;
		ndata -= rv;
		itdata += rv;
		itc->itc_buf_end = itd;
	}

	/* Start DMA engine if stopped */
	if ((itc->itc_flags & ITC_FLAGS_RUN) == 0) {
		if (itc->itc_buf_cnt > itc->itc_bufnum - 1 || flags) {
			/* run */
			printf("fwohci_it_ctx_writedata: DMA engine start\n");
			fwohci_it_ctx_run(itc);
		}
	}

	return writepkt;
}


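/*
 * static void fwohci_it_ctx_run(struct fwohci_it_ctx *itc)
 *
 * This function starts the isochronous transmit DMA engine for the
 * context.  It links together the DMA buffers which already hold
 * data, makes sure the context is stopped, writes the physical
 * address of the first descriptor block into CommandPtr (the low
 * four bits carry Z, the number of descriptors in that block) and
 * then sets the run and wake bits in ContextControl.
 */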
static void
fwohci_it_ctx_run(struct fwohci_it_ctx *itc)
{
	struct fwohci_softc *sc = itc->itc_sc;
	int ctx = itc->itc_num;
	volatile struct fwohci_it_dmabuf *itd =
	    (volatile struct fwohci_it_dmabuf *)itc->itc_buf_start;
	u_int32_t reg;
	int i;

	if (itc->itc_flags & ITC_FLAGS_RUN) {
		return;
	}
	itc->itc_flags |= ITC_FLAGS_RUN;

	/*
	 * dirty, but I can't imagine a better place to save the branch
	 * addr of the top DMA buffer and substitute 0 for it.
	 */
	itd->itd_savedbranch = itd->itd_lastdesc->fd_branch;
	itd->itd_lastdesc->fd_branch = 0;

	if (itc->itc_buf_cnt > 1) {
		volatile struct fwohci_it_dmabuf *itdn = itd;

#if 0
		INC_BUF(itc, itdn);

		if (fwohci_itd_link(itd, itdn)) {
			printf("fwohci_it_ctx_run: cannot link correctly\n");
			return;
		}
		itc->itc_buf_linkend = itdn;
#else
		for (;;) {
			INC_BUF(itc, itdn);

			if (itdn == itc->itc_buf_end) {
				break;
			}
			if (fwohci_itd_link(itd, itdn)) {
				printf("fwohci_it_ctx_run: cannot link\n");
				return;
			}
			itd = itdn;
		}
		itc->itc_buf_linkend = itd;
#endif
	} else {
		itd->itd_lastdesc->fd_flags |= OHCI_DESC_INTR_ALWAYS;
		itc->itc_buf_linkend = itc->itc_buf_end;
		itc->itc_buf_end->itd_flags |= ITD_FLAGS_LOCK;

		/* sanity check */
		if (itc->itc_buf_end != itc->itc_buf_start) {
			printf("buf start & end differs %p %p\n",
			    itc->itc_buf_end, itc->itc_buf_start);
		}
#if 0
		{
			u_int32_t *fdp;
			u_int32_t adr;
			int i;

			printf("fwohci_it_ctx_run: itc_buf_cnt 1, DMA buf %d\n",
			    itd->itd_num);
			printf(" last desc %p npacket %d, %d 0x%04x%04x",
			    itd->itd_lastdesc, itd->itd_npacket,
			    (itd->itd_lastdesc - itd->itd_desc)/3,
			    itd->itd_lastdesc->fd_flags,
			    itd->itd_lastdesc->fd_reqcount);
			fdp = (u_int32_t *)itd->itd_desc;
			adr = (u_int32_t)itd->itd_desc_phys;	/* XXX */

			for (i = 0; i < 7*4; ++i) {
				if (i % 4 == 0) {
					printf("\n%x:", adr + 4*i);
				}
				printf(" %08x", fdp[i]);
			}

			if (itd->itd_npacket > 4) {
				printf("\n...");
				i = (itd->itd_npacket - 2)*12 + 4;
			} else {
				i = 2*12 + 4;
			}
			for (; i < itd->itd_npacket*12 + 4; ++i) {
				if (i % 4 == 0) {
					printf("\n%x:", adr + 4*i);
				}
				printf(" %08x", fdp[i]);
			}
			printf("\n");
		}
#endif
	}
	{
		struct fwohci_desc *fd;

		printf("fwohci_it_ctx_run: link start linkend end %d %d %d\n",
		    itc->itc_buf_start->itd_num,
		    itc->itc_buf_linkend->itd_num,
		    itc->itc_buf_end->itd_num);

		fd = itc->itc_buf_start->itd_desc;
		if ((fd->fd_flags & 0xff00) != OHCI_DESC_STORE_VALUE) {
			printf("fwohci_it_ctx_run: start buf not with STORE\n");
		}
		fd += 3;
		if ((fd->fd_flags & OHCI_DESC_INTR_ALWAYS) == 0) {
			printf("fwohci_it_ctx_run: start buf does not have intr\n");
		}

		fd = itc->itc_buf_linkend->itd_desc;
		if ((fd->fd_flags & 0xff00) != OHCI_DESC_STORE_VALUE) {
			printf("fwohci_it_ctx_run: linkend buf not with STORE\n");
		}
		fd += 3;
		if ((fd->fd_flags & OHCI_DESC_INTR_ALWAYS) == 0) {
			printf("fwohci_it_ctx_run: linkend buf does not have intr\n");
		}
	}

	*itc->itc_scratch = 0xffffffff;

	OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_ContextControlClear,
	    0xffff0000);
	reg = OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_ContextControlSet);

	printf("fwohci_it_ctx_run start for ctx %d\n", ctx);
" active" : ""); 5932 5933 OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_ContextControlClear, 5934 OHCI_CTXCTL_RUN); 5935 5936 reg = OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_ContextControlSet); 5937 i = 0; 5938 while (reg & (OHCI_CTXCTL_ACTIVE | OHCI_CTXCTL_RUN)) { 5939 delay(100); 5940 if (++i > 1000) { 5941 printf("%s: cannot stop iso transmit engine\n", 5942 sc->sc_sc1394.sc1394_dev.dv_xname); 5943 break; 5944 } 5945 reg = OHCI_SYNC_TX_DMA_READ(sc, ctx, 5946 OHCI_SUBREG_ContextControlSet); 5947 } 5948 5949 printf("%s: itm IT_CommandPtr 0x%08x ContextCtrl 0x%08x%s%s%s%s\n", 5950 sc->sc_sc1394.sc1394_dev.dv_xname, 5951 OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_CommandPtr), 5952 reg, 5953 reg & OHCI_CTXCTL_RUN ? " run" : "", 5954 reg & OHCI_CTXCTL_WAKE ? " wake" : "", 5955 reg & OHCI_CTXCTL_DEAD ? " dead" : "", 5956 reg & OHCI_CTXCTL_ACTIVE ? " active" : ""); 5957 5958 printf("%s: writing CommandPtr to 0x%08x\n", 5959 sc->sc_sc1394.sc1394_dev.dv_xname, 5960 (int)itc->itc_buf_start->itd_desc_phys); 5961 OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_CommandPtr, 5962 fwohci_itd_list_head(itc->itc_buf_start) | 4); 5963 5964 OHCI_SYNC_TX_DMA_WRITE(sc, ctx, OHCI_SUBREG_ContextControlSet, 5965 OHCI_CTXCTL_RUN | OHCI_CTXCTL_WAKE); 5966 5967 reg = OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_ContextControlSet); 5968 5969 printf("%s: aft IT_CommandPtr 0x%08x ContextCtrl 0x%08x%s%s%s%s\n", 5970 sc->sc_sc1394.sc1394_dev.dv_xname, 5971 OHCI_SYNC_TX_DMA_READ(sc, ctx, OHCI_SUBREG_CommandPtr), 5972 reg, 5973 reg & OHCI_CTXCTL_RUN ? " run" : "", 5974 reg & OHCI_CTXCTL_WAKE ? " wake" : "", 5975 reg & OHCI_CTXCTL_DEAD ? " dead" : "", 5976 reg & OHCI_CTXCTL_ACTIVE ? " active" : ""); 5977 } 5978 5979 5980 5981 int 5982 fwohci_it_ctx_flush(ieee1394_it_tag_t it) 5983 { 5984 struct fwohci_it_ctx *itc = (struct fwohci_it_ctx *)it; 5985 int rv = 0; 5986 5987 if ((itc->itc_flags & ITC_FLAGS_RUN) == 0 5988 && itc->itc_buf_cnt > 0) { 5989 printf("fwohci_it_ctx_flush: %s flushing\n", 5990 itc->itc_sc->sc_sc1394.sc1394_dev.dv_xname); 5991 5992 fwohci_it_ctx_run(itc); 5993 rv = 1; 5994 } 5995 5996 return rv; 5997 } 5998 5999 6000 /* 6001 * static void fwohci_it_intr(struct fwohci_softc *sc, 6002 * struct fwochi_it_ctx *itc) 6003 * 6004 * This function is the interrupt handler for isochronous 6005 * transmit interrupt. This function will 1) unlink used 6006 * (already transmitted) buffers, 2) link new filled buffers, if 6007 * necessary and 3) say some free DMA buffers exist to 6008 * fwiso_write() 6009 */ 6010 static void 6011 fwohci_it_intr(struct fwohci_softc *sc, struct fwohci_it_ctx *itc) 6012 { 6013 volatile struct fwohci_it_dmabuf *itd, *newstartbuf; 6014 u_int16_t scratchval; 6015 u_int32_t reg; 6016 6017 reg = OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num, 6018 OHCI_SUBREG_ContextControlSet); 6019 6020 /* print out debug info */ 6021 #ifdef FW_DEBUG 6022 printf("fwohci_it_intr: CTX %d\n", itc->itc_num); 6023 6024 printf("fwohci_it_intr: %s: IT_CommandPtr 0x%08x " 6025 "ContextCtrl 0x%08x%s%s%s%s\n", 6026 sc->sc_sc1394.sc1394_dev.dv_xname, 6027 OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num, OHCI_SUBREG_CommandPtr), 6028 reg, 6029 reg & OHCI_CTXCTL_RUN ? " run" : "", 6030 reg & OHCI_CTXCTL_WAKE ? " wake" : "", 6031 reg & OHCI_CTXCTL_DEAD ? " dead" : "", 6032 reg & OHCI_CTXCTL_ACTIVE ? 
" active" : ""); 6033 printf("fwohci_it_intr: %s: scratch %x start %d end %d valid %d\n", 6034 sc->sc_sc1394.sc1394_dev.dv_xname, *itc->itc_scratch, 6035 itc->itc_buf_start->itd_num, itc->itc_buf_end->itd_num, 6036 itc->itc_buf_cnt); 6037 { 6038 u_int32_t cntlstatus 6039 = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer); 6040 printf("\t\tIsoCounter 0x%08x, %d %d %d\n", cntlstatus, 6041 (cntlstatus >> 25) & 0xfe, (cntlstatus >> 12) & 0x1fff, 6042 cntlstatus & 0xfff); 6043 } 6044 #endif /* FW_DEBUG */ 6045 /* end print out debug info */ 6046 6047 scratchval = (*itc->itc_scratch) & 0x0000ffff; 6048 *itc->itc_scratch = 0xffffffff; 6049 6050 if ((reg & OHCI_CTXCTL_ACTIVE) == 0 && scratchval != 0xffff) { 6051 /* DMA engine has been stopped */ 6052 printf("DMA engine stopped\n"); 6053 printf("fwohci_it_intr: %s: IT_CommandPtr 0x%08x " 6054 "ContextCtrl 0x%08x%s%s%s%s\n", 6055 sc->sc_sc1394.sc1394_dev.dv_xname, 6056 OHCI_SYNC_TX_DMA_READ(sc, itc->itc_num, OHCI_SUBREG_CommandPtr), 6057 reg, 6058 reg & OHCI_CTXCTL_RUN ? " run" : "", 6059 reg & OHCI_CTXCTL_WAKE ? " wake" : "", 6060 reg & OHCI_CTXCTL_DEAD ? " dead" : "", 6061 reg & OHCI_CTXCTL_ACTIVE ? " active" : ""); 6062 printf("fwohci_it_intr: %s: scratch %x start %d end %d valid %d\n", 6063 sc->sc_sc1394.sc1394_dev.dv_xname, *itc->itc_scratch, 6064 itc->itc_buf_start->itd_num, itc->itc_buf_end->itd_num, 6065 itc->itc_buf_cnt); 6066 { 6067 u_int32_t xreg 6068 = OHCI_CSR_READ(sc, OHCI_REG_IsochronousCycleTimer); 6069 printf("\t\tIsoCounter 0x%08x, %d %d %d\n", reg, 6070 (xreg >> 25) & 0xfe, (xreg >> 12) & 0x1fff, 6071 xreg & 0xfff); 6072 } 6073 printf("\t\tbranch of lastdesc 0x%08x\n", 6074 itc->itc_buf_start->itd_lastdesc->fd_branch); 6075 6076 scratchval = 0xffff; 6077 itc->itc_flags &= ~ITC_FLAGS_RUN; 6078 } 6079 6080 /* unlink old buffers */ 6081 if (scratchval != 0xffff) { 6082 /* normal path */ 6083 newstartbuf = &itc->itc_buf[scratchval]; 6084 } else { 6085 /* DMA engine stopped */ 6086 newstartbuf = itc->itc_buf_linkend; 6087 INC_BUF(itc, newstartbuf); 6088 } 6089 6090 itd = itc->itc_buf_start; 6091 itc->itc_buf_start = newstartbuf; 6092 while (itd != newstartbuf) { 6093 itc->itc_outpkt += itd->itd_npacket; 6094 fwohci_itd_unlink(itd); 6095 INC_BUF(itc, itd); 6096 --itc->itc_buf_cnt; 6097 DPRINTF(("<buf cnt %d>\n", itc->itc_buf_cnt)); 6098 } 6099 6100 #ifdef DEBUG_USERADD 6101 if (scratchval != 0xffff) { 6102 printf("fwohci0: intr start %d dataend %d %d\n", scratchval, 6103 itc->itc_buf_end->itd_num, itc->itc_outpkt); 6104 } 6105 #endif 6106 6107 if (scratchval == 0xffff) { 6108 /* no data supplied */ 6109 printf("fwohci_it_intr: no it data. 
		printf("fwohci_it_intr: no it data.  output total %d\n",
		    itc->itc_outpkt);

		if (itc->itc_buf_cnt > 0) {
			printf("fwohci_it_intr: it DMA stops "
			    "w/ valid databuf %d buf %d data %d"
			    " intr reg 0x%08x\n",
			    itc->itc_buf_cnt,
			    itc->itc_buf_end->itd_num,
			    fwohci_itd_hasdata(itc->itc_buf_end),
			    OHCI_CSR_READ(sc, OHCI_REG_IntEventSet));
		} else {
			/* All the data gone */
			itc->itc_buf_start
			    = itc->itc_buf_end
			    = itc->itc_buf_linkend
			    = &itc->itc_buf[0];
			printf("fwohci_it_intr: all packets gone\n");
		}

		itc->itc_flags &= ~ITC_FLAGS_RUN;

		OHCI_SYNC_TX_DMA_WRITE(sc, itc->itc_num,
		    OHCI_SUBREG_ContextControlClear, 0xffffffff);
		OHCI_SYNC_TX_DMA_WRITE(sc, itc->itc_num,
		    OHCI_SUBREG_CommandPtr, 0);
		OHCI_SYNC_TX_DMA_WRITE(sc, itc->itc_num,
		    OHCI_SUBREG_ContextControlClear, 0x1f);

		/* send message */
		if (itc->itc_waitchan != NULL) {
			wakeup(itc->itc_waitchan);
		}

		return;
	}

#if 0
	/* unlink old buffers */
	newstartbuf = &itc->itc_buf[scratchval];

	itd = (struct fwohci_it_dmabuf *)itc->itc_buf_start;
	itc->itc_buf_start = newstartbuf;
	while (itd != newstartbuf) {
		itc->itc_outpkt += itd->itd_npacket;
		fwohci_itd_unlink(itd);
		INC_BUF(itc, itd);
		--itc->itc_buf_cnt;
		DPRINTF(("<buf cnt %d>\n", itc->itc_buf_cnt));
	}
#endif

	/* sanity check */
	{
		int startidx, endidx, linkendidx;

		startidx = itc->itc_buf_start->itd_num;
		endidx = itc->itc_buf_end->itd_num;
		linkendidx = itc->itc_buf_linkend->itd_num;

		if (startidx < endidx) {
			if (linkendidx < startidx
			    || endidx < linkendidx) {
				printf("funny, linkend is not between start "
				    "and end [%d, %d]: %d\n",
				    startidx, endidx, linkendidx);
			}
		} else if (startidx > endidx) {
			if (linkendidx < startidx
			    && endidx < linkendidx) {
				printf("funny, linkend is not between start "
				    "and end [%d, %d]: %d\n",
				    startidx, endidx, linkendidx);
			}
		} else {
			if (linkendidx != startidx) {
				printf("funny, linkend is not between start "
				    "and end [%d, %d]: %d\n",
				    startidx, endidx, linkendidx);
			}
		}
	}

	/* link if some valid DMA buffers exist */
	if (itc->itc_buf_cnt > 1
	    && itc->itc_buf_linkend != itc->itc_buf_end) {
		volatile struct fwohci_it_dmabuf *itdprev;
		int i;

		DPRINTF(("CTX %d: start linkend dataend bufs %d, %d, %d, %d\n",
		    itc->itc_num,
		    itc->itc_buf_start->itd_num,
		    itc->itc_buf_linkend->itd_num,
		    itc->itc_buf_end->itd_num,
		    itc->itc_buf_cnt));

		itd = itdprev = itc->itc_buf_linkend;
		INC_BUF(itc, itd);

#if 0
		if (fwohci_itd_isfilled(itd) || itc->itc_buf_cnt == 2) {
			while (itdprev != itc->itc_buf_end) {

				if (fwohci_itd_link(itdprev, itd)) {
					break;
				}

				itdprev = itd;
				INC_BUF(itc, itd);
			}
			itc->itc_buf_linkend = itdprev;
		}
#endif
		i = 0;
		while (itdprev != itc->itc_buf_end) {
			if (!fwohci_itd_isfilled(itd) && itc->itc_buf_cnt > 2) {
				break;
			}

			if (fwohci_itd_link(itdprev, itd)) {
				break;
			}

			itdprev = itd;
			INC_BUF(itc, itd);

			itc->itc_buf_linkend = itdprev;
			++i;
		}

		if (i > 0) {
			DPRINTF(("CTX %d: start linkend dataend bufs %d, %d, %d, %d\n",
			    itc->itc_num,
			    itc->itc_buf_start->itd_num,
			    itc->itc_buf_linkend->itd_num,
			    itc->itc_buf_end->itd_num,
			    itc->itc_buf_cnt));
		}
	} else {
		volatile struct fwohci_it_dmabuf *le;

		le = itc->itc_buf_linkend;

		printf("CTX %d: start linkend dataend bufs %d, %d, %d, %d no buffer added\n",
		    itc->itc_num,
		    itc->itc_buf_start->itd_num,
		    itc->itc_buf_linkend->itd_num,
		    itc->itc_buf_end->itd_num,
		    itc->itc_buf_cnt);
		printf("\tlast descriptor %s %04x %08x\n",
		    le->itd_lastdesc->fd_flags & OHCI_DESC_INTR_ALWAYS ? "intr" : "",
		    le->itd_lastdesc->fd_flags,
		    le->itd_lastdesc->fd_branch);
	}

	/* send message */
	if (itc->itc_waitchan != NULL) {
		wakeup(itc->itc_waitchan);
	}
}



/*
 * int fwohci_itd_construct(struct fwohci_it_ctx *itc,
 *	struct fwohci_it_dmabuf *itd, int num,
 *	struct fwohci_desc *desc, bus_addr_t phys,
 *	int descsize, int maxsize, paddr_t scratch)
 *
 * This function initializes the DMA buffer class itd (buffer number
 * num) of an isochronous transmit context: it allocates the data
 * buffer and builds the descriptor chain (a STORE_VALUE descriptor
 * followed by an OUTPUT_IMMEDIATE/OUTPUT_LAST pair per packet) in
 * the descsize descriptors at desc/phys.  It returns 0 on success
 * and -1 on failure.
 */
int
fwohci_itd_construct(struct fwohci_it_ctx *itc, struct fwohci_it_dmabuf *itd,
    int num, struct fwohci_desc *desc, bus_addr_t phys, int descsize,
    int maxsize, paddr_t scratch)
{
	const char *xname = itc->itc_sc->sc_sc1394.sc1394_dev.dv_xname;
	struct fwohci_desc *fd;
	struct fwohci_desc *descend;
	int npkt;
	int bufno = 0;		/* DMA segment */
	bus_size_t bufused = 0;	/* offset in a DMA segment */
	int roundsize;
	int tag = itc->itc_tag;
	int ch = itc->itc_channel;

	itd->itd_ctx = itc;
	itd->itd_num = num;

	if (descsize > 1024*3) {
		printf("%s: fwohci_itd_construct[%d] descsize %d too big\n",
		    xname, num, descsize);
		return -1;
	}

	itd->itd_desc = desc;
	itd->itd_descsize = descsize;
	itd->itd_desc_phys = phys;

	itd->itd_lastdesc = desc;
	itd->itd_npacket = 0;

	printf("%s: fwohci_itd_construct[%d] desc %p descsize %d, maxsize %d\n",
	    xname, itd->itd_num, itd->itd_desc, itd->itd_descsize, maxsize);

	if (descsize < 4) {
		/* descriptor array too small; at least 4 are required */
		return -1;
	}

	/* count how many packets this buffer can handle */
	itd->itd_maxpacket = (descsize - 1)/3;

	/* rounding up to a power of 2, minimum 16 */
	roundsize = 16;
	for (roundsize = 16; roundsize < maxsize; roundsize <<= 1);
	itd->itd_maxsize = roundsize;

	printf("\t\tdesc%d [%x, %lx]\n", itd->itd_num,
	    (u_int32_t)phys,
	    (unsigned long)((u_int32_t)phys
	    + (itd->itd_maxpacket*3 + 1)*sizeof(struct fwohci_desc)));
	printf("%s: fwohci_itd_construct[%d] npkt %d maxsize round up to %d\n",
	    xname, itd->itd_num, itd->itd_maxpacket, itd->itd_maxsize);

	/* obtain DMA buffer */
	if (fwohci_itd_dmabuf_alloc(itd)) {
		/* cannot allocate memory for DMA buffer */
		return -1;
	}

	/*
	 * make descriptor chain
	 *
	 * The first descriptor group has STORE_VALUE, OUTPUT_IMMEDIATE
	 * and OUTPUT_LAST descriptors.  The second and subsequent
	 * groups each have an OUTPUT_IMMEDIATE and an OUTPUT_LAST
	 * descriptor.
	 */
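	/*
	 * Illustrative layout of the resulting chain (16-byte
	 * descriptors, indices into desc[]):
	 *
	 *	desc[0]		STORE_VALUE: writes this buffer's number
	 *			into the shared scratch quadlet
	 *	desc[1..2]	OUTPUT_IMMEDIATE descriptor plus its
	 *			8-byte isochronous packet header for
	 *			packet 0
	 *	desc[3]		OUTPUT_LAST: payload of packet 0, branch
	 *			to the next group with Z = 3
	 *	desc[4..6]	packet 1, and so on.
	 *
	 * The first group spans four descriptors, which is why branches
	 * into a buffer (fwohci_itd_link(), CommandPtr in
	 * fwohci_it_ctx_run()) use a Z value of 4.
	 */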
	descend = desc + descsize;

	/* set store value descriptor for 1st descriptor group */
	desc->fd_flags = OHCI_DESC_STORE_VALUE;
	desc->fd_reqcount = num;	/* write number of DMA buffer class */
	desc->fd_data = scratch;	/* at physical memory 'scratch' */
	desc->fd_branch = 0;
	desc->fd_status = desc->fd_rescount = 0;

	itd->itd_store = desc;
	itd->itd_store_phys = phys;

	++desc;
	phys += 16;

	npkt = 0;
	/* make OUTPUT_DESC chain for packets */
	for (fd = desc; fd + 2 < descend; fd += 3, ++npkt) {
		struct fwohci_desc *fi = fd;
		struct fwohci_desc *fl = fd + 2;
		u_int32_t *fi_data = (u_int32_t *)(fd + 1);

#if 0
		if (npkt > itd->itd_maxpacket - 3) {
			printf("%s: %3d fi fl %p %p\n", xname, npkt, fi, fl);
		}
#endif

		fi->fd_reqcount = 8;	/* data size for OHCI command */
		fi->fd_flags = OHCI_DESC_IMMED;
		fi->fd_data = 0;
		fi->fd_branch = 0;	/* branch for error */
		fi->fd_status = fi->fd_rescount = 0;

		/* channel and tag are unchanged */
		*fi_data = OHCI_ITHEADER_VAL(TAG, tag) |
		    OHCI_ITHEADER_VAL(CHAN, ch) |
		    OHCI_ITHEADER_VAL(TCODE, IEEE1394_TCODE_STREAM_DATA);
		*++fi_data = 0;
		*++fi_data = 0;
		*++fi_data = 0;

		fl->fd_flags = OHCI_DESC_OUTPUT | OHCI_DESC_LAST |
		    OHCI_DESC_BRANCH;
		fl->fd_branch =
		    (phys + sizeof(struct fwohci_desc)*(npkt + 1)*3) | 0x03;
		fl->fd_status = fl->fd_rescount = 0;

#ifdef FW_DEBUG
		if (npkt > itd->itd_maxpacket - 3) {
			DPRINTF(("%s: %3d fi fl fl branch %p %p 0x%x\n",
			    xname, npkt, fi, fl, (int)fl->fd_branch));
		}
#endif

		/* physical address of the data slot for this packet */
		fl->fd_data =
		    (u_int32_t)((itd->itd_seg[bufno].ds_addr + bufused));
		bufused += itd->itd_maxsize;
		if (bufused > itd->itd_seg[bufno].ds_len) {
			bufused = 0;
			if (++bufno == itd->itd_nsegs) {
				/* fail */
				break;
			}
		}
	}

#if 0
	if (itd->itd_num == 0) {
		u_int32_t *fdp;
		u_int32_t adr;
		int i = 0;

		fdp = (u_int32_t *)itd->itd_desc;
		adr = (u_int32_t)itd->itd_desc_phys;	/* XXX */

		printf("fwohci_itd_construct: audit DMA desc chain.  %d\n",
		    itd->itd_maxpacket);
		for (i = 0; i < itd->itd_maxpacket*12 + 4; ++i) {
			if (i % 4 == 0) {
				printf("\n%x:", adr + 4*i);
			}
			printf(" %08x", fdp[i]);
		}
		printf("\n");
	}
#endif
	/* last branch should be 0 */
	--fd;
	fd->fd_branch = 0;

	printf("%s: pkt %d %d maxdesc %p\n",
	    xname, npkt, itd->itd_maxpacket, descend);

	return 0;
}

void
fwohci_itd_destruct(struct fwohci_it_dmabuf *itd)
{
	const char *xname = itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname;

	printf("%s: fwohci_itd_destruct %d\n", xname, itd->itd_num);

	fwohci_itd_dmabuf_free(itd);
}


/*
 * static int fwohci_itd_dmabuf_alloc(struct fwohci_it_dmabuf *itd)
 *
 * This function allocates DMA memory for fwohci_it_dmabuf.  It will
 * return 0 when it succeeds and a non-zero value when it fails.
 */
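/*
 * Sizing example (illustrative): for the DV case checked in
 * fwohci_itd_writedata(), fwohci_itd_construct() rounds the 488-byte
 * packet up to itd_maxsize = 512, so the buffer allocated below holds
 * itd_maxpacket fixed-size slots of 512 bytes each, one per packet.
 */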
static int
fwohci_itd_dmabuf_alloc(struct fwohci_it_dmabuf *itd)
{
	const char *xname = itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname;
	bus_dma_tag_t dmat = itd->itd_ctx->itc_sc->sc_dmat;

	int dmasize = itd->itd_maxsize * itd->itd_maxpacket;
	int error;

	DPRINTF(("%s: fwohci_itd_dmabuf_alloc[%d] dmasize %d maxpkt %d\n",
	    xname, itd->itd_num, dmasize, itd->itd_maxpacket));

	if ((error = bus_dmamem_alloc(dmat, dmasize, PAGE_SIZE, 0,
	    itd->itd_seg, FWOHCI_MAX_ITDATASEG, &itd->itd_nsegs, 0)) != 0) {
		printf("%s: unable to allocate data buffer, error = %d\n",
		    xname, error);
		goto fail_0;
	}

	/* checking memory range */
#ifdef FW_DEBUG
	{
		int loop;

		for (loop = 0; loop < itd->itd_nsegs; ++loop) {
			DPRINTF(("\t%.2d: 0x%lx - 0x%lx\n", loop,
			    (long)itd->itd_seg[loop].ds_addr,
			    (long)itd->itd_seg[loop].ds_addr
			    + (long)itd->itd_seg[loop].ds_len - 1));
		}
	}
#endif

	if ((error = bus_dmamem_map(dmat, itd->itd_seg, itd->itd_nsegs,
	    dmasize, (caddr_t *)&itd->itd_buf,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK)) != 0) {
		printf("%s: unable to map data buffer, error = %d\n",
		    xname, error);
		goto fail_1;
	}

	DPRINTF(("fwohci_itd_dmabuf_alloc[%d]: bus_dmamem_map addr %p\n",
	    itd->itd_num, itd->itd_buf));

	if ((error = bus_dmamap_create(dmat, /*chunklen*/dmasize,
	    itd->itd_nsegs, dmasize, 0, BUS_DMA_WAITOK,
	    &itd->itd_dmamap)) != 0) {
		printf("%s: unable to create data buffer DMA map, "
		    "error = %d\n", xname, error);
		goto fail_2;
	}

	DPRINTF(("fwohci_itd_dmabuf_alloc: bus_dmamap_create\n"));

	if ((error = bus_dmamap_load(dmat, itd->itd_dmamap,
	    itd->itd_buf, dmasize, NULL, BUS_DMA_WAITOK)) != 0) {
		printf("%s: unable to load data buffer DMA map, error = %d\n",
		    xname, error);
		goto fail_3;
	}

	DPRINTF(("fwohci_itd_dmabuf_alloc: load DMA memory vm %p\n",
	    itd->itd_buf));
	DPRINTF(("\tmapsize %ld nsegs %d\n",
	    (long)itd->itd_dmamap->dm_mapsize, itd->itd_dmamap->dm_nsegs));

#ifdef FW_DEBUG
	{
		int loop;

		for (loop = 0; loop < itd->itd_dmamap->dm_nsegs; ++loop) {
			DPRINTF(("\t%.2d: 0x%lx - 0x%lx\n", loop,
			    (long)itd->itd_dmamap->dm_segs[loop].ds_addr,
			    (long)itd->itd_dmamap->dm_segs[loop].ds_addr +
			    (long)itd->itd_dmamap->dm_segs[loop].ds_len - 1));
		}
	}
#endif

	return 0;

 fail_3:
	bus_dmamap_destroy(dmat, itd->itd_dmamap);
 fail_2:
	bus_dmamem_unmap(dmat, (caddr_t)itd->itd_buf, dmasize);
 fail_1:
	bus_dmamem_free(dmat, itd->itd_seg, itd->itd_nsegs);
 fail_0:
	itd->itd_nsegs = 0;
	itd->itd_maxpacket = 0;
	return error;
}

/*
 * static void fwohci_itd_dmabuf_free(struct fwohci_it_dmabuf *itd)
 *
 * This function will release the memory resources allocated by
 * fwohci_itd_dmabuf_alloc().
 */
static void
fwohci_itd_dmabuf_free(struct fwohci_it_dmabuf *itd)
{
	bus_dma_tag_t dmat = itd->itd_ctx->itc_sc->sc_dmat;
	int dmasize = itd->itd_maxsize * itd->itd_maxpacket;

	bus_dmamap_destroy(dmat, itd->itd_dmamap);
	bus_dmamem_unmap(dmat, (caddr_t)itd->itd_buf, dmasize);
	bus_dmamem_free(dmat, itd->itd_seg, itd->itd_nsegs);

	itd->itd_nsegs = 0;
	itd->itd_maxpacket = 0;
}



/*
 * int fwohci_itd_link(struct fwohci_it_dmabuf *itd,
 *	struct fwohci_it_dmabuf *itdc)
 *
 * This function will concatenate the two descriptor chains in
 * dmabufs itd and itdc.  The descriptor chain in itdc follows the
 * one in itd.  This function will move the interrupt flag from the
 * last packet of itd to the first packet of itdc.
 *
 * This function will return 0 when it succeeds.  If an error
 * happens, it returns a negative value.
 */
int
fwohci_itd_link(volatile struct fwohci_it_dmabuf *itd,
    volatile struct fwohci_it_dmabuf *itdc)
{
	struct fwohci_desc *fd1, *fdc;

	if (itdc->itd_lastdesc == itdc->itd_desc) {
		/* no valid data */
		printf("fwohci_itd_link: no data\n");
		return -1;
	}

	if (itdc->itd_flags & ITD_FLAGS_LOCK) {
		/* used already */
		printf("fwohci_itd_link: link locked\n");
		return -1;
	}
	itdc->itd_flags |= ITD_FLAGS_LOCK;
	/* for the first one */
	itd->itd_flags |= ITD_FLAGS_LOCK;

	DPRINTF(("linking %d after %d: add %d pkts\n",
	    itdc->itd_num, itd->itd_num, itdc->itd_npacket));

	/* XXX: should sync cache */

	fd1 = itd->itd_lastdesc;
	fdc = itdc->itd_desc + 3;	/* OUTPUT_LAST of the first descriptor group */

	/* sanity check */
#define OUTPUT_LAST_DESC (OHCI_DESC_OUTPUT | OHCI_DESC_LAST | OHCI_DESC_BRANCH)
	if ((fd1->fd_flags & OUTPUT_LAST_DESC) != OUTPUT_LAST_DESC) {
		printf("funny! not OUTPUT_LAST descriptor %p\n", fd1);
	}
	if (itd->itd_lastdesc - itd->itd_desc != 3 * itd->itd_npacket) {
		printf("funny! packet number inconsistency %ld <=> %ld\n",
		    (long)(itd->itd_lastdesc - itd->itd_desc),
		    (long)(3*itd->itd_npacket));
	}

	fd1->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
	fdc->fd_flags |= OHCI_DESC_INTR_ALWAYS;
	fd1->fd_branch = itdc->itd_desc_phys | 4;

	itdc->itd_lastdesc->fd_flags |= OHCI_DESC_INTR_ALWAYS;
	/* save the branch addr of the last descriptor and replace it with 0 */
	itdc->itd_savedbranch = itdc->itd_lastdesc->fd_branch;
	itdc->itd_lastdesc->fd_branch = 0;

	DPRINTF(("%s: link (%d %d), add pkt %d/%d branch 0x%x next saved 0x%x\n",
	    itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname,
	    itd->itd_num, itdc->itd_num,
	    itdc->itd_npacket, itdc->itd_maxpacket,
	    (int)fd1->fd_branch, (int)itdc->itd_savedbranch));

	/* XXX: should sync cache */

	return 0;
}


/*
 * int fwohci_itd_unlink(struct fwohci_it_dmabuf *itd)
 *
 * This function will unlink the descriptor chain from the valid
 * chain of descriptors.  The target descriptor chain is specified
 * by the argument.
 */
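/*
 * Pairing note: fwohci_itd_link() above saves the original branch
 * word of the appended buffer's last descriptor in itd_savedbranch
 * and writes 0 there so the engine stops at the end of the chain;
 * fwohci_itd_unlink() below restores that branch word and clears the
 * interrupt flags, returning the buffer to its initial state.
 */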
int
fwohci_itd_unlink(volatile struct fwohci_it_dmabuf *itd)
{
	struct fwohci_desc *fd;

	/* XXX: should sync cache */

	fd = itd->itd_lastdesc;

	fd->fd_branch = itd->itd_savedbranch;
	DPRINTF(("%s: unlink buf %d branch restored 0x%x\n",
	    itd->itd_ctx->itc_sc->sc_sc1394.sc1394_dev.dv_xname,
	    itd->itd_num, (int)fd->fd_branch));

	fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;
	itd->itd_lastdesc = itd->itd_desc;

	fd = itd->itd_desc + 3;	/* 1st OUTPUT_LAST */
	fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;

	/* XXX: should sync cache */

	itd->itd_npacket = 0;
	itd->itd_lastdesc = itd->itd_desc;
	itd->itd_flags &= ~ITD_FLAGS_LOCK;

	return 0;
}


/*
 * static int fwohci_itd_writedata(struct fwohci_it_dmabuf *, int ndata,
 *	struct ieee1394_it_datalist *);
 *
 * This function will return the number of packets written, or a
 * negative value if an error happens.
 */
int
fwohci_itd_writedata(volatile struct fwohci_it_dmabuf *itd, int ndata,
    struct ieee1394_it_datalist *itdata)
{
	int writepkt;
	int i;
	u_int8_t *p;
	struct fwohci_desc *fd;
	u_int32_t *fd_idata;
	const int dspace =
	    itd->itd_maxpacket - itd->itd_npacket < ndata ?
	    itd->itd_maxpacket - itd->itd_npacket : ndata;

	if (itd->itd_flags & ITD_FLAGS_LOCK || dspace == 0) {
		/* it is locked: cannot write anything */
		if (itd->itd_flags & ITD_FLAGS_LOCK) {
			DPRINTF(("fwohci_itd_writedata: buf %d lock flag %s,"
			    " dspace %d\n",
			    itd->itd_num,
			    itd->itd_flags & ITD_FLAGS_LOCK ? "ON" : "OFF",
			    dspace));
			return 0;	/* not an error */
		}
	}

	/* sanity check */
	if (itd->itd_maxpacket < itd->itd_npacket) {
		printf("fwohci_itd_writedata: funny! # pkt > maxpkt"
		    "%d %d\n", itd->itd_npacket, itd->itd_maxpacket);
	}

	p = itd->itd_buf + itd->itd_maxsize * itd->itd_npacket;
	fd = itd->itd_lastdesc;

	DPRINTF(("fwohci_itd_writedata(%d[%p], %d, %p) invoked:\n",
	    itd->itd_num, itd, ndata, itdata));

	for (writepkt = 0; writepkt < dspace; ++writepkt) {
		u_int8_t *p1 = p;
		int cpysize;
		int totalsize = 0;

		DPRINTF(("writing %d ", writepkt));

		for (i = 0; i < 4; ++i) {
			switch (itdata->it_cmd[i] & IEEE1394_IT_CMD_MASK) {
			case IEEE1394_IT_CMD_IMMED:
				memcpy(p1, &itdata->it_u[i].id_data, 8);
				p1 += 8;
				totalsize += 8;
				break;
			case IEEE1394_IT_CMD_PTR:
				cpysize = itdata->it_cmd[i] & IEEE1394_IT_CMD_SIZE;
				DPRINTF(("fwohci_itd_writedata: cpy %d %p\n",
				    cpysize, itdata->it_u[i].id_addr));
				if (totalsize + cpysize > itd->itd_maxsize) {
					/* error: size too big */
					break;
				}
				memcpy(p1, itdata->it_u[i].id_addr, cpysize);
				totalsize += cpysize;
				break;
			case IEEE1394_IT_CMD_NOP:
				break;
			default:
				/* unknown command */
				break;
			}
		}

		/* only for DV test: 488 = 8-byte CIP header + 480-byte payload */
		if (totalsize != 488) {
			printf("error: totalsize %d at %d\n",
			    totalsize, writepkt);
		}

		DPRINTF(("totalsize %d ", totalsize));

		/* fill iso command in OUTPUT_IMMED descriptor */

		/* XXX: sync cache */
		fd += 2;	/* advance to this packet's immediate-data quadlets */
		fd_idata = (u_int32_t *)fd;

		/*
		 * Umm, should tag, channel and tcode be written
		 * previously in itd_construct?
		 */
#if 0
		*fd_idata = OHCI_ITHEADER_VAL(TAG, tag) |
		    OHCI_ITHEADER_VAL(CHAN, ch) |
		    OHCI_ITHEADER_VAL(TCODE, IEEE1394_TCODE_STREAM_DATA);
#endif
		*++fd_idata = totalsize << 16;

		/* fill data in OUTPUT_LAST descriptor */
		++fd;
		/* intr check... */
		if (fd->fd_flags & OHCI_DESC_INTR_ALWAYS) {
			printf("uncleared INTR flag in desc %ld\n",
			    (long)(fd - itd->itd_desc - 1)/3);
		}
		fd->fd_flags &= ~OHCI_DESC_INTR_ALWAYS;

		if ((fd - itd->itd_desc - 1)/3 != itd->itd_maxpacket - 1) {
			u_int32_t bcal;

			bcal = (fd - itd->itd_desc + 1)*sizeof(struct fwohci_desc) + (u_int32_t)itd->itd_desc_phys;
			if (bcal != (fd->fd_branch & 0xfffffff0)) {

				printf("uum, branch differs at %d, %x %x %ld/%d\n",
				    itd->itd_num,
				    bcal,
				    fd->fd_branch,
				    (long)((fd - itd->itd_desc - 1)/3),
				    itd->itd_maxpacket);
			}
		} else {
			/* the last packet */
			if (fd->fd_branch != 0) {
				printf("uum, branch differs at %d, %x %x %ld/%d\n",
				    itd->itd_num,
				    0,
				    fd->fd_branch,
				    (long)((fd - itd->itd_desc - 1)/3),
				    itd->itd_maxpacket);
			}
		}

		/* sanity check */
		if (fd->fd_flags != OUTPUT_LAST_DESC) {
			printf("fwohci_itd_writedata: dmabuf %d desc inconsistent %d\n",
			    itd->itd_num, writepkt + itd->itd_npacket);
			break;
		}
		fd->fd_reqcount = totalsize;
		/* XXX: sync cache */

		++itdata;
		p += itd->itd_maxsize;
	}

	DPRINTF(("loop start %d, %d times %d\n",
	    itd->itd_npacket, dspace, writepkt));

	itd->itd_npacket += writepkt;
	itd->itd_lastdesc = fd;

	return writepkt;
}




int
fwohci_itd_isfilled(volatile struct fwohci_it_dmabuf *itd)
{

	return itd->itd_npacket*2 > itd->itd_maxpacket ? 1 : 0;
}