1 /* $NetBSD: uhci.c,v 1.299 2020/03/15 15:00:14 skrll Exp $ */ 2 3 /* 4 * Copyright (c) 1998, 2004, 2011, 2012 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Lennart Augustsson (lennart@augustsson.net) at 9 * Carlstedt Research & Technology, Jared D. McNeill (jmcneill@invisible.ca) 10 * and Matthew R. Green (mrg@eterna.com.au). 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * USB Universal Host Controller driver. 36 * Handles e.g. PIIX3 and PIIX4. 37 * 38 * UHCI spec: http://www.intel.com/technology/usb/spec.htm 39 * USB spec: http://www.usb.org/developers/docs/ 40 * PIIXn spec: ftp://download.intel.com/design/intarch/datashts/29055002.pdf 41 * ftp://download.intel.com/design/intarch/datashts/29056201.pdf 42 */ 43 44 #include <sys/cdefs.h> 45 __KERNEL_RCSID(0, "$NetBSD: uhci.c,v 1.299 2020/03/15 15:00:14 skrll Exp $"); 46 47 #ifdef _KERNEL_OPT 48 #include "opt_usb.h" 49 #endif 50 51 #include <sys/param.h> 52 53 #include <sys/bus.h> 54 #include <sys/cpu.h> 55 #include <sys/device.h> 56 #include <sys/kernel.h> 57 #include <sys/kmem.h> 58 #include <sys/mutex.h> 59 #include <sys/proc.h> 60 #include <sys/queue.h> 61 #include <sys/select.h> 62 #include <sys/sysctl.h> 63 #include <sys/systm.h> 64 65 #include <machine/endian.h> 66 67 #include <dev/usb/usb.h> 68 #include <dev/usb/usbdi.h> 69 #include <dev/usb/usbdivar.h> 70 #include <dev/usb/usb_mem.h> 71 72 #include <dev/usb/uhcireg.h> 73 #include <dev/usb/uhcivar.h> 74 #include <dev/usb/usbroothub.h> 75 #include <dev/usb/usbhist.h> 76 77 /* Use bandwidth reclamation for control transfers. Some devices choke on it. 
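 * Defining UHCI_CTL_LOOP below makes uhci_add_hs_ctrl() link the last QH back
 * to the control queue (see uhci_add_loop()), so the controller keeps
 * revisiting pending control transfers within a frame instead of waiting for
 * the next one.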
 */
/*#define UHCI_CTL_LOOP */

#ifdef UHCI_DEBUG
uhci_softc_t *thesc;
int uhcinoloop = 0;
#endif

#ifdef USB_DEBUG
#ifndef UHCI_DEBUG
#define uhcidebug 0
#else
static int uhcidebug = 0;

SYSCTL_SETUP(sysctl_hw_uhci_setup, "sysctl hw.uhci setup")
{
	int err;
	const struct sysctlnode *rnode;
	const struct sysctlnode *cnode;

	err = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "uhci",
	    SYSCTL_DESCR("uhci global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);

	if (err)
		goto fail;

	/* control debugging printfs */
	err = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &uhcidebug, sizeof(uhcidebug), CTL_CREATE, CTL_EOL);
	if (err)
		goto fail;

	return;
fail:
	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
}

#endif /* UHCI_DEBUG */
#endif /* USB_DEBUG */

#define DPRINTF(FMT,A,B,C,D)	USBHIST_LOGN(uhcidebug,1,FMT,A,B,C,D)
#define DPRINTFN(N,FMT,A,B,C,D)	USBHIST_LOGN(uhcidebug,N,FMT,A,B,C,D)
#define UHCIHIST_FUNC()		USBHIST_FUNC()
#define UHCIHIST_CALLED(name)	USBHIST_CALLED(uhcidebug)

/*
 * The UHCI controller is little endian, so on big endian machines
 * the data stored in memory needs to be swapped.
 */

struct uhci_pipe {
	struct usbd_pipe pipe;
	int nexttoggle;

	u_char aborting;
	struct usbd_xfer *abortstart, *abortend;

	/* Info needed for different pipe kinds. */
	union {
		/* Control pipe */
		struct {
			uhci_soft_qh_t *sqh;
			usb_dma_t reqdma;
			uhci_soft_td_t *setup;
			uhci_soft_td_t *stat;
		} ctrl;
		/* Interrupt pipe */
		struct {
			int npoll;
			uhci_soft_qh_t **qhs;
		} intr;
		/* Bulk pipe */
		struct {
			uhci_soft_qh_t *sqh;
		} bulk;
		/* Isochronous pipe */
		struct isoc {
			uhci_soft_td_t **stds;
			int next, inuse;
		} isoc;
	};
};

typedef TAILQ_HEAD(ux_completeq, uhci_xfer) ux_completeq_t;

Static void		uhci_globalreset(uhci_softc_t *);
Static usbd_status	uhci_portreset(uhci_softc_t *, int);
Static void		uhci_reset(uhci_softc_t *);
Static usbd_status	uhci_run(uhci_softc_t *, int, int);
Static uhci_soft_td_t	*uhci_alloc_std(uhci_softc_t *);
Static void		uhci_free_std(uhci_softc_t *, uhci_soft_td_t *);
Static void		uhci_free_std_locked(uhci_softc_t *, uhci_soft_td_t *);
Static uhci_soft_qh_t	*uhci_alloc_sqh(uhci_softc_t *);
Static void		uhci_free_sqh(uhci_softc_t *, uhci_soft_qh_t *);
#if 0
Static void		uhci_enter_ctl_q(uhci_softc_t *, uhci_soft_qh_t *,
			    uhci_intr_info_t *);
Static void		uhci_exit_ctl_q(uhci_softc_t *, uhci_soft_qh_t *);
#endif

#if 0
Static void		uhci_free_std_chain(uhci_softc_t *, uhci_soft_td_t *,
			    uhci_soft_td_t *);
#endif
Static int		uhci_alloc_std_chain(uhci_softc_t *, struct usbd_xfer *,
			    int, int, uhci_soft_td_t **);
Static void		uhci_free_stds(uhci_softc_t *, struct uhci_xfer *);

Static void		uhci_reset_std_chain(uhci_softc_t *, struct usbd_xfer *,
			    int, int, int *, uhci_soft_td_t **);

Static void		uhci_poll_hub(void *);
Static void		uhci_check_intr(uhci_softc_t *, struct uhci_xfer *,
			    ux_completeq_t *);
Static void		uhci_idone(struct uhci_xfer *, ux_completeq_t *);

Static void		uhci_abortx(struct usbd_xfer *);

Static void
uhci_add_ls_ctrl(uhci_softc_t *, uhci_soft_qh_t *); 200 Static void uhci_add_hs_ctrl(uhci_softc_t *, uhci_soft_qh_t *); 201 Static void uhci_add_bulk(uhci_softc_t *, uhci_soft_qh_t *); 202 Static void uhci_remove_ls_ctrl(uhci_softc_t *,uhci_soft_qh_t *); 203 Static void uhci_remove_hs_ctrl(uhci_softc_t *,uhci_soft_qh_t *); 204 Static void uhci_remove_bulk(uhci_softc_t *,uhci_soft_qh_t *); 205 Static void uhci_add_loop(uhci_softc_t *); 206 Static void uhci_rem_loop(uhci_softc_t *); 207 208 Static usbd_status uhci_setup_isoc(struct usbd_pipe *); 209 210 Static struct usbd_xfer * 211 uhci_allocx(struct usbd_bus *, unsigned int); 212 Static void uhci_freex(struct usbd_bus *, struct usbd_xfer *); 213 Static bool uhci_dying(struct usbd_bus *); 214 Static void uhci_get_lock(struct usbd_bus *, kmutex_t **); 215 Static int uhci_roothub_ctrl(struct usbd_bus *, 216 usb_device_request_t *, void *, int); 217 218 Static int uhci_device_ctrl_init(struct usbd_xfer *); 219 Static void uhci_device_ctrl_fini(struct usbd_xfer *); 220 Static usbd_status uhci_device_ctrl_transfer(struct usbd_xfer *); 221 Static usbd_status uhci_device_ctrl_start(struct usbd_xfer *); 222 Static void uhci_device_ctrl_abort(struct usbd_xfer *); 223 Static void uhci_device_ctrl_close(struct usbd_pipe *); 224 Static void uhci_device_ctrl_done(struct usbd_xfer *); 225 226 Static int uhci_device_intr_init(struct usbd_xfer *); 227 Static void uhci_device_intr_fini(struct usbd_xfer *); 228 Static usbd_status uhci_device_intr_transfer(struct usbd_xfer *); 229 Static usbd_status uhci_device_intr_start(struct usbd_xfer *); 230 Static void uhci_device_intr_abort(struct usbd_xfer *); 231 Static void uhci_device_intr_close(struct usbd_pipe *); 232 Static void uhci_device_intr_done(struct usbd_xfer *); 233 234 Static int uhci_device_bulk_init(struct usbd_xfer *); 235 Static void uhci_device_bulk_fini(struct usbd_xfer *); 236 Static usbd_status uhci_device_bulk_transfer(struct usbd_xfer *); 237 Static usbd_status uhci_device_bulk_start(struct usbd_xfer *); 238 Static void uhci_device_bulk_abort(struct usbd_xfer *); 239 Static void uhci_device_bulk_close(struct usbd_pipe *); 240 Static void uhci_device_bulk_done(struct usbd_xfer *); 241 242 Static int uhci_device_isoc_init(struct usbd_xfer *); 243 Static void uhci_device_isoc_fini(struct usbd_xfer *); 244 Static usbd_status uhci_device_isoc_transfer(struct usbd_xfer *); 245 Static void uhci_device_isoc_abort(struct usbd_xfer *); 246 Static void uhci_device_isoc_close(struct usbd_pipe *); 247 Static void uhci_device_isoc_done(struct usbd_xfer *); 248 249 Static usbd_status uhci_root_intr_transfer(struct usbd_xfer *); 250 Static usbd_status uhci_root_intr_start(struct usbd_xfer *); 251 Static void uhci_root_intr_abort(struct usbd_xfer *); 252 Static void uhci_root_intr_close(struct usbd_pipe *); 253 Static void uhci_root_intr_done(struct usbd_xfer *); 254 255 Static usbd_status uhci_open(struct usbd_pipe *); 256 Static void uhci_poll(struct usbd_bus *); 257 Static void uhci_softintr(void *); 258 259 Static void uhci_add_intr(uhci_softc_t *, uhci_soft_qh_t *); 260 Static void uhci_remove_intr(uhci_softc_t *, uhci_soft_qh_t *); 261 Static usbd_status uhci_device_setintr(uhci_softc_t *, 262 struct uhci_pipe *, int); 263 264 Static void uhci_device_clear_toggle(struct usbd_pipe *); 265 Static void uhci_noop(struct usbd_pipe *); 266 267 static inline uhci_soft_qh_t * 268 uhci_find_prev_qh(uhci_soft_qh_t *, uhci_soft_qh_t *); 269 270 #ifdef UHCI_DEBUG 271 Static void uhci_dump_all(uhci_softc_t *); 272 
Static void uhci_dumpregs(uhci_softc_t *); 273 Static void uhci_dump_qhs(uhci_soft_qh_t *); 274 Static void uhci_dump_qh(uhci_soft_qh_t *); 275 Static void uhci_dump_tds(uhci_soft_td_t *); 276 Static void uhci_dump_td(uhci_soft_td_t *); 277 Static void uhci_dump_ii(struct uhci_xfer *); 278 void uhci_dump(void); 279 #endif 280 281 #define UBARR(sc) bus_space_barrier((sc)->iot, (sc)->ioh, 0, (sc)->sc_size, \ 282 BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE) 283 #define UWRITE1(sc, r, x) \ 284 do { UBARR(sc); bus_space_write_1((sc)->iot, (sc)->ioh, (r), (x)); \ 285 } while (/*CONSTCOND*/0) 286 #define UWRITE2(sc, r, x) \ 287 do { UBARR(sc); bus_space_write_2((sc)->iot, (sc)->ioh, (r), (x)); \ 288 } while (/*CONSTCOND*/0) 289 #define UWRITE4(sc, r, x) \ 290 do { UBARR(sc); bus_space_write_4((sc)->iot, (sc)->ioh, (r), (x)); \ 291 } while (/*CONSTCOND*/0) 292 293 static __inline uint8_t 294 UREAD1(uhci_softc_t *sc, bus_size_t r) 295 { 296 297 UBARR(sc); 298 return bus_space_read_1(sc->iot, sc->ioh, r); 299 } 300 301 static __inline uint16_t 302 UREAD2(uhci_softc_t *sc, bus_size_t r) 303 { 304 305 UBARR(sc); 306 return bus_space_read_2(sc->iot, sc->ioh, r); 307 } 308 309 #ifdef UHCI_DEBUG 310 static __inline uint32_t 311 UREAD4(uhci_softc_t *sc, bus_size_t r) 312 { 313 314 UBARR(sc); 315 return bus_space_read_4(sc->iot, sc->ioh, r); 316 } 317 #endif 318 319 #define UHCICMD(sc, cmd) UWRITE2(sc, UHCI_CMD, cmd) 320 #define UHCISTS(sc) UREAD2(sc, UHCI_STS) 321 322 #define UHCI_RESET_TIMEOUT 100 /* ms, reset timeout */ 323 324 #define UHCI_CURFRAME(sc) (UREAD2(sc, UHCI_FRNUM) & UHCI_FRNUM_MASK) 325 326 const struct usbd_bus_methods uhci_bus_methods = { 327 .ubm_open = uhci_open, 328 .ubm_softint = uhci_softintr, 329 .ubm_dopoll = uhci_poll, 330 .ubm_allocx = uhci_allocx, 331 .ubm_freex = uhci_freex, 332 .ubm_abortx = uhci_abortx, 333 .ubm_dying = uhci_dying, 334 .ubm_getlock = uhci_get_lock, 335 .ubm_rhctrl = uhci_roothub_ctrl, 336 }; 337 338 const struct usbd_pipe_methods uhci_root_intr_methods = { 339 .upm_transfer = uhci_root_intr_transfer, 340 .upm_start = uhci_root_intr_start, 341 .upm_abort = uhci_root_intr_abort, 342 .upm_close = uhci_root_intr_close, 343 .upm_cleartoggle = uhci_noop, 344 .upm_done = uhci_root_intr_done, 345 }; 346 347 const struct usbd_pipe_methods uhci_device_ctrl_methods = { 348 .upm_init = uhci_device_ctrl_init, 349 .upm_fini = uhci_device_ctrl_fini, 350 .upm_transfer = uhci_device_ctrl_transfer, 351 .upm_start = uhci_device_ctrl_start, 352 .upm_abort = uhci_device_ctrl_abort, 353 .upm_close = uhci_device_ctrl_close, 354 .upm_cleartoggle = uhci_noop, 355 .upm_done = uhci_device_ctrl_done, 356 }; 357 358 const struct usbd_pipe_methods uhci_device_intr_methods = { 359 .upm_init = uhci_device_intr_init, 360 .upm_fini = uhci_device_intr_fini, 361 .upm_transfer = uhci_device_intr_transfer, 362 .upm_start = uhci_device_intr_start, 363 .upm_abort = uhci_device_intr_abort, 364 .upm_close = uhci_device_intr_close, 365 .upm_cleartoggle = uhci_device_clear_toggle, 366 .upm_done = uhci_device_intr_done, 367 }; 368 369 const struct usbd_pipe_methods uhci_device_bulk_methods = { 370 .upm_init = uhci_device_bulk_init, 371 .upm_fini = uhci_device_bulk_fini, 372 .upm_transfer = uhci_device_bulk_transfer, 373 .upm_start = uhci_device_bulk_start, 374 .upm_abort = uhci_device_bulk_abort, 375 .upm_close = uhci_device_bulk_close, 376 .upm_cleartoggle = uhci_device_clear_toggle, 377 .upm_done = uhci_device_bulk_done, 378 }; 379 380 const struct usbd_pipe_methods uhci_device_isoc_methods = { 381 
.upm_init = uhci_device_isoc_init, 382 .upm_fini = uhci_device_isoc_fini, 383 .upm_transfer = uhci_device_isoc_transfer, 384 .upm_abort = uhci_device_isoc_abort, 385 .upm_close = uhci_device_isoc_close, 386 .upm_cleartoggle = uhci_noop, 387 .upm_done = uhci_device_isoc_done, 388 }; 389 390 static inline void 391 uhci_add_intr_list(uhci_softc_t *sc, struct uhci_xfer *ux) 392 { 393 394 TAILQ_INSERT_TAIL(&sc->sc_intrhead, ux, ux_list); 395 } 396 397 static inline void 398 uhci_del_intr_list(uhci_softc_t *sc, struct uhci_xfer *ux) 399 { 400 401 TAILQ_REMOVE(&sc->sc_intrhead, ux, ux_list); 402 } 403 404 static inline uhci_soft_qh_t * 405 uhci_find_prev_qh(uhci_soft_qh_t *pqh, uhci_soft_qh_t *sqh) 406 { 407 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 408 DPRINTFN(15, "pqh=%#jx sqh=%#jx", (uintptr_t)pqh, (uintptr_t)sqh, 0, 0); 409 410 for (; pqh->hlink != sqh; pqh = pqh->hlink) { 411 #if defined(DIAGNOSTIC) || defined(UHCI_DEBUG) 412 usb_syncmem(&pqh->dma, 413 pqh->offs + offsetof(uhci_qh_t, qh_hlink), 414 sizeof(pqh->qh.qh_hlink), 415 BUS_DMASYNC_POSTWRITE); 416 if (le32toh(pqh->qh.qh_hlink) & UHCI_PTR_T) { 417 printf("%s: QH not found\n", __func__); 418 return NULL; 419 } 420 #endif 421 } 422 return pqh; 423 } 424 425 void 426 uhci_globalreset(uhci_softc_t *sc) 427 { 428 UHCICMD(sc, UHCI_CMD_GRESET); /* global reset */ 429 usb_delay_ms(&sc->sc_bus, USB_BUS_RESET_DELAY); /* wait a little */ 430 UHCICMD(sc, 0); /* do nothing */ 431 } 432 433 int 434 uhci_init(uhci_softc_t *sc) 435 { 436 usbd_status err; 437 int i, j; 438 uhci_soft_qh_t *clsqh, *chsqh, *bsqh, *sqh, *lsqh; 439 uhci_soft_td_t *std; 440 441 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 442 443 #ifdef UHCI_DEBUG 444 thesc = sc; 445 446 if (uhcidebug >= 2) 447 uhci_dumpregs(sc); 448 #endif 449 450 sc->sc_suspend = PWR_RESUME; 451 452 UWRITE2(sc, UHCI_INTR, 0); /* disable interrupts */ 453 uhci_globalreset(sc); /* reset the controller */ 454 uhci_reset(sc); 455 456 /* Allocate and initialize real frame array. */ 457 err = usb_allocmem(&sc->sc_bus, 458 UHCI_FRAMELIST_COUNT * sizeof(uhci_physaddr_t), 459 UHCI_FRAMELIST_ALIGN, &sc->sc_dma); 460 if (err) 461 return err; 462 sc->sc_pframes = KERNADDR(&sc->sc_dma, 0); 463 /* set frame number to 0 */ 464 UWRITE2(sc, UHCI_FRNUM, 0); 465 /* set frame list */ 466 UWRITE4(sc, UHCI_FLBASEADDR, DMAADDR(&sc->sc_dma, 0)); 467 468 /* Initialise mutex early for uhci_alloc_* */ 469 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB); 470 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB); 471 472 /* 473 * Allocate a TD, inactive, that hangs from the last QH. 474 * This is to avoid a bug in the PIIX that makes it run berserk 475 * otherwise. 
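 * The TD is never executed: its status word is zero (not ACTIVE) and its
 * link pointer is terminated, so it only keeps the last QH's element
 * pointer pointing at something harmless.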
476 */ 477 std = uhci_alloc_std(sc); 478 if (std == NULL) 479 return ENOMEM; 480 std->link.std = NULL; 481 std->td.td_link = htole32(UHCI_PTR_T); 482 std->td.td_status = htole32(0); /* inactive */ 483 std->td.td_token = htole32(0); 484 std->td.td_buffer = htole32(0); 485 usb_syncmem(&std->dma, std->offs, sizeof(std->td), 486 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 487 488 /* Allocate the dummy QH marking the end and used for looping the QHs.*/ 489 lsqh = uhci_alloc_sqh(sc); 490 if (lsqh == NULL) 491 goto fail1; 492 lsqh->hlink = NULL; 493 lsqh->qh.qh_hlink = htole32(UHCI_PTR_T); /* end of QH chain */ 494 lsqh->elink = std; 495 lsqh->qh.qh_elink = htole32(std->physaddr | UHCI_PTR_TD); 496 sc->sc_last_qh = lsqh; 497 usb_syncmem(&lsqh->dma, lsqh->offs, sizeof(lsqh->qh), 498 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 499 500 /* Allocate the dummy QH where bulk traffic will be queued. */ 501 bsqh = uhci_alloc_sqh(sc); 502 if (bsqh == NULL) 503 goto fail2; 504 bsqh->hlink = lsqh; 505 bsqh->qh.qh_hlink = htole32(lsqh->physaddr | UHCI_PTR_QH); 506 bsqh->elink = NULL; 507 bsqh->qh.qh_elink = htole32(UHCI_PTR_T); 508 sc->sc_bulk_start = sc->sc_bulk_end = bsqh; 509 usb_syncmem(&bsqh->dma, bsqh->offs, sizeof(bsqh->qh), 510 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 511 512 /* Allocate dummy QH where high speed control traffic will be queued. */ 513 chsqh = uhci_alloc_sqh(sc); 514 if (chsqh == NULL) 515 goto fail3; 516 chsqh->hlink = bsqh; 517 chsqh->qh.qh_hlink = htole32(bsqh->physaddr | UHCI_PTR_QH); 518 chsqh->elink = NULL; 519 chsqh->qh.qh_elink = htole32(UHCI_PTR_T); 520 sc->sc_hctl_start = sc->sc_hctl_end = chsqh; 521 usb_syncmem(&chsqh->dma, chsqh->offs, sizeof(chsqh->qh), 522 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 523 524 /* Allocate dummy QH where control traffic will be queued. */ 525 clsqh = uhci_alloc_sqh(sc); 526 if (clsqh == NULL) 527 goto fail4; 528 clsqh->hlink = chsqh; 529 clsqh->qh.qh_hlink = htole32(chsqh->physaddr | UHCI_PTR_QH); 530 clsqh->elink = NULL; 531 clsqh->qh.qh_elink = htole32(UHCI_PTR_T); 532 sc->sc_lctl_start = sc->sc_lctl_end = clsqh; 533 usb_syncmem(&clsqh->dma, clsqh->offs, sizeof(clsqh->qh), 534 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 535 536 /* 537 * Make all (virtual) frame list pointers point to the interrupt 538 * queue heads and the interrupt queue heads at the control 539 * queue head and point the physical frame list to the virtual. 
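 *
 * The static schedule for every frame then looks like:
 *
 *   pframes[i] -> iso TD (inactive) -> intr QH -> low speed ctrl QH
 *       -> high speed ctrl QH -> bulk QH -> last QH (inactive TD)
 *
 * and transfers are queued onto the QH that matches their type.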
 */
	for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
		std = uhci_alloc_std(sc);
		sqh = uhci_alloc_sqh(sc);
		if (std == NULL || sqh == NULL)
			return USBD_NOMEM;
		std->link.sqh = sqh;
		std->td.td_link = htole32(sqh->physaddr | UHCI_PTR_QH);
		std->td.td_status = htole32(UHCI_TD_IOS);	/* iso, inactive */
		std->td.td_token = htole32(0);
		std->td.td_buffer = htole32(0);
		usb_syncmem(&std->dma, std->offs, sizeof(std->td),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
		sqh->hlink = clsqh;
		sqh->qh.qh_hlink = htole32(clsqh->physaddr | UHCI_PTR_QH);
		sqh->elink = NULL;
		sqh->qh.qh_elink = htole32(UHCI_PTR_T);
		usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
		sc->sc_vframes[i].htd = std;
		sc->sc_vframes[i].etd = std;
		sc->sc_vframes[i].hqh = sqh;
		sc->sc_vframes[i].eqh = sqh;
		for (j = i;
		     j < UHCI_FRAMELIST_COUNT;
		     j += UHCI_VFRAMELIST_COUNT)
			sc->sc_pframes[j] = htole32(std->physaddr);
	}
	usb_syncmem(&sc->sc_dma, 0,
	    UHCI_FRAMELIST_COUNT * sizeof(uhci_physaddr_t),
	    BUS_DMASYNC_PREWRITE);

	TAILQ_INIT(&sc->sc_intrhead);

	sc->sc_xferpool = pool_cache_init(sizeof(struct uhci_xfer), 0, 0, 0,
	    "uhcixfer", NULL, IPL_USB, NULL, NULL, NULL);

	callout_init(&sc->sc_poll_handle, CALLOUT_MPSAFE);
	callout_setfunc(&sc->sc_poll_handle, uhci_poll_hub, sc);

	/* Set up the bus struct. */
	sc->sc_bus.ub_methods = &uhci_bus_methods;
	sc->sc_bus.ub_pipesize = sizeof(struct uhci_pipe);
	sc->sc_bus.ub_usedma = true;

	UHCICMD(sc, UHCI_CMD_MAXP);	/* Assume 64 byte packets at frame end */

	DPRINTF("Enabling...", 0, 0, 0, 0);

	err = uhci_run(sc, 1, 0);		/* and here we go... */
	UWRITE2(sc, UHCI_INTR, UHCI_INTR_TOCRCIE | UHCI_INTR_RIE |
	    UHCI_INTR_IOCE | UHCI_INTR_SPIE);	/* enable interrupts */
	return err;

fail4:
	uhci_free_sqh(sc, chsqh);
fail3:
	uhci_free_sqh(sc, bsqh);
fail2:
	uhci_free_sqh(sc, lsqh);
fail1:
	uhci_free_std(sc, std);

	return ENOMEM;
}

int
uhci_activate(device_t self, enum devact act)
{
	struct uhci_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		sc->sc_dying = 1;
		return 0;
	default:
		return EOPNOTSUPP;
	}
}

void
uhci_childdet(device_t self, device_t child)
{
	struct uhci_softc *sc = device_private(self);

	KASSERT(sc->sc_child == child);
	sc->sc_child = NULL;
}

int
uhci_detach(struct uhci_softc *sc, int flags)
{
	int rv = 0;

	if (sc->sc_child != NULL)
		rv = config_detach(sc->sc_child, flags);

	if (rv != 0)
		return rv;

	callout_halt(&sc->sc_poll_handle, NULL);
	callout_destroy(&sc->sc_poll_handle);

	mutex_destroy(&sc->sc_lock);
	mutex_destroy(&sc->sc_intr_lock);

	pool_cache_destroy(sc->sc_xferpool);

	/* XXX free other data structures XXX */

	return rv;
}

struct usbd_xfer *
uhci_allocx(struct usbd_bus *bus, unsigned int nframes)
{
	struct uhci_softc *sc = UHCI_BUS2SC(bus);
	struct usbd_xfer *xfer;

	xfer = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
	if (xfer != NULL) {
		memset(xfer, 0, sizeof(struct uhci_xfer));

#ifdef DIAGNOSTIC
		struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
		uxfer->ux_isdone = true;
		xfer->ux_state = XFER_BUSY;
#endif
	}
	return xfer;
}

void
uhci_freex(struct
usbd_bus *bus, struct usbd_xfer *xfer) 675 { 676 struct uhci_softc *sc = UHCI_BUS2SC(bus); 677 struct uhci_xfer *uxfer __diagused = UHCI_XFER2UXFER(xfer); 678 679 KASSERTMSG(xfer->ux_state == XFER_BUSY || 680 xfer->ux_status == USBD_NOT_STARTED, 681 "xfer %p state %d\n", xfer, xfer->ux_state); 682 KASSERTMSG(uxfer->ux_isdone || xfer->ux_status == USBD_NOT_STARTED, 683 "xfer %p not done\n", xfer); 684 #ifdef DIAGNOSTIC 685 xfer->ux_state = XFER_FREE; 686 #endif 687 pool_cache_put(sc->sc_xferpool, xfer); 688 } 689 690 Static bool 691 uhci_dying(struct usbd_bus *bus) 692 { 693 struct uhci_softc *sc = UHCI_BUS2SC(bus); 694 695 return sc->sc_dying; 696 } 697 698 Static void 699 uhci_get_lock(struct usbd_bus *bus, kmutex_t **lock) 700 { 701 struct uhci_softc *sc = UHCI_BUS2SC(bus); 702 703 *lock = &sc->sc_lock; 704 } 705 706 707 /* 708 * Handle suspend/resume. 709 * 710 * We need to switch to polling mode here, because this routine is 711 * called from an interrupt context. This is all right since we 712 * are almost suspended anyway. 713 */ 714 bool 715 uhci_resume(device_t dv, const pmf_qual_t *qual) 716 { 717 uhci_softc_t *sc = device_private(dv); 718 int cmd; 719 720 mutex_spin_enter(&sc->sc_intr_lock); 721 722 cmd = UREAD2(sc, UHCI_CMD); 723 sc->sc_bus.ub_usepolling++; 724 UWRITE2(sc, UHCI_INTR, 0); 725 uhci_globalreset(sc); 726 uhci_reset(sc); 727 if (cmd & UHCI_CMD_RS) 728 uhci_run(sc, 0, 1); 729 730 /* restore saved state */ 731 UWRITE4(sc, UHCI_FLBASEADDR, DMAADDR(&sc->sc_dma, 0)); 732 UWRITE2(sc, UHCI_FRNUM, sc->sc_saved_frnum); 733 UWRITE1(sc, UHCI_SOF, sc->sc_saved_sof); 734 735 UHCICMD(sc, cmd | UHCI_CMD_FGR); /* force resume */ 736 usb_delay_ms_locked(&sc->sc_bus, USB_RESUME_DELAY, &sc->sc_intr_lock); 737 UHCICMD(sc, cmd & ~UHCI_CMD_EGSM); /* back to normal */ 738 UWRITE2(sc, UHCI_INTR, UHCI_INTR_TOCRCIE | 739 UHCI_INTR_RIE | UHCI_INTR_IOCE | UHCI_INTR_SPIE); 740 UHCICMD(sc, UHCI_CMD_MAXP); 741 uhci_run(sc, 1, 1); /* and start traffic again */ 742 usb_delay_ms_locked(&sc->sc_bus, USB_RESUME_RECOVERY, &sc->sc_intr_lock); 743 sc->sc_bus.ub_usepolling--; 744 if (sc->sc_intr_xfer != NULL) 745 callout_schedule(&sc->sc_poll_handle, sc->sc_ival); 746 #ifdef UHCI_DEBUG 747 if (uhcidebug >= 2) 748 uhci_dumpregs(sc); 749 #endif 750 751 sc->sc_suspend = PWR_RESUME; 752 mutex_spin_exit(&sc->sc_intr_lock); 753 754 return true; 755 } 756 757 bool 758 uhci_suspend(device_t dv, const pmf_qual_t *qual) 759 { 760 uhci_softc_t *sc = device_private(dv); 761 int cmd; 762 763 mutex_spin_enter(&sc->sc_intr_lock); 764 765 cmd = UREAD2(sc, UHCI_CMD); 766 767 #ifdef UHCI_DEBUG 768 if (uhcidebug >= 2) 769 uhci_dumpregs(sc); 770 #endif 771 sc->sc_suspend = PWR_SUSPEND; 772 if (sc->sc_intr_xfer != NULL) 773 callout_halt(&sc->sc_poll_handle, &sc->sc_intr_lock); 774 sc->sc_bus.ub_usepolling++; 775 776 uhci_run(sc, 0, 1); /* stop the controller */ 777 cmd &= ~UHCI_CMD_RS; 778 779 /* save some state if BIOS doesn't */ 780 sc->sc_saved_frnum = UREAD2(sc, UHCI_FRNUM); 781 sc->sc_saved_sof = UREAD1(sc, UHCI_SOF); 782 783 UWRITE2(sc, UHCI_INTR, 0); /* disable intrs */ 784 785 UHCICMD(sc, cmd | UHCI_CMD_EGSM); /* enter suspend */ 786 usb_delay_ms_locked(&sc->sc_bus, USB_RESUME_WAIT, &sc->sc_intr_lock); 787 sc->sc_bus.ub_usepolling--; 788 789 mutex_spin_exit(&sc->sc_intr_lock); 790 791 return true; 792 } 793 794 #ifdef UHCI_DEBUG 795 Static void 796 uhci_dumpregs(uhci_softc_t *sc) 797 { 798 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 799 DPRINTF("cmd =%04jx sts =%04jx intr =%04jx frnum =%04jx", 800 UREAD2(sc, UHCI_CMD), 
UREAD2(sc, UHCI_STS), 801 UREAD2(sc, UHCI_INTR), UREAD2(sc, UHCI_FRNUM)); 802 DPRINTF("sof =%04jx portsc1=%04jx portsc2=%04jx flbase=%08jx", 803 UREAD1(sc, UHCI_SOF), UREAD2(sc, UHCI_PORTSC1), 804 UREAD2(sc, UHCI_PORTSC2), UREAD4(sc, UHCI_FLBASEADDR)); 805 } 806 807 void 808 uhci_dump_td(uhci_soft_td_t *p) 809 { 810 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 811 812 usb_syncmem(&p->dma, p->offs, sizeof(p->td), 813 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 814 815 DPRINTF("TD(%#jx) at 0x%08jx", (uintptr_t)p, p->physaddr, 0, 0); 816 DPRINTF(" link=0x%08jx status=0x%08jx " 817 "token=0x%08x buffer=0x%08x", 818 le32toh(p->td.td_link), 819 le32toh(p->td.td_status), 820 le32toh(p->td.td_token), 821 le32toh(p->td.td_buffer)); 822 823 DPRINTF("bitstuff=%jd crcto =%jd nak =%jd babble =%jd", 824 !!(le32toh(p->td.td_status) & UHCI_TD_BITSTUFF), 825 !!(le32toh(p->td.td_status) & UHCI_TD_CRCTO), 826 !!(le32toh(p->td.td_status) & UHCI_TD_NAK), 827 !!(le32toh(p->td.td_status) & UHCI_TD_BABBLE)); 828 DPRINTF("dbuffer =%jd stalled =%jd active =%jd ioc =%jd", 829 !!(le32toh(p->td.td_status) & UHCI_TD_DBUFFER), 830 !!(le32toh(p->td.td_status) & UHCI_TD_STALLED), 831 !!(le32toh(p->td.td_status) & UHCI_TD_ACTIVE), 832 !!(le32toh(p->td.td_status) & UHCI_TD_IOC)); 833 DPRINTF("ios =%jd ls =%jd spd =%jd", 834 !!(le32toh(p->td.td_status) & UHCI_TD_IOS), 835 !!(le32toh(p->td.td_status) & UHCI_TD_LS), 836 !!(le32toh(p->td.td_status) & UHCI_TD_SPD), 0); 837 DPRINTF("errcnt =%d actlen =%d pid=%02x", 838 UHCI_TD_GET_ERRCNT(le32toh(p->td.td_status)), 839 UHCI_TD_GET_ACTLEN(le32toh(p->td.td_status)), 840 UHCI_TD_GET_PID(le32toh(p->td.td_token)), 0); 841 DPRINTF("addr=%jd endpt=%jd D=%jd maxlen=%jd,", 842 UHCI_TD_GET_DEVADDR(le32toh(p->td.td_token)), 843 UHCI_TD_GET_ENDPT(le32toh(p->td.td_token)), 844 UHCI_TD_GET_DT(le32toh(p->td.td_token)), 845 UHCI_TD_GET_MAXLEN(le32toh(p->td.td_token))); 846 } 847 848 void 849 uhci_dump_qh(uhci_soft_qh_t *sqh) 850 { 851 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 852 853 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), 854 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 855 856 DPRINTF("QH(%#jx) at 0x%08jx: hlink=%08jx elink=%08jx", (uintptr_t)sqh, 857 (int)sqh->physaddr, le32toh(sqh->qh.qh_hlink), 858 le32toh(sqh->qh.qh_elink)); 859 860 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), BUS_DMASYNC_PREREAD); 861 } 862 863 864 #if 1 865 void 866 uhci_dump(void) 867 { 868 uhci_dump_all(thesc); 869 } 870 #endif 871 872 void 873 uhci_dump_all(uhci_softc_t *sc) 874 { 875 uhci_dumpregs(sc); 876 /*printf("framelist[i].link = %08x\n", sc->sc_framelist[0].link);*/ 877 uhci_dump_qhs(sc->sc_lctl_start); 878 } 879 880 881 void 882 uhci_dump_qhs(uhci_soft_qh_t *sqh) 883 { 884 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 885 886 uhci_dump_qh(sqh); 887 888 /* 889 * uhci_dump_qhs displays all the QHs and TDs from the given QH onwards 890 * Traverses sideways first, then down. 891 * 892 * QH1 893 * QH2 894 * No QH 895 * TD2.1 896 * TD2.2 897 * TD1.1 898 * etc. 899 * 900 * TD2.x being the TDs queued at QH2 and QH1 being referenced from QH1. 
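 * (TD1.x are the TDs queued at QH1.)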
901 */ 902 903 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), 904 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 905 if (sqh->hlink != NULL && !(le32toh(sqh->qh.qh_hlink) & UHCI_PTR_T)) 906 uhci_dump_qhs(sqh->hlink); 907 else 908 DPRINTF("No QH", 0, 0, 0, 0); 909 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), BUS_DMASYNC_PREREAD); 910 911 if (sqh->elink != NULL && !(le32toh(sqh->qh.qh_elink) & UHCI_PTR_T)) 912 uhci_dump_tds(sqh->elink); 913 else 914 DPRINTF("No QH", 0, 0, 0, 0); 915 } 916 917 void 918 uhci_dump_tds(uhci_soft_td_t *std) 919 { 920 uhci_soft_td_t *td; 921 int stop; 922 923 for (td = std; td != NULL; td = td->link.std) { 924 uhci_dump_td(td); 925 926 /* 927 * Check whether the link pointer in this TD marks 928 * the link pointer as end of queue. This avoids 929 * printing the free list in case the queue/TD has 930 * already been moved there (seatbelt). 931 */ 932 usb_syncmem(&td->dma, td->offs + offsetof(uhci_td_t, td_link), 933 sizeof(td->td.td_link), 934 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 935 stop = (le32toh(td->td.td_link) & UHCI_PTR_T || 936 le32toh(td->td.td_link) == 0); 937 usb_syncmem(&td->dma, td->offs + offsetof(uhci_td_t, td_link), 938 sizeof(td->td.td_link), BUS_DMASYNC_PREREAD); 939 if (stop) 940 break; 941 } 942 } 943 944 Static void 945 uhci_dump_ii(struct uhci_xfer *ux) 946 { 947 struct usbd_pipe *pipe; 948 usb_endpoint_descriptor_t *ed; 949 struct usbd_device *dev; 950 951 if (ux == NULL) { 952 printf("ux NULL\n"); 953 return; 954 } 955 pipe = ux->ux_xfer.ux_pipe; 956 if (pipe == NULL) { 957 printf("ux %p: done=%d pipe=NULL\n", ux, ux->ux_isdone); 958 return; 959 } 960 if (pipe->up_endpoint == NULL) { 961 printf("ux %p: done=%d pipe=%p pipe->up_endpoint=NULL\n", 962 ux, ux->ux_isdone, pipe); 963 return; 964 } 965 if (pipe->up_dev == NULL) { 966 printf("ux %p: done=%d pipe=%p pipe->up_dev=NULL\n", 967 ux, ux->ux_isdone, pipe); 968 return; 969 } 970 ed = pipe->up_endpoint->ue_edesc; 971 dev = pipe->up_dev; 972 printf("ux %p: done=%d dev=%p vid=0x%04x pid=0x%04x addr=%d pipe=%p ep=0x%02x attr=0x%02x\n", 973 ux, ux->ux_isdone, dev, 974 UGETW(dev->ud_ddesc.idVendor), 975 UGETW(dev->ud_ddesc.idProduct), 976 dev->ud_addr, pipe, 977 ed->bEndpointAddress, ed->bmAttributes); 978 } 979 980 void uhci_dump_iis(struct uhci_softc *sc); 981 void 982 uhci_dump_iis(struct uhci_softc *sc) 983 { 984 struct uhci_xfer *ux; 985 986 printf("interrupt list:\n"); 987 TAILQ_FOREACH(ux, &sc->sc_intrhead, ux_list) 988 uhci_dump_ii(ux); 989 } 990 991 void iidump(void); 992 void iidump(void) { uhci_dump_iis(thesc); } 993 994 #endif 995 996 /* 997 * This routine is executed periodically and simulates interrupts 998 * from the root controller interrupt pipe for port status change. 999 */ 1000 void 1001 uhci_poll_hub(void *addr) 1002 { 1003 struct uhci_softc *sc = addr; 1004 struct usbd_xfer *xfer; 1005 u_char *p; 1006 1007 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1008 1009 mutex_enter(&sc->sc_lock); 1010 1011 /* 1012 * If the intr xfer has completed or been synchronously 1013 * aborted, we have nothing to do. 1014 */ 1015 xfer = sc->sc_intr_xfer; 1016 if (xfer == NULL) 1017 goto out; 1018 KASSERT(xfer->ux_status == USBD_IN_PROGRESS); 1019 1020 /* 1021 * If the intr xfer for which we were scheduled is done, and 1022 * another intr xfer has been submitted, let that one be dealt 1023 * with when the callout fires again. 1024 * 1025 * The call to callout_pending is racy, but the the transition 1026 * from pending to invoking happens atomically. 
 * The callout_ack ensures callout_invoking does not return true
 * due to this invocation of the callout; the lock ensures the
 * next invocation of the callout cannot callout_ack (unless it
 * had already run to completion and nulled sc->sc_intr_xfer,
 * in which case we would have bailed out already).
 */
	callout_ack(&sc->sc_poll_handle);
	if (callout_pending(&sc->sc_poll_handle) ||
	    callout_invoking(&sc->sc_poll_handle))
		goto out;

	/*
	 * Check flags for the two interrupt ports, and set them in the
	 * buffer if an interrupt arrived; otherwise arrange to check
	 * again in a while.
	 */
	p = xfer->ux_buf;
	p[0] = 0;
	if (UREAD2(sc, UHCI_PORTSC1) & (UHCI_PORTSC_CSC|UHCI_PORTSC_OCIC))
		p[0] |= 1<<1;
	if (UREAD2(sc, UHCI_PORTSC2) & (UHCI_PORTSC_CSC|UHCI_PORTSC_OCIC))
		p[0] |= 1<<2;
	if (p[0] == 0) {
		/*
		 * No change -- try again in a while, unless we're
		 * suspending, in which case we'll try again after
		 * resume.
		 */
		if (sc->sc_suspend != PWR_SUSPEND)
			callout_schedule(&sc->sc_poll_handle, sc->sc_ival);
		goto out;
	}

	/*
	 * Interrupt completed, and the xfer has not been completed or
	 * synchronously aborted.  Complete the xfer now.
	 */
	xfer->ux_actlen = 1;
	xfer->ux_status = USBD_NORMAL_COMPLETION;
#ifdef DIAGNOSTIC
	UHCI_XFER2UXFER(xfer)->ux_isdone = true;
#endif
	usb_transfer_complete(xfer);

out:	mutex_exit(&sc->sc_lock);
}

void
uhci_root_intr_done(struct usbd_xfer *xfer)
{
	struct uhci_softc *sc = UHCI_XFER2SC(xfer);

	KASSERT(mutex_owned(&sc->sc_lock));

	/* Claim the xfer so it doesn't get completed again. */
	KASSERT(sc->sc_intr_xfer == xfer);
	KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
	sc->sc_intr_xfer = NULL;
}

/*
 * Let the last QH loop back to the high speed control transfer QH.
 * This is what Intel calls "bandwidth reclamation" and improves
 * USB performance a lot for some devices.
 * If we are already looping, just count it.
 */
void
uhci_add_loop(uhci_softc_t *sc)
{
	UHCIHIST_FUNC(); UHCIHIST_CALLED();

#ifdef UHCI_DEBUG
	if (uhcinoloop)
		return;
#endif
	if (++sc->sc_loops == 1) {
		DPRINTFN(5, "add loop", 0, 0, 0, 0);
		/* Note, we don't loop back the soft pointer. */
		sc->sc_last_qh->qh.qh_hlink =
		    htole32(sc->sc_hctl_start->physaddr | UHCI_PTR_QH);
		usb_syncmem(&sc->sc_last_qh->dma,
		    sc->sc_last_qh->offs + offsetof(uhci_qh_t, qh_hlink),
		    sizeof(sc->sc_last_qh->qh.qh_hlink),
		    BUS_DMASYNC_PREWRITE);
	}
}

void
uhci_rem_loop(uhci_softc_t *sc)
{
	UHCIHIST_FUNC(); UHCIHIST_CALLED();

#ifdef UHCI_DEBUG
	if (uhcinoloop)
		return;
#endif
	if (--sc->sc_loops == 0) {
		DPRINTFN(5, "remove loop", 0, 0, 0, 0);
		sc->sc_last_qh->qh.qh_hlink = htole32(UHCI_PTR_T);
		usb_syncmem(&sc->sc_last_qh->dma,
		    sc->sc_last_qh->offs + offsetof(uhci_qh_t, qh_hlink),
		    sizeof(sc->sc_last_qh->qh.qh_hlink),
		    BUS_DMASYNC_PREWRITE);
	}
}

/* Add high speed control QH, called with lock held.
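 * "High speed" here means full speed (12 Mbit/s) devices; UHCI has no
 * high speed in the USB 2.0 sense, only low and full speed.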
*/ 1133 void 1134 uhci_add_hs_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh) 1135 { 1136 uhci_soft_qh_t *eqh; 1137 1138 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1139 1140 KASSERT(mutex_owned(&sc->sc_lock)); 1141 1142 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0); 1143 eqh = sc->sc_hctl_end; 1144 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink), 1145 sizeof(eqh->qh.qh_hlink), 1146 BUS_DMASYNC_POSTWRITE); 1147 sqh->hlink = eqh->hlink; 1148 sqh->qh.qh_hlink = eqh->qh.qh_hlink; 1149 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), 1150 BUS_DMASYNC_PREWRITE); 1151 eqh->hlink = sqh; 1152 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH); 1153 sc->sc_hctl_end = sqh; 1154 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink), 1155 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE); 1156 #ifdef UHCI_CTL_LOOP 1157 uhci_add_loop(sc); 1158 #endif 1159 } 1160 1161 /* Remove high speed control QH, called with lock held. */ 1162 void 1163 uhci_remove_hs_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh) 1164 { 1165 uhci_soft_qh_t *pqh; 1166 uint32_t elink; 1167 1168 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); 1169 1170 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1171 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0); 1172 #ifdef UHCI_CTL_LOOP 1173 uhci_rem_loop(sc); 1174 #endif 1175 /* 1176 * The T bit should be set in the elink of the QH so that the HC 1177 * doesn't follow the pointer. This condition may fail if the 1178 * the transferred packet was short so that the QH still points 1179 * at the last used TD. 1180 * In this case we set the T bit and wait a little for the HC 1181 * to stop looking at the TD. 1182 * Note that if the TD chain is large enough, the controller 1183 * may still be looking at the chain at the end of this function. 1184 * uhci_free_std_chain() will make sure the controller stops 1185 * looking at it quickly, but until then we should not change 1186 * sqh->hlink. 1187 */ 1188 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink), 1189 sizeof(sqh->qh.qh_elink), 1190 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 1191 elink = le32toh(sqh->qh.qh_elink); 1192 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink), 1193 sizeof(sqh->qh.qh_elink), BUS_DMASYNC_PREREAD); 1194 if (!(elink & UHCI_PTR_T)) { 1195 sqh->qh.qh_elink = htole32(UHCI_PTR_T); 1196 usb_syncmem(&sqh->dma, 1197 sqh->offs + offsetof(uhci_qh_t, qh_elink), 1198 sizeof(sqh->qh.qh_elink), 1199 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1200 delay(UHCI_QH_REMOVE_DELAY); 1201 } 1202 1203 pqh = uhci_find_prev_qh(sc->sc_hctl_start, sqh); 1204 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink), 1205 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE); 1206 pqh->hlink = sqh->hlink; 1207 pqh->qh.qh_hlink = sqh->qh.qh_hlink; 1208 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink), 1209 sizeof(pqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE); 1210 delay(UHCI_QH_REMOVE_DELAY); 1211 if (sc->sc_hctl_end == sqh) 1212 sc->sc_hctl_end = pqh; 1213 } 1214 1215 /* Add low speed control QH, called with lock held. 
*/ 1216 void 1217 uhci_add_ls_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh) 1218 { 1219 uhci_soft_qh_t *eqh; 1220 1221 KASSERT(mutex_owned(&sc->sc_lock)); 1222 1223 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1224 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0); 1225 1226 eqh = sc->sc_lctl_end; 1227 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink), 1228 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE); 1229 sqh->hlink = eqh->hlink; 1230 sqh->qh.qh_hlink = eqh->qh.qh_hlink; 1231 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), 1232 BUS_DMASYNC_PREWRITE); 1233 eqh->hlink = sqh; 1234 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH); 1235 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink), 1236 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE); 1237 sc->sc_lctl_end = sqh; 1238 } 1239 1240 /* Remove low speed control QH, called with lock held. */ 1241 void 1242 uhci_remove_ls_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh) 1243 { 1244 uhci_soft_qh_t *pqh; 1245 uint32_t elink; 1246 1247 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); 1248 1249 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1250 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0); 1251 1252 /* See comment in uhci_remove_hs_ctrl() */ 1253 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink), 1254 sizeof(sqh->qh.qh_elink), 1255 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 1256 elink = le32toh(sqh->qh.qh_elink); 1257 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink), 1258 sizeof(sqh->qh.qh_elink), BUS_DMASYNC_PREREAD); 1259 if (!(elink & UHCI_PTR_T)) { 1260 sqh->qh.qh_elink = htole32(UHCI_PTR_T); 1261 usb_syncmem(&sqh->dma, 1262 sqh->offs + offsetof(uhci_qh_t, qh_elink), 1263 sizeof(sqh->qh.qh_elink), 1264 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1265 delay(UHCI_QH_REMOVE_DELAY); 1266 } 1267 pqh = uhci_find_prev_qh(sc->sc_lctl_start, sqh); 1268 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink), 1269 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE); 1270 pqh->hlink = sqh->hlink; 1271 pqh->qh.qh_hlink = sqh->qh.qh_hlink; 1272 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink), 1273 sizeof(pqh->qh.qh_hlink), 1274 BUS_DMASYNC_PREWRITE); 1275 delay(UHCI_QH_REMOVE_DELAY); 1276 if (sc->sc_lctl_end == sqh) 1277 sc->sc_lctl_end = pqh; 1278 } 1279 1280 /* Add bulk QH, called with lock held. */ 1281 void 1282 uhci_add_bulk(uhci_softc_t *sc, uhci_soft_qh_t *sqh) 1283 { 1284 uhci_soft_qh_t *eqh; 1285 1286 KASSERT(mutex_owned(&sc->sc_lock)); 1287 1288 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1289 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0); 1290 1291 eqh = sc->sc_bulk_end; 1292 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink), 1293 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE); 1294 sqh->hlink = eqh->hlink; 1295 sqh->qh.qh_hlink = eqh->qh.qh_hlink; 1296 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), 1297 BUS_DMASYNC_PREWRITE); 1298 eqh->hlink = sqh; 1299 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH); 1300 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink), 1301 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE); 1302 sc->sc_bulk_end = sqh; 1303 uhci_add_loop(sc); 1304 } 1305 1306 /* Remove bulk QH, called with lock held. 
*/ 1307 void 1308 uhci_remove_bulk(uhci_softc_t *sc, uhci_soft_qh_t *sqh) 1309 { 1310 uhci_soft_qh_t *pqh; 1311 1312 KASSERT(mutex_owned(&sc->sc_lock)); 1313 1314 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1315 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0); 1316 1317 uhci_rem_loop(sc); 1318 /* See comment in uhci_remove_hs_ctrl() */ 1319 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink), 1320 sizeof(sqh->qh.qh_elink), 1321 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 1322 if (!(sqh->qh.qh_elink & htole32(UHCI_PTR_T))) { 1323 sqh->qh.qh_elink = htole32(UHCI_PTR_T); 1324 usb_syncmem(&sqh->dma, 1325 sqh->offs + offsetof(uhci_qh_t, qh_elink), 1326 sizeof(sqh->qh.qh_elink), 1327 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1328 delay(UHCI_QH_REMOVE_DELAY); 1329 } 1330 pqh = uhci_find_prev_qh(sc->sc_bulk_start, sqh); 1331 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink), 1332 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE); 1333 pqh->hlink = sqh->hlink; 1334 pqh->qh.qh_hlink = sqh->qh.qh_hlink; 1335 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink), 1336 sizeof(pqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE); 1337 delay(UHCI_QH_REMOVE_DELAY); 1338 if (sc->sc_bulk_end == sqh) 1339 sc->sc_bulk_end = pqh; 1340 } 1341 1342 Static int uhci_intr1(uhci_softc_t *); 1343 1344 int 1345 uhci_intr(void *arg) 1346 { 1347 uhci_softc_t *sc = arg; 1348 int ret = 0; 1349 1350 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1351 1352 mutex_spin_enter(&sc->sc_intr_lock); 1353 1354 if (sc->sc_dying || !device_has_power(sc->sc_dev)) 1355 goto done; 1356 1357 if (sc->sc_bus.ub_usepolling || UREAD2(sc, UHCI_INTR) == 0) { 1358 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0); 1359 goto done; 1360 } 1361 1362 ret = uhci_intr1(sc); 1363 1364 done: 1365 mutex_spin_exit(&sc->sc_intr_lock); 1366 return ret; 1367 } 1368 1369 int 1370 uhci_intr1(uhci_softc_t *sc) 1371 { 1372 int status; 1373 int ack; 1374 1375 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1376 1377 #ifdef UHCI_DEBUG 1378 if (uhcidebug >= 15) { 1379 DPRINTF("sc %#jx", (uintptr_t)sc, 0, 0, 0); 1380 uhci_dumpregs(sc); 1381 } 1382 #endif 1383 1384 KASSERT(mutex_owned(&sc->sc_intr_lock)); 1385 1386 status = UREAD2(sc, UHCI_STS) & UHCI_STS_ALLINTRS; 1387 /* Check if the interrupt was for us. 
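 * A zero status means the controller did not raise this interrupt
 * (e.g. another device on a shared interrupt line did).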
*/ 1388 if (status == 0) 1389 return 0; 1390 1391 if (sc->sc_suspend != PWR_RESUME) { 1392 #ifdef DIAGNOSTIC 1393 printf("%s: interrupt while not operating ignored\n", 1394 device_xname(sc->sc_dev)); 1395 #endif 1396 UWRITE2(sc, UHCI_STS, status); /* acknowledge the ints */ 1397 return 0; 1398 } 1399 1400 ack = 0; 1401 if (status & UHCI_STS_USBINT) 1402 ack |= UHCI_STS_USBINT; 1403 if (status & UHCI_STS_USBEI) 1404 ack |= UHCI_STS_USBEI; 1405 if (status & UHCI_STS_RD) { 1406 ack |= UHCI_STS_RD; 1407 #ifdef UHCI_DEBUG 1408 printf("%s: resume detect\n", device_xname(sc->sc_dev)); 1409 #endif 1410 } 1411 if (status & UHCI_STS_HSE) { 1412 ack |= UHCI_STS_HSE; 1413 printf("%s: host system error\n", device_xname(sc->sc_dev)); 1414 } 1415 if (status & UHCI_STS_HCPE) { 1416 ack |= UHCI_STS_HCPE; 1417 printf("%s: host controller process error\n", 1418 device_xname(sc->sc_dev)); 1419 } 1420 1421 /* When HCHalted=1 and Run/Stop=0 , it is normal */ 1422 if ((status & UHCI_STS_HCH) && (UREAD2(sc, UHCI_CMD) & UHCI_CMD_RS)) { 1423 /* no acknowledge needed */ 1424 if (!sc->sc_dying) { 1425 printf("%s: host controller halted\n", 1426 device_xname(sc->sc_dev)); 1427 #ifdef UHCI_DEBUG 1428 uhci_dump_all(sc); 1429 #endif 1430 } 1431 sc->sc_dying = 1; 1432 } 1433 1434 if (!ack) 1435 return 0; /* nothing to acknowledge */ 1436 UWRITE2(sc, UHCI_STS, ack); /* acknowledge the ints */ 1437 1438 usb_schedsoftintr(&sc->sc_bus); 1439 1440 DPRINTFN(15, "sc %#jx done", (uintptr_t)sc, 0, 0, 0); 1441 1442 return 1; 1443 } 1444 1445 void 1446 uhci_softintr(void *v) 1447 { 1448 struct usbd_bus *bus = v; 1449 uhci_softc_t *sc = UHCI_BUS2SC(bus); 1450 struct uhci_xfer *ux, *nextux; 1451 ux_completeq_t cq; 1452 1453 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1454 DPRINTF("sc %#jx", (uintptr_t)sc, 0, 0, 0); 1455 1456 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); 1457 1458 TAILQ_INIT(&cq); 1459 /* 1460 * Interrupts on UHCI really suck. When the host controller 1461 * interrupts because a transfer is completed there is no 1462 * way of knowing which transfer it was. You can scan down 1463 * the TDs and QHs of the previous frame to limit the search, 1464 * but that assumes that the interrupt was not delayed by more 1465 * than 1 ms, which may not always be true (e.g. after debug 1466 * output on a slow console). 1467 * We scan all interrupt descriptors to see if any have 1468 * completed. 1469 */ 1470 TAILQ_FOREACH_SAFE(ux, &sc->sc_intrhead, ux_list, nextux) { 1471 uhci_check_intr(sc, ux, &cq); 1472 } 1473 1474 /* 1475 * We abuse ux_list for the interrupt and complete lists and 1476 * interrupt transfers will get re-added here so use 1477 * the _SAFE version of TAILQ_FOREACH. 1478 */ 1479 TAILQ_FOREACH_SAFE(ux, &cq, ux_list, nextux) { 1480 DPRINTF("ux %#jx", (uintptr_t)ux, 0, 0, 0); 1481 usb_transfer_complete(&ux->ux_xfer); 1482 } 1483 1484 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); 1485 } 1486 1487 /* Check for an interrupt. 
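 * Decide whether the given xfer has finished (all its TDs inactive, an
 * error, or a short packet with SPD set) and if so hand it to uhci_idone().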
*/ 1488 void 1489 uhci_check_intr(uhci_softc_t *sc, struct uhci_xfer *ux, ux_completeq_t *cqp) 1490 { 1491 uhci_soft_td_t *std, *fstd = NULL, *lstd = NULL; 1492 uint32_t status; 1493 1494 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1495 DPRINTFN(15, "ux %#jx", (uintptr_t)ux, 0, 0, 0); 1496 1497 KASSERT(ux != NULL); 1498 1499 struct usbd_xfer *xfer = &ux->ux_xfer; 1500 if (xfer->ux_status == USBD_CANCELLED || 1501 xfer->ux_status == USBD_TIMEOUT) { 1502 DPRINTF("aborted xfer %#jx", (uintptr_t)xfer, 0, 0, 0); 1503 return; 1504 } 1505 1506 switch (ux->ux_type) { 1507 case UX_CTRL: 1508 fstd = ux->ux_setup; 1509 lstd = ux->ux_stat; 1510 break; 1511 case UX_BULK: 1512 case UX_INTR: 1513 case UX_ISOC: 1514 fstd = ux->ux_stdstart; 1515 lstd = ux->ux_stdend; 1516 break; 1517 default: 1518 KASSERT(false); 1519 break; 1520 } 1521 if (fstd == NULL) 1522 return; 1523 1524 KASSERT(lstd != NULL); 1525 1526 usb_syncmem(&lstd->dma, 1527 lstd->offs + offsetof(uhci_td_t, td_status), 1528 sizeof(lstd->td.td_status), 1529 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 1530 status = le32toh(lstd->td.td_status); 1531 usb_syncmem(&lstd->dma, 1532 lstd->offs + offsetof(uhci_td_t, td_status), 1533 sizeof(lstd->td.td_status), 1534 BUS_DMASYNC_PREREAD); 1535 1536 /* If the last TD is not marked active we can complete */ 1537 if (!(status & UHCI_TD_ACTIVE)) { 1538 done: 1539 DPRINTFN(12, "ux=%#jx done", (uintptr_t)ux, 0, 0, 0); 1540 uhci_idone(ux, cqp); 1541 return; 1542 } 1543 1544 /* 1545 * If the last TD is still active we need to check whether there 1546 * is an error somewhere in the middle, or whether there was a 1547 * short packet (SPD and not ACTIVE). 1548 */ 1549 DPRINTFN(12, "active ux=%#jx", (uintptr_t)ux, 0, 0, 0); 1550 for (std = fstd; std != lstd; std = std->link.std) { 1551 usb_syncmem(&std->dma, 1552 std->offs + offsetof(uhci_td_t, td_status), 1553 sizeof(std->td.td_status), 1554 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 1555 status = le32toh(std->td.td_status); 1556 usb_syncmem(&std->dma, 1557 std->offs + offsetof(uhci_td_t, td_status), 1558 sizeof(std->td.td_status), BUS_DMASYNC_PREREAD); 1559 1560 /* If there's an active TD the xfer isn't done. */ 1561 if (status & UHCI_TD_ACTIVE) { 1562 DPRINTFN(12, "ux=%#jx std=%#jx still active", 1563 (uintptr_t)ux, (uintptr_t)std, 0, 0); 1564 return; 1565 } 1566 1567 /* Any kind of error makes the xfer done. */ 1568 if (status & UHCI_TD_STALLED) 1569 goto done; 1570 1571 /* 1572 * If the data phase of a control transfer is short, we need 1573 * to complete the status stage 1574 */ 1575 1576 if ((status & UHCI_TD_SPD) && ux->ux_type == UX_CTRL) { 1577 struct uhci_pipe *upipe = 1578 UHCI_PIPE2UPIPE(xfer->ux_pipe); 1579 uhci_soft_qh_t *sqh = upipe->ctrl.sqh; 1580 uhci_soft_td_t *stat = upipe->ctrl.stat; 1581 1582 DPRINTFN(12, "ux=%#jx std=%#jx control status" 1583 "phase needs completion", (uintptr_t)ux, 1584 (uintptr_t)ux->ux_stdstart, 0, 0); 1585 1586 sqh->qh.qh_elink = 1587 htole32(stat->physaddr | UHCI_PTR_TD); 1588 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), 1589 BUS_DMASYNC_PREWRITE); 1590 break; 1591 } 1592 1593 /* We want short packets, and it is short: it's done */ 1594 usb_syncmem(&std->dma, 1595 std->offs + offsetof(uhci_td_t, td_token), 1596 sizeof(std->td.td_token), 1597 BUS_DMASYNC_POSTWRITE); 1598 1599 if ((status & UHCI_TD_SPD) && 1600 UHCI_TD_GET_ACTLEN(status) < 1601 UHCI_TD_GET_MAXLEN(le32toh(std->td.td_token))) { 1602 goto done; 1603 } 1604 } 1605 } 1606 1607 /* Called with USB lock held. 
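 * uhci_idone() claims the xfer, computes its actual length and status from
 * its TDs, and puts it on the completion queue passed by the caller.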
*/ 1608 void 1609 uhci_idone(struct uhci_xfer *ux, ux_completeq_t *cqp) 1610 { 1611 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1612 struct usbd_xfer *xfer = &ux->ux_xfer; 1613 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer); 1614 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 1615 uhci_soft_td_t *std; 1616 uint32_t status = 0, nstatus; 1617 const bool polling __diagused = sc->sc_bus.ub_usepolling; 1618 int actlen; 1619 1620 KASSERT(polling || mutex_owned(&sc->sc_lock)); 1621 1622 DPRINTFN(12, "ux=%#jx", (uintptr_t)ux, 0, 0, 0); 1623 1624 /* 1625 * Try to claim this xfer for completion. If it has already 1626 * completed or aborted, drop it on the floor. 1627 */ 1628 if (!usbd_xfer_trycomplete(xfer)) 1629 return; 1630 1631 #ifdef DIAGNOSTIC 1632 #ifdef UHCI_DEBUG 1633 if (ux->ux_isdone) { 1634 DPRINTF("--- dump start ---", 0, 0, 0, 0); 1635 uhci_dump_ii(ux); 1636 DPRINTF("--- dump end ---", 0, 0, 0, 0); 1637 } 1638 #endif 1639 KASSERT(!ux->ux_isdone); 1640 KASSERTMSG(!ux->ux_isdone, "xfer %p type %d status %d", xfer, 1641 ux->ux_type, xfer->ux_status); 1642 ux->ux_isdone = true; 1643 #endif 1644 1645 if (xfer->ux_nframes != 0) { 1646 /* Isoc transfer, do things differently. */ 1647 uhci_soft_td_t **stds = upipe->isoc.stds; 1648 int i, n, nframes, len; 1649 1650 DPRINTFN(5, "ux=%#jx isoc ready", (uintptr_t)ux, 0, 0, 0); 1651 1652 nframes = xfer->ux_nframes; 1653 actlen = 0; 1654 n = ux->ux_curframe; 1655 for (i = 0; i < nframes; i++) { 1656 std = stds[n]; 1657 #ifdef UHCI_DEBUG 1658 if (uhcidebug >= 5) { 1659 DPRINTF("isoc TD %jd", i, 0, 0, 0); 1660 DPRINTF("--- dump start ---", 0, 0, 0, 0); 1661 uhci_dump_td(std); 1662 DPRINTF("--- dump end ---", 0, 0, 0, 0); 1663 } 1664 #endif 1665 if (++n >= UHCI_VFRAMELIST_COUNT) 1666 n = 0; 1667 usb_syncmem(&std->dma, 1668 std->offs + offsetof(uhci_td_t, td_status), 1669 sizeof(std->td.td_status), 1670 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 1671 status = le32toh(std->td.td_status); 1672 len = UHCI_TD_GET_ACTLEN(status); 1673 xfer->ux_frlengths[i] = len; 1674 actlen += len; 1675 } 1676 upipe->isoc.inuse -= nframes; 1677 xfer->ux_actlen = actlen; 1678 xfer->ux_status = USBD_NORMAL_COMPLETION; 1679 goto end; 1680 } 1681 1682 #ifdef UHCI_DEBUG 1683 DPRINTFN(10, "ux=%#jx, xfer=%#jx, pipe=%#jx ready", (uintptr_t)ux, 1684 (uintptr_t)xfer, (uintptr_t)upipe, 0); 1685 if (uhcidebug >= 10) { 1686 DPRINTF("--- dump start ---", 0, 0, 0, 0); 1687 uhci_dump_tds(ux->ux_stdstart); 1688 DPRINTF("--- dump end ---", 0, 0, 0, 0); 1689 } 1690 #endif 1691 1692 /* The transfer is done, compute actual length and status. */ 1693 actlen = 0; 1694 for (std = ux->ux_stdstart; std != NULL; std = std->link.std) { 1695 usb_syncmem(&std->dma, std->offs, sizeof(std->td), 1696 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 1697 nstatus = le32toh(std->td.td_status); 1698 if (nstatus & UHCI_TD_ACTIVE) 1699 break; 1700 1701 status = nstatus; 1702 if (UHCI_TD_GET_PID(le32toh(std->td.td_token)) != 1703 UHCI_TD_PID_SETUP) 1704 actlen += UHCI_TD_GET_ACTLEN(status); 1705 else { 1706 /* 1707 * UHCI will report CRCTO in addition to a STALL or NAK 1708 * for a SETUP transaction. See section 3.2.2, "TD 1709 * CONTROL AND STATUS". 1710 */ 1711 if (status & (UHCI_TD_STALLED | UHCI_TD_NAK)) 1712 status &= ~UHCI_TD_CRCTO; 1713 } 1714 } 1715 /* If there are left over TDs we need to update the toggle. 
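 * (The data toggle of the first unexecuted TD is the one the endpoint
 * expects next.)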
*/ 1716 if (std != NULL) 1717 upipe->nexttoggle = UHCI_TD_GET_DT(le32toh(std->td.td_token)); 1718 1719 status &= UHCI_TD_ERROR; 1720 DPRINTFN(10, "actlen=%jd, status=%#jx", actlen, status, 0, 0); 1721 xfer->ux_actlen = actlen; 1722 if (status != 0) { 1723 1724 DPRINTFN((status == UHCI_TD_STALLED) * 10, 1725 "error, addr=%jd, endpt=0x%02jx", 1726 xfer->ux_pipe->up_dev->ud_addr, 1727 xfer->ux_pipe->up_endpoint->ue_edesc->bEndpointAddress, 1728 0, 0); 1729 DPRINTFN((status == UHCI_TD_STALLED) * 10, 1730 "bitstuff=%jd crcto =%jd nak =%jd babble =%jd", 1731 !!(status & UHCI_TD_BITSTUFF), 1732 !!(status & UHCI_TD_CRCTO), 1733 !!(status & UHCI_TD_NAK), 1734 !!(status & UHCI_TD_BABBLE)); 1735 DPRINTFN((status == UHCI_TD_STALLED) * 10, 1736 "dbuffer =%jd stalled =%jd active =%jd", 1737 !!(status & UHCI_TD_DBUFFER), 1738 !!(status & UHCI_TD_STALLED), 1739 !!(status & UHCI_TD_ACTIVE), 1740 0); 1741 1742 if (status == UHCI_TD_STALLED) 1743 xfer->ux_status = USBD_STALLED; 1744 else 1745 xfer->ux_status = USBD_IOERROR; /* more info XXX */ 1746 } else { 1747 xfer->ux_status = USBD_NORMAL_COMPLETION; 1748 } 1749 1750 end: 1751 uhci_del_intr_list(sc, ux); 1752 if (cqp) 1753 TAILQ_INSERT_TAIL(cqp, ux, ux_list); 1754 1755 KASSERT(polling || mutex_owned(&sc->sc_lock)); 1756 DPRINTFN(12, "ux=%#jx done", (uintptr_t)ux, 0, 0, 0); 1757 } 1758 1759 void 1760 uhci_poll(struct usbd_bus *bus) 1761 { 1762 uhci_softc_t *sc = UHCI_BUS2SC(bus); 1763 1764 if (UREAD2(sc, UHCI_STS) & UHCI_STS_USBINT) { 1765 mutex_spin_enter(&sc->sc_intr_lock); 1766 uhci_intr1(sc); 1767 mutex_spin_exit(&sc->sc_intr_lock); 1768 } 1769 } 1770 1771 void 1772 uhci_reset(uhci_softc_t *sc) 1773 { 1774 int n; 1775 1776 UHCICMD(sc, UHCI_CMD_HCRESET); 1777 /* The reset bit goes low when the controller is done. */ 1778 for (n = 0; n < UHCI_RESET_TIMEOUT && 1779 (UREAD2(sc, UHCI_CMD) & UHCI_CMD_HCRESET); n++) 1780 usb_delay_ms(&sc->sc_bus, 1); 1781 if (n >= UHCI_RESET_TIMEOUT) 1782 printf("%s: controller did not reset\n", 1783 device_xname(sc->sc_dev)); 1784 } 1785 1786 usbd_status 1787 uhci_run(uhci_softc_t *sc, int run, int locked) 1788 { 1789 int n, running; 1790 uint16_t cmd; 1791 1792 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1793 1794 run = run != 0; 1795 if (!locked) 1796 mutex_spin_enter(&sc->sc_intr_lock); 1797 1798 DPRINTF("setting run=%jd", run, 0, 0, 0); 1799 cmd = UREAD2(sc, UHCI_CMD); 1800 if (run) 1801 cmd |= UHCI_CMD_RS; 1802 else 1803 cmd &= ~UHCI_CMD_RS; 1804 UHCICMD(sc, cmd); 1805 for (n = 0; n < 10; n++) { 1806 running = !(UREAD2(sc, UHCI_STS) & UHCI_STS_HCH); 1807 /* return when we've entered the state we want */ 1808 if (run == running) { 1809 if (!locked) 1810 mutex_spin_exit(&sc->sc_intr_lock); 1811 DPRINTF("done cmd=%#jx sts=%#jx", 1812 UREAD2(sc, UHCI_CMD), UREAD2(sc, UHCI_STS), 0, 0); 1813 return USBD_NORMAL_COMPLETION; 1814 } 1815 usb_delay_ms_locked(&sc->sc_bus, 1, &sc->sc_intr_lock); 1816 } 1817 if (!locked) 1818 mutex_spin_exit(&sc->sc_intr_lock); 1819 printf("%s: cannot %s\n", device_xname(sc->sc_dev), 1820 run ? "start" : "stop"); 1821 return USBD_IOERROR; 1822 } 1823 1824 /* 1825 * Memory management routines. 1826 * uhci_alloc_std allocates TDs 1827 * uhci_alloc_sqh allocates QHs 1828 * These two routines do their own free list management, 1829 * partly for speed, partly because allocating DMAable memory 1830 * has page size granularity so much memory would be wasted if 1831 * only one TD/QH (32 bytes) was placed in each allocated chunk. 
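 * Instead a chunk of UHCI_STD_CHUNK TDs (or UHCI_SQH_CHUNK QHs) is carved
 * up and threaded onto the softc's free list, from which later allocations
 * are satisfied.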
1832 */ 1833 1834 uhci_soft_td_t * 1835 uhci_alloc_std(uhci_softc_t *sc) 1836 { 1837 uhci_soft_td_t *std; 1838 usbd_status err; 1839 int i, offs; 1840 usb_dma_t dma; 1841 1842 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1843 1844 mutex_enter(&sc->sc_lock); 1845 if (sc->sc_freetds == NULL) { 1846 DPRINTFN(2, "allocating chunk", 0, 0, 0, 0); 1847 mutex_exit(&sc->sc_lock); 1848 1849 err = usb_allocmem(&sc->sc_bus, UHCI_STD_SIZE * UHCI_STD_CHUNK, 1850 UHCI_TD_ALIGN, &dma); 1851 if (err) 1852 return NULL; 1853 1854 mutex_enter(&sc->sc_lock); 1855 for (i = 0; i < UHCI_STD_CHUNK; i++) { 1856 offs = i * UHCI_STD_SIZE; 1857 std = KERNADDR(&dma, offs); 1858 std->physaddr = DMAADDR(&dma, offs); 1859 std->dma = dma; 1860 std->offs = offs; 1861 std->link.std = sc->sc_freetds; 1862 sc->sc_freetds = std; 1863 } 1864 } 1865 std = sc->sc_freetds; 1866 sc->sc_freetds = std->link.std; 1867 mutex_exit(&sc->sc_lock); 1868 1869 memset(&std->td, 0, sizeof(uhci_td_t)); 1870 1871 return std; 1872 } 1873 1874 #define TD_IS_FREE 0x12345678 1875 1876 void 1877 uhci_free_std_locked(uhci_softc_t *sc, uhci_soft_td_t *std) 1878 { 1879 KASSERT(mutex_owned(&sc->sc_lock)); 1880 1881 #ifdef DIAGNOSTIC 1882 if (le32toh(std->td.td_token) == TD_IS_FREE) { 1883 printf("%s: freeing free TD %p\n", __func__, std); 1884 return; 1885 } 1886 std->td.td_token = htole32(TD_IS_FREE); 1887 #endif 1888 1889 std->link.std = sc->sc_freetds; 1890 sc->sc_freetds = std; 1891 } 1892 1893 void 1894 uhci_free_std(uhci_softc_t *sc, uhci_soft_td_t *std) 1895 { 1896 mutex_enter(&sc->sc_lock); 1897 uhci_free_std_locked(sc, std); 1898 mutex_exit(&sc->sc_lock); 1899 } 1900 1901 uhci_soft_qh_t * 1902 uhci_alloc_sqh(uhci_softc_t *sc) 1903 { 1904 uhci_soft_qh_t *sqh; 1905 usbd_status err; 1906 int i, offs; 1907 usb_dma_t dma; 1908 1909 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1910 1911 mutex_enter(&sc->sc_lock); 1912 if (sc->sc_freeqhs == NULL) { 1913 DPRINTFN(2, "allocating chunk", 0, 0, 0, 0); 1914 mutex_exit(&sc->sc_lock); 1915 1916 err = usb_allocmem(&sc->sc_bus, UHCI_SQH_SIZE * UHCI_SQH_CHUNK, 1917 UHCI_QH_ALIGN, &dma); 1918 if (err) 1919 return NULL; 1920 1921 mutex_enter(&sc->sc_lock); 1922 for (i = 0; i < UHCI_SQH_CHUNK; i++) { 1923 offs = i * UHCI_SQH_SIZE; 1924 sqh = KERNADDR(&dma, offs); 1925 sqh->physaddr = DMAADDR(&dma, offs); 1926 sqh->dma = dma; 1927 sqh->offs = offs; 1928 sqh->hlink = sc->sc_freeqhs; 1929 sc->sc_freeqhs = sqh; 1930 } 1931 } 1932 sqh = sc->sc_freeqhs; 1933 sc->sc_freeqhs = sqh->hlink; 1934 mutex_exit(&sc->sc_lock); 1935 1936 memset(&sqh->qh, 0, sizeof(uhci_qh_t)); 1937 1938 return sqh; 1939 } 1940 1941 void 1942 uhci_free_sqh(uhci_softc_t *sc, uhci_soft_qh_t *sqh) 1943 { 1944 KASSERT(mutex_owned(&sc->sc_lock)); 1945 1946 sqh->hlink = sc->sc_freeqhs; 1947 sc->sc_freeqhs = sqh; 1948 } 1949 1950 #if 0 1951 void 1952 uhci_free_std_chain(uhci_softc_t *sc, uhci_soft_td_t *std, 1953 uhci_soft_td_t *stdend) 1954 { 1955 uhci_soft_td_t *p; 1956 uint32_t td_link; 1957 1958 /* 1959 * to avoid race condition with the controller which may be looking 1960 * at this chain, we need to first invalidate all links, and 1961 * then wait for the controller to move to another queue 1962 */ 1963 for (p = std; p != stdend; p = p->link.std) { 1964 usb_syncmem(&p->dma, 1965 p->offs + offsetof(uhci_td_t, td_link), 1966 sizeof(p->td.td_link), 1967 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 1968 td_link = le32toh(p->td.td_link); 1969 usb_syncmem(&p->dma, 1970 p->offs + offsetof(uhci_td_t, td_link), 1971 sizeof(p->td.td_link), 1972 BUS_DMASYNC_PREREAD); 1973 if 
((td_link & UHCI_PTR_T) == 0) { 1974 p->td.td_link = htole32(UHCI_PTR_T); 1975 usb_syncmem(&p->dma, 1976 p->offs + offsetof(uhci_td_t, td_link), 1977 sizeof(p->td.td_link), 1978 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1979 } 1980 } 1981 delay(UHCI_QH_REMOVE_DELAY); 1982 1983 for (; std != stdend; std = p) { 1984 p = std->link.std; 1985 uhci_free_std(sc, std); 1986 } 1987 } 1988 #endif 1989 1990 int 1991 uhci_alloc_std_chain(uhci_softc_t *sc, struct usbd_xfer *xfer, int len, 1992 int rd, uhci_soft_td_t **sp) 1993 { 1994 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer); 1995 uint16_t flags = xfer->ux_flags; 1996 uhci_soft_td_t *p; 1997 1998 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1999 2000 DPRINTFN(8, "xfer=%#jx pipe=%#jx", (uintptr_t)xfer, 2001 (uintptr_t)xfer->ux_pipe, 0, 0); 2002 2003 ASSERT_SLEEPABLE(); 2004 KASSERT(sp); 2005 2006 int maxp = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize); 2007 if (maxp == 0) { 2008 printf("%s: maxp=0\n", __func__); 2009 return EINVAL; 2010 } 2011 size_t ntd = howmany(len, maxp); 2012 if (!rd && (flags & USBD_FORCE_SHORT_XFER)) { 2013 ntd++; 2014 } 2015 DPRINTFN(10, "maxp=%jd ntd=%jd", maxp, ntd, 0, 0); 2016 2017 uxfer->ux_stds = NULL; 2018 uxfer->ux_nstd = ntd; 2019 if (ntd == 0) { 2020 *sp = NULL; 2021 DPRINTF("ntd=0", 0, 0, 0, 0); 2022 return 0; 2023 } 2024 uxfer->ux_stds = kmem_alloc(sizeof(uhci_soft_td_t *) * ntd, 2025 KM_SLEEP); 2026 2027 for (int i = 0; i < ntd; i++) { 2028 p = uhci_alloc_std(sc); 2029 if (p == NULL) { 2030 if (i != 0) { 2031 uxfer->ux_nstd = i; 2032 uhci_free_stds(sc, uxfer); 2033 } 2034 kmem_free(uxfer->ux_stds, 2035 sizeof(uhci_soft_td_t *) * ntd); 2036 return ENOMEM; 2037 } 2038 uxfer->ux_stds[i] = p; 2039 } 2040 2041 *sp = uxfer->ux_stds[0]; 2042 2043 return 0; 2044 } 2045 2046 Static void 2047 uhci_free_stds(uhci_softc_t *sc, struct uhci_xfer *ux) 2048 { 2049 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2050 2051 DPRINTFN(8, "ux=%#jx", (uintptr_t)ux, 0, 0, 0); 2052 2053 mutex_enter(&sc->sc_lock); 2054 for (size_t i = 0; i < ux->ux_nstd; i++) { 2055 uhci_soft_td_t *std = ux->ux_stds[i]; 2056 #ifdef DIAGNOSTIC 2057 if (le32toh(std->td.td_token) == TD_IS_FREE) { 2058 printf("%s: freeing free TD %p\n", __func__, std); 2059 return; 2060 } 2061 std->td.td_token = htole32(TD_IS_FREE); 2062 #endif 2063 ux->ux_stds[i]->link.std = sc->sc_freetds; 2064 sc->sc_freetds = std; 2065 } 2066 mutex_exit(&sc->sc_lock); 2067 } 2068 2069 2070 Static void 2071 uhci_reset_std_chain(uhci_softc_t *sc, struct usbd_xfer *xfer, 2072 int length, int isread, int *toggle, uhci_soft_td_t **lstd) 2073 { 2074 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer); 2075 struct usbd_pipe *pipe = xfer->ux_pipe; 2076 usb_dma_t *dma = &xfer->ux_dmabuf; 2077 uint16_t flags = xfer->ux_flags; 2078 uhci_soft_td_t *std, *prev; 2079 int len = length; 2080 int tog = *toggle; 2081 int maxp; 2082 uint32_t status; 2083 size_t i; 2084 2085 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2086 DPRINTFN(8, "xfer=%#jx len %jd isread %jd toggle %jd", (uintptr_t)xfer, 2087 len, isread, *toggle); 2088 2089 KASSERT(len != 0 || (!isread && (flags & USBD_FORCE_SHORT_XFER))); 2090 2091 maxp = UGETW(pipe->up_endpoint->ue_edesc->wMaxPacketSize); 2092 KASSERT(maxp != 0); 2093 2094 int addr = xfer->ux_pipe->up_dev->ud_addr; 2095 int endpt = xfer->ux_pipe->up_endpoint->ue_edesc->bEndpointAddress; 2096 2097 status = UHCI_TD_ZERO_ACTLEN(UHCI_TD_SET_ERRCNT(3) | UHCI_TD_ACTIVE); 2098 if (pipe->up_dev->ud_speed == USB_SPEED_LOW) 2099 status |= UHCI_TD_LS; 2100 if (flags & USBD_SHORT_XFER_OK) 2101 status |= 
UHCI_TD_SPD; 2102 usb_syncmem(dma, 0, len, 2103 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 2104 std = prev = NULL; 2105 for (i = 0; len != 0 && i < uxfer->ux_nstd; i++, prev = std) { 2106 int l = len; 2107 std = uxfer->ux_stds[i]; 2108 if (l > maxp) 2109 l = maxp; 2110 2111 if (prev) { 2112 prev->link.std = std; 2113 prev->td.td_link = htole32( 2114 std->physaddr | UHCI_PTR_VF | UHCI_PTR_TD 2115 ); 2116 usb_syncmem(&prev->dma, prev->offs, sizeof(prev->td), 2117 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2118 } 2119 2120 usb_syncmem(&std->dma, std->offs, sizeof(std->td), 2121 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 2122 2123 std->td.td_link = htole32(UHCI_PTR_T | UHCI_PTR_VF | UHCI_PTR_TD); 2124 std->td.td_status = htole32(status); 2125 std->td.td_token = htole32( 2126 UHCI_TD_SET_ENDPT(UE_GET_ADDR(endpt)) | 2127 UHCI_TD_SET_DEVADDR(addr) | 2128 UHCI_TD_SET_PID(isread ? UHCI_TD_PID_IN : UHCI_TD_PID_OUT) | 2129 UHCI_TD_SET_DT(tog) | 2130 UHCI_TD_SET_MAXLEN(l) 2131 ); 2132 std->td.td_buffer = htole32(DMAADDR(dma, i * maxp)); 2133 2134 std->link.std = NULL; 2135 2136 usb_syncmem(&std->dma, std->offs, sizeof(std->td), 2137 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2138 tog ^= 1; 2139 2140 len -= l; 2141 } 2142 KASSERTMSG(len == 0, "xfer %p alen %d len %d mps %d ux_nqtd %zu i %zu", 2143 xfer, length, len, maxp, uxfer->ux_nstd, i); 2144 2145 if (!isread && 2146 (flags & USBD_FORCE_SHORT_XFER) && 2147 length % maxp == 0) { 2148 /* Force a 0 length transfer at the end. */ 2149 KASSERTMSG(i < uxfer->ux_nstd, "i=%zu nstd=%zu", i, 2150 uxfer->ux_nstd); 2151 std = uxfer->ux_stds[i++]; 2152 2153 std->td.td_link = htole32(UHCI_PTR_T | UHCI_PTR_VF | UHCI_PTR_TD); 2154 std->td.td_status = htole32(status); 2155 std->td.td_token = htole32( 2156 UHCI_TD_SET_ENDPT(UE_GET_ADDR(endpt)) | 2157 UHCI_TD_SET_DEVADDR(addr) | 2158 UHCI_TD_SET_PID(UHCI_TD_PID_OUT) | 2159 UHCI_TD_SET_DT(tog) | 2160 UHCI_TD_SET_MAXLEN(0) 2161 ); 2162 std->td.td_buffer = 0; 2163 usb_syncmem(&std->dma, std->offs, sizeof(std->td), 2164 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2165 2166 std->link.std = NULL; 2167 if (prev) { 2168 prev->link.std = std; 2169 prev->td.td_link = htole32( 2170 std->physaddr | UHCI_PTR_VF | UHCI_PTR_TD 2171 ); 2172 usb_syncmem(&prev->dma, prev->offs, sizeof(prev->td), 2173 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2174 } 2175 tog ^= 1; 2176 } 2177 *lstd = std; 2178 *toggle = tog; 2179 } 2180 2181 void 2182 uhci_device_clear_toggle(struct usbd_pipe *pipe) 2183 { 2184 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe); 2185 upipe->nexttoggle = 0; 2186 } 2187 2188 void 2189 uhci_noop(struct usbd_pipe *pipe) 2190 { 2191 } 2192 2193 int 2194 uhci_device_bulk_init(struct usbd_xfer *xfer) 2195 { 2196 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2197 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer); 2198 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc; 2199 int endpt = ed->bEndpointAddress; 2200 int isread = UE_GET_DIR(endpt) == UE_DIR_IN; 2201 int len = xfer->ux_bufsize; 2202 int err = 0; 2203 2204 2205 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2206 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, len, 2207 xfer->ux_flags, 0); 2208 2209 if (sc->sc_dying) 2210 return USBD_IOERROR; 2211 2212 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST)); 2213 2214 uxfer->ux_type = UX_BULK; 2215 err = uhci_alloc_std_chain(sc, xfer, len, isread, &uxfer->ux_stdstart); 2216 if (err) 2217 return err; 2218 2219 #ifdef UHCI_DEBUG 2220 if (uhcidebug >= 10) { 2221 DPRINTF("--- dump start ---", 0, 0, 0, 0); 
2222 uhci_dump_tds(uxfer->ux_stdstart); 2223 DPRINTF("--- dump end ---", 0, 0, 0, 0); 2224 } 2225 #endif 2226 2227 return 0; 2228 } 2229 2230 Static void 2231 uhci_device_bulk_fini(struct usbd_xfer *xfer) 2232 { 2233 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2234 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 2235 2236 KASSERT(ux->ux_type == UX_BULK); 2237 2238 if (ux->ux_nstd) { 2239 uhci_free_stds(sc, ux); 2240 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd); 2241 } 2242 } 2243 2244 usbd_status 2245 uhci_device_bulk_transfer(struct usbd_xfer *xfer) 2246 { 2247 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2248 usbd_status err; 2249 2250 /* Insert last in queue. */ 2251 mutex_enter(&sc->sc_lock); 2252 err = usb_insert_transfer(xfer); 2253 mutex_exit(&sc->sc_lock); 2254 if (err) 2255 return err; 2256 2257 /* 2258 * Pipe isn't running (otherwise err would be USBD_INPROG), 2259 * so start it first. 2260 */ 2261 return uhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 2262 } 2263 2264 usbd_status 2265 uhci_device_bulk_start(struct usbd_xfer *xfer) 2266 { 2267 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 2268 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 2269 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2270 uhci_soft_td_t *data, *dataend; 2271 uhci_soft_qh_t *sqh; 2272 const bool polling = sc->sc_bus.ub_usepolling; 2273 int len; 2274 int endpt; 2275 int isread; 2276 2277 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2278 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, 2279 xfer->ux_length, xfer->ux_flags, 0); 2280 2281 if (sc->sc_dying) 2282 return USBD_IOERROR; 2283 2284 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST)); 2285 KASSERT(xfer->ux_length <= xfer->ux_bufsize); 2286 2287 len = xfer->ux_length; 2288 endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress; 2289 isread = UE_GET_DIR(endpt) == UE_DIR_IN; 2290 sqh = upipe->bulk.sqh; 2291 2292 /* Take lock here to protect nexttoggle */ 2293 if (!polling) 2294 mutex_enter(&sc->sc_lock); 2295 2296 uhci_reset_std_chain(sc, xfer, len, isread, &upipe->nexttoggle, 2297 &dataend); 2298 2299 data = ux->ux_stdstart; 2300 ux->ux_stdend = dataend; 2301 dataend->td.td_status |= htole32(UHCI_TD_IOC); 2302 usb_syncmem(&dataend->dma, 2303 dataend->offs + offsetof(uhci_td_t, td_status), 2304 sizeof(dataend->td.td_status), 2305 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2306 2307 #ifdef UHCI_DEBUG 2308 if (uhcidebug >= 10) { 2309 DPRINTF("--- dump start ---", 0, 0, 0, 0); 2310 DPRINTFN(10, "before transfer", 0, 0, 0, 0); 2311 uhci_dump_tds(data); 2312 DPRINTF("--- dump end ---", 0, 0, 0, 0); 2313 } 2314 #endif 2315 2316 KASSERT(ux->ux_isdone); 2317 #ifdef DIAGNOSTIC 2318 ux->ux_isdone = false; 2319 #endif 2320 2321 sqh->elink = data; 2322 sqh->qh.qh_elink = htole32(data->physaddr | UHCI_PTR_TD); 2323 /* uhci_add_bulk() will do usb_syncmem(sqh) */ 2324 2325 uhci_add_bulk(sc, sqh); 2326 uhci_add_intr_list(sc, ux); 2327 usbd_xfer_schedule_timeout(xfer); 2328 xfer->ux_status = USBD_IN_PROGRESS; 2329 if (!polling) 2330 mutex_exit(&sc->sc_lock); 2331 2332 return USBD_IN_PROGRESS; 2333 } 2334 2335 /* Abort a device bulk request. */ 2336 void 2337 uhci_device_bulk_abort(struct usbd_xfer *xfer) 2338 { 2339 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer); 2340 2341 KASSERT(mutex_owned(&sc->sc_lock)); 2342 2343 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2344 2345 usbd_xfer_abort(xfer); 2346 } 2347 2348 /* 2349 * To allow the hardware time to notice we simply wait. 
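 * The TDs are first made inactive so that neither the controller nor
 * the interrupt routine will touch the xfer again, then we wait a
 * couple of milliseconds for any frame already being processed to
 * finish before completing the xfer.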
2350 */ 2351 Static void 2352 uhci_abortx(struct usbd_xfer *xfer) 2353 { 2354 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2355 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 2356 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 2357 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2358 uhci_soft_td_t *std; 2359 2360 DPRINTFN(1,"xfer=%#jx", (uintptr_t)xfer, 0, 0, 0); 2361 2362 KASSERT(mutex_owned(&sc->sc_lock)); 2363 ASSERT_SLEEPABLE(); 2364 2365 KASSERTMSG((xfer->ux_status == USBD_CANCELLED || 2366 xfer->ux_status == USBD_TIMEOUT), 2367 "bad abort status: %d", xfer->ux_status); 2368 2369 /* 2370 * If we're dying, skip the hardware action and just notify the 2371 * software that we're done. 2372 */ 2373 if (sc->sc_dying) { 2374 DPRINTFN(4, "xfer %#jx dying %ju", (uintptr_t)xfer, 2375 xfer->ux_status, 0, 0); 2376 goto dying; 2377 } 2378 2379 /* 2380 * HC Step 1: Make interrupt routine and hardware ignore xfer. 2381 */ 2382 uhci_del_intr_list(sc, ux); 2383 2384 DPRINTF("stop ux=%#jx", (uintptr_t)ux, 0, 0, 0); 2385 for (std = ux->ux_stdstart; std != NULL; std = std->link.std) { 2386 usb_syncmem(&std->dma, 2387 std->offs + offsetof(uhci_td_t, td_status), 2388 sizeof(std->td.td_status), 2389 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 2390 std->td.td_status &= htole32(~(UHCI_TD_ACTIVE | UHCI_TD_IOC)); 2391 usb_syncmem(&std->dma, 2392 std->offs + offsetof(uhci_td_t, td_status), 2393 sizeof(std->td.td_status), 2394 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2395 } 2396 2397 /* 2398 * HC Step 2: Wait until we know hardware has finished any possible 2399 * use of the xfer. 2400 */ 2401 /* Hardware finishes in 1ms */ 2402 usb_delay_ms_locked(upipe->pipe.up_dev->ud_bus, 2, &sc->sc_lock); 2403 2404 /* 2405 * HC Step 3: Notify completion to waiting xfers. 2406 */ 2407 dying: 2408 #ifdef DIAGNOSTIC 2409 ux->ux_isdone = true; 2410 #endif 2411 usb_transfer_complete(xfer); 2412 DPRINTFN(14, "end", 0, 0, 0, 0); 2413 2414 KASSERT(mutex_owned(&sc->sc_lock)); 2415 } 2416 2417 /* Close a device bulk pipe. */ 2418 void 2419 uhci_device_bulk_close(struct usbd_pipe *pipe) 2420 { 2421 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe); 2422 uhci_softc_t *sc = UHCI_PIPE2SC(pipe); 2423 2424 KASSERT(mutex_owned(&sc->sc_lock)); 2425 2426 uhci_free_sqh(sc, upipe->bulk.sqh); 2427 2428 pipe->up_endpoint->ue_toggle = upipe->nexttoggle; 2429 } 2430 2431 int 2432 uhci_device_ctrl_init(struct usbd_xfer *xfer) 2433 { 2434 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer); 2435 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 2436 usb_device_request_t *req = &xfer->ux_request; 2437 struct usbd_device *dev = upipe->pipe.up_dev; 2438 uhci_softc_t *sc = dev->ud_bus->ub_hcpriv; 2439 uhci_soft_td_t *data = NULL; 2440 int len; 2441 usbd_status err; 2442 int isread; 2443 2444 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2445 DPRINTFN(3, "xfer=%#jx len=%jd, addr=%jd, endpt=%jd", 2446 (uintptr_t)xfer, xfer->ux_bufsize, dev->ud_addr, 2447 upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress); 2448 2449 isread = req->bmRequestType & UT_READ; 2450 len = xfer->ux_bufsize; 2451 2452 uxfer->ux_type = UX_CTRL; 2453 /* Set up data transaction */ 2454 if (len != 0) { 2455 err = uhci_alloc_std_chain(sc, xfer, len, isread, &data); 2456 if (err) 2457 return err; 2458 } 2459 /* Set up interrupt info. 
*/ 2460 uxfer->ux_setup = upipe->ctrl.setup; 2461 uxfer->ux_stat = upipe->ctrl.stat; 2462 uxfer->ux_data = data; 2463 2464 return 0; 2465 } 2466 2467 Static void 2468 uhci_device_ctrl_fini(struct usbd_xfer *xfer) 2469 { 2470 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2471 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 2472 2473 KASSERT(ux->ux_type == UX_CTRL); 2474 2475 if (ux->ux_nstd) { 2476 uhci_free_stds(sc, ux); 2477 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd); 2478 } 2479 } 2480 2481 usbd_status 2482 uhci_device_ctrl_transfer(struct usbd_xfer *xfer) 2483 { 2484 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2485 usbd_status err; 2486 2487 /* Insert last in queue. */ 2488 mutex_enter(&sc->sc_lock); 2489 err = usb_insert_transfer(xfer); 2490 mutex_exit(&sc->sc_lock); 2491 if (err) 2492 return err; 2493 2494 /* 2495 * Pipe isn't running (otherwise err would be USBD_INPROG), 2496 * so start it first. 2497 */ 2498 return uhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 2499 } 2500 2501 usbd_status 2502 uhci_device_ctrl_start(struct usbd_xfer *xfer) 2503 { 2504 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2505 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer); 2506 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 2507 usb_device_request_t *req = &xfer->ux_request; 2508 struct usbd_device *dev = upipe->pipe.up_dev; 2509 int addr = dev->ud_addr; 2510 int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress; 2511 uhci_soft_td_t *setup, *stat, *next, *dataend; 2512 uhci_soft_qh_t *sqh; 2513 const bool polling = sc->sc_bus.ub_usepolling; 2514 int len; 2515 int isread; 2516 2517 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2518 2519 if (sc->sc_dying) 2520 return USBD_IOERROR; 2521 2522 KASSERT(xfer->ux_rqflags & URQ_REQUEST); 2523 2524 DPRINTFN(3, "type=0x%02jx, request=0x%02jx, " 2525 "wValue=0x%04jx, wIndex=0x%04jx", 2526 req->bmRequestType, req->bRequest, UGETW(req->wValue), 2527 UGETW(req->wIndex)); 2528 DPRINTFN(3, "len=%jd, addr=%jd, endpt=%jd", 2529 UGETW(req->wLength), dev->ud_addr, endpt, 0); 2530 2531 isread = req->bmRequestType & UT_READ; 2532 len = UGETW(req->wLength); 2533 2534 setup = upipe->ctrl.setup; 2535 stat = upipe->ctrl.stat; 2536 sqh = upipe->ctrl.sqh; 2537 2538 memcpy(KERNADDR(&upipe->ctrl.reqdma, 0), req, sizeof(*req)); 2539 usb_syncmem(&upipe->ctrl.reqdma, 0, sizeof(*req), BUS_DMASYNC_PREWRITE); 2540 2541 if (!polling) 2542 mutex_enter(&sc->sc_lock); 2543 2544 /* Set up data transaction */ 2545 if (len != 0) { 2546 upipe->nexttoggle = 1; 2547 next = uxfer->ux_data; 2548 uhci_reset_std_chain(sc, xfer, len, isread, 2549 &upipe->nexttoggle, &dataend); 2550 dataend->link.std = stat; 2551 dataend->td.td_link = htole32(stat->physaddr | UHCI_PTR_TD); 2552 usb_syncmem(&dataend->dma, 2553 dataend->offs + offsetof(uhci_td_t, td_link), 2554 sizeof(dataend->td.td_link), 2555 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2556 } else { 2557 next = stat; 2558 } 2559 2560 const uint32_t status = UHCI_TD_ZERO_ACTLEN( 2561 UHCI_TD_SET_ERRCNT(3) | 2562 UHCI_TD_ACTIVE | 2563 (dev->ud_speed == USB_SPEED_LOW ? 
UHCI_TD_LS : 0) 2564 ); 2565 setup->link.std = next; 2566 setup->td.td_link = htole32(next->physaddr | UHCI_PTR_TD); 2567 setup->td.td_status = htole32(status); 2568 setup->td.td_token = htole32(UHCI_TD_SETUP(sizeof(*req), endpt, addr)); 2569 setup->td.td_buffer = htole32(DMAADDR(&upipe->ctrl.reqdma, 0)); 2570 2571 usb_syncmem(&setup->dma, setup->offs, sizeof(setup->td), 2572 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2573 2574 stat->link.std = NULL; 2575 stat->td.td_link = htole32(UHCI_PTR_T); 2576 stat->td.td_status = htole32(status | UHCI_TD_IOC); 2577 stat->td.td_token = 2578 htole32(isread ? UHCI_TD_OUT(0, endpt, addr, 1) : 2579 UHCI_TD_IN (0, endpt, addr, 1)); 2580 stat->td.td_buffer = htole32(0); 2581 usb_syncmem(&stat->dma, stat->offs, sizeof(stat->td), 2582 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2583 2584 #ifdef UHCI_DEBUG 2585 if (uhcidebug >= 10) { 2586 DPRINTF("--- dump start ---", 0, 0, 0, 0); 2587 DPRINTF("before transfer", 0, 0, 0, 0); 2588 uhci_dump_tds(setup); 2589 DPRINTF("--- dump end ---", 0, 0, 0, 0); 2590 } 2591 #endif 2592 2593 /* Set up interrupt info. */ 2594 uxfer->ux_setup = setup; 2595 uxfer->ux_stat = stat; 2596 KASSERT(uxfer->ux_isdone); 2597 #ifdef DIAGNOSTIC 2598 uxfer->ux_isdone = false; 2599 #endif 2600 2601 sqh->elink = setup; 2602 sqh->qh.qh_elink = htole32(setup->physaddr | UHCI_PTR_TD); 2603 /* uhci_add_?s_ctrl() will do usb_syncmem(sqh) */ 2604 2605 if (dev->ud_speed == USB_SPEED_LOW) 2606 uhci_add_ls_ctrl(sc, sqh); 2607 else 2608 uhci_add_hs_ctrl(sc, sqh); 2609 uhci_add_intr_list(sc, uxfer); 2610 #ifdef UHCI_DEBUG 2611 if (uhcidebug >= 12) { 2612 uhci_soft_td_t *std; 2613 uhci_soft_qh_t *xqh; 2614 uhci_soft_qh_t *sxqh; 2615 int maxqh = 0; 2616 uhci_physaddr_t link; 2617 DPRINTFN(12, "--- dump start ---", 0, 0, 0, 0); 2618 DPRINTFN(12, "follow from [0]", 0, 0, 0, 0); 2619 for (std = sc->sc_vframes[0].htd, link = 0; 2620 (link & UHCI_PTR_QH) == 0; 2621 std = std->link.std) { 2622 link = le32toh(std->td.td_link); 2623 uhci_dump_td(std); 2624 } 2625 sxqh = (uhci_soft_qh_t *)std; 2626 uhci_dump_qh(sxqh); 2627 for (xqh = sxqh; 2628 xqh != NULL; 2629 xqh = (maxqh++ == 5 || xqh->hlink == sxqh || 2630 xqh->hlink == xqh ? 
NULL : xqh->hlink)) { 2631 uhci_dump_qh(xqh); 2632 } 2633 DPRINTFN(12, "Enqueued QH:", 0, 0, 0, 0); 2634 uhci_dump_qh(sqh); 2635 uhci_dump_tds(sqh->elink); 2636 DPRINTF("--- dump end ---", 0, 0, 0, 0); 2637 } 2638 #endif 2639 usbd_xfer_schedule_timeout(xfer); 2640 xfer->ux_status = USBD_IN_PROGRESS; 2641 if (!polling) 2642 mutex_exit(&sc->sc_lock); 2643 2644 return USBD_IN_PROGRESS; 2645 } 2646 2647 int 2648 uhci_device_intr_init(struct usbd_xfer *xfer) 2649 { 2650 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2651 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 2652 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc; 2653 int endpt = ed->bEndpointAddress; 2654 int isread = UE_GET_DIR(endpt) == UE_DIR_IN; 2655 int len = xfer->ux_bufsize; 2656 int err; 2657 2658 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2659 2660 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, 2661 xfer->ux_length, xfer->ux_flags, 0); 2662 2663 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST)); 2664 KASSERT(len != 0); 2665 2666 ux->ux_type = UX_INTR; 2667 ux->ux_nstd = 0; 2668 err = uhci_alloc_std_chain(sc, xfer, len, isread, &ux->ux_stdstart); 2669 2670 return err; 2671 } 2672 2673 Static void 2674 uhci_device_intr_fini(struct usbd_xfer *xfer) 2675 { 2676 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2677 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 2678 2679 KASSERT(ux->ux_type == UX_INTR); 2680 2681 if (ux->ux_nstd) { 2682 uhci_free_stds(sc, ux); 2683 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd); 2684 } 2685 } 2686 2687 usbd_status 2688 uhci_device_intr_transfer(struct usbd_xfer *xfer) 2689 { 2690 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2691 usbd_status err; 2692 2693 /* Insert last in queue. */ 2694 mutex_enter(&sc->sc_lock); 2695 err = usb_insert_transfer(xfer); 2696 mutex_exit(&sc->sc_lock); 2697 if (err) 2698 return err; 2699 2700 /* 2701 * Pipe isn't running (otherwise err would be USBD_INPROG), 2702 * so start it first. 
2703 */ 2704 return uhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 2705 } 2706 2707 usbd_status 2708 uhci_device_intr_start(struct usbd_xfer *xfer) 2709 { 2710 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 2711 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 2712 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2713 uhci_soft_td_t *data, *dataend; 2714 uhci_soft_qh_t *sqh; 2715 const bool polling = sc->sc_bus.ub_usepolling; 2716 int isread, endpt; 2717 int i; 2718 2719 if (sc->sc_dying) 2720 return USBD_IOERROR; 2721 2722 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2723 2724 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, 2725 xfer->ux_length, xfer->ux_flags, 0); 2726 2727 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST)); 2728 KASSERT(xfer->ux_length <= xfer->ux_bufsize); 2729 2730 endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress; 2731 isread = UE_GET_DIR(endpt) == UE_DIR_IN; 2732 2733 data = ux->ux_stdstart; 2734 2735 KASSERT(ux->ux_isdone); 2736 #ifdef DIAGNOSTIC 2737 ux->ux_isdone = false; 2738 #endif 2739 2740 /* Take lock to protect nexttoggle */ 2741 if (!polling) 2742 mutex_enter(&sc->sc_lock); 2743 uhci_reset_std_chain(sc, xfer, xfer->ux_length, isread, 2744 &upipe->nexttoggle, &dataend); 2745 2746 dataend->td.td_status |= htole32(UHCI_TD_IOC); 2747 usb_syncmem(&dataend->dma, 2748 dataend->offs + offsetof(uhci_td_t, td_status), 2749 sizeof(dataend->td.td_status), 2750 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2751 ux->ux_stdend = dataend; 2752 2753 #ifdef UHCI_DEBUG 2754 if (uhcidebug >= 10) { 2755 DPRINTF("--- dump start ---", 0, 0, 0, 0); 2756 uhci_dump_tds(data); 2757 uhci_dump_qh(upipe->intr.qhs[0]); 2758 DPRINTF("--- dump end ---", 0, 0, 0, 0); 2759 } 2760 #endif 2761 2762 DPRINTFN(10, "qhs[0]=%#jx", (uintptr_t)upipe->intr.qhs[0], 0, 0, 0); 2763 for (i = 0; i < upipe->intr.npoll; i++) { 2764 sqh = upipe->intr.qhs[i]; 2765 sqh->elink = data; 2766 sqh->qh.qh_elink = htole32(data->physaddr | UHCI_PTR_TD); 2767 usb_syncmem(&sqh->dma, 2768 sqh->offs + offsetof(uhci_qh_t, qh_elink), 2769 sizeof(sqh->qh.qh_elink), 2770 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2771 } 2772 uhci_add_intr_list(sc, ux); 2773 xfer->ux_status = USBD_IN_PROGRESS; 2774 if (!polling) 2775 mutex_exit(&sc->sc_lock); 2776 2777 #ifdef UHCI_DEBUG 2778 if (uhcidebug >= 10) { 2779 DPRINTF("--- dump start ---", 0, 0, 0, 0); 2780 uhci_dump_tds(data); 2781 uhci_dump_qh(upipe->intr.qhs[0]); 2782 DPRINTF("--- dump end ---", 0, 0, 0, 0); 2783 } 2784 #endif 2785 2786 return USBD_IN_PROGRESS; 2787 } 2788 2789 /* Abort a device control request. */ 2790 void 2791 uhci_device_ctrl_abort(struct usbd_xfer *xfer) 2792 { 2793 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer); 2794 2795 KASSERT(mutex_owned(&sc->sc_lock)); 2796 2797 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2798 usbd_xfer_abort(xfer); 2799 } 2800 2801 /* Close a device control pipe. */ 2802 void 2803 uhci_device_ctrl_close(struct usbd_pipe *pipe) 2804 { 2805 uhci_softc_t *sc = UHCI_PIPE2SC(pipe); 2806 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe); 2807 2808 uhci_free_sqh(sc, upipe->ctrl.sqh); 2809 uhci_free_std_locked(sc, upipe->ctrl.setup); 2810 uhci_free_std_locked(sc, upipe->ctrl.stat); 2811 2812 usb_freemem(&sc->sc_bus, &upipe->ctrl.reqdma); 2813 } 2814 2815 /* Abort a device interrupt request. 
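 * As with the bulk and control cases this just hands the xfer to
 * usbd_xfer_abort(), which does the bookkeeping and calls back into
 * uhci_abortx() to detach the transfer from the hardware.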
*/ 2816 void 2817 uhci_device_intr_abort(struct usbd_xfer *xfer) 2818 { 2819 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer); 2820 2821 KASSERT(mutex_owned(&sc->sc_lock)); 2822 KASSERT(xfer->ux_pipe->up_intrxfer == xfer); 2823 2824 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2825 DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0); 2826 2827 usbd_xfer_abort(xfer); 2828 } 2829 2830 /* Close a device interrupt pipe. */ 2831 void 2832 uhci_device_intr_close(struct usbd_pipe *pipe) 2833 { 2834 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe); 2835 uhci_softc_t *sc = UHCI_PIPE2SC(pipe); 2836 int i, npoll; 2837 2838 KASSERT(mutex_owned(&sc->sc_lock)); 2839 2840 /* Unlink descriptors from controller data structures. */ 2841 npoll = upipe->intr.npoll; 2842 for (i = 0; i < npoll; i++) 2843 uhci_remove_intr(sc, upipe->intr.qhs[i]); 2844 2845 /* 2846 * We now have to wait for any activity on the physical 2847 * descriptors to stop. 2848 */ 2849 usb_delay_ms_locked(&sc->sc_bus, 2, &sc->sc_lock); 2850 2851 for (i = 0; i < npoll; i++) 2852 uhci_free_sqh(sc, upipe->intr.qhs[i]); 2853 kmem_free(upipe->intr.qhs, npoll * sizeof(uhci_soft_qh_t *)); 2854 } 2855 2856 int 2857 uhci_device_isoc_init(struct usbd_xfer *xfer) 2858 { 2859 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 2860 2861 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST)); 2862 KASSERT(xfer->ux_nframes != 0); 2863 KASSERT(ux->ux_isdone); 2864 2865 ux->ux_type = UX_ISOC; 2866 return 0; 2867 } 2868 2869 Static void 2870 uhci_device_isoc_fini(struct usbd_xfer *xfer) 2871 { 2872 struct uhci_xfer *ux __diagused = UHCI_XFER2UXFER(xfer); 2873 2874 KASSERT(ux->ux_type == UX_ISOC); 2875 } 2876 2877 usbd_status 2878 uhci_device_isoc_transfer(struct usbd_xfer *xfer) 2879 { 2880 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2881 usbd_status err __diagused; 2882 2883 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2884 DPRINTFN(5, "xfer=%#jx", (uintptr_t)xfer, 0, 0, 0); 2885 2886 /* Put it on our queue, */ 2887 mutex_enter(&sc->sc_lock); 2888 err = usb_insert_transfer(xfer); 2889 mutex_exit(&sc->sc_lock); 2890 2891 KASSERT(err == USBD_NORMAL_COMPLETION); 2892 2893 /* insert into schedule, */ 2894 2895 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 2896 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 2897 struct isoc *isoc = &upipe->isoc; 2898 uhci_soft_td_t *std = NULL; 2899 uint32_t buf, len, status, offs; 2900 int i, next, nframes; 2901 int rd = UE_GET_DIR(upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress) == UE_DIR_IN; 2902 2903 DPRINTFN(5, "used=%jd next=%jd xfer=%#jx nframes=%jd", 2904 isoc->inuse, isoc->next, (uintptr_t)xfer, xfer->ux_nframes); 2905 2906 if (sc->sc_dying) 2907 return USBD_IOERROR; 2908 2909 if (xfer->ux_status == USBD_IN_PROGRESS) { 2910 /* This request has already been entered into the frame list */ 2911 printf("%s: xfer=%p in frame list\n", __func__, xfer); 2912 /* XXX */ 2913 } 2914 2915 #ifdef DIAGNOSTIC 2916 if (isoc->inuse >= UHCI_VFRAMELIST_COUNT) 2917 printf("%s: overflow!\n", __func__); 2918 #endif 2919 2920 KASSERT(xfer->ux_nframes != 0); 2921 2922 mutex_enter(&sc->sc_lock); 2923 next = isoc->next; 2924 if (next == -1) { 2925 /* Not in use yet, schedule it a few frames ahead. 
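 * "A few frames ahead" (FRNUM + 3) leaves enough time to fill in
 * the frame TDs below before the controller reaches that frame.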
*/ 2926 next = (UREAD2(sc, UHCI_FRNUM) + 3) % UHCI_VFRAMELIST_COUNT; 2927 DPRINTFN(2, "start next=%jd", next, 0, 0, 0); 2928 } 2929 2930 xfer->ux_status = USBD_IN_PROGRESS; 2931 ux->ux_curframe = next; 2932 2933 buf = DMAADDR(&xfer->ux_dmabuf, 0); 2934 offs = 0; 2935 status = UHCI_TD_ZERO_ACTLEN(UHCI_TD_SET_ERRCNT(0) | 2936 UHCI_TD_ACTIVE | 2937 UHCI_TD_IOS); 2938 nframes = xfer->ux_nframes; 2939 for (i = 0; i < nframes; i++) { 2940 std = isoc->stds[next]; 2941 if (++next >= UHCI_VFRAMELIST_COUNT) 2942 next = 0; 2943 len = xfer->ux_frlengths[i]; 2944 std->td.td_buffer = htole32(buf); 2945 usb_syncmem(&xfer->ux_dmabuf, offs, len, 2946 rd ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 2947 if (i == nframes - 1) 2948 status |= UHCI_TD_IOC; 2949 std->td.td_status = htole32(status); 2950 std->td.td_token &= htole32(~UHCI_TD_MAXLEN_MASK); 2951 std->td.td_token |= htole32(UHCI_TD_SET_MAXLEN(len)); 2952 usb_syncmem(&std->dma, std->offs, sizeof(std->td), 2953 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2954 #ifdef UHCI_DEBUG 2955 if (uhcidebug >= 5) { 2956 DPRINTF("--- dump start ---", 0, 0, 0, 0); 2957 DPRINTF("TD %jd", i, 0, 0, 0); 2958 uhci_dump_td(std); 2959 DPRINTF("--- dump end ---", 0, 0, 0, 0); 2960 } 2961 #endif 2962 buf += len; 2963 offs += len; 2964 } 2965 isoc->next = next; 2966 isoc->inuse += xfer->ux_nframes; 2967 2968 /* Set up interrupt info. */ 2969 ux->ux_stdstart = std; 2970 ux->ux_stdend = std; 2971 2972 KASSERT(ux->ux_isdone); 2973 #ifdef DIAGNOSTIC 2974 ux->ux_isdone = false; 2975 #endif 2976 uhci_add_intr_list(sc, ux); 2977 2978 mutex_exit(&sc->sc_lock); 2979 2980 return USBD_IN_PROGRESS; 2981 } 2982 2983 void 2984 uhci_device_isoc_abort(struct usbd_xfer *xfer) 2985 { 2986 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2987 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 2988 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 2989 uhci_soft_td_t **stds = upipe->isoc.stds; 2990 uhci_soft_td_t *std; 2991 int i, n, nframes, maxlen, len; 2992 2993 KASSERT(mutex_owned(&sc->sc_lock)); 2994 2995 /* Transfer is already done. */ 2996 if (xfer->ux_status != USBD_NOT_STARTED && 2997 xfer->ux_status != USBD_IN_PROGRESS) { 2998 return; 2999 } 3000 3001 /* Give xfer the requested abort code. */ 3002 xfer->ux_status = USBD_CANCELLED; 3003 3004 /* make hardware ignore it, */ 3005 nframes = xfer->ux_nframes; 3006 n = ux->ux_curframe; 3007 maxlen = 0; 3008 for (i = 0; i < nframes; i++) { 3009 std = stds[n]; 3010 usb_syncmem(&std->dma, 3011 std->offs + offsetof(uhci_td_t, td_status), 3012 sizeof(std->td.td_status), 3013 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 3014 std->td.td_status &= htole32(~(UHCI_TD_ACTIVE | UHCI_TD_IOC)); 3015 usb_syncmem(&std->dma, 3016 std->offs + offsetof(uhci_td_t, td_status), 3017 sizeof(std->td.td_status), 3018 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 3019 usb_syncmem(&std->dma, 3020 std->offs + offsetof(uhci_td_t, td_token), 3021 sizeof(std->td.td_token), 3022 BUS_DMASYNC_POSTWRITE); 3023 len = UHCI_TD_GET_MAXLEN(le32toh(std->td.td_token)); 3024 if (len > maxlen) 3025 maxlen = len; 3026 if (++n >= UHCI_VFRAMELIST_COUNT) 3027 n = 0; 3028 } 3029 3030 /* and wait until we are sure the hardware has finished. */ 3031 delay(maxlen); 3032 3033 #ifdef DIAGNOSTIC 3034 ux->ux_isdone = true; 3035 #endif 3036 /* Remove from interrupt list. */ 3037 uhci_del_intr_list(sc, ux); 3038 3039 /* Run callback. 
*/ 3040 usb_transfer_complete(xfer); 3041 3042 KASSERT(mutex_owned(&sc->sc_lock)); 3043 } 3044 3045 void 3046 uhci_device_isoc_close(struct usbd_pipe *pipe) 3047 { 3048 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe); 3049 uhci_softc_t *sc = UHCI_PIPE2SC(pipe); 3050 uhci_soft_td_t *std, *vstd; 3051 struct isoc *isoc; 3052 int i; 3053 3054 KASSERT(mutex_owned(&sc->sc_lock)); 3055 3056 /* 3057 * Make sure all TDs are marked as inactive. 3058 * Wait for completion. 3059 * Unschedule. 3060 * Deallocate. 3061 */ 3062 isoc = &upipe->isoc; 3063 3064 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) { 3065 std = isoc->stds[i]; 3066 usb_syncmem(&std->dma, 3067 std->offs + offsetof(uhci_td_t, td_status), 3068 sizeof(std->td.td_status), 3069 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 3070 std->td.td_status &= htole32(~UHCI_TD_ACTIVE); 3071 usb_syncmem(&std->dma, 3072 std->offs + offsetof(uhci_td_t, td_status), 3073 sizeof(std->td.td_status), 3074 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 3075 } 3076 /* wait for completion */ 3077 usb_delay_ms_locked(&sc->sc_bus, 2, &sc->sc_lock); 3078 3079 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) { 3080 std = isoc->stds[i]; 3081 for (vstd = sc->sc_vframes[i].htd; 3082 vstd != NULL && vstd->link.std != std; 3083 vstd = vstd->link.std) 3084 ; 3085 if (vstd == NULL) { 3086 /*panic*/ 3087 printf("%s: %p not found\n", __func__, std); 3088 mutex_exit(&sc->sc_lock); 3089 return; 3090 } 3091 vstd->link = std->link; 3092 usb_syncmem(&std->dma, 3093 std->offs + offsetof(uhci_td_t, td_link), 3094 sizeof(std->td.td_link), 3095 BUS_DMASYNC_POSTWRITE); 3096 vstd->td.td_link = std->td.td_link; 3097 usb_syncmem(&vstd->dma, 3098 vstd->offs + offsetof(uhci_td_t, td_link), 3099 sizeof(vstd->td.td_link), 3100 BUS_DMASYNC_PREWRITE); 3101 uhci_free_std_locked(sc, std); 3102 } 3103 3104 kmem_free(isoc->stds, UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *)); 3105 } 3106 3107 usbd_status 3108 uhci_setup_isoc(struct usbd_pipe *pipe) 3109 { 3110 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe); 3111 uhci_softc_t *sc = UHCI_PIPE2SC(pipe); 3112 int addr = upipe->pipe.up_dev->ud_addr; 3113 int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress; 3114 int rd = UE_GET_DIR(endpt) == UE_DIR_IN; 3115 uhci_soft_td_t *std, *vstd; 3116 uint32_t token; 3117 struct isoc *isoc; 3118 int i; 3119 3120 isoc = &upipe->isoc; 3121 3122 isoc->stds = kmem_alloc( 3123 UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *), KM_SLEEP); 3124 if (isoc->stds == NULL) 3125 return USBD_NOMEM; 3126 3127 token = rd ? UHCI_TD_IN (0, endpt, addr, 0) : 3128 UHCI_TD_OUT(0, endpt, addr, 0); 3129 3130 /* Allocate the TDs and mark as inactive; */ 3131 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) { 3132 std = uhci_alloc_std(sc); 3133 if (std == 0) 3134 goto bad; 3135 std->td.td_status = htole32(UHCI_TD_IOS); /* iso, inactive */ 3136 std->td.td_token = htole32(token); 3137 usb_syncmem(&std->dma, std->offs, sizeof(std->td), 3138 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 3139 isoc->stds[i] = std; 3140 } 3141 3142 mutex_enter(&sc->sc_lock); 3143 3144 /* Insert TDs into schedule. 
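 * One inactive TD per virtual frame is linked in directly behind the
 * frame's head TD; transfers later activate just the TDs for the
 * frames they use.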
*/ 3145 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) { 3146 std = isoc->stds[i]; 3147 vstd = sc->sc_vframes[i].htd; 3148 usb_syncmem(&vstd->dma, 3149 vstd->offs + offsetof(uhci_td_t, td_link), 3150 sizeof(vstd->td.td_link), 3151 BUS_DMASYNC_POSTWRITE); 3152 std->link = vstd->link; 3153 std->td.td_link = vstd->td.td_link; 3154 usb_syncmem(&std->dma, 3155 std->offs + offsetof(uhci_td_t, td_link), 3156 sizeof(std->td.td_link), 3157 BUS_DMASYNC_PREWRITE); 3158 vstd->link.std = std; 3159 vstd->td.td_link = htole32(std->physaddr | UHCI_PTR_TD); 3160 usb_syncmem(&vstd->dma, 3161 vstd->offs + offsetof(uhci_td_t, td_link), 3162 sizeof(vstd->td.td_link), 3163 BUS_DMASYNC_PREWRITE); 3164 } 3165 mutex_exit(&sc->sc_lock); 3166 3167 isoc->next = -1; 3168 isoc->inuse = 0; 3169 3170 return USBD_NORMAL_COMPLETION; 3171 3172 bad: 3173 while (--i >= 0) 3174 uhci_free_std(sc, isoc->stds[i]); 3175 kmem_free(isoc->stds, UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *)); 3176 return USBD_NOMEM; 3177 } 3178 3179 void 3180 uhci_device_isoc_done(struct usbd_xfer *xfer) 3181 { 3182 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 3183 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 3184 int i, offs; 3185 int rd = UE_GET_DIR(upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress) == UE_DIR_IN; 3186 3187 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3188 DPRINTFN(4, "length=%jd, ux_state=0x%08jx", 3189 xfer->ux_actlen, xfer->ux_state, 0, 0); 3190 3191 #ifdef DIAGNOSTIC 3192 if (ux->ux_stdend == NULL) { 3193 printf("%s: xfer=%p stdend==NULL\n", __func__, xfer); 3194 #ifdef UHCI_DEBUG 3195 DPRINTF("--- dump start ---", 0, 0, 0, 0); 3196 uhci_dump_ii(ux); 3197 DPRINTF("--- dump end ---", 0, 0, 0, 0); 3198 #endif 3199 return; 3200 } 3201 #endif 3202 3203 /* Turn off the interrupt since it is active even if the TD is not. */ 3204 usb_syncmem(&ux->ux_stdend->dma, 3205 ux->ux_stdend->offs + offsetof(uhci_td_t, td_status), 3206 sizeof(ux->ux_stdend->td.td_status), 3207 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 3208 ux->ux_stdend->td.td_status &= htole32(~UHCI_TD_IOC); 3209 usb_syncmem(&ux->ux_stdend->dma, 3210 ux->ux_stdend->offs + offsetof(uhci_td_t, td_status), 3211 sizeof(ux->ux_stdend->td.td_status), 3212 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 3213 3214 offs = 0; 3215 for (i = 0; i < xfer->ux_nframes; i++) { 3216 usb_syncmem(&xfer->ux_dmabuf, offs, xfer->ux_frlengths[i], 3217 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 3218 offs += xfer->ux_frlengths[i]; 3219 } 3220 } 3221 3222 void 3223 uhci_device_intr_done(struct usbd_xfer *xfer) 3224 { 3225 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer); 3226 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 3227 uhci_soft_qh_t *sqh; 3228 int i, npoll; 3229 3230 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3231 DPRINTFN(5, "length=%jd", xfer->ux_actlen, 0, 0, 0); 3232 3233 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); 3234 3235 npoll = upipe->intr.npoll; 3236 for (i = 0; i < npoll; i++) { 3237 sqh = upipe->intr.qhs[i]; 3238 sqh->elink = NULL; 3239 sqh->qh.qh_elink = htole32(UHCI_PTR_T); 3240 usb_syncmem(&sqh->dma, 3241 sqh->offs + offsetof(uhci_qh_t, qh_elink), 3242 sizeof(sqh->qh.qh_elink), 3243 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 3244 } 3245 const int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress; 3246 const bool isread = UE_GET_DIR(endpt) == UE_DIR_IN; 3247 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length, 3248 isread ? 
BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 3249 } 3250 3251 /* Deallocate request data structures */ 3252 void 3253 uhci_device_ctrl_done(struct usbd_xfer *xfer) 3254 { 3255 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 3256 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 3257 int len = UGETW(xfer->ux_request.wLength); 3258 int isread = (xfer->ux_request.bmRequestType & UT_READ); 3259 3260 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); 3261 3262 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3263 3264 KASSERT(xfer->ux_rqflags & URQ_REQUEST); 3265 3266 /* XXXNH move to uhci_idone??? */ 3267 if (upipe->pipe.up_dev->ud_speed == USB_SPEED_LOW) 3268 uhci_remove_ls_ctrl(sc, upipe->ctrl.sqh); 3269 else 3270 uhci_remove_hs_ctrl(sc, upipe->ctrl.sqh); 3271 3272 if (len) { 3273 usb_syncmem(&xfer->ux_dmabuf, 0, len, 3274 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 3275 } 3276 usb_syncmem(&upipe->ctrl.reqdma, 0, 3277 sizeof(usb_device_request_t), BUS_DMASYNC_POSTWRITE); 3278 3279 DPRINTF("length=%jd", xfer->ux_actlen, 0, 0, 0); 3280 } 3281 3282 /* Deallocate request data structures */ 3283 void 3284 uhci_device_bulk_done(struct usbd_xfer *xfer) 3285 { 3286 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 3287 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 3288 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc; 3289 int endpt = ed->bEndpointAddress; 3290 int isread = UE_GET_DIR(endpt) == UE_DIR_IN; 3291 3292 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3293 DPRINTFN(5, "xfer=%#jx sc=%#jx upipe=%#jx", (uintptr_t)xfer, 3294 (uintptr_t)sc, (uintptr_t)upipe, 0); 3295 3296 KASSERT(mutex_owned(&sc->sc_lock)); 3297 3298 uhci_remove_bulk(sc, upipe->bulk.sqh); 3299 3300 if (xfer->ux_length) { 3301 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length, 3302 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 3303 } 3304 3305 DPRINTFN(5, "length=%jd", xfer->ux_actlen, 0, 0, 0); 3306 } 3307 3308 /* Add interrupt QH, called with vflock. */ 3309 void 3310 uhci_add_intr(uhci_softc_t *sc, uhci_soft_qh_t *sqh) 3311 { 3312 struct uhci_vframe *vf = &sc->sc_vframes[sqh->pos]; 3313 uhci_soft_qh_t *eqh; 3314 3315 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3316 DPRINTFN(4, "n=%jd sqh=%#jx", sqh->pos, (uintptr_t)sqh, 0, 0); 3317 3318 eqh = vf->eqh; 3319 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink), 3320 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE); 3321 sqh->hlink = eqh->hlink; 3322 sqh->qh.qh_hlink = eqh->qh.qh_hlink; 3323 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink), 3324 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE); 3325 eqh->hlink = sqh; 3326 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH); 3327 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink), 3328 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE); 3329 vf->eqh = sqh; 3330 vf->bandwidth++; 3331 } 3332 3333 /* Remove interrupt QH. 
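 * The mirror image of uhci_add_intr(): terminate the element link and
 * give the controller time to notice, then unlink the QH from its
 * predecessor in the frame's horizontal chain and release the
 * bandwidth it accounted for.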
*/ 3334 void 3335 uhci_remove_intr(uhci_softc_t *sc, uhci_soft_qh_t *sqh) 3336 { 3337 struct uhci_vframe *vf = &sc->sc_vframes[sqh->pos]; 3338 uhci_soft_qh_t *pqh; 3339 3340 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3341 DPRINTFN(4, "n=%jd sqh=%#jx", sqh->pos, (uintptr_t)sqh, 0, 0); 3342 3343 /* See comment in uhci_remove_ctrl() */ 3344 3345 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink), 3346 sizeof(sqh->qh.qh_elink), 3347 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 3348 if (!(sqh->qh.qh_elink & htole32(UHCI_PTR_T))) { 3349 sqh->qh.qh_elink = htole32(UHCI_PTR_T); 3350 usb_syncmem(&sqh->dma, 3351 sqh->offs + offsetof(uhci_qh_t, qh_elink), 3352 sizeof(sqh->qh.qh_elink), 3353 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 3354 delay(UHCI_QH_REMOVE_DELAY); 3355 } 3356 3357 pqh = uhci_find_prev_qh(vf->hqh, sqh); 3358 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink), 3359 sizeof(sqh->qh.qh_hlink), 3360 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 3361 pqh->hlink = sqh->hlink; 3362 pqh->qh.qh_hlink = sqh->qh.qh_hlink; 3363 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink), 3364 sizeof(pqh->qh.qh_hlink), 3365 BUS_DMASYNC_PREWRITE); 3366 delay(UHCI_QH_REMOVE_DELAY); 3367 if (vf->eqh == sqh) 3368 vf->eqh = pqh; 3369 vf->bandwidth--; 3370 } 3371 3372 usbd_status 3373 uhci_device_setintr(uhci_softc_t *sc, struct uhci_pipe *upipe, int ival) 3374 { 3375 uhci_soft_qh_t *sqh; 3376 int i, npoll; 3377 u_int bestbw, bw, bestoffs, offs; 3378 3379 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3380 DPRINTFN(2, "pipe=%#jx", (uintptr_t)upipe, 0, 0, 0); 3381 if (ival == 0) { 3382 printf("%s: 0 interval\n", __func__); 3383 return USBD_INVAL; 3384 } 3385 3386 if (ival > UHCI_VFRAMELIST_COUNT) 3387 ival = UHCI_VFRAMELIST_COUNT; 3388 npoll = howmany(UHCI_VFRAMELIST_COUNT, ival); 3389 DPRINTF("ival=%jd npoll=%jd", ival, npoll, 0, 0); 3390 3391 upipe->intr.npoll = npoll; 3392 upipe->intr.qhs = 3393 kmem_alloc(npoll * sizeof(uhci_soft_qh_t *), KM_SLEEP); 3394 3395 /* 3396 * Figure out which offset in the schedule that has most 3397 * bandwidth left over. 3398 */ 3399 #define MOD(i) ((i) & (UHCI_VFRAMELIST_COUNT-1)) 3400 for (bestoffs = offs = 0, bestbw = ~0; offs < ival; offs++) { 3401 for (bw = i = 0; i < npoll; i++) 3402 bw += sc->sc_vframes[MOD(i * ival + offs)].bandwidth; 3403 if (bw < bestbw) { 3404 bestbw = bw; 3405 bestoffs = offs; 3406 } 3407 } 3408 DPRINTF("bw=%jd offs=%jd", bestbw, bestoffs, 0, 0); 3409 for (i = 0; i < npoll; i++) { 3410 upipe->intr.qhs[i] = sqh = uhci_alloc_sqh(sc); 3411 sqh->elink = NULL; 3412 sqh->qh.qh_elink = htole32(UHCI_PTR_T); 3413 usb_syncmem(&sqh->dma, 3414 sqh->offs + offsetof(uhci_qh_t, qh_elink), 3415 sizeof(sqh->qh.qh_elink), 3416 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 3417 sqh->pos = MOD(i * ival + bestoffs); 3418 } 3419 #undef MOD 3420 3421 mutex_enter(&sc->sc_lock); 3422 /* Enter QHs into the controller data structures. */ 3423 for (i = 0; i < npoll; i++) 3424 uhci_add_intr(sc, upipe->intr.qhs[i]); 3425 mutex_exit(&sc->sc_lock); 3426 3427 DPRINTFN(5, "returns %#jx", (uintptr_t)upipe, 0, 0, 0); 3428 3429 return USBD_NORMAL_COMPLETION; 3430 } 3431 3432 /* Open a new pipe. 
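 * Root hub endpoints are wired to the root hub methods; for real
 * devices the endpoint's transfer type selects the method table and
 * the per-pipe resources (control QH plus setup/status TDs and the
 * request DMA buffer, interrupt schedule slots, isochronous frame
 * TDs, or the bulk QH) are allocated here.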
*/ 3433 usbd_status 3434 uhci_open(struct usbd_pipe *pipe) 3435 { 3436 uhci_softc_t *sc = UHCI_PIPE2SC(pipe); 3437 struct usbd_bus *bus = pipe->up_dev->ud_bus; 3438 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe); 3439 usb_endpoint_descriptor_t *ed = pipe->up_endpoint->ue_edesc; 3440 usbd_status err = USBD_NOMEM; 3441 int ival; 3442 3443 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3444 DPRINTF("pipe=%#jx, addr=%jd, endpt=%jd (%jd)", 3445 (uintptr_t)pipe, pipe->up_dev->ud_addr, ed->bEndpointAddress, 3446 bus->ub_rhaddr); 3447 3448 if (sc->sc_dying) 3449 return USBD_IOERROR; 3450 3451 upipe->aborting = 0; 3452 /* toggle state needed for bulk endpoints */ 3453 upipe->nexttoggle = pipe->up_endpoint->ue_toggle; 3454 3455 if (pipe->up_dev->ud_addr == bus->ub_rhaddr) { 3456 switch (ed->bEndpointAddress) { 3457 case USB_CONTROL_ENDPOINT: 3458 pipe->up_methods = &roothub_ctrl_methods; 3459 break; 3460 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT: 3461 pipe->up_methods = &uhci_root_intr_methods; 3462 break; 3463 default: 3464 return USBD_INVAL; 3465 } 3466 } else { 3467 switch (ed->bmAttributes & UE_XFERTYPE) { 3468 case UE_CONTROL: 3469 pipe->up_methods = &uhci_device_ctrl_methods; 3470 upipe->ctrl.sqh = uhci_alloc_sqh(sc); 3471 if (upipe->ctrl.sqh == NULL) 3472 goto bad; 3473 upipe->ctrl.setup = uhci_alloc_std(sc); 3474 if (upipe->ctrl.setup == NULL) { 3475 uhci_free_sqh(sc, upipe->ctrl.sqh); 3476 goto bad; 3477 } 3478 upipe->ctrl.stat = uhci_alloc_std(sc); 3479 if (upipe->ctrl.stat == NULL) { 3480 uhci_free_sqh(sc, upipe->ctrl.sqh); 3481 uhci_free_std(sc, upipe->ctrl.setup); 3482 goto bad; 3483 } 3484 err = usb_allocmem(&sc->sc_bus, 3485 sizeof(usb_device_request_t), 3486 0, &upipe->ctrl.reqdma); 3487 if (err) { 3488 uhci_free_sqh(sc, upipe->ctrl.sqh); 3489 uhci_free_std(sc, upipe->ctrl.setup); 3490 uhci_free_std(sc, upipe->ctrl.stat); 3491 goto bad; 3492 } 3493 break; 3494 case UE_INTERRUPT: 3495 pipe->up_methods = &uhci_device_intr_methods; 3496 ival = pipe->up_interval; 3497 if (ival == USBD_DEFAULT_INTERVAL) 3498 ival = ed->bInterval; 3499 return uhci_device_setintr(sc, upipe, ival); 3500 case UE_ISOCHRONOUS: 3501 pipe->up_serialise = false; 3502 pipe->up_methods = &uhci_device_isoc_methods; 3503 return uhci_setup_isoc(pipe); 3504 case UE_BULK: 3505 pipe->up_methods = &uhci_device_bulk_methods; 3506 upipe->bulk.sqh = uhci_alloc_sqh(sc); 3507 if (upipe->bulk.sqh == NULL) 3508 goto bad; 3509 break; 3510 } 3511 } 3512 return USBD_NORMAL_COMPLETION; 3513 3514 bad: 3515 return USBD_NOMEM; 3516 } 3517 3518 /* 3519 * Data structures and routines to emulate the root hub. 3520 */ 3521 /* 3522 * The USB hub protocol requires that SET_FEATURE(PORT_RESET) also 3523 * enables the port, and also states that SET_FEATURE(PORT_ENABLE) 3524 * should not be used by the USB subsystem. As we cannot issue a 3525 * SET_FEATURE(PORT_ENABLE) externally, we must ensure that the port 3526 * will be enabled as part of the reset. 3527 * 3528 * On the VT83C572, the port cannot be successfully enabled until the 3529 * outstanding "port enable change" and "connection status change" 3530 * events have been reset. 
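 * uhci_portreset() below therefore clears any pending change bits and
 * retries setting PE for up to ten iterations before giving up.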
3531 */ 3532 Static usbd_status 3533 uhci_portreset(uhci_softc_t *sc, int index) 3534 { 3535 int lim, port, x; 3536 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3537 3538 if (index == 1) 3539 port = UHCI_PORTSC1; 3540 else if (index == 2) 3541 port = UHCI_PORTSC2; 3542 else 3543 return USBD_IOERROR; 3544 3545 x = URWMASK(UREAD2(sc, port)); 3546 UWRITE2(sc, port, x | UHCI_PORTSC_PR); 3547 3548 usb_delay_ms(&sc->sc_bus, USB_PORT_ROOT_RESET_DELAY); 3549 3550 DPRINTF("uhci port %jd reset, status0 = 0x%04jx", index, 3551 UREAD2(sc, port), 0, 0); 3552 3553 x = URWMASK(UREAD2(sc, port)); 3554 UWRITE2(sc, port, x & ~(UHCI_PORTSC_PR | UHCI_PORTSC_SUSP)); 3555 3556 delay(100); 3557 3558 DPRINTF("uhci port %jd reset, status1 = 0x%04jx", index, 3559 UREAD2(sc, port), 0, 0); 3560 3561 x = URWMASK(UREAD2(sc, port)); 3562 UWRITE2(sc, port, x | UHCI_PORTSC_PE); 3563 3564 for (lim = 10; --lim > 0;) { 3565 usb_delay_ms(&sc->sc_bus, USB_PORT_RESET_DELAY); 3566 3567 x = UREAD2(sc, port); 3568 DPRINTF("uhci port %jd iteration %ju, status = 0x%04jx", index, 3569 lim, x, 0); 3570 3571 if (!(x & UHCI_PORTSC_CCS)) { 3572 /* 3573 * No device is connected (or was disconnected 3574 * during reset). Consider the port reset. 3575 * The delay must be long enough to ensure on 3576 * the initial iteration that the device 3577 * connection will have been registered. 50ms 3578 * appears to be sufficient, but 20ms is not. 3579 */ 3580 DPRINTFN(3, "uhci port %jd loop %ju, device detached", 3581 index, lim, 0, 0); 3582 break; 3583 } 3584 3585 if (x & (UHCI_PORTSC_POEDC | UHCI_PORTSC_CSC)) { 3586 /* 3587 * Port enabled changed and/or connection 3588 * status changed were set. Reset either or 3589 * both raised flags (by writing a 1 to that 3590 * bit), and wait again for state to settle. 3591 */ 3592 UWRITE2(sc, port, URWMASK(x) | 3593 (x & (UHCI_PORTSC_POEDC | UHCI_PORTSC_CSC))); 3594 continue; 3595 } 3596 3597 if (x & UHCI_PORTSC_PE) 3598 /* Port is enabled */ 3599 break; 3600 3601 UWRITE2(sc, port, URWMASK(x) | UHCI_PORTSC_PE); 3602 } 3603 3604 DPRINTFN(3, "uhci port %jd reset, status2 = 0x%04jx", index, 3605 UREAD2(sc, port), 0, 0); 3606 3607 if (lim <= 0) { 3608 DPRINTF("uhci port %jd reset timed out", index, 3609 0, 0, 0); 3610 return USBD_TIMEOUT; 3611 } 3612 3613 sc->sc_isreset = 1; 3614 return USBD_NORMAL_COMPLETION; 3615 } 3616 3617 Static int 3618 uhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req, 3619 void *buf, int buflen) 3620 { 3621 uhci_softc_t *sc = UHCI_BUS2SC(bus); 3622 int port, x; 3623 int status, change, totlen = 0; 3624 uint16_t len, value, index; 3625 usb_port_status_t ps; 3626 usbd_status err; 3627 3628 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3629 3630 if (sc->sc_dying) 3631 return -1; 3632 3633 DPRINTF("type=0x%02jx request=%02jx", req->bmRequestType, 3634 req->bRequest, 0, 0); 3635 3636 len = UGETW(req->wLength); 3637 value = UGETW(req->wValue); 3638 index = UGETW(req->wIndex); 3639 3640 #define C(x,y) ((x) | ((y) << 8)) 3641 switch (C(req->bRequest, req->bmRequestType)) { 3642 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE): 3643 DPRINTF("wValue=0x%04jx", value, 0, 0, 0); 3644 if (len == 0) 3645 break; 3646 switch (value) { 3647 #define sd ((usb_string_descriptor_t *)buf) 3648 case C(2, UDESC_STRING): 3649 /* Product */ 3650 totlen = usb_makestrdesc(sd, len, "UHCI root hub"); 3651 break; 3652 #undef sd 3653 default: 3654 /* default from usbroothub */ 3655 return buflen; 3656 } 3657 break; 3658 3659 /* Hub requests */ 3660 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE): 3661 break; 3662 case 
C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): 3663 DPRINTF("UR_CLEAR_PORT_FEATURE port=%jd feature=%jd", index, 3664 value, 0, 0); 3665 if (index == 1) 3666 port = UHCI_PORTSC1; 3667 else if (index == 2) 3668 port = UHCI_PORTSC2; 3669 else { 3670 return -1; 3671 } 3672 switch(value) { 3673 case UHF_PORT_ENABLE: 3674 x = URWMASK(UREAD2(sc, port)); 3675 UWRITE2(sc, port, x & ~UHCI_PORTSC_PE); 3676 break; 3677 case UHF_PORT_SUSPEND: 3678 x = URWMASK(UREAD2(sc, port)); 3679 if (!(x & UHCI_PORTSC_SUSP)) /* not suspended */ 3680 break; 3681 UWRITE2(sc, port, x | UHCI_PORTSC_RD); 3682 /* see USB2 spec ch. 7.1.7.7 */ 3683 usb_delay_ms(&sc->sc_bus, 20); 3684 UWRITE2(sc, port, x & ~UHCI_PORTSC_SUSP); 3685 /* 10ms resume delay must be provided by caller */ 3686 break; 3687 case UHF_PORT_RESET: 3688 x = URWMASK(UREAD2(sc, port)); 3689 UWRITE2(sc, port, x & ~UHCI_PORTSC_PR); 3690 break; 3691 case UHF_C_PORT_CONNECTION: 3692 x = URWMASK(UREAD2(sc, port)); 3693 UWRITE2(sc, port, x | UHCI_PORTSC_CSC); 3694 break; 3695 case UHF_C_PORT_ENABLE: 3696 x = URWMASK(UREAD2(sc, port)); 3697 UWRITE2(sc, port, x | UHCI_PORTSC_POEDC); 3698 break; 3699 case UHF_C_PORT_OVER_CURRENT: 3700 x = URWMASK(UREAD2(sc, port)); 3701 UWRITE2(sc, port, x | UHCI_PORTSC_OCIC); 3702 break; 3703 case UHF_C_PORT_RESET: 3704 sc->sc_isreset = 0; 3705 break; 3706 case UHF_PORT_CONNECTION: 3707 case UHF_PORT_OVER_CURRENT: 3708 case UHF_PORT_POWER: 3709 case UHF_PORT_LOW_SPEED: 3710 case UHF_C_PORT_SUSPEND: 3711 default: 3712 return -1; 3713 } 3714 break; 3715 case C(UR_GET_BUS_STATE, UT_READ_CLASS_OTHER): 3716 if (index == 1) 3717 port = UHCI_PORTSC1; 3718 else if (index == 2) 3719 port = UHCI_PORTSC2; 3720 else { 3721 return -1; 3722 } 3723 if (len > 0) { 3724 *(uint8_t *)buf = 3725 UHCI_PORTSC_GET_LS(UREAD2(sc, port)); 3726 totlen = 1; 3727 } 3728 break; 3729 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE): 3730 if (len == 0) 3731 break; 3732 if ((value & 0xff) != 0) { 3733 return -1; 3734 } 3735 usb_hub_descriptor_t hubd; 3736 3737 totlen = uimin(buflen, sizeof(hubd)); 3738 memcpy(&hubd, buf, totlen); 3739 hubd.bNbrPorts = 2; 3740 memcpy(buf, &hubd, totlen); 3741 break; 3742 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE): 3743 if (len != 4) { 3744 return -1; 3745 } 3746 memset(buf, 0, len); 3747 totlen = len; 3748 break; 3749 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): 3750 if (index == 1) 3751 port = UHCI_PORTSC1; 3752 else if (index == 2) 3753 port = UHCI_PORTSC2; 3754 else { 3755 return -1; 3756 } 3757 if (len != 4) { 3758 return -1; 3759 } 3760 x = UREAD2(sc, port); 3761 status = change = 0; 3762 if (x & UHCI_PORTSC_CCS) 3763 status |= UPS_CURRENT_CONNECT_STATUS; 3764 if (x & UHCI_PORTSC_CSC) 3765 change |= UPS_C_CONNECT_STATUS; 3766 if (x & UHCI_PORTSC_PE) 3767 status |= UPS_PORT_ENABLED; 3768 if (x & UHCI_PORTSC_POEDC) 3769 change |= UPS_C_PORT_ENABLED; 3770 if (x & UHCI_PORTSC_OCI) 3771 status |= UPS_OVERCURRENT_INDICATOR; 3772 if (x & UHCI_PORTSC_OCIC) 3773 change |= UPS_C_OVERCURRENT_INDICATOR; 3774 if (x & UHCI_PORTSC_SUSP) 3775 status |= UPS_SUSPEND; 3776 if (x & UHCI_PORTSC_LSDA) 3777 status |= UPS_LOW_SPEED; 3778 status |= UPS_PORT_POWER; 3779 if (sc->sc_isreset) 3780 change |= UPS_C_PORT_RESET; 3781 USETW(ps.wPortStatus, status); 3782 USETW(ps.wPortChange, change); 3783 totlen = uimin(len, sizeof(ps)); 3784 memcpy(buf, &ps, totlen); 3785 break; 3786 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE): 3787 return -1; 3788 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE): 3789 break; 3790 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): 
/* Abort a root interrupt request. */
void
uhci_root_intr_abort(struct usbd_xfer *xfer)
{
	uhci_softc_t *sc = UHCI_XFER2SC(xfer);

	KASSERT(mutex_owned(&sc->sc_lock));
	KASSERT(xfer->ux_pipe->up_intrxfer == xfer);

	/*
	 * Try to stop the callout before it starts.  If we got in too
	 * late, too bad; but if the callout had yet to run and time
	 * out the xfer, cancel it ourselves.
	 */
	callout_stop(&sc->sc_poll_handle);
	if (sc->sc_intr_xfer == NULL)
		return;

	KASSERT(sc->sc_intr_xfer == xfer);
	KASSERT(xfer->ux_status == USBD_IN_PROGRESS);
	xfer->ux_status = USBD_CANCELLED;
#ifdef DIAGNOSTIC
	UHCI_XFER2UXFER(xfer)->ux_isdone = true;
#endif
	usb_transfer_complete(xfer);
}

usbd_status
uhci_root_intr_transfer(struct usbd_xfer *xfer)
{
	uhci_softc_t *sc = UHCI_XFER2SC(xfer);
	usbd_status err;

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mutex_exit(&sc->sc_lock);
	if (err)
		return err;

	/*
	 * Pipe isn't running (otherwise err would be USBD_INPROG), so
	 * start it first.
	 */
	return uhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
}

/* Start a transfer on the root interrupt pipe */
usbd_status
uhci_root_intr_start(struct usbd_xfer *xfer)
{
	struct usbd_pipe *pipe = xfer->ux_pipe;
	uhci_softc_t *sc = UHCI_PIPE2SC(pipe);
	unsigned int ival;
	const bool polling = sc->sc_bus.ub_usepolling;

	UHCIHIST_FUNC(); UHCIHIST_CALLED();
	DPRINTF("xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, xfer->ux_length,
	    xfer->ux_flags, 0);

	if (sc->sc_dying)
		return USBD_IOERROR;

	if (!polling)
		mutex_enter(&sc->sc_lock);

	KASSERT(sc->sc_intr_xfer == NULL);

	/* XXX temporary variable needed to avoid gcc3 warning */
	ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
	sc->sc_ival = mstohz(ival);
	callout_schedule(&sc->sc_poll_handle, sc->sc_ival);
	sc->sc_intr_xfer = xfer;
	xfer->ux_status = USBD_IN_PROGRESS;

	if (!polling)
		mutex_exit(&sc->sc_lock);

	return USBD_IN_PROGRESS;
}

/* Close the root interrupt pipe. */
void
uhci_root_intr_close(struct usbd_pipe *pipe)
{
	uhci_softc_t *sc __diagused = UHCI_PIPE2SC(pipe);
	UHCIHIST_FUNC(); UHCIHIST_CALLED();

	KASSERT(mutex_owned(&sc->sc_lock));

	/*
	 * The caller must arrange to have aborted the pipe already, so
	 * there can be no intr xfer in progress.  The callout may
	 * still be pending from a prior intr xfer -- if it has already
	 * fired, it will see there is nothing to do, and do nothing.
	 */
	KASSERT(sc->sc_intr_xfer == NULL);
	KASSERT(!callout_pending(&sc->sc_poll_handle));
}