1 /* $NetBSD: uhci.c,v 1.304 2020/07/07 10:02:17 skrll Exp $ */ 2 3 /* 4 * Copyright (c) 1998, 2004, 2011, 2012, 2016, 2020 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Lennart Augustsson (lennart@augustsson.net) at 9 * Carlstedt Research & Technology, Jared D. McNeill (jmcneill@invisible.ca), 10 * Matthew R. Green (mrg@eterna.com.au) and Nick Hudson. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * USB Universal Host Controller driver. 36 * Handles e.g. PIIX3 and PIIX4. 37 * 38 * UHCI spec: http://www.intel.com/technology/usb/spec.htm 39 * USB spec: http://www.usb.org/developers/docs/ 40 * PIIXn spec: ftp://download.intel.com/design/intarch/datashts/29055002.pdf 41 * ftp://download.intel.com/design/intarch/datashts/29056201.pdf 42 */ 43 44 #include <sys/cdefs.h> 45 __KERNEL_RCSID(0, "$NetBSD: uhci.c,v 1.304 2020/07/07 10:02:17 skrll Exp $"); 46 47 #ifdef _KERNEL_OPT 48 #include "opt_usb.h" 49 #endif 50 51 #include <sys/param.h> 52 53 #include <sys/bus.h> 54 #include <sys/cpu.h> 55 #include <sys/device.h> 56 #include <sys/kernel.h> 57 #include <sys/kmem.h> 58 #include <sys/mutex.h> 59 #include <sys/proc.h> 60 #include <sys/queue.h> 61 #include <sys/select.h> 62 #include <sys/sysctl.h> 63 #include <sys/systm.h> 64 65 #include <machine/endian.h> 66 67 #include <dev/usb/usb.h> 68 #include <dev/usb/usbdi.h> 69 #include <dev/usb/usbdivar.h> 70 #include <dev/usb/usb_mem.h> 71 72 #include <dev/usb/uhcireg.h> 73 #include <dev/usb/uhcivar.h> 74 #include <dev/usb/usbroothub.h> 75 #include <dev/usb/usbhist.h> 76 77 /* Use bandwidth reclamation for control transfers. Some devices choke on it. 
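 * Defining UHCI_CTL_LOOP below extends the same loop-back trick
 * (see uhci_add_loop()) to the control QHs as well; it is left
 * disabled here because of those devices.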
 */
/*#define UHCI_CTL_LOOP */

#ifdef UHCI_DEBUG
uhci_softc_t *thesc;
int uhcinoloop = 0;
#endif

#ifdef USB_DEBUG
#ifndef UHCI_DEBUG
#define uhcidebug 0
#else
static int uhcidebug = 0;

SYSCTL_SETUP(sysctl_hw_uhci_setup, "sysctl hw.uhci setup")
{
	int err;
	const struct sysctlnode *rnode;
	const struct sysctlnode *cnode;

	err = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "uhci",
	    SYSCTL_DESCR("uhci global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);

	if (err)
		goto fail;

	/* control debugging printfs */
	err = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &uhcidebug, sizeof(uhcidebug), CTL_CREATE, CTL_EOL);
	if (err)
		goto fail;

	return;
fail:
	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
}

#endif /* UHCI_DEBUG */
#endif /* USB_DEBUG */

#define DPRINTF(FMT,A,B,C,D)	USBHIST_LOGN(uhcidebug,1,FMT,A,B,C,D)
#define DPRINTFN(N,FMT,A,B,C,D)	USBHIST_LOGN(uhcidebug,N,FMT,A,B,C,D)
#define UHCIHIST_FUNC()		USBHIST_FUNC()
#define UHCIHIST_CALLED(name)	USBHIST_CALLED(uhcidebug)

/*
 * The UHCI controller is little endian, so on big endian machines
 * the data stored in memory needs to be swapped.
 */

struct uhci_pipe {
	struct usbd_pipe pipe;
	int nexttoggle;

	u_char aborting;
	struct usbd_xfer *abortstart, *abortend;

	/* Info needed for different pipe kinds. */
	union {
		/* Control pipe */
		struct {
			uhci_soft_qh_t *sqh;
			usb_dma_t reqdma;
			uhci_soft_td_t *setup;
			uhci_soft_td_t *stat;
		} ctrl;
		/* Interrupt pipe */
		struct {
			int npoll;
			uhci_soft_qh_t **qhs;
		} intr;
		/* Bulk pipe */
		struct {
			uhci_soft_qh_t *sqh;
		} bulk;
		/* Isochronous pipe */
		struct isoc {
			uhci_soft_td_t **stds;
			int next, inuse;
		} isoc;
	};
};

typedef TAILQ_HEAD(ux_completeq, uhci_xfer) ux_completeq_t;

Static void		uhci_globalreset(uhci_softc_t *);
Static usbd_status	uhci_portreset(uhci_softc_t*, int);
Static void		uhci_reset(uhci_softc_t *);
Static usbd_status	uhci_run(uhci_softc_t *, int, int);
Static uhci_soft_td_t	*uhci_alloc_std(uhci_softc_t *);
Static void		uhci_free_std(uhci_softc_t *, uhci_soft_td_t *);
Static void		uhci_free_std_locked(uhci_softc_t *, uhci_soft_td_t *);
Static uhci_soft_qh_t	*uhci_alloc_sqh(uhci_softc_t *);
Static void		uhci_free_sqh(uhci_softc_t *, uhci_soft_qh_t *);
#if 0
Static void		uhci_enter_ctl_q(uhci_softc_t *, uhci_soft_qh_t *,
			    uhci_intr_info_t *);
Static void		uhci_exit_ctl_q(uhci_softc_t *, uhci_soft_qh_t *);
#endif

#if 0
Static void		uhci_free_std_chain(uhci_softc_t *, uhci_soft_td_t *,
			    uhci_soft_td_t *);
#endif
Static int		uhci_alloc_std_chain(uhci_softc_t *, struct usbd_xfer *,
			    int, int, uhci_soft_td_t **);
Static void		uhci_free_stds(uhci_softc_t *, struct uhci_xfer *);

Static void		uhci_reset_std_chain(uhci_softc_t *, struct usbd_xfer *,
			    int, int, int *, uhci_soft_td_t **);

Static void		uhci_poll_hub(void *);
Static void		uhci_check_intr(uhci_softc_t *, struct uhci_xfer *,
			    ux_completeq_t *);
Static void		uhci_idone(struct uhci_xfer *, ux_completeq_t *);

Static void		uhci_abortx(struct usbd_xfer *);

Static void
uhci_add_ls_ctrl(uhci_softc_t *, uhci_soft_qh_t *); 200 Static void uhci_add_hs_ctrl(uhci_softc_t *, uhci_soft_qh_t *); 201 Static void uhci_add_bulk(uhci_softc_t *, uhci_soft_qh_t *); 202 Static void uhci_remove_ls_ctrl(uhci_softc_t *,uhci_soft_qh_t *); 203 Static void uhci_remove_hs_ctrl(uhci_softc_t *,uhci_soft_qh_t *); 204 Static void uhci_remove_bulk(uhci_softc_t *,uhci_soft_qh_t *); 205 Static void uhci_add_loop(uhci_softc_t *); 206 Static void uhci_rem_loop(uhci_softc_t *); 207 208 Static usbd_status uhci_setup_isoc(struct usbd_pipe *); 209 210 Static struct usbd_xfer * 211 uhci_allocx(struct usbd_bus *, unsigned int); 212 Static void uhci_freex(struct usbd_bus *, struct usbd_xfer *); 213 Static bool uhci_dying(struct usbd_bus *); 214 Static void uhci_get_lock(struct usbd_bus *, kmutex_t **); 215 Static int uhci_roothub_ctrl(struct usbd_bus *, 216 usb_device_request_t *, void *, int); 217 218 Static int uhci_device_ctrl_init(struct usbd_xfer *); 219 Static void uhci_device_ctrl_fini(struct usbd_xfer *); 220 Static usbd_status uhci_device_ctrl_transfer(struct usbd_xfer *); 221 Static usbd_status uhci_device_ctrl_start(struct usbd_xfer *); 222 Static void uhci_device_ctrl_abort(struct usbd_xfer *); 223 Static void uhci_device_ctrl_close(struct usbd_pipe *); 224 Static void uhci_device_ctrl_done(struct usbd_xfer *); 225 226 Static int uhci_device_intr_init(struct usbd_xfer *); 227 Static void uhci_device_intr_fini(struct usbd_xfer *); 228 Static usbd_status uhci_device_intr_transfer(struct usbd_xfer *); 229 Static usbd_status uhci_device_intr_start(struct usbd_xfer *); 230 Static void uhci_device_intr_abort(struct usbd_xfer *); 231 Static void uhci_device_intr_close(struct usbd_pipe *); 232 Static void uhci_device_intr_done(struct usbd_xfer *); 233 234 Static int uhci_device_bulk_init(struct usbd_xfer *); 235 Static void uhci_device_bulk_fini(struct usbd_xfer *); 236 Static usbd_status uhci_device_bulk_transfer(struct usbd_xfer *); 237 Static usbd_status uhci_device_bulk_start(struct usbd_xfer *); 238 Static void uhci_device_bulk_abort(struct usbd_xfer *); 239 Static void uhci_device_bulk_close(struct usbd_pipe *); 240 Static void uhci_device_bulk_done(struct usbd_xfer *); 241 242 Static int uhci_device_isoc_init(struct usbd_xfer *); 243 Static void uhci_device_isoc_fini(struct usbd_xfer *); 244 Static usbd_status uhci_device_isoc_transfer(struct usbd_xfer *); 245 Static void uhci_device_isoc_abort(struct usbd_xfer *); 246 Static void uhci_device_isoc_close(struct usbd_pipe *); 247 Static void uhci_device_isoc_done(struct usbd_xfer *); 248 249 Static usbd_status uhci_root_intr_transfer(struct usbd_xfer *); 250 Static usbd_status uhci_root_intr_start(struct usbd_xfer *); 251 Static void uhci_root_intr_abort(struct usbd_xfer *); 252 Static void uhci_root_intr_close(struct usbd_pipe *); 253 Static void uhci_root_intr_done(struct usbd_xfer *); 254 255 Static usbd_status uhci_open(struct usbd_pipe *); 256 Static void uhci_poll(struct usbd_bus *); 257 Static void uhci_softintr(void *); 258 259 Static void uhci_add_intr(uhci_softc_t *, uhci_soft_qh_t *); 260 Static void uhci_remove_intr(uhci_softc_t *, uhci_soft_qh_t *); 261 Static usbd_status uhci_device_setintr(uhci_softc_t *, 262 struct uhci_pipe *, int); 263 264 Static void uhci_device_clear_toggle(struct usbd_pipe *); 265 Static void uhci_noop(struct usbd_pipe *); 266 267 static inline uhci_soft_qh_t * 268 uhci_find_prev_qh(uhci_soft_qh_t *, uhci_soft_qh_t *); 269 270 #ifdef UHCI_DEBUG 271 Static void uhci_dump_all(uhci_softc_t *); 272 
Static void uhci_dumpregs(uhci_softc_t *); 273 Static void uhci_dump_qhs(uhci_soft_qh_t *); 274 Static void uhci_dump_qh(uhci_soft_qh_t *); 275 Static void uhci_dump_tds(uhci_soft_td_t *); 276 Static void uhci_dump_td(uhci_soft_td_t *); 277 Static void uhci_dump_ii(struct uhci_xfer *); 278 void uhci_dump(void); 279 #endif 280 281 #define UBARR(sc) bus_space_barrier((sc)->iot, (sc)->ioh, 0, (sc)->sc_size, \ 282 BUS_SPACE_BARRIER_READ|BUS_SPACE_BARRIER_WRITE) 283 #define UWRITE1(sc, r, x) \ 284 do { UBARR(sc); bus_space_write_1((sc)->iot, (sc)->ioh, (r), (x)); \ 285 } while (/*CONSTCOND*/0) 286 #define UWRITE2(sc, r, x) \ 287 do { UBARR(sc); bus_space_write_2((sc)->iot, (sc)->ioh, (r), (x)); \ 288 } while (/*CONSTCOND*/0) 289 #define UWRITE4(sc, r, x) \ 290 do { UBARR(sc); bus_space_write_4((sc)->iot, (sc)->ioh, (r), (x)); \ 291 } while (/*CONSTCOND*/0) 292 293 static __inline uint8_t 294 UREAD1(uhci_softc_t *sc, bus_size_t r) 295 { 296 297 UBARR(sc); 298 return bus_space_read_1(sc->iot, sc->ioh, r); 299 } 300 301 static __inline uint16_t 302 UREAD2(uhci_softc_t *sc, bus_size_t r) 303 { 304 305 UBARR(sc); 306 return bus_space_read_2(sc->iot, sc->ioh, r); 307 } 308 309 #ifdef UHCI_DEBUG 310 static __inline uint32_t 311 UREAD4(uhci_softc_t *sc, bus_size_t r) 312 { 313 314 UBARR(sc); 315 return bus_space_read_4(sc->iot, sc->ioh, r); 316 } 317 #endif 318 319 #define UHCICMD(sc, cmd) UWRITE2(sc, UHCI_CMD, cmd) 320 #define UHCISTS(sc) UREAD2(sc, UHCI_STS) 321 322 #define UHCI_RESET_TIMEOUT 100 /* ms, reset timeout */ 323 324 #define UHCI_CURFRAME(sc) (UREAD2(sc, UHCI_FRNUM) & UHCI_FRNUM_MASK) 325 326 const struct usbd_bus_methods uhci_bus_methods = { 327 .ubm_open = uhci_open, 328 .ubm_softint = uhci_softintr, 329 .ubm_dopoll = uhci_poll, 330 .ubm_allocx = uhci_allocx, 331 .ubm_freex = uhci_freex, 332 .ubm_abortx = uhci_abortx, 333 .ubm_dying = uhci_dying, 334 .ubm_getlock = uhci_get_lock, 335 .ubm_rhctrl = uhci_roothub_ctrl, 336 }; 337 338 const struct usbd_pipe_methods uhci_root_intr_methods = { 339 .upm_transfer = uhci_root_intr_transfer, 340 .upm_start = uhci_root_intr_start, 341 .upm_abort = uhci_root_intr_abort, 342 .upm_close = uhci_root_intr_close, 343 .upm_cleartoggle = uhci_noop, 344 .upm_done = uhci_root_intr_done, 345 }; 346 347 const struct usbd_pipe_methods uhci_device_ctrl_methods = { 348 .upm_init = uhci_device_ctrl_init, 349 .upm_fini = uhci_device_ctrl_fini, 350 .upm_transfer = uhci_device_ctrl_transfer, 351 .upm_start = uhci_device_ctrl_start, 352 .upm_abort = uhci_device_ctrl_abort, 353 .upm_close = uhci_device_ctrl_close, 354 .upm_cleartoggle = uhci_noop, 355 .upm_done = uhci_device_ctrl_done, 356 }; 357 358 const struct usbd_pipe_methods uhci_device_intr_methods = { 359 .upm_init = uhci_device_intr_init, 360 .upm_fini = uhci_device_intr_fini, 361 .upm_transfer = uhci_device_intr_transfer, 362 .upm_start = uhci_device_intr_start, 363 .upm_abort = uhci_device_intr_abort, 364 .upm_close = uhci_device_intr_close, 365 .upm_cleartoggle = uhci_device_clear_toggle, 366 .upm_done = uhci_device_intr_done, 367 }; 368 369 const struct usbd_pipe_methods uhci_device_bulk_methods = { 370 .upm_init = uhci_device_bulk_init, 371 .upm_fini = uhci_device_bulk_fini, 372 .upm_transfer = uhci_device_bulk_transfer, 373 .upm_start = uhci_device_bulk_start, 374 .upm_abort = uhci_device_bulk_abort, 375 .upm_close = uhci_device_bulk_close, 376 .upm_cleartoggle = uhci_device_clear_toggle, 377 .upm_done = uhci_device_bulk_done, 378 }; 379 380 const struct usbd_pipe_methods uhci_device_isoc_methods = { 381 
.upm_init = uhci_device_isoc_init, 382 .upm_fini = uhci_device_isoc_fini, 383 .upm_transfer = uhci_device_isoc_transfer, 384 .upm_abort = uhci_device_isoc_abort, 385 .upm_close = uhci_device_isoc_close, 386 .upm_cleartoggle = uhci_noop, 387 .upm_done = uhci_device_isoc_done, 388 }; 389 390 static inline void 391 uhci_add_intr_list(uhci_softc_t *sc, struct uhci_xfer *ux) 392 { 393 394 TAILQ_INSERT_TAIL(&sc->sc_intrhead, ux, ux_list); 395 } 396 397 static inline void 398 uhci_del_intr_list(uhci_softc_t *sc, struct uhci_xfer *ux) 399 { 400 401 TAILQ_REMOVE(&sc->sc_intrhead, ux, ux_list); 402 } 403 404 static inline uhci_soft_qh_t * 405 uhci_find_prev_qh(uhci_soft_qh_t *pqh, uhci_soft_qh_t *sqh) 406 { 407 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 408 DPRINTFN(15, "pqh=%#jx sqh=%#jx", (uintptr_t)pqh, (uintptr_t)sqh, 0, 0); 409 410 for (; pqh->hlink != sqh; pqh = pqh->hlink) { 411 #if defined(DIAGNOSTIC) || defined(UHCI_DEBUG) 412 usb_syncmem(&pqh->dma, 413 pqh->offs + offsetof(uhci_qh_t, qh_hlink), 414 sizeof(pqh->qh.qh_hlink), 415 BUS_DMASYNC_POSTWRITE); 416 if (le32toh(pqh->qh.qh_hlink) & UHCI_PTR_T) { 417 printf("%s: QH not found\n", __func__); 418 return NULL; 419 } 420 #endif 421 } 422 return pqh; 423 } 424 425 void 426 uhci_globalreset(uhci_softc_t *sc) 427 { 428 UHCICMD(sc, UHCI_CMD_GRESET); /* global reset */ 429 usb_delay_ms(&sc->sc_bus, USB_BUS_RESET_DELAY); /* wait a little */ 430 UHCICMD(sc, 0); /* do nothing */ 431 } 432 433 int 434 uhci_init(uhci_softc_t *sc) 435 { 436 usbd_status err; 437 int i, j; 438 uhci_soft_qh_t *clsqh, *chsqh, *bsqh, *sqh, *lsqh; 439 uhci_soft_td_t *std; 440 441 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 442 443 #ifdef UHCI_DEBUG 444 thesc = sc; 445 446 if (uhcidebug >= 2) 447 uhci_dumpregs(sc); 448 #endif 449 450 sc->sc_suspend = PWR_RESUME; 451 452 UWRITE2(sc, UHCI_INTR, 0); /* disable interrupts */ 453 uhci_globalreset(sc); /* reset the controller */ 454 uhci_reset(sc); 455 456 /* Allocate and initialize real frame array. */ 457 err = usb_allocmem(&sc->sc_bus, 458 UHCI_FRAMELIST_COUNT * sizeof(uhci_physaddr_t), 459 UHCI_FRAMELIST_ALIGN, USBMALLOC_COHERENT, &sc->sc_dma); 460 if (err) 461 return err; 462 sc->sc_pframes = KERNADDR(&sc->sc_dma, 0); 463 /* set frame number to 0 */ 464 UWRITE2(sc, UHCI_FRNUM, 0); 465 /* set frame list */ 466 UWRITE4(sc, UHCI_FLBASEADDR, DMAADDR(&sc->sc_dma, 0)); 467 468 /* Initialise mutex early for uhci_alloc_* */ 469 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB); 470 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB); 471 472 /* 473 * Allocate a TD, inactive, that hangs from the last QH. 474 * This is to avoid a bug in the PIIX that makes it run berserk 475 * otherwise. 
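	 *
	 * (The TD is inactive and hangs off the last QH's element link,
	 * so the controller never actually executes it.)
	 *
	 * The schedule skeleton built below gives every virtual frame its
	 * own iso TD and interrupt QH, all funnelling into the shared
	 * queues, roughly:
	 *
	 *	frame list -> iso TD -> intr QH -> ls ctrl QH -> hs ctrl QH
	 *	    -> bulk QH -> last QH (-> the inactive TD above)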
476 */ 477 std = uhci_alloc_std(sc); 478 if (std == NULL) 479 return ENOMEM; 480 std->link.std = NULL; 481 std->td.td_link = htole32(UHCI_PTR_T); 482 std->td.td_status = htole32(0); /* inactive */ 483 std->td.td_token = htole32(0); 484 std->td.td_buffer = htole32(0); 485 usb_syncmem(&std->dma, std->offs, sizeof(std->td), 486 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 487 488 /* Allocate the dummy QH marking the end and used for looping the QHs.*/ 489 lsqh = uhci_alloc_sqh(sc); 490 if (lsqh == NULL) 491 goto fail1; 492 lsqh->hlink = NULL; 493 lsqh->qh.qh_hlink = htole32(UHCI_PTR_T); /* end of QH chain */ 494 lsqh->elink = std; 495 lsqh->qh.qh_elink = htole32(std->physaddr | UHCI_PTR_TD); 496 sc->sc_last_qh = lsqh; 497 usb_syncmem(&lsqh->dma, lsqh->offs, sizeof(lsqh->qh), 498 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 499 500 /* Allocate the dummy QH where bulk traffic will be queued. */ 501 bsqh = uhci_alloc_sqh(sc); 502 if (bsqh == NULL) 503 goto fail2; 504 bsqh->hlink = lsqh; 505 bsqh->qh.qh_hlink = htole32(lsqh->physaddr | UHCI_PTR_QH); 506 bsqh->elink = NULL; 507 bsqh->qh.qh_elink = htole32(UHCI_PTR_T); 508 sc->sc_bulk_start = sc->sc_bulk_end = bsqh; 509 usb_syncmem(&bsqh->dma, bsqh->offs, sizeof(bsqh->qh), 510 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 511 512 /* Allocate dummy QH where high speed control traffic will be queued. */ 513 chsqh = uhci_alloc_sqh(sc); 514 if (chsqh == NULL) 515 goto fail3; 516 chsqh->hlink = bsqh; 517 chsqh->qh.qh_hlink = htole32(bsqh->physaddr | UHCI_PTR_QH); 518 chsqh->elink = NULL; 519 chsqh->qh.qh_elink = htole32(UHCI_PTR_T); 520 sc->sc_hctl_start = sc->sc_hctl_end = chsqh; 521 usb_syncmem(&chsqh->dma, chsqh->offs, sizeof(chsqh->qh), 522 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 523 524 /* Allocate dummy QH where control traffic will be queued. */ 525 clsqh = uhci_alloc_sqh(sc); 526 if (clsqh == NULL) 527 goto fail4; 528 clsqh->hlink = chsqh; 529 clsqh->qh.qh_hlink = htole32(chsqh->physaddr | UHCI_PTR_QH); 530 clsqh->elink = NULL; 531 clsqh->qh.qh_elink = htole32(UHCI_PTR_T); 532 sc->sc_lctl_start = sc->sc_lctl_end = clsqh; 533 usb_syncmem(&clsqh->dma, clsqh->offs, sizeof(clsqh->qh), 534 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 535 536 /* 537 * Make all (virtual) frame list pointers point to the interrupt 538 * queue heads and the interrupt queue heads at the control 539 * queue head and point the physical frame list to the virtual. 
 */
	for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) {
		std = uhci_alloc_std(sc);
		sqh = uhci_alloc_sqh(sc);
		if (std == NULL || sqh == NULL)
			return ENOMEM;
		std->link.sqh = sqh;
		std->td.td_link = htole32(sqh->physaddr | UHCI_PTR_QH);
		std->td.td_status = htole32(UHCI_TD_IOS);	/* iso, inactive */
		std->td.td_token = htole32(0);
		std->td.td_buffer = htole32(0);
		usb_syncmem(&std->dma, std->offs, sizeof(std->td),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
		sqh->hlink = clsqh;
		sqh->qh.qh_hlink = htole32(clsqh->physaddr | UHCI_PTR_QH);
		sqh->elink = NULL;
		sqh->qh.qh_elink = htole32(UHCI_PTR_T);
		usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
		sc->sc_vframes[i].htd = std;
		sc->sc_vframes[i].etd = std;
		sc->sc_vframes[i].hqh = sqh;
		sc->sc_vframes[i].eqh = sqh;
		for (j = i;
		     j < UHCI_FRAMELIST_COUNT;
		     j += UHCI_VFRAMELIST_COUNT)
			sc->sc_pframes[j] = htole32(std->physaddr);
	}
	usb_syncmem(&sc->sc_dma, 0,
	    UHCI_FRAMELIST_COUNT * sizeof(uhci_physaddr_t),
	    BUS_DMASYNC_PREWRITE);

	TAILQ_INIT(&sc->sc_intrhead);

	sc->sc_xferpool = pool_cache_init(sizeof(struct uhci_xfer), 0, 0, 0,
	    "uhcixfer", NULL, IPL_USB, NULL, NULL, NULL);

	callout_init(&sc->sc_poll_handle, CALLOUT_MPSAFE);
	callout_setfunc(&sc->sc_poll_handle, uhci_poll_hub, sc);

	/* Set up the bus struct. */
	sc->sc_bus.ub_methods = &uhci_bus_methods;
	sc->sc_bus.ub_pipesize = sizeof(struct uhci_pipe);
	sc->sc_bus.ub_usedma = true;
	sc->sc_bus.ub_dmaflags = USBMALLOC_MULTISEG;

	UHCICMD(sc, UHCI_CMD_MAXP);	/* Assume 64 byte packets at frame end */

	DPRINTF("Enabling...", 0, 0, 0, 0);

	err = uhci_run(sc, 1, 0);	/* and here we go...
 */
	UWRITE2(sc, UHCI_INTR, UHCI_INTR_TOCRCIE | UHCI_INTR_RIE |
	    UHCI_INTR_IOCE | UHCI_INTR_SPIE);	/* enable interrupts */
	return err;

 fail4:
	uhci_free_sqh(sc, chsqh);
 fail3:
	uhci_free_sqh(sc, bsqh);
 fail2:
	uhci_free_sqh(sc, lsqh);
 fail1:
	uhci_free_std(sc, std);

	return ENOMEM;
}

int
uhci_activate(device_t self, enum devact act)
{
	struct uhci_softc *sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		sc->sc_dying = 1;
		return 0;
	default:
		return EOPNOTSUPP;
	}
}

void
uhci_childdet(device_t self, device_t child)
{
	struct uhci_softc *sc = device_private(self);

	KASSERT(sc->sc_child == child);
	sc->sc_child = NULL;
}

int
uhci_detach(struct uhci_softc *sc, int flags)
{
	int rv = 0;

	if (sc->sc_child != NULL)
		rv = config_detach(sc->sc_child, flags);

	if (rv != 0)
		return rv;

	callout_halt(&sc->sc_poll_handle, NULL);
	callout_destroy(&sc->sc_poll_handle);

	mutex_destroy(&sc->sc_lock);
	mutex_destroy(&sc->sc_intr_lock);

	pool_cache_destroy(sc->sc_xferpool);

	/* XXX free other data structures XXX */

	return rv;
}

struct usbd_xfer *
uhci_allocx(struct usbd_bus *bus, unsigned int nframes)
{
	struct uhci_softc *sc = UHCI_BUS2SC(bus);
	struct usbd_xfer *xfer;

	xfer = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
	if (xfer != NULL) {
		memset(xfer, 0, sizeof(struct uhci_xfer));

#ifdef DIAGNOSTIC
		struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer);
		uxfer->ux_isdone = true;
		xfer->ux_state = XFER_BUSY;
#endif
	}
	return xfer;
}

void
uhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
{
	struct uhci_softc *sc = UHCI_BUS2SC(bus);
	struct uhci_xfer *uxfer __diagused = UHCI_XFER2UXFER(xfer);

	KASSERTMSG(xfer->ux_state == XFER_BUSY ||
	    xfer->ux_status == USBD_NOT_STARTED,
	    "xfer %p state %d\n", xfer, xfer->ux_state);
	KASSERTMSG(uxfer->ux_isdone || xfer->ux_status == USBD_NOT_STARTED,
	    "xfer %p not done\n", xfer);
#ifdef DIAGNOSTIC
	xfer->ux_state = XFER_FREE;
#endif
	pool_cache_put(sc->sc_xferpool, xfer);
}

Static bool
uhci_dying(struct usbd_bus *bus)
{
	struct uhci_softc *sc = UHCI_BUS2SC(bus);

	return sc->sc_dying;
}

Static void
uhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
{
	struct uhci_softc *sc = UHCI_BUS2SC(bus);

	*lock = &sc->sc_lock;
}

/*
 * Handle suspend/resume.
 *
 * We need to switch to polling mode here, because this routine is
 * called from an interrupt context. This is all right since we
 * are almost suspended anyway.
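 *
 * sc_bus.ub_usepolling is raised around the register accesses below so
 * that the usb_delay_ms_locked() calls busy-wait instead of trying to
 * sleep while the interrupt spin lock is held.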
714 */ 715 bool 716 uhci_resume(device_t dv, const pmf_qual_t *qual) 717 { 718 uhci_softc_t *sc = device_private(dv); 719 int cmd; 720 721 mutex_spin_enter(&sc->sc_intr_lock); 722 723 cmd = UREAD2(sc, UHCI_CMD); 724 sc->sc_bus.ub_usepolling++; 725 UWRITE2(sc, UHCI_INTR, 0); 726 uhci_globalreset(sc); 727 uhci_reset(sc); 728 if (cmd & UHCI_CMD_RS) 729 uhci_run(sc, 0, 1); 730 731 /* restore saved state */ 732 UWRITE4(sc, UHCI_FLBASEADDR, DMAADDR(&sc->sc_dma, 0)); 733 UWRITE2(sc, UHCI_FRNUM, sc->sc_saved_frnum); 734 UWRITE1(sc, UHCI_SOF, sc->sc_saved_sof); 735 736 UHCICMD(sc, cmd | UHCI_CMD_FGR); /* force resume */ 737 usb_delay_ms_locked(&sc->sc_bus, USB_RESUME_DELAY, &sc->sc_intr_lock); 738 UHCICMD(sc, cmd & ~UHCI_CMD_EGSM); /* back to normal */ 739 UWRITE2(sc, UHCI_INTR, UHCI_INTR_TOCRCIE | 740 UHCI_INTR_RIE | UHCI_INTR_IOCE | UHCI_INTR_SPIE); 741 UHCICMD(sc, UHCI_CMD_MAXP); 742 uhci_run(sc, 1, 1); /* and start traffic again */ 743 usb_delay_ms_locked(&sc->sc_bus, USB_RESUME_RECOVERY, &sc->sc_intr_lock); 744 sc->sc_bus.ub_usepolling--; 745 if (sc->sc_intr_xfer != NULL) 746 callout_schedule(&sc->sc_poll_handle, sc->sc_ival); 747 #ifdef UHCI_DEBUG 748 if (uhcidebug >= 2) 749 uhci_dumpregs(sc); 750 #endif 751 752 sc->sc_suspend = PWR_RESUME; 753 mutex_spin_exit(&sc->sc_intr_lock); 754 755 return true; 756 } 757 758 bool 759 uhci_suspend(device_t dv, const pmf_qual_t *qual) 760 { 761 uhci_softc_t *sc = device_private(dv); 762 int cmd; 763 764 mutex_spin_enter(&sc->sc_intr_lock); 765 766 cmd = UREAD2(sc, UHCI_CMD); 767 768 #ifdef UHCI_DEBUG 769 if (uhcidebug >= 2) 770 uhci_dumpregs(sc); 771 #endif 772 sc->sc_suspend = PWR_SUSPEND; 773 if (sc->sc_intr_xfer != NULL) 774 callout_halt(&sc->sc_poll_handle, &sc->sc_intr_lock); 775 sc->sc_bus.ub_usepolling++; 776 777 uhci_run(sc, 0, 1); /* stop the controller */ 778 cmd &= ~UHCI_CMD_RS; 779 780 /* save some state if BIOS doesn't */ 781 sc->sc_saved_frnum = UREAD2(sc, UHCI_FRNUM); 782 sc->sc_saved_sof = UREAD1(sc, UHCI_SOF); 783 784 UWRITE2(sc, UHCI_INTR, 0); /* disable intrs */ 785 786 UHCICMD(sc, cmd | UHCI_CMD_EGSM); /* enter suspend */ 787 usb_delay_ms_locked(&sc->sc_bus, USB_RESUME_WAIT, &sc->sc_intr_lock); 788 sc->sc_bus.ub_usepolling--; 789 790 mutex_spin_exit(&sc->sc_intr_lock); 791 792 return true; 793 } 794 795 #ifdef UHCI_DEBUG 796 Static void 797 uhci_dumpregs(uhci_softc_t *sc) 798 { 799 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 800 DPRINTF("cmd =%04jx sts =%04jx intr =%04jx frnum =%04jx", 801 UREAD2(sc, UHCI_CMD), UREAD2(sc, UHCI_STS), 802 UREAD2(sc, UHCI_INTR), UREAD2(sc, UHCI_FRNUM)); 803 DPRINTF("sof =%04jx portsc1=%04jx portsc2=%04jx flbase=%08jx", 804 UREAD1(sc, UHCI_SOF), UREAD2(sc, UHCI_PORTSC1), 805 UREAD2(sc, UHCI_PORTSC2), UREAD4(sc, UHCI_FLBASEADDR)); 806 } 807 808 void 809 uhci_dump_td(uhci_soft_td_t *p) 810 { 811 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 812 813 usb_syncmem(&p->dma, p->offs, sizeof(p->td), 814 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 815 816 DPRINTF("TD(%#jx) at 0x%08jx", (uintptr_t)p, p->physaddr, 0, 0); 817 DPRINTF(" link=0x%08jx status=0x%08jx " 818 "token=0x%08x buffer=0x%08x", 819 le32toh(p->td.td_link), 820 le32toh(p->td.td_status), 821 le32toh(p->td.td_token), 822 le32toh(p->td.td_buffer)); 823 824 DPRINTF("bitstuff=%jd crcto =%jd nak =%jd babble =%jd", 825 !!(le32toh(p->td.td_status) & UHCI_TD_BITSTUFF), 826 !!(le32toh(p->td.td_status) & UHCI_TD_CRCTO), 827 !!(le32toh(p->td.td_status) & UHCI_TD_NAK), 828 !!(le32toh(p->td.td_status) & UHCI_TD_BABBLE)); 829 DPRINTF("dbuffer =%jd stalled =%jd active =%jd ioc 
=%jd", 830 !!(le32toh(p->td.td_status) & UHCI_TD_DBUFFER), 831 !!(le32toh(p->td.td_status) & UHCI_TD_STALLED), 832 !!(le32toh(p->td.td_status) & UHCI_TD_ACTIVE), 833 !!(le32toh(p->td.td_status) & UHCI_TD_IOC)); 834 DPRINTF("ios =%jd ls =%jd spd =%jd", 835 !!(le32toh(p->td.td_status) & UHCI_TD_IOS), 836 !!(le32toh(p->td.td_status) & UHCI_TD_LS), 837 !!(le32toh(p->td.td_status) & UHCI_TD_SPD), 0); 838 DPRINTF("errcnt =%d actlen =%d pid=%02x", 839 UHCI_TD_GET_ERRCNT(le32toh(p->td.td_status)), 840 UHCI_TD_GET_ACTLEN(le32toh(p->td.td_status)), 841 UHCI_TD_GET_PID(le32toh(p->td.td_token)), 0); 842 DPRINTF("addr=%jd endpt=%jd D=%jd maxlen=%jd,", 843 UHCI_TD_GET_DEVADDR(le32toh(p->td.td_token)), 844 UHCI_TD_GET_ENDPT(le32toh(p->td.td_token)), 845 UHCI_TD_GET_DT(le32toh(p->td.td_token)), 846 UHCI_TD_GET_MAXLEN(le32toh(p->td.td_token))); 847 } 848 849 void 850 uhci_dump_qh(uhci_soft_qh_t *sqh) 851 { 852 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 853 854 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), 855 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 856 857 DPRINTF("QH(%#jx) at 0x%08jx: hlink=%08jx elink=%08jx", (uintptr_t)sqh, 858 (int)sqh->physaddr, le32toh(sqh->qh.qh_hlink), 859 le32toh(sqh->qh.qh_elink)); 860 861 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), BUS_DMASYNC_PREREAD); 862 } 863 864 865 #if 1 866 void 867 uhci_dump(void) 868 { 869 uhci_dump_all(thesc); 870 } 871 #endif 872 873 void 874 uhci_dump_all(uhci_softc_t *sc) 875 { 876 uhci_dumpregs(sc); 877 /*printf("framelist[i].link = %08x\n", sc->sc_framelist[0].link);*/ 878 uhci_dump_qhs(sc->sc_lctl_start); 879 } 880 881 882 void 883 uhci_dump_qhs(uhci_soft_qh_t *sqh) 884 { 885 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 886 887 uhci_dump_qh(sqh); 888 889 /* 890 * uhci_dump_qhs displays all the QHs and TDs from the given QH onwards 891 * Traverses sideways first, then down. 892 * 893 * QH1 894 * QH2 895 * No QH 896 * TD2.1 897 * TD2.2 898 * TD1.1 899 * etc. 900 * 901 * TD2.x being the TDs queued at QH2 and QH1 being referenced from QH1. 902 */ 903 904 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), 905 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 906 if (sqh->hlink != NULL && !(le32toh(sqh->qh.qh_hlink) & UHCI_PTR_T)) 907 uhci_dump_qhs(sqh->hlink); 908 else 909 DPRINTF("No QH", 0, 0, 0, 0); 910 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), BUS_DMASYNC_PREREAD); 911 912 if (sqh->elink != NULL && !(le32toh(sqh->qh.qh_elink) & UHCI_PTR_T)) 913 uhci_dump_tds(sqh->elink); 914 else 915 DPRINTF("No QH", 0, 0, 0, 0); 916 } 917 918 void 919 uhci_dump_tds(uhci_soft_td_t *std) 920 { 921 uhci_soft_td_t *td; 922 int stop; 923 924 for (td = std; td != NULL; td = td->link.std) { 925 uhci_dump_td(td); 926 927 /* 928 * Check whether the link pointer in this TD marks 929 * the link pointer as end of queue. This avoids 930 * printing the free list in case the queue/TD has 931 * already been moved there (seatbelt). 
932 */ 933 usb_syncmem(&td->dma, td->offs + offsetof(uhci_td_t, td_link), 934 sizeof(td->td.td_link), 935 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 936 stop = (le32toh(td->td.td_link) & UHCI_PTR_T || 937 le32toh(td->td.td_link) == 0); 938 usb_syncmem(&td->dma, td->offs + offsetof(uhci_td_t, td_link), 939 sizeof(td->td.td_link), BUS_DMASYNC_PREREAD); 940 if (stop) 941 break; 942 } 943 } 944 945 Static void 946 uhci_dump_ii(struct uhci_xfer *ux) 947 { 948 struct usbd_pipe *pipe; 949 usb_endpoint_descriptor_t *ed; 950 struct usbd_device *dev; 951 952 if (ux == NULL) { 953 printf("ux NULL\n"); 954 return; 955 } 956 pipe = ux->ux_xfer.ux_pipe; 957 if (pipe == NULL) { 958 printf("ux %p: done=%d pipe=NULL\n", ux, ux->ux_isdone); 959 return; 960 } 961 if (pipe->up_endpoint == NULL) { 962 printf("ux %p: done=%d pipe=%p pipe->up_endpoint=NULL\n", 963 ux, ux->ux_isdone, pipe); 964 return; 965 } 966 if (pipe->up_dev == NULL) { 967 printf("ux %p: done=%d pipe=%p pipe->up_dev=NULL\n", 968 ux, ux->ux_isdone, pipe); 969 return; 970 } 971 ed = pipe->up_endpoint->ue_edesc; 972 dev = pipe->up_dev; 973 printf("ux %p: done=%d dev=%p vid=0x%04x pid=0x%04x addr=%d pipe=%p ep=0x%02x attr=0x%02x\n", 974 ux, ux->ux_isdone, dev, 975 UGETW(dev->ud_ddesc.idVendor), 976 UGETW(dev->ud_ddesc.idProduct), 977 dev->ud_addr, pipe, 978 ed->bEndpointAddress, ed->bmAttributes); 979 } 980 981 void uhci_dump_iis(struct uhci_softc *sc); 982 void 983 uhci_dump_iis(struct uhci_softc *sc) 984 { 985 struct uhci_xfer *ux; 986 987 printf("interrupt list:\n"); 988 TAILQ_FOREACH(ux, &sc->sc_intrhead, ux_list) 989 uhci_dump_ii(ux); 990 } 991 992 void iidump(void); 993 void iidump(void) { uhci_dump_iis(thesc); } 994 995 #endif 996 997 /* 998 * This routine is executed periodically and simulates interrupts 999 * from the root controller interrupt pipe for port status change. 1000 */ 1001 void 1002 uhci_poll_hub(void *addr) 1003 { 1004 struct uhci_softc *sc = addr; 1005 struct usbd_xfer *xfer; 1006 u_char *p; 1007 1008 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1009 1010 mutex_enter(&sc->sc_lock); 1011 1012 /* 1013 * If the intr xfer has completed or been synchronously 1014 * aborted, we have nothing to do. 1015 */ 1016 xfer = sc->sc_intr_xfer; 1017 if (xfer == NULL) 1018 goto out; 1019 KASSERT(xfer->ux_status == USBD_IN_PROGRESS); 1020 1021 /* 1022 * If the intr xfer for which we were scheduled is done, and 1023 * another intr xfer has been submitted, let that one be dealt 1024 * with when the callout fires again. 1025 * 1026 * The call to callout_pending is racy, but the the transition 1027 * from pending to invoking happens atomically. The 1028 * callout_ack ensures callout_invoking does not return true 1029 * due to this invocation of the callout; the lock ensures the 1030 * next invocation of the callout cannot callout_ack (unless it 1031 * had already run to completion and nulled sc->sc_intr_xfer, 1032 * in which case would have bailed out already). 1033 */ 1034 callout_ack(&sc->sc_poll_handle); 1035 if (callout_pending(&sc->sc_poll_handle) || 1036 callout_invoking(&sc->sc_poll_handle)) 1037 goto out; 1038 1039 /* 1040 * Check flags for the two interrupt ports, and set them in the 1041 * buffer if an interrupt arrived; otherwise arrange . 
1042 */ 1043 p = xfer->ux_buf; 1044 p[0] = 0; 1045 if (UREAD2(sc, UHCI_PORTSC1) & (UHCI_PORTSC_CSC|UHCI_PORTSC_OCIC)) 1046 p[0] |= 1<<1; 1047 if (UREAD2(sc, UHCI_PORTSC2) & (UHCI_PORTSC_CSC|UHCI_PORTSC_OCIC)) 1048 p[0] |= 1<<2; 1049 if (p[0] == 0) { 1050 /* 1051 * No change -- try again in a while, unless we're 1052 * suspending, in which case we'll try again after 1053 * resume. 1054 */ 1055 if (sc->sc_suspend != PWR_SUSPEND) 1056 callout_schedule(&sc->sc_poll_handle, sc->sc_ival); 1057 goto out; 1058 } 1059 1060 /* 1061 * Interrupt completed, and the xfer has not been completed or 1062 * synchronously aborted. Complete the xfer now. 1063 */ 1064 xfer->ux_actlen = 1; 1065 xfer->ux_status = USBD_NORMAL_COMPLETION; 1066 #ifdef DIAGNOSTIC 1067 UHCI_XFER2UXFER(xfer)->ux_isdone = true; 1068 #endif 1069 usb_transfer_complete(xfer); 1070 1071 out: mutex_exit(&sc->sc_lock); 1072 } 1073 1074 void 1075 uhci_root_intr_done(struct usbd_xfer *xfer) 1076 { 1077 struct uhci_softc *sc = UHCI_XFER2SC(xfer); 1078 1079 KASSERT(mutex_owned(&sc->sc_lock)); 1080 1081 /* Claim the xfer so it doesn't get completed again. */ 1082 KASSERT(sc->sc_intr_xfer == xfer); 1083 KASSERT(xfer->ux_status != USBD_IN_PROGRESS); 1084 sc->sc_intr_xfer = NULL; 1085 } 1086 1087 /* 1088 * Let the last QH loop back to the high speed control transfer QH. 1089 * This is what intel calls "bandwidth reclamation" and improves 1090 * USB performance a lot for some devices. 1091 * If we are already looping, just count it. 1092 */ 1093 void 1094 uhci_add_loop(uhci_softc_t *sc) 1095 { 1096 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1097 1098 #ifdef UHCI_DEBUG 1099 if (uhcinoloop) 1100 return; 1101 #endif 1102 if (++sc->sc_loops == 1) { 1103 DPRINTFN(5, "add loop", 0, 0, 0, 0); 1104 /* Note, we don't loop back the soft pointer. */ 1105 sc->sc_last_qh->qh.qh_hlink = 1106 htole32(sc->sc_hctl_start->physaddr | UHCI_PTR_QH); 1107 usb_syncmem(&sc->sc_last_qh->dma, 1108 sc->sc_last_qh->offs + offsetof(uhci_qh_t, qh_hlink), 1109 sizeof(sc->sc_last_qh->qh.qh_hlink), 1110 BUS_DMASYNC_PREWRITE); 1111 } 1112 } 1113 1114 void 1115 uhci_rem_loop(uhci_softc_t *sc) 1116 { 1117 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1118 1119 #ifdef UHCI_DEBUG 1120 if (uhcinoloop) 1121 return; 1122 #endif 1123 if (--sc->sc_loops == 0) { 1124 DPRINTFN(5, "remove loop", 0, 0, 0, 0); 1125 sc->sc_last_qh->qh.qh_hlink = htole32(UHCI_PTR_T); 1126 usb_syncmem(&sc->sc_last_qh->dma, 1127 sc->sc_last_qh->offs + offsetof(uhci_qh_t, qh_hlink), 1128 sizeof(sc->sc_last_qh->qh.qh_hlink), 1129 BUS_DMASYNC_PREWRITE); 1130 } 1131 } 1132 1133 /* Add high speed control QH, called with lock held. 
*/ 1134 void 1135 uhci_add_hs_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh) 1136 { 1137 uhci_soft_qh_t *eqh; 1138 1139 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1140 1141 KASSERT(mutex_owned(&sc->sc_lock)); 1142 1143 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0); 1144 eqh = sc->sc_hctl_end; 1145 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink), 1146 sizeof(eqh->qh.qh_hlink), 1147 BUS_DMASYNC_POSTWRITE); 1148 sqh->hlink = eqh->hlink; 1149 sqh->qh.qh_hlink = eqh->qh.qh_hlink; 1150 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), 1151 BUS_DMASYNC_PREWRITE); 1152 eqh->hlink = sqh; 1153 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH); 1154 sc->sc_hctl_end = sqh; 1155 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink), 1156 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE); 1157 #ifdef UHCI_CTL_LOOP 1158 uhci_add_loop(sc); 1159 #endif 1160 } 1161 1162 /* Remove high speed control QH, called with lock held. */ 1163 void 1164 uhci_remove_hs_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh) 1165 { 1166 uhci_soft_qh_t *pqh; 1167 uint32_t elink; 1168 1169 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); 1170 1171 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1172 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0); 1173 #ifdef UHCI_CTL_LOOP 1174 uhci_rem_loop(sc); 1175 #endif 1176 /* 1177 * The T bit should be set in the elink of the QH so that the HC 1178 * doesn't follow the pointer. This condition may fail if the 1179 * the transferred packet was short so that the QH still points 1180 * at the last used TD. 1181 * In this case we set the T bit and wait a little for the HC 1182 * to stop looking at the TD. 1183 * Note that if the TD chain is large enough, the controller 1184 * may still be looking at the chain at the end of this function. 1185 * uhci_free_std_chain() will make sure the controller stops 1186 * looking at it quickly, but until then we should not change 1187 * sqh->hlink. 1188 */ 1189 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink), 1190 sizeof(sqh->qh.qh_elink), 1191 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 1192 elink = le32toh(sqh->qh.qh_elink); 1193 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink), 1194 sizeof(sqh->qh.qh_elink), BUS_DMASYNC_PREREAD); 1195 if (!(elink & UHCI_PTR_T)) { 1196 sqh->qh.qh_elink = htole32(UHCI_PTR_T); 1197 usb_syncmem(&sqh->dma, 1198 sqh->offs + offsetof(uhci_qh_t, qh_elink), 1199 sizeof(sqh->qh.qh_elink), 1200 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1201 delay(UHCI_QH_REMOVE_DELAY); 1202 } 1203 1204 pqh = uhci_find_prev_qh(sc->sc_hctl_start, sqh); 1205 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink), 1206 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE); 1207 pqh->hlink = sqh->hlink; 1208 pqh->qh.qh_hlink = sqh->qh.qh_hlink; 1209 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink), 1210 sizeof(pqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE); 1211 delay(UHCI_QH_REMOVE_DELAY); 1212 if (sc->sc_hctl_end == sqh) 1213 sc->sc_hctl_end = pqh; 1214 } 1215 1216 /* Add low speed control QH, called with lock held. 
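 *
 * All of the uhci_add_* routines splice the new QH in after the current
 * tail the same way: the new QH first inherits the tail's forward link
 * and is flushed to memory, and only then is the tail's hardware link
 * pointed at it, so the controller never sees a half-built chain.
 * In outline (a sketch of the code below, not extra steps):
 *
 *	sqh->hlink = eqh->hlink;
 *	sqh->qh.qh_hlink = eqh->qh.qh_hlink;	(sync sqh)
 *	eqh->hlink = sqh;
 *	eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH);  (sync eqh)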
*/ 1217 void 1218 uhci_add_ls_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh) 1219 { 1220 uhci_soft_qh_t *eqh; 1221 1222 KASSERT(mutex_owned(&sc->sc_lock)); 1223 1224 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1225 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0); 1226 1227 eqh = sc->sc_lctl_end; 1228 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink), 1229 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE); 1230 sqh->hlink = eqh->hlink; 1231 sqh->qh.qh_hlink = eqh->qh.qh_hlink; 1232 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), 1233 BUS_DMASYNC_PREWRITE); 1234 eqh->hlink = sqh; 1235 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH); 1236 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink), 1237 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE); 1238 sc->sc_lctl_end = sqh; 1239 } 1240 1241 /* Remove low speed control QH, called with lock held. */ 1242 void 1243 uhci_remove_ls_ctrl(uhci_softc_t *sc, uhci_soft_qh_t *sqh) 1244 { 1245 uhci_soft_qh_t *pqh; 1246 uint32_t elink; 1247 1248 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); 1249 1250 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1251 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0); 1252 1253 /* See comment in uhci_remove_hs_ctrl() */ 1254 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink), 1255 sizeof(sqh->qh.qh_elink), 1256 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 1257 elink = le32toh(sqh->qh.qh_elink); 1258 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink), 1259 sizeof(sqh->qh.qh_elink), BUS_DMASYNC_PREREAD); 1260 if (!(elink & UHCI_PTR_T)) { 1261 sqh->qh.qh_elink = htole32(UHCI_PTR_T); 1262 usb_syncmem(&sqh->dma, 1263 sqh->offs + offsetof(uhci_qh_t, qh_elink), 1264 sizeof(sqh->qh.qh_elink), 1265 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1266 delay(UHCI_QH_REMOVE_DELAY); 1267 } 1268 pqh = uhci_find_prev_qh(sc->sc_lctl_start, sqh); 1269 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink), 1270 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE); 1271 pqh->hlink = sqh->hlink; 1272 pqh->qh.qh_hlink = sqh->qh.qh_hlink; 1273 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink), 1274 sizeof(pqh->qh.qh_hlink), 1275 BUS_DMASYNC_PREWRITE); 1276 delay(UHCI_QH_REMOVE_DELAY); 1277 if (sc->sc_lctl_end == sqh) 1278 sc->sc_lctl_end = pqh; 1279 } 1280 1281 /* Add bulk QH, called with lock held. */ 1282 void 1283 uhci_add_bulk(uhci_softc_t *sc, uhci_soft_qh_t *sqh) 1284 { 1285 uhci_soft_qh_t *eqh; 1286 1287 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); 1288 1289 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1290 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0); 1291 1292 eqh = sc->sc_bulk_end; 1293 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink), 1294 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE); 1295 sqh->hlink = eqh->hlink; 1296 sqh->qh.qh_hlink = eqh->qh.qh_hlink; 1297 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), 1298 BUS_DMASYNC_PREWRITE); 1299 eqh->hlink = sqh; 1300 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH); 1301 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink), 1302 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE); 1303 sc->sc_bulk_end = sqh; 1304 uhci_add_loop(sc); 1305 } 1306 1307 /* Remove bulk QH, called with lock held. 
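 *
 * Removal follows the same pattern as uhci_remove_hs_ctrl(): terminate
 * the element link if the controller may still be following it, wait
 * UHCI_QH_REMOVE_DELAY, unlink the QH by copying its hlink into the
 * predecessor found with uhci_find_prev_qh(), and wait again before the
 * QH is reused.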
*/ 1308 void 1309 uhci_remove_bulk(uhci_softc_t *sc, uhci_soft_qh_t *sqh) 1310 { 1311 uhci_soft_qh_t *pqh; 1312 1313 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); 1314 1315 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1316 DPRINTFN(10, "sqh %#jx", (uintptr_t)sqh, 0, 0, 0); 1317 1318 uhci_rem_loop(sc); 1319 /* See comment in uhci_remove_hs_ctrl() */ 1320 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink), 1321 sizeof(sqh->qh.qh_elink), 1322 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 1323 if (!(sqh->qh.qh_elink & htole32(UHCI_PTR_T))) { 1324 sqh->qh.qh_elink = htole32(UHCI_PTR_T); 1325 usb_syncmem(&sqh->dma, 1326 sqh->offs + offsetof(uhci_qh_t, qh_elink), 1327 sizeof(sqh->qh.qh_elink), 1328 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1329 delay(UHCI_QH_REMOVE_DELAY); 1330 } 1331 pqh = uhci_find_prev_qh(sc->sc_bulk_start, sqh); 1332 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink), 1333 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE); 1334 pqh->hlink = sqh->hlink; 1335 pqh->qh.qh_hlink = sqh->qh.qh_hlink; 1336 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink), 1337 sizeof(pqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE); 1338 delay(UHCI_QH_REMOVE_DELAY); 1339 if (sc->sc_bulk_end == sqh) 1340 sc->sc_bulk_end = pqh; 1341 } 1342 1343 Static int uhci_intr1(uhci_softc_t *); 1344 1345 int 1346 uhci_intr(void *arg) 1347 { 1348 uhci_softc_t *sc = arg; 1349 int ret = 0; 1350 1351 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1352 1353 mutex_spin_enter(&sc->sc_intr_lock); 1354 1355 if (sc->sc_dying || !device_has_power(sc->sc_dev)) 1356 goto done; 1357 1358 if (sc->sc_bus.ub_usepolling || UREAD2(sc, UHCI_INTR) == 0) { 1359 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0); 1360 goto done; 1361 } 1362 1363 ret = uhci_intr1(sc); 1364 1365 done: 1366 mutex_spin_exit(&sc->sc_intr_lock); 1367 return ret; 1368 } 1369 1370 int 1371 uhci_intr1(uhci_softc_t *sc) 1372 { 1373 int status; 1374 int ack; 1375 1376 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1377 1378 #ifdef UHCI_DEBUG 1379 if (uhcidebug >= 15) { 1380 DPRINTF("sc %#jx", (uintptr_t)sc, 0, 0, 0); 1381 uhci_dumpregs(sc); 1382 } 1383 #endif 1384 1385 KASSERT(mutex_owned(&sc->sc_intr_lock)); 1386 1387 status = UREAD2(sc, UHCI_STS) & UHCI_STS_ALLINTRS; 1388 /* Check if the interrupt was for us. 
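	 * On a shared PCI interrupt line the status register may read back
	 * as zero; returning 0 here tells the dispatcher that the interrupt
	 * belongs to some other device.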
*/ 1389 if (status == 0) 1390 return 0; 1391 1392 if (sc->sc_suspend != PWR_RESUME) { 1393 #ifdef DIAGNOSTIC 1394 printf("%s: interrupt while not operating ignored\n", 1395 device_xname(sc->sc_dev)); 1396 #endif 1397 UWRITE2(sc, UHCI_STS, status); /* acknowledge the ints */ 1398 return 0; 1399 } 1400 1401 ack = 0; 1402 if (status & UHCI_STS_USBINT) 1403 ack |= UHCI_STS_USBINT; 1404 if (status & UHCI_STS_USBEI) 1405 ack |= UHCI_STS_USBEI; 1406 if (status & UHCI_STS_RD) { 1407 ack |= UHCI_STS_RD; 1408 #ifdef UHCI_DEBUG 1409 printf("%s: resume detect\n", device_xname(sc->sc_dev)); 1410 #endif 1411 } 1412 if (status & UHCI_STS_HSE) { 1413 ack |= UHCI_STS_HSE; 1414 printf("%s: host system error\n", device_xname(sc->sc_dev)); 1415 } 1416 if (status & UHCI_STS_HCPE) { 1417 ack |= UHCI_STS_HCPE; 1418 printf("%s: host controller process error\n", 1419 device_xname(sc->sc_dev)); 1420 } 1421 1422 /* When HCHalted=1 and Run/Stop=0 , it is normal */ 1423 if ((status & UHCI_STS_HCH) && (UREAD2(sc, UHCI_CMD) & UHCI_CMD_RS)) { 1424 /* no acknowledge needed */ 1425 if (!sc->sc_dying) { 1426 printf("%s: host controller halted\n", 1427 device_xname(sc->sc_dev)); 1428 #ifdef UHCI_DEBUG 1429 uhci_dump_all(sc); 1430 #endif 1431 } 1432 sc->sc_dying = 1; 1433 } 1434 1435 if (!ack) 1436 return 0; /* nothing to acknowledge */ 1437 UWRITE2(sc, UHCI_STS, ack); /* acknowledge the ints */ 1438 1439 usb_schedsoftintr(&sc->sc_bus); 1440 1441 DPRINTFN(15, "sc %#jx done", (uintptr_t)sc, 0, 0, 0); 1442 1443 return 1; 1444 } 1445 1446 void 1447 uhci_softintr(void *v) 1448 { 1449 struct usbd_bus *bus = v; 1450 uhci_softc_t *sc = UHCI_BUS2SC(bus); 1451 struct uhci_xfer *ux, *nextux; 1452 ux_completeq_t cq; 1453 1454 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1455 DPRINTF("sc %#jx", (uintptr_t)sc, 0, 0, 0); 1456 1457 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); 1458 1459 TAILQ_INIT(&cq); 1460 /* 1461 * Interrupts on UHCI really suck. When the host controller 1462 * interrupts because a transfer is completed there is no 1463 * way of knowing which transfer it was. You can scan down 1464 * the TDs and QHs of the previous frame to limit the search, 1465 * but that assumes that the interrupt was not delayed by more 1466 * than 1 ms, which may not always be true (e.g. after debug 1467 * output on a slow console). 1468 * We scan all interrupt descriptors to see if any have 1469 * completed. 1470 */ 1471 TAILQ_FOREACH_SAFE(ux, &sc->sc_intrhead, ux_list, nextux) { 1472 uhci_check_intr(sc, ux, &cq); 1473 } 1474 1475 /* 1476 * We abuse ux_list for the interrupt and complete lists and 1477 * interrupt transfers will get re-added here so use 1478 * the _SAFE version of TAILQ_FOREACH. 1479 */ 1480 TAILQ_FOREACH_SAFE(ux, &cq, ux_list, nextux) { 1481 DPRINTF("ux %#jx", (uintptr_t)ux, 0, 0, 0); 1482 usb_transfer_complete(&ux->ux_xfer); 1483 } 1484 1485 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); 1486 } 1487 1488 /* Check for an interrupt. 
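 *
 * A transfer is considered finished when
 *   - the last TD of its chain is no longer active, or
 *   - an earlier TD stopped with an error (STALLED), or
 *   - an earlier TD is short and has SPD set: for the data stage of a
 *     control transfer the QH is re-pointed at the status TD so the
 *     status stage still runs; for other transfers the xfer is done.
 * Otherwise the chain is still in progress and we just return.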
*/ 1489 void 1490 uhci_check_intr(uhci_softc_t *sc, struct uhci_xfer *ux, ux_completeq_t *cqp) 1491 { 1492 uhci_soft_td_t *std, *fstd = NULL, *lstd = NULL; 1493 uint32_t status; 1494 1495 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1496 DPRINTFN(15, "ux %#jx", (uintptr_t)ux, 0, 0, 0); 1497 1498 KASSERT(ux != NULL); 1499 1500 struct usbd_xfer *xfer = &ux->ux_xfer; 1501 if (xfer->ux_status == USBD_CANCELLED || 1502 xfer->ux_status == USBD_TIMEOUT) { 1503 DPRINTF("aborted xfer %#jx", (uintptr_t)xfer, 0, 0, 0); 1504 return; 1505 } 1506 1507 switch (ux->ux_type) { 1508 case UX_CTRL: 1509 fstd = ux->ux_setup; 1510 lstd = ux->ux_stat; 1511 break; 1512 case UX_BULK: 1513 case UX_INTR: 1514 case UX_ISOC: 1515 fstd = ux->ux_stdstart; 1516 lstd = ux->ux_stdend; 1517 break; 1518 default: 1519 KASSERT(false); 1520 break; 1521 } 1522 if (fstd == NULL) 1523 return; 1524 1525 KASSERT(lstd != NULL); 1526 1527 usb_syncmem(&lstd->dma, 1528 lstd->offs + offsetof(uhci_td_t, td_status), 1529 sizeof(lstd->td.td_status), 1530 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 1531 status = le32toh(lstd->td.td_status); 1532 usb_syncmem(&lstd->dma, 1533 lstd->offs + offsetof(uhci_td_t, td_status), 1534 sizeof(lstd->td.td_status), 1535 BUS_DMASYNC_PREREAD); 1536 1537 /* If the last TD is not marked active we can complete */ 1538 if (!(status & UHCI_TD_ACTIVE)) { 1539 done: 1540 DPRINTFN(12, "ux=%#jx done", (uintptr_t)ux, 0, 0, 0); 1541 uhci_idone(ux, cqp); 1542 return; 1543 } 1544 1545 /* 1546 * If the last TD is still active we need to check whether there 1547 * is an error somewhere in the middle, or whether there was a 1548 * short packet (SPD and not ACTIVE). 1549 */ 1550 DPRINTFN(12, "active ux=%#jx", (uintptr_t)ux, 0, 0, 0); 1551 for (std = fstd; std != lstd; std = std->link.std) { 1552 usb_syncmem(&std->dma, 1553 std->offs + offsetof(uhci_td_t, td_status), 1554 sizeof(std->td.td_status), 1555 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 1556 status = le32toh(std->td.td_status); 1557 usb_syncmem(&std->dma, 1558 std->offs + offsetof(uhci_td_t, td_status), 1559 sizeof(std->td.td_status), BUS_DMASYNC_PREREAD); 1560 1561 /* If there's an active TD the xfer isn't done. */ 1562 if (status & UHCI_TD_ACTIVE) { 1563 DPRINTFN(12, "ux=%#jx std=%#jx still active", 1564 (uintptr_t)ux, (uintptr_t)std, 0, 0); 1565 return; 1566 } 1567 1568 /* Any kind of error makes the xfer done. */ 1569 if (status & UHCI_TD_STALLED) 1570 goto done; 1571 1572 /* 1573 * If the data phase of a control transfer is short, we need 1574 * to complete the status stage 1575 */ 1576 1577 if ((status & UHCI_TD_SPD) && ux->ux_type == UX_CTRL) { 1578 struct uhci_pipe *upipe = 1579 UHCI_PIPE2UPIPE(xfer->ux_pipe); 1580 uhci_soft_qh_t *sqh = upipe->ctrl.sqh; 1581 uhci_soft_td_t *stat = upipe->ctrl.stat; 1582 1583 DPRINTFN(12, "ux=%#jx std=%#jx control status" 1584 "phase needs completion", (uintptr_t)ux, 1585 (uintptr_t)ux->ux_stdstart, 0, 0); 1586 1587 sqh->qh.qh_elink = 1588 htole32(stat->physaddr | UHCI_PTR_TD); 1589 usb_syncmem(&sqh->dma, sqh->offs, sizeof(sqh->qh), 1590 BUS_DMASYNC_PREWRITE); 1591 break; 1592 } 1593 1594 /* We want short packets, and it is short: it's done */ 1595 usb_syncmem(&std->dma, 1596 std->offs + offsetof(uhci_td_t, td_token), 1597 sizeof(std->td.td_token), 1598 BUS_DMASYNC_POSTWRITE); 1599 1600 if ((status & UHCI_TD_SPD) && 1601 UHCI_TD_GET_ACTLEN(status) < 1602 UHCI_TD_GET_MAXLEN(le32toh(std->td.td_token))) { 1603 goto done; 1604 } 1605 } 1606 } 1607 1608 /* Called with USB lock held. 
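 *
 * uhci_idone() claims the xfer with usbd_xfer_trycomplete(), adds up
 * UHCI_TD_GET_ACTLEN() over the data TDs that actually ran (isochronous
 * transfers instead record per-frame lengths), turns the error bits of
 * the last completed TD into a usbd status code, and moves the xfer
 * from the interrupt list onto the caller's completion queue.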
*/ 1609 void 1610 uhci_idone(struct uhci_xfer *ux, ux_completeq_t *cqp) 1611 { 1612 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1613 struct usbd_xfer *xfer = &ux->ux_xfer; 1614 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer); 1615 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 1616 uhci_soft_td_t *std; 1617 uint32_t status = 0, nstatus; 1618 int actlen; 1619 1620 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); 1621 1622 DPRINTFN(12, "ux=%#jx", (uintptr_t)ux, 0, 0, 0); 1623 1624 /* 1625 * Try to claim this xfer for completion. If it has already 1626 * completed or aborted, drop it on the floor. 1627 */ 1628 if (!usbd_xfer_trycomplete(xfer)) 1629 return; 1630 1631 #ifdef DIAGNOSTIC 1632 #ifdef UHCI_DEBUG 1633 if (ux->ux_isdone) { 1634 DPRINTF("--- dump start ---", 0, 0, 0, 0); 1635 uhci_dump_ii(ux); 1636 DPRINTF("--- dump end ---", 0, 0, 0, 0); 1637 } 1638 #endif 1639 KASSERT(!ux->ux_isdone); 1640 KASSERTMSG(!ux->ux_isdone, "xfer %p type %d status %d", xfer, 1641 ux->ux_type, xfer->ux_status); 1642 ux->ux_isdone = true; 1643 #endif 1644 1645 if (xfer->ux_nframes != 0) { 1646 /* Isoc transfer, do things differently. */ 1647 uhci_soft_td_t **stds = upipe->isoc.stds; 1648 int i, n, nframes, len; 1649 1650 DPRINTFN(5, "ux=%#jx isoc ready", (uintptr_t)ux, 0, 0, 0); 1651 1652 nframes = xfer->ux_nframes; 1653 actlen = 0; 1654 n = ux->ux_curframe; 1655 for (i = 0; i < nframes; i++) { 1656 std = stds[n]; 1657 #ifdef UHCI_DEBUG 1658 if (uhcidebug >= 5) { 1659 DPRINTF("isoc TD %jd", i, 0, 0, 0); 1660 DPRINTF("--- dump start ---", 0, 0, 0, 0); 1661 uhci_dump_td(std); 1662 DPRINTF("--- dump end ---", 0, 0, 0, 0); 1663 } 1664 #endif 1665 if (++n >= UHCI_VFRAMELIST_COUNT) 1666 n = 0; 1667 usb_syncmem(&std->dma, 1668 std->offs + offsetof(uhci_td_t, td_status), 1669 sizeof(std->td.td_status), 1670 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 1671 status = le32toh(std->td.td_status); 1672 len = UHCI_TD_GET_ACTLEN(status); 1673 xfer->ux_frlengths[i] = len; 1674 actlen += len; 1675 } 1676 upipe->isoc.inuse -= nframes; 1677 xfer->ux_actlen = actlen; 1678 xfer->ux_status = USBD_NORMAL_COMPLETION; 1679 goto end; 1680 } 1681 1682 #ifdef UHCI_DEBUG 1683 DPRINTFN(10, "ux=%#jx, xfer=%#jx, pipe=%#jx ready", (uintptr_t)ux, 1684 (uintptr_t)xfer, (uintptr_t)upipe, 0); 1685 if (uhcidebug >= 10) { 1686 DPRINTF("--- dump start ---", 0, 0, 0, 0); 1687 uhci_dump_tds(ux->ux_stdstart); 1688 DPRINTF("--- dump end ---", 0, 0, 0, 0); 1689 } 1690 #endif 1691 1692 /* The transfer is done, compute actual length and status. */ 1693 actlen = 0; 1694 for (std = ux->ux_stdstart; std != NULL; std = std->link.std) { 1695 usb_syncmem(&std->dma, std->offs, sizeof(std->td), 1696 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 1697 nstatus = le32toh(std->td.td_status); 1698 if (nstatus & UHCI_TD_ACTIVE) 1699 break; 1700 1701 status = nstatus; 1702 if (UHCI_TD_GET_PID(le32toh(std->td.td_token)) != 1703 UHCI_TD_PID_SETUP) 1704 actlen += UHCI_TD_GET_ACTLEN(status); 1705 else { 1706 /* 1707 * UHCI will report CRCTO in addition to a STALL or NAK 1708 * for a SETUP transaction. See section 3.2.2, "TD 1709 * CONTROL AND STATUS". 1710 */ 1711 if (status & (UHCI_TD_STALLED | UHCI_TD_NAK)) 1712 status &= ~UHCI_TD_CRCTO; 1713 } 1714 } 1715 /* If there are left over TDs we need to update the toggle. 
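	 * The pipe's next data toggle is resynchronised from the first TD
	 * that never ran, so the next transfer queued on this endpoint
	 * starts with the DATA0/DATA1 value the device expects.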
*/ 1716 if (std != NULL) 1717 upipe->nexttoggle = UHCI_TD_GET_DT(le32toh(std->td.td_token)); 1718 1719 status &= UHCI_TD_ERROR; 1720 DPRINTFN(10, "actlen=%jd, status=%#jx", actlen, status, 0, 0); 1721 xfer->ux_actlen = actlen; 1722 if (status != 0) { 1723 1724 DPRINTFN((status == UHCI_TD_STALLED) * 10, 1725 "error, addr=%jd, endpt=0x%02jx", 1726 xfer->ux_pipe->up_dev->ud_addr, 1727 xfer->ux_pipe->up_endpoint->ue_edesc->bEndpointAddress, 1728 0, 0); 1729 DPRINTFN((status == UHCI_TD_STALLED) * 10, 1730 "bitstuff=%jd crcto =%jd nak =%jd babble =%jd", 1731 !!(status & UHCI_TD_BITSTUFF), 1732 !!(status & UHCI_TD_CRCTO), 1733 !!(status & UHCI_TD_NAK), 1734 !!(status & UHCI_TD_BABBLE)); 1735 DPRINTFN((status == UHCI_TD_STALLED) * 10, 1736 "dbuffer =%jd stalled =%jd active =%jd", 1737 !!(status & UHCI_TD_DBUFFER), 1738 !!(status & UHCI_TD_STALLED), 1739 !!(status & UHCI_TD_ACTIVE), 1740 0); 1741 1742 if (status == UHCI_TD_STALLED) 1743 xfer->ux_status = USBD_STALLED; 1744 else 1745 xfer->ux_status = USBD_IOERROR; /* more info XXX */ 1746 } else { 1747 xfer->ux_status = USBD_NORMAL_COMPLETION; 1748 } 1749 1750 end: 1751 uhci_del_intr_list(sc, ux); 1752 if (cqp) 1753 TAILQ_INSERT_TAIL(cqp, ux, ux_list); 1754 1755 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); 1756 DPRINTFN(12, "ux=%#jx done", (uintptr_t)ux, 0, 0, 0); 1757 } 1758 1759 void 1760 uhci_poll(struct usbd_bus *bus) 1761 { 1762 uhci_softc_t *sc = UHCI_BUS2SC(bus); 1763 1764 if (UREAD2(sc, UHCI_STS) & UHCI_STS_USBINT) { 1765 mutex_spin_enter(&sc->sc_intr_lock); 1766 uhci_intr1(sc); 1767 mutex_spin_exit(&sc->sc_intr_lock); 1768 } 1769 } 1770 1771 void 1772 uhci_reset(uhci_softc_t *sc) 1773 { 1774 int n; 1775 1776 UHCICMD(sc, UHCI_CMD_HCRESET); 1777 /* The reset bit goes low when the controller is done. */ 1778 for (n = 0; n < UHCI_RESET_TIMEOUT && 1779 (UREAD2(sc, UHCI_CMD) & UHCI_CMD_HCRESET); n++) 1780 usb_delay_ms(&sc->sc_bus, 1); 1781 if (n >= UHCI_RESET_TIMEOUT) 1782 printf("%s: controller did not reset\n", 1783 device_xname(sc->sc_dev)); 1784 } 1785 1786 usbd_status 1787 uhci_run(uhci_softc_t *sc, int run, int locked) 1788 { 1789 int n, running; 1790 uint16_t cmd; 1791 1792 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1793 1794 run = run != 0; 1795 if (!locked) 1796 mutex_spin_enter(&sc->sc_intr_lock); 1797 1798 DPRINTF("setting run=%jd", run, 0, 0, 0); 1799 cmd = UREAD2(sc, UHCI_CMD); 1800 if (run) 1801 cmd |= UHCI_CMD_RS; 1802 else 1803 cmd &= ~UHCI_CMD_RS; 1804 UHCICMD(sc, cmd); 1805 for (n = 0; n < 10; n++) { 1806 running = !(UREAD2(sc, UHCI_STS) & UHCI_STS_HCH); 1807 /* return when we've entered the state we want */ 1808 if (run == running) { 1809 if (!locked) 1810 mutex_spin_exit(&sc->sc_intr_lock); 1811 DPRINTF("done cmd=%#jx sts=%#jx", 1812 UREAD2(sc, UHCI_CMD), UREAD2(sc, UHCI_STS), 0, 0); 1813 return USBD_NORMAL_COMPLETION; 1814 } 1815 usb_delay_ms_locked(&sc->sc_bus, 1, &sc->sc_intr_lock); 1816 } 1817 if (!locked) 1818 mutex_spin_exit(&sc->sc_intr_lock); 1819 printf("%s: cannot %s\n", device_xname(sc->sc_dev), 1820 run ? "start" : "stop"); 1821 return USBD_IOERROR; 1822 } 1823 1824 /* 1825 * Memory management routines. 1826 * uhci_alloc_std allocates TDs 1827 * uhci_alloc_sqh allocates QHs 1828 * These two routines do their own free list management, 1829 * partly for speed, partly because allocating DMAable memory 1830 * has page size granularity so much memory would be wasted if 1831 * only one TD/QH (32 bytes) was placed in each allocated chunk. 
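 *
 * Both allocators work the same way: when the controller's free list
 * (sc_freetds / sc_freeqhs) runs dry, one larger DMA-coherent chunk is
 * obtained with usb_allocmem() and carved into UHCI_STD_CHUNK (or
 * UHCI_SQH_CHUNK) entries that are pushed onto the free list; after
 * that, allocating is just popping the list head and freeing is pushing
 * it back, both under sc_lock.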
1832 */ 1833 1834 uhci_soft_td_t * 1835 uhci_alloc_std(uhci_softc_t *sc) 1836 { 1837 uhci_soft_td_t *std; 1838 usbd_status err; 1839 int i, offs; 1840 usb_dma_t dma; 1841 1842 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1843 1844 mutex_enter(&sc->sc_lock); 1845 if (sc->sc_freetds == NULL) { 1846 DPRINTFN(2, "allocating chunk", 0, 0, 0, 0); 1847 mutex_exit(&sc->sc_lock); 1848 1849 err = usb_allocmem(&sc->sc_bus, UHCI_STD_SIZE * UHCI_STD_CHUNK, 1850 UHCI_TD_ALIGN, USBMALLOC_COHERENT, &dma); 1851 if (err) 1852 return NULL; 1853 1854 mutex_enter(&sc->sc_lock); 1855 for (i = 0; i < UHCI_STD_CHUNK; i++) { 1856 offs = i * UHCI_STD_SIZE; 1857 std = KERNADDR(&dma, offs); 1858 std->physaddr = DMAADDR(&dma, offs); 1859 std->dma = dma; 1860 std->offs = offs; 1861 std->link.std = sc->sc_freetds; 1862 sc->sc_freetds = std; 1863 } 1864 } 1865 std = sc->sc_freetds; 1866 sc->sc_freetds = std->link.std; 1867 mutex_exit(&sc->sc_lock); 1868 1869 memset(&std->td, 0, sizeof(uhci_td_t)); 1870 1871 return std; 1872 } 1873 1874 #define TD_IS_FREE 0x12345678 1875 1876 void 1877 uhci_free_std_locked(uhci_softc_t *sc, uhci_soft_td_t *std) 1878 { 1879 KASSERT(mutex_owned(&sc->sc_lock)); 1880 1881 #ifdef DIAGNOSTIC 1882 if (le32toh(std->td.td_token) == TD_IS_FREE) { 1883 printf("%s: freeing free TD %p\n", __func__, std); 1884 return; 1885 } 1886 std->td.td_token = htole32(TD_IS_FREE); 1887 #endif 1888 1889 std->link.std = sc->sc_freetds; 1890 sc->sc_freetds = std; 1891 } 1892 1893 void 1894 uhci_free_std(uhci_softc_t *sc, uhci_soft_td_t *std) 1895 { 1896 mutex_enter(&sc->sc_lock); 1897 uhci_free_std_locked(sc, std); 1898 mutex_exit(&sc->sc_lock); 1899 } 1900 1901 uhci_soft_qh_t * 1902 uhci_alloc_sqh(uhci_softc_t *sc) 1903 { 1904 uhci_soft_qh_t *sqh; 1905 usbd_status err; 1906 int i, offs; 1907 usb_dma_t dma; 1908 1909 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1910 1911 mutex_enter(&sc->sc_lock); 1912 if (sc->sc_freeqhs == NULL) { 1913 DPRINTFN(2, "allocating chunk", 0, 0, 0, 0); 1914 mutex_exit(&sc->sc_lock); 1915 1916 err = usb_allocmem(&sc->sc_bus, UHCI_SQH_SIZE * UHCI_SQH_CHUNK, 1917 UHCI_QH_ALIGN, USBMALLOC_COHERENT, &dma); 1918 if (err) 1919 return NULL; 1920 1921 mutex_enter(&sc->sc_lock); 1922 for (i = 0; i < UHCI_SQH_CHUNK; i++) { 1923 offs = i * UHCI_SQH_SIZE; 1924 sqh = KERNADDR(&dma, offs); 1925 sqh->physaddr = DMAADDR(&dma, offs); 1926 sqh->dma = dma; 1927 sqh->offs = offs; 1928 sqh->hlink = sc->sc_freeqhs; 1929 sc->sc_freeqhs = sqh; 1930 } 1931 } 1932 sqh = sc->sc_freeqhs; 1933 sc->sc_freeqhs = sqh->hlink; 1934 mutex_exit(&sc->sc_lock); 1935 1936 memset(&sqh->qh, 0, sizeof(uhci_qh_t)); 1937 1938 return sqh; 1939 } 1940 1941 void 1942 uhci_free_sqh(uhci_softc_t *sc, uhci_soft_qh_t *sqh) 1943 { 1944 KASSERT(mutex_owned(&sc->sc_lock)); 1945 1946 sqh->hlink = sc->sc_freeqhs; 1947 sc->sc_freeqhs = sqh; 1948 } 1949 1950 #if 0 1951 void 1952 uhci_free_std_chain(uhci_softc_t *sc, uhci_soft_td_t *std, 1953 uhci_soft_td_t *stdend) 1954 { 1955 uhci_soft_td_t *p; 1956 uint32_t td_link; 1957 1958 /* 1959 * to avoid race condition with the controller which may be looking 1960 * at this chain, we need to first invalidate all links, and 1961 * then wait for the controller to move to another queue 1962 */ 1963 for (p = std; p != stdend; p = p->link.std) { 1964 usb_syncmem(&p->dma, 1965 p->offs + offsetof(uhci_td_t, td_link), 1966 sizeof(p->td.td_link), 1967 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 1968 td_link = le32toh(p->td.td_link); 1969 usb_syncmem(&p->dma, 1970 p->offs + offsetof(uhci_td_t, td_link), 1971 
sizeof(p->td.td_link), 1972 BUS_DMASYNC_PREREAD); 1973 if ((td_link & UHCI_PTR_T) == 0) { 1974 p->td.td_link = htole32(UHCI_PTR_T); 1975 usb_syncmem(&p->dma, 1976 p->offs + offsetof(uhci_td_t, td_link), 1977 sizeof(p->td.td_link), 1978 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1979 } 1980 } 1981 delay(UHCI_QH_REMOVE_DELAY); 1982 1983 for (; std != stdend; std = p) { 1984 p = std->link.std; 1985 uhci_free_std(sc, std); 1986 } 1987 } 1988 #endif 1989 1990 int 1991 uhci_alloc_std_chain(uhci_softc_t *sc, struct usbd_xfer *xfer, int len, 1992 int rd, uhci_soft_td_t **sp) 1993 { 1994 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer); 1995 uint16_t flags = xfer->ux_flags; 1996 uhci_soft_td_t *p; 1997 1998 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 1999 2000 DPRINTFN(8, "xfer=%#jx pipe=%#jx", (uintptr_t)xfer, 2001 (uintptr_t)xfer->ux_pipe, 0, 0); 2002 2003 ASSERT_SLEEPABLE(); 2004 KASSERT(sp); 2005 2006 int maxp = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize); 2007 if (maxp == 0) { 2008 printf("%s: maxp=0\n", __func__); 2009 return EINVAL; 2010 } 2011 size_t ntd = howmany(len, maxp); 2012 /* 2013 * if our transfer is bigger than PAGE_SIZE and maxp not a factor of 2014 * PAGE_SIZE then we will need another TD per page. 2015 */ 2016 if (len > PAGE_SIZE && (PAGE_SIZE % maxp) != 0) { 2017 ntd += howmany(len, PAGE_SIZE); 2018 } 2019 2020 /* 2021 * Might need one more TD if we're writing a ZLP 2022 */ 2023 if (!rd && (flags & USBD_FORCE_SHORT_XFER)) { 2024 ntd++; 2025 } 2026 DPRINTFN(10, "maxp=%jd ntd=%jd", maxp, ntd, 0, 0); 2027 2028 uxfer->ux_stds = NULL; 2029 uxfer->ux_nstd = ntd; 2030 if (ntd == 0) { 2031 *sp = NULL; 2032 DPRINTF("ntd=0", 0, 0, 0, 0); 2033 return 0; 2034 } 2035 uxfer->ux_stds = kmem_alloc(sizeof(uhci_soft_td_t *) * ntd, 2036 KM_SLEEP); 2037 2038 for (int i = 0; i < ntd; i++) { 2039 p = uhci_alloc_std(sc); 2040 if (p == NULL) { 2041 if (i != 0) { 2042 uxfer->ux_nstd = i; 2043 uhci_free_stds(sc, uxfer); 2044 } 2045 kmem_free(uxfer->ux_stds, 2046 sizeof(uhci_soft_td_t *) * ntd); 2047 return ENOMEM; 2048 } 2049 uxfer->ux_stds[i] = p; 2050 } 2051 2052 *sp = uxfer->ux_stds[0]; 2053 2054 return 0; 2055 } 2056 2057 Static void 2058 uhci_free_stds(uhci_softc_t *sc, struct uhci_xfer *ux) 2059 { 2060 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2061 2062 DPRINTFN(8, "ux=%#jx", (uintptr_t)ux, 0, 0, 0); 2063 2064 mutex_enter(&sc->sc_lock); 2065 for (size_t i = 0; i < ux->ux_nstd; i++) { 2066 uhci_soft_td_t *std = ux->ux_stds[i]; 2067 #ifdef DIAGNOSTIC 2068 if (le32toh(std->td.td_token) == TD_IS_FREE) { 2069 printf("%s: freeing free TD %p\n", __func__, std); 2070 return; 2071 } 2072 std->td.td_token = htole32(TD_IS_FREE); 2073 #endif 2074 ux->ux_stds[i]->link.std = sc->sc_freetds; 2075 sc->sc_freetds = std; 2076 } 2077 mutex_exit(&sc->sc_lock); 2078 } 2079 2080 2081 Static void 2082 uhci_reset_std_chain(uhci_softc_t *sc, struct usbd_xfer *xfer, 2083 int length, int isread, int *toggle, uhci_soft_td_t **lstd) 2084 { 2085 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer); 2086 struct usbd_pipe *pipe = xfer->ux_pipe; 2087 usb_dma_t *dma = &xfer->ux_dmabuf; 2088 uint16_t flags = xfer->ux_flags; 2089 uhci_soft_td_t *std, *prev; 2090 int len = length; 2091 int tog = *toggle; 2092 int maxp; 2093 uint32_t status; 2094 size_t i, offs; 2095 2096 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2097 DPRINTFN(8, "xfer=%#jx len %jd isread %jd toggle %jd", (uintptr_t)xfer, 2098 len, isread, *toggle); 2099 2100 KASSERT(len != 0 || (!isread && (flags & USBD_FORCE_SHORT_XFER))); 2101 2102 maxp = 
UGETW(pipe->up_endpoint->ue_edesc->wMaxPacketSize); 2103 KASSERT(maxp != 0); 2104 2105 int addr = xfer->ux_pipe->up_dev->ud_addr; 2106 int endpt = xfer->ux_pipe->up_endpoint->ue_edesc->bEndpointAddress; 2107 2108 status = UHCI_TD_ZERO_ACTLEN(UHCI_TD_SET_ERRCNT(3) | UHCI_TD_ACTIVE); 2109 if (pipe->up_dev->ud_speed == USB_SPEED_LOW) 2110 status |= UHCI_TD_LS; 2111 if (flags & USBD_SHORT_XFER_OK) 2112 status |= UHCI_TD_SPD; 2113 usb_syncmem(dma, 0, len, 2114 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 2115 std = prev = NULL; 2116 for (offs = i = 0; len != 0 && i < uxfer->ux_nstd; i++, prev = std) { 2117 int l = len; 2118 std = uxfer->ux_stds[i]; 2119 2120 const bus_addr_t sbp = DMAADDR(dma, offs); 2121 const bus_addr_t ebp = DMAADDR(dma, offs + l - 1); 2122 if (((sbp ^ ebp) & ~PAGE_MASK) != 0) 2123 l = PAGE_SIZE - (DMAADDR(dma, offs) & PAGE_MASK); 2124 2125 if (l > maxp) 2126 l = maxp; 2127 2128 if (prev) { 2129 prev->link.std = std; 2130 prev->td.td_link = htole32( 2131 std->physaddr | UHCI_PTR_VF | UHCI_PTR_TD 2132 ); 2133 usb_syncmem(&prev->dma, prev->offs, sizeof(prev->td), 2134 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2135 } 2136 2137 usb_syncmem(&std->dma, std->offs, sizeof(std->td), 2138 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 2139 2140 std->td.td_link = htole32(UHCI_PTR_T | UHCI_PTR_VF | UHCI_PTR_TD); 2141 std->td.td_status = htole32(status); 2142 std->td.td_token = htole32( 2143 UHCI_TD_SET_ENDPT(UE_GET_ADDR(endpt)) | 2144 UHCI_TD_SET_DEVADDR(addr) | 2145 UHCI_TD_SET_PID(isread ? UHCI_TD_PID_IN : UHCI_TD_PID_OUT) | 2146 UHCI_TD_SET_DT(tog) | 2147 UHCI_TD_SET_MAXLEN(l) 2148 ); 2149 std->td.td_buffer = htole32(DMAADDR(dma, offs)); 2150 2151 std->link.std = NULL; 2152 2153 usb_syncmem(&std->dma, std->offs, sizeof(std->td), 2154 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2155 tog ^= 1; 2156 2157 offs += l; 2158 len -= l; 2159 } 2160 KASSERTMSG(len == 0, "xfer %p alen %d len %d mps %d ux_nqtd %zu i %zu", 2161 xfer, length, len, maxp, uxfer->ux_nstd, i); 2162 2163 if (!isread && 2164 (flags & USBD_FORCE_SHORT_XFER) && 2165 length % maxp == 0) { 2166 /* Force a 0 length transfer at the end. 
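 * With USBD_FORCE_SHORT_XFER, a write whose length is an exact multiple
 * of maxp (length % maxp == 0) gets one extra OUT TD with a maximum
 * length of 0, so the device receives a zero-length packet marking the
 * end of the transfer instead of waiting for more data.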
*/ 2167 KASSERTMSG(i < uxfer->ux_nstd, "i=%zu nstd=%zu", i, 2168 uxfer->ux_nstd); 2169 std = uxfer->ux_stds[i++]; 2170 2171 std->td.td_link = htole32(UHCI_PTR_T | UHCI_PTR_VF | UHCI_PTR_TD); 2172 std->td.td_status = htole32(status); 2173 std->td.td_token = htole32( 2174 UHCI_TD_SET_ENDPT(UE_GET_ADDR(endpt)) | 2175 UHCI_TD_SET_DEVADDR(addr) | 2176 UHCI_TD_SET_PID(UHCI_TD_PID_OUT) | 2177 UHCI_TD_SET_DT(tog) | 2178 UHCI_TD_SET_MAXLEN(0) 2179 ); 2180 std->td.td_buffer = 0; 2181 usb_syncmem(&std->dma, std->offs, sizeof(std->td), 2182 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2183 2184 std->link.std = NULL; 2185 if (prev) { 2186 prev->link.std = std; 2187 prev->td.td_link = htole32( 2188 std->physaddr | UHCI_PTR_VF | UHCI_PTR_TD 2189 ); 2190 usb_syncmem(&prev->dma, prev->offs, sizeof(prev->td), 2191 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2192 } 2193 tog ^= 1; 2194 } 2195 *lstd = std; 2196 *toggle = tog; 2197 } 2198 2199 void 2200 uhci_device_clear_toggle(struct usbd_pipe *pipe) 2201 { 2202 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe); 2203 upipe->nexttoggle = 0; 2204 } 2205 2206 void 2207 uhci_noop(struct usbd_pipe *pipe) 2208 { 2209 } 2210 2211 int 2212 uhci_device_bulk_init(struct usbd_xfer *xfer) 2213 { 2214 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2215 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer); 2216 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc; 2217 int endpt = ed->bEndpointAddress; 2218 int isread = UE_GET_DIR(endpt) == UE_DIR_IN; 2219 int len = xfer->ux_bufsize; 2220 int err = 0; 2221 2222 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2223 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, len, 2224 xfer->ux_flags, 0); 2225 2226 if (sc->sc_dying) 2227 return USBD_IOERROR; 2228 2229 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST)); 2230 2231 uxfer->ux_type = UX_BULK; 2232 err = uhci_alloc_std_chain(sc, xfer, len, isread, &uxfer->ux_stdstart); 2233 if (err) 2234 return err; 2235 2236 #ifdef UHCI_DEBUG 2237 if (uhcidebug >= 10) { 2238 DPRINTF("--- dump start ---", 0, 0, 0, 0); 2239 uhci_dump_tds(uxfer->ux_stdstart); 2240 DPRINTF("--- dump end ---", 0, 0, 0, 0); 2241 } 2242 #endif 2243 2244 return 0; 2245 } 2246 2247 Static void 2248 uhci_device_bulk_fini(struct usbd_xfer *xfer) 2249 { 2250 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2251 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 2252 2253 KASSERT(ux->ux_type == UX_BULK); 2254 2255 if (ux->ux_nstd) { 2256 uhci_free_stds(sc, ux); 2257 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd); 2258 } 2259 } 2260 2261 usbd_status 2262 uhci_device_bulk_transfer(struct usbd_xfer *xfer) 2263 { 2264 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2265 usbd_status err; 2266 2267 /* Insert last in queue. */ 2268 mutex_enter(&sc->sc_lock); 2269 err = usb_insert_transfer(xfer); 2270 mutex_exit(&sc->sc_lock); 2271 if (err) 2272 return err; 2273 2274 /* 2275 * Pipe isn't running (otherwise err would be USBD_INPROG), 2276 * so start it first. 
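 * (usb_insert_transfer() only returns USBD_NORMAL_COMPLETION when this
 * xfer landed at the head of an otherwise idle queue; transfers queued
 * behind a running one are started later, when their predecessor
 * completes.)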
2277 */ 2278 return uhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 2279 } 2280 2281 usbd_status 2282 uhci_device_bulk_start(struct usbd_xfer *xfer) 2283 { 2284 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 2285 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 2286 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2287 uhci_soft_td_t *data, *dataend; 2288 uhci_soft_qh_t *sqh; 2289 const bool polling = sc->sc_bus.ub_usepolling; 2290 int len; 2291 int endpt; 2292 int isread; 2293 2294 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2295 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, 2296 xfer->ux_length, xfer->ux_flags, 0); 2297 2298 if (sc->sc_dying) 2299 return USBD_IOERROR; 2300 2301 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST)); 2302 KASSERT(xfer->ux_length <= xfer->ux_bufsize); 2303 2304 len = xfer->ux_length; 2305 endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress; 2306 isread = UE_GET_DIR(endpt) == UE_DIR_IN; 2307 sqh = upipe->bulk.sqh; 2308 2309 /* Take lock here to protect nexttoggle */ 2310 if (!polling) 2311 mutex_enter(&sc->sc_lock); 2312 2313 uhci_reset_std_chain(sc, xfer, len, isread, &upipe->nexttoggle, 2314 &dataend); 2315 2316 data = ux->ux_stdstart; 2317 ux->ux_stdend = dataend; 2318 dataend->td.td_status |= htole32(UHCI_TD_IOC); 2319 usb_syncmem(&dataend->dma, 2320 dataend->offs + offsetof(uhci_td_t, td_status), 2321 sizeof(dataend->td.td_status), 2322 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2323 2324 #ifdef UHCI_DEBUG 2325 if (uhcidebug >= 10) { 2326 DPRINTF("--- dump start ---", 0, 0, 0, 0); 2327 DPRINTFN(10, "before transfer", 0, 0, 0, 0); 2328 uhci_dump_tds(data); 2329 DPRINTF("--- dump end ---", 0, 0, 0, 0); 2330 } 2331 #endif 2332 2333 KASSERT(ux->ux_isdone); 2334 #ifdef DIAGNOSTIC 2335 ux->ux_isdone = false; 2336 #endif 2337 2338 sqh->elink = data; 2339 sqh->qh.qh_elink = htole32(data->physaddr | UHCI_PTR_TD); 2340 /* uhci_add_bulk() will do usb_syncmem(sqh) */ 2341 2342 uhci_add_bulk(sc, sqh); 2343 uhci_add_intr_list(sc, ux); 2344 usbd_xfer_schedule_timeout(xfer); 2345 xfer->ux_status = USBD_IN_PROGRESS; 2346 if (!polling) 2347 mutex_exit(&sc->sc_lock); 2348 2349 return USBD_IN_PROGRESS; 2350 } 2351 2352 /* Abort a device bulk request. */ 2353 void 2354 uhci_device_bulk_abort(struct usbd_xfer *xfer) 2355 { 2356 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer); 2357 2358 KASSERT(mutex_owned(&sc->sc_lock)); 2359 2360 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2361 2362 usbd_xfer_abort(xfer); 2363 } 2364 2365 /* 2366 * To allow the hardware time to notice we simply wait. 2367 */ 2368 Static void 2369 uhci_abortx(struct usbd_xfer *xfer) 2370 { 2371 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2372 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 2373 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 2374 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2375 uhci_soft_td_t *std; 2376 2377 DPRINTFN(1,"xfer=%#jx", (uintptr_t)xfer, 0, 0, 0); 2378 2379 KASSERT(mutex_owned(&sc->sc_lock)); 2380 ASSERT_SLEEPABLE(); 2381 2382 KASSERTMSG((xfer->ux_status == USBD_CANCELLED || 2383 xfer->ux_status == USBD_TIMEOUT), 2384 "bad abort status: %d", xfer->ux_status); 2385 2386 /* 2387 * If we're dying, skip the hardware action and just notify the 2388 * software that we're done. 2389 */ 2390 if (sc->sc_dying) { 2391 DPRINTFN(4, "xfer %#jx dying %ju", (uintptr_t)xfer, 2392 xfer->ux_status, 0, 0); 2393 goto dying; 2394 } 2395 2396 /* 2397 * HC Step 1: Make interrupt routine and hardware ignore xfer. 
2398 */ 2399 uhci_del_intr_list(sc, ux); 2400 2401 DPRINTF("stop ux=%#jx", (uintptr_t)ux, 0, 0, 0); 2402 for (std = ux->ux_stdstart; std != NULL; std = std->link.std) { 2403 usb_syncmem(&std->dma, 2404 std->offs + offsetof(uhci_td_t, td_status), 2405 sizeof(std->td.td_status), 2406 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 2407 std->td.td_status &= htole32(~(UHCI_TD_ACTIVE | UHCI_TD_IOC)); 2408 usb_syncmem(&std->dma, 2409 std->offs + offsetof(uhci_td_t, td_status), 2410 sizeof(std->td.td_status), 2411 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2412 } 2413 2414 /* 2415 * HC Step 2: Wait until we know hardware has finished any possible 2416 * use of the xfer. 2417 */ 2418 /* Hardware finishes in 1ms */ 2419 usb_delay_ms_locked(upipe->pipe.up_dev->ud_bus, 2, &sc->sc_lock); 2420 2421 /* 2422 * HC Step 3: Notify completion to waiting xfers. 2423 */ 2424 dying: 2425 #ifdef DIAGNOSTIC 2426 ux->ux_isdone = true; 2427 #endif 2428 usb_transfer_complete(xfer); 2429 DPRINTFN(14, "end", 0, 0, 0, 0); 2430 2431 KASSERT(mutex_owned(&sc->sc_lock)); 2432 } 2433 2434 /* Close a device bulk pipe. */ 2435 void 2436 uhci_device_bulk_close(struct usbd_pipe *pipe) 2437 { 2438 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe); 2439 uhci_softc_t *sc = UHCI_PIPE2SC(pipe); 2440 2441 KASSERT(mutex_owned(&sc->sc_lock)); 2442 2443 uhci_free_sqh(sc, upipe->bulk.sqh); 2444 2445 pipe->up_endpoint->ue_toggle = upipe->nexttoggle; 2446 } 2447 2448 int 2449 uhci_device_ctrl_init(struct usbd_xfer *xfer) 2450 { 2451 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer); 2452 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 2453 usb_device_request_t *req = &xfer->ux_request; 2454 struct usbd_device *dev = upipe->pipe.up_dev; 2455 uhci_softc_t *sc = dev->ud_bus->ub_hcpriv; 2456 uhci_soft_td_t *data = NULL; 2457 int len; 2458 usbd_status err; 2459 int isread; 2460 2461 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2462 DPRINTFN(3, "xfer=%#jx len=%jd, addr=%jd, endpt=%jd", 2463 (uintptr_t)xfer, xfer->ux_bufsize, dev->ud_addr, 2464 upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress); 2465 2466 isread = req->bmRequestType & UT_READ; 2467 len = xfer->ux_bufsize; 2468 2469 uxfer->ux_type = UX_CTRL; 2470 /* Set up data transaction */ 2471 if (len != 0) { 2472 err = uhci_alloc_std_chain(sc, xfer, len, isread, &data); 2473 if (err) 2474 return err; 2475 } 2476 /* Set up interrupt info. */ 2477 uxfer->ux_setup = upipe->ctrl.setup; 2478 uxfer->ux_stat = upipe->ctrl.stat; 2479 uxfer->ux_data = data; 2480 2481 return 0; 2482 } 2483 2484 Static void 2485 uhci_device_ctrl_fini(struct usbd_xfer *xfer) 2486 { 2487 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2488 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 2489 2490 KASSERT(ux->ux_type == UX_CTRL); 2491 2492 if (ux->ux_nstd) { 2493 uhci_free_stds(sc, ux); 2494 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd); 2495 } 2496 } 2497 2498 usbd_status 2499 uhci_device_ctrl_transfer(struct usbd_xfer *xfer) 2500 { 2501 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2502 usbd_status err; 2503 2504 /* Insert last in queue. */ 2505 mutex_enter(&sc->sc_lock); 2506 err = usb_insert_transfer(xfer); 2507 mutex_exit(&sc->sc_lock); 2508 if (err) 2509 return err; 2510 2511 /* 2512 * Pipe isn't running (otherwise err would be USBD_INPROG), 2513 * so start it first. 
2514 */ 2515 return uhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 2516 } 2517 2518 usbd_status 2519 uhci_device_ctrl_start(struct usbd_xfer *xfer) 2520 { 2521 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2522 struct uhci_xfer *uxfer = UHCI_XFER2UXFER(xfer); 2523 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 2524 usb_device_request_t *req = &xfer->ux_request; 2525 struct usbd_device *dev = upipe->pipe.up_dev; 2526 int addr = dev->ud_addr; 2527 int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress; 2528 uhci_soft_td_t *setup, *stat, *next, *dataend; 2529 uhci_soft_qh_t *sqh; 2530 const bool polling = sc->sc_bus.ub_usepolling; 2531 int len; 2532 int isread; 2533 2534 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2535 2536 if (sc->sc_dying) 2537 return USBD_IOERROR; 2538 2539 KASSERT(xfer->ux_rqflags & URQ_REQUEST); 2540 2541 DPRINTFN(3, "type=0x%02jx, request=0x%02jx, " 2542 "wValue=0x%04jx, wIndex=0x%04jx", 2543 req->bmRequestType, req->bRequest, UGETW(req->wValue), 2544 UGETW(req->wIndex)); 2545 DPRINTFN(3, "len=%jd, addr=%jd, endpt=%jd", 2546 UGETW(req->wLength), dev->ud_addr, endpt, 0); 2547 2548 isread = req->bmRequestType & UT_READ; 2549 len = UGETW(req->wLength); 2550 2551 setup = upipe->ctrl.setup; 2552 stat = upipe->ctrl.stat; 2553 sqh = upipe->ctrl.sqh; 2554 2555 memcpy(KERNADDR(&upipe->ctrl.reqdma, 0), req, sizeof(*req)); 2556 usb_syncmem(&upipe->ctrl.reqdma, 0, sizeof(*req), BUS_DMASYNC_PREWRITE); 2557 2558 if (!polling) 2559 mutex_enter(&sc->sc_lock); 2560 2561 /* Set up data transaction */ 2562 if (len != 0) { 2563 upipe->nexttoggle = 1; 2564 next = uxfer->ux_data; 2565 uhci_reset_std_chain(sc, xfer, len, isread, 2566 &upipe->nexttoggle, &dataend); 2567 dataend->link.std = stat; 2568 dataend->td.td_link = htole32(stat->physaddr | UHCI_PTR_TD); 2569 usb_syncmem(&dataend->dma, 2570 dataend->offs + offsetof(uhci_td_t, td_link), 2571 sizeof(dataend->td.td_link), 2572 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2573 } else { 2574 next = stat; 2575 } 2576 2577 const uint32_t status = UHCI_TD_ZERO_ACTLEN( 2578 UHCI_TD_SET_ERRCNT(3) | 2579 UHCI_TD_ACTIVE | 2580 (dev->ud_speed == USB_SPEED_LOW ? UHCI_TD_LS : 0) 2581 ); 2582 setup->link.std = next; 2583 setup->td.td_link = htole32(next->physaddr | UHCI_PTR_TD); 2584 setup->td.td_status = htole32(status); 2585 setup->td.td_token = htole32(UHCI_TD_SETUP(sizeof(*req), endpt, addr)); 2586 setup->td.td_buffer = htole32(DMAADDR(&upipe->ctrl.reqdma, 0)); 2587 2588 usb_syncmem(&setup->dma, setup->offs, sizeof(setup->td), 2589 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2590 2591 stat->link.std = NULL; 2592 stat->td.td_link = htole32(UHCI_PTR_T); 2593 stat->td.td_status = htole32(status | UHCI_TD_IOC); 2594 stat->td.td_token = 2595 htole32(isread ? UHCI_TD_OUT(0, endpt, addr, 1) : 2596 UHCI_TD_IN (0, endpt, addr, 1)); 2597 stat->td.td_buffer = htole32(0); 2598 usb_syncmem(&stat->dma, stat->offs, sizeof(stat->td), 2599 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2600 2601 #ifdef UHCI_DEBUG 2602 if (uhcidebug >= 10) { 2603 DPRINTF("--- dump start ---", 0, 0, 0, 0); 2604 DPRINTF("before transfer", 0, 0, 0, 0); 2605 uhci_dump_tds(setup); 2606 DPRINTF("--- dump end ---", 0, 0, 0, 0); 2607 } 2608 #endif 2609 2610 /* Set up interrupt info. 
*/ 2611 uxfer->ux_setup = setup; 2612 uxfer->ux_stat = stat; 2613 KASSERT(uxfer->ux_isdone); 2614 #ifdef DIAGNOSTIC 2615 uxfer->ux_isdone = false; 2616 #endif 2617 2618 sqh->elink = setup; 2619 sqh->qh.qh_elink = htole32(setup->physaddr | UHCI_PTR_TD); 2620 /* uhci_add_?s_ctrl() will do usb_syncmem(sqh) */ 2621 2622 if (dev->ud_speed == USB_SPEED_LOW) 2623 uhci_add_ls_ctrl(sc, sqh); 2624 else 2625 uhci_add_hs_ctrl(sc, sqh); 2626 uhci_add_intr_list(sc, uxfer); 2627 #ifdef UHCI_DEBUG 2628 if (uhcidebug >= 12) { 2629 uhci_soft_td_t *std; 2630 uhci_soft_qh_t *xqh; 2631 uhci_soft_qh_t *sxqh; 2632 int maxqh = 0; 2633 uhci_physaddr_t link; 2634 DPRINTFN(12, "--- dump start ---", 0, 0, 0, 0); 2635 DPRINTFN(12, "follow from [0]", 0, 0, 0, 0); 2636 for (std = sc->sc_vframes[0].htd, link = 0; 2637 (link & UHCI_PTR_QH) == 0; 2638 std = std->link.std) { 2639 link = le32toh(std->td.td_link); 2640 uhci_dump_td(std); 2641 } 2642 sxqh = (uhci_soft_qh_t *)std; 2643 uhci_dump_qh(sxqh); 2644 for (xqh = sxqh; 2645 xqh != NULL; 2646 xqh = (maxqh++ == 5 || xqh->hlink == sxqh || 2647 xqh->hlink == xqh ? NULL : xqh->hlink)) { 2648 uhci_dump_qh(xqh); 2649 } 2650 DPRINTFN(12, "Enqueued QH:", 0, 0, 0, 0); 2651 uhci_dump_qh(sqh); 2652 uhci_dump_tds(sqh->elink); 2653 DPRINTF("--- dump end ---", 0, 0, 0, 0); 2654 } 2655 #endif 2656 usbd_xfer_schedule_timeout(xfer); 2657 xfer->ux_status = USBD_IN_PROGRESS; 2658 if (!polling) 2659 mutex_exit(&sc->sc_lock); 2660 2661 return USBD_IN_PROGRESS; 2662 } 2663 2664 int 2665 uhci_device_intr_init(struct usbd_xfer *xfer) 2666 { 2667 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2668 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 2669 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc; 2670 int endpt = ed->bEndpointAddress; 2671 int isread = UE_GET_DIR(endpt) == UE_DIR_IN; 2672 int len = xfer->ux_bufsize; 2673 int err; 2674 2675 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2676 2677 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, 2678 xfer->ux_length, xfer->ux_flags, 0); 2679 2680 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST)); 2681 KASSERT(len != 0); 2682 2683 ux->ux_type = UX_INTR; 2684 ux->ux_nstd = 0; 2685 err = uhci_alloc_std_chain(sc, xfer, len, isread, &ux->ux_stdstart); 2686 2687 return err; 2688 } 2689 2690 Static void 2691 uhci_device_intr_fini(struct usbd_xfer *xfer) 2692 { 2693 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2694 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 2695 2696 KASSERT(ux->ux_type == UX_INTR); 2697 2698 if (ux->ux_nstd) { 2699 uhci_free_stds(sc, ux); 2700 kmem_free(ux->ux_stds, sizeof(uhci_soft_td_t *) * ux->ux_nstd); 2701 } 2702 } 2703 2704 usbd_status 2705 uhci_device_intr_transfer(struct usbd_xfer *xfer) 2706 { 2707 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2708 usbd_status err; 2709 2710 /* Insert last in queue. */ 2711 mutex_enter(&sc->sc_lock); 2712 err = usb_insert_transfer(xfer); 2713 mutex_exit(&sc->sc_lock); 2714 if (err) 2715 return err; 2716 2717 /* 2718 * Pipe isn't running (otherwise err would be USBD_INPROG), 2719 * so start it first. 
2720 */ 2721 return uhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 2722 } 2723 2724 usbd_status 2725 uhci_device_intr_start(struct usbd_xfer *xfer) 2726 { 2727 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 2728 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 2729 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2730 uhci_soft_td_t *data, *dataend; 2731 uhci_soft_qh_t *sqh; 2732 const bool polling = sc->sc_bus.ub_usepolling; 2733 int isread, endpt; 2734 int i; 2735 2736 if (sc->sc_dying) 2737 return USBD_IOERROR; 2738 2739 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2740 2741 DPRINTFN(3, "xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, 2742 xfer->ux_length, xfer->ux_flags, 0); 2743 2744 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST)); 2745 KASSERT(xfer->ux_length <= xfer->ux_bufsize); 2746 2747 endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress; 2748 isread = UE_GET_DIR(endpt) == UE_DIR_IN; 2749 2750 data = ux->ux_stdstart; 2751 2752 KASSERT(ux->ux_isdone); 2753 #ifdef DIAGNOSTIC 2754 ux->ux_isdone = false; 2755 #endif 2756 2757 /* Take lock to protect nexttoggle */ 2758 if (!polling) 2759 mutex_enter(&sc->sc_lock); 2760 uhci_reset_std_chain(sc, xfer, xfer->ux_length, isread, 2761 &upipe->nexttoggle, &dataend); 2762 2763 dataend->td.td_status |= htole32(UHCI_TD_IOC); 2764 usb_syncmem(&dataend->dma, 2765 dataend->offs + offsetof(uhci_td_t, td_status), 2766 sizeof(dataend->td.td_status), 2767 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2768 ux->ux_stdend = dataend; 2769 2770 #ifdef UHCI_DEBUG 2771 if (uhcidebug >= 10) { 2772 DPRINTF("--- dump start ---", 0, 0, 0, 0); 2773 uhci_dump_tds(data); 2774 uhci_dump_qh(upipe->intr.qhs[0]); 2775 DPRINTF("--- dump end ---", 0, 0, 0, 0); 2776 } 2777 #endif 2778 2779 DPRINTFN(10, "qhs[0]=%#jx", (uintptr_t)upipe->intr.qhs[0], 0, 0, 0); 2780 for (i = 0; i < upipe->intr.npoll; i++) { 2781 sqh = upipe->intr.qhs[i]; 2782 sqh->elink = data; 2783 sqh->qh.qh_elink = htole32(data->physaddr | UHCI_PTR_TD); 2784 usb_syncmem(&sqh->dma, 2785 sqh->offs + offsetof(uhci_qh_t, qh_elink), 2786 sizeof(sqh->qh.qh_elink), 2787 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2788 } 2789 uhci_add_intr_list(sc, ux); 2790 xfer->ux_status = USBD_IN_PROGRESS; 2791 if (!polling) 2792 mutex_exit(&sc->sc_lock); 2793 2794 #ifdef UHCI_DEBUG 2795 if (uhcidebug >= 10) { 2796 DPRINTF("--- dump start ---", 0, 0, 0, 0); 2797 uhci_dump_tds(data); 2798 uhci_dump_qh(upipe->intr.qhs[0]); 2799 DPRINTF("--- dump end ---", 0, 0, 0, 0); 2800 } 2801 #endif 2802 2803 return USBD_IN_PROGRESS; 2804 } 2805 2806 /* Abort a device control request. */ 2807 void 2808 uhci_device_ctrl_abort(struct usbd_xfer *xfer) 2809 { 2810 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer); 2811 2812 KASSERT(mutex_owned(&sc->sc_lock)); 2813 2814 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2815 usbd_xfer_abort(xfer); 2816 } 2817 2818 /* Close a device control pipe. */ 2819 void 2820 uhci_device_ctrl_close(struct usbd_pipe *pipe) 2821 { 2822 uhci_softc_t *sc = UHCI_PIPE2SC(pipe); 2823 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe); 2824 2825 uhci_free_sqh(sc, upipe->ctrl.sqh); 2826 uhci_free_std_locked(sc, upipe->ctrl.setup); 2827 uhci_free_std_locked(sc, upipe->ctrl.stat); 2828 2829 usb_freemem(&sc->sc_bus, &upipe->ctrl.reqdma); 2830 } 2831 2832 /* Abort a device interrupt request. 
*/ 2833 void 2834 uhci_device_intr_abort(struct usbd_xfer *xfer) 2835 { 2836 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer); 2837 2838 KASSERT(mutex_owned(&sc->sc_lock)); 2839 KASSERT(xfer->ux_pipe->up_intrxfer == xfer); 2840 2841 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2842 DPRINTF("xfer=%#jx", (uintptr_t)xfer, 0, 0, 0); 2843 2844 usbd_xfer_abort(xfer); 2845 } 2846 2847 /* Close a device interrupt pipe. */ 2848 void 2849 uhci_device_intr_close(struct usbd_pipe *pipe) 2850 { 2851 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe); 2852 uhci_softc_t *sc = UHCI_PIPE2SC(pipe); 2853 int i, npoll; 2854 2855 KASSERT(mutex_owned(&sc->sc_lock)); 2856 2857 /* Unlink descriptors from controller data structures. */ 2858 npoll = upipe->intr.npoll; 2859 for (i = 0; i < npoll; i++) 2860 uhci_remove_intr(sc, upipe->intr.qhs[i]); 2861 2862 /* 2863 * We now have to wait for any activity on the physical 2864 * descriptors to stop. 2865 */ 2866 usb_delay_ms_locked(&sc->sc_bus, 2, &sc->sc_lock); 2867 2868 for (i = 0; i < npoll; i++) 2869 uhci_free_sqh(sc, upipe->intr.qhs[i]); 2870 kmem_free(upipe->intr.qhs, npoll * sizeof(uhci_soft_qh_t *)); 2871 } 2872 2873 int 2874 uhci_device_isoc_init(struct usbd_xfer *xfer) 2875 { 2876 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 2877 2878 KASSERT(!(xfer->ux_rqflags & URQ_REQUEST)); 2879 KASSERT(xfer->ux_nframes != 0); 2880 KASSERT(ux->ux_isdone); 2881 2882 ux->ux_type = UX_ISOC; 2883 return 0; 2884 } 2885 2886 Static void 2887 uhci_device_isoc_fini(struct usbd_xfer *xfer) 2888 { 2889 struct uhci_xfer *ux __diagused = UHCI_XFER2UXFER(xfer); 2890 2891 KASSERT(ux->ux_type == UX_ISOC); 2892 } 2893 2894 usbd_status 2895 uhci_device_isoc_transfer(struct usbd_xfer *xfer) 2896 { 2897 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 2898 usbd_status err __diagused; 2899 2900 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 2901 DPRINTFN(5, "xfer=%#jx", (uintptr_t)xfer, 0, 0, 0); 2902 2903 /* Put it on our queue, */ 2904 mutex_enter(&sc->sc_lock); 2905 err = usb_insert_transfer(xfer); 2906 mutex_exit(&sc->sc_lock); 2907 2908 KASSERT(err == USBD_NORMAL_COMPLETION); 2909 2910 /* insert into schedule, */ 2911 2912 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 2913 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 2914 struct isoc *isoc = &upipe->isoc; 2915 uhci_soft_td_t *std = NULL; 2916 uint32_t buf, len, status, offs; 2917 int i, next, nframes; 2918 int rd = UE_GET_DIR(upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress) == UE_DIR_IN; 2919 2920 DPRINTFN(5, "used=%jd next=%jd xfer=%#jx nframes=%jd", 2921 isoc->inuse, isoc->next, (uintptr_t)xfer, xfer->ux_nframes); 2922 2923 if (sc->sc_dying) 2924 return USBD_IOERROR; 2925 2926 if (xfer->ux_status == USBD_IN_PROGRESS) { 2927 /* This request has already been entered into the frame list */ 2928 printf("%s: xfer=%p in frame list\n", __func__, xfer); 2929 /* XXX */ 2930 } 2931 2932 #ifdef DIAGNOSTIC 2933 if (isoc->inuse >= UHCI_VFRAMELIST_COUNT) 2934 printf("%s: overflow!\n", __func__); 2935 #endif 2936 2937 KASSERT(xfer->ux_nframes != 0); 2938 2939 if (xfer->ux_length) 2940 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length, 2941 rd ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 2942 2943 mutex_enter(&sc->sc_lock); 2944 next = isoc->next; 2945 if (next == -1) { 2946 /* Not in use yet, schedule it a few frames ahead. 
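 * UHCI_FRNUM is the frame the controller is currently processing; with
 * 1 ms frames, starting 3 frames further on leaves roughly 3 ms to fill
 * in and flush the TDs before the controller reaches them.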
*/ 2947 next = (UREAD2(sc, UHCI_FRNUM) + 3) % UHCI_VFRAMELIST_COUNT; 2948 DPRINTFN(2, "start next=%jd", next, 0, 0, 0); 2949 } 2950 2951 xfer->ux_status = USBD_IN_PROGRESS; 2952 ux->ux_curframe = next; 2953 2954 offs = 0; 2955 status = UHCI_TD_ZERO_ACTLEN(UHCI_TD_SET_ERRCNT(0) | 2956 UHCI_TD_ACTIVE | 2957 UHCI_TD_IOS); 2958 nframes = xfer->ux_nframes; 2959 for (i = 0; i < nframes; i++) { 2960 buf = DMAADDR(&xfer->ux_dmabuf, offs); 2961 std = isoc->stds[next]; 2962 if (++next >= UHCI_VFRAMELIST_COUNT) 2963 next = 0; 2964 len = xfer->ux_frlengths[i]; 2965 2966 KASSERTMSG(len <= __SHIFTOUT_MASK(UHCI_TD_MAXLEN_MASK), 2967 "len %d", len); 2968 std->td.td_buffer = htole32(buf); 2969 usb_syncmem(&xfer->ux_dmabuf, offs, len, 2970 rd ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 2971 if (i == nframes - 1) 2972 status |= UHCI_TD_IOC; 2973 std->td.td_status = htole32(status); 2974 std->td.td_token &= htole32(~UHCI_TD_MAXLEN_MASK); 2975 std->td.td_token |= htole32(UHCI_TD_SET_MAXLEN(len)); 2976 usb_syncmem(&std->dma, std->offs, sizeof(std->td), 2977 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2978 #ifdef UHCI_DEBUG 2979 if (uhcidebug >= 5) { 2980 DPRINTF("--- dump start ---", 0, 0, 0, 0); 2981 DPRINTF("TD %jd", i, 0, 0, 0); 2982 uhci_dump_td(std); 2983 DPRINTF("--- dump end ---", 0, 0, 0, 0); 2984 } 2985 #endif 2986 offs += len; 2987 const bus_addr_t bend __diagused = 2988 DMAADDR(&xfer->ux_dmabuf, offs - 1); 2989 2990 KASSERT(((buf ^ bend) & ~PAGE_MASK) == 0); 2991 } 2992 isoc->next = next; 2993 isoc->inuse += xfer->ux_nframes; 2994 2995 /* Set up interrupt info. */ 2996 ux->ux_stdstart = std; 2997 ux->ux_stdend = std; 2998 2999 KASSERT(ux->ux_isdone); 3000 #ifdef DIAGNOSTIC 3001 ux->ux_isdone = false; 3002 #endif 3003 uhci_add_intr_list(sc, ux); 3004 3005 mutex_exit(&sc->sc_lock); 3006 3007 return USBD_IN_PROGRESS; 3008 } 3009 3010 void 3011 uhci_device_isoc_abort(struct usbd_xfer *xfer) 3012 { 3013 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 3014 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 3015 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 3016 uhci_soft_td_t **stds = upipe->isoc.stds; 3017 uhci_soft_td_t *std; 3018 int i, n, nframes, maxlen, len; 3019 3020 KASSERT(mutex_owned(&sc->sc_lock)); 3021 3022 /* Transfer is already done. */ 3023 if (xfer->ux_status != USBD_NOT_STARTED && 3024 xfer->ux_status != USBD_IN_PROGRESS) { 3025 return; 3026 } 3027 3028 /* Give xfer the requested abort code. */ 3029 xfer->ux_status = USBD_CANCELLED; 3030 3031 /* make hardware ignore it, */ 3032 nframes = xfer->ux_nframes; 3033 n = ux->ux_curframe; 3034 maxlen = 0; 3035 for (i = 0; i < nframes; i++) { 3036 std = stds[n]; 3037 usb_syncmem(&std->dma, 3038 std->offs + offsetof(uhci_td_t, td_status), 3039 sizeof(std->td.td_status), 3040 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 3041 std->td.td_status &= htole32(~(UHCI_TD_ACTIVE | UHCI_TD_IOC)); 3042 usb_syncmem(&std->dma, 3043 std->offs + offsetof(uhci_td_t, td_status), 3044 sizeof(std->td.td_status), 3045 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 3046 usb_syncmem(&std->dma, 3047 std->offs + offsetof(uhci_td_t, td_token), 3048 sizeof(std->td.td_token), 3049 BUS_DMASYNC_POSTWRITE); 3050 len = UHCI_TD_GET_MAXLEN(le32toh(std->td.td_token)); 3051 if (len > maxlen) 3052 maxlen = len; 3053 if (++n >= UHCI_VFRAMELIST_COUNT) 3054 n = 0; 3055 } 3056 3057 /* and wait until we are sure the hardware has finished. */ 3058 delay(maxlen); 3059 3060 #ifdef DIAGNOSTIC 3061 ux->ux_isdone = true; 3062 #endif 3063 /* Remove from interrupt list. 
*/ 3064 uhci_del_intr_list(sc, ux); 3065 3066 /* Run callback. */ 3067 usb_transfer_complete(xfer); 3068 3069 KASSERT(mutex_owned(&sc->sc_lock)); 3070 } 3071 3072 void 3073 uhci_device_isoc_close(struct usbd_pipe *pipe) 3074 { 3075 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe); 3076 uhci_softc_t *sc = UHCI_PIPE2SC(pipe); 3077 uhci_soft_td_t *std, *vstd; 3078 struct isoc *isoc; 3079 int i; 3080 3081 KASSERT(mutex_owned(&sc->sc_lock)); 3082 3083 /* 3084 * Make sure all TDs are marked as inactive. 3085 * Wait for completion. 3086 * Unschedule. 3087 * Deallocate. 3088 */ 3089 isoc = &upipe->isoc; 3090 3091 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) { 3092 std = isoc->stds[i]; 3093 usb_syncmem(&std->dma, 3094 std->offs + offsetof(uhci_td_t, td_status), 3095 sizeof(std->td.td_status), 3096 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 3097 std->td.td_status &= htole32(~UHCI_TD_ACTIVE); 3098 usb_syncmem(&std->dma, 3099 std->offs + offsetof(uhci_td_t, td_status), 3100 sizeof(std->td.td_status), 3101 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 3102 } 3103 /* wait for completion */ 3104 usb_delay_ms_locked(&sc->sc_bus, 2, &sc->sc_lock); 3105 3106 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) { 3107 std = isoc->stds[i]; 3108 for (vstd = sc->sc_vframes[i].htd; 3109 vstd != NULL && vstd->link.std != std; 3110 vstd = vstd->link.std) 3111 ; 3112 if (vstd == NULL) { 3113 /*panic*/ 3114 printf("%s: %p not found\n", __func__, std); 3115 mutex_exit(&sc->sc_lock); 3116 return; 3117 } 3118 vstd->link = std->link; 3119 usb_syncmem(&std->dma, 3120 std->offs + offsetof(uhci_td_t, td_link), 3121 sizeof(std->td.td_link), 3122 BUS_DMASYNC_POSTWRITE); 3123 vstd->td.td_link = std->td.td_link; 3124 usb_syncmem(&vstd->dma, 3125 vstd->offs + offsetof(uhci_td_t, td_link), 3126 sizeof(vstd->td.td_link), 3127 BUS_DMASYNC_PREWRITE); 3128 uhci_free_std_locked(sc, std); 3129 } 3130 3131 kmem_free(isoc->stds, UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *)); 3132 } 3133 3134 usbd_status 3135 uhci_setup_isoc(struct usbd_pipe *pipe) 3136 { 3137 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe); 3138 uhci_softc_t *sc = UHCI_PIPE2SC(pipe); 3139 int addr = upipe->pipe.up_dev->ud_addr; 3140 int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress; 3141 int rd = UE_GET_DIR(endpt) == UE_DIR_IN; 3142 uhci_soft_td_t *std, *vstd; 3143 uint32_t token; 3144 struct isoc *isoc; 3145 int i; 3146 3147 isoc = &upipe->isoc; 3148 3149 isoc->stds = kmem_alloc( 3150 UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *), KM_SLEEP); 3151 if (isoc->stds == NULL) 3152 return USBD_NOMEM; 3153 3154 token = rd ? UHCI_TD_IN (0, endpt, addr, 0) : 3155 UHCI_TD_OUT(0, endpt, addr, 0); 3156 3157 /* Allocate the TDs and mark as inactive; */ 3158 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) { 3159 std = uhci_alloc_std(sc); 3160 if (std == 0) 3161 goto bad; 3162 std->td.td_status = htole32(UHCI_TD_IOS); /* iso, inactive */ 3163 std->td.td_token = htole32(token); 3164 usb_syncmem(&std->dma, std->offs, sizeof(std->td), 3165 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 3166 isoc->stds[i] = std; 3167 } 3168 3169 mutex_enter(&sc->sc_lock); 3170 3171 /* Insert TDs into schedule. 
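 * Each TD is linked in directly behind the frame's head TD: the new TD
 * first inherits the head's old link and is flushed to memory, and only
 * then is the head rewritten to point at the new TD, so the controller
 * never follows a half-built chain.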
*/ 3172 for (i = 0; i < UHCI_VFRAMELIST_COUNT; i++) { 3173 std = isoc->stds[i]; 3174 vstd = sc->sc_vframes[i].htd; 3175 usb_syncmem(&vstd->dma, 3176 vstd->offs + offsetof(uhci_td_t, td_link), 3177 sizeof(vstd->td.td_link), 3178 BUS_DMASYNC_POSTWRITE); 3179 std->link = vstd->link; 3180 std->td.td_link = vstd->td.td_link; 3181 usb_syncmem(&std->dma, 3182 std->offs + offsetof(uhci_td_t, td_link), 3183 sizeof(std->td.td_link), 3184 BUS_DMASYNC_PREWRITE); 3185 vstd->link.std = std; 3186 vstd->td.td_link = htole32(std->physaddr | UHCI_PTR_TD); 3187 usb_syncmem(&vstd->dma, 3188 vstd->offs + offsetof(uhci_td_t, td_link), 3189 sizeof(vstd->td.td_link), 3190 BUS_DMASYNC_PREWRITE); 3191 } 3192 mutex_exit(&sc->sc_lock); 3193 3194 isoc->next = -1; 3195 isoc->inuse = 0; 3196 3197 return USBD_NORMAL_COMPLETION; 3198 3199 bad: 3200 while (--i >= 0) 3201 uhci_free_std(sc, isoc->stds[i]); 3202 kmem_free(isoc->stds, UHCI_VFRAMELIST_COUNT * sizeof(uhci_soft_td_t *)); 3203 return USBD_NOMEM; 3204 } 3205 3206 void 3207 uhci_device_isoc_done(struct usbd_xfer *xfer) 3208 { 3209 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer); 3210 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 3211 struct uhci_xfer *ux = UHCI_XFER2UXFER(xfer); 3212 int i, offs; 3213 int rd = UE_GET_DIR(upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress) == UE_DIR_IN; 3214 3215 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3216 DPRINTFN(4, "length=%jd, ux_state=0x%08jx", 3217 xfer->ux_actlen, xfer->ux_state, 0, 0); 3218 3219 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); 3220 3221 #ifdef DIAGNOSTIC 3222 if (ux->ux_stdend == NULL) { 3223 printf("%s: xfer=%p stdend==NULL\n", __func__, xfer); 3224 #ifdef UHCI_DEBUG 3225 DPRINTF("--- dump start ---", 0, 0, 0, 0); 3226 uhci_dump_ii(ux); 3227 DPRINTF("--- dump end ---", 0, 0, 0, 0); 3228 #endif 3229 return; 3230 } 3231 #endif 3232 3233 /* Turn off the interrupt since it is active even if the TD is not. */ 3234 usb_syncmem(&ux->ux_stdend->dma, 3235 ux->ux_stdend->offs + offsetof(uhci_td_t, td_status), 3236 sizeof(ux->ux_stdend->td.td_status), 3237 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 3238 ux->ux_stdend->td.td_status &= htole32(~UHCI_TD_IOC); 3239 usb_syncmem(&ux->ux_stdend->dma, 3240 ux->ux_stdend->offs + offsetof(uhci_td_t, td_status), 3241 sizeof(ux->ux_stdend->td.td_status), 3242 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 3243 3244 offs = 0; 3245 for (i = 0; i < xfer->ux_nframes; i++) { 3246 usb_syncmem(&xfer->ux_dmabuf, offs, xfer->ux_frlengths[i], 3247 rd ? 
BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 3248 offs += xfer->ux_frlengths[i]; 3249 } 3250 } 3251 3252 void 3253 uhci_device_intr_done(struct usbd_xfer *xfer) 3254 { 3255 uhci_softc_t *sc __diagused = UHCI_XFER2SC(xfer); 3256 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 3257 uhci_soft_qh_t *sqh; 3258 int i, npoll; 3259 3260 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3261 DPRINTFN(5, "length=%jd", xfer->ux_actlen, 0, 0, 0); 3262 3263 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); 3264 3265 npoll = upipe->intr.npoll; 3266 for (i = 0; i < npoll; i++) { 3267 sqh = upipe->intr.qhs[i]; 3268 sqh->elink = NULL; 3269 sqh->qh.qh_elink = htole32(UHCI_PTR_T); 3270 usb_syncmem(&sqh->dma, 3271 sqh->offs + offsetof(uhci_qh_t, qh_elink), 3272 sizeof(sqh->qh.qh_elink), 3273 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 3274 } 3275 const int endpt = upipe->pipe.up_endpoint->ue_edesc->bEndpointAddress; 3276 const bool isread = UE_GET_DIR(endpt) == UE_DIR_IN; 3277 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length, 3278 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 3279 } 3280 3281 /* Deallocate request data structures */ 3282 void 3283 uhci_device_ctrl_done(struct usbd_xfer *xfer) 3284 { 3285 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 3286 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 3287 int len = UGETW(xfer->ux_request.wLength); 3288 int isread = (xfer->ux_request.bmRequestType & UT_READ); 3289 3290 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3291 3292 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); 3293 KASSERT(xfer->ux_rqflags & URQ_REQUEST); 3294 3295 /* XXXNH move to uhci_idone??? */ 3296 if (upipe->pipe.up_dev->ud_speed == USB_SPEED_LOW) 3297 uhci_remove_ls_ctrl(sc, upipe->ctrl.sqh); 3298 else 3299 uhci_remove_hs_ctrl(sc, upipe->ctrl.sqh); 3300 3301 if (len) { 3302 usb_syncmem(&xfer->ux_dmabuf, 0, len, 3303 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 3304 } 3305 usb_syncmem(&upipe->ctrl.reqdma, 0, 3306 sizeof(usb_device_request_t), BUS_DMASYNC_POSTWRITE); 3307 3308 DPRINTF("length=%jd", xfer->ux_actlen, 0, 0, 0); 3309 } 3310 3311 /* Deallocate request data structures */ 3312 void 3313 uhci_device_bulk_done(struct usbd_xfer *xfer) 3314 { 3315 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 3316 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(xfer->ux_pipe); 3317 usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc; 3318 int endpt = ed->bEndpointAddress; 3319 int isread = UE_GET_DIR(endpt) == UE_DIR_IN; 3320 3321 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3322 DPRINTFN(5, "xfer=%#jx sc=%#jx upipe=%#jx", (uintptr_t)xfer, 3323 (uintptr_t)sc, (uintptr_t)upipe, 0); 3324 3325 KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock)); 3326 3327 uhci_remove_bulk(sc, upipe->bulk.sqh); 3328 3329 if (xfer->ux_length) { 3330 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length, 3331 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 3332 } 3333 3334 DPRINTFN(5, "length=%jd", xfer->ux_actlen, 0, 0, 0); 3335 } 3336 3337 /* Add interrupt QH, called with vflock. 
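 * The QH goes after the frame's current last interrupt QH (vf->eqh):
 * it first inherits that QH's horizontal link and is flushed, then the
 * old end QH is pointed at it, and finally vf->eqh and the frame's
 * bandwidth count are updated.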
*/ 3338 void 3339 uhci_add_intr(uhci_softc_t *sc, uhci_soft_qh_t *sqh) 3340 { 3341 struct uhci_vframe *vf = &sc->sc_vframes[sqh->pos]; 3342 uhci_soft_qh_t *eqh; 3343 3344 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3345 DPRINTFN(4, "n=%jd sqh=%#jx", sqh->pos, (uintptr_t)sqh, 0, 0); 3346 3347 eqh = vf->eqh; 3348 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink), 3349 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_POSTWRITE); 3350 sqh->hlink = eqh->hlink; 3351 sqh->qh.qh_hlink = eqh->qh.qh_hlink; 3352 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink), 3353 sizeof(sqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE); 3354 eqh->hlink = sqh; 3355 eqh->qh.qh_hlink = htole32(sqh->physaddr | UHCI_PTR_QH); 3356 usb_syncmem(&eqh->dma, eqh->offs + offsetof(uhci_qh_t, qh_hlink), 3357 sizeof(eqh->qh.qh_hlink), BUS_DMASYNC_PREWRITE); 3358 vf->eqh = sqh; 3359 vf->bandwidth++; 3360 } 3361 3362 /* Remove interrupt QH. */ 3363 void 3364 uhci_remove_intr(uhci_softc_t *sc, uhci_soft_qh_t *sqh) 3365 { 3366 struct uhci_vframe *vf = &sc->sc_vframes[sqh->pos]; 3367 uhci_soft_qh_t *pqh; 3368 3369 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3370 DPRINTFN(4, "n=%jd sqh=%#jx", sqh->pos, (uintptr_t)sqh, 0, 0); 3371 3372 /* See comment in uhci_remove_ctrl() */ 3373 3374 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_elink), 3375 sizeof(sqh->qh.qh_elink), 3376 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 3377 if (!(sqh->qh.qh_elink & htole32(UHCI_PTR_T))) { 3378 sqh->qh.qh_elink = htole32(UHCI_PTR_T); 3379 usb_syncmem(&sqh->dma, 3380 sqh->offs + offsetof(uhci_qh_t, qh_elink), 3381 sizeof(sqh->qh.qh_elink), 3382 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 3383 delay(UHCI_QH_REMOVE_DELAY); 3384 } 3385 3386 pqh = uhci_find_prev_qh(vf->hqh, sqh); 3387 usb_syncmem(&sqh->dma, sqh->offs + offsetof(uhci_qh_t, qh_hlink), 3388 sizeof(sqh->qh.qh_hlink), 3389 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD); 3390 pqh->hlink = sqh->hlink; 3391 pqh->qh.qh_hlink = sqh->qh.qh_hlink; 3392 usb_syncmem(&pqh->dma, pqh->offs + offsetof(uhci_qh_t, qh_hlink), 3393 sizeof(pqh->qh.qh_hlink), 3394 BUS_DMASYNC_PREWRITE); 3395 delay(UHCI_QH_REMOVE_DELAY); 3396 if (vf->eqh == sqh) 3397 vf->eqh = pqh; 3398 vf->bandwidth--; 3399 } 3400 3401 usbd_status 3402 uhci_device_setintr(uhci_softc_t *sc, struct uhci_pipe *upipe, int ival) 3403 { 3404 uhci_soft_qh_t *sqh; 3405 int i, npoll; 3406 u_int bestbw, bw, bestoffs, offs; 3407 3408 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3409 DPRINTFN(2, "pipe=%#jx", (uintptr_t)upipe, 0, 0, 0); 3410 if (ival == 0) { 3411 printf("%s: 0 interval\n", __func__); 3412 return USBD_INVAL; 3413 } 3414 3415 if (ival > UHCI_VFRAMELIST_COUNT) 3416 ival = UHCI_VFRAMELIST_COUNT; 3417 npoll = howmany(UHCI_VFRAMELIST_COUNT, ival); 3418 DPRINTF("ival=%jd npoll=%jd", ival, npoll, 0, 0); 3419 3420 upipe->intr.npoll = npoll; 3421 upipe->intr.qhs = 3422 kmem_alloc(npoll * sizeof(uhci_soft_qh_t *), KM_SLEEP); 3423 3424 /* 3425 * Figure out which offset in the schedule that has most 3426 * bandwidth left over. 
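 * npoll = howmany(UHCI_VFRAMELIST_COUNT, ival) QHs will be placed every
 * ival frames.  Each candidate offset offs in [0, ival) is scored by
 * summing sc_vframes[MOD(i * ival + offs)].bandwidth over the npoll
 * slots it would occupy; the offset with the smallest sum wins.  E.g.,
 * assuming a 128-entry virtual frame list and ival = 32, npoll is 4 and
 * offset o is scored from frames o, o+32, o+64 and o+96.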
3427 */ 3428 #define MOD(i) ((i) & (UHCI_VFRAMELIST_COUNT-1)) 3429 for (bestoffs = offs = 0, bestbw = ~0; offs < ival; offs++) { 3430 for (bw = i = 0; i < npoll; i++) 3431 bw += sc->sc_vframes[MOD(i * ival + offs)].bandwidth; 3432 if (bw < bestbw) { 3433 bestbw = bw; 3434 bestoffs = offs; 3435 } 3436 } 3437 DPRINTF("bw=%jd offs=%jd", bestbw, bestoffs, 0, 0); 3438 for (i = 0; i < npoll; i++) { 3439 upipe->intr.qhs[i] = sqh = uhci_alloc_sqh(sc); 3440 sqh->elink = NULL; 3441 sqh->qh.qh_elink = htole32(UHCI_PTR_T); 3442 usb_syncmem(&sqh->dma, 3443 sqh->offs + offsetof(uhci_qh_t, qh_elink), 3444 sizeof(sqh->qh.qh_elink), 3445 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 3446 sqh->pos = MOD(i * ival + bestoffs); 3447 } 3448 #undef MOD 3449 3450 mutex_enter(&sc->sc_lock); 3451 /* Enter QHs into the controller data structures. */ 3452 for (i = 0; i < npoll; i++) 3453 uhci_add_intr(sc, upipe->intr.qhs[i]); 3454 mutex_exit(&sc->sc_lock); 3455 3456 DPRINTFN(5, "returns %#jx", (uintptr_t)upipe, 0, 0, 0); 3457 3458 return USBD_NORMAL_COMPLETION; 3459 } 3460 3461 /* Open a new pipe. */ 3462 usbd_status 3463 uhci_open(struct usbd_pipe *pipe) 3464 { 3465 uhci_softc_t *sc = UHCI_PIPE2SC(pipe); 3466 struct usbd_bus *bus = pipe->up_dev->ud_bus; 3467 struct uhci_pipe *upipe = UHCI_PIPE2UPIPE(pipe); 3468 usb_endpoint_descriptor_t *ed = pipe->up_endpoint->ue_edesc; 3469 usbd_status err = USBD_NOMEM; 3470 int ival; 3471 3472 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3473 DPRINTF("pipe=%#jx, addr=%jd, endpt=%jd (%jd)", 3474 (uintptr_t)pipe, pipe->up_dev->ud_addr, ed->bEndpointAddress, 3475 bus->ub_rhaddr); 3476 3477 if (sc->sc_dying) 3478 return USBD_IOERROR; 3479 3480 upipe->aborting = 0; 3481 /* toggle state needed for bulk endpoints */ 3482 upipe->nexttoggle = pipe->up_endpoint->ue_toggle; 3483 3484 if (pipe->up_dev->ud_addr == bus->ub_rhaddr) { 3485 switch (ed->bEndpointAddress) { 3486 case USB_CONTROL_ENDPOINT: 3487 pipe->up_methods = &roothub_ctrl_methods; 3488 break; 3489 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT: 3490 pipe->up_methods = &uhci_root_intr_methods; 3491 break; 3492 default: 3493 return USBD_INVAL; 3494 } 3495 } else { 3496 switch (ed->bmAttributes & UE_XFERTYPE) { 3497 case UE_CONTROL: 3498 pipe->up_methods = &uhci_device_ctrl_methods; 3499 upipe->ctrl.sqh = uhci_alloc_sqh(sc); 3500 if (upipe->ctrl.sqh == NULL) 3501 goto bad; 3502 upipe->ctrl.setup = uhci_alloc_std(sc); 3503 if (upipe->ctrl.setup == NULL) { 3504 uhci_free_sqh(sc, upipe->ctrl.sqh); 3505 goto bad; 3506 } 3507 upipe->ctrl.stat = uhci_alloc_std(sc); 3508 if (upipe->ctrl.stat == NULL) { 3509 uhci_free_sqh(sc, upipe->ctrl.sqh); 3510 uhci_free_std(sc, upipe->ctrl.setup); 3511 goto bad; 3512 } 3513 err = usb_allocmem(&sc->sc_bus, 3514 sizeof(usb_device_request_t), 0, 3515 USBMALLOC_COHERENT, &upipe->ctrl.reqdma); 3516 if (err) { 3517 uhci_free_sqh(sc, upipe->ctrl.sqh); 3518 uhci_free_std(sc, upipe->ctrl.setup); 3519 uhci_free_std(sc, upipe->ctrl.stat); 3520 goto bad; 3521 } 3522 break; 3523 case UE_INTERRUPT: 3524 pipe->up_methods = &uhci_device_intr_methods; 3525 ival = pipe->up_interval; 3526 if (ival == USBD_DEFAULT_INTERVAL) 3527 ival = ed->bInterval; 3528 return uhci_device_setintr(sc, upipe, ival); 3529 case UE_ISOCHRONOUS: 3530 pipe->up_serialise = false; 3531 pipe->up_methods = &uhci_device_isoc_methods; 3532 return uhci_setup_isoc(pipe); 3533 case UE_BULK: 3534 pipe->up_methods = &uhci_device_bulk_methods; 3535 upipe->bulk.sqh = uhci_alloc_sqh(sc); 3536 if (upipe->bulk.sqh == NULL) 3537 goto bad; 3538 break; 3539 } 3540 } 3541 return 
USBD_NORMAL_COMPLETION; 3542 3543 bad: 3544 return USBD_NOMEM; 3545 } 3546 3547 /* 3548 * Data structures and routines to emulate the root hub. 3549 */ 3550 /* 3551 * The USB hub protocol requires that SET_FEATURE(PORT_RESET) also 3552 * enables the port, and also states that SET_FEATURE(PORT_ENABLE) 3553 * should not be used by the USB subsystem. As we cannot issue a 3554 * SET_FEATURE(PORT_ENABLE) externally, we must ensure that the port 3555 * will be enabled as part of the reset. 3556 * 3557 * On the VT83C572, the port cannot be successfully enabled until the 3558 * outstanding "port enable change" and "connection status change" 3559 * events have been reset. 3560 */ 3561 Static usbd_status 3562 uhci_portreset(uhci_softc_t *sc, int index) 3563 { 3564 int lim, port, x; 3565 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3566 3567 if (index == 1) 3568 port = UHCI_PORTSC1; 3569 else if (index == 2) 3570 port = UHCI_PORTSC2; 3571 else 3572 return USBD_IOERROR; 3573 3574 x = URWMASK(UREAD2(sc, port)); 3575 UWRITE2(sc, port, x | UHCI_PORTSC_PR); 3576 3577 usb_delay_ms(&sc->sc_bus, USB_PORT_ROOT_RESET_DELAY); 3578 3579 DPRINTF("uhci port %jd reset, status0 = 0x%04jx", index, 3580 UREAD2(sc, port), 0, 0); 3581 3582 x = URWMASK(UREAD2(sc, port)); 3583 UWRITE2(sc, port, x & ~(UHCI_PORTSC_PR | UHCI_PORTSC_SUSP)); 3584 3585 delay(100); 3586 3587 DPRINTF("uhci port %jd reset, status1 = 0x%04jx", index, 3588 UREAD2(sc, port), 0, 0); 3589 3590 x = URWMASK(UREAD2(sc, port)); 3591 UWRITE2(sc, port, x | UHCI_PORTSC_PE); 3592 3593 for (lim = 10; --lim > 0;) { 3594 usb_delay_ms(&sc->sc_bus, USB_PORT_RESET_DELAY); 3595 3596 x = UREAD2(sc, port); 3597 DPRINTF("uhci port %jd iteration %ju, status = 0x%04jx", index, 3598 lim, x, 0); 3599 3600 if (!(x & UHCI_PORTSC_CCS)) { 3601 /* 3602 * No device is connected (or was disconnected 3603 * during reset). Consider the port reset. 3604 * The delay must be long enough to ensure on 3605 * the initial iteration that the device 3606 * connection will have been registered. 50ms 3607 * appears to be sufficient, but 20ms is not. 3608 */ 3609 DPRINTFN(3, "uhci port %jd loop %ju, device detached", 3610 index, lim, 0, 0); 3611 break; 3612 } 3613 3614 if (x & (UHCI_PORTSC_POEDC | UHCI_PORTSC_CSC)) { 3615 /* 3616 * Port enabled changed and/or connection 3617 * status changed were set. Reset either or 3618 * both raised flags (by writing a 1 to that 3619 * bit), and wait again for state to settle. 
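 * POEDC and CSC are write-one-to-clear, so writing the raised bit(s)
 * back on top of URWMASK(x) clears just those events; URWMASK() keeps
 * only the ordinary read/write bits and thus avoids clearing any other
 * change bit as a side effect.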
3620 */ 3621 UWRITE2(sc, port, URWMASK(x) | 3622 (x & (UHCI_PORTSC_POEDC | UHCI_PORTSC_CSC))); 3623 continue; 3624 } 3625 3626 if (x & UHCI_PORTSC_PE) 3627 /* Port is enabled */ 3628 break; 3629 3630 UWRITE2(sc, port, URWMASK(x) | UHCI_PORTSC_PE); 3631 } 3632 3633 DPRINTFN(3, "uhci port %jd reset, status2 = 0x%04jx", index, 3634 UREAD2(sc, port), 0, 0); 3635 3636 if (lim <= 0) { 3637 DPRINTF("uhci port %jd reset timed out", index, 3638 0, 0, 0); 3639 return USBD_TIMEOUT; 3640 } 3641 3642 sc->sc_isreset = 1; 3643 return USBD_NORMAL_COMPLETION; 3644 } 3645 3646 Static int 3647 uhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req, 3648 void *buf, int buflen) 3649 { 3650 uhci_softc_t *sc = UHCI_BUS2SC(bus); 3651 int port, x; 3652 int status, change, totlen = 0; 3653 uint16_t len, value, index; 3654 usb_port_status_t ps; 3655 usbd_status err; 3656 3657 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3658 3659 if (sc->sc_dying) 3660 return -1; 3661 3662 DPRINTF("type=0x%02jx request=%02jx", req->bmRequestType, 3663 req->bRequest, 0, 0); 3664 3665 len = UGETW(req->wLength); 3666 value = UGETW(req->wValue); 3667 index = UGETW(req->wIndex); 3668 3669 #define C(x,y) ((x) | ((y) << 8)) 3670 switch (C(req->bRequest, req->bmRequestType)) { 3671 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE): 3672 DPRINTF("wValue=0x%04jx", value, 0, 0, 0); 3673 if (len == 0) 3674 break; 3675 switch (value) { 3676 #define sd ((usb_string_descriptor_t *)buf) 3677 case C(2, UDESC_STRING): 3678 /* Product */ 3679 totlen = usb_makestrdesc(sd, len, "UHCI root hub"); 3680 break; 3681 #undef sd 3682 default: 3683 /* default from usbroothub */ 3684 return buflen; 3685 } 3686 break; 3687 3688 /* Hub requests */ 3689 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE): 3690 break; 3691 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): 3692 DPRINTF("UR_CLEAR_PORT_FEATURE port=%jd feature=%jd", index, 3693 value, 0, 0); 3694 if (index == 1) 3695 port = UHCI_PORTSC1; 3696 else if (index == 2) 3697 port = UHCI_PORTSC2; 3698 else { 3699 return -1; 3700 } 3701 switch(value) { 3702 case UHF_PORT_ENABLE: 3703 x = URWMASK(UREAD2(sc, port)); 3704 UWRITE2(sc, port, x & ~UHCI_PORTSC_PE); 3705 break; 3706 case UHF_PORT_SUSPEND: 3707 x = URWMASK(UREAD2(sc, port)); 3708 if (!(x & UHCI_PORTSC_SUSP)) /* not suspended */ 3709 break; 3710 UWRITE2(sc, port, x | UHCI_PORTSC_RD); 3711 /* see USB2 spec ch. 
7.1.7.7 */ 3712 usb_delay_ms(&sc->sc_bus, 20); 3713 UWRITE2(sc, port, x & ~UHCI_PORTSC_SUSP); 3714 /* 10ms resume delay must be provided by caller */ 3715 break; 3716 case UHF_PORT_RESET: 3717 x = URWMASK(UREAD2(sc, port)); 3718 UWRITE2(sc, port, x & ~UHCI_PORTSC_PR); 3719 break; 3720 case UHF_C_PORT_CONNECTION: 3721 x = URWMASK(UREAD2(sc, port)); 3722 UWRITE2(sc, port, x | UHCI_PORTSC_CSC); 3723 break; 3724 case UHF_C_PORT_ENABLE: 3725 x = URWMASK(UREAD2(sc, port)); 3726 UWRITE2(sc, port, x | UHCI_PORTSC_POEDC); 3727 break; 3728 case UHF_C_PORT_OVER_CURRENT: 3729 x = URWMASK(UREAD2(sc, port)); 3730 UWRITE2(sc, port, x | UHCI_PORTSC_OCIC); 3731 break; 3732 case UHF_C_PORT_RESET: 3733 sc->sc_isreset = 0; 3734 break; 3735 case UHF_PORT_CONNECTION: 3736 case UHF_PORT_OVER_CURRENT: 3737 case UHF_PORT_POWER: 3738 case UHF_PORT_LOW_SPEED: 3739 case UHF_C_PORT_SUSPEND: 3740 default: 3741 return -1; 3742 } 3743 break; 3744 case C(UR_GET_BUS_STATE, UT_READ_CLASS_OTHER): 3745 if (index == 1) 3746 port = UHCI_PORTSC1; 3747 else if (index == 2) 3748 port = UHCI_PORTSC2; 3749 else { 3750 return -1; 3751 } 3752 if (len > 0) { 3753 *(uint8_t *)buf = 3754 UHCI_PORTSC_GET_LS(UREAD2(sc, port)); 3755 totlen = 1; 3756 } 3757 break; 3758 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE): 3759 if (len == 0) 3760 break; 3761 if ((value & 0xff) != 0) { 3762 return -1; 3763 } 3764 usb_hub_descriptor_t hubd; 3765 3766 totlen = uimin(buflen, sizeof(hubd)); 3767 memcpy(&hubd, buf, totlen); 3768 hubd.bNbrPorts = 2; 3769 memcpy(buf, &hubd, totlen); 3770 break; 3771 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE): 3772 if (len != 4) { 3773 return -1; 3774 } 3775 memset(buf, 0, len); 3776 totlen = len; 3777 break; 3778 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): 3779 if (index == 1) 3780 port = UHCI_PORTSC1; 3781 else if (index == 2) 3782 port = UHCI_PORTSC2; 3783 else { 3784 return -1; 3785 } 3786 if (len != 4) { 3787 return -1; 3788 } 3789 x = UREAD2(sc, port); 3790 status = change = 0; 3791 if (x & UHCI_PORTSC_CCS) 3792 status |= UPS_CURRENT_CONNECT_STATUS; 3793 if (x & UHCI_PORTSC_CSC) 3794 change |= UPS_C_CONNECT_STATUS; 3795 if (x & UHCI_PORTSC_PE) 3796 status |= UPS_PORT_ENABLED; 3797 if (x & UHCI_PORTSC_POEDC) 3798 change |= UPS_C_PORT_ENABLED; 3799 if (x & UHCI_PORTSC_OCI) 3800 status |= UPS_OVERCURRENT_INDICATOR; 3801 if (x & UHCI_PORTSC_OCIC) 3802 change |= UPS_C_OVERCURRENT_INDICATOR; 3803 if (x & UHCI_PORTSC_SUSP) 3804 status |= UPS_SUSPEND; 3805 if (x & UHCI_PORTSC_LSDA) 3806 status |= UPS_LOW_SPEED; 3807 status |= UPS_PORT_POWER; 3808 if (sc->sc_isreset) 3809 change |= UPS_C_PORT_RESET; 3810 USETW(ps.wPortStatus, status); 3811 USETW(ps.wPortChange, change); 3812 totlen = uimin(len, sizeof(ps)); 3813 memcpy(buf, &ps, totlen); 3814 break; 3815 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE): 3816 return -1; 3817 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE): 3818 break; 3819 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): 3820 if (index == 1) 3821 port = UHCI_PORTSC1; 3822 else if (index == 2) 3823 port = UHCI_PORTSC2; 3824 else { 3825 return -1; 3826 } 3827 switch(value) { 3828 case UHF_PORT_ENABLE: 3829 x = URWMASK(UREAD2(sc, port)); 3830 UWRITE2(sc, port, x | UHCI_PORTSC_PE); 3831 break; 3832 case UHF_PORT_SUSPEND: 3833 x = URWMASK(UREAD2(sc, port)); 3834 UWRITE2(sc, port, x | UHCI_PORTSC_SUSP); 3835 break; 3836 case UHF_PORT_RESET: 3837 err = uhci_portreset(sc, index); 3838 if (err != USBD_NORMAL_COMPLETION) 3839 return -1; 3840 return 0; 3841 case UHF_PORT_POWER: 3842 /* Pretend we turned on power */ 3843 
return 0; 3844 case UHF_C_PORT_CONNECTION: 3845 case UHF_C_PORT_ENABLE: 3846 case UHF_C_PORT_OVER_CURRENT: 3847 case UHF_PORT_CONNECTION: 3848 case UHF_PORT_OVER_CURRENT: 3849 case UHF_PORT_LOW_SPEED: 3850 case UHF_C_PORT_SUSPEND: 3851 case UHF_C_PORT_RESET: 3852 default: 3853 return -1; 3854 } 3855 break; 3856 default: 3857 /* default from usbroothub */ 3858 DPRINTF("returning %jd (usbroothub default)", 3859 buflen, 0, 0, 0); 3860 return buflen; 3861 } 3862 3863 DPRINTF("returning %jd", totlen, 0, 0, 0); 3864 3865 return totlen; 3866 } 3867 3868 /* Abort a root interrupt request. */ 3869 void 3870 uhci_root_intr_abort(struct usbd_xfer *xfer) 3871 { 3872 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 3873 3874 KASSERT(mutex_owned(&sc->sc_lock)); 3875 KASSERT(xfer->ux_pipe->up_intrxfer == xfer); 3876 3877 /* 3878 * Try to stop the callout before it starts. If we got in too 3879 * late, too bad; but if the callout had yet to run and time 3880 * out the xfer, cancel it ourselves. 3881 */ 3882 callout_stop(&sc->sc_poll_handle); 3883 if (sc->sc_intr_xfer == NULL) 3884 return; 3885 3886 KASSERT(sc->sc_intr_xfer == xfer); 3887 KASSERT(xfer->ux_status == USBD_IN_PROGRESS); 3888 xfer->ux_status = USBD_CANCELLED; 3889 #ifdef DIAGNOSTIC 3890 UHCI_XFER2UXFER(xfer)->ux_isdone = true; 3891 #endif 3892 usb_transfer_complete(xfer); 3893 } 3894 3895 usbd_status 3896 uhci_root_intr_transfer(struct usbd_xfer *xfer) 3897 { 3898 uhci_softc_t *sc = UHCI_XFER2SC(xfer); 3899 usbd_status err; 3900 3901 /* Insert last in queue. */ 3902 mutex_enter(&sc->sc_lock); 3903 err = usb_insert_transfer(xfer); 3904 mutex_exit(&sc->sc_lock); 3905 if (err) 3906 return err; 3907 3908 /* 3909 * Pipe isn't running (otherwise err would be USBD_INPROG), 3910 * start first 3911 */ 3912 return uhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 3913 } 3914 3915 /* Start a transfer on the root interrupt pipe */ 3916 usbd_status 3917 uhci_root_intr_start(struct usbd_xfer *xfer) 3918 { 3919 struct usbd_pipe *pipe = xfer->ux_pipe; 3920 uhci_softc_t *sc = UHCI_PIPE2SC(pipe); 3921 unsigned int ival; 3922 const bool polling = sc->sc_bus.ub_usepolling; 3923 3924 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3925 DPRINTF("xfer=%#jx len=%jd flags=%jd", (uintptr_t)xfer, xfer->ux_length, 3926 xfer->ux_flags, 0); 3927 3928 if (sc->sc_dying) 3929 return USBD_IOERROR; 3930 3931 if (!polling) 3932 mutex_enter(&sc->sc_lock); 3933 3934 KASSERT(sc->sc_intr_xfer == NULL); 3935 3936 /* XXX temporary variable needed to avoid gcc3 warning */ 3937 ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval; 3938 sc->sc_ival = mstohz(ival); 3939 callout_schedule(&sc->sc_poll_handle, sc->sc_ival); 3940 sc->sc_intr_xfer = xfer; 3941 xfer->ux_status = USBD_IN_PROGRESS; 3942 3943 if (!polling) 3944 mutex_exit(&sc->sc_lock); 3945 3946 return USBD_IN_PROGRESS; 3947 } 3948 3949 /* Close the root interrupt pipe. */ 3950 void 3951 uhci_root_intr_close(struct usbd_pipe *pipe) 3952 { 3953 uhci_softc_t *sc __diagused = UHCI_PIPE2SC(pipe); 3954 UHCIHIST_FUNC(); UHCIHIST_CALLED(); 3955 3956 KASSERT(mutex_owned(&sc->sc_lock)); 3957 3958 /* 3959 * The caller must arrange to have aborted the pipe already, so 3960 * there can be no intr xfer in progress. The callout may 3961 * still be pending from a prior intr xfer -- if it has already 3962 * fired, it will see there is nothing to do, and do nothing. 3963 */ 3964 KASSERT(sc->sc_intr_xfer == NULL); 3965 KASSERT(!callout_pending(&sc->sc_poll_handle)); 3966 } 3967