1 /* $NetBSD: xhci.c,v 1.186 2025/01/09 10:17:22 jmcneill Exp $ */ 2 3 /* 4 * Copyright (c) 2013 Jonathan A. Kollasch 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 /* 30 * USB rev 2.0 and rev 3.1 specification 31 * http://www.usb.org/developers/docs/ 32 * xHCI rev 1.1 specification 33 * http://www.intel.com/technology/usb/spec.htm 34 */ 35 36 #include <sys/cdefs.h> 37 __KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.186 2025/01/09 10:17:22 jmcneill Exp $"); 38 39 #ifdef _KERNEL_OPT 40 #include "opt_usb.h" 41 #endif 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/kernel.h> 46 #include <sys/kmem.h> 47 #include <sys/device.h> 48 #include <sys/select.h> 49 #include <sys/proc.h> 50 #include <sys/queue.h> 51 #include <sys/mutex.h> 52 #include <sys/condvar.h> 53 #include <sys/bus.h> 54 #include <sys/cpu.h> 55 #include <sys/sysctl.h> 56 57 #include <machine/endian.h> 58 59 #include <dev/usb/usb.h> 60 #include <dev/usb/usbdi.h> 61 #include <dev/usb/usbdivar.h> 62 #include <dev/usb/usbdi_util.h> 63 #include <dev/usb/usbhist.h> 64 #include <dev/usb/usb_mem.h> 65 #include <dev/usb/usb_quirks.h> 66 67 #include <dev/usb/xhcireg.h> 68 #include <dev/usb/xhcivar.h> 69 #include <dev/usb/usbroothub.h> 70 71 72 #ifdef USB_DEBUG 73 #ifndef XHCI_DEBUG 74 #define xhcidebug 0 75 #else /* !XHCI_DEBUG */ 76 #define HEXDUMP(a, b, c) \ 77 do { \ 78 if (xhcidebug > 0) \ 79 hexdump(printf, a, b, c); \ 80 } while (/*CONSTCOND*/0) 81 static int xhcidebug = 0; 82 83 SYSCTL_SETUP(sysctl_hw_xhci_setup, "sysctl hw.xhci setup") 84 { 85 int err; 86 const struct sysctlnode *rnode; 87 const struct sysctlnode *cnode; 88 89 err = sysctl_createv(clog, 0, NULL, &rnode, 90 CTLFLAG_PERMANENT, CTLTYPE_NODE, "xhci", 91 SYSCTL_DESCR("xhci global controls"), 92 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL); 93 94 if (err) 95 goto fail; 96 97 /* control debugging printfs */ 98 err = sysctl_createv(clog, 0, &rnode, &cnode, 99 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, 100 "debug", SYSCTL_DESCR("Enable debugging output"), 101 NULL, 0, &xhcidebug, sizeof(xhcidebug), CTL_CREATE, CTL_EOL); 102 if (err) 103 goto fail; 104 105 return; 106 fail: 107 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err); 108 } 
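/*
 * Usage sketch (assumes a kernel compiled with both USB_DEBUG and
 * XHCI_DEBUG): the node created above shows up as hw.xhci.debug, so the
 * debugging output can be raised at run time with, e.g.:
 *
 *	sysctl -w hw.xhci.debug=1
 *
 * Larger values enable the higher-numbered DPRINTFN()/USBHIST_LOGN()
 * levels used throughout this file.
 */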
109 110 #endif /* !XHCI_DEBUG */ 111 #endif /* USB_DEBUG */ 112 113 #ifndef HEXDUMP 114 #define HEXDUMP(a, b, c) 115 #endif 116 117 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOG(xhcidebug,FMT,A,B,C,D) 118 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(xhcidebug,N,FMT,A,B,C,D) 119 #define XHCIHIST_FUNC() USBHIST_FUNC() 120 #define XHCIHIST_CALLED(name) USBHIST_CALLED(xhcidebug) 121 #define XHCIHIST_CALLARGS(FMT,A,B,C,D) \ 122 USBHIST_CALLARGS(xhcidebug,FMT,A,B,C,D) 123 124 #define XHCI_DCI_SLOT 0 125 #define XHCI_DCI_EP_CONTROL 1 126 127 #define XHCI_ICI_INPUT_CONTROL 0 128 129 struct xhci_pipe { 130 struct usbd_pipe xp_pipe; 131 struct usb_task xp_async_task; 132 int16_t xp_isoc_next; /* next frame */ 133 uint8_t xp_maxb; /* max burst */ 134 uint8_t xp_mult; 135 }; 136 137 #define XHCI_COMMAND_RING_TRBS 256 138 #define XHCI_EVENT_RING_TRBS 256 139 #define XHCI_EVENT_RING_SEGMENTS 1 140 #define XHCI_TRB_3_ED_BIT XHCI_TRB_3_ISP_BIT 141 142 static usbd_status xhci_open(struct usbd_pipe *); 143 static void xhci_close_pipe(struct usbd_pipe *); 144 static int xhci_intr1(struct xhci_softc * const); 145 static void xhci_softintr(void *); 146 static void xhci_poll(struct usbd_bus *); 147 static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int); 148 static void xhci_freex(struct usbd_bus *, struct usbd_xfer *); 149 static void xhci_abortx(struct usbd_xfer *); 150 static bool xhci_dying(struct usbd_bus *); 151 static void xhci_get_lock(struct usbd_bus *, kmutex_t **); 152 static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int, 153 struct usbd_port *); 154 static int xhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *, 155 void *, int); 156 157 static void xhci_pipe_restart(struct usbd_pipe *); 158 static void xhci_pipe_restart_async_task(void *); 159 static void xhci_pipe_restart_async(struct usbd_pipe *); 160 161 static usbd_status xhci_configure_endpoint(struct usbd_pipe *); 162 //static usbd_status xhci_unconfigure_endpoint(struct usbd_pipe *); 163 static void xhci_reset_endpoint(struct usbd_pipe *); 164 static usbd_status xhci_stop_endpoint_cmd(struct xhci_softc *, 165 struct xhci_slot *, u_int, uint32_t); 166 static usbd_status xhci_stop_endpoint(struct usbd_pipe *); 167 168 static void xhci_host_dequeue(struct xhci_ring * const); 169 static void xhci_set_dequeue(struct usbd_pipe *); 170 171 static usbd_status xhci_do_command(struct xhci_softc * const, 172 struct xhci_soft_trb * const, int); 173 static usbd_status xhci_do_command_locked(struct xhci_softc * const, 174 struct xhci_soft_trb * const, int); 175 static usbd_status xhci_init_slot(struct usbd_device *, uint32_t); 176 static void xhci_free_slot(struct xhci_softc *, struct xhci_slot *); 177 static usbd_status xhci_set_address(struct usbd_device *, uint32_t, bool); 178 static usbd_status xhci_enable_slot(struct xhci_softc * const, 179 uint8_t * const); 180 static usbd_status xhci_disable_slot(struct xhci_softc * const, uint8_t); 181 static usbd_status xhci_address_device(struct xhci_softc * const, 182 uint64_t, uint8_t, bool); 183 static void xhci_set_dcba(struct xhci_softc * const, uint64_t, int); 184 static usbd_status xhci_update_ep0_mps(struct xhci_softc * const, 185 struct xhci_slot * const, u_int); 186 static usbd_status xhci_ring_init(struct xhci_softc * const, 187 struct xhci_ring **, size_t, size_t); 188 static void xhci_ring_free(struct xhci_softc * const, 189 struct xhci_ring ** const); 190 191 static void xhci_setup_ctx(struct usbd_pipe *); 192 static void xhci_setup_route(struct usbd_pipe 
*, uint32_t *); 193 static void xhci_setup_tthub(struct usbd_pipe *, uint32_t *); 194 static void xhci_setup_maxburst(struct usbd_pipe *, uint32_t *); 195 static uint32_t xhci_bival2ival(uint32_t, uint32_t, uint32_t); 196 197 static void xhci_noop(struct usbd_pipe *); 198 199 static usbd_status xhci_root_intr_transfer(struct usbd_xfer *); 200 static usbd_status xhci_root_intr_start(struct usbd_xfer *); 201 static void xhci_root_intr_abort(struct usbd_xfer *); 202 static void xhci_root_intr_close(struct usbd_pipe *); 203 static void xhci_root_intr_done(struct usbd_xfer *); 204 205 static usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *); 206 static usbd_status xhci_device_ctrl_start(struct usbd_xfer *); 207 static void xhci_device_ctrl_abort(struct usbd_xfer *); 208 static void xhci_device_ctrl_close(struct usbd_pipe *); 209 static void xhci_device_ctrl_done(struct usbd_xfer *); 210 211 static usbd_status xhci_device_isoc_transfer(struct usbd_xfer *); 212 static usbd_status xhci_device_isoc_enter(struct usbd_xfer *); 213 static void xhci_device_isoc_abort(struct usbd_xfer *); 214 static void xhci_device_isoc_close(struct usbd_pipe *); 215 static void xhci_device_isoc_done(struct usbd_xfer *); 216 217 static usbd_status xhci_device_intr_transfer(struct usbd_xfer *); 218 static usbd_status xhci_device_intr_start(struct usbd_xfer *); 219 static void xhci_device_intr_abort(struct usbd_xfer *); 220 static void xhci_device_intr_close(struct usbd_pipe *); 221 static void xhci_device_intr_done(struct usbd_xfer *); 222 223 static usbd_status xhci_device_bulk_transfer(struct usbd_xfer *); 224 static usbd_status xhci_device_bulk_start(struct usbd_xfer *); 225 static void xhci_device_bulk_abort(struct usbd_xfer *); 226 static void xhci_device_bulk_close(struct usbd_pipe *); 227 static void xhci_device_bulk_done(struct usbd_xfer *); 228 229 static const struct usbd_bus_methods xhci_bus_methods = { 230 .ubm_open = xhci_open, 231 .ubm_softint = xhci_softintr, 232 .ubm_dopoll = xhci_poll, 233 .ubm_allocx = xhci_allocx, 234 .ubm_freex = xhci_freex, 235 .ubm_abortx = xhci_abortx, 236 .ubm_dying = xhci_dying, 237 .ubm_getlock = xhci_get_lock, 238 .ubm_newdev = xhci_new_device, 239 .ubm_rhctrl = xhci_roothub_ctrl, 240 }; 241 242 static const struct usbd_pipe_methods xhci_root_intr_methods = { 243 .upm_transfer = xhci_root_intr_transfer, 244 .upm_start = xhci_root_intr_start, 245 .upm_abort = xhci_root_intr_abort, 246 .upm_close = xhci_root_intr_close, 247 .upm_cleartoggle = xhci_noop, 248 .upm_done = xhci_root_intr_done, 249 }; 250 251 252 static const struct usbd_pipe_methods xhci_device_ctrl_methods = { 253 .upm_transfer = xhci_device_ctrl_transfer, 254 .upm_start = xhci_device_ctrl_start, 255 .upm_abort = xhci_device_ctrl_abort, 256 .upm_close = xhci_device_ctrl_close, 257 .upm_cleartoggle = xhci_noop, 258 .upm_done = xhci_device_ctrl_done, 259 }; 260 261 static const struct usbd_pipe_methods xhci_device_isoc_methods = { 262 .upm_transfer = xhci_device_isoc_transfer, 263 .upm_abort = xhci_device_isoc_abort, 264 .upm_close = xhci_device_isoc_close, 265 .upm_cleartoggle = xhci_noop, 266 .upm_done = xhci_device_isoc_done, 267 }; 268 269 static const struct usbd_pipe_methods xhci_device_bulk_methods = { 270 .upm_transfer = xhci_device_bulk_transfer, 271 .upm_start = xhci_device_bulk_start, 272 .upm_abort = xhci_device_bulk_abort, 273 .upm_close = xhci_device_bulk_close, 274 .upm_cleartoggle = xhci_noop, 275 .upm_done = xhci_device_bulk_done, 276 }; 277 278 static const struct usbd_pipe_methods 
xhci_device_intr_methods = { 279 .upm_transfer = xhci_device_intr_transfer, 280 .upm_start = xhci_device_intr_start, 281 .upm_abort = xhci_device_intr_abort, 282 .upm_close = xhci_device_intr_close, 283 .upm_cleartoggle = xhci_noop, 284 .upm_done = xhci_device_intr_done, 285 }; 286 287 static inline uint32_t 288 xhci_read_1(const struct xhci_softc * const sc, bus_size_t offset) 289 { 290 return bus_space_read_1(sc->sc_iot, sc->sc_ioh, offset); 291 } 292 293 static inline uint32_t 294 xhci_read_2(const struct xhci_softc * const sc, bus_size_t offset) 295 { 296 return bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset); 297 } 298 299 static inline uint32_t 300 xhci_read_4(const struct xhci_softc * const sc, bus_size_t offset) 301 { 302 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset); 303 } 304 305 static inline void 306 xhci_write_1(const struct xhci_softc * const sc, bus_size_t offset, 307 uint32_t value) 308 { 309 bus_space_write_1(sc->sc_iot, sc->sc_ioh, offset, value); 310 } 311 312 #if 0 /* unused */ 313 static inline void 314 xhci_write_4(const struct xhci_softc * const sc, bus_size_t offset, 315 uint32_t value) 316 { 317 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value); 318 } 319 #endif /* unused */ 320 321 static inline uint32_t 322 xhci_cap_read_4(const struct xhci_softc * const sc, bus_size_t offset) 323 { 324 return bus_space_read_4(sc->sc_iot, sc->sc_cbh, offset); 325 } 326 327 static inline uint32_t 328 xhci_op_read_4(const struct xhci_softc * const sc, bus_size_t offset) 329 { 330 return bus_space_read_4(sc->sc_iot, sc->sc_obh, offset); 331 } 332 333 static inline void 334 xhci_op_write_4(const struct xhci_softc * const sc, bus_size_t offset, 335 uint32_t value) 336 { 337 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value); 338 } 339 340 static inline uint64_t 341 xhci_op_read_8(const struct xhci_softc * const sc, bus_size_t offset) 342 { 343 uint64_t value; 344 345 #ifdef XHCI_USE_BUS_SPACE_8 346 value = bus_space_read_8(sc->sc_iot, sc->sc_obh, offset); 347 #else 348 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset); 349 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_obh, 350 offset + 4) << 32; 351 #endif 352 353 return value; 354 } 355 356 static inline void 357 xhci_op_write_8(const struct xhci_softc * const sc, bus_size_t offset, 358 uint64_t value) 359 { 360 #ifdef XHCI_USE_BUS_SPACE_8 361 bus_space_write_8(sc->sc_iot, sc->sc_obh, offset, value); 362 #else 363 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 0, 364 (value >> 0) & 0xffffffff); 365 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 4, 366 (value >> 32) & 0xffffffff); 367 #endif 368 } 369 370 static inline uint32_t 371 xhci_rt_read_4(const struct xhci_softc * const sc, bus_size_t offset) 372 { 373 return bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset); 374 } 375 376 static inline void 377 xhci_rt_write_4(const struct xhci_softc * const sc, bus_size_t offset, 378 uint32_t value) 379 { 380 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value); 381 } 382 383 static inline uint64_t 384 xhci_rt_read_8(const struct xhci_softc * const sc, bus_size_t offset) 385 { 386 uint64_t value; 387 388 #ifdef XHCI_USE_BUS_SPACE_8 389 value = bus_space_read_8(sc->sc_iot, sc->sc_rbh, offset); 390 #else 391 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset); 392 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_rbh, 393 offset + 4) << 32; 394 #endif 395 396 return value; 397 } 398 399 static inline void 400 xhci_rt_write_8(const struct xhci_softc * const sc, bus_size_t offset, 401 
uint64_t value) 402 { 403 #ifdef XHCI_USE_BUS_SPACE_8 404 bus_space_write_8(sc->sc_iot, sc->sc_rbh, offset, value); 405 #else 406 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 0, 407 (value >> 0) & 0xffffffff); 408 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 4, 409 (value >> 32) & 0xffffffff); 410 #endif 411 } 412 413 #if 0 /* unused */ 414 static inline uint32_t 415 xhci_db_read_4(const struct xhci_softc * const sc, bus_size_t offset) 416 { 417 return bus_space_read_4(sc->sc_iot, sc->sc_dbh, offset); 418 } 419 #endif /* unused */ 420 421 static inline void 422 xhci_db_write_4(const struct xhci_softc * const sc, bus_size_t offset, 423 uint32_t value) 424 { 425 bus_space_write_4(sc->sc_iot, sc->sc_dbh, offset, value); 426 } 427 428 /* --- */ 429 430 static inline uint8_t 431 xhci_ep_get_type(usb_endpoint_descriptor_t * const ed) 432 { 433 u_int eptype = 0; 434 435 switch (UE_GET_XFERTYPE(ed->bmAttributes)) { 436 case UE_CONTROL: 437 eptype = 0x0; 438 break; 439 case UE_ISOCHRONOUS: 440 eptype = 0x1; 441 break; 442 case UE_BULK: 443 eptype = 0x2; 444 break; 445 case UE_INTERRUPT: 446 eptype = 0x3; 447 break; 448 } 449 450 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) || 451 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)) 452 return eptype | 0x4; 453 else 454 return eptype; 455 } 456 457 static u_int 458 xhci_ep_get_dci(usb_endpoint_descriptor_t * const ed) 459 { 460 /* xHCI 1.0 section 4.5.1 */ 461 u_int epaddr = UE_GET_ADDR(ed->bEndpointAddress); 462 u_int in = 0; 463 464 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) || 465 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)) 466 in = 1; 467 468 return epaddr * 2 + in; 469 } 470 471 static inline u_int 472 xhci_dci_to_ici(const u_int i) 473 { 474 return i + 1; 475 } 476 477 static inline void * 478 xhci_slot_get_dcv(struct xhci_softc * const sc, struct xhci_slot * const xs, 479 const u_int dci) 480 { 481 return KERNADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci); 482 } 483 484 #if 0 /* unused */ 485 static inline bus_addr_t 486 xhci_slot_get_dcp(struct xhci_softc * const sc, struct xhci_slot * const xs, 487 const u_int dci) 488 { 489 return DMAADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci); 490 } 491 #endif /* unused */ 492 493 static inline void * 494 xhci_slot_get_icv(struct xhci_softc * const sc, struct xhci_slot * const xs, 495 const u_int ici) 496 { 497 return KERNADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici); 498 } 499 500 static inline bus_addr_t 501 xhci_slot_get_icp(struct xhci_softc * const sc, struct xhci_slot * const xs, 502 const u_int ici) 503 { 504 return DMAADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici); 505 } 506 507 static inline struct xhci_trb * 508 xhci_ring_trbv(struct xhci_ring * const xr, u_int idx) 509 { 510 return KERNADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx); 511 } 512 513 static inline bus_addr_t 514 xhci_ring_trbp(struct xhci_ring * const xr, u_int idx) 515 { 516 return DMAADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx); 517 } 518 519 static inline void 520 xhci_xfer_put_trb(struct xhci_xfer * const xx, u_int idx, 521 uint64_t parameter, uint32_t status, uint32_t control) 522 { 523 KASSERTMSG(idx < xx->xx_ntrb, "idx=%u xx_ntrb=%u", idx, xx->xx_ntrb); 524 xx->xx_trb[idx].trb_0 = parameter; 525 xx->xx_trb[idx].trb_2 = status; 526 xx->xx_trb[idx].trb_3 = control; 527 } 528 529 static inline void 530 xhci_trb_put(struct xhci_trb * const trb, uint64_t parameter, uint32_t status, 531 uint32_t control) 532 { 533 trb->trb_0 = htole64(parameter); 534 trb->trb_2 = htole32(status); 535 trb->trb_3 = htole32(control); 536 } 537 538 
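/*
 * Illustrative sketch only (not referenced by the driver): per xHCI 1.0
 * section 4.5.1, xhci_ep_get_dci() above maps an endpoint to its Device
 * Context Index as DCI = endpoint number * 2, plus one for control and
 * IN endpoints, e.g.:
 *
 *	EP0 (control)	-> DCI 1
 *	EP1 OUT		-> DCI 2
 *	EP1 IN		-> DCI 3
 *	EP2 OUT		-> DCI 4
 *
 * Input context indices are shifted up by one from DCIs
 * (xhci_dci_to_ici()), since index 0 of the input context holds the
 * Input Control Context (XHCI_ICI_INPUT_CONTROL).
 */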
static int 539 xhci_trb_get_idx(struct xhci_ring *xr, uint64_t trb_0, int *idx) 540 { 541 /* base address of TRBs */ 542 bus_addr_t trbp = xhci_ring_trbp(xr, 0); 543 544 /* trb_0 range sanity check */ 545 if (trb_0 == 0 || trb_0 < trbp || 546 (trb_0 - trbp) % sizeof(struct xhci_trb) != 0 || 547 (trb_0 - trbp) / sizeof(struct xhci_trb) >= xr->xr_ntrb) { 548 return 1; 549 } 550 *idx = (trb_0 - trbp) / sizeof(struct xhci_trb); 551 return 0; 552 } 553 554 static unsigned int 555 xhci_get_epstate(struct xhci_softc * const sc, struct xhci_slot * const xs, 556 u_int dci) 557 { 558 uint32_t *cp; 559 560 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 561 cp = xhci_slot_get_dcv(sc, xs, dci); 562 return XHCI_EPCTX_0_EPSTATE_GET(le32toh(cp[0])); 563 } 564 565 static inline unsigned int 566 xhci_ctlrport2bus(struct xhci_softc * const sc, unsigned int ctlrport) 567 { 568 const unsigned int port = ctlrport - 1; 569 const uint8_t bit = __BIT(port % NBBY); 570 571 return __SHIFTOUT(sc->sc_ctlrportbus[port / NBBY], bit); 572 } 573 574 /* 575 * Return the roothub port for a controller port. Both are 1..n. 576 */ 577 static inline unsigned int 578 xhci_ctlrport2rhport(struct xhci_softc * const sc, unsigned int ctrlport) 579 { 580 581 return sc->sc_ctlrportmap[ctrlport - 1]; 582 } 583 584 /* 585 * Return the controller port for a bus roothub port. Both are 1..n. 586 */ 587 static inline unsigned int 588 xhci_rhport2ctlrport(struct xhci_softc * const sc, unsigned int bn, 589 unsigned int rhport) 590 { 591 592 return sc->sc_rhportmap[bn][rhport - 1]; 593 } 594 595 /* --- */ 596 597 void 598 xhci_childdet(device_t self, device_t child) 599 { 600 struct xhci_softc * const sc = device_private(self); 601 602 mutex_enter(&sc->sc_intr_lock); 603 KASSERT((sc->sc_child == child) || (sc->sc_child2 == child)); 604 if (child == sc->sc_child2) 605 sc->sc_child2 = NULL; 606 else if (child == sc->sc_child) 607 sc->sc_child = NULL; 608 mutex_exit(&sc->sc_intr_lock); 609 } 610 611 int 612 xhci_detach(struct xhci_softc *sc, int flags) 613 { 614 int rv = 0; 615 616 if (sc->sc_child2 != NULL) { 617 rv = config_detach(sc->sc_child2, flags); 618 if (rv != 0) 619 return rv; 620 KASSERT(sc->sc_child2 == NULL); 621 } 622 623 if (sc->sc_child != NULL) { 624 rv = config_detach(sc->sc_child, flags); 625 if (rv != 0) 626 return rv; 627 KASSERT(sc->sc_child == NULL); 628 } 629 630 /* XXX unconfigure/free slots */ 631 632 /* verify: */ 633 xhci_rt_write_4(sc, XHCI_IMAN(0), 0); 634 xhci_op_write_4(sc, XHCI_USBCMD, 0); 635 /* do we need to wait for stop? 
*/ 636 637 xhci_op_write_8(sc, XHCI_CRCR, 0); 638 xhci_ring_free(sc, &sc->sc_cr); 639 cv_destroy(&sc->sc_command_cv); 640 cv_destroy(&sc->sc_cmdbusy_cv); 641 642 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), 0); 643 xhci_rt_write_8(sc, XHCI_ERSTBA(0), 0); 644 xhci_rt_write_8(sc, XHCI_ERDP(0), 0 | XHCI_ERDP_BUSY); 645 xhci_ring_free(sc, &sc->sc_er); 646 647 usb_freemem(&sc->sc_eventst_dma); 648 649 xhci_op_write_8(sc, XHCI_DCBAAP, 0); 650 usb_freemem(&sc->sc_dcbaa_dma); 651 652 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * sc->sc_maxslots); 653 654 kmem_free(sc->sc_ctlrportbus, 655 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY)); 656 kmem_free(sc->sc_ctlrportmap, sc->sc_maxports * sizeof(int)); 657 658 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) { 659 kmem_free(sc->sc_rhportmap[j], sc->sc_maxports * sizeof(int)); 660 } 661 662 mutex_destroy(&sc->sc_rhlock); 663 mutex_destroy(&sc->sc_lock); 664 mutex_destroy(&sc->sc_intr_lock); 665 666 pool_cache_destroy(sc->sc_xferpool); 667 668 return rv; 669 } 670 671 int 672 xhci_activate(device_t self, enum devact act) 673 { 674 struct xhci_softc * const sc = device_private(self); 675 676 switch (act) { 677 case DVACT_DEACTIVATE: 678 sc->sc_dying = true; 679 return 0; 680 default: 681 return EOPNOTSUPP; 682 } 683 } 684 685 bool 686 xhci_suspend(device_t self, const pmf_qual_t *qual) 687 { 688 struct xhci_softc * const sc = device_private(self); 689 size_t i, j, bn, dci; 690 int port; 691 uint32_t v; 692 usbd_status err; 693 bool ok = false; 694 695 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 696 697 /* 698 * Block issuance of new commands, and wait for all pending 699 * commands to complete. 700 */ 701 mutex_enter(&sc->sc_lock); 702 KASSERT(sc->sc_suspender == NULL); 703 sc->sc_suspender = curlwp; 704 while (sc->sc_command_addr != 0) 705 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock); 706 mutex_exit(&sc->sc_lock); 707 708 /* 709 * Block roothub xfers which might touch portsc registers until 710 * we're done suspending. 711 */ 712 mutex_enter(&sc->sc_rhlock); 713 714 /* 715 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2: 716 * xHCI Power Management, p. 342 717 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=342 718 */ 719 720 /* 721 * `1. Stop all USB activity by issuing Stop Endpoint Commands 722 * for Busy endpoints in the Running state. If the Force 723 * Save Context Capability (FSC = ``0'') is not supported, 724 * then Stop Endpoint Commands shall be issued for all idle 725 * endpoints in the Running state as well. The Stop 726 * Endpoint Command causes the xHC to update the respective 727 * Endpoint or Stream Contexts in system memory, e.g. the 728 * TR Dequeue Pointer, DCS, etc. fields. Refer to 729 * Implementation Note "0".' 730 */ 731 for (i = 0; i < sc->sc_maxslots; i++) { 732 struct xhci_slot *xs = &sc->sc_slots[i]; 733 734 /* Skip if the slot is not in use. */ 735 if (xs->xs_idx == 0) 736 continue; 737 738 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) { 739 /* Skip if the endpoint is not Running. */ 740 /* XXX What about Busy? */ 741 if (xhci_get_epstate(sc, xs, dci) != 742 XHCI_EPSTATE_RUNNING) 743 continue; 744 745 /* Stop endpoint. 
*/ 746 mutex_enter(&sc->sc_lock); 747 err = xhci_stop_endpoint_cmd(sc, xs, dci, 748 XHCI_TRB_3_SUSP_EP_BIT); 749 mutex_exit(&sc->sc_lock); 750 if (err) { 751 device_printf(self, "failed to stop endpoint" 752 " slot %zu dci %zu err %d\n", 753 i, dci, err); 754 goto out; 755 } 756 } 757 } 758 759 /* 760 * Next, suspend all the ports: 761 * 762 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.15: 763 * Suspend-Resume, pp. 276-283 764 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=276 765 */ 766 for (bn = 0; bn < 2; bn++) { 767 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) { 768 /* 4.15.1: Port Suspend. */ 769 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i)); 770 771 /* 772 * `System software places individual ports 773 * into suspend mode by writing a ``3'' into 774 * the appropriate PORTSC register Port Link 775 * State (PLS) field (refer to Section 5.4.8). 776 * Software should only set the PLS field to 777 * ``3'' when the port is in the Enabled 778 * state.' 779 * 780 * `Software should not attempt to suspend a 781 * port unless the port reports that it is in 782 * the enabled (PED = ``1''; PLS < ``3'') 783 * state (refer to Section 5.4.8 for more 784 * information about PED and PLS).' 785 */ 786 v = xhci_op_read_4(sc, port); 787 if (((v & XHCI_PS_PED) == 0) || 788 XHCI_PS_PLS_GET(v) >= XHCI_PS_PLS_U3) 789 continue; 790 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR); 791 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU3); 792 xhci_op_write_4(sc, port, v); 793 794 /* 795 * `When the PLS field is written with U3 796 * (``3''), the status of the PLS bit will not 797 * change to the target U state U3 until the 798 * suspend signaling has completed to the 799 * attached device (which may be as long as 800 * 10ms.).' 801 * 802 * `Software is required to wait for U3 803 * transitions to complete before it puts the 804 * xHC into a low power state, and before 805 * resuming the port.' 806 * 807 * XXX Take advantage of the technique to 808 * reduce polling on host controllers that 809 * support the U3C capability. 810 */ 811 for (j = 0; j < XHCI_WAIT_PLS_U3; j++) { 812 v = xhci_op_read_4(sc, port); 813 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U3) 814 break; 815 usb_delay_ms(&sc->sc_bus, 1); 816 } 817 if (j == XHCI_WAIT_PLS_U3) { 818 device_printf(self, 819 "suspend timeout on bus %zu port %zu\n", 820 bn, i); 821 goto out; 822 } 823 } 824 } 825 826 /* 827 * `2. Ensure that the Command Ring is in the Stopped state 828 * (CRR = ``0'') or Idle (i.e. the Command Transfer Ring is 829 * empty), and all Command Completion Events associated 830 * with them have been received.' 831 * 832 * XXX 833 */ 834 835 /* `3. Stop the controller by setting Run/Stop (R/S) = ``0''.' */ 836 xhci_op_write_4(sc, XHCI_USBCMD, 837 xhci_op_read_4(sc, XHCI_USBCMD) & ~XHCI_CMD_RS); 838 839 /* 840 * `4. Read the Operational Runtime, and VTIO registers in the 841 * following order: USBCMD, DNCTRL, DCBAAP, CONFIG, ERSTSZ, 842 * ERSTBA, ERDP, IMAN, IMOD, and VTIO and save their 843 * state.' 844 * 845 * (We don't use VTIO here (XXX for now?).) 
846 */ 847 sc->sc_regs.usbcmd = xhci_op_read_4(sc, XHCI_USBCMD); 848 sc->sc_regs.dnctrl = xhci_op_read_4(sc, XHCI_DNCTRL); 849 sc->sc_regs.dcbaap = xhci_op_read_8(sc, XHCI_DCBAAP); 850 sc->sc_regs.config = xhci_op_read_4(sc, XHCI_CONFIG); 851 sc->sc_regs.erstsz0 = xhci_rt_read_4(sc, XHCI_ERSTSZ(0)); 852 sc->sc_regs.erstba0 = xhci_rt_read_8(sc, XHCI_ERSTBA(0)); 853 sc->sc_regs.erdp0 = xhci_rt_read_8(sc, XHCI_ERDP(0)); 854 sc->sc_regs.iman0 = xhci_rt_read_4(sc, XHCI_IMAN(0)); 855 sc->sc_regs.imod0 = xhci_rt_read_4(sc, XHCI_IMOD(0)); 856 857 /* 858 * `5. Set the Controller Save State (CSS) flag in the USBCMD 859 * register (5.4.1)...' 860 */ 861 xhci_op_write_4(sc, XHCI_USBCMD, 862 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CSS); 863 864 /* 865 * `...and wait for the Save State Status (SSS) flag in the 866 * USBSTS register (5.4.2) to transition to ``0''.' 867 */ 868 for (i = 0; i < XHCI_WAIT_SSS; i++) { 869 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SSS) == 0) 870 break; 871 usb_delay_ms(&sc->sc_bus, 1); 872 } 873 if (i >= XHCI_WAIT_SSS) { 874 device_printf(self, "suspend timeout, USBSTS.SSS\n"); 875 /* 876 * Just optimistically go on and check SRE anyway -- 877 * what's the worst that could happen? 878 */ 879 } 880 881 /* 882 * `Note: After a Save or Restore operation completes, the 883 * Save/Restore Error (SRE) flag in the USBSTS register should 884 * be checked to ensure that the operation completed 885 * successfully.' 886 */ 887 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) { 888 device_printf(self, "suspend error, USBSTS.SRE\n"); 889 goto out; 890 } 891 892 /* Success! */ 893 ok = true; 894 895 out: mutex_exit(&sc->sc_rhlock); 896 if (!ok) { 897 /* 898 * If suspend failed, stop holding up command issuance 899 * and make it fail instead. 900 */ 901 mutex_enter(&sc->sc_lock); 902 KASSERT(sc->sc_suspender == curlwp); 903 sc->sc_suspender = NULL; 904 sc->sc_suspendresume_failed = true; 905 cv_broadcast(&sc->sc_cmdbusy_cv); 906 mutex_exit(&sc->sc_lock); 907 } 908 return ok; 909 } 910 911 bool 912 xhci_resume(device_t self, const pmf_qual_t *qual) 913 { 914 struct xhci_softc * const sc = device_private(self); 915 size_t i, j, bn, dci; 916 int port; 917 uint32_t v; 918 bool ok = false; 919 920 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 921 922 /* 923 * If resume had previously failed, just try again. Can't make 924 * things worse, probably. 925 */ 926 mutex_enter(&sc->sc_lock); 927 if (sc->sc_suspendresume_failed) { 928 KASSERT(sc->sc_suspender == NULL); 929 sc->sc_suspender = curlwp; 930 sc->sc_suspendresume_failed = false; 931 } 932 KASSERT(sc->sc_suspender); 933 mutex_exit(&sc->sc_lock); 934 935 /* 936 * Block roothub xfers which might touch portsc registers until 937 * we're done resuming. 938 */ 939 mutex_enter(&sc->sc_rhlock); 940 941 /* 942 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2: 943 * xHCI Power Management, p. 343 944 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=343 945 */ 946 947 /* 948 * `4. Restore the Operational Runtime, and VTIO registers with 949 * their previously saved state in the following order: 950 * DNCTRL, DCBAAP, CONFIG, ERSTSZ, ERSTBA, ERDP, IMAN, 951 * IMOD, and VTIO.' 952 * 953 * (We don't use VTIO here (for now?).) 
954 */ 955 xhci_op_write_4(sc, XHCI_USBCMD, sc->sc_regs.usbcmd); 956 xhci_op_write_4(sc, XHCI_DNCTRL, sc->sc_regs.dnctrl); 957 xhci_op_write_8(sc, XHCI_DCBAAP, sc->sc_regs.dcbaap); 958 xhci_op_write_4(sc, XHCI_CONFIG, sc->sc_regs.config); 959 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), sc->sc_regs.erstsz0); 960 xhci_rt_write_8(sc, XHCI_ERSTBA(0), sc->sc_regs.erstba0); 961 xhci_rt_write_8(sc, XHCI_ERDP(0), sc->sc_regs.erdp0); 962 xhci_rt_write_4(sc, XHCI_IMAN(0), sc->sc_regs.iman0); 963 xhci_rt_write_4(sc, XHCI_IMOD(0), sc->sc_regs.imod0); 964 965 memset(&sc->sc_regs, 0, sizeof(sc->sc_regs)); /* paranoia */ 966 967 /* 968 * `5. Set the Controller Restore State (CRS) flag in the 969 * USBCMD register (5.4.1) to ``1''...' 970 */ 971 xhci_op_write_4(sc, XHCI_USBCMD, 972 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CRS); 973 974 /* 975 * `...and wait for the Restore State Status (RSS) in the 976 * USBSTS register (5.4.2) to transition to ``0''.' 977 */ 978 for (i = 0; i < XHCI_WAIT_RSS; i++) { 979 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_RSS) == 0) 980 break; 981 usb_delay_ms(&sc->sc_bus, 1); 982 } 983 if (i >= XHCI_WAIT_RSS) { 984 device_printf(self, "resume timeout, USBSTS.RSS\n"); 985 goto out; 986 } 987 988 /* 989 * `6. Reinitialize the Command Ring, i.e. so its Cycle bits 990 * are consistent with the RCS values to be written to the 991 * CRCR.' 992 * 993 * XXX Hope just zeroing it is good enough! 994 */ 995 xhci_host_dequeue(sc->sc_cr); 996 997 /* 998 * `7. Write the CRCR with the address and RCS value of the 999 * reinitialized Command Ring. Note that this write will 1000 * cause the Command Ring to restart at the address 1001 * specified by the CRCR.' 1002 */ 1003 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) | 1004 sc->sc_cr->xr_cs); 1005 1006 /* 1007 * `8. Enable the controller by setting Run/Stop (R/S) = 1008 * ``1''.' 1009 */ 1010 xhci_op_write_4(sc, XHCI_USBCMD, 1011 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_RS); 1012 1013 /* 1014 * `9. Software shall walk the USB topology and initialize each 1015 * of the xHC PORTSC, PORTPMSC, and PORTLI registers, and 1016 * external hub ports attached to USB devices.' 1017 * 1018 * This follows the procedure in 4.15 `Suspend-Resume', 4.15.2 1019 * `Port Resume', 4.15.2.2 `Host Initiated'. 1020 * 1021 * XXX We should maybe batch up initiating the state 1022 * transitions, and then wait for them to complete all at once. 1023 */ 1024 for (bn = 0; bn < 2; bn++) { 1025 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) { 1026 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i)); 1027 1028 /* `When a port is in the U3 state: ...' */ 1029 v = xhci_op_read_4(sc, port); 1030 if (XHCI_PS_PLS_GET(v) != XHCI_PS_PLS_U3) 1031 continue; 1032 1033 /* 1034 * `For a USB2 protocol port, software shall 1035 * write a ``15'' (Resume) to the PLS field to 1036 * initiate resume signaling. The port shall 1037 * transition to the Resume substate and the 1038 * xHC shall transmit the resume signaling 1039 * within 1ms (T_URSM). Software shall ensure 1040 * that resume is signaled for at least 20ms 1041 * (T_DRSMDN). Software shall start timing 1042 * T_DRSMDN from the write of ``15'' (Resume) 1043 * to PLS.' 
1044 */ 1045 if (bn == 1) { 1046 KASSERT(sc->sc_bus2.ub_revision == USBREV_2_0); 1047 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR); 1048 v |= XHCI_PS_LWS; 1049 v |= XHCI_PS_PLS_SET(XHCI_PS_PLS_SETRESUME); 1050 xhci_op_write_4(sc, port, v); 1051 usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT); 1052 } else { 1053 KASSERT(sc->sc_bus.ub_revision > USBREV_2_0); 1054 } 1055 1056 /* 1057 * `For a USB3 protocol port [and a USB2 1058 * protocol port after transitioning to 1059 * Resume], software shall write a ``0'' (U0) 1060 * to the PLS field...' 1061 */ 1062 v = xhci_op_read_4(sc, port); 1063 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR); 1064 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU0); 1065 xhci_op_write_4(sc, port, v); 1066 1067 for (j = 0; j < XHCI_WAIT_PLS_U0; j++) { 1068 v = xhci_op_read_4(sc, port); 1069 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U0) 1070 break; 1071 usb_delay_ms(&sc->sc_bus, 1); 1072 } 1073 if (j == XHCI_WAIT_PLS_U0) { 1074 device_printf(self, 1075 "resume timeout on bus %zu port %zu\n", 1076 bn, i); 1077 goto out; 1078 } 1079 } 1080 } 1081 1082 /* 1083 * `10. Restart each of the previously Running endpoints by 1084 * ringing their doorbells.' 1085 */ 1086 for (i = 0; i < sc->sc_maxslots; i++) { 1087 struct xhci_slot *xs = &sc->sc_slots[i]; 1088 1089 /* Skip if the slot is not in use. */ 1090 if (xs->xs_idx == 0) 1091 continue; 1092 1093 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) { 1094 /* Skip if the endpoint is not Running. */ 1095 if (xhci_get_epstate(sc, xs, dci) != 1096 XHCI_EPSTATE_RUNNING) 1097 continue; 1098 1099 /* Ring the doorbell. */ 1100 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); 1101 } 1102 } 1103 1104 /* 1105 * `Note: After a Save or Restore operation completes, the 1106 * Save/Restore Error (SRE) flag in the USBSTS register should 1107 * be checked to ensure that the operation completed 1108 * successfully.' 1109 */ 1110 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) { 1111 device_printf(self, "resume error, USBSTS.SRE\n"); 1112 goto out; 1113 } 1114 1115 /* Success! */ 1116 ok = true; 1117 1118 out: /* 1119 * Resume command issuance. If the hardware failed to resume, 1120 * well, tough -- deadlocking because everything is held up on 1121 * the suspension, with no opportunity to detach, isn't better 1122 * than timing out waiting for dead hardware. 1123 */ 1124 mutex_enter(&sc->sc_lock); 1125 KASSERT(sc->sc_suspender); 1126 sc->sc_suspender = NULL; 1127 sc->sc_suspendresume_failed = !ok; 1128 cv_broadcast(&sc->sc_cmdbusy_cv); 1129 mutex_exit(&sc->sc_lock); 1130 1131 mutex_exit(&sc->sc_rhlock); 1132 return ok; 1133 } 1134 1135 bool 1136 xhci_shutdown(device_t self, int flags) 1137 { 1138 return false; 1139 } 1140 1141 static int 1142 xhci_hc_reset(struct xhci_softc * const sc) 1143 { 1144 uint32_t usbcmd, usbsts; 1145 int i; 1146 1147 /* Check controller not ready */ 1148 for (i = 0; i < XHCI_WAIT_CNR; i++) { 1149 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1150 if ((usbsts & XHCI_STS_CNR) == 0) 1151 break; 1152 usb_delay_ms(&sc->sc_bus, 1); 1153 } 1154 if (i >= XHCI_WAIT_CNR) { 1155 aprint_error_dev(sc->sc_dev, "controller not ready timeout\n"); 1156 return EIO; 1157 } 1158 1159 /* Halt controller */ 1160 usbcmd = 0; 1161 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd); 1162 usb_delay_ms(&sc->sc_bus, 1); 1163 1164 /* Reset controller */ 1165 usbcmd = XHCI_CMD_HCRST; 1166 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd); 1167 for (i = 0; i < XHCI_WAIT_HCRST; i++) { 1168 /* 1169 * Wait 1ms first. 
Existing Intel xHCI requires 1ms delay to 1170 * prevent system hang (Errata). 1171 */ 1172 usb_delay_ms(&sc->sc_bus, 1); 1173 usbcmd = xhci_op_read_4(sc, XHCI_USBCMD); 1174 if ((usbcmd & XHCI_CMD_HCRST) == 0) 1175 break; 1176 } 1177 if (i >= XHCI_WAIT_HCRST) { 1178 aprint_error_dev(sc->sc_dev, "host controller reset timeout\n"); 1179 return EIO; 1180 } 1181 1182 /* Check controller not ready */ 1183 for (i = 0; i < XHCI_WAIT_CNR; i++) { 1184 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1185 if ((usbsts & XHCI_STS_CNR) == 0) 1186 break; 1187 usb_delay_ms(&sc->sc_bus, 1); 1188 } 1189 if (i >= XHCI_WAIT_CNR) { 1190 aprint_error_dev(sc->sc_dev, 1191 "controller not ready timeout after reset\n"); 1192 return EIO; 1193 } 1194 1195 return 0; 1196 } 1197 1198 /* 7.2 xHCI Support Protocol Capability */ 1199 static void 1200 xhci_id_protocols(struct xhci_softc *sc, bus_size_t ecp) 1201 { 1202 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 1203 1204 /* XXX Cache this lot */ 1205 1206 const uint32_t w0 = xhci_read_4(sc, ecp); 1207 const uint32_t w4 = xhci_read_4(sc, ecp + 4); 1208 const uint32_t w8 = xhci_read_4(sc, ecp + 8); 1209 const uint32_t wc = xhci_read_4(sc, ecp + 0xc); 1210 1211 aprint_debug_dev(sc->sc_dev, 1212 " SP: 0x%08x 0x%08x 0x%08x 0x%08x\n", w0, w4, w8, wc); 1213 1214 if (w4 != XHCI_XECP_USBID) 1215 return; 1216 1217 const int major = XHCI_XECP_SP_W0_MAJOR(w0); 1218 const int minor = XHCI_XECP_SP_W0_MINOR(w0); 1219 const uint8_t cpo = XHCI_XECP_SP_W8_CPO(w8); 1220 const uint8_t cpc = XHCI_XECP_SP_W8_CPC(w8); 1221 1222 const uint16_t mm = __SHIFTOUT(w0, __BITS(31, 16)); 1223 switch (mm) { 1224 case 0x0200: 1225 case 0x0300: 1226 case 0x0301: 1227 case 0x0310: 1228 case 0x0320: 1229 aprint_debug_dev(sc->sc_dev, " %s ports %d - %d\n", 1230 major == 3 ? "ss" : "hs", cpo, cpo + cpc - 1); 1231 if (major == 3) 1232 sc->sc_usb3nports += cpo + cpc - 1; 1233 else 1234 sc->sc_usb2nports += cpo + cpc - 1; 1235 break; 1236 default: 1237 aprint_error_dev(sc->sc_dev, " unknown major/minor (%d/%d)\n", 1238 major, minor); 1239 return; 1240 } 1241 1242 const size_t bus = (major == 3) ? 0 : 1; 1243 1244 /* Index arrays with 0..n-1 where ports are numbered 1..n */ 1245 for (size_t cp = cpo - 1; cp < cpo + cpc - 1; cp++) { 1246 if (sc->sc_ctlrportmap[cp] != 0) { 1247 aprint_error_dev(sc->sc_dev, "controller port %zu " 1248 "already assigned", cp); 1249 continue; 1250 } 1251 1252 sc->sc_ctlrportbus[cp / NBBY] |= 1253 bus == 0 ? 
0 : __BIT(cp % NBBY); 1254 1255 const size_t rhp = sc->sc_rhportcount[bus]++; 1256 1257 KASSERTMSG(sc->sc_rhportmap[bus][rhp] == 0, 1258 "bus %zu rhp %zu is %d", bus, rhp, 1259 sc->sc_rhportmap[bus][rhp]); 1260 1261 sc->sc_rhportmap[bus][rhp] = cp + 1; 1262 sc->sc_ctlrportmap[cp] = rhp + 1; 1263 } 1264 } 1265 1266 /* Process extended capabilities */ 1267 static void 1268 xhci_ecp(struct xhci_softc *sc) 1269 { 1270 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 1271 1272 bus_size_t ecp = XHCI_HCC_XECP(sc->sc_hcc) * 4; 1273 while (ecp != 0) { 1274 uint32_t ecr = xhci_read_4(sc, ecp); 1275 aprint_debug_dev(sc->sc_dev, "ECR: 0x%08x\n", ecr); 1276 switch (XHCI_XECP_ID(ecr)) { 1277 case XHCI_ID_PROTOCOLS: { 1278 xhci_id_protocols(sc, ecp); 1279 break; 1280 } 1281 case XHCI_ID_USB_LEGACY: { 1282 uint8_t bios_sem; 1283 1284 /* Take host controller ownership from BIOS */ 1285 bios_sem = xhci_read_1(sc, ecp + XHCI_XECP_BIOS_SEM); 1286 if (bios_sem) { 1287 /* sets xHCI to be owned by OS */ 1288 xhci_write_1(sc, ecp + XHCI_XECP_OS_SEM, 1); 1289 aprint_debug_dev(sc->sc_dev, 1290 "waiting for BIOS to give up control\n"); 1291 for (int i = 0; i < 5000; i++) { 1292 bios_sem = xhci_read_1(sc, ecp + 1293 XHCI_XECP_BIOS_SEM); 1294 if (bios_sem == 0) 1295 break; 1296 DELAY(1000); 1297 } 1298 if (bios_sem) { 1299 aprint_error_dev(sc->sc_dev, 1300 "timed out waiting for BIOS\n"); 1301 } 1302 } 1303 break; 1304 } 1305 default: 1306 break; 1307 } 1308 ecr = xhci_read_4(sc, ecp); 1309 if (XHCI_XECP_NEXT(ecr) == 0) { 1310 ecp = 0; 1311 } else { 1312 ecp += XHCI_XECP_NEXT(ecr) * 4; 1313 } 1314 } 1315 } 1316 1317 #define XHCI_HCCPREV1_BITS \ 1318 "\177\020" /* New bitmask */ \ 1319 "f\020\020XECP\0" \ 1320 "f\014\4MAXPSA\0" \ 1321 "b\013CFC\0" \ 1322 "b\012SEC\0" \ 1323 "b\011SBD\0" \ 1324 "b\010FSE\0" \ 1325 "b\7NSS\0" \ 1326 "b\6LTC\0" \ 1327 "b\5LHRC\0" \ 1328 "b\4PIND\0" \ 1329 "b\3PPC\0" \ 1330 "b\2CZC\0" \ 1331 "b\1BNC\0" \ 1332 "b\0AC64\0" \ 1333 "\0" 1334 #define XHCI_HCCV1_x_BITS \ 1335 "\177\020" /* New bitmask */ \ 1336 "f\020\020XECP\0" \ 1337 "f\014\4MAXPSA\0" \ 1338 "b\013CFC\0" \ 1339 "b\012SEC\0" \ 1340 "b\011SPC\0" \ 1341 "b\010PAE\0" \ 1342 "b\7NSS\0" \ 1343 "b\6LTC\0" \ 1344 "b\5LHRC\0" \ 1345 "b\4PIND\0" \ 1346 "b\3PPC\0" \ 1347 "b\2CSZ\0" \ 1348 "b\1BNC\0" \ 1349 "b\0AC64\0" \ 1350 "\0" 1351 1352 #define XHCI_HCC2_BITS \ 1353 "\177\020" /* New bitmask */ \ 1354 "b\7ETC_TSC\0" \ 1355 "b\6ETC\0" \ 1356 "b\5CIC\0" \ 1357 "b\4LEC\0" \ 1358 "b\3CTC\0" \ 1359 "b\2FSC\0" \ 1360 "b\1CMC\0" \ 1361 "b\0U3C\0" \ 1362 "\0" 1363 1364 void 1365 xhci_start(struct xhci_softc *sc) 1366 { 1367 xhci_rt_write_4(sc, XHCI_IMAN(0), XHCI_IMAN_INTR_ENA); 1368 if ((sc->sc_quirks & XHCI_QUIRK_INTEL) != 0) 1369 /* Intel xhci needs interrupt rate moderated. */ 1370 xhci_rt_write_4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT_LP); 1371 else 1372 xhci_rt_write_4(sc, XHCI_IMOD(0), 0); 1373 aprint_debug_dev(sc->sc_dev, "current IMOD %u\n", 1374 xhci_rt_read_4(sc, XHCI_IMOD(0))); 1375 1376 /* Go! 
*/ 1377 xhci_op_write_4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS); 1378 aprint_debug_dev(sc->sc_dev, "USBCMD 0x%08"PRIx32"\n", 1379 xhci_op_read_4(sc, XHCI_USBCMD)); 1380 } 1381 1382 int 1383 xhci_init(struct xhci_softc *sc) 1384 { 1385 bus_size_t bsz; 1386 uint32_t hcs1, hcs2, hcs3, dboff, rtsoff; 1387 uint32_t pagesize, config; 1388 int i = 0; 1389 uint16_t hciversion; 1390 uint8_t caplength; 1391 1392 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 1393 1394 /* Set up the bus struct for the usb 3 and usb 2 buses */ 1395 sc->sc_bus.ub_methods = &xhci_bus_methods; 1396 sc->sc_bus.ub_pipesize = sizeof(struct xhci_pipe); 1397 sc->sc_bus.ub_usedma = true; 1398 sc->sc_bus.ub_hcpriv = sc; 1399 1400 sc->sc_bus2.ub_methods = &xhci_bus_methods; 1401 sc->sc_bus2.ub_pipesize = sizeof(struct xhci_pipe); 1402 sc->sc_bus2.ub_revision = USBREV_2_0; 1403 sc->sc_bus2.ub_usedma = true; 1404 sc->sc_bus2.ub_hcpriv = sc; 1405 sc->sc_bus2.ub_dmatag = sc->sc_bus.ub_dmatag; 1406 1407 caplength = xhci_read_1(sc, XHCI_CAPLENGTH); 1408 hciversion = xhci_read_2(sc, XHCI_HCIVERSION); 1409 1410 if (hciversion < XHCI_HCIVERSION_0_96 || 1411 hciversion >= 0x0200) { 1412 aprint_normal_dev(sc->sc_dev, 1413 "xHCI version %x.%x not known to be supported\n", 1414 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff); 1415 } else { 1416 aprint_verbose_dev(sc->sc_dev, "xHCI version %x.%x\n", 1417 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff); 1418 } 1419 1420 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0, caplength, 1421 &sc->sc_cbh) != 0) { 1422 aprint_error_dev(sc->sc_dev, "capability subregion failure\n"); 1423 return ENOMEM; 1424 } 1425 1426 hcs1 = xhci_cap_read_4(sc, XHCI_HCSPARAMS1); 1427 sc->sc_maxslots = XHCI_HCS1_MAXSLOTS(hcs1); 1428 sc->sc_maxintrs = XHCI_HCS1_MAXINTRS(hcs1); 1429 sc->sc_maxports = XHCI_HCS1_MAXPORTS(hcs1); 1430 hcs2 = xhci_cap_read_4(sc, XHCI_HCSPARAMS2); 1431 hcs3 = xhci_cap_read_4(sc, XHCI_HCSPARAMS3); 1432 aprint_debug_dev(sc->sc_dev, 1433 "hcs1=%"PRIx32" hcs2=%"PRIx32" hcs3=%"PRIx32"\n", hcs1, hcs2, hcs3); 1434 1435 sc->sc_hcc = xhci_cap_read_4(sc, XHCI_HCCPARAMS); 1436 sc->sc_ctxsz = XHCI_HCC_CSZ(sc->sc_hcc) ? 64 : 32; 1437 1438 char sbuf[128]; 1439 if (hciversion < XHCI_HCIVERSION_1_0) 1440 snprintb(sbuf, sizeof(sbuf), XHCI_HCCPREV1_BITS, sc->sc_hcc); 1441 else 1442 snprintb(sbuf, sizeof(sbuf), XHCI_HCCV1_x_BITS, sc->sc_hcc); 1443 aprint_debug_dev(sc->sc_dev, "hcc=%s\n", sbuf); 1444 aprint_debug_dev(sc->sc_dev, "xECP %" __PRIxBITS "\n", 1445 XHCI_HCC_XECP(sc->sc_hcc) * 4); 1446 if (hciversion >= XHCI_HCIVERSION_1_1) { 1447 sc->sc_hcc2 = xhci_cap_read_4(sc, XHCI_HCCPARAMS2); 1448 snprintb(sbuf, sizeof(sbuf), XHCI_HCC2_BITS, sc->sc_hcc2); 1449 aprint_debug_dev(sc->sc_dev, "hcc2=%s\n", sbuf); 1450 } 1451 1452 /* default all ports to bus 0, i.e. 
usb 3 */ 1453 sc->sc_ctlrportbus = kmem_zalloc( 1454 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY), KM_SLEEP); 1455 sc->sc_ctlrportmap = 1456 kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP); 1457 1458 /* controller port to bus roothub port map */ 1459 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) { 1460 sc->sc_rhportmap[j] = 1461 kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP); 1462 } 1463 1464 /* 1465 * Process all Extended Capabilities 1466 */ 1467 xhci_ecp(sc); 1468 1469 bsz = XHCI_PORTSC(sc->sc_maxports); 1470 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, caplength, bsz, 1471 &sc->sc_obh) != 0) { 1472 aprint_error_dev(sc->sc_dev, "operational subregion failure\n"); 1473 return ENOMEM; 1474 } 1475 1476 dboff = xhci_cap_read_4(sc, XHCI_DBOFF); 1477 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, dboff, 1478 sc->sc_maxslots * 4, &sc->sc_dbh) != 0) { 1479 aprint_error_dev(sc->sc_dev, "doorbell subregion failure\n"); 1480 return ENOMEM; 1481 } 1482 1483 rtsoff = xhci_cap_read_4(sc, XHCI_RTSOFF); 1484 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, rtsoff, 1485 sc->sc_maxintrs * 0x20, &sc->sc_rbh) != 0) { 1486 aprint_error_dev(sc->sc_dev, "runtime subregion failure\n"); 1487 return ENOMEM; 1488 } 1489 1490 int rv; 1491 rv = xhci_hc_reset(sc); 1492 if (rv != 0) { 1493 return rv; 1494 } 1495 1496 if (sc->sc_vendor_init) 1497 sc->sc_vendor_init(sc); 1498 1499 pagesize = xhci_op_read_4(sc, XHCI_PAGESIZE); 1500 aprint_debug_dev(sc->sc_dev, "PAGESIZE 0x%08x\n", pagesize); 1501 pagesize = ffs(pagesize); 1502 if (pagesize == 0) { 1503 aprint_error_dev(sc->sc_dev, "pagesize is 0\n"); 1504 return EIO; 1505 } 1506 sc->sc_pgsz = 1 << (12 + (pagesize - 1)); 1507 aprint_debug_dev(sc->sc_dev, "sc_pgsz 0x%08x\n", (uint32_t)sc->sc_pgsz); 1508 aprint_debug_dev(sc->sc_dev, "sc_maxslots 0x%08x\n", 1509 (uint32_t)sc->sc_maxslots); 1510 aprint_debug_dev(sc->sc_dev, "sc_maxports %d\n", sc->sc_maxports); 1511 1512 int err; 1513 sc->sc_maxspbuf = XHCI_HCS2_MAXSPBUF(hcs2); 1514 aprint_debug_dev(sc->sc_dev, "sc_maxspbuf %d\n", sc->sc_maxspbuf); 1515 if (sc->sc_maxspbuf != 0) { 1516 err = usb_allocmem(sc->sc_bus.ub_dmatag, 1517 sizeof(uint64_t) * sc->sc_maxspbuf, sizeof(uint64_t), 1518 USBMALLOC_ZERO, &sc->sc_spbufarray_dma); 1519 if (err) { 1520 aprint_error_dev(sc->sc_dev, 1521 "spbufarray init fail, err %d\n", err); 1522 return ENOMEM; 1523 } 1524 1525 sc->sc_spbuf_dma = kmem_zalloc(sizeof(*sc->sc_spbuf_dma) * 1526 sc->sc_maxspbuf, KM_SLEEP); 1527 uint64_t *spbufarray = KERNADDR(&sc->sc_spbufarray_dma, 0); 1528 for (i = 0; i < sc->sc_maxspbuf; i++) { 1529 usb_dma_t * const dma = &sc->sc_spbuf_dma[i]; 1530 /* allocate contexts */ 1531 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, 1532 sc->sc_pgsz, USBMALLOC_ZERO, dma); 1533 if (err) { 1534 aprint_error_dev(sc->sc_dev, 1535 "spbufarray_dma init fail, err %d\n", err); 1536 rv = ENOMEM; 1537 goto bad1; 1538 } 1539 spbufarray[i] = htole64(DMAADDR(dma, 0)); 1540 usb_syncmem(dma, 0, sc->sc_pgsz, 1541 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1542 } 1543 1544 usb_syncmem(&sc->sc_spbufarray_dma, 0, 1545 sizeof(uint64_t) * sc->sc_maxspbuf, BUS_DMASYNC_PREWRITE); 1546 } 1547 1548 config = xhci_op_read_4(sc, XHCI_CONFIG); 1549 config &= ~0xFF; 1550 config |= sc->sc_maxslots & 0xFF; 1551 xhci_op_write_4(sc, XHCI_CONFIG, config); 1552 1553 err = xhci_ring_init(sc, &sc->sc_cr, XHCI_COMMAND_RING_TRBS, 1554 XHCI_COMMAND_RING_SEGMENTS_ALIGN); 1555 if (err) { 1556 aprint_error_dev(sc->sc_dev, "command ring init fail, err %d\n", 1557 err); 1558 rv 
= ENOMEM; 1559 goto bad1; 1560 } 1561 1562 err = xhci_ring_init(sc, &sc->sc_er, XHCI_EVENT_RING_TRBS, 1563 XHCI_EVENT_RING_SEGMENTS_ALIGN); 1564 if (err) { 1565 aprint_error_dev(sc->sc_dev, "event ring init fail, err %d\n", 1566 err); 1567 rv = ENOMEM; 1568 goto bad2; 1569 } 1570 1571 usb_dma_t *dma; 1572 size_t size; 1573 size_t align; 1574 1575 dma = &sc->sc_eventst_dma; 1576 size = roundup2(XHCI_EVENT_RING_SEGMENTS * XHCI_ERSTE_SIZE, 1577 XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN); 1578 KASSERTMSG(size <= (512 * 1024), "eventst size %zu too large", size); 1579 align = XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN; 1580 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align, 1581 USBMALLOC_ZERO, dma); 1582 if (err) { 1583 aprint_error_dev(sc->sc_dev, "eventst init fail, err %d\n", 1584 err); 1585 rv = ENOMEM; 1586 goto bad3; 1587 } 1588 1589 aprint_debug_dev(sc->sc_dev, "eventst: 0x%016jx %p %zx\n", 1590 (uintmax_t)DMAADDR(&sc->sc_eventst_dma, 0), 1591 KERNADDR(&sc->sc_eventst_dma, 0), 1592 sc->sc_eventst_dma.udma_block->size); 1593 1594 dma = &sc->sc_dcbaa_dma; 1595 size = (1 + sc->sc_maxslots) * sizeof(uint64_t); 1596 KASSERTMSG(size <= 2048, "dcbaa size %zu too large", size); 1597 align = XHCI_DEVICE_CONTEXT_BASE_ADDRESS_ARRAY_ALIGN; 1598 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align, 1599 USBMALLOC_ZERO, dma); 1600 if (err) { 1601 aprint_error_dev(sc->sc_dev, "dcbaa init fail, err %d\n", err); 1602 rv = ENOMEM; 1603 goto bad4; 1604 } 1605 aprint_debug_dev(sc->sc_dev, "dcbaa: 0x%016jx %p %zx\n", 1606 (uintmax_t)DMAADDR(&sc->sc_dcbaa_dma, 0), 1607 KERNADDR(&sc->sc_dcbaa_dma, 0), 1608 sc->sc_dcbaa_dma.udma_block->size); 1609 1610 if (sc->sc_maxspbuf != 0) { 1611 /* 1612 * DCBA entry 0 hold the scratchbuf array pointer. 1613 */ 1614 *(uint64_t *)KERNADDR(dma, 0) = 1615 htole64(DMAADDR(&sc->sc_spbufarray_dma, 0)); 1616 usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE); 1617 } 1618 1619 sc->sc_slots = kmem_zalloc(sizeof(*sc->sc_slots) * sc->sc_maxslots, 1620 KM_SLEEP); 1621 if (sc->sc_slots == NULL) { 1622 aprint_error_dev(sc->sc_dev, "slots init fail, err %d\n", err); 1623 rv = ENOMEM; 1624 goto bad; 1625 } 1626 1627 sc->sc_xferpool = pool_cache_init(sizeof(struct xhci_xfer), 0, 0, 0, 1628 "xhcixfer", NULL, IPL_USB, NULL, NULL, NULL); 1629 if (sc->sc_xferpool == NULL) { 1630 aprint_error_dev(sc->sc_dev, "pool_cache init fail, err %d\n", 1631 err); 1632 rv = ENOMEM; 1633 goto bad; 1634 } 1635 1636 cv_init(&sc->sc_command_cv, "xhcicmd"); 1637 cv_init(&sc->sc_cmdbusy_cv, "xhcicmdq"); 1638 mutex_init(&sc->sc_rhlock, MUTEX_DEFAULT, IPL_NONE); 1639 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB); 1640 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB); 1641 1642 struct xhci_erste *erst; 1643 erst = KERNADDR(&sc->sc_eventst_dma, 0); 1644 erst[0].erste_0 = htole64(xhci_ring_trbp(sc->sc_er, 0)); 1645 erst[0].erste_2 = htole32(sc->sc_er->xr_ntrb); 1646 erst[0].erste_3 = htole32(0); 1647 usb_syncmem(&sc->sc_eventst_dma, 0, 1648 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS, BUS_DMASYNC_PREWRITE); 1649 1650 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), XHCI_EVENT_RING_SEGMENTS); 1651 xhci_rt_write_8(sc, XHCI_ERSTBA(0), DMAADDR(&sc->sc_eventst_dma, 0)); 1652 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(sc->sc_er, 0) | 1653 XHCI_ERDP_BUSY); 1654 1655 xhci_op_write_8(sc, XHCI_DCBAAP, DMAADDR(&sc->sc_dcbaa_dma, 0)); 1656 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) | 1657 sc->sc_cr->xr_cs); 1658 1659 HEXDUMP("eventst", KERNADDR(&sc->sc_eventst_dma, 0), 1660 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS); 
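	/*
	 * Unless the attachment code has asked to defer controller start
	 * (XHCI_DEFERRED_START quirk), in which case it is expected to
	 * call xhci_start() itself later, start the controller now.
	 */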
	if ((sc->sc_quirks & XHCI_DEFERRED_START) == 0)
		xhci_start(sc);

	return 0;

bad:
	if (sc->sc_xferpool) {
		pool_cache_destroy(sc->sc_xferpool);
		sc->sc_xferpool = NULL;
	}

	if (sc->sc_slots) {
		kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) *
		    sc->sc_maxslots);
		sc->sc_slots = NULL;
	}

	usb_freemem(&sc->sc_dcbaa_dma);
bad4:
	usb_freemem(&sc->sc_eventst_dma);
bad3:
	xhci_ring_free(sc, &sc->sc_er);
bad2:
	xhci_ring_free(sc, &sc->sc_cr);
	i = sc->sc_maxspbuf;
bad1:
	for (int j = 0; j < i; j++)
		usb_freemem(&sc->sc_spbuf_dma[j]);
	usb_freemem(&sc->sc_spbufarray_dma);

	return rv;
}

static inline bool
xhci_polling_p(struct xhci_softc * const sc)
{
	return sc->sc_bus.ub_usepolling || sc->sc_bus2.ub_usepolling;
}

int
xhci_intr(void *v)
{
	struct xhci_softc * const sc = v;
	int ret = 0;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	if (sc == NULL)
		return 0;

	mutex_spin_enter(&sc->sc_intr_lock);

	if (sc->sc_dying || !device_has_power(sc->sc_dev))
		goto done;

	/* If we get an interrupt while polling, then just ignore it. */
	if (xhci_polling_p(sc)) {
#ifdef DIAGNOSTIC
		DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
#endif
		goto done;
	}

	ret = xhci_intr1(sc);
	if (ret) {
		KASSERT(sc->sc_child || sc->sc_child2);

		/*
		 * One of the child buses could already be detached.  It
		 * doesn't matter on which of the two the softintr is
		 * scheduled.
		 */
		if (sc->sc_child)
			usb_schedsoftintr(&sc->sc_bus);
		else
			usb_schedsoftintr(&sc->sc_bus2);
	}
done:
	mutex_spin_exit(&sc->sc_intr_lock);
	return ret;
}

int
xhci_intr1(struct xhci_softc * const sc)
{
	uint32_t usbsts;
	uint32_t iman;

	XHCIHIST_FUNC();

	usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
	XHCIHIST_CALLARGS("USBSTS 0x%08jx", usbsts, 0, 0, 0);
	if ((usbsts & (XHCI_STS_HSE | XHCI_STS_EINT | XHCI_STS_PCD |
	    XHCI_STS_HCE)) == 0) {
		DPRINTFN(16, "ignored intr not for %jd",
		    device_unit(sc->sc_dev), 0, 0, 0);
		return 0;
	}

	/*
	 * Clear EINT and other transient flags so the next shared
	 * interrupt is not misinterpreted.  Also, to avoid a race, EINT
	 * must be cleared before XHCI_IMAN_INTR_PEND is cleared.
	 */
	xhci_op_write_4(sc, XHCI_USBSTS, usbsts & ~XHCI_STS_RSVDP0);

#ifdef XHCI_DEBUG
	usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
	DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
#endif

	iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
	DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
	iman |= XHCI_IMAN_INTR_PEND;
	xhci_rt_write_4(sc, XHCI_IMAN(0), iman);

#ifdef XHCI_DEBUG
	iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
	DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
	usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
	DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
#endif

	return 1;
}

/*
 * Three port speed encodings are used in the USB stack:
 *
 * usbdi speed
 *	definition: USB_SPEED_* in usb.h
 *	Used in struct usbd_device in the USB stack; the ioctl interface
 *	uses these values too.
 * port_status speed
 *	definition: UPS_*_SPEED in usb.h
 *	Used in usb_port_status_t and valid only for USB 2.0.
 *	The speed value is always 0 for Super Speed or faster, and
 *	dwExtPortStatus of usb_port_status_ext_t indicates the port speed.
 *	Note that some 3.0 values overlap with 2.0 values.
 *	(e.g. 0x200 means UPS_PORT_POWER_SS in SS and
 *	 means UPS_LOW_SPEED in HS.)
 *	Port status returned from a hub also uses these values.
 *	On NetBSD, UPS_OTHER_SPEED indicates that the port speed is
 *	Super Speed or faster.
 * xspeed
 *	definition: Protocol Speed ID (PSI) (xHCI 1.1 7.2.1)
 *	Used only in the slot context and the PORTSC registers of xHCI.
 *	The difference between usbdi speed and xspeed is that the FS and
 *	LS values are swapped.
 */

/* convert usbdi speed to xspeed */
static int
xhci_speed2xspeed(int speed)
{
	switch (speed) {
	case USB_SPEED_LOW:	return 2;
	case USB_SPEED_FULL:	return 1;
	default:		return speed;
	}
}

#if 0
/* convert xspeed to usbdi speed */
static int
xhci_xspeed2speed(int xspeed)
{
	switch (xspeed) {
	case 1: return USB_SPEED_FULL;
	case 2: return USB_SPEED_LOW;
	default: return xspeed;
	}
}
#endif

/* convert xspeed to port status speed */
static int
xhci_xspeed2psspeed(int xspeed)
{
	switch (xspeed) {
	case 0: return 0;
	case 1: return UPS_FULL_SPEED;
	case 2: return UPS_LOW_SPEED;
	case 3: return UPS_HIGH_SPEED;
	default: return UPS_OTHER_SPEED;
	}
}

/*
 * Construct input contexts and issue a Configure Endpoint TRB to open
 * the pipe.
 */
static usbd_status
xhci_configure_endpoint(struct usbd_pipe *pipe)
{
	struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
	struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
#ifdef USB_DEBUG
	const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
#endif
	struct xhci_soft_trb trb;
	usbd_status err;

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("slot %ju dci %ju epaddr 0x%02jx attr 0x%02jx",
	    xs->xs_idx, dci, pipe->up_endpoint->ue_edesc->bEndpointAddress,
	    pipe->up_endpoint->ue_edesc->bmAttributes);

	/* XXX ensure input context is available?
*/ 1869 1870 memset(xhci_slot_get_icv(sc, xs, 0), 0, sc->sc_pgsz); 1871 1872 /* set up context */ 1873 xhci_setup_ctx(pipe); 1874 1875 HEXDUMP("input control context", xhci_slot_get_icv(sc, xs, 0), 1876 sc->sc_ctxsz * 1); 1877 HEXDUMP("input endpoint context", xhci_slot_get_icv(sc, xs, 1878 xhci_dci_to_ici(dci)), sc->sc_ctxsz * 1); 1879 1880 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0); 1881 trb.trb_2 = 0; 1882 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1883 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP); 1884 1885 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 1886 1887 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 1888 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, dci), 1889 sc->sc_ctxsz * 1); 1890 1891 return err; 1892 } 1893 1894 #if 0 1895 static usbd_status 1896 xhci_unconfigure_endpoint(struct usbd_pipe *pipe) 1897 { 1898 #ifdef USB_DEBUG 1899 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1900 #endif 1901 1902 XHCIHIST_FUNC(); 1903 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0); 1904 1905 return USBD_NORMAL_COMPLETION; 1906 } 1907 #endif 1908 1909 /* 4.6.8, 6.4.3.7 */ 1910 static void 1911 xhci_reset_endpoint(struct usbd_pipe *pipe) 1912 { 1913 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1914 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1915 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 1916 struct xhci_soft_trb trb; 1917 1918 XHCIHIST_FUNC(); 1919 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 1920 1921 KASSERT(mutex_owned(&sc->sc_lock)); 1922 1923 trb.trb_0 = 0; 1924 trb.trb_2 = 0; 1925 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1926 XHCI_TRB_3_EP_SET(dci) | 1927 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP); 1928 1929 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) { 1930 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n", 1931 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress); 1932 } 1933 } 1934 1935 /* 1936 * 4.6.9, 6.4.3.8 1937 * Stop execution of TDs on xfer ring. 1938 * Should be called with sc_lock held. 1939 */ 1940 static usbd_status 1941 xhci_stop_endpoint_cmd(struct xhci_softc *sc, struct xhci_slot *xs, u_int dci, 1942 uint32_t trb3flags) 1943 { 1944 struct xhci_soft_trb trb; 1945 usbd_status err; 1946 1947 XHCIHIST_FUNC(); 1948 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 1949 1950 KASSERT(mutex_owned(&sc->sc_lock)); 1951 1952 trb.trb_0 = 0; 1953 trb.trb_2 = 0; 1954 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1955 XHCI_TRB_3_EP_SET(dci) | 1956 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP) | 1957 trb3flags; 1958 1959 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 1960 1961 return err; 1962 } 1963 1964 static usbd_status 1965 xhci_stop_endpoint(struct usbd_pipe *pipe) 1966 { 1967 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1968 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1969 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 1970 1971 XHCIHIST_FUNC(); 1972 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 1973 1974 KASSERT(mutex_owned(&sc->sc_lock)); 1975 1976 return xhci_stop_endpoint_cmd(sc, xs, dci, 0); 1977 } 1978 1979 /* 1980 * Set TR Dequeue Pointer. 1981 * xHCI 1.1 4.6.10 6.4.3.9 1982 * Purge all of the TRBs on ring and reinitialize ring. 1983 * Set TR dequeue Pointer to 0 and Cycle State to 1. 1984 * EPSTATE of endpoint must be ERROR or STOPPED, otherwise CONTEXT_STATE 1985 * error will be generated. 
1985 * error will be generated.
1986 */ 1987 static void 1988 xhci_set_dequeue(struct usbd_pipe *pipe) 1989 { 1990 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1991 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1992 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 1993 struct xhci_ring * const xr = xs->xs_xr[dci]; 1994 struct xhci_soft_trb trb; 1995 1996 XHCIHIST_FUNC(); 1997 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 1998 1999 KASSERT(mutex_owned(&sc->sc_lock)); 2000 KASSERT(xr != NULL); 2001 2002 xhci_host_dequeue(xr); 2003 2004 /* set DCS */ 2005 trb.trb_0 = xhci_ring_trbp(xr, 0) | 1; /* XXX */ 2006 trb.trb_2 = 0; 2007 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 2008 XHCI_TRB_3_EP_SET(dci) | 2009 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE); 2010 2011 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) { 2012 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n", 2013 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress); 2014 } 2015 } 2016 2017 /* 2018 * Open new pipe: called from usbd_setup_pipe_flags. 2019 * Fills methods of pipe. 2020 * If pipe is not for ep0, calls configure_endpoint. 2021 */ 2022 static usbd_status 2023 xhci_open(struct usbd_pipe *pipe) 2024 { 2025 struct usbd_device * const dev = pipe->up_dev; 2026 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe; 2027 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus); 2028 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2029 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 2030 const u_int dci = xhci_ep_get_dci(ed); 2031 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 2032 usbd_status err; 2033 2034 XHCIHIST_FUNC(); 2035 XHCIHIST_CALLARGS("addr %jd depth %jd port %jd speed %jd", dev->ud_addr, 2036 dev->ud_depth, dev->ud_powersrc->up_portno, dev->ud_speed); 2037 DPRINTFN(1, " dci %ju type 0x%02jx epaddr 0x%02jx attr 0x%02jx", 2038 xhci_ep_get_dci(ed), ed->bDescriptorType, ed->bEndpointAddress, 2039 ed->bmAttributes); 2040 DPRINTFN(1, " mps %ju ival %ju", UGETW(ed->wMaxPacketSize), 2041 ed->bInterval, 0, 0); 2042 2043 if (sc->sc_dying) 2044 return USBD_IOERROR; 2045 2046 /* Root Hub */ 2047 if (dev->ud_depth == 0 && dev->ud_powersrc->up_portno == 0) { 2048 switch (ed->bEndpointAddress) { 2049 case USB_CONTROL_ENDPOINT: 2050 pipe->up_methods = &roothub_ctrl_methods; 2051 break; 2052 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT: 2053 pipe->up_methods = &xhci_root_intr_methods; 2054 break; 2055 default: 2056 pipe->up_methods = NULL; 2057 DPRINTFN(0, "bad bEndpointAddress 0x%02jx", 2058 ed->bEndpointAddress, 0, 0, 0); 2059 return USBD_INVAL; 2060 } 2061 return USBD_NORMAL_COMPLETION; 2062 } 2063 2064 usb_init_task(&xpipe->xp_async_task, xhci_pipe_restart_async_task, 2065 pipe, USB_TASKQ_MPSAFE); 2066 2067 switch (xfertype) { 2068 case UE_CONTROL: 2069 pipe->up_methods = &xhci_device_ctrl_methods; 2070 break; 2071 case UE_ISOCHRONOUS: 2072 pipe->up_methods = &xhci_device_isoc_methods; 2073 pipe->up_serialise = false; 2074 xpipe->xp_isoc_next = -1; 2075 break; 2076 case UE_BULK: 2077 pipe->up_methods = &xhci_device_bulk_methods; 2078 break; 2079 case UE_INTERRUPT: 2080 pipe->up_methods = &xhci_device_intr_methods; 2081 break; 2082 default: 2083 return USBD_IOERROR; 2084 break; 2085 } 2086 2087 KASSERT(xs != NULL); 2088 KASSERT(xs->xs_xr[dci] == NULL); 2089 2090 /* allocate transfer ring */ 2091 err = xhci_ring_init(sc, &xs->xs_xr[dci], XHCI_TRANSFER_RING_TRBS, 2092 XHCI_TRB_ALIGN); 2093 if (err) { 2094 DPRINTFN(1, "ring alloc failed %jd", err, 0, 0, 
0); 2095 return err; 2096 } 2097 2098 if (ed->bEndpointAddress != USB_CONTROL_ENDPOINT) 2099 return xhci_configure_endpoint(pipe); 2100 2101 return USBD_NORMAL_COMPLETION; 2102 } 2103 2104 /* 2105 * Closes pipe, called from usbd_kill_pipe via close methods. 2106 * If the endpoint to be closed is ep0, disable_slot. 2107 * Should be called with sc_lock held. 2108 */ 2109 static void 2110 xhci_close_pipe(struct usbd_pipe *pipe) 2111 { 2112 struct xhci_pipe * const xp = 2113 container_of(pipe, struct xhci_pipe, xp_pipe); 2114 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 2115 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2116 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 2117 const u_int dci = xhci_ep_get_dci(ed); 2118 struct xhci_soft_trb trb; 2119 uint32_t *cp; 2120 2121 XHCIHIST_FUNC(); 2122 2123 usb_rem_task_wait(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC, 2124 &sc->sc_lock); 2125 2126 if (sc->sc_dying) 2127 return; 2128 2129 /* xs is uninitialized before xhci_init_slot */ 2130 if (xs == NULL || xs->xs_idx == 0) 2131 return; 2132 2133 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju", 2134 (uintptr_t)pipe, xs->xs_idx, dci, 0); 2135 2136 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx"); 2137 KASSERT(mutex_owned(&sc->sc_lock)); 2138 2139 if (pipe->up_dev->ud_depth == 0) 2140 return; 2141 2142 if (dci == XHCI_DCI_EP_CONTROL) { 2143 DPRINTFN(4, "closing ep0", 0, 0, 0, 0); 2144 /* This frees all rings */ 2145 xhci_disable_slot(sc, xs->xs_idx); 2146 return; 2147 } 2148 2149 if (xhci_get_epstate(sc, xs, dci) != XHCI_EPSTATE_STOPPED) 2150 (void)xhci_stop_endpoint(pipe); 2151 2152 /* 2153 * set appropriate bit to be dropped. 2154 * don't set DC bit to 1, otherwise all endpoints 2155 * would be deconfigured. 2156 */ 2157 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL); 2158 cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci)); 2159 cp[1] = htole32(0); 2160 2161 /* XXX should be most significant one, not dci? */ 2162 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT)); 2163 cp[0] = htole32(XHCI_SCTX_0_CTX_NUM_SET(dci)); 2164 2165 /* configure ep context performs an implicit dequeue */ 2166 xhci_host_dequeue(xs->xs_xr[dci]); 2167 2168 /* sync input contexts before they are read from memory */ 2169 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE); 2170 2171 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0); 2172 trb.trb_2 = 0; 2173 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 2174 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP); 2175 2176 (void)xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 2177 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 2178 2179 xhci_ring_free(sc, &xs->xs_xr[dci]); 2180 xs->xs_xr[dci] = NULL; 2181 } 2182 2183 /* 2184 * Abort transfer. Must be called with sc_lock held. Releases and 2185 * reacquires sc_lock to sleep until hardware acknowledges abort. 
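 * The actual work is done by xhci_pipe_restart() below, which resets or
 * stops the endpoint as appropriate and then clears its transfer ring
 * with a Set TR Dequeue Pointer command.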
2186 */ 2187 static void 2188 xhci_abortx(struct usbd_xfer *xfer) 2189 { 2190 XHCIHIST_FUNC(); 2191 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 2192 2193 XHCIHIST_CALLARGS("xfer %#jx pipe %#jx", 2194 (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0); 2195 2196 KASSERT(mutex_owned(&sc->sc_lock)); 2197 KASSERTMSG((xfer->ux_status == USBD_CANCELLED || 2198 xfer->ux_status == USBD_TIMEOUT), 2199 "bad abort status: %d", xfer->ux_status); 2200 2201 xhci_pipe_restart(xfer->ux_pipe); 2202 2203 DPRINTFN(14, "end", 0, 0, 0, 0); 2204 } 2205 2206 static void 2207 xhci_host_dequeue(struct xhci_ring * const xr) 2208 { 2209 /* When dequeueing the controller, update our struct copy too */ 2210 memset(xr->xr_trb, 0, xr->xr_ntrb * XHCI_TRB_SIZE); 2211 usb_syncmem(&xr->xr_dma, 0, xr->xr_ntrb * XHCI_TRB_SIZE, 2212 BUS_DMASYNC_PREWRITE); 2213 memset(xr->xr_cookies, 0, xr->xr_ntrb * sizeof(*xr->xr_cookies)); 2214 2215 xr->xr_ep = 0; 2216 xr->xr_cs = 1; 2217 } 2218 2219 /* 2220 * Recover STALLed endpoint, or stop endpoint to abort a pipe. 2221 * xHCI 1.1 sect 4.10.2.1 2222 * Issue RESET_EP to recover halt condition and SET_TR_DEQUEUE to remove 2223 * all transfers on transfer ring. 2224 */ 2225 static void 2226 xhci_pipe_restart(struct usbd_pipe *pipe) 2227 { 2228 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 2229 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2230 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 2231 2232 XHCIHIST_FUNC(); 2233 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju", 2234 (uintptr_t)pipe, xs->xs_idx, dci, 0); 2235 2236 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 2237 2238 /* 2239 * - If the endpoint is halted, indicating a stall, reset it. 2240 * - If the endpoint is stopped, we're already good. 2241 * - Otherwise, someone wanted to abort the pipe, so stop the 2242 * endpoint. 2243 * 2244 * In any case, clear the ring. 2245 */ 2246 switch (xhci_get_epstate(sc, xs, dci)) { 2247 case XHCI_EPSTATE_HALTED: 2248 xhci_reset_endpoint(pipe); 2249 break; 2250 case XHCI_EPSTATE_STOPPED: 2251 break; 2252 default: 2253 xhci_stop_endpoint(pipe); 2254 break; 2255 } 2256 2257 switch (xhci_get_epstate(sc, xs, dci)) { 2258 case XHCI_EPSTATE_STOPPED: 2259 break; 2260 case XHCI_EPSTATE_ERROR: 2261 device_printf(sc->sc_dev, "endpoint 0x%x error\n", 2262 pipe->up_endpoint->ue_edesc->bEndpointAddress); 2263 break; 2264 default: 2265 device_printf(sc->sc_dev, "endpoint 0x%x failed to stop\n", 2266 pipe->up_endpoint->ue_edesc->bEndpointAddress); 2267 } 2268 2269 xhci_set_dequeue(pipe); 2270 2271 DPRINTFN(4, "ends", 0, 0, 0, 0); 2272 } 2273 2274 static void 2275 xhci_pipe_restart_async_task(void *cookie) 2276 { 2277 struct usbd_pipe * const pipe = cookie; 2278 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 2279 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2280 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 2281 struct xhci_ring * const tr = xs->xs_xr[dci]; 2282 struct usbd_xfer *xfer; 2283 2284 XHCIHIST_FUNC(); 2285 XHCIHIST_CALLARGS("sc=%#jx pipe=%#jx", 2286 (uintptr_t)sc, (uintptr_t)pipe, 0, 0); 2287 2288 mutex_enter(&sc->sc_lock); 2289 2290 xhci_pipe_restart(pipe); 2291 2292 /* 2293 * We halted our own queue because it stalled. Mark it no 2294 * longer halted and start issuing queued transfers again. 
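 * Only the xfer at the head of the queue is restarted, and only if it
 * is still USBD_IN_PROGRESS; see the race comment below.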
2295 */ 2296 tr->is_halted = false; 2297 xfer = SIMPLEQ_FIRST(&pipe->up_queue); 2298 if (xfer) { 2299 /* 2300 * If the first xfer of the queue is not in progress, 2301 * though, there may be a concurrent software abort 2302 * that has already cancelled it and is now in the 2303 * middle of a concurrent xhci_pipe_restart waiting to 2304 * reacquire the pipe (bus) lock. So only restart the 2305 * xfer if it's still USBD_IN_PROGRESS. 2306 * 2307 * Either way, xfers on the queue can't be in 2308 * USBD_NOT_STARTED. 2309 */ 2310 KASSERT(xfer->ux_status != USBD_NOT_STARTED); 2311 if (xfer->ux_status == USBD_IN_PROGRESS) { 2312 (*pipe->up_methods->upm_start)(xfer); 2313 } else { 2314 DPRINTF("pipe restart race xfer=%#jx status=%jd", 2315 (uintptr_t)xfer, xfer->ux_status, 0, 0); 2316 } 2317 } 2318 2319 mutex_exit(&sc->sc_lock); 2320 } 2321 2322 static void 2323 xhci_pipe_restart_async(struct usbd_pipe *pipe) 2324 { 2325 struct xhci_pipe * const xp = 2326 container_of(pipe, struct xhci_pipe, xp_pipe); 2327 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 2328 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2329 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 2330 struct xhci_ring * const tr = xs->xs_xr[dci]; 2331 2332 XHCIHIST_FUNC(); 2333 XHCIHIST_CALLARGS("pipe %#jx", (uintptr_t)pipe, 0, 0, 0); 2334 2335 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 2336 2337 tr->is_halted = true; 2338 usb_add_task(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC); 2339 2340 DPRINTFN(4, "ends", 0, 0, 0, 0); 2341 } 2342 2343 /* Process roothub port status/change events and notify to uhub_intr. */ 2344 static void 2345 xhci_rhpsc(struct xhci_softc * const sc, u_int ctlrport) 2346 { 2347 XHCIHIST_FUNC(); 2348 XHCIHIST_CALLARGS("xhci%jd: port %ju status change", 2349 device_unit(sc->sc_dev), ctlrport, 0, 0); 2350 2351 if (ctlrport > sc->sc_maxports) 2352 return; 2353 2354 const size_t bn = xhci_ctlrport2bus(sc, ctlrport); 2355 const size_t rhp = xhci_ctlrport2rhport(sc, ctlrport); 2356 struct usbd_xfer * const xfer = sc->sc_intrxfer[bn]; 2357 2358 DPRINTFN(4, "xhci%jd: bus %jd bp %ju xfer %#jx status change", 2359 device_unit(sc->sc_dev), bn, rhp, (uintptr_t)xfer); 2360 2361 if (xfer == NULL) 2362 return; 2363 KASSERT(xfer->ux_status == USBD_IN_PROGRESS); 2364 2365 uint8_t *p = xfer->ux_buf; 2366 if (!xhci_polling_p(sc) || !sc->sc_intrxfer_deferred[bn]) 2367 memset(p, 0, xfer->ux_length); 2368 p[rhp / NBBY] |= 1 << (rhp % NBBY); 2369 xfer->ux_actlen = xfer->ux_length; 2370 xfer->ux_status = USBD_NORMAL_COMPLETION; 2371 if (xhci_polling_p(sc)) 2372 sc->sc_intrxfer_deferred[bn] = true; 2373 else 2374 usb_transfer_complete(xfer); 2375 } 2376 2377 /* Process Transfer Events */ 2378 static void 2379 xhci_event_transfer(struct xhci_softc * const sc, 2380 const struct xhci_trb * const trb) 2381 { 2382 uint64_t trb_0; 2383 uint32_t trb_2, trb_3; 2384 uint8_t trbcode; 2385 u_int slot, dci; 2386 struct xhci_slot *xs; 2387 struct xhci_ring *xr; 2388 struct xhci_xfer *xx; 2389 struct usbd_xfer *xfer; 2390 usbd_status err; 2391 2392 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2393 2394 trb_0 = le64toh(trb->trb_0); 2395 trb_2 = le32toh(trb->trb_2); 2396 trb_3 = le32toh(trb->trb_3); 2397 trbcode = XHCI_TRB_2_ERROR_GET(trb_2); 2398 slot = XHCI_TRB_3_SLOT_GET(trb_3); 2399 dci = XHCI_TRB_3_EP_GET(trb_3); 2400 xs = &sc->sc_slots[slot]; 2401 xr = xs->xs_xr[dci]; 2402 2403 /* sanity check */ 2404 KASSERT(xr != NULL); 2405 KASSERTMSG(xs->xs_idx != 0 && xs->xs_idx <= sc->sc_maxslots, 2406 "invalid xs_idx %u slot 
%u", xs->xs_idx, slot); 2407 2408 int idx = 0; 2409 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) { 2410 if (xhci_trb_get_idx(xr, trb_0, &idx)) { 2411 DPRINTFN(0, "invalid trb_0 %#jx", trb_0, 0, 0, 0); 2412 return; 2413 } 2414 xx = xr->xr_cookies[idx]; 2415 2416 /* clear cookie of consumed TRB */ 2417 xr->xr_cookies[idx] = NULL; 2418 2419 /* 2420 * xx is NULL if pipe is opened but xfer is not started. 2421 * It happens when stopping idle pipe. 2422 */ 2423 if (xx == NULL || trbcode == XHCI_TRB_ERROR_LENGTH) { 2424 DPRINTFN(1, "Ignore #%ju: cookie %#jx cc %ju dci %ju", 2425 idx, (uintptr_t)xx, trbcode, dci); 2426 DPRINTFN(1, " orig TRB %#jx type %ju", trb_0, 2427 XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)), 2428 0, 0); 2429 return; 2430 } 2431 } else { 2432 /* When ED != 0, trb_0 is virtual addr of struct xhci_xfer. */ 2433 xx = (void *)(uintptr_t)(trb_0 & ~0x3); 2434 } 2435 /* XXX this may not happen */ 2436 if (xx == NULL) { 2437 DPRINTFN(1, "xfer done: xx is NULL", 0, 0, 0, 0); 2438 return; 2439 } 2440 xfer = &xx->xx_xfer; 2441 /* XXX this may happen when detaching */ 2442 if (xfer == NULL) { 2443 DPRINTFN(1, "xx(%#jx)->xx_xfer is NULL trb_0 %#jx", 2444 (uintptr_t)xx, trb_0, 0, 0); 2445 return; 2446 } 2447 DPRINTFN(14, "xfer %#jx", (uintptr_t)xfer, 0, 0, 0); 2448 /* XXX I dunno why this happens */ 2449 KASSERTMSG(xfer->ux_pipe != NULL, "xfer(%p)->ux_pipe is NULL", xfer); 2450 2451 if (!xfer->ux_pipe->up_repeat && 2452 SIMPLEQ_EMPTY(&xfer->ux_pipe->up_queue)) { 2453 DPRINTFN(1, "xfer(%#jx)->pipe not queued", (uintptr_t)xfer, 2454 0, 0, 0); 2455 return; 2456 } 2457 2458 const uint8_t xfertype = 2459 UE_GET_XFERTYPE(xfer->ux_pipe->up_endpoint->ue_edesc->bmAttributes); 2460 2461 /* 4.11.5.2 Event Data TRB */ 2462 if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) { 2463 DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx" 2464 " %02jx", trb_0, XHCI_TRB_2_REM_GET(trb_2), trbcode, 0); 2465 if ((trb_0 & 0x3) == 0x3) { 2466 xfer->ux_actlen = XHCI_TRB_2_REM_GET(trb_2); 2467 } 2468 } 2469 2470 switch (trbcode) { 2471 case XHCI_TRB_ERROR_SHORT_PKT: 2472 case XHCI_TRB_ERROR_SUCCESS: 2473 /* 2474 * A ctrl transfer can generate two events if it has a Data 2475 * stage. A short data stage can be OK and should not 2476 * complete the transfer as the status stage needs to be 2477 * performed. 2478 * 2479 * Note: Data and Status stage events point at same xfer. 2480 * ux_actlen and ux_dmabuf will be passed to 2481 * usb_transfer_complete after the Status stage event. 2482 * 2483 * It can be distinguished which stage generates the event: 2484 * + by checking least 3 bits of trb_0 if ED==1. 2485 * (see xhci_device_ctrl_start). 2486 * + by checking the type of original TRB if ED==0. 2487 * 2488 * In addition, intr, bulk, and isoc transfer currently 2489 * consists of single TD, so the "skip" is not needed. 2490 * ctrl xfer uses EVENT_DATA, and others do not. 2491 * Thus driver can switch the flow by checking ED bit. 
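 * As an illustration (not normative, just how the code below behaves):
 * for a control-IN transfer whose Data stage is short, the first event
 * (SHORT_PKT pointing at the Data Stage TRB, or an Event Data TRB with
 * the low bits of trb_0 equal to 0x3) only records the length and
 * returns; the xfer is completed when the Status stage event arrives.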
2492 */ 2493 if (xfertype == UE_ISOCHRONOUS) { 2494 xfer->ux_frlengths[xx->xx_isoc_done] -= 2495 XHCI_TRB_2_REM_GET(trb_2); 2496 xfer->ux_actlen += xfer->ux_frlengths[xx->xx_isoc_done]; 2497 } else if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) { 2498 if (xfer->ux_actlen == 0) 2499 xfer->ux_actlen = xfer->ux_length - 2500 XHCI_TRB_2_REM_GET(trb_2); 2501 if (XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)) 2502 == XHCI_TRB_TYPE_DATA_STAGE) { 2503 return; 2504 } 2505 } else if ((trb_0 & 0x3) == 0x3) { 2506 return; 2507 } 2508 err = USBD_NORMAL_COMPLETION; 2509 break; 2510 case XHCI_TRB_ERROR_STOPPED: 2511 case XHCI_TRB_ERROR_LENGTH: 2512 case XHCI_TRB_ERROR_STOPPED_SHORT: 2513 err = USBD_IOERROR; 2514 break; 2515 case XHCI_TRB_ERROR_STALL: 2516 case XHCI_TRB_ERROR_BABBLE: 2517 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0); 2518 xhci_pipe_restart_async(xfer->ux_pipe); 2519 err = USBD_STALLED; 2520 break; 2521 default: 2522 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0); 2523 err = USBD_IOERROR; 2524 break; 2525 } 2526 2527 if (xfertype == UE_ISOCHRONOUS) { 2528 switch (trbcode) { 2529 case XHCI_TRB_ERROR_SHORT_PKT: 2530 case XHCI_TRB_ERROR_SUCCESS: 2531 break; 2532 case XHCI_TRB_ERROR_MISSED_SERVICE: 2533 case XHCI_TRB_ERROR_RING_UNDERRUN: 2534 case XHCI_TRB_ERROR_RING_OVERRUN: 2535 default: 2536 xfer->ux_frlengths[xx->xx_isoc_done] = 0; 2537 break; 2538 } 2539 if (++xx->xx_isoc_done < xfer->ux_nframes) 2540 return; 2541 } 2542 2543 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 || 2544 (trb_0 & 0x3) == 0x0) { 2545 /* 2546 * Try to claim this xfer for completion. If it has 2547 * already completed or aborted, drop it on the floor. 2548 */ 2549 if (!usbd_xfer_trycomplete(xfer)) 2550 return; 2551 2552 /* Set the status. */ 2553 xfer->ux_status = err; 2554 2555 usb_transfer_complete(xfer); 2556 } 2557 } 2558 2559 /* Process Command complete events */ 2560 static void 2561 xhci_event_cmd(struct xhci_softc * const sc, const struct xhci_trb * const trb) 2562 { 2563 uint64_t trb_0; 2564 uint32_t trb_2, trb_3; 2565 2566 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2567 2568 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 2569 2570 trb_0 = le64toh(trb->trb_0); 2571 trb_2 = le32toh(trb->trb_2); 2572 trb_3 = le32toh(trb->trb_3); 2573 2574 if (trb_0 == sc->sc_command_addr) { 2575 sc->sc_resultpending = false; 2576 2577 sc->sc_result_trb.trb_0 = trb_0; 2578 sc->sc_result_trb.trb_2 = trb_2; 2579 sc->sc_result_trb.trb_3 = trb_3; 2580 if (XHCI_TRB_2_ERROR_GET(trb_2) != 2581 XHCI_TRB_ERROR_SUCCESS) { 2582 DPRINTFN(1, "command completion " 2583 "failure: 0x%016jx 0x%08jx 0x%08jx", 2584 trb_0, trb_2, trb_3, 0); 2585 } 2586 cv_signal(&sc->sc_command_cv); 2587 } else { 2588 DPRINTFN(1, "spurious event: %#jx 0x%016jx " 2589 "0x%08jx 0x%08jx", (uintptr_t)trb, trb_0, trb_2, trb_3); 2590 } 2591 } 2592 2593 /* 2594 * Process events. 2595 * called from xhci_softintr 2596 */ 2597 static void 2598 xhci_handle_event(struct xhci_softc * const sc, 2599 const struct xhci_trb * const trb) 2600 { 2601 uint64_t trb_0; 2602 uint32_t trb_2, trb_3; 2603 2604 XHCIHIST_FUNC(); 2605 2606 trb_0 = le64toh(trb->trb_0); 2607 trb_2 = le32toh(trb->trb_2); 2608 trb_3 = le32toh(trb->trb_3); 2609 2610 XHCIHIST_CALLARGS("event: %#jx 0x%016jx 0x%08jx 0x%08jx", 2611 (uintptr_t)trb, trb_0, trb_2, trb_3); 2612 2613 /* 2614 * 4.11.3.1, 6.4.2.1 2615 * TRB Pointer is invalid for these completion codes. 
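 * The switch below therefore drops RING_UNDERRUN, RING_OVERRUN and
 * VF_RING_FULL outright, and ignores a zero TRB pointer for any other
 * completion code.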
2616 */ 2617 switch (XHCI_TRB_2_ERROR_GET(trb_2)) { 2618 case XHCI_TRB_ERROR_RING_UNDERRUN: 2619 case XHCI_TRB_ERROR_RING_OVERRUN: 2620 case XHCI_TRB_ERROR_VF_RING_FULL: 2621 return; 2622 default: 2623 if (trb_0 == 0) { 2624 return; 2625 } 2626 break; 2627 } 2628 2629 switch (XHCI_TRB_3_TYPE_GET(trb_3)) { 2630 case XHCI_TRB_EVENT_TRANSFER: 2631 xhci_event_transfer(sc, trb); 2632 break; 2633 case XHCI_TRB_EVENT_CMD_COMPLETE: 2634 xhci_event_cmd(sc, trb); 2635 break; 2636 case XHCI_TRB_EVENT_PORT_STS_CHANGE: 2637 xhci_rhpsc(sc, (uint32_t)((trb_0 >> 24) & 0xff)); 2638 break; 2639 default: 2640 break; 2641 } 2642 } 2643 2644 static void 2645 xhci_softintr(void *v) 2646 { 2647 struct usbd_bus * const bus = v; 2648 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2649 struct xhci_ring * const er = sc->sc_er; 2650 struct xhci_trb *trb; 2651 int i, j, k, bn; 2652 2653 XHCIHIST_FUNC(); 2654 2655 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 2656 2657 i = er->xr_ep; 2658 j = er->xr_cs; 2659 2660 XHCIHIST_CALLARGS("er: xr_ep %jd xr_cs %jd", i, j, 0, 0); 2661 2662 /* 2663 * Handle deferred root intr xfer, in case we just switched off 2664 * polling. It's not safe to complete root intr xfers while 2665 * polling -- too much kernel machinery gets involved. 2666 */ 2667 if (!xhci_polling_p(sc)) { 2668 for (bn = 0; bn < 2; bn++) { 2669 if (__predict_false(sc->sc_intrxfer_deferred[bn])) { 2670 sc->sc_intrxfer_deferred[bn] = false; 2671 usb_transfer_complete(sc->sc_intrxfer[bn]); 2672 } 2673 } 2674 } 2675 2676 while (1) { 2677 usb_syncmem(&er->xr_dma, XHCI_TRB_SIZE * i, XHCI_TRB_SIZE, 2678 BUS_DMASYNC_POSTREAD); 2679 trb = &er->xr_trb[i]; 2680 k = (le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0; 2681 2682 if (j != k) 2683 break; 2684 2685 xhci_handle_event(sc, trb); 2686 2687 i++; 2688 if (i == er->xr_ntrb) { 2689 i = 0; 2690 j ^= 1; 2691 } 2692 } 2693 2694 er->xr_ep = i; 2695 er->xr_cs = j; 2696 2697 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(er, er->xr_ep) | 2698 XHCI_ERDP_BUSY); 2699 2700 DPRINTFN(16, "ends", 0, 0, 0, 0); 2701 2702 return; 2703 } 2704 2705 static void 2706 xhci_poll(struct usbd_bus *bus) 2707 { 2708 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2709 2710 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2711 2712 mutex_enter(&sc->sc_intr_lock); 2713 int ret = xhci_intr1(sc); 2714 if (ret) { 2715 xhci_softintr(bus); 2716 } 2717 mutex_exit(&sc->sc_intr_lock); 2718 2719 return; 2720 } 2721 2722 static struct usbd_xfer * 2723 xhci_allocx(struct usbd_bus *bus, unsigned int nframes) 2724 { 2725 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2726 struct xhci_xfer *xx; 2727 u_int ntrbs; 2728 2729 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2730 2731 ntrbs = uimax(3, nframes); 2732 const size_t trbsz = sizeof(*xx->xx_trb) * ntrbs; 2733 2734 xx = pool_cache_get(sc->sc_xferpool, PR_WAITOK); 2735 if (xx != NULL) { 2736 memset(xx, 0, sizeof(*xx)); 2737 if (ntrbs > 0) { 2738 xx->xx_trb = kmem_alloc(trbsz, KM_SLEEP); 2739 xx->xx_ntrb = ntrbs; 2740 } 2741 #ifdef DIAGNOSTIC 2742 xx->xx_xfer.ux_state = XFER_BUSY; 2743 #endif 2744 } 2745 2746 return &xx->xx_xfer; 2747 } 2748 2749 static void 2750 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer) 2751 { 2752 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2753 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 2754 2755 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2756 2757 #ifdef DIAGNOSTIC 2758 if (xfer->ux_state != XFER_BUSY && 2759 xfer->ux_status != USBD_NOT_STARTED) { 2760 DPRINTFN(0, "xfer=%#jx not busy, 0x%08jx", 2761 (uintptr_t)xfer, 
xfer->ux_state, 0, 0); 2762 } 2763 xfer->ux_state = XFER_FREE; 2764 #endif 2765 if (xx->xx_ntrb > 0) { 2766 kmem_free(xx->xx_trb, xx->xx_ntrb * sizeof(*xx->xx_trb)); 2767 xx->xx_trb = NULL; 2768 xx->xx_ntrb = 0; 2769 } 2770 pool_cache_put(sc->sc_xferpool, xx); 2771 } 2772 2773 static bool 2774 xhci_dying(struct usbd_bus *bus) 2775 { 2776 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2777 2778 return sc->sc_dying; 2779 } 2780 2781 static void 2782 xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock) 2783 { 2784 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2785 2786 *lock = &sc->sc_lock; 2787 } 2788 2789 extern uint32_t usb_cookie_no; 2790 2791 /* 2792 * xHCI 4.3 2793 * Called when uhub_explore finds a new device (via usbd_new_device). 2794 * Port initialization and speed detection (4.3.1) are already done in uhub.c. 2795 * This function does: 2796 * Allocate and construct dev structure of default endpoint (ep0). 2797 * Allocate and open pipe of ep0. 2798 * Enable slot and initialize slot context. 2799 * Set Address. 2800 * Read initial device descriptor. 2801 * Determine initial MaxPacketSize (mps) by speed. 2802 * Read full device descriptor. 2803 * Register this device. 2804 * Finally the device state transitions to ADDRESSED. 2805 */ 2806 static usbd_status 2807 xhci_new_device(device_t parent, struct usbd_bus *bus, int depth, 2808 int speed, int port, struct usbd_port *up) 2809 { 2810 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2811 struct usbd_device *dev; 2812 usbd_status err; 2813 usb_device_descriptor_t *dd; 2814 struct xhci_slot *xs; 2815 uint32_t *cp; 2816 2817 XHCIHIST_FUNC(); 2818 XHCIHIST_CALLARGS("port %ju depth %ju speed %ju up %#jx", 2819 port, depth, speed, (uintptr_t)up); 2820 2821 KASSERT(KERNEL_LOCKED_P()); 2822 2823 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP); 2824 dev->ud_bus = bus; 2825 dev->ud_quirks = &usbd_no_quirk; 2826 dev->ud_addr = 0; 2827 dev->ud_ddesc.bMaxPacketSize = 0; 2828 dev->ud_depth = depth; 2829 dev->ud_powersrc = up; 2830 dev->ud_myhub = up->up_parent; 2831 dev->ud_speed = speed; 2832 dev->ud_langid = USBD_NOLANG; 2833 dev->ud_cookie.cookie = ++usb_cookie_no; 2834 2835 /* Set up default endpoint handle. */ 2836 dev->ud_ep0.ue_edesc = &dev->ud_ep0desc; 2837 /* doesn't matter, just don't leave it uninitialized */ 2838 dev->ud_ep0.ue_toggle = 0; 2839 2840 /* Set up default endpoint descriptor. */ 2841 dev->ud_ep0desc.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE; 2842 dev->ud_ep0desc.bDescriptorType = UDESC_ENDPOINT; 2843 dev->ud_ep0desc.bEndpointAddress = USB_CONTROL_ENDPOINT; 2844 dev->ud_ep0desc.bmAttributes = UE_CONTROL; 2845 dev->ud_ep0desc.bInterval = 0; 2846 2847 /* 4.3, 4.8.2.1 */ 2848 switch (speed) { 2849 case USB_SPEED_SUPER: 2850 case USB_SPEED_SUPER_PLUS: 2851 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_3_MAX_CTRL_PACKET); 2852 break; 2853 case USB_SPEED_FULL: 2854 /* XXX using 64 as initial mps of ep0 in FS */ 2855 case USB_SPEED_HIGH: 2856 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_2_MAX_CTRL_PACKET); 2857 break; 2858 case USB_SPEED_LOW: 2859 default: 2860 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_MAX_IPACKET); 2861 break; 2862 } 2863 2864 up->up_dev = dev; 2865 2866 dd = &dev->ud_ddesc; 2867 2868 if (depth == 0 && port == 0) { 2869 KASSERT(bus->ub_devices[USB_ROOTHUB_INDEX] == NULL); 2870 bus->ub_devices[USB_ROOTHUB_INDEX] = dev; 2871 2872 /* Establish the default pipe.
*/ 2873 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0, 2874 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0); 2875 if (err) { 2876 DPRINTFN(1, "setup default pipe failed %jd", err,0,0,0); 2877 goto bad; 2878 } 2879 err = usbd_get_initial_ddesc(dev, dd); 2880 if (err) { 2881 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0); 2882 goto bad; 2883 } 2884 } else { 2885 uint8_t slot = 0; 2886 2887 /* 4.3.2 */ 2888 err = xhci_enable_slot(sc, &slot); 2889 if (err) { 2890 DPRINTFN(1, "enable slot %ju", err, 0, 0, 0); 2891 goto bad; 2892 } 2893 2894 xs = &sc->sc_slots[slot]; 2895 dev->ud_hcpriv = xs; 2896 2897 /* 4.3.3 initialize slot structure */ 2898 err = xhci_init_slot(dev, slot); 2899 if (err) { 2900 DPRINTFN(1, "init slot %ju", err, 0, 0, 0); 2901 dev->ud_hcpriv = NULL; 2902 /* 2903 * We have to disable_slot here because 2904 * xs->xs_idx == 0 when xhci_init_slot fails, 2905 * in that case usbd_remove_dev won't work. 2906 */ 2907 mutex_enter(&sc->sc_lock); 2908 xhci_disable_slot(sc, slot); 2909 mutex_exit(&sc->sc_lock); 2910 goto bad; 2911 } 2912 2913 /* 2914 * We have to establish the default pipe _after_ slot 2915 * structure has been prepared. 2916 */ 2917 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0, 2918 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0); 2919 if (err) { 2920 DPRINTFN(1, "setup default pipe failed %jd", err, 0, 0, 2921 0); 2922 goto bad; 2923 } 2924 2925 /* 4.3.4 Address Assignment */ 2926 err = xhci_set_address(dev, slot, false); 2927 if (err) { 2928 DPRINTFN(1, "failed! to set address: %ju", err, 0, 0, 0); 2929 goto bad; 2930 } 2931 2932 /* Allow device time to set new address */ 2933 usbd_delay_ms(dev, USB_SET_ADDRESS_SETTLE); 2934 2935 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 2936 cp = xhci_slot_get_dcv(sc, xs, XHCI_DCI_SLOT); 2937 HEXDUMP("slot context", cp, sc->sc_ctxsz); 2938 uint8_t addr = XHCI_SCTX_3_DEV_ADDR_GET(le32toh(cp[3])); 2939 DPRINTFN(4, "device address %ju", addr, 0, 0, 0); 2940 /* 2941 * XXX ensure we know when the hardware does something 2942 * we can't yet cope with 2943 */ 2944 KASSERTMSG(addr >= 1 && addr <= 127, "addr %d", addr); 2945 dev->ud_addr = addr; 2946 2947 KASSERTMSG(bus->ub_devices[usb_addr2dindex(dev->ud_addr)] == NULL, 2948 "addr %d already allocated", dev->ud_addr); 2949 /* 2950 * The root hub is given its own slot 2951 */ 2952 bus->ub_devices[usb_addr2dindex(dev->ud_addr)] = dev; 2953 2954 err = usbd_get_initial_ddesc(dev, dd); 2955 if (err) { 2956 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0); 2957 goto bad; 2958 } 2959 2960 /* 4.8.2.1 */ 2961 if (USB_IS_SS(speed)) { 2962 if (dd->bMaxPacketSize != 9) { 2963 printf("%s: invalid mps 2^%u for SS ep0," 2964 " using 512\n", 2965 device_xname(sc->sc_dev), 2966 dd->bMaxPacketSize); 2967 dd->bMaxPacketSize = 9; 2968 } 2969 USETW(dev->ud_ep0desc.wMaxPacketSize, 2970 (1 << dd->bMaxPacketSize)); 2971 } else 2972 USETW(dev->ud_ep0desc.wMaxPacketSize, 2973 dd->bMaxPacketSize); 2974 DPRINTFN(4, "bMaxPacketSize %ju", dd->bMaxPacketSize, 0, 0, 0); 2975 err = xhci_update_ep0_mps(sc, xs, 2976 UGETW(dev->ud_ep0desc.wMaxPacketSize)); 2977 if (err) { 2978 DPRINTFN(1, "update mps of ep0 %ju", err, 0, 0, 0); 2979 goto bad; 2980 } 2981 } 2982 2983 err = usbd_reload_device_desc(dev); 2984 if (err) { 2985 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0); 2986 goto bad; 2987 } 2988 2989 DPRINTFN(1, "adding unit addr=%jd, rev=%02jx,", 2990 dev->ud_addr, UGETW(dd->bcdUSB), 0, 0); 2991 DPRINTFN(1, " class=%jd, subclass=%jd, protocol=%jd,", 2992 dd->bDeviceClass, dd->bDeviceSubClass, 2993 dd->bDeviceProtocol, 
0); 2994 DPRINTFN(1, " mps=%jd, len=%jd, noconf=%jd, speed=%jd", 2995 dd->bMaxPacketSize, dd->bLength, dd->bNumConfigurations, 2996 dev->ud_speed); 2997 2998 usbd_get_device_strings(dev); 2999 3000 usbd_add_dev_event(USB_EVENT_DEVICE_ATTACH, dev); 3001 3002 if (depth == 0 && port == 0) { 3003 usbd_attach_roothub(parent, dev); 3004 DPRINTFN(1, "root hub %#jx", (uintptr_t)dev, 0, 0, 0); 3005 return USBD_NORMAL_COMPLETION; 3006 } 3007 3008 err = usbd_probe_and_attach(parent, dev, port, dev->ud_addr); 3009 bad: 3010 if (err != USBD_NORMAL_COMPLETION) { 3011 if (depth == 0 && port == 0 && dev->ud_pipe0) 3012 usbd_kill_pipe(dev->ud_pipe0); 3013 usbd_remove_device(dev, up); 3014 } 3015 3016 return err; 3017 } 3018 3019 static usbd_status 3020 xhci_ring_init(struct xhci_softc * const sc, struct xhci_ring **xrp, 3021 size_t ntrb, size_t align) 3022 { 3023 size_t size = ntrb * XHCI_TRB_SIZE; 3024 struct xhci_ring *xr; 3025 3026 XHCIHIST_FUNC(); 3027 XHCIHIST_CALLARGS("xr %#jx ntrb %#jx align %#jx", 3028 (uintptr_t)*xrp, ntrb, align, 0); 3029 3030 xr = kmem_zalloc(sizeof(struct xhci_ring), KM_SLEEP); 3031 DPRINTFN(1, "ring %#jx", (uintptr_t)xr, 0, 0, 0); 3032 3033 int err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align, 3034 USBMALLOC_ZERO, &xr->xr_dma); 3035 if (err) { 3036 kmem_free(xr, sizeof(struct xhci_ring)); 3037 DPRINTFN(1, "alloc xr_dma failed %jd", err, 0, 0, 0); 3038 return err; 3039 } 3040 mutex_init(&xr->xr_lock, MUTEX_DEFAULT, IPL_SOFTUSB); 3041 xr->xr_cookies = kmem_zalloc(sizeof(*xr->xr_cookies) * ntrb, KM_SLEEP); 3042 xr->xr_trb = xhci_ring_trbv(xr, 0); 3043 xr->xr_ntrb = ntrb; 3044 xr->is_halted = false; 3045 xhci_host_dequeue(xr); 3046 *xrp = xr; 3047 3048 return USBD_NORMAL_COMPLETION; 3049 } 3050 3051 static void 3052 xhci_ring_free(struct xhci_softc * const sc, struct xhci_ring ** const xr) 3053 { 3054 if (*xr == NULL) 3055 return; 3056 3057 usb_freemem(&(*xr)->xr_dma); 3058 mutex_destroy(&(*xr)->xr_lock); 3059 kmem_free((*xr)->xr_cookies, 3060 sizeof(*(*xr)->xr_cookies) * (*xr)->xr_ntrb); 3061 kmem_free(*xr, sizeof(struct xhci_ring)); 3062 *xr = NULL; 3063 } 3064 3065 static void 3066 xhci_ring_put(struct xhci_softc * const sc, struct xhci_ring * const xr, 3067 void *cookie, struct xhci_soft_trb * const trbs, size_t ntrbs) 3068 { 3069 size_t i; 3070 u_int ri; 3071 u_int cs; 3072 uint64_t parameter; 3073 uint32_t status; 3074 uint32_t control; 3075 3076 XHCIHIST_FUNC(); 3077 XHCIHIST_CALLARGS("%#jx xr_ep %#jx xr_cs %ju", 3078 (uintptr_t)xr, xr->xr_ep, xr->xr_cs, 0); 3079 3080 KASSERTMSG(ntrbs < xr->xr_ntrb, "ntrbs %zu, xr->xr_ntrb %u", 3081 ntrbs, xr->xr_ntrb); 3082 for (i = 0; i < ntrbs; i++) { 3083 DPRINTFN(12, "xr %#jx trbs %#jx num %ju", (uintptr_t)xr, 3084 (uintptr_t)trbs, i, 0); 3085 DPRINTFN(12, " 0x%016jx 0x%08jx 0x%08jx", 3086 trbs[i].trb_0, trbs[i].trb_2, trbs[i].trb_3, 0); 3087 KASSERTMSG(XHCI_TRB_3_TYPE_GET(trbs[i].trb_3) != 3088 XHCI_TRB_TYPE_LINK, "trbs[%zu].trb3 %#x", i, trbs[i].trb_3); 3089 } 3090 3091 ri = xr->xr_ep; 3092 cs = xr->xr_cs; 3093 3094 /* 3095 * Although the xhci hardware can do scatter/gather dma from 3096 * arbitrarily sized buffers, there is a non-obvious restriction 3097 * that a LINK trb is only allowed at the end of a burst of 3098 * transfers - which might be 16kB. 3099 * Arbitrarily aligned LINK trbs definitely fail on Ivy Bridge. 3100 * The simple solution is not to allow a LINK trb in the middle 3101 * of anything - as here.
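 * Note that the loop below writes the first TRB of a TD with its cycle
 * bit deliberately inverted (so the xHC does not own it yet) and only
 * flips that bit, with a final sync, after the rest of the TD and any
 * LINK trb have been written out.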
3102 * XXX: (dsl) There are xhci controllers out there (eg some made by 3103 * ASMedia) that seem to lock up if they process a LINK trb but 3104 * cannot process the linked-to trb yet. 3105 * The code should write the 'cycle' bit on the link trb AFTER 3106 * adding the other trb. 3107 */ 3108 u_int firstep = xr->xr_ep; 3109 u_int firstcs = xr->xr_cs; 3110 3111 for (i = 0; i < ntrbs; ) { 3112 u_int oldri = ri; 3113 u_int oldcs = cs; 3114 3115 if (ri >= (xr->xr_ntrb - 1)) { 3116 /* Put Link TD at the end of ring */ 3117 parameter = xhci_ring_trbp(xr, 0); 3118 status = 0; 3119 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) | 3120 XHCI_TRB_3_TC_BIT; 3121 xr->xr_cookies[ri] = NULL; 3122 xr->xr_ep = 0; 3123 xr->xr_cs ^= 1; 3124 ri = xr->xr_ep; 3125 cs = xr->xr_cs; 3126 } else { 3127 parameter = trbs[i].trb_0; 3128 status = trbs[i].trb_2; 3129 control = trbs[i].trb_3; 3130 3131 xr->xr_cookies[ri] = cookie; 3132 ri++; 3133 i++; 3134 } 3135 /* 3136 * If this is a first TRB, mark it invalid to prevent 3137 * xHC from running it immediately. 3138 */ 3139 if (oldri == firstep) { 3140 if (oldcs) { 3141 control &= ~XHCI_TRB_3_CYCLE_BIT; 3142 } else { 3143 control |= XHCI_TRB_3_CYCLE_BIT; 3144 } 3145 } else { 3146 if (oldcs) { 3147 control |= XHCI_TRB_3_CYCLE_BIT; 3148 } else { 3149 control &= ~XHCI_TRB_3_CYCLE_BIT; 3150 } 3151 } 3152 xhci_trb_put(&xr->xr_trb[oldri], parameter, status, control); 3153 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * oldri, 3154 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE); 3155 } 3156 3157 /* Now invert cycle bit of first TRB */ 3158 if (firstcs) { 3159 xr->xr_trb[firstep].trb_3 |= htole32(XHCI_TRB_3_CYCLE_BIT); 3160 } else { 3161 xr->xr_trb[firstep].trb_3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT); 3162 } 3163 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * firstep, 3164 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE); 3165 3166 xr->xr_ep = ri; 3167 xr->xr_cs = cs; 3168 3169 DPRINTFN(12, "%#jx xr_ep %#jx xr_cs %ju", (uintptr_t)xr, xr->xr_ep, 3170 xr->xr_cs, 0); 3171 } 3172 3173 static inline void 3174 xhci_ring_put_xfer(struct xhci_softc * const sc, struct xhci_ring * const tr, 3175 struct xhci_xfer *xx, u_int ntrb) 3176 { 3177 KASSERT(ntrb <= xx->xx_ntrb); 3178 xhci_ring_put(sc, tr, xx, xx->xx_trb, ntrb); 3179 } 3180 3181 /* 3182 * Stop execution commands, purge all commands on command ring, and 3183 * rewind dequeue pointer. 3184 */ 3185 static void 3186 xhci_abort_command(struct xhci_softc *sc) 3187 { 3188 struct xhci_ring * const cr = sc->sc_cr; 3189 uint64_t crcr; 3190 int i; 3191 3192 XHCIHIST_FUNC(); 3193 XHCIHIST_CALLARGS("command %#jx timeout, aborting", 3194 sc->sc_command_addr, 0, 0, 0); 3195 3196 mutex_enter(&cr->xr_lock); 3197 3198 /* 4.6.1.2 Aborting a Command */ 3199 crcr = xhci_op_read_8(sc, XHCI_CRCR); 3200 xhci_op_write_8(sc, XHCI_CRCR, crcr | XHCI_CRCR_LO_CA); 3201 3202 for (i = 0; i < 500; i++) { 3203 crcr = xhci_op_read_8(sc, XHCI_CRCR); 3204 if ((crcr & XHCI_CRCR_LO_CRR) == 0) 3205 break; 3206 usb_delay_ms(&sc->sc_bus, 1); 3207 } 3208 if ((crcr & XHCI_CRCR_LO_CRR) != 0) { 3209 DPRINTFN(1, "Command Abort timeout", 0, 0, 0, 0); 3210 /* reset HC here? */ 3211 } 3212 3213 /* reset command ring dequeue pointer */ 3214 cr->xr_ep = 0; 3215 cr->xr_cs = 1; 3216 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(cr, 0) | cr->xr_cs); 3217 3218 mutex_exit(&cr->xr_lock); 3219 } 3220 3221 /* 3222 * Put a command on command ring, ring bell, set timer, and cv_timedwait. 3223 * Command completion is notified by cv_signal from xhci_event_cmd() 3224 * (called from xhci_softint), or timed-out. 
3225 * The completion code is copied to sc->sc_result_trb in xhci_event_cmd(), 3226 * then do_command examines it. 3227 */ 3228 static usbd_status 3229 xhci_do_command_locked(struct xhci_softc * const sc, 3230 struct xhci_soft_trb * const trb, int timeout) 3231 { 3232 struct xhci_ring * const cr = sc->sc_cr; 3233 usbd_status err; 3234 3235 XHCIHIST_FUNC(); 3236 XHCIHIST_CALLARGS("input: 0x%016jx 0x%08jx 0x%08jx", 3237 trb->trb_0, trb->trb_2, trb->trb_3, 0); 3238 3239 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx"); 3240 KASSERT(mutex_owned(&sc->sc_lock)); 3241 3242 while (sc->sc_command_addr != 0 || 3243 (sc->sc_suspender != NULL && sc->sc_suspender != curlwp)) 3244 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock); 3245 if (sc->sc_suspendresume_failed) 3246 return USBD_IOERROR; 3247 3248 /* 3249 * If enqueue pointer points at last of ring, it's Link TRB, 3250 * command TRB will be stored in 0th TRB. 3251 */ 3252 if (cr->xr_ep == cr->xr_ntrb - 1) 3253 sc->sc_command_addr = xhci_ring_trbp(cr, 0); 3254 else 3255 sc->sc_command_addr = xhci_ring_trbp(cr, cr->xr_ep); 3256 3257 sc->sc_resultpending = true; 3258 3259 mutex_enter(&cr->xr_lock); 3260 xhci_ring_put(sc, cr, NULL, trb, 1); 3261 mutex_exit(&cr->xr_lock); 3262 3263 xhci_db_write_4(sc, XHCI_DOORBELL(0), 0); 3264 3265 while (sc->sc_resultpending) { 3266 if (cv_timedwait(&sc->sc_command_cv, &sc->sc_lock, 3267 MAX(1, mstohz(timeout))) == EWOULDBLOCK) { 3268 xhci_abort_command(sc); 3269 err = USBD_TIMEOUT; 3270 goto timedout; 3271 } 3272 } 3273 3274 trb->trb_0 = sc->sc_result_trb.trb_0; 3275 trb->trb_2 = sc->sc_result_trb.trb_2; 3276 trb->trb_3 = sc->sc_result_trb.trb_3; 3277 3278 DPRINTFN(12, "output: 0x%016jx 0x%08jx 0x%08jx", 3279 trb->trb_0, trb->trb_2, trb->trb_3, 0); 3280 3281 switch (XHCI_TRB_2_ERROR_GET(trb->trb_2)) { 3282 case XHCI_TRB_ERROR_SUCCESS: 3283 err = USBD_NORMAL_COMPLETION; 3284 break; 3285 default: 3286 case 192 ... 223: 3287 DPRINTFN(5, "error %#jx", 3288 XHCI_TRB_2_ERROR_GET(trb->trb_2), 0, 0, 0); 3289 err = USBD_IOERROR; 3290 break; 3291 case 224 ... 255: 3292 err = USBD_NORMAL_COMPLETION; 3293 break; 3294 } 3295 3296 timedout: 3297 sc->sc_resultpending = false; 3298 sc->sc_command_addr = 0; 3299 cv_broadcast(&sc->sc_cmdbusy_cv); 3300 3301 return err; 3302 } 3303 3304 static usbd_status 3305 xhci_do_command(struct xhci_softc * const sc, struct xhci_soft_trb * const trb, 3306 int timeout) 3307 { 3308 3309 mutex_enter(&sc->sc_lock); 3310 usbd_status ret = xhci_do_command_locked(sc, trb, timeout); 3311 mutex_exit(&sc->sc_lock); 3312 3313 return ret; 3314 } 3315 3316 static usbd_status 3317 xhci_enable_slot(struct xhci_softc * const sc, uint8_t * const slotp) 3318 { 3319 struct xhci_soft_trb trb; 3320 usbd_status err; 3321 3322 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 3323 3324 trb.trb_0 = 0; 3325 trb.trb_2 = 0; 3326 trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT); 3327 3328 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 3329 if (err != USBD_NORMAL_COMPLETION) { 3330 return err; 3331 } 3332 3333 *slotp = XHCI_TRB_3_SLOT_GET(trb.trb_3); 3334 3335 return err; 3336 } 3337 3338 /* 3339 * xHCI 4.6.4 3340 * Deallocate ring and device/input context DMA buffers, and disable_slot. 3341 * All endpoints in the slot should be stopped. 3342 * Should be called with sc_lock held. 
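 * On success the slot's remaining rings and context buffers are freed
 * via xhci_free_slot() and its DCBAA entry is cleared with
 * xhci_set_dcba(sc, 0, slot).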
3343 */ 3344 static usbd_status 3345 xhci_disable_slot(struct xhci_softc * const sc, uint8_t slot) 3346 { 3347 struct xhci_soft_trb trb; 3348 struct xhci_slot *xs; 3349 usbd_status err; 3350 3351 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 3352 3353 if (sc->sc_dying) 3354 return USBD_IOERROR; 3355 3356 trb.trb_0 = 0; 3357 trb.trb_2 = 0; 3358 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot) | 3359 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT); 3360 3361 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 3362 3363 if (!err) { 3364 xs = &sc->sc_slots[slot]; 3365 if (xs->xs_idx != 0) { 3366 xhci_free_slot(sc, xs); 3367 xhci_set_dcba(sc, 0, slot); 3368 memset(xs, 0, sizeof(*xs)); 3369 } 3370 } 3371 3372 return err; 3373 } 3374 3375 /* 3376 * Set address of device and transition slot state from ENABLED to ADDRESSED 3377 * if Block Setaddress Request (BSR) is false. 3378 * If BSR==true, transition slot state from ENABLED to DEFAULT. 3379 * see xHCI 1.1 4.5.3, 3.3.4 3380 * Should be called without sc_lock held. 3381 */ 3382 static usbd_status 3383 xhci_address_device(struct xhci_softc * const sc, 3384 uint64_t icp, uint8_t slot_id, bool bsr) 3385 { 3386 struct xhci_soft_trb trb; 3387 usbd_status err; 3388 3389 XHCIHIST_FUNC(); 3390 if (bsr) { 3391 XHCIHIST_CALLARGS("icp %#jx slot %#jx with bsr", 3392 icp, slot_id, 0, 0); 3393 } else { 3394 XHCIHIST_CALLARGS("icp %#jx slot %#jx nobsr", 3395 icp, slot_id, 0, 0); 3396 } 3397 3398 trb.trb_0 = icp; 3399 trb.trb_2 = 0; 3400 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot_id) | 3401 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) | 3402 (bsr ? XHCI_TRB_3_BSR_BIT : 0); 3403 3404 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 3405 3406 if (XHCI_TRB_2_ERROR_GET(trb.trb_2) == XHCI_TRB_ERROR_NO_SLOTS) 3407 err = USBD_NO_ADDR; 3408 3409 return err; 3410 } 3411 3412 static usbd_status 3413 xhci_update_ep0_mps(struct xhci_softc * const sc, 3414 struct xhci_slot * const xs, u_int mps) 3415 { 3416 struct xhci_soft_trb trb; 3417 usbd_status err; 3418 uint32_t * cp; 3419 3420 XHCIHIST_FUNC(); 3421 XHCIHIST_CALLARGS("slot %ju mps %ju", xs->xs_idx, mps, 0, 0); 3422 3423 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL); 3424 cp[0] = htole32(0); 3425 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_EP_CONTROL)); 3426 3427 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_EP_CONTROL)); 3428 cp[1] = htole32(XHCI_EPCTX_1_MAXP_SIZE_SET(mps)); 3429 3430 /* sync input contexts before they are read from memory */ 3431 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE); 3432 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0), 3433 sc->sc_ctxsz * 4); 3434 3435 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0); 3436 trb.trb_2 = 0; 3437 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 3438 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX); 3439 3440 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 3441 return err; 3442 } 3443 3444 static void 3445 xhci_set_dcba(struct xhci_softc * const sc, uint64_t dcba, int si) 3446 { 3447 uint64_t * const dcbaa = KERNADDR(&sc->sc_dcbaa_dma, 0); 3448 3449 XHCIHIST_FUNC(); 3450 XHCIHIST_CALLARGS("dcbaa %#jx dc 0x%016jx slot %jd", 3451 (uintptr_t)&dcbaa[si], dcba, si, 0); 3452 3453 dcbaa[si] = htole64(dcba); 3454 usb_syncmem(&sc->sc_dcbaa_dma, si * sizeof(uint64_t), sizeof(uint64_t), 3455 BUS_DMASYNC_PREWRITE); 3456 } 3457 3458 /* 3459 * Allocate device and input context DMA buffer, and 3460 * TRB DMA buffer for each endpoint. 
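 * (The per-endpoint transfer rings themselves are allocated later, from
 * xhci_open(), when each pipe is opened; here the xs_xr array is merely
 * zeroed.)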
3461 */ 3462 static usbd_status 3463 xhci_init_slot(struct usbd_device *dev, uint32_t slot) 3464 { 3465 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus); 3466 struct xhci_slot *xs; 3467 3468 XHCIHIST_FUNC(); 3469 XHCIHIST_CALLARGS("slot %ju", slot, 0, 0, 0); 3470 3471 xs = &sc->sc_slots[slot]; 3472 3473 /* allocate contexts */ 3474 int err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz, 3475 USBMALLOC_ZERO, &xs->xs_dc_dma); 3476 if (err) { 3477 DPRINTFN(1, "failed to allocmem output device context %jd", 3478 err, 0, 0, 0); 3479 return USBD_NOMEM; 3480 } 3481 3482 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz, 3483 USBMALLOC_ZERO, &xs->xs_ic_dma); 3484 if (err) { 3485 DPRINTFN(1, "failed to allocmem input device context %jd", 3486 err, 0, 0, 0); 3487 goto bad1; 3488 } 3489 3490 memset(&xs->xs_xr[0], 0, sizeof(xs->xs_xr)); 3491 xs->xs_idx = slot; 3492 3493 return USBD_NORMAL_COMPLETION; 3494 3495 bad1: 3496 usb_freemem(&xs->xs_dc_dma); 3497 xs->xs_idx = 0; 3498 return USBD_NOMEM; 3499 } 3500 3501 static void 3502 xhci_free_slot(struct xhci_softc *sc, struct xhci_slot *xs) 3503 { 3504 u_int dci; 3505 3506 XHCIHIST_FUNC(); 3507 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0); 3508 3509 /* deallocate all allocated rings in the slot */ 3510 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) { 3511 if (xs->xs_xr[dci] != NULL) 3512 xhci_ring_free(sc, &xs->xs_xr[dci]); 3513 } 3514 usb_freemem(&xs->xs_ic_dma); 3515 usb_freemem(&xs->xs_dc_dma); 3516 xs->xs_idx = 0; 3517 } 3518 3519 /* 3520 * Setup slot context, set Device Context Base Address, and issue 3521 * Set Address Device command. 3522 */ 3523 static usbd_status 3524 xhci_set_address(struct usbd_device *dev, uint32_t slot, bool bsr) 3525 { 3526 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus); 3527 struct xhci_slot *xs; 3528 usbd_status err; 3529 3530 XHCIHIST_FUNC(); 3531 XHCIHIST_CALLARGS("slot %ju bsr %ju", slot, bsr, 0, 0); 3532 3533 xs = &sc->sc_slots[slot]; 3534 3535 xhci_setup_ctx(dev->ud_pipe0); 3536 3537 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0), 3538 sc->sc_ctxsz * 3); 3539 3540 xhci_set_dcba(sc, DMAADDR(&xs->xs_dc_dma, 0), slot); 3541 3542 err = xhci_address_device(sc, xhci_slot_get_icp(sc, xs, 0), slot, bsr); 3543 3544 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 3545 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, 0), 3546 sc->sc_ctxsz * 2); 3547 3548 return err; 3549 } 3550 3551 /* 3552 * 4.8.2, 6.2.3.2 3553 * construct slot/endpoint context parameters and do syncmem 3554 */ 3555 static void 3556 xhci_setup_ctx(struct usbd_pipe *pipe) 3557 { 3558 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 3559 struct usbd_device *dev = pipe->up_dev; 3560 struct xhci_slot * const xs = dev->ud_hcpriv; 3561 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 3562 const u_int dci = xhci_ep_get_dci(ed); 3563 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 3564 uint32_t *cp; 3565 uint8_t speed = dev->ud_speed; 3566 3567 XHCIHIST_FUNC(); 3568 XHCIHIST_CALLARGS("pipe %#jx: slot %ju dci %ju speed %ju", 3569 (uintptr_t)pipe, xs->xs_idx, dci, speed); 3570 3571 /* set up initial input control context */ 3572 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL); 3573 cp[0] = htole32(0); 3574 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(dci)); 3575 cp[1] |= htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_SLOT)); 3576 cp[7] = htole32(0); 3577 3578 /* set up input slot context */ 3579 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT)); 
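	/*
	 * The slot context words are assembled in host byte order first and
	 * only converted with htole32() after xhci_setup_route() and
	 * xhci_setup_tthub() have OR-ed in their fields.
	 */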
3580 cp[0] = 3581 XHCI_SCTX_0_CTX_NUM_SET(dci) | 3582 XHCI_SCTX_0_SPEED_SET(xhci_speed2xspeed(speed)); 3583 cp[1] = 0; 3584 cp[2] = XHCI_SCTX_2_IRQ_TARGET_SET(0); 3585 cp[3] = 0; 3586 xhci_setup_route(pipe, cp); 3587 xhci_setup_tthub(pipe, cp); 3588 3589 cp[0] = htole32(cp[0]); 3590 cp[1] = htole32(cp[1]); 3591 cp[2] = htole32(cp[2]); 3592 cp[3] = htole32(cp[3]); 3593 3594 /* set up input endpoint context */ 3595 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(dci)); 3596 cp[0] = 3597 XHCI_EPCTX_0_EPSTATE_SET(0) | 3598 XHCI_EPCTX_0_MULT_SET(0) | 3599 XHCI_EPCTX_0_MAXP_STREAMS_SET(0) | 3600 XHCI_EPCTX_0_LSA_SET(0) | 3601 XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(0); 3602 cp[1] = 3603 XHCI_EPCTX_1_EPTYPE_SET(xhci_ep_get_type(ed)) | 3604 XHCI_EPCTX_1_HID_SET(0) | 3605 XHCI_EPCTX_1_MAXB_SET(0); 3606 3607 if (xfertype != UE_ISOCHRONOUS) 3608 cp[1] |= XHCI_EPCTX_1_CERR_SET(3); 3609 3610 xhci_setup_maxburst(pipe, cp); 3611 3612 DPRINTFN(4, "setting on dci %ju ival %ju mult %ju mps %#jx", 3613 dci, XHCI_EPCTX_0_IVAL_GET(cp[0]), XHCI_EPCTX_0_MULT_GET(cp[0]), 3614 XHCI_EPCTX_1_MAXP_SIZE_GET(cp[1])); 3615 DPRINTFN(4, " maxburst %ju mep %#jx atl %#jx", 3616 XHCI_EPCTX_1_MAXB_GET(cp[1]), 3617 (XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_GET(cp[0]) << 16) + 3618 XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_GET(cp[4]), 3619 XHCI_EPCTX_4_AVG_TRB_LEN_GET(cp[4]), 0); 3620 3621 /* rewind TR dequeue pointer in xHC */ 3622 /* can't use xhci_ep_get_dci() yet? */ 3623 *(uint64_t *)(&cp[2]) = htole64( 3624 xhci_ring_trbp(xs->xs_xr[dci], 0) | 3625 XHCI_EPCTX_2_DCS_SET(1)); 3626 3627 cp[0] = htole32(cp[0]); 3628 cp[1] = htole32(cp[1]); 3629 cp[4] = htole32(cp[4]); 3630 3631 /* rewind TR dequeue pointer in driver */ 3632 struct xhci_ring *xr = xs->xs_xr[dci]; 3633 mutex_enter(&xr->xr_lock); 3634 xhci_host_dequeue(xr); 3635 mutex_exit(&xr->xr_lock); 3636 3637 /* sync input contexts before they are read from memory */ 3638 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE); 3639 } 3640 3641 /* 3642 * Setup route string and roothub port of given device for slot context 3643 */ 3644 static void 3645 xhci_setup_route(struct usbd_pipe *pipe, uint32_t *cp) 3646 { 3647 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 3648 struct usbd_device *dev = pipe->up_dev; 3649 struct usbd_port *up = dev->ud_powersrc; 3650 struct usbd_device *hub; 3651 struct usbd_device *adev; 3652 uint8_t rhport = 0; 3653 uint32_t route = 0; 3654 3655 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 3656 3657 /* Locate root hub port and Determine route string */ 3658 /* 4.3.3 route string does not include roothub port */ 3659 for (hub = dev; hub != NULL; hub = hub->ud_myhub) { 3660 uint32_t dep; 3661 3662 DPRINTFN(4, "hub %#jx depth %jd upport %#jx upportno %jd", 3663 (uintptr_t)hub, hub->ud_depth, (uintptr_t)hub->ud_powersrc, 3664 hub->ud_powersrc ? (uintptr_t)hub->ud_powersrc->up_portno : 3665 -1); 3666 3667 if (hub->ud_powersrc == NULL) 3668 break; 3669 dep = hub->ud_depth; 3670 if (dep == 0) 3671 break; 3672 rhport = hub->ud_powersrc->up_portno; 3673 if (dep > USB_HUB_MAX_DEPTH) 3674 continue; 3675 3676 route |= 3677 (rhport > UHD_SS_NPORTS_MAX ? UHD_SS_NPORTS_MAX : rhport) 3678 << ((dep - 1) * 4); 3679 } 3680 route = route >> 4; 3681 size_t bn = hub == sc->sc_bus.ub_roothub ? 
0 : 1; 3682 3683 /* Locate port on upstream high speed hub */ 3684 for (adev = dev, hub = up->up_parent; 3685 hub != NULL && hub->ud_speed != USB_SPEED_HIGH; 3686 adev = hub, hub = hub->ud_myhub) 3687 ; 3688 if (hub) { 3689 int p; 3690 for (p = 1; p <= hub->ud_hub->uh_hubdesc.bNbrPorts; p++) { 3691 if (hub->ud_hub->uh_ports[p - 1].up_dev == adev) { 3692 dev->ud_myhsport = &hub->ud_hub->uh_ports[p - 1]; 3693 goto found; 3694 } 3695 } 3696 panic("%s: cannot find HS port", __func__); 3697 found: 3698 DPRINTFN(4, "high speed port %jd", p, 0, 0, 0); 3699 } else { 3700 dev->ud_myhsport = NULL; 3701 } 3702 3703 const size_t ctlrport = xhci_rhport2ctlrport(sc, bn, rhport); 3704 3705 DPRINTFN(4, "rhport %ju ctlrport %ju Route %05jx hub %#jx", rhport, 3706 ctlrport, route, (uintptr_t)hub); 3707 3708 cp[0] |= XHCI_SCTX_0_ROUTE_SET(route); 3709 cp[1] |= XHCI_SCTX_1_RH_PORT_SET(ctlrport); 3710 } 3711 3712 /* 3713 * Setup whether device is hub, whether device uses MTT, and 3714 * TT information if it uses MTT. 3715 */ 3716 static void 3717 xhci_setup_tthub(struct usbd_pipe *pipe, uint32_t *cp) 3718 { 3719 struct usbd_device *dev = pipe->up_dev; 3720 struct usbd_port *myhsport = dev->ud_myhsport; 3721 usb_device_descriptor_t * const dd = &dev->ud_ddesc; 3722 uint32_t speed = dev->ud_speed; 3723 uint8_t rhaddr = dev->ud_bus->ub_rhaddr; 3724 uint8_t tthubslot, ttportnum; 3725 bool ishub; 3726 bool usemtt; 3727 3728 XHCIHIST_FUNC(); 3729 3730 /* 3731 * 6.2.2, Table 57-60, 6.2.2.1, 6.2.2.2 3732 * tthubslot: 3733 * This is the slot ID of parent HS hub 3734 * if LS/FS device is connected && connected through HS hub. 3735 * This is 0 if device is not LS/FS device || 3736 * parent hub is not HS hub || 3737 * attached to root hub. 3738 * ttportnum: 3739 * This is the downstream facing port of parent HS hub 3740 * if LS/FS device is connected. 3741 * This is 0 if device is not LS/FS device || 3742 * parent hub is not HS hub || 3743 * attached to root hub. 3744 */ 3745 if (myhsport && 3746 myhsport->up_parent->ud_addr != rhaddr && 3747 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) { 3748 ttportnum = myhsport->up_portno; 3749 tthubslot = myhsport->up_parent->ud_addr; 3750 } else { 3751 ttportnum = 0; 3752 tthubslot = 0; 3753 } 3754 XHCIHIST_CALLARGS("myhsport %#jx ttportnum=%jd tthubslot=%jd", 3755 (uintptr_t)myhsport, ttportnum, tthubslot, 0); 3756 3757 /* ishub is valid after reading UDESC_DEVICE */ 3758 ishub = (dd->bDeviceClass == UDCLASS_HUB); 3759 3760 /* dev->ud_hub is valid after reading UDESC_HUB */ 3761 if (ishub && dev->ud_hub) { 3762 usb_hub_descriptor_t *hd = &dev->ud_hub->uh_hubdesc; 3763 uint8_t ttt = 3764 __SHIFTOUT(UGETW(hd->wHubCharacteristics), UHD_TT_THINK); 3765 3766 cp[1] |= XHCI_SCTX_1_NUM_PORTS_SET(hd->bNbrPorts); 3767 cp[2] |= XHCI_SCTX_2_TT_THINK_TIME_SET(ttt); 3768 DPRINTFN(4, "nports=%jd ttt=%jd", hd->bNbrPorts, ttt, 0, 0); 3769 } 3770 3771 #define IS_MTTHUB(dd) \ 3772 ((dd)->bDeviceProtocol == UDPROTO_HSHUBMTT) 3773 3774 /* 3775 * MTT flag is set if 3776 * 1. this is HS hub && MTTs are supported and enabled; or 3777 * 2. this is LS or FS device && there is a parent HS hub where MTTs 3778 * are supported and enabled.
3779 * 3780 * XXX enabled is not tested yet 3781 */ 3782 if (ishub && speed == USB_SPEED_HIGH && IS_MTTHUB(dd)) 3783 usemtt = true; 3784 else if ((speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) && 3785 myhsport && 3786 myhsport->up_parent->ud_addr != rhaddr && 3787 IS_MTTHUB(&myhsport->up_parent->ud_ddesc)) 3788 usemtt = true; 3789 else 3790 usemtt = false; 3791 DPRINTFN(4, "class %ju proto %ju ishub %jd usemtt %jd", 3792 dd->bDeviceClass, dd->bDeviceProtocol, ishub, usemtt); 3793 3794 #undef IS_MTTHUB 3795 3796 cp[0] |= 3797 XHCI_SCTX_0_HUB_SET(ishub ? 1 : 0) | 3798 XHCI_SCTX_0_MTT_SET(usemtt ? 1 : 0); 3799 cp[2] |= 3800 XHCI_SCTX_2_TT_HUB_SID_SET(tthubslot) | 3801 XHCI_SCTX_2_TT_PORT_NUM_SET(ttportnum); 3802 } 3803 3804 static const usb_endpoint_ss_comp_descriptor_t * 3805 xhci_get_essc_desc(struct usbd_pipe *pipe) 3806 { 3807 struct usbd_device *dev = pipe->up_dev; 3808 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 3809 const usb_cdc_descriptor_t *cdcd; 3810 usbd_desc_iter_t iter; 3811 uint8_t ep; 3812 3813 /* config desc is NULL when opening ep0 */ 3814 if (dev == NULL || dev->ud_cdesc == NULL) 3815 return NULL; 3816 3817 cdcd = (const usb_cdc_descriptor_t *)usb_find_desc(dev, 3818 UDESC_INTERFACE, USBD_CDCSUBTYPE_ANY); 3819 if (cdcd == NULL) 3820 return NULL; 3821 3822 usb_desc_iter_init(dev, &iter); 3823 iter.cur = (const void *)cdcd; 3824 3825 /* find endpoint_ss_comp desc for ep of this pipe */ 3826 for (ep = 0;;) { 3827 cdcd = (const usb_cdc_descriptor_t *)usb_desc_iter_next(&iter); 3828 if (cdcd == NULL) 3829 break; 3830 if (ep == 0 && cdcd->bDescriptorType == UDESC_ENDPOINT) { 3831 ep = ((const usb_endpoint_descriptor_t *)cdcd)-> 3832 bEndpointAddress; 3833 if (UE_GET_ADDR(ep) == 3834 UE_GET_ADDR(ed->bEndpointAddress)) { 3835 cdcd = (const usb_cdc_descriptor_t *) 3836 usb_desc_iter_next(&iter); 3837 break; 3838 } 3839 ep = 0; 3840 } 3841 } 3842 if (cdcd != NULL && cdcd->bDescriptorType == UDESC_ENDPOINT_SS_COMP) { 3843 return (const usb_endpoint_ss_comp_descriptor_t *)cdcd; 3844 } 3845 return NULL; 3846 } 3847 3848 /* set up params for periodic endpoint */ 3849 static void 3850 xhci_setup_maxburst(struct usbd_pipe *pipe, uint32_t *cp) 3851 { 3852 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe; 3853 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 3854 struct usbd_device * const dev = pipe->up_dev; 3855 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 3856 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 3857 uint16_t mps = UGETW(ed->wMaxPacketSize); 3858 uint8_t speed = dev->ud_speed; 3859 uint32_t maxb, mep, atl; 3860 uint8_t ival, mult; 3861 3862 const usb_endpoint_ss_comp_descriptor_t * esscd = 3863 xhci_get_essc_desc(pipe); 3864 3865 /* USB 2.0 9.6.6, xHCI 4.8.2.4, 6.2.3.2 - 6.2.3.8 */ 3866 switch (xfertype) { 3867 case UE_ISOCHRONOUS: 3868 case UE_INTERRUPT: 3869 if (USB_IS_SS(speed)) { 3870 maxb = esscd ? esscd->bMaxBurst : UE_GET_TRANS(mps); 3871 mep = esscd ? UGETW(esscd->wBytesPerInterval) : 3872 UE_GET_SIZE(mps) * (maxb + 1); 3873 if (esscd && xfertype == UE_ISOCHRONOUS && 3874 XHCI_HCC2_LEC(sc->sc_hcc2) == 0) { 3875 mult = UE_GET_SS_ISO_MULT(esscd->bmAttributes); 3876 mult = (mult > 2) ? 
2 : mult; 3877 } else 3878 mult = 0; 3879 3880 } else { 3881 switch (speed) { 3882 case USB_SPEED_HIGH: 3883 maxb = UE_GET_TRANS(mps); 3884 mep = UE_GET_SIZE(mps) * (maxb + 1); 3885 break; 3886 case USB_SPEED_FULL: 3887 maxb = 0; 3888 mep = UE_GET_SIZE(mps); 3889 break; 3890 default: 3891 maxb = 0; 3892 mep = 0; 3893 break; 3894 } 3895 mult = 0; 3896 } 3897 mps = UE_GET_SIZE(mps); 3898 3899 if (pipe->up_interval == USBD_DEFAULT_INTERVAL) 3900 ival = ed->bInterval; 3901 else 3902 ival = pipe->up_interval; 3903 3904 ival = xhci_bival2ival(ival, speed, xfertype); 3905 atl = mep; 3906 break; 3907 case UE_CONTROL: 3908 case UE_BULK: 3909 default: 3910 if (USB_IS_SS(speed)) { 3911 maxb = esscd ? esscd->bMaxBurst : 0; 3912 } else 3913 maxb = 0; 3914 3915 mps = UE_GET_SIZE(mps); 3916 mep = 0; 3917 mult = 0; 3918 ival = 0; 3919 if (xfertype == UE_CONTROL) 3920 atl = 8; /* 6.2.3 */ 3921 else 3922 atl = mps; 3923 break; 3924 } 3925 3926 switch (speed) { 3927 case USB_SPEED_LOW: 3928 break; 3929 case USB_SPEED_FULL: 3930 if (xfertype == UE_INTERRUPT) 3931 if (mep > XHCI_EPCTX_MEP_FS_INTR) 3932 mep = XHCI_EPCTX_MEP_FS_INTR; 3933 if (xfertype == UE_ISOCHRONOUS) 3934 if (mep > XHCI_EPCTX_MEP_FS_ISOC) 3935 mep = XHCI_EPCTX_MEP_FS_ISOC; 3936 break; 3937 case USB_SPEED_HIGH: 3938 if (xfertype == UE_INTERRUPT) 3939 if (mep > XHCI_EPCTX_MEP_HS_INTR) 3940 mep = XHCI_EPCTX_MEP_HS_INTR; 3941 if (xfertype == UE_ISOCHRONOUS) 3942 if (mep > XHCI_EPCTX_MEP_HS_ISOC) 3943 mep = XHCI_EPCTX_MEP_HS_ISOC; 3944 break; 3945 case USB_SPEED_SUPER: 3946 case USB_SPEED_SUPER_PLUS: 3947 default: 3948 if (xfertype == UE_INTERRUPT) 3949 if (mep > XHCI_EPCTX_MEP_SS_INTR) 3950 mep = XHCI_EPCTX_MEP_SS_INTR; 3951 if (xfertype == UE_ISOCHRONOUS) { 3952 if (speed == USB_SPEED_SUPER || 3953 XHCI_HCC2_LEC(sc->sc_hcc2) == 0) { 3954 if (mep > XHCI_EPCTX_MEP_SS_ISOC) 3955 mep = XHCI_EPCTX_MEP_SS_ISOC; 3956 } else { 3957 if (mep > XHCI_EPCTX_MEP_SS_ISOC_LEC) 3958 mep = XHCI_EPCTX_MEP_SS_ISOC_LEC; 3959 } 3960 } 3961 break; 3962 } 3963 3964 xpipe->xp_maxb = maxb + 1; 3965 xpipe->xp_mult = mult + 1; 3966 3967 cp[0] |= XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(mep >> 16); 3968 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival); 3969 cp[0] |= XHCI_EPCTX_0_MULT_SET(mult); 3970 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(mps); 3971 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb); 3972 cp[4] |= XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(mep & 0xffff); 3973 cp[4] |= XHCI_EPCTX_4_AVG_TRB_LEN_SET(atl); 3974 } 3975 3976 /* 3977 * Convert usbdi bInterval value to xhci endpoint context interval value 3978 * for periodic pipe. 3979 * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6 3980 */ 3981 static uint32_t 3982 xhci_bival2ival(uint32_t ival, uint32_t speed, uint32_t xfertype) 3983 { 3984 if (xfertype != UE_INTERRUPT && xfertype != UE_ISOCHRONOUS) 3985 return 0; 3986 3987 if (xfertype == UE_INTERRUPT && 3988 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) { 3989 u_int i; 3990 3991 /* 3992 * round ival down to "the nearest base 2 multiple of 3993 * bInterval * 8". 3994 * bInterval is at most 255 as its type is uByte. 3995 * 255(ms) = 2040(x 125us) < 2^11, so start with 10. 3996 */ 3997 for (i = 10; i > 0; i--) { 3998 if ((ival * 8) >= (1 << i)) 3999 break; 4000 } 4001 ival = i; 4002 4003 /* 3 - 10 */ 4004 ival = (ival < 3) ? 3 : ival; 4005 } else if (speed == USB_SPEED_FULL) { 4006 /* FS isoc */ 4007 ival += 3; /* 1ms -> 125us */ 4008 ival--; /* Interval = bInterval-1 */ 4009 /* 3 - 18 */ 4010 ival = (ival > 18) ? 18 : ival; 4011 ival = (ival < 3) ? 
3 : ival; 4012 } else { 4013 /* SS/HS intr/isoc */ 4014 if (ival > 0) 4015 ival--; /* Interval = bInterval-1 */ 4016 /* 0 - 15 */ 4017 ival = (ival > 15) ? 15 : ival; 4018 } 4019 4020 return ival; 4021 } 4022 4023 /* ----- */ 4024 4025 static void 4026 xhci_noop(struct usbd_pipe *pipe) 4027 { 4028 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4029 } 4030 4031 /* 4032 * Process root hub request. 4033 */ 4034 static int 4035 xhci_roothub_ctrl_locked(struct usbd_bus *bus, usb_device_request_t *req, 4036 void *buf, int buflen) 4037 { 4038 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 4039 usb_port_status_t ps; 4040 int l, totlen = 0; 4041 uint16_t len, value, index; 4042 int port, i; 4043 uint32_t v; 4044 4045 XHCIHIST_FUNC(); 4046 4047 KASSERT(mutex_owned(&sc->sc_rhlock)); 4048 4049 if (sc->sc_dying) 4050 return -1; 4051 4052 size_t bn = bus == &sc->sc_bus ? 0 : 1; 4053 4054 len = UGETW(req->wLength); 4055 value = UGETW(req->wValue); 4056 index = UGETW(req->wIndex); 4057 4058 XHCIHIST_CALLARGS("rhreq: %04jx %04jx %04jx %04jx", 4059 req->bmRequestType | (req->bRequest << 8), value, index, len); 4060 4061 #define C(x,y) ((x) | ((y) << 8)) 4062 switch (C(req->bRequest, req->bmRequestType)) { 4063 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE): 4064 DPRINTFN(8, "getdesc: wValue=0x%04jx", value, 0, 0, 0); 4065 if (len == 0) 4066 break; 4067 switch (value) { 4068 #define sd ((usb_string_descriptor_t *)buf) 4069 case C(2, UDESC_STRING): 4070 /* Product */ 4071 totlen = usb_makestrdesc(sd, len, "xHCI root hub"); 4072 break; 4073 #undef sd 4074 default: 4075 /* default from usbroothub */ 4076 return buflen; 4077 } 4078 break; 4079 4080 /* Hub requests */ 4081 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE): 4082 break; 4083 /* Clear Port Feature request */ 4084 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): { 4085 const size_t cp = xhci_rhport2ctlrport(sc, bn, index); 4086 4087 DPRINTFN(4, "UR_CLEAR_PORT_FEAT bp=%jd feat=%jd bus=%jd cp=%jd", 4088 index, value, bn, cp); 4089 if (index < 1 || index > sc->sc_rhportcount[bn]) { 4090 return -1; 4091 } 4092 port = XHCI_PORTSC(cp); 4093 v = xhci_op_read_4(sc, port); 4094 DPRINTFN(4, "portsc=0x%08jx", v, 0, 0, 0); 4095 v &= ~XHCI_PS_CLEAR; 4096 switch (value) { 4097 case UHF_PORT_ENABLE: 4098 xhci_op_write_4(sc, port, v & ~XHCI_PS_PED); 4099 break; 4100 case UHF_PORT_SUSPEND: 4101 return -1; 4102 case UHF_PORT_POWER: 4103 break; 4104 case UHF_PORT_TEST: 4105 case UHF_PORT_INDICATOR: 4106 return -1; 4107 case UHF_C_PORT_CONNECTION: 4108 xhci_op_write_4(sc, port, v | XHCI_PS_CSC); 4109 break; 4110 case UHF_C_PORT_ENABLE: 4111 case UHF_C_PORT_SUSPEND: 4112 case UHF_C_PORT_OVER_CURRENT: 4113 return -1; 4114 case UHF_C_BH_PORT_RESET: 4115 xhci_op_write_4(sc, port, v | XHCI_PS_WRC); 4116 break; 4117 case UHF_C_PORT_RESET: 4118 xhci_op_write_4(sc, port, v | XHCI_PS_PRC); 4119 break; 4120 case UHF_C_PORT_LINK_STATE: 4121 xhci_op_write_4(sc, port, v | XHCI_PS_PLC); 4122 break; 4123 case UHF_C_PORT_CONFIG_ERROR: 4124 xhci_op_write_4(sc, port, v | XHCI_PS_CEC); 4125 break; 4126 default: 4127 return -1; 4128 } 4129 break; 4130 } 4131 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE): 4132 if (len == 0) 4133 break; 4134 if ((value & 0xff) != 0) { 4135 return -1; 4136 } 4137 usb_hub_descriptor_t hubd; 4138 4139 totlen = uimin(buflen, sizeof(hubd)); 4140 memcpy(&hubd, buf, totlen); 4141 hubd.bNbrPorts = sc->sc_rhportcount[bn]; 4142 USETW(hubd.wHubCharacteristics, UHD_PWR_NO_SWITCH); 4143 hubd.bPwrOn2PwrGood = 200; 4144 for (i = 0, l = sc->sc_rhportcount[bn]; l > 0; i++, l -= 8) { 4145 
/* XXX can't find out? */ 4146 hubd.DeviceRemovable[i++] = 0; 4147 } 4148 hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i; 4149 totlen = uimin(totlen, hubd.bDescLength); 4150 memcpy(buf, &hubd, totlen); 4151 break; 4152 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE): 4153 if (len != 4) { 4154 return -1; 4155 } 4156 memset(buf, 0, len); /* ? XXX */ 4157 totlen = len; 4158 break; 4159 /* Get Port Status request */ 4160 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): { 4161 const size_t cp = xhci_rhport2ctlrport(sc, bn, index); 4162 4163 DPRINTFN(8, "get port status bn=%jd i=%jd cp=%ju", 4164 bn, index, cp, 0); 4165 if (index < 1 || index > sc->sc_rhportcount[bn]) { 4166 DPRINTFN(5, "bad get port status: index=%jd bn=%jd " 4167 "portcount=%jd", 4168 index, bn, sc->sc_rhportcount[bn], 0); 4169 return -1; 4170 } 4171 if (len != 4) { 4172 DPRINTFN(5, "bad get port status: len %jd != 4", 4173 len, 0, 0, 0); 4174 return -1; 4175 } 4176 v = xhci_op_read_4(sc, XHCI_PORTSC(cp)); 4177 DPRINTFN(4, "getrhportsc %jd 0x%08jx", cp, v, 0, 0); 4178 i = xhci_xspeed2psspeed(XHCI_PS_SPEED_GET(v)); 4179 if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS; 4180 if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED; 4181 if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR; 4182 //if (v & XHCI_PS_SUSP) i |= UPS_SUSPEND; 4183 if (v & XHCI_PS_PR) i |= UPS_RESET; 4184 if (v & XHCI_PS_PP) { 4185 if (i & UPS_OTHER_SPEED) 4186 i |= UPS_PORT_POWER_SS; 4187 else 4188 i |= UPS_PORT_POWER; 4189 } 4190 if (i & UPS_OTHER_SPEED) 4191 i |= UPS_PORT_LS_SET(XHCI_PS_PLS_GET(v)); 4192 if (sc->sc_vendor_port_status) 4193 i = sc->sc_vendor_port_status(sc, v, i); 4194 USETW(ps.wPortStatus, i); 4195 i = 0; 4196 if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS; 4197 if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED; 4198 if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR; 4199 if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET; 4200 if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET; 4201 if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE; 4202 if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR; 4203 USETW(ps.wPortChange, i); 4204 totlen = uimin(len, sizeof(ps)); 4205 memcpy(buf, &ps, totlen); 4206 DPRINTFN(5, "get port status: wPortStatus %#jx wPortChange %#jx" 4207 " totlen %jd", 4208 UGETW(ps.wPortStatus), UGETW(ps.wPortChange), totlen, 0); 4209 break; 4210 } 4211 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE): 4212 return -1; 4213 case C(UR_SET_HUB_DEPTH, UT_WRITE_CLASS_DEVICE): 4214 break; 4215 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE): 4216 break; 4217 /* Set Port Feature request */ 4218 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): { 4219 int optval = (index >> 8) & 0xff; 4220 index &= 0xff; 4221 if (index < 1 || index > sc->sc_rhportcount[bn]) { 4222 return -1; 4223 } 4224 4225 const size_t cp = xhci_rhport2ctlrport(sc, bn, index); 4226 4227 port = XHCI_PORTSC(cp); 4228 v = xhci_op_read_4(sc, port); 4229 DPRINTFN(4, "index %jd cp %jd portsc=0x%08jx", index, cp, v, 0); 4230 v &= ~XHCI_PS_CLEAR; 4231 switch (value) { 4232 case UHF_PORT_ENABLE: 4233 xhci_op_write_4(sc, port, v | XHCI_PS_PED); 4234 break; 4235 case UHF_PORT_SUSPEND: 4236 /* XXX suspend */ 4237 break; 4238 case UHF_PORT_RESET: 4239 xhci_op_write_4(sc, port, v | XHCI_PS_PR); 4240 /* Wait for reset to complete. 
*/ 4241 for (i = 0; i < USB_PORT_ROOT_RESET_DELAY / 10; i++) { 4242 if (sc->sc_dying) { 4243 return -1; 4244 } 4245 v = xhci_op_read_4(sc, port); 4246 if ((v & XHCI_PS_PR) == 0) { 4247 break; 4248 } 4249 usb_delay_ms(&sc->sc_bus, 10); 4250 } 4251 break; 4252 case UHF_PORT_POWER: 4253 /* XXX power control */ 4254 break; 4255 /* XXX more */ 4256 case UHF_C_PORT_RESET: 4257 xhci_op_write_4(sc, port, v | XHCI_PS_PRC); 4258 break; 4259 case UHF_PORT_U1_TIMEOUT: 4260 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) { 4261 return -1; 4262 } 4263 port = XHCI_PORTPMSC(cp); 4264 v = xhci_op_read_4(sc, port); 4265 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx", 4266 index, cp, v, 0); 4267 v &= ~XHCI_PM3_U1TO_SET(0xff); 4268 v |= XHCI_PM3_U1TO_SET(optval); 4269 xhci_op_write_4(sc, port, v); 4270 break; 4271 case UHF_PORT_U2_TIMEOUT: 4272 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) { 4273 return -1; 4274 } 4275 port = XHCI_PORTPMSC(cp); 4276 v = xhci_op_read_4(sc, port); 4277 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx", 4278 index, cp, v, 0); 4279 v &= ~XHCI_PM3_U2TO_SET(0xff); 4280 v |= XHCI_PM3_U2TO_SET(optval); 4281 xhci_op_write_4(sc, port, v); 4282 break; 4283 default: 4284 return -1; 4285 } 4286 } 4287 break; 4288 case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER): 4289 case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER): 4290 case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER): 4291 case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER): 4292 break; 4293 default: 4294 /* default from usbroothub */ 4295 return buflen; 4296 } 4297 4298 return totlen; 4299 } 4300 4301 static int 4302 xhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req, 4303 void *buf, int buflen) 4304 { 4305 struct xhci_softc *sc = XHCI_BUS2SC(bus); 4306 int actlen; 4307 4308 mutex_enter(&sc->sc_rhlock); 4309 actlen = xhci_roothub_ctrl_locked(bus, req, buf, buflen); 4310 mutex_exit(&sc->sc_rhlock); 4311 4312 return actlen; 4313 } 4314 4315 /* root hub interrupt */ 4316 4317 static usbd_status 4318 xhci_root_intr_transfer(struct usbd_xfer *xfer) 4319 { 4320 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4321 4322 /* Pipe isn't running, start first */ 4323 return xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 4324 } 4325 4326 /* Wait for roothub port status/change */ 4327 static usbd_status 4328 xhci_root_intr_start(struct usbd_xfer *xfer) 4329 { 4330 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4331 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1; 4332 4333 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4334 4335 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 4336 4337 if (sc->sc_dying) 4338 return USBD_IOERROR; 4339 4340 KASSERT(sc->sc_intrxfer[bn] == NULL); 4341 sc->sc_intrxfer[bn] = xfer; 4342 xfer->ux_status = USBD_IN_PROGRESS; 4343 4344 return USBD_IN_PROGRESS; 4345 } 4346 4347 static void 4348 xhci_root_intr_abort(struct usbd_xfer *xfer) 4349 { 4350 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4351 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1; 4352 4353 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4354 4355 KASSERT(mutex_owned(&sc->sc_lock)); 4356 KASSERT(xfer->ux_pipe->up_intrxfer == xfer); 4357 4358 /* If xfer has already completed, nothing to do here. */ 4359 if (sc->sc_intrxfer[bn] == NULL) 4360 return; 4361 4362 /* 4363 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer. 4364 * Cancel it. 
4365 */ 4366 KASSERT(sc->sc_intrxfer[bn] == xfer); 4367 xfer->ux_status = USBD_CANCELLED; 4368 usb_transfer_complete(xfer); 4369 } 4370 4371 static void 4372 xhci_root_intr_close(struct usbd_pipe *pipe) 4373 { 4374 struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe); 4375 const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer; 4376 const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1; 4377 4378 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4379 4380 KASSERT(mutex_owned(&sc->sc_lock)); 4381 4382 /* 4383 * Caller must guarantee the xfer has completed first, by 4384 * closing the pipe only after normal completion or an abort. 4385 */ 4386 KASSERT(sc->sc_intrxfer[bn] == NULL); 4387 } 4388 4389 static void 4390 xhci_root_intr_done(struct usbd_xfer *xfer) 4391 { 4392 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4393 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1; 4394 4395 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4396 4397 KASSERT(mutex_owned(&sc->sc_lock)); 4398 4399 /* Claim the xfer so it doesn't get completed again. */ 4400 KASSERT(sc->sc_intrxfer[bn] == xfer); 4401 KASSERT(xfer->ux_status != USBD_IN_PROGRESS); 4402 sc->sc_intrxfer[bn] = NULL; 4403 } 4404 4405 /* -------------- */ 4406 /* device control */ 4407 4408 static usbd_status 4409 xhci_device_ctrl_transfer(struct usbd_xfer *xfer) 4410 { 4411 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4412 4413 /* Pipe isn't running, start first */ 4414 return xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 4415 } 4416 4417 static usbd_status 4418 xhci_device_ctrl_start(struct usbd_xfer *xfer) 4419 { 4420 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4421 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4422 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4423 struct xhci_ring * const tr = xs->xs_xr[dci]; 4424 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 4425 usb_device_request_t * const req = &xfer->ux_request; 4426 const bool isread = usbd_xfer_isread(xfer); 4427 const uint32_t len = UGETW(req->wLength); 4428 usb_dma_t * const dma = &xfer->ux_dmabuf; 4429 uint64_t parameter; 4430 uint32_t status; 4431 uint32_t control; 4432 u_int i; 4433 const bool polling = xhci_polling_p(sc); 4434 4435 XHCIHIST_FUNC(); 4436 XHCIHIST_CALLARGS("req: %04jx %04jx %04jx %04jx", 4437 req->bmRequestType | (req->bRequest << 8), UGETW(req->wValue), 4438 UGETW(req->wIndex), UGETW(req->wLength)); 4439 4440 KASSERT(polling || mutex_owned(&sc->sc_lock)); 4441 4442 /* we rely on the bottom bits for extra info */ 4443 KASSERTMSG(((uintptr_t)xfer & 0x3) == 0x0, "xfer %p", xfer); 4444 4445 KASSERT((xfer->ux_rqflags & URQ_REQUEST) != 0); 4446 4447 if (tr->is_halted) 4448 goto out; 4449 4450 i = 0; 4451 4452 /* setup phase */ 4453 parameter = le64dec(req); /* to keep USB endian after xhci_trb_put() */ 4454 status = XHCI_TRB_2_IRQ_SET(0) | XHCI_TRB_2_BYTES_SET(sizeof(*req)); 4455 control = ((len == 0) ? XHCI_TRB_3_TRT_NONE : 4456 (isread ? XHCI_TRB_3_TRT_IN : XHCI_TRB_3_TRT_OUT)) | 4457 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) | 4458 XHCI_TRB_3_IDT_BIT; 4459 xhci_xfer_put_trb(xx, i++, parameter, status, control); 4460 4461 if (len != 0) { 4462 /* data phase */ 4463 parameter = DMAADDR(dma, 0); 4464 KASSERTMSG(len <= 0x10000, "len %d", len); 4465 status = XHCI_TRB_2_IRQ_SET(0) | 4466 XHCI_TRB_2_TDSZ_SET(0) | 4467 XHCI_TRB_2_BYTES_SET(len); 4468 control = (isread ? XHCI_TRB_3_DIR_IN : 0) | 4469 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE) | 4470 (isread ? 
XHCI_TRB_3_ISP_BIT : 0) | 4471 XHCI_TRB_3_IOC_BIT; 4472 xhci_xfer_put_trb(xx, i++, parameter, status, control); 4473 4474 usb_syncmem(dma, 0, len, 4475 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 4476 } 4477 4478 parameter = 0; 4479 status = XHCI_TRB_2_IRQ_SET(0); 4480 /* the status stage has inverted direction */ 4481 control = ((isread && (len > 0)) ? 0 : XHCI_TRB_3_DIR_IN) | 4482 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE) | 4483 XHCI_TRB_3_IOC_BIT; 4484 xhci_xfer_put_trb(xx, i++, parameter, status, control); 4485 4486 if (!polling) 4487 mutex_enter(&tr->xr_lock); 4488 xhci_ring_put_xfer(sc, tr, xx, i); 4489 if (!polling) 4490 mutex_exit(&tr->xr_lock); 4491 4492 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); 4493 4494 out: if (xfer->ux_status == USBD_NOT_STARTED) { 4495 xfer->ux_status = USBD_IN_PROGRESS; 4496 usbd_xfer_schedule_timeout(xfer); 4497 } else { 4498 /* 4499 * We must be coming from xhci_pipe_restart -- timeout 4500 * already set up, nothing to do. 4501 */ 4502 } 4503 KASSERT(xfer->ux_status == USBD_IN_PROGRESS); 4504 4505 return USBD_IN_PROGRESS; 4506 } 4507 4508 static void 4509 xhci_device_ctrl_done(struct usbd_xfer *xfer) 4510 { 4511 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4512 usb_device_request_t *req = &xfer->ux_request; 4513 int len = UGETW(req->wLength); 4514 int rd = req->bmRequestType & UT_READ; 4515 4516 if (len) 4517 usb_syncmem(&xfer->ux_dmabuf, 0, len, 4518 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 4519 } 4520 4521 static void 4522 xhci_device_ctrl_abort(struct usbd_xfer *xfer) 4523 { 4524 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4525 4526 usbd_xfer_abort(xfer); 4527 } 4528 4529 static void 4530 xhci_device_ctrl_close(struct usbd_pipe *pipe) 4531 { 4532 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4533 4534 xhci_close_pipe(pipe); 4535 } 4536 4537 /* ------------------ */ 4538 /* device isochronous */ 4539 4540 static usbd_status 4541 xhci_device_isoc_transfer(struct usbd_xfer *xfer) 4542 { 4543 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4544 4545 return xhci_device_isoc_enter(xfer); 4546 } 4547 4548 static usbd_status 4549 xhci_device_isoc_enter(struct usbd_xfer *xfer) 4550 { 4551 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4552 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4553 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4554 struct xhci_ring * const tr = xs->xs_xr[dci]; 4555 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 4556 struct xhci_pipe * const xpipe = (struct xhci_pipe *)xfer->ux_pipe; 4557 usb_dma_t * const dma = &xfer->ux_dmabuf; 4558 uint64_t parameter; 4559 uint32_t status; 4560 uint32_t control; 4561 uint32_t offs; 4562 int i, ival; 4563 const bool polling = xhci_polling_p(sc); 4564 const uint16_t MPS = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize); 4565 const uint16_t mps = UE_GET_SIZE(MPS); 4566 const uint8_t maxb = xpipe->xp_maxb; 4567 4568 XHCIHIST_FUNC(); 4569 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4570 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4571 4572 KASSERT(polling || mutex_owned(&sc->sc_lock)); 4573 4574 if (sc->sc_dying) 4575 return USBD_IOERROR; 4576 4577 KASSERT(xfer->ux_nframes != 0 && xfer->ux_frlengths); 4578 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0); 4579 4580 const bool isread = usbd_xfer_isread(xfer); 4581 if (xfer->ux_length) 4582 usb_syncmem(dma, 0, xfer->ux_length, 4583 isread ? 
BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 4584 4585 ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval; 4586 if (ival >= 1 && ival <= 16) 4587 ival = 1 << (ival - 1); 4588 else 4589 ival = 1; /* fake something up */ 4590 4591 if (xpipe->xp_isoc_next == -1) { 4592 uint32_t mfindex = xhci_rt_read_4(sc, XHCI_MFINDEX); 4593 4594 DPRINTF("mfindex %jx", (uintmax_t)mfindex, 0, 0, 0); 4595 mfindex = XHCI_MFINDEX_GET(mfindex + 1); 4596 mfindex /= USB_UFRAMES_PER_FRAME; 4597 mfindex += 7; /* 7 frames is max possible IST */ 4598 xpipe->xp_isoc_next = roundup2(mfindex, ival); 4599 } 4600 4601 offs = 0; 4602 for (i = 0; i < xfer->ux_nframes; i++) { 4603 const uint32_t len = xfer->ux_frlengths[i]; 4604 const unsigned tdpc = howmany(len, mps); 4605 const unsigned tbc = howmany(tdpc, maxb) - 1; 4606 const unsigned tlbpc1 = tdpc % maxb; 4607 const unsigned tlbpc = tlbpc1 ? tlbpc1 - 1 : maxb - 1; 4608 4609 KASSERTMSG(len <= 0x10000, "len %d", len); 4610 parameter = DMAADDR(dma, offs); 4611 status = XHCI_TRB_2_IRQ_SET(0) | 4612 XHCI_TRB_2_TDSZ_SET(0) | 4613 XHCI_TRB_2_BYTES_SET(len); 4614 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) | 4615 (isread ? XHCI_TRB_3_ISP_BIT : 0) | 4616 XHCI_TRB_3_TBC_SET(tbc) | 4617 XHCI_TRB_3_TLBPC_SET(tlbpc) | 4618 XHCI_TRB_3_IOC_BIT; 4619 if (XHCI_HCC_CFC(sc->sc_hcc)) { 4620 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next); 4621 #if 0 4622 } else if (xpipe->xp_isoc_next == -1) { 4623 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next); 4624 #endif 4625 } else { 4626 control |= XHCI_TRB_3_ISO_SIA_BIT; 4627 } 4628 #if 0 4629 if (i != xfer->ux_nframes - 1) 4630 control |= XHCI_TRB_3_BEI_BIT; 4631 #endif 4632 xhci_xfer_put_trb(xx, i, parameter, status, control); 4633 4634 xpipe->xp_isoc_next += ival; 4635 offs += len; 4636 } 4637 4638 xx->xx_isoc_done = 0; 4639 4640 if (!polling) 4641 mutex_enter(&tr->xr_lock); 4642 xhci_ring_put_xfer(sc, tr, xx, i); 4643 if (!polling) 4644 mutex_exit(&tr->xr_lock); 4645 4646 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); 4647 xfer->ux_status = USBD_IN_PROGRESS; 4648 usbd_xfer_schedule_timeout(xfer); 4649 4650 return USBD_IN_PROGRESS; 4651 } 4652 4653 static void 4654 xhci_device_isoc_abort(struct usbd_xfer *xfer) 4655 { 4656 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4657 4658 usbd_xfer_abort(xfer); 4659 } 4660 4661 static void 4662 xhci_device_isoc_close(struct usbd_pipe *pipe) 4663 { 4664 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4665 4666 xhci_close_pipe(pipe); 4667 } 4668 4669 static void 4670 xhci_device_isoc_done(struct usbd_xfer *xfer) 4671 { 4672 #ifdef USB_DEBUG 4673 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4674 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4675 #endif 4676 const bool isread = usbd_xfer_isread(xfer); 4677 4678 XHCIHIST_FUNC(); 4679 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4680 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4681 4682 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length, 4683 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 4684 } 4685 4686 /* ----------- */ 4687 /* device bulk */ 4688 4689 static usbd_status 4690 xhci_device_bulk_transfer(struct usbd_xfer *xfer) 4691 { 4692 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4693 4694 /* Pipe isn't running, so start it first. 
*/ 4695 return xhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 4696 } 4697 4698 static usbd_status 4699 xhci_device_bulk_start(struct usbd_xfer *xfer) 4700 { 4701 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4702 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4703 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4704 struct xhci_ring * const tr = xs->xs_xr[dci]; 4705 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 4706 const uint32_t len = xfer->ux_length; 4707 usb_dma_t * const dma = &xfer->ux_dmabuf; 4708 uint64_t parameter; 4709 uint32_t status; 4710 uint32_t control; 4711 u_int i = 0; 4712 const bool polling = xhci_polling_p(sc); 4713 4714 XHCIHIST_FUNC(); 4715 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4716 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4717 4718 KASSERT(polling || mutex_owned(&sc->sc_lock)); 4719 4720 if (sc->sc_dying) 4721 return USBD_IOERROR; 4722 4723 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0); 4724 4725 if (tr->is_halted) 4726 goto out; 4727 4728 parameter = DMAADDR(dma, 0); 4729 const bool isread = usbd_xfer_isread(xfer); 4730 if (len) 4731 usb_syncmem(dma, 0, len, 4732 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 4733 4734 /* 4735 * XXX: (dsl) The physical buffer must not cross a 64k boundary. 4736 * If the user supplied buffer crosses such a boundary then 2 4737 * (or more) TRB should be used. 4738 * If multiple TRB are used the td_size field must be set correctly. 4739 * For v1.0 devices (like ivy bridge) this is the number of usb data 4740 * blocks needed to complete the transfer. 4741 * Setting it to 1 in the last TRB causes an extra zero-length 4742 * data block be sent. 4743 * The earlier documentation differs, I don't know how it behaves. 4744 */ 4745 KASSERTMSG(len <= 0x10000, "len %d", len); 4746 status = XHCI_TRB_2_IRQ_SET(0) | 4747 XHCI_TRB_2_TDSZ_SET(0) | 4748 XHCI_TRB_2_BYTES_SET(len); 4749 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) | 4750 (isread ? XHCI_TRB_3_ISP_BIT : 0) | 4751 XHCI_TRB_3_IOC_BIT; 4752 xhci_xfer_put_trb(xx, i++, parameter, status, control); 4753 4754 if (!polling) 4755 mutex_enter(&tr->xr_lock); 4756 xhci_ring_put_xfer(sc, tr, xx, i); 4757 if (!polling) 4758 mutex_exit(&tr->xr_lock); 4759 4760 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); 4761 4762 out: if (xfer->ux_status == USBD_NOT_STARTED) { 4763 xfer->ux_status = USBD_IN_PROGRESS; 4764 usbd_xfer_schedule_timeout(xfer); 4765 } else { 4766 /* 4767 * We must be coming from xhci_pipe_restart -- timeout 4768 * already set up, nothing to do. 4769 */ 4770 } 4771 KASSERT(xfer->ux_status == USBD_IN_PROGRESS); 4772 4773 return USBD_IN_PROGRESS; 4774 } 4775 4776 static void 4777 xhci_device_bulk_done(struct usbd_xfer *xfer) 4778 { 4779 #ifdef USB_DEBUG 4780 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4781 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4782 #endif 4783 const bool isread = usbd_xfer_isread(xfer); 4784 4785 XHCIHIST_FUNC(); 4786 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4787 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4788 4789 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length, 4790 isread ? 
BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 4791 } 4792 4793 static void 4794 xhci_device_bulk_abort(struct usbd_xfer *xfer) 4795 { 4796 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4797 4798 usbd_xfer_abort(xfer); 4799 } 4800 4801 static void 4802 xhci_device_bulk_close(struct usbd_pipe *pipe) 4803 { 4804 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4805 4806 xhci_close_pipe(pipe); 4807 } 4808 4809 /* ---------------- */ 4810 /* device interrupt */ 4811 4812 static usbd_status 4813 xhci_device_intr_transfer(struct usbd_xfer *xfer) 4814 { 4815 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4816 4817 /* Pipe isn't running, so start it first. */ 4818 return xhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 4819 } 4820 4821 static usbd_status 4822 xhci_device_intr_start(struct usbd_xfer *xfer) 4823 { 4824 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4825 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4826 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4827 struct xhci_ring * const tr = xs->xs_xr[dci]; 4828 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 4829 const uint32_t len = xfer->ux_length; 4830 const bool polling = xhci_polling_p(sc); 4831 usb_dma_t * const dma = &xfer->ux_dmabuf; 4832 uint64_t parameter; 4833 uint32_t status; 4834 uint32_t control; 4835 u_int i = 0; 4836 4837 XHCIHIST_FUNC(); 4838 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4839 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4840 4841 KASSERT(polling || mutex_owned(&sc->sc_lock)); 4842 4843 if (sc->sc_dying) 4844 return USBD_IOERROR; 4845 4846 if (tr->is_halted) 4847 goto out; 4848 4849 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0); 4850 4851 const bool isread = usbd_xfer_isread(xfer); 4852 if (len) 4853 usb_syncmem(dma, 0, len, 4854 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 4855 4856 parameter = DMAADDR(dma, 0); 4857 KASSERTMSG(len <= 0x10000, "len %d", len); 4858 status = XHCI_TRB_2_IRQ_SET(0) | 4859 XHCI_TRB_2_TDSZ_SET(0) | 4860 XHCI_TRB_2_BYTES_SET(len); 4861 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) | 4862 (isread ? XHCI_TRB_3_ISP_BIT : 0) | XHCI_TRB_3_IOC_BIT; 4863 xhci_xfer_put_trb(xx, i++, parameter, status, control); 4864 4865 if (!polling) 4866 mutex_enter(&tr->xr_lock); 4867 xhci_ring_put_xfer(sc, tr, xx, i); 4868 if (!polling) 4869 mutex_exit(&tr->xr_lock); 4870 4871 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); 4872 4873 out: if (xfer->ux_status == USBD_NOT_STARTED) { 4874 xfer->ux_status = USBD_IN_PROGRESS; 4875 usbd_xfer_schedule_timeout(xfer); 4876 } else { 4877 /* 4878 * We must be coming from xhci_pipe_restart -- timeout 4879 * already set up, nothing to do. 4880 */ 4881 } 4882 KASSERT(xfer->ux_status == USBD_IN_PROGRESS); 4883 4884 return USBD_IN_PROGRESS; 4885 } 4886 4887 static void 4888 xhci_device_intr_done(struct usbd_xfer *xfer) 4889 { 4890 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer); 4891 #ifdef USB_DEBUG 4892 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4893 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4894 #endif 4895 const bool isread = usbd_xfer_isread(xfer); 4896 4897 XHCIHIST_FUNC(); 4898 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4899 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4900 4901 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 4902 4903 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length, 4904 isread ? 
BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 4905 } 4906 4907 static void 4908 xhci_device_intr_abort(struct usbd_xfer *xfer) 4909 { 4910 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer); 4911 4912 XHCIHIST_FUNC(); 4913 XHCIHIST_CALLARGS("%#jx", (uintptr_t)xfer, 0, 0, 0); 4914 4915 KASSERT(mutex_owned(&sc->sc_lock)); 4916 usbd_xfer_abort(xfer); 4917 } 4918 4919 static void 4920 xhci_device_intr_close(struct usbd_pipe *pipe) 4921 { 4922 //struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 4923 4924 XHCIHIST_FUNC(); 4925 XHCIHIST_CALLARGS("%#jx", (uintptr_t)pipe, 0, 0, 0); 4926 4927 xhci_close_pipe(pipe); 4928 } 4929
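/*
 * Illustrative note on the input context setup earlier in this section
 * (editor's sketch, not driver code): the endpoint context's TR Dequeue
 * Pointer is loaded with the physical address of TRB 0 of the transfer
 * ring together with DCS = 1, matching the cycle state of a freshly
 * initialized ring; the driver-side ring state is rewound to the same
 * point with xhci_host_dequeue() so hardware and software agree before
 * the contexts are handed to the controller, and the whole input
 * context is then synced with BUS_DMASYNC_PREWRITE.
 */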
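/*
 * Illustrative note on xhci_setup_route() (editor's sketch): the route
 * string is built one nibble per hub tier, least-significant nibble
 * nearest the root hub, and the root hub port itself is carried
 * separately (xHCI 4.3.3); port numbers above UHD_SS_NPORTS_MAX are
 * clamped to fit a nibble.  For example, a device on port 3 of hub B,
 * with hub B on port 1 of hub A and hub A on root hub port 2, walks up
 * the tree as:
 *
 *	device (depth 3): nibble 2 = 3          -> route 0x300
 *	hub B  (depth 2): nibble 1 = 1          -> route 0x310
 *	hub A  (depth 1): nibble 0 = 2          -> route 0x312
 *	final shift right by 4 drops the root hub port -> route 0x031
 *	rhport = 2, translated to a controller port by
 *	xhci_rhport2ctlrport() before being written to the slot context
 */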
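/*
 * Illustrative note on xhci_setup_maxburst() (editor's sketch): for a
 * high-speed high-bandwidth isochronous endpoint the extra transactions
 * per microframe are encoded in wMaxPacketSize bits 12:11, so with
 * wMaxPacketSize = 0x1400 (1024-byte packets, 2 additional
 * transactions):
 *
 *	maxb = UE_GET_TRANS(0x1400) = 2
 *	mps  = UE_GET_SIZE(0x1400)  = 1024
 *	mep  = 1024 * (2 + 1)       = 3072 bytes per ESIT
 *
 * mep is then clamped by the XHCI_EPCTX_MEP_HS_ISOC limit and split
 * across the Max ESIT Payload Hi/Lo context fields, while Max Burst
 * Size is written as 2 and Max Packet Size as 1024.
 */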
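/*
 * Illustrative note on xhci_bival2ival() (editor's sketch): the
 * endpoint context Interval is a power-of-two exponent in 125us units,
 * so a few representative conversions are:
 *
 *	LS/FS interrupt, bInterval = 10 (ms):
 *		10 * 8 = 80 125us units; largest 2^i <= 80 is 2^6,
 *		so Interval = 6 (8ms), within the 3..10 clamp.
 *	FS isochronous, bInterval = 4 (2^(4-1) = 8 frames):
 *		Interval = 4 + 3 - 1 = 6, i.e. 2^6 * 125us = 8ms.
 *	HS/SS interrupt or isochronous, bInterval = 4:
 *		Interval = 4 - 1 = 3, i.e. 2^3 * 125us = 1ms.
 *
 * The fragment below is an editor's standalone rendering of the LS/FS
 * interrupt rounding, kept under #if 0 so it is never compiled into
 * the driver.
 */
#if 0
static unsigned int
sketch_fs_intr_ival(unsigned int bInterval)
{
	unsigned int units = bInterval * 8;	/* 1ms frames -> 125us units */
	unsigned int i;

	/* largest i in 10..1 with 2^i <= bInterval * 8 */
	for (i = 10; i > 0; i--) {
		if (units >= (1u << i))
			break;
	}
	return i < 3 ? 3 : i;			/* clamp to the 3..10 range */
}
#endif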
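/*
 * Illustrative note on xhci_device_ctrl_start() (editor's sketch): a
 * control read is queued as three TRBs on the EP0 ring.  For example,
 * a GET_DESCRIPTOR request with wLength = 18 is built roughly as:
 *
 *	Setup Stage:  parameter = the 8 setup bytes (IDT set, so the
 *		      data is immediate), length 8, TRT = IN
 *	Data Stage:   parameter = buffer DMA address, length 18,
 *		      DIR = IN, ISP and IOC set
 *	Status Stage: length 0, DIR = OUT (direction is inverted
 *		      relative to the data stage), IOC set
 *
 * after which the slot doorbell is rung with the DCI of the default
 * control endpoint (1).
 */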
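/*
 * Illustrative note on xhci_device_isoc_enter() (editor's sketch): TBC
 * and TLBPC describe how each per-frame TD is split into bursts.  With
 * mps = 1024, xp_maxb = 3 (bMaxBurst = 2, i.e. up to 3 packets per
 * burst) and a 5000-byte frame:
 *
 *	tdpc  = howmany(5000, 1024) = 5 packets in the TD
 *	tbc   = howmany(5, 3) - 1   = 1 (two bursts, encoded minus one)
 *	tlbpc = (5 % 3) - 1         = 1 (two packets in the last burst,
 *					encoded minus one)
 *
 * When the controller lacks Contiguous Frame ID support the TRBs carry
 * SIA instead of an explicit frame number; otherwise the frame comes
 * from xp_isoc_next, which is seeded from MFINDEX (converted from
 * microframes to frames, padded by the 7-frame worst-case IST, and
 * rounded up to the endpoint interval).  The fragment below mirrors
 * the burst arithmetic, for len > 0, under #if 0 so it is never
 * compiled into the driver.
 */
#if 0
static void
sketch_isoc_burst(uint32_t len, uint16_t mps, uint8_t packets_per_burst,
    unsigned int *tbc, unsigned int *tlbpc)
{
	const unsigned int tdpc = howmany(len, mps);	/* packets in TD */
	const unsigned int last = tdpc % packets_per_burst;

	*tbc = howmany(tdpc, packets_per_burst) - 1;	/* bursts - 1 */
	*tlbpc = (last ? last : packets_per_burst) - 1;	/* last burst - 1 */
}
#endif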