/*	$NetBSD: xhci.c,v 1.184 2024/05/20 11:36:20 riastradh Exp $	*/

/*
 * Copyright (c) 2013 Jonathan A. Kollasch
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * USB rev 2.0 and rev 3.1 specification
 *  http://www.usb.org/developers/docs/
 * xHCI rev 1.1 specification
 *  http://www.intel.com/technology/usb/spec.htm
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.184 2024/05/20 11:36:20 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_usb.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/device.h>
#include <sys/select.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/sysctl.h>

#include <machine/endian.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usbdi_util.h>
#include <dev/usb/usbhist.h>
#include <dev/usb/usb_mem.h>
#include <dev/usb/usb_quirks.h>

#include <dev/usb/xhcireg.h>
#include <dev/usb/xhcivar.h>
#include <dev/usb/usbroothub.h>


#ifdef USB_DEBUG
#ifndef XHCI_DEBUG
#define xhcidebug 0
#else /* !XHCI_DEBUG */
#define HEXDUMP(a, b, c) \
    do { \
	    if (xhcidebug > 0) \
		    hexdump(printf, a, b, c); \
    } while (/*CONSTCOND*/0)
static int xhcidebug = 0;

SYSCTL_SETUP(sysctl_hw_xhci_setup, "sysctl hw.xhci setup")
{
	int err;
	const struct sysctlnode *rnode;
	const struct sysctlnode *cnode;

	err = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "xhci",
	    SYSCTL_DESCR("xhci global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);

	if (err)
		goto fail;

	/* control debugging printfs */
	err = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &xhcidebug, sizeof(xhcidebug), CTL_CREATE, CTL_EOL);
	if (err)
		goto fail;

	return;
fail:
	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
}

#endif /* !XHCI_DEBUG */
#endif /* USB_DEBUG */

#ifndef HEXDUMP
#define HEXDUMP(a, b, c)
#endif

#define DPRINTF(FMT,A,B,C,D)	USBHIST_LOG(xhcidebug,FMT,A,B,C,D)
#define DPRINTFN(N,FMT,A,B,C,D)	USBHIST_LOGN(xhcidebug,N,FMT,A,B,C,D)
#define XHCIHIST_FUNC()		USBHIST_FUNC()
#define XHCIHIST_CALLED(name)	USBHIST_CALLED(xhcidebug)
#define XHCIHIST_CALLARGS(FMT,A,B,C,D) \
				USBHIST_CALLARGS(xhcidebug,FMT,A,B,C,D)

#define XHCI_DCI_SLOT 0
#define XHCI_DCI_EP_CONTROL 1

#define XHCI_ICI_INPUT_CONTROL 0

struct xhci_pipe {
	struct usbd_pipe xp_pipe;
	struct usb_task xp_async_task;
	int16_t xp_isoc_next; /* next frame */
	uint8_t xp_maxb; /* max burst */
	uint8_t xp_mult;
};

#define XHCI_COMMAND_RING_TRBS 256
#define XHCI_EVENT_RING_TRBS 256
#define XHCI_EVENT_RING_SEGMENTS 1
#define XHCI_TRB_3_ED_BIT XHCI_TRB_3_ISP_BIT

static usbd_status xhci_open(struct usbd_pipe *);
static void xhci_close_pipe(struct usbd_pipe *);
static int xhci_intr1(struct xhci_softc * const);
static void xhci_softintr(void *);
static void xhci_poll(struct usbd_bus *);
static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int);
static void xhci_freex(struct usbd_bus *, struct usbd_xfer *);
static void xhci_abortx(struct usbd_xfer *);
static bool xhci_dying(struct usbd_bus *);
static void xhci_get_lock(struct usbd_bus *, kmutex_t **);
static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int,
    struct usbd_port *);
static int xhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
    void *, int);

static void xhci_pipe_restart(struct usbd_pipe *);
static void xhci_pipe_restart_async_task(void *);
static void xhci_pipe_restart_async(struct usbd_pipe *);

static usbd_status xhci_configure_endpoint(struct usbd_pipe *);
//static usbd_status xhci_unconfigure_endpoint(struct usbd_pipe *);
static void xhci_reset_endpoint(struct usbd_pipe *);
static usbd_status xhci_stop_endpoint_cmd(struct xhci_softc *,
    struct xhci_slot *, u_int, uint32_t);
static usbd_status xhci_stop_endpoint(struct usbd_pipe *);

static void xhci_host_dequeue(struct xhci_ring * const);
static void xhci_set_dequeue(struct usbd_pipe *);

static usbd_status xhci_do_command(struct xhci_softc * const,
    struct xhci_soft_trb * const, int);
static usbd_status xhci_do_command_locked(struct xhci_softc * const,
    struct xhci_soft_trb * const, int);
static usbd_status xhci_init_slot(struct usbd_device *, uint32_t);
static void xhci_free_slot(struct xhci_softc *, struct xhci_slot *);
static usbd_status xhci_set_address(struct usbd_device *, uint32_t, bool);
static usbd_status xhci_enable_slot(struct xhci_softc * const,
    uint8_t * const);
static usbd_status xhci_disable_slot(struct xhci_softc * const, uint8_t);
static usbd_status xhci_address_device(struct xhci_softc * const,
    uint64_t, uint8_t, bool);
static void xhci_set_dcba(struct xhci_softc * const, uint64_t, int);
static usbd_status xhci_update_ep0_mps(struct xhci_softc * const,
    struct xhci_slot * const, u_int);
static usbd_status xhci_ring_init(struct xhci_softc * const,
    struct xhci_ring **, size_t, size_t);
static void xhci_ring_free(struct xhci_softc * const,
    struct xhci_ring ** const);

static void xhci_setup_ctx(struct usbd_pipe *);
static void xhci_setup_route(struct usbd_pipe *, uint32_t *);
static void xhci_setup_tthub(struct usbd_pipe *, uint32_t *);
static void xhci_setup_maxburst(struct usbd_pipe *, uint32_t *);
static uint32_t xhci_bival2ival(uint32_t, uint32_t, uint32_t);

static void xhci_noop(struct usbd_pipe *);

static usbd_status xhci_root_intr_transfer(struct usbd_xfer *);
static usbd_status xhci_root_intr_start(struct usbd_xfer *);
static void xhci_root_intr_abort(struct usbd_xfer *);
static void xhci_root_intr_close(struct usbd_pipe *);
static void xhci_root_intr_done(struct usbd_xfer *);

static usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *);
static usbd_status xhci_device_ctrl_start(struct usbd_xfer *);
static void xhci_device_ctrl_abort(struct usbd_xfer *);
static void xhci_device_ctrl_close(struct usbd_pipe *);
static void xhci_device_ctrl_done(struct usbd_xfer *);

static usbd_status xhci_device_isoc_transfer(struct usbd_xfer *);
static usbd_status xhci_device_isoc_enter(struct usbd_xfer *);
static void xhci_device_isoc_abort(struct usbd_xfer *);
static void xhci_device_isoc_close(struct usbd_pipe *);
static void xhci_device_isoc_done(struct usbd_xfer *);

static usbd_status xhci_device_intr_transfer(struct usbd_xfer *);
static usbd_status xhci_device_intr_start(struct usbd_xfer *);
static void xhci_device_intr_abort(struct usbd_xfer *);
static void xhci_device_intr_close(struct usbd_pipe *);
static void xhci_device_intr_done(struct usbd_xfer *);

static usbd_status xhci_device_bulk_transfer(struct usbd_xfer *);
static usbd_status xhci_device_bulk_start(struct usbd_xfer *);
static void xhci_device_bulk_abort(struct usbd_xfer *);
static void xhci_device_bulk_close(struct usbd_pipe *);
static void xhci_device_bulk_done(struct usbd_xfer *);

static const struct usbd_bus_methods xhci_bus_methods = {
	.ubm_open = xhci_open,
	.ubm_softint = xhci_softintr,
	.ubm_dopoll = xhci_poll,
	.ubm_allocx = xhci_allocx,
	.ubm_freex = xhci_freex,
	.ubm_abortx = xhci_abortx,
	.ubm_dying = xhci_dying,
	.ubm_getlock = xhci_get_lock,
	.ubm_newdev = xhci_new_device,
	.ubm_rhctrl = xhci_roothub_ctrl,
};

static const struct usbd_pipe_methods xhci_root_intr_methods = {
	.upm_transfer = xhci_root_intr_transfer,
	.upm_start = xhci_root_intr_start,
	.upm_abort = xhci_root_intr_abort,
	.upm_close = xhci_root_intr_close,
	.upm_cleartoggle = xhci_noop,
	.upm_done = xhci_root_intr_done,
};


static const struct usbd_pipe_methods xhci_device_ctrl_methods = {
	.upm_transfer = xhci_device_ctrl_transfer,
	.upm_start = xhci_device_ctrl_start,
	.upm_abort = xhci_device_ctrl_abort,
	.upm_close = xhci_device_ctrl_close,
	.upm_cleartoggle = xhci_noop,
	.upm_done = xhci_device_ctrl_done,
};
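
/*
 * Note: there is no .upm_start for isochronous pipes below; isoc xfers are
 * queued directly from xhci_device_isoc_transfer (via
 * xhci_device_isoc_enter) rather than through a start method.
 */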
static const struct usbd_pipe_methods xhci_device_isoc_methods = {
	.upm_transfer = xhci_device_isoc_transfer,
	.upm_abort = xhci_device_isoc_abort,
	.upm_close = xhci_device_isoc_close,
	.upm_cleartoggle = xhci_noop,
	.upm_done = xhci_device_isoc_done,
};

static const struct usbd_pipe_methods xhci_device_bulk_methods = {
	.upm_transfer = xhci_device_bulk_transfer,
	.upm_start = xhci_device_bulk_start,
	.upm_abort = xhci_device_bulk_abort,
	.upm_close = xhci_device_bulk_close,
	.upm_cleartoggle = xhci_noop,
	.upm_done = xhci_device_bulk_done,
};

static const struct usbd_pipe_methods xhci_device_intr_methods = {
	.upm_transfer = xhci_device_intr_transfer,
	.upm_start = xhci_device_intr_start,
	.upm_abort = xhci_device_intr_abort,
	.upm_close = xhci_device_intr_close,
	.upm_cleartoggle = xhci_noop,
	.upm_done = xhci_device_intr_done,
};

static inline uint32_t
xhci_read_1(const struct xhci_softc * const sc, bus_size_t offset)
{
	return bus_space_read_1(sc->sc_iot, sc->sc_ioh, offset);
}

static inline uint32_t
xhci_read_2(const struct xhci_softc * const sc, bus_size_t offset)
{
	return bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset);
}

static inline uint32_t
xhci_read_4(const struct xhci_softc * const sc, bus_size_t offset)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset);
}

static inline void
xhci_write_1(const struct xhci_softc * const sc, bus_size_t offset,
    uint32_t value)
{
	bus_space_write_1(sc->sc_iot, sc->sc_ioh, offset, value);
}

#if 0 /* unused */
static inline void
xhci_write_4(const struct xhci_softc * const sc, bus_size_t offset,
    uint32_t value)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value);
}
#endif /* unused */

static inline uint32_t
xhci_cap_read_4(const struct xhci_softc * const sc, bus_size_t offset)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_cbh, offset);
}

static inline uint32_t
xhci_op_read_4(const struct xhci_softc * const sc, bus_size_t offset)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
}

static inline void
xhci_op_write_4(const struct xhci_softc * const sc, bus_size_t offset,
    uint32_t value)
{
	bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value);
}

static inline uint64_t
xhci_op_read_8(const struct xhci_softc * const sc, bus_size_t offset)
{
	uint64_t value;

#ifdef XHCI_USE_BUS_SPACE_8
	value = bus_space_read_8(sc->sc_iot, sc->sc_obh, offset);
#else
	value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset);
	value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_obh,
	    offset + 4) << 32;
#endif

	return value;
}

static inline void
xhci_op_write_8(const struct xhci_softc * const sc, bus_size_t offset,
    uint64_t value)
{
#ifdef XHCI_USE_BUS_SPACE_8
	bus_space_write_8(sc->sc_iot, sc->sc_obh, offset, value);
#else
	bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 0,
	    (value >> 0) & 0xffffffff);
	bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 4,
	    (value >> 32) & 0xffffffff);
#endif
}

static inline uint32_t
xhci_rt_read_4(const struct xhci_softc * const sc, bus_size_t offset)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
}

static inline void
xhci_rt_write_4(const struct xhci_softc * const sc, bus_size_t offset,
    uint32_t value)
{
	bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value);
}

static inline uint64_t
xhci_rt_read_8(const struct xhci_softc * const sc, bus_size_t offset)
{
	uint64_t value;

#ifdef XHCI_USE_BUS_SPACE_8
	value = bus_space_read_8(sc->sc_iot, sc->sc_rbh, offset);
#else
	value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset);
	value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_rbh,
	    offset + 4) << 32;
#endif

	return value;
}

static inline void
xhci_rt_write_8(const struct xhci_softc * const sc, bus_size_t offset,
    uint64_t value)
{
#ifdef XHCI_USE_BUS_SPACE_8
	bus_space_write_8(sc->sc_iot, sc->sc_rbh, offset, value);
#else
	bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 0,
	    (value >> 0) & 0xffffffff);
	bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 4,
	    (value >> 32) & 0xffffffff);
#endif
}

#if 0 /* unused */
static inline uint32_t
xhci_db_read_4(const struct xhci_softc * const sc, bus_size_t offset)
{
	return bus_space_read_4(sc->sc_iot, sc->sc_dbh, offset);
}
#endif /* unused */

static inline void
xhci_db_write_4(const struct xhci_softc * const sc, bus_size_t offset,
    uint32_t value)
{
	bus_space_write_4(sc->sc_iot, sc->sc_dbh, offset, value);
}

/* --- */
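
/*
 * The endpoint descriptor helpers below map a usb_endpoint_descriptor_t to
 * the xHCI endpoint type and Device Context Index (DCI).  As a worked
 * example of the DCI rule (xHCI 1.0 section 4.5.1): DCI = endpoint address
 * * 2, plus 1 for IN (and for the control endpoint), so EP0 is DCI 1, a
 * bulk OUT endpoint 0x02 is DCI 4, and a bulk IN endpoint 0x81 is DCI 3.
 */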

static inline uint8_t
xhci_ep_get_type(usb_endpoint_descriptor_t * const ed)
{
	u_int eptype = 0;

	switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
	case UE_CONTROL:
		eptype = 0x0;
		break;
	case UE_ISOCHRONOUS:
		eptype = 0x1;
		break;
	case UE_BULK:
		eptype = 0x2;
		break;
	case UE_INTERRUPT:
		eptype = 0x3;
		break;
	}

	if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
	    (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
		return eptype | 0x4;
	else
		return eptype;
}

static u_int
xhci_ep_get_dci(usb_endpoint_descriptor_t * const ed)
{
	/* xHCI 1.0 section 4.5.1 */
	u_int epaddr = UE_GET_ADDR(ed->bEndpointAddress);
	u_int in = 0;

	if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) ||
	    (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN))
		in = 1;

	return epaddr * 2 + in;
}

static inline u_int
xhci_dci_to_ici(const u_int i)
{
	return i + 1;
}

static inline void *
xhci_slot_get_dcv(struct xhci_softc * const sc, struct xhci_slot * const xs,
    const u_int dci)
{
	return KERNADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
}

#if 0 /* unused */
static inline bus_addr_t
xhci_slot_get_dcp(struct xhci_softc * const sc, struct xhci_slot * const xs,
    const u_int dci)
{
	return DMAADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci);
}
#endif /* unused */

static inline void *
xhci_slot_get_icv(struct xhci_softc * const sc, struct xhci_slot * const xs,
    const u_int ici)
{
	return KERNADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
}

static inline bus_addr_t
xhci_slot_get_icp(struct xhci_softc * const sc, struct xhci_slot * const xs,
    const u_int ici)
{
	return DMAADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici);
}

static inline struct xhci_trb *
xhci_ring_trbv(struct xhci_ring * const xr, u_int idx)
{
	return KERNADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
}

static inline bus_addr_t
xhci_ring_trbp(struct xhci_ring * const xr, u_int idx)
{
	return DMAADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx);
}

static inline void
xhci_xfer_put_trb(struct xhci_xfer * const xx, u_int idx,
    uint64_t parameter, uint32_t status, uint32_t control)
{
	KASSERTMSG(idx < xx->xx_ntrb, "idx=%u xx_ntrb=%u", idx, xx->xx_ntrb);
	xx->xx_trb[idx].trb_0 = parameter;
	xx->xx_trb[idx].trb_2 = status;
	xx->xx_trb[idx].trb_3 = control;
}

static inline void
xhci_trb_put(struct xhci_trb * const trb, uint64_t parameter, uint32_t status,
    uint32_t control)
{
	trb->trb_0 = htole64(parameter);
	trb->trb_2 = htole32(status);
	trb->trb_3 = htole32(control);
}
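
/*
 * Translate the 64-bit TRB pointer reported in an event TRB back into an
 * index on the given ring; returns nonzero if the pointer does not fall on
 * a TRB boundary within the ring.
 */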
static int
xhci_trb_get_idx(struct xhci_ring *xr, uint64_t trb_0, int *idx)
{
	/* base address of TRBs */
	bus_addr_t trbp = xhci_ring_trbp(xr, 0);

	/* trb_0 range sanity check */
	if (trb_0 == 0 || trb_0 < trbp ||
	    (trb_0 - trbp) % sizeof(struct xhci_trb) != 0 ||
	    (trb_0 - trbp) / sizeof(struct xhci_trb) >= xr->xr_ntrb) {
		return 1;
	}
	*idx = (trb_0 - trbp) / sizeof(struct xhci_trb);
	return 0;
}

static unsigned int
xhci_get_epstate(struct xhci_softc * const sc, struct xhci_slot * const xs,
    u_int dci)
{
	uint32_t *cp;

	usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
	cp = xhci_slot_get_dcv(sc, xs, dci);
	return XHCI_EPCTX_0_EPSTATE_GET(le32toh(cp[0]));
}

static inline unsigned int
xhci_ctlrport2bus(struct xhci_softc * const sc, unsigned int ctlrport)
{
	const unsigned int port = ctlrport - 1;
	const uint8_t bit = __BIT(port % NBBY);

	return __SHIFTOUT(sc->sc_ctlrportbus[port / NBBY], bit);
}

/*
 * Return the roothub port for a controller port.  Both are 1..n.
 */
static inline unsigned int
xhci_ctlrport2rhport(struct xhci_softc * const sc, unsigned int ctrlport)
{

	return sc->sc_ctlrportmap[ctrlport - 1];
}

/*
 * Return the controller port for a bus roothub port.  Both are 1..n.
 */
static inline unsigned int
xhci_rhport2ctlrport(struct xhci_softc * const sc, unsigned int bn,
    unsigned int rhport)
{

	return sc->sc_rhportmap[bn][rhport - 1];
}

/* --- */

void
xhci_childdet(device_t self, device_t child)
{
	struct xhci_softc * const sc = device_private(self);

	mutex_enter(&sc->sc_intr_lock);
	KASSERT((sc->sc_child == child) || (sc->sc_child2 == child));
	if (child == sc->sc_child2)
		sc->sc_child2 = NULL;
	else if (child == sc->sc_child)
		sc->sc_child = NULL;
	mutex_exit(&sc->sc_intr_lock);
}

int
xhci_detach(struct xhci_softc *sc, int flags)
{
	int rv = 0;

	if (sc->sc_child2 != NULL) {
		rv = config_detach(sc->sc_child2, flags);
		if (rv != 0)
			return rv;
		KASSERT(sc->sc_child2 == NULL);
	}

	if (sc->sc_child != NULL) {
		rv = config_detach(sc->sc_child, flags);
		if (rv != 0)
			return rv;
		KASSERT(sc->sc_child == NULL);
	}

	/* XXX unconfigure/free slots */

	/* verify: */
	xhci_rt_write_4(sc, XHCI_IMAN(0), 0);
	xhci_op_write_4(sc, XHCI_USBCMD, 0);
	/* do we need to wait for stop? */

	xhci_op_write_8(sc, XHCI_CRCR, 0);
	xhci_ring_free(sc, &sc->sc_cr);
	cv_destroy(&sc->sc_command_cv);
	cv_destroy(&sc->sc_cmdbusy_cv);

	xhci_rt_write_4(sc, XHCI_ERSTSZ(0), 0);
	xhci_rt_write_8(sc, XHCI_ERSTBA(0), 0);
	xhci_rt_write_8(sc, XHCI_ERDP(0), 0 | XHCI_ERDP_BUSY);
	xhci_ring_free(sc, &sc->sc_er);

	usb_freemem(&sc->sc_eventst_dma);

	xhci_op_write_8(sc, XHCI_DCBAAP, 0);
	usb_freemem(&sc->sc_dcbaa_dma);

	kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * sc->sc_maxslots);

	kmem_free(sc->sc_ctlrportbus,
	    howmany(sc->sc_maxports * sizeof(uint8_t), NBBY));
	kmem_free(sc->sc_ctlrportmap, sc->sc_maxports * sizeof(int));

	for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
		kmem_free(sc->sc_rhportmap[j], sc->sc_maxports * sizeof(int));
	}

	mutex_destroy(&sc->sc_rhlock);
	mutex_destroy(&sc->sc_lock);
	mutex_destroy(&sc->sc_intr_lock);

	pool_cache_destroy(sc->sc_xferpool);

	return rv;
}

int
xhci_activate(device_t self, enum devact act)
{
	struct xhci_softc * const sc = device_private(self);

	switch (act) {
	case DVACT_DEACTIVATE:
		sc->sc_dying = true;
		return 0;
	default:
		return EOPNOTSUPP;
	}
}

bool
xhci_suspend(device_t self, const pmf_qual_t *qual)
{
	struct xhci_softc * const sc = device_private(self);
	size_t i, j, bn, dci;
	int port;
	uint32_t v;
	usbd_status err;
	bool ok = false;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	/*
	 * Block issuance of new commands, and wait for all pending
	 * commands to complete.
	 */
	mutex_enter(&sc->sc_lock);
	KASSERT(sc->sc_suspender == NULL);
	sc->sc_suspender = curlwp;
	while (sc->sc_command_addr != 0)
		cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock);
	mutex_exit(&sc->sc_lock);

	/*
	 * Block roothub xfers which might touch portsc registers until
	 * we're done suspending.
	 */
	mutex_enter(&sc->sc_rhlock);

	/*
	 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
	 * xHCI Power Management, p. 342
	 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=342
	 */

	/*
	 * `1. Stop all USB activity by issuing Stop Endpoint Commands
	 *     for Busy endpoints in the Running state.  If the Force
	 *     Save Context Capability (FSC = ``0'') is not supported,
	 *     then Stop Endpoint Commands shall be issued for all idle
	 *     endpoints in the Running state as well.  The Stop
	 *     Endpoint Command causes the xHC to update the respective
	 *     Endpoint or Stream Contexts in system memory, e.g. the
	 *     TR Dequeue Pointer, DCS, etc. fields.  Refer to
	 *     Implementation Note "0".'
	 */
	for (i = 0; i < sc->sc_maxslots; i++) {
		struct xhci_slot *xs = &sc->sc_slots[i];

		/* Skip if the slot is not in use. */
		if (xs->xs_idx == 0)
			continue;

		for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
			/* Skip if the endpoint is not Running. */
			/* XXX What about Busy? */
			if (xhci_get_epstate(sc, xs, dci) !=
			    XHCI_EPSTATE_RUNNING)
				continue;

			/* Stop endpoint. */
			mutex_enter(&sc->sc_lock);
			err = xhci_stop_endpoint_cmd(sc, xs, dci,
			    XHCI_TRB_3_SUSP_EP_BIT);
			mutex_exit(&sc->sc_lock);
			if (err) {
				device_printf(self, "failed to stop endpoint"
				    " slot %zu dci %zu err %d\n",
				    i, dci, err);
				goto out;
			}
		}
	}

	/*
	 * Next, suspend all the ports:
	 *
	 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.15:
	 * Suspend-Resume, pp. 276-283
	 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=276
	 */
	for (bn = 0; bn < 2; bn++) {
		for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
			/* 4.15.1: Port Suspend. */
			port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));

			/*
			 * `System software places individual ports
			 * into suspend mode by writing a ``3'' into
			 * the appropriate PORTSC register Port Link
			 * State (PLS) field (refer to Section 5.4.8).
			 * Software should only set the PLS field to
			 * ``3'' when the port is in the Enabled
			 * state.'
			 *
			 * `Software should not attempt to suspend a
			 * port unless the port reports that it is in
			 * the enabled (PED = ``1''; PLS < ``3'')
			 * state (refer to Section 5.4.8 for more
			 * information about PED and PLS).'
			 */
			v = xhci_op_read_4(sc, port);
			if (((v & XHCI_PS_PED) == 0) ||
			    XHCI_PS_PLS_GET(v) >= XHCI_PS_PLS_U3)
				continue;
			v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
			v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU3);
			xhci_op_write_4(sc, port, v);

			/*
			 * `When the PLS field is written with U3
			 * (``3''), the status of the PLS bit will not
			 * change to the target U state U3 until the
			 * suspend signaling has completed to the
			 * attached device (which may be as long as
			 * 10ms.).'
			 *
			 * `Software is required to wait for U3
			 * transitions to complete before it puts the
			 * xHC into a low power state, and before
			 * resuming the port.'
			 *
			 * XXX Take advantage of the technique to
			 * reduce polling on host controllers that
			 * support the U3C capability.
			 */
			for (j = 0; j < XHCI_WAIT_PLS_U3; j++) {
				v = xhci_op_read_4(sc, port);
				if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U3)
					break;
				usb_delay_ms(&sc->sc_bus, 1);
			}
			if (j == XHCI_WAIT_PLS_U3) {
				device_printf(self,
				    "suspend timeout on bus %zu port %zu\n",
				    bn, i);
				goto out;
			}
		}
	}

	/*
	 * `2. Ensure that the Command Ring is in the Stopped state
	 *     (CRR = ``0'') or Idle (i.e. the Command Transfer Ring is
	 *     empty), and all Command Completion Events associated
	 *     with them have been received.'
	 *
	 * XXX
	 */

	/* `3. Stop the controller by setting Run/Stop (R/S) = ``0''.' */
	xhci_op_write_4(sc, XHCI_USBCMD,
	    xhci_op_read_4(sc, XHCI_USBCMD) & ~XHCI_CMD_RS);

	/*
	 * `4. Read the Operational Runtime, and VTIO registers in the
	 *     following order: USBCMD, DNCTRL, DCBAAP, CONFIG, ERSTSZ,
	 *     ERSTBA, ERDP, IMAN, IMOD, and VTIO and save their
	 *     state.'
	 *
	 * (We don't use VTIO here (XXX for now?).)
	 */
	sc->sc_regs.usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
	sc->sc_regs.dnctrl = xhci_op_read_4(sc, XHCI_DNCTRL);
	sc->sc_regs.dcbaap = xhci_op_read_8(sc, XHCI_DCBAAP);
	sc->sc_regs.config = xhci_op_read_4(sc, XHCI_CONFIG);
	sc->sc_regs.erstsz0 = xhci_rt_read_4(sc, XHCI_ERSTSZ(0));
	sc->sc_regs.erstba0 = xhci_rt_read_8(sc, XHCI_ERSTBA(0));
	sc->sc_regs.erdp0 = xhci_rt_read_8(sc, XHCI_ERDP(0));
	sc->sc_regs.iman0 = xhci_rt_read_4(sc, XHCI_IMAN(0));
	sc->sc_regs.imod0 = xhci_rt_read_4(sc, XHCI_IMOD(0));

	/*
	 * `5. Set the Controller Save State (CSS) flag in the USBCMD
	 *     register (5.4.1)...'
	 */
	xhci_op_write_4(sc, XHCI_USBCMD,
	    xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CSS);

	/*
	 * `...and wait for the Save State Status (SSS) flag in the
	 * USBSTS register (5.4.2) to transition to ``0''.'
	 */
	for (i = 0; i < XHCI_WAIT_SSS; i++) {
		if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SSS) == 0)
			break;
		usb_delay_ms(&sc->sc_bus, 1);
	}
	if (i >= XHCI_WAIT_SSS) {
		device_printf(self, "suspend timeout, USBSTS.SSS\n");
		/*
		 * Just optimistically go on and check SRE anyway --
		 * what's the worst that could happen?
		 */
	}

	/*
	 * `Note: After a Save or Restore operation completes, the
	 * Save/Restore Error (SRE) flag in the USBSTS register should
	 * be checked to ensure that the operation completed
	 * successfully.'
	 */
	if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
		device_printf(self, "suspend error, USBSTS.SRE\n");
		goto out;
	}

	/* Success! */
	ok = true;

out:	mutex_exit(&sc->sc_rhlock);
	if (!ok) {
		/*
		 * If suspend failed, stop holding up command issuance
		 * and make it fail instead.
		 */
		mutex_enter(&sc->sc_lock);
		KASSERT(sc->sc_suspender == curlwp);
		sc->sc_suspender = NULL;
		sc->sc_suspendresume_failed = true;
		cv_broadcast(&sc->sc_cmdbusy_cv);
		mutex_exit(&sc->sc_lock);
	}
	return ok;
}

bool
xhci_resume(device_t self, const pmf_qual_t *qual)
{
	struct xhci_softc * const sc = device_private(self);
	size_t i, j, bn, dci;
	int port;
	uint32_t v;
	bool ok = false;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	/*
	 * If resume had previously failed, just try again.  Can't make
	 * things worse, probably.
	 */
	mutex_enter(&sc->sc_lock);
	if (sc->sc_suspendresume_failed) {
		KASSERT(sc->sc_suspender == NULL);
		sc->sc_suspender = curlwp;
		sc->sc_suspendresume_failed = false;
	}
	KASSERT(sc->sc_suspender);
	mutex_exit(&sc->sc_lock);

	/*
	 * Block roothub xfers which might touch portsc registers until
	 * we're done resuming.
	 */
	mutex_enter(&sc->sc_rhlock);

	/*
	 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2:
	 * xHCI Power Management, p. 343
	 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=343
	 */

	/*
	 * `4. Restore the Operational Runtime, and VTIO registers with
	 *     their previously saved state in the following order:
	 *     DNCTRL, DCBAAP, CONFIG, ERSTSZ, ERSTBA, ERDP, IMAN,
	 *     IMOD, and VTIO.'
	 *
	 * (We don't use VTIO here (for now?).)
	 */
	xhci_op_write_4(sc, XHCI_USBCMD, sc->sc_regs.usbcmd);
	xhci_op_write_4(sc, XHCI_DNCTRL, sc->sc_regs.dnctrl);
	xhci_op_write_8(sc, XHCI_DCBAAP, sc->sc_regs.dcbaap);
	xhci_op_write_4(sc, XHCI_CONFIG, sc->sc_regs.config);
	xhci_rt_write_4(sc, XHCI_ERSTSZ(0), sc->sc_regs.erstsz0);
	xhci_rt_write_8(sc, XHCI_ERSTBA(0), sc->sc_regs.erstba0);
	xhci_rt_write_8(sc, XHCI_ERDP(0), sc->sc_regs.erdp0);
	xhci_rt_write_4(sc, XHCI_IMAN(0), sc->sc_regs.iman0);
	xhci_rt_write_4(sc, XHCI_IMOD(0), sc->sc_regs.imod0);

	memset(&sc->sc_regs, 0, sizeof(sc->sc_regs)); /* paranoia */

	/*
	 * `5. Set the Controller Restore State (CRS) flag in the
	 *     USBCMD register (5.4.1) to ``1''...'
	 */
	xhci_op_write_4(sc, XHCI_USBCMD,
	    xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CRS);

	/*
	 * `...and wait for the Restore State Status (RSS) in the
	 * USBSTS register (5.4.2) to transition to ``0''.'
	 */
	for (i = 0; i < XHCI_WAIT_RSS; i++) {
		if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_RSS) == 0)
			break;
		usb_delay_ms(&sc->sc_bus, 1);
	}
	if (i >= XHCI_WAIT_RSS) {
		device_printf(self, "resume timeout, USBSTS.RSS\n");
		goto out;
	}

	/*
	 * `6. Reinitialize the Command Ring, i.e. so its Cycle bits
	 *     are consistent with the RCS values to be written to the
	 *     CRCR.'
	 *
	 * XXX Hope just zeroing it is good enough!
	 */
	xhci_host_dequeue(sc->sc_cr);

	/*
	 * `7. Write the CRCR with the address and RCS value of the
	 *     reinitialized Command Ring.  Note that this write will
	 *     cause the Command Ring to restart at the address
	 *     specified by the CRCR.'
	 */
	xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
	    sc->sc_cr->xr_cs);

	/*
	 * `8. Enable the controller by setting Run/Stop (R/S) =
	 *     ``1''.'
	 */
	xhci_op_write_4(sc, XHCI_USBCMD,
	    xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_RS);

	/*
	 * `9. Software shall walk the USB topology and initialize each
	 *     of the xHC PORTSC, PORTPMSC, and PORTLI registers, and
	 *     external hub ports attached to USB devices.'
	 *
	 * This follows the procedure in 4.15 `Suspend-Resume', 4.15.2
	 * `Port Resume', 4.15.2.2 `Host Initiated'.
	 *
	 * XXX We should maybe batch up initiating the state
	 * transitions, and then wait for them to complete all at once.
	 */
	for (bn = 0; bn < 2; bn++) {
		for (i = 1; i <= sc->sc_rhportcount[bn]; i++) {
			port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i));

			/* `When a port is in the U3 state: ...' */
			v = xhci_op_read_4(sc, port);
			if (XHCI_PS_PLS_GET(v) != XHCI_PS_PLS_U3)
				continue;

			/*
			 * `For a USB2 protocol port, software shall
			 * write a ``15'' (Resume) to the PLS field to
			 * initiate resume signaling.  The port shall
			 * transition to the Resume substate and the
			 * xHC shall transmit the resume signaling
			 * within 1ms (T_URSM).  Software shall ensure
			 * that resume is signaled for at least 20ms
			 * (T_DRSMDN).  Software shall start timing
			 * T_DRSMDN from the write of ``15'' (Resume)
			 * to PLS.'
			 */
			if (bn == 1) {
				KASSERT(sc->sc_bus2.ub_revision == USBREV_2_0);
				v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
				v |= XHCI_PS_LWS;
				v |= XHCI_PS_PLS_SET(XHCI_PS_PLS_SETRESUME);
				xhci_op_write_4(sc, port, v);
				usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);
			} else {
				KASSERT(sc->sc_bus.ub_revision > USBREV_2_0);
			}

			/*
			 * `For a USB3 protocol port [and a USB2
			 * protocol port after transitioning to
			 * Resume], software shall write a ``0'' (U0)
			 * to the PLS field...'
			 */
			v = xhci_op_read_4(sc, port);
			v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR);
			v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU0);
			xhci_op_write_4(sc, port, v);

			for (j = 0; j < XHCI_WAIT_PLS_U0; j++) {
				v = xhci_op_read_4(sc, port);
				if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U0)
					break;
				usb_delay_ms(&sc->sc_bus, 1);
			}
			if (j == XHCI_WAIT_PLS_U0) {
				device_printf(self,
				    "resume timeout on bus %zu port %zu\n",
				    bn, i);
				goto out;
			}
		}
	}

	/*
	 * `10. Restart each of the previously Running endpoints by
	 *      ringing their doorbells.'
	 */
	for (i = 0; i < sc->sc_maxslots; i++) {
		struct xhci_slot *xs = &sc->sc_slots[i];

		/* Skip if the slot is not in use. */
		if (xs->xs_idx == 0)
			continue;

		for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
			/* Skip if the endpoint is not Running. */
			if (xhci_get_epstate(sc, xs, dci) !=
			    XHCI_EPSTATE_RUNNING)
				continue;

			/* Ring the doorbell. */
			xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
		}
	}

	/*
	 * `Note: After a Save or Restore operation completes, the
	 * Save/Restore Error (SRE) flag in the USBSTS register should
	 * be checked to ensure that the operation completed
	 * successfully.'
	 */
	if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) {
		device_printf(self, "resume error, USBSTS.SRE\n");
		goto out;
	}

	/* Success! */
	ok = true;

out:	/*
	 * Resume command issuance.  If the hardware failed to resume,
	 * well, tough -- deadlocking because everything is held up on
	 * the suspension, with no opportunity to detach, isn't better
	 * than timing out waiting for dead hardware.
	 */
	mutex_enter(&sc->sc_lock);
	KASSERT(sc->sc_suspender);
	sc->sc_suspender = NULL;
	sc->sc_suspendresume_failed = !ok;
	cv_broadcast(&sc->sc_cmdbusy_cv);
	mutex_exit(&sc->sc_lock);

	mutex_exit(&sc->sc_rhlock);
	return ok;
}

bool
xhci_shutdown(device_t self, int flags)
{
	return false;
}
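
/*
 * Halt and reset the host controller: wait for CNR (Controller Not Ready)
 * to clear, clear Run/Stop, set HCRST, then wait for the reset and a second
 * CNR clearance to complete.
 */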
static int
xhci_hc_reset(struct xhci_softc * const sc)
{
	uint32_t usbcmd, usbsts;
	int i;

	/* Check controller not ready */
	for (i = 0; i < XHCI_WAIT_CNR; i++) {
		usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
		if ((usbsts & XHCI_STS_CNR) == 0)
			break;
		usb_delay_ms(&sc->sc_bus, 1);
	}
	if (i >= XHCI_WAIT_CNR) {
		aprint_error_dev(sc->sc_dev, "controller not ready timeout\n");
		return EIO;
	}

	/* Halt controller */
	usbcmd = 0;
	xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
	usb_delay_ms(&sc->sc_bus, 1);

	/* Reset controller */
	usbcmd = XHCI_CMD_HCRST;
	xhci_op_write_4(sc, XHCI_USBCMD, usbcmd);
	for (i = 0; i < XHCI_WAIT_HCRST; i++) {
		/*
		 * Wait 1ms first.  Existing Intel xHCI requires 1ms delay to
		 * prevent system hang (Errata).
		 */
		usb_delay_ms(&sc->sc_bus, 1);
		usbcmd = xhci_op_read_4(sc, XHCI_USBCMD);
		if ((usbcmd & XHCI_CMD_HCRST) == 0)
			break;
	}
	if (i >= XHCI_WAIT_HCRST) {
		aprint_error_dev(sc->sc_dev, "host controller reset timeout\n");
		return EIO;
	}

	/* Check controller not ready */
	for (i = 0; i < XHCI_WAIT_CNR; i++) {
		usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
		if ((usbsts & XHCI_STS_CNR) == 0)
			break;
		usb_delay_ms(&sc->sc_bus, 1);
	}
	if (i >= XHCI_WAIT_CNR) {
		aprint_error_dev(sc->sc_dev,
		    "controller not ready timeout after reset\n");
		return EIO;
	}

	return 0;
}

/* 7.2 xHCI Support Protocol Capability */
static void
xhci_id_protocols(struct xhci_softc *sc, bus_size_t ecp)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	/* XXX Cache this lot */

	const uint32_t w0 = xhci_read_4(sc, ecp);
	const uint32_t w4 = xhci_read_4(sc, ecp + 4);
	const uint32_t w8 = xhci_read_4(sc, ecp + 8);
	const uint32_t wc = xhci_read_4(sc, ecp + 0xc);

	aprint_debug_dev(sc->sc_dev,
	    " SP: 0x%08x 0x%08x 0x%08x 0x%08x\n", w0, w4, w8, wc);

	if (w4 != XHCI_XECP_USBID)
		return;

	const int major = XHCI_XECP_SP_W0_MAJOR(w0);
	const int minor = XHCI_XECP_SP_W0_MINOR(w0);
	const uint8_t cpo = XHCI_XECP_SP_W8_CPO(w8);
	const uint8_t cpc = XHCI_XECP_SP_W8_CPC(w8);

	const uint16_t mm = __SHIFTOUT(w0, __BITS(31, 16));
	switch (mm) {
	case 0x0200:
	case 0x0300:
	case 0x0301:
	case 0x0310:
	case 0x0320:
		aprint_debug_dev(sc->sc_dev, " %s ports %d - %d\n",
		    major == 3 ? "ss" : "hs", cpo, cpo + cpc - 1);
		if (major == 3)
			sc->sc_usb3nports += cpo + cpc - 1;
		else
			sc->sc_usb2nports += cpo + cpc - 1;
		break;
	default:
		aprint_error_dev(sc->sc_dev, " unknown major/minor (%d/%d)\n",
		    major, minor);
		return;
	}

	const size_t bus = (major == 3) ? 0 : 1;

	/* Index arrays with 0..n-1 where ports are numbered 1..n */
	for (size_t cp = cpo - 1; cp < cpo + cpc - 1; cp++) {
		if (sc->sc_ctlrportmap[cp] != 0) {
			aprint_error_dev(sc->sc_dev, "controller port %zu "
			    "already assigned", cp);
			continue;
		}

		sc->sc_ctlrportbus[cp / NBBY] |=
		    bus == 0 ? 0 : __BIT(cp % NBBY);

		const size_t rhp = sc->sc_rhportcount[bus]++;

		KASSERTMSG(sc->sc_rhportmap[bus][rhp] == 0,
		    "bus %zu rhp %zu is %d", bus, rhp,
		    sc->sc_rhportmap[bus][rhp]);

		sc->sc_rhportmap[bus][rhp] = cp + 1;
		sc->sc_ctlrportmap[cp] = rhp + 1;
	}
}

/* Process extended capabilities */
static void
xhci_ecp(struct xhci_softc *sc)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	bus_size_t ecp = XHCI_HCC_XECP(sc->sc_hcc) * 4;
	while (ecp != 0) {
		uint32_t ecr = xhci_read_4(sc, ecp);
		aprint_debug_dev(sc->sc_dev, "ECR: 0x%08x\n", ecr);
		switch (XHCI_XECP_ID(ecr)) {
		case XHCI_ID_PROTOCOLS: {
			xhci_id_protocols(sc, ecp);
			break;
		}
		case XHCI_ID_USB_LEGACY: {
			uint8_t bios_sem;

			/* Take host controller ownership from BIOS */
			bios_sem = xhci_read_1(sc, ecp + XHCI_XECP_BIOS_SEM);
			if (bios_sem) {
				/* sets xHCI to be owned by OS */
				xhci_write_1(sc, ecp + XHCI_XECP_OS_SEM, 1);
				aprint_debug_dev(sc->sc_dev,
				    "waiting for BIOS to give up control\n");
				for (int i = 0; i < 5000; i++) {
					bios_sem = xhci_read_1(sc, ecp +
					    XHCI_XECP_BIOS_SEM);
					if (bios_sem == 0)
						break;
					DELAY(1000);
				}
				if (bios_sem) {
					aprint_error_dev(sc->sc_dev,
					    "timed out waiting for BIOS\n");
				}
			}
			break;
		}
		default:
			break;
		}
		ecr = xhci_read_4(sc, ecp);
		if (XHCI_XECP_NEXT(ecr) == 0) {
			ecp = 0;
		} else {
			ecp += XHCI_XECP_NEXT(ecr) * 4;
		}
	}
}

#define XHCI_HCCPREV1_BITS \
	"\177\020"	/* New bitmask */ \
	"f\020\020XECP\0" \
	"f\014\4MAXPSA\0" \
	"b\013CFC\0" \
	"b\012SEC\0" \
	"b\011SBD\0" \
	"b\010FSE\0" \
	"b\7NSS\0" \
	"b\6LTC\0" \
	"b\5LHRC\0" \
	"b\4PIND\0" \
	"b\3PPC\0" \
	"b\2CZC\0" \
	"b\1BNC\0" \
	"b\0AC64\0" \
	"\0"
#define XHCI_HCCV1_x_BITS \
	"\177\020"	/* New bitmask */ \
	"f\020\020XECP\0" \
	"f\014\4MAXPSA\0" \
	"b\013CFC\0" \
	"b\012SEC\0" \
	"b\011SPC\0" \
	"b\010PAE\0" \
	"b\7NSS\0" \
	"b\6LTC\0" \
	"b\5LHRC\0" \
	"b\4PIND\0" \
	"b\3PPC\0" \
	"b\2CSZ\0" \
	"b\1BNC\0" \
	"b\0AC64\0" \
	"\0"

#define XHCI_HCC2_BITS \
	"\177\020"	/* New bitmask */ \
	"b\7ETC_TSC\0" \
	"b\6ETC\0" \
	"b\5CIC\0" \
	"b\4LEC\0" \
	"b\3CTC\0" \
	"b\2FSC\0" \
	"b\1CMC\0" \
	"b\0U3C\0" \
	"\0"
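
/*
 * Enable event interrupts, set the interrupt moderation rate (Intel parts
 * get a non-zero IMOD), and start the controller by setting Run/Stop.
 * Called at the end of xhci_init unless the XHCI_DEFERRED_START quirk
 * defers it.
 */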
void
xhci_start(struct xhci_softc *sc)
{
	xhci_rt_write_4(sc, XHCI_IMAN(0), XHCI_IMAN_INTR_ENA);
	if ((sc->sc_quirks & XHCI_QUIRK_INTEL) != 0)
		/* Intel xhci needs interrupt rate moderated. */
		xhci_rt_write_4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT_LP);
	else
		xhci_rt_write_4(sc, XHCI_IMOD(0), 0);
	aprint_debug_dev(sc->sc_dev, "current IMOD %u\n",
	    xhci_rt_read_4(sc, XHCI_IMOD(0)));

	/* Go! */
	xhci_op_write_4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);
	aprint_debug_dev(sc->sc_dev, "USBCMD 0x%08"PRIx32"\n",
	    xhci_op_read_4(sc, XHCI_USBCMD));
}

int
xhci_init(struct xhci_softc *sc)
{
	bus_size_t bsz;
	uint32_t hcs1, hcs2, hcs3, dboff, rtsoff;
	uint32_t pagesize, config;
	int i = 0;
	uint16_t hciversion;
	uint8_t caplength;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	/* Set up the bus struct for the usb 3 and usb 2 buses */
	sc->sc_bus.ub_methods = &xhci_bus_methods;
	sc->sc_bus.ub_pipesize = sizeof(struct xhci_pipe);
	sc->sc_bus.ub_usedma = true;
	sc->sc_bus.ub_hcpriv = sc;

	sc->sc_bus2.ub_methods = &xhci_bus_methods;
	sc->sc_bus2.ub_pipesize = sizeof(struct xhci_pipe);
	sc->sc_bus2.ub_revision = USBREV_2_0;
	sc->sc_bus2.ub_usedma = true;
	sc->sc_bus2.ub_hcpriv = sc;
	sc->sc_bus2.ub_dmatag = sc->sc_bus.ub_dmatag;

	caplength = xhci_read_1(sc, XHCI_CAPLENGTH);
	hciversion = xhci_read_2(sc, XHCI_HCIVERSION);

	if (hciversion < XHCI_HCIVERSION_0_96 ||
	    hciversion >= 0x0200) {
		aprint_normal_dev(sc->sc_dev,
		    "xHCI version %x.%x not known to be supported\n",
		    (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
	} else {
		aprint_verbose_dev(sc->sc_dev, "xHCI version %x.%x\n",
		    (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff);
	}

	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0, caplength,
	    &sc->sc_cbh) != 0) {
		aprint_error_dev(sc->sc_dev, "capability subregion failure\n");
		return ENOMEM;
	}

	hcs1 = xhci_cap_read_4(sc, XHCI_HCSPARAMS1);
	sc->sc_maxslots = XHCI_HCS1_MAXSLOTS(hcs1);
	sc->sc_maxintrs = XHCI_HCS1_MAXINTRS(hcs1);
	sc->sc_maxports = XHCI_HCS1_MAXPORTS(hcs1);
	hcs2 = xhci_cap_read_4(sc, XHCI_HCSPARAMS2);
	hcs3 = xhci_cap_read_4(sc, XHCI_HCSPARAMS3);
	aprint_debug_dev(sc->sc_dev,
	    "hcs1=%"PRIx32" hcs2=%"PRIx32" hcs3=%"PRIx32"\n", hcs1, hcs2, hcs3);

	sc->sc_hcc = xhci_cap_read_4(sc, XHCI_HCCPARAMS);
	sc->sc_ctxsz = XHCI_HCC_CSZ(sc->sc_hcc) ? 64 : 32;

	char sbuf[128];
	if (hciversion < XHCI_HCIVERSION_1_0)
		snprintb(sbuf, sizeof(sbuf), XHCI_HCCPREV1_BITS, sc->sc_hcc);
	else
		snprintb(sbuf, sizeof(sbuf), XHCI_HCCV1_x_BITS, sc->sc_hcc);
	aprint_debug_dev(sc->sc_dev, "hcc=%s\n", sbuf);
	aprint_debug_dev(sc->sc_dev, "xECP %" __PRIxBITS "\n",
	    XHCI_HCC_XECP(sc->sc_hcc) * 4);
	if (hciversion >= XHCI_HCIVERSION_1_1) {
		sc->sc_hcc2 = xhci_cap_read_4(sc, XHCI_HCCPARAMS2);
		snprintb(sbuf, sizeof(sbuf), XHCI_HCC2_BITS, sc->sc_hcc2);
		aprint_debug_dev(sc->sc_dev, "hcc2=%s\n", sbuf);
	}

	/* default all ports to bus 0, i.e. usb 3 */
	sc->sc_ctlrportbus = kmem_zalloc(
	    howmany(sc->sc_maxports * sizeof(uint8_t), NBBY), KM_SLEEP);
	sc->sc_ctlrportmap =
	    kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);

	/* controller port to bus roothub port map */
	for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) {
		sc->sc_rhportmap[j] =
		    kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP);
	}

	/*
	 * Process all Extended Capabilities
	 */
	xhci_ecp(sc);

	bsz = XHCI_PORTSC(sc->sc_maxports);
	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, caplength, bsz,
	    &sc->sc_obh) != 0) {
		aprint_error_dev(sc->sc_dev, "operational subregion failure\n");
		return ENOMEM;
	}

	dboff = xhci_cap_read_4(sc, XHCI_DBOFF);
	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, dboff,
	    sc->sc_maxslots * 4, &sc->sc_dbh) != 0) {
		aprint_error_dev(sc->sc_dev, "doorbell subregion failure\n");
		return ENOMEM;
	}

	rtsoff = xhci_cap_read_4(sc, XHCI_RTSOFF);
	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, rtsoff,
	    sc->sc_maxintrs * 0x20, &sc->sc_rbh) != 0) {
		aprint_error_dev(sc->sc_dev, "runtime subregion failure\n");
		return ENOMEM;
	}

	int rv;
	rv = xhci_hc_reset(sc);
	if (rv != 0) {
		return rv;
	}

	if (sc->sc_vendor_init)
		sc->sc_vendor_init(sc);

	pagesize = xhci_op_read_4(sc, XHCI_PAGESIZE);
	aprint_debug_dev(sc->sc_dev, "PAGESIZE 0x%08x\n", pagesize);
	pagesize = ffs(pagesize);
	if (pagesize == 0) {
		aprint_error_dev(sc->sc_dev, "pagesize is 0\n");
		return EIO;
	}
	sc->sc_pgsz = 1 << (12 + (pagesize - 1));
	aprint_debug_dev(sc->sc_dev, "sc_pgsz 0x%08x\n", (uint32_t)sc->sc_pgsz);
	aprint_debug_dev(sc->sc_dev, "sc_maxslots 0x%08x\n",
	    (uint32_t)sc->sc_maxslots);
	aprint_debug_dev(sc->sc_dev, "sc_maxports %d\n", sc->sc_maxports);

	int err;
	sc->sc_maxspbuf = XHCI_HCS2_MAXSPBUF(hcs2);
	aprint_debug_dev(sc->sc_dev, "sc_maxspbuf %d\n", sc->sc_maxspbuf);
	if (sc->sc_maxspbuf != 0) {
		err = usb_allocmem(sc->sc_bus.ub_dmatag,
		    sizeof(uint64_t) * sc->sc_maxspbuf, sizeof(uint64_t),
		    USBMALLOC_COHERENT | USBMALLOC_ZERO,
		    &sc->sc_spbufarray_dma);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "spbufarray init fail, err %d\n", err);
			return ENOMEM;
		}

		sc->sc_spbuf_dma = kmem_zalloc(sizeof(*sc->sc_spbuf_dma) *
		    sc->sc_maxspbuf, KM_SLEEP);
		uint64_t *spbufarray = KERNADDR(&sc->sc_spbufarray_dma, 0);
		for (i = 0; i < sc->sc_maxspbuf; i++) {
			usb_dma_t * const dma = &sc->sc_spbuf_dma[i];
			/* allocate contexts */
			err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz,
			    sc->sc_pgsz, USBMALLOC_COHERENT | USBMALLOC_ZERO,
			    dma);
			if (err) {
				aprint_error_dev(sc->sc_dev,
				    "spbufarray_dma init fail, err %d\n", err);
				rv = ENOMEM;
				goto bad1;
			}
			spbufarray[i] = htole64(DMAADDR(dma, 0));
			usb_syncmem(dma, 0, sc->sc_pgsz,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}

		usb_syncmem(&sc->sc_spbufarray_dma, 0,
		    sizeof(uint64_t) * sc->sc_maxspbuf, BUS_DMASYNC_PREWRITE);
	}

	config = xhci_op_read_4(sc, XHCI_CONFIG);
	config &= ~0xFF;
	config |= sc->sc_maxslots & 0xFF;
	xhci_op_write_4(sc, XHCI_CONFIG, config);

	err = xhci_ring_init(sc, &sc->sc_cr, XHCI_COMMAND_RING_TRBS,
	    XHCI_COMMAND_RING_SEGMENTS_ALIGN);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "command ring init fail, err %d\n", err);
		rv = ENOMEM;
		goto bad1;
	}

	err = xhci_ring_init(sc, &sc->sc_er, XHCI_EVENT_RING_TRBS,
	    XHCI_EVENT_RING_SEGMENTS_ALIGN);
	if (err) {
		aprint_error_dev(sc->sc_dev, "event ring init fail, err %d\n",
		    err);
		rv = ENOMEM;
		goto bad2;
	}

	usb_dma_t *dma;
	size_t size;
	size_t align;

	dma = &sc->sc_eventst_dma;
	size = roundup2(XHCI_EVENT_RING_SEGMENTS * XHCI_ERSTE_SIZE,
	    XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN);
	KASSERTMSG(size <= (512 * 1024), "eventst size %zu too large", size);
	align = XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN;
	err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
	    USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
	if (err) {
		aprint_error_dev(sc->sc_dev, "eventst init fail, err %d\n",
		    err);
		rv = ENOMEM;
		goto bad3;
	}

	aprint_debug_dev(sc->sc_dev, "eventst: 0x%016jx %p %zx\n",
	    (uintmax_t)DMAADDR(&sc->sc_eventst_dma, 0),
	    KERNADDR(&sc->sc_eventst_dma, 0),
	    sc->sc_eventst_dma.udma_block->size);

	dma = &sc->sc_dcbaa_dma;
	size = (1 + sc->sc_maxslots) * sizeof(uint64_t);
	KASSERTMSG(size <= 2048, "dcbaa size %zu too large", size);
	align = XHCI_DEVICE_CONTEXT_BASE_ADDRESS_ARRAY_ALIGN;
	err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
	    USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
	if (err) {
		aprint_error_dev(sc->sc_dev, "dcbaa init fail, err %d\n", err);
		rv = ENOMEM;
		goto bad4;
	}
	aprint_debug_dev(sc->sc_dev, "dcbaa: 0x%016jx %p %zx\n",
	    (uintmax_t)DMAADDR(&sc->sc_dcbaa_dma, 0),
	    KERNADDR(&sc->sc_dcbaa_dma, 0),
	    sc->sc_dcbaa_dma.udma_block->size);

	if (sc->sc_maxspbuf != 0) {
		/*
		 * DCBA entry 0 holds the scratchbuf array pointer.
		 */
		*(uint64_t *)KERNADDR(dma, 0) =
		    htole64(DMAADDR(&sc->sc_spbufarray_dma, 0));
		usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE);
	}

	sc->sc_slots = kmem_zalloc(sizeof(*sc->sc_slots) * sc->sc_maxslots,
	    KM_SLEEP);
	if (sc->sc_slots == NULL) {
		aprint_error_dev(sc->sc_dev, "slots init fail, err %d\n", err);
		rv = ENOMEM;
		goto bad;
	}

	sc->sc_xferpool = pool_cache_init(sizeof(struct xhci_xfer), 0, 0, 0,
	    "xhcixfer", NULL, IPL_USB, NULL, NULL, NULL);
	if (sc->sc_xferpool == NULL) {
		aprint_error_dev(sc->sc_dev, "pool_cache init fail, err %d\n",
		    err);
		rv = ENOMEM;
		goto bad;
	}

	cv_init(&sc->sc_command_cv, "xhcicmd");
	cv_init(&sc->sc_cmdbusy_cv, "xhcicmdq");
	mutex_init(&sc->sc_rhlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);
	mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB);

	struct xhci_erste *erst;
	erst = KERNADDR(&sc->sc_eventst_dma, 0);
	erst[0].erste_0 = htole64(xhci_ring_trbp(sc->sc_er, 0));
	erst[0].erste_2 = htole32(sc->sc_er->xr_ntrb);
	erst[0].erste_3 = htole32(0);
	usb_syncmem(&sc->sc_eventst_dma, 0,
	    XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS, BUS_DMASYNC_PREWRITE);

	xhci_rt_write_4(sc, XHCI_ERSTSZ(0), XHCI_EVENT_RING_SEGMENTS);
	xhci_rt_write_8(sc, XHCI_ERSTBA(0), DMAADDR(&sc->sc_eventst_dma, 0));
	xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(sc->sc_er, 0) |
	    XHCI_ERDP_BUSY);

	xhci_op_write_8(sc, XHCI_DCBAAP, DMAADDR(&sc->sc_dcbaa_dma, 0));
	xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) |
	    sc->sc_cr->xr_cs);

	HEXDUMP("eventst", KERNADDR(&sc->sc_eventst_dma, 0),
	    XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS);

	if ((sc->sc_quirks & XHCI_DEFERRED_START) == 0)
		xhci_start(sc);

	return 0;

bad:
	if (sc->sc_xferpool) {
		pool_cache_destroy(sc->sc_xferpool);
		sc->sc_xferpool = NULL;
	}

	if (sc->sc_slots) {
		kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) *
		    sc->sc_maxslots);
		sc->sc_slots = NULL;
	}

	usb_freemem(&sc->sc_dcbaa_dma);
bad4:
	usb_freemem(&sc->sc_eventst_dma);
bad3:
	xhci_ring_free(sc, &sc->sc_er);
bad2:
	xhci_ring_free(sc, &sc->sc_cr);
	i = sc->sc_maxspbuf;
bad1:
	for (int j = 0; j < i; j++)
		usb_freemem(&sc->sc_spbuf_dma[j]);
	usb_freemem(&sc->sc_spbufarray_dma);

	return rv;
}

static inline bool
xhci_polling_p(struct xhci_softc * const sc)
{
	return sc->sc_bus.ub_usepolling || sc->sc_bus2.ub_usepolling;
}

int
xhci_intr(void *v)
{
	struct xhci_softc * const sc = v;
	int ret = 0;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	if (sc == NULL)
		return 0;

	mutex_spin_enter(&sc->sc_intr_lock);

	if (sc->sc_dying || !device_has_power(sc->sc_dev))
		goto done;

	/* If we get an interrupt while polling, then just ignore it. */
	if (xhci_polling_p(sc)) {
#ifdef DIAGNOSTIC
		DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0);
#endif
		goto done;
	}

	ret = xhci_intr1(sc);
	if (ret) {
		KASSERT(sc->sc_child || sc->sc_child2);

		/*
		 * One of child busses could be already detached.  It doesn't
		 * matter on which of the two the softintr is scheduled.
		 */
1734 */ 1735 if (sc->sc_child) 1736 usb_schedsoftintr(&sc->sc_bus); 1737 else 1738 usb_schedsoftintr(&sc->sc_bus2); 1739 } 1740 done: 1741 mutex_spin_exit(&sc->sc_intr_lock); 1742 return ret; 1743 } 1744 1745 int 1746 xhci_intr1(struct xhci_softc * const sc) 1747 { 1748 uint32_t usbsts; 1749 uint32_t iman; 1750 1751 XHCIHIST_FUNC(); 1752 1753 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1754 XHCIHIST_CALLARGS("USBSTS 0x%08jx", usbsts, 0, 0, 0); 1755 if ((usbsts & (XHCI_STS_HSE | XHCI_STS_EINT | XHCI_STS_PCD | 1756 XHCI_STS_HCE)) == 0) { 1757 DPRINTFN(16, "ignored intr not for %jd", 1758 device_unit(sc->sc_dev), 0, 0, 0); 1759 return 0; 1760 } 1761 1762 /* 1763 * Clear EINT and other transient flags, to not misenterpret 1764 * next shared interrupt. Also, to avoid race, EINT must be cleared 1765 * before XHCI_IMAN_INTR_PEND is cleared. 1766 */ 1767 xhci_op_write_4(sc, XHCI_USBSTS, usbsts & ~XHCI_STS_RSVDP0); 1768 1769 #ifdef XHCI_DEBUG 1770 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1771 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0); 1772 #endif 1773 1774 iman = xhci_rt_read_4(sc, XHCI_IMAN(0)); 1775 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0); 1776 iman |= XHCI_IMAN_INTR_PEND; 1777 xhci_rt_write_4(sc, XHCI_IMAN(0), iman); 1778 1779 #ifdef XHCI_DEBUG 1780 iman = xhci_rt_read_4(sc, XHCI_IMAN(0)); 1781 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0); 1782 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1783 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0); 1784 #endif 1785 1786 return 1; 1787 } 1788 1789 /* 1790 * 3 port speed types used in USB stack 1791 * 1792 * usbdi speed 1793 * definition: USB_SPEED_* in usb.h 1794 * They are used in struct usbd_device in USB stack. 1795 * ioctl interface uses these values too. 1796 * port_status speed 1797 * definition: UPS_*_SPEED in usb.h 1798 * They are used in usb_port_status_t and valid only for USB 2.0. 1799 * Speed value is always 0 for Super Speed or more, and dwExtPortStatus 1800 * of usb_port_status_ext_t indicates port speed. 1801 * Note that some 3.0 values overlap with 2.0 values. 1802 * (e.g. 0x200 means UPS_POER_POWER_SS in SS and 1803 * means UPS_LOW_SPEED in HS.) 1804 * port status returned from hub also uses these values. 1805 * On NetBSD UPS_OTHER_SPEED indicates port speed is super speed 1806 * or more. 1807 * xspeed: 1808 * definition: Protocol Speed ID (PSI) (xHCI 1.1 7.2.1) 1809 * They are used in only slot context and PORTSC reg of xhci. 1810 * The difference between usbdi speed and xspeed is 1811 * that FS and LS values are swapped. 1812 */ 1813 1814 /* convert usbdi speed to xspeed */ 1815 static int 1816 xhci_speed2xspeed(int speed) 1817 { 1818 switch (speed) { 1819 case USB_SPEED_LOW: return 2; 1820 case USB_SPEED_FULL: return 1; 1821 default: return speed; 1822 } 1823 } 1824 1825 #if 0 1826 /* convert xspeed to usbdi speed */ 1827 static int 1828 xhci_xspeed2speed(int xspeed) 1829 { 1830 switch (xspeed) { 1831 case 1: return USB_SPEED_FULL; 1832 case 2: return USB_SPEED_LOW; 1833 default: return xspeed; 1834 } 1835 } 1836 #endif 1837 1838 /* convert xspeed to port status speed */ 1839 static int 1840 xhci_xspeed2psspeed(int xspeed) 1841 { 1842 switch (xspeed) { 1843 case 0: return 0; 1844 case 1: return UPS_FULL_SPEED; 1845 case 2: return UPS_LOW_SPEED; 1846 case 3: return UPS_HIGH_SPEED; 1847 default: return UPS_OTHER_SPEED; 1848 } 1849 } 1850 1851 /* 1852 * Construct input contexts and issue TRB to open pipe. 
1853 */ 1854 static usbd_status 1855 xhci_configure_endpoint(struct usbd_pipe *pipe) 1856 { 1857 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1858 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1859 #ifdef USB_DEBUG 1860 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 1861 #endif 1862 struct xhci_soft_trb trb; 1863 usbd_status err; 1864 1865 XHCIHIST_FUNC(); 1866 XHCIHIST_CALLARGS("slot %ju dci %ju epaddr 0x%02jx attr 0x%02jx", 1867 xs->xs_idx, dci, pipe->up_endpoint->ue_edesc->bEndpointAddress, 1868 pipe->up_endpoint->ue_edesc->bmAttributes); 1869 1870 /* XXX ensure input context is available? */ 1871 1872 memset(xhci_slot_get_icv(sc, xs, 0), 0, sc->sc_pgsz); 1873 1874 /* set up context */ 1875 xhci_setup_ctx(pipe); 1876 1877 HEXDUMP("input control context", xhci_slot_get_icv(sc, xs, 0), 1878 sc->sc_ctxsz * 1); 1879 HEXDUMP("input endpoint context", xhci_slot_get_icv(sc, xs, 1880 xhci_dci_to_ici(dci)), sc->sc_ctxsz * 1); 1881 1882 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0); 1883 trb.trb_2 = 0; 1884 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1885 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP); 1886 1887 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 1888 1889 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 1890 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, dci), 1891 sc->sc_ctxsz * 1); 1892 1893 return err; 1894 } 1895 1896 #if 0 1897 static usbd_status 1898 xhci_unconfigure_endpoint(struct usbd_pipe *pipe) 1899 { 1900 #ifdef USB_DEBUG 1901 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1902 #endif 1903 1904 XHCIHIST_FUNC(); 1905 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0); 1906 1907 return USBD_NORMAL_COMPLETION; 1908 } 1909 #endif 1910 1911 /* 4.6.8, 6.4.3.7 */ 1912 static void 1913 xhci_reset_endpoint(struct usbd_pipe *pipe) 1914 { 1915 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1916 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1917 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 1918 struct xhci_soft_trb trb; 1919 1920 XHCIHIST_FUNC(); 1921 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 1922 1923 KASSERT(mutex_owned(&sc->sc_lock)); 1924 1925 trb.trb_0 = 0; 1926 trb.trb_2 = 0; 1927 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1928 XHCI_TRB_3_EP_SET(dci) | 1929 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP); 1930 1931 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) { 1932 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n", 1933 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress); 1934 } 1935 } 1936 1937 /* 1938 * 4.6.9, 6.4.3.8 1939 * Stop execution of TDs on xfer ring. 1940 * Should be called with sc_lock held. 
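 *
 * Typical (illustrative) use is as in xhci_stop_endpoint() below:
 *
 *	err = xhci_stop_endpoint_cmd(sc, xs, dci, 0);
 *
 * trb3flags may carry extra dword-3 bits for the Stop Endpoint TRB
 * (for example the Suspend bit defined by the spec), merged as-is.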
1941 */ 1942 static usbd_status 1943 xhci_stop_endpoint_cmd(struct xhci_softc *sc, struct xhci_slot *xs, u_int dci, 1944 uint32_t trb3flags) 1945 { 1946 struct xhci_soft_trb trb; 1947 usbd_status err; 1948 1949 XHCIHIST_FUNC(); 1950 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 1951 1952 KASSERT(mutex_owned(&sc->sc_lock)); 1953 1954 trb.trb_0 = 0; 1955 trb.trb_2 = 0; 1956 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1957 XHCI_TRB_3_EP_SET(dci) | 1958 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP) | 1959 trb3flags; 1960 1961 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 1962 1963 return err; 1964 } 1965 1966 static usbd_status 1967 xhci_stop_endpoint(struct usbd_pipe *pipe) 1968 { 1969 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1970 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1971 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 1972 1973 XHCIHIST_FUNC(); 1974 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 1975 1976 KASSERT(mutex_owned(&sc->sc_lock)); 1977 1978 return xhci_stop_endpoint_cmd(sc, xs, dci, 0); 1979 } 1980 1981 /* 1982 * Set TR Dequeue Pointer. 1983 * xHCI 1.1 4.6.10 6.4.3.9 1984 * Purge all of the TRBs on ring and reinitialize ring. 1985 * Set TR dequeue Pointer to 0 and Cycle State to 1. 1986 * EPSTATE of endpoint must be ERROR or STOPPED, otherwise CONTEXT_STATE 1987 * error will be generated. 1988 */ 1989 static void 1990 xhci_set_dequeue(struct usbd_pipe *pipe) 1991 { 1992 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1993 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1994 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 1995 struct xhci_ring * const xr = xs->xs_xr[dci]; 1996 struct xhci_soft_trb trb; 1997 1998 XHCIHIST_FUNC(); 1999 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 2000 2001 KASSERT(mutex_owned(&sc->sc_lock)); 2002 KASSERT(xr != NULL); 2003 2004 xhci_host_dequeue(xr); 2005 2006 /* set DCS */ 2007 trb.trb_0 = xhci_ring_trbp(xr, 0) | 1; /* XXX */ 2008 trb.trb_2 = 0; 2009 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 2010 XHCI_TRB_3_EP_SET(dci) | 2011 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE); 2012 2013 if (xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT)) { 2014 device_printf(sc->sc_dev, "%s: endpoint 0x%x: timed out\n", 2015 __func__, pipe->up_endpoint->ue_edesc->bEndpointAddress); 2016 } 2017 } 2018 2019 /* 2020 * Open new pipe: called from usbd_setup_pipe_flags. 2021 * Fills methods of pipe. 2022 * If pipe is not for ep0, calls configure_endpoint. 
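 *
 * Rough flow, descriptive only: root hub endpoints are simply given the
 * roothub_ctrl/xhci_root_intr methods; any other endpoint gets a per-DCI
 * transfer ring from xhci_ring_init() and, unless it is the default
 * control endpoint, a Configure Endpoint command via
 * xhci_configure_endpoint().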
2023 */ 2024 static usbd_status 2025 xhci_open(struct usbd_pipe *pipe) 2026 { 2027 struct usbd_device * const dev = pipe->up_dev; 2028 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe; 2029 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus); 2030 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2031 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 2032 const u_int dci = xhci_ep_get_dci(ed); 2033 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 2034 usbd_status err; 2035 2036 XHCIHIST_FUNC(); 2037 XHCIHIST_CALLARGS("addr %jd depth %jd port %jd speed %jd", dev->ud_addr, 2038 dev->ud_depth, dev->ud_powersrc->up_portno, dev->ud_speed); 2039 DPRINTFN(1, " dci %ju type 0x%02jx epaddr 0x%02jx attr 0x%02jx", 2040 xhci_ep_get_dci(ed), ed->bDescriptorType, ed->bEndpointAddress, 2041 ed->bmAttributes); 2042 DPRINTFN(1, " mps %ju ival %ju", UGETW(ed->wMaxPacketSize), 2043 ed->bInterval, 0, 0); 2044 2045 if (sc->sc_dying) 2046 return USBD_IOERROR; 2047 2048 /* Root Hub */ 2049 if (dev->ud_depth == 0 && dev->ud_powersrc->up_portno == 0) { 2050 switch (ed->bEndpointAddress) { 2051 case USB_CONTROL_ENDPOINT: 2052 pipe->up_methods = &roothub_ctrl_methods; 2053 break; 2054 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT: 2055 pipe->up_methods = &xhci_root_intr_methods; 2056 break; 2057 default: 2058 pipe->up_methods = NULL; 2059 DPRINTFN(0, "bad bEndpointAddress 0x%02jx", 2060 ed->bEndpointAddress, 0, 0, 0); 2061 return USBD_INVAL; 2062 } 2063 return USBD_NORMAL_COMPLETION; 2064 } 2065 2066 usb_init_task(&xpipe->xp_async_task, xhci_pipe_restart_async_task, 2067 pipe, USB_TASKQ_MPSAFE); 2068 2069 switch (xfertype) { 2070 case UE_CONTROL: 2071 pipe->up_methods = &xhci_device_ctrl_methods; 2072 break; 2073 case UE_ISOCHRONOUS: 2074 pipe->up_methods = &xhci_device_isoc_methods; 2075 pipe->up_serialise = false; 2076 xpipe->xp_isoc_next = -1; 2077 break; 2078 case UE_BULK: 2079 pipe->up_methods = &xhci_device_bulk_methods; 2080 break; 2081 case UE_INTERRUPT: 2082 pipe->up_methods = &xhci_device_intr_methods; 2083 break; 2084 default: 2085 return USBD_IOERROR; 2086 break; 2087 } 2088 2089 KASSERT(xs != NULL); 2090 KASSERT(xs->xs_xr[dci] == NULL); 2091 2092 /* allocate transfer ring */ 2093 err = xhci_ring_init(sc, &xs->xs_xr[dci], XHCI_TRANSFER_RING_TRBS, 2094 XHCI_TRB_ALIGN); 2095 if (err) { 2096 DPRINTFN(1, "ring alloc failed %jd", err, 0, 0, 0); 2097 return err; 2098 } 2099 2100 if (ed->bEndpointAddress != USB_CONTROL_ENDPOINT) 2101 return xhci_configure_endpoint(pipe); 2102 2103 return USBD_NORMAL_COMPLETION; 2104 } 2105 2106 /* 2107 * Closes pipe, called from usbd_kill_pipe via close methods. 2108 * If the endpoint to be closed is ep0, disable_slot. 2109 * Should be called with sc_lock held. 
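 *
 * For an ordinary (non-ep0) endpoint the close is performed with a
 * Configure Endpoint command whose input control context only sets the
 * Drop flag for that DCI; an illustrative condensation of the code below:
 *
 *	cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
 *	cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci));	/* drop this DCI */
 *	cp[1] = htole32(0);				/* add nothing */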
2110 */ 2111 static void 2112 xhci_close_pipe(struct usbd_pipe *pipe) 2113 { 2114 struct xhci_pipe * const xp = 2115 container_of(pipe, struct xhci_pipe, xp_pipe); 2116 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 2117 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2118 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 2119 const u_int dci = xhci_ep_get_dci(ed); 2120 struct xhci_soft_trb trb; 2121 uint32_t *cp; 2122 2123 XHCIHIST_FUNC(); 2124 2125 usb_rem_task_wait(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC, 2126 &sc->sc_lock); 2127 2128 if (sc->sc_dying) 2129 return; 2130 2131 /* xs is uninitialized before xhci_init_slot */ 2132 if (xs == NULL || xs->xs_idx == 0) 2133 return; 2134 2135 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju", 2136 (uintptr_t)pipe, xs->xs_idx, dci, 0); 2137 2138 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx"); 2139 KASSERT(mutex_owned(&sc->sc_lock)); 2140 2141 if (pipe->up_dev->ud_depth == 0) 2142 return; 2143 2144 if (dci == XHCI_DCI_EP_CONTROL) { 2145 DPRINTFN(4, "closing ep0", 0, 0, 0, 0); 2146 /* This frees all rings */ 2147 xhci_disable_slot(sc, xs->xs_idx); 2148 return; 2149 } 2150 2151 if (xhci_get_epstate(sc, xs, dci) != XHCI_EPSTATE_STOPPED) 2152 (void)xhci_stop_endpoint(pipe); 2153 2154 /* 2155 * set appropriate bit to be dropped. 2156 * don't set DC bit to 1, otherwise all endpoints 2157 * would be deconfigured. 2158 */ 2159 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL); 2160 cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci)); 2161 cp[1] = htole32(0); 2162 2163 /* XXX should be most significant one, not dci? */ 2164 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT)); 2165 cp[0] = htole32(XHCI_SCTX_0_CTX_NUM_SET(dci)); 2166 2167 /* configure ep context performs an implicit dequeue */ 2168 xhci_host_dequeue(xs->xs_xr[dci]); 2169 2170 /* sync input contexts before they are read from memory */ 2171 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE); 2172 2173 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0); 2174 trb.trb_2 = 0; 2175 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 2176 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP); 2177 2178 (void)xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 2179 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 2180 2181 xhci_ring_free(sc, &xs->xs_xr[dci]); 2182 xs->xs_xr[dci] = NULL; 2183 } 2184 2185 /* 2186 * Abort transfer. Must be called with sc_lock held. Releases and 2187 * reacquires sc_lock to sleep until hardware acknowledges abort. 
2188 */ 2189 static void 2190 xhci_abortx(struct usbd_xfer *xfer) 2191 { 2192 XHCIHIST_FUNC(); 2193 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 2194 2195 XHCIHIST_CALLARGS("xfer %#jx pipe %#jx", 2196 (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0); 2197 2198 KASSERT(mutex_owned(&sc->sc_lock)); 2199 KASSERTMSG((xfer->ux_status == USBD_CANCELLED || 2200 xfer->ux_status == USBD_TIMEOUT), 2201 "bad abort status: %d", xfer->ux_status); 2202 2203 xhci_pipe_restart(xfer->ux_pipe); 2204 2205 DPRINTFN(14, "end", 0, 0, 0, 0); 2206 } 2207 2208 static void 2209 xhci_host_dequeue(struct xhci_ring * const xr) 2210 { 2211 /* When dequeueing the controller, update our struct copy too */ 2212 memset(xr->xr_trb, 0, xr->xr_ntrb * XHCI_TRB_SIZE); 2213 usb_syncmem(&xr->xr_dma, 0, xr->xr_ntrb * XHCI_TRB_SIZE, 2214 BUS_DMASYNC_PREWRITE); 2215 memset(xr->xr_cookies, 0, xr->xr_ntrb * sizeof(*xr->xr_cookies)); 2216 2217 xr->xr_ep = 0; 2218 xr->xr_cs = 1; 2219 } 2220 2221 /* 2222 * Recover STALLed endpoint, or stop endpoint to abort a pipe. 2223 * xHCI 1.1 sect 4.10.2.1 2224 * Issue RESET_EP to recover halt condition and SET_TR_DEQUEUE to remove 2225 * all transfers on transfer ring. 2226 */ 2227 static void 2228 xhci_pipe_restart(struct usbd_pipe *pipe) 2229 { 2230 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 2231 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2232 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 2233 2234 XHCIHIST_FUNC(); 2235 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju", 2236 (uintptr_t)pipe, xs->xs_idx, dci, 0); 2237 2238 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 2239 2240 /* 2241 * - If the endpoint is halted, indicating a stall, reset it. 2242 * - If the endpoint is stopped, we're already good. 2243 * - Otherwise, someone wanted to abort the pipe, so stop the 2244 * endpoint. 2245 * 2246 * In any case, clear the ring. 2247 */ 2248 switch (xhci_get_epstate(sc, xs, dci)) { 2249 case XHCI_EPSTATE_HALTED: 2250 xhci_reset_endpoint(pipe); 2251 break; 2252 case XHCI_EPSTATE_STOPPED: 2253 break; 2254 default: 2255 xhci_stop_endpoint(pipe); 2256 break; 2257 } 2258 2259 switch (xhci_get_epstate(sc, xs, dci)) { 2260 case XHCI_EPSTATE_STOPPED: 2261 break; 2262 case XHCI_EPSTATE_ERROR: 2263 device_printf(sc->sc_dev, "endpoint 0x%x error\n", 2264 pipe->up_endpoint->ue_edesc->bEndpointAddress); 2265 break; 2266 default: 2267 device_printf(sc->sc_dev, "endpoint 0x%x failed to stop\n", 2268 pipe->up_endpoint->ue_edesc->bEndpointAddress); 2269 } 2270 2271 xhci_set_dequeue(pipe); 2272 2273 DPRINTFN(4, "ends", 0, 0, 0, 0); 2274 } 2275 2276 static void 2277 xhci_pipe_restart_async_task(void *cookie) 2278 { 2279 struct usbd_pipe * const pipe = cookie; 2280 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 2281 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2282 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 2283 struct xhci_ring * const tr = xs->xs_xr[dci]; 2284 struct usbd_xfer *xfer; 2285 2286 XHCIHIST_FUNC(); 2287 XHCIHIST_CALLARGS("sc=%#jx pipe=%#jx", 2288 (uintptr_t)sc, (uintptr_t)pipe, 0, 0); 2289 2290 mutex_enter(&sc->sc_lock); 2291 2292 xhci_pipe_restart(pipe); 2293 2294 /* 2295 * We halted our own queue because it stalled. Mark it no 2296 * longer halted and start issuing queued transfers again. 
2297 */ 2298 tr->is_halted = false; 2299 xfer = SIMPLEQ_FIRST(&pipe->up_queue); 2300 if (xfer) { 2301 /* 2302 * If the first xfer of the queue is not in progress, 2303 * though, there may be a concurrent software abort 2304 * that has already cancelled it and is now in the 2305 * middle of a concurrent xhci_pipe_restart waiting to 2306 * reacquire the pipe (bus) lock. So only restart the 2307 * xfer if it's still USBD_IN_PROGRESS. 2308 * 2309 * Either way, xfers on the queue can't be in 2310 * USBD_NOT_STARTED. 2311 */ 2312 KASSERT(xfer->ux_status != USBD_NOT_STARTED); 2313 if (xfer->ux_status == USBD_IN_PROGRESS) { 2314 (*pipe->up_methods->upm_start)(xfer); 2315 } else { 2316 DPRINTF("pipe restart race xfer=%#jx status=%jd", 2317 (uintptr_t)xfer, xfer->ux_status, 0, 0); 2318 } 2319 } 2320 2321 mutex_exit(&sc->sc_lock); 2322 } 2323 2324 static void 2325 xhci_pipe_restart_async(struct usbd_pipe *pipe) 2326 { 2327 struct xhci_pipe * const xp = 2328 container_of(pipe, struct xhci_pipe, xp_pipe); 2329 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 2330 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2331 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 2332 struct xhci_ring * const tr = xs->xs_xr[dci]; 2333 2334 XHCIHIST_FUNC(); 2335 XHCIHIST_CALLARGS("pipe %#jx", (uintptr_t)pipe, 0, 0, 0); 2336 2337 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 2338 2339 tr->is_halted = true; 2340 usb_add_task(pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC); 2341 2342 DPRINTFN(4, "ends", 0, 0, 0, 0); 2343 } 2344 2345 /* Process roothub port status/change events and notify to uhub_intr. */ 2346 static void 2347 xhci_rhpsc(struct xhci_softc * const sc, u_int ctlrport) 2348 { 2349 XHCIHIST_FUNC(); 2350 XHCIHIST_CALLARGS("xhci%jd: port %ju status change", 2351 device_unit(sc->sc_dev), ctlrport, 0, 0); 2352 2353 if (ctlrport > sc->sc_maxports) 2354 return; 2355 2356 const size_t bn = xhci_ctlrport2bus(sc, ctlrport); 2357 const size_t rhp = xhci_ctlrport2rhport(sc, ctlrport); 2358 struct usbd_xfer * const xfer = sc->sc_intrxfer[bn]; 2359 2360 DPRINTFN(4, "xhci%jd: bus %jd bp %ju xfer %#jx status change", 2361 device_unit(sc->sc_dev), bn, rhp, (uintptr_t)xfer); 2362 2363 if (xfer == NULL) 2364 return; 2365 KASSERT(xfer->ux_status == USBD_IN_PROGRESS); 2366 2367 uint8_t *p = xfer->ux_buf; 2368 if (!xhci_polling_p(sc) || !sc->sc_intrxfer_deferred[bn]) 2369 memset(p, 0, xfer->ux_length); 2370 p[rhp / NBBY] |= 1 << (rhp % NBBY); 2371 xfer->ux_actlen = xfer->ux_length; 2372 xfer->ux_status = USBD_NORMAL_COMPLETION; 2373 if (xhci_polling_p(sc)) 2374 sc->sc_intrxfer_deferred[bn] = true; 2375 else 2376 usb_transfer_complete(xfer); 2377 } 2378 2379 /* Process Transfer Events */ 2380 static void 2381 xhci_event_transfer(struct xhci_softc * const sc, 2382 const struct xhci_trb * const trb) 2383 { 2384 uint64_t trb_0; 2385 uint32_t trb_2, trb_3; 2386 uint8_t trbcode; 2387 u_int slot, dci; 2388 struct xhci_slot *xs; 2389 struct xhci_ring *xr; 2390 struct xhci_xfer *xx; 2391 struct usbd_xfer *xfer; 2392 usbd_status err; 2393 2394 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2395 2396 trb_0 = le64toh(trb->trb_0); 2397 trb_2 = le32toh(trb->trb_2); 2398 trb_3 = le32toh(trb->trb_3); 2399 trbcode = XHCI_TRB_2_ERROR_GET(trb_2); 2400 slot = XHCI_TRB_3_SLOT_GET(trb_3); 2401 dci = XHCI_TRB_3_EP_GET(trb_3); 2402 xs = &sc->sc_slots[slot]; 2403 xr = xs->xs_xr[dci]; 2404 2405 /* sanity check */ 2406 KASSERT(xr != NULL); 2407 KASSERTMSG(xs->xs_idx != 0 && xs->xs_idx <= sc->sc_maxslots, 2408 "invalid xs_idx %u slot 
%u", xs->xs_idx, slot); 2409 2410 int idx = 0; 2411 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) { 2412 if (xhci_trb_get_idx(xr, trb_0, &idx)) { 2413 DPRINTFN(0, "invalid trb_0 %#jx", trb_0, 0, 0, 0); 2414 return; 2415 } 2416 xx = xr->xr_cookies[idx]; 2417 2418 /* clear cookie of consumed TRB */ 2419 xr->xr_cookies[idx] = NULL; 2420 2421 /* 2422 * xx is NULL if pipe is opened but xfer is not started. 2423 * It happens when stopping idle pipe. 2424 */ 2425 if (xx == NULL || trbcode == XHCI_TRB_ERROR_LENGTH) { 2426 DPRINTFN(1, "Ignore #%ju: cookie %#jx cc %ju dci %ju", 2427 idx, (uintptr_t)xx, trbcode, dci); 2428 DPRINTFN(1, " orig TRB %#jx type %ju", trb_0, 2429 XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)), 2430 0, 0); 2431 return; 2432 } 2433 } else { 2434 /* When ED != 0, trb_0 is virtual addr of struct xhci_xfer. */ 2435 xx = (void *)(uintptr_t)(trb_0 & ~0x3); 2436 } 2437 /* XXX this may not happen */ 2438 if (xx == NULL) { 2439 DPRINTFN(1, "xfer done: xx is NULL", 0, 0, 0, 0); 2440 return; 2441 } 2442 xfer = &xx->xx_xfer; 2443 /* XXX this may happen when detaching */ 2444 if (xfer == NULL) { 2445 DPRINTFN(1, "xx(%#jx)->xx_xfer is NULL trb_0 %#jx", 2446 (uintptr_t)xx, trb_0, 0, 0); 2447 return; 2448 } 2449 DPRINTFN(14, "xfer %#jx", (uintptr_t)xfer, 0, 0, 0); 2450 /* XXX I dunno why this happens */ 2451 KASSERTMSG(xfer->ux_pipe != NULL, "xfer(%p)->ux_pipe is NULL", xfer); 2452 2453 if (!xfer->ux_pipe->up_repeat && 2454 SIMPLEQ_EMPTY(&xfer->ux_pipe->up_queue)) { 2455 DPRINTFN(1, "xfer(%#jx)->pipe not queued", (uintptr_t)xfer, 2456 0, 0, 0); 2457 return; 2458 } 2459 2460 const uint8_t xfertype = 2461 UE_GET_XFERTYPE(xfer->ux_pipe->up_endpoint->ue_edesc->bmAttributes); 2462 2463 /* 4.11.5.2 Event Data TRB */ 2464 if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) { 2465 DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx" 2466 " %02jx", trb_0, XHCI_TRB_2_REM_GET(trb_2), trbcode, 0); 2467 if ((trb_0 & 0x3) == 0x3) { 2468 xfer->ux_actlen = XHCI_TRB_2_REM_GET(trb_2); 2469 } 2470 } 2471 2472 switch (trbcode) { 2473 case XHCI_TRB_ERROR_SHORT_PKT: 2474 case XHCI_TRB_ERROR_SUCCESS: 2475 /* 2476 * A ctrl transfer can generate two events if it has a Data 2477 * stage. A short data stage can be OK and should not 2478 * complete the transfer as the status stage needs to be 2479 * performed. 2480 * 2481 * Note: Data and Status stage events point at same xfer. 2482 * ux_actlen and ux_dmabuf will be passed to 2483 * usb_transfer_complete after the Status stage event. 2484 * 2485 * It can be distinguished which stage generates the event: 2486 * + by checking least 3 bits of trb_0 if ED==1. 2487 * (see xhci_device_ctrl_start). 2488 * + by checking the type of original TRB if ED==0. 2489 * 2490 * In addition, intr, bulk, and isoc transfer currently 2491 * consists of single TD, so the "skip" is not needed. 2492 * ctrl xfer uses EVENT_DATA, and others do not. 2493 * Thus driver can switch the flow by checking ED bit. 
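		 *
		 * Concretely, in the ED==1 path handled below: an event
		 * whose (trb_0 & 0x3) == 0x3 is treated as the Data stage
		 * event (its length field updates ux_actlen and completion
		 * is deferred), while (trb_0 & 0x3) == 0x0 is treated as
		 * the Status stage event that finally completes the xfer.
		 * The tag values themselves are chosen when the TDs are
		 * queued (see xhci_device_ctrl_start, not shown here).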
2494 */ 2495 if (xfertype == UE_ISOCHRONOUS) { 2496 xfer->ux_frlengths[xx->xx_isoc_done] -= 2497 XHCI_TRB_2_REM_GET(trb_2); 2498 xfer->ux_actlen += xfer->ux_frlengths[xx->xx_isoc_done]; 2499 } else if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) { 2500 if (xfer->ux_actlen == 0) 2501 xfer->ux_actlen = xfer->ux_length - 2502 XHCI_TRB_2_REM_GET(trb_2); 2503 if (XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)) 2504 == XHCI_TRB_TYPE_DATA_STAGE) { 2505 return; 2506 } 2507 } else if ((trb_0 & 0x3) == 0x3) { 2508 return; 2509 } 2510 err = USBD_NORMAL_COMPLETION; 2511 break; 2512 case XHCI_TRB_ERROR_STOPPED: 2513 case XHCI_TRB_ERROR_LENGTH: 2514 case XHCI_TRB_ERROR_STOPPED_SHORT: 2515 err = USBD_IOERROR; 2516 break; 2517 case XHCI_TRB_ERROR_STALL: 2518 case XHCI_TRB_ERROR_BABBLE: 2519 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0); 2520 xhci_pipe_restart_async(xfer->ux_pipe); 2521 err = USBD_STALLED; 2522 break; 2523 default: 2524 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0); 2525 err = USBD_IOERROR; 2526 break; 2527 } 2528 2529 if (xfertype == UE_ISOCHRONOUS) { 2530 switch (trbcode) { 2531 case XHCI_TRB_ERROR_SHORT_PKT: 2532 case XHCI_TRB_ERROR_SUCCESS: 2533 break; 2534 case XHCI_TRB_ERROR_MISSED_SERVICE: 2535 case XHCI_TRB_ERROR_RING_UNDERRUN: 2536 case XHCI_TRB_ERROR_RING_OVERRUN: 2537 default: 2538 xfer->ux_frlengths[xx->xx_isoc_done] = 0; 2539 break; 2540 } 2541 if (++xx->xx_isoc_done < xfer->ux_nframes) 2542 return; 2543 } 2544 2545 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 || 2546 (trb_0 & 0x3) == 0x0) { 2547 /* 2548 * Try to claim this xfer for completion. If it has 2549 * already completed or aborted, drop it on the floor. 2550 */ 2551 if (!usbd_xfer_trycomplete(xfer)) 2552 return; 2553 2554 /* Set the status. */ 2555 xfer->ux_status = err; 2556 2557 usb_transfer_complete(xfer); 2558 } 2559 } 2560 2561 /* Process Command complete events */ 2562 static void 2563 xhci_event_cmd(struct xhci_softc * const sc, const struct xhci_trb * const trb) 2564 { 2565 uint64_t trb_0; 2566 uint32_t trb_2, trb_3; 2567 2568 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2569 2570 KASSERT(mutex_owned(&sc->sc_lock)); 2571 2572 trb_0 = le64toh(trb->trb_0); 2573 trb_2 = le32toh(trb->trb_2); 2574 trb_3 = le32toh(trb->trb_3); 2575 2576 if (trb_0 == sc->sc_command_addr) { 2577 sc->sc_resultpending = false; 2578 2579 sc->sc_result_trb.trb_0 = trb_0; 2580 sc->sc_result_trb.trb_2 = trb_2; 2581 sc->sc_result_trb.trb_3 = trb_3; 2582 if (XHCI_TRB_2_ERROR_GET(trb_2) != 2583 XHCI_TRB_ERROR_SUCCESS) { 2584 DPRINTFN(1, "command completion " 2585 "failure: 0x%016jx 0x%08jx 0x%08jx", 2586 trb_0, trb_2, trb_3, 0); 2587 } 2588 cv_signal(&sc->sc_command_cv); 2589 } else { 2590 DPRINTFN(1, "spurious event: %#jx 0x%016jx " 2591 "0x%08jx 0x%08jx", (uintptr_t)trb, trb_0, trb_2, trb_3); 2592 } 2593 } 2594 2595 /* 2596 * Process events. 2597 * called from xhci_softintr 2598 */ 2599 static void 2600 xhci_handle_event(struct xhci_softc * const sc, 2601 const struct xhci_trb * const trb) 2602 { 2603 uint64_t trb_0; 2604 uint32_t trb_2, trb_3; 2605 2606 XHCIHIST_FUNC(); 2607 2608 trb_0 = le64toh(trb->trb_0); 2609 trb_2 = le32toh(trb->trb_2); 2610 trb_3 = le32toh(trb->trb_3); 2611 2612 XHCIHIST_CALLARGS("event: %#jx 0x%016jx 0x%08jx 0x%08jx", 2613 (uintptr_t)trb, trb_0, trb_2, trb_3); 2614 2615 /* 2616 * 4.11.3.1, 6.4.2.1 2617 * TRB Pointer is invalid for these completion codes. 
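	 * (Ring Underrun, Ring Overrun and VF Event Ring Full describe a
	 * ring-level condition rather than a particular TRB, so trb_0 must
	 * not be interpreted as a TRB address for them.)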
2618 */ 2619 switch (XHCI_TRB_2_ERROR_GET(trb_2)) { 2620 case XHCI_TRB_ERROR_RING_UNDERRUN: 2621 case XHCI_TRB_ERROR_RING_OVERRUN: 2622 case XHCI_TRB_ERROR_VF_RING_FULL: 2623 return; 2624 default: 2625 if (trb_0 == 0) { 2626 return; 2627 } 2628 break; 2629 } 2630 2631 switch (XHCI_TRB_3_TYPE_GET(trb_3)) { 2632 case XHCI_TRB_EVENT_TRANSFER: 2633 xhci_event_transfer(sc, trb); 2634 break; 2635 case XHCI_TRB_EVENT_CMD_COMPLETE: 2636 xhci_event_cmd(sc, trb); 2637 break; 2638 case XHCI_TRB_EVENT_PORT_STS_CHANGE: 2639 xhci_rhpsc(sc, (uint32_t)((trb_0 >> 24) & 0xff)); 2640 break; 2641 default: 2642 break; 2643 } 2644 } 2645 2646 static void 2647 xhci_softintr(void *v) 2648 { 2649 struct usbd_bus * const bus = v; 2650 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2651 struct xhci_ring * const er = sc->sc_er; 2652 struct xhci_trb *trb; 2653 int i, j, k, bn; 2654 2655 XHCIHIST_FUNC(); 2656 2657 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 2658 2659 i = er->xr_ep; 2660 j = er->xr_cs; 2661 2662 XHCIHIST_CALLARGS("er: xr_ep %jd xr_cs %jd", i, j, 0, 0); 2663 2664 /* 2665 * Handle deferred root intr xfer, in case we just switched off 2666 * polling. It's not safe to complete root intr xfers while 2667 * polling -- too much kernel machinery gets involved. 2668 */ 2669 if (!xhci_polling_p(sc)) { 2670 for (bn = 0; bn < 2; bn++) { 2671 if (__predict_false(sc->sc_intrxfer_deferred[bn])) { 2672 sc->sc_intrxfer_deferred[bn] = false; 2673 usb_transfer_complete(sc->sc_intrxfer[bn]); 2674 } 2675 } 2676 } 2677 2678 while (1) { 2679 usb_syncmem(&er->xr_dma, XHCI_TRB_SIZE * i, XHCI_TRB_SIZE, 2680 BUS_DMASYNC_POSTREAD); 2681 trb = &er->xr_trb[i]; 2682 k = (le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0; 2683 2684 if (j != k) 2685 break; 2686 2687 xhci_handle_event(sc, trb); 2688 2689 i++; 2690 if (i == er->xr_ntrb) { 2691 i = 0; 2692 j ^= 1; 2693 } 2694 } 2695 2696 er->xr_ep = i; 2697 er->xr_cs = j; 2698 2699 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(er, er->xr_ep) | 2700 XHCI_ERDP_BUSY); 2701 2702 DPRINTFN(16, "ends", 0, 0, 0, 0); 2703 2704 return; 2705 } 2706 2707 static void 2708 xhci_poll(struct usbd_bus *bus) 2709 { 2710 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2711 2712 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2713 2714 mutex_enter(&sc->sc_intr_lock); 2715 int ret = xhci_intr1(sc); 2716 if (ret) { 2717 xhci_softintr(bus); 2718 } 2719 mutex_exit(&sc->sc_intr_lock); 2720 2721 return; 2722 } 2723 2724 static struct usbd_xfer * 2725 xhci_allocx(struct usbd_bus *bus, unsigned int nframes) 2726 { 2727 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2728 struct xhci_xfer *xx; 2729 u_int ntrbs; 2730 2731 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2732 2733 ntrbs = uimax(3, nframes); 2734 const size_t trbsz = sizeof(*xx->xx_trb) * ntrbs; 2735 2736 xx = pool_cache_get(sc->sc_xferpool, PR_WAITOK); 2737 if (xx != NULL) { 2738 memset(xx, 0, sizeof(*xx)); 2739 if (ntrbs > 0) { 2740 xx->xx_trb = kmem_alloc(trbsz, KM_SLEEP); 2741 xx->xx_ntrb = ntrbs; 2742 } 2743 #ifdef DIAGNOSTIC 2744 xx->xx_xfer.ux_state = XFER_BUSY; 2745 #endif 2746 } 2747 2748 return &xx->xx_xfer; 2749 } 2750 2751 static void 2752 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer) 2753 { 2754 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2755 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 2756 2757 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2758 2759 #ifdef DIAGNOSTIC 2760 if (xfer->ux_state != XFER_BUSY && 2761 xfer->ux_status != USBD_NOT_STARTED) { 2762 DPRINTFN(0, "xfer=%#jx not busy, 0x%08jx", 2763 (uintptr_t)xfer, 
xfer->ux_state, 0, 0); 2764 } 2765 xfer->ux_state = XFER_FREE; 2766 #endif 2767 if (xx->xx_ntrb > 0) { 2768 kmem_free(xx->xx_trb, xx->xx_ntrb * sizeof(*xx->xx_trb)); 2769 xx->xx_trb = NULL; 2770 xx->xx_ntrb = 0; 2771 } 2772 pool_cache_put(sc->sc_xferpool, xx); 2773 } 2774 2775 static bool 2776 xhci_dying(struct usbd_bus *bus) 2777 { 2778 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2779 2780 return sc->sc_dying; 2781 } 2782 2783 static void 2784 xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock) 2785 { 2786 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2787 2788 *lock = &sc->sc_lock; 2789 } 2790 2791 extern uint32_t usb_cookie_no; 2792 2793 /* 2794 * xHCI 4.3 2795 * Called when uhub_explore finds a new device (via usbd_new_device). 2796 * Port initialization and speed detection (4.3.1) are already done in uhub.c. 2797 * This function does: 2798 * Allocate and construct dev structure of default endpoint (ep0). 2799 * Allocate and open pipe of ep0. 2800 * Enable slot and initialize slot context. 2801 * Set Address. 2802 * Read initial device descriptor. 2803 * Determine initial MaxPacketSize (mps) by speed. 2804 * Read full device descriptor. 2805 * Register this device. 2806 * Finally state of device transitions ADDRESSED. 2807 */ 2808 static usbd_status 2809 xhci_new_device(device_t parent, struct usbd_bus *bus, int depth, 2810 int speed, int port, struct usbd_port *up) 2811 { 2812 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2813 struct usbd_device *dev; 2814 usbd_status err; 2815 usb_device_descriptor_t *dd; 2816 struct xhci_slot *xs; 2817 uint32_t *cp; 2818 2819 XHCIHIST_FUNC(); 2820 XHCIHIST_CALLARGS("port %ju depth %ju speed %ju up %#jx", 2821 port, depth, speed, (uintptr_t)up); 2822 2823 KASSERT(KERNEL_LOCKED_P()); 2824 2825 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP); 2826 dev->ud_bus = bus; 2827 dev->ud_quirks = &usbd_no_quirk; 2828 dev->ud_addr = 0; 2829 dev->ud_ddesc.bMaxPacketSize = 0; 2830 dev->ud_depth = depth; 2831 dev->ud_powersrc = up; 2832 dev->ud_myhub = up->up_parent; 2833 dev->ud_speed = speed; 2834 dev->ud_langid = USBD_NOLANG; 2835 dev->ud_cookie.cookie = ++usb_cookie_no; 2836 2837 /* Set up default endpoint handle. */ 2838 dev->ud_ep0.ue_edesc = &dev->ud_ep0desc; 2839 /* doesn't matter, just don't let it uninitialized */ 2840 dev->ud_ep0.ue_toggle = 0; 2841 2842 /* Set up default endpoint descriptor. */ 2843 dev->ud_ep0desc.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE; 2844 dev->ud_ep0desc.bDescriptorType = UDESC_ENDPOINT; 2845 dev->ud_ep0desc.bEndpointAddress = USB_CONTROL_ENDPOINT; 2846 dev->ud_ep0desc.bmAttributes = UE_CONTROL; 2847 dev->ud_ep0desc.bInterval = 0; 2848 2849 /* 4.3, 4.8.2.1 */ 2850 switch (speed) { 2851 case USB_SPEED_SUPER: 2852 case USB_SPEED_SUPER_PLUS: 2853 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_3_MAX_CTRL_PACKET); 2854 break; 2855 case USB_SPEED_FULL: 2856 /* XXX using 64 as initial mps of ep0 in FS */ 2857 case USB_SPEED_HIGH: 2858 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_2_MAX_CTRL_PACKET); 2859 break; 2860 case USB_SPEED_LOW: 2861 default: 2862 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_MAX_IPACKET); 2863 break; 2864 } 2865 2866 up->up_dev = dev; 2867 2868 dd = &dev->ud_ddesc; 2869 2870 if (depth == 0 && port == 0) { 2871 KASSERT(bus->ub_devices[USB_ROOTHUB_INDEX] == NULL); 2872 bus->ub_devices[USB_ROOTHUB_INDEX] = dev; 2873 2874 /* Establish the default pipe. 
*/ 2875 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0, 2876 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0); 2877 if (err) { 2878 DPRINTFN(1, "setup default pipe failed %jd", err,0,0,0); 2879 goto bad; 2880 } 2881 err = usbd_get_initial_ddesc(dev, dd); 2882 if (err) { 2883 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0); 2884 goto bad; 2885 } 2886 } else { 2887 uint8_t slot = 0; 2888 2889 /* 4.3.2 */ 2890 err = xhci_enable_slot(sc, &slot); 2891 if (err) { 2892 DPRINTFN(1, "enable slot %ju", err, 0, 0, 0); 2893 goto bad; 2894 } 2895 2896 xs = &sc->sc_slots[slot]; 2897 dev->ud_hcpriv = xs; 2898 2899 /* 4.3.3 initialize slot structure */ 2900 err = xhci_init_slot(dev, slot); 2901 if (err) { 2902 DPRINTFN(1, "init slot %ju", err, 0, 0, 0); 2903 dev->ud_hcpriv = NULL; 2904 /* 2905 * We have to disable_slot here because 2906 * xs->xs_idx == 0 when xhci_init_slot fails, 2907 * in that case usbd_remove_dev won't work. 2908 */ 2909 mutex_enter(&sc->sc_lock); 2910 xhci_disable_slot(sc, slot); 2911 mutex_exit(&sc->sc_lock); 2912 goto bad; 2913 } 2914 2915 /* 2916 * We have to establish the default pipe _after_ slot 2917 * structure has been prepared. 2918 */ 2919 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0, 2920 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0); 2921 if (err) { 2922 DPRINTFN(1, "setup default pipe failed %jd", err, 0, 0, 2923 0); 2924 goto bad; 2925 } 2926 2927 /* 4.3.4 Address Assignment */ 2928 err = xhci_set_address(dev, slot, false); 2929 if (err) { 2930 DPRINTFN(1, "failed! to set address: %ju", err, 0, 0, 0); 2931 goto bad; 2932 } 2933 2934 /* Allow device time to set new address */ 2935 usbd_delay_ms(dev, USB_SET_ADDRESS_SETTLE); 2936 2937 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 2938 cp = xhci_slot_get_dcv(sc, xs, XHCI_DCI_SLOT); 2939 HEXDUMP("slot context", cp, sc->sc_ctxsz); 2940 uint8_t addr = XHCI_SCTX_3_DEV_ADDR_GET(le32toh(cp[3])); 2941 DPRINTFN(4, "device address %ju", addr, 0, 0, 0); 2942 /* 2943 * XXX ensure we know when the hardware does something 2944 * we can't yet cope with 2945 */ 2946 KASSERTMSG(addr >= 1 && addr <= 127, "addr %d", addr); 2947 dev->ud_addr = addr; 2948 2949 KASSERTMSG(bus->ub_devices[usb_addr2dindex(dev->ud_addr)] == NULL, 2950 "addr %d already allocated", dev->ud_addr); 2951 /* 2952 * The root hub is given its own slot 2953 */ 2954 bus->ub_devices[usb_addr2dindex(dev->ud_addr)] = dev; 2955 2956 err = usbd_get_initial_ddesc(dev, dd); 2957 if (err) { 2958 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0); 2959 goto bad; 2960 } 2961 2962 /* 4.8.2.1 */ 2963 if (USB_IS_SS(speed)) { 2964 if (dd->bMaxPacketSize != 9) { 2965 printf("%s: invalid mps 2^%u for SS ep0," 2966 " using 512\n", 2967 device_xname(sc->sc_dev), 2968 dd->bMaxPacketSize); 2969 dd->bMaxPacketSize = 9; 2970 } 2971 USETW(dev->ud_ep0desc.wMaxPacketSize, 2972 (1 << dd->bMaxPacketSize)); 2973 } else 2974 USETW(dev->ud_ep0desc.wMaxPacketSize, 2975 dd->bMaxPacketSize); 2976 DPRINTFN(4, "bMaxPacketSize %ju", dd->bMaxPacketSize, 0, 0, 0); 2977 err = xhci_update_ep0_mps(sc, xs, 2978 UGETW(dev->ud_ep0desc.wMaxPacketSize)); 2979 if (err) { 2980 DPRINTFN(1, "update mps of ep0 %ju", err, 0, 0, 0); 2981 goto bad; 2982 } 2983 } 2984 2985 err = usbd_reload_device_desc(dev); 2986 if (err) { 2987 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0); 2988 goto bad; 2989 } 2990 2991 DPRINTFN(1, "adding unit addr=%jd, rev=%02jx,", 2992 dev->ud_addr, UGETW(dd->bcdUSB), 0, 0); 2993 DPRINTFN(1, " class=%jd, subclass=%jd, protocol=%jd,", 2994 dd->bDeviceClass, dd->bDeviceSubClass, 2995 dd->bDeviceProtocol, 
0); 2996 DPRINTFN(1, " mps=%jd, len=%jd, noconf=%jd, speed=%jd", 2997 dd->bMaxPacketSize, dd->bLength, dd->bNumConfigurations, 2998 dev->ud_speed); 2999 3000 usbd_get_device_strings(dev); 3001 3002 usbd_add_dev_event(USB_EVENT_DEVICE_ATTACH, dev); 3003 3004 if (depth == 0 && port == 0) { 3005 usbd_attach_roothub(parent, dev); 3006 DPRINTFN(1, "root hub %#jx", (uintptr_t)dev, 0, 0, 0); 3007 return USBD_NORMAL_COMPLETION; 3008 } 3009 3010 err = usbd_probe_and_attach(parent, dev, port, dev->ud_addr); 3011 bad: 3012 if (err != USBD_NORMAL_COMPLETION) { 3013 if (depth == 0 && port == 0 && dev->ud_pipe0) 3014 usbd_kill_pipe(dev->ud_pipe0); 3015 usbd_remove_device(dev, up); 3016 } 3017 3018 return err; 3019 } 3020 3021 static usbd_status 3022 xhci_ring_init(struct xhci_softc * const sc, struct xhci_ring **xrp, 3023 size_t ntrb, size_t align) 3024 { 3025 size_t size = ntrb * XHCI_TRB_SIZE; 3026 struct xhci_ring *xr; 3027 3028 XHCIHIST_FUNC(); 3029 XHCIHIST_CALLARGS("xr %#jx ntrb %#jx align %#jx", 3030 (uintptr_t)*xrp, ntrb, align, 0); 3031 3032 xr = kmem_zalloc(sizeof(struct xhci_ring), KM_SLEEP); 3033 DPRINTFN(1, "ring %#jx", (uintptr_t)xr, 0, 0, 0); 3034 3035 int err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align, 3036 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xr->xr_dma); 3037 if (err) { 3038 kmem_free(xr, sizeof(struct xhci_ring)); 3039 DPRINTFN(1, "alloc xr_dma failed %jd", err, 0, 0, 0); 3040 return err; 3041 } 3042 mutex_init(&xr->xr_lock, MUTEX_DEFAULT, IPL_SOFTUSB); 3043 xr->xr_cookies = kmem_zalloc(sizeof(*xr->xr_cookies) * ntrb, KM_SLEEP); 3044 xr->xr_trb = xhci_ring_trbv(xr, 0); 3045 xr->xr_ntrb = ntrb; 3046 xr->is_halted = false; 3047 xhci_host_dequeue(xr); 3048 *xrp = xr; 3049 3050 return USBD_NORMAL_COMPLETION; 3051 } 3052 3053 static void 3054 xhci_ring_free(struct xhci_softc * const sc, struct xhci_ring ** const xr) 3055 { 3056 if (*xr == NULL) 3057 return; 3058 3059 usb_freemem(&(*xr)->xr_dma); 3060 mutex_destroy(&(*xr)->xr_lock); 3061 kmem_free((*xr)->xr_cookies, 3062 sizeof(*(*xr)->xr_cookies) * (*xr)->xr_ntrb); 3063 kmem_free(*xr, sizeof(struct xhci_ring)); 3064 *xr = NULL; 3065 } 3066 3067 static void 3068 xhci_ring_put(struct xhci_softc * const sc, struct xhci_ring * const xr, 3069 void *cookie, struct xhci_soft_trb * const trbs, size_t ntrbs) 3070 { 3071 size_t i; 3072 u_int ri; 3073 u_int cs; 3074 uint64_t parameter; 3075 uint32_t status; 3076 uint32_t control; 3077 3078 XHCIHIST_FUNC(); 3079 XHCIHIST_CALLARGS("%#jx xr_ep %#jx xr_cs %ju", 3080 (uintptr_t)xr, xr->xr_ep, xr->xr_cs, 0); 3081 3082 KASSERTMSG(ntrbs < xr->xr_ntrb, "ntrbs %zu, xr->xr_ntrb %u", 3083 ntrbs, xr->xr_ntrb); 3084 for (i = 0; i < ntrbs; i++) { 3085 DPRINTFN(12, "xr %#jx trbs %#jx num %ju", (uintptr_t)xr, 3086 (uintptr_t)trbs, i, 0); 3087 DPRINTFN(12, " 0x%016jx 0x%08jx 0x%08jx", 3088 trbs[i].trb_0, trbs[i].trb_2, trbs[i].trb_3, 0); 3089 KASSERTMSG(XHCI_TRB_3_TYPE_GET(trbs[i].trb_3) != 3090 XHCI_TRB_TYPE_LINK, "trbs[%zu].trb3 %#x", i, trbs[i].trb_3); 3091 } 3092 3093 ri = xr->xr_ep; 3094 cs = xr->xr_cs; 3095 3096 /* 3097 * Although the xhci hardware can do scatter/gather dma from 3098 * arbitrary sized buffers, there is a non-obvious restriction 3099 * that a LINK trb is only allowed at the end of a burst of 3100 * transfers - which might be 16kB. 3101 * Arbitrary aligned LINK trb definitely fail on Ivy bridge. 3102 * The simple solution is not to allow a LINK trb in the middle 3103 * of anything - as here. 
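	 *
	 * The loop below also defers ownership hand-off: every TRB except
	 * the first is written with its cycle bit already valid, the first
	 * TRB is written with its cycle bit inverted, and only after all
	 * TRBs have been written and synced is the first TRB's cycle bit
	 * flipped, so the controller cannot start a half-written TD.
	 *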
3104 * XXX: (dsl) There are xhci controllers out there (eg some made by 3105 * ASMedia) that seem to lock up if they process a LINK trb but 3106 * cannot process the linked-to trb yet. 3107 * The code should write the 'cycle' bit on the link trb AFTER 3108 * adding the other trb. 3109 */ 3110 u_int firstep = xr->xr_ep; 3111 u_int firstcs = xr->xr_cs; 3112 3113 for (i = 0; i < ntrbs; ) { 3114 u_int oldri = ri; 3115 u_int oldcs = cs; 3116 3117 if (ri >= (xr->xr_ntrb - 1)) { 3118 /* Put Link TD at the end of ring */ 3119 parameter = xhci_ring_trbp(xr, 0); 3120 status = 0; 3121 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) | 3122 XHCI_TRB_3_TC_BIT; 3123 xr->xr_cookies[ri] = NULL; 3124 xr->xr_ep = 0; 3125 xr->xr_cs ^= 1; 3126 ri = xr->xr_ep; 3127 cs = xr->xr_cs; 3128 } else { 3129 parameter = trbs[i].trb_0; 3130 status = trbs[i].trb_2; 3131 control = trbs[i].trb_3; 3132 3133 xr->xr_cookies[ri] = cookie; 3134 ri++; 3135 i++; 3136 } 3137 /* 3138 * If this is a first TRB, mark it invalid to prevent 3139 * xHC from running it immediately. 3140 */ 3141 if (oldri == firstep) { 3142 if (oldcs) { 3143 control &= ~XHCI_TRB_3_CYCLE_BIT; 3144 } else { 3145 control |= XHCI_TRB_3_CYCLE_BIT; 3146 } 3147 } else { 3148 if (oldcs) { 3149 control |= XHCI_TRB_3_CYCLE_BIT; 3150 } else { 3151 control &= ~XHCI_TRB_3_CYCLE_BIT; 3152 } 3153 } 3154 xhci_trb_put(&xr->xr_trb[oldri], parameter, status, control); 3155 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * oldri, 3156 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE); 3157 } 3158 3159 /* Now invert cycle bit of first TRB */ 3160 if (firstcs) { 3161 xr->xr_trb[firstep].trb_3 |= htole32(XHCI_TRB_3_CYCLE_BIT); 3162 } else { 3163 xr->xr_trb[firstep].trb_3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT); 3164 } 3165 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * firstep, 3166 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE); 3167 3168 xr->xr_ep = ri; 3169 xr->xr_cs = cs; 3170 3171 DPRINTFN(12, "%#jx xr_ep %#jx xr_cs %ju", (uintptr_t)xr, xr->xr_ep, 3172 xr->xr_cs, 0); 3173 } 3174 3175 static inline void 3176 xhci_ring_put_xfer(struct xhci_softc * const sc, struct xhci_ring * const tr, 3177 struct xhci_xfer *xx, u_int ntrb) 3178 { 3179 KASSERT(ntrb <= xx->xx_ntrb); 3180 xhci_ring_put(sc, tr, xx, xx->xx_trb, ntrb); 3181 } 3182 3183 /* 3184 * Stop execution commands, purge all commands on command ring, and 3185 * rewind dequeue pointer. 3186 */ 3187 static void 3188 xhci_abort_command(struct xhci_softc *sc) 3189 { 3190 struct xhci_ring * const cr = sc->sc_cr; 3191 uint64_t crcr; 3192 int i; 3193 3194 XHCIHIST_FUNC(); 3195 XHCIHIST_CALLARGS("command %#jx timeout, aborting", 3196 sc->sc_command_addr, 0, 0, 0); 3197 3198 mutex_enter(&cr->xr_lock); 3199 3200 /* 4.6.1.2 Aborting a Command */ 3201 crcr = xhci_op_read_8(sc, XHCI_CRCR); 3202 xhci_op_write_8(sc, XHCI_CRCR, crcr | XHCI_CRCR_LO_CA); 3203 3204 for (i = 0; i < 500; i++) { 3205 crcr = xhci_op_read_8(sc, XHCI_CRCR); 3206 if ((crcr & XHCI_CRCR_LO_CRR) == 0) 3207 break; 3208 usb_delay_ms(&sc->sc_bus, 1); 3209 } 3210 if ((crcr & XHCI_CRCR_LO_CRR) != 0) { 3211 DPRINTFN(1, "Command Abort timeout", 0, 0, 0, 0); 3212 /* reset HC here? */ 3213 } 3214 3215 /* reset command ring dequeue pointer */ 3216 cr->xr_ep = 0; 3217 cr->xr_cs = 1; 3218 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(cr, 0) | cr->xr_cs); 3219 3220 mutex_exit(&cr->xr_lock); 3221 } 3222 3223 /* 3224 * Put a command on command ring, ring bell, set timer, and cv_timedwait. 3225 * Command completion is notified by cv_signal from xhci_event_cmd() 3226 * (called from xhci_softint), or timed-out. 
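 * A minimal (illustrative) caller looks like xhci_enable_slot() below:
 *
 *	trb.trb_0 = 0;
 *	trb.trb_2 = 0;
 *	trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT);
 *	err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT);
 *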
3227 * The completion code is copied to sc->sc_result_trb in xhci_event_cmd(), 3228 * then do_command examines it. 3229 */ 3230 static usbd_status 3231 xhci_do_command_locked(struct xhci_softc * const sc, 3232 struct xhci_soft_trb * const trb, int timeout) 3233 { 3234 struct xhci_ring * const cr = sc->sc_cr; 3235 usbd_status err; 3236 3237 XHCIHIST_FUNC(); 3238 XHCIHIST_CALLARGS("input: 0x%016jx 0x%08jx 0x%08jx", 3239 trb->trb_0, trb->trb_2, trb->trb_3, 0); 3240 3241 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx"); 3242 KASSERT(mutex_owned(&sc->sc_lock)); 3243 3244 while (sc->sc_command_addr != 0 || 3245 (sc->sc_suspender != NULL && sc->sc_suspender != curlwp)) 3246 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock); 3247 if (sc->sc_suspendresume_failed) 3248 return USBD_IOERROR; 3249 3250 /* 3251 * If enqueue pointer points at last of ring, it's Link TRB, 3252 * command TRB will be stored in 0th TRB. 3253 */ 3254 if (cr->xr_ep == cr->xr_ntrb - 1) 3255 sc->sc_command_addr = xhci_ring_trbp(cr, 0); 3256 else 3257 sc->sc_command_addr = xhci_ring_trbp(cr, cr->xr_ep); 3258 3259 sc->sc_resultpending = true; 3260 3261 mutex_enter(&cr->xr_lock); 3262 xhci_ring_put(sc, cr, NULL, trb, 1); 3263 mutex_exit(&cr->xr_lock); 3264 3265 xhci_db_write_4(sc, XHCI_DOORBELL(0), 0); 3266 3267 while (sc->sc_resultpending) { 3268 if (cv_timedwait(&sc->sc_command_cv, &sc->sc_lock, 3269 MAX(1, mstohz(timeout))) == EWOULDBLOCK) { 3270 xhci_abort_command(sc); 3271 err = USBD_TIMEOUT; 3272 goto timedout; 3273 } 3274 } 3275 3276 trb->trb_0 = sc->sc_result_trb.trb_0; 3277 trb->trb_2 = sc->sc_result_trb.trb_2; 3278 trb->trb_3 = sc->sc_result_trb.trb_3; 3279 3280 DPRINTFN(12, "output: 0x%016jx 0x%08jx 0x%08jx", 3281 trb->trb_0, trb->trb_2, trb->trb_3, 0); 3282 3283 switch (XHCI_TRB_2_ERROR_GET(trb->trb_2)) { 3284 case XHCI_TRB_ERROR_SUCCESS: 3285 err = USBD_NORMAL_COMPLETION; 3286 break; 3287 default: 3288 case 192 ... 223: 3289 DPRINTFN(5, "error %#jx", 3290 XHCI_TRB_2_ERROR_GET(trb->trb_2), 0, 0, 0); 3291 err = USBD_IOERROR; 3292 break; 3293 case 224 ... 255: 3294 err = USBD_NORMAL_COMPLETION; 3295 break; 3296 } 3297 3298 timedout: 3299 sc->sc_resultpending = false; 3300 sc->sc_command_addr = 0; 3301 cv_broadcast(&sc->sc_cmdbusy_cv); 3302 3303 return err; 3304 } 3305 3306 static usbd_status 3307 xhci_do_command(struct xhci_softc * const sc, struct xhci_soft_trb * const trb, 3308 int timeout) 3309 { 3310 3311 mutex_enter(&sc->sc_lock); 3312 usbd_status ret = xhci_do_command_locked(sc, trb, timeout); 3313 mutex_exit(&sc->sc_lock); 3314 3315 return ret; 3316 } 3317 3318 static usbd_status 3319 xhci_enable_slot(struct xhci_softc * const sc, uint8_t * const slotp) 3320 { 3321 struct xhci_soft_trb trb; 3322 usbd_status err; 3323 3324 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 3325 3326 trb.trb_0 = 0; 3327 trb.trb_2 = 0; 3328 trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT); 3329 3330 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 3331 if (err != USBD_NORMAL_COMPLETION) { 3332 return err; 3333 } 3334 3335 *slotp = XHCI_TRB_3_SLOT_GET(trb.trb_3); 3336 3337 return err; 3338 } 3339 3340 /* 3341 * xHCI 4.6.4 3342 * Deallocate ring and device/input context DMA buffers, and disable_slot. 3343 * All endpoints in the slot should be stopped. 3344 * Should be called with sc_lock held. 
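 *
 * On success the slot's rings and context DMA memory are released via
 * xhci_free_slot(), its DCBAA entry is cleared with
 * xhci_set_dcba(sc, 0, slot), and the in-memory slot record is zeroed.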
3345 */ 3346 static usbd_status 3347 xhci_disable_slot(struct xhci_softc * const sc, uint8_t slot) 3348 { 3349 struct xhci_soft_trb trb; 3350 struct xhci_slot *xs; 3351 usbd_status err; 3352 3353 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 3354 3355 if (sc->sc_dying) 3356 return USBD_IOERROR; 3357 3358 trb.trb_0 = 0; 3359 trb.trb_2 = 0; 3360 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot) | 3361 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT); 3362 3363 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 3364 3365 if (!err) { 3366 xs = &sc->sc_slots[slot]; 3367 if (xs->xs_idx != 0) { 3368 xhci_free_slot(sc, xs); 3369 xhci_set_dcba(sc, 0, slot); 3370 memset(xs, 0, sizeof(*xs)); 3371 } 3372 } 3373 3374 return err; 3375 } 3376 3377 /* 3378 * Set address of device and transition slot state from ENABLED to ADDRESSED 3379 * if Block Setaddress Request (BSR) is false. 3380 * If BSR==true, transition slot state from ENABLED to DEFAULT. 3381 * see xHCI 1.1 4.5.3, 3.3.4 3382 * Should be called without sc_lock held. 3383 */ 3384 static usbd_status 3385 xhci_address_device(struct xhci_softc * const sc, 3386 uint64_t icp, uint8_t slot_id, bool bsr) 3387 { 3388 struct xhci_soft_trb trb; 3389 usbd_status err; 3390 3391 XHCIHIST_FUNC(); 3392 if (bsr) { 3393 XHCIHIST_CALLARGS("icp %#jx slot %#jx with bsr", 3394 icp, slot_id, 0, 0); 3395 } else { 3396 XHCIHIST_CALLARGS("icp %#jx slot %#jx nobsr", 3397 icp, slot_id, 0, 0); 3398 } 3399 3400 trb.trb_0 = icp; 3401 trb.trb_2 = 0; 3402 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot_id) | 3403 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) | 3404 (bsr ? XHCI_TRB_3_BSR_BIT : 0); 3405 3406 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 3407 3408 if (XHCI_TRB_2_ERROR_GET(trb.trb_2) == XHCI_TRB_ERROR_NO_SLOTS) 3409 err = USBD_NO_ADDR; 3410 3411 return err; 3412 } 3413 3414 static usbd_status 3415 xhci_update_ep0_mps(struct xhci_softc * const sc, 3416 struct xhci_slot * const xs, u_int mps) 3417 { 3418 struct xhci_soft_trb trb; 3419 usbd_status err; 3420 uint32_t * cp; 3421 3422 XHCIHIST_FUNC(); 3423 XHCIHIST_CALLARGS("slot %ju mps %ju", xs->xs_idx, mps, 0, 0); 3424 3425 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL); 3426 cp[0] = htole32(0); 3427 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_EP_CONTROL)); 3428 3429 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_EP_CONTROL)); 3430 cp[1] = htole32(XHCI_EPCTX_1_MAXP_SIZE_SET(mps)); 3431 3432 /* sync input contexts before they are read from memory */ 3433 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE); 3434 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0), 3435 sc->sc_ctxsz * 4); 3436 3437 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0); 3438 trb.trb_2 = 0; 3439 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 3440 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX); 3441 3442 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 3443 return err; 3444 } 3445 3446 static void 3447 xhci_set_dcba(struct xhci_softc * const sc, uint64_t dcba, int si) 3448 { 3449 uint64_t * const dcbaa = KERNADDR(&sc->sc_dcbaa_dma, 0); 3450 3451 XHCIHIST_FUNC(); 3452 XHCIHIST_CALLARGS("dcbaa %#jx dc 0x%016jx slot %jd", 3453 (uintptr_t)&dcbaa[si], dcba, si, 0); 3454 3455 dcbaa[si] = htole64(dcba); 3456 usb_syncmem(&sc->sc_dcbaa_dma, si * sizeof(uint64_t), sizeof(uint64_t), 3457 BUS_DMASYNC_PREWRITE); 3458 } 3459 3460 /* 3461 * Allocate device and input context DMA buffer, and 3462 * TRB DMA buffer for each endpoint. 
3463 */ 3464 static usbd_status 3465 xhci_init_slot(struct usbd_device *dev, uint32_t slot) 3466 { 3467 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus); 3468 struct xhci_slot *xs; 3469 3470 XHCIHIST_FUNC(); 3471 XHCIHIST_CALLARGS("slot %ju", slot, 0, 0, 0); 3472 3473 xs = &sc->sc_slots[slot]; 3474 3475 /* allocate contexts */ 3476 int err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz, 3477 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_dc_dma); 3478 if (err) { 3479 DPRINTFN(1, "failed to allocmem output device context %jd", 3480 err, 0, 0, 0); 3481 return USBD_NOMEM; 3482 } 3483 3484 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz, 3485 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_ic_dma); 3486 if (err) { 3487 DPRINTFN(1, "failed to allocmem input device context %jd", 3488 err, 0, 0, 0); 3489 goto bad1; 3490 } 3491 3492 memset(&xs->xs_xr[0], 0, sizeof(xs->xs_xr)); 3493 xs->xs_idx = slot; 3494 3495 return USBD_NORMAL_COMPLETION; 3496 3497 bad1: 3498 usb_freemem(&xs->xs_dc_dma); 3499 xs->xs_idx = 0; 3500 return USBD_NOMEM; 3501 } 3502 3503 static void 3504 xhci_free_slot(struct xhci_softc *sc, struct xhci_slot *xs) 3505 { 3506 u_int dci; 3507 3508 XHCIHIST_FUNC(); 3509 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0); 3510 3511 /* deallocate all allocated rings in the slot */ 3512 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) { 3513 if (xs->xs_xr[dci] != NULL) 3514 xhci_ring_free(sc, &xs->xs_xr[dci]); 3515 } 3516 usb_freemem(&xs->xs_ic_dma); 3517 usb_freemem(&xs->xs_dc_dma); 3518 xs->xs_idx = 0; 3519 } 3520 3521 /* 3522 * Setup slot context, set Device Context Base Address, and issue 3523 * Set Address Device command. 3524 */ 3525 static usbd_status 3526 xhci_set_address(struct usbd_device *dev, uint32_t slot, bool bsr) 3527 { 3528 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus); 3529 struct xhci_slot *xs; 3530 usbd_status err; 3531 3532 XHCIHIST_FUNC(); 3533 XHCIHIST_CALLARGS("slot %ju bsr %ju", slot, bsr, 0, 0); 3534 3535 xs = &sc->sc_slots[slot]; 3536 3537 xhci_setup_ctx(dev->ud_pipe0); 3538 3539 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0), 3540 sc->sc_ctxsz * 3); 3541 3542 xhci_set_dcba(sc, DMAADDR(&xs->xs_dc_dma, 0), slot); 3543 3544 err = xhci_address_device(sc, xhci_slot_get_icp(sc, xs, 0), slot, bsr); 3545 3546 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 3547 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, 0), 3548 sc->sc_ctxsz * 2); 3549 3550 return err; 3551 } 3552 3553 /* 3554 * 4.8.2, 6.2.3.2 3555 * construct slot/endpoint context parameters and do syncmem 3556 */ 3557 static void 3558 xhci_setup_ctx(struct usbd_pipe *pipe) 3559 { 3560 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 3561 struct usbd_device *dev = pipe->up_dev; 3562 struct xhci_slot * const xs = dev->ud_hcpriv; 3563 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 3564 const u_int dci = xhci_ep_get_dci(ed); 3565 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 3566 uint32_t *cp; 3567 uint8_t speed = dev->ud_speed; 3568 3569 XHCIHIST_FUNC(); 3570 XHCIHIST_CALLARGS("pipe %#jx: slot %ju dci %ju speed %ju", 3571 (uintptr_t)pipe, xs->xs_idx, dci, speed); 3572 3573 /* set up initial input control context */ 3574 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL); 3575 cp[0] = htole32(0); 3576 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(dci)); 3577 cp[1] |= htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_SLOT)); 3578 cp[7] = htole32(0); 3579 3580 /* set up input slot context */ 3581 cp = 
xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT)); 3582 cp[0] = 3583 XHCI_SCTX_0_CTX_NUM_SET(dci) | 3584 XHCI_SCTX_0_SPEED_SET(xhci_speed2xspeed(speed)); 3585 cp[1] = 0; 3586 cp[2] = XHCI_SCTX_2_IRQ_TARGET_SET(0); 3587 cp[3] = 0; 3588 xhci_setup_route(pipe, cp); 3589 xhci_setup_tthub(pipe, cp); 3590 3591 cp[0] = htole32(cp[0]); 3592 cp[1] = htole32(cp[1]); 3593 cp[2] = htole32(cp[2]); 3594 cp[3] = htole32(cp[3]); 3595 3596 /* set up input endpoint context */ 3597 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(dci)); 3598 cp[0] = 3599 XHCI_EPCTX_0_EPSTATE_SET(0) | 3600 XHCI_EPCTX_0_MULT_SET(0) | 3601 XHCI_EPCTX_0_MAXP_STREAMS_SET(0) | 3602 XHCI_EPCTX_0_LSA_SET(0) | 3603 XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(0); 3604 cp[1] = 3605 XHCI_EPCTX_1_EPTYPE_SET(xhci_ep_get_type(ed)) | 3606 XHCI_EPCTX_1_HID_SET(0) | 3607 XHCI_EPCTX_1_MAXB_SET(0); 3608 3609 if (xfertype != UE_ISOCHRONOUS) 3610 cp[1] |= XHCI_EPCTX_1_CERR_SET(3); 3611 3612 xhci_setup_maxburst(pipe, cp); 3613 3614 DPRINTFN(4, "setting on dci %ju ival %ju mult %ju mps %#jx", 3615 dci, XHCI_EPCTX_0_IVAL_GET(cp[0]), XHCI_EPCTX_0_MULT_GET(cp[0]), 3616 XHCI_EPCTX_1_MAXP_SIZE_GET(cp[1])); 3617 DPRINTFN(4, " maxburst %ju mep %#jx atl %#jx", 3618 XHCI_EPCTX_1_MAXB_GET(cp[1]), 3619 (XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_GET(cp[0]) << 16) + 3620 XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_GET(cp[4]), 3621 XHCI_EPCTX_4_AVG_TRB_LEN_GET(cp[4]), 0); 3622 3623 /* rewind TR dequeue pointer in xHC */ 3624 /* can't use xhci_ep_get_dci() yet? */ 3625 *(uint64_t *)(&cp[2]) = htole64( 3626 xhci_ring_trbp(xs->xs_xr[dci], 0) | 3627 XHCI_EPCTX_2_DCS_SET(1)); 3628 3629 cp[0] = htole32(cp[0]); 3630 cp[1] = htole32(cp[1]); 3631 cp[4] = htole32(cp[4]); 3632 3633 /* rewind TR dequeue pointer in driver */ 3634 struct xhci_ring *xr = xs->xs_xr[dci]; 3635 mutex_enter(&xr->xr_lock); 3636 xhci_host_dequeue(xr); 3637 mutex_exit(&xr->xr_lock); 3638 3639 /* sync input contexts before they are read from memory */ 3640 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE); 3641 } 3642 3643 /* 3644 * Setup route string and roothub port of given device for slot context 3645 */ 3646 static void 3647 xhci_setup_route(struct usbd_pipe *pipe, uint32_t *cp) 3648 { 3649 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 3650 struct usbd_device *dev = pipe->up_dev; 3651 struct usbd_port *up = dev->ud_powersrc; 3652 struct usbd_device *hub; 3653 struct usbd_device *adev; 3654 uint8_t rhport = 0; 3655 uint32_t route = 0; 3656 3657 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 3658 3659 /* Locate root hub port and Determine route string */ 3660 /* 4.3.3 route string does not include roothub port */ 3661 for (hub = dev; hub != NULL; hub = hub->ud_myhub) { 3662 uint32_t dep; 3663 3664 DPRINTFN(4, "hub %#jx depth %jd upport %#jx upportno %jd", 3665 (uintptr_t)hub, hub->ud_depth, (uintptr_t)hub->ud_powersrc, 3666 hub->ud_powersrc ? (uintptr_t)hub->ud_powersrc->up_portno : 3667 -1); 3668 3669 if (hub->ud_powersrc == NULL) 3670 break; 3671 dep = hub->ud_depth; 3672 if (dep == 0) 3673 break; 3674 rhport = hub->ud_powersrc->up_portno; 3675 if (dep > USB_HUB_MAX_DEPTH) 3676 continue; 3677 3678 route |= 3679 (rhport > UHD_SS_NPORTS_MAX ? UHD_SS_NPORTS_MAX : rhport) 3680 << ((dep - 1) * 4); 3681 } 3682 route = route >> 4; 3683 size_t bn = hub == sc->sc_bus.ub_roothub ? 
0 : 1; 3684 3685 /* Locate port on upstream high speed hub */ 3686 for (adev = dev, hub = up->up_parent; 3687 hub != NULL && hub->ud_speed != USB_SPEED_HIGH; 3688 adev = hub, hub = hub->ud_myhub) 3689 ; 3690 if (hub) { 3691 int p; 3692 for (p = 1; p <= hub->ud_hub->uh_hubdesc.bNbrPorts; p++) { 3693 if (hub->ud_hub->uh_ports[p - 1].up_dev == adev) { 3694 dev->ud_myhsport = &hub->ud_hub->uh_ports[p - 1]; 3695 goto found; 3696 } 3697 } 3698 panic("%s: cannot find HS port", __func__); 3699 found: 3700 DPRINTFN(4, "high speed port %jd", p, 0, 0, 0); 3701 } else { 3702 dev->ud_myhsport = NULL; 3703 } 3704 3705 const size_t ctlrport = xhci_rhport2ctlrport(sc, bn, rhport); 3706 3707 DPRINTFN(4, "rhport %ju ctlrport %ju Route %05jx hub %#jx", rhport, 3708 ctlrport, route, (uintptr_t)hub); 3709 3710 cp[0] |= XHCI_SCTX_0_ROUTE_SET(route); 3711 cp[1] |= XHCI_SCTX_1_RH_PORT_SET(ctlrport); 3712 } 3713 3714 /* 3715 * Setup whether device is hub, whether device uses MTT, and 3716 * TT informations if it uses MTT. 3717 */ 3718 static void 3719 xhci_setup_tthub(struct usbd_pipe *pipe, uint32_t *cp) 3720 { 3721 struct usbd_device *dev = pipe->up_dev; 3722 struct usbd_port *myhsport = dev->ud_myhsport; 3723 usb_device_descriptor_t * const dd = &dev->ud_ddesc; 3724 uint32_t speed = dev->ud_speed; 3725 uint8_t rhaddr = dev->ud_bus->ub_rhaddr; 3726 uint8_t tthubslot, ttportnum; 3727 bool ishub; 3728 bool usemtt; 3729 3730 XHCIHIST_FUNC(); 3731 3732 /* 3733 * 6.2.2, Table 57-60, 6.2.2.1, 6.2.2.2 3734 * tthubslot: 3735 * This is the slot ID of parent HS hub 3736 * if LS/FS device is connected && connected through HS hub. 3737 * This is 0 if device is not LS/FS device || 3738 * parent hub is not HS hub || 3739 * attached to root hub. 3740 * ttportnum: 3741 * This is the downstream facing port of parent HS hub 3742 * if LS/FS device is connected. 3743 * This is 0 if device is not LS/FS device || 3744 * parent hub is not HS hub || 3745 * attached to root hub. 3746 */ 3747 if (myhsport && 3748 myhsport->up_parent->ud_addr != rhaddr && 3749 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) { 3750 ttportnum = myhsport->up_portno; 3751 tthubslot = myhsport->up_parent->ud_addr; 3752 } else { 3753 ttportnum = 0; 3754 tthubslot = 0; 3755 } 3756 XHCIHIST_CALLARGS("myhsport %#jx ttportnum=%jd tthubslot=%jd", 3757 (uintptr_t)myhsport, ttportnum, tthubslot, 0); 3758 3759 /* ishub is valid after reading UDESC_DEVICE */ 3760 ishub = (dd->bDeviceClass == UDCLASS_HUB); 3761 3762 /* dev->ud_hub is valid after reading UDESC_HUB */ 3763 if (ishub && dev->ud_hub) { 3764 usb_hub_descriptor_t *hd = &dev->ud_hub->uh_hubdesc; 3765 uint8_t ttt = 3766 __SHIFTOUT(UGETW(hd->wHubCharacteristics), UHD_TT_THINK); 3767 3768 cp[1] |= XHCI_SCTX_1_NUM_PORTS_SET(hd->bNbrPorts); 3769 cp[2] |= XHCI_SCTX_2_TT_THINK_TIME_SET(ttt); 3770 DPRINTFN(4, "nports=%jd ttt=%jd", hd->bNbrPorts, ttt, 0, 0); 3771 } 3772 3773 #define IS_MTTHUB(dd) \ 3774 ((dd)->bDeviceProtocol == UDPROTO_HSHUBMTT) 3775 3776 /* 3777 * MTT flag is set if 3778 * 1. this is HS hub && MTTs are supported and enabled; or 3779 * 2. this is LS or FS device && there is a parent HS hub where MTTs 3780 * are supported and enabled. 
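	 *    (IS_MTTHUB() above keys this off bDeviceProtocol ==
	 *    UDPROTO_HSHUBMTT, i.e. the hub's device descriptor reports
	 *    the multiple-TT protocol.)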
3781 * 3782 * XXX enabled is not tested yet 3783 */ 3784 if (ishub && speed == USB_SPEED_HIGH && IS_MTTHUB(dd)) 3785 usemtt = true; 3786 else if ((speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) && 3787 myhsport && 3788 myhsport->up_parent->ud_addr != rhaddr && 3789 IS_MTTHUB(&myhsport->up_parent->ud_ddesc)) 3790 usemtt = true; 3791 else 3792 usemtt = false; 3793 DPRINTFN(4, "class %ju proto %ju ishub %jd usemtt %jd", 3794 dd->bDeviceClass, dd->bDeviceProtocol, ishub, usemtt); 3795 3796 #undef IS_MTTHUB 3797 3798 cp[0] |= 3799 XHCI_SCTX_0_HUB_SET(ishub ? 1 : 0) | 3800 XHCI_SCTX_0_MTT_SET(usemtt ? 1 : 0); 3801 cp[2] |= 3802 XHCI_SCTX_2_TT_HUB_SID_SET(tthubslot) | 3803 XHCI_SCTX_2_TT_PORT_NUM_SET(ttportnum); 3804 } 3805 3806 static const usb_endpoint_ss_comp_descriptor_t * 3807 xhci_get_essc_desc(struct usbd_pipe *pipe) 3808 { 3809 struct usbd_device *dev = pipe->up_dev; 3810 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 3811 const usb_cdc_descriptor_t *cdcd; 3812 usbd_desc_iter_t iter; 3813 uint8_t ep; 3814 3815 /* config desc is NULL when opening ep0 */ 3816 if (dev == NULL || dev->ud_cdesc == NULL) 3817 return NULL; 3818 3819 cdcd = (const usb_cdc_descriptor_t *)usb_find_desc(dev, 3820 UDESC_INTERFACE, USBD_CDCSUBTYPE_ANY); 3821 if (cdcd == NULL) 3822 return NULL; 3823 3824 usb_desc_iter_init(dev, &iter); 3825 iter.cur = (const void *)cdcd; 3826 3827 /* find endpoint_ss_comp desc for ep of this pipe */ 3828 for (ep = 0;;) { 3829 cdcd = (const usb_cdc_descriptor_t *)usb_desc_iter_next(&iter); 3830 if (cdcd == NULL) 3831 break; 3832 if (ep == 0 && cdcd->bDescriptorType == UDESC_ENDPOINT) { 3833 ep = ((const usb_endpoint_descriptor_t *)cdcd)-> 3834 bEndpointAddress; 3835 if (UE_GET_ADDR(ep) == 3836 UE_GET_ADDR(ed->bEndpointAddress)) { 3837 cdcd = (const usb_cdc_descriptor_t *) 3838 usb_desc_iter_next(&iter); 3839 break; 3840 } 3841 ep = 0; 3842 } 3843 } 3844 if (cdcd != NULL && cdcd->bDescriptorType == UDESC_ENDPOINT_SS_COMP) { 3845 return (const usb_endpoint_ss_comp_descriptor_t *)cdcd; 3846 } 3847 return NULL; 3848 } 3849 3850 /* set up params for periodic endpoint */ 3851 static void 3852 xhci_setup_maxburst(struct usbd_pipe *pipe, uint32_t *cp) 3853 { 3854 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe; 3855 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 3856 struct usbd_device * const dev = pipe->up_dev; 3857 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 3858 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 3859 uint16_t mps = UGETW(ed->wMaxPacketSize); 3860 uint8_t speed = dev->ud_speed; 3861 uint32_t maxb, mep, atl; 3862 uint8_t ival, mult; 3863 3864 const usb_endpoint_ss_comp_descriptor_t * esscd = 3865 xhci_get_essc_desc(pipe); 3866 3867 /* USB 2.0 9.6.6, xHCI 4.8.2.4, 6.2.3.2 - 6.2.3.8 */ 3868 switch (xfertype) { 3869 case UE_ISOCHRONOUS: 3870 case UE_INTERRUPT: 3871 if (USB_IS_SS(speed)) { 3872 maxb = esscd ? esscd->bMaxBurst : UE_GET_TRANS(mps); 3873 mep = esscd ? UGETW(esscd->wBytesPerInterval) : 3874 UE_GET_SIZE(mps) * (maxb + 1); 3875 if (esscd && xfertype == UE_ISOCHRONOUS && 3876 XHCI_HCC2_LEC(sc->sc_hcc2) == 0) { 3877 mult = UE_GET_SS_ISO_MULT(esscd->bmAttributes); 3878 mult = (mult > 2) ? 
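				    /* the EP context Mult field holds at most 2 (xHCI 6.2.3) */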
2 : mult; 3879 } else 3880 mult = 0; 3881 3882 } else { 3883 switch (speed) { 3884 case USB_SPEED_HIGH: 3885 maxb = UE_GET_TRANS(mps); 3886 mep = UE_GET_SIZE(mps) * (maxb + 1); 3887 break; 3888 case USB_SPEED_FULL: 3889 maxb = 0; 3890 mep = UE_GET_SIZE(mps); 3891 break; 3892 default: 3893 maxb = 0; 3894 mep = 0; 3895 break; 3896 } 3897 mult = 0; 3898 } 3899 mps = UE_GET_SIZE(mps); 3900 3901 if (pipe->up_interval == USBD_DEFAULT_INTERVAL) 3902 ival = ed->bInterval; 3903 else 3904 ival = pipe->up_interval; 3905 3906 ival = xhci_bival2ival(ival, speed, xfertype); 3907 atl = mep; 3908 break; 3909 case UE_CONTROL: 3910 case UE_BULK: 3911 default: 3912 if (USB_IS_SS(speed)) { 3913 maxb = esscd ? esscd->bMaxBurst : 0; 3914 } else 3915 maxb = 0; 3916 3917 mps = UE_GET_SIZE(mps); 3918 mep = 0; 3919 mult = 0; 3920 ival = 0; 3921 if (xfertype == UE_CONTROL) 3922 atl = 8; /* 6.2.3 */ 3923 else 3924 atl = mps; 3925 break; 3926 } 3927 3928 switch (speed) { 3929 case USB_SPEED_LOW: 3930 break; 3931 case USB_SPEED_FULL: 3932 if (xfertype == UE_INTERRUPT) 3933 if (mep > XHCI_EPCTX_MEP_FS_INTR) 3934 mep = XHCI_EPCTX_MEP_FS_INTR; 3935 if (xfertype == UE_ISOCHRONOUS) 3936 if (mep > XHCI_EPCTX_MEP_FS_ISOC) 3937 mep = XHCI_EPCTX_MEP_FS_ISOC; 3938 break; 3939 case USB_SPEED_HIGH: 3940 if (xfertype == UE_INTERRUPT) 3941 if (mep > XHCI_EPCTX_MEP_HS_INTR) 3942 mep = XHCI_EPCTX_MEP_HS_INTR; 3943 if (xfertype == UE_ISOCHRONOUS) 3944 if (mep > XHCI_EPCTX_MEP_HS_ISOC) 3945 mep = XHCI_EPCTX_MEP_HS_ISOC; 3946 break; 3947 case USB_SPEED_SUPER: 3948 case USB_SPEED_SUPER_PLUS: 3949 default: 3950 if (xfertype == UE_INTERRUPT) 3951 if (mep > XHCI_EPCTX_MEP_SS_INTR) 3952 mep = XHCI_EPCTX_MEP_SS_INTR; 3953 if (xfertype == UE_ISOCHRONOUS) { 3954 if (speed == USB_SPEED_SUPER || 3955 XHCI_HCC2_LEC(sc->sc_hcc2) == 0) { 3956 if (mep > XHCI_EPCTX_MEP_SS_ISOC) 3957 mep = XHCI_EPCTX_MEP_SS_ISOC; 3958 } else { 3959 if (mep > XHCI_EPCTX_MEP_SS_ISOC_LEC) 3960 mep = XHCI_EPCTX_MEP_SS_ISOC_LEC; 3961 } 3962 } 3963 break; 3964 } 3965 3966 xpipe->xp_maxb = maxb + 1; 3967 xpipe->xp_mult = mult + 1; 3968 3969 cp[0] |= XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(mep >> 16); 3970 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival); 3971 cp[0] |= XHCI_EPCTX_0_MULT_SET(mult); 3972 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(mps); 3973 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb); 3974 cp[4] |= XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(mep & 0xffff); 3975 cp[4] |= XHCI_EPCTX_4_AVG_TRB_LEN_SET(atl); 3976 } 3977 3978 /* 3979 * Convert usbdi bInterval value to xhci endpoint context interval value 3980 * for periodic pipe. 3981 * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6 3982 */ 3983 static uint32_t 3984 xhci_bival2ival(uint32_t ival, uint32_t speed, uint32_t xfertype) 3985 { 3986 if (xfertype != UE_INTERRUPT && xfertype != UE_ISOCHRONOUS) 3987 return 0; 3988 3989 if (xfertype == UE_INTERRUPT && 3990 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) { 3991 u_int i; 3992 3993 /* 3994 * round ival down to "the nearest base 2 multiple of 3995 * bInterval * 8". 3996 * bInterval is at most 255 as its type is uByte. 3997 * 255(ms) = 2040(x 125us) < 2^11, so start with 10. 3998 */ 3999 for (i = 10; i > 0; i--) { 4000 if ((ival * 8) >= (1 << i)) 4001 break; 4002 } 4003 ival = i; 4004 4005 /* 3 - 10 */ 4006 ival = (ival < 3) ? 3 : ival; 4007 } else if (speed == USB_SPEED_FULL) { 4008 /* FS isoc */ 4009 ival += 3; /* 1ms -> 125us */ 4010 ival--; /* Interval = bInterval-1 */ 4011 /* 3 - 18 */ 4012 ival = (ival > 18) ? 18 : ival; 4013 ival = (ival < 3) ? 
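		/*
		 * e.g. bInterval 4 on a FS isoc endpoint means a period of
		 * 2^(4-1) frames = 8ms; 4 + 3 - 1 = 6 and 2^6 * 125us = 8ms.
		 */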
3 : ival; 4014 } else { 4015 /* SS/HS intr/isoc */ 4016 if (ival > 0) 4017 ival--; /* Interval = bInterval-1 */ 4018 /* 0 - 15 */ 4019 ival = (ival > 15) ? 15 : ival; 4020 } 4021 4022 return ival; 4023 } 4024 4025 /* ----- */ 4026 4027 static void 4028 xhci_noop(struct usbd_pipe *pipe) 4029 { 4030 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4031 } 4032 4033 /* 4034 * Process root hub request. 4035 */ 4036 static int 4037 xhci_roothub_ctrl_locked(struct usbd_bus *bus, usb_device_request_t *req, 4038 void *buf, int buflen) 4039 { 4040 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 4041 usb_port_status_t ps; 4042 int l, totlen = 0; 4043 uint16_t len, value, index; 4044 int port, i; 4045 uint32_t v; 4046 4047 XHCIHIST_FUNC(); 4048 4049 KASSERT(mutex_owned(&sc->sc_rhlock)); 4050 4051 if (sc->sc_dying) 4052 return -1; 4053 4054 size_t bn = bus == &sc->sc_bus ? 0 : 1; 4055 4056 len = UGETW(req->wLength); 4057 value = UGETW(req->wValue); 4058 index = UGETW(req->wIndex); 4059 4060 XHCIHIST_CALLARGS("rhreq: %04jx %04jx %04jx %04jx", 4061 req->bmRequestType | (req->bRequest << 8), value, index, len); 4062 4063 #define C(x,y) ((x) | ((y) << 8)) 4064 switch (C(req->bRequest, req->bmRequestType)) { 4065 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE): 4066 DPRINTFN(8, "getdesc: wValue=0x%04jx", value, 0, 0, 0); 4067 if (len == 0) 4068 break; 4069 switch (value) { 4070 #define sd ((usb_string_descriptor_t *)buf) 4071 case C(2, UDESC_STRING): 4072 /* Product */ 4073 totlen = usb_makestrdesc(sd, len, "xHCI root hub"); 4074 break; 4075 #undef sd 4076 default: 4077 /* default from usbroothub */ 4078 return buflen; 4079 } 4080 break; 4081 4082 /* Hub requests */ 4083 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE): 4084 break; 4085 /* Clear Port Feature request */ 4086 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): { 4087 const size_t cp = xhci_rhport2ctlrport(sc, bn, index); 4088 4089 DPRINTFN(4, "UR_CLEAR_PORT_FEAT bp=%jd feat=%jd bus=%jd cp=%jd", 4090 index, value, bn, cp); 4091 if (index < 1 || index > sc->sc_rhportcount[bn]) { 4092 return -1; 4093 } 4094 port = XHCI_PORTSC(cp); 4095 v = xhci_op_read_4(sc, port); 4096 DPRINTFN(4, "portsc=0x%08jx", v, 0, 0, 0); 4097 v &= ~XHCI_PS_CLEAR; 4098 switch (value) { 4099 case UHF_PORT_ENABLE: 4100 xhci_op_write_4(sc, port, v & ~XHCI_PS_PED); 4101 break; 4102 case UHF_PORT_SUSPEND: 4103 return -1; 4104 case UHF_PORT_POWER: 4105 break; 4106 case UHF_PORT_TEST: 4107 case UHF_PORT_INDICATOR: 4108 return -1; 4109 case UHF_C_PORT_CONNECTION: 4110 xhci_op_write_4(sc, port, v | XHCI_PS_CSC); 4111 break; 4112 case UHF_C_PORT_ENABLE: 4113 case UHF_C_PORT_SUSPEND: 4114 case UHF_C_PORT_OVER_CURRENT: 4115 return -1; 4116 case UHF_C_BH_PORT_RESET: 4117 xhci_op_write_4(sc, port, v | XHCI_PS_WRC); 4118 break; 4119 case UHF_C_PORT_RESET: 4120 xhci_op_write_4(sc, port, v | XHCI_PS_PRC); 4121 break; 4122 case UHF_C_PORT_LINK_STATE: 4123 xhci_op_write_4(sc, port, v | XHCI_PS_PLC); 4124 break; 4125 case UHF_C_PORT_CONFIG_ERROR: 4126 xhci_op_write_4(sc, port, v | XHCI_PS_CEC); 4127 break; 4128 default: 4129 return -1; 4130 } 4131 break; 4132 } 4133 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE): 4134 if (len == 0) 4135 break; 4136 if ((value & 0xff) != 0) { 4137 return -1; 4138 } 4139 usb_hub_descriptor_t hubd; 4140 4141 totlen = uimin(buflen, sizeof(hubd)); 4142 memcpy(&hubd, buf, totlen); 4143 hubd.bNbrPorts = sc->sc_rhportcount[bn]; 4144 USETW(hubd.wHubCharacteristics, UHD_PWR_NO_SWITCH); 4145 hubd.bPwrOn2PwrGood = 200; 4146 for (i = 0, l = sc->sc_rhportcount[bn]; l > 0; i++, l -= 8) { 4147 
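			/* each DeviceRemovable byte covers 8 ports; zero = all removable */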
/* XXX can't find out? */ 4148 hubd.DeviceRemovable[i++] = 0; 4149 } 4150 hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i; 4151 totlen = uimin(totlen, hubd.bDescLength); 4152 memcpy(buf, &hubd, totlen); 4153 break; 4154 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE): 4155 if (len != 4) { 4156 return -1; 4157 } 4158 memset(buf, 0, len); /* ? XXX */ 4159 totlen = len; 4160 break; 4161 /* Get Port Status request */ 4162 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): { 4163 const size_t cp = xhci_rhport2ctlrport(sc, bn, index); 4164 4165 DPRINTFN(8, "get port status bn=%jd i=%jd cp=%ju", 4166 bn, index, cp, 0); 4167 if (index < 1 || index > sc->sc_rhportcount[bn]) { 4168 DPRINTFN(5, "bad get port status: index=%jd bn=%jd " 4169 "portcount=%jd", 4170 index, bn, sc->sc_rhportcount[bn], 0); 4171 return -1; 4172 } 4173 if (len != 4) { 4174 DPRINTFN(5, "bad get port status: len %jd != 4", 4175 len, 0, 0, 0); 4176 return -1; 4177 } 4178 v = xhci_op_read_4(sc, XHCI_PORTSC(cp)); 4179 DPRINTFN(4, "getrhportsc %jd 0x%08jx", cp, v, 0, 0); 4180 i = xhci_xspeed2psspeed(XHCI_PS_SPEED_GET(v)); 4181 if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS; 4182 if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED; 4183 if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR; 4184 //if (v & XHCI_PS_SUSP) i |= UPS_SUSPEND; 4185 if (v & XHCI_PS_PR) i |= UPS_RESET; 4186 if (v & XHCI_PS_PP) { 4187 if (i & UPS_OTHER_SPEED) 4188 i |= UPS_PORT_POWER_SS; 4189 else 4190 i |= UPS_PORT_POWER; 4191 } 4192 if (i & UPS_OTHER_SPEED) 4193 i |= UPS_PORT_LS_SET(XHCI_PS_PLS_GET(v)); 4194 if (sc->sc_vendor_port_status) 4195 i = sc->sc_vendor_port_status(sc, v, i); 4196 USETW(ps.wPortStatus, i); 4197 i = 0; 4198 if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS; 4199 if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED; 4200 if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR; 4201 if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET; 4202 if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET; 4203 if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE; 4204 if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR; 4205 USETW(ps.wPortChange, i); 4206 totlen = uimin(len, sizeof(ps)); 4207 memcpy(buf, &ps, totlen); 4208 DPRINTFN(5, "get port status: wPortStatus %#jx wPortChange %#jx" 4209 " totlen %jd", 4210 UGETW(ps.wPortStatus), UGETW(ps.wPortChange), totlen, 0); 4211 break; 4212 } 4213 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE): 4214 return -1; 4215 case C(UR_SET_HUB_DEPTH, UT_WRITE_CLASS_DEVICE): 4216 break; 4217 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE): 4218 break; 4219 /* Set Port Feature request */ 4220 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): { 4221 int optval = (index >> 8) & 0xff; 4222 index &= 0xff; 4223 if (index < 1 || index > sc->sc_rhportcount[bn]) { 4224 return -1; 4225 } 4226 4227 const size_t cp = xhci_rhport2ctlrport(sc, bn, index); 4228 4229 port = XHCI_PORTSC(cp); 4230 v = xhci_op_read_4(sc, port); 4231 DPRINTFN(4, "index %jd cp %jd portsc=0x%08jx", index, cp, v, 0); 4232 v &= ~XHCI_PS_CLEAR; 4233 switch (value) { 4234 case UHF_PORT_ENABLE: 4235 xhci_op_write_4(sc, port, v | XHCI_PS_PED); 4236 break; 4237 case UHF_PORT_SUSPEND: 4238 /* XXX suspend */ 4239 break; 4240 case UHF_PORT_RESET: 4241 xhci_op_write_4(sc, port, v | XHCI_PS_PR); 4242 /* Wait for reset to complete. 
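			 * PORTSC.PR is polled every 10ms below, for at most
			 * USB_PORT_ROOT_RESET_DELAY ms.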
*/ 4243 for (i = 0; i < USB_PORT_ROOT_RESET_DELAY / 10; i++) { 4244 if (sc->sc_dying) { 4245 return -1; 4246 } 4247 v = xhci_op_read_4(sc, port); 4248 if ((v & XHCI_PS_PR) == 0) { 4249 break; 4250 } 4251 usb_delay_ms(&sc->sc_bus, 10); 4252 } 4253 break; 4254 case UHF_PORT_POWER: 4255 /* XXX power control */ 4256 break; 4257 /* XXX more */ 4258 case UHF_C_PORT_RESET: 4259 xhci_op_write_4(sc, port, v | XHCI_PS_PRC); 4260 break; 4261 case UHF_PORT_U1_TIMEOUT: 4262 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) { 4263 return -1; 4264 } 4265 port = XHCI_PORTPMSC(cp); 4266 v = xhci_op_read_4(sc, port); 4267 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx", 4268 index, cp, v, 0); 4269 v &= ~XHCI_PM3_U1TO_SET(0xff); 4270 v |= XHCI_PM3_U1TO_SET(optval); 4271 xhci_op_write_4(sc, port, v); 4272 break; 4273 case UHF_PORT_U2_TIMEOUT: 4274 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) { 4275 return -1; 4276 } 4277 port = XHCI_PORTPMSC(cp); 4278 v = xhci_op_read_4(sc, port); 4279 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx", 4280 index, cp, v, 0); 4281 v &= ~XHCI_PM3_U2TO_SET(0xff); 4282 v |= XHCI_PM3_U2TO_SET(optval); 4283 xhci_op_write_4(sc, port, v); 4284 break; 4285 default: 4286 return -1; 4287 } 4288 } 4289 break; 4290 case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER): 4291 case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER): 4292 case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER): 4293 case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER): 4294 break; 4295 default: 4296 /* default from usbroothub */ 4297 return buflen; 4298 } 4299 4300 return totlen; 4301 } 4302 4303 static int 4304 xhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req, 4305 void *buf, int buflen) 4306 { 4307 struct xhci_softc *sc = XHCI_BUS2SC(bus); 4308 int actlen; 4309 4310 mutex_enter(&sc->sc_rhlock); 4311 actlen = xhci_roothub_ctrl_locked(bus, req, buf, buflen); 4312 mutex_exit(&sc->sc_rhlock); 4313 4314 return actlen; 4315 } 4316 4317 /* root hub interrupt */ 4318 4319 static usbd_status 4320 xhci_root_intr_transfer(struct usbd_xfer *xfer) 4321 { 4322 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4323 4324 /* Pipe isn't running, start first */ 4325 return xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 4326 } 4327 4328 /* Wait for roothub port status/change */ 4329 static usbd_status 4330 xhci_root_intr_start(struct usbd_xfer *xfer) 4331 { 4332 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4333 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1; 4334 4335 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4336 4337 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 4338 4339 if (sc->sc_dying) 4340 return USBD_IOERROR; 4341 4342 KASSERT(sc->sc_intrxfer[bn] == NULL); 4343 sc->sc_intrxfer[bn] = xfer; 4344 xfer->ux_status = USBD_IN_PROGRESS; 4345 4346 return USBD_IN_PROGRESS; 4347 } 4348 4349 static void 4350 xhci_root_intr_abort(struct usbd_xfer *xfer) 4351 { 4352 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4353 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1; 4354 4355 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4356 4357 KASSERT(mutex_owned(&sc->sc_lock)); 4358 KASSERT(xfer->ux_pipe->up_intrxfer == xfer); 4359 4360 /* If xfer has already completed, nothing to do here. */ 4361 if (sc->sc_intrxfer[bn] == NULL) 4362 return; 4363 4364 /* 4365 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer. 4366 * Cancel it. 
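	 * usb_transfer_complete() will call xhci_root_intr_done(), which
	 * clears sc_intrxfer[bn].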
4367 */ 4368 KASSERT(sc->sc_intrxfer[bn] == xfer); 4369 xfer->ux_status = USBD_CANCELLED; 4370 usb_transfer_complete(xfer); 4371 } 4372 4373 static void 4374 xhci_root_intr_close(struct usbd_pipe *pipe) 4375 { 4376 struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe); 4377 const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer; 4378 const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1; 4379 4380 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4381 4382 KASSERT(mutex_owned(&sc->sc_lock)); 4383 4384 /* 4385 * Caller must guarantee the xfer has completed first, by 4386 * closing the pipe only after normal completion or an abort. 4387 */ 4388 KASSERT(sc->sc_intrxfer[bn] == NULL); 4389 } 4390 4391 static void 4392 xhci_root_intr_done(struct usbd_xfer *xfer) 4393 { 4394 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4395 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1; 4396 4397 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4398 4399 KASSERT(mutex_owned(&sc->sc_lock)); 4400 4401 /* Claim the xfer so it doesn't get completed again. */ 4402 KASSERT(sc->sc_intrxfer[bn] == xfer); 4403 KASSERT(xfer->ux_status != USBD_IN_PROGRESS); 4404 sc->sc_intrxfer[bn] = NULL; 4405 } 4406 4407 /* -------------- */ 4408 /* device control */ 4409 4410 static usbd_status 4411 xhci_device_ctrl_transfer(struct usbd_xfer *xfer) 4412 { 4413 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4414 4415 /* Pipe isn't running, start first */ 4416 return xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 4417 } 4418 4419 static usbd_status 4420 xhci_device_ctrl_start(struct usbd_xfer *xfer) 4421 { 4422 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4423 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4424 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4425 struct xhci_ring * const tr = xs->xs_xr[dci]; 4426 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 4427 usb_device_request_t * const req = &xfer->ux_request; 4428 const bool isread = usbd_xfer_isread(xfer); 4429 const uint32_t len = UGETW(req->wLength); 4430 usb_dma_t * const dma = &xfer->ux_dmabuf; 4431 uint64_t parameter; 4432 uint32_t status; 4433 uint32_t control; 4434 u_int i; 4435 const bool polling = xhci_polling_p(sc); 4436 4437 XHCIHIST_FUNC(); 4438 XHCIHIST_CALLARGS("req: %04jx %04jx %04jx %04jx", 4439 req->bmRequestType | (req->bRequest << 8), UGETW(req->wValue), 4440 UGETW(req->wIndex), UGETW(req->wLength)); 4441 4442 KASSERT(polling || mutex_owned(&sc->sc_lock)); 4443 4444 /* we rely on the bottom bits for extra info */ 4445 KASSERTMSG(((uintptr_t)xfer & 0x3) == 0x0, "xfer %p", xfer); 4446 4447 KASSERT((xfer->ux_rqflags & URQ_REQUEST) != 0); 4448 4449 if (tr->is_halted) 4450 goto out; 4451 4452 i = 0; 4453 4454 /* setup phase */ 4455 parameter = le64dec(req); /* to keep USB endian after xhci_trb_put() */ 4456 status = XHCI_TRB_2_IRQ_SET(0) | XHCI_TRB_2_BYTES_SET(sizeof(*req)); 4457 control = ((len == 0) ? XHCI_TRB_3_TRT_NONE : 4458 (isread ? XHCI_TRB_3_TRT_IN : XHCI_TRB_3_TRT_OUT)) | 4459 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) | 4460 XHCI_TRB_3_IDT_BIT; 4461 xhci_xfer_put_trb(xx, i++, parameter, status, control); 4462 4463 if (len != 0) { 4464 /* data phase */ 4465 parameter = DMAADDR(dma, 0); 4466 KASSERTMSG(len <= 0x10000, "len %d", len); 4467 status = XHCI_TRB_2_IRQ_SET(0) | 4468 XHCI_TRB_2_TDSZ_SET(0) | 4469 XHCI_TRB_2_BYTES_SET(len); 4470 control = (isread ? XHCI_TRB_3_DIR_IN : 0) | 4471 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE) | 4472 (isread ? 
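		    /* ISP: interrupt on short packet; set only for IN (device-to-host) data */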
XHCI_TRB_3_ISP_BIT : 0) | 4473 XHCI_TRB_3_IOC_BIT; 4474 xhci_xfer_put_trb(xx, i++, parameter, status, control); 4475 4476 usb_syncmem(dma, 0, len, 4477 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 4478 } 4479 4480 parameter = 0; 4481 status = XHCI_TRB_2_IRQ_SET(0); 4482 /* the status stage has inverted direction */ 4483 control = ((isread && (len > 0)) ? 0 : XHCI_TRB_3_DIR_IN) | 4484 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE) | 4485 XHCI_TRB_3_IOC_BIT; 4486 xhci_xfer_put_trb(xx, i++, parameter, status, control); 4487 4488 if (!polling) 4489 mutex_enter(&tr->xr_lock); 4490 xhci_ring_put_xfer(sc, tr, xx, i); 4491 if (!polling) 4492 mutex_exit(&tr->xr_lock); 4493 4494 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); 4495 4496 out: if (xfer->ux_status == USBD_NOT_STARTED) { 4497 xfer->ux_status = USBD_IN_PROGRESS; 4498 usbd_xfer_schedule_timeout(xfer); 4499 } else { 4500 /* 4501 * We must be coming from xhci_pipe_restart -- timeout 4502 * already set up, nothing to do. 4503 */ 4504 } 4505 KASSERT(xfer->ux_status == USBD_IN_PROGRESS); 4506 4507 return USBD_IN_PROGRESS; 4508 } 4509 4510 static void 4511 xhci_device_ctrl_done(struct usbd_xfer *xfer) 4512 { 4513 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4514 usb_device_request_t *req = &xfer->ux_request; 4515 int len = UGETW(req->wLength); 4516 int rd = req->bmRequestType & UT_READ; 4517 4518 if (len) 4519 usb_syncmem(&xfer->ux_dmabuf, 0, len, 4520 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 4521 } 4522 4523 static void 4524 xhci_device_ctrl_abort(struct usbd_xfer *xfer) 4525 { 4526 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4527 4528 usbd_xfer_abort(xfer); 4529 } 4530 4531 static void 4532 xhci_device_ctrl_close(struct usbd_pipe *pipe) 4533 { 4534 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4535 4536 xhci_close_pipe(pipe); 4537 } 4538 4539 /* ------------------ */ 4540 /* device isochronous */ 4541 4542 static usbd_status 4543 xhci_device_isoc_transfer(struct usbd_xfer *xfer) 4544 { 4545 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4546 4547 return xhci_device_isoc_enter(xfer); 4548 } 4549 4550 static usbd_status 4551 xhci_device_isoc_enter(struct usbd_xfer *xfer) 4552 { 4553 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4554 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4555 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4556 struct xhci_ring * const tr = xs->xs_xr[dci]; 4557 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 4558 struct xhci_pipe * const xpipe = (struct xhci_pipe *)xfer->ux_pipe; 4559 usb_dma_t * const dma = &xfer->ux_dmabuf; 4560 uint64_t parameter; 4561 uint32_t status; 4562 uint32_t control; 4563 uint32_t offs; 4564 int i, ival; 4565 const bool polling = xhci_polling_p(sc); 4566 const uint16_t MPS = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize); 4567 const uint16_t mps = UE_GET_SIZE(MPS); 4568 const uint8_t maxb = xpipe->xp_maxb; 4569 4570 XHCIHIST_FUNC(); 4571 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4572 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4573 4574 KASSERT(polling || mutex_owned(&sc->sc_lock)); 4575 4576 if (sc->sc_dying) 4577 return USBD_IOERROR; 4578 4579 KASSERT(xfer->ux_nframes != 0 && xfer->ux_frlengths); 4580 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0); 4581 4582 const bool isread = usbd_xfer_isread(xfer); 4583 if (xfer->ux_length) 4584 usb_syncmem(dma, 0, xfer->ux_length, 4585 isread ? 
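		    /* IN: the xHC writes the buffer, so PREREAD; OUT: the xHC reads it, so PREWRITE */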
BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 4586 4587 ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval; 4588 if (ival >= 1 && ival <= 16) 4589 ival = 1 << (ival - 1); 4590 else 4591 ival = 1; /* fake something up */ 4592 4593 if (xpipe->xp_isoc_next == -1) { 4594 uint32_t mfindex = xhci_rt_read_4(sc, XHCI_MFINDEX); 4595 4596 DPRINTF("mfindex %jx", (uintmax_t)mfindex, 0, 0, 0); 4597 mfindex = XHCI_MFINDEX_GET(mfindex + 1); 4598 mfindex /= USB_UFRAMES_PER_FRAME; 4599 mfindex += 7; /* 7 frames is max possible IST */ 4600 xpipe->xp_isoc_next = roundup2(mfindex, ival); 4601 } 4602 4603 offs = 0; 4604 for (i = 0; i < xfer->ux_nframes; i++) { 4605 const uint32_t len = xfer->ux_frlengths[i]; 4606 const unsigned tdpc = howmany(len, mps); 4607 const unsigned tbc = howmany(tdpc, maxb) - 1; 4608 const unsigned tlbpc1 = tdpc % maxb; 4609 const unsigned tlbpc = tlbpc1 ? tlbpc1 - 1 : maxb - 1; 4610 4611 KASSERTMSG(len <= 0x10000, "len %d", len); 4612 parameter = DMAADDR(dma, offs); 4613 status = XHCI_TRB_2_IRQ_SET(0) | 4614 XHCI_TRB_2_TDSZ_SET(0) | 4615 XHCI_TRB_2_BYTES_SET(len); 4616 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) | 4617 (isread ? XHCI_TRB_3_ISP_BIT : 0) | 4618 XHCI_TRB_3_TBC_SET(tbc) | 4619 XHCI_TRB_3_TLBPC_SET(tlbpc) | 4620 XHCI_TRB_3_IOC_BIT; 4621 if (XHCI_HCC_CFC(sc->sc_hcc)) { 4622 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next); 4623 #if 0 4624 } else if (xpipe->xp_isoc_next == -1) { 4625 control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next); 4626 #endif 4627 } else { 4628 control |= XHCI_TRB_3_ISO_SIA_BIT; 4629 } 4630 #if 0 4631 if (i != xfer->ux_nframes - 1) 4632 control |= XHCI_TRB_3_BEI_BIT; 4633 #endif 4634 xhci_xfer_put_trb(xx, i, parameter, status, control); 4635 4636 xpipe->xp_isoc_next += ival; 4637 offs += len; 4638 } 4639 4640 xx->xx_isoc_done = 0; 4641 4642 if (!polling) 4643 mutex_enter(&tr->xr_lock); 4644 xhci_ring_put_xfer(sc, tr, xx, i); 4645 if (!polling) 4646 mutex_exit(&tr->xr_lock); 4647 4648 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); 4649 xfer->ux_status = USBD_IN_PROGRESS; 4650 usbd_xfer_schedule_timeout(xfer); 4651 4652 return USBD_IN_PROGRESS; 4653 } 4654 4655 static void 4656 xhci_device_isoc_abort(struct usbd_xfer *xfer) 4657 { 4658 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4659 4660 usbd_xfer_abort(xfer); 4661 } 4662 4663 static void 4664 xhci_device_isoc_close(struct usbd_pipe *pipe) 4665 { 4666 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4667 4668 xhci_close_pipe(pipe); 4669 } 4670 4671 static void 4672 xhci_device_isoc_done(struct usbd_xfer *xfer) 4673 { 4674 #ifdef USB_DEBUG 4675 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4676 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4677 #endif 4678 const bool isread = usbd_xfer_isread(xfer); 4679 4680 XHCIHIST_FUNC(); 4681 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4682 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4683 4684 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length, 4685 isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 4686 } 4687 4688 /* ----------- */ 4689 /* device bulk */ 4690 4691 static usbd_status 4692 xhci_device_bulk_transfer(struct usbd_xfer *xfer) 4693 { 4694 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4695 4696 /* Pipe isn't running, so start it first. 
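	 * The usbdi layer has already queued the xfer on up_queue, so kick
	 * the head of the queue here.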
*/ 4697 return xhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 4698 } 4699 4700 static usbd_status 4701 xhci_device_bulk_start(struct usbd_xfer *xfer) 4702 { 4703 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4704 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4705 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4706 struct xhci_ring * const tr = xs->xs_xr[dci]; 4707 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 4708 const uint32_t len = xfer->ux_length; 4709 usb_dma_t * const dma = &xfer->ux_dmabuf; 4710 uint64_t parameter; 4711 uint32_t status; 4712 uint32_t control; 4713 u_int i = 0; 4714 const bool polling = xhci_polling_p(sc); 4715 4716 XHCIHIST_FUNC(); 4717 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4718 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4719 4720 KASSERT(polling || mutex_owned(&sc->sc_lock)); 4721 4722 if (sc->sc_dying) 4723 return USBD_IOERROR; 4724 4725 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0); 4726 4727 if (tr->is_halted) 4728 goto out; 4729 4730 parameter = DMAADDR(dma, 0); 4731 const bool isread = usbd_xfer_isread(xfer); 4732 if (len) 4733 usb_syncmem(dma, 0, len, 4734 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 4735 4736 /* 4737 * XXX: (dsl) The physical buffer must not cross a 64k boundary. 4738 * If the user supplied buffer crosses such a boundary then 2 4739 * (or more) TRB should be used. 4740 * If multiple TRB are used the td_size field must be set correctly. 4741 * For v1.0 devices (like ivy bridge) this is the number of usb data 4742 * blocks needed to complete the transfer. 4743 * Setting it to 1 in the last TRB causes an extra zero-length 4744 * data block be sent. 4745 * The earlier documentation differs, I don't know how it behaves. 4746 */ 4747 KASSERTMSG(len <= 0x10000, "len %d", len); 4748 status = XHCI_TRB_2_IRQ_SET(0) | 4749 XHCI_TRB_2_TDSZ_SET(0) | 4750 XHCI_TRB_2_BYTES_SET(len); 4751 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) | 4752 (isread ? XHCI_TRB_3_ISP_BIT : 0) | 4753 XHCI_TRB_3_IOC_BIT; 4754 xhci_xfer_put_trb(xx, i++, parameter, status, control); 4755 4756 if (!polling) 4757 mutex_enter(&tr->xr_lock); 4758 xhci_ring_put_xfer(sc, tr, xx, i); 4759 if (!polling) 4760 mutex_exit(&tr->xr_lock); 4761 4762 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); 4763 4764 out: if (xfer->ux_status == USBD_NOT_STARTED) { 4765 xfer->ux_status = USBD_IN_PROGRESS; 4766 usbd_xfer_schedule_timeout(xfer); 4767 } else { 4768 /* 4769 * We must be coming from xhci_pipe_restart -- timeout 4770 * already set up, nothing to do. 4771 */ 4772 } 4773 KASSERT(xfer->ux_status == USBD_IN_PROGRESS); 4774 4775 return USBD_IN_PROGRESS; 4776 } 4777 4778 static void 4779 xhci_device_bulk_done(struct usbd_xfer *xfer) 4780 { 4781 #ifdef USB_DEBUG 4782 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4783 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4784 #endif 4785 const bool isread = usbd_xfer_isread(xfer); 4786 4787 XHCIHIST_FUNC(); 4788 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4789 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4790 4791 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length, 4792 isread ? 
BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 4793 } 4794 4795 static void 4796 xhci_device_bulk_abort(struct usbd_xfer *xfer) 4797 { 4798 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4799 4800 usbd_xfer_abort(xfer); 4801 } 4802 4803 static void 4804 xhci_device_bulk_close(struct usbd_pipe *pipe) 4805 { 4806 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4807 4808 xhci_close_pipe(pipe); 4809 } 4810 4811 /* ---------------- */ 4812 /* device interrupt */ 4813 4814 static usbd_status 4815 xhci_device_intr_transfer(struct usbd_xfer *xfer) 4816 { 4817 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4818 4819 /* Pipe isn't running, so start it first. */ 4820 return xhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 4821 } 4822 4823 static usbd_status 4824 xhci_device_intr_start(struct usbd_xfer *xfer) 4825 { 4826 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4827 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4828 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4829 struct xhci_ring * const tr = xs->xs_xr[dci]; 4830 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 4831 const uint32_t len = xfer->ux_length; 4832 const bool polling = xhci_polling_p(sc); 4833 usb_dma_t * const dma = &xfer->ux_dmabuf; 4834 uint64_t parameter; 4835 uint32_t status; 4836 uint32_t control; 4837 u_int i = 0; 4838 4839 XHCIHIST_FUNC(); 4840 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4841 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4842 4843 KASSERT(polling || mutex_owned(&sc->sc_lock)); 4844 4845 if (sc->sc_dying) 4846 return USBD_IOERROR; 4847 4848 if (tr->is_halted) 4849 goto out; 4850 4851 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0); 4852 4853 const bool isread = usbd_xfer_isread(xfer); 4854 if (len) 4855 usb_syncmem(dma, 0, len, 4856 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 4857 4858 parameter = DMAADDR(dma, 0); 4859 KASSERTMSG(len <= 0x10000, "len %d", len); 4860 status = XHCI_TRB_2_IRQ_SET(0) | 4861 XHCI_TRB_2_TDSZ_SET(0) | 4862 XHCI_TRB_2_BYTES_SET(len); 4863 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) | 4864 (isread ? XHCI_TRB_3_ISP_BIT : 0) | XHCI_TRB_3_IOC_BIT; 4865 xhci_xfer_put_trb(xx, i++, parameter, status, control); 4866 4867 if (!polling) 4868 mutex_enter(&tr->xr_lock); 4869 xhci_ring_put_xfer(sc, tr, xx, i); 4870 if (!polling) 4871 mutex_exit(&tr->xr_lock); 4872 4873 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); 4874 4875 out: if (xfer->ux_status == USBD_NOT_STARTED) { 4876 xfer->ux_status = USBD_IN_PROGRESS; 4877 usbd_xfer_schedule_timeout(xfer); 4878 } else { 4879 /* 4880 * We must be coming from xhci_pipe_restart -- timeout 4881 * already set up, nothing to do. 4882 */ 4883 } 4884 KASSERT(xfer->ux_status == USBD_IN_PROGRESS); 4885 4886 return USBD_IN_PROGRESS; 4887 } 4888 4889 static void 4890 xhci_device_intr_done(struct usbd_xfer *xfer) 4891 { 4892 struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer); 4893 #ifdef USB_DEBUG 4894 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4895 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4896 #endif 4897 const bool isread = usbd_xfer_isread(xfer); 4898 4899 XHCIHIST_FUNC(); 4900 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4901 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4902 4903 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 4904 4905 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length, 4906 isread ? 
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
}

static void
xhci_device_intr_abort(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx", (uintptr_t)xfer, 0, 0, 0);

	KASSERT(mutex_owned(&sc->sc_lock));
	usbd_xfer_abort(xfer);
}

static void
xhci_device_intr_close(struct usbd_pipe *pipe)
{
	//struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx", (uintptr_t)pipe, 0, 0, 0);

	xhci_close_pipe(pipe);
}