1 /* $NetBSD: xhci.c,v 1.151 2021/12/21 09:51:22 skrll Exp $ */ 2 3 /* 4 * Copyright (c) 2013 Jonathan A. Kollasch 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 /* 30 * USB rev 2.0 and rev 3.1 specification 31 * http://www.usb.org/developers/docs/ 32 * xHCI rev 1.1 specification 33 * http://www.intel.com/technology/usb/spec.htm 34 */ 35 36 #include <sys/cdefs.h> 37 __KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.151 2021/12/21 09:51:22 skrll Exp $"); 38 39 #ifdef _KERNEL_OPT 40 #include "opt_usb.h" 41 #endif 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/kernel.h> 46 #include <sys/kmem.h> 47 #include <sys/device.h> 48 #include <sys/select.h> 49 #include <sys/proc.h> 50 #include <sys/queue.h> 51 #include <sys/mutex.h> 52 #include <sys/condvar.h> 53 #include <sys/bus.h> 54 #include <sys/cpu.h> 55 #include <sys/sysctl.h> 56 57 #include <machine/endian.h> 58 59 #include <dev/usb/usb.h> 60 #include <dev/usb/usbdi.h> 61 #include <dev/usb/usbdivar.h> 62 #include <dev/usb/usbdi_util.h> 63 #include <dev/usb/usbhist.h> 64 #include <dev/usb/usb_mem.h> 65 #include <dev/usb/usb_quirks.h> 66 67 #include <dev/usb/xhcireg.h> 68 #include <dev/usb/xhcivar.h> 69 #include <dev/usb/usbroothub.h> 70 71 72 #ifdef USB_DEBUG 73 #ifndef XHCI_DEBUG 74 #define xhcidebug 0 75 #else /* !XHCI_DEBUG */ 76 #define HEXDUMP(a, b, c) \ 77 do { \ 78 if (xhcidebug > 0) \ 79 hexdump(printf, a, b, c); \ 80 } while (/*CONSTCOND*/0) 81 static int xhcidebug = 0; 82 83 SYSCTL_SETUP(sysctl_hw_xhci_setup, "sysctl hw.xhci setup") 84 { 85 int err; 86 const struct sysctlnode *rnode; 87 const struct sysctlnode *cnode; 88 89 err = sysctl_createv(clog, 0, NULL, &rnode, 90 CTLFLAG_PERMANENT, CTLTYPE_NODE, "xhci", 91 SYSCTL_DESCR("xhci global controls"), 92 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL); 93 94 if (err) 95 goto fail; 96 97 /* control debugging printfs */ 98 err = sysctl_createv(clog, 0, &rnode, &cnode, 99 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, 100 "debug", SYSCTL_DESCR("Enable debugging output"), 101 NULL, 0, &xhcidebug, sizeof(xhcidebug), CTL_CREATE, CTL_EOL); 102 if (err) 103 goto fail; 104 105 return; 106 fail: 107 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err); 108 } 109 110 
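/*
 * The node created above is reachable at run time as hw.xhci.debug;
 * it controls the verbosity of the DPRINTF/DPRINTFN usbhist logging
 * and, when greater than zero, enables the HEXDUMP output below.
 * This block is only compiled when both USB_DEBUG and XHCI_DEBUG are
 * defined.
 */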
#endif /* !XHCI_DEBUG */ 111 #endif /* USB_DEBUG */ 112 113 #ifndef HEXDUMP 114 #define HEXDUMP(a, b, c) 115 #endif 116 117 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOG(xhcidebug,FMT,A,B,C,D) 118 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(xhcidebug,N,FMT,A,B,C,D) 119 #define XHCIHIST_FUNC() USBHIST_FUNC() 120 #define XHCIHIST_CALLED(name) USBHIST_CALLED(xhcidebug) 121 #define XHCIHIST_CALLARGS(FMT,A,B,C,D) \ 122 USBHIST_CALLARGS(xhcidebug,FMT,A,B,C,D) 123 124 #define XHCI_DCI_SLOT 0 125 #define XHCI_DCI_EP_CONTROL 1 126 127 #define XHCI_ICI_INPUT_CONTROL 0 128 129 struct xhci_pipe { 130 struct usbd_pipe xp_pipe; 131 struct usb_task xp_async_task; 132 int16_t xp_isoc_next; /* next frame */ 133 uint8_t xp_maxb; /* max burst */ 134 uint8_t xp_mult; 135 }; 136 137 #define XHCI_COMMAND_RING_TRBS 256 138 #define XHCI_EVENT_RING_TRBS 256 139 #define XHCI_EVENT_RING_SEGMENTS 1 140 #define XHCI_TRB_3_ED_BIT XHCI_TRB_3_ISP_BIT 141 142 static usbd_status xhci_open(struct usbd_pipe *); 143 static void xhci_close_pipe(struct usbd_pipe *); 144 static int xhci_intr1(struct xhci_softc * const); 145 static void xhci_softintr(void *); 146 static void xhci_poll(struct usbd_bus *); 147 static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int); 148 static void xhci_freex(struct usbd_bus *, struct usbd_xfer *); 149 static void xhci_abortx(struct usbd_xfer *); 150 static bool xhci_dying(struct usbd_bus *); 151 static void xhci_get_lock(struct usbd_bus *, kmutex_t **); 152 static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int, 153 struct usbd_port *); 154 static int xhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *, 155 void *, int); 156 157 static usbd_status xhci_configure_endpoint(struct usbd_pipe *); 158 //static usbd_status xhci_unconfigure_endpoint(struct usbd_pipe *); 159 static usbd_status xhci_reset_endpoint(struct usbd_pipe *); 160 static usbd_status xhci_stop_endpoint_cmd(struct xhci_softc *, 161 struct xhci_slot *, u_int, uint32_t); 162 static usbd_status xhci_stop_endpoint(struct usbd_pipe *); 163 164 static void xhci_host_dequeue(struct xhci_ring * const); 165 static usbd_status xhci_set_dequeue(struct usbd_pipe *); 166 167 static usbd_status xhci_do_command(struct xhci_softc * const, 168 struct xhci_soft_trb * const, int); 169 static usbd_status xhci_do_command_locked(struct xhci_softc * const, 170 struct xhci_soft_trb * const, int); 171 static usbd_status xhci_init_slot(struct usbd_device *, uint32_t); 172 static void xhci_free_slot(struct xhci_softc *, struct xhci_slot *); 173 static usbd_status xhci_set_address(struct usbd_device *, uint32_t, bool); 174 static usbd_status xhci_enable_slot(struct xhci_softc * const, 175 uint8_t * const); 176 static usbd_status xhci_disable_slot(struct xhci_softc * const, uint8_t); 177 static usbd_status xhci_address_device(struct xhci_softc * const, 178 uint64_t, uint8_t, bool); 179 static void xhci_set_dcba(struct xhci_softc * const, uint64_t, int); 180 static usbd_status xhci_update_ep0_mps(struct xhci_softc * const, 181 struct xhci_slot * const, u_int); 182 static usbd_status xhci_ring_init(struct xhci_softc * const, 183 struct xhci_ring **, size_t, size_t); 184 static void xhci_ring_free(struct xhci_softc * const, 185 struct xhci_ring ** const); 186 187 static void xhci_setup_ctx(struct usbd_pipe *); 188 static void xhci_setup_route(struct usbd_pipe *, uint32_t *); 189 static void xhci_setup_tthub(struct usbd_pipe *, uint32_t *); 190 static void xhci_setup_maxburst(struct usbd_pipe *, uint32_t *); 191 static 
uint32_t xhci_bival2ival(uint32_t, uint32_t); 192 193 static void xhci_noop(struct usbd_pipe *); 194 195 static usbd_status xhci_root_intr_transfer(struct usbd_xfer *); 196 static usbd_status xhci_root_intr_start(struct usbd_xfer *); 197 static void xhci_root_intr_abort(struct usbd_xfer *); 198 static void xhci_root_intr_close(struct usbd_pipe *); 199 static void xhci_root_intr_done(struct usbd_xfer *); 200 201 static usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *); 202 static usbd_status xhci_device_ctrl_start(struct usbd_xfer *); 203 static void xhci_device_ctrl_abort(struct usbd_xfer *); 204 static void xhci_device_ctrl_close(struct usbd_pipe *); 205 static void xhci_device_ctrl_done(struct usbd_xfer *); 206 207 static usbd_status xhci_device_isoc_transfer(struct usbd_xfer *); 208 static usbd_status xhci_device_isoc_enter(struct usbd_xfer *); 209 static void xhci_device_isoc_abort(struct usbd_xfer *); 210 static void xhci_device_isoc_close(struct usbd_pipe *); 211 static void xhci_device_isoc_done(struct usbd_xfer *); 212 213 static usbd_status xhci_device_intr_transfer(struct usbd_xfer *); 214 static usbd_status xhci_device_intr_start(struct usbd_xfer *); 215 static void xhci_device_intr_abort(struct usbd_xfer *); 216 static void xhci_device_intr_close(struct usbd_pipe *); 217 static void xhci_device_intr_done(struct usbd_xfer *); 218 219 static usbd_status xhci_device_bulk_transfer(struct usbd_xfer *); 220 static usbd_status xhci_device_bulk_start(struct usbd_xfer *); 221 static void xhci_device_bulk_abort(struct usbd_xfer *); 222 static void xhci_device_bulk_close(struct usbd_pipe *); 223 static void xhci_device_bulk_done(struct usbd_xfer *); 224 225 static const struct usbd_bus_methods xhci_bus_methods = { 226 .ubm_open = xhci_open, 227 .ubm_softint = xhci_softintr, 228 .ubm_dopoll = xhci_poll, 229 .ubm_allocx = xhci_allocx, 230 .ubm_freex = xhci_freex, 231 .ubm_abortx = xhci_abortx, 232 .ubm_dying = xhci_dying, 233 .ubm_getlock = xhci_get_lock, 234 .ubm_newdev = xhci_new_device, 235 .ubm_rhctrl = xhci_roothub_ctrl, 236 }; 237 238 static const struct usbd_pipe_methods xhci_root_intr_methods = { 239 .upm_transfer = xhci_root_intr_transfer, 240 .upm_start = xhci_root_intr_start, 241 .upm_abort = xhci_root_intr_abort, 242 .upm_close = xhci_root_intr_close, 243 .upm_cleartoggle = xhci_noop, 244 .upm_done = xhci_root_intr_done, 245 }; 246 247 248 static const struct usbd_pipe_methods xhci_device_ctrl_methods = { 249 .upm_transfer = xhci_device_ctrl_transfer, 250 .upm_start = xhci_device_ctrl_start, 251 .upm_abort = xhci_device_ctrl_abort, 252 .upm_close = xhci_device_ctrl_close, 253 .upm_cleartoggle = xhci_noop, 254 .upm_done = xhci_device_ctrl_done, 255 }; 256 257 static const struct usbd_pipe_methods xhci_device_isoc_methods = { 258 .upm_transfer = xhci_device_isoc_transfer, 259 .upm_abort = xhci_device_isoc_abort, 260 .upm_close = xhci_device_isoc_close, 261 .upm_cleartoggle = xhci_noop, 262 .upm_done = xhci_device_isoc_done, 263 }; 264 265 static const struct usbd_pipe_methods xhci_device_bulk_methods = { 266 .upm_transfer = xhci_device_bulk_transfer, 267 .upm_start = xhci_device_bulk_start, 268 .upm_abort = xhci_device_bulk_abort, 269 .upm_close = xhci_device_bulk_close, 270 .upm_cleartoggle = xhci_noop, 271 .upm_done = xhci_device_bulk_done, 272 }; 273 274 static const struct usbd_pipe_methods xhci_device_intr_methods = { 275 .upm_transfer = xhci_device_intr_transfer, 276 .upm_start = xhci_device_intr_start, 277 .upm_abort = xhci_device_intr_abort, 278 .upm_close = 
xhci_device_intr_close, 279 .upm_cleartoggle = xhci_noop, 280 .upm_done = xhci_device_intr_done, 281 }; 282 283 static inline uint32_t 284 xhci_read_1(const struct xhci_softc * const sc, bus_size_t offset) 285 { 286 return bus_space_read_1(sc->sc_iot, sc->sc_ioh, offset); 287 } 288 289 static inline uint32_t 290 xhci_read_2(const struct xhci_softc * const sc, bus_size_t offset) 291 { 292 return bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset); 293 } 294 295 static inline uint32_t 296 xhci_read_4(const struct xhci_softc * const sc, bus_size_t offset) 297 { 298 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset); 299 } 300 301 static inline void 302 xhci_write_1(const struct xhci_softc * const sc, bus_size_t offset, 303 uint32_t value) 304 { 305 bus_space_write_1(sc->sc_iot, sc->sc_ioh, offset, value); 306 } 307 308 #if 0 /* unused */ 309 static inline void 310 xhci_write_4(const struct xhci_softc * const sc, bus_size_t offset, 311 uint32_t value) 312 { 313 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value); 314 } 315 #endif /* unused */ 316 317 static inline void 318 xhci_barrier(const struct xhci_softc * const sc, int flags) 319 { 320 bus_space_barrier(sc->sc_iot, sc->sc_ioh, 0, sc->sc_ios, flags); 321 } 322 323 static inline uint32_t 324 xhci_cap_read_4(const struct xhci_softc * const sc, bus_size_t offset) 325 { 326 return bus_space_read_4(sc->sc_iot, sc->sc_cbh, offset); 327 } 328 329 static inline uint32_t 330 xhci_op_read_4(const struct xhci_softc * const sc, bus_size_t offset) 331 { 332 return bus_space_read_4(sc->sc_iot, sc->sc_obh, offset); 333 } 334 335 static inline void 336 xhci_op_write_4(const struct xhci_softc * const sc, bus_size_t offset, 337 uint32_t value) 338 { 339 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value); 340 } 341 342 static inline uint64_t 343 xhci_op_read_8(const struct xhci_softc * const sc, bus_size_t offset) 344 { 345 uint64_t value; 346 347 #ifdef XHCI_USE_BUS_SPACE_8 348 value = bus_space_read_8(sc->sc_iot, sc->sc_obh, offset); 349 #else 350 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset); 351 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_obh, 352 offset + 4) << 32; 353 #endif 354 355 return value; 356 } 357 358 static inline void 359 xhci_op_write_8(const struct xhci_softc * const sc, bus_size_t offset, 360 uint64_t value) 361 { 362 #ifdef XHCI_USE_BUS_SPACE_8 363 bus_space_write_8(sc->sc_iot, sc->sc_obh, offset, value); 364 #else 365 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 0, 366 (value >> 0) & 0xffffffff); 367 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 4, 368 (value >> 32) & 0xffffffff); 369 #endif 370 } 371 372 static inline uint32_t 373 xhci_rt_read_4(const struct xhci_softc * const sc, bus_size_t offset) 374 { 375 return bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset); 376 } 377 378 static inline void 379 xhci_rt_write_4(const struct xhci_softc * const sc, bus_size_t offset, 380 uint32_t value) 381 { 382 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value); 383 } 384 385 static inline uint64_t 386 xhci_rt_read_8(const struct xhci_softc * const sc, bus_size_t offset) 387 { 388 uint64_t value; 389 390 #ifdef XHCI_USE_BUS_SPACE_8 391 value = bus_space_read_8(sc->sc_iot, sc->sc_rbh, offset); 392 #else 393 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset); 394 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_rbh, 395 offset + 4) << 32; 396 #endif 397 398 return value; 399 } 400 401 static inline void 402 xhci_rt_write_8(const struct xhci_softc * const sc, bus_size_t offset, 403 
uint64_t value) 404 { 405 #ifdef XHCI_USE_BUS_SPACE_8 406 bus_space_write_8(sc->sc_iot, sc->sc_rbh, offset, value); 407 #else 408 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 0, 409 (value >> 0) & 0xffffffff); 410 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 4, 411 (value >> 32) & 0xffffffff); 412 #endif 413 } 414 415 #if 0 /* unused */ 416 static inline uint32_t 417 xhci_db_read_4(const struct xhci_softc * const sc, bus_size_t offset) 418 { 419 return bus_space_read_4(sc->sc_iot, sc->sc_dbh, offset); 420 } 421 #endif /* unused */ 422 423 static inline void 424 xhci_db_write_4(const struct xhci_softc * const sc, bus_size_t offset, 425 uint32_t value) 426 { 427 bus_space_write_4(sc->sc_iot, sc->sc_dbh, offset, value); 428 } 429 430 /* --- */ 431 432 static inline uint8_t 433 xhci_ep_get_type(usb_endpoint_descriptor_t * const ed) 434 { 435 u_int eptype = 0; 436 437 switch (UE_GET_XFERTYPE(ed->bmAttributes)) { 438 case UE_CONTROL: 439 eptype = 0x0; 440 break; 441 case UE_ISOCHRONOUS: 442 eptype = 0x1; 443 break; 444 case UE_BULK: 445 eptype = 0x2; 446 break; 447 case UE_INTERRUPT: 448 eptype = 0x3; 449 break; 450 } 451 452 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) || 453 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)) 454 return eptype | 0x4; 455 else 456 return eptype; 457 } 458 459 static u_int 460 xhci_ep_get_dci(usb_endpoint_descriptor_t * const ed) 461 { 462 /* xHCI 1.0 section 4.5.1 */ 463 u_int epaddr = UE_GET_ADDR(ed->bEndpointAddress); 464 u_int in = 0; 465 466 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) || 467 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)) 468 in = 1; 469 470 return epaddr * 2 + in; 471 } 472 473 static inline u_int 474 xhci_dci_to_ici(const u_int i) 475 { 476 return i + 1; 477 } 478 479 static inline void * 480 xhci_slot_get_dcv(struct xhci_softc * const sc, struct xhci_slot * const xs, 481 const u_int dci) 482 { 483 return KERNADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci); 484 } 485 486 #if 0 /* unused */ 487 static inline bus_addr_t 488 xhci_slot_get_dcp(struct xhci_softc * const sc, struct xhci_slot * const xs, 489 const u_int dci) 490 { 491 return DMAADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci); 492 } 493 #endif /* unused */ 494 495 static inline void * 496 xhci_slot_get_icv(struct xhci_softc * const sc, struct xhci_slot * const xs, 497 const u_int ici) 498 { 499 return KERNADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici); 500 } 501 502 static inline bus_addr_t 503 xhci_slot_get_icp(struct xhci_softc * const sc, struct xhci_slot * const xs, 504 const u_int ici) 505 { 506 return DMAADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici); 507 } 508 509 static inline struct xhci_trb * 510 xhci_ring_trbv(struct xhci_ring * const xr, u_int idx) 511 { 512 return KERNADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx); 513 } 514 515 static inline bus_addr_t 516 xhci_ring_trbp(struct xhci_ring * const xr, u_int idx) 517 { 518 return DMAADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx); 519 } 520 521 static inline void 522 xhci_xfer_put_trb(struct xhci_xfer * const xx, u_int idx, 523 uint64_t parameter, uint32_t status, uint32_t control) 524 { 525 KASSERTMSG(idx < xx->xx_ntrb, "idx=%u xx_ntrb=%u", idx, xx->xx_ntrb); 526 xx->xx_trb[idx].trb_0 = parameter; 527 xx->xx_trb[idx].trb_2 = status; 528 xx->xx_trb[idx].trb_3 = control; 529 } 530 531 static inline void 532 xhci_trb_put(struct xhci_trb * const trb, uint64_t parameter, uint32_t status, 533 uint32_t control) 534 { 535 trb->trb_0 = htole64(parameter); 536 trb->trb_2 = htole32(status); 537 trb->trb_3 = htole32(control); 538 } 539 540 
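/*
 * Worked example for the DCI/ICI helpers above (xHCI 1.x 4.5.1):
 * the default control endpoint 0x00 maps to dci 0 * 2 + 1 = 1, a
 * bulk IN endpoint 0x81 to dci 1 * 2 + 1 = 3, and a bulk OUT
 * endpoint 0x02 to dci 2 * 2 + 0 = 4.  xhci_dci_to_ici() then adds
 * one because the input context keeps the Input Control Context at
 * index 0, so the endpoint context for dci 3 lives at ici 4.
 */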
static int 541 xhci_trb_get_idx(struct xhci_ring *xr, uint64_t trb_0, int *idx) 542 { 543 /* base address of TRBs */ 544 bus_addr_t trbp = xhci_ring_trbp(xr, 0); 545 546 /* trb_0 range sanity check */ 547 if (trb_0 == 0 || trb_0 < trbp || 548 (trb_0 - trbp) % sizeof(struct xhci_trb) != 0 || 549 (trb_0 - trbp) / sizeof(struct xhci_trb) >= xr->xr_ntrb) { 550 return 1; 551 } 552 *idx = (trb_0 - trbp) / sizeof(struct xhci_trb); 553 return 0; 554 } 555 556 static unsigned int 557 xhci_get_epstate(struct xhci_softc * const sc, struct xhci_slot * const xs, 558 u_int dci) 559 { 560 uint32_t *cp; 561 562 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 563 cp = xhci_slot_get_dcv(sc, xs, dci); 564 return XHCI_EPCTX_0_EPSTATE_GET(le32toh(cp[0])); 565 } 566 567 static inline unsigned int 568 xhci_ctlrport2bus(struct xhci_softc * const sc, unsigned int ctlrport) 569 { 570 const unsigned int port = ctlrport - 1; 571 const uint8_t bit = __BIT(port % NBBY); 572 573 return __SHIFTOUT(sc->sc_ctlrportbus[port / NBBY], bit); 574 } 575 576 /* 577 * Return the roothub port for a controller port. Both are 1..n. 578 */ 579 static inline unsigned int 580 xhci_ctlrport2rhport(struct xhci_softc * const sc, unsigned int ctrlport) 581 { 582 583 return sc->sc_ctlrportmap[ctrlport - 1]; 584 } 585 586 /* 587 * Return the controller port for a bus roothub port. Both are 1..n. 588 */ 589 static inline unsigned int 590 xhci_rhport2ctlrport(struct xhci_softc * const sc, unsigned int bn, 591 unsigned int rhport) 592 { 593 594 return sc->sc_rhportmap[bn][rhport - 1]; 595 } 596 597 /* --- */ 598 599 void 600 xhci_childdet(device_t self, device_t child) 601 { 602 struct xhci_softc * const sc = device_private(self); 603 604 KASSERT((sc->sc_child == child) || (sc->sc_child2 == child)); 605 if (child == sc->sc_child2) 606 sc->sc_child2 = NULL; 607 else if (child == sc->sc_child) 608 sc->sc_child = NULL; 609 } 610 611 int 612 xhci_detach(struct xhci_softc *sc, int flags) 613 { 614 int rv = 0; 615 616 if (sc->sc_child2 != NULL) { 617 rv = config_detach(sc->sc_child2, flags); 618 if (rv != 0) 619 return rv; 620 KASSERT(sc->sc_child2 == NULL); 621 } 622 623 if (sc->sc_child != NULL) { 624 rv = config_detach(sc->sc_child, flags); 625 if (rv != 0) 626 return rv; 627 KASSERT(sc->sc_child == NULL); 628 } 629 630 /* XXX unconfigure/free slots */ 631 632 /* verify: */ 633 xhci_rt_write_4(sc, XHCI_IMAN(0), 0); 634 xhci_op_write_4(sc, XHCI_USBCMD, 0); 635 /* do we need to wait for stop? 
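 * (xHCI 5.4.2 defines USBSTS.HCH, which asserts once the controller
 * has actually halted after R/S is cleared; polling for it here would
 * be the conservative answer, but is not done at present.)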
*/ 636 637 xhci_op_write_8(sc, XHCI_CRCR, 0); 638 xhci_ring_free(sc, &sc->sc_cr); 639 cv_destroy(&sc->sc_command_cv); 640 cv_destroy(&sc->sc_cmdbusy_cv); 641 642 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), 0); 643 xhci_rt_write_8(sc, XHCI_ERSTBA(0), 0); 644 xhci_rt_write_8(sc, XHCI_ERDP(0), 0 | XHCI_ERDP_BUSY); 645 xhci_ring_free(sc, &sc->sc_er); 646 647 usb_freemem(&sc->sc_eventst_dma); 648 649 xhci_op_write_8(sc, XHCI_DCBAAP, 0); 650 usb_freemem(&sc->sc_dcbaa_dma); 651 652 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * sc->sc_maxslots); 653 654 kmem_free(sc->sc_ctlrportbus, 655 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY)); 656 kmem_free(sc->sc_ctlrportmap, sc->sc_maxports * sizeof(int)); 657 658 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) { 659 kmem_free(sc->sc_rhportmap[j], sc->sc_maxports * sizeof(int)); 660 } 661 662 mutex_destroy(&sc->sc_lock); 663 mutex_destroy(&sc->sc_intr_lock); 664 665 pool_cache_destroy(sc->sc_xferpool); 666 667 return rv; 668 } 669 670 int 671 xhci_activate(device_t self, enum devact act) 672 { 673 struct xhci_softc * const sc = device_private(self); 674 675 switch (act) { 676 case DVACT_DEACTIVATE: 677 sc->sc_dying = true; 678 return 0; 679 default: 680 return EOPNOTSUPP; 681 } 682 } 683 684 bool 685 xhci_suspend(device_t self, const pmf_qual_t *qual) 686 { 687 struct xhci_softc * const sc = device_private(self); 688 size_t i, j, bn, dci; 689 int port; 690 uint32_t v; 691 usbd_status err; 692 bool ok = false; 693 694 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 695 696 mutex_enter(&sc->sc_lock); 697 698 /* 699 * Block issuance of new commands, and wait for all pending 700 * commands to complete. 701 */ 702 KASSERT(sc->sc_suspender == NULL); 703 sc->sc_suspender = curlwp; 704 while (sc->sc_command_addr != 0) 705 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock); 706 707 /* 708 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2: 709 * xHCI Power Management, p. 342 710 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=342 711 */ 712 713 /* 714 * `1. Stop all USB activity by issuing Stop Endpoint Commands 715 * for Busy endpoints in the Running state. If the Force 716 * Save Context Capability (FSC = ``0'') is not supported, 717 * then Stop Endpoint Commands shall be issued for all idle 718 * endpoints in the Running state as well. The Stop 719 * Endpoint Command causes the xHC to update the respective 720 * Endpoint or Stream Contexts in system memory, e.g. the 721 * TR Dequeue Pointer, DCS, etc. fields. Refer to 722 * Implementation Note "0".' 723 */ 724 for (i = 0; i < sc->sc_maxslots; i++) { 725 struct xhci_slot *xs = &sc->sc_slots[i]; 726 727 /* Skip if the slot is not in use. */ 728 if (xs->xs_idx == 0) 729 continue; 730 731 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) { 732 /* Skip if the endpoint is not Running. */ 733 /* XXX What about Busy? */ 734 if (xhci_get_epstate(sc, xs, dci) != 735 XHCI_EPSTATE_RUNNING) 736 continue; 737 738 /* Stop endpoint. */ 739 err = xhci_stop_endpoint_cmd(sc, xs, dci, 740 XHCI_TRB_3_SUSP_EP_BIT); 741 if (err) { 742 device_printf(self, "failed to stop endpoint" 743 " slot %zu dci %zu err %d\n", 744 i, dci, err); 745 goto out; 746 } 747 } 748 } 749 750 /* 751 * Next, suspend all the ports: 752 * 753 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.15: 754 * Suspend-Resume, pp. 
276-283 755 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=276 756 */ 757 for (bn = 0; bn < 2; bn++) { 758 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) { 759 /* 4.15.1: Port Suspend. */ 760 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i)); 761 762 /* 763 * `System software places individual ports 764 * into suspend mode by writing a ``3'' into 765 * the appropriate PORTSC register Port Link 766 * State (PLS) field (refer to Section 5.4.8). 767 * Software should only set the PLS field to 768 * ``3'' when the port is in the Enabled 769 * state.' 770 * 771 * `Software should not attempt to suspend a 772 * port unless the port reports that it is in 773 * the enabled (PED = ``1''; PLS < ``3'') 774 * state (refer to Section 5.4.8 for more 775 * information about PED and PLS).' 776 */ 777 v = xhci_op_read_4(sc, port); 778 if (((v & XHCI_PS_PED) == 0) || 779 XHCI_PS_PLS_GET(v) >= XHCI_PS_PLS_U3) 780 continue; 781 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR); 782 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU3); 783 xhci_op_write_4(sc, port, v); 784 785 /* 786 * `When the PLS field is written with U3 787 * (``3''), the status of the PLS bit will not 788 * change to the target U state U3 until the 789 * suspend signaling has completed to the 790 * attached device (which may be as long as 791 * 10ms.).' 792 * 793 * `Software is required to wait for U3 794 * transitions to complete before it puts the 795 * xHC into a low power state, and before 796 * resuming the port.' 797 * 798 * XXX Take advantage of the technique to 799 * reduce polling on host controllers that 800 * support the U3C capability. 801 */ 802 for (j = 0; j < XHCI_WAIT_PLS_U3; j++) { 803 v = xhci_op_read_4(sc, port); 804 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U3) 805 break; 806 usb_delay_ms(&sc->sc_bus, 1); 807 } 808 if (j == XHCI_WAIT_PLS_U3) { 809 device_printf(self, 810 "suspend timeout on bus %zu port %zu\n", 811 bn, i); 812 goto out; 813 } 814 } 815 } 816 817 /* 818 * `2. Ensure that the Command Ring is in the Stopped state 819 * (CRR = ``0'') or Idle (i.e. the Command Transfer Ring is 820 * empty), and all Command Completion Events associated 821 * with them have been received.' 822 * 823 * XXX 824 */ 825 826 /* `3. Stop the controller by setting Run/Stop (R/S) = ``0''.' */ 827 xhci_op_write_4(sc, XHCI_USBCMD, 828 xhci_op_read_4(sc, XHCI_USBCMD) & ~XHCI_CMD_RS); 829 830 /* 831 * `4. Read the Operational Runtime, and VTIO registers in the 832 * following order: USBCMD, DNCTRL, DCBAAP, CONFIG, ERSTSZ, 833 * ERSTBA, ERDP, IMAN, IMOD, and VTIO and save their 834 * state.' 835 * 836 * (We don't use VTIO here (XXX for now?).) 837 */ 838 sc->sc_regs.usbcmd = xhci_op_read_4(sc, XHCI_USBCMD); 839 sc->sc_regs.dnctrl = xhci_op_read_4(sc, XHCI_DNCTRL); 840 sc->sc_regs.dcbaap = xhci_op_read_8(sc, XHCI_DCBAAP); 841 sc->sc_regs.config = xhci_op_read_4(sc, XHCI_CONFIG); 842 sc->sc_regs.erstsz0 = xhci_rt_read_4(sc, XHCI_ERSTSZ(0)); 843 sc->sc_regs.erstba0 = xhci_rt_read_8(sc, XHCI_ERSTBA(0)); 844 sc->sc_regs.erdp0 = xhci_rt_read_8(sc, XHCI_ERDP(0)); 845 sc->sc_regs.iman0 = xhci_rt_read_4(sc, XHCI_IMAN(0)); 846 sc->sc_regs.imod0 = xhci_rt_read_4(sc, XHCI_IMOD(0)); 847 848 /* 849 * `5. Set the Controller Save State (CSS) flag in the USBCMD 850 * register (5.4.1)...' 
851 */ 852 xhci_op_write_4(sc, XHCI_USBCMD, 853 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CSS); 854 855 /* 856 * `...and wait for the Save State Status (SSS) flag in the 857 * USBSTS register (5.4.2) to transition to ``0''.' 858 */ 859 for (i = 0; i < XHCI_WAIT_SSS; i++) { 860 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SSS) == 0) 861 break; 862 usb_delay_ms(&sc->sc_bus, 1); 863 } 864 if (i >= XHCI_WAIT_SSS) { 865 device_printf(self, "suspend timeout, USBSTS.SSS\n"); 866 /* 867 * Just optimistically go on and check SRE anyway -- 868 * what's the worst that could happen? 869 */ 870 } 871 872 /* 873 * `Note: After a Save or Restore operation completes, the 874 * Save/Restore Error (SRE) flag in the USBSTS register should 875 * be checked to ensure that the operation completed 876 * successfully.' 877 */ 878 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) { 879 device_printf(self, "suspend error, USBSTS.SRE\n"); 880 goto out; 881 } 882 883 /* Success! */ 884 ok = true; 885 886 out: mutex_exit(&sc->sc_lock); 887 return ok; 888 } 889 890 bool 891 xhci_resume(device_t self, const pmf_qual_t *qual) 892 { 893 struct xhci_softc * const sc = device_private(self); 894 size_t i, j, bn, dci; 895 int port; 896 uint32_t v; 897 bool ok = false; 898 899 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 900 901 mutex_enter(&sc->sc_lock); 902 KASSERT(sc->sc_suspender); 903 904 /* 905 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2: 906 * xHCI Power Management, p. 343 907 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=343 908 */ 909 910 /* 911 * `4. Restore the Operational Runtime, and VTIO registers with 912 * their previously saved state in the following order: 913 * DNCTRL, DCBAAP, CONFIG, ERSTSZ, ERSTBA, ERDP, IMAN, 914 * IMOD, and VTIO.' 915 * 916 * (We don't use VTIO here (for now?).) 917 */ 918 xhci_op_write_4(sc, XHCI_USBCMD, sc->sc_regs.usbcmd); 919 xhci_op_write_4(sc, XHCI_DNCTRL, sc->sc_regs.dnctrl); 920 xhci_op_write_8(sc, XHCI_DCBAAP, sc->sc_regs.dcbaap); 921 xhci_op_write_4(sc, XHCI_CONFIG, sc->sc_regs.config); 922 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), sc->sc_regs.erstsz0); 923 xhci_rt_write_8(sc, XHCI_ERSTBA(0), sc->sc_regs.erstba0); 924 xhci_rt_write_8(sc, XHCI_ERDP(0), sc->sc_regs.erdp0); 925 xhci_rt_write_4(sc, XHCI_IMAN(0), sc->sc_regs.iman0); 926 xhci_rt_write_4(sc, XHCI_IMOD(0), sc->sc_regs.imod0); 927 928 memset(&sc->sc_regs, 0, sizeof(sc->sc_regs)); /* paranoia */ 929 930 /* 931 * `5. Set the Controller Restore State (CRS) flag in the 932 * USBCMD register (5.4.1) to ``1''...' 933 */ 934 xhci_op_write_4(sc, XHCI_USBCMD, 935 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CRS); 936 937 /* 938 * `...and wait for the Restore State Status (RSS) in the 939 * USBSTS register (5.4.2) to transition to ``0''.' 940 */ 941 for (i = 0; i < XHCI_WAIT_RSS; i++) { 942 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_RSS) == 0) 943 break; 944 usb_delay_ms(&sc->sc_bus, 1); 945 } 946 if (i >= XHCI_WAIT_RSS) { 947 device_printf(self, "suspend timeout, USBSTS.RSS\n"); 948 goto out; 949 } 950 951 /* 952 * `6. Reinitialize the Command Ring, i.e. so its Cycle bits 953 * are consistent with the RCS values to be written to the 954 * CRCR.' 955 * 956 * XXX Hope just zeroing it is good enough! 957 */ 958 xhci_host_dequeue(sc->sc_cr); 959 960 /* 961 * `7. Write the CRCR with the address and RCS value of the 962 * reinitialized Command Ring. 
Note that this write will 963 * cause the Command Ring to restart at the address 964 * specified by the CRCR.' 965 */ 966 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) | 967 sc->sc_cr->xr_cs); 968 969 /* 970 * `8. Enable the controller by setting Run/Stop (R/S) = 971 * ``1''.' 972 */ 973 xhci_op_write_4(sc, XHCI_USBCMD, 974 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_RS); 975 976 /* 977 * `9. Software shall walk the USB topology and initialize each 978 * of the xHC PORTSC, PORTPMSC, and PORTLI registers, and 979 * external hub ports attached to USB devices.' 980 * 981 * This follows the procedure in 4.15 `Suspend-Resume', 4.15.2 982 * `Port Resume', 4.15.2.1 `Host Initiated'. 983 * 984 * XXX We should maybe batch up initiating the state 985 * transitions, and then wait for them to complete all at once. 986 */ 987 for (bn = 0; bn < 2; bn++) { 988 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) { 989 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i)); 990 991 /* `When a port is in the U3 state: ...' */ 992 v = xhci_op_read_4(sc, port); 993 if (XHCI_PS_PLS_GET(v) != XHCI_PS_PLS_U3) 994 continue; 995 996 /* 997 * `For a USB2 protocol port, software shall 998 * write a ``15'' (Resume) to the PLS field to 999 * initiate resume signaling. The port shall 1000 * transition to the Resume substate and the 1001 * xHC shall transmit the resume signaling 1002 * within 1ms (T_URSM). Software shall ensure 1003 * that resume is signaled for at least 20ms 1004 * (T_DRSMDN). Software shall start timing 1005 * T_DRSMDN from the write of ``15'' (Resume) 1006 * to PLS.' 1007 */ 1008 if (bn == 1) { 1009 KASSERT(sc->sc_bus2.ub_revision == USBREV_2_0); 1010 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR); 1011 v |= XHCI_PS_LWS; 1012 v |= XHCI_PS_PLS_SET(XHCI_PS_PLS_SETRESUME); 1013 xhci_op_write_4(sc, port, v); 1014 usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT); 1015 } else { 1016 KASSERT(sc->sc_bus.ub_revision > USBREV_2_0); 1017 } 1018 1019 /* 1020 * `For a USB3 protocol port [and a USB2 1021 * protocol port after transitioning to 1022 * Resume], software shall write a ``0'' (U0) 1023 * to the PLS field...' 1024 */ 1025 v = xhci_op_read_4(sc, port); 1026 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR); 1027 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU0); 1028 xhci_op_write_4(sc, port, v); 1029 1030 for (j = 0; j < XHCI_WAIT_PLS_U0; j++) { 1031 v = xhci_op_read_4(sc, port); 1032 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U0) 1033 break; 1034 usb_delay_ms(&sc->sc_bus, 1); 1035 } 1036 if (j == XHCI_WAIT_PLS_U0) { 1037 device_printf(self, 1038 "resume timeout on bus %zu port %zu\n", 1039 bn, i); 1040 goto out; 1041 } 1042 } 1043 } 1044 1045 /* 1046 * `10. Restart each of the previously Running endpoints by 1047 * ringing their doorbells.' 1048 */ 1049 for (i = 0; i < sc->sc_maxslots; i++) { 1050 struct xhci_slot *xs = &sc->sc_slots[i]; 1051 1052 /* Skip if the slot is not in use. */ 1053 if (xs->xs_idx == 0) 1054 continue; 1055 1056 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) { 1057 /* Skip if the endpoint is not Running. */ 1058 if (xhci_get_epstate(sc, xs, dci) != 1059 XHCI_EPSTATE_RUNNING) 1060 continue; 1061 1062 /* Ring the doorbell. */ 1063 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); 1064 } 1065 } 1066 1067 /* 1068 * `Note: After a Save or Restore operation completes, the 1069 * Save/Restore Error (SRE) flag in the USBSTS register should 1070 * be checked to ensure that the operation completed 1071 * successfully.' 
1072 */ 1073 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) { 1074 device_printf(self, "resume error, USBSTS.SRE\n"); 1075 goto out; 1076 } 1077 1078 /* Resume command issuance. */ 1079 sc->sc_suspender = NULL; 1080 cv_broadcast(&sc->sc_cmdbusy_cv); 1081 1082 /* Success! */ 1083 ok = true; 1084 1085 out: mutex_exit(&sc->sc_lock); 1086 return ok; 1087 } 1088 1089 bool 1090 xhci_shutdown(device_t self, int flags) 1091 { 1092 return false; 1093 } 1094 1095 static int 1096 xhci_hc_reset(struct xhci_softc * const sc) 1097 { 1098 uint32_t usbcmd, usbsts; 1099 int i; 1100 1101 /* Check controller not ready */ 1102 for (i = 0; i < XHCI_WAIT_CNR; i++) { 1103 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1104 if ((usbsts & XHCI_STS_CNR) == 0) 1105 break; 1106 usb_delay_ms(&sc->sc_bus, 1); 1107 } 1108 if (i >= XHCI_WAIT_CNR) { 1109 aprint_error_dev(sc->sc_dev, "controller not ready timeout\n"); 1110 return EIO; 1111 } 1112 1113 /* Halt controller */ 1114 usbcmd = 0; 1115 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd); 1116 usb_delay_ms(&sc->sc_bus, 1); 1117 1118 /* Reset controller */ 1119 usbcmd = XHCI_CMD_HCRST; 1120 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd); 1121 for (i = 0; i < XHCI_WAIT_HCRST; i++) { 1122 /* 1123 * Wait 1ms first. Existing Intel xHCI requires 1ms delay to 1124 * prevent system hang (Errata). 1125 */ 1126 usb_delay_ms(&sc->sc_bus, 1); 1127 usbcmd = xhci_op_read_4(sc, XHCI_USBCMD); 1128 if ((usbcmd & XHCI_CMD_HCRST) == 0) 1129 break; 1130 } 1131 if (i >= XHCI_WAIT_HCRST) { 1132 aprint_error_dev(sc->sc_dev, "host controller reset timeout\n"); 1133 return EIO; 1134 } 1135 1136 /* Check controller not ready */ 1137 for (i = 0; i < XHCI_WAIT_CNR; i++) { 1138 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1139 if ((usbsts & XHCI_STS_CNR) == 0) 1140 break; 1141 usb_delay_ms(&sc->sc_bus, 1); 1142 } 1143 if (i >= XHCI_WAIT_CNR) { 1144 aprint_error_dev(sc->sc_dev, 1145 "controller not ready timeout after reset\n"); 1146 return EIO; 1147 } 1148 1149 return 0; 1150 } 1151 1152 /* 7.2 xHCI Support Protocol Capability */ 1153 static void 1154 xhci_id_protocols(struct xhci_softc *sc, bus_size_t ecp) 1155 { 1156 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 1157 1158 /* XXX Cache this lot */ 1159 1160 const uint32_t w0 = xhci_read_4(sc, ecp); 1161 const uint32_t w4 = xhci_read_4(sc, ecp + 4); 1162 const uint32_t w8 = xhci_read_4(sc, ecp + 8); 1163 const uint32_t wc = xhci_read_4(sc, ecp + 0xc); 1164 1165 aprint_debug_dev(sc->sc_dev, 1166 " SP: 0x%08x 0x%08x 0x%08x 0x%08x\n", w0, w4, w8, wc); 1167 1168 if (w4 != XHCI_XECP_USBID) 1169 return; 1170 1171 const int major = XHCI_XECP_SP_W0_MAJOR(w0); 1172 const int minor = XHCI_XECP_SP_W0_MINOR(w0); 1173 const uint8_t cpo = XHCI_XECP_SP_W8_CPO(w8); 1174 const uint8_t cpc = XHCI_XECP_SP_W8_CPC(w8); 1175 1176 const uint16_t mm = __SHIFTOUT(w0, __BITS(31, 16)); 1177 switch (mm) { 1178 case 0x0200: 1179 case 0x0300: 1180 case 0x0301: 1181 case 0x0310: 1182 aprint_debug_dev(sc->sc_dev, " %s ports %d - %d\n", 1183 major == 3 ? "ss" : "hs", cpo, cpo + cpc -1); 1184 break; 1185 default: 1186 aprint_error_dev(sc->sc_dev, " unknown major/minor (%d/%d)\n", 1187 major, minor); 1188 return; 1189 } 1190 1191 const size_t bus = (major == 3) ? 0 : 1; 1192 1193 /* Index arrays with 0..n-1 where ports are numbered 1..n */ 1194 for (size_t cp = cpo - 1; cp < cpo + cpc - 1; cp++) { 1195 if (sc->sc_ctlrportmap[cp] != 0) { 1196 aprint_error_dev(sc->sc_dev, "controller port %zu " 1197 "already assigned", cp); 1198 continue; 1199 } 1200 1201 sc->sc_ctlrportbus[cp / NBBY] |= 1202 bus == 0 ? 
0 : __BIT(cp % NBBY); 1203 1204 const size_t rhp = sc->sc_rhportcount[bus]++; 1205 1206 KASSERTMSG(sc->sc_rhportmap[bus][rhp] == 0, 1207 "bus %zu rhp %zu is %d", bus, rhp, 1208 sc->sc_rhportmap[bus][rhp]); 1209 1210 sc->sc_rhportmap[bus][rhp] = cp + 1; 1211 sc->sc_ctlrportmap[cp] = rhp + 1; 1212 } 1213 } 1214 1215 /* Process extended capabilities */ 1216 static void 1217 xhci_ecp(struct xhci_softc *sc) 1218 { 1219 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 1220 1221 bus_size_t ecp = XHCI_HCC_XECP(sc->sc_hcc) * 4; 1222 while (ecp != 0) { 1223 uint32_t ecr = xhci_read_4(sc, ecp); 1224 aprint_debug_dev(sc->sc_dev, "ECR: 0x%08x\n", ecr); 1225 switch (XHCI_XECP_ID(ecr)) { 1226 case XHCI_ID_PROTOCOLS: { 1227 xhci_id_protocols(sc, ecp); 1228 break; 1229 } 1230 case XHCI_ID_USB_LEGACY: { 1231 uint8_t bios_sem; 1232 1233 /* Take host controller ownership from BIOS */ 1234 bios_sem = xhci_read_1(sc, ecp + XHCI_XECP_BIOS_SEM); 1235 if (bios_sem) { 1236 /* sets xHCI to be owned by OS */ 1237 xhci_write_1(sc, ecp + XHCI_XECP_OS_SEM, 1); 1238 aprint_debug_dev(sc->sc_dev, 1239 "waiting for BIOS to give up control\n"); 1240 for (int i = 0; i < 5000; i++) { 1241 bios_sem = xhci_read_1(sc, ecp + 1242 XHCI_XECP_BIOS_SEM); 1243 if (bios_sem == 0) 1244 break; 1245 DELAY(1000); 1246 } 1247 if (bios_sem) { 1248 aprint_error_dev(sc->sc_dev, 1249 "timed out waiting for BIOS\n"); 1250 } 1251 } 1252 break; 1253 } 1254 default: 1255 break; 1256 } 1257 ecr = xhci_read_4(sc, ecp); 1258 if (XHCI_XECP_NEXT(ecr) == 0) { 1259 ecp = 0; 1260 } else { 1261 ecp += XHCI_XECP_NEXT(ecr) * 4; 1262 } 1263 } 1264 } 1265 1266 #define XHCI_HCCPREV1_BITS \ 1267 "\177\020" /* New bitmask */ \ 1268 "f\020\020XECP\0" \ 1269 "f\014\4MAXPSA\0" \ 1270 "b\013CFC\0" \ 1271 "b\012SEC\0" \ 1272 "b\011SBD\0" \ 1273 "b\010FSE\0" \ 1274 "b\7NSS\0" \ 1275 "b\6LTC\0" \ 1276 "b\5LHRC\0" \ 1277 "b\4PIND\0" \ 1278 "b\3PPC\0" \ 1279 "b\2CZC\0" \ 1280 "b\1BNC\0" \ 1281 "b\0AC64\0" \ 1282 "\0" 1283 #define XHCI_HCCV1_x_BITS \ 1284 "\177\020" /* New bitmask */ \ 1285 "f\020\020XECP\0" \ 1286 "f\014\4MAXPSA\0" \ 1287 "b\013CFC\0" \ 1288 "b\012SEC\0" \ 1289 "b\011SPC\0" \ 1290 "b\010PAE\0" \ 1291 "b\7NSS\0" \ 1292 "b\6LTC\0" \ 1293 "b\5LHRC\0" \ 1294 "b\4PIND\0" \ 1295 "b\3PPC\0" \ 1296 "b\2CSZ\0" \ 1297 "b\1BNC\0" \ 1298 "b\0AC64\0" \ 1299 "\0" 1300 1301 #define XHCI_HCC2_BITS \ 1302 "\177\020" /* New bitmask */ \ 1303 "b\7ETC_TSC\0" \ 1304 "b\6ETC\0" \ 1305 "b\5CIC\0" \ 1306 "b\4LEC\0" \ 1307 "b\3CTC\0" \ 1308 "b\2FSC\0" \ 1309 "b\1CMC\0" \ 1310 "b\0U3C\0" \ 1311 "\0" 1312 1313 void 1314 xhci_start(struct xhci_softc *sc) 1315 { 1316 xhci_rt_write_4(sc, XHCI_IMAN(0), XHCI_IMAN_INTR_ENA); 1317 if ((sc->sc_quirks & XHCI_QUIRK_INTEL) != 0) 1318 /* Intel xhci needs interrupt rate moderated. */ 1319 xhci_rt_write_4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT_LP); 1320 else 1321 xhci_rt_write_4(sc, XHCI_IMOD(0), 0); 1322 aprint_debug_dev(sc->sc_dev, "current IMOD %u\n", 1323 xhci_rt_read_4(sc, XHCI_IMOD(0))); 1324 1325 /* Go! 
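 * Setting Run/Stop together with INTE starts the controller schedule
 * and enables its interrupts; the root hub ports and the command ring
 * are serviced from this point on.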
*/ 1326 xhci_op_write_4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS); 1327 aprint_debug_dev(sc->sc_dev, "USBCMD 0x%08"PRIx32"\n", 1328 xhci_op_read_4(sc, XHCI_USBCMD)); 1329 } 1330 1331 int 1332 xhci_init(struct xhci_softc *sc) 1333 { 1334 bus_size_t bsz; 1335 uint32_t hcs1, hcs2, hcs3, dboff, rtsoff; 1336 uint32_t pagesize, config; 1337 int i = 0; 1338 uint16_t hciversion; 1339 uint8_t caplength; 1340 1341 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 1342 1343 /* Set up the bus struct for the usb 3 and usb 2 buses */ 1344 sc->sc_bus.ub_methods = &xhci_bus_methods; 1345 sc->sc_bus.ub_pipesize = sizeof(struct xhci_pipe); 1346 sc->sc_bus.ub_usedma = true; 1347 sc->sc_bus.ub_hcpriv = sc; 1348 1349 sc->sc_bus2.ub_methods = &xhci_bus_methods; 1350 sc->sc_bus2.ub_pipesize = sizeof(struct xhci_pipe); 1351 sc->sc_bus2.ub_revision = USBREV_2_0; 1352 sc->sc_bus2.ub_usedma = true; 1353 sc->sc_bus2.ub_hcpriv = sc; 1354 sc->sc_bus2.ub_dmatag = sc->sc_bus.ub_dmatag; 1355 1356 caplength = xhci_read_1(sc, XHCI_CAPLENGTH); 1357 hciversion = xhci_read_2(sc, XHCI_HCIVERSION); 1358 1359 if (hciversion < XHCI_HCIVERSION_0_96 || 1360 hciversion >= 0x0200) { 1361 aprint_normal_dev(sc->sc_dev, 1362 "xHCI version %x.%x not known to be supported\n", 1363 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff); 1364 } else { 1365 aprint_verbose_dev(sc->sc_dev, "xHCI version %x.%x\n", 1366 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff); 1367 } 1368 1369 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0, caplength, 1370 &sc->sc_cbh) != 0) { 1371 aprint_error_dev(sc->sc_dev, "capability subregion failure\n"); 1372 return ENOMEM; 1373 } 1374 1375 hcs1 = xhci_cap_read_4(sc, XHCI_HCSPARAMS1); 1376 sc->sc_maxslots = XHCI_HCS1_MAXSLOTS(hcs1); 1377 sc->sc_maxintrs = XHCI_HCS1_MAXINTRS(hcs1); 1378 sc->sc_maxports = XHCI_HCS1_MAXPORTS(hcs1); 1379 hcs2 = xhci_cap_read_4(sc, XHCI_HCSPARAMS2); 1380 hcs3 = xhci_cap_read_4(sc, XHCI_HCSPARAMS3); 1381 aprint_debug_dev(sc->sc_dev, 1382 "hcs1=%"PRIx32" hcs2=%"PRIx32" hcs3=%"PRIx32"\n", hcs1, hcs2, hcs3); 1383 1384 sc->sc_hcc = xhci_cap_read_4(sc, XHCI_HCCPARAMS); 1385 sc->sc_ctxsz = XHCI_HCC_CSZ(sc->sc_hcc) ? 64 : 32; 1386 1387 char sbuf[128]; 1388 if (hciversion < XHCI_HCIVERSION_1_0) 1389 snprintb(sbuf, sizeof(sbuf), XHCI_HCCPREV1_BITS, sc->sc_hcc); 1390 else 1391 snprintb(sbuf, sizeof(sbuf), XHCI_HCCV1_x_BITS, sc->sc_hcc); 1392 aprint_debug_dev(sc->sc_dev, "hcc=%s\n", sbuf); 1393 aprint_debug_dev(sc->sc_dev, "xECP %" __PRIxBITS "\n", 1394 XHCI_HCC_XECP(sc->sc_hcc) * 4); 1395 if (hciversion >= XHCI_HCIVERSION_1_1) { 1396 sc->sc_hcc2 = xhci_cap_read_4(sc, XHCI_HCCPARAMS2); 1397 snprintb(sbuf, sizeof(sbuf), XHCI_HCC2_BITS, sc->sc_hcc2); 1398 aprint_debug_dev(sc->sc_dev, "hcc2=%s\n", sbuf); 1399 } 1400 1401 /* default all ports to bus 0, i.e. 
usb 3 */ 1402 sc->sc_ctlrportbus = kmem_zalloc( 1403 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY), KM_SLEEP); 1404 sc->sc_ctlrportmap = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP); 1405 1406 /* controller port to bus roothub port map */ 1407 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) { 1408 sc->sc_rhportmap[j] = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP); 1409 } 1410 1411 /* 1412 * Process all Extended Capabilities 1413 */ 1414 xhci_ecp(sc); 1415 1416 bsz = XHCI_PORTSC(sc->sc_maxports); 1417 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, caplength, bsz, 1418 &sc->sc_obh) != 0) { 1419 aprint_error_dev(sc->sc_dev, "operational subregion failure\n"); 1420 return ENOMEM; 1421 } 1422 1423 dboff = xhci_cap_read_4(sc, XHCI_DBOFF); 1424 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, dboff, 1425 sc->sc_maxslots * 4, &sc->sc_dbh) != 0) { 1426 aprint_error_dev(sc->sc_dev, "doorbell subregion failure\n"); 1427 return ENOMEM; 1428 } 1429 1430 rtsoff = xhci_cap_read_4(sc, XHCI_RTSOFF); 1431 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, rtsoff, 1432 sc->sc_maxintrs * 0x20, &sc->sc_rbh) != 0) { 1433 aprint_error_dev(sc->sc_dev, "runtime subregion failure\n"); 1434 return ENOMEM; 1435 } 1436 1437 int rv; 1438 rv = xhci_hc_reset(sc); 1439 if (rv != 0) { 1440 return rv; 1441 } 1442 1443 if (sc->sc_vendor_init) 1444 sc->sc_vendor_init(sc); 1445 1446 pagesize = xhci_op_read_4(sc, XHCI_PAGESIZE); 1447 aprint_debug_dev(sc->sc_dev, "PAGESIZE 0x%08x\n", pagesize); 1448 pagesize = ffs(pagesize); 1449 if (pagesize == 0) { 1450 aprint_error_dev(sc->sc_dev, "pagesize is 0\n"); 1451 return EIO; 1452 } 1453 sc->sc_pgsz = 1 << (12 + (pagesize - 1)); 1454 aprint_debug_dev(sc->sc_dev, "sc_pgsz 0x%08x\n", (uint32_t)sc->sc_pgsz); 1455 aprint_debug_dev(sc->sc_dev, "sc_maxslots 0x%08x\n", 1456 (uint32_t)sc->sc_maxslots); 1457 aprint_debug_dev(sc->sc_dev, "sc_maxports %d\n", sc->sc_maxports); 1458 1459 int err; 1460 sc->sc_maxspbuf = XHCI_HCS2_MAXSPBUF(hcs2); 1461 aprint_debug_dev(sc->sc_dev, "sc_maxspbuf %d\n", sc->sc_maxspbuf); 1462 if (sc->sc_maxspbuf != 0) { 1463 err = usb_allocmem(sc->sc_bus.ub_dmatag, 1464 sizeof(uint64_t) * sc->sc_maxspbuf, sizeof(uint64_t), 1465 USBMALLOC_COHERENT | USBMALLOC_ZERO, 1466 &sc->sc_spbufarray_dma); 1467 if (err) { 1468 aprint_error_dev(sc->sc_dev, 1469 "spbufarray init fail, err %d\n", err); 1470 return ENOMEM; 1471 } 1472 1473 sc->sc_spbuf_dma = kmem_zalloc(sizeof(*sc->sc_spbuf_dma) * 1474 sc->sc_maxspbuf, KM_SLEEP); 1475 uint64_t *spbufarray = KERNADDR(&sc->sc_spbufarray_dma, 0); 1476 for (i = 0; i < sc->sc_maxspbuf; i++) { 1477 usb_dma_t * const dma = &sc->sc_spbuf_dma[i]; 1478 /* allocate contexts */ 1479 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, 1480 sc->sc_pgsz, USBMALLOC_COHERENT | USBMALLOC_ZERO, 1481 dma); 1482 if (err) { 1483 aprint_error_dev(sc->sc_dev, 1484 "spbufarray_dma init fail, err %d\n", err); 1485 rv = ENOMEM; 1486 goto bad1; 1487 } 1488 spbufarray[i] = htole64(DMAADDR(dma, 0)); 1489 usb_syncmem(dma, 0, sc->sc_pgsz, 1490 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1491 } 1492 1493 usb_syncmem(&sc->sc_spbufarray_dma, 0, 1494 sizeof(uint64_t) * sc->sc_maxspbuf, BUS_DMASYNC_PREWRITE); 1495 } 1496 1497 config = xhci_op_read_4(sc, XHCI_CONFIG); 1498 config &= ~0xFF; 1499 config |= sc->sc_maxslots & 0xFF; 1500 xhci_op_write_4(sc, XHCI_CONFIG, config); 1501 1502 err = xhci_ring_init(sc, &sc->sc_cr, XHCI_COMMAND_RING_TRBS, 1503 XHCI_COMMAND_RING_SEGMENTS_ALIGN); 1504 if (err) { 1505 aprint_error_dev(sc->sc_dev, "command ring 
init fail, err %d\n",
1506 err);
1507 rv = ENOMEM;
1508 goto bad1;
1509 }
1510
1511 err = xhci_ring_init(sc, &sc->sc_er, XHCI_EVENT_RING_TRBS,
1512 XHCI_EVENT_RING_SEGMENTS_ALIGN);
1513 if (err) {
1514 aprint_error_dev(sc->sc_dev, "event ring init fail, err %d\n",
1515 err);
1516 rv = ENOMEM;
1517 goto bad2;
1518 }
1519
1520 usb_dma_t *dma;
1521 size_t size;
1522 size_t align;
1523
1524 dma = &sc->sc_eventst_dma;
1525 size = roundup2(XHCI_EVENT_RING_SEGMENTS * XHCI_ERSTE_SIZE,
1526 XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN);
1527 KASSERTMSG(size <= (512 * 1024), "eventst size %zu too large", size);
1528 align = XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN;
1529 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1530 USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
1531 if (err) {
1532 aprint_error_dev(sc->sc_dev, "eventst init fail, err %d\n",
1533 err);
1534 rv = ENOMEM;
1535 goto bad3;
1536 }
1537
1538 aprint_debug_dev(sc->sc_dev, "eventst: 0x%016jx %p %zx\n",
1539 (uintmax_t)DMAADDR(&sc->sc_eventst_dma, 0),
1540 KERNADDR(&sc->sc_eventst_dma, 0),
1541 sc->sc_eventst_dma.udma_block->size);
1542
1543 dma = &sc->sc_dcbaa_dma;
1544 size = (1 + sc->sc_maxslots) * sizeof(uint64_t);
1545 KASSERTMSG(size <= 2048, "dcbaa size %zu too large", size);
1546 align = XHCI_DEVICE_CONTEXT_BASE_ADDRESS_ARRAY_ALIGN;
1547 err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align,
1548 USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
1549 if (err) {
1550 aprint_error_dev(sc->sc_dev, "dcbaa init fail, err %d\n", err);
1551 rv = ENOMEM;
1552 goto bad4;
1553 }
1554 aprint_debug_dev(sc->sc_dev, "dcbaa: 0x%016jx %p %zx\n",
1555 (uintmax_t)DMAADDR(&sc->sc_dcbaa_dma, 0),
1556 KERNADDR(&sc->sc_dcbaa_dma, 0),
1557 sc->sc_dcbaa_dma.udma_block->size);
1558
1559 if (sc->sc_maxspbuf != 0) {
1560 /*
1561 * DCBAA entry 0 holds the scratchbuf array pointer.
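 * The scratchpad pages themselves were allocated into sc_spbuf_dma[]
 * above and their bus addresses stored in sc_spbufarray_dma; per
 * xHCI 6.1 the controller finds that array through DCBAAP[0] whenever
 * Max Scratchpad Buffers is non-zero.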
1562 */ 1563 *(uint64_t *)KERNADDR(dma, 0) = 1564 htole64(DMAADDR(&sc->sc_spbufarray_dma, 0)); 1565 usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE); 1566 } 1567 1568 sc->sc_slots = kmem_zalloc(sizeof(*sc->sc_slots) * sc->sc_maxslots, 1569 KM_SLEEP); 1570 if (sc->sc_slots == NULL) { 1571 aprint_error_dev(sc->sc_dev, "slots init fail, err %d\n", err); 1572 rv = ENOMEM; 1573 goto bad; 1574 } 1575 1576 sc->sc_xferpool = pool_cache_init(sizeof(struct xhci_xfer), 0, 0, 0, 1577 "xhcixfer", NULL, IPL_USB, NULL, NULL, NULL); 1578 if (sc->sc_xferpool == NULL) { 1579 aprint_error_dev(sc->sc_dev, "pool_cache init fail, err %d\n", 1580 err); 1581 rv = ENOMEM; 1582 goto bad; 1583 } 1584 1585 cv_init(&sc->sc_command_cv, "xhcicmd"); 1586 cv_init(&sc->sc_cmdbusy_cv, "xhcicmdq"); 1587 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB); 1588 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB); 1589 1590 struct xhci_erste *erst; 1591 erst = KERNADDR(&sc->sc_eventst_dma, 0); 1592 erst[0].erste_0 = htole64(xhci_ring_trbp(sc->sc_er, 0)); 1593 erst[0].erste_2 = htole32(sc->sc_er->xr_ntrb); 1594 erst[0].erste_3 = htole32(0); 1595 usb_syncmem(&sc->sc_eventst_dma, 0, 1596 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS, BUS_DMASYNC_PREWRITE); 1597 1598 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), XHCI_EVENT_RING_SEGMENTS); 1599 xhci_rt_write_8(sc, XHCI_ERSTBA(0), DMAADDR(&sc->sc_eventst_dma, 0)); 1600 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(sc->sc_er, 0) | 1601 XHCI_ERDP_BUSY); 1602 1603 xhci_op_write_8(sc, XHCI_DCBAAP, DMAADDR(&sc->sc_dcbaa_dma, 0)); 1604 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) | 1605 sc->sc_cr->xr_cs); 1606 1607 xhci_barrier(sc, BUS_SPACE_BARRIER_WRITE); 1608 1609 HEXDUMP("eventst", KERNADDR(&sc->sc_eventst_dma, 0), 1610 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS); 1611 1612 if ((sc->sc_quirks & XHCI_DEFERRED_START) == 0) 1613 xhci_start(sc); 1614 1615 return 0; 1616 1617 bad: 1618 if (sc->sc_xferpool) { 1619 pool_cache_destroy(sc->sc_xferpool); 1620 sc->sc_xferpool = NULL; 1621 } 1622 1623 if (sc->sc_slots) { 1624 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * 1625 sc->sc_maxslots); 1626 sc->sc_slots = NULL; 1627 } 1628 1629 usb_freemem(&sc->sc_dcbaa_dma); 1630 bad4: 1631 usb_freemem(&sc->sc_eventst_dma); 1632 bad3: 1633 xhci_ring_free(sc, &sc->sc_er); 1634 bad2: 1635 xhci_ring_free(sc, &sc->sc_cr); 1636 i = sc->sc_maxspbuf; 1637 bad1: 1638 for (int j = 0; j < i; j++) 1639 usb_freemem(&sc->sc_spbuf_dma[j]); 1640 usb_freemem(&sc->sc_spbufarray_dma); 1641 1642 return rv; 1643 } 1644 1645 static inline bool 1646 xhci_polling_p(struct xhci_softc * const sc) 1647 { 1648 return sc->sc_bus.ub_usepolling || sc->sc_bus2.ub_usepolling; 1649 } 1650 1651 int 1652 xhci_intr(void *v) 1653 { 1654 struct xhci_softc * const sc = v; 1655 int ret = 0; 1656 1657 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 1658 1659 if (sc == NULL) 1660 return 0; 1661 1662 mutex_spin_enter(&sc->sc_intr_lock); 1663 1664 if (sc->sc_dying || !device_has_power(sc->sc_dev)) 1665 goto done; 1666 1667 /* If we get an interrupt while polling, then just ignore it. */ 1668 if (xhci_polling_p(sc)) { 1669 #ifdef DIAGNOSTIC 1670 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0); 1671 #endif 1672 goto done; 1673 } 1674 1675 ret = xhci_intr1(sc); 1676 if (ret) { 1677 KASSERT(sc->sc_child || sc->sc_child2); 1678 1679 /* 1680 * One of child busses could be already detached. It doesn't 1681 * matter on which of the two the softintr is scheduled. 
1682 */
1683 if (sc->sc_child)
1684 usb_schedsoftintr(&sc->sc_bus);
1685 else
1686 usb_schedsoftintr(&sc->sc_bus2);
1687 }
1688 done:
1689 mutex_spin_exit(&sc->sc_intr_lock);
1690 return ret;
1691 }
1692
1693 int
1694 xhci_intr1(struct xhci_softc * const sc)
1695 {
1696 uint32_t usbsts;
1697 uint32_t iman;
1698
1699 XHCIHIST_FUNC();
1700
1701 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1702 XHCIHIST_CALLARGS("USBSTS 0x%08jx", usbsts, 0, 0, 0);
1703 if ((usbsts & (XHCI_STS_HSE | XHCI_STS_EINT | XHCI_STS_PCD |
1704 XHCI_STS_HCE)) == 0) {
1705 DPRINTFN(16, "ignored intr not for %jd",
1706 device_unit(sc->sc_dev), 0, 0, 0);
1707 return 0;
1708 }
1709
1710 /*
1711 * Clear EINT and other transient flags so as not to misinterpret the
1712 * next shared interrupt. Also, to avoid a race, EINT must be cleared
1713 * before XHCI_IMAN_INTR_PEND is cleared.
1714 */
1715 xhci_op_write_4(sc, XHCI_USBSTS, usbsts & XHCI_STS_RSVDP0);
1716
1717 #ifdef XHCI_DEBUG
1718 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1719 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1720 #endif
1721
1722 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1723 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1724 iman |= XHCI_IMAN_INTR_PEND;
1725 xhci_rt_write_4(sc, XHCI_IMAN(0), iman);
1726
1727 #ifdef XHCI_DEBUG
1728 iman = xhci_rt_read_4(sc, XHCI_IMAN(0));
1729 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0);
1730 usbsts = xhci_op_read_4(sc, XHCI_USBSTS);
1731 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0);
1732 #endif
1733
1734 return 1;
1735 }
1736
1737 /*
1738 * Three port speed encodings are used in the USB stack:
1739 *
1740 * usbdi speed
1741 * definition: USB_SPEED_* in usb.h
1742 * Used in struct usbd_device in the USB stack; the ioctl
1743 * interface uses these values too.
1744 * port_status speed
1745 * definition: UPS_*_SPEED in usb.h
1746 * Used in usb_port_status_t and valid only for USB 2.0.
1747 * The speed value is always 0 for Super Speed or faster, and
1748 * dwExtPortStatus of usb_port_status_ext_t indicates the port speed.
1749 * Note that some 3.0 values overlap with 2.0 values.
1750 * (e.g. 0x200 means UPS_PORT_POWER_SS in SS and
1751 * means UPS_LOW_SPEED in HS.)
1752 * The port status returned from a hub also uses these values.
1753 * On NetBSD, UPS_OTHER_SPEED indicates the port speed is Super
1754 * Speed or faster.
1755 * xspeed
1756 * definition: Protocol Speed ID (PSI) (xHCI 1.1 7.2.1)
1757 * Used only in the slot context and the PORTSC registers of xhci.
1758 * The difference between usbdi speed and xspeed is
1759 * that the FS and LS values are swapped.
1760 */
1761
1762 /* convert usbdi speed to xspeed */
1763 static int
1764 xhci_speed2xspeed(int speed)
1765 {
1766 switch (speed) {
1767 case USB_SPEED_LOW: return 2;
1768 case USB_SPEED_FULL: return 1;
1769 default: return speed;
1770 }
1771 }
1772
1773 #if 0
1774 /* convert xspeed to usbdi speed */
1775 static int
1776 xhci_xspeed2speed(int xspeed)
1777 {
1778 switch (xspeed) {
1779 case 1: return USB_SPEED_FULL;
1780 case 2: return USB_SPEED_LOW;
1781 default: return xspeed;
1782 }
1783 }
1784 #endif
1785
1786 /* convert xspeed to port status speed */
1787 static int
1788 xhci_xspeed2psspeed(int xspeed)
1789 {
1790 switch (xspeed) {
1791 case 0: return 0;
1792 case 1: return UPS_FULL_SPEED;
1793 case 2: return UPS_LOW_SPEED;
1794 case 3: return UPS_HIGH_SPEED;
1795 default: return UPS_OTHER_SPEED;
1796 }
1797 }
1798
1799 /*
1800 * Construct input contexts and issue TRB to open pipe.
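 * The input context filled in by xhci_setup_ctx() is laid out with
 * the Input Control Context (add/drop flags) at ici 0, the slot
 * context at ici 1 and the endpoint context for dci n at ici n + 1,
 * matching xhci_dci_to_ici() above.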
1801 */ 1802 static usbd_status 1803 xhci_configure_endpoint(struct usbd_pipe *pipe) 1804 { 1805 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1806 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1807 #ifdef USB_DEBUG 1808 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 1809 #endif 1810 struct xhci_soft_trb trb; 1811 usbd_status err; 1812 1813 XHCIHIST_FUNC(); 1814 XHCIHIST_CALLARGS("slot %ju dci %ju epaddr 0x%02jx attr 0x%02jx", 1815 xs->xs_idx, dci, pipe->up_endpoint->ue_edesc->bEndpointAddress, 1816 pipe->up_endpoint->ue_edesc->bmAttributes); 1817 1818 /* XXX ensure input context is available? */ 1819 1820 memset(xhci_slot_get_icv(sc, xs, 0), 0, sc->sc_pgsz); 1821 1822 /* set up context */ 1823 xhci_setup_ctx(pipe); 1824 1825 HEXDUMP("input control context", xhci_slot_get_icv(sc, xs, 0), 1826 sc->sc_ctxsz * 1); 1827 HEXDUMP("input endpoint context", xhci_slot_get_icv(sc, xs, 1828 xhci_dci_to_ici(dci)), sc->sc_ctxsz * 1); 1829 1830 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0); 1831 trb.trb_2 = 0; 1832 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1833 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP); 1834 1835 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 1836 1837 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 1838 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, dci), 1839 sc->sc_ctxsz * 1); 1840 1841 return err; 1842 } 1843 1844 #if 0 1845 static usbd_status 1846 xhci_unconfigure_endpoint(struct usbd_pipe *pipe) 1847 { 1848 #ifdef USB_DEBUG 1849 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1850 #endif 1851 1852 XHCIHIST_FUNC(); 1853 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0); 1854 1855 return USBD_NORMAL_COMPLETION; 1856 } 1857 #endif 1858 1859 /* 4.6.8, 6.4.3.7 */ 1860 static usbd_status 1861 xhci_reset_endpoint_locked(struct usbd_pipe *pipe) 1862 { 1863 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1864 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1865 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 1866 struct xhci_soft_trb trb; 1867 usbd_status err; 1868 1869 XHCIHIST_FUNC(); 1870 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 1871 1872 KASSERT(mutex_owned(&sc->sc_lock)); 1873 1874 trb.trb_0 = 0; 1875 trb.trb_2 = 0; 1876 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1877 XHCI_TRB_3_EP_SET(dci) | 1878 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP); 1879 1880 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 1881 1882 return err; 1883 } 1884 1885 static usbd_status 1886 xhci_reset_endpoint(struct usbd_pipe *pipe) 1887 { 1888 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1889 1890 mutex_enter(&sc->sc_lock); 1891 usbd_status ret = xhci_reset_endpoint_locked(pipe); 1892 mutex_exit(&sc->sc_lock); 1893 1894 return ret; 1895 } 1896 1897 /* 1898 * 4.6.9, 6.4.3.8 1899 * Stop execution of TDs on xfer ring. 1900 * Should be called with sc_lock held. 
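 * The trb3flags argument is OR'ed into the command TRB so callers can
 * request variants; xhci_suspend() passes XHCI_TRB_3_SUSP_EP_BIT to
 * issue Stop Endpoint with the Suspend (SP) flag set.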
1901 */ 1902 static usbd_status 1903 xhci_stop_endpoint_cmd(struct xhci_softc *sc, struct xhci_slot *xs, u_int dci, 1904 uint32_t trb3flags) 1905 { 1906 struct xhci_soft_trb trb; 1907 usbd_status err; 1908 1909 XHCIHIST_FUNC(); 1910 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 1911 1912 KASSERT(mutex_owned(&sc->sc_lock)); 1913 1914 trb.trb_0 = 0; 1915 trb.trb_2 = 0; 1916 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1917 XHCI_TRB_3_EP_SET(dci) | 1918 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP) | 1919 trb3flags; 1920 1921 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 1922 1923 return err; 1924 } 1925 1926 static usbd_status 1927 xhci_stop_endpoint(struct usbd_pipe *pipe) 1928 { 1929 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1930 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1931 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 1932 1933 XHCIHIST_FUNC(); 1934 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 1935 1936 KASSERT(mutex_owned(&sc->sc_lock)); 1937 1938 return xhci_stop_endpoint_cmd(sc, xs, dci, 0); 1939 } 1940 1941 /* 1942 * Set TR Dequeue Pointer. 1943 * xHCI 1.1 4.6.10 6.4.3.9 1944 * Purge all of the TRBs on ring and reinitialize ring. 1945 * Set TR dequeue Pointer to 0 and Cycle State to 1. 1946 * EPSTATE of endpoint must be ERROR or STOPPED, otherwise CONTEXT_STATE 1947 * error will be generated. 1948 */ 1949 static usbd_status 1950 xhci_set_dequeue_locked(struct usbd_pipe *pipe) 1951 { 1952 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1953 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1954 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 1955 struct xhci_ring * const xr = xs->xs_xr[dci]; 1956 struct xhci_soft_trb trb; 1957 usbd_status err; 1958 1959 XHCIHIST_FUNC(); 1960 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 1961 1962 KASSERT(mutex_owned(&sc->sc_lock)); 1963 KASSERT(xr != NULL); 1964 1965 xhci_host_dequeue(xr); 1966 1967 /* set DCS */ 1968 trb.trb_0 = xhci_ring_trbp(xr, 0) | 1; /* XXX */ 1969 trb.trb_2 = 0; 1970 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1971 XHCI_TRB_3_EP_SET(dci) | 1972 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE); 1973 1974 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 1975 1976 return err; 1977 } 1978 1979 static usbd_status 1980 xhci_set_dequeue(struct usbd_pipe *pipe) 1981 { 1982 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1983 1984 mutex_enter(&sc->sc_lock); 1985 usbd_status ret = xhci_set_dequeue_locked(pipe); 1986 mutex_exit(&sc->sc_lock); 1987 1988 return ret; 1989 } 1990 1991 /* 1992 * Open new pipe: called from usbd_setup_pipe_flags. 1993 * Fills methods of pipe. 1994 * If pipe is not for ep0, calls configure_endpoint. 
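 * A transfer ring is allocated here for the endpoint's DCI.  For ep0
 * the endpoint context is installed by the later Address Device command
 * (see xhci_set_address) rather than by Configure Endpoint.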
1995 */ 1996 static usbd_status 1997 xhci_open(struct usbd_pipe *pipe) 1998 { 1999 struct usbd_device * const dev = pipe->up_dev; 2000 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe; 2001 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus); 2002 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2003 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 2004 const u_int dci = xhci_ep_get_dci(ed); 2005 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 2006 usbd_status err; 2007 2008 XHCIHIST_FUNC(); 2009 XHCIHIST_CALLARGS("addr %jd depth %jd port %jd speed %jd", dev->ud_addr, 2010 dev->ud_depth, dev->ud_powersrc->up_portno, dev->ud_speed); 2011 DPRINTFN(1, " dci %ju type 0x%02jx epaddr 0x%02jx attr 0x%02jx", 2012 xhci_ep_get_dci(ed), ed->bDescriptorType, ed->bEndpointAddress, 2013 ed->bmAttributes); 2014 DPRINTFN(1, " mps %ju ival %ju", UGETW(ed->wMaxPacketSize), 2015 ed->bInterval, 0, 0); 2016 2017 if (sc->sc_dying) 2018 return USBD_IOERROR; 2019 2020 /* Root Hub */ 2021 if (dev->ud_depth == 0 && dev->ud_powersrc->up_portno == 0) { 2022 switch (ed->bEndpointAddress) { 2023 case USB_CONTROL_ENDPOINT: 2024 pipe->up_methods = &roothub_ctrl_methods; 2025 break; 2026 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT: 2027 pipe->up_methods = &xhci_root_intr_methods; 2028 break; 2029 default: 2030 pipe->up_methods = NULL; 2031 DPRINTFN(0, "bad bEndpointAddress 0x%02jx", 2032 ed->bEndpointAddress, 0, 0, 0); 2033 return USBD_INVAL; 2034 } 2035 return USBD_NORMAL_COMPLETION; 2036 } 2037 2038 switch (xfertype) { 2039 case UE_CONTROL: 2040 pipe->up_methods = &xhci_device_ctrl_methods; 2041 break; 2042 case UE_ISOCHRONOUS: 2043 pipe->up_methods = &xhci_device_isoc_methods; 2044 pipe->up_serialise = false; 2045 xpipe->xp_isoc_next = -1; 2046 break; 2047 case UE_BULK: 2048 pipe->up_methods = &xhci_device_bulk_methods; 2049 break; 2050 case UE_INTERRUPT: 2051 pipe->up_methods = &xhci_device_intr_methods; 2052 break; 2053 default: 2054 return USBD_IOERROR; 2055 break; 2056 } 2057 2058 KASSERT(xs != NULL); 2059 KASSERT(xs->xs_xr[dci] == NULL); 2060 2061 /* allocate transfer ring */ 2062 err = xhci_ring_init(sc, &xs->xs_xr[dci], XHCI_TRANSFER_RING_TRBS, 2063 XHCI_TRB_ALIGN); 2064 if (err) { 2065 DPRINTFN(1, "ring alloc failed %jd", err, 0, 0, 0); 2066 return err; 2067 } 2068 2069 if (ed->bEndpointAddress != USB_CONTROL_ENDPOINT) 2070 return xhci_configure_endpoint(pipe); 2071 2072 return USBD_NORMAL_COMPLETION; 2073 } 2074 2075 /* 2076 * Closes pipe, called from usbd_kill_pipe via close methods. 2077 * If the endpoint to be closed is ep0, disable_slot. 2078 * Should be called with sc_lock held. 
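 * Other endpoints are stopped if necessary and then dropped with a
 * Configure Endpoint command whose input control context sets only the
 * Drop Context flag for this DCI; the transfer ring is freed afterwards.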
2079 */ 2080 static void 2081 xhci_close_pipe(struct usbd_pipe *pipe) 2082 { 2083 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 2084 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2085 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 2086 const u_int dci = xhci_ep_get_dci(ed); 2087 struct xhci_soft_trb trb; 2088 uint32_t *cp; 2089 2090 XHCIHIST_FUNC(); 2091 2092 if (sc->sc_dying) 2093 return; 2094 2095 /* xs is uninitialized before xhci_init_slot */ 2096 if (xs == NULL || xs->xs_idx == 0) 2097 return; 2098 2099 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju", 2100 (uintptr_t)pipe, xs->xs_idx, dci, 0); 2101 2102 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx"); 2103 KASSERT(mutex_owned(&sc->sc_lock)); 2104 2105 if (pipe->up_dev->ud_depth == 0) 2106 return; 2107 2108 if (dci == XHCI_DCI_EP_CONTROL) { 2109 DPRINTFN(4, "closing ep0", 0, 0, 0, 0); 2110 /* This frees all rings */ 2111 xhci_disable_slot(sc, xs->xs_idx); 2112 return; 2113 } 2114 2115 if (xhci_get_epstate(sc, xs, dci) != XHCI_EPSTATE_STOPPED) 2116 (void)xhci_stop_endpoint(pipe); 2117 2118 /* 2119 * set appropriate bit to be dropped. 2120 * don't set DC bit to 1, otherwise all endpoints 2121 * would be deconfigured. 2122 */ 2123 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL); 2124 cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci)); 2125 cp[1] = htole32(0); 2126 2127 /* XXX should be most significant one, not dci? */ 2128 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT)); 2129 cp[0] = htole32(XHCI_SCTX_0_CTX_NUM_SET(dci)); 2130 2131 /* configure ep context performs an implicit dequeue */ 2132 xhci_host_dequeue(xs->xs_xr[dci]); 2133 2134 /* sync input contexts before they are read from memory */ 2135 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE); 2136 2137 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0); 2138 trb.trb_2 = 0; 2139 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 2140 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP); 2141 2142 (void)xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 2143 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 2144 2145 xhci_ring_free(sc, &xs->xs_xr[dci]); 2146 xs->xs_xr[dci] = NULL; 2147 } 2148 2149 /* 2150 * Abort transfer. 2151 * Should be called with sc_lock held. 2152 */ 2153 static void 2154 xhci_abortx(struct usbd_xfer *xfer) 2155 { 2156 XHCIHIST_FUNC(); 2157 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 2158 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 2159 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 2160 2161 XHCIHIST_CALLARGS("xfer %#jx pipe %#jx", 2162 (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0); 2163 2164 KASSERT(mutex_owned(&sc->sc_lock)); 2165 ASSERT_SLEEPABLE(); 2166 2167 KASSERTMSG((xfer->ux_status == USBD_CANCELLED || 2168 xfer->ux_status == USBD_TIMEOUT), 2169 "bad abort status: %d", xfer->ux_status); 2170 2171 /* 2172 * If we're dying, skip the hardware action and just notify the 2173 * software that we're done. 2174 */ 2175 if (sc->sc_dying) { 2176 DPRINTFN(4, "xfer %#jx dying %ju", (uintptr_t)xfer, 2177 xfer->ux_status, 0, 0); 2178 goto dying; 2179 } 2180 2181 /* 2182 * HC Step 1: Stop execution of TD on the ring. 
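	 * If the endpoint is already halted it is reset with a Reset
	 * Endpoint command instead (the xHC would reject a Stop Endpoint
	 * issued to a halted endpoint); if it is already stopped no
	 * command is needed at all.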
2183 */ 2184 switch (xhci_get_epstate(sc, xs, dci)) { 2185 case XHCI_EPSTATE_HALTED: 2186 (void)xhci_reset_endpoint_locked(xfer->ux_pipe); 2187 break; 2188 case XHCI_EPSTATE_STOPPED: 2189 break; 2190 default: 2191 (void)xhci_stop_endpoint(xfer->ux_pipe); 2192 break; 2193 } 2194 #ifdef DIAGNOSTIC 2195 uint32_t epst = xhci_get_epstate(sc, xs, dci); 2196 if (epst != XHCI_EPSTATE_STOPPED) 2197 DPRINTFN(4, "dci %ju not stopped %ju", dci, epst, 0, 0); 2198 #endif 2199 2200 /* 2201 * HC Step 2: Remove any vestiges of the xfer from the ring. 2202 */ 2203 xhci_set_dequeue_locked(xfer->ux_pipe); 2204 2205 /* 2206 * Final Step: Notify completion to waiting xfers. 2207 */ 2208 dying: 2209 usb_transfer_complete(xfer); 2210 DPRINTFN(14, "end", 0, 0, 0, 0); 2211 2212 KASSERT(mutex_owned(&sc->sc_lock)); 2213 } 2214 2215 static void 2216 xhci_host_dequeue(struct xhci_ring * const xr) 2217 { 2218 /* When dequeueing the controller, update our struct copy too */ 2219 memset(xr->xr_trb, 0, xr->xr_ntrb * XHCI_TRB_SIZE); 2220 usb_syncmem(&xr->xr_dma, 0, xr->xr_ntrb * XHCI_TRB_SIZE, 2221 BUS_DMASYNC_PREWRITE); 2222 memset(xr->xr_cookies, 0, xr->xr_ntrb * sizeof(*xr->xr_cookies)); 2223 2224 xr->xr_ep = 0; 2225 xr->xr_cs = 1; 2226 } 2227 2228 /* 2229 * Recover STALLed endpoint. 2230 * xHCI 1.1 sect 4.10.2.1 2231 * Issue RESET_EP to recover halt condition and SET_TR_DEQUEUE to remove 2232 * all transfers on transfer ring. 2233 * These are done in thread context asynchronously. 2234 */ 2235 static void 2236 xhci_clear_endpoint_stall_async_task(void *cookie) 2237 { 2238 struct usbd_xfer * const xfer = cookie; 2239 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 2240 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 2241 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 2242 struct xhci_ring * const tr = xs->xs_xr[dci]; 2243 2244 XHCIHIST_FUNC(); 2245 XHCIHIST_CALLARGS("xfer %#jx slot %ju dci %ju", (uintptr_t)xfer, xs->xs_idx, 2246 dci, 0); 2247 2248 /* 2249 * XXXMRG: Stall task can run after slot is disabled when yanked. 2250 * This hack notices that the xs has been memset() in 2251 * xhci_disable_slot() and returns. Both xhci_reset_endpoint() 2252 * and xhci_set_dequeue() rely upon a valid ring setup for correct 2253 * operation, and the latter will fault, as would 2254 * usb_transfer_complete() if it got that far. 2255 */ 2256 if (xs->xs_idx == 0) { 2257 DPRINTFN(4, "ends xs_idx is 0", 0, 0, 0, 0); 2258 return; 2259 } 2260 2261 KASSERT(tr != NULL); 2262 2263 xhci_reset_endpoint(xfer->ux_pipe); 2264 xhci_set_dequeue(xfer->ux_pipe); 2265 2266 mutex_enter(&sc->sc_lock); 2267 tr->is_halted = false; 2268 usb_transfer_complete(xfer); 2269 mutex_exit(&sc->sc_lock); 2270 DPRINTFN(4, "ends", 0, 0, 0, 0); 2271 } 2272 2273 static usbd_status 2274 xhci_clear_endpoint_stall_async(struct usbd_xfer *xfer) 2275 { 2276 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 2277 struct xhci_pipe * const xp = (struct xhci_pipe *)xfer->ux_pipe; 2278 2279 XHCIHIST_FUNC(); 2280 XHCIHIST_CALLARGS("xfer %#jx", (uintptr_t)xfer, 0, 0, 0); 2281 2282 if (sc->sc_dying) { 2283 return USBD_IOERROR; 2284 } 2285 2286 usb_init_task(&xp->xp_async_task, 2287 xhci_clear_endpoint_stall_async_task, xfer, USB_TASKQ_MPSAFE); 2288 usb_add_task(xfer->ux_pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC); 2289 DPRINTFN(4, "ends", 0, 0, 0, 0); 2290 2291 return USBD_NORMAL_COMPLETION; 2292 } 2293 2294 /* Process roothub port status/change events and notify to uhub_intr. 
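 * The controller port number is translated to a (bus, root hub port)
 * pair, the corresponding bit is set in the buffer of the pending root
 * intr xfer, and the xfer is completed.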
*/ 2295 static void 2296 xhci_rhpsc(struct xhci_softc * const sc, u_int ctlrport) 2297 { 2298 XHCIHIST_FUNC(); 2299 XHCIHIST_CALLARGS("xhci%jd: port %ju status change", 2300 device_unit(sc->sc_dev), ctlrport, 0, 0); 2301 2302 if (ctlrport > sc->sc_maxports) 2303 return; 2304 2305 const size_t bn = xhci_ctlrport2bus(sc, ctlrport); 2306 const size_t rhp = xhci_ctlrport2rhport(sc, ctlrport); 2307 struct usbd_xfer * const xfer = sc->sc_intrxfer[bn]; 2308 2309 DPRINTFN(4, "xhci%jd: bus %jd bp %ju xfer %#jx status change", 2310 device_unit(sc->sc_dev), bn, rhp, (uintptr_t)xfer); 2311 2312 if (xfer == NULL) 2313 return; 2314 KASSERT(xfer->ux_status == USBD_IN_PROGRESS); 2315 2316 uint8_t *p = xfer->ux_buf; 2317 memset(p, 0, xfer->ux_length); 2318 p[rhp / NBBY] |= 1 << (rhp % NBBY); 2319 xfer->ux_actlen = xfer->ux_length; 2320 xfer->ux_status = USBD_NORMAL_COMPLETION; 2321 usb_transfer_complete(xfer); 2322 } 2323 2324 /* Process Transfer Events */ 2325 static void 2326 xhci_event_transfer(struct xhci_softc * const sc, 2327 const struct xhci_trb * const trb) 2328 { 2329 uint64_t trb_0; 2330 uint32_t trb_2, trb_3; 2331 uint8_t trbcode; 2332 u_int slot, dci; 2333 struct xhci_slot *xs; 2334 struct xhci_ring *xr; 2335 struct xhci_xfer *xx; 2336 struct usbd_xfer *xfer; 2337 usbd_status err; 2338 2339 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2340 2341 trb_0 = le64toh(trb->trb_0); 2342 trb_2 = le32toh(trb->trb_2); 2343 trb_3 = le32toh(trb->trb_3); 2344 trbcode = XHCI_TRB_2_ERROR_GET(trb_2); 2345 slot = XHCI_TRB_3_SLOT_GET(trb_3); 2346 dci = XHCI_TRB_3_EP_GET(trb_3); 2347 xs = &sc->sc_slots[slot]; 2348 xr = xs->xs_xr[dci]; 2349 2350 /* sanity check */ 2351 KASSERT(xr != NULL); 2352 KASSERTMSG(xs->xs_idx != 0 && xs->xs_idx <= sc->sc_maxslots, 2353 "invalid xs_idx %u slot %u", xs->xs_idx, slot); 2354 2355 int idx = 0; 2356 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) { 2357 if (xhci_trb_get_idx(xr, trb_0, &idx)) { 2358 DPRINTFN(0, "invalid trb_0 %#jx", trb_0, 0, 0, 0); 2359 return; 2360 } 2361 xx = xr->xr_cookies[idx]; 2362 2363 /* clear cookie of consumed TRB */ 2364 xr->xr_cookies[idx] = NULL; 2365 2366 /* 2367 * xx is NULL if pipe is opened but xfer is not started. 2368 * It happens when stopping idle pipe. 2369 */ 2370 if (xx == NULL || trbcode == XHCI_TRB_ERROR_LENGTH) { 2371 DPRINTFN(1, "Ignore #%ju: cookie %#jx cc %ju dci %ju", 2372 idx, (uintptr_t)xx, trbcode, dci); 2373 DPRINTFN(1, " orig TRB %#jx type %ju", trb_0, 2374 XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)), 2375 0, 0); 2376 return; 2377 } 2378 } else { 2379 /* When ED != 0, trb_0 is virtual addr of struct xhci_xfer. 
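		 * The low bits of that pointer carry the stage tag set by
		 * xhci_device_ctrl_start, so they are masked off here.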
		 */
2380 		xx = (void *)(uintptr_t)(trb_0 & ~0x3);
2381 	}
2382 	/* XXX this may not happen */
2383 	if (xx == NULL) {
2384 		DPRINTFN(1, "xfer done: xx is NULL", 0, 0, 0, 0);
2385 		return;
2386 	}
2387 	xfer = &xx->xx_xfer;
2388 	/* XXX this may happen when detaching */
2389 	if (xfer == NULL) {
2390 		DPRINTFN(1, "xx(%#jx)->xx_xfer is NULL trb_0 %#jx",
2391 		    (uintptr_t)xx, trb_0, 0, 0);
2392 		return;
2393 	}
2394 	DPRINTFN(14, "xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
2395 	/* XXX I dunno why this happens */
2396 	KASSERTMSG(xfer->ux_pipe != NULL, "xfer(%p)->ux_pipe is NULL", xfer);
2397 
2398 	if (!xfer->ux_pipe->up_repeat &&
2399 	    SIMPLEQ_EMPTY(&xfer->ux_pipe->up_queue)) {
2400 		DPRINTFN(1, "xfer(%#jx)->pipe not queued", (uintptr_t)xfer,
2401 		    0, 0, 0);
2402 		return;
2403 	}
2404 
2405 	const uint8_t xfertype =
2406 	    UE_GET_XFERTYPE(xfer->ux_pipe->up_endpoint->ue_edesc->bmAttributes);
2407 
2408 	/* 4.11.5.2 Event Data TRB */
2409 	if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) {
2410 		DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx"
2411 		    " %02jx", trb_0, XHCI_TRB_2_REM_GET(trb_2), trbcode, 0);
2412 		if ((trb_0 & 0x3) == 0x3) {
2413 			xfer->ux_actlen = XHCI_TRB_2_REM_GET(trb_2);
2414 		}
2415 	}
2416 
2417 	switch (trbcode) {
2418 	case XHCI_TRB_ERROR_SHORT_PKT:
2419 	case XHCI_TRB_ERROR_SUCCESS:
2420 		/*
2421 		 * A ctrl transfer can generate two events if it has a Data
2422 		 * stage. A short data stage can be OK and should not
2423 		 * complete the transfer as the status stage needs to be
2424 		 * performed.
2425 		 *
2426 		 * Note: Data and Status stage events point at same xfer.
2427 		 * ux_actlen and ux_dmabuf will be passed to
2428 		 * usb_transfer_complete after the Status stage event.
2429 		 *
2430 		 * It can be distinguished which stage generates the event:
2431 		 * + by checking least 3 bits of trb_0 if ED==1.
2432 		 *   (see xhci_device_ctrl_start).
2433 		 * + by checking the type of original TRB if ED==0.
2434 		 *
2435 		 * In addition, intr, bulk, and isoc transfers currently
2436 		 * consist of a single TD, so the "skip" is not needed.
2437 		 * ctrl xfer uses EVENT_DATA, and others do not.
2438 		 * Thus driver can switch the flow by checking ED bit.
2439 		 */
2440 		if (xfertype == UE_ISOCHRONOUS) {
2441 			xfer->ux_frlengths[xx->xx_isoc_done] -=
2442 			    XHCI_TRB_2_REM_GET(trb_2);
2443 			xfer->ux_actlen += xfer->ux_frlengths[xx->xx_isoc_done];
2444 			if (++xx->xx_isoc_done < xfer->ux_nframes)
2445 				return;
2446 		} else
2447 		if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2448 			if (xfer->ux_actlen == 0)
2449 				xfer->ux_actlen = xfer->ux_length -
2450 				    XHCI_TRB_2_REM_GET(trb_2);
2451 			if (XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3))
2452 			    == XHCI_TRB_TYPE_DATA_STAGE) {
2453 				return;
2454 			}
2455 		} else if ((trb_0 & 0x3) == 0x3) {
2456 			return;
2457 		}
2458 		err = USBD_NORMAL_COMPLETION;
2459 		break;
2460 	case XHCI_TRB_ERROR_STOPPED:
2461 	case XHCI_TRB_ERROR_LENGTH:
2462 	case XHCI_TRB_ERROR_STOPPED_SHORT:
2463 		err = USBD_IOERROR;
2464 		break;
2465 	case XHCI_TRB_ERROR_STALL:
2466 	case XHCI_TRB_ERROR_BABBLE:
2467 		DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2468 		xr->is_halted = true;
2469 		/*
2470 		 * Try to claim this xfer for completion. If it has already
2471 		 * completed or aborted, drop it on the floor.
2472 		 */
2473 		if (!usbd_xfer_trycomplete(xfer))
2474 			return;
2475 
2476 		/*
2477 		 * Stalled endpoints can be recovered by issuing
2478 		 * command TRB TYPE_RESET_EP on xHCI instead of
2479 		 * issuing request CLEAR_FEATURE UF_ENDPOINT_HALT
2480 		 * on the endpoint. However, this function may be
2481 		 * called from softint context (e.g.
from umass), 2482 * in that case driver gets KASSERT in cv_timedwait 2483 * in xhci_do_command. 2484 * To avoid this, this runs reset_endpoint and 2485 * usb_transfer_complete in usb task thread 2486 * asynchronously (and then umass issues clear 2487 * UF_ENDPOINT_HALT). 2488 */ 2489 2490 /* Override the status. */ 2491 xfer->ux_status = USBD_STALLED; 2492 2493 xhci_clear_endpoint_stall_async(xfer); 2494 return; 2495 default: 2496 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0); 2497 err = USBD_IOERROR; 2498 break; 2499 } 2500 2501 /* 2502 * Try to claim this xfer for completion. If it has already 2503 * completed or aborted, drop it on the floor. 2504 */ 2505 if (!usbd_xfer_trycomplete(xfer)) 2506 return; 2507 2508 /* Set the status. */ 2509 xfer->ux_status = err; 2510 2511 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 || 2512 (trb_0 & 0x3) == 0x0) { 2513 usb_transfer_complete(xfer); 2514 } 2515 } 2516 2517 /* Process Command complete events */ 2518 static void 2519 xhci_event_cmd(struct xhci_softc * const sc, const struct xhci_trb * const trb) 2520 { 2521 uint64_t trb_0; 2522 uint32_t trb_2, trb_3; 2523 2524 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2525 2526 KASSERT(mutex_owned(&sc->sc_lock)); 2527 2528 trb_0 = le64toh(trb->trb_0); 2529 trb_2 = le32toh(trb->trb_2); 2530 trb_3 = le32toh(trb->trb_3); 2531 2532 if (trb_0 == sc->sc_command_addr) { 2533 sc->sc_resultpending = false; 2534 2535 sc->sc_result_trb.trb_0 = trb_0; 2536 sc->sc_result_trb.trb_2 = trb_2; 2537 sc->sc_result_trb.trb_3 = trb_3; 2538 if (XHCI_TRB_2_ERROR_GET(trb_2) != 2539 XHCI_TRB_ERROR_SUCCESS) { 2540 DPRINTFN(1, "command completion " 2541 "failure: 0x%016jx 0x%08jx 0x%08jx", 2542 trb_0, trb_2, trb_3, 0); 2543 } 2544 cv_signal(&sc->sc_command_cv); 2545 } else { 2546 DPRINTFN(1, "spurious event: %#jx 0x%016jx " 2547 "0x%08jx 0x%08jx", (uintptr_t)trb, trb_0, trb_2, trb_3); 2548 } 2549 } 2550 2551 /* 2552 * Process events. 2553 * called from xhci_softintr 2554 */ 2555 static void 2556 xhci_handle_event(struct xhci_softc * const sc, 2557 const struct xhci_trb * const trb) 2558 { 2559 uint64_t trb_0; 2560 uint32_t trb_2, trb_3; 2561 2562 XHCIHIST_FUNC(); 2563 2564 trb_0 = le64toh(trb->trb_0); 2565 trb_2 = le32toh(trb->trb_2); 2566 trb_3 = le32toh(trb->trb_3); 2567 2568 XHCIHIST_CALLARGS("event: %#jx 0x%016jx 0x%08jx 0x%08jx", 2569 (uintptr_t)trb, trb_0, trb_2, trb_3); 2570 2571 /* 2572 * 4.11.3.1, 6.4.2.1 2573 * TRB Pointer is invalid for these completion codes. 
2574 */ 2575 switch (XHCI_TRB_2_ERROR_GET(trb_2)) { 2576 case XHCI_TRB_ERROR_RING_UNDERRUN: 2577 case XHCI_TRB_ERROR_RING_OVERRUN: 2578 case XHCI_TRB_ERROR_VF_RING_FULL: 2579 return; 2580 default: 2581 if (trb_0 == 0) { 2582 return; 2583 } 2584 break; 2585 } 2586 2587 switch (XHCI_TRB_3_TYPE_GET(trb_3)) { 2588 case XHCI_TRB_EVENT_TRANSFER: 2589 xhci_event_transfer(sc, trb); 2590 break; 2591 case XHCI_TRB_EVENT_CMD_COMPLETE: 2592 xhci_event_cmd(sc, trb); 2593 break; 2594 case XHCI_TRB_EVENT_PORT_STS_CHANGE: 2595 xhci_rhpsc(sc, (uint32_t)((trb_0 >> 24) & 0xff)); 2596 break; 2597 default: 2598 break; 2599 } 2600 } 2601 2602 static void 2603 xhci_softintr(void *v) 2604 { 2605 struct usbd_bus * const bus = v; 2606 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2607 struct xhci_ring * const er = sc->sc_er; 2608 struct xhci_trb *trb; 2609 int i, j, k; 2610 2611 XHCIHIST_FUNC(); 2612 2613 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 2614 2615 i = er->xr_ep; 2616 j = er->xr_cs; 2617 2618 XHCIHIST_CALLARGS("er: xr_ep %jd xr_cs %jd", i, j, 0, 0); 2619 2620 while (1) { 2621 usb_syncmem(&er->xr_dma, XHCI_TRB_SIZE * i, XHCI_TRB_SIZE, 2622 BUS_DMASYNC_POSTREAD); 2623 trb = &er->xr_trb[i]; 2624 k = (le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0; 2625 2626 if (j != k) 2627 break; 2628 2629 xhci_handle_event(sc, trb); 2630 2631 i++; 2632 if (i == er->xr_ntrb) { 2633 i = 0; 2634 j ^= 1; 2635 } 2636 } 2637 2638 er->xr_ep = i; 2639 er->xr_cs = j; 2640 2641 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(er, er->xr_ep) | 2642 XHCI_ERDP_BUSY); 2643 2644 DPRINTFN(16, "ends", 0, 0, 0, 0); 2645 2646 return; 2647 } 2648 2649 static void 2650 xhci_poll(struct usbd_bus *bus) 2651 { 2652 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2653 2654 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2655 2656 mutex_enter(&sc->sc_intr_lock); 2657 int ret = xhci_intr1(sc); 2658 if (ret) { 2659 xhci_softintr(bus); 2660 } 2661 mutex_exit(&sc->sc_intr_lock); 2662 2663 return; 2664 } 2665 2666 static struct usbd_xfer * 2667 xhci_allocx(struct usbd_bus *bus, unsigned int nframes) 2668 { 2669 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2670 struct xhci_xfer *xx; 2671 u_int ntrbs; 2672 2673 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2674 2675 ntrbs = uimax(3, nframes); 2676 const size_t trbsz = sizeof(*xx->xx_trb) * ntrbs; 2677 2678 xx = pool_cache_get(sc->sc_xferpool, PR_WAITOK); 2679 if (xx != NULL) { 2680 memset(xx, 0, sizeof(*xx)); 2681 if (ntrbs > 0) { 2682 xx->xx_trb = kmem_alloc(trbsz, KM_SLEEP); 2683 xx->xx_ntrb = ntrbs; 2684 } 2685 #ifdef DIAGNOSTIC 2686 xx->xx_xfer.ux_state = XFER_BUSY; 2687 #endif 2688 } 2689 2690 return &xx->xx_xfer; 2691 } 2692 2693 static void 2694 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer) 2695 { 2696 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2697 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 2698 2699 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2700 2701 #ifdef DIAGNOSTIC 2702 if (xfer->ux_state != XFER_BUSY && 2703 xfer->ux_status != USBD_NOT_STARTED) { 2704 DPRINTFN(0, "xfer=%#jx not busy, 0x%08jx", 2705 (uintptr_t)xfer, xfer->ux_state, 0, 0); 2706 } 2707 xfer->ux_state = XFER_FREE; 2708 #endif 2709 if (xx->xx_ntrb > 0) { 2710 kmem_free(xx->xx_trb, xx->xx_ntrb * sizeof(*xx->xx_trb)); 2711 xx->xx_trb = NULL; 2712 xx->xx_ntrb = 0; 2713 } 2714 pool_cache_put(sc->sc_xferpool, xx); 2715 } 2716 2717 static bool 2718 xhci_dying(struct usbd_bus *bus) 2719 { 2720 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2721 2722 return sc->sc_dying; 2723 } 2724 2725 static void 2726 
xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock) 2727 { 2728 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2729 2730 *lock = &sc->sc_lock; 2731 } 2732 2733 extern uint32_t usb_cookie_no; 2734 2735 /* 2736 * xHCI 4.3 2737 * Called when uhub_explore finds a new device (via usbd_new_device). 2738 * Port initialization and speed detection (4.3.1) are already done in uhub.c. 2739 * This function does: 2740 * Allocate and construct dev structure of default endpoint (ep0). 2741 * Allocate and open pipe of ep0. 2742 * Enable slot and initialize slot context. 2743 * Set Address. 2744 * Read initial device descriptor. 2745 * Determine initial MaxPacketSize (mps) by speed. 2746 * Read full device descriptor. 2747 * Register this device. 2748 * Finally state of device transitions ADDRESSED. 2749 */ 2750 static usbd_status 2751 xhci_new_device(device_t parent, struct usbd_bus *bus, int depth, 2752 int speed, int port, struct usbd_port *up) 2753 { 2754 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2755 struct usbd_device *dev; 2756 usbd_status err; 2757 usb_device_descriptor_t *dd; 2758 struct xhci_slot *xs; 2759 uint32_t *cp; 2760 2761 XHCIHIST_FUNC(); 2762 XHCIHIST_CALLARGS("port %ju depth %ju speed %ju up %#jx", 2763 port, depth, speed, (uintptr_t)up); 2764 2765 KASSERT(KERNEL_LOCKED_P()); 2766 2767 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP); 2768 dev->ud_bus = bus; 2769 dev->ud_quirks = &usbd_no_quirk; 2770 dev->ud_addr = 0; 2771 dev->ud_ddesc.bMaxPacketSize = 0; 2772 dev->ud_depth = depth; 2773 dev->ud_powersrc = up; 2774 dev->ud_myhub = up->up_parent; 2775 dev->ud_speed = speed; 2776 dev->ud_langid = USBD_NOLANG; 2777 dev->ud_cookie.cookie = ++usb_cookie_no; 2778 2779 /* Set up default endpoint handle. */ 2780 dev->ud_ep0.ue_edesc = &dev->ud_ep0desc; 2781 /* doesn't matter, just don't let it uninitialized */ 2782 dev->ud_ep0.ue_toggle = 0; 2783 2784 /* Set up default endpoint descriptor. */ 2785 dev->ud_ep0desc.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE; 2786 dev->ud_ep0desc.bDescriptorType = UDESC_ENDPOINT; 2787 dev->ud_ep0desc.bEndpointAddress = USB_CONTROL_ENDPOINT; 2788 dev->ud_ep0desc.bmAttributes = UE_CONTROL; 2789 dev->ud_ep0desc.bInterval = 0; 2790 2791 /* 4.3, 4.8.2.1 */ 2792 switch (speed) { 2793 case USB_SPEED_SUPER: 2794 case USB_SPEED_SUPER_PLUS: 2795 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_3_MAX_CTRL_PACKET); 2796 break; 2797 case USB_SPEED_FULL: 2798 /* XXX using 64 as initial mps of ep0 in FS */ 2799 case USB_SPEED_HIGH: 2800 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_2_MAX_CTRL_PACKET); 2801 break; 2802 case USB_SPEED_LOW: 2803 default: 2804 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_MAX_IPACKET); 2805 break; 2806 } 2807 2808 up->up_dev = dev; 2809 2810 dd = &dev->ud_ddesc; 2811 2812 if (depth == 0 && port == 0) { 2813 KASSERT(bus->ub_devices[USB_ROOTHUB_INDEX] == NULL); 2814 bus->ub_devices[USB_ROOTHUB_INDEX] = dev; 2815 2816 /* Establish the default pipe. 
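		 * The root hub is not assigned an xHC slot; its control
		 * requests are served by the usbroothub code via
		 * xhci_roothub_ctrl.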
*/ 2817 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0, 2818 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0); 2819 if (err) { 2820 DPRINTFN(1, "setup default pipe failed %jd", err,0,0,0); 2821 goto bad; 2822 } 2823 err = usbd_get_initial_ddesc(dev, dd); 2824 if (err) { 2825 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0); 2826 goto bad; 2827 } 2828 } else { 2829 uint8_t slot = 0; 2830 2831 /* 4.3.2 */ 2832 err = xhci_enable_slot(sc, &slot); 2833 if (err) { 2834 DPRINTFN(1, "enable slot %ju", err, 0, 0, 0); 2835 goto bad; 2836 } 2837 2838 xs = &sc->sc_slots[slot]; 2839 dev->ud_hcpriv = xs; 2840 2841 /* 4.3.3 initialize slot structure */ 2842 err = xhci_init_slot(dev, slot); 2843 if (err) { 2844 DPRINTFN(1, "init slot %ju", err, 0, 0, 0); 2845 dev->ud_hcpriv = NULL; 2846 /* 2847 * We have to disable_slot here because 2848 * xs->xs_idx == 0 when xhci_init_slot fails, 2849 * in that case usbd_remove_dev won't work. 2850 */ 2851 mutex_enter(&sc->sc_lock); 2852 xhci_disable_slot(sc, slot); 2853 mutex_exit(&sc->sc_lock); 2854 goto bad; 2855 } 2856 2857 /* 2858 * We have to establish the default pipe _after_ slot 2859 * structure has been prepared. 2860 */ 2861 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0, 2862 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0); 2863 if (err) { 2864 DPRINTFN(1, "setup default pipe failed %jd", err, 0, 0, 2865 0); 2866 goto bad; 2867 } 2868 2869 /* 4.3.4 Address Assignment */ 2870 err = xhci_set_address(dev, slot, false); 2871 if (err) { 2872 DPRINTFN(1, "failed! to set address: %ju", err, 0, 0, 0); 2873 goto bad; 2874 } 2875 2876 /* Allow device time to set new address */ 2877 usbd_delay_ms(dev, USB_SET_ADDRESS_SETTLE); 2878 2879 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 2880 cp = xhci_slot_get_dcv(sc, xs, XHCI_DCI_SLOT); 2881 HEXDUMP("slot context", cp, sc->sc_ctxsz); 2882 uint8_t addr = XHCI_SCTX_3_DEV_ADDR_GET(le32toh(cp[3])); 2883 DPRINTFN(4, "device address %ju", addr, 0, 0, 0); 2884 /* 2885 * XXX ensure we know when the hardware does something 2886 * we can't yet cope with 2887 */ 2888 KASSERTMSG(addr >= 1 && addr <= 127, "addr %d", addr); 2889 dev->ud_addr = addr; 2890 2891 KASSERTMSG(bus->ub_devices[usb_addr2dindex(dev->ud_addr)] == NULL, 2892 "addr %d already allocated", dev->ud_addr); 2893 /* 2894 * The root hub is given its own slot 2895 */ 2896 bus->ub_devices[usb_addr2dindex(dev->ud_addr)] = dev; 2897 2898 err = usbd_get_initial_ddesc(dev, dd); 2899 if (err) { 2900 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0); 2901 goto bad; 2902 } 2903 2904 /* 4.8.2.1 */ 2905 if (USB_IS_SS(speed)) { 2906 if (dd->bMaxPacketSize != 9) { 2907 printf("%s: invalid mps 2^%u for SS ep0," 2908 " using 512\n", 2909 device_xname(sc->sc_dev), 2910 dd->bMaxPacketSize); 2911 dd->bMaxPacketSize = 9; 2912 } 2913 USETW(dev->ud_ep0desc.wMaxPacketSize, 2914 (1 << dd->bMaxPacketSize)); 2915 } else 2916 USETW(dev->ud_ep0desc.wMaxPacketSize, 2917 dd->bMaxPacketSize); 2918 DPRINTFN(4, "bMaxPacketSize %ju", dd->bMaxPacketSize, 0, 0, 0); 2919 err = xhci_update_ep0_mps(sc, xs, 2920 UGETW(dev->ud_ep0desc.wMaxPacketSize)); 2921 if (err) { 2922 DPRINTFN(1, "update mps of ep0 %ju", err, 0, 0, 0); 2923 goto bad; 2924 } 2925 } 2926 2927 err = usbd_reload_device_desc(dev); 2928 if (err) { 2929 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0); 2930 goto bad; 2931 } 2932 2933 DPRINTFN(1, "adding unit addr=%jd, rev=%02jx,", 2934 dev->ud_addr, UGETW(dd->bcdUSB), 0, 0); 2935 DPRINTFN(1, " class=%jd, subclass=%jd, protocol=%jd,", 2936 dd->bDeviceClass, dd->bDeviceSubClass, 2937 dd->bDeviceProtocol, 
0); 2938 DPRINTFN(1, " mps=%jd, len=%jd, noconf=%jd, speed=%jd", 2939 dd->bMaxPacketSize, dd->bLength, dd->bNumConfigurations, 2940 dev->ud_speed); 2941 2942 usbd_get_device_strings(dev); 2943 2944 usbd_add_dev_event(USB_EVENT_DEVICE_ATTACH, dev); 2945 2946 if (depth == 0 && port == 0) { 2947 usbd_attach_roothub(parent, dev); 2948 DPRINTFN(1, "root hub %#jx", (uintptr_t)dev, 0, 0, 0); 2949 return USBD_NORMAL_COMPLETION; 2950 } 2951 2952 err = usbd_probe_and_attach(parent, dev, port, dev->ud_addr); 2953 bad: 2954 if (err != USBD_NORMAL_COMPLETION) { 2955 if (depth == 0 && port == 0 && dev->ud_pipe0) 2956 usbd_kill_pipe(dev->ud_pipe0); 2957 usbd_remove_device(dev, up); 2958 } 2959 2960 return err; 2961 } 2962 2963 static usbd_status 2964 xhci_ring_init(struct xhci_softc * const sc, struct xhci_ring **xrp, 2965 size_t ntrb, size_t align) 2966 { 2967 size_t size = ntrb * XHCI_TRB_SIZE; 2968 struct xhci_ring *xr; 2969 2970 XHCIHIST_FUNC(); 2971 XHCIHIST_CALLARGS("xr %#jx ntrb %#jx align %#jx", 2972 (uintptr_t)*xrp, ntrb, align, 0); 2973 2974 xr = kmem_zalloc(sizeof(struct xhci_ring), KM_SLEEP); 2975 DPRINTFN(1, "ring %#jx", (uintptr_t)xr, 0, 0, 0); 2976 2977 int err = usb_allocmem(sc->sc_bus.ub_dmatag, size, align, 2978 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xr->xr_dma); 2979 if (err) { 2980 kmem_free(xr, sizeof(struct xhci_ring)); 2981 DPRINTFN(1, "alloc xr_dma failed %jd", err, 0, 0, 0); 2982 return err; 2983 } 2984 mutex_init(&xr->xr_lock, MUTEX_DEFAULT, IPL_SOFTUSB); 2985 xr->xr_cookies = kmem_zalloc(sizeof(*xr->xr_cookies) * ntrb, KM_SLEEP); 2986 xr->xr_trb = xhci_ring_trbv(xr, 0); 2987 xr->xr_ntrb = ntrb; 2988 xr->is_halted = false; 2989 xhci_host_dequeue(xr); 2990 *xrp = xr; 2991 2992 return USBD_NORMAL_COMPLETION; 2993 } 2994 2995 static void 2996 xhci_ring_free(struct xhci_softc * const sc, struct xhci_ring ** const xr) 2997 { 2998 if (*xr == NULL) 2999 return; 3000 3001 usb_freemem(&(*xr)->xr_dma); 3002 mutex_destroy(&(*xr)->xr_lock); 3003 kmem_free((*xr)->xr_cookies, 3004 sizeof(*(*xr)->xr_cookies) * (*xr)->xr_ntrb); 3005 kmem_free(*xr, sizeof(struct xhci_ring)); 3006 *xr = NULL; 3007 } 3008 3009 static void 3010 xhci_ring_put(struct xhci_softc * const sc, struct xhci_ring * const xr, 3011 void *cookie, struct xhci_soft_trb * const trbs, size_t ntrbs) 3012 { 3013 size_t i; 3014 u_int ri; 3015 u_int cs; 3016 uint64_t parameter; 3017 uint32_t status; 3018 uint32_t control; 3019 3020 XHCIHIST_FUNC(); 3021 XHCIHIST_CALLARGS("%#jx xr_ep %#jx xr_cs %ju", 3022 (uintptr_t)xr, xr->xr_ep, xr->xr_cs, 0); 3023 3024 KASSERTMSG(ntrbs < xr->xr_ntrb, "ntrbs %zu, xr->xr_ntrb %u", 3025 ntrbs, xr->xr_ntrb); 3026 for (i = 0; i < ntrbs; i++) { 3027 DPRINTFN(12, "xr %#jx trbs %#jx num %ju", (uintptr_t)xr, 3028 (uintptr_t)trbs, i, 0); 3029 DPRINTFN(12, " 0x%016jx 0x%08jx 0x%08jx", 3030 trbs[i].trb_0, trbs[i].trb_2, trbs[i].trb_3, 0); 3031 KASSERTMSG(XHCI_TRB_3_TYPE_GET(trbs[i].trb_3) != 3032 XHCI_TRB_TYPE_LINK, "trbs[%zu].trb3 %#x", i, trbs[i].trb_3); 3033 } 3034 3035 ri = xr->xr_ep; 3036 cs = xr->xr_cs; 3037 3038 /* 3039 * Although the xhci hardware can do scatter/gather dma from 3040 * arbitrary sized buffers, there is a non-obvious restriction 3041 * that a LINK trb is only allowed at the end of a burst of 3042 * transfers - which might be 16kB. 3043 * Arbitrary aligned LINK trb definitely fail on Ivy bridge. 3044 * The simple solution is not to allow a LINK trb in the middle 3045 * of anything - as here. 
3046 * XXX: (dsl) There are xhci controllers out there (eg some made by 3047 * ASMedia) that seem to lock up if they process a LINK trb but 3048 * cannot process the linked-to trb yet. 3049 * The code should write the 'cycle' bit on the link trb AFTER 3050 * adding the other trb. 3051 */ 3052 u_int firstep = xr->xr_ep; 3053 u_int firstcs = xr->xr_cs; 3054 3055 for (i = 0; i < ntrbs; ) { 3056 u_int oldri = ri; 3057 u_int oldcs = cs; 3058 3059 if (ri >= (xr->xr_ntrb - 1)) { 3060 /* Put Link TD at the end of ring */ 3061 parameter = xhci_ring_trbp(xr, 0); 3062 status = 0; 3063 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) | 3064 XHCI_TRB_3_TC_BIT; 3065 xr->xr_cookies[ri] = NULL; 3066 xr->xr_ep = 0; 3067 xr->xr_cs ^= 1; 3068 ri = xr->xr_ep; 3069 cs = xr->xr_cs; 3070 } else { 3071 parameter = trbs[i].trb_0; 3072 status = trbs[i].trb_2; 3073 control = trbs[i].trb_3; 3074 3075 xr->xr_cookies[ri] = cookie; 3076 ri++; 3077 i++; 3078 } 3079 /* 3080 * If this is a first TRB, mark it invalid to prevent 3081 * xHC from running it immediately. 3082 */ 3083 if (oldri == firstep) { 3084 if (oldcs) { 3085 control &= ~XHCI_TRB_3_CYCLE_BIT; 3086 } else { 3087 control |= XHCI_TRB_3_CYCLE_BIT; 3088 } 3089 } else { 3090 if (oldcs) { 3091 control |= XHCI_TRB_3_CYCLE_BIT; 3092 } else { 3093 control &= ~XHCI_TRB_3_CYCLE_BIT; 3094 } 3095 } 3096 xhci_trb_put(&xr->xr_trb[oldri], parameter, status, control); 3097 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * oldri, 3098 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE); 3099 } 3100 3101 /* Now invert cycle bit of first TRB */ 3102 if (firstcs) { 3103 xr->xr_trb[firstep].trb_3 |= htole32(XHCI_TRB_3_CYCLE_BIT); 3104 } else { 3105 xr->xr_trb[firstep].trb_3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT); 3106 } 3107 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * firstep, 3108 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE); 3109 3110 xr->xr_ep = ri; 3111 xr->xr_cs = cs; 3112 3113 DPRINTFN(12, "%#jx xr_ep %#jx xr_cs %ju", (uintptr_t)xr, xr->xr_ep, 3114 xr->xr_cs, 0); 3115 } 3116 3117 static inline void 3118 xhci_ring_put_xfer(struct xhci_softc * const sc, struct xhci_ring * const tr, 3119 struct xhci_xfer *xx, u_int ntrb) 3120 { 3121 KASSERT(ntrb <= xx->xx_ntrb); 3122 xhci_ring_put(sc, tr, xx, xx->xx_trb, ntrb); 3123 } 3124 3125 /* 3126 * Stop execution commands, purge all commands on command ring, and 3127 * rewind dequeue pointer. 3128 */ 3129 static void 3130 xhci_abort_command(struct xhci_softc *sc) 3131 { 3132 struct xhci_ring * const cr = sc->sc_cr; 3133 uint64_t crcr; 3134 int i; 3135 3136 XHCIHIST_FUNC(); 3137 XHCIHIST_CALLARGS("command %#jx timeout, aborting", 3138 sc->sc_command_addr, 0, 0, 0); 3139 3140 mutex_enter(&cr->xr_lock); 3141 3142 /* 4.6.1.2 Aborting a Command */ 3143 crcr = xhci_op_read_8(sc, XHCI_CRCR); 3144 xhci_op_write_8(sc, XHCI_CRCR, crcr | XHCI_CRCR_LO_CA); 3145 3146 for (i = 0; i < 500; i++) { 3147 crcr = xhci_op_read_8(sc, XHCI_CRCR); 3148 if ((crcr & XHCI_CRCR_LO_CRR) == 0) 3149 break; 3150 usb_delay_ms(&sc->sc_bus, 1); 3151 } 3152 if ((crcr & XHCI_CRCR_LO_CRR) != 0) { 3153 DPRINTFN(1, "Command Abort timeout", 0, 0, 0, 0); 3154 /* reset HC here? */ 3155 } 3156 3157 /* reset command ring dequeue pointer */ 3158 cr->xr_ep = 0; 3159 cr->xr_cs = 1; 3160 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(cr, 0) | cr->xr_cs); 3161 3162 mutex_exit(&cr->xr_lock); 3163 } 3164 3165 /* 3166 * Put a command on command ring, ring bell, set timer, and cv_timedwait. 3167 * Command completion is notified by cv_signal from xhci_event_cmd() 3168 * (called from xhci_softint), or timed-out. 
3169 * The completion code is copied to sc->sc_result_trb in xhci_event_cmd(), 3170 * then do_command examines it. 3171 */ 3172 static usbd_status 3173 xhci_do_command_locked(struct xhci_softc * const sc, 3174 struct xhci_soft_trb * const trb, int timeout) 3175 { 3176 struct xhci_ring * const cr = sc->sc_cr; 3177 usbd_status err; 3178 3179 XHCIHIST_FUNC(); 3180 XHCIHIST_CALLARGS("input: 0x%016jx 0x%08jx 0x%08jx", 3181 trb->trb_0, trb->trb_2, trb->trb_3, 0); 3182 3183 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx"); 3184 KASSERT(mutex_owned(&sc->sc_lock)); 3185 3186 while (sc->sc_command_addr != 0 || 3187 (sc->sc_suspender != NULL && sc->sc_suspender != curlwp)) 3188 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock); 3189 3190 /* 3191 * If enqueue pointer points at last of ring, it's Link TRB, 3192 * command TRB will be stored in 0th TRB. 3193 */ 3194 if (cr->xr_ep == cr->xr_ntrb - 1) 3195 sc->sc_command_addr = xhci_ring_trbp(cr, 0); 3196 else 3197 sc->sc_command_addr = xhci_ring_trbp(cr, cr->xr_ep); 3198 3199 sc->sc_resultpending = true; 3200 3201 mutex_enter(&cr->xr_lock); 3202 xhci_ring_put(sc, cr, NULL, trb, 1); 3203 mutex_exit(&cr->xr_lock); 3204 3205 xhci_db_write_4(sc, XHCI_DOORBELL(0), 0); 3206 3207 while (sc->sc_resultpending) { 3208 if (cv_timedwait(&sc->sc_command_cv, &sc->sc_lock, 3209 MAX(1, mstohz(timeout))) == EWOULDBLOCK) { 3210 xhci_abort_command(sc); 3211 err = USBD_TIMEOUT; 3212 goto timedout; 3213 } 3214 } 3215 3216 trb->trb_0 = sc->sc_result_trb.trb_0; 3217 trb->trb_2 = sc->sc_result_trb.trb_2; 3218 trb->trb_3 = sc->sc_result_trb.trb_3; 3219 3220 DPRINTFN(12, "output: 0x%016jx 0x%08jx 0x%08jx", 3221 trb->trb_0, trb->trb_2, trb->trb_3, 0); 3222 3223 switch (XHCI_TRB_2_ERROR_GET(trb->trb_2)) { 3224 case XHCI_TRB_ERROR_SUCCESS: 3225 err = USBD_NORMAL_COMPLETION; 3226 break; 3227 default: 3228 case 192 ... 223: 3229 DPRINTFN(5, "error %#jx", 3230 XHCI_TRB_2_ERROR_GET(trb->trb_2), 0, 0, 0); 3231 err = USBD_IOERROR; 3232 break; 3233 case 224 ... 255: 3234 err = USBD_NORMAL_COMPLETION; 3235 break; 3236 } 3237 3238 timedout: 3239 sc->sc_resultpending = false; 3240 sc->sc_command_addr = 0; 3241 cv_broadcast(&sc->sc_cmdbusy_cv); 3242 3243 return err; 3244 } 3245 3246 static usbd_status 3247 xhci_do_command(struct xhci_softc * const sc, struct xhci_soft_trb * const trb, 3248 int timeout) 3249 { 3250 3251 mutex_enter(&sc->sc_lock); 3252 usbd_status ret = xhci_do_command_locked(sc, trb, timeout); 3253 mutex_exit(&sc->sc_lock); 3254 3255 return ret; 3256 } 3257 3258 static usbd_status 3259 xhci_enable_slot(struct xhci_softc * const sc, uint8_t * const slotp) 3260 { 3261 struct xhci_soft_trb trb; 3262 usbd_status err; 3263 3264 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 3265 3266 trb.trb_0 = 0; 3267 trb.trb_2 = 0; 3268 trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT); 3269 3270 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 3271 if (err != USBD_NORMAL_COMPLETION) { 3272 return err; 3273 } 3274 3275 *slotp = XHCI_TRB_3_SLOT_GET(trb.trb_3); 3276 3277 return err; 3278 } 3279 3280 /* 3281 * xHCI 4.6.4 3282 * Deallocate ring and device/input context DMA buffers, and disable_slot. 3283 * All endpoints in the slot should be stopped. 3284 * Should be called with sc_lock held. 
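 * On success the slot's DMA buffers are freed, its DCBAA entry is
 * cleared, and the xhci_slot is zeroed, so its xs_idx reads as 0
 * ("slot unused") from then on.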
3285 */ 3286 static usbd_status 3287 xhci_disable_slot(struct xhci_softc * const sc, uint8_t slot) 3288 { 3289 struct xhci_soft_trb trb; 3290 struct xhci_slot *xs; 3291 usbd_status err; 3292 3293 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 3294 3295 if (sc->sc_dying) 3296 return USBD_IOERROR; 3297 3298 trb.trb_0 = 0; 3299 trb.trb_2 = 0; 3300 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot) | 3301 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT); 3302 3303 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 3304 3305 if (!err) { 3306 xs = &sc->sc_slots[slot]; 3307 if (xs->xs_idx != 0) { 3308 xhci_free_slot(sc, xs); 3309 xhci_set_dcba(sc, 0, slot); 3310 memset(xs, 0, sizeof(*xs)); 3311 } 3312 } 3313 3314 return err; 3315 } 3316 3317 /* 3318 * Set address of device and transition slot state from ENABLED to ADDRESSED 3319 * if Block Setaddress Request (BSR) is false. 3320 * If BSR==true, transition slot state from ENABLED to DEFAULT. 3321 * see xHCI 1.1 4.5.3, 3.3.4 3322 * Should be called without sc_lock held. 3323 */ 3324 static usbd_status 3325 xhci_address_device(struct xhci_softc * const sc, 3326 uint64_t icp, uint8_t slot_id, bool bsr) 3327 { 3328 struct xhci_soft_trb trb; 3329 usbd_status err; 3330 3331 XHCIHIST_FUNC(); 3332 if (bsr) { 3333 XHCIHIST_CALLARGS("icp %#jx slot %#jx with bsr", 3334 icp, slot_id, 0, 0); 3335 } else { 3336 XHCIHIST_CALLARGS("icp %#jx slot %#jx nobsr", 3337 icp, slot_id, 0, 0); 3338 } 3339 3340 trb.trb_0 = icp; 3341 trb.trb_2 = 0; 3342 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot_id) | 3343 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) | 3344 (bsr ? XHCI_TRB_3_BSR_BIT : 0); 3345 3346 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 3347 3348 if (XHCI_TRB_2_ERROR_GET(trb.trb_2) == XHCI_TRB_ERROR_NO_SLOTS) 3349 err = USBD_NO_ADDR; 3350 3351 return err; 3352 } 3353 3354 static usbd_status 3355 xhci_update_ep0_mps(struct xhci_softc * const sc, 3356 struct xhci_slot * const xs, u_int mps) 3357 { 3358 struct xhci_soft_trb trb; 3359 usbd_status err; 3360 uint32_t * cp; 3361 3362 XHCIHIST_FUNC(); 3363 XHCIHIST_CALLARGS("slot %ju mps %ju", xs->xs_idx, mps, 0, 0); 3364 3365 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL); 3366 cp[0] = htole32(0); 3367 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_EP_CONTROL)); 3368 3369 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_EP_CONTROL)); 3370 cp[1] = htole32(XHCI_EPCTX_1_MAXP_SIZE_SET(mps)); 3371 3372 /* sync input contexts before they are read from memory */ 3373 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE); 3374 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0), 3375 sc->sc_ctxsz * 4); 3376 3377 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0); 3378 trb.trb_2 = 0; 3379 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 3380 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX); 3381 3382 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 3383 return err; 3384 } 3385 3386 static void 3387 xhci_set_dcba(struct xhci_softc * const sc, uint64_t dcba, int si) 3388 { 3389 uint64_t * const dcbaa = KERNADDR(&sc->sc_dcbaa_dma, 0); 3390 3391 XHCIHIST_FUNC(); 3392 XHCIHIST_CALLARGS("dcbaa %#jx dc 0x%016jx slot %jd", 3393 (uintptr_t)&dcbaa[si], dcba, si, 0); 3394 3395 dcbaa[si] = htole64(dcba); 3396 usb_syncmem(&sc->sc_dcbaa_dma, si * sizeof(uint64_t), sizeof(uint64_t), 3397 BUS_DMASYNC_PREWRITE); 3398 } 3399 3400 /* 3401 * Allocate device and input context DMA buffer, and 3402 * TRB DMA buffer for each endpoint. 
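 * The per-endpoint transfer rings themselves are allocated when each
 * pipe is opened in xhci_open.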
3403 */ 3404 static usbd_status 3405 xhci_init_slot(struct usbd_device *dev, uint32_t slot) 3406 { 3407 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus); 3408 struct xhci_slot *xs; 3409 3410 XHCIHIST_FUNC(); 3411 XHCIHIST_CALLARGS("slot %ju", slot, 0, 0, 0); 3412 3413 xs = &sc->sc_slots[slot]; 3414 3415 /* allocate contexts */ 3416 int err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz, 3417 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_dc_dma); 3418 if (err) { 3419 DPRINTFN(1, "failed to allocmem output device context %jd", 3420 err, 0, 0, 0); 3421 return USBD_NOMEM; 3422 } 3423 3424 err = usb_allocmem(sc->sc_bus.ub_dmatag, sc->sc_pgsz, sc->sc_pgsz, 3425 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_ic_dma); 3426 if (err) { 3427 DPRINTFN(1, "failed to allocmem input device context %jd", 3428 err, 0, 0, 0); 3429 goto bad1; 3430 } 3431 3432 memset(&xs->xs_xr[0], 0, sizeof(xs->xs_xr)); 3433 xs->xs_idx = slot; 3434 3435 return USBD_NORMAL_COMPLETION; 3436 3437 bad1: 3438 usb_freemem(&xs->xs_dc_dma); 3439 xs->xs_idx = 0; 3440 return USBD_NOMEM; 3441 } 3442 3443 static void 3444 xhci_free_slot(struct xhci_softc *sc, struct xhci_slot *xs) 3445 { 3446 u_int dci; 3447 3448 XHCIHIST_FUNC(); 3449 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0); 3450 3451 /* deallocate all allocated rings in the slot */ 3452 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) { 3453 if (xs->xs_xr[dci] != NULL) 3454 xhci_ring_free(sc, &xs->xs_xr[dci]); 3455 } 3456 usb_freemem(&xs->xs_ic_dma); 3457 usb_freemem(&xs->xs_dc_dma); 3458 xs->xs_idx = 0; 3459 } 3460 3461 /* 3462 * Setup slot context, set Device Context Base Address, and issue 3463 * Set Address Device command. 3464 */ 3465 static usbd_status 3466 xhci_set_address(struct usbd_device *dev, uint32_t slot, bool bsr) 3467 { 3468 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus); 3469 struct xhci_slot *xs; 3470 usbd_status err; 3471 3472 XHCIHIST_FUNC(); 3473 XHCIHIST_CALLARGS("slot %ju bsr %ju", slot, bsr, 0, 0); 3474 3475 xs = &sc->sc_slots[slot]; 3476 3477 xhci_setup_ctx(dev->ud_pipe0); 3478 3479 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0), 3480 sc->sc_ctxsz * 3); 3481 3482 xhci_set_dcba(sc, DMAADDR(&xs->xs_dc_dma, 0), slot); 3483 3484 err = xhci_address_device(sc, xhci_slot_get_icp(sc, xs, 0), slot, bsr); 3485 3486 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 3487 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, 0), 3488 sc->sc_ctxsz * 2); 3489 3490 return err; 3491 } 3492 3493 /* 3494 * 4.8.2, 6.2.3.2 3495 * construct slot/endpoint context parameters and do syncmem 3496 */ 3497 static void 3498 xhci_setup_ctx(struct usbd_pipe *pipe) 3499 { 3500 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 3501 struct usbd_device *dev = pipe->up_dev; 3502 struct xhci_slot * const xs = dev->ud_hcpriv; 3503 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 3504 const u_int dci = xhci_ep_get_dci(ed); 3505 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 3506 uint32_t *cp; 3507 uint16_t mps = UGETW(ed->wMaxPacketSize); 3508 uint8_t speed = dev->ud_speed; 3509 uint8_t ival = ed->bInterval; 3510 3511 XHCIHIST_FUNC(); 3512 XHCIHIST_CALLARGS("pipe %#jx: slot %ju dci %ju speed %ju", 3513 (uintptr_t)pipe, xs->xs_idx, dci, speed); 3514 3515 /* set up initial input control context */ 3516 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL); 3517 cp[0] = htole32(0); 3518 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(dci)); 3519 cp[1] |= htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_SLOT)); 3520 cp[7] = 
htole32(0); 3521 3522 /* set up input slot context */ 3523 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT)); 3524 cp[0] = 3525 XHCI_SCTX_0_CTX_NUM_SET(dci) | 3526 XHCI_SCTX_0_SPEED_SET(xhci_speed2xspeed(speed)); 3527 cp[1] = 0; 3528 cp[2] = XHCI_SCTX_2_IRQ_TARGET_SET(0); 3529 cp[3] = 0; 3530 xhci_setup_route(pipe, cp); 3531 xhci_setup_tthub(pipe, cp); 3532 3533 cp[0] = htole32(cp[0]); 3534 cp[1] = htole32(cp[1]); 3535 cp[2] = htole32(cp[2]); 3536 cp[3] = htole32(cp[3]); 3537 3538 /* set up input endpoint context */ 3539 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(dci)); 3540 cp[0] = 3541 XHCI_EPCTX_0_EPSTATE_SET(0) | 3542 XHCI_EPCTX_0_MULT_SET(0) | 3543 XHCI_EPCTX_0_MAXP_STREAMS_SET(0) | 3544 XHCI_EPCTX_0_LSA_SET(0) | 3545 XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(0); 3546 cp[1] = 3547 XHCI_EPCTX_1_EPTYPE_SET(xhci_ep_get_type(ed)) | 3548 XHCI_EPCTX_1_HID_SET(0) | 3549 XHCI_EPCTX_1_MAXB_SET(0); 3550 3551 if (xfertype != UE_ISOCHRONOUS) 3552 cp[1] |= XHCI_EPCTX_1_CERR_SET(3); 3553 3554 if (xfertype == UE_CONTROL) 3555 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(8); /* 6.2.3 */ 3556 else if (USB_IS_SS(speed)) 3557 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(mps); 3558 else 3559 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(UE_GET_SIZE(mps)); 3560 3561 xhci_setup_maxburst(pipe, cp); 3562 3563 switch (xfertype) { 3564 case UE_CONTROL: 3565 break; 3566 case UE_BULK: 3567 /* XXX Set MaxPStreams, HID, and LSA if streams enabled */ 3568 break; 3569 case UE_INTERRUPT: 3570 if (pipe->up_interval != USBD_DEFAULT_INTERVAL) 3571 ival = pipe->up_interval; 3572 3573 ival = xhci_bival2ival(ival, speed); 3574 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival); 3575 break; 3576 case UE_ISOCHRONOUS: 3577 if (pipe->up_interval != USBD_DEFAULT_INTERVAL) 3578 ival = pipe->up_interval; 3579 3580 /* xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6 */ 3581 if (speed == USB_SPEED_FULL) 3582 ival += 3; /* 1ms -> 125us */ 3583 ival--; 3584 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival); 3585 break; 3586 default: 3587 break; 3588 } 3589 DPRINTFN(4, "setting ival %ju MaxBurst %#jx", 3590 XHCI_EPCTX_0_IVAL_GET(cp[0]), XHCI_EPCTX_1_MAXB_GET(cp[1]), 0, 0); 3591 3592 /* rewind TR dequeue pointer in xHC */ 3593 /* can't use xhci_ep_get_dci() yet? 
*/ 3594 *(uint64_t *)(&cp[2]) = htole64( 3595 xhci_ring_trbp(xs->xs_xr[dci], 0) | 3596 XHCI_EPCTX_2_DCS_SET(1)); 3597 3598 cp[0] = htole32(cp[0]); 3599 cp[1] = htole32(cp[1]); 3600 cp[4] = htole32(cp[4]); 3601 3602 /* rewind TR dequeue pointer in driver */ 3603 struct xhci_ring *xr = xs->xs_xr[dci]; 3604 mutex_enter(&xr->xr_lock); 3605 xhci_host_dequeue(xr); 3606 mutex_exit(&xr->xr_lock); 3607 3608 /* sync input contexts before they are read from memory */ 3609 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE); 3610 } 3611 3612 /* 3613 * Setup route string and roothub port of given device for slot context 3614 */ 3615 static void 3616 xhci_setup_route(struct usbd_pipe *pipe, uint32_t *cp) 3617 { 3618 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 3619 struct usbd_device *dev = pipe->up_dev; 3620 struct usbd_port *up = dev->ud_powersrc; 3621 struct usbd_device *hub; 3622 struct usbd_device *adev; 3623 uint8_t rhport = 0; 3624 uint32_t route = 0; 3625 3626 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 3627 3628 /* Locate root hub port and Determine route string */ 3629 /* 4.3.3 route string does not include roothub port */ 3630 for (hub = dev; hub != NULL; hub = hub->ud_myhub) { 3631 uint32_t dep; 3632 3633 DPRINTFN(4, "hub %#jx depth %jd upport %#jx upportno %jd", 3634 (uintptr_t)hub, hub->ud_depth, (uintptr_t)hub->ud_powersrc, 3635 hub->ud_powersrc ? (uintptr_t)hub->ud_powersrc->up_portno : 3636 -1); 3637 3638 if (hub->ud_powersrc == NULL) 3639 break; 3640 dep = hub->ud_depth; 3641 if (dep == 0) 3642 break; 3643 rhport = hub->ud_powersrc->up_portno; 3644 if (dep > USB_HUB_MAX_DEPTH) 3645 continue; 3646 3647 route |= 3648 (rhport > UHD_SS_NPORTS_MAX ? UHD_SS_NPORTS_MAX : rhport) 3649 << ((dep - 1) * 4); 3650 } 3651 route = route >> 4; 3652 size_t bn = hub == sc->sc_bus.ub_roothub ? 0 : 1; 3653 3654 /* Locate port on upstream high speed hub */ 3655 for (adev = dev, hub = up->up_parent; 3656 hub != NULL && hub->ud_speed != USB_SPEED_HIGH; 3657 adev = hub, hub = hub->ud_myhub) 3658 ; 3659 if (hub) { 3660 int p; 3661 for (p = 1; p <= hub->ud_hub->uh_hubdesc.bNbrPorts; p++) { 3662 if (hub->ud_hub->uh_ports[p - 1].up_dev == adev) { 3663 dev->ud_myhsport = &hub->ud_hub->uh_ports[p - 1]; 3664 goto found; 3665 } 3666 } 3667 panic("%s: cannot find HS port", __func__); 3668 found: 3669 DPRINTFN(4, "high speed port %jd", p, 0, 0, 0); 3670 } else { 3671 dev->ud_myhsport = NULL; 3672 } 3673 3674 const size_t ctlrport = xhci_rhport2ctlrport(sc, bn, rhport); 3675 3676 DPRINTFN(4, "rhport %ju ctlrport %ju Route %05jx hub %#jx", rhport, 3677 ctlrport, route, (uintptr_t)hub); 3678 3679 cp[0] |= XHCI_SCTX_0_ROUTE_SET(route); 3680 cp[1] |= XHCI_SCTX_1_RH_PORT_SET(ctlrport); 3681 } 3682 3683 /* 3684 * Setup whether device is hub, whether device uses MTT, and 3685 * TT informations if it uses MTT. 3686 */ 3687 static void 3688 xhci_setup_tthub(struct usbd_pipe *pipe, uint32_t *cp) 3689 { 3690 struct usbd_device *dev = pipe->up_dev; 3691 struct usbd_port *myhsport = dev->ud_myhsport; 3692 usb_device_descriptor_t * const dd = &dev->ud_ddesc; 3693 uint32_t speed = dev->ud_speed; 3694 uint8_t rhaddr = dev->ud_bus->ub_rhaddr; 3695 uint8_t tthubslot, ttportnum; 3696 bool ishub; 3697 bool usemtt; 3698 3699 XHCIHIST_FUNC(); 3700 3701 /* 3702 * 6.2.2, Table 57-60, 6.2.2.1, 6.2.2.2 3703 * tthubslot: 3704 * This is the slot ID of parent HS hub 3705 * if LS/FS device is connected && connected through HS hub. 
3706 * This is 0 if device is not LS/FS device || 3707 * parent hub is not HS hub || 3708 * attached to root hub. 3709 * ttportnum: 3710 * This is the downstream facing port of parent HS hub 3711 * if LS/FS device is connected. 3712 * This is 0 if device is not LS/FS device || 3713 * parent hub is not HS hub || 3714 * attached to root hub. 3715 */ 3716 if (myhsport && 3717 myhsport->up_parent->ud_addr != rhaddr && 3718 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) { 3719 ttportnum = myhsport->up_portno; 3720 tthubslot = myhsport->up_parent->ud_addr; 3721 } else { 3722 ttportnum = 0; 3723 tthubslot = 0; 3724 } 3725 XHCIHIST_CALLARGS("myhsport %#jx ttportnum=%jd tthubslot=%jd", 3726 (uintptr_t)myhsport, ttportnum, tthubslot, 0); 3727 3728 /* ishub is valid after reading UDESC_DEVICE */ 3729 ishub = (dd->bDeviceClass == UDCLASS_HUB); 3730 3731 /* dev->ud_hub is valid after reading UDESC_HUB */ 3732 if (ishub && dev->ud_hub) { 3733 usb_hub_descriptor_t *hd = &dev->ud_hub->uh_hubdesc; 3734 uint8_t ttt = 3735 __SHIFTOUT(UGETW(hd->wHubCharacteristics), UHD_TT_THINK); 3736 3737 cp[1] |= XHCI_SCTX_1_NUM_PORTS_SET(hd->bNbrPorts); 3738 cp[2] |= XHCI_SCTX_2_TT_THINK_TIME_SET(ttt); 3739 DPRINTFN(4, "nports=%jd ttt=%jd", hd->bNbrPorts, ttt, 0, 0); 3740 } 3741 3742 #define IS_MTTHUB(dd) \ 3743 ((dd)->bDeviceProtocol == UDPROTO_HSHUBMTT) 3744 3745 /* 3746 * MTT flag is set if 3747 * 1. this is HS hub && MTTs are supported and enabled; or 3748 * 2. this is LS or FS device && there is a parent HS hub where MTTs 3749 * are supported and enabled. 3750 * 3751 * XXX enabled is not tested yet 3752 */ 3753 if (ishub && speed == USB_SPEED_HIGH && IS_MTTHUB(dd)) 3754 usemtt = true; 3755 else if ((speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) && 3756 myhsport && 3757 myhsport->up_parent->ud_addr != rhaddr && 3758 IS_MTTHUB(&myhsport->up_parent->ud_ddesc)) 3759 usemtt = true; 3760 else 3761 usemtt = false; 3762 DPRINTFN(4, "class %ju proto %ju ishub %jd usemtt %jd", 3763 dd->bDeviceClass, dd->bDeviceProtocol, ishub, usemtt); 3764 3765 #undef IS_MTTHUB 3766 3767 cp[0] |= 3768 XHCI_SCTX_0_HUB_SET(ishub ? 1 : 0) | 3769 XHCI_SCTX_0_MTT_SET(usemtt ? 
1 : 0); 3770 cp[2] |= 3771 XHCI_SCTX_2_TT_HUB_SID_SET(tthubslot) | 3772 XHCI_SCTX_2_TT_PORT_NUM_SET(ttportnum); 3773 } 3774 3775 /* set up params for periodic endpoint */ 3776 static void 3777 xhci_setup_maxburst(struct usbd_pipe *pipe, uint32_t *cp) 3778 { 3779 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe; 3780 struct usbd_device *dev = pipe->up_dev; 3781 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 3782 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 3783 usbd_desc_iter_t iter; 3784 const usb_cdc_descriptor_t *cdcd; 3785 uint32_t maxb = 0; 3786 uint16_t mps = UGETW(ed->wMaxPacketSize); 3787 uint8_t speed = dev->ud_speed; 3788 uint8_t mult = 0; 3789 uint8_t ep; 3790 3791 /* config desc is NULL when opening ep0 */ 3792 if (dev == NULL || dev->ud_cdesc == NULL) 3793 goto no_cdcd; 3794 cdcd = (const usb_cdc_descriptor_t *)usb_find_desc(dev, 3795 UDESC_INTERFACE, USBD_CDCSUBTYPE_ANY); 3796 if (cdcd == NULL) 3797 goto no_cdcd; 3798 usb_desc_iter_init(dev, &iter); 3799 iter.cur = (const void *)cdcd; 3800 3801 /* find endpoint_ss_comp desc for ep of this pipe */ 3802 for (ep = 0;;) { 3803 cdcd = (const usb_cdc_descriptor_t *)usb_desc_iter_next(&iter); 3804 if (cdcd == NULL) 3805 break; 3806 if (ep == 0 && cdcd->bDescriptorType == UDESC_ENDPOINT) { 3807 ep = ((const usb_endpoint_descriptor_t *)cdcd)-> 3808 bEndpointAddress; 3809 if (UE_GET_ADDR(ep) == 3810 UE_GET_ADDR(ed->bEndpointAddress)) { 3811 cdcd = (const usb_cdc_descriptor_t *) 3812 usb_desc_iter_next(&iter); 3813 break; 3814 } 3815 ep = 0; 3816 } 3817 } 3818 if (cdcd != NULL && cdcd->bDescriptorType == UDESC_ENDPOINT_SS_COMP) { 3819 const usb_endpoint_ss_comp_descriptor_t * esscd = 3820 (const usb_endpoint_ss_comp_descriptor_t *)cdcd; 3821 maxb = esscd->bMaxBurst; 3822 mult = UE_GET_SS_ISO_MULT(esscd->bmAttributes); 3823 } 3824 3825 no_cdcd: 3826 /* 6.2.3.4, 4.8.2.4 */ 3827 if (USB_IS_SS(speed)) { 3828 /* USB 3.1 9.6.6 */ 3829 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(mps); 3830 /* USB 3.1 9.6.7 */ 3831 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb); 3832 #ifdef notyet 3833 if (xfertype == UE_ISOCHRONOUS) { 3834 } 3835 if (XHCI_HCC2_LEC(sc->sc_hcc2) != 0) { 3836 /* use ESIT */ 3837 cp[4] |= XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(x); 3838 cp[0] |= XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(x); 3839 3840 /* XXX if LEC = 1, set ESIT instead */ 3841 cp[0] |= XHCI_EPCTX_0_MULT_SET(0); 3842 } else { 3843 /* use ival */ 3844 } 3845 #endif 3846 } else { 3847 /* USB 2.0 9.6.6 */ 3848 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(UE_GET_SIZE(mps)); 3849 3850 /* 6.2.3.4 */ 3851 if (speed == USB_SPEED_HIGH && 3852 (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)) { 3853 maxb = UE_GET_TRANS(mps); 3854 } else { 3855 /* LS/FS or HS CTRL or HS BULK */ 3856 maxb = 0; 3857 } 3858 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb); 3859 } 3860 xpipe->xp_maxb = maxb + 1; 3861 xpipe->xp_mult = mult + 1; 3862 } 3863 3864 /* 3865 * Convert endpoint bInterval value to endpoint context interval value 3866 * for Interrupt pipe. 3867 * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6 3868 */ 3869 static uint32_t 3870 xhci_bival2ival(uint32_t ival, uint32_t speed) 3871 { 3872 if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) { 3873 int i; 3874 3875 /* 3876 * round ival down to "the nearest base 2 multiple of 3877 * bInterval * 8". 3878 * bInterval is at most 255 as its type is uByte. 3879 * 255(ms) = 2040(x 125us) < 2^11, so start with 10. 
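		 *
		 * For example, bInterval 10 (10ms) on a FS interrupt
		 * endpoint gives ival * 8 = 80; the largest power of two
		 * not exceeding that is 2^6 = 64, so the resulting context
		 * Interval is 6, i.e. 64 * 125us = 8ms.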

/*
 * Convert endpoint bInterval value to endpoint context interval value
 * for Interrupt pipe.
 * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6
 */
static uint32_t
xhci_bival2ival(uint32_t ival, uint32_t speed)
{
	if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) {
		int i;

		/*
		 * round ival down to "the nearest base 2 multiple of
		 * bInterval * 8".
		 * bInterval is at most 255 as its type is uByte.
		 * 255(ms) = 2040(x 125us) < 2^11, so start with 10.
		 */
		for (i = 10; i > 0; i--) {
			if ((ival * 8) >= (1 << i))
				break;
		}
		ival = i;
	} else {
		/* Interval = bInterval-1 for SS/HS */
		ival--;
	}

	return ival;
}
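
/*
 * Editor's worked example (not part of the driver): a full-speed
 * interrupt endpoint with bInterval = 10 (frames) gives 10 * 8 = 80
 * 125us units; the largest power of two not exceeding 80 is 2^6 = 64,
 * so the returned interval is 6 (i.e. 8 ms).  For a high-speed
 * endpoint with bInterval = 4 the result is simply 4 - 1 = 3,
 * i.e. 2^3 = 8 microframes (1 ms).
 */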

/* ----- */

static void
xhci_noop(struct usbd_pipe *pipe)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();
}

/*
 * Process root hub request.
 */
static int
xhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
    void *buf, int buflen)
{
	struct xhci_softc * const sc = XHCI_BUS2SC(bus);
	usb_port_status_t ps;
	int l, totlen = 0;
	uint16_t len, value, index;
	int port, i;
	uint32_t v;

	XHCIHIST_FUNC();

	if (sc->sc_dying)
		return -1;

	size_t bn = bus == &sc->sc_bus ? 0 : 1;

	len = UGETW(req->wLength);
	value = UGETW(req->wValue);
	index = UGETW(req->wIndex);

	XHCIHIST_CALLARGS("rhreq: %04jx %04jx %04jx %04jx",
	    req->bmRequestType | (req->bRequest << 8), value, index, len);

#define C(x,y) ((x) | ((y) << 8))
	switch (C(req->bRequest, req->bmRequestType)) {
	case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
		DPRINTFN(8, "getdesc: wValue=0x%04jx", value, 0, 0, 0);
		if (len == 0)
			break;
		switch (value) {
#define sd ((usb_string_descriptor_t *)buf)
		case C(2, UDESC_STRING):
			/* Product */
			totlen = usb_makestrdesc(sd, len, "xHCI root hub");
			break;
#undef sd
		default:
			/* default from usbroothub */
			return buflen;
		}
		break;

	/* Hub requests */
	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
		break;
	/* Clear Port Feature request */
	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): {
		const size_t cp = xhci_rhport2ctlrport(sc, bn, index);

		DPRINTFN(4, "UR_CLEAR_PORT_FEAT bp=%jd feat=%jd bus=%jd cp=%jd",
		    index, value, bn, cp);
		if (index < 1 || index > sc->sc_rhportcount[bn]) {
			return -1;
		}
		port = XHCI_PORTSC(cp);
		v = xhci_op_read_4(sc, port);
		DPRINTFN(4, "portsc=0x%08jx", v, 0, 0, 0);
		v &= ~XHCI_PS_CLEAR;
		switch (value) {
		case UHF_PORT_ENABLE:
			xhci_op_write_4(sc, port, v & ~XHCI_PS_PED);
			break;
		case UHF_PORT_SUSPEND:
			return -1;
		case UHF_PORT_POWER:
			break;
		case UHF_PORT_TEST:
		case UHF_PORT_INDICATOR:
			return -1;
		case UHF_C_PORT_CONNECTION:
			xhci_op_write_4(sc, port, v | XHCI_PS_CSC);
			break;
		case UHF_C_PORT_ENABLE:
		case UHF_C_PORT_SUSPEND:
		case UHF_C_PORT_OVER_CURRENT:
			return -1;
		case UHF_C_BH_PORT_RESET:
			xhci_op_write_4(sc, port, v | XHCI_PS_WRC);
			break;
		case UHF_C_PORT_RESET:
			xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
			break;
		case UHF_C_PORT_LINK_STATE:
			xhci_op_write_4(sc, port, v | XHCI_PS_PLC);
			break;
		case UHF_C_PORT_CONFIG_ERROR:
			xhci_op_write_4(sc, port, v | XHCI_PS_CEC);
			break;
		default:
			return -1;
		}
		break;
	}
	case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
		if (len == 0)
			break;
		if ((value & 0xff) != 0) {
			return -1;
		}
		usb_hub_descriptor_t hubd;

		totlen = uimin(buflen, sizeof(hubd));
		memcpy(&hubd, buf, totlen);
		hubd.bNbrPorts = sc->sc_rhportcount[bn];
		USETW(hubd.wHubCharacteristics, UHD_PWR_NO_SWITCH);
		hubd.bPwrOn2PwrGood = 200;
		for (i = 0, l = sc->sc_rhportcount[bn]; l > 0; i++, l -= 8) {
			/* XXX can't find out? */
			hubd.DeviceRemovable[i++] = 0;
		}
		hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
		totlen = uimin(totlen, hubd.bDescLength);
		memcpy(buf, &hubd, totlen);
		break;
	case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
		if (len != 4) {
			return -1;
		}
		memset(buf, 0, len); /* ? XXX */
		totlen = len;
		break;
	/* Get Port Status request */
	case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): {
		const size_t cp = xhci_rhport2ctlrport(sc, bn, index);

		DPRINTFN(8, "get port status bn=%jd i=%jd cp=%ju",
		    bn, index, cp, 0);
		if (index < 1 || index > sc->sc_rhportcount[bn]) {
			DPRINTFN(5, "bad get port status: index=%jd bn=%jd "
			    "portcount=%jd",
			    index, bn, sc->sc_rhportcount[bn], 0);
			return -1;
		}
		if (len != 4) {
			DPRINTFN(5, "bad get port status: len %jd != 4",
			    len, 0, 0, 0);
			return -1;
		}
		v = xhci_op_read_4(sc, XHCI_PORTSC(cp));
		DPRINTFN(4, "getrhportsc %jd 0x%08jx", cp, v, 0, 0);
		i = xhci_xspeed2psspeed(XHCI_PS_SPEED_GET(v));
		if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS;
		if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED;
		if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR;
		//if (v & XHCI_PS_SUSP) i |= UPS_SUSPEND;
		if (v & XHCI_PS_PR) i |= UPS_RESET;
		if (v & XHCI_PS_PP) {
			if (i & UPS_OTHER_SPEED)
				i |= UPS_PORT_POWER_SS;
			else
				i |= UPS_PORT_POWER;
		}
		if (i & UPS_OTHER_SPEED)
			i |= UPS_PORT_LS_SET(XHCI_PS_PLS_GET(v));
		if (sc->sc_vendor_port_status)
			i = sc->sc_vendor_port_status(sc, v, i);
		USETW(ps.wPortStatus, i);
		i = 0;
		if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS;
		if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED;
		if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR;
		if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET;
		if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET;
		if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE;
		if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR;
		USETW(ps.wPortChange, i);
		totlen = uimin(len, sizeof(ps));
		memcpy(buf, &ps, totlen);
		DPRINTFN(5, "get port status: wPortStatus %#jx wPortChange %#jx"
		    " totlen %jd",
		    UGETW(ps.wPortStatus), UGETW(ps.wPortChange), totlen, 0);
		break;
	}
	case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
		return -1;
	case C(UR_SET_HUB_DEPTH, UT_WRITE_CLASS_DEVICE):
		break;
	case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
		break;
	/* Set Port Feature request */
	case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): {
		int optval = (index >> 8) & 0xff;
		index &= 0xff;
		if (index < 1 || index > sc->sc_rhportcount[bn]) {
			return -1;
		}

		const size_t cp = xhci_rhport2ctlrport(sc, bn, index);

		port = XHCI_PORTSC(cp);
		v = xhci_op_read_4(sc, port);
		DPRINTFN(4, "index %jd cp %jd portsc=0x%08jx", index, cp, v, 0);
		v &= ~XHCI_PS_CLEAR;
		switch (value) {
		case UHF_PORT_ENABLE:
			xhci_op_write_4(sc, port, v | XHCI_PS_PED);
			break;
		case UHF_PORT_SUSPEND:
			/* XXX suspend */
			break;
		case UHF_PORT_RESET:
			xhci_op_write_4(sc, port, v | XHCI_PS_PR);
			/* Wait for reset to complete. */
			for (i = 0; i < USB_PORT_ROOT_RESET_DELAY / 10; i++) {
				if (sc->sc_dying) {
					return -1;
				}
				v = xhci_op_read_4(sc, port);
				if ((v & XHCI_PS_PR) == 0) {
					break;
				}
				usb_delay_ms(&sc->sc_bus, 10);
			}
			break;
		case UHF_PORT_POWER:
			/* XXX power control */
			break;
		/* XXX more */
		case UHF_C_PORT_RESET:
			xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
			break;
		case UHF_PORT_U1_TIMEOUT:
			if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
				return -1;
			}
			port = XHCI_PORTPMSC(cp);
			v = xhci_op_read_4(sc, port);
			DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
			    index, cp, v, 0);
			v &= ~XHCI_PM3_U1TO_SET(0xff);
			v |= XHCI_PM3_U1TO_SET(optval);
			xhci_op_write_4(sc, port, v);
			break;
		case UHF_PORT_U2_TIMEOUT:
			if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
				return -1;
			}
			port = XHCI_PORTPMSC(cp);
			v = xhci_op_read_4(sc, port);
			DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
			    index, cp, v, 0);
			v &= ~XHCI_PM3_U2TO_SET(0xff);
			v |= XHCI_PM3_U2TO_SET(optval);
			xhci_op_write_4(sc, port, v);
			break;
		default:
			return -1;
		}
	}
		break;
	case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
	case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
	case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
	case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
		break;
	default:
		/* default from usbroothub */
		return buflen;
	}

	return totlen;
}
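
/*
 * Editor's illustration (not part of the driver): a ClearPortFeature
 * (C_PORT_RESET) request for root hub port 2 arrives from usbroothub
 * with bmRequestType = UT_WRITE_CLASS_OTHER, bRequest = UR_CLEAR_FEATURE,
 * wValue = UHF_C_PORT_RESET and wIndex = 2.  The switch above dispatches
 * on C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER), maps the root hub port
 * number to a controller port with xhci_rhport2ctlrport(), and
 * acknowledges the change by writing the write-1-to-clear bit
 * XHCI_PS_PRC back to that port's PORTSC register.
 */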

/* root hub interrupt */

static usbd_status
xhci_root_intr_transfer(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	usbd_status err;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mutex_exit(&sc->sc_lock);
	if (err)
		return err;

	/* Pipe isn't running, start first */
	return xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
}

/* Wait for roothub port status/change */
static usbd_status
xhci_root_intr_start(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
	const bool polling = xhci_polling_p(sc);

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	if (sc->sc_dying)
		return USBD_IOERROR;

	if (!polling)
		mutex_enter(&sc->sc_lock);
	KASSERT(sc->sc_intrxfer[bn] == NULL);
	sc->sc_intrxfer[bn] = xfer;
	xfer->ux_status = USBD_IN_PROGRESS;
	if (!polling)
		mutex_exit(&sc->sc_lock);

	return USBD_IN_PROGRESS;
}

static void
xhci_root_intr_abort(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	KASSERT(mutex_owned(&sc->sc_lock));
	KASSERT(xfer->ux_pipe->up_intrxfer == xfer);

	/* If xfer has already completed, nothing to do here. */
	if (sc->sc_intrxfer[bn] == NULL)
		return;

	/*
	 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer.
	 * Cancel it.
	 */
	KASSERT(sc->sc_intrxfer[bn] == xfer);
	xfer->ux_status = USBD_CANCELLED;
	usb_transfer_complete(xfer);
}

static void
xhci_root_intr_close(struct usbd_pipe *pipe)
{
	struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe);
	const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer;
	const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	KASSERT(mutex_owned(&sc->sc_lock));

	/*
	 * Caller must guarantee the xfer has completed first, by
	 * closing the pipe only after normal completion or an abort.
	 */
	KASSERT(sc->sc_intrxfer[bn] == NULL);
}

static void
xhci_root_intr_done(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	KASSERT(mutex_owned(&sc->sc_lock));

	/* Claim the xfer so it doesn't get completed again. */
	KASSERT(sc->sc_intrxfer[bn] == xfer);
	KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
	sc->sc_intrxfer[bn] = NULL;
}
4233 */ 4234 KASSERT(sc->sc_intrxfer[bn] == xfer); 4235 xfer->ux_status = USBD_CANCELLED; 4236 usb_transfer_complete(xfer); 4237 } 4238 4239 static void 4240 xhci_root_intr_close(struct usbd_pipe *pipe) 4241 { 4242 struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe); 4243 const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer; 4244 const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1; 4245 4246 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4247 4248 KASSERT(mutex_owned(&sc->sc_lock)); 4249 4250 /* 4251 * Caller must guarantee the xfer has completed first, by 4252 * closing the pipe only after normal completion or an abort. 4253 */ 4254 KASSERT(sc->sc_intrxfer[bn] == NULL); 4255 } 4256 4257 static void 4258 xhci_root_intr_done(struct usbd_xfer *xfer) 4259 { 4260 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4261 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1; 4262 4263 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4264 4265 KASSERT(mutex_owned(&sc->sc_lock)); 4266 4267 /* Claim the xfer so it doesn't get completed again. */ 4268 KASSERT(sc->sc_intrxfer[bn] == xfer); 4269 KASSERT(xfer->ux_status != USBD_IN_PROGRESS); 4270 sc->sc_intrxfer[bn] = NULL; 4271 } 4272 4273 /* -------------- */ 4274 /* device control */ 4275 4276 static usbd_status 4277 xhci_device_ctrl_transfer(struct usbd_xfer *xfer) 4278 { 4279 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4280 usbd_status err; 4281 4282 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 4283 4284 /* Insert last in queue. */ 4285 mutex_enter(&sc->sc_lock); 4286 err = usb_insert_transfer(xfer); 4287 mutex_exit(&sc->sc_lock); 4288 if (err) 4289 return err; 4290 4291 /* Pipe isn't running, start first */ 4292 return xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 4293 } 4294 4295 static usbd_status 4296 xhci_device_ctrl_start(struct usbd_xfer *xfer) 4297 { 4298 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4299 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4300 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4301 struct xhci_ring * const tr = xs->xs_xr[dci]; 4302 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 4303 usb_device_request_t * const req = &xfer->ux_request; 4304 const bool isread = usbd_xfer_isread(xfer); 4305 const uint32_t len = UGETW(req->wLength); 4306 usb_dma_t * const dma = &xfer->ux_dmabuf; 4307 uint64_t parameter; 4308 uint32_t status; 4309 uint32_t control; 4310 u_int i; 4311 const bool polling = xhci_polling_p(sc); 4312 4313 XHCIHIST_FUNC(); 4314 XHCIHIST_CALLARGS("req: %04jx %04jx %04jx %04jx", 4315 req->bmRequestType | (req->bRequest << 8), UGETW(req->wValue), 4316 UGETW(req->wIndex), UGETW(req->wLength)); 4317 4318 /* we rely on the bottom bits for extra info */ 4319 KASSERTMSG(((uintptr_t)xfer & 0x3) == 0x0, "xfer %zx", 4320 (uintptr_t) xfer); 4321 4322 KASSERT((xfer->ux_rqflags & URQ_REQUEST) != 0); 4323 4324 i = 0; 4325 4326 /* setup phase */ 4327 parameter = le64dec(req); /* to keep USB endian after xhci_trb_put() */ 4328 status = XHCI_TRB_2_IRQ_SET(0) | XHCI_TRB_2_BYTES_SET(sizeof(*req)); 4329 control = ((len == 0) ? XHCI_TRB_3_TRT_NONE : 4330 (isread ? 

static void
xhci_device_ctrl_done(struct usbd_xfer *xfer)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();
	usb_device_request_t *req = &xfer->ux_request;
	int len = UGETW(req->wLength);
	int rd = req->bmRequestType & UT_READ;

	if (len)
		usb_syncmem(&xfer->ux_dmabuf, 0, len,
		    rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
}

static void
xhci_device_ctrl_abort(struct usbd_xfer *xfer)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	usbd_xfer_abort(xfer);
}

static void
xhci_device_ctrl_close(struct usbd_pipe *pipe)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	xhci_close_pipe(pipe);
}

/* ------------------ */
/* device isochronous */

static usbd_status
xhci_device_isoc_transfer(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	usbd_status err;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mutex_exit(&sc->sc_lock);
	if (err)
		return err;

	return xhci_device_isoc_enter(xfer);
}

static usbd_status
xhci_device_isoc_enter(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
	const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
	struct xhci_ring * const tr = xs->xs_xr[dci];
	struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
	struct xhci_pipe * const xpipe = (struct xhci_pipe *)xfer->ux_pipe;
	uint32_t len = xfer->ux_length;
	usb_dma_t * const dma = &xfer->ux_dmabuf;
	uint64_t parameter;
	uint32_t status;
	uint32_t control;
	uint32_t mfindex;
	uint32_t offs;
	int i, ival;
	const bool polling = xhci_polling_p(sc);
	const uint16_t MPS = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
	const uint16_t mps = UE_GET_SIZE(MPS);
	const uint8_t maxb = xpipe->xp_maxb;
	u_int tdpc, tbc, tlbpc;

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
	    (uintptr_t)xfer, xs->xs_idx, dci, 0);

	if (sc->sc_dying)
		return USBD_IOERROR;

	KASSERT(xfer->ux_nframes != 0 && xfer->ux_frlengths);
	KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);

	const bool isread = usbd_xfer_isread(xfer);
	if (xfer->ux_length)
		usb_syncmem(dma, 0, xfer->ux_length,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
	if (ival >= 1 && ival <= 16)
		ival = 1 << (ival - 1);
	else
		ival = 1; /* fake something up */

	if (xpipe->xp_isoc_next == -1) {
		mfindex = xhci_rt_read_4(sc, XHCI_MFINDEX);
		DPRINTF("mfindex %jx", (uintmax_t)mfindex, 0, 0, 0);
		mfindex = XHCI_MFINDEX_GET(mfindex + 1);
		mfindex /= USB_UFRAMES_PER_FRAME;
		mfindex += 7; /* 7 frames is max possible IST */
		xpipe->xp_isoc_next = roundup2(mfindex, ival);
	}

	offs = 0;
	for (i = 0; i < xfer->ux_nframes; i++) {
		len = xfer->ux_frlengths[i];

		tdpc = howmany(len, mps);
		tbc = howmany(tdpc, maxb) - 1;
		tlbpc = tdpc % maxb;
		tlbpc = tlbpc ? tlbpc - 1 : maxb - 1;

		KASSERTMSG(len <= 0x10000, "len %d", len);
		parameter = DMAADDR(dma, offs);
		status = XHCI_TRB_2_IRQ_SET(0) |
		    XHCI_TRB_2_TDSZ_SET(0) |
		    XHCI_TRB_2_BYTES_SET(len);
		control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) |
		    (isread ? XHCI_TRB_3_ISP_BIT : 0) |
		    XHCI_TRB_3_TBC_SET(tbc) |
		    XHCI_TRB_3_TLBPC_SET(tlbpc) |
		    XHCI_TRB_3_IOC_BIT;
		if (XHCI_HCC_CFC(sc->sc_hcc)) {
			control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
#if 0
		} else if (xpipe->xp_isoc_next == -1) {
			control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
#endif
		} else {
			control |= XHCI_TRB_3_ISO_SIA_BIT;
		}
#if 0
		if (i != xfer->ux_nframes - 1)
			control |= XHCI_TRB_3_BEI_BIT;
#endif
		xhci_xfer_put_trb(xx, i, parameter, status, control);

		xpipe->xp_isoc_next += ival;
		offs += len;
	}

	xx->xx_isoc_done = 0;

	if (!polling)
		mutex_enter(&tr->xr_lock);
	xhci_ring_put_xfer(sc, tr, xx, i);
	if (!polling)
		mutex_exit(&tr->xr_lock);

	if (!polling)
		mutex_enter(&sc->sc_lock);
	xfer->ux_status = USBD_IN_PROGRESS;
	xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
	usbd_xfer_schedule_timeout(xfer);
	if (!polling)
		mutex_exit(&sc->sc_lock);

	return USBD_IN_PROGRESS;
}
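
/*
 * Editor's worked example (not part of the driver): for an isochronous
 * endpoint with mps = 1024 and xp_maxb = 3, a 3000-byte frame gives
 * tdpc = howmany(3000, 1024) = 3 packets, tbc = howmany(3, 3) - 1 = 0
 * (a single burst) and, since 3 % 3 == 0, tlbpc = 3 - 1 = 2, i.e. the
 * last (only) burst carries three packets.  A 1500-byte frame gives
 * tdpc = 2, tbc = 0 and tlbpc = 1.
 */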

static void
xhci_device_isoc_abort(struct usbd_xfer *xfer)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	usbd_xfer_abort(xfer);
}

static void
xhci_device_isoc_close(struct usbd_pipe *pipe)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	xhci_close_pipe(pipe);
}

static void
xhci_device_isoc_done(struct usbd_xfer *xfer)
{
#ifdef USB_DEBUG
	struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
	const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
#endif
	const bool isread = usbd_xfer_isread(xfer);

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
	    (uintptr_t)xfer, xs->xs_idx, dci, 0);

	usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
	    isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
}
4591 */ 4592 return xhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 4593 } 4594 4595 static usbd_status 4596 xhci_device_bulk_start(struct usbd_xfer *xfer) 4597 { 4598 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4599 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4600 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4601 struct xhci_ring * const tr = xs->xs_xr[dci]; 4602 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 4603 const uint32_t len = xfer->ux_length; 4604 usb_dma_t * const dma = &xfer->ux_dmabuf; 4605 uint64_t parameter; 4606 uint32_t status; 4607 uint32_t control; 4608 u_int i = 0; 4609 const bool polling = xhci_polling_p(sc); 4610 4611 XHCIHIST_FUNC(); 4612 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4613 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4614 4615 if (sc->sc_dying) 4616 return USBD_IOERROR; 4617 4618 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0); 4619 4620 parameter = DMAADDR(dma, 0); 4621 const bool isread = usbd_xfer_isread(xfer); 4622 if (len) 4623 usb_syncmem(dma, 0, len, 4624 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 4625 4626 /* 4627 * XXX: (dsl) The physical buffer must not cross a 64k boundary. 4628 * If the user supplied buffer crosses such a boundary then 2 4629 * (or more) TRB should be used. 4630 * If multiple TRB are used the td_size field must be set correctly. 4631 * For v1.0 devices (like ivy bridge) this is the number of usb data 4632 * blocks needed to complete the transfer. 4633 * Setting it to 1 in the last TRB causes an extra zero-length 4634 * data block be sent. 4635 * The earlier documentation differs, I don't know how it behaves. 4636 */ 4637 KASSERTMSG(len <= 0x10000, "len %d", len); 4638 status = XHCI_TRB_2_IRQ_SET(0) | 4639 XHCI_TRB_2_TDSZ_SET(0) | 4640 XHCI_TRB_2_BYTES_SET(len); 4641 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) | 4642 (isread ? XHCI_TRB_3_ISP_BIT : 0) | 4643 XHCI_TRB_3_IOC_BIT; 4644 xhci_xfer_put_trb(xx, i++, parameter, status, control); 4645 4646 if (!polling) 4647 mutex_enter(&tr->xr_lock); 4648 xhci_ring_put_xfer(sc, tr, xx, i); 4649 if (!polling) 4650 mutex_exit(&tr->xr_lock); 4651 4652 if (!polling) 4653 mutex_enter(&sc->sc_lock); 4654 xfer->ux_status = USBD_IN_PROGRESS; 4655 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); 4656 usbd_xfer_schedule_timeout(xfer); 4657 if (!polling) 4658 mutex_exit(&sc->sc_lock); 4659 4660 return USBD_IN_PROGRESS; 4661 } 4662 4663 static void 4664 xhci_device_bulk_done(struct usbd_xfer *xfer) 4665 { 4666 #ifdef USB_DEBUG 4667 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4668 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4669 #endif 4670 const bool isread = usbd_xfer_isread(xfer); 4671 4672 XHCIHIST_FUNC(); 4673 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4674 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4675 4676 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length, 4677 isread ? 

static void
xhci_device_bulk_done(struct usbd_xfer *xfer)
{
#ifdef USB_DEBUG
	struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
	const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
#endif
	const bool isread = usbd_xfer_isread(xfer);

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
	    (uintptr_t)xfer, xs->xs_idx, dci, 0);

	usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
	    isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
}

static void
xhci_device_bulk_abort(struct usbd_xfer *xfer)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	usbd_xfer_abort(xfer);
}

static void
xhci_device_bulk_close(struct usbd_pipe *pipe)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	xhci_close_pipe(pipe);
}

/* ---------------- */
/* device interrupt */

static usbd_status
xhci_device_intr_transfer(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	usbd_status err;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mutex_exit(&sc->sc_lock);
	if (err)
		return err;

	/*
	 * Pipe isn't running (otherwise err would be USBD_INPROG),
	 * so start it first.
	 */
	return xhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
}

static usbd_status
xhci_device_intr_start(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
	const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
	struct xhci_ring * const tr = xs->xs_xr[dci];
	struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
	const uint32_t len = xfer->ux_length;
	const bool polling = xhci_polling_p(sc);
	usb_dma_t * const dma = &xfer->ux_dmabuf;
	uint64_t parameter;
	uint32_t status;
	uint32_t control;
	u_int i = 0;

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
	    (uintptr_t)xfer, xs->xs_idx, dci, 0);

	if (sc->sc_dying)
		return USBD_IOERROR;

	KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);

	const bool isread = usbd_xfer_isread(xfer);
	if (len)
		usb_syncmem(dma, 0, len,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	parameter = DMAADDR(dma, 0);
	KASSERTMSG(len <= 0x10000, "len %d", len);
	status = XHCI_TRB_2_IRQ_SET(0) |
	    XHCI_TRB_2_TDSZ_SET(0) |
	    XHCI_TRB_2_BYTES_SET(len);
	control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
	    (isread ? XHCI_TRB_3_ISP_BIT : 0) | XHCI_TRB_3_IOC_BIT;
	xhci_xfer_put_trb(xx, i++, parameter, status, control);

	if (!polling)
		mutex_enter(&tr->xr_lock);
	xhci_ring_put_xfer(sc, tr, xx, i);
	if (!polling)
		mutex_exit(&tr->xr_lock);

	if (!polling)
		mutex_enter(&sc->sc_lock);
	xfer->ux_status = USBD_IN_PROGRESS;
	xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
	usbd_xfer_schedule_timeout(xfer);
	if (!polling)
		mutex_exit(&sc->sc_lock);

	return USBD_IN_PROGRESS;
}

static void
xhci_device_intr_done(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
#ifdef USB_DEBUG
	struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
	const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
#endif
	const bool isread = usbd_xfer_isread(xfer);

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
	    (uintptr_t)xfer, xs->xs_idx, dci, 0);

	KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));

	usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
	    isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
}

static void
xhci_device_intr_abort(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx", (uintptr_t)xfer, 0, 0, 0);

	KASSERT(mutex_owned(&sc->sc_lock));
	usbd_xfer_abort(xfer);
}

static void
xhci_device_intr_close(struct usbd_pipe *pipe)
{
	//struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx", (uintptr_t)pipe, 0, 0, 0);

	xhci_close_pipe(pipe);
}