1 /* $NetBSD: xhci.c,v 1.143 2021/05/29 16:49:30 riastradh Exp $ */ 2 3 /* 4 * Copyright (c) 2013 Jonathan A. Kollasch 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 /* 30 * USB rev 2.0 and rev 3.1 specification 31 * http://www.usb.org/developers/docs/ 32 * xHCI rev 1.1 specification 33 * http://www.intel.com/technology/usb/spec.htm 34 */ 35 36 #include <sys/cdefs.h> 37 __KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.143 2021/05/29 16:49:30 riastradh Exp $"); 38 39 #ifdef _KERNEL_OPT 40 #include "opt_usb.h" 41 #endif 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/kernel.h> 46 #include <sys/kmem.h> 47 #include <sys/device.h> 48 #include <sys/select.h> 49 #include <sys/proc.h> 50 #include <sys/queue.h> 51 #include <sys/mutex.h> 52 #include <sys/condvar.h> 53 #include <sys/bus.h> 54 #include <sys/cpu.h> 55 #include <sys/sysctl.h> 56 57 #include <machine/endian.h> 58 59 #include <dev/usb/usb.h> 60 #include <dev/usb/usbdi.h> 61 #include <dev/usb/usbdivar.h> 62 #include <dev/usb/usbdi_util.h> 63 #include <dev/usb/usbhist.h> 64 #include <dev/usb/usb_mem.h> 65 #include <dev/usb/usb_quirks.h> 66 67 #include <dev/usb/xhcireg.h> 68 #include <dev/usb/xhcivar.h> 69 #include <dev/usb/usbroothub.h> 70 71 72 #ifdef USB_DEBUG 73 #ifndef XHCI_DEBUG 74 #define xhcidebug 0 75 #else /* !XHCI_DEBUG */ 76 #define HEXDUMP(a, b, c) \ 77 do { \ 78 if (xhcidebug > 0) \ 79 hexdump(printf, a, b, c); \ 80 } while (/*CONSTCOND*/0) 81 static int xhcidebug = 0; 82 83 SYSCTL_SETUP(sysctl_hw_xhci_setup, "sysctl hw.xhci setup") 84 { 85 int err; 86 const struct sysctlnode *rnode; 87 const struct sysctlnode *cnode; 88 89 err = sysctl_createv(clog, 0, NULL, &rnode, 90 CTLFLAG_PERMANENT, CTLTYPE_NODE, "xhci", 91 SYSCTL_DESCR("xhci global controls"), 92 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL); 93 94 if (err) 95 goto fail; 96 97 /* control debugging printfs */ 98 err = sysctl_createv(clog, 0, &rnode, &cnode, 99 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, 100 "debug", SYSCTL_DESCR("Enable debugging output"), 101 NULL, 0, &xhcidebug, sizeof(xhcidebug), CTL_CREATE, CTL_EOL); 102 if (err) 103 goto fail; 104 105 return; 106 fail: 107 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err); 108 } 
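/*
 * Usage note: with XHCI_DEBUG compiled in, debugging is enabled at run
 * time through the sysctl created above, e.g. "sysctl -w hw.xhci.debug=1".
 * HEXDUMP() output is gated on any non-zero value, and larger values
 * enable the higher-numbered DPRINTFN()/USBHIST_LOGN() messages below.
 */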
109 110 #endif /* !XHCI_DEBUG */ 111 #endif /* USB_DEBUG */ 112 113 #ifndef HEXDUMP 114 #define HEXDUMP(a, b, c) 115 #endif 116 117 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOG(xhcidebug,FMT,A,B,C,D) 118 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(xhcidebug,N,FMT,A,B,C,D) 119 #define XHCIHIST_FUNC() USBHIST_FUNC() 120 #define XHCIHIST_CALLED(name) USBHIST_CALLED(xhcidebug) 121 #define XHCIHIST_CALLARGS(FMT,A,B,C,D) \ 122 USBHIST_CALLARGS(xhcidebug,FMT,A,B,C,D) 123 124 #define XHCI_DCI_SLOT 0 125 #define XHCI_DCI_EP_CONTROL 1 126 127 #define XHCI_ICI_INPUT_CONTROL 0 128 129 struct xhci_pipe { 130 struct usbd_pipe xp_pipe; 131 struct usb_task xp_async_task; 132 int16_t xp_isoc_next; /* next frame */ 133 uint8_t xp_maxb; /* max burst */ 134 uint8_t xp_mult; 135 }; 136 137 #define XHCI_COMMAND_RING_TRBS 256 138 #define XHCI_EVENT_RING_TRBS 256 139 #define XHCI_EVENT_RING_SEGMENTS 1 140 #define XHCI_TRB_3_ED_BIT XHCI_TRB_3_ISP_BIT 141 142 static usbd_status xhci_open(struct usbd_pipe *); 143 static void xhci_close_pipe(struct usbd_pipe *); 144 static int xhci_intr1(struct xhci_softc * const); 145 static void xhci_softintr(void *); 146 static void xhci_poll(struct usbd_bus *); 147 static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int); 148 static void xhci_freex(struct usbd_bus *, struct usbd_xfer *); 149 static void xhci_abortx(struct usbd_xfer *); 150 static bool xhci_dying(struct usbd_bus *); 151 static void xhci_get_lock(struct usbd_bus *, kmutex_t **); 152 static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int, 153 struct usbd_port *); 154 static int xhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *, 155 void *, int); 156 157 static usbd_status xhci_configure_endpoint(struct usbd_pipe *); 158 //static usbd_status xhci_unconfigure_endpoint(struct usbd_pipe *); 159 static usbd_status xhci_reset_endpoint(struct usbd_pipe *); 160 static usbd_status xhci_stop_endpoint_cmd(struct xhci_softc *, 161 struct xhci_slot *, u_int, uint32_t); 162 static usbd_status xhci_stop_endpoint(struct usbd_pipe *); 163 164 static void xhci_host_dequeue(struct xhci_ring * const); 165 static usbd_status xhci_set_dequeue(struct usbd_pipe *); 166 167 static usbd_status xhci_do_command(struct xhci_softc * const, 168 struct xhci_soft_trb * const, int); 169 static usbd_status xhci_do_command_locked(struct xhci_softc * const, 170 struct xhci_soft_trb * const, int); 171 static usbd_status xhci_init_slot(struct usbd_device *, uint32_t); 172 static void xhci_free_slot(struct xhci_softc *, struct xhci_slot *); 173 static usbd_status xhci_set_address(struct usbd_device *, uint32_t, bool); 174 static usbd_status xhci_enable_slot(struct xhci_softc * const, 175 uint8_t * const); 176 static usbd_status xhci_disable_slot(struct xhci_softc * const, uint8_t); 177 static usbd_status xhci_address_device(struct xhci_softc * const, 178 uint64_t, uint8_t, bool); 179 static void xhci_set_dcba(struct xhci_softc * const, uint64_t, int); 180 static usbd_status xhci_update_ep0_mps(struct xhci_softc * const, 181 struct xhci_slot * const, u_int); 182 static usbd_status xhci_ring_init(struct xhci_softc * const, 183 struct xhci_ring **, size_t, size_t); 184 static void xhci_ring_free(struct xhci_softc * const, 185 struct xhci_ring ** const); 186 187 static void xhci_setup_ctx(struct usbd_pipe *); 188 static void xhci_setup_route(struct usbd_pipe *, uint32_t *); 189 static void xhci_setup_tthub(struct usbd_pipe *, uint32_t *); 190 static void xhci_setup_maxburst(struct usbd_pipe *, uint32_t *); 191 static 
uint32_t xhci_bival2ival(uint32_t, uint32_t); 192 193 static void xhci_noop(struct usbd_pipe *); 194 195 static usbd_status xhci_root_intr_transfer(struct usbd_xfer *); 196 static usbd_status xhci_root_intr_start(struct usbd_xfer *); 197 static void xhci_root_intr_abort(struct usbd_xfer *); 198 static void xhci_root_intr_close(struct usbd_pipe *); 199 static void xhci_root_intr_done(struct usbd_xfer *); 200 201 static usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *); 202 static usbd_status xhci_device_ctrl_start(struct usbd_xfer *); 203 static void xhci_device_ctrl_abort(struct usbd_xfer *); 204 static void xhci_device_ctrl_close(struct usbd_pipe *); 205 static void xhci_device_ctrl_done(struct usbd_xfer *); 206 207 static usbd_status xhci_device_isoc_transfer(struct usbd_xfer *); 208 static usbd_status xhci_device_isoc_enter(struct usbd_xfer *); 209 static void xhci_device_isoc_abort(struct usbd_xfer *); 210 static void xhci_device_isoc_close(struct usbd_pipe *); 211 static void xhci_device_isoc_done(struct usbd_xfer *); 212 213 static usbd_status xhci_device_intr_transfer(struct usbd_xfer *); 214 static usbd_status xhci_device_intr_start(struct usbd_xfer *); 215 static void xhci_device_intr_abort(struct usbd_xfer *); 216 static void xhci_device_intr_close(struct usbd_pipe *); 217 static void xhci_device_intr_done(struct usbd_xfer *); 218 219 static usbd_status xhci_device_bulk_transfer(struct usbd_xfer *); 220 static usbd_status xhci_device_bulk_start(struct usbd_xfer *); 221 static void xhci_device_bulk_abort(struct usbd_xfer *); 222 static void xhci_device_bulk_close(struct usbd_pipe *); 223 static void xhci_device_bulk_done(struct usbd_xfer *); 224 225 static const struct usbd_bus_methods xhci_bus_methods = { 226 .ubm_open = xhci_open, 227 .ubm_softint = xhci_softintr, 228 .ubm_dopoll = xhci_poll, 229 .ubm_allocx = xhci_allocx, 230 .ubm_freex = xhci_freex, 231 .ubm_abortx = xhci_abortx, 232 .ubm_dying = xhci_dying, 233 .ubm_getlock = xhci_get_lock, 234 .ubm_newdev = xhci_new_device, 235 .ubm_rhctrl = xhci_roothub_ctrl, 236 }; 237 238 static const struct usbd_pipe_methods xhci_root_intr_methods = { 239 .upm_transfer = xhci_root_intr_transfer, 240 .upm_start = xhci_root_intr_start, 241 .upm_abort = xhci_root_intr_abort, 242 .upm_close = xhci_root_intr_close, 243 .upm_cleartoggle = xhci_noop, 244 .upm_done = xhci_root_intr_done, 245 }; 246 247 248 static const struct usbd_pipe_methods xhci_device_ctrl_methods = { 249 .upm_transfer = xhci_device_ctrl_transfer, 250 .upm_start = xhci_device_ctrl_start, 251 .upm_abort = xhci_device_ctrl_abort, 252 .upm_close = xhci_device_ctrl_close, 253 .upm_cleartoggle = xhci_noop, 254 .upm_done = xhci_device_ctrl_done, 255 }; 256 257 static const struct usbd_pipe_methods xhci_device_isoc_methods = { 258 .upm_transfer = xhci_device_isoc_transfer, 259 .upm_abort = xhci_device_isoc_abort, 260 .upm_close = xhci_device_isoc_close, 261 .upm_cleartoggle = xhci_noop, 262 .upm_done = xhci_device_isoc_done, 263 }; 264 265 static const struct usbd_pipe_methods xhci_device_bulk_methods = { 266 .upm_transfer = xhci_device_bulk_transfer, 267 .upm_start = xhci_device_bulk_start, 268 .upm_abort = xhci_device_bulk_abort, 269 .upm_close = xhci_device_bulk_close, 270 .upm_cleartoggle = xhci_noop, 271 .upm_done = xhci_device_bulk_done, 272 }; 273 274 static const struct usbd_pipe_methods xhci_device_intr_methods = { 275 .upm_transfer = xhci_device_intr_transfer, 276 .upm_start = xhci_device_intr_start, 277 .upm_abort = xhci_device_intr_abort, 278 .upm_close = 
xhci_device_intr_close, 279 .upm_cleartoggle = xhci_noop, 280 .upm_done = xhci_device_intr_done, 281 }; 282 283 static inline uint32_t 284 xhci_read_1(const struct xhci_softc * const sc, bus_size_t offset) 285 { 286 return bus_space_read_1(sc->sc_iot, sc->sc_ioh, offset); 287 } 288 289 static inline uint32_t 290 xhci_read_2(const struct xhci_softc * const sc, bus_size_t offset) 291 { 292 return bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset); 293 } 294 295 static inline uint32_t 296 xhci_read_4(const struct xhci_softc * const sc, bus_size_t offset) 297 { 298 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset); 299 } 300 301 static inline void 302 xhci_write_1(const struct xhci_softc * const sc, bus_size_t offset, 303 uint32_t value) 304 { 305 bus_space_write_1(sc->sc_iot, sc->sc_ioh, offset, value); 306 } 307 308 #if 0 /* unused */ 309 static inline void 310 xhci_write_4(const struct xhci_softc * const sc, bus_size_t offset, 311 uint32_t value) 312 { 313 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value); 314 } 315 #endif /* unused */ 316 317 static inline void 318 xhci_barrier(const struct xhci_softc * const sc, int flags) 319 { 320 bus_space_barrier(sc->sc_iot, sc->sc_ioh, 0, sc->sc_ios, flags); 321 } 322 323 static inline uint32_t 324 xhci_cap_read_4(const struct xhci_softc * const sc, bus_size_t offset) 325 { 326 return bus_space_read_4(sc->sc_iot, sc->sc_cbh, offset); 327 } 328 329 static inline uint32_t 330 xhci_op_read_4(const struct xhci_softc * const sc, bus_size_t offset) 331 { 332 return bus_space_read_4(sc->sc_iot, sc->sc_obh, offset); 333 } 334 335 static inline void 336 xhci_op_write_4(const struct xhci_softc * const sc, bus_size_t offset, 337 uint32_t value) 338 { 339 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value); 340 } 341 342 static inline uint64_t 343 xhci_op_read_8(const struct xhci_softc * const sc, bus_size_t offset) 344 { 345 uint64_t value; 346 347 if (XHCI_HCC_AC64(sc->sc_hcc)) { 348 #ifdef XHCI_USE_BUS_SPACE_8 349 value = bus_space_read_8(sc->sc_iot, sc->sc_obh, offset); 350 #else 351 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset); 352 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_obh, 353 offset + 4) << 32; 354 #endif 355 } else { 356 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset); 357 } 358 359 return value; 360 } 361 362 static inline void 363 xhci_op_write_8(const struct xhci_softc * const sc, bus_size_t offset, 364 uint64_t value) 365 { 366 if (XHCI_HCC_AC64(sc->sc_hcc)) { 367 #ifdef XHCI_USE_BUS_SPACE_8 368 bus_space_write_8(sc->sc_iot, sc->sc_obh, offset, value); 369 #else 370 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 0, 371 (value >> 0) & 0xffffffff); 372 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 4, 373 (value >> 32) & 0xffffffff); 374 #endif 375 } else { 376 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value); 377 } 378 } 379 380 static inline uint32_t 381 xhci_rt_read_4(const struct xhci_softc * const sc, bus_size_t offset) 382 { 383 return bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset); 384 } 385 386 static inline void 387 xhci_rt_write_4(const struct xhci_softc * const sc, bus_size_t offset, 388 uint32_t value) 389 { 390 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value); 391 } 392 393 static inline uint64_t 394 xhci_rt_read_8(const struct xhci_softc * const sc, bus_size_t offset) 395 { 396 uint64_t value; 397 398 if (XHCI_HCC_AC64(sc->sc_hcc)) { 399 #ifdef XHCI_USE_BUS_SPACE_8 400 value = bus_space_read_8(sc->sc_iot, sc->sc_rbh, offset); 401 #else 402 value = 
bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset); 403 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_rbh, 404 offset + 4) << 32; 405 #endif 406 } else { 407 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset); 408 } 409 410 return value; 411 } 412 413 static inline void 414 xhci_rt_write_8(const struct xhci_softc * const sc, bus_size_t offset, 415 uint64_t value) 416 { 417 if (XHCI_HCC_AC64(sc->sc_hcc)) { 418 #ifdef XHCI_USE_BUS_SPACE_8 419 bus_space_write_8(sc->sc_iot, sc->sc_rbh, offset, value); 420 #else 421 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 0, 422 (value >> 0) & 0xffffffff); 423 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 4, 424 (value >> 32) & 0xffffffff); 425 #endif 426 } else { 427 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value); 428 } 429 } 430 431 #if 0 /* unused */ 432 static inline uint32_t 433 xhci_db_read_4(const struct xhci_softc * const sc, bus_size_t offset) 434 { 435 return bus_space_read_4(sc->sc_iot, sc->sc_dbh, offset); 436 } 437 #endif /* unused */ 438 439 static inline void 440 xhci_db_write_4(const struct xhci_softc * const sc, bus_size_t offset, 441 uint32_t value) 442 { 443 bus_space_write_4(sc->sc_iot, sc->sc_dbh, offset, value); 444 } 445 446 /* --- */ 447 448 static inline uint8_t 449 xhci_ep_get_type(usb_endpoint_descriptor_t * const ed) 450 { 451 u_int eptype = 0; 452 453 switch (UE_GET_XFERTYPE(ed->bmAttributes)) { 454 case UE_CONTROL: 455 eptype = 0x0; 456 break; 457 case UE_ISOCHRONOUS: 458 eptype = 0x1; 459 break; 460 case UE_BULK: 461 eptype = 0x2; 462 break; 463 case UE_INTERRUPT: 464 eptype = 0x3; 465 break; 466 } 467 468 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) || 469 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)) 470 return eptype | 0x4; 471 else 472 return eptype; 473 } 474 475 static u_int 476 xhci_ep_get_dci(usb_endpoint_descriptor_t * const ed) 477 { 478 /* xHCI 1.0 section 4.5.1 */ 479 u_int epaddr = UE_GET_ADDR(ed->bEndpointAddress); 480 u_int in = 0; 481 482 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) || 483 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)) 484 in = 1; 485 486 return epaddr * 2 + in; 487 } 488 489 static inline u_int 490 xhci_dci_to_ici(const u_int i) 491 { 492 return i + 1; 493 } 494 495 static inline void * 496 xhci_slot_get_dcv(struct xhci_softc * const sc, struct xhci_slot * const xs, 497 const u_int dci) 498 { 499 return KERNADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci); 500 } 501 502 #if 0 /* unused */ 503 static inline bus_addr_t 504 xhci_slot_get_dcp(struct xhci_softc * const sc, struct xhci_slot * const xs, 505 const u_int dci) 506 { 507 return DMAADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci); 508 } 509 #endif /* unused */ 510 511 static inline void * 512 xhci_slot_get_icv(struct xhci_softc * const sc, struct xhci_slot * const xs, 513 const u_int ici) 514 { 515 return KERNADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici); 516 } 517 518 static inline bus_addr_t 519 xhci_slot_get_icp(struct xhci_softc * const sc, struct xhci_slot * const xs, 520 const u_int ici) 521 { 522 return DMAADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici); 523 } 524 525 static inline struct xhci_trb * 526 xhci_ring_trbv(struct xhci_ring * const xr, u_int idx) 527 { 528 return KERNADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx); 529 } 530 531 static inline bus_addr_t 532 xhci_ring_trbp(struct xhci_ring * const xr, u_int idx) 533 { 534 return DMAADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx); 535 } 536 537 static inline void 538 xhci_xfer_put_trb(struct xhci_xfer * const xx, u_int idx, 539 uint64_t parameter, uint32_t 
status, uint32_t control) 540 { 541 KASSERTMSG(idx < xx->xx_ntrb, "idx=%u xx_ntrb=%u", idx, xx->xx_ntrb); 542 xx->xx_trb[idx].trb_0 = parameter; 543 xx->xx_trb[idx].trb_2 = status; 544 xx->xx_trb[idx].trb_3 = control; 545 } 546 547 static inline void 548 xhci_trb_put(struct xhci_trb * const trb, uint64_t parameter, uint32_t status, 549 uint32_t control) 550 { 551 trb->trb_0 = htole64(parameter); 552 trb->trb_2 = htole32(status); 553 trb->trb_3 = htole32(control); 554 } 555 556 static int 557 xhci_trb_get_idx(struct xhci_ring *xr, uint64_t trb_0, int *idx) 558 { 559 /* base address of TRBs */ 560 bus_addr_t trbp = xhci_ring_trbp(xr, 0); 561 562 /* trb_0 range sanity check */ 563 if (trb_0 == 0 || trb_0 < trbp || 564 (trb_0 - trbp) % sizeof(struct xhci_trb) != 0 || 565 (trb_0 - trbp) / sizeof(struct xhci_trb) >= xr->xr_ntrb) { 566 return 1; 567 } 568 *idx = (trb_0 - trbp) / sizeof(struct xhci_trb); 569 return 0; 570 } 571 572 static unsigned int 573 xhci_get_epstate(struct xhci_softc * const sc, struct xhci_slot * const xs, 574 u_int dci) 575 { 576 uint32_t *cp; 577 578 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 579 cp = xhci_slot_get_dcv(sc, xs, dci); 580 return XHCI_EPCTX_0_EPSTATE_GET(le32toh(cp[0])); 581 } 582 583 static inline unsigned int 584 xhci_ctlrport2bus(struct xhci_softc * const sc, unsigned int ctlrport) 585 { 586 const unsigned int port = ctlrport - 1; 587 const uint8_t bit = __BIT(port % NBBY); 588 589 return __SHIFTOUT(sc->sc_ctlrportbus[port / NBBY], bit); 590 } 591 592 /* 593 * Return the roothub port for a controller port. Both are 1..n. 594 */ 595 static inline unsigned int 596 xhci_ctlrport2rhport(struct xhci_softc * const sc, unsigned int ctrlport) 597 { 598 599 return sc->sc_ctlrportmap[ctrlport - 1]; 600 } 601 602 /* 603 * Return the controller port for a bus roothub port. Both are 1..n. 604 */ 605 static inline unsigned int 606 xhci_rhport2ctlrport(struct xhci_softc * const sc, unsigned int bn, 607 unsigned int rhport) 608 { 609 610 return sc->sc_rhportmap[bn][rhport - 1]; 611 } 612 613 /* --- */ 614 615 void 616 xhci_childdet(device_t self, device_t child) 617 { 618 struct xhci_softc * const sc = device_private(self); 619 620 KASSERT((sc->sc_child == child) || (sc->sc_child2 == child)); 621 if (child == sc->sc_child2) 622 sc->sc_child2 = NULL; 623 else if (child == sc->sc_child) 624 sc->sc_child = NULL; 625 } 626 627 int 628 xhci_detach(struct xhci_softc *sc, int flags) 629 { 630 int rv = 0; 631 632 if (sc->sc_child2 != NULL) { 633 rv = config_detach(sc->sc_child2, flags); 634 if (rv != 0) 635 return rv; 636 KASSERT(sc->sc_child2 == NULL); 637 } 638 639 if (sc->sc_child != NULL) { 640 rv = config_detach(sc->sc_child, flags); 641 if (rv != 0) 642 return rv; 643 KASSERT(sc->sc_child == NULL); 644 } 645 646 /* XXX unconfigure/free slots */ 647 648 /* verify: */ 649 xhci_rt_write_4(sc, XHCI_IMAN(0), 0); 650 xhci_op_write_4(sc, XHCI_USBCMD, 0); 651 /* do we need to wait for stop? 
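	 * (USBSTS.HCH would indicate the halt; this detach path does not
	 * currently poll for it.)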
*/ 652 653 xhci_op_write_8(sc, XHCI_CRCR, 0); 654 xhci_ring_free(sc, &sc->sc_cr); 655 cv_destroy(&sc->sc_command_cv); 656 cv_destroy(&sc->sc_cmdbusy_cv); 657 658 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), 0); 659 xhci_rt_write_8(sc, XHCI_ERSTBA(0), 0); 660 xhci_rt_write_8(sc, XHCI_ERDP(0), 0 | XHCI_ERDP_BUSY); 661 xhci_ring_free(sc, &sc->sc_er); 662 663 usb_freemem(&sc->sc_bus, &sc->sc_eventst_dma); 664 665 xhci_op_write_8(sc, XHCI_DCBAAP, 0); 666 usb_freemem(&sc->sc_bus, &sc->sc_dcbaa_dma); 667 668 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * sc->sc_maxslots); 669 670 kmem_free(sc->sc_ctlrportbus, 671 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY)); 672 kmem_free(sc->sc_ctlrportmap, sc->sc_maxports * sizeof(int)); 673 674 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) { 675 kmem_free(sc->sc_rhportmap[j], sc->sc_maxports * sizeof(int)); 676 } 677 678 mutex_destroy(&sc->sc_lock); 679 mutex_destroy(&sc->sc_intr_lock); 680 681 pool_cache_destroy(sc->sc_xferpool); 682 683 return rv; 684 } 685 686 int 687 xhci_activate(device_t self, enum devact act) 688 { 689 struct xhci_softc * const sc = device_private(self); 690 691 switch (act) { 692 case DVACT_DEACTIVATE: 693 sc->sc_dying = true; 694 return 0; 695 default: 696 return EOPNOTSUPP; 697 } 698 } 699 700 bool 701 xhci_suspend(device_t self, const pmf_qual_t *qual) 702 { 703 struct xhci_softc * const sc = device_private(self); 704 size_t i, j, bn, dci; 705 int port; 706 uint32_t v; 707 usbd_status err; 708 bool ok = false; 709 710 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 711 712 mutex_enter(&sc->sc_lock); 713 714 /* 715 * Block issuance of new commands, and wait for all pending 716 * commands to complete. 717 */ 718 KASSERT(sc->sc_suspender == NULL); 719 sc->sc_suspender = curlwp; 720 while (sc->sc_command_addr != 0) 721 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock); 722 723 /* 724 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2: 725 * xHCI Power Management, p. 342 726 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=342 727 */ 728 729 /* 730 * `1. Stop all USB activity by issuing Stop Endpoint Commands 731 * for Busy endpoints in the Running state. If the Force 732 * Save Context Capability (FSC = ``0'') is not supported, 733 * then Stop Endpoint Commands shall be issued for all idle 734 * endpoints in the Running state as well. The Stop 735 * Endpoint Command causes the xHC to update the respective 736 * Endpoint or Stream Contexts in system memory, e.g. the 737 * TR Dequeue Pointer, DCS, etc. fields. Refer to 738 * Implementation Note "0".' 739 */ 740 for (i = 0; i < sc->sc_maxslots; i++) { 741 struct xhci_slot *xs = &sc->sc_slots[i]; 742 743 /* Skip if the slot is not in use. */ 744 if (xs->xs_idx == 0) 745 continue; 746 747 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) { 748 /* Skip if the endpoint is not Running. */ 749 /* XXX What about Busy? */ 750 if (xhci_get_epstate(sc, xs, dci) != 751 XHCI_EPSTATE_RUNNING) 752 continue; 753 754 /* Stop endpoint. */ 755 err = xhci_stop_endpoint_cmd(sc, xs, dci, 756 XHCI_TRB_3_SUSP_EP_BIT); 757 if (err) { 758 device_printf(self, "failed to stop endpoint" 759 " slot %zu dci %zu err %d\n", 760 i, dci, err); 761 goto out; 762 } 763 } 764 } 765 766 /* 767 * Next, suspend all the ports: 768 * 769 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.15: 770 * Suspend-Resume, pp. 
276-283 771 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=276 772 */ 773 for (bn = 0; bn < 2; bn++) { 774 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) { 775 /* 4.15.1: Port Suspend. */ 776 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i)); 777 778 /* 779 * `System software places individual ports 780 * into suspend mode by writing a ``3'' into 781 * the appropriate PORTSC register Port Link 782 * State (PLS) field (refer to Section 5.4.8). 783 * Software should only set the PLS field to 784 * ``3'' when the port is in the Enabled 785 * state.' 786 * 787 * `Software should not attempt to suspend a 788 * port unless the port reports that it is in 789 * the enabled (PED = ``1''; PLS < ``3'') 790 * state (refer to Section 5.4.8 for more 791 * information about PED and PLS).' 792 */ 793 v = xhci_op_read_4(sc, port); 794 if (((v & XHCI_PS_PED) == 0) || 795 XHCI_PS_PLS_GET(v) >= XHCI_PS_PLS_U3) 796 continue; 797 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR); 798 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU3); 799 xhci_op_write_4(sc, port, v); 800 801 /* 802 * `When the PLS field is written with U3 803 * (``3''), the status of the PLS bit will not 804 * change to the target U state U3 until the 805 * suspend signaling has completed to the 806 * attached device (which may be as long as 807 * 10ms.).' 808 * 809 * `Software is required to wait for U3 810 * transitions to complete before it puts the 811 * xHC into a low power state, and before 812 * resuming the port.' 813 * 814 * XXX Take advantage of the technique to 815 * reduce polling on host controllers that 816 * support the U3C capability. 817 */ 818 for (j = 0; j < XHCI_WAIT_PLS_U3; j++) { 819 v = xhci_op_read_4(sc, port); 820 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U3) 821 break; 822 usb_delay_ms(&sc->sc_bus, 1); 823 } 824 if (j == XHCI_WAIT_PLS_U3) { 825 device_printf(self, 826 "suspend timeout on bus %zu port %zu\n", 827 bn, i); 828 goto out; 829 } 830 } 831 } 832 833 /* 834 * `2. Ensure that the Command Ring is in the Stopped state 835 * (CRR = ``0'') or Idle (i.e. the Command Transfer Ring is 836 * empty), and all Command Completion Events associated 837 * with them have been received.' 838 * 839 * XXX 840 */ 841 842 /* `3. Stop the controller by setting Run/Stop (R/S) = ``0''.' */ 843 xhci_op_write_4(sc, XHCI_USBCMD, 844 xhci_op_read_4(sc, XHCI_USBCMD) & ~XHCI_CMD_RS); 845 846 /* 847 * `4. Read the Operational Runtime, and VTIO registers in the 848 * following order: USBCMD, DNCTRL, DCBAAP, CONFIG, ERSTSZ, 849 * ERSTBA, ERDP, IMAN, IMOD, and VTIO and save their 850 * state.' 851 * 852 * (We don't use VTIO here (XXX for now?).) 853 */ 854 sc->sc_regs.usbcmd = xhci_op_read_4(sc, XHCI_USBCMD); 855 sc->sc_regs.dnctrl = xhci_op_read_4(sc, XHCI_DNCTRL); 856 sc->sc_regs.dcbaap = xhci_op_read_8(sc, XHCI_DCBAAP); 857 sc->sc_regs.config = xhci_op_read_4(sc, XHCI_CONFIG); 858 sc->sc_regs.erstsz0 = xhci_rt_read_4(sc, XHCI_ERSTSZ(0)); 859 sc->sc_regs.erstba0 = xhci_rt_read_8(sc, XHCI_ERSTBA(0)); 860 sc->sc_regs.erdp0 = xhci_rt_read_8(sc, XHCI_ERDP(0)); 861 sc->sc_regs.iman0 = xhci_rt_read_4(sc, XHCI_IMAN(0)); 862 sc->sc_regs.imod0 = xhci_rt_read_4(sc, XHCI_IMOD(0)); 863 864 /* 865 * `5. Set the Controller Save State (CSS) flag in the USBCMD 866 * register (5.4.1)...' 
867 */ 868 xhci_op_write_4(sc, XHCI_USBCMD, 869 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CSS); 870 871 /* 872 * `...and wait for the Save State Status (SSS) flag in the 873 * USBSTS register (5.4.2) to transition to ``0''.' 874 */ 875 for (i = 0; i < XHCI_WAIT_SSS; i++) { 876 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SSS) == 0) 877 break; 878 usb_delay_ms(&sc->sc_bus, 1); 879 } 880 if (i >= XHCI_WAIT_SSS) { 881 device_printf(self, "suspend timeout, USBSTS.SSS\n"); 882 /* 883 * Just optimistically go on and check SRE anyway -- 884 * what's the worst that could happen? 885 */ 886 } 887 888 /* 889 * `Note: After a Save or Restore operation completes, the 890 * Save/Restore Error (SRE) flag in the USBSTS register should 891 * be checked to ensure that the operation completed 892 * successfully.' 893 */ 894 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) { 895 device_printf(self, "suspend error, USBSTS.SRE\n"); 896 goto out; 897 } 898 899 /* Success! */ 900 ok = true; 901 902 out: mutex_exit(&sc->sc_lock); 903 return ok; 904 } 905 906 bool 907 xhci_resume(device_t self, const pmf_qual_t *qual) 908 { 909 struct xhci_softc * const sc = device_private(self); 910 size_t i, j, bn, dci; 911 int port; 912 uint32_t v; 913 bool ok = false; 914 915 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 916 917 mutex_enter(&sc->sc_lock); 918 KASSERT(sc->sc_suspender); 919 920 /* 921 * xHCI Requirements Specification 1.2, May 2019, Sec. 4.23.2: 922 * xHCI Power Management, p. 343 923 * https://www.intel.com/content/dam/www/public/us/en/documents/technical-specifications/extensible-host-controler-interface-usb-xhci.pdf#page=343 924 */ 925 926 /* 927 * `4. Restore the Operational Runtime, and VTIO registers with 928 * their previously saved state in the following order: 929 * DNCTRL, DCBAAP, CONFIG, ERSTSZ, ERSTBA, ERDP, IMAN, 930 * IMOD, and VTIO.' 931 * 932 * (We don't use VTIO here (for now?).) 933 */ 934 xhci_op_write_4(sc, XHCI_USBCMD, sc->sc_regs.usbcmd); 935 xhci_op_write_4(sc, XHCI_DNCTRL, sc->sc_regs.dnctrl); 936 xhci_op_write_8(sc, XHCI_DCBAAP, sc->sc_regs.dcbaap); 937 xhci_op_write_4(sc, XHCI_CONFIG, sc->sc_regs.config); 938 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), sc->sc_regs.erstsz0); 939 xhci_rt_write_8(sc, XHCI_ERSTBA(0), sc->sc_regs.erstba0); 940 xhci_rt_write_8(sc, XHCI_ERDP(0), sc->sc_regs.erdp0); 941 xhci_rt_write_4(sc, XHCI_IMAN(0), sc->sc_regs.iman0); 942 xhci_rt_write_4(sc, XHCI_IMOD(0), sc->sc_regs.imod0); 943 944 memset(&sc->sc_regs, 0, sizeof(sc->sc_regs)); /* paranoia */ 945 946 /* 947 * `5. Set the Controller Restore State (CRS) flag in the 948 * USBCMD register (5.4.1) to ``1''...' 949 */ 950 xhci_op_write_4(sc, XHCI_USBCMD, 951 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_CRS); 952 953 /* 954 * `...and wait for the Restore State Status (RSS) in the 955 * USBSTS register (5.4.2) to transition to ``0''.' 956 */ 957 for (i = 0; i < XHCI_WAIT_RSS; i++) { 958 if ((xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_RSS) == 0) 959 break; 960 usb_delay_ms(&sc->sc_bus, 1); 961 } 962 if (i >= XHCI_WAIT_RSS) { 963 device_printf(self, "suspend timeout, USBSTS.RSS\n"); 964 goto out; 965 } 966 967 /* 968 * `6. Reinitialize the Command Ring, i.e. so its Cycle bits 969 * are consistent with the RCS values to be written to the 970 * CRCR.' 971 * 972 * XXX Hope just zeroing it is good enough! 973 */ 974 xhci_host_dequeue(sc->sc_cr); 975 976 /* 977 * `7. Write the CRCR with the address and RCS value of the 978 * reinitialized Command Ring. 
Note that this write will 979 * cause the Command Ring to restart at the address 980 * specified by the CRCR.' 981 */ 982 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) | 983 sc->sc_cr->xr_cs); 984 985 /* 986 * `8. Enable the controller by setting Run/Stop (R/S) = 987 * ``1''.' 988 */ 989 xhci_op_write_4(sc, XHCI_USBCMD, 990 xhci_op_read_4(sc, XHCI_USBCMD) | XHCI_CMD_RS); 991 992 /* 993 * `9. Software shall walk the USB topology and initialize each 994 * of the xHC PORTSC, PORTPMSC, and PORTLI registers, and 995 * external hub ports attached to USB devices.' 996 * 997 * This follows the procedure in 4.15 `Suspend-Resume', 4.15.2 998 * `Port Resume', 4.15.2.1 `Host Initiated'. 999 * 1000 * XXX We should maybe batch up initiating the state 1001 * transitions, and then wait for them to complete all at once. 1002 */ 1003 for (bn = 0; bn < 2; bn++) { 1004 for (i = 1; i <= sc->sc_rhportcount[bn]; i++) { 1005 port = XHCI_PORTSC(xhci_rhport2ctlrport(sc, bn, i)); 1006 1007 /* `When a port is in the U3 state: ...' */ 1008 v = xhci_op_read_4(sc, port); 1009 if (XHCI_PS_PLS_GET(v) != XHCI_PS_PLS_U3) 1010 continue; 1011 1012 /* 1013 * `For a USB2 protocol port, software shall 1014 * write a ``15'' (Resume) to the PLS field to 1015 * initiate resume signaling. The port shall 1016 * transition to the Resume substate and the 1017 * xHC shall transmit the resume signaling 1018 * within 1ms (T_URSM). Software shall ensure 1019 * that resume is signaled for at least 20ms 1020 * (T_DRSMDN). Software shall start timing 1021 * T_DRSMDN from the write of ``15'' (Resume) 1022 * to PLS.' 1023 */ 1024 if (bn == 1) { 1025 KASSERT(sc->sc_bus2.ub_revision == USBREV_2_0); 1026 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR); 1027 v |= XHCI_PS_LWS; 1028 v |= XHCI_PS_PLS_SET(XHCI_PS_PLS_SETRESUME); 1029 xhci_op_write_4(sc, port, v); 1030 usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT); 1031 } else { 1032 KASSERT(sc->sc_bus.ub_revision > USBREV_2_0); 1033 } 1034 1035 /* 1036 * `For a USB3 protocol port [and a USB2 1037 * protocol port after transitioning to 1038 * Resume], software shall write a ``0'' (U0) 1039 * to the PLS field...' 1040 */ 1041 v = xhci_op_read_4(sc, port); 1042 v &= ~(XHCI_PS_PLS_MASK | XHCI_PS_CLEAR); 1043 v |= XHCI_PS_LWS | XHCI_PS_PLS_SET(XHCI_PS_PLS_SETU0); 1044 xhci_op_write_4(sc, port, v); 1045 1046 for (j = 0; j < XHCI_WAIT_PLS_U0; j++) { 1047 v = xhci_op_read_4(sc, port); 1048 if (XHCI_PS_PLS_GET(v) == XHCI_PS_PLS_U0) 1049 break; 1050 usb_delay_ms(&sc->sc_bus, 1); 1051 } 1052 if (j == XHCI_WAIT_PLS_U0) { 1053 device_printf(self, 1054 "resume timeout on bus %zu port %zu\n", 1055 bn, i); 1056 goto out; 1057 } 1058 } 1059 } 1060 1061 /* 1062 * `10. Restart each of the previously Running endpoints by 1063 * ringing their doorbells.' 1064 */ 1065 for (i = 0; i < sc->sc_maxslots; i++) { 1066 struct xhci_slot *xs = &sc->sc_slots[i]; 1067 1068 /* Skip if the slot is not in use. */ 1069 if (xs->xs_idx == 0) 1070 continue; 1071 1072 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) { 1073 /* Skip if the endpoint is not Running. */ 1074 if (xhci_get_epstate(sc, xs, dci) != 1075 XHCI_EPSTATE_RUNNING) 1076 continue; 1077 1078 /* Ring the doorbell. */ 1079 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); 1080 } 1081 } 1082 1083 /* 1084 * `Note: After a Save or Restore operation completes, the 1085 * Save/Restore Error (SRE) flag in the USBSTS register should 1086 * be checked to ensure that the operation completed 1087 * successfully.' 
1088 */ 1089 if (xhci_op_read_4(sc, XHCI_USBSTS) & XHCI_STS_SRE) { 1090 device_printf(self, "resume error, USBSTS.SRE\n"); 1091 goto out; 1092 } 1093 1094 /* Resume command issuance. */ 1095 sc->sc_suspender = NULL; 1096 cv_broadcast(&sc->sc_cmdbusy_cv); 1097 1098 /* Success! */ 1099 ok = true; 1100 1101 out: mutex_exit(&sc->sc_lock); 1102 return ok; 1103 } 1104 1105 bool 1106 xhci_shutdown(device_t self, int flags) 1107 { 1108 return false; 1109 } 1110 1111 static int 1112 xhci_hc_reset(struct xhci_softc * const sc) 1113 { 1114 uint32_t usbcmd, usbsts; 1115 int i; 1116 1117 /* Check controller not ready */ 1118 for (i = 0; i < XHCI_WAIT_CNR; i++) { 1119 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1120 if ((usbsts & XHCI_STS_CNR) == 0) 1121 break; 1122 usb_delay_ms(&sc->sc_bus, 1); 1123 } 1124 if (i >= XHCI_WAIT_CNR) { 1125 aprint_error_dev(sc->sc_dev, "controller not ready timeout\n"); 1126 return EIO; 1127 } 1128 1129 /* Halt controller */ 1130 usbcmd = 0; 1131 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd); 1132 usb_delay_ms(&sc->sc_bus, 1); 1133 1134 /* Reset controller */ 1135 usbcmd = XHCI_CMD_HCRST; 1136 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd); 1137 for (i = 0; i < XHCI_WAIT_HCRST; i++) { 1138 /* 1139 * Wait 1ms first. Existing Intel xHCI requies 1ms delay to 1140 * prevent system hang (Errata). 1141 */ 1142 usb_delay_ms(&sc->sc_bus, 1); 1143 usbcmd = xhci_op_read_4(sc, XHCI_USBCMD); 1144 if ((usbcmd & XHCI_CMD_HCRST) == 0) 1145 break; 1146 } 1147 if (i >= XHCI_WAIT_HCRST) { 1148 aprint_error_dev(sc->sc_dev, "host controller reset timeout\n"); 1149 return EIO; 1150 } 1151 1152 /* Check controller not ready */ 1153 for (i = 0; i < XHCI_WAIT_CNR; i++) { 1154 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1155 if ((usbsts & XHCI_STS_CNR) == 0) 1156 break; 1157 usb_delay_ms(&sc->sc_bus, 1); 1158 } 1159 if (i >= XHCI_WAIT_CNR) { 1160 aprint_error_dev(sc->sc_dev, 1161 "controller not ready timeout after reset\n"); 1162 return EIO; 1163 } 1164 1165 return 0; 1166 } 1167 1168 /* 7.2 xHCI Support Protocol Capability */ 1169 static void 1170 xhci_id_protocols(struct xhci_softc *sc, bus_size_t ecp) 1171 { 1172 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 1173 1174 /* XXX Cache this lot */ 1175 1176 const uint32_t w0 = xhci_read_4(sc, ecp); 1177 const uint32_t w4 = xhci_read_4(sc, ecp + 4); 1178 const uint32_t w8 = xhci_read_4(sc, ecp + 8); 1179 const uint32_t wc = xhci_read_4(sc, ecp + 0xc); 1180 1181 aprint_debug_dev(sc->sc_dev, 1182 " SP: 0x%08x 0x%08x 0x%08x 0x%08x\n", w0, w4, w8, wc); 1183 1184 if (w4 != XHCI_XECP_USBID) 1185 return; 1186 1187 const int major = XHCI_XECP_SP_W0_MAJOR(w0); 1188 const int minor = XHCI_XECP_SP_W0_MINOR(w0); 1189 const uint8_t cpo = XHCI_XECP_SP_W8_CPO(w8); 1190 const uint8_t cpc = XHCI_XECP_SP_W8_CPC(w8); 1191 1192 const uint16_t mm = __SHIFTOUT(w0, __BITS(31, 16)); 1193 switch (mm) { 1194 case 0x0200: 1195 case 0x0300: 1196 case 0x0301: 1197 case 0x0310: 1198 aprint_debug_dev(sc->sc_dev, " %s ports %d - %d\n", 1199 major == 3 ? "ss" : "hs", cpo, cpo + cpc -1); 1200 break; 1201 default: 1202 aprint_error_dev(sc->sc_dev, " unknown major/minor (%d/%d)\n", 1203 major, minor); 1204 return; 1205 } 1206 1207 const size_t bus = (major == 3) ? 0 : 1; 1208 1209 /* Index arrays with 0..n-1 where ports are numbered 1..n */ 1210 for (size_t cp = cpo - 1; cp < cpo + cpc - 1; cp++) { 1211 if (sc->sc_ctlrportmap[cp] != 0) { 1212 aprint_error_dev(sc->sc_dev, "controller port %zu " 1213 "already assigned", cp); 1214 continue; 1215 } 1216 1217 sc->sc_ctlrportbus[cp / NBBY] |= 1218 bus == 0 ? 
0 : __BIT(cp % NBBY); 1219 1220 const size_t rhp = sc->sc_rhportcount[bus]++; 1221 1222 KASSERTMSG(sc->sc_rhportmap[bus][rhp] == 0, 1223 "bus %zu rhp %zu is %d", bus, rhp, 1224 sc->sc_rhportmap[bus][rhp]); 1225 1226 sc->sc_rhportmap[bus][rhp] = cp + 1; 1227 sc->sc_ctlrportmap[cp] = rhp + 1; 1228 } 1229 } 1230 1231 /* Process extended capabilities */ 1232 static void 1233 xhci_ecp(struct xhci_softc *sc) 1234 { 1235 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 1236 1237 bus_size_t ecp = XHCI_HCC_XECP(sc->sc_hcc) * 4; 1238 while (ecp != 0) { 1239 uint32_t ecr = xhci_read_4(sc, ecp); 1240 aprint_debug_dev(sc->sc_dev, "ECR: 0x%08x\n", ecr); 1241 switch (XHCI_XECP_ID(ecr)) { 1242 case XHCI_ID_PROTOCOLS: { 1243 xhci_id_protocols(sc, ecp); 1244 break; 1245 } 1246 case XHCI_ID_USB_LEGACY: { 1247 uint8_t bios_sem; 1248 1249 /* Take host controller ownership from BIOS */ 1250 bios_sem = xhci_read_1(sc, ecp + XHCI_XECP_BIOS_SEM); 1251 if (bios_sem) { 1252 /* sets xHCI to be owned by OS */ 1253 xhci_write_1(sc, ecp + XHCI_XECP_OS_SEM, 1); 1254 aprint_debug_dev(sc->sc_dev, 1255 "waiting for BIOS to give up control\n"); 1256 for (int i = 0; i < 5000; i++) { 1257 bios_sem = xhci_read_1(sc, ecp + 1258 XHCI_XECP_BIOS_SEM); 1259 if (bios_sem == 0) 1260 break; 1261 DELAY(1000); 1262 } 1263 if (bios_sem) { 1264 aprint_error_dev(sc->sc_dev, 1265 "timed out waiting for BIOS\n"); 1266 } 1267 } 1268 break; 1269 } 1270 default: 1271 break; 1272 } 1273 ecr = xhci_read_4(sc, ecp); 1274 if (XHCI_XECP_NEXT(ecr) == 0) { 1275 ecp = 0; 1276 } else { 1277 ecp += XHCI_XECP_NEXT(ecr) * 4; 1278 } 1279 } 1280 } 1281 1282 #define XHCI_HCCPREV1_BITS \ 1283 "\177\020" /* New bitmask */ \ 1284 "f\020\020XECP\0" \ 1285 "f\014\4MAXPSA\0" \ 1286 "b\013CFC\0" \ 1287 "b\012SEC\0" \ 1288 "b\011SBD\0" \ 1289 "b\010FSE\0" \ 1290 "b\7NSS\0" \ 1291 "b\6LTC\0" \ 1292 "b\5LHRC\0" \ 1293 "b\4PIND\0" \ 1294 "b\3PPC\0" \ 1295 "b\2CZC\0" \ 1296 "b\1BNC\0" \ 1297 "b\0AC64\0" \ 1298 "\0" 1299 #define XHCI_HCCV1_x_BITS \ 1300 "\177\020" /* New bitmask */ \ 1301 "f\020\020XECP\0" \ 1302 "f\014\4MAXPSA\0" \ 1303 "b\013CFC\0" \ 1304 "b\012SEC\0" \ 1305 "b\011SPC\0" \ 1306 "b\010PAE\0" \ 1307 "b\7NSS\0" \ 1308 "b\6LTC\0" \ 1309 "b\5LHRC\0" \ 1310 "b\4PIND\0" \ 1311 "b\3PPC\0" \ 1312 "b\2CSZ\0" \ 1313 "b\1BNC\0" \ 1314 "b\0AC64\0" \ 1315 "\0" 1316 1317 #define XHCI_HCC2_BITS \ 1318 "\177\020" /* New bitmask */ \ 1319 "b\7ETC_TSC\0" \ 1320 "b\6ETC\0" \ 1321 "b\5CIC\0" \ 1322 "b\4LEC\0" \ 1323 "b\3CTC\0" \ 1324 "b\2FSC\0" \ 1325 "b\1CMC\0" \ 1326 "b\0U3C\0" \ 1327 "\0" 1328 1329 void 1330 xhci_start(struct xhci_softc *sc) 1331 { 1332 xhci_rt_write_4(sc, XHCI_IMAN(0), XHCI_IMAN_INTR_ENA); 1333 if ((sc->sc_quirks & XHCI_QUIRK_INTEL) != 0) 1334 /* Intel xhci needs interrupt rate moderated. */ 1335 xhci_rt_write_4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT_LP); 1336 else 1337 xhci_rt_write_4(sc, XHCI_IMOD(0), 0); 1338 aprint_debug_dev(sc->sc_dev, "current IMOD %u\n", 1339 xhci_rt_read_4(sc, XHCI_IMOD(0))); 1340 1341 /* Go! 
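	 * Setting Run/Stop together with INTE starts the controller and
	 * enables its interrupt line; the command and event rings set up
	 * in xhci_init() become live at this point.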
*/ 1342 xhci_op_write_4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS); 1343 aprint_debug_dev(sc->sc_dev, "USBCMD 0x%08"PRIx32"\n", 1344 xhci_op_read_4(sc, XHCI_USBCMD)); 1345 } 1346 1347 int 1348 xhci_init(struct xhci_softc *sc) 1349 { 1350 bus_size_t bsz; 1351 uint32_t hcs1, hcs2, hcs3, dboff, rtsoff; 1352 uint32_t pagesize, config; 1353 int i = 0; 1354 uint16_t hciversion; 1355 uint8_t caplength; 1356 1357 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 1358 1359 /* Set up the bus struct for the usb 3 and usb 2 buses */ 1360 sc->sc_bus.ub_methods = &xhci_bus_methods; 1361 sc->sc_bus.ub_pipesize = sizeof(struct xhci_pipe); 1362 sc->sc_bus.ub_usedma = true; 1363 sc->sc_bus.ub_hcpriv = sc; 1364 1365 sc->sc_bus2.ub_methods = &xhci_bus_methods; 1366 sc->sc_bus2.ub_pipesize = sizeof(struct xhci_pipe); 1367 sc->sc_bus2.ub_revision = USBREV_2_0; 1368 sc->sc_bus2.ub_usedma = true; 1369 sc->sc_bus2.ub_hcpriv = sc; 1370 sc->sc_bus2.ub_dmatag = sc->sc_bus.ub_dmatag; 1371 1372 caplength = xhci_read_1(sc, XHCI_CAPLENGTH); 1373 hciversion = xhci_read_2(sc, XHCI_HCIVERSION); 1374 1375 if (hciversion < XHCI_HCIVERSION_0_96 || 1376 hciversion >= 0x0200) { 1377 aprint_normal_dev(sc->sc_dev, 1378 "xHCI version %x.%x not known to be supported\n", 1379 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff); 1380 } else { 1381 aprint_verbose_dev(sc->sc_dev, "xHCI version %x.%x\n", 1382 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff); 1383 } 1384 1385 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0, caplength, 1386 &sc->sc_cbh) != 0) { 1387 aprint_error_dev(sc->sc_dev, "capability subregion failure\n"); 1388 return ENOMEM; 1389 } 1390 1391 hcs1 = xhci_cap_read_4(sc, XHCI_HCSPARAMS1); 1392 sc->sc_maxslots = XHCI_HCS1_MAXSLOTS(hcs1); 1393 sc->sc_maxintrs = XHCI_HCS1_MAXINTRS(hcs1); 1394 sc->sc_maxports = XHCI_HCS1_MAXPORTS(hcs1); 1395 hcs2 = xhci_cap_read_4(sc, XHCI_HCSPARAMS2); 1396 hcs3 = xhci_cap_read_4(sc, XHCI_HCSPARAMS3); 1397 aprint_debug_dev(sc->sc_dev, 1398 "hcs1=%"PRIx32" hcs2=%"PRIx32" hcs3=%"PRIx32"\n", hcs1, hcs2, hcs3); 1399 1400 sc->sc_hcc = xhci_cap_read_4(sc, XHCI_HCCPARAMS); 1401 sc->sc_ctxsz = XHCI_HCC_CSZ(sc->sc_hcc) ? 64 : 32; 1402 1403 char sbuf[128]; 1404 if (hciversion < XHCI_HCIVERSION_1_0) 1405 snprintb(sbuf, sizeof(sbuf), XHCI_HCCPREV1_BITS, sc->sc_hcc); 1406 else 1407 snprintb(sbuf, sizeof(sbuf), XHCI_HCCV1_x_BITS, sc->sc_hcc); 1408 aprint_debug_dev(sc->sc_dev, "hcc=%s\n", sbuf); 1409 aprint_debug_dev(sc->sc_dev, "xECP %" __PRIxBITS "\n", 1410 XHCI_HCC_XECP(sc->sc_hcc) * 4); 1411 if (hciversion >= XHCI_HCIVERSION_1_1) { 1412 sc->sc_hcc2 = xhci_cap_read_4(sc, XHCI_HCCPARAMS2); 1413 snprintb(sbuf, sizeof(sbuf), XHCI_HCC2_BITS, sc->sc_hcc2); 1414 aprint_debug_dev(sc->sc_dev, "hcc2=%s\n", sbuf); 1415 } 1416 1417 /* default all ports to bus 0, i.e. 
usb 3 */ 1418 sc->sc_ctlrportbus = kmem_zalloc( 1419 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY), KM_SLEEP); 1420 sc->sc_ctlrportmap = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP); 1421 1422 /* controller port to bus roothub port map */ 1423 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) { 1424 sc->sc_rhportmap[j] = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP); 1425 } 1426 1427 /* 1428 * Process all Extended Capabilities 1429 */ 1430 xhci_ecp(sc); 1431 1432 bsz = XHCI_PORTSC(sc->sc_maxports); 1433 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, caplength, bsz, 1434 &sc->sc_obh) != 0) { 1435 aprint_error_dev(sc->sc_dev, "operational subregion failure\n"); 1436 return ENOMEM; 1437 } 1438 1439 dboff = xhci_cap_read_4(sc, XHCI_DBOFF); 1440 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, dboff, 1441 sc->sc_maxslots * 4, &sc->sc_dbh) != 0) { 1442 aprint_error_dev(sc->sc_dev, "doorbell subregion failure\n"); 1443 return ENOMEM; 1444 } 1445 1446 rtsoff = xhci_cap_read_4(sc, XHCI_RTSOFF); 1447 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, rtsoff, 1448 sc->sc_maxintrs * 0x20, &sc->sc_rbh) != 0) { 1449 aprint_error_dev(sc->sc_dev, "runtime subregion failure\n"); 1450 return ENOMEM; 1451 } 1452 1453 int rv; 1454 rv = xhci_hc_reset(sc); 1455 if (rv != 0) { 1456 return rv; 1457 } 1458 1459 if (sc->sc_vendor_init) 1460 sc->sc_vendor_init(sc); 1461 1462 pagesize = xhci_op_read_4(sc, XHCI_PAGESIZE); 1463 aprint_debug_dev(sc->sc_dev, "PAGESIZE 0x%08x\n", pagesize); 1464 pagesize = ffs(pagesize); 1465 if (pagesize == 0) { 1466 aprint_error_dev(sc->sc_dev, "pagesize is 0\n"); 1467 return EIO; 1468 } 1469 sc->sc_pgsz = 1 << (12 + (pagesize - 1)); 1470 aprint_debug_dev(sc->sc_dev, "sc_pgsz 0x%08x\n", (uint32_t)sc->sc_pgsz); 1471 aprint_debug_dev(sc->sc_dev, "sc_maxslots 0x%08x\n", 1472 (uint32_t)sc->sc_maxslots); 1473 aprint_debug_dev(sc->sc_dev, "sc_maxports %d\n", sc->sc_maxports); 1474 1475 int err; 1476 sc->sc_maxspbuf = XHCI_HCS2_MAXSPBUF(hcs2); 1477 aprint_debug_dev(sc->sc_dev, "sc_maxspbuf %d\n", sc->sc_maxspbuf); 1478 if (sc->sc_maxspbuf != 0) { 1479 err = usb_allocmem(&sc->sc_bus, 1480 sizeof(uint64_t) * sc->sc_maxspbuf, sizeof(uint64_t), 1481 USBMALLOC_COHERENT | USBMALLOC_ZERO, 1482 &sc->sc_spbufarray_dma); 1483 if (err) { 1484 aprint_error_dev(sc->sc_dev, 1485 "spbufarray init fail, err %d\n", err); 1486 return ENOMEM; 1487 } 1488 1489 sc->sc_spbuf_dma = kmem_zalloc(sizeof(*sc->sc_spbuf_dma) * 1490 sc->sc_maxspbuf, KM_SLEEP); 1491 uint64_t *spbufarray = KERNADDR(&sc->sc_spbufarray_dma, 0); 1492 for (i = 0; i < sc->sc_maxspbuf; i++) { 1493 usb_dma_t * const dma = &sc->sc_spbuf_dma[i]; 1494 /* allocate contexts */ 1495 err = usb_allocmem(&sc->sc_bus, sc->sc_pgsz, 1496 sc->sc_pgsz, USBMALLOC_COHERENT | USBMALLOC_ZERO, 1497 dma); 1498 if (err) { 1499 aprint_error_dev(sc->sc_dev, 1500 "spbufarray_dma init fail, err %d\n", err); 1501 rv = ENOMEM; 1502 goto bad1; 1503 } 1504 spbufarray[i] = htole64(DMAADDR(dma, 0)); 1505 usb_syncmem(dma, 0, sc->sc_pgsz, 1506 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1507 } 1508 1509 usb_syncmem(&sc->sc_spbufarray_dma, 0, 1510 sizeof(uint64_t) * sc->sc_maxspbuf, BUS_DMASYNC_PREWRITE); 1511 } 1512 1513 config = xhci_op_read_4(sc, XHCI_CONFIG); 1514 config &= ~0xFF; 1515 config |= sc->sc_maxslots & 0xFF; 1516 xhci_op_write_4(sc, XHCI_CONFIG, config); 1517 1518 err = xhci_ring_init(sc, &sc->sc_cr, XHCI_COMMAND_RING_TRBS, 1519 XHCI_COMMAND_RING_SEGMENTS_ALIGN); 1520 if (err) { 1521 aprint_error_dev(sc->sc_dev, "command ring init fail, err 
%d\n",
		    err);
		rv = ENOMEM;
		goto bad1;
	}

	err = xhci_ring_init(sc, &sc->sc_er, XHCI_EVENT_RING_TRBS,
	    XHCI_EVENT_RING_SEGMENTS_ALIGN);
	if (err) {
		aprint_error_dev(sc->sc_dev, "event ring init fail, err %d\n",
		    err);
		rv = ENOMEM;
		goto bad2;
	}

	usb_dma_t *dma;
	size_t size;
	size_t align;

	dma = &sc->sc_eventst_dma;
	size = roundup2(XHCI_EVENT_RING_SEGMENTS * XHCI_ERSTE_SIZE,
	    XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN);
	KASSERTMSG(size <= (512 * 1024), "eventst size %zu too large", size);
	align = XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN;
	err = usb_allocmem(&sc->sc_bus, size, align,
	    USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
	if (err) {
		aprint_error_dev(sc->sc_dev, "eventst init fail, err %d\n",
		    err);
		rv = ENOMEM;
		goto bad3;
	}

	aprint_debug_dev(sc->sc_dev, "eventst: 0x%016jx %p %zx\n",
	    (uintmax_t)DMAADDR(&sc->sc_eventst_dma, 0),
	    KERNADDR(&sc->sc_eventst_dma, 0),
	    sc->sc_eventst_dma.udma_block->size);

	dma = &sc->sc_dcbaa_dma;
	size = (1 + sc->sc_maxslots) * sizeof(uint64_t);
	KASSERTMSG(size <= 2048, "dcbaa size %zu too large", size);
	align = XHCI_DEVICE_CONTEXT_BASE_ADDRESS_ARRAY_ALIGN;
	err = usb_allocmem(&sc->sc_bus, size, align,
	    USBMALLOC_COHERENT | USBMALLOC_ZERO, dma);
	if (err) {
		aprint_error_dev(sc->sc_dev, "dcbaa init fail, err %d\n", err);
		rv = ENOMEM;
		goto bad4;
	}
	aprint_debug_dev(sc->sc_dev, "dcbaa: 0x%016jx %p %zx\n",
	    (uintmax_t)DMAADDR(&sc->sc_dcbaa_dma, 0),
	    KERNADDR(&sc->sc_dcbaa_dma, 0),
	    sc->sc_dcbaa_dma.udma_block->size);

	if (sc->sc_maxspbuf != 0) {
		/*
		 * DCBAA entry 0 holds the scratchbuf array pointer.
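		 * Entries 1..MaxSlots hold the per-slot Device Context
		 * pointers, which this driver fills in via xhci_set_dcba()
		 * as device slots are set up.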
1578 */ 1579 *(uint64_t *)KERNADDR(dma, 0) = 1580 htole64(DMAADDR(&sc->sc_spbufarray_dma, 0)); 1581 usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE); 1582 } 1583 1584 sc->sc_slots = kmem_zalloc(sizeof(*sc->sc_slots) * sc->sc_maxslots, 1585 KM_SLEEP); 1586 if (sc->sc_slots == NULL) { 1587 aprint_error_dev(sc->sc_dev, "slots init fail, err %d\n", err); 1588 rv = ENOMEM; 1589 goto bad; 1590 } 1591 1592 sc->sc_xferpool = pool_cache_init(sizeof(struct xhci_xfer), 0, 0, 0, 1593 "xhcixfer", NULL, IPL_USB, NULL, NULL, NULL); 1594 if (sc->sc_xferpool == NULL) { 1595 aprint_error_dev(sc->sc_dev, "pool_cache init fail, err %d\n", 1596 err); 1597 rv = ENOMEM; 1598 goto bad; 1599 } 1600 1601 cv_init(&sc->sc_command_cv, "xhcicmd"); 1602 cv_init(&sc->sc_cmdbusy_cv, "xhcicmdq"); 1603 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB); 1604 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB); 1605 1606 struct xhci_erste *erst; 1607 erst = KERNADDR(&sc->sc_eventst_dma, 0); 1608 erst[0].erste_0 = htole64(xhci_ring_trbp(sc->sc_er, 0)); 1609 erst[0].erste_2 = htole32(sc->sc_er->xr_ntrb); 1610 erst[0].erste_3 = htole32(0); 1611 usb_syncmem(&sc->sc_eventst_dma, 0, 1612 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS, BUS_DMASYNC_PREWRITE); 1613 1614 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), XHCI_EVENT_RING_SEGMENTS); 1615 xhci_rt_write_8(sc, XHCI_ERSTBA(0), DMAADDR(&sc->sc_eventst_dma, 0)); 1616 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(sc->sc_er, 0) | 1617 XHCI_ERDP_BUSY); 1618 1619 xhci_op_write_8(sc, XHCI_DCBAAP, DMAADDR(&sc->sc_dcbaa_dma, 0)); 1620 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) | 1621 sc->sc_cr->xr_cs); 1622 1623 xhci_barrier(sc, BUS_SPACE_BARRIER_WRITE); 1624 1625 HEXDUMP("eventst", KERNADDR(&sc->sc_eventst_dma, 0), 1626 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS); 1627 1628 if ((sc->sc_quirks & XHCI_DEFERRED_START) == 0) 1629 xhci_start(sc); 1630 1631 return 0; 1632 1633 bad: 1634 if (sc->sc_xferpool) { 1635 pool_cache_destroy(sc->sc_xferpool); 1636 sc->sc_xferpool = NULL; 1637 } 1638 1639 if (sc->sc_slots) { 1640 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * 1641 sc->sc_maxslots); 1642 sc->sc_slots = NULL; 1643 } 1644 1645 usb_freemem(&sc->sc_bus, &sc->sc_dcbaa_dma); 1646 bad4: 1647 usb_freemem(&sc->sc_bus, &sc->sc_eventst_dma); 1648 bad3: 1649 xhci_ring_free(sc, &sc->sc_er); 1650 bad2: 1651 xhci_ring_free(sc, &sc->sc_cr); 1652 i = sc->sc_maxspbuf; 1653 bad1: 1654 for (int j = 0; j < i; j++) 1655 usb_freemem(&sc->sc_bus, &sc->sc_spbuf_dma[j]); 1656 usb_freemem(&sc->sc_bus, &sc->sc_spbufarray_dma); 1657 1658 return rv; 1659 } 1660 1661 static inline bool 1662 xhci_polling_p(struct xhci_softc * const sc) 1663 { 1664 return sc->sc_bus.ub_usepolling || sc->sc_bus2.ub_usepolling; 1665 } 1666 1667 int 1668 xhci_intr(void *v) 1669 { 1670 struct xhci_softc * const sc = v; 1671 int ret = 0; 1672 1673 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 1674 1675 if (sc == NULL) 1676 return 0; 1677 1678 mutex_spin_enter(&sc->sc_intr_lock); 1679 1680 if (sc->sc_dying || !device_has_power(sc->sc_dev)) 1681 goto done; 1682 1683 /* If we get an interrupt while polling, then just ignore it. */ 1684 if (xhci_polling_p(sc)) { 1685 #ifdef DIAGNOSTIC 1686 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0); 1687 #endif 1688 goto done; 1689 } 1690 1691 ret = xhci_intr1(sc); 1692 if (ret) { 1693 KASSERT(sc->sc_child || sc->sc_child2); 1694 1695 /* 1696 * One of child busses could be already detached. It doesn't 1697 * matter on which of the two the softintr is scheduled. 
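		 * xhci_softintr() drains the controller's single event ring
		 * either way, so completions for devices on both buses are
		 * handled.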
1698 */ 1699 if (sc->sc_child) 1700 usb_schedsoftintr(&sc->sc_bus); 1701 else 1702 usb_schedsoftintr(&sc->sc_bus2); 1703 } 1704 done: 1705 mutex_spin_exit(&sc->sc_intr_lock); 1706 return ret; 1707 } 1708 1709 int 1710 xhci_intr1(struct xhci_softc * const sc) 1711 { 1712 uint32_t usbsts; 1713 uint32_t iman; 1714 1715 XHCIHIST_FUNC(); 1716 1717 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1718 XHCIHIST_CALLARGS("USBSTS 0x%08jx", usbsts, 0, 0, 0); 1719 if ((usbsts & (XHCI_STS_HSE | XHCI_STS_EINT | XHCI_STS_PCD | 1720 XHCI_STS_HCE)) == 0) { 1721 DPRINTFN(16, "ignored intr not for %jd", 1722 device_unit(sc->sc_dev), 0, 0, 0); 1723 return 0; 1724 } 1725 1726 /* 1727 * Clear EINT and other transient flags, to not misenterpret 1728 * next shared interrupt. Also, to avoid race, EINT must be cleared 1729 * before XHCI_IMAN_INTR_PEND is cleared. 1730 */ 1731 xhci_op_write_4(sc, XHCI_USBSTS, usbsts & XHCI_STS_RSVDP0); 1732 1733 #ifdef XHCI_DEBUG 1734 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1735 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0); 1736 #endif 1737 1738 iman = xhci_rt_read_4(sc, XHCI_IMAN(0)); 1739 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0); 1740 iman |= XHCI_IMAN_INTR_PEND; 1741 xhci_rt_write_4(sc, XHCI_IMAN(0), iman); 1742 1743 #ifdef XHCI_DEBUG 1744 iman = xhci_rt_read_4(sc, XHCI_IMAN(0)); 1745 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0); 1746 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1747 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0); 1748 #endif 1749 1750 return 1; 1751 } 1752 1753 /* 1754 * 3 port speed types used in USB stack 1755 * 1756 * usbdi speed 1757 * definition: USB_SPEED_* in usb.h 1758 * They are used in struct usbd_device in USB stack. 1759 * ioctl interface uses these values too. 1760 * port_status speed 1761 * definition: UPS_*_SPEED in usb.h 1762 * They are used in usb_port_status_t and valid only for USB 2.0. 1763 * Speed value is always 0 for Super Speed or more, and dwExtPortStatus 1764 * of usb_port_status_ext_t indicates port speed. 1765 * Note that some 3.0 values overlap with 2.0 values. 1766 * (e.g. 0x200 means UPS_POER_POWER_SS in SS and 1767 * means UPS_LOW_SPEED in HS.) 1768 * port status returned from hub also uses these values. 1769 * On NetBSD UPS_OTHER_SPEED indicates port speed is super speed 1770 * or more. 1771 * xspeed: 1772 * definition: Protocol Speed ID (PSI) (xHCI 1.1 7.2.1) 1773 * They are used in only slot context and PORTSC reg of xhci. 1774 * The difference between usbdi speed and xspeed is 1775 * that FS and LS values are swapped. 1776 */ 1777 1778 /* convert usbdi speed to xspeed */ 1779 static int 1780 xhci_speed2xspeed(int speed) 1781 { 1782 switch (speed) { 1783 case USB_SPEED_LOW: return 2; 1784 case USB_SPEED_FULL: return 1; 1785 default: return speed; 1786 } 1787 } 1788 1789 #if 0 1790 /* convert xspeed to usbdi speed */ 1791 static int 1792 xhci_xspeed2speed(int xspeed) 1793 { 1794 switch (xspeed) { 1795 case 1: return USB_SPEED_FULL; 1796 case 2: return USB_SPEED_LOW; 1797 default: return xspeed; 1798 } 1799 } 1800 #endif 1801 1802 /* convert xspeed to port status speed */ 1803 static int 1804 xhci_xspeed2psspeed(int xspeed) 1805 { 1806 switch (xspeed) { 1807 case 0: return 0; 1808 case 1: return UPS_FULL_SPEED; 1809 case 2: return UPS_LOW_SPEED; 1810 case 3: return UPS_HIGH_SPEED; 1811 default: return UPS_OTHER_SPEED; 1812 } 1813 } 1814 1815 /* 1816 * Construct input contexts and issue TRB to open pipe. 
1817 */ 1818 static usbd_status 1819 xhci_configure_endpoint(struct usbd_pipe *pipe) 1820 { 1821 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1822 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1823 #ifdef USB_DEBUG 1824 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 1825 #endif 1826 struct xhci_soft_trb trb; 1827 usbd_status err; 1828 1829 XHCIHIST_FUNC(); 1830 XHCIHIST_CALLARGS("slot %ju dci %ju epaddr 0x%02jx attr 0x%02jx", 1831 xs->xs_idx, dci, pipe->up_endpoint->ue_edesc->bEndpointAddress, 1832 pipe->up_endpoint->ue_edesc->bmAttributes); 1833 1834 /* XXX ensure input context is available? */ 1835 1836 memset(xhci_slot_get_icv(sc, xs, 0), 0, sc->sc_pgsz); 1837 1838 /* set up context */ 1839 xhci_setup_ctx(pipe); 1840 1841 HEXDUMP("input control context", xhci_slot_get_icv(sc, xs, 0), 1842 sc->sc_ctxsz * 1); 1843 HEXDUMP("input endpoint context", xhci_slot_get_icv(sc, xs, 1844 xhci_dci_to_ici(dci)), sc->sc_ctxsz * 1); 1845 1846 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0); 1847 trb.trb_2 = 0; 1848 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1849 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP); 1850 1851 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 1852 1853 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 1854 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, dci), 1855 sc->sc_ctxsz * 1); 1856 1857 return err; 1858 } 1859 1860 #if 0 1861 static usbd_status 1862 xhci_unconfigure_endpoint(struct usbd_pipe *pipe) 1863 { 1864 #ifdef USB_DEBUG 1865 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1866 #endif 1867 1868 XHCIHIST_FUNC(); 1869 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0); 1870 1871 return USBD_NORMAL_COMPLETION; 1872 } 1873 #endif 1874 1875 /* 4.6.8, 6.4.3.7 */ 1876 static usbd_status 1877 xhci_reset_endpoint_locked(struct usbd_pipe *pipe) 1878 { 1879 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1880 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1881 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 1882 struct xhci_soft_trb trb; 1883 usbd_status err; 1884 1885 XHCIHIST_FUNC(); 1886 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 1887 1888 KASSERT(mutex_owned(&sc->sc_lock)); 1889 1890 trb.trb_0 = 0; 1891 trb.trb_2 = 0; 1892 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1893 XHCI_TRB_3_EP_SET(dci) | 1894 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP); 1895 1896 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 1897 1898 return err; 1899 } 1900 1901 static usbd_status 1902 xhci_reset_endpoint(struct usbd_pipe *pipe) 1903 { 1904 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1905 1906 mutex_enter(&sc->sc_lock); 1907 usbd_status ret = xhci_reset_endpoint_locked(pipe); 1908 mutex_exit(&sc->sc_lock); 1909 1910 return ret; 1911 } 1912 1913 /* 1914 * 4.6.9, 6.4.3.8 1915 * Stop execution of TDs on xfer ring. 1916 * Should be called with sc_lock held. 
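 * xhci_suspend() passes XHCI_TRB_3_SUSP_EP_BIT in trb3flags so that the
 * endpoint is left Suspended rather than Stopped; xhci_stop_endpoint()
 * passes 0.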
1917 */
1918 static usbd_status
1919 xhci_stop_endpoint_cmd(struct xhci_softc *sc, struct xhci_slot *xs, u_int dci,
1920 uint32_t trb3flags)
1921 {
1922 struct xhci_soft_trb trb;
1923 usbd_status err;
1924
1925 XHCIHIST_FUNC();
1926 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1927
1928 KASSERT(mutex_owned(&sc->sc_lock));
1929
1930 trb.trb_0 = 0;
1931 trb.trb_2 = 0;
1932 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1933 XHCI_TRB_3_EP_SET(dci) |
1934 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP) |
1935 trb3flags;
1936
1937 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1938
1939 return err;
1940 }
1941
1942 static usbd_status
1943 xhci_stop_endpoint(struct usbd_pipe *pipe)
1944 {
1945 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1946 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1947 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1948
1949 XHCIHIST_FUNC();
1950 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1951
1952 KASSERT(mutex_owned(&sc->sc_lock));
1953
1954 return xhci_stop_endpoint_cmd(sc, xs, dci, 0);
1955 }
1956
1957 /*
1958 * Set TR Dequeue Pointer.
1959 * xHCI 1.1 4.6.10 6.4.3.9
1960 * Purge all of the TRBs on the ring and reinitialize the ring.
1961 * Set the TR Dequeue Pointer to 0 and the Cycle State to 1.
1962 * The EPSTATE of the endpoint must be ERROR or STOPPED, otherwise a
1963 * CONTEXT_STATE error will be generated.
1964 */
1965 static usbd_status
1966 xhci_set_dequeue_locked(struct usbd_pipe *pipe)
1967 {
1968 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1969 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv;
1970 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc);
1971 struct xhci_ring * const xr = xs->xs_xr[dci];
1972 struct xhci_soft_trb trb;
1973 usbd_status err;
1974
1975 XHCIHIST_FUNC();
1976 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0);
1977
1978 KASSERT(mutex_owned(&sc->sc_lock));
1979 KASSERT(xr != NULL);
1980
1981 xhci_host_dequeue(xr);
1982
1983 /* set DCS */
1984 trb.trb_0 = xhci_ring_trbp(xr, 0) | 1; /* XXX */
1985 trb.trb_2 = 0;
1986 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) |
1987 XHCI_TRB_3_EP_SET(dci) |
1988 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE);
1989
1990 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT);
1991
1992 return err;
1993 }
1994
1995 static usbd_status
1996 xhci_set_dequeue(struct usbd_pipe *pipe)
1997 {
1998 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
1999
2000 mutex_enter(&sc->sc_lock);
2001 usbd_status ret = xhci_set_dequeue_locked(pipe);
2002 mutex_exit(&sc->sc_lock);
2003
2004 return ret;
2005 }
2006
2007 /*
2008 * Open a new pipe: called from usbd_setup_pipe_flags.
2009 * Fills in the pipe methods.
2010 * If the pipe is not for ep0, calls xhci_configure_endpoint.
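 * The per-endpoint transfer ring is indexed by DCI: for example, endpoint
 * address 0x81 (IN ep 1) maps to DCI 3 and address 0x01 (OUT ep 1) to
 * DCI 2, while the default control endpoint always uses DCI 1.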
2011 */ 2012 static usbd_status 2013 xhci_open(struct usbd_pipe *pipe) 2014 { 2015 struct usbd_device * const dev = pipe->up_dev; 2016 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe; 2017 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus); 2018 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2019 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 2020 const u_int dci = xhci_ep_get_dci(ed); 2021 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 2022 usbd_status err; 2023 2024 XHCIHIST_FUNC(); 2025 XHCIHIST_CALLARGS("addr %jd depth %jd port %jd speed %jd", dev->ud_addr, 2026 dev->ud_depth, dev->ud_powersrc->up_portno, dev->ud_speed); 2027 DPRINTFN(1, " dci %ju type 0x%02jx epaddr 0x%02jx attr 0x%02jx", 2028 xhci_ep_get_dci(ed), ed->bDescriptorType, ed->bEndpointAddress, 2029 ed->bmAttributes); 2030 DPRINTFN(1, " mps %ju ival %ju", UGETW(ed->wMaxPacketSize), 2031 ed->bInterval, 0, 0); 2032 2033 if (sc->sc_dying) 2034 return USBD_IOERROR; 2035 2036 /* Root Hub */ 2037 if (dev->ud_depth == 0 && dev->ud_powersrc->up_portno == 0) { 2038 switch (ed->bEndpointAddress) { 2039 case USB_CONTROL_ENDPOINT: 2040 pipe->up_methods = &roothub_ctrl_methods; 2041 break; 2042 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT: 2043 pipe->up_methods = &xhci_root_intr_methods; 2044 break; 2045 default: 2046 pipe->up_methods = NULL; 2047 DPRINTFN(0, "bad bEndpointAddress 0x%02jx", 2048 ed->bEndpointAddress, 0, 0, 0); 2049 return USBD_INVAL; 2050 } 2051 return USBD_NORMAL_COMPLETION; 2052 } 2053 2054 switch (xfertype) { 2055 case UE_CONTROL: 2056 pipe->up_methods = &xhci_device_ctrl_methods; 2057 break; 2058 case UE_ISOCHRONOUS: 2059 pipe->up_methods = &xhci_device_isoc_methods; 2060 pipe->up_serialise = false; 2061 xpipe->xp_isoc_next = -1; 2062 break; 2063 case UE_BULK: 2064 pipe->up_methods = &xhci_device_bulk_methods; 2065 break; 2066 case UE_INTERRUPT: 2067 pipe->up_methods = &xhci_device_intr_methods; 2068 break; 2069 default: 2070 return USBD_IOERROR; 2071 break; 2072 } 2073 2074 KASSERT(xs != NULL); 2075 KASSERT(xs->xs_xr[dci] == NULL); 2076 2077 /* allocate transfer ring */ 2078 err = xhci_ring_init(sc, &xs->xs_xr[dci], XHCI_TRANSFER_RING_TRBS, 2079 XHCI_TRB_ALIGN); 2080 if (err) { 2081 DPRINTFN(1, "ring alloc failed %jd", err, 0, 0, 0); 2082 return err; 2083 } 2084 2085 if (ed->bEndpointAddress != USB_CONTROL_ENDPOINT) 2086 return xhci_configure_endpoint(pipe); 2087 2088 return USBD_NORMAL_COMPLETION; 2089 } 2090 2091 /* 2092 * Closes pipe, called from usbd_kill_pipe via close methods. 2093 * If the endpoint to be closed is ep0, disable_slot. 2094 * Should be called with sc_lock held. 
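 * For any other endpoint, the endpoint is stopped if necessary and then
 * dropped from the device context by a Configure Endpoint command whose
 * input control context sets only the matching drop (D) flag.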
2095 */ 2096 static void 2097 xhci_close_pipe(struct usbd_pipe *pipe) 2098 { 2099 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 2100 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 2101 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 2102 const u_int dci = xhci_ep_get_dci(ed); 2103 struct xhci_soft_trb trb; 2104 uint32_t *cp; 2105 2106 XHCIHIST_FUNC(); 2107 2108 if (sc->sc_dying) 2109 return; 2110 2111 /* xs is uninitialized before xhci_init_slot */ 2112 if (xs == NULL || xs->xs_idx == 0) 2113 return; 2114 2115 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju", 2116 (uintptr_t)pipe, xs->xs_idx, dci, 0); 2117 2118 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx"); 2119 KASSERT(mutex_owned(&sc->sc_lock)); 2120 2121 if (pipe->up_dev->ud_depth == 0) 2122 return; 2123 2124 if (dci == XHCI_DCI_EP_CONTROL) { 2125 DPRINTFN(4, "closing ep0", 0, 0, 0, 0); 2126 /* This frees all rings */ 2127 xhci_disable_slot(sc, xs->xs_idx); 2128 return; 2129 } 2130 2131 if (xhci_get_epstate(sc, xs, dci) != XHCI_EPSTATE_STOPPED) 2132 (void)xhci_stop_endpoint(pipe); 2133 2134 /* 2135 * set appropriate bit to be dropped. 2136 * don't set DC bit to 1, otherwise all endpoints 2137 * would be deconfigured. 2138 */ 2139 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL); 2140 cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci)); 2141 cp[1] = htole32(0); 2142 2143 /* XXX should be most significant one, not dci? */ 2144 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT)); 2145 cp[0] = htole32(XHCI_SCTX_0_CTX_NUM_SET(dci)); 2146 2147 /* configure ep context performs an implicit dequeue */ 2148 xhci_host_dequeue(xs->xs_xr[dci]); 2149 2150 /* sync input contexts before they are read from memory */ 2151 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE); 2152 2153 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0); 2154 trb.trb_2 = 0; 2155 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 2156 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP); 2157 2158 (void)xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 2159 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 2160 2161 xhci_ring_free(sc, &xs->xs_xr[dci]); 2162 } 2163 2164 /* 2165 * Abort transfer. 2166 * Should be called with sc_lock held. 2167 */ 2168 static void 2169 xhci_abortx(struct usbd_xfer *xfer) 2170 { 2171 XHCIHIST_FUNC(); 2172 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 2173 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 2174 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 2175 2176 XHCIHIST_CALLARGS("xfer %#jx pipe %#jx", 2177 (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0); 2178 2179 KASSERT(mutex_owned(&sc->sc_lock)); 2180 ASSERT_SLEEPABLE(); 2181 2182 KASSERTMSG((xfer->ux_status == USBD_CANCELLED || 2183 xfer->ux_status == USBD_TIMEOUT), 2184 "bad abort status: %d", xfer->ux_status); 2185 2186 /* 2187 * If we're dying, skip the hardware action and just notify the 2188 * software that we're done. 2189 */ 2190 if (sc->sc_dying) { 2191 DPRINTFN(4, "xfer %#jx dying %ju", (uintptr_t)xfer, 2192 xfer->ux_status, 0, 0); 2193 goto dying; 2194 } 2195 2196 /* 2197 * HC Step 1: Stop execution of TD on the ring. 
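 * Which command that takes depends on the endpoint state read below:
 * a Halted endpoint is cleared with Reset Endpoint, an already Stopped
 * endpoint needs no command, and anything else gets Stop Endpoint.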
2198 */ 2199 switch (xhci_get_epstate(sc, xs, dci)) { 2200 case XHCI_EPSTATE_HALTED: 2201 (void)xhci_reset_endpoint_locked(xfer->ux_pipe); 2202 break; 2203 case XHCI_EPSTATE_STOPPED: 2204 break; 2205 default: 2206 (void)xhci_stop_endpoint(xfer->ux_pipe); 2207 break; 2208 } 2209 #ifdef DIAGNOSTIC 2210 uint32_t epst = xhci_get_epstate(sc, xs, dci); 2211 if (epst != XHCI_EPSTATE_STOPPED) 2212 DPRINTFN(4, "dci %ju not stopped %ju", dci, epst, 0, 0); 2213 #endif 2214 2215 /* 2216 * HC Step 2: Remove any vestiges of the xfer from the ring. 2217 */ 2218 xhci_set_dequeue_locked(xfer->ux_pipe); 2219 2220 /* 2221 * Final Step: Notify completion to waiting xfers. 2222 */ 2223 dying: 2224 usb_transfer_complete(xfer); 2225 DPRINTFN(14, "end", 0, 0, 0, 0); 2226 2227 KASSERT(mutex_owned(&sc->sc_lock)); 2228 } 2229 2230 static void 2231 xhci_host_dequeue(struct xhci_ring * const xr) 2232 { 2233 /* When dequeueing the controller, update our struct copy too */ 2234 memset(xr->xr_trb, 0, xr->xr_ntrb * XHCI_TRB_SIZE); 2235 usb_syncmem(&xr->xr_dma, 0, xr->xr_ntrb * XHCI_TRB_SIZE, 2236 BUS_DMASYNC_PREWRITE); 2237 memset(xr->xr_cookies, 0, xr->xr_ntrb * sizeof(*xr->xr_cookies)); 2238 2239 xr->xr_ep = 0; 2240 xr->xr_cs = 1; 2241 } 2242 2243 /* 2244 * Recover STALLed endpoint. 2245 * xHCI 1.1 sect 4.10.2.1 2246 * Issue RESET_EP to recover halt condition and SET_TR_DEQUEUE to remove 2247 * all transfers on transfer ring. 2248 * These are done in thread context asynchronously. 2249 */ 2250 static void 2251 xhci_clear_endpoint_stall_async_task(void *cookie) 2252 { 2253 struct usbd_xfer * const xfer = cookie; 2254 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 2255 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 2256 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 2257 struct xhci_ring * const tr = xs->xs_xr[dci]; 2258 2259 XHCIHIST_FUNC(); 2260 XHCIHIST_CALLARGS("xfer %#jx slot %ju dci %ju", (uintptr_t)xfer, xs->xs_idx, 2261 dci, 0); 2262 2263 /* 2264 * XXXMRG: Stall task can run after slot is disabled when yanked. 2265 * This hack notices that the xs has been memset() in 2266 * xhci_disable_slot() and returns. Both xhci_reset_endpoint() 2267 * and xhci_set_dequeue() rely upon a valid ring setup for correct 2268 * operation, and the latter will fault, as would 2269 * usb_transfer_complete() if it got that far. 2270 */ 2271 if (xs->xs_idx == 0) { 2272 DPRINTFN(4, "ends xs_idx is 0", 0, 0, 0, 0); 2273 return; 2274 } 2275 2276 KASSERT(tr != NULL); 2277 2278 xhci_reset_endpoint(xfer->ux_pipe); 2279 xhci_set_dequeue(xfer->ux_pipe); 2280 2281 mutex_enter(&sc->sc_lock); 2282 tr->is_halted = false; 2283 usb_transfer_complete(xfer); 2284 mutex_exit(&sc->sc_lock); 2285 DPRINTFN(4, "ends", 0, 0, 0, 0); 2286 } 2287 2288 static usbd_status 2289 xhci_clear_endpoint_stall_async(struct usbd_xfer *xfer) 2290 { 2291 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 2292 struct xhci_pipe * const xp = (struct xhci_pipe *)xfer->ux_pipe; 2293 2294 XHCIHIST_FUNC(); 2295 XHCIHIST_CALLARGS("xfer %#jx", (uintptr_t)xfer, 0, 0, 0); 2296 2297 if (sc->sc_dying) { 2298 return USBD_IOERROR; 2299 } 2300 2301 usb_init_task(&xp->xp_async_task, 2302 xhci_clear_endpoint_stall_async_task, xfer, USB_TASKQ_MPSAFE); 2303 usb_add_task(xfer->ux_pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC); 2304 DPRINTFN(4, "ends", 0, 0, 0, 0); 2305 2306 return USBD_NORMAL_COMPLETION; 2307 } 2308 2309 /* Process roothub port status/change events and notify to uhub_intr. 
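 * The change is delivered by setting the port's bit in the pending root
 * intr xfer's bitmap, in the same form a status change endpoint of an
 * external hub would report it.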
*/ 2310 static void 2311 xhci_rhpsc(struct xhci_softc * const sc, u_int ctlrport) 2312 { 2313 XHCIHIST_FUNC(); 2314 XHCIHIST_CALLARGS("xhci%jd: port %ju status change", 2315 device_unit(sc->sc_dev), ctlrport, 0, 0); 2316 2317 if (ctlrport > sc->sc_maxports) 2318 return; 2319 2320 const size_t bn = xhci_ctlrport2bus(sc, ctlrport); 2321 const size_t rhp = xhci_ctlrport2rhport(sc, ctlrport); 2322 struct usbd_xfer * const xfer = sc->sc_intrxfer[bn]; 2323 2324 DPRINTFN(4, "xhci%jd: bus %jd bp %ju xfer %#jx status change", 2325 device_unit(sc->sc_dev), bn, rhp, (uintptr_t)xfer); 2326 2327 if (xfer == NULL) 2328 return; 2329 KASSERT(xfer->ux_status == USBD_IN_PROGRESS); 2330 2331 uint8_t *p = xfer->ux_buf; 2332 memset(p, 0, xfer->ux_length); 2333 p[rhp / NBBY] |= 1 << (rhp % NBBY); 2334 xfer->ux_actlen = xfer->ux_length; 2335 xfer->ux_status = USBD_NORMAL_COMPLETION; 2336 usb_transfer_complete(xfer); 2337 } 2338 2339 /* Process Transfer Events */ 2340 static void 2341 xhci_event_transfer(struct xhci_softc * const sc, 2342 const struct xhci_trb * const trb) 2343 { 2344 uint64_t trb_0; 2345 uint32_t trb_2, trb_3; 2346 uint8_t trbcode; 2347 u_int slot, dci; 2348 struct xhci_slot *xs; 2349 struct xhci_ring *xr; 2350 struct xhci_xfer *xx; 2351 struct usbd_xfer *xfer; 2352 usbd_status err; 2353 2354 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2355 2356 trb_0 = le64toh(trb->trb_0); 2357 trb_2 = le32toh(trb->trb_2); 2358 trb_3 = le32toh(trb->trb_3); 2359 trbcode = XHCI_TRB_2_ERROR_GET(trb_2); 2360 slot = XHCI_TRB_3_SLOT_GET(trb_3); 2361 dci = XHCI_TRB_3_EP_GET(trb_3); 2362 xs = &sc->sc_slots[slot]; 2363 xr = xs->xs_xr[dci]; 2364 2365 /* sanity check */ 2366 KASSERT(xr != NULL); 2367 KASSERTMSG(xs->xs_idx != 0 && xs->xs_idx <= sc->sc_maxslots, 2368 "invalid xs_idx %u slot %u", xs->xs_idx, slot); 2369 2370 int idx = 0; 2371 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) { 2372 if (xhci_trb_get_idx(xr, trb_0, &idx)) { 2373 DPRINTFN(0, "invalid trb_0 %#jx", trb_0, 0, 0, 0); 2374 return; 2375 } 2376 xx = xr->xr_cookies[idx]; 2377 2378 /* clear cookie of consumed TRB */ 2379 xr->xr_cookies[idx] = NULL; 2380 2381 /* 2382 * xx is NULL if pipe is opened but xfer is not started. 2383 * It happens when stopping idle pipe. 2384 */ 2385 if (xx == NULL || trbcode == XHCI_TRB_ERROR_LENGTH) { 2386 DPRINTFN(1, "Ignore #%ju: cookie %#jx cc %ju dci %ju", 2387 idx, (uintptr_t)xx, trbcode, dci); 2388 DPRINTFN(1, " orig TRB %#jx type %ju", trb_0, 2389 XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)), 2390 0, 0); 2391 return; 2392 } 2393 } else { 2394 /* When ED != 0, trb_0 is virtual addr of struct xhci_xfer. 
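 * The low bits of that address carry the control-transfer stage tag set
 * by xhci_device_ctrl_start(), so they are masked off before the pointer
 * is used.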
*/
2395 xx = (void *)(uintptr_t)(trb_0 & ~0x3);
2396 }
2397 /* XXX this may not happen */
2398 if (xx == NULL) {
2399 DPRINTFN(1, "xfer done: xx is NULL", 0, 0, 0, 0);
2400 return;
2401 }
2402 xfer = &xx->xx_xfer;
2403 /* XXX this may happen when detaching */
2404 if (xfer == NULL) {
2405 DPRINTFN(1, "xx(%#jx)->xx_xfer is NULL trb_0 %#jx",
2406 (uintptr_t)xx, trb_0, 0, 0);
2407 return;
2408 }
2409 DPRINTFN(14, "xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
2410 /* XXX I dunno why this happens */
2411 KASSERTMSG(xfer->ux_pipe != NULL, "xfer(%p)->ux_pipe is NULL", xfer);
2412
2413 if (!xfer->ux_pipe->up_repeat &&
2414 SIMPLEQ_EMPTY(&xfer->ux_pipe->up_queue)) {
2415 DPRINTFN(1, "xfer(%#jx)->pipe not queued", (uintptr_t)xfer,
2416 0, 0, 0);
2417 return;
2418 }
2419
2420 const uint8_t xfertype =
2421 UE_GET_XFERTYPE(xfer->ux_pipe->up_endpoint->ue_edesc->bmAttributes);
2422
2423 /* 4.11.5.2 Event Data TRB */
2424 if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) {
2425 DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx"
2426 " %02jx", trb_0, XHCI_TRB_2_REM_GET(trb_2), trbcode, 0);
2427 if ((trb_0 & 0x3) == 0x3) {
2428 xfer->ux_actlen = XHCI_TRB_2_REM_GET(trb_2);
2429 }
2430 }
2431
2432 switch (trbcode) {
2433 case XHCI_TRB_ERROR_SHORT_PKT:
2434 case XHCI_TRB_ERROR_SUCCESS:
2435 /*
2436 * A ctrl transfer can generate two events if it has a Data
2437 * stage. A short data stage can be OK and should not
2438 * complete the transfer as the status stage needs to be
2439 * performed.
2440 *
2441 * Note: Data and Status stage events point at the same xfer.
2442 * ux_actlen and ux_dmabuf will be passed to
2443 * usb_transfer_complete after the Status stage event.
2444 *
2445 * It can be distinguished which stage generated the event:
2446 * + by checking the low bits of trb_0 if ED==1.
2447 * (see xhci_device_ctrl_start).
2448 * + by checking the type of the original TRB if ED==0.
2449 *
2450 * In addition, intr, bulk, and isoc transfers currently
2451 * consist of a single TD, so the "skip" is not needed.
2452 * ctrl xfers use EVENT_DATA, and others do not.
2453 * Thus the driver can switch the flow by checking the ED bit.
2454 */
2455 if (xfertype == UE_ISOCHRONOUS) {
2456 xfer->ux_frlengths[xx->xx_isoc_done] -=
2457 XHCI_TRB_2_REM_GET(trb_2);
2458 xfer->ux_actlen += xfer->ux_frlengths[xx->xx_isoc_done];
2459 if (++xx->xx_isoc_done < xfer->ux_nframes)
2460 return;
2461 } else
2462 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2463 if (xfer->ux_actlen == 0)
2464 xfer->ux_actlen = xfer->ux_length -
2465 XHCI_TRB_2_REM_GET(trb_2);
2466 if (XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3))
2467 == XHCI_TRB_TYPE_DATA_STAGE) {
2468 return;
2469 }
2470 } else if ((trb_0 & 0x3) == 0x3) {
2471 return;
2472 }
2473 err = USBD_NORMAL_COMPLETION;
2474 break;
2475 case XHCI_TRB_ERROR_STOPPED:
2476 case XHCI_TRB_ERROR_LENGTH:
2477 case XHCI_TRB_ERROR_STOPPED_SHORT:
2478 err = USBD_IOERROR;
2479 break;
2480 case XHCI_TRB_ERROR_STALL:
2481 case XHCI_TRB_ERROR_BABBLE:
2482 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2483 xr->is_halted = true;
2484 /*
2485 * Try to claim this xfer for completion. If it has already
2486 * completed or aborted, drop it on the floor.
2487 */
2488 if (!usbd_xfer_trycomplete(xfer))
2489 return;
2490
2491 /*
2492 * Stalled endpoints can be recovered by issuing
2493 * a command TRB of TYPE_RESET_EP on the xHCI instead of
2494 * issuing a CLEAR_FEATURE UF_ENDPOINT_HALT request
2495 * on the endpoint. However, this function may be
2496 * called from softint context (e.g.
from umass), 2497 * in that case driver gets KASSERT in cv_timedwait 2498 * in xhci_do_command. 2499 * To avoid this, this runs reset_endpoint and 2500 * usb_transfer_complete in usb task thread 2501 * asynchronously (and then umass issues clear 2502 * UF_ENDPOINT_HALT). 2503 */ 2504 2505 /* Override the status. */ 2506 xfer->ux_status = USBD_STALLED; 2507 2508 xhci_clear_endpoint_stall_async(xfer); 2509 return; 2510 default: 2511 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0); 2512 err = USBD_IOERROR; 2513 break; 2514 } 2515 2516 /* 2517 * Try to claim this xfer for completion. If it has already 2518 * completed or aborted, drop it on the floor. 2519 */ 2520 if (!usbd_xfer_trycomplete(xfer)) 2521 return; 2522 2523 /* Set the status. */ 2524 xfer->ux_status = err; 2525 2526 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 || 2527 (trb_0 & 0x3) == 0x0) { 2528 usb_transfer_complete(xfer); 2529 } 2530 } 2531 2532 /* Process Command complete events */ 2533 static void 2534 xhci_event_cmd(struct xhci_softc * const sc, const struct xhci_trb * const trb) 2535 { 2536 uint64_t trb_0; 2537 uint32_t trb_2, trb_3; 2538 2539 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2540 2541 KASSERT(mutex_owned(&sc->sc_lock)); 2542 2543 trb_0 = le64toh(trb->trb_0); 2544 trb_2 = le32toh(trb->trb_2); 2545 trb_3 = le32toh(trb->trb_3); 2546 2547 if (trb_0 == sc->sc_command_addr) { 2548 sc->sc_resultpending = false; 2549 2550 sc->sc_result_trb.trb_0 = trb_0; 2551 sc->sc_result_trb.trb_2 = trb_2; 2552 sc->sc_result_trb.trb_3 = trb_3; 2553 if (XHCI_TRB_2_ERROR_GET(trb_2) != 2554 XHCI_TRB_ERROR_SUCCESS) { 2555 DPRINTFN(1, "command completion " 2556 "failure: 0x%016jx 0x%08jx 0x%08jx", 2557 trb_0, trb_2, trb_3, 0); 2558 } 2559 cv_signal(&sc->sc_command_cv); 2560 } else { 2561 DPRINTFN(1, "spurious event: %#jx 0x%016jx " 2562 "0x%08jx 0x%08jx", (uintptr_t)trb, trb_0, trb_2, trb_3); 2563 } 2564 } 2565 2566 /* 2567 * Process events. 2568 * called from xhci_softintr 2569 */ 2570 static void 2571 xhci_handle_event(struct xhci_softc * const sc, 2572 const struct xhci_trb * const trb) 2573 { 2574 uint64_t trb_0; 2575 uint32_t trb_2, trb_3; 2576 2577 XHCIHIST_FUNC(); 2578 2579 trb_0 = le64toh(trb->trb_0); 2580 trb_2 = le32toh(trb->trb_2); 2581 trb_3 = le32toh(trb->trb_3); 2582 2583 XHCIHIST_CALLARGS("event: %#jx 0x%016jx 0x%08jx 0x%08jx", 2584 (uintptr_t)trb, trb_0, trb_2, trb_3); 2585 2586 /* 2587 * 4.11.3.1, 6.4.2.1 2588 * TRB Pointer is invalid for these completion codes. 
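 * (Ring Underrun, Ring Overrun and VF Event Ring Full; these are filtered
 * out by the switch below before the pointer is interpreted.)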
2589 */ 2590 switch (XHCI_TRB_2_ERROR_GET(trb_2)) { 2591 case XHCI_TRB_ERROR_RING_UNDERRUN: 2592 case XHCI_TRB_ERROR_RING_OVERRUN: 2593 case XHCI_TRB_ERROR_VF_RING_FULL: 2594 return; 2595 default: 2596 if (trb_0 == 0) { 2597 return; 2598 } 2599 break; 2600 } 2601 2602 switch (XHCI_TRB_3_TYPE_GET(trb_3)) { 2603 case XHCI_TRB_EVENT_TRANSFER: 2604 xhci_event_transfer(sc, trb); 2605 break; 2606 case XHCI_TRB_EVENT_CMD_COMPLETE: 2607 xhci_event_cmd(sc, trb); 2608 break; 2609 case XHCI_TRB_EVENT_PORT_STS_CHANGE: 2610 xhci_rhpsc(sc, (uint32_t)((trb_0 >> 24) & 0xff)); 2611 break; 2612 default: 2613 break; 2614 } 2615 } 2616 2617 static void 2618 xhci_softintr(void *v) 2619 { 2620 struct usbd_bus * const bus = v; 2621 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2622 struct xhci_ring * const er = sc->sc_er; 2623 struct xhci_trb *trb; 2624 int i, j, k; 2625 2626 XHCIHIST_FUNC(); 2627 2628 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 2629 2630 i = er->xr_ep; 2631 j = er->xr_cs; 2632 2633 XHCIHIST_CALLARGS("er: xr_ep %jd xr_cs %jd", i, j, 0, 0); 2634 2635 while (1) { 2636 usb_syncmem(&er->xr_dma, XHCI_TRB_SIZE * i, XHCI_TRB_SIZE, 2637 BUS_DMASYNC_POSTREAD); 2638 trb = &er->xr_trb[i]; 2639 k = (le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0; 2640 2641 if (j != k) 2642 break; 2643 2644 xhci_handle_event(sc, trb); 2645 2646 i++; 2647 if (i == er->xr_ntrb) { 2648 i = 0; 2649 j ^= 1; 2650 } 2651 } 2652 2653 er->xr_ep = i; 2654 er->xr_cs = j; 2655 2656 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(er, er->xr_ep) | 2657 XHCI_ERDP_BUSY); 2658 2659 DPRINTFN(16, "ends", 0, 0, 0, 0); 2660 2661 return; 2662 } 2663 2664 static void 2665 xhci_poll(struct usbd_bus *bus) 2666 { 2667 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2668 2669 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2670 2671 mutex_enter(&sc->sc_intr_lock); 2672 int ret = xhci_intr1(sc); 2673 if (ret) { 2674 xhci_softintr(bus); 2675 } 2676 mutex_exit(&sc->sc_intr_lock); 2677 2678 return; 2679 } 2680 2681 static struct usbd_xfer * 2682 xhci_allocx(struct usbd_bus *bus, unsigned int nframes) 2683 { 2684 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2685 struct xhci_xfer *xx; 2686 u_int ntrbs; 2687 2688 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2689 2690 ntrbs = uimax(3, nframes); 2691 const size_t trbsz = sizeof(*xx->xx_trb) * ntrbs; 2692 2693 xx = pool_cache_get(sc->sc_xferpool, PR_WAITOK); 2694 if (xx != NULL) { 2695 memset(xx, 0, sizeof(*xx)); 2696 if (ntrbs > 0) { 2697 xx->xx_trb = kmem_alloc(trbsz, KM_SLEEP); 2698 xx->xx_ntrb = ntrbs; 2699 } 2700 #ifdef DIAGNOSTIC 2701 xx->xx_xfer.ux_state = XFER_BUSY; 2702 #endif 2703 } 2704 2705 return &xx->xx_xfer; 2706 } 2707 2708 static void 2709 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer) 2710 { 2711 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2712 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 2713 2714 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2715 2716 #ifdef DIAGNOSTIC 2717 if (xfer->ux_state != XFER_BUSY && 2718 xfer->ux_status != USBD_NOT_STARTED) { 2719 DPRINTFN(0, "xfer=%#jx not busy, 0x%08jx", 2720 (uintptr_t)xfer, xfer->ux_state, 0, 0); 2721 } 2722 xfer->ux_state = XFER_FREE; 2723 #endif 2724 if (xx->xx_ntrb > 0) { 2725 kmem_free(xx->xx_trb, xx->xx_ntrb * sizeof(*xx->xx_trb)); 2726 xx->xx_trb = NULL; 2727 xx->xx_ntrb = 0; 2728 } 2729 pool_cache_put(sc->sc_xferpool, xx); 2730 } 2731 2732 static bool 2733 xhci_dying(struct usbd_bus *bus) 2734 { 2735 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2736 2737 return sc->sc_dying; 2738 } 2739 2740 static void 2741 
xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock)
2742 {
2743 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2744
2745 *lock = &sc->sc_lock;
2746 }
2747
2748 extern uint32_t usb_cookie_no;
2749
2750 /*
2751 * xHCI 4.3
2752 * Called when uhub_explore finds a new device (via usbd_new_device).
2753 * Port initialization and speed detection (4.3.1) are already done in uhub.c.
2754 * This function does:
2755 * Allocate and construct the dev structure and its default endpoint (ep0).
2756 * Allocate and open the pipe of ep0.
2757 * Enable the slot and initialize the slot context.
2758 * Set Address.
2759 * Read the initial device descriptor.
2760 * Determine the initial MaxPacketSize (mps) by speed.
2761 * Read the full device descriptor.
2762 * Register this device.
2763 * Finally the device state transitions to ADDRESSED.
2764 */
2765 static usbd_status
2766 xhci_new_device(device_t parent, struct usbd_bus *bus, int depth,
2767 int speed, int port, struct usbd_port *up)
2768 {
2769 struct xhci_softc * const sc = XHCI_BUS2SC(bus);
2770 struct usbd_device *dev;
2771 usbd_status err;
2772 usb_device_descriptor_t *dd;
2773 struct xhci_slot *xs;
2774 uint32_t *cp;
2775
2776 XHCIHIST_FUNC();
2777 XHCIHIST_CALLARGS("port %ju depth %ju speed %ju up %#jx",
2778 port, depth, speed, (uintptr_t)up);
2779
2780 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP);
2781 dev->ud_bus = bus;
2782 dev->ud_quirks = &usbd_no_quirk;
2783 dev->ud_addr = 0;
2784 dev->ud_ddesc.bMaxPacketSize = 0;
2785 dev->ud_depth = depth;
2786 dev->ud_powersrc = up;
2787 dev->ud_myhub = up->up_parent;
2788 dev->ud_speed = speed;
2789 dev->ud_langid = USBD_NOLANG;
2790 dev->ud_cookie.cookie = ++usb_cookie_no;
2791
2792 /* Set up default endpoint handle. */
2793 dev->ud_ep0.ue_edesc = &dev->ud_ep0desc;
2794 /* doesn't matter, just don't leave it uninitialized */
2795 dev->ud_ep0.ue_toggle = 0;
2796
2797 /* Set up default endpoint descriptor. */
2798 dev->ud_ep0desc.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE;
2799 dev->ud_ep0desc.bDescriptorType = UDESC_ENDPOINT;
2800 dev->ud_ep0desc.bEndpointAddress = USB_CONTROL_ENDPOINT;
2801 dev->ud_ep0desc.bmAttributes = UE_CONTROL;
2802 dev->ud_ep0desc.bInterval = 0;
2803
2804 /* 4.3, 4.8.2.1 */
2805 switch (speed) {
2806 case USB_SPEED_SUPER:
2807 case USB_SPEED_SUPER_PLUS:
2808 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_3_MAX_CTRL_PACKET);
2809 break;
2810 case USB_SPEED_FULL:
2811 /* XXX using 64 as initial mps of ep0 in FS */
2812 case USB_SPEED_HIGH:
2813 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_2_MAX_CTRL_PACKET);
2814 break;
2815 case USB_SPEED_LOW:
2816 default:
2817 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_MAX_IPACKET);
2818 break;
2819 }
2820
2821 up->up_dev = dev;
2822
2823 dd = &dev->ud_ddesc;
2824
2825 if (depth == 0 && port == 0) {
2826 KASSERT(bus->ub_devices[USB_ROOTHUB_INDEX] == NULL);
2827 bus->ub_devices[USB_ROOTHUB_INDEX] = dev;
2828
2829 /* Establish the default pipe.
*/ 2830 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0, 2831 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0); 2832 if (err) { 2833 DPRINTFN(1, "setup default pipe failed %jd", err,0,0,0); 2834 goto bad; 2835 } 2836 err = usbd_get_initial_ddesc(dev, dd); 2837 if (err) { 2838 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0); 2839 goto bad; 2840 } 2841 } else { 2842 uint8_t slot = 0; 2843 2844 /* 4.3.2 */ 2845 err = xhci_enable_slot(sc, &slot); 2846 if (err) { 2847 DPRINTFN(1, "enable slot %ju", err, 0, 0, 0); 2848 goto bad; 2849 } 2850 2851 xs = &sc->sc_slots[slot]; 2852 dev->ud_hcpriv = xs; 2853 2854 /* 4.3.3 initialize slot structure */ 2855 err = xhci_init_slot(dev, slot); 2856 if (err) { 2857 DPRINTFN(1, "init slot %ju", err, 0, 0, 0); 2858 dev->ud_hcpriv = NULL; 2859 /* 2860 * We have to disable_slot here because 2861 * xs->xs_idx == 0 when xhci_init_slot fails, 2862 * in that case usbd_remove_dev won't work. 2863 */ 2864 mutex_enter(&sc->sc_lock); 2865 xhci_disable_slot(sc, slot); 2866 mutex_exit(&sc->sc_lock); 2867 goto bad; 2868 } 2869 2870 /* 2871 * We have to establish the default pipe _after_ slot 2872 * structure has been prepared. 2873 */ 2874 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0, 2875 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0); 2876 if (err) { 2877 DPRINTFN(1, "setup default pipe failed %jd", err, 0, 0, 2878 0); 2879 goto bad; 2880 } 2881 2882 /* 4.3.4 Address Assignment */ 2883 err = xhci_set_address(dev, slot, false); 2884 if (err) { 2885 DPRINTFN(1, "failed! to set address: %ju", err, 0, 0, 0); 2886 goto bad; 2887 } 2888 2889 /* Allow device time to set new address */ 2890 usbd_delay_ms(dev, USB_SET_ADDRESS_SETTLE); 2891 2892 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 2893 cp = xhci_slot_get_dcv(sc, xs, XHCI_DCI_SLOT); 2894 HEXDUMP("slot context", cp, sc->sc_ctxsz); 2895 uint8_t addr = XHCI_SCTX_3_DEV_ADDR_GET(le32toh(cp[3])); 2896 DPRINTFN(4, "device address %ju", addr, 0, 0, 0); 2897 /* 2898 * XXX ensure we know when the hardware does something 2899 * we can't yet cope with 2900 */ 2901 KASSERTMSG(addr >= 1 && addr <= 127, "addr %d", addr); 2902 dev->ud_addr = addr; 2903 2904 KASSERTMSG(bus->ub_devices[usb_addr2dindex(dev->ud_addr)] == NULL, 2905 "addr %d already allocated", dev->ud_addr); 2906 /* 2907 * The root hub is given its own slot 2908 */ 2909 bus->ub_devices[usb_addr2dindex(dev->ud_addr)] = dev; 2910 2911 err = usbd_get_initial_ddesc(dev, dd); 2912 if (err) { 2913 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0); 2914 goto bad; 2915 } 2916 2917 /* 4.8.2.1 */ 2918 if (USB_IS_SS(speed)) { 2919 if (dd->bMaxPacketSize != 9) { 2920 printf("%s: invalid mps 2^%u for SS ep0," 2921 " using 512\n", 2922 device_xname(sc->sc_dev), 2923 dd->bMaxPacketSize); 2924 dd->bMaxPacketSize = 9; 2925 } 2926 USETW(dev->ud_ep0desc.wMaxPacketSize, 2927 (1 << dd->bMaxPacketSize)); 2928 } else 2929 USETW(dev->ud_ep0desc.wMaxPacketSize, 2930 dd->bMaxPacketSize); 2931 DPRINTFN(4, "bMaxPacketSize %ju", dd->bMaxPacketSize, 0, 0, 0); 2932 err = xhci_update_ep0_mps(sc, xs, 2933 UGETW(dev->ud_ep0desc.wMaxPacketSize)); 2934 if (err) { 2935 DPRINTFN(1, "update mps of ep0 %ju", err, 0, 0, 0); 2936 goto bad; 2937 } 2938 } 2939 2940 err = usbd_reload_device_desc(dev); 2941 if (err) { 2942 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0); 2943 goto bad; 2944 } 2945 2946 DPRINTFN(1, "adding unit addr=%jd, rev=%02jx,", 2947 dev->ud_addr, UGETW(dd->bcdUSB), 0, 0); 2948 DPRINTFN(1, " class=%jd, subclass=%jd, protocol=%jd,", 2949 dd->bDeviceClass, dd->bDeviceSubClass, 2950 dd->bDeviceProtocol, 
0); 2951 DPRINTFN(1, " mps=%jd, len=%jd, noconf=%jd, speed=%jd", 2952 dd->bMaxPacketSize, dd->bLength, dd->bNumConfigurations, 2953 dev->ud_speed); 2954 2955 usbd_get_device_strings(dev); 2956 2957 usbd_add_dev_event(USB_EVENT_DEVICE_ATTACH, dev); 2958 2959 if (depth == 0 && port == 0) { 2960 usbd_attach_roothub(parent, dev); 2961 DPRINTFN(1, "root hub %#jx", (uintptr_t)dev, 0, 0, 0); 2962 return USBD_NORMAL_COMPLETION; 2963 } 2964 2965 err = usbd_probe_and_attach(parent, dev, port, dev->ud_addr); 2966 bad: 2967 if (err != USBD_NORMAL_COMPLETION) { 2968 usbd_remove_device(dev, up); 2969 } 2970 2971 return err; 2972 } 2973 2974 static usbd_status 2975 xhci_ring_init(struct xhci_softc * const sc, struct xhci_ring **xrp, 2976 size_t ntrb, size_t align) 2977 { 2978 size_t size = ntrb * XHCI_TRB_SIZE; 2979 struct xhci_ring *xr; 2980 2981 XHCIHIST_FUNC(); 2982 XHCIHIST_CALLARGS("xr %#jx ntrb %#jx align %#jx", 2983 (uintptr_t)*xrp, ntrb, align, 0); 2984 2985 xr = kmem_zalloc(sizeof(struct xhci_ring), KM_SLEEP); 2986 DPRINTFN(1, "ring %#jx", (uintptr_t)xr, 0, 0, 0); 2987 2988 int err = usb_allocmem(&sc->sc_bus, size, align, 2989 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xr->xr_dma); 2990 if (err) { 2991 kmem_free(xr, sizeof(struct xhci_ring)); 2992 DPRINTFN(1, "alloc xr_dma failed %jd", err, 0, 0, 0); 2993 return err; 2994 } 2995 mutex_init(&xr->xr_lock, MUTEX_DEFAULT, IPL_SOFTUSB); 2996 xr->xr_cookies = kmem_zalloc(sizeof(*xr->xr_cookies) * ntrb, KM_SLEEP); 2997 xr->xr_trb = xhci_ring_trbv(xr, 0); 2998 xr->xr_ntrb = ntrb; 2999 xr->is_halted = false; 3000 xhci_host_dequeue(xr); 3001 *xrp = xr; 3002 3003 return USBD_NORMAL_COMPLETION; 3004 } 3005 3006 static void 3007 xhci_ring_free(struct xhci_softc * const sc, struct xhci_ring ** const xr) 3008 { 3009 if (*xr == NULL) 3010 return; 3011 3012 usb_freemem(&sc->sc_bus, &(*xr)->xr_dma); 3013 mutex_destroy(&(*xr)->xr_lock); 3014 kmem_free((*xr)->xr_cookies, 3015 sizeof(*(*xr)->xr_cookies) * (*xr)->xr_ntrb); 3016 kmem_free(*xr, sizeof(struct xhci_ring)); 3017 *xr = NULL; 3018 } 3019 3020 static void 3021 xhci_ring_put(struct xhci_softc * const sc, struct xhci_ring * const xr, 3022 void *cookie, struct xhci_soft_trb * const trbs, size_t ntrbs) 3023 { 3024 size_t i; 3025 u_int ri; 3026 u_int cs; 3027 uint64_t parameter; 3028 uint32_t status; 3029 uint32_t control; 3030 3031 XHCIHIST_FUNC(); 3032 XHCIHIST_CALLARGS("%#jx xr_ep %#jx xr_cs %ju", 3033 (uintptr_t)xr, xr->xr_ep, xr->xr_cs, 0); 3034 3035 KASSERTMSG(ntrbs < xr->xr_ntrb, "ntrbs %zu, xr->xr_ntrb %u", 3036 ntrbs, xr->xr_ntrb); 3037 for (i = 0; i < ntrbs; i++) { 3038 DPRINTFN(12, "xr %#jx trbs %#jx num %ju", (uintptr_t)xr, 3039 (uintptr_t)trbs, i, 0); 3040 DPRINTFN(12, " 0x%016jx 0x%08jx 0x%08jx", 3041 trbs[i].trb_0, trbs[i].trb_2, trbs[i].trb_3, 0); 3042 KASSERTMSG(XHCI_TRB_3_TYPE_GET(trbs[i].trb_3) != 3043 XHCI_TRB_TYPE_LINK, "trbs[%zu].trb3 %#x", i, trbs[i].trb_3); 3044 } 3045 3046 ri = xr->xr_ep; 3047 cs = xr->xr_cs; 3048 3049 /* 3050 * Although the xhci hardware can do scatter/gather dma from 3051 * arbitrary sized buffers, there is a non-obvious restriction 3052 * that a LINK trb is only allowed at the end of a burst of 3053 * transfers - which might be 16kB. 3054 * Arbitrary aligned LINK trb definitely fail on Ivy bridge. 3055 * The simple solution is not to allow a LINK trb in the middle 3056 * of anything - as here. 
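 * To keep the controller from consuming a partially written burst, the
 * first TRB is queued with an inverted cycle bit and only flipped to
 * valid at the very end, after all other TRBs have been written out.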
3057 * XXX: (dsl) There are xhci controllers out there (eg some made by 3058 * ASMedia) that seem to lock up if they process a LINK trb but 3059 * cannot process the linked-to trb yet. 3060 * The code should write the 'cycle' bit on the link trb AFTER 3061 * adding the other trb. 3062 */ 3063 u_int firstep = xr->xr_ep; 3064 u_int firstcs = xr->xr_cs; 3065 3066 for (i = 0; i < ntrbs; ) { 3067 u_int oldri = ri; 3068 u_int oldcs = cs; 3069 3070 if (ri >= (xr->xr_ntrb - 1)) { 3071 /* Put Link TD at the end of ring */ 3072 parameter = xhci_ring_trbp(xr, 0); 3073 status = 0; 3074 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) | 3075 XHCI_TRB_3_TC_BIT; 3076 xr->xr_cookies[ri] = NULL; 3077 xr->xr_ep = 0; 3078 xr->xr_cs ^= 1; 3079 ri = xr->xr_ep; 3080 cs = xr->xr_cs; 3081 } else { 3082 parameter = trbs[i].trb_0; 3083 status = trbs[i].trb_2; 3084 control = trbs[i].trb_3; 3085 3086 xr->xr_cookies[ri] = cookie; 3087 ri++; 3088 i++; 3089 } 3090 /* 3091 * If this is a first TRB, mark it invalid to prevent 3092 * xHC from running it immediately. 3093 */ 3094 if (oldri == firstep) { 3095 if (oldcs) { 3096 control &= ~XHCI_TRB_3_CYCLE_BIT; 3097 } else { 3098 control |= XHCI_TRB_3_CYCLE_BIT; 3099 } 3100 } else { 3101 if (oldcs) { 3102 control |= XHCI_TRB_3_CYCLE_BIT; 3103 } else { 3104 control &= ~XHCI_TRB_3_CYCLE_BIT; 3105 } 3106 } 3107 xhci_trb_put(&xr->xr_trb[oldri], parameter, status, control); 3108 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * oldri, 3109 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE); 3110 } 3111 3112 /* Now invert cycle bit of first TRB */ 3113 if (firstcs) { 3114 xr->xr_trb[firstep].trb_3 |= htole32(XHCI_TRB_3_CYCLE_BIT); 3115 } else { 3116 xr->xr_trb[firstep].trb_3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT); 3117 } 3118 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * firstep, 3119 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE); 3120 3121 xr->xr_ep = ri; 3122 xr->xr_cs = cs; 3123 3124 DPRINTFN(12, "%#jx xr_ep %#jx xr_cs %ju", (uintptr_t)xr, xr->xr_ep, 3125 xr->xr_cs, 0); 3126 } 3127 3128 static inline void 3129 xhci_ring_put_xfer(struct xhci_softc * const sc, struct xhci_ring * const tr, 3130 struct xhci_xfer *xx, u_int ntrb) 3131 { 3132 KASSERT(ntrb <= xx->xx_ntrb); 3133 xhci_ring_put(sc, tr, xx, xx->xx_trb, ntrb); 3134 } 3135 3136 /* 3137 * Stop execution commands, purge all commands on command ring, and 3138 * rewind dequeue pointer. 3139 */ 3140 static void 3141 xhci_abort_command(struct xhci_softc *sc) 3142 { 3143 struct xhci_ring * const cr = sc->sc_cr; 3144 uint64_t crcr; 3145 int i; 3146 3147 XHCIHIST_FUNC(); 3148 XHCIHIST_CALLARGS("command %#jx timeout, aborting", 3149 sc->sc_command_addr, 0, 0, 0); 3150 3151 mutex_enter(&cr->xr_lock); 3152 3153 /* 4.6.1.2 Aborting a Command */ 3154 crcr = xhci_op_read_8(sc, XHCI_CRCR); 3155 xhci_op_write_8(sc, XHCI_CRCR, crcr | XHCI_CRCR_LO_CA); 3156 3157 for (i = 0; i < 500; i++) { 3158 crcr = xhci_op_read_8(sc, XHCI_CRCR); 3159 if ((crcr & XHCI_CRCR_LO_CRR) == 0) 3160 break; 3161 usb_delay_ms(&sc->sc_bus, 1); 3162 } 3163 if ((crcr & XHCI_CRCR_LO_CRR) != 0) { 3164 DPRINTFN(1, "Command Abort timeout", 0, 0, 0, 0); 3165 /* reset HC here? */ 3166 } 3167 3168 /* reset command ring dequeue pointer */ 3169 cr->xr_ep = 0; 3170 cr->xr_cs = 1; 3171 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(cr, 0) | cr->xr_cs); 3172 3173 mutex_exit(&cr->xr_lock); 3174 } 3175 3176 /* 3177 * Put a command on command ring, ring bell, set timer, and cv_timedwait. 3178 * Command completion is notified by cv_signal from xhci_event_cmd() 3179 * (called from xhci_softint), or timed-out. 
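 * On timeout the pending command is cancelled with xhci_abort_command(),
 * which stops the command ring and rewinds its dequeue pointer.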
3180 * The completion code is copied to sc->sc_result_trb in xhci_event_cmd(), 3181 * then do_command examines it. 3182 */ 3183 static usbd_status 3184 xhci_do_command_locked(struct xhci_softc * const sc, 3185 struct xhci_soft_trb * const trb, int timeout) 3186 { 3187 struct xhci_ring * const cr = sc->sc_cr; 3188 usbd_status err; 3189 3190 XHCIHIST_FUNC(); 3191 XHCIHIST_CALLARGS("input: 0x%016jx 0x%08jx 0x%08jx", 3192 trb->trb_0, trb->trb_2, trb->trb_3, 0); 3193 3194 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx"); 3195 KASSERT(mutex_owned(&sc->sc_lock)); 3196 3197 while (sc->sc_command_addr != 0 || 3198 (sc->sc_suspender != NULL && sc->sc_suspender != curlwp)) 3199 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock); 3200 3201 /* 3202 * If enqueue pointer points at last of ring, it's Link TRB, 3203 * command TRB will be stored in 0th TRB. 3204 */ 3205 if (cr->xr_ep == cr->xr_ntrb - 1) 3206 sc->sc_command_addr = xhci_ring_trbp(cr, 0); 3207 else 3208 sc->sc_command_addr = xhci_ring_trbp(cr, cr->xr_ep); 3209 3210 sc->sc_resultpending = true; 3211 3212 mutex_enter(&cr->xr_lock); 3213 xhci_ring_put(sc, cr, NULL, trb, 1); 3214 mutex_exit(&cr->xr_lock); 3215 3216 xhci_db_write_4(sc, XHCI_DOORBELL(0), 0); 3217 3218 while (sc->sc_resultpending) { 3219 if (cv_timedwait(&sc->sc_command_cv, &sc->sc_lock, 3220 MAX(1, mstohz(timeout))) == EWOULDBLOCK) { 3221 xhci_abort_command(sc); 3222 err = USBD_TIMEOUT; 3223 goto timedout; 3224 } 3225 } 3226 3227 trb->trb_0 = sc->sc_result_trb.trb_0; 3228 trb->trb_2 = sc->sc_result_trb.trb_2; 3229 trb->trb_3 = sc->sc_result_trb.trb_3; 3230 3231 DPRINTFN(12, "output: 0x%016jx 0x%08jx 0x%08jx", 3232 trb->trb_0, trb->trb_2, trb->trb_3, 0); 3233 3234 switch (XHCI_TRB_2_ERROR_GET(trb->trb_2)) { 3235 case XHCI_TRB_ERROR_SUCCESS: 3236 err = USBD_NORMAL_COMPLETION; 3237 break; 3238 default: 3239 case 192 ... 223: 3240 DPRINTFN(5, "error %#jx", 3241 XHCI_TRB_2_ERROR_GET(trb->trb_2), 0, 0, 0); 3242 err = USBD_IOERROR; 3243 break; 3244 case 224 ... 255: 3245 err = USBD_NORMAL_COMPLETION; 3246 break; 3247 } 3248 3249 timedout: 3250 sc->sc_resultpending = false; 3251 sc->sc_command_addr = 0; 3252 cv_broadcast(&sc->sc_cmdbusy_cv); 3253 3254 return err; 3255 } 3256 3257 static usbd_status 3258 xhci_do_command(struct xhci_softc * const sc, struct xhci_soft_trb * const trb, 3259 int timeout) 3260 { 3261 3262 mutex_enter(&sc->sc_lock); 3263 usbd_status ret = xhci_do_command_locked(sc, trb, timeout); 3264 mutex_exit(&sc->sc_lock); 3265 3266 return ret; 3267 } 3268 3269 static usbd_status 3270 xhci_enable_slot(struct xhci_softc * const sc, uint8_t * const slotp) 3271 { 3272 struct xhci_soft_trb trb; 3273 usbd_status err; 3274 3275 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 3276 3277 trb.trb_0 = 0; 3278 trb.trb_2 = 0; 3279 trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT); 3280 3281 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 3282 if (err != USBD_NORMAL_COMPLETION) { 3283 return err; 3284 } 3285 3286 *slotp = XHCI_TRB_3_SLOT_GET(trb.trb_3); 3287 3288 return err; 3289 } 3290 3291 /* 3292 * xHCI 4.6.4 3293 * Deallocate ring and device/input context DMA buffers, and disable_slot. 3294 * All endpoints in the slot should be stopped. 3295 * Should be called with sc_lock held. 
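 * On success the slot's DCBAA entry is cleared and its DMA buffers are
 * released via xhci_free_slot(), leaving xs_idx at 0.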
3296 */ 3297 static usbd_status 3298 xhci_disable_slot(struct xhci_softc * const sc, uint8_t slot) 3299 { 3300 struct xhci_soft_trb trb; 3301 struct xhci_slot *xs; 3302 usbd_status err; 3303 3304 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 3305 3306 if (sc->sc_dying) 3307 return USBD_IOERROR; 3308 3309 trb.trb_0 = 0; 3310 trb.trb_2 = 0; 3311 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot) | 3312 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT); 3313 3314 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 3315 3316 if (!err) { 3317 xs = &sc->sc_slots[slot]; 3318 if (xs->xs_idx != 0) { 3319 xhci_free_slot(sc, xs); 3320 xhci_set_dcba(sc, 0, slot); 3321 memset(xs, 0, sizeof(*xs)); 3322 } 3323 } 3324 3325 return err; 3326 } 3327 3328 /* 3329 * Set address of device and transition slot state from ENABLED to ADDRESSED 3330 * if Block Setaddress Request (BSR) is false. 3331 * If BSR==true, transition slot state from ENABLED to DEFAULT. 3332 * see xHCI 1.1 4.5.3, 3.3.4 3333 * Should be called without sc_lock held. 3334 */ 3335 static usbd_status 3336 xhci_address_device(struct xhci_softc * const sc, 3337 uint64_t icp, uint8_t slot_id, bool bsr) 3338 { 3339 struct xhci_soft_trb trb; 3340 usbd_status err; 3341 3342 XHCIHIST_FUNC(); 3343 if (bsr) { 3344 XHCIHIST_CALLARGS("icp %#jx slot %#jx with bsr", 3345 icp, slot_id, 0, 0); 3346 } else { 3347 XHCIHIST_CALLARGS("icp %#jx slot %#jx nobsr", 3348 icp, slot_id, 0, 0); 3349 } 3350 3351 trb.trb_0 = icp; 3352 trb.trb_2 = 0; 3353 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot_id) | 3354 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) | 3355 (bsr ? XHCI_TRB_3_BSR_BIT : 0); 3356 3357 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 3358 3359 if (XHCI_TRB_2_ERROR_GET(trb.trb_2) == XHCI_TRB_ERROR_NO_SLOTS) 3360 err = USBD_NO_ADDR; 3361 3362 return err; 3363 } 3364 3365 static usbd_status 3366 xhci_update_ep0_mps(struct xhci_softc * const sc, 3367 struct xhci_slot * const xs, u_int mps) 3368 { 3369 struct xhci_soft_trb trb; 3370 usbd_status err; 3371 uint32_t * cp; 3372 3373 XHCIHIST_FUNC(); 3374 XHCIHIST_CALLARGS("slot %ju mps %ju", xs->xs_idx, mps, 0, 0); 3375 3376 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL); 3377 cp[0] = htole32(0); 3378 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_EP_CONTROL)); 3379 3380 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_EP_CONTROL)); 3381 cp[1] = htole32(XHCI_EPCTX_1_MAXP_SIZE_SET(mps)); 3382 3383 /* sync input contexts before they are read from memory */ 3384 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE); 3385 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0), 3386 sc->sc_ctxsz * 4); 3387 3388 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0); 3389 trb.trb_2 = 0; 3390 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 3391 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX); 3392 3393 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 3394 return err; 3395 } 3396 3397 static void 3398 xhci_set_dcba(struct xhci_softc * const sc, uint64_t dcba, int si) 3399 { 3400 uint64_t * const dcbaa = KERNADDR(&sc->sc_dcbaa_dma, 0); 3401 3402 XHCIHIST_FUNC(); 3403 XHCIHIST_CALLARGS("dcbaa %#jx dc 0x%016jx slot %jd", 3404 (uintptr_t)&dcbaa[si], dcba, si, 0); 3405 3406 dcbaa[si] = htole64(dcba); 3407 usb_syncmem(&sc->sc_dcbaa_dma, si * sizeof(uint64_t), sizeof(uint64_t), 3408 BUS_DMASYNC_PREWRITE); 3409 } 3410 3411 /* 3412 * Allocate device and input context DMA buffer, and 3413 * TRB DMA buffer for each endpoint. 
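 * The output device context and the input context each occupy one
 * controller page (sc_pgsz); the per-endpoint transfer rings themselves
 * are allocated later, from xhci_open().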
3414 */ 3415 static usbd_status 3416 xhci_init_slot(struct usbd_device *dev, uint32_t slot) 3417 { 3418 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus); 3419 struct xhci_slot *xs; 3420 3421 XHCIHIST_FUNC(); 3422 XHCIHIST_CALLARGS("slot %ju", slot, 0, 0, 0); 3423 3424 xs = &sc->sc_slots[slot]; 3425 3426 /* allocate contexts */ 3427 int err = usb_allocmem(&sc->sc_bus, sc->sc_pgsz, sc->sc_pgsz, 3428 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_dc_dma); 3429 if (err) { 3430 DPRINTFN(1, "failed to allocmem output device context %jd", 3431 err, 0, 0, 0); 3432 return USBD_NOMEM; 3433 } 3434 3435 err = usb_allocmem(&sc->sc_bus, sc->sc_pgsz, sc->sc_pgsz, 3436 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_ic_dma); 3437 if (err) { 3438 DPRINTFN(1, "failed to allocmem input device context %jd", 3439 err, 0, 0, 0); 3440 goto bad1; 3441 } 3442 3443 memset(&xs->xs_xr[0], 0, sizeof(xs->xs_xr)); 3444 xs->xs_idx = slot; 3445 3446 return USBD_NORMAL_COMPLETION; 3447 3448 bad1: 3449 usb_freemem(&sc->sc_bus, &xs->xs_dc_dma); 3450 xs->xs_idx = 0; 3451 return USBD_NOMEM; 3452 } 3453 3454 static void 3455 xhci_free_slot(struct xhci_softc *sc, struct xhci_slot *xs) 3456 { 3457 u_int dci; 3458 3459 XHCIHIST_FUNC(); 3460 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0); 3461 3462 /* deallocate all allocated rings in the slot */ 3463 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) { 3464 if (xs->xs_xr[dci] != NULL) 3465 xhci_ring_free(sc, &xs->xs_xr[dci]); 3466 } 3467 usb_freemem(&sc->sc_bus, &xs->xs_ic_dma); 3468 usb_freemem(&sc->sc_bus, &xs->xs_dc_dma); 3469 xs->xs_idx = 0; 3470 } 3471 3472 /* 3473 * Setup slot context, set Device Context Base Address, and issue 3474 * Set Address Device command. 3475 */ 3476 static usbd_status 3477 xhci_set_address(struct usbd_device *dev, uint32_t slot, bool bsr) 3478 { 3479 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus); 3480 struct xhci_slot *xs; 3481 usbd_status err; 3482 3483 XHCIHIST_FUNC(); 3484 XHCIHIST_CALLARGS("slot %ju bsr %ju", slot, bsr, 0, 0); 3485 3486 xs = &sc->sc_slots[slot]; 3487 3488 xhci_setup_ctx(dev->ud_pipe0); 3489 3490 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0), 3491 sc->sc_ctxsz * 3); 3492 3493 xhci_set_dcba(sc, DMAADDR(&xs->xs_dc_dma, 0), slot); 3494 3495 err = xhci_address_device(sc, xhci_slot_get_icp(sc, xs, 0), slot, bsr); 3496 3497 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 3498 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, 0), 3499 sc->sc_ctxsz * 2); 3500 3501 return err; 3502 } 3503 3504 /* 3505 * 4.8.2, 6.2.3.2 3506 * construct slot/endpoint context parameters and do syncmem 3507 */ 3508 static void 3509 xhci_setup_ctx(struct usbd_pipe *pipe) 3510 { 3511 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 3512 struct usbd_device *dev = pipe->up_dev; 3513 struct xhci_slot * const xs = dev->ud_hcpriv; 3514 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 3515 const u_int dci = xhci_ep_get_dci(ed); 3516 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 3517 uint32_t *cp; 3518 uint16_t mps = UGETW(ed->wMaxPacketSize); 3519 uint8_t speed = dev->ud_speed; 3520 uint8_t ival = ed->bInterval; 3521 3522 XHCIHIST_FUNC(); 3523 XHCIHIST_CALLARGS("pipe %#jx: slot %ju dci %ju speed %ju", 3524 (uintptr_t)pipe, xs->xs_idx, dci, speed); 3525 3526 /* set up initial input control context */ 3527 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL); 3528 cp[0] = htole32(0); 3529 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(dci)); 3530 cp[1] |= 
htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_SLOT)); 3531 cp[7] = htole32(0); 3532 3533 /* set up input slot context */ 3534 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT)); 3535 cp[0] = 3536 XHCI_SCTX_0_CTX_NUM_SET(dci) | 3537 XHCI_SCTX_0_SPEED_SET(xhci_speed2xspeed(speed)); 3538 cp[1] = 0; 3539 cp[2] = XHCI_SCTX_2_IRQ_TARGET_SET(0); 3540 cp[3] = 0; 3541 xhci_setup_route(pipe, cp); 3542 xhci_setup_tthub(pipe, cp); 3543 3544 cp[0] = htole32(cp[0]); 3545 cp[1] = htole32(cp[1]); 3546 cp[2] = htole32(cp[2]); 3547 cp[3] = htole32(cp[3]); 3548 3549 /* set up input endpoint context */ 3550 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(dci)); 3551 cp[0] = 3552 XHCI_EPCTX_0_EPSTATE_SET(0) | 3553 XHCI_EPCTX_0_MULT_SET(0) | 3554 XHCI_EPCTX_0_MAXP_STREAMS_SET(0) | 3555 XHCI_EPCTX_0_LSA_SET(0) | 3556 XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(0); 3557 cp[1] = 3558 XHCI_EPCTX_1_EPTYPE_SET(xhci_ep_get_type(ed)) | 3559 XHCI_EPCTX_1_HID_SET(0) | 3560 XHCI_EPCTX_1_MAXB_SET(0); 3561 3562 if (xfertype != UE_ISOCHRONOUS) 3563 cp[1] |= XHCI_EPCTX_1_CERR_SET(3); 3564 3565 if (xfertype == UE_CONTROL) 3566 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(8); /* 6.2.3 */ 3567 else if (USB_IS_SS(speed)) 3568 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(mps); 3569 else 3570 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(UE_GET_SIZE(mps)); 3571 3572 xhci_setup_maxburst(pipe, cp); 3573 3574 switch (xfertype) { 3575 case UE_CONTROL: 3576 break; 3577 case UE_BULK: 3578 /* XXX Set MaxPStreams, HID, and LSA if streams enabled */ 3579 break; 3580 case UE_INTERRUPT: 3581 if (pipe->up_interval != USBD_DEFAULT_INTERVAL) 3582 ival = pipe->up_interval; 3583 3584 ival = xhci_bival2ival(ival, speed); 3585 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival); 3586 break; 3587 case UE_ISOCHRONOUS: 3588 if (pipe->up_interval != USBD_DEFAULT_INTERVAL) 3589 ival = pipe->up_interval; 3590 3591 /* xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6 */ 3592 if (speed == USB_SPEED_FULL) 3593 ival += 3; /* 1ms -> 125us */ 3594 ival--; 3595 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival); 3596 break; 3597 default: 3598 break; 3599 } 3600 DPRINTFN(4, "setting ival %ju MaxBurst %#jx", 3601 XHCI_EPCTX_0_IVAL_GET(cp[0]), XHCI_EPCTX_1_MAXB_GET(cp[1]), 0, 0); 3602 3603 /* rewind TR dequeue pointer in xHC */ 3604 /* can't use xhci_ep_get_dci() yet? 
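 * The value written below is the physical base of the freshly
 * reinitialized transfer ring with the Dequeue Cycle State set to 1,
 * matching what xhci_host_dequeue() does on the driver side.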
*/
3605 *(uint64_t *)(&cp[2]) = htole64(
3606 xhci_ring_trbp(xs->xs_xr[dci], 0) |
3607 XHCI_EPCTX_2_DCS_SET(1));
3608
3609 cp[0] = htole32(cp[0]);
3610 cp[1] = htole32(cp[1]);
3611 cp[4] = htole32(cp[4]);
3612
3613 /* rewind TR dequeue pointer in driver */
3614 struct xhci_ring *xr = xs->xs_xr[dci];
3615 mutex_enter(&xr->xr_lock);
3616 xhci_host_dequeue(xr);
3617 mutex_exit(&xr->xr_lock);
3618
3619 /* sync input contexts before they are read from memory */
3620 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE);
3621 }
3622
3623 /*
3624 * Set up the route string and roothub port of the given device for the slot context
3625 */
3626 static void
3627 xhci_setup_route(struct usbd_pipe *pipe, uint32_t *cp)
3628 {
3629 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3630 struct usbd_device *dev = pipe->up_dev;
3631 struct usbd_port *up = dev->ud_powersrc;
3632 struct usbd_device *hub;
3633 struct usbd_device *adev;
3634 uint8_t rhport = 0;
3635 uint32_t route = 0;
3636
3637 XHCIHIST_FUNC(); XHCIHIST_CALLED();
3638
3639 /* Locate root hub port and determine route string */
3640 /* 4.3.3 route string does not include roothub port */
3641 for (hub = dev; hub != NULL; hub = hub->ud_myhub) {
3642 uint32_t dep;
3643
3644 DPRINTFN(4, "hub %#jx depth %jd upport %#jx upportno %jd",
3645 (uintptr_t)hub, hub->ud_depth, (uintptr_t)hub->ud_powersrc,
3646 hub->ud_powersrc ? (uintptr_t)hub->ud_powersrc->up_portno :
3647 -1);
3648
3649 if (hub->ud_powersrc == NULL)
3650 break;
3651 dep = hub->ud_depth;
3652 if (dep == 0)
3653 break;
3654 rhport = hub->ud_powersrc->up_portno;
3655 if (dep > USB_HUB_MAX_DEPTH)
3656 continue;
3657
3658 route |=
3659 (rhport > UHD_SS_NPORTS_MAX ? UHD_SS_NPORTS_MAX : rhport)
3660 << ((dep - 1) * 4);
3661 }
3662 route = route >> 4;
3663 size_t bn = hub == sc->sc_bus.ub_roothub ? 0 : 1;
3664
3665 /* Locate port on upstream high speed hub */
3666 for (adev = dev, hub = up->up_parent;
3667 hub != NULL && hub->ud_speed != USB_SPEED_HIGH;
3668 adev = hub, hub = hub->ud_myhub)
3669 ;
3670 if (hub) {
3671 int p;
3672 for (p = 1; p <= hub->ud_hub->uh_hubdesc.bNbrPorts; p++) {
3673 if (hub->ud_hub->uh_ports[p - 1].up_dev == adev) {
3674 dev->ud_myhsport = &hub->ud_hub->uh_ports[p - 1];
3675 goto found;
3676 }
3677 }
3678 panic("%s: cannot find HS port", __func__);
3679 found:
3680 DPRINTFN(4, "high speed port %jd", p, 0, 0, 0);
3681 } else {
3682 dev->ud_myhsport = NULL;
3683 }
3684
3685 const size_t ctlrport = xhci_rhport2ctlrport(sc, bn, rhport);
3686
3687 DPRINTFN(4, "rhport %ju ctlrport %ju Route %05jx hub %#jx", rhport,
3688 ctlrport, route, (uintptr_t)hub);
3689
3690 cp[0] |= XHCI_SCTX_0_ROUTE_SET(route);
3691 cp[1] |= XHCI_SCTX_1_RH_PORT_SET(ctlrport);
3692 }
3693
3694 /*
3695 * Set up whether the device is a hub, whether the device uses MTT, and
3696 * the TT information if it uses MTT.
3697 */
3698 static void
3699 xhci_setup_tthub(struct usbd_pipe *pipe, uint32_t *cp)
3700 {
3701 struct usbd_device *dev = pipe->up_dev;
3702 struct usbd_port *myhsport = dev->ud_myhsport;
3703 usb_device_descriptor_t * const dd = &dev->ud_ddesc;
3704 uint32_t speed = dev->ud_speed;
3705 uint8_t rhaddr = dev->ud_bus->ub_rhaddr;
3706 uint8_t tthubslot, ttportnum;
3707 bool ishub;
3708 bool usemtt;
3709
3710 XHCIHIST_FUNC();
3711
3712 /*
3713 * 6.2.2, Table 57-60, 6.2.2.1, 6.2.2.2
3714 * tthubslot:
3715 * This is the slot ID of parent HS hub
3716 * if LS/FS device is connected && connected through HS hub.
3717 * This is 0 if device is not LS/FS device || 3718 * parent hub is not HS hub || 3719 * attached to root hub. 3720 * ttportnum: 3721 * This is the downstream facing port of parent HS hub 3722 * if LS/FS device is connected. 3723 * This is 0 if device is not LS/FS device || 3724 * parent hub is not HS hub || 3725 * attached to root hub. 3726 */ 3727 if (myhsport && 3728 myhsport->up_parent->ud_addr != rhaddr && 3729 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) { 3730 ttportnum = myhsport->up_portno; 3731 tthubslot = myhsport->up_parent->ud_addr; 3732 } else { 3733 ttportnum = 0; 3734 tthubslot = 0; 3735 } 3736 XHCIHIST_CALLARGS("myhsport %#jx ttportnum=%jd tthubslot=%jd", 3737 (uintptr_t)myhsport, ttportnum, tthubslot, 0); 3738 3739 /* ishub is valid after reading UDESC_DEVICE */ 3740 ishub = (dd->bDeviceClass == UDCLASS_HUB); 3741 3742 /* dev->ud_hub is valid after reading UDESC_HUB */ 3743 if (ishub && dev->ud_hub) { 3744 usb_hub_descriptor_t *hd = &dev->ud_hub->uh_hubdesc; 3745 uint8_t ttt = 3746 __SHIFTOUT(UGETW(hd->wHubCharacteristics), UHD_TT_THINK); 3747 3748 cp[1] |= XHCI_SCTX_1_NUM_PORTS_SET(hd->bNbrPorts); 3749 cp[2] |= XHCI_SCTX_2_TT_THINK_TIME_SET(ttt); 3750 DPRINTFN(4, "nports=%jd ttt=%jd", hd->bNbrPorts, ttt, 0, 0); 3751 } 3752 3753 #define IS_MTTHUB(dd) \ 3754 ((dd)->bDeviceProtocol == UDPROTO_HSHUBMTT) 3755 3756 /* 3757 * MTT flag is set if 3758 * 1. this is HS hub && MTTs are supported and enabled; or 3759 * 2. this is LS or FS device && there is a parent HS hub where MTTs 3760 * are supported and enabled. 3761 * 3762 * XXX enabled is not tested yet 3763 */ 3764 if (ishub && speed == USB_SPEED_HIGH && IS_MTTHUB(dd)) 3765 usemtt = true; 3766 else if ((speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) && 3767 myhsport && 3768 myhsport->up_parent->ud_addr != rhaddr && 3769 IS_MTTHUB(&myhsport->up_parent->ud_ddesc)) 3770 usemtt = true; 3771 else 3772 usemtt = false; 3773 DPRINTFN(4, "class %ju proto %ju ishub %jd usemtt %jd", 3774 dd->bDeviceClass, dd->bDeviceProtocol, ishub, usemtt); 3775 3776 #undef IS_MTTHUB 3777 3778 cp[0] |= 3779 XHCI_SCTX_0_HUB_SET(ishub ? 1 : 0) | 3780 XHCI_SCTX_0_MTT_SET(usemtt ? 
1 : 0); 3781 cp[2] |= 3782 XHCI_SCTX_2_TT_HUB_SID_SET(tthubslot) | 3783 XHCI_SCTX_2_TT_PORT_NUM_SET(ttportnum); 3784 } 3785 3786 /* set up params for periodic endpoint */ 3787 static void 3788 xhci_setup_maxburst(struct usbd_pipe *pipe, uint32_t *cp) 3789 { 3790 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe; 3791 struct usbd_device *dev = pipe->up_dev; 3792 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 3793 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 3794 usbd_desc_iter_t iter; 3795 const usb_cdc_descriptor_t *cdcd; 3796 uint32_t maxb = 0; 3797 uint16_t mps = UGETW(ed->wMaxPacketSize); 3798 uint8_t speed = dev->ud_speed; 3799 uint8_t mult = 0; 3800 uint8_t ep; 3801 3802 /* config desc is NULL when opening ep0 */ 3803 if (dev == NULL || dev->ud_cdesc == NULL) 3804 goto no_cdcd; 3805 cdcd = (const usb_cdc_descriptor_t *)usb_find_desc(dev, 3806 UDESC_INTERFACE, USBD_CDCSUBTYPE_ANY); 3807 if (cdcd == NULL) 3808 goto no_cdcd; 3809 usb_desc_iter_init(dev, &iter); 3810 iter.cur = (const void *)cdcd; 3811 3812 /* find endpoint_ss_comp desc for ep of this pipe */ 3813 for (ep = 0;;) { 3814 cdcd = (const usb_cdc_descriptor_t *)usb_desc_iter_next(&iter); 3815 if (cdcd == NULL) 3816 break; 3817 if (ep == 0 && cdcd->bDescriptorType == UDESC_ENDPOINT) { 3818 ep = ((const usb_endpoint_descriptor_t *)cdcd)-> 3819 bEndpointAddress; 3820 if (UE_GET_ADDR(ep) == 3821 UE_GET_ADDR(ed->bEndpointAddress)) { 3822 cdcd = (const usb_cdc_descriptor_t *) 3823 usb_desc_iter_next(&iter); 3824 break; 3825 } 3826 ep = 0; 3827 } 3828 } 3829 if (cdcd != NULL && cdcd->bDescriptorType == UDESC_ENDPOINT_SS_COMP) { 3830 const usb_endpoint_ss_comp_descriptor_t * esscd = 3831 (const usb_endpoint_ss_comp_descriptor_t *)cdcd; 3832 maxb = esscd->bMaxBurst; 3833 mult = UE_GET_SS_ISO_MULT(esscd->bmAttributes); 3834 } 3835 3836 no_cdcd: 3837 /* 6.2.3.4, 4.8.2.4 */ 3838 if (USB_IS_SS(speed)) { 3839 /* USB 3.1 9.6.6 */ 3840 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(mps); 3841 /* USB 3.1 9.6.7 */ 3842 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb); 3843 #ifdef notyet 3844 if (xfertype == UE_ISOCHRONOUS) { 3845 } 3846 if (XHCI_HCC2_LEC(sc->sc_hcc2) != 0) { 3847 /* use ESIT */ 3848 cp[4] |= XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(x); 3849 cp[0] |= XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(x); 3850 3851 /* XXX if LEC = 1, set ESIT instead */ 3852 cp[0] |= XHCI_EPCTX_0_MULT_SET(0); 3853 } else { 3854 /* use ival */ 3855 } 3856 #endif 3857 } else { 3858 /* USB 2.0 9.6.6 */ 3859 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(UE_GET_SIZE(mps)); 3860 3861 /* 6.2.3.4 */ 3862 if (speed == USB_SPEED_HIGH && 3863 (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)) { 3864 maxb = UE_GET_TRANS(mps); 3865 } else { 3866 /* LS/FS or HS CTRL or HS BULK */ 3867 maxb = 0; 3868 } 3869 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb); 3870 } 3871 xpipe->xp_maxb = maxb + 1; 3872 xpipe->xp_mult = mult + 1; 3873 } 3874 3875 /* 3876 * Convert endpoint bInterval value to endpoint context interval value 3877 * for Interrupt pipe. 3878 * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6 3879 */ 3880 static uint32_t 3881 xhci_bival2ival(uint32_t ival, uint32_t speed) 3882 { 3883 if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) { 3884 int i; 3885 3886 /* 3887 * round ival down to "the nearest base 2 multiple of 3888 * bInterval * 8". 3889 * bInterval is at most 255 as its type is uByte. 3890 * 255(ms) = 2040(x 125us) < 2^11, so start with 10. 
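 * For example, bInterval 32 (ms) gives 32 * 8 = 256 = 2^8 frames of
 * 125us, so the loop below yields Interval 8 (32ms); bInterval 10
 * rounds down to Interval 6 (64 * 125us = 8ms).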
/*
 * Convert endpoint bInterval value to endpoint context interval value
 * for Interrupt pipe.
 * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6
 */
static uint32_t
xhci_bival2ival(uint32_t ival, uint32_t speed)
{
	if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) {
		int i;

		/*
		 * round ival down to "the nearest base 2 multiple of
		 * bInterval * 8".
		 * bInterval is at most 255 as its type is uByte.
		 * 255(ms) = 2040(x 125us) < 2^11, so start with 10.
		 */
		for (i = 10; i > 0; i--) {
			if ((ival * 8) >= (1 << i))
				break;
		}
		ival = i;
	} else {
		/* Interval = bInterval-1 for SS/HS */
		ival--;
	}

	return ival;
}
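
/*
 * Worked examples for xhci_bival2ival() (values chosen purely for
 * illustration):
 *
 *  - FS interrupt endpoint, bInterval = 32 (ms):
 *	32 * 8 = 256 units of 125us; the largest i with 2^i <= 256
 *	is 8, so the endpoint context Interval is 8 (= 32ms).
 *  - FS interrupt endpoint, bInterval = 5 (ms):
 *	5 * 8 = 40; 2^5 = 32 <= 40 < 64, so Interval = 5 (= 4ms,
 *	i.e. rounded down to a power-of-two period).
 *  - HS/SS endpoint, bInterval = 4:
 *	Interval = bInterval - 1 = 3 (= 2^3 * 125us = 1ms).
 */
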
/* ----- */

static void
xhci_noop(struct usbd_pipe *pipe)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();
}

/*
 * Process root hub request.
 */
static int
xhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
    void *buf, int buflen)
{
	struct xhci_softc * const sc = XHCI_BUS2SC(bus);
	usb_port_status_t ps;
	int l, totlen = 0;
	uint16_t len, value, index;
	int port, i;
	uint32_t v;

	XHCIHIST_FUNC();

	if (sc->sc_dying)
		return -1;

	size_t bn = bus == &sc->sc_bus ? 0 : 1;

	len = UGETW(req->wLength);
	value = UGETW(req->wValue);
	index = UGETW(req->wIndex);

	XHCIHIST_CALLARGS("rhreq: %04jx %04jx %04jx %04jx",
	    req->bmRequestType | (req->bRequest << 8), value, index, len);

#define C(x,y) ((x) | ((y) << 8))
	switch (C(req->bRequest, req->bmRequestType)) {
	case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
		DPRINTFN(8, "getdesc: wValue=0x%04jx", value, 0, 0, 0);
		if (len == 0)
			break;
		switch (value) {
#define sd ((usb_string_descriptor_t *)buf)
		case C(2, UDESC_STRING):
			/* Product */
			totlen = usb_makestrdesc(sd, len, "xHCI root hub");
			break;
#undef sd
		default:
			/* default from usbroothub */
			return buflen;
		}
		break;

	/* Hub requests */
	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
		break;
	/* Clear Port Feature request */
	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): {
		const size_t cp = xhci_rhport2ctlrport(sc, bn, index);

		DPRINTFN(4, "UR_CLEAR_PORT_FEAT bp=%jd feat=%jd bus=%jd cp=%jd",
		    index, value, bn, cp);
		if (index < 1 || index > sc->sc_rhportcount[bn]) {
			return -1;
		}
		port = XHCI_PORTSC(cp);
		v = xhci_op_read_4(sc, port);
		DPRINTFN(4, "portsc=0x%08jx", v, 0, 0, 0);
		v &= ~XHCI_PS_CLEAR;
		switch (value) {
		case UHF_PORT_ENABLE:
			xhci_op_write_4(sc, port, v & ~XHCI_PS_PED);
			break;
		case UHF_PORT_SUSPEND:
			return -1;
		case UHF_PORT_POWER:
			break;
		case UHF_PORT_TEST:
		case UHF_PORT_INDICATOR:
			return -1;
		case UHF_C_PORT_CONNECTION:
			xhci_op_write_4(sc, port, v | XHCI_PS_CSC);
			break;
		case UHF_C_PORT_ENABLE:
		case UHF_C_PORT_SUSPEND:
		case UHF_C_PORT_OVER_CURRENT:
			return -1;
		case UHF_C_BH_PORT_RESET:
			xhci_op_write_4(sc, port, v | XHCI_PS_WRC);
			break;
		case UHF_C_PORT_RESET:
			xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
			break;
		case UHF_C_PORT_LINK_STATE:
			xhci_op_write_4(sc, port, v | XHCI_PS_PLC);
			break;
		case UHF_C_PORT_CONFIG_ERROR:
			xhci_op_write_4(sc, port, v | XHCI_PS_CEC);
			break;
		default:
			return -1;
		}
		break;
	}
	case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
		if (len == 0)
			break;
		if ((value & 0xff) != 0) {
			return -1;
		}
		usb_hub_descriptor_t hubd;

		totlen = uimin(buflen, sizeof(hubd));
		memcpy(&hubd, buf, totlen);
		hubd.bNbrPorts = sc->sc_rhportcount[bn];
		USETW(hubd.wHubCharacteristics, UHD_PWR_NO_SWITCH);
		hubd.bPwrOn2PwrGood = 200;
		for (i = 0, l = sc->sc_rhportcount[bn]; l > 0; i++, l -= 8) {
			/* XXX can't find out? */
			hubd.DeviceRemovable[i++] = 0;
		}
		hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
		totlen = uimin(totlen, hubd.bDescLength);
		memcpy(buf, &hubd, totlen);
		break;
	case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
		if (len != 4) {
			return -1;
		}
		memset(buf, 0, len); /* ? XXX */
		totlen = len;
		break;
	/* Get Port Status request */
	case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): {
		const size_t cp = xhci_rhport2ctlrport(sc, bn, index);

		DPRINTFN(8, "get port status bn=%jd i=%jd cp=%ju",
		    bn, index, cp, 0);
		if (index < 1 || index > sc->sc_rhportcount[bn]) {
			DPRINTFN(5, "bad get port status: index=%jd bn=%jd "
			    "portcount=%jd",
			    index, bn, sc->sc_rhportcount[bn], 0);
			return -1;
		}
		if (len != 4) {
			DPRINTFN(5, "bad get port status: len %jd != 4",
			    len, 0, 0, 0);
			return -1;
		}
		v = xhci_op_read_4(sc, XHCI_PORTSC(cp));
		DPRINTFN(4, "getrhportsc %jd 0x%08jx", cp, v, 0, 0);
		i = xhci_xspeed2psspeed(XHCI_PS_SPEED_GET(v));
		if (v & XHCI_PS_CCS)	i |= UPS_CURRENT_CONNECT_STATUS;
		if (v & XHCI_PS_PED)	i |= UPS_PORT_ENABLED;
		if (v & XHCI_PS_OCA)	i |= UPS_OVERCURRENT_INDICATOR;
		//if (v & XHCI_PS_SUSP)	i |= UPS_SUSPEND;
		if (v & XHCI_PS_PR)	i |= UPS_RESET;
		if (v & XHCI_PS_PP) {
			if (i & UPS_OTHER_SPEED)
				i |= UPS_PORT_POWER_SS;
			else
				i |= UPS_PORT_POWER;
		}
		if (i & UPS_OTHER_SPEED)
			i |= UPS_PORT_LS_SET(XHCI_PS_PLS_GET(v));
		if (sc->sc_vendor_port_status)
			i = sc->sc_vendor_port_status(sc, v, i);
		USETW(ps.wPortStatus, i);
		i = 0;
		if (v & XHCI_PS_CSC)	i |= UPS_C_CONNECT_STATUS;
		if (v & XHCI_PS_PEC)	i |= UPS_C_PORT_ENABLED;
		if (v & XHCI_PS_OCC)	i |= UPS_C_OVERCURRENT_INDICATOR;
		if (v & XHCI_PS_PRC)	i |= UPS_C_PORT_RESET;
		if (v & XHCI_PS_WRC)	i |= UPS_C_BH_PORT_RESET;
		if (v & XHCI_PS_PLC)	i |= UPS_C_PORT_LINK_STATE;
		if (v & XHCI_PS_CEC)	i |= UPS_C_PORT_CONFIG_ERROR;
		USETW(ps.wPortChange, i);
		totlen = uimin(len, sizeof(ps));
		memcpy(buf, &ps, totlen);
		DPRINTFN(5, "get port status: wPortStatus %#jx wPortChange %#jx"
		    " totlen %jd",
		    UGETW(ps.wPortStatus), UGETW(ps.wPortChange), totlen, 0);
		break;
	}
	case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
		return -1;
	case C(UR_SET_HUB_DEPTH, UT_WRITE_CLASS_DEVICE):
		break;
	case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
		break;
	/* Set Port Feature request */
	case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): {
		int optval = (index >> 8) & 0xff;
		index &= 0xff;
		if (index < 1 || index > sc->sc_rhportcount[bn]) {
			return -1;
		}

		const size_t cp = xhci_rhport2ctlrport(sc, bn, index);

		port = XHCI_PORTSC(cp);
		v = xhci_op_read_4(sc, port);
		DPRINTFN(4, "index %jd cp %jd portsc=0x%08jx", index, cp, v, 0);
		v &= ~XHCI_PS_CLEAR;
		switch (value) {
		case UHF_PORT_ENABLE:
			xhci_op_write_4(sc, port, v | XHCI_PS_PED);
			break;
		case UHF_PORT_SUSPEND:
			/* XXX suspend */
			break;
		case UHF_PORT_RESET:
			v &= ~(XHCI_PS_PED | XHCI_PS_PR);
			xhci_op_write_4(sc, port, v | XHCI_PS_PR);
			/* Wait for reset to complete. */
			usb_delay_ms(&sc->sc_bus, USB_PORT_ROOT_RESET_DELAY);
			if (sc->sc_dying) {
				return -1;
			}
			v = xhci_op_read_4(sc, port);
			if (v & XHCI_PS_PR) {
				xhci_op_write_4(sc, port, v & ~XHCI_PS_PR);
				usb_delay_ms(&sc->sc_bus, 10);
				/* XXX */
			}
			break;
		case UHF_PORT_POWER:
			/* XXX power control */
			break;
		/* XXX more */
		case UHF_C_PORT_RESET:
			xhci_op_write_4(sc, port, v | XHCI_PS_PRC);
			break;
		case UHF_PORT_U1_TIMEOUT:
			if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
				return -1;
			}
			port = XHCI_PORTPMSC(cp);
			v = xhci_op_read_4(sc, port);
			DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
			    index, cp, v, 0);
			v &= ~XHCI_PM3_U1TO_SET(0xff);
			v |= XHCI_PM3_U1TO_SET(optval);
			xhci_op_write_4(sc, port, v);
			break;
		case UHF_PORT_U2_TIMEOUT:
			if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) {
				return -1;
			}
			port = XHCI_PORTPMSC(cp);
			v = xhci_op_read_4(sc, port);
			DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx",
			    index, cp, v, 0);
			v &= ~XHCI_PM3_U2TO_SET(0xff);
			v |= XHCI_PM3_U2TO_SET(optval);
			xhci_op_write_4(sc, port, v);
			break;
		default:
			return -1;
		}
	}
		break;
	case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
	case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
	case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
	case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
		break;
	default:
		/* default from usbroothub */
		return buflen;
	}

	return totlen;
}
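
/*
 * Illustrative example for the C(bRequest, bmRequestType) dispatch
 * above (hypothetical request, assumed values): a Get Port Status
 * request from the hub driver arrives with bmRequestType =
 * UT_READ_CLASS_OTHER (0xa3) and bRequest = UR_GET_STATUS (0x00).
 * C(UR_GET_STATUS, UT_READ_CLASS_OTHER) = 0x00 | (0xa3 << 8) = 0xa300
 * selects the "Get Port Status" case; the PORTSC register of the
 * matching controller port is then translated bit by bit
 * (CCS -> UPS_CURRENT_CONNECT_STATUS, PED -> UPS_PORT_ENABLED, ...)
 * into the wPortStatus/wPortChange words handed back to usbroothub.
 */
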
/* root hub interrupt */

static usbd_status
xhci_root_intr_transfer(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	usbd_status err;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mutex_exit(&sc->sc_lock);
	if (err)
		return err;

	/* Pipe isn't running, start first */
	return xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
}

/* Wait for roothub port status/change */
static usbd_status
xhci_root_intr_start(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
	const bool polling = xhci_polling_p(sc);

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	if (sc->sc_dying)
		return USBD_IOERROR;

	if (!polling)
		mutex_enter(&sc->sc_lock);
	KASSERT(sc->sc_intrxfer[bn] == NULL);
	sc->sc_intrxfer[bn] = xfer;
	xfer->ux_status = USBD_IN_PROGRESS;
	if (!polling)
		mutex_exit(&sc->sc_lock);

	return USBD_IN_PROGRESS;
}

static void
xhci_root_intr_abort(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	KASSERT(mutex_owned(&sc->sc_lock));
	KASSERT(xfer->ux_pipe->up_intrxfer == xfer);

	/* If xfer has already completed, nothing to do here. */
	if (sc->sc_intrxfer[bn] == NULL)
		return;

	/*
	 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer.
	 * Cancel it.
	 */
	KASSERT(sc->sc_intrxfer[bn] == xfer);
	xfer->ux_status = USBD_CANCELLED;
	usb_transfer_complete(xfer);
}

static void
xhci_root_intr_close(struct usbd_pipe *pipe)
{
	struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe);
	const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer;
	const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	KASSERT(mutex_owned(&sc->sc_lock));

	/*
	 * Caller must guarantee the xfer has completed first, by
	 * closing the pipe only after normal completion or an abort.
	 */
	KASSERT(sc->sc_intrxfer[bn] == NULL);
}

static void
xhci_root_intr_done(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	KASSERT(mutex_owned(&sc->sc_lock));

	/* Claim the xfer so it doesn't get completed again. */
	KASSERT(sc->sc_intrxfer[bn] == xfer);
	KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
	sc->sc_intrxfer[bn] = NULL;
}
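
/*
 * Note on the sc_intrxfer[] handshake above (a summary of the code,
 * not new behaviour): xhci_root_intr_start() parks exactly one xfer
 * per bus in sc->sc_intrxfer[bn]; the port-status-change path
 * completes it and xhci_root_intr_done() then clears the slot.  So
 * abort only has work to do while the slot is still non-NULL, and
 * close can assert that the slot is already empty.
 */
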
/* -------------- */
/* device control */

static usbd_status
xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	usbd_status err;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mutex_exit(&sc->sc_lock);
	if (err)
		return err;

	/* Pipe isn't running, start first */
	return xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
}

static usbd_status
xhci_device_ctrl_start(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
	const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
	struct xhci_ring * const tr = xs->xs_xr[dci];
	struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
	usb_device_request_t * const req = &xfer->ux_request;
	const bool isread = usbd_xfer_isread(xfer);
	const uint32_t len = UGETW(req->wLength);
	usb_dma_t * const dma = &xfer->ux_dmabuf;
	uint64_t parameter;
	uint32_t status;
	uint32_t control;
	u_int i;
	const bool polling = xhci_polling_p(sc);

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("req: %04jx %04jx %04jx %04jx",
	    req->bmRequestType | (req->bRequest << 8), UGETW(req->wValue),
	    UGETW(req->wIndex), UGETW(req->wLength));

	/* we rely on the bottom bits for extra info */
	KASSERTMSG(((uintptr_t)xfer & 0x3) == 0x0, "xfer %zx",
	    (uintptr_t) xfer);

	KASSERT((xfer->ux_rqflags & URQ_REQUEST) != 0);

	i = 0;

	/* setup phase */
	parameter = le64dec(req); /* to keep USB endian after xhci_trb_put() */
	status = XHCI_TRB_2_IRQ_SET(0) | XHCI_TRB_2_BYTES_SET(sizeof(*req));
	control = ((len == 0) ? XHCI_TRB_3_TRT_NONE :
	     (isread ? XHCI_TRB_3_TRT_IN : XHCI_TRB_3_TRT_OUT)) |
	    XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) |
	    XHCI_TRB_3_IDT_BIT;
	xhci_xfer_put_trb(xx, i++, parameter, status, control);

	if (len != 0) {
		/* data phase */
		parameter = DMAADDR(dma, 0);
		KASSERTMSG(len <= 0x10000, "len %d", len);
		status = XHCI_TRB_2_IRQ_SET(0) |
		    XHCI_TRB_2_TDSZ_SET(0) |
		    XHCI_TRB_2_BYTES_SET(len);
		control = (isread ? XHCI_TRB_3_DIR_IN : 0) |
		    XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE) |
		    (isread ? XHCI_TRB_3_ISP_BIT : 0) |
		    XHCI_TRB_3_IOC_BIT;
		xhci_xfer_put_trb(xx, i++, parameter, status, control);

		usb_syncmem(dma, 0, len,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	}

	parameter = 0;
	status = XHCI_TRB_2_IRQ_SET(0);
	/* the status stage has inverted direction */
	control = ((isread && (len > 0)) ? 0 : XHCI_TRB_3_DIR_IN) |
	    XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE) |
	    XHCI_TRB_3_IOC_BIT;
	xhci_xfer_put_trb(xx, i++, parameter, status, control);

	if (!polling)
		mutex_enter(&tr->xr_lock);
	xhci_ring_put_xfer(sc, tr, xx, i);
	if (!polling)
		mutex_exit(&tr->xr_lock);

	if (!polling)
		mutex_enter(&sc->sc_lock);
	xfer->ux_status = USBD_IN_PROGRESS;
	xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
	usbd_xfer_schedule_timeout(xfer);
	if (!polling)
		mutex_exit(&sc->sc_lock);

	return USBD_IN_PROGRESS;
}

static void
xhci_device_ctrl_done(struct usbd_xfer *xfer)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();
	usb_device_request_t *req = &xfer->ux_request;
	int len = UGETW(req->wLength);
	int rd = req->bmRequestType & UT_READ;

	if (len)
		usb_syncmem(&xfer->ux_dmabuf, 0, len,
		    rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
}

static void
xhci_device_ctrl_abort(struct usbd_xfer *xfer)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	usbd_xfer_abort(xfer);
}

static void
xhci_device_ctrl_close(struct usbd_pipe *pipe)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	xhci_close_pipe(pipe);
}
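
/*
 * Illustrative TRB layout built by xhci_device_ctrl_start() for a
 * hypothetical 8-byte control read (say, a GET_DESCRIPTOR with
 * wLength = 8); the concrete request is an assumption, the stage
 * layout follows the code above:
 *
 *	TRB 0: Setup Stage  - IDT set, the 8-byte request carried
 *			      inline, TRT = IN (wLength != 0, read)
 *	TRB 1: Data Stage   - buffer DMA address, length 8, DIR = IN,
 *			      ISP and IOC set
 *	TRB 2: Status Stage - zero length, direction OUT (inverted
 *			      from the data stage), IOC set
 *
 * For a no-data request only the Setup (TRT = NONE) and Status
 * (DIR = IN) TRBs are queued.
 */
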
/* ------------------ */
/* device isochronous */

static usbd_status
xhci_device_isoc_transfer(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	usbd_status err;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mutex_exit(&sc->sc_lock);
	if (err)
		return err;

	return xhci_device_isoc_enter(xfer);
}

static usbd_status
xhci_device_isoc_enter(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
	const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
	struct xhci_ring * const tr = xs->xs_xr[dci];
	struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
	struct xhci_pipe * const xpipe = (struct xhci_pipe *)xfer->ux_pipe;
	uint32_t len = xfer->ux_length;
	usb_dma_t * const dma = &xfer->ux_dmabuf;
	uint64_t parameter;
	uint32_t status;
	uint32_t control;
	uint32_t mfindex;
	uint32_t offs;
	int i, ival;
	const bool polling = xhci_polling_p(sc);
	const uint16_t MPS = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
	const uint16_t mps = UE_GET_SIZE(MPS);
	const uint8_t maxb = xpipe->xp_maxb;
	u_int tdpc, tbc, tlbpc;

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
	    (uintptr_t)xfer, xs->xs_idx, dci, 0);

	if (sc->sc_dying)
		return USBD_IOERROR;

	KASSERT(xfer->ux_nframes != 0 && xfer->ux_frlengths);
	KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);

	const bool isread = usbd_xfer_isread(xfer);
	if (xfer->ux_length)
		usb_syncmem(dma, 0, xfer->ux_length,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
	if (ival >= 1 && ival <= 16)
		ival = 1 << (ival - 1);
	else
		ival = 1; /* fake something up */

	if (xpipe->xp_isoc_next == -1) {
		mfindex = xhci_rt_read_4(sc, XHCI_MFINDEX);
		DPRINTF("mfindex %jx", (uintmax_t)mfindex, 0, 0, 0);
		mfindex = XHCI_MFINDEX_GET(mfindex + 1);
		mfindex /= USB_UFRAMES_PER_FRAME;
		mfindex += 7; /* 7 frames is max possible IST */
		xpipe->xp_isoc_next = roundup2(mfindex, ival);
	}

	offs = 0;
	for (i = 0; i < xfer->ux_nframes; i++) {
		len = xfer->ux_frlengths[i];

		tdpc = howmany(len, mps);
		tbc = howmany(tdpc, maxb) - 1;
		tlbpc = tdpc % maxb;
		tlbpc = tlbpc ? tlbpc - 1 : maxb - 1;

		KASSERTMSG(len <= 0x10000, "len %d", len);
		parameter = DMAADDR(dma, offs);
		status = XHCI_TRB_2_IRQ_SET(0) |
		    XHCI_TRB_2_TDSZ_SET(0) |
		    XHCI_TRB_2_BYTES_SET(len);
		control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) |
		    (isread ? XHCI_TRB_3_ISP_BIT : 0) |
		    XHCI_TRB_3_TBC_SET(tbc) |
		    XHCI_TRB_3_TLBPC_SET(tlbpc) |
		    XHCI_TRB_3_IOC_BIT;
		if (XHCI_HCC_CFC(sc->sc_hcc)) {
			control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
#if 0
		} else if (xpipe->xp_isoc_next == -1) {
			control |= XHCI_TRB_3_FRID_SET(xpipe->xp_isoc_next);
#endif
		} else {
			control |= XHCI_TRB_3_ISO_SIA_BIT;
		}
#if 0
		if (i != xfer->ux_nframes - 1)
			control |= XHCI_TRB_3_BEI_BIT;
#endif
		xhci_xfer_put_trb(xx, i, parameter, status, control);

		xpipe->xp_isoc_next += ival;
		offs += len;
	}

	xx->xx_isoc_done = 0;

	if (!polling)
		mutex_enter(&tr->xr_lock);
	xhci_ring_put_xfer(sc, tr, xx, i);
	if (!polling)
		mutex_exit(&tr->xr_lock);

	if (!polling)
		mutex_enter(&sc->sc_lock);
	xfer->ux_status = USBD_IN_PROGRESS;
	xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
	usbd_xfer_schedule_timeout(xfer);
	if (!polling)
		mutex_exit(&sc->sc_lock);

	return USBD_IN_PROGRESS;
}
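
/*
 * Worked example for the isochronous TD fields above (assumed
 * endpoint, illustration only): mps = 1024, xp_maxb = 3 (a burst of
 * three) and a 5000-byte frame give
 *
 *	tdpc  = howmany(5000, 1024)    = 5	packets in the TD
 *	tbc   = howmany(5, 3) - 1      = 1	i.e. 2 bursts
 *	tlbpc = 5 % 3 = 2 -> 2 - 1     = 1	i.e. 2 packets in the
 *						last burst (3 + 2 = 5)
 *
 * The first frame of a new stream is scheduled at the next interval
 * boundary at least 7 frames (the maximum IST) past the current
 * MFINDEX; with Contiguous Frame ID support the frame number is put
 * in the TRB, otherwise the SIA bit lets the controller pick one.
 */
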
4603 */ 4604 return xhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 4605 } 4606 4607 static usbd_status 4608 xhci_device_bulk_start(struct usbd_xfer *xfer) 4609 { 4610 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4611 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4612 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4613 struct xhci_ring * const tr = xs->xs_xr[dci]; 4614 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 4615 const uint32_t len = xfer->ux_length; 4616 usb_dma_t * const dma = &xfer->ux_dmabuf; 4617 uint64_t parameter; 4618 uint32_t status; 4619 uint32_t control; 4620 u_int i = 0; 4621 const bool polling = xhci_polling_p(sc); 4622 4623 XHCIHIST_FUNC(); 4624 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4625 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4626 4627 if (sc->sc_dying) 4628 return USBD_IOERROR; 4629 4630 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0); 4631 4632 parameter = DMAADDR(dma, 0); 4633 const bool isread = usbd_xfer_isread(xfer); 4634 if (len) 4635 usb_syncmem(dma, 0, len, 4636 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 4637 4638 /* 4639 * XXX: (dsl) The physical buffer must not cross a 64k boundary. 4640 * If the user supplied buffer crosses such a boundary then 2 4641 * (or more) TRB should be used. 4642 * If multiple TRB are used the td_size field must be set correctly. 4643 * For v1.0 devices (like ivy bridge) this is the number of usb data 4644 * blocks needed to complete the transfer. 4645 * Setting it to 1 in the last TRB causes an extra zero-length 4646 * data block be sent. 4647 * The earlier documentation differs, I don't know how it behaves. 4648 */ 4649 KASSERTMSG(len <= 0x10000, "len %d", len); 4650 status = XHCI_TRB_2_IRQ_SET(0) | 4651 XHCI_TRB_2_TDSZ_SET(0) | 4652 XHCI_TRB_2_BYTES_SET(len); 4653 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) | 4654 (isread ? XHCI_TRB_3_ISP_BIT : 0) | 4655 XHCI_TRB_3_IOC_BIT; 4656 xhci_xfer_put_trb(xx, i++, parameter, status, control); 4657 4658 if (!polling) 4659 mutex_enter(&tr->xr_lock); 4660 xhci_ring_put_xfer(sc, tr, xx, i); 4661 if (!polling) 4662 mutex_exit(&tr->xr_lock); 4663 4664 if (!polling) 4665 mutex_enter(&sc->sc_lock); 4666 xfer->ux_status = USBD_IN_PROGRESS; 4667 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); 4668 usbd_xfer_schedule_timeout(xfer); 4669 if (!polling) 4670 mutex_exit(&sc->sc_lock); 4671 4672 return USBD_IN_PROGRESS; 4673 } 4674 4675 static void 4676 xhci_device_bulk_done(struct usbd_xfer *xfer) 4677 { 4678 #ifdef USB_DEBUG 4679 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4680 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4681 #endif 4682 const bool isread = usbd_xfer_isread(xfer); 4683 4684 XHCIHIST_FUNC(); 4685 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4686 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4687 4688 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length, 4689 isread ? 
static void
xhci_device_bulk_done(struct usbd_xfer *xfer)
{
#ifdef USB_DEBUG
	struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
	const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
#endif
	const bool isread = usbd_xfer_isread(xfer);

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
	    (uintptr_t)xfer, xs->xs_idx, dci, 0);

	usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
	    isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
}

static void
xhci_device_bulk_abort(struct usbd_xfer *xfer)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	usbd_xfer_abort(xfer);
}

static void
xhci_device_bulk_close(struct usbd_pipe *pipe)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	xhci_close_pipe(pipe);
}

/* ---------------- */
/* device interrupt */

static usbd_status
xhci_device_intr_transfer(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	usbd_status err;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mutex_exit(&sc->sc_lock);
	if (err)
		return err;

	/*
	 * Pipe isn't running (otherwise err would be USBD_INPROG),
	 * so start it first.
	 */
	return xhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
}

static usbd_status
xhci_device_intr_start(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
	const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
	struct xhci_ring * const tr = xs->xs_xr[dci];
	struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
	const uint32_t len = xfer->ux_length;
	const bool polling = xhci_polling_p(sc);
	usb_dma_t * const dma = &xfer->ux_dmabuf;
	uint64_t parameter;
	uint32_t status;
	uint32_t control;
	u_int i = 0;

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
	    (uintptr_t)xfer, xs->xs_idx, dci, 0);

	if (sc->sc_dying)
		return USBD_IOERROR;

	KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);

	const bool isread = usbd_xfer_isread(xfer);
	if (len)
		usb_syncmem(dma, 0, len,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	parameter = DMAADDR(dma, 0);
	KASSERTMSG(len <= 0x10000, "len %d", len);
	status = XHCI_TRB_2_IRQ_SET(0) |
	    XHCI_TRB_2_TDSZ_SET(0) |
	    XHCI_TRB_2_BYTES_SET(len);
	control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
	    (isread ? XHCI_TRB_3_ISP_BIT : 0) | XHCI_TRB_3_IOC_BIT;
	xhci_xfer_put_trb(xx, i++, parameter, status, control);

	if (!polling)
		mutex_enter(&tr->xr_lock);
	xhci_ring_put_xfer(sc, tr, xx, i);
	if (!polling)
		mutex_exit(&tr->xr_lock);

	if (!polling)
		mutex_enter(&sc->sc_lock);
	xfer->ux_status = USBD_IN_PROGRESS;
	xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
	usbd_xfer_schedule_timeout(xfer);
	if (!polling)
		mutex_exit(&sc->sc_lock);

	return USBD_IN_PROGRESS;
}

static void
xhci_device_intr_done(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
#ifdef USB_DEBUG
	struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
	const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
#endif
	const bool isread = usbd_xfer_isread(xfer);

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
	    (uintptr_t)xfer, xs->xs_idx, dci, 0);

	KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));

	usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
	    isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
}

static void
xhci_device_intr_abort(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx", (uintptr_t)xfer, 0, 0, 0);

	KASSERT(mutex_owned(&sc->sc_lock));
	usbd_xfer_abort(xfer);
}

static void
xhci_device_intr_close(struct usbd_pipe *pipe)
{
	//struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx", (uintptr_t)pipe, 0, 0, 0);

	xhci_close_pipe(pipe);
}