1 /* $NetBSD: xhci.c,v 1.138 2021/01/05 18:00:21 skrll Exp $ */ 2 3 /* 4 * Copyright (c) 2013 Jonathan A. Kollasch 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 /* 30 * USB rev 2.0 and rev 3.1 specification 31 * http://www.usb.org/developers/docs/ 32 * xHCI rev 1.1 specification 33 * http://www.intel.com/technology/usb/spec.htm 34 */ 35 36 #include <sys/cdefs.h> 37 __KERNEL_RCSID(0, "$NetBSD: xhci.c,v 1.138 2021/01/05 18:00:21 skrll Exp $"); 38 39 #ifdef _KERNEL_OPT 40 #include "opt_usb.h" 41 #endif 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/kernel.h> 46 #include <sys/kmem.h> 47 #include <sys/device.h> 48 #include <sys/select.h> 49 #include <sys/proc.h> 50 #include <sys/queue.h> 51 #include <sys/mutex.h> 52 #include <sys/condvar.h> 53 #include <sys/bus.h> 54 #include <sys/cpu.h> 55 #include <sys/sysctl.h> 56 57 #include <machine/endian.h> 58 59 #include <dev/usb/usb.h> 60 #include <dev/usb/usbdi.h> 61 #include <dev/usb/usbdivar.h> 62 #include <dev/usb/usbdi_util.h> 63 #include <dev/usb/usbhist.h> 64 #include <dev/usb/usb_mem.h> 65 #include <dev/usb/usb_quirks.h> 66 67 #include <dev/usb/xhcireg.h> 68 #include <dev/usb/xhcivar.h> 69 #include <dev/usb/usbroothub.h> 70 71 72 #ifdef USB_DEBUG 73 #ifndef XHCI_DEBUG 74 #define xhcidebug 0 75 #else /* !XHCI_DEBUG */ 76 #define HEXDUMP(a, b, c) \ 77 do { \ 78 if (xhcidebug > 0) \ 79 hexdump(printf, a, b, c); \ 80 } while (/*CONSTCOND*/0) 81 static int xhcidebug = 0; 82 83 SYSCTL_SETUP(sysctl_hw_xhci_setup, "sysctl hw.xhci setup") 84 { 85 int err; 86 const struct sysctlnode *rnode; 87 const struct sysctlnode *cnode; 88 89 err = sysctl_createv(clog, 0, NULL, &rnode, 90 CTLFLAG_PERMANENT, CTLTYPE_NODE, "xhci", 91 SYSCTL_DESCR("xhci global controls"), 92 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL); 93 94 if (err) 95 goto fail; 96 97 /* control debugging printfs */ 98 err = sysctl_createv(clog, 0, &rnode, &cnode, 99 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, 100 "debug", SYSCTL_DESCR("Enable debugging output"), 101 NULL, 0, &xhcidebug, sizeof(xhcidebug), CTL_CREATE, CTL_EOL); 102 if (err) 103 goto fail; 104 105 return; 106 fail: 107 aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err); 108 } 109 110 
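/* The hw.xhci.debug sysctl created above gates the HEXDUMP output and sets the verbosity of the DPRINTFN logging used throughout this file. */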
#endif /* !XHCI_DEBUG */ 111 #endif /* USB_DEBUG */ 112 113 #ifndef HEXDUMP 114 #define HEXDUMP(a, b, c) 115 #endif 116 117 #define DPRINTF(FMT,A,B,C,D) USBHIST_LOG(xhcidebug,FMT,A,B,C,D) 118 #define DPRINTFN(N,FMT,A,B,C,D) USBHIST_LOGN(xhcidebug,N,FMT,A,B,C,D) 119 #define XHCIHIST_FUNC() USBHIST_FUNC() 120 #define XHCIHIST_CALLED(name) USBHIST_CALLED(xhcidebug) 121 #define XHCIHIST_CALLARGS(FMT,A,B,C,D) \ 122 USBHIST_CALLARGS(xhcidebug,FMT,A,B,C,D) 123 124 #define XHCI_DCI_SLOT 0 125 #define XHCI_DCI_EP_CONTROL 1 126 127 #define XHCI_ICI_INPUT_CONTROL 0 128 129 struct xhci_pipe { 130 struct usbd_pipe xp_pipe; 131 struct usb_task xp_async_task; 132 int16_t xp_isoc_next; /* next frame */ 133 uint8_t xp_maxb; /* max burst */ 134 uint8_t xp_mult; 135 }; 136 137 #define XHCI_COMMAND_RING_TRBS 256 138 #define XHCI_EVENT_RING_TRBS 256 139 #define XHCI_EVENT_RING_SEGMENTS 1 140 #define XHCI_TRB_3_ED_BIT XHCI_TRB_3_ISP_BIT 141 142 static usbd_status xhci_open(struct usbd_pipe *); 143 static void xhci_close_pipe(struct usbd_pipe *); 144 static int xhci_intr1(struct xhci_softc * const); 145 static void xhci_softintr(void *); 146 static void xhci_poll(struct usbd_bus *); 147 static struct usbd_xfer *xhci_allocx(struct usbd_bus *, unsigned int); 148 static void xhci_freex(struct usbd_bus *, struct usbd_xfer *); 149 static void xhci_abortx(struct usbd_xfer *); 150 static bool xhci_dying(struct usbd_bus *); 151 static void xhci_get_lock(struct usbd_bus *, kmutex_t **); 152 static usbd_status xhci_new_device(device_t, struct usbd_bus *, int, int, int, 153 struct usbd_port *); 154 static int xhci_roothub_ctrl(struct usbd_bus *, usb_device_request_t *, 155 void *, int); 156 157 static usbd_status xhci_configure_endpoint(struct usbd_pipe *); 158 //static usbd_status xhci_unconfigure_endpoint(struct usbd_pipe *); 159 static usbd_status xhci_reset_endpoint(struct usbd_pipe *); 160 static usbd_status xhci_stop_endpoint(struct usbd_pipe *); 161 162 static void xhci_host_dequeue(struct xhci_ring * const); 163 static usbd_status xhci_set_dequeue(struct usbd_pipe *); 164 165 static usbd_status xhci_do_command(struct xhci_softc * const, 166 struct xhci_soft_trb * const, int); 167 static usbd_status xhci_do_command_locked(struct xhci_softc * const, 168 struct xhci_soft_trb * const, int); 169 static usbd_status xhci_init_slot(struct usbd_device *, uint32_t); 170 static void xhci_free_slot(struct xhci_softc *, struct xhci_slot *); 171 static usbd_status xhci_set_address(struct usbd_device *, uint32_t, bool); 172 static usbd_status xhci_enable_slot(struct xhci_softc * const, 173 uint8_t * const); 174 static usbd_status xhci_disable_slot(struct xhci_softc * const, uint8_t); 175 static usbd_status xhci_address_device(struct xhci_softc * const, 176 uint64_t, uint8_t, bool); 177 static void xhci_set_dcba(struct xhci_softc * const, uint64_t, int); 178 static usbd_status xhci_update_ep0_mps(struct xhci_softc * const, 179 struct xhci_slot * const, u_int); 180 static usbd_status xhci_ring_init(struct xhci_softc * const, 181 struct xhci_ring **, size_t, size_t); 182 static void xhci_ring_free(struct xhci_softc * const, 183 struct xhci_ring ** const); 184 185 static void xhci_setup_ctx(struct usbd_pipe *); 186 static void xhci_setup_route(struct usbd_pipe *, uint32_t *); 187 static void xhci_setup_tthub(struct usbd_pipe *, uint32_t *); 188 static void xhci_setup_maxburst(struct usbd_pipe *, uint32_t *); 189 static uint32_t xhci_bival2ival(uint32_t, uint32_t); 190 191 static void xhci_noop(struct usbd_pipe *); 192 193 static 
usbd_status xhci_root_intr_transfer(struct usbd_xfer *); 194 static usbd_status xhci_root_intr_start(struct usbd_xfer *); 195 static void xhci_root_intr_abort(struct usbd_xfer *); 196 static void xhci_root_intr_close(struct usbd_pipe *); 197 static void xhci_root_intr_done(struct usbd_xfer *); 198 199 static usbd_status xhci_device_ctrl_transfer(struct usbd_xfer *); 200 static usbd_status xhci_device_ctrl_start(struct usbd_xfer *); 201 static void xhci_device_ctrl_abort(struct usbd_xfer *); 202 static void xhci_device_ctrl_close(struct usbd_pipe *); 203 static void xhci_device_ctrl_done(struct usbd_xfer *); 204 205 static usbd_status xhci_device_isoc_transfer(struct usbd_xfer *); 206 static usbd_status xhci_device_isoc_enter(struct usbd_xfer *); 207 static void xhci_device_isoc_abort(struct usbd_xfer *); 208 static void xhci_device_isoc_close(struct usbd_pipe *); 209 static void xhci_device_isoc_done(struct usbd_xfer *); 210 211 static usbd_status xhci_device_intr_transfer(struct usbd_xfer *); 212 static usbd_status xhci_device_intr_start(struct usbd_xfer *); 213 static void xhci_device_intr_abort(struct usbd_xfer *); 214 static void xhci_device_intr_close(struct usbd_pipe *); 215 static void xhci_device_intr_done(struct usbd_xfer *); 216 217 static usbd_status xhci_device_bulk_transfer(struct usbd_xfer *); 218 static usbd_status xhci_device_bulk_start(struct usbd_xfer *); 219 static void xhci_device_bulk_abort(struct usbd_xfer *); 220 static void xhci_device_bulk_close(struct usbd_pipe *); 221 static void xhci_device_bulk_done(struct usbd_xfer *); 222 223 static const struct usbd_bus_methods xhci_bus_methods = { 224 .ubm_open = xhci_open, 225 .ubm_softint = xhci_softintr, 226 .ubm_dopoll = xhci_poll, 227 .ubm_allocx = xhci_allocx, 228 .ubm_freex = xhci_freex, 229 .ubm_abortx = xhci_abortx, 230 .ubm_dying = xhci_dying, 231 .ubm_getlock = xhci_get_lock, 232 .ubm_newdev = xhci_new_device, 233 .ubm_rhctrl = xhci_roothub_ctrl, 234 }; 235 236 static const struct usbd_pipe_methods xhci_root_intr_methods = { 237 .upm_transfer = xhci_root_intr_transfer, 238 .upm_start = xhci_root_intr_start, 239 .upm_abort = xhci_root_intr_abort, 240 .upm_close = xhci_root_intr_close, 241 .upm_cleartoggle = xhci_noop, 242 .upm_done = xhci_root_intr_done, 243 }; 244 245 246 static const struct usbd_pipe_methods xhci_device_ctrl_methods = { 247 .upm_transfer = xhci_device_ctrl_transfer, 248 .upm_start = xhci_device_ctrl_start, 249 .upm_abort = xhci_device_ctrl_abort, 250 .upm_close = xhci_device_ctrl_close, 251 .upm_cleartoggle = xhci_noop, 252 .upm_done = xhci_device_ctrl_done, 253 }; 254 255 static const struct usbd_pipe_methods xhci_device_isoc_methods = { 256 .upm_transfer = xhci_device_isoc_transfer, 257 .upm_abort = xhci_device_isoc_abort, 258 .upm_close = xhci_device_isoc_close, 259 .upm_cleartoggle = xhci_noop, 260 .upm_done = xhci_device_isoc_done, 261 }; 262 263 static const struct usbd_pipe_methods xhci_device_bulk_methods = { 264 .upm_transfer = xhci_device_bulk_transfer, 265 .upm_start = xhci_device_bulk_start, 266 .upm_abort = xhci_device_bulk_abort, 267 .upm_close = xhci_device_bulk_close, 268 .upm_cleartoggle = xhci_noop, 269 .upm_done = xhci_device_bulk_done, 270 }; 271 272 static const struct usbd_pipe_methods xhci_device_intr_methods = { 273 .upm_transfer = xhci_device_intr_transfer, 274 .upm_start = xhci_device_intr_start, 275 .upm_abort = xhci_device_intr_abort, 276 .upm_close = xhci_device_intr_close, 277 .upm_cleartoggle = xhci_noop, 278 .upm_done = xhci_device_intr_done, 279 }; 280 281 static 
inline uint32_t 282 xhci_read_1(const struct xhci_softc * const sc, bus_size_t offset) 283 { 284 return bus_space_read_1(sc->sc_iot, sc->sc_ioh, offset); 285 } 286 287 static inline uint32_t 288 xhci_read_2(const struct xhci_softc * const sc, bus_size_t offset) 289 { 290 return bus_space_read_2(sc->sc_iot, sc->sc_ioh, offset); 291 } 292 293 static inline uint32_t 294 xhci_read_4(const struct xhci_softc * const sc, bus_size_t offset) 295 { 296 return bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset); 297 } 298 299 static inline void 300 xhci_write_1(const struct xhci_softc * const sc, bus_size_t offset, 301 uint32_t value) 302 { 303 bus_space_write_1(sc->sc_iot, sc->sc_ioh, offset, value); 304 } 305 306 #if 0 /* unused */ 307 static inline void 308 xhci_write_4(const struct xhci_softc * const sc, bus_size_t offset, 309 uint32_t value) 310 { 311 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value); 312 } 313 #endif /* unused */ 314 315 static inline void 316 xhci_barrier(const struct xhci_softc * const sc, int flags) 317 { 318 bus_space_barrier(sc->sc_iot, sc->sc_ioh, 0, sc->sc_ios, flags); 319 } 320 321 static inline uint32_t 322 xhci_cap_read_4(const struct xhci_softc * const sc, bus_size_t offset) 323 { 324 return bus_space_read_4(sc->sc_iot, sc->sc_cbh, offset); 325 } 326 327 static inline uint32_t 328 xhci_op_read_4(const struct xhci_softc * const sc, bus_size_t offset) 329 { 330 return bus_space_read_4(sc->sc_iot, sc->sc_obh, offset); 331 } 332 333 static inline void 334 xhci_op_write_4(const struct xhci_softc * const sc, bus_size_t offset, 335 uint32_t value) 336 { 337 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value); 338 } 339 340 static inline uint64_t 341 xhci_op_read_8(const struct xhci_softc * const sc, bus_size_t offset) 342 { 343 uint64_t value; 344 345 if (XHCI_HCC_AC64(sc->sc_hcc)) { 346 #ifdef XHCI_USE_BUS_SPACE_8 347 value = bus_space_read_8(sc->sc_iot, sc->sc_obh, offset); 348 #else 349 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset); 350 value |= (uint64_t)bus_space_read_4(sc->sc_iot, sc->sc_obh, 351 offset + 4) << 32; 352 #endif 353 } else { 354 value = bus_space_read_4(sc->sc_iot, sc->sc_obh, offset); 355 } 356 357 return value; 358 } 359 360 static inline void 361 xhci_op_write_8(const struct xhci_softc * const sc, bus_size_t offset, 362 uint64_t value) 363 { 364 if (XHCI_HCC_AC64(sc->sc_hcc)) { 365 #ifdef XHCI_USE_BUS_SPACE_8 366 bus_space_write_8(sc->sc_iot, sc->sc_obh, offset, value); 367 #else 368 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 0, 369 (value >> 0) & 0xffffffff); 370 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset + 4, 371 (value >> 32) & 0xffffffff); 372 #endif 373 } else { 374 bus_space_write_4(sc->sc_iot, sc->sc_obh, offset, value); 375 } 376 } 377 378 static inline uint32_t 379 xhci_rt_read_4(const struct xhci_softc * const sc, bus_size_t offset) 380 { 381 return bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset); 382 } 383 384 static inline void 385 xhci_rt_write_4(const struct xhci_softc * const sc, bus_size_t offset, 386 uint32_t value) 387 { 388 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value); 389 } 390 391 #if 0 /* unused */ 392 static inline uint64_t 393 xhci_rt_read_8(const struct xhci_softc * const sc, bus_size_t offset) 394 { 395 uint64_t value; 396 397 if (XHCI_HCC_AC64(sc->sc_hcc)) { 398 #ifdef XHCI_USE_BUS_SPACE_8 399 value = bus_space_read_8(sc->sc_iot, sc->sc_rbh, offset); 400 #else 401 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset); 402 value |= (uint64_t)bus_space_read_4(sc->sc_iot, 
sc->sc_rbh, 403 offset + 4) << 32; 404 #endif 405 } else { 406 value = bus_space_read_4(sc->sc_iot, sc->sc_rbh, offset); 407 } 408 409 return value; 410 } 411 #endif /* unused */ 412 413 static inline void 414 xhci_rt_write_8(const struct xhci_softc * const sc, bus_size_t offset, 415 uint64_t value) 416 { 417 if (XHCI_HCC_AC64(sc->sc_hcc)) { 418 #ifdef XHCI_USE_BUS_SPACE_8 419 bus_space_write_8(sc->sc_iot, sc->sc_rbh, offset, value); 420 #else 421 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 0, 422 (value >> 0) & 0xffffffff); 423 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset + 4, 424 (value >> 32) & 0xffffffff); 425 #endif 426 } else { 427 bus_space_write_4(sc->sc_iot, sc->sc_rbh, offset, value); 428 } 429 } 430 431 #if 0 /* unused */ 432 static inline uint32_t 433 xhci_db_read_4(const struct xhci_softc * const sc, bus_size_t offset) 434 { 435 return bus_space_read_4(sc->sc_iot, sc->sc_dbh, offset); 436 } 437 #endif /* unused */ 438 439 static inline void 440 xhci_db_write_4(const struct xhci_softc * const sc, bus_size_t offset, 441 uint32_t value) 442 { 443 bus_space_write_4(sc->sc_iot, sc->sc_dbh, offset, value); 444 } 445 446 /* --- */ 447 448 static inline uint8_t 449 xhci_ep_get_type(usb_endpoint_descriptor_t * const ed) 450 { 451 u_int eptype = 0; 452 453 switch (UE_GET_XFERTYPE(ed->bmAttributes)) { 454 case UE_CONTROL: 455 eptype = 0x0; 456 break; 457 case UE_ISOCHRONOUS: 458 eptype = 0x1; 459 break; 460 case UE_BULK: 461 eptype = 0x2; 462 break; 463 case UE_INTERRUPT: 464 eptype = 0x3; 465 break; 466 } 467 468 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) || 469 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)) 470 return eptype | 0x4; 471 else 472 return eptype; 473 } 474 475 static u_int 476 xhci_ep_get_dci(usb_endpoint_descriptor_t * const ed) 477 { 478 /* xHCI 1.0 section 4.5.1 */ 479 u_int epaddr = UE_GET_ADDR(ed->bEndpointAddress); 480 u_int in = 0; 481 482 if ((UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) || 483 (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)) 484 in = 1; 485 486 return epaddr * 2 + in; 487 } 488 489 static inline u_int 490 xhci_dci_to_ici(const u_int i) 491 { 492 return i + 1; 493 } 494 495 static inline void * 496 xhci_slot_get_dcv(struct xhci_softc * const sc, struct xhci_slot * const xs, 497 const u_int dci) 498 { 499 return KERNADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci); 500 } 501 502 #if 0 /* unused */ 503 static inline bus_addr_t 504 xhci_slot_get_dcp(struct xhci_softc * const sc, struct xhci_slot * const xs, 505 const u_int dci) 506 { 507 return DMAADDR(&xs->xs_dc_dma, sc->sc_ctxsz * dci); 508 } 509 #endif /* unused */ 510 511 static inline void * 512 xhci_slot_get_icv(struct xhci_softc * const sc, struct xhci_slot * const xs, 513 const u_int ici) 514 { 515 return KERNADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici); 516 } 517 518 static inline bus_addr_t 519 xhci_slot_get_icp(struct xhci_softc * const sc, struct xhci_slot * const xs, 520 const u_int ici) 521 { 522 return DMAADDR(&xs->xs_ic_dma, sc->sc_ctxsz * ici); 523 } 524 525 static inline struct xhci_trb * 526 xhci_ring_trbv(struct xhci_ring * const xr, u_int idx) 527 { 528 return KERNADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx); 529 } 530 531 static inline bus_addr_t 532 xhci_ring_trbp(struct xhci_ring * const xr, u_int idx) 533 { 534 return DMAADDR(&xr->xr_dma, XHCI_TRB_SIZE * idx); 535 } 536 537 static inline void 538 xhci_xfer_put_trb(struct xhci_xfer * const xx, u_int idx, 539 uint64_t parameter, uint32_t status, uint32_t control) 540 { 541 KASSERTMSG(idx < xx->xx_ntrb, "idx=%u 
xx_ntrb=%u", idx, xx->xx_ntrb); 542 xx->xx_trb[idx].trb_0 = parameter; 543 xx->xx_trb[idx].trb_2 = status; 544 xx->xx_trb[idx].trb_3 = control; 545 } 546 547 static inline void 548 xhci_trb_put(struct xhci_trb * const trb, uint64_t parameter, uint32_t status, 549 uint32_t control) 550 { 551 trb->trb_0 = htole64(parameter); 552 trb->trb_2 = htole32(status); 553 trb->trb_3 = htole32(control); 554 } 555 556 static int 557 xhci_trb_get_idx(struct xhci_ring *xr, uint64_t trb_0, int *idx) 558 { 559 /* base address of TRBs */ 560 bus_addr_t trbp = xhci_ring_trbp(xr, 0); 561 562 /* trb_0 range sanity check */ 563 if (trb_0 == 0 || trb_0 < trbp || 564 (trb_0 - trbp) % sizeof(struct xhci_trb) != 0 || 565 (trb_0 - trbp) / sizeof(struct xhci_trb) >= xr->xr_ntrb) { 566 return 1; 567 } 568 *idx = (trb_0 - trbp) / sizeof(struct xhci_trb); 569 return 0; 570 } 571 572 static unsigned int 573 xhci_get_epstate(struct xhci_softc * const sc, struct xhci_slot * const xs, 574 u_int dci) 575 { 576 uint32_t *cp; 577 578 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 579 cp = xhci_slot_get_dcv(sc, xs, dci); 580 return XHCI_EPCTX_0_EPSTATE_GET(le32toh(cp[0])); 581 } 582 583 static inline unsigned int 584 xhci_ctlrport2bus(struct xhci_softc * const sc, unsigned int ctlrport) 585 { 586 const unsigned int port = ctlrport - 1; 587 const uint8_t bit = __BIT(port % NBBY); 588 589 return __SHIFTOUT(sc->sc_ctlrportbus[port / NBBY], bit); 590 } 591 592 /* 593 * Return the roothub port for a controller port. Both are 1..n. 594 */ 595 static inline unsigned int 596 xhci_ctlrport2rhport(struct xhci_softc * const sc, unsigned int ctrlport) 597 { 598 599 return sc->sc_ctlrportmap[ctrlport - 1]; 600 } 601 602 /* 603 * Return the controller port for a bus roothub port. Both are 1..n. 604 */ 605 static inline unsigned int 606 xhci_rhport2ctlrport(struct xhci_softc * const sc, unsigned int bn, 607 unsigned int rhport) 608 { 609 610 return sc->sc_rhportmap[bn][rhport - 1]; 611 } 612 613 /* --- */ 614 615 void 616 xhci_childdet(device_t self, device_t child) 617 { 618 struct xhci_softc * const sc = device_private(self); 619 620 KASSERT((sc->sc_child == child) || (sc->sc_child2 == child)); 621 if (child == sc->sc_child2) 622 sc->sc_child2 = NULL; 623 else if (child == sc->sc_child) 624 sc->sc_child = NULL; 625 } 626 627 int 628 xhci_detach(struct xhci_softc *sc, int flags) 629 { 630 int rv = 0; 631 632 if (sc->sc_child2 != NULL) { 633 rv = config_detach(sc->sc_child2, flags); 634 if (rv != 0) 635 return rv; 636 KASSERT(sc->sc_child2 == NULL); 637 } 638 639 if (sc->sc_child != NULL) { 640 rv = config_detach(sc->sc_child, flags); 641 if (rv != 0) 642 return rv; 643 KASSERT(sc->sc_child == NULL); 644 } 645 646 /* XXX unconfigure/free slots */ 647 648 /* verify: */ 649 xhci_rt_write_4(sc, XHCI_IMAN(0), 0); 650 xhci_op_write_4(sc, XHCI_USBCMD, 0); 651 /* do we need to wait for stop? 
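(USBSTS.HCHalted would tell us when the controller has actually stopped)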
*/ 652 653 xhci_op_write_8(sc, XHCI_CRCR, 0); 654 xhci_ring_free(sc, &sc->sc_cr); 655 cv_destroy(&sc->sc_command_cv); 656 cv_destroy(&sc->sc_cmdbusy_cv); 657 658 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), 0); 659 xhci_rt_write_8(sc, XHCI_ERSTBA(0), 0); 660 xhci_rt_write_8(sc, XHCI_ERDP(0), 0 | XHCI_ERDP_BUSY); 661 xhci_ring_free(sc, &sc->sc_er); 662 663 usb_freemem(&sc->sc_bus, &sc->sc_eventst_dma); 664 665 xhci_op_write_8(sc, XHCI_DCBAAP, 0); 666 usb_freemem(&sc->sc_bus, &sc->sc_dcbaa_dma); 667 668 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * sc->sc_maxslots); 669 670 kmem_free(sc->sc_ctlrportbus, 671 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY)); 672 kmem_free(sc->sc_ctlrportmap, sc->sc_maxports * sizeof(int)); 673 674 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) { 675 kmem_free(sc->sc_rhportmap[j], sc->sc_maxports * sizeof(int)); 676 } 677 678 mutex_destroy(&sc->sc_lock); 679 mutex_destroy(&sc->sc_intr_lock); 680 681 pool_cache_destroy(sc->sc_xferpool); 682 683 return rv; 684 } 685 686 int 687 xhci_activate(device_t self, enum devact act) 688 { 689 struct xhci_softc * const sc = device_private(self); 690 691 switch (act) { 692 case DVACT_DEACTIVATE: 693 sc->sc_dying = true; 694 return 0; 695 default: 696 return EOPNOTSUPP; 697 } 698 } 699 700 bool 701 xhci_suspend(device_t dv, const pmf_qual_t *qual) 702 { 703 return false; 704 } 705 706 bool 707 xhci_resume(device_t dv, const pmf_qual_t *qual) 708 { 709 return false; 710 } 711 712 bool 713 xhci_shutdown(device_t self, int flags) 714 { 715 return false; 716 } 717 718 static int 719 xhci_hc_reset(struct xhci_softc * const sc) 720 { 721 uint32_t usbcmd, usbsts; 722 int i; 723 724 /* Check controller not ready */ 725 for (i = 0; i < XHCI_WAIT_CNR; i++) { 726 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 727 if ((usbsts & XHCI_STS_CNR) == 0) 728 break; 729 usb_delay_ms(&sc->sc_bus, 1); 730 } 731 if (i >= XHCI_WAIT_CNR) { 732 aprint_error_dev(sc->sc_dev, "controller not ready timeout\n"); 733 return EIO; 734 } 735 736 /* Halt controller */ 737 usbcmd = 0; 738 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd); 739 usb_delay_ms(&sc->sc_bus, 1); 740 741 /* Reset controller */ 742 usbcmd = XHCI_CMD_HCRST; 743 xhci_op_write_4(sc, XHCI_USBCMD, usbcmd); 744 for (i = 0; i < XHCI_WAIT_HCRST; i++) { 745 /* 746 * Wait 1ms first. Existing Intel xHCI requires 1ms delay to 747 * prevent system hang (Errata).
748 */ 749 usb_delay_ms(&sc->sc_bus, 1); 750 usbcmd = xhci_op_read_4(sc, XHCI_USBCMD); 751 if ((usbcmd & XHCI_CMD_HCRST) == 0) 752 break; 753 } 754 if (i >= XHCI_WAIT_HCRST) { 755 aprint_error_dev(sc->sc_dev, "host controller reset timeout\n"); 756 return EIO; 757 } 758 759 /* Check controller not ready */ 760 for (i = 0; i < XHCI_WAIT_CNR; i++) { 761 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 762 if ((usbsts & XHCI_STS_CNR) == 0) 763 break; 764 usb_delay_ms(&sc->sc_bus, 1); 765 } 766 if (i >= XHCI_WAIT_CNR) { 767 aprint_error_dev(sc->sc_dev, 768 "controller not ready timeout after reset\n"); 769 return EIO; 770 } 771 772 return 0; 773 } 774 775 776 /* 7.2 xHCI Support Protocol Capability */ 777 static void 778 xhci_id_protocols(struct xhci_softc *sc, bus_size_t ecp) 779 { 780 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 781 782 /* XXX Cache this lot */ 783 784 const uint32_t w0 = xhci_read_4(sc, ecp); 785 const uint32_t w4 = xhci_read_4(sc, ecp + 4); 786 const uint32_t w8 = xhci_read_4(sc, ecp + 8); 787 const uint32_t wc = xhci_read_4(sc, ecp + 0xc); 788 789 aprint_debug_dev(sc->sc_dev, 790 " SP: 0x%08x 0x%08x 0x%08x 0x%08x\n", w0, w4, w8, wc); 791 792 if (w4 != XHCI_XECP_USBID) 793 return; 794 795 const int major = XHCI_XECP_SP_W0_MAJOR(w0); 796 const int minor = XHCI_XECP_SP_W0_MINOR(w0); 797 const uint8_t cpo = XHCI_XECP_SP_W8_CPO(w8); 798 const uint8_t cpc = XHCI_XECP_SP_W8_CPC(w8); 799 800 const uint16_t mm = __SHIFTOUT(w0, __BITS(31, 16)); 801 switch (mm) { 802 case 0x0200: 803 case 0x0300: 804 case 0x0301: 805 case 0x0310: 806 aprint_debug_dev(sc->sc_dev, " %s ports %d - %d\n", 807 major == 3 ? "ss" : "hs", cpo, cpo + cpc -1); 808 break; 809 default: 810 aprint_error_dev(sc->sc_dev, " unknown major/minor (%d/%d)\n", 811 major, minor); 812 return; 813 } 814 815 const size_t bus = (major == 3) ? 0 : 1; 816 817 /* Index arrays with 0..n-1 where ports are numbered 1..n */ 818 for (size_t cp = cpo - 1; cp < cpo + cpc - 1; cp++) { 819 if (sc->sc_ctlrportmap[cp] != 0) { 820 aprint_error_dev(sc->sc_dev, "controller port %zu " 821 "already assigned", cp); 822 continue; 823 } 824 825 sc->sc_ctlrportbus[cp / NBBY] |= 826 bus == 0 ? 
0 : __BIT(cp % NBBY); 827 828 const size_t rhp = sc->sc_rhportcount[bus]++; 829 830 KASSERTMSG(sc->sc_rhportmap[bus][rhp] == 0, 831 "bus %zu rhp %zu is %d", bus, rhp, 832 sc->sc_rhportmap[bus][rhp]); 833 834 sc->sc_rhportmap[bus][rhp] = cp + 1; 835 sc->sc_ctlrportmap[cp] = rhp + 1; 836 } 837 } 838 839 /* Process extended capabilities */ 840 static void 841 xhci_ecp(struct xhci_softc *sc) 842 { 843 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 844 845 bus_size_t ecp = XHCI_HCC_XECP(sc->sc_hcc) * 4; 846 while (ecp != 0) { 847 uint32_t ecr = xhci_read_4(sc, ecp); 848 aprint_debug_dev(sc->sc_dev, "ECR: 0x%08x\n", ecr); 849 switch (XHCI_XECP_ID(ecr)) { 850 case XHCI_ID_PROTOCOLS: { 851 xhci_id_protocols(sc, ecp); 852 break; 853 } 854 case XHCI_ID_USB_LEGACY: { 855 uint8_t bios_sem; 856 857 /* Take host controller ownership from BIOS */ 858 bios_sem = xhci_read_1(sc, ecp + XHCI_XECP_BIOS_SEM); 859 if (bios_sem) { 860 /* sets xHCI to be owned by OS */ 861 xhci_write_1(sc, ecp + XHCI_XECP_OS_SEM, 1); 862 aprint_debug_dev(sc->sc_dev, 863 "waiting for BIOS to give up control\n"); 864 for (int i = 0; i < 5000; i++) { 865 bios_sem = xhci_read_1(sc, ecp + 866 XHCI_XECP_BIOS_SEM); 867 if (bios_sem == 0) 868 break; 869 DELAY(1000); 870 } 871 if (bios_sem) { 872 aprint_error_dev(sc->sc_dev, 873 "timed out waiting for BIOS\n"); 874 } 875 } 876 break; 877 } 878 default: 879 break; 880 } 881 ecr = xhci_read_4(sc, ecp); 882 if (XHCI_XECP_NEXT(ecr) == 0) { 883 ecp = 0; 884 } else { 885 ecp += XHCI_XECP_NEXT(ecr) * 4; 886 } 887 } 888 } 889 890 #define XHCI_HCCPREV1_BITS \ 891 "\177\020" /* New bitmask */ \ 892 "f\020\020XECP\0" \ 893 "f\014\4MAXPSA\0" \ 894 "b\013CFC\0" \ 895 "b\012SEC\0" \ 896 "b\011SBD\0" \ 897 "b\010FSE\0" \ 898 "b\7NSS\0" \ 899 "b\6LTC\0" \ 900 "b\5LHRC\0" \ 901 "b\4PIND\0" \ 902 "b\3PPC\0" \ 903 "b\2CZC\0" \ 904 "b\1BNC\0" \ 905 "b\0AC64\0" \ 906 "\0" 907 #define XHCI_HCCV1_x_BITS \ 908 "\177\020" /* New bitmask */ \ 909 "f\020\020XECP\0" \ 910 "f\014\4MAXPSA\0" \ 911 "b\013CFC\0" \ 912 "b\012SEC\0" \ 913 "b\011SPC\0" \ 914 "b\010PAE\0" \ 915 "b\7NSS\0" \ 916 "b\6LTC\0" \ 917 "b\5LHRC\0" \ 918 "b\4PIND\0" \ 919 "b\3PPC\0" \ 920 "b\2CSZ\0" \ 921 "b\1BNC\0" \ 922 "b\0AC64\0" \ 923 "\0" 924 925 #define XHCI_HCC2_BITS \ 926 "\177\020" /* New bitmask */ \ 927 "b\7ETC_TSC\0" \ 928 "b\6ETC\0" \ 929 "b\5CIC\0" \ 930 "b\4LEC\0" \ 931 "b\3CTC\0" \ 932 "b\2FSC\0" \ 933 "b\1CMC\0" \ 934 "b\0U3C\0" \ 935 "\0" 936 937 void 938 xhci_start(struct xhci_softc *sc) 939 { 940 xhci_rt_write_4(sc, XHCI_IMAN(0), XHCI_IMAN_INTR_ENA); 941 if ((sc->sc_quirks & XHCI_QUIRK_INTEL) != 0) 942 /* Intel xhci needs interrupt rate moderated. */ 943 xhci_rt_write_4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT_LP); 944 else 945 xhci_rt_write_4(sc, XHCI_IMOD(0), 0); 946 aprint_debug_dev(sc->sc_dev, "current IMOD %u\n", 947 xhci_rt_read_4(sc, XHCI_IMOD(0))); 948 949 /* Go! 
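XHCI_CMD_RS starts TRB processing and XHCI_CMD_INTE enables interrupt generation.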
*/ 950 xhci_op_write_4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS); 951 aprint_debug_dev(sc->sc_dev, "USBCMD 0x%08"PRIx32"\n", 952 xhci_op_read_4(sc, XHCI_USBCMD)); 953 } 954 955 int 956 xhci_init(struct xhci_softc *sc) 957 { 958 bus_size_t bsz; 959 uint32_t hcs1, hcs2, hcs3, dboff, rtsoff; 960 uint32_t pagesize, config; 961 int i = 0; 962 uint16_t hciversion; 963 uint8_t caplength; 964 965 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 966 967 /* Set up the bus struct for the usb 3 and usb 2 buses */ 968 sc->sc_bus.ub_methods = &xhci_bus_methods; 969 sc->sc_bus.ub_pipesize = sizeof(struct xhci_pipe); 970 sc->sc_bus.ub_usedma = true; 971 sc->sc_bus.ub_hcpriv = sc; 972 973 sc->sc_bus2.ub_methods = &xhci_bus_methods; 974 sc->sc_bus2.ub_pipesize = sizeof(struct xhci_pipe); 975 sc->sc_bus2.ub_revision = USBREV_2_0; 976 sc->sc_bus2.ub_usedma = true; 977 sc->sc_bus2.ub_hcpriv = sc; 978 sc->sc_bus2.ub_dmatag = sc->sc_bus.ub_dmatag; 979 980 caplength = xhci_read_1(sc, XHCI_CAPLENGTH); 981 hciversion = xhci_read_2(sc, XHCI_HCIVERSION); 982 983 if (hciversion < XHCI_HCIVERSION_0_96 || 984 hciversion >= 0x0200) { 985 aprint_normal_dev(sc->sc_dev, 986 "xHCI version %x.%x not known to be supported\n", 987 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff); 988 } else { 989 aprint_verbose_dev(sc->sc_dev, "xHCI version %x.%x\n", 990 (hciversion >> 8) & 0xff, (hciversion >> 0) & 0xff); 991 } 992 993 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, 0, caplength, 994 &sc->sc_cbh) != 0) { 995 aprint_error_dev(sc->sc_dev, "capability subregion failure\n"); 996 return ENOMEM; 997 } 998 999 hcs1 = xhci_cap_read_4(sc, XHCI_HCSPARAMS1); 1000 sc->sc_maxslots = XHCI_HCS1_MAXSLOTS(hcs1); 1001 sc->sc_maxintrs = XHCI_HCS1_MAXINTRS(hcs1); 1002 sc->sc_maxports = XHCI_HCS1_MAXPORTS(hcs1); 1003 hcs2 = xhci_cap_read_4(sc, XHCI_HCSPARAMS2); 1004 hcs3 = xhci_cap_read_4(sc, XHCI_HCSPARAMS3); 1005 aprint_debug_dev(sc->sc_dev, 1006 "hcs1=%"PRIx32" hcs2=%"PRIx32" hcs3=%"PRIx32"\n", hcs1, hcs2, hcs3); 1007 1008 sc->sc_hcc = xhci_cap_read_4(sc, XHCI_HCCPARAMS); 1009 sc->sc_ctxsz = XHCI_HCC_CSZ(sc->sc_hcc) ? 64 : 32; 1010 1011 char sbuf[128]; 1012 if (hciversion < XHCI_HCIVERSION_1_0) 1013 snprintb(sbuf, sizeof(sbuf), XHCI_HCCPREV1_BITS, sc->sc_hcc); 1014 else 1015 snprintb(sbuf, sizeof(sbuf), XHCI_HCCV1_x_BITS, sc->sc_hcc); 1016 aprint_debug_dev(sc->sc_dev, "hcc=%s\n", sbuf); 1017 aprint_debug_dev(sc->sc_dev, "xECP %" __PRIxBITS "\n", 1018 XHCI_HCC_XECP(sc->sc_hcc) * 4); 1019 if (hciversion >= XHCI_HCIVERSION_1_1) { 1020 sc->sc_hcc2 = xhci_cap_read_4(sc, XHCI_HCCPARAMS2); 1021 snprintb(sbuf, sizeof(sbuf), XHCI_HCC2_BITS, sc->sc_hcc2); 1022 aprint_debug_dev(sc->sc_dev, "hcc2=%s\n", sbuf); 1023 } 1024 1025 /* default all ports to bus 0, i.e. 
usb 3 */ 1026 sc->sc_ctlrportbus = kmem_zalloc( 1027 howmany(sc->sc_maxports * sizeof(uint8_t), NBBY), KM_SLEEP); 1028 sc->sc_ctlrportmap = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP); 1029 1030 /* controller port to bus roothub port map */ 1031 for (size_t j = 0; j < __arraycount(sc->sc_rhportmap); j++) { 1032 sc->sc_rhportmap[j] = kmem_zalloc(sc->sc_maxports * sizeof(int), KM_SLEEP); 1033 } 1034 1035 /* 1036 * Process all Extended Capabilities 1037 */ 1038 xhci_ecp(sc); 1039 1040 bsz = XHCI_PORTSC(sc->sc_maxports); 1041 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, caplength, bsz, 1042 &sc->sc_obh) != 0) { 1043 aprint_error_dev(sc->sc_dev, "operational subregion failure\n"); 1044 return ENOMEM; 1045 } 1046 1047 dboff = xhci_cap_read_4(sc, XHCI_DBOFF); 1048 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, dboff, 1049 sc->sc_maxslots * 4, &sc->sc_dbh) != 0) { 1050 aprint_error_dev(sc->sc_dev, "doorbell subregion failure\n"); 1051 return ENOMEM; 1052 } 1053 1054 rtsoff = xhci_cap_read_4(sc, XHCI_RTSOFF); 1055 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, rtsoff, 1056 sc->sc_maxintrs * 0x20, &sc->sc_rbh) != 0) { 1057 aprint_error_dev(sc->sc_dev, "runtime subregion failure\n"); 1058 return ENOMEM; 1059 } 1060 1061 int rv; 1062 rv = xhci_hc_reset(sc); 1063 if (rv != 0) { 1064 return rv; 1065 } 1066 1067 if (sc->sc_vendor_init) 1068 sc->sc_vendor_init(sc); 1069 1070 pagesize = xhci_op_read_4(sc, XHCI_PAGESIZE); 1071 aprint_debug_dev(sc->sc_dev, "PAGESIZE 0x%08x\n", pagesize); 1072 pagesize = ffs(pagesize); 1073 if (pagesize == 0) { 1074 aprint_error_dev(sc->sc_dev, "pagesize is 0\n"); 1075 return EIO; 1076 } 1077 sc->sc_pgsz = 1 << (12 + (pagesize - 1)); 1078 aprint_debug_dev(sc->sc_dev, "sc_pgsz 0x%08x\n", (uint32_t)sc->sc_pgsz); 1079 aprint_debug_dev(sc->sc_dev, "sc_maxslots 0x%08x\n", 1080 (uint32_t)sc->sc_maxslots); 1081 aprint_debug_dev(sc->sc_dev, "sc_maxports %d\n", sc->sc_maxports); 1082 1083 int err; 1084 sc->sc_maxspbuf = XHCI_HCS2_MAXSPBUF(hcs2); 1085 aprint_debug_dev(sc->sc_dev, "sc_maxspbuf %d\n", sc->sc_maxspbuf); 1086 if (sc->sc_maxspbuf != 0) { 1087 err = usb_allocmem(&sc->sc_bus, 1088 sizeof(uint64_t) * sc->sc_maxspbuf, sizeof(uint64_t), 1089 USBMALLOC_COHERENT | USBMALLOC_ZERO, 1090 &sc->sc_spbufarray_dma); 1091 if (err) { 1092 aprint_error_dev(sc->sc_dev, 1093 "spbufarray init fail, err %d\n", err); 1094 return ENOMEM; 1095 } 1096 1097 sc->sc_spbuf_dma = kmem_zalloc(sizeof(*sc->sc_spbuf_dma) * 1098 sc->sc_maxspbuf, KM_SLEEP); 1099 uint64_t *spbufarray = KERNADDR(&sc->sc_spbufarray_dma, 0); 1100 for (i = 0; i < sc->sc_maxspbuf; i++) { 1101 usb_dma_t * const dma = &sc->sc_spbuf_dma[i]; 1102 /* allocate contexts */ 1103 err = usb_allocmem(&sc->sc_bus, sc->sc_pgsz, 1104 sc->sc_pgsz, USBMALLOC_COHERENT | USBMALLOC_ZERO, 1105 dma); 1106 if (err) { 1107 aprint_error_dev(sc->sc_dev, 1108 "spbufarray_dma init fail, err %d\n", err); 1109 rv = ENOMEM; 1110 goto bad1; 1111 } 1112 spbufarray[i] = htole64(DMAADDR(dma, 0)); 1113 usb_syncmem(dma, 0, sc->sc_pgsz, 1114 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1115 } 1116 1117 usb_syncmem(&sc->sc_spbufarray_dma, 0, 1118 sizeof(uint64_t) * sc->sc_maxspbuf, BUS_DMASYNC_PREWRITE); 1119 } 1120 1121 config = xhci_op_read_4(sc, XHCI_CONFIG); 1122 config &= ~0xFF; 1123 config |= sc->sc_maxslots & 0xFF; 1124 xhci_op_write_4(sc, XHCI_CONFIG, config); 1125 1126 err = xhci_ring_init(sc, &sc->sc_cr, XHCI_COMMAND_RING_TRBS, 1127 XHCI_COMMAND_RING_SEGMENTS_ALIGN); 1128 if (err) { 1129 aprint_error_dev(sc->sc_dev, "command ring init fail, err 
%d\n", 1130 err); 1131 rv = ENOMEM; 1132 goto bad1; 1133 } 1134 1135 err = xhci_ring_init(sc, &sc->sc_er, XHCI_EVENT_RING_TRBS, 1136 XHCI_EVENT_RING_SEGMENTS_ALIGN); 1137 if (err) { 1138 aprint_error_dev(sc->sc_dev, "event ring init fail, err %d\n", 1139 err); 1140 rv = ENOMEM; 1141 goto bad2; 1142 } 1143 1144 usb_dma_t *dma; 1145 size_t size; 1146 size_t align; 1147 1148 dma = &sc->sc_eventst_dma; 1149 size = roundup2(XHCI_EVENT_RING_SEGMENTS * XHCI_ERSTE_SIZE, 1150 XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN); 1151 KASSERTMSG(size <= (512 * 1024), "eventst size %zu too large", size); 1152 align = XHCI_EVENT_RING_SEGMENT_TABLE_ALIGN; 1153 err = usb_allocmem(&sc->sc_bus, size, align, 1154 USBMALLOC_COHERENT | USBMALLOC_ZERO, dma); 1155 if (err) { 1156 aprint_error_dev(sc->sc_dev, "eventst init fail, err %d\n", 1157 err); 1158 rv = ENOMEM; 1159 goto bad3; 1160 } 1161 1162 aprint_debug_dev(sc->sc_dev, "eventst: 0x%016jx %p %zx\n", 1163 (uintmax_t)DMAADDR(&sc->sc_eventst_dma, 0), 1164 KERNADDR(&sc->sc_eventst_dma, 0), 1165 sc->sc_eventst_dma.udma_block->size); 1166 1167 dma = &sc->sc_dcbaa_dma; 1168 size = (1 + sc->sc_maxslots) * sizeof(uint64_t); 1169 KASSERTMSG(size <= 2048, "dcbaa size %zu too large", size); 1170 align = XHCI_DEVICE_CONTEXT_BASE_ADDRESS_ARRAY_ALIGN; 1171 err = usb_allocmem(&sc->sc_bus, size, align, 1172 USBMALLOC_COHERENT | USBMALLOC_ZERO, dma); 1173 if (err) { 1174 aprint_error_dev(sc->sc_dev, "dcbaa init fail, err %d\n", err); 1175 rv = ENOMEM; 1176 goto bad4; 1177 } 1178 aprint_debug_dev(sc->sc_dev, "dcbaa: 0x%016jx %p %zx\n", 1179 (uintmax_t)DMAADDR(&sc->sc_dcbaa_dma, 0), 1180 KERNADDR(&sc->sc_dcbaa_dma, 0), 1181 sc->sc_dcbaa_dma.udma_block->size); 1182 1183 if (sc->sc_maxspbuf != 0) { 1184 /* 1185 * DCBA entry 0 holds the scratchbuf array pointer.
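 * Each entry of that array points at one of the page-sized scratchpad buffers allocated above, which the controller uses as private scratch memory.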
1186 */ 1187 *(uint64_t *)KERNADDR(dma, 0) = 1188 htole64(DMAADDR(&sc->sc_spbufarray_dma, 0)); 1189 usb_syncmem(dma, 0, size, BUS_DMASYNC_PREWRITE); 1190 } 1191 1192 sc->sc_slots = kmem_zalloc(sizeof(*sc->sc_slots) * sc->sc_maxslots, 1193 KM_SLEEP); 1194 if (sc->sc_slots == NULL) { 1195 aprint_error_dev(sc->sc_dev, "slots init fail, err %d\n", err); 1196 rv = ENOMEM; 1197 goto bad; 1198 } 1199 1200 sc->sc_xferpool = pool_cache_init(sizeof(struct xhci_xfer), 0, 0, 0, 1201 "xhcixfer", NULL, IPL_USB, NULL, NULL, NULL); 1202 if (sc->sc_xferpool == NULL) { 1203 aprint_error_dev(sc->sc_dev, "pool_cache init fail, err %d\n", 1204 err); 1205 rv = ENOMEM; 1206 goto bad; 1207 } 1208 1209 cv_init(&sc->sc_command_cv, "xhcicmd"); 1210 cv_init(&sc->sc_cmdbusy_cv, "xhcicmdq"); 1211 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB); 1212 mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_USB); 1213 1214 struct xhci_erste *erst; 1215 erst = KERNADDR(&sc->sc_eventst_dma, 0); 1216 erst[0].erste_0 = htole64(xhci_ring_trbp(sc->sc_er, 0)); 1217 erst[0].erste_2 = htole32(sc->sc_er->xr_ntrb); 1218 erst[0].erste_3 = htole32(0); 1219 usb_syncmem(&sc->sc_eventst_dma, 0, 1220 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS, BUS_DMASYNC_PREWRITE); 1221 1222 xhci_rt_write_4(sc, XHCI_ERSTSZ(0), XHCI_EVENT_RING_SEGMENTS); 1223 xhci_rt_write_8(sc, XHCI_ERSTBA(0), DMAADDR(&sc->sc_eventst_dma, 0)); 1224 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(sc->sc_er, 0) | 1225 XHCI_ERDP_BUSY); 1226 1227 xhci_op_write_8(sc, XHCI_DCBAAP, DMAADDR(&sc->sc_dcbaa_dma, 0)); 1228 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(sc->sc_cr, 0) | 1229 sc->sc_cr->xr_cs); 1230 1231 xhci_barrier(sc, BUS_SPACE_BARRIER_WRITE); 1232 1233 HEXDUMP("eventst", KERNADDR(&sc->sc_eventst_dma, 0), 1234 XHCI_ERSTE_SIZE * XHCI_EVENT_RING_SEGMENTS); 1235 1236 if ((sc->sc_quirks & XHCI_DEFERRED_START) == 0) 1237 xhci_start(sc); 1238 1239 return 0; 1240 1241 bad: 1242 if (sc->sc_xferpool) { 1243 pool_cache_destroy(sc->sc_xferpool); 1244 sc->sc_xferpool = NULL; 1245 } 1246 1247 if (sc->sc_slots) { 1248 kmem_free(sc->sc_slots, sizeof(*sc->sc_slots) * 1249 sc->sc_maxslots); 1250 sc->sc_slots = NULL; 1251 } 1252 1253 usb_freemem(&sc->sc_bus, &sc->sc_dcbaa_dma); 1254 bad4: 1255 usb_freemem(&sc->sc_bus, &sc->sc_eventst_dma); 1256 bad3: 1257 xhci_ring_free(sc, &sc->sc_er); 1258 bad2: 1259 xhci_ring_free(sc, &sc->sc_cr); 1260 i = sc->sc_maxspbuf; 1261 bad1: 1262 for (int j = 0; j < i; j++) 1263 usb_freemem(&sc->sc_bus, &sc->sc_spbuf_dma[j]); 1264 usb_freemem(&sc->sc_bus, &sc->sc_spbufarray_dma); 1265 1266 return rv; 1267 } 1268 1269 static inline bool 1270 xhci_polling_p(struct xhci_softc * const sc) 1271 { 1272 return sc->sc_bus.ub_usepolling || sc->sc_bus2.ub_usepolling; 1273 } 1274 1275 int 1276 xhci_intr(void *v) 1277 { 1278 struct xhci_softc * const sc = v; 1279 int ret = 0; 1280 1281 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 1282 1283 if (sc == NULL) 1284 return 0; 1285 1286 mutex_spin_enter(&sc->sc_intr_lock); 1287 1288 if (sc->sc_dying || !device_has_power(sc->sc_dev)) 1289 goto done; 1290 1291 /* If we get an interrupt while polling, then just ignore it. */ 1292 if (xhci_polling_p(sc)) { 1293 #ifdef DIAGNOSTIC 1294 DPRINTFN(16, "ignored interrupt while polling", 0, 0, 0, 0); 1295 #endif 1296 goto done; 1297 } 1298 1299 ret = xhci_intr1(sc); 1300 if (ret) { 1301 KASSERT(sc->sc_child || sc->sc_child2); 1302 1303 /* 1304 * One of child busses could be already detached. It doesn't 1305 * matter on which of the two the softintr is scheduled. 
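 * Either way the softintr handler drains the same shared event ring.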
1306 */ 1307 if (sc->sc_child) 1308 usb_schedsoftintr(&sc->sc_bus); 1309 else 1310 usb_schedsoftintr(&sc->sc_bus2); 1311 } 1312 done: 1313 mutex_spin_exit(&sc->sc_intr_lock); 1314 return ret; 1315 } 1316 1317 int 1318 xhci_intr1(struct xhci_softc * const sc) 1319 { 1320 uint32_t usbsts; 1321 uint32_t iman; 1322 1323 XHCIHIST_FUNC(); 1324 1325 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1326 XHCIHIST_CALLARGS("USBSTS 0x%08jx", usbsts, 0, 0, 0); 1327 if ((usbsts & (XHCI_STS_HSE | XHCI_STS_EINT | XHCI_STS_PCD | 1328 XHCI_STS_HCE)) == 0) { 1329 DPRINTFN(16, "ignored intr not for %jd", 1330 device_unit(sc->sc_dev), 0, 0, 0); 1331 return 0; 1332 } 1333 1334 /* 1335 * Clear EINT and other transient flags, to not misinterpret the 1336 * next shared interrupt. Also, to avoid race, EINT must be cleared 1337 * before XHCI_IMAN_INTR_PEND is cleared. 1338 */ 1339 xhci_op_write_4(sc, XHCI_USBSTS, usbsts & XHCI_STS_RSVDP0); 1340 1341 #ifdef XHCI_DEBUG 1342 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1343 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0); 1344 #endif 1345 1346 iman = xhci_rt_read_4(sc, XHCI_IMAN(0)); 1347 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0); 1348 iman |= XHCI_IMAN_INTR_PEND; 1349 xhci_rt_write_4(sc, XHCI_IMAN(0), iman); 1350 1351 #ifdef XHCI_DEBUG 1352 iman = xhci_rt_read_4(sc, XHCI_IMAN(0)); 1353 DPRINTFN(16, "IMAN0 0x%08jx", iman, 0, 0, 0); 1354 usbsts = xhci_op_read_4(sc, XHCI_USBSTS); 1355 DPRINTFN(16, "USBSTS 0x%08jx", usbsts, 0, 0, 0); 1356 #endif 1357 1358 return 1; 1359 } 1360 1361 /* 1362 * 3 port speed types used in USB stack 1363 * 1364 * usbdi speed 1365 * definition: USB_SPEED_* in usb.h 1366 * They are used in struct usbd_device in USB stack. 1367 * ioctl interface uses these values too. 1368 * port_status speed 1369 * definition: UPS_*_SPEED in usb.h 1370 * They are used in usb_port_status_t and valid only for USB 2.0. 1371 * Speed value is always 0 for Super Speed or more, and dwExtPortStatus 1372 * of usb_port_status_ext_t indicates port speed. 1373 * Note that some 3.0 values overlap with 2.0 values. 1374 * (e.g. 0x200 means UPS_PORT_POWER_SS in SS and 1375 * means UPS_LOW_SPEED in HS.) 1376 * port status returned from hub also uses these values. 1377 * On NetBSD UPS_OTHER_SPEED indicates port speed is super speed 1378 * or more. 1379 * xspeed: 1380 * definition: Protocol Speed ID (PSI) (xHCI 1.1 7.2.1) 1381 * They are used only in the slot context and PORTSC reg of xhci. 1382 * The difference between usbdi speed and xspeed is 1383 * that FS and LS values are swapped. 1384 */ 1385 1386 /* convert usbdi speed to xspeed */ 1387 static int 1388 xhci_speed2xspeed(int speed) 1389 { 1390 switch (speed) { 1391 case USB_SPEED_LOW: return 2; 1392 case USB_SPEED_FULL: return 1; 1393 default: return speed; 1394 } 1395 } 1396 1397 #if 0 1398 /* convert xspeed to usbdi speed */ 1399 static int 1400 xhci_xspeed2speed(int xspeed) 1401 { 1402 switch (xspeed) { 1403 case 1: return USB_SPEED_FULL; 1404 case 2: return USB_SPEED_LOW; 1405 default: return xspeed; 1406 } 1407 } 1408 #endif 1409 1410 /* convert xspeed to port status speed */ 1411 static int 1412 xhci_xspeed2psspeed(int xspeed) 1413 { 1414 switch (xspeed) { 1415 case 0: return 0; 1416 case 1: return UPS_FULL_SPEED; 1417 case 2: return UPS_LOW_SPEED; 1418 case 3: return UPS_HIGH_SPEED; 1419 default: return UPS_OTHER_SPEED; 1420 } 1421 } 1422 1423 /* 1424 * Construct input contexts and issue TRB to open pipe.
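 * The Configure Endpoint command makes the controller copy the Add-flagged input contexts into the device (output) context.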
1425 */ 1426 static usbd_status 1427 xhci_configure_endpoint(struct usbd_pipe *pipe) 1428 { 1429 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1430 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1431 #ifdef USB_DEBUG 1432 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 1433 #endif 1434 struct xhci_soft_trb trb; 1435 usbd_status err; 1436 1437 XHCIHIST_FUNC(); 1438 XHCIHIST_CALLARGS("slot %ju dci %ju epaddr 0x%02jx attr 0x%02jx", 1439 xs->xs_idx, dci, pipe->up_endpoint->ue_edesc->bEndpointAddress, 1440 pipe->up_endpoint->ue_edesc->bmAttributes); 1441 1442 /* XXX ensure input context is available? */ 1443 1444 memset(xhci_slot_get_icv(sc, xs, 0), 0, sc->sc_pgsz); 1445 1446 /* set up context */ 1447 xhci_setup_ctx(pipe); 1448 1449 HEXDUMP("input control context", xhci_slot_get_icv(sc, xs, 0), 1450 sc->sc_ctxsz * 1); 1451 HEXDUMP("input endpoint context", xhci_slot_get_icv(sc, xs, 1452 xhci_dci_to_ici(dci)), sc->sc_ctxsz * 1); 1453 1454 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0); 1455 trb.trb_2 = 0; 1456 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1457 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP); 1458 1459 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 1460 1461 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 1462 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, dci), 1463 sc->sc_ctxsz * 1); 1464 1465 return err; 1466 } 1467 1468 #if 0 1469 static usbd_status 1470 xhci_unconfigure_endpoint(struct usbd_pipe *pipe) 1471 { 1472 #ifdef USB_DEBUG 1473 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1474 #endif 1475 1476 XHCIHIST_FUNC(); 1477 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0); 1478 1479 return USBD_NORMAL_COMPLETION; 1480 } 1481 #endif 1482 1483 /* 4.6.8, 6.4.3.7 */ 1484 static usbd_status 1485 xhci_reset_endpoint_locked(struct usbd_pipe *pipe) 1486 { 1487 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1488 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1489 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 1490 struct xhci_soft_trb trb; 1491 usbd_status err; 1492 1493 XHCIHIST_FUNC(); 1494 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 1495 1496 KASSERT(mutex_owned(&sc->sc_lock)); 1497 1498 trb.trb_0 = 0; 1499 trb.trb_2 = 0; 1500 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1501 XHCI_TRB_3_EP_SET(dci) | 1502 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_RESET_EP); 1503 1504 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 1505 1506 return err; 1507 } 1508 1509 static usbd_status 1510 xhci_reset_endpoint(struct usbd_pipe *pipe) 1511 { 1512 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1513 1514 mutex_enter(&sc->sc_lock); 1515 usbd_status ret = xhci_reset_endpoint_locked(pipe); 1516 mutex_exit(&sc->sc_lock); 1517 1518 return ret; 1519 } 1520 1521 /* 1522 * 4.6.9, 6.4.3.8 1523 * Stop execution of TDs on xfer ring. 1524 * Should be called with sc_lock held. 
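 * On success the endpoint is left in the Stopped state, which is what Set TR Dequeue Pointer requires.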
1525 */ 1526 static usbd_status 1527 xhci_stop_endpoint(struct usbd_pipe *pipe) 1528 { 1529 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1530 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1531 struct xhci_soft_trb trb; 1532 usbd_status err; 1533 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 1534 1535 XHCIHIST_FUNC(); 1536 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 1537 1538 KASSERT(mutex_owned(&sc->sc_lock)); 1539 1540 trb.trb_0 = 0; 1541 trb.trb_2 = 0; 1542 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1543 XHCI_TRB_3_EP_SET(dci) | 1544 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STOP_EP); 1545 1546 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 1547 1548 return err; 1549 } 1550 1551 /* 1552 * Set TR Dequeue Pointer. 1553 * xHCI 1.1 4.6.10 6.4.3.9 1554 * Purge all of the TRBs on ring and reinitialize ring. 1555 * Set TR dequeue Pointer to 0 and Cycle State to 1. 1556 * EPSTATE of endpoint must be ERROR or STOPPED, otherwise CONTEXT_STATE 1557 * error will be generated. 1558 */ 1559 static usbd_status 1560 xhci_set_dequeue_locked(struct usbd_pipe *pipe) 1561 { 1562 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1563 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1564 const u_int dci = xhci_ep_get_dci(pipe->up_endpoint->ue_edesc); 1565 struct xhci_ring * const xr = xs->xs_xr[dci]; 1566 struct xhci_soft_trb trb; 1567 usbd_status err; 1568 1569 XHCIHIST_FUNC(); 1570 XHCIHIST_CALLARGS("slot %ju dci %ju", xs->xs_idx, dci, 0, 0); 1571 1572 KASSERT(mutex_owned(&sc->sc_lock)); 1573 KASSERT(xr != NULL); 1574 1575 xhci_host_dequeue(xr); 1576 1577 /* set DCS */ 1578 trb.trb_0 = xhci_ring_trbp(xr, 0) | 1; /* XXX */ 1579 trb.trb_2 = 0; 1580 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1581 XHCI_TRB_3_EP_SET(dci) | 1582 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SET_TR_DEQUEUE); 1583 1584 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 1585 1586 return err; 1587 } 1588 1589 static usbd_status 1590 xhci_set_dequeue(struct usbd_pipe *pipe) 1591 { 1592 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1593 1594 mutex_enter(&sc->sc_lock); 1595 usbd_status ret = xhci_set_dequeue_locked(pipe); 1596 mutex_exit(&sc->sc_lock); 1597 1598 return ret; 1599 } 1600 1601 /* 1602 * Open new pipe: called from usbd_setup_pipe_flags. 1603 * Fills methods of pipe. 1604 * If pipe is not for ep0, calls configure_endpoint.
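 * (ep0 is configured by the Address Device command instead, so only its transfer ring is allocated here.)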
1605 */ 1606 static usbd_status 1607 xhci_open(struct usbd_pipe *pipe) 1608 { 1609 struct usbd_device * const dev = pipe->up_dev; 1610 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe; 1611 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus); 1612 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1613 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 1614 const u_int dci = xhci_ep_get_dci(ed); 1615 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 1616 usbd_status err; 1617 1618 XHCIHIST_FUNC(); 1619 XHCIHIST_CALLARGS("addr %jd depth %jd port %jd speed %jd", dev->ud_addr, 1620 dev->ud_depth, dev->ud_powersrc->up_portno, dev->ud_speed); 1621 DPRINTFN(1, " dci %ju type 0x%02jx epaddr 0x%02jx attr 0x%02jx", 1622 xhci_ep_get_dci(ed), ed->bDescriptorType, ed->bEndpointAddress, 1623 ed->bmAttributes); 1624 DPRINTFN(1, " mps %ju ival %ju", UGETW(ed->wMaxPacketSize), 1625 ed->bInterval, 0, 0); 1626 1627 if (sc->sc_dying) 1628 return USBD_IOERROR; 1629 1630 /* Root Hub */ 1631 if (dev->ud_depth == 0 && dev->ud_powersrc->up_portno == 0) { 1632 switch (ed->bEndpointAddress) { 1633 case USB_CONTROL_ENDPOINT: 1634 pipe->up_methods = &roothub_ctrl_methods; 1635 break; 1636 case UE_DIR_IN | USBROOTHUB_INTR_ENDPT: 1637 pipe->up_methods = &xhci_root_intr_methods; 1638 break; 1639 default: 1640 pipe->up_methods = NULL; 1641 DPRINTFN(0, "bad bEndpointAddress 0x%02jx", 1642 ed->bEndpointAddress, 0, 0, 0); 1643 return USBD_INVAL; 1644 } 1645 return USBD_NORMAL_COMPLETION; 1646 } 1647 1648 switch (xfertype) { 1649 case UE_CONTROL: 1650 pipe->up_methods = &xhci_device_ctrl_methods; 1651 break; 1652 case UE_ISOCHRONOUS: 1653 pipe->up_methods = &xhci_device_isoc_methods; 1654 pipe->up_serialise = false; 1655 xpipe->xp_isoc_next = -1; 1656 break; 1657 case UE_BULK: 1658 pipe->up_methods = &xhci_device_bulk_methods; 1659 break; 1660 case UE_INTERRUPT: 1661 pipe->up_methods = &xhci_device_intr_methods; 1662 break; 1663 default: 1664 return USBD_IOERROR; 1665 break; 1666 } 1667 1668 KASSERT(xs != NULL); 1669 KASSERT(xs->xs_xr[dci] == NULL); 1670 1671 /* allocate transfer ring */ 1672 err = xhci_ring_init(sc, &xs->xs_xr[dci], XHCI_TRANSFER_RING_TRBS, 1673 XHCI_TRB_ALIGN); 1674 if (err) { 1675 DPRINTFN(1, "ring alloc failed %jd", err, 0, 0, 0); 1676 return err; 1677 } 1678 1679 if (ed->bEndpointAddress != USB_CONTROL_ENDPOINT) 1680 return xhci_configure_endpoint(pipe); 1681 1682 return USBD_NORMAL_COMPLETION; 1683 } 1684 1685 /* 1686 * Closes pipe, called from usbd_kill_pipe via close methods. 1687 * If the endpoint to be closed is ep0, disable_slot. 1688 * Should be called with sc_lock held. 
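 * Other endpoints are dropped from the device context with a Configure Endpoint command and their transfer rings are freed.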
1689 */ 1690 static void 1691 xhci_close_pipe(struct usbd_pipe *pipe) 1692 { 1693 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 1694 struct xhci_slot * const xs = pipe->up_dev->ud_hcpriv; 1695 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 1696 const u_int dci = xhci_ep_get_dci(ed); 1697 struct xhci_soft_trb trb; 1698 uint32_t *cp; 1699 1700 XHCIHIST_FUNC(); 1701 1702 if (sc->sc_dying) 1703 return; 1704 1705 /* xs is uninitialized before xhci_init_slot */ 1706 if (xs == NULL || xs->xs_idx == 0) 1707 return; 1708 1709 XHCIHIST_CALLARGS("pipe %#jx slot %ju dci %ju", 1710 (uintptr_t)pipe, xs->xs_idx, dci, 0); 1711 1712 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx"); 1713 KASSERT(mutex_owned(&sc->sc_lock)); 1714 1715 if (pipe->up_dev->ud_depth == 0) 1716 return; 1717 1718 if (dci == XHCI_DCI_EP_CONTROL) { 1719 DPRINTFN(4, "closing ep0", 0, 0, 0, 0); 1720 /* This frees all rings */ 1721 xhci_disable_slot(sc, xs->xs_idx); 1722 return; 1723 } 1724 1725 if (xhci_get_epstate(sc, xs, dci) != XHCI_EPSTATE_STOPPED) 1726 (void)xhci_stop_endpoint(pipe); 1727 1728 /* 1729 * set appropriate bit to be dropped. 1730 * don't set DC bit to 1, otherwise all endpoints 1731 * would be deconfigured. 1732 */ 1733 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL); 1734 cp[0] = htole32(XHCI_INCTX_0_DROP_MASK(dci)); 1735 cp[1] = htole32(0); 1736 1737 /* XXX should be most significant one, not dci? */ 1738 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT)); 1739 cp[0] = htole32(XHCI_SCTX_0_CTX_NUM_SET(dci)); 1740 1741 /* configure ep context performs an implicit dequeue */ 1742 xhci_host_dequeue(xs->xs_xr[dci]); 1743 1744 /* sync input contexts before they are read from memory */ 1745 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE); 1746 1747 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0); 1748 trb.trb_2 = 0; 1749 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 1750 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_CONFIGURE_EP); 1751 1752 (void)xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 1753 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 1754 1755 xhci_ring_free(sc, &xs->xs_xr[dci]); 1756 } 1757 1758 /* 1759 * Abort transfer. 1760 * Should be called with sc_lock held. 1761 */ 1762 static void 1763 xhci_abortx(struct usbd_xfer *xfer) 1764 { 1765 XHCIHIST_FUNC(); 1766 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 1767 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 1768 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 1769 1770 XHCIHIST_CALLARGS("xfer %#jx pipe %#jx", 1771 (uintptr_t)xfer, (uintptr_t)xfer->ux_pipe, 0, 0); 1772 1773 KASSERT(mutex_owned(&sc->sc_lock)); 1774 ASSERT_SLEEPABLE(); 1775 1776 KASSERTMSG((xfer->ux_status == USBD_CANCELLED || 1777 xfer->ux_status == USBD_TIMEOUT), 1778 "bad abort status: %d", xfer->ux_status); 1779 1780 /* 1781 * If we're dying, skip the hardware action and just notify the 1782 * software that we're done. 1783 */ 1784 if (sc->sc_dying) { 1785 DPRINTFN(4, "xfer %#jx dying %ju", (uintptr_t)xfer, 1786 xfer->ux_status, 0, 0); 1787 goto dying; 1788 } 1789 1790 /* 1791 * HC Step 1: Stop execution of TD on the ring. 
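 * A halted endpoint must be reset instead; Stop Endpoint is only valid while the endpoint is Running.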
1792 */ 1793 switch (xhci_get_epstate(sc, xs, dci)) { 1794 case XHCI_EPSTATE_HALTED: 1795 (void)xhci_reset_endpoint_locked(xfer->ux_pipe); 1796 break; 1797 case XHCI_EPSTATE_STOPPED: 1798 break; 1799 default: 1800 (void)xhci_stop_endpoint(xfer->ux_pipe); 1801 break; 1802 } 1803 #ifdef DIAGNOSTIC 1804 uint32_t epst = xhci_get_epstate(sc, xs, dci); 1805 if (epst != XHCI_EPSTATE_STOPPED) 1806 DPRINTFN(4, "dci %ju not stopped %ju", dci, epst, 0, 0); 1807 #endif 1808 1809 /* 1810 * HC Step 2: Remove any vestiges of the xfer from the ring. 1811 */ 1812 xhci_set_dequeue_locked(xfer->ux_pipe); 1813 1814 /* 1815 * Final Step: Notify completion to waiting xfers. 1816 */ 1817 dying: 1818 usb_transfer_complete(xfer); 1819 DPRINTFN(14, "end", 0, 0, 0, 0); 1820 1821 KASSERT(mutex_owned(&sc->sc_lock)); 1822 } 1823 1824 static void 1825 xhci_host_dequeue(struct xhci_ring * const xr) 1826 { 1827 /* When dequeueing the controller, update our struct copy too */ 1828 memset(xr->xr_trb, 0, xr->xr_ntrb * XHCI_TRB_SIZE); 1829 usb_syncmem(&xr->xr_dma, 0, xr->xr_ntrb * XHCI_TRB_SIZE, 1830 BUS_DMASYNC_PREWRITE); 1831 memset(xr->xr_cookies, 0, xr->xr_ntrb * sizeof(*xr->xr_cookies)); 1832 1833 xr->xr_ep = 0; 1834 xr->xr_cs = 1; 1835 } 1836 1837 /* 1838 * Recover STALLed endpoint. 1839 * xHCI 1.1 sect 4.10.2.1 1840 * Issue RESET_EP to recover halt condition and SET_TR_DEQUEUE to remove 1841 * all transfers on transfer ring. 1842 * These are done in thread context asynchronously. 1843 */ 1844 static void 1845 xhci_clear_endpoint_stall_async_task(void *cookie) 1846 { 1847 struct usbd_xfer * const xfer = cookie; 1848 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 1849 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 1850 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 1851 struct xhci_ring * const tr = xs->xs_xr[dci]; 1852 1853 XHCIHIST_FUNC(); 1854 XHCIHIST_CALLARGS("xfer %#jx slot %ju dci %ju", (uintptr_t)xfer, xs->xs_idx, 1855 dci, 0); 1856 1857 /* 1858 * XXXMRG: Stall task can run after slot is disabled when yanked. 1859 * This hack notices that the xs has been memset() in 1860 * xhci_disable_slot() and returns. Both xhci_reset_endpoint() 1861 * and xhci_set_dequeue() rely upon a valid ring setup for correct 1862 * operation, and the latter will fault, as would 1863 * usb_transfer_complete() if it got that far. 1864 */ 1865 if (xs->xs_idx == 0) { 1866 DPRINTFN(4, "ends xs_idx is 0", 0, 0, 0, 0); 1867 return; 1868 } 1869 1870 KASSERT(tr != NULL); 1871 1872 xhci_reset_endpoint(xfer->ux_pipe); 1873 xhci_set_dequeue(xfer->ux_pipe); 1874 1875 mutex_enter(&sc->sc_lock); 1876 tr->is_halted = false; 1877 usb_transfer_complete(xfer); 1878 mutex_exit(&sc->sc_lock); 1879 DPRINTFN(4, "ends", 0, 0, 0, 0); 1880 } 1881 1882 static usbd_status 1883 xhci_clear_endpoint_stall_async(struct usbd_xfer *xfer) 1884 { 1885 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 1886 struct xhci_pipe * const xp = (struct xhci_pipe *)xfer->ux_pipe; 1887 1888 XHCIHIST_FUNC(); 1889 XHCIHIST_CALLARGS("xfer %#jx", (uintptr_t)xfer, 0, 0, 0); 1890 1891 if (sc->sc_dying) { 1892 return USBD_IOERROR; 1893 } 1894 1895 usb_init_task(&xp->xp_async_task, 1896 xhci_clear_endpoint_stall_async_task, xfer, USB_TASKQ_MPSAFE); 1897 usb_add_task(xfer->ux_pipe->up_dev, &xp->xp_async_task, USB_TASKQ_HC); 1898 DPRINTFN(4, "ends", 0, 0, 0, 0); 1899 1900 return USBD_NORMAL_COMPLETION; 1901 } 1902 1903 /* Process roothub port status/change events and notify to uhub_intr. 
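The change is reported by setting the port's bit in the pending root intr xfer, in the same bitmap format a hub status change endpoint uses.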
*/ 1904 static void 1905 xhci_rhpsc(struct xhci_softc * const sc, u_int ctlrport) 1906 { 1907 XHCIHIST_FUNC(); 1908 XHCIHIST_CALLARGS("xhci%jd: port %ju status change", 1909 device_unit(sc->sc_dev), ctlrport, 0, 0); 1910 1911 if (ctlrport > sc->sc_maxports) 1912 return; 1913 1914 const size_t bn = xhci_ctlrport2bus(sc, ctlrport); 1915 const size_t rhp = xhci_ctlrport2rhport(sc, ctlrport); 1916 struct usbd_xfer * const xfer = sc->sc_intrxfer[bn]; 1917 1918 DPRINTFN(4, "xhci%jd: bus %jd bp %ju xfer %#jx status change", 1919 device_unit(sc->sc_dev), bn, rhp, (uintptr_t)xfer); 1920 1921 if (xfer == NULL) 1922 return; 1923 KASSERT(xfer->ux_status == USBD_IN_PROGRESS); 1924 1925 uint8_t *p = xfer->ux_buf; 1926 memset(p, 0, xfer->ux_length); 1927 p[rhp / NBBY] |= 1 << (rhp % NBBY); 1928 xfer->ux_actlen = xfer->ux_length; 1929 xfer->ux_status = USBD_NORMAL_COMPLETION; 1930 usb_transfer_complete(xfer); 1931 } 1932 1933 /* Process Transfer Events */ 1934 static void 1935 xhci_event_transfer(struct xhci_softc * const sc, 1936 const struct xhci_trb * const trb) 1937 { 1938 uint64_t trb_0; 1939 uint32_t trb_2, trb_3; 1940 uint8_t trbcode; 1941 u_int slot, dci; 1942 struct xhci_slot *xs; 1943 struct xhci_ring *xr; 1944 struct xhci_xfer *xx; 1945 struct usbd_xfer *xfer; 1946 usbd_status err; 1947 1948 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 1949 1950 trb_0 = le64toh(trb->trb_0); 1951 trb_2 = le32toh(trb->trb_2); 1952 trb_3 = le32toh(trb->trb_3); 1953 trbcode = XHCI_TRB_2_ERROR_GET(trb_2); 1954 slot = XHCI_TRB_3_SLOT_GET(trb_3); 1955 dci = XHCI_TRB_3_EP_GET(trb_3); 1956 xs = &sc->sc_slots[slot]; 1957 xr = xs->xs_xr[dci]; 1958 1959 /* sanity check */ 1960 KASSERT(xr != NULL); 1961 KASSERTMSG(xs->xs_idx != 0 && xs->xs_idx <= sc->sc_maxslots, 1962 "invalid xs_idx %u slot %u", xs->xs_idx, slot); 1963 1964 int idx = 0; 1965 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) { 1966 if (xhci_trb_get_idx(xr, trb_0, &idx)) { 1967 DPRINTFN(0, "invalid trb_0 %#jx", trb_0, 0, 0, 0); 1968 return; 1969 } 1970 xx = xr->xr_cookies[idx]; 1971 1972 /* clear cookie of consumed TRB */ 1973 xr->xr_cookies[idx] = NULL; 1974 1975 /* 1976 * xx is NULL if pipe is opened but xfer is not started. 1977 * It happens when stopping idle pipe. 1978 */ 1979 if (xx == NULL || trbcode == XHCI_TRB_ERROR_LENGTH) { 1980 DPRINTFN(1, "Ignore #%ju: cookie %#jx cc %ju dci %ju", 1981 idx, (uintptr_t)xx, trbcode, dci); 1982 DPRINTFN(1, " orig TRB %#jx type %ju", trb_0, 1983 XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3)), 1984 0, 0); 1985 return; 1986 } 1987 } else { 1988 /* When ED != 0, trb_0 is virtual addr of struct xhci_xfer. 
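 * The low two bits carry extra stage information and are masked off
 * before the pointer is used.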
*/
1989 xx = (void *)(uintptr_t)(trb_0 & ~0x3);
1990 }
1991 /* XXX this may not happen */
1992 if (xx == NULL) {
1993 DPRINTFN(1, "xfer done: xx is NULL", 0, 0, 0, 0);
1994 return;
1995 }
1996 xfer = &xx->xx_xfer;
1997 /* XXX this may happen when detaching */
1998 if (xfer == NULL) {
1999 DPRINTFN(1, "xx(%#jx)->xx_xfer is NULL trb_0 %#jx",
2000 (uintptr_t)xx, trb_0, 0, 0);
2001 return;
2002 }
2003 DPRINTFN(14, "xfer %#jx", (uintptr_t)xfer, 0, 0, 0);
2004 /* XXX I dunno why this happens */
2005 KASSERTMSG(xfer->ux_pipe != NULL, "xfer(%p)->ux_pipe is NULL", xfer);
2006 
2007 if (!xfer->ux_pipe->up_repeat &&
2008 SIMPLEQ_EMPTY(&xfer->ux_pipe->up_queue)) {
2009 DPRINTFN(1, "xfer(%#jx)->pipe not queued", (uintptr_t)xfer,
2010 0, 0, 0);
2011 return;
2012 }
2013 
2014 const uint8_t xfertype =
2015 UE_GET_XFERTYPE(xfer->ux_pipe->up_endpoint->ue_edesc->bmAttributes);
2016 
2017 /* 4.11.5.2 Event Data TRB */
2018 if ((trb_3 & XHCI_TRB_3_ED_BIT) != 0) {
2019 DPRINTFN(14, "transfer Event Data: 0x%016jx 0x%08jx"
2020 " %02jx", trb_0, XHCI_TRB_2_REM_GET(trb_2), trbcode, 0);
2021 if ((trb_0 & 0x3) == 0x3) {
2022 xfer->ux_actlen = XHCI_TRB_2_REM_GET(trb_2);
2023 }
2024 }
2025 
2026 switch (trbcode) {
2027 case XHCI_TRB_ERROR_SHORT_PKT:
2028 case XHCI_TRB_ERROR_SUCCESS:
2029 /*
2030 * A ctrl transfer can generate two events if it has a Data
2031 * stage. A short data stage can be OK and should not
2032 * complete the transfer as the status stage needs to be
2033 * performed.
2034 *
2035 * Note: Data and Status stage events point at the same xfer.
2036 * ux_actlen and ux_dmabuf will be passed to
2037 * usb_transfer_complete after the Status stage event.
2038 *
2039 * It can be distinguished which stage generates the event:
2040 * + by checking the least significant 3 bits of trb_0 if ED==1.
2041 * (see xhci_device_ctrl_start).
2042 * + by checking the type of the original TRB if ED==0.
2043 *
2044 * In addition, intr, bulk, and isoc transfers currently
2045 * consist of a single TD, so the "skip" is not needed.
2046 * ctrl xfer uses EVENT_DATA, and others do not.
2047 * Thus the driver can switch the flow by checking the ED bit.
2048 */
2049 if (xfertype == UE_ISOCHRONOUS) {
2050 xfer->ux_frlengths[xx->xx_isoc_done] -=
2051 XHCI_TRB_2_REM_GET(trb_2);
2052 xfer->ux_actlen += xfer->ux_frlengths[xx->xx_isoc_done];
2053 if (++xx->xx_isoc_done < xfer->ux_nframes)
2054 return;
2055 } else
2056 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0) {
2057 if (xfer->ux_actlen == 0)
2058 xfer->ux_actlen = xfer->ux_length -
2059 XHCI_TRB_2_REM_GET(trb_2);
2060 if (XHCI_TRB_3_TYPE_GET(le32toh(xr->xr_trb[idx].trb_3))
2061 == XHCI_TRB_TYPE_DATA_STAGE) {
2062 return;
2063 }
2064 } else if ((trb_0 & 0x3) == 0x3) {
2065 return;
2066 }
2067 err = USBD_NORMAL_COMPLETION;
2068 break;
2069 case XHCI_TRB_ERROR_STOPPED:
2070 case XHCI_TRB_ERROR_LENGTH:
2071 case XHCI_TRB_ERROR_STOPPED_SHORT:
2072 err = USBD_IOERROR;
2073 break;
2074 case XHCI_TRB_ERROR_STALL:
2075 case XHCI_TRB_ERROR_BABBLE:
2076 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0);
2077 xr->is_halted = true;
2078 /*
2079 * Try to claim this xfer for completion. If it has already
2080 * completed or aborted, drop it on the floor.
2081 */
2082 if (!usbd_xfer_trycomplete(xfer))
2083 return;
2084 
2085 /*
2086 * Stalled endpoints can be recovered by issuing
2087 * command TRB TYPE_RESET_EP on xHCI instead of
2088 * issuing request CLEAR_FEATURE UF_ENDPOINT_HALT
2089 * on the endpoint. However, this function may be
2090 * called from softint context (e.g.
from umass), 2091 * in that case driver gets KASSERT in cv_timedwait 2092 * in xhci_do_command. 2093 * To avoid this, this runs reset_endpoint and 2094 * usb_transfer_complete in usb task thread 2095 * asynchronously (and then umass issues clear 2096 * UF_ENDPOINT_HALT). 2097 */ 2098 2099 /* Override the status. */ 2100 xfer->ux_status = USBD_STALLED; 2101 2102 xhci_clear_endpoint_stall_async(xfer); 2103 return; 2104 default: 2105 DPRINTFN(1, "ERR %ju slot %ju dci %ju", trbcode, slot, dci, 0); 2106 err = USBD_IOERROR; 2107 break; 2108 } 2109 2110 /* 2111 * Try to claim this xfer for completion. If it has already 2112 * completed or aborted, drop it on the floor. 2113 */ 2114 if (!usbd_xfer_trycomplete(xfer)) 2115 return; 2116 2117 /* Set the status. */ 2118 xfer->ux_status = err; 2119 2120 if ((trb_3 & XHCI_TRB_3_ED_BIT) == 0 || 2121 (trb_0 & 0x3) == 0x0) { 2122 usb_transfer_complete(xfer); 2123 } 2124 } 2125 2126 /* Process Command complete events */ 2127 static void 2128 xhci_event_cmd(struct xhci_softc * const sc, const struct xhci_trb * const trb) 2129 { 2130 uint64_t trb_0; 2131 uint32_t trb_2, trb_3; 2132 2133 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2134 2135 KASSERT(mutex_owned(&sc->sc_lock)); 2136 2137 trb_0 = le64toh(trb->trb_0); 2138 trb_2 = le32toh(trb->trb_2); 2139 trb_3 = le32toh(trb->trb_3); 2140 2141 if (trb_0 == sc->sc_command_addr) { 2142 sc->sc_resultpending = false; 2143 2144 sc->sc_result_trb.trb_0 = trb_0; 2145 sc->sc_result_trb.trb_2 = trb_2; 2146 sc->sc_result_trb.trb_3 = trb_3; 2147 if (XHCI_TRB_2_ERROR_GET(trb_2) != 2148 XHCI_TRB_ERROR_SUCCESS) { 2149 DPRINTFN(1, "command completion " 2150 "failure: 0x%016jx 0x%08jx 0x%08jx", 2151 trb_0, trb_2, trb_3, 0); 2152 } 2153 cv_signal(&sc->sc_command_cv); 2154 } else { 2155 DPRINTFN(1, "spurious event: %#jx 0x%016jx " 2156 "0x%08jx 0x%08jx", (uintptr_t)trb, trb_0, trb_2, trb_3); 2157 } 2158 } 2159 2160 /* 2161 * Process events. 2162 * called from xhci_softintr 2163 */ 2164 static void 2165 xhci_handle_event(struct xhci_softc * const sc, 2166 const struct xhci_trb * const trb) 2167 { 2168 uint64_t trb_0; 2169 uint32_t trb_2, trb_3; 2170 2171 XHCIHIST_FUNC(); 2172 2173 trb_0 = le64toh(trb->trb_0); 2174 trb_2 = le32toh(trb->trb_2); 2175 trb_3 = le32toh(trb->trb_3); 2176 2177 XHCIHIST_CALLARGS("event: %#jx 0x%016jx 0x%08jx 0x%08jx", 2178 (uintptr_t)trb, trb_0, trb_2, trb_3); 2179 2180 /* 2181 * 4.11.3.1, 6.4.2.1 2182 * TRB Pointer is invalid for these completion codes. 
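 * Such events carry no usable TRB address, so they are dropped here
 * before any slot/endpoint lookup is attempted.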
2183 */ 2184 switch (XHCI_TRB_2_ERROR_GET(trb_2)) { 2185 case XHCI_TRB_ERROR_RING_UNDERRUN: 2186 case XHCI_TRB_ERROR_RING_OVERRUN: 2187 case XHCI_TRB_ERROR_VF_RING_FULL: 2188 return; 2189 default: 2190 if (trb_0 == 0) { 2191 return; 2192 } 2193 break; 2194 } 2195 2196 switch (XHCI_TRB_3_TYPE_GET(trb_3)) { 2197 case XHCI_TRB_EVENT_TRANSFER: 2198 xhci_event_transfer(sc, trb); 2199 break; 2200 case XHCI_TRB_EVENT_CMD_COMPLETE: 2201 xhci_event_cmd(sc, trb); 2202 break; 2203 case XHCI_TRB_EVENT_PORT_STS_CHANGE: 2204 xhci_rhpsc(sc, (uint32_t)((trb_0 >> 24) & 0xff)); 2205 break; 2206 default: 2207 break; 2208 } 2209 } 2210 2211 static void 2212 xhci_softintr(void *v) 2213 { 2214 struct usbd_bus * const bus = v; 2215 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2216 struct xhci_ring * const er = sc->sc_er; 2217 struct xhci_trb *trb; 2218 int i, j, k; 2219 2220 XHCIHIST_FUNC(); 2221 2222 KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock)); 2223 2224 i = er->xr_ep; 2225 j = er->xr_cs; 2226 2227 XHCIHIST_CALLARGS("er: xr_ep %jd xr_cs %jd", i, j, 0, 0); 2228 2229 while (1) { 2230 usb_syncmem(&er->xr_dma, XHCI_TRB_SIZE * i, XHCI_TRB_SIZE, 2231 BUS_DMASYNC_POSTREAD); 2232 trb = &er->xr_trb[i]; 2233 k = (le32toh(trb->trb_3) & XHCI_TRB_3_CYCLE_BIT) ? 1 : 0; 2234 2235 if (j != k) 2236 break; 2237 2238 xhci_handle_event(sc, trb); 2239 2240 i++; 2241 if (i == er->xr_ntrb) { 2242 i = 0; 2243 j ^= 1; 2244 } 2245 } 2246 2247 er->xr_ep = i; 2248 er->xr_cs = j; 2249 2250 xhci_rt_write_8(sc, XHCI_ERDP(0), xhci_ring_trbp(er, er->xr_ep) | 2251 XHCI_ERDP_BUSY); 2252 2253 DPRINTFN(16, "ends", 0, 0, 0, 0); 2254 2255 return; 2256 } 2257 2258 static void 2259 xhci_poll(struct usbd_bus *bus) 2260 { 2261 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2262 2263 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2264 2265 mutex_enter(&sc->sc_intr_lock); 2266 int ret = xhci_intr1(sc); 2267 if (ret) { 2268 xhci_softintr(bus); 2269 } 2270 mutex_exit(&sc->sc_intr_lock); 2271 2272 return; 2273 } 2274 2275 static struct usbd_xfer * 2276 xhci_allocx(struct usbd_bus *bus, unsigned int nframes) 2277 { 2278 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2279 struct xhci_xfer *xx; 2280 u_int ntrbs; 2281 2282 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2283 2284 ntrbs = uimax(3, nframes); 2285 const size_t trbsz = sizeof(*xx->xx_trb) * ntrbs; 2286 2287 xx = pool_cache_get(sc->sc_xferpool, PR_WAITOK); 2288 if (xx != NULL) { 2289 memset(xx, 0, sizeof(*xx)); 2290 if (ntrbs > 0) { 2291 xx->xx_trb = kmem_alloc(trbsz, KM_SLEEP); 2292 xx->xx_ntrb = ntrbs; 2293 } 2294 #ifdef DIAGNOSTIC 2295 xx->xx_xfer.ux_state = XFER_BUSY; 2296 #endif 2297 } 2298 2299 return &xx->xx_xfer; 2300 } 2301 2302 static void 2303 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer) 2304 { 2305 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2306 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 2307 2308 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2309 2310 #ifdef DIAGNOSTIC 2311 if (xfer->ux_state != XFER_BUSY && 2312 xfer->ux_status != USBD_NOT_STARTED) { 2313 DPRINTFN(0, "xfer=%#jx not busy, 0x%08jx", 2314 (uintptr_t)xfer, xfer->ux_state, 0, 0); 2315 } 2316 xfer->ux_state = XFER_FREE; 2317 #endif 2318 if (xx->xx_ntrb > 0) { 2319 kmem_free(xx->xx_trb, xx->xx_ntrb * sizeof(*xx->xx_trb)); 2320 xx->xx_trb = NULL; 2321 xx->xx_ntrb = 0; 2322 } 2323 pool_cache_put(sc->sc_xferpool, xx); 2324 } 2325 2326 static bool 2327 xhci_dying(struct usbd_bus *bus) 2328 { 2329 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2330 2331 return sc->sc_dying; 2332 } 2333 2334 static void 2335 
xhci_get_lock(struct usbd_bus *bus, kmutex_t **lock) 2336 { 2337 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2338 2339 *lock = &sc->sc_lock; 2340 } 2341 2342 extern uint32_t usb_cookie_no; 2343 2344 /* 2345 * xHCI 4.3 2346 * Called when uhub_explore finds a new device (via usbd_new_device). 2347 * Port initialization and speed detection (4.3.1) are already done in uhub.c. 2348 * This function does: 2349 * Allocate and construct dev structure of default endpoint (ep0). 2350 * Allocate and open pipe of ep0. 2351 * Enable slot and initialize slot context. 2352 * Set Address. 2353 * Read initial device descriptor. 2354 * Determine initial MaxPacketSize (mps) by speed. 2355 * Read full device descriptor. 2356 * Register this device. 2357 * Finally state of device transitions ADDRESSED. 2358 */ 2359 static usbd_status 2360 xhci_new_device(device_t parent, struct usbd_bus *bus, int depth, 2361 int speed, int port, struct usbd_port *up) 2362 { 2363 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 2364 struct usbd_device *dev; 2365 usbd_status err; 2366 usb_device_descriptor_t *dd; 2367 struct xhci_slot *xs; 2368 uint32_t *cp; 2369 2370 XHCIHIST_FUNC(); 2371 XHCIHIST_CALLARGS("port %ju depth %ju speed %ju up %#jx", 2372 port, depth, speed, (uintptr_t)up); 2373 2374 dev = kmem_zalloc(sizeof(*dev), KM_SLEEP); 2375 dev->ud_bus = bus; 2376 dev->ud_quirks = &usbd_no_quirk; 2377 dev->ud_addr = 0; 2378 dev->ud_ddesc.bMaxPacketSize = 0; 2379 dev->ud_depth = depth; 2380 dev->ud_powersrc = up; 2381 dev->ud_myhub = up->up_parent; 2382 dev->ud_speed = speed; 2383 dev->ud_langid = USBD_NOLANG; 2384 dev->ud_cookie.cookie = ++usb_cookie_no; 2385 2386 /* Set up default endpoint handle. */ 2387 dev->ud_ep0.ue_edesc = &dev->ud_ep0desc; 2388 /* doesn't matter, just don't let it uninitialized */ 2389 dev->ud_ep0.ue_toggle = 0; 2390 2391 /* Set up default endpoint descriptor. */ 2392 dev->ud_ep0desc.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE; 2393 dev->ud_ep0desc.bDescriptorType = UDESC_ENDPOINT; 2394 dev->ud_ep0desc.bEndpointAddress = USB_CONTROL_ENDPOINT; 2395 dev->ud_ep0desc.bmAttributes = UE_CONTROL; 2396 dev->ud_ep0desc.bInterval = 0; 2397 2398 /* 4.3, 4.8.2.1 */ 2399 switch (speed) { 2400 case USB_SPEED_SUPER: 2401 case USB_SPEED_SUPER_PLUS: 2402 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_3_MAX_CTRL_PACKET); 2403 break; 2404 case USB_SPEED_FULL: 2405 /* XXX using 64 as initial mps of ep0 in FS */ 2406 case USB_SPEED_HIGH: 2407 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_2_MAX_CTRL_PACKET); 2408 break; 2409 case USB_SPEED_LOW: 2410 default: 2411 USETW(dev->ud_ep0desc.wMaxPacketSize, USB_MAX_IPACKET); 2412 break; 2413 } 2414 2415 up->up_dev = dev; 2416 2417 dd = &dev->ud_ddesc; 2418 2419 if (depth == 0 && port == 0) { 2420 KASSERT(bus->ub_devices[USB_ROOTHUB_INDEX] == NULL); 2421 bus->ub_devices[USB_ROOTHUB_INDEX] = dev; 2422 2423 /* Establish the default pipe. 
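 * The root hub is emulated in software (usbroothub), so no xHC slot
 * is allocated for it and ep0 can be set up directly.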
*/ 2424 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0, 2425 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0); 2426 if (err) { 2427 DPRINTFN(1, "setup default pipe failed %jd", err,0,0,0); 2428 goto bad; 2429 } 2430 err = usbd_get_initial_ddesc(dev, dd); 2431 if (err) { 2432 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0); 2433 goto bad; 2434 } 2435 } else { 2436 uint8_t slot = 0; 2437 2438 /* 4.3.2 */ 2439 err = xhci_enable_slot(sc, &slot); 2440 if (err) { 2441 DPRINTFN(1, "enable slot %ju", err, 0, 0, 0); 2442 goto bad; 2443 } 2444 2445 xs = &sc->sc_slots[slot]; 2446 dev->ud_hcpriv = xs; 2447 2448 /* 4.3.3 initialize slot structure */ 2449 err = xhci_init_slot(dev, slot); 2450 if (err) { 2451 DPRINTFN(1, "init slot %ju", err, 0, 0, 0); 2452 dev->ud_hcpriv = NULL; 2453 /* 2454 * We have to disable_slot here because 2455 * xs->xs_idx == 0 when xhci_init_slot fails, 2456 * in that case usbd_remove_dev won't work. 2457 */ 2458 mutex_enter(&sc->sc_lock); 2459 xhci_disable_slot(sc, slot); 2460 mutex_exit(&sc->sc_lock); 2461 goto bad; 2462 } 2463 2464 /* 2465 * We have to establish the default pipe _after_ slot 2466 * structure has been prepared. 2467 */ 2468 err = usbd_setup_pipe(dev, 0, &dev->ud_ep0, 2469 USBD_DEFAULT_INTERVAL, &dev->ud_pipe0); 2470 if (err) { 2471 DPRINTFN(1, "setup default pipe failed %jd", err, 0, 0, 2472 0); 2473 goto bad; 2474 } 2475 2476 /* 4.3.4 Address Assignment */ 2477 err = xhci_set_address(dev, slot, false); 2478 if (err) { 2479 DPRINTFN(1, "failed! to set address: %ju", err, 0, 0, 0); 2480 goto bad; 2481 } 2482 2483 /* Allow device time to set new address */ 2484 usbd_delay_ms(dev, USB_SET_ADDRESS_SETTLE); 2485 2486 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD); 2487 cp = xhci_slot_get_dcv(sc, xs, XHCI_DCI_SLOT); 2488 HEXDUMP("slot context", cp, sc->sc_ctxsz); 2489 uint8_t addr = XHCI_SCTX_3_DEV_ADDR_GET(le32toh(cp[3])); 2490 DPRINTFN(4, "device address %ju", addr, 0, 0, 0); 2491 /* 2492 * XXX ensure we know when the hardware does something 2493 * we can't yet cope with 2494 */ 2495 KASSERTMSG(addr >= 1 && addr <= 127, "addr %d", addr); 2496 dev->ud_addr = addr; 2497 2498 KASSERTMSG(bus->ub_devices[usb_addr2dindex(dev->ud_addr)] == NULL, 2499 "addr %d already allocated", dev->ud_addr); 2500 /* 2501 * The root hub is given its own slot 2502 */ 2503 bus->ub_devices[usb_addr2dindex(dev->ud_addr)] = dev; 2504 2505 err = usbd_get_initial_ddesc(dev, dd); 2506 if (err) { 2507 DPRINTFN(1, "get_initial_ddesc %ju", err, 0, 0, 0); 2508 goto bad; 2509 } 2510 2511 /* 4.8.2.1 */ 2512 if (USB_IS_SS(speed)) { 2513 if (dd->bMaxPacketSize != 9) { 2514 printf("%s: invalid mps 2^%u for SS ep0," 2515 " using 512\n", 2516 device_xname(sc->sc_dev), 2517 dd->bMaxPacketSize); 2518 dd->bMaxPacketSize = 9; 2519 } 2520 USETW(dev->ud_ep0desc.wMaxPacketSize, 2521 (1 << dd->bMaxPacketSize)); 2522 } else 2523 USETW(dev->ud_ep0desc.wMaxPacketSize, 2524 dd->bMaxPacketSize); 2525 DPRINTFN(4, "bMaxPacketSize %ju", dd->bMaxPacketSize, 0, 0, 0); 2526 err = xhci_update_ep0_mps(sc, xs, 2527 UGETW(dev->ud_ep0desc.wMaxPacketSize)); 2528 if (err) { 2529 DPRINTFN(1, "update mps of ep0 %ju", err, 0, 0, 0); 2530 goto bad; 2531 } 2532 } 2533 2534 err = usbd_reload_device_desc(dev); 2535 if (err) { 2536 DPRINTFN(1, "reload desc %ju", err, 0, 0, 0); 2537 goto bad; 2538 } 2539 2540 DPRINTFN(1, "adding unit addr=%jd, rev=%02jx,", 2541 dev->ud_addr, UGETW(dd->bcdUSB), 0, 0); 2542 DPRINTFN(1, " class=%jd, subclass=%jd, protocol=%jd,", 2543 dd->bDeviceClass, dd->bDeviceSubClass, 2544 dd->bDeviceProtocol, 
0); 2545 DPRINTFN(1, " mps=%jd, len=%jd, noconf=%jd, speed=%jd", 2546 dd->bMaxPacketSize, dd->bLength, dd->bNumConfigurations, 2547 dev->ud_speed); 2548 2549 usbd_get_device_strings(dev); 2550 2551 usbd_add_dev_event(USB_EVENT_DEVICE_ATTACH, dev); 2552 2553 if (depth == 0 && port == 0) { 2554 usbd_attach_roothub(parent, dev); 2555 DPRINTFN(1, "root hub %#jx", (uintptr_t)dev, 0, 0, 0); 2556 return USBD_NORMAL_COMPLETION; 2557 } 2558 2559 err = usbd_probe_and_attach(parent, dev, port, dev->ud_addr); 2560 bad: 2561 if (err != USBD_NORMAL_COMPLETION) { 2562 usbd_remove_device(dev, up); 2563 } 2564 2565 return err; 2566 } 2567 2568 static usbd_status 2569 xhci_ring_init(struct xhci_softc * const sc, struct xhci_ring **xrp, 2570 size_t ntrb, size_t align) 2571 { 2572 size_t size = ntrb * XHCI_TRB_SIZE; 2573 struct xhci_ring *xr; 2574 2575 XHCIHIST_FUNC(); 2576 XHCIHIST_CALLARGS("xr %#jx ntrb %#jx align %#jx", 2577 (uintptr_t)*xrp, ntrb, align, 0); 2578 2579 xr = kmem_zalloc(sizeof(struct xhci_ring), KM_SLEEP); 2580 DPRINTFN(1, "ring %#jx", (uintptr_t)xr, 0, 0, 0); 2581 2582 int err = usb_allocmem(&sc->sc_bus, size, align, 2583 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xr->xr_dma); 2584 if (err) { 2585 kmem_free(xr, sizeof(struct xhci_ring)); 2586 DPRINTFN(1, "alloc xr_dma failed %jd", err, 0, 0, 0); 2587 return err; 2588 } 2589 mutex_init(&xr->xr_lock, MUTEX_DEFAULT, IPL_SOFTUSB); 2590 xr->xr_cookies = kmem_zalloc(sizeof(*xr->xr_cookies) * ntrb, KM_SLEEP); 2591 xr->xr_trb = xhci_ring_trbv(xr, 0); 2592 xr->xr_ntrb = ntrb; 2593 xr->is_halted = false; 2594 xhci_host_dequeue(xr); 2595 *xrp = xr; 2596 2597 return USBD_NORMAL_COMPLETION; 2598 } 2599 2600 static void 2601 xhci_ring_free(struct xhci_softc * const sc, struct xhci_ring ** const xr) 2602 { 2603 if (*xr == NULL) 2604 return; 2605 2606 usb_freemem(&sc->sc_bus, &(*xr)->xr_dma); 2607 mutex_destroy(&(*xr)->xr_lock); 2608 kmem_free((*xr)->xr_cookies, 2609 sizeof(*(*xr)->xr_cookies) * (*xr)->xr_ntrb); 2610 kmem_free(*xr, sizeof(struct xhci_ring)); 2611 *xr = NULL; 2612 } 2613 2614 static void 2615 xhci_ring_put(struct xhci_softc * const sc, struct xhci_ring * const xr, 2616 void *cookie, struct xhci_soft_trb * const trbs, size_t ntrbs) 2617 { 2618 size_t i; 2619 u_int ri; 2620 u_int cs; 2621 uint64_t parameter; 2622 uint32_t status; 2623 uint32_t control; 2624 2625 XHCIHIST_FUNC(); 2626 XHCIHIST_CALLARGS("%#jx xr_ep %#jx xr_cs %ju", 2627 (uintptr_t)xr, xr->xr_ep, xr->xr_cs, 0); 2628 2629 KASSERTMSG(ntrbs < xr->xr_ntrb, "ntrbs %zu, xr->xr_ntrb %u", 2630 ntrbs, xr->xr_ntrb); 2631 for (i = 0; i < ntrbs; i++) { 2632 DPRINTFN(12, "xr %#jx trbs %#jx num %ju", (uintptr_t)xr, 2633 (uintptr_t)trbs, i, 0); 2634 DPRINTFN(12, " 0x%016jx 0x%08jx 0x%08jx", 2635 trbs[i].trb_0, trbs[i].trb_2, trbs[i].trb_3, 0); 2636 KASSERTMSG(XHCI_TRB_3_TYPE_GET(trbs[i].trb_3) != 2637 XHCI_TRB_TYPE_LINK, "trbs[%zu].trb3 %#x", i, trbs[i].trb_3); 2638 } 2639 2640 ri = xr->xr_ep; 2641 cs = xr->xr_cs; 2642 2643 /* 2644 * Although the xhci hardware can do scatter/gather dma from 2645 * arbitrary sized buffers, there is a non-obvious restriction 2646 * that a LINK trb is only allowed at the end of a burst of 2647 * transfers - which might be 16kB. 2648 * Arbitrary aligned LINK trb definitely fail on Ivy bridge. 2649 * The simple solution is not to allow a LINK trb in the middle 2650 * of anything - as here. 
2651 * XXX: (dsl) There are xhci controllers out there (eg some made by 2652 * ASMedia) that seem to lock up if they process a LINK trb but 2653 * cannot process the linked-to trb yet. 2654 * The code should write the 'cycle' bit on the link trb AFTER 2655 * adding the other trb. 2656 */ 2657 u_int firstep = xr->xr_ep; 2658 u_int firstcs = xr->xr_cs; 2659 2660 for (i = 0; i < ntrbs; ) { 2661 u_int oldri = ri; 2662 u_int oldcs = cs; 2663 2664 if (ri >= (xr->xr_ntrb - 1)) { 2665 /* Put Link TD at the end of ring */ 2666 parameter = xhci_ring_trbp(xr, 0); 2667 status = 0; 2668 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_LINK) | 2669 XHCI_TRB_3_TC_BIT; 2670 xr->xr_cookies[ri] = NULL; 2671 xr->xr_ep = 0; 2672 xr->xr_cs ^= 1; 2673 ri = xr->xr_ep; 2674 cs = xr->xr_cs; 2675 } else { 2676 parameter = trbs[i].trb_0; 2677 status = trbs[i].trb_2; 2678 control = trbs[i].trb_3; 2679 2680 xr->xr_cookies[ri] = cookie; 2681 ri++; 2682 i++; 2683 } 2684 /* 2685 * If this is a first TRB, mark it invalid to prevent 2686 * xHC from running it immediately. 2687 */ 2688 if (oldri == firstep) { 2689 if (oldcs) { 2690 control &= ~XHCI_TRB_3_CYCLE_BIT; 2691 } else { 2692 control |= XHCI_TRB_3_CYCLE_BIT; 2693 } 2694 } else { 2695 if (oldcs) { 2696 control |= XHCI_TRB_3_CYCLE_BIT; 2697 } else { 2698 control &= ~XHCI_TRB_3_CYCLE_BIT; 2699 } 2700 } 2701 xhci_trb_put(&xr->xr_trb[oldri], parameter, status, control); 2702 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * oldri, 2703 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE); 2704 } 2705 2706 /* Now invert cycle bit of first TRB */ 2707 if (firstcs) { 2708 xr->xr_trb[firstep].trb_3 |= htole32(XHCI_TRB_3_CYCLE_BIT); 2709 } else { 2710 xr->xr_trb[firstep].trb_3 &= ~htole32(XHCI_TRB_3_CYCLE_BIT); 2711 } 2712 usb_syncmem(&xr->xr_dma, XHCI_TRB_SIZE * firstep, 2713 XHCI_TRB_SIZE * 1, BUS_DMASYNC_PREWRITE); 2714 2715 xr->xr_ep = ri; 2716 xr->xr_cs = cs; 2717 2718 DPRINTFN(12, "%#jx xr_ep %#jx xr_cs %ju", (uintptr_t)xr, xr->xr_ep, 2719 xr->xr_cs, 0); 2720 } 2721 2722 static inline void 2723 xhci_ring_put_xfer(struct xhci_softc * const sc, struct xhci_ring * const tr, 2724 struct xhci_xfer *xx, u_int ntrb) 2725 { 2726 KASSERT(ntrb <= xx->xx_ntrb); 2727 xhci_ring_put(sc, tr, xx, xx->xx_trb, ntrb); 2728 } 2729 2730 /* 2731 * Stop execution commands, purge all commands on command ring, and 2732 * rewind dequeue pointer. 2733 */ 2734 static void 2735 xhci_abort_command(struct xhci_softc *sc) 2736 { 2737 struct xhci_ring * const cr = sc->sc_cr; 2738 uint64_t crcr; 2739 int i; 2740 2741 XHCIHIST_FUNC(); 2742 XHCIHIST_CALLARGS("command %#jx timeout, aborting", 2743 sc->sc_command_addr, 0, 0, 0); 2744 2745 mutex_enter(&cr->xr_lock); 2746 2747 /* 4.6.1.2 Aborting a Command */ 2748 crcr = xhci_op_read_8(sc, XHCI_CRCR); 2749 xhci_op_write_8(sc, XHCI_CRCR, crcr | XHCI_CRCR_LO_CA); 2750 2751 for (i = 0; i < 500; i++) { 2752 crcr = xhci_op_read_8(sc, XHCI_CRCR); 2753 if ((crcr & XHCI_CRCR_LO_CRR) == 0) 2754 break; 2755 usb_delay_ms(&sc->sc_bus, 1); 2756 } 2757 if ((crcr & XHCI_CRCR_LO_CRR) != 0) { 2758 DPRINTFN(1, "Command Abort timeout", 0, 0, 0, 0); 2759 /* reset HC here? */ 2760 } 2761 2762 /* reset command ring dequeue pointer */ 2763 cr->xr_ep = 0; 2764 cr->xr_cs = 1; 2765 xhci_op_write_8(sc, XHCI_CRCR, xhci_ring_trbp(cr, 0) | cr->xr_cs); 2766 2767 mutex_exit(&cr->xr_lock); 2768 } 2769 2770 /* 2771 * Put a command on command ring, ring bell, set timer, and cv_timedwait. 2772 * Command completion is notified by cv_signal from xhci_event_cmd() 2773 * (called from xhci_softint), or timed-out. 
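 * On timeout the command ring is aborted via xhci_abort_command()
 * and USBD_TIMEOUT is returned.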
2774 * The completion code is copied to sc->sc_result_trb in xhci_event_cmd(), 2775 * then do_command examines it. 2776 */ 2777 static usbd_status 2778 xhci_do_command_locked(struct xhci_softc * const sc, 2779 struct xhci_soft_trb * const trb, int timeout) 2780 { 2781 struct xhci_ring * const cr = sc->sc_cr; 2782 usbd_status err; 2783 2784 XHCIHIST_FUNC(); 2785 XHCIHIST_CALLARGS("input: 0x%016jx 0x%08jx 0x%08jx", 2786 trb->trb_0, trb->trb_2, trb->trb_3, 0); 2787 2788 KASSERTMSG(!cpu_intr_p() && !cpu_softintr_p(), "called from intr ctx"); 2789 KASSERT(mutex_owned(&sc->sc_lock)); 2790 2791 while (sc->sc_command_addr != 0) 2792 cv_wait(&sc->sc_cmdbusy_cv, &sc->sc_lock); 2793 2794 /* 2795 * If enqueue pointer points at last of ring, it's Link TRB, 2796 * command TRB will be stored in 0th TRB. 2797 */ 2798 if (cr->xr_ep == cr->xr_ntrb - 1) 2799 sc->sc_command_addr = xhci_ring_trbp(cr, 0); 2800 else 2801 sc->sc_command_addr = xhci_ring_trbp(cr, cr->xr_ep); 2802 2803 sc->sc_resultpending = true; 2804 2805 mutex_enter(&cr->xr_lock); 2806 xhci_ring_put(sc, cr, NULL, trb, 1); 2807 mutex_exit(&cr->xr_lock); 2808 2809 xhci_db_write_4(sc, XHCI_DOORBELL(0), 0); 2810 2811 while (sc->sc_resultpending) { 2812 if (cv_timedwait(&sc->sc_command_cv, &sc->sc_lock, 2813 MAX(1, mstohz(timeout))) == EWOULDBLOCK) { 2814 xhci_abort_command(sc); 2815 err = USBD_TIMEOUT; 2816 goto timedout; 2817 } 2818 } 2819 2820 trb->trb_0 = sc->sc_result_trb.trb_0; 2821 trb->trb_2 = sc->sc_result_trb.trb_2; 2822 trb->trb_3 = sc->sc_result_trb.trb_3; 2823 2824 DPRINTFN(12, "output: 0x%016jx 0x%08jx 0x%08jx", 2825 trb->trb_0, trb->trb_2, trb->trb_3, 0); 2826 2827 switch (XHCI_TRB_2_ERROR_GET(trb->trb_2)) { 2828 case XHCI_TRB_ERROR_SUCCESS: 2829 err = USBD_NORMAL_COMPLETION; 2830 break; 2831 default: 2832 case 192 ... 223: 2833 DPRINTFN(5, "error %#jx", 2834 XHCI_TRB_2_ERROR_GET(trb->trb_2), 0, 0, 0); 2835 err = USBD_IOERROR; 2836 break; 2837 case 224 ... 255: 2838 err = USBD_NORMAL_COMPLETION; 2839 break; 2840 } 2841 2842 timedout: 2843 sc->sc_resultpending = false; 2844 sc->sc_command_addr = 0; 2845 cv_broadcast(&sc->sc_cmdbusy_cv); 2846 2847 return err; 2848 } 2849 2850 static usbd_status 2851 xhci_do_command(struct xhci_softc * const sc, struct xhci_soft_trb * const trb, 2852 int timeout) 2853 { 2854 2855 mutex_enter(&sc->sc_lock); 2856 usbd_status ret = xhci_do_command_locked(sc, trb, timeout); 2857 mutex_exit(&sc->sc_lock); 2858 2859 return ret; 2860 } 2861 2862 static usbd_status 2863 xhci_enable_slot(struct xhci_softc * const sc, uint8_t * const slotp) 2864 { 2865 struct xhci_soft_trb trb; 2866 usbd_status err; 2867 2868 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2869 2870 trb.trb_0 = 0; 2871 trb.trb_2 = 0; 2872 trb.trb_3 = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ENABLE_SLOT); 2873 2874 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 2875 if (err != USBD_NORMAL_COMPLETION) { 2876 return err; 2877 } 2878 2879 *slotp = XHCI_TRB_3_SLOT_GET(trb.trb_3); 2880 2881 return err; 2882 } 2883 2884 /* 2885 * xHCI 4.6.4 2886 * Deallocate ring and device/input context DMA buffers, and disable_slot. 2887 * All endpoints in the slot should be stopped. 2888 * Should be called with sc_lock held. 
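 * On success the slot's DCBAA entry is cleared and the per-slot DMA
 * buffers are freed, leaving xs_idx == 0 to mark the slot unused.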
2889 */ 2890 static usbd_status 2891 xhci_disable_slot(struct xhci_softc * const sc, uint8_t slot) 2892 { 2893 struct xhci_soft_trb trb; 2894 struct xhci_slot *xs; 2895 usbd_status err; 2896 2897 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 2898 2899 if (sc->sc_dying) 2900 return USBD_IOERROR; 2901 2902 trb.trb_0 = 0; 2903 trb.trb_2 = 0; 2904 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot) | 2905 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DISABLE_SLOT); 2906 2907 err = xhci_do_command_locked(sc, &trb, USBD_DEFAULT_TIMEOUT); 2908 2909 if (!err) { 2910 xs = &sc->sc_slots[slot]; 2911 if (xs->xs_idx != 0) { 2912 xhci_free_slot(sc, xs); 2913 xhci_set_dcba(sc, 0, slot); 2914 memset(xs, 0, sizeof(*xs)); 2915 } 2916 } 2917 2918 return err; 2919 } 2920 2921 /* 2922 * Set address of device and transition slot state from ENABLED to ADDRESSED 2923 * if Block Setaddress Request (BSR) is false. 2924 * If BSR==true, transition slot state from ENABLED to DEFAULT. 2925 * see xHCI 1.1 4.5.3, 3.3.4 2926 * Should be called without sc_lock held. 2927 */ 2928 static usbd_status 2929 xhci_address_device(struct xhci_softc * const sc, 2930 uint64_t icp, uint8_t slot_id, bool bsr) 2931 { 2932 struct xhci_soft_trb trb; 2933 usbd_status err; 2934 2935 XHCIHIST_FUNC(); 2936 if (bsr) { 2937 XHCIHIST_CALLARGS("icp %#jx slot %#jx with bsr", 2938 icp, slot_id, 0, 0); 2939 } else { 2940 XHCIHIST_CALLARGS("icp %#jx slot %#jx nobsr", 2941 icp, slot_id, 0, 0); 2942 } 2943 2944 trb.trb_0 = icp; 2945 trb.trb_2 = 0; 2946 trb.trb_3 = XHCI_TRB_3_SLOT_SET(slot_id) | 2947 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ADDRESS_DEVICE) | 2948 (bsr ? XHCI_TRB_3_BSR_BIT : 0); 2949 2950 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 2951 2952 if (XHCI_TRB_2_ERROR_GET(trb.trb_2) == XHCI_TRB_ERROR_NO_SLOTS) 2953 err = USBD_NO_ADDR; 2954 2955 return err; 2956 } 2957 2958 static usbd_status 2959 xhci_update_ep0_mps(struct xhci_softc * const sc, 2960 struct xhci_slot * const xs, u_int mps) 2961 { 2962 struct xhci_soft_trb trb; 2963 usbd_status err; 2964 uint32_t * cp; 2965 2966 XHCIHIST_FUNC(); 2967 XHCIHIST_CALLARGS("slot %ju mps %ju", xs->xs_idx, mps, 0, 0); 2968 2969 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL); 2970 cp[0] = htole32(0); 2971 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_EP_CONTROL)); 2972 2973 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_EP_CONTROL)); 2974 cp[1] = htole32(XHCI_EPCTX_1_MAXP_SIZE_SET(mps)); 2975 2976 /* sync input contexts before they are read from memory */ 2977 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE); 2978 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0), 2979 sc->sc_ctxsz * 4); 2980 2981 trb.trb_0 = xhci_slot_get_icp(sc, xs, 0); 2982 trb.trb_2 = 0; 2983 trb.trb_3 = XHCI_TRB_3_SLOT_SET(xs->xs_idx) | 2984 XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_EVALUATE_CTX); 2985 2986 err = xhci_do_command(sc, &trb, USBD_DEFAULT_TIMEOUT); 2987 return err; 2988 } 2989 2990 static void 2991 xhci_set_dcba(struct xhci_softc * const sc, uint64_t dcba, int si) 2992 { 2993 uint64_t * const dcbaa = KERNADDR(&sc->sc_dcbaa_dma, 0); 2994 2995 XHCIHIST_FUNC(); 2996 XHCIHIST_CALLARGS("dcbaa %#jx dc 0x%016jx slot %jd", 2997 (uintptr_t)&dcbaa[si], dcba, si, 0); 2998 2999 dcbaa[si] = htole64(dcba); 3000 usb_syncmem(&sc->sc_dcbaa_dma, si * sizeof(uint64_t), sizeof(uint64_t), 3001 BUS_DMASYNC_PREWRITE); 3002 } 3003 3004 /* 3005 * Allocate device and input context DMA buffer, and 3006 * TRB DMA buffer for each endpoint. 
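 * On success xs_idx records the slot number; xs_idx == 0 is used
 * throughout the driver to mean the slot is not initialized.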
3007 */
3008 static usbd_status
3009 xhci_init_slot(struct usbd_device *dev, uint32_t slot)
3010 {
3011 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3012 struct xhci_slot *xs;
3013 
3014 XHCIHIST_FUNC();
3015 XHCIHIST_CALLARGS("slot %ju", slot, 0, 0, 0);
3016 
3017 xs = &sc->sc_slots[slot];
3018 
3019 /* allocate contexts */
3020 int err = usb_allocmem(&sc->sc_bus, sc->sc_pgsz, sc->sc_pgsz,
3021 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_dc_dma);
3022 if (err) {
3023 DPRINTFN(1, "failed to allocmem output device context %jd",
3024 err, 0, 0, 0);
3025 return USBD_NOMEM;
3026 }
3027 
3028 err = usb_allocmem(&sc->sc_bus, sc->sc_pgsz, sc->sc_pgsz,
3029 USBMALLOC_COHERENT | USBMALLOC_ZERO, &xs->xs_ic_dma);
3030 if (err) {
3031 DPRINTFN(1, "failed to allocmem input device context %jd",
3032 err, 0, 0, 0);
3033 goto bad;
3034 }
3035 
3036 memset(&xs->xs_xr[0], 0, sizeof(xs->xs_xr));
3037 xs->xs_idx = slot;
3038 
3039 return USBD_NORMAL_COMPLETION;
3040 bad: /* free the already-allocated output context */
3041 usb_freemem(&sc->sc_bus, &xs->xs_dc_dma);
3042 xs->xs_idx = 0;
3043 return USBD_NOMEM;
3044 }
3045 
3046 static void
3047 xhci_free_slot(struct xhci_softc *sc, struct xhci_slot *xs)
3048 {
3049 u_int dci;
3050 
3051 XHCIHIST_FUNC();
3052 XHCIHIST_CALLARGS("slot %ju", xs->xs_idx, 0, 0, 0);
3053 
3054 /* deallocate all allocated rings in the slot */
3055 for (dci = XHCI_DCI_SLOT; dci <= XHCI_MAX_DCI; dci++) {
3056 if (xs->xs_xr[dci] != NULL)
3057 xhci_ring_free(sc, &xs->xs_xr[dci]);
3058 }
3059 usb_freemem(&sc->sc_bus, &xs->xs_ic_dma);
3060 usb_freemem(&sc->sc_bus, &xs->xs_dc_dma);
3061 xs->xs_idx = 0;
3062 }
3063 
3064 /*
3065 * Setup slot context, set Device Context Base Address, and issue
3066 * Set Address Device command.
3067 */
3068 static usbd_status
3069 xhci_set_address(struct usbd_device *dev, uint32_t slot, bool bsr)
3070 {
3071 struct xhci_softc * const sc = XHCI_BUS2SC(dev->ud_bus);
3072 struct xhci_slot *xs;
3073 usbd_status err;
3074 
3075 XHCIHIST_FUNC();
3076 XHCIHIST_CALLARGS("slot %ju bsr %ju", slot, bsr, 0, 0);
3077 
3078 xs = &sc->sc_slots[slot];
3079 
3080 xhci_setup_ctx(dev->ud_pipe0);
3081 
3082 HEXDUMP("input context", xhci_slot_get_icv(sc, xs, 0),
3083 sc->sc_ctxsz * 3);
3084 
3085 xhci_set_dcba(sc, DMAADDR(&xs->xs_dc_dma, 0), slot);
3086 
3087 err = xhci_address_device(sc, xhci_slot_get_icp(sc, xs, 0), slot, bsr);
3088 
3089 usb_syncmem(&xs->xs_dc_dma, 0, sc->sc_pgsz, BUS_DMASYNC_POSTREAD);
3090 HEXDUMP("output context", xhci_slot_get_dcv(sc, xs, 0),
3091 sc->sc_ctxsz * 2);
3092 
3093 return err;
3094 }
3095 
3096 /*
3097 * 4.8.2, 6.2.3.2
3098 * construct slot/endpoint context parameters and do syncmem
3099 */
3100 static void
3101 xhci_setup_ctx(struct usbd_pipe *pipe)
3102 {
3103 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);
3104 struct usbd_device *dev = pipe->up_dev;
3105 struct xhci_slot * const xs = dev->ud_hcpriv;
3106 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc;
3107 const u_int dci = xhci_ep_get_dci(ed);
3108 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
3109 uint32_t *cp;
3110 uint16_t mps = UGETW(ed->wMaxPacketSize);
3111 uint8_t speed = dev->ud_speed;
3112 uint8_t ival = ed->bInterval;
3113 
3114 XHCIHIST_FUNC();
3115 XHCIHIST_CALLARGS("pipe %#jx: slot %ju dci %ju speed %ju",
3116 (uintptr_t)pipe, xs->xs_idx, dci, speed);
3117 
3118 /* set up initial input control context */
3119 cp = xhci_slot_get_icv(sc, xs, XHCI_ICI_INPUT_CONTROL);
3120 cp[0] = htole32(0);
3121 cp[1] = htole32(XHCI_INCTX_1_ADD_MASK(dci));
3122 cp[1] |=
htole32(XHCI_INCTX_1_ADD_MASK(XHCI_DCI_SLOT)); 3123 cp[7] = htole32(0); 3124 3125 /* set up input slot context */ 3126 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(XHCI_DCI_SLOT)); 3127 cp[0] = 3128 XHCI_SCTX_0_CTX_NUM_SET(dci) | 3129 XHCI_SCTX_0_SPEED_SET(xhci_speed2xspeed(speed)); 3130 cp[1] = 0; 3131 cp[2] = XHCI_SCTX_2_IRQ_TARGET_SET(0); 3132 cp[3] = 0; 3133 xhci_setup_route(pipe, cp); 3134 xhci_setup_tthub(pipe, cp); 3135 3136 cp[0] = htole32(cp[0]); 3137 cp[1] = htole32(cp[1]); 3138 cp[2] = htole32(cp[2]); 3139 cp[3] = htole32(cp[3]); 3140 3141 /* set up input endpoint context */ 3142 cp = xhci_slot_get_icv(sc, xs, xhci_dci_to_ici(dci)); 3143 cp[0] = 3144 XHCI_EPCTX_0_EPSTATE_SET(0) | 3145 XHCI_EPCTX_0_MULT_SET(0) | 3146 XHCI_EPCTX_0_MAXP_STREAMS_SET(0) | 3147 XHCI_EPCTX_0_LSA_SET(0) | 3148 XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(0); 3149 cp[1] = 3150 XHCI_EPCTX_1_EPTYPE_SET(xhci_ep_get_type(ed)) | 3151 XHCI_EPCTX_1_HID_SET(0) | 3152 XHCI_EPCTX_1_MAXB_SET(0); 3153 3154 if (xfertype != UE_ISOCHRONOUS) 3155 cp[1] |= XHCI_EPCTX_1_CERR_SET(3); 3156 3157 if (xfertype == UE_CONTROL) 3158 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(8); /* 6.2.3 */ 3159 else if (USB_IS_SS(speed)) 3160 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(mps); 3161 else 3162 cp[4] = XHCI_EPCTX_4_AVG_TRB_LEN_SET(UE_GET_SIZE(mps)); 3163 3164 xhci_setup_maxburst(pipe, cp); 3165 3166 switch (xfertype) { 3167 case UE_CONTROL: 3168 break; 3169 case UE_BULK: 3170 /* XXX Set MaxPStreams, HID, and LSA if streams enabled */ 3171 break; 3172 case UE_INTERRUPT: 3173 if (pipe->up_interval != USBD_DEFAULT_INTERVAL) 3174 ival = pipe->up_interval; 3175 3176 ival = xhci_bival2ival(ival, speed); 3177 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival); 3178 break; 3179 case UE_ISOCHRONOUS: 3180 if (pipe->up_interval != USBD_DEFAULT_INTERVAL) 3181 ival = pipe->up_interval; 3182 3183 /* xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6 */ 3184 if (speed == USB_SPEED_FULL) 3185 ival += 3; /* 1ms -> 125us */ 3186 ival--; 3187 cp[0] |= XHCI_EPCTX_0_IVAL_SET(ival); 3188 break; 3189 default: 3190 break; 3191 } 3192 DPRINTFN(4, "setting ival %ju MaxBurst %#jx", 3193 XHCI_EPCTX_0_IVAL_GET(cp[0]), XHCI_EPCTX_1_MAXB_GET(cp[1]), 0, 0); 3194 3195 /* rewind TR dequeue pointer in xHC */ 3196 /* can't use xhci_ep_get_dci() yet? 
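 * The ring base address and DCS=1 are written into endpoint context
 * dwords 2-3 (the TR Dequeue Pointer field) below.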
*/ 3197 *(uint64_t *)(&cp[2]) = htole64( 3198 xhci_ring_trbp(xs->xs_xr[dci], 0) | 3199 XHCI_EPCTX_2_DCS_SET(1)); 3200 3201 cp[0] = htole32(cp[0]); 3202 cp[1] = htole32(cp[1]); 3203 cp[4] = htole32(cp[4]); 3204 3205 /* rewind TR dequeue pointer in driver */ 3206 struct xhci_ring *xr = xs->xs_xr[dci]; 3207 mutex_enter(&xr->xr_lock); 3208 xhci_host_dequeue(xr); 3209 mutex_exit(&xr->xr_lock); 3210 3211 /* sync input contexts before they are read from memory */ 3212 usb_syncmem(&xs->xs_ic_dma, 0, sc->sc_pgsz, BUS_DMASYNC_PREWRITE); 3213 } 3214 3215 /* 3216 * Setup route string and roothub port of given device for slot context 3217 */ 3218 static void 3219 xhci_setup_route(struct usbd_pipe *pipe, uint32_t *cp) 3220 { 3221 struct xhci_softc * const sc = XHCI_PIPE2SC(pipe); 3222 struct usbd_device *dev = pipe->up_dev; 3223 struct usbd_port *up = dev->ud_powersrc; 3224 struct usbd_device *hub; 3225 struct usbd_device *adev; 3226 uint8_t rhport = 0; 3227 uint32_t route = 0; 3228 3229 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 3230 3231 /* Locate root hub port and Determine route string */ 3232 /* 4.3.3 route string does not include roothub port */ 3233 for (hub = dev; hub != NULL; hub = hub->ud_myhub) { 3234 uint32_t dep; 3235 3236 DPRINTFN(4, "hub %#jx depth %jd upport %#jx upportno %jd", 3237 (uintptr_t)hub, hub->ud_depth, (uintptr_t)hub->ud_powersrc, 3238 hub->ud_powersrc ? (uintptr_t)hub->ud_powersrc->up_portno : 3239 -1); 3240 3241 if (hub->ud_powersrc == NULL) 3242 break; 3243 dep = hub->ud_depth; 3244 if (dep == 0) 3245 break; 3246 rhport = hub->ud_powersrc->up_portno; 3247 if (dep > USB_HUB_MAX_DEPTH) 3248 continue; 3249 3250 route |= 3251 (rhport > UHD_SS_NPORTS_MAX ? UHD_SS_NPORTS_MAX : rhport) 3252 << ((dep - 1) * 4); 3253 } 3254 route = route >> 4; 3255 size_t bn = hub == sc->sc_bus.ub_roothub ? 0 : 1; 3256 3257 /* Locate port on upstream high speed hub */ 3258 for (adev = dev, hub = up->up_parent; 3259 hub != NULL && hub->ud_speed != USB_SPEED_HIGH; 3260 adev = hub, hub = hub->ud_myhub) 3261 ; 3262 if (hub) { 3263 int p; 3264 for (p = 1; p <= hub->ud_hub->uh_hubdesc.bNbrPorts; p++) { 3265 if (hub->ud_hub->uh_ports[p - 1].up_dev == adev) { 3266 dev->ud_myhsport = &hub->ud_hub->uh_ports[p - 1]; 3267 goto found; 3268 } 3269 } 3270 panic("%s: cannot find HS port", __func__); 3271 found: 3272 DPRINTFN(4, "high speed port %jd", p, 0, 0, 0); 3273 } else { 3274 dev->ud_myhsport = NULL; 3275 } 3276 3277 const size_t ctlrport = xhci_rhport2ctlrport(sc, bn, rhport); 3278 3279 DPRINTFN(4, "rhport %ju ctlrport %ju Route %05jx hub %#jx", rhport, 3280 ctlrport, route, (uintptr_t)hub); 3281 3282 cp[0] |= XHCI_SCTX_0_ROUTE_SET(route); 3283 cp[1] |= XHCI_SCTX_1_RH_PORT_SET(ctlrport); 3284 } 3285 3286 /* 3287 * Setup whether device is hub, whether device uses MTT, and 3288 * TT informations if it uses MTT. 3289 */ 3290 static void 3291 xhci_setup_tthub(struct usbd_pipe *pipe, uint32_t *cp) 3292 { 3293 struct usbd_device *dev = pipe->up_dev; 3294 struct usbd_port *myhsport = dev->ud_myhsport; 3295 usb_device_descriptor_t * const dd = &dev->ud_ddesc; 3296 uint32_t speed = dev->ud_speed; 3297 uint8_t rhaddr = dev->ud_bus->ub_rhaddr; 3298 uint8_t tthubslot, ttportnum; 3299 bool ishub; 3300 bool usemtt; 3301 3302 XHCIHIST_FUNC(); 3303 3304 /* 3305 * 6.2.2, Table 57-60, 6.2.2.1, 6.2.2.2 3306 * tthubslot: 3307 * This is the slot ID of parent HS hub 3308 * if LS/FS device is connected && connected through HS hub. 
3309 * This is 0 if device is not LS/FS device || 3310 * parent hub is not HS hub || 3311 * attached to root hub. 3312 * ttportnum: 3313 * This is the downstream facing port of parent HS hub 3314 * if LS/FS device is connected. 3315 * This is 0 if device is not LS/FS device || 3316 * parent hub is not HS hub || 3317 * attached to root hub. 3318 */ 3319 if (myhsport && 3320 myhsport->up_parent->ud_addr != rhaddr && 3321 (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL)) { 3322 ttportnum = myhsport->up_portno; 3323 tthubslot = myhsport->up_parent->ud_addr; 3324 } else { 3325 ttportnum = 0; 3326 tthubslot = 0; 3327 } 3328 XHCIHIST_CALLARGS("myhsport %#jx ttportnum=%jd tthubslot=%jd", 3329 (uintptr_t)myhsport, ttportnum, tthubslot, 0); 3330 3331 /* ishub is valid after reading UDESC_DEVICE */ 3332 ishub = (dd->bDeviceClass == UDCLASS_HUB); 3333 3334 /* dev->ud_hub is valid after reading UDESC_HUB */ 3335 if (ishub && dev->ud_hub) { 3336 usb_hub_descriptor_t *hd = &dev->ud_hub->uh_hubdesc; 3337 uint8_t ttt = 3338 __SHIFTOUT(UGETW(hd->wHubCharacteristics), UHD_TT_THINK); 3339 3340 cp[1] |= XHCI_SCTX_1_NUM_PORTS_SET(hd->bNbrPorts); 3341 cp[2] |= XHCI_SCTX_2_TT_THINK_TIME_SET(ttt); 3342 DPRINTFN(4, "nports=%jd ttt=%jd", hd->bNbrPorts, ttt, 0, 0); 3343 } 3344 3345 #define IS_MTTHUB(dd) \ 3346 ((dd)->bDeviceProtocol == UDPROTO_HSHUBMTT) 3347 3348 /* 3349 * MTT flag is set if 3350 * 1. this is HS hub && MTTs are supported and enabled; or 3351 * 2. this is LS or FS device && there is a parent HS hub where MTTs 3352 * are supported and enabled. 3353 * 3354 * XXX enabled is not tested yet 3355 */ 3356 if (ishub && speed == USB_SPEED_HIGH && IS_MTTHUB(dd)) 3357 usemtt = true; 3358 else if ((speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) && 3359 myhsport && 3360 myhsport->up_parent->ud_addr != rhaddr && 3361 IS_MTTHUB(&myhsport->up_parent->ud_ddesc)) 3362 usemtt = true; 3363 else 3364 usemtt = false; 3365 DPRINTFN(4, "class %ju proto %ju ishub %jd usemtt %jd", 3366 dd->bDeviceClass, dd->bDeviceProtocol, ishub, usemtt); 3367 3368 #undef IS_MTTHUB 3369 3370 cp[0] |= 3371 XHCI_SCTX_0_HUB_SET(ishub ? 1 : 0) | 3372 XHCI_SCTX_0_MTT_SET(usemtt ? 
1 : 0); 3373 cp[2] |= 3374 XHCI_SCTX_2_TT_HUB_SID_SET(tthubslot) | 3375 XHCI_SCTX_2_TT_PORT_NUM_SET(ttportnum); 3376 } 3377 3378 /* set up params for periodic endpoint */ 3379 static void 3380 xhci_setup_maxburst(struct usbd_pipe *pipe, uint32_t *cp) 3381 { 3382 struct xhci_pipe * const xpipe = (struct xhci_pipe *)pipe; 3383 struct usbd_device *dev = pipe->up_dev; 3384 usb_endpoint_descriptor_t * const ed = pipe->up_endpoint->ue_edesc; 3385 const uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 3386 usbd_desc_iter_t iter; 3387 const usb_cdc_descriptor_t *cdcd; 3388 uint32_t maxb = 0; 3389 uint16_t mps = UGETW(ed->wMaxPacketSize); 3390 uint8_t speed = dev->ud_speed; 3391 uint8_t mult = 0; 3392 uint8_t ep; 3393 3394 /* config desc is NULL when opening ep0 */ 3395 if (dev == NULL || dev->ud_cdesc == NULL) 3396 goto no_cdcd; 3397 cdcd = (const usb_cdc_descriptor_t *)usb_find_desc(dev, 3398 UDESC_INTERFACE, USBD_CDCSUBTYPE_ANY); 3399 if (cdcd == NULL) 3400 goto no_cdcd; 3401 usb_desc_iter_init(dev, &iter); 3402 iter.cur = (const void *)cdcd; 3403 3404 /* find endpoint_ss_comp desc for ep of this pipe */ 3405 for (ep = 0;;) { 3406 cdcd = (const usb_cdc_descriptor_t *)usb_desc_iter_next(&iter); 3407 if (cdcd == NULL) 3408 break; 3409 if (ep == 0 && cdcd->bDescriptorType == UDESC_ENDPOINT) { 3410 ep = ((const usb_endpoint_descriptor_t *)cdcd)-> 3411 bEndpointAddress; 3412 if (UE_GET_ADDR(ep) == 3413 UE_GET_ADDR(ed->bEndpointAddress)) { 3414 cdcd = (const usb_cdc_descriptor_t *) 3415 usb_desc_iter_next(&iter); 3416 break; 3417 } 3418 ep = 0; 3419 } 3420 } 3421 if (cdcd != NULL && cdcd->bDescriptorType == UDESC_ENDPOINT_SS_COMP) { 3422 const usb_endpoint_ss_comp_descriptor_t * esscd = 3423 (const usb_endpoint_ss_comp_descriptor_t *)cdcd; 3424 maxb = esscd->bMaxBurst; 3425 mult = UE_GET_SS_ISO_MULT(esscd->bmAttributes); 3426 } 3427 3428 no_cdcd: 3429 /* 6.2.3.4, 4.8.2.4 */ 3430 if (USB_IS_SS(speed)) { 3431 /* USB 3.1 9.6.6 */ 3432 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(mps); 3433 /* USB 3.1 9.6.7 */ 3434 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb); 3435 #ifdef notyet 3436 if (xfertype == UE_ISOCHRONOUS) { 3437 } 3438 if (XHCI_HCC2_LEC(sc->sc_hcc2) != 0) { 3439 /* use ESIT */ 3440 cp[4] |= XHCI_EPCTX_4_MAX_ESIT_PAYLOAD_SET(x); 3441 cp[0] |= XHCI_EPCTX_0_MAX_ESIT_PAYLOAD_HI_SET(x); 3442 3443 /* XXX if LEC = 1, set ESIT instead */ 3444 cp[0] |= XHCI_EPCTX_0_MULT_SET(0); 3445 } else { 3446 /* use ival */ 3447 } 3448 #endif 3449 } else { 3450 /* USB 2.0 9.6.6 */ 3451 cp[1] |= XHCI_EPCTX_1_MAXP_SIZE_SET(UE_GET_SIZE(mps)); 3452 3453 /* 6.2.3.4 */ 3454 if (speed == USB_SPEED_HIGH && 3455 (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)) { 3456 maxb = UE_GET_TRANS(mps); 3457 } else { 3458 /* LS/FS or HS CTRL or HS BULK */ 3459 maxb = 0; 3460 } 3461 cp[1] |= XHCI_EPCTX_1_MAXB_SET(maxb); 3462 } 3463 xpipe->xp_maxb = maxb + 1; 3464 xpipe->xp_mult = mult + 1; 3465 } 3466 3467 /* 3468 * Convert endpoint bInterval value to endpoint context interval value 3469 * for Interrupt pipe. 3470 * xHCI 6.2.3.6 Table 65, USB 2.0 9.6.6 3471 */ 3472 static uint32_t 3473 xhci_bival2ival(uint32_t ival, uint32_t speed) 3474 { 3475 if (speed == USB_SPEED_LOW || speed == USB_SPEED_FULL) { 3476 int i; 3477 3478 /* 3479 * round ival down to "the nearest base 2 multiple of 3480 * bInterval * 8". 3481 * bInterval is at most 255 as its type is uByte. 3482 * 255(ms) = 2040(x 125us) < 2^11, so start with 10. 
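 * e.g. bInterval 16 (16ms): 16 * 8 = 128 and 2^7 <= 128 < 2^8,
 * so the loop below yields ival 7, i.e. 128 * 125us = 16ms.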
3483 */ 3484 for (i = 10; i > 0; i--) { 3485 if ((ival * 8) >= (1 << i)) 3486 break; 3487 } 3488 ival = i; 3489 } else { 3490 /* Interval = bInterval-1 for SS/HS */ 3491 ival--; 3492 } 3493 3494 return ival; 3495 } 3496 3497 /* ----- */ 3498 3499 static void 3500 xhci_noop(struct usbd_pipe *pipe) 3501 { 3502 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 3503 } 3504 3505 /* 3506 * Process root hub request. 3507 */ 3508 static int 3509 xhci_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req, 3510 void *buf, int buflen) 3511 { 3512 struct xhci_softc * const sc = XHCI_BUS2SC(bus); 3513 usb_port_status_t ps; 3514 int l, totlen = 0; 3515 uint16_t len, value, index; 3516 int port, i; 3517 uint32_t v; 3518 3519 XHCIHIST_FUNC(); 3520 3521 if (sc->sc_dying) 3522 return -1; 3523 3524 size_t bn = bus == &sc->sc_bus ? 0 : 1; 3525 3526 len = UGETW(req->wLength); 3527 value = UGETW(req->wValue); 3528 index = UGETW(req->wIndex); 3529 3530 XHCIHIST_CALLARGS("rhreq: %04jx %04jx %04jx %04jx", 3531 req->bmRequestType | (req->bRequest << 8), value, index, len); 3532 3533 #define C(x,y) ((x) | ((y) << 8)) 3534 switch (C(req->bRequest, req->bmRequestType)) { 3535 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE): 3536 DPRINTFN(8, "getdesc: wValue=0x%04jx", value, 0, 0, 0); 3537 if (len == 0) 3538 break; 3539 switch (value) { 3540 #define sd ((usb_string_descriptor_t *)buf) 3541 case C(2, UDESC_STRING): 3542 /* Product */ 3543 totlen = usb_makestrdesc(sd, len, "xHCI root hub"); 3544 break; 3545 #undef sd 3546 default: 3547 /* default from usbroothub */ 3548 return buflen; 3549 } 3550 break; 3551 3552 /* Hub requests */ 3553 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE): 3554 break; 3555 /* Clear Port Feature request */ 3556 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): { 3557 const size_t cp = xhci_rhport2ctlrport(sc, bn, index); 3558 3559 DPRINTFN(4, "UR_CLEAR_PORT_FEAT bp=%jd feat=%jd bus=%jd cp=%jd", 3560 index, value, bn, cp); 3561 if (index < 1 || index > sc->sc_rhportcount[bn]) { 3562 return -1; 3563 } 3564 port = XHCI_PORTSC(cp); 3565 v = xhci_op_read_4(sc, port); 3566 DPRINTFN(4, "portsc=0x%08jx", v, 0, 0, 0); 3567 v &= ~XHCI_PS_CLEAR; 3568 switch (value) { 3569 case UHF_PORT_ENABLE: 3570 xhci_op_write_4(sc, port, v & ~XHCI_PS_PED); 3571 break; 3572 case UHF_PORT_SUSPEND: 3573 return -1; 3574 case UHF_PORT_POWER: 3575 break; 3576 case UHF_PORT_TEST: 3577 case UHF_PORT_INDICATOR: 3578 return -1; 3579 case UHF_C_PORT_CONNECTION: 3580 xhci_op_write_4(sc, port, v | XHCI_PS_CSC); 3581 break; 3582 case UHF_C_PORT_ENABLE: 3583 case UHF_C_PORT_SUSPEND: 3584 case UHF_C_PORT_OVER_CURRENT: 3585 return -1; 3586 case UHF_C_BH_PORT_RESET: 3587 xhci_op_write_4(sc, port, v | XHCI_PS_WRC); 3588 break; 3589 case UHF_C_PORT_RESET: 3590 xhci_op_write_4(sc, port, v | XHCI_PS_PRC); 3591 break; 3592 case UHF_C_PORT_LINK_STATE: 3593 xhci_op_write_4(sc, port, v | XHCI_PS_PLC); 3594 break; 3595 case UHF_C_PORT_CONFIG_ERROR: 3596 xhci_op_write_4(sc, port, v | XHCI_PS_CEC); 3597 break; 3598 default: 3599 return -1; 3600 } 3601 break; 3602 } 3603 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE): 3604 if (len == 0) 3605 break; 3606 if ((value & 0xff) != 0) { 3607 return -1; 3608 } 3609 usb_hub_descriptor_t hubd; 3610 3611 totlen = uimin(buflen, sizeof(hubd)); 3612 memcpy(&hubd, buf, totlen); 3613 hubd.bNbrPorts = sc->sc_rhportcount[bn]; 3614 USETW(hubd.wHubCharacteristics, UHD_PWR_NO_SWITCH); 3615 hubd.bPwrOn2PwrGood = 200; 3616 for (i = 0, l = sc->sc_rhportcount[bn]; l > 0; i++, l -= 8) { 3617 /* XXX can't find out? 
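 * All bits are left zero, i.e. every port is reported as removable.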
*/ 3618 hubd.DeviceRemovable[i++] = 0; 3619 } 3620 hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i; 3621 totlen = uimin(totlen, hubd.bDescLength); 3622 memcpy(buf, &hubd, totlen); 3623 break; 3624 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE): 3625 if (len != 4) { 3626 return -1; 3627 } 3628 memset(buf, 0, len); /* ? XXX */ 3629 totlen = len; 3630 break; 3631 /* Get Port Status request */ 3632 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): { 3633 const size_t cp = xhci_rhport2ctlrport(sc, bn, index); 3634 3635 DPRINTFN(8, "get port status bn=%jd i=%jd cp=%ju", 3636 bn, index, cp, 0); 3637 if (index < 1 || index > sc->sc_rhportcount[bn]) { 3638 DPRINTFN(5, "bad get port status: index=%jd bn=%jd " 3639 "portcount=%jd", 3640 index, bn, sc->sc_rhportcount[bn], 0); 3641 return -1; 3642 } 3643 if (len != 4) { 3644 DPRINTFN(5, "bad get port status: len %jd != 4", 3645 len, 0, 0, 0); 3646 return -1; 3647 } 3648 v = xhci_op_read_4(sc, XHCI_PORTSC(cp)); 3649 DPRINTFN(4, "getrhportsc %jd 0x%08jx", cp, v, 0, 0); 3650 i = xhci_xspeed2psspeed(XHCI_PS_SPEED_GET(v)); 3651 if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS; 3652 if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED; 3653 if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR; 3654 //if (v & XHCI_PS_SUSP) i |= UPS_SUSPEND; 3655 if (v & XHCI_PS_PR) i |= UPS_RESET; 3656 if (v & XHCI_PS_PP) { 3657 if (i & UPS_OTHER_SPEED) 3658 i |= UPS_PORT_POWER_SS; 3659 else 3660 i |= UPS_PORT_POWER; 3661 } 3662 if (i & UPS_OTHER_SPEED) 3663 i |= UPS_PORT_LS_SET(XHCI_PS_PLS_GET(v)); 3664 if (sc->sc_vendor_port_status) 3665 i = sc->sc_vendor_port_status(sc, v, i); 3666 USETW(ps.wPortStatus, i); 3667 i = 0; 3668 if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS; 3669 if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED; 3670 if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR; 3671 if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET; 3672 if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET; 3673 if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE; 3674 if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR; 3675 USETW(ps.wPortChange, i); 3676 totlen = uimin(len, sizeof(ps)); 3677 memcpy(buf, &ps, totlen); 3678 DPRINTFN(5, "get port status: wPortStatus %#jx wPortChange %#jx" 3679 " totlen %jd", 3680 UGETW(ps.wPortStatus), UGETW(ps.wPortChange), totlen, 0); 3681 break; 3682 } 3683 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE): 3684 return -1; 3685 case C(UR_SET_HUB_DEPTH, UT_WRITE_CLASS_DEVICE): 3686 break; 3687 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE): 3688 break; 3689 /* Set Port Feature request */ 3690 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): { 3691 int optval = (index >> 8) & 0xff; 3692 index &= 0xff; 3693 if (index < 1 || index > sc->sc_rhportcount[bn]) { 3694 return -1; 3695 } 3696 3697 const size_t cp = xhci_rhport2ctlrport(sc, bn, index); 3698 3699 port = XHCI_PORTSC(cp); 3700 v = xhci_op_read_4(sc, port); 3701 DPRINTFN(4, "index %jd cp %jd portsc=0x%08jx", index, cp, v, 0); 3702 v &= ~XHCI_PS_CLEAR; 3703 switch (value) { 3704 case UHF_PORT_ENABLE: 3705 xhci_op_write_4(sc, port, v | XHCI_PS_PED); 3706 break; 3707 case UHF_PORT_SUSPEND: 3708 /* XXX suspend */ 3709 break; 3710 case UHF_PORT_RESET: 3711 v &= ~(XHCI_PS_PED | XHCI_PS_PR); 3712 xhci_op_write_4(sc, port, v | XHCI_PS_PR); 3713 /* Wait for reset to complete. 
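 * The port is read back below and PR is cleared by hand if the
 * controller has not finished the reset by then.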
*/ 3714 usb_delay_ms(&sc->sc_bus, USB_PORT_ROOT_RESET_DELAY); 3715 if (sc->sc_dying) { 3716 return -1; 3717 } 3718 v = xhci_op_read_4(sc, port); 3719 if (v & XHCI_PS_PR) { 3720 xhci_op_write_4(sc, port, v & ~XHCI_PS_PR); 3721 usb_delay_ms(&sc->sc_bus, 10); 3722 /* XXX */ 3723 } 3724 break; 3725 case UHF_PORT_POWER: 3726 /* XXX power control */ 3727 break; 3728 /* XXX more */ 3729 case UHF_C_PORT_RESET: 3730 xhci_op_write_4(sc, port, v | XHCI_PS_PRC); 3731 break; 3732 case UHF_PORT_U1_TIMEOUT: 3733 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) { 3734 return -1; 3735 } 3736 port = XHCI_PORTPMSC(cp); 3737 v = xhci_op_read_4(sc, port); 3738 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx", 3739 index, cp, v, 0); 3740 v &= ~XHCI_PM3_U1TO_SET(0xff); 3741 v |= XHCI_PM3_U1TO_SET(optval); 3742 xhci_op_write_4(sc, port, v); 3743 break; 3744 case UHF_PORT_U2_TIMEOUT: 3745 if (XHCI_PS_SPEED_GET(v) < XHCI_PS_SPEED_SS) { 3746 return -1; 3747 } 3748 port = XHCI_PORTPMSC(cp); 3749 v = xhci_op_read_4(sc, port); 3750 DPRINTFN(4, "index %jd cp %jd portpmsc=0x%08jx", 3751 index, cp, v, 0); 3752 v &= ~XHCI_PM3_U2TO_SET(0xff); 3753 v |= XHCI_PM3_U2TO_SET(optval); 3754 xhci_op_write_4(sc, port, v); 3755 break; 3756 default: 3757 return -1; 3758 } 3759 } 3760 break; 3761 case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER): 3762 case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER): 3763 case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER): 3764 case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER): 3765 break; 3766 default: 3767 /* default from usbroothub */ 3768 return buflen; 3769 } 3770 3771 return totlen; 3772 } 3773 3774 /* root hub interrupt */ 3775 3776 static usbd_status 3777 xhci_root_intr_transfer(struct usbd_xfer *xfer) 3778 { 3779 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 3780 usbd_status err; 3781 3782 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 3783 3784 /* Insert last in queue. */ 3785 mutex_enter(&sc->sc_lock); 3786 err = usb_insert_transfer(xfer); 3787 mutex_exit(&sc->sc_lock); 3788 if (err) 3789 return err; 3790 3791 /* Pipe isn't running, start first */ 3792 return xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 3793 } 3794 3795 /* Wait for roothub port status/change */ 3796 static usbd_status 3797 xhci_root_intr_start(struct usbd_xfer *xfer) 3798 { 3799 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 3800 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1; 3801 const bool polling = xhci_polling_p(sc); 3802 3803 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 3804 3805 if (sc->sc_dying) 3806 return USBD_IOERROR; 3807 3808 if (!polling) 3809 mutex_enter(&sc->sc_lock); 3810 KASSERT(sc->sc_intrxfer[bn] == NULL); 3811 sc->sc_intrxfer[bn] = xfer; 3812 xfer->ux_status = USBD_IN_PROGRESS; 3813 if (!polling) 3814 mutex_exit(&sc->sc_lock); 3815 3816 return USBD_IN_PROGRESS; 3817 } 3818 3819 static void 3820 xhci_root_intr_abort(struct usbd_xfer *xfer) 3821 { 3822 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 3823 const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1; 3824 3825 XHCIHIST_FUNC(); XHCIHIST_CALLED(); 3826 3827 KASSERT(mutex_owned(&sc->sc_lock)); 3828 KASSERT(xfer->ux_pipe->up_intrxfer == xfer); 3829 3830 /* If xfer has already completed, nothing to do here. */ 3831 if (sc->sc_intrxfer[bn] == NULL) 3832 return; 3833 3834 /* 3835 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer. 3836 * Cancel it. 
/* Wait for roothub port status/change */
static usbd_status
xhci_root_intr_start(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;
	const bool polling = xhci_polling_p(sc);

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	if (sc->sc_dying)
		return USBD_IOERROR;

	if (!polling)
		mutex_enter(&sc->sc_lock);
	KASSERT(sc->sc_intrxfer[bn] == NULL);
	sc->sc_intrxfer[bn] = xfer;
	xfer->ux_status = USBD_IN_PROGRESS;
	if (!polling)
		mutex_exit(&sc->sc_lock);

	return USBD_IN_PROGRESS;
}

static void
xhci_root_intr_abort(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	KASSERT(mutex_owned(&sc->sc_lock));
	KASSERT(xfer->ux_pipe->up_intrxfer == xfer);

	/* If xfer has already completed, nothing to do here. */
	if (sc->sc_intrxfer[bn] == NULL)
		return;

	/*
	 * Otherwise, sc->sc_intrxfer[bn] had better be this transfer.
	 * Cancel it.
	 */
	KASSERT(sc->sc_intrxfer[bn] == xfer);
	xfer->ux_status = USBD_CANCELLED;
	usb_transfer_complete(xfer);
}

static void
xhci_root_intr_close(struct usbd_pipe *pipe)
{
	struct xhci_softc * const sc __diagused = XHCI_PIPE2SC(pipe);
	const struct usbd_xfer *xfer __diagused = pipe->up_intrxfer;
	const size_t bn __diagused = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	KASSERT(mutex_owned(&sc->sc_lock));

	/*
	 * Caller must guarantee the xfer has completed first, by
	 * closing the pipe only after normal completion or an abort.
	 */
	KASSERT(sc->sc_intrxfer[bn] == NULL);
}

static void
xhci_root_intr_done(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	const size_t bn = XHCI_XFER2BUS(xfer) == &sc->sc_bus ? 0 : 1;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	KASSERT(mutex_owned(&sc->sc_lock));

	/* Claim the xfer so it doesn't get completed again. */
	KASSERT(sc->sc_intrxfer[bn] == xfer);
	KASSERT(xfer->ux_status != USBD_IN_PROGRESS);
	sc->sc_intrxfer[bn] = NULL;
}

/* -------------- */
/* device control */

static usbd_status
xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	usbd_status err;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mutex_exit(&sc->sc_lock);
	if (err)
		return err;

	/* Pipe isn't running, start first */
	return xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
}

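/*
 * A control transfer is queued below as a single TD: a Setup Stage TRB
 * that carries the 8-byte request inline (the IDT bit marks it as
 * immediate data rather than a pointer), an optional Data Stage TRB
 * covering wLength bytes, and a Status Stage TRB whose direction is the
 * opposite of the data stage (or IN when there is no data stage).
 */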
static usbd_status
xhci_device_ctrl_start(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
	const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
	struct xhci_ring * const tr = xs->xs_xr[dci];
	struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
	usb_device_request_t * const req = &xfer->ux_request;
	const bool isread = usbd_xfer_isread(xfer);
	const uint32_t len = UGETW(req->wLength);
	usb_dma_t * const dma = &xfer->ux_dmabuf;
	uint64_t parameter;
	uint32_t status;
	uint32_t control;
	u_int i;
	const bool polling = xhci_polling_p(sc);

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("req: %04jx %04jx %04jx %04jx",
	    req->bmRequestType | (req->bRequest << 8), UGETW(req->wValue),
	    UGETW(req->wIndex), UGETW(req->wLength));

	/* we rely on the bottom bits for extra info */
	KASSERTMSG(((uintptr_t)xfer & 0x3) == 0x0, "xfer %zx",
	    (uintptr_t) xfer);

	KASSERT((xfer->ux_rqflags & URQ_REQUEST) != 0);

	i = 0;

	/* setup phase */
	parameter = le64dec(req); /* to keep USB endian after xhci_trb_put() */
	status = XHCI_TRB_2_IRQ_SET(0) | XHCI_TRB_2_BYTES_SET(sizeof(*req));
	control = ((len == 0) ? XHCI_TRB_3_TRT_NONE :
	    (isread ? XHCI_TRB_3_TRT_IN : XHCI_TRB_3_TRT_OUT)) |
	    XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_SETUP_STAGE) |
	    XHCI_TRB_3_IDT_BIT;
	xhci_xfer_put_trb(xx, i++, parameter, status, control);

	if (len != 0) {
		/* data phase */
		parameter = DMAADDR(dma, 0);
		KASSERTMSG(len <= 0x10000, "len %d", len);
		status = XHCI_TRB_2_IRQ_SET(0) |
		    XHCI_TRB_2_TDSZ_SET(0) |
		    XHCI_TRB_2_BYTES_SET(len);
		control = (isread ? XHCI_TRB_3_DIR_IN : 0) |
		    XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_DATA_STAGE) |
		    (isread ? XHCI_TRB_3_ISP_BIT : 0) |
		    XHCI_TRB_3_IOC_BIT;
		xhci_xfer_put_trb(xx, i++, parameter, status, control);

		usb_syncmem(dma, 0, len,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	}

	parameter = 0;
	status = XHCI_TRB_2_IRQ_SET(0);
	/* the status stage direction is opposite to the data stage */
	control = ((isread && (len > 0)) ? 0 : XHCI_TRB_3_DIR_IN) |
	    XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_STATUS_STAGE) |
	    XHCI_TRB_3_IOC_BIT;
	xhci_xfer_put_trb(xx, i++, parameter, status, control);

	if (!polling)
		mutex_enter(&tr->xr_lock);
	xhci_ring_put_xfer(sc, tr, xx, i);
	if (!polling)
		mutex_exit(&tr->xr_lock);

	if (!polling)
		mutex_enter(&sc->sc_lock);
	xfer->ux_status = USBD_IN_PROGRESS;
	xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
	usbd_xfer_schedule_timeout(xfer);
	if (!polling)
		mutex_exit(&sc->sc_lock);

	return USBD_IN_PROGRESS;
}

static void
xhci_device_ctrl_done(struct usbd_xfer *xfer)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();
	usb_device_request_t *req = &xfer->ux_request;
	int len = UGETW(req->wLength);
	int rd = req->bmRequestType & UT_READ;

	if (len)
		usb_syncmem(&xfer->ux_dmabuf, 0, len,
		    rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
}

static void
xhci_device_ctrl_abort(struct usbd_xfer *xfer)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	usbd_xfer_abort(xfer);
}

static void
xhci_device_ctrl_close(struct usbd_pipe *pipe)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	xhci_close_pipe(pipe);
}

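/*
 * Illustrative sketch only (not compiled): roughly how a client driver
 * above usbdi ends up in xhci_device_ctrl_start(), here fetching the
 * device descriptor.  "dev" and "dd" are assumptions local to this
 * sketch, not part of this file.
 */
#if 0
	usb_device_request_t req;
	usb_device_descriptor_t dd;

	req.bmRequestType = UT_READ_DEVICE;
	req.bRequest = UR_GET_DESCRIPTOR;
	USETW2(req.wValue, UDESC_DEVICE, 0);
	USETW(req.wIndex, 0);
	USETW(req.wLength, USB_DEVICE_DESCRIPTOR_SIZE);
	(void)usbd_do_request(dev, &req, &dd);
#endif
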
/* ------------------ */
/* device isochronous */

static usbd_status
xhci_device_isoc_transfer(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	usbd_status err;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mutex_exit(&sc->sc_lock);
	if (err)
		return err;

	return xhci_device_isoc_enter(xfer);
}

static usbd_status
xhci_device_isoc_enter(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
	const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
	struct xhci_ring * const tr = xs->xs_xr[dci];
	struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
	struct xhci_pipe * const xpipe = (struct xhci_pipe *)xfer->ux_pipe;
	uint32_t len = xfer->ux_length;
	usb_dma_t * const dma = &xfer->ux_dmabuf;
	uint64_t parameter;
	uint32_t status;
	uint32_t control;
	uint32_t mfindex;
	uint32_t offs;
	int i, ival;
	const bool polling = xhci_polling_p(sc);
	const uint16_t MPS = UGETW(xfer->ux_pipe->up_endpoint->ue_edesc->wMaxPacketSize);
	const uint16_t mps = UE_GET_SIZE(MPS);
	const uint8_t maxb = xpipe->xp_maxb;
	u_int tdpc, tbc, tlbpc;

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
	    (uintptr_t)xfer, xs->xs_idx, dci, 0);

	if (sc->sc_dying)
		return USBD_IOERROR;

	KASSERT(xfer->ux_nframes != 0 && xfer->ux_frlengths);
	KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);

	const bool isread = usbd_xfer_isread(xfer);
	if (xfer->ux_length)
		usb_syncmem(dma, 0, xfer->ux_length,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	ival = xfer->ux_pipe->up_endpoint->ue_edesc->bInterval;
	if (ival >= 1 && ival <= 16)
		ival = 1 << (ival - 1);
	else
		ival = 1; /* fake something up */

	if (xpipe->xp_isoc_next == -1) {
		mfindex = xhci_rt_read_4(sc, XHCI_MFINDEX);
		DPRINTF("mfindex %jx", (uintmax_t)mfindex, 0, 0, 0);
		mfindex = XHCI_MFINDEX_GET(mfindex + 1);
		mfindex /= USB_UFRAMES_PER_FRAME;
		mfindex += 7; /* 7 frames is max possible IST */
		xpipe->xp_isoc_next = roundup2(mfindex, ival);
	}

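	/*
	 * For each frame, compute the isoch scheduling fields from the
	 * frame length: tdpc is the number of max-packet-size packets in
	 * the TD, tbc is the number of bursts minus one, and tlbpc is the
	 * number of packets in the last burst minus one.  For example,
	 * with mps = 1024, maxb = 3 and a 5120-byte frame: tdpc = 5,
	 * tbc = 1 (two bursts) and tlbpc = 1 (two packets in the last
	 * burst).
	 */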
	offs = 0;
	for (i = 0; i < xfer->ux_nframes; i++) {
		len = xfer->ux_frlengths[i];

		tdpc = howmany(len, mps);
		tbc = howmany(tdpc, maxb) - 1;
		tlbpc = tdpc % maxb;
		tlbpc = tlbpc ? tlbpc - 1 : maxb - 1;

		KASSERTMSG(len <= 0x10000, "len %d", len);
		parameter = DMAADDR(dma, offs);
		status = XHCI_TRB_2_IRQ_SET(0) |
		    XHCI_TRB_2_TDSZ_SET(0) |
		    XHCI_TRB_2_BYTES_SET(len);
		control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_ISOCH) |
		    (isread ? XHCI_TRB_3_ISP_BIT : 0) |
		    XHCI_TRB_3_TBC_SET(tbc) |
		    XHCI_TRB_3_TLBPC_SET(tlbpc) |
		    XHCI_TRB_3_IOC_BIT;
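		/*
		 * With Contiguous Frame ID capability (CFC) the TD is
		 * given an explicit frame number; otherwise SIA lets the
		 * controller start the TD as soon as possible.
		 */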
4195 */ 4196 return xhci_device_bulk_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 4197 } 4198 4199 static usbd_status 4200 xhci_device_bulk_start(struct usbd_xfer *xfer) 4201 { 4202 struct xhci_softc * const sc = XHCI_XFER2SC(xfer); 4203 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4204 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4205 struct xhci_ring * const tr = xs->xs_xr[dci]; 4206 struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer); 4207 const uint32_t len = xfer->ux_length; 4208 usb_dma_t * const dma = &xfer->ux_dmabuf; 4209 uint64_t parameter; 4210 uint32_t status; 4211 uint32_t control; 4212 u_int i = 0; 4213 const bool polling = xhci_polling_p(sc); 4214 4215 XHCIHIST_FUNC(); 4216 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4217 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4218 4219 if (sc->sc_dying) 4220 return USBD_IOERROR; 4221 4222 KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0); 4223 4224 parameter = DMAADDR(dma, 0); 4225 const bool isread = usbd_xfer_isread(xfer); 4226 if (len) 4227 usb_syncmem(dma, 0, len, 4228 isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 4229 4230 /* 4231 * XXX: (dsl) The physical buffer must not cross a 64k boundary. 4232 * If the user supplied buffer crosses such a boundary then 2 4233 * (or more) TRB should be used. 4234 * If multiple TRB are used the td_size field must be set correctly. 4235 * For v1.0 devices (like ivy bridge) this is the number of usb data 4236 * blocks needed to complete the transfer. 4237 * Setting it to 1 in the last TRB causes an extra zero-length 4238 * data block be sent. 4239 * The earlier documentation differs, I don't know how it behaves. 4240 */ 4241 KASSERTMSG(len <= 0x10000, "len %d", len); 4242 status = XHCI_TRB_2_IRQ_SET(0) | 4243 XHCI_TRB_2_TDSZ_SET(0) | 4244 XHCI_TRB_2_BYTES_SET(len); 4245 control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) | 4246 (isread ? XHCI_TRB_3_ISP_BIT : 0) | 4247 XHCI_TRB_3_IOC_BIT; 4248 xhci_xfer_put_trb(xx, i++, parameter, status, control); 4249 4250 if (!polling) 4251 mutex_enter(&tr->xr_lock); 4252 xhci_ring_put_xfer(sc, tr, xx, i); 4253 if (!polling) 4254 mutex_exit(&tr->xr_lock); 4255 4256 if (!polling) 4257 mutex_enter(&sc->sc_lock); 4258 xfer->ux_status = USBD_IN_PROGRESS; 4259 xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci); 4260 usbd_xfer_schedule_timeout(xfer); 4261 if (!polling) 4262 mutex_exit(&sc->sc_lock); 4263 4264 return USBD_IN_PROGRESS; 4265 } 4266 4267 static void 4268 xhci_device_bulk_done(struct usbd_xfer *xfer) 4269 { 4270 #ifdef USB_DEBUG 4271 struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv; 4272 const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc); 4273 #endif 4274 const bool isread = usbd_xfer_isread(xfer); 4275 4276 XHCIHIST_FUNC(); 4277 XHCIHIST_CALLARGS("%#jx slot %ju dci %ju", 4278 (uintptr_t)xfer, xs->xs_idx, dci, 0); 4279 4280 usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length, 4281 isread ? 
	KASSERTMSG(len <= 0x10000, "len %d", len);
	status = XHCI_TRB_2_IRQ_SET(0) |
	    XHCI_TRB_2_TDSZ_SET(0) |
	    XHCI_TRB_2_BYTES_SET(len);
	control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
	    (isread ? XHCI_TRB_3_ISP_BIT : 0) |
	    XHCI_TRB_3_IOC_BIT;
	xhci_xfer_put_trb(xx, i++, parameter, status, control);

	if (!polling)
		mutex_enter(&tr->xr_lock);
	xhci_ring_put_xfer(sc, tr, xx, i);
	if (!polling)
		mutex_exit(&tr->xr_lock);

	if (!polling)
		mutex_enter(&sc->sc_lock);
	xfer->ux_status = USBD_IN_PROGRESS;
	xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
	usbd_xfer_schedule_timeout(xfer);
	if (!polling)
		mutex_exit(&sc->sc_lock);

	return USBD_IN_PROGRESS;
}

static void
xhci_device_bulk_done(struct usbd_xfer *xfer)
{
#ifdef USB_DEBUG
	struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
	const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
#endif
	const bool isread = usbd_xfer_isread(xfer);

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
	    (uintptr_t)xfer, xs->xs_idx, dci, 0);

	usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
	    isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
}

static void
xhci_device_bulk_abort(struct usbd_xfer *xfer)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	usbd_xfer_abort(xfer);
}

static void
xhci_device_bulk_close(struct usbd_pipe *pipe)
{
	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	xhci_close_pipe(pipe);
}

/* ---------------- */
/* device interrupt */

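/*
 * Interrupt transfers are queued as single Normal TRBs, just like bulk;
 * the service interval is not encoded per TRB but comes from the
 * endpoint context that was programmed when the pipe was configured.
 */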
static usbd_status
xhci_device_intr_transfer(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	usbd_status err;

	XHCIHIST_FUNC(); XHCIHIST_CALLED();

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mutex_exit(&sc->sc_lock);
	if (err)
		return err;

	/*
	 * Pipe isn't running (otherwise err would be USBD_IN_PROGRESS),
	 * so start it first.
	 */
	return xhci_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
}

static usbd_status
xhci_device_intr_start(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc = XHCI_XFER2SC(xfer);
	struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
	const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
	struct xhci_ring * const tr = xs->xs_xr[dci];
	struct xhci_xfer * const xx = XHCI_XFER2XXFER(xfer);
	const uint32_t len = xfer->ux_length;
	const bool polling = xhci_polling_p(sc);
	usb_dma_t * const dma = &xfer->ux_dmabuf;
	uint64_t parameter;
	uint32_t status;
	uint32_t control;
	u_int i = 0;

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
	    (uintptr_t)xfer, xs->xs_idx, dci, 0);

	if (sc->sc_dying)
		return USBD_IOERROR;

	KASSERT((xfer->ux_rqflags & URQ_REQUEST) == 0);

	const bool isread = usbd_xfer_isread(xfer);
	if (len)
		usb_syncmem(dma, 0, len,
		    isread ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	parameter = DMAADDR(dma, 0);
	KASSERTMSG(len <= 0x10000, "len %d", len);
	status = XHCI_TRB_2_IRQ_SET(0) |
	    XHCI_TRB_2_TDSZ_SET(0) |
	    XHCI_TRB_2_BYTES_SET(len);
	control = XHCI_TRB_3_TYPE_SET(XHCI_TRB_TYPE_NORMAL) |
	    (isread ? XHCI_TRB_3_ISP_BIT : 0) | XHCI_TRB_3_IOC_BIT;
	xhci_xfer_put_trb(xx, i++, parameter, status, control);

	if (!polling)
		mutex_enter(&tr->xr_lock);
	xhci_ring_put_xfer(sc, tr, xx, i);
	if (!polling)
		mutex_exit(&tr->xr_lock);

	if (!polling)
		mutex_enter(&sc->sc_lock);
	xfer->ux_status = USBD_IN_PROGRESS;
	xhci_db_write_4(sc, XHCI_DOORBELL(xs->xs_idx), dci);
	usbd_xfer_schedule_timeout(xfer);
	if (!polling)
		mutex_exit(&sc->sc_lock);

	return USBD_IN_PROGRESS;
}

static void
xhci_device_intr_done(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);
#ifdef USB_DEBUG
	struct xhci_slot * const xs = xfer->ux_pipe->up_dev->ud_hcpriv;
	const u_int dci = xhci_ep_get_dci(xfer->ux_pipe->up_endpoint->ue_edesc);
#endif
	const bool isread = usbd_xfer_isread(xfer);

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx slot %ju dci %ju",
	    (uintptr_t)xfer, xs->xs_idx, dci, 0);

	KASSERT(xhci_polling_p(sc) || mutex_owned(&sc->sc_lock));

	usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_length,
	    isread ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
}

static void
xhci_device_intr_abort(struct usbd_xfer *xfer)
{
	struct xhci_softc * const sc __diagused = XHCI_XFER2SC(xfer);

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx", (uintptr_t)xfer, 0, 0, 0);

	KASSERT(mutex_owned(&sc->sc_lock));
	usbd_xfer_abort(xfer);
}

static void
xhci_device_intr_close(struct usbd_pipe *pipe)
{
	//struct xhci_softc * const sc = XHCI_PIPE2SC(pipe);

	XHCIHIST_FUNC();
	XHCIHIST_CALLARGS("%#jx", (uintptr_t)pipe, 0, 0, 0);

	xhci_close_pipe(pipe);
}