/*	$OpenBSD: xhci.c,v 1.126 2022/07/15 07:52:06 kettenis Exp $ */

/*
 * Copyright (c) 2014-2015 Martin Pieuchot
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/timeout.h>
#include <sys/pool.h>
#include <sys/endian.h>
#include <sys/rwlock.h>

#include <machine/bus.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>

#include <dev/usb/xhcireg.h>
#include <dev/usb/xhcivar.h>

struct cfdriver xhci_cd = {
	NULL, "xhci", DV_DULL, CD_SKIPHIBERNATE
};

#ifdef XHCI_DEBUG
#define DPRINTF(x)	do { if (xhcidebug) printf x; } while(0)
#define DPRINTFN(n,x)	do { if (xhcidebug>(n)) printf x; } while (0)
int xhcidebug = 3;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define DEVNAME(sc)	((sc)->sc_bus.bdev.dv_xname)

#define TRBOFF(r, trb)	((char *)(trb) - (char *)((r)->trbs))
#define DEQPTR(r)	((r).dma.paddr + (sizeof(struct xhci_trb) * (r).index))

struct pool *xhcixfer;

struct xhci_pipe {
	struct usbd_pipe	pipe;

	uint8_t			dci;
	uint8_t			slot;	/* Device slot ID */
	struct xhci_ring	ring;

	/*
	 * XXX used to pass the xfer pointer back to the
	 * interrupt routine, better way?
	 */
	struct usbd_xfer	*pending_xfers[XHCI_MAX_XFER];
	struct usbd_xfer	*aborted_xfer;
	int			 halted;
	size_t			 free_trbs;
	int			 skip;
#define TRB_PROCESSED_NO	0
#define TRB_PROCESSED_YES	1
#define TRB_PROCESSED_SHORT	2
	uint8_t			 trb_processed[XHCI_MAX_XFER];
};

int	xhci_reset(struct xhci_softc *);
int	xhci_intr1(struct xhci_softc *);
void	xhci_event_dequeue(struct xhci_softc *);
void	xhci_event_xfer(struct xhci_softc *, uint64_t, uint32_t, uint32_t);
int	xhci_event_xfer_generic(struct xhci_softc *, struct usbd_xfer *,
	    struct xhci_pipe *, uint32_t, int, uint8_t, uint8_t, uint8_t);
int	xhci_event_xfer_isoc(struct usbd_xfer *, struct xhci_pipe *,
	    uint32_t, int, uint8_t);
void	xhci_event_command(struct xhci_softc *, uint64_t);
void	xhci_event_port_change(struct xhci_softc *, uint64_t, uint32_t);
int	xhci_pipe_init(struct xhci_softc *, struct usbd_pipe *);
int	xhci_context_setup(struct xhci_softc *, struct usbd_pipe *);
int	xhci_scratchpad_alloc(struct xhci_softc *, int);
void	xhci_scratchpad_free(struct xhci_softc *);
int	xhci_softdev_alloc(struct xhci_softc *, uint8_t);
void	xhci_softdev_free(struct xhci_softc *, uint8_t);
int	xhci_ring_alloc(struct xhci_softc *, struct xhci_ring *, size_t,
	    size_t);
void	xhci_ring_free(struct xhci_softc *, struct xhci_ring *);
void	xhci_ring_reset(struct xhci_softc *, struct xhci_ring *);
struct	xhci_trb *xhci_ring_consume(struct xhci_softc *, struct xhci_ring *);
struct	xhci_trb *xhci_ring_produce(struct xhci_softc *, struct xhci_ring *);

struct	xhci_trb *xhci_xfer_get_trb(struct xhci_softc *, struct usbd_xfer*,
	    uint8_t *, int);
void	xhci_xfer_done(struct usbd_xfer *xfer);
/* xHCI command helpers. */
int	xhci_command_submit(struct xhci_softc *, struct xhci_trb *, int);
int	xhci_command_abort(struct xhci_softc *);

void	xhci_cmd_reset_ep_async(struct xhci_softc *, uint8_t, uint8_t);
void	xhci_cmd_set_tr_deq_async(struct xhci_softc *, uint8_t, uint8_t, uint64_t);
int	xhci_cmd_configure_ep(struct xhci_softc *, uint8_t, uint64_t);
int	xhci_cmd_stop_ep(struct xhci_softc *, uint8_t, uint8_t);
int	xhci_cmd_slot_control(struct xhci_softc *, uint8_t *, int);
int	xhci_cmd_set_address(struct xhci_softc *, uint8_t, uint64_t, uint32_t);
#ifdef XHCI_DEBUG
int	xhci_cmd_noop(struct xhci_softc *);
#endif

/* XXX should be part of the Bus interface. */
void	xhci_abort_xfer(struct usbd_xfer *, usbd_status);
void	xhci_pipe_close(struct usbd_pipe *);
void	xhci_noop(struct usbd_xfer *);

void	xhci_timeout(void *);
void	xhci_timeout_task(void *);

/*
 * USBD Bus Interface.
 */
usbd_status	xhci_pipe_open(struct usbd_pipe *);
int		xhci_setaddr(struct usbd_device *, int);
void		xhci_softintr(void *);
void		xhci_poll(struct usbd_bus *);
struct usbd_xfer *xhci_allocx(struct usbd_bus *);
void		xhci_freex(struct usbd_bus *, struct usbd_xfer *);

usbd_status	xhci_root_ctrl_transfer(struct usbd_xfer *);
usbd_status	xhci_root_ctrl_start(struct usbd_xfer *);

usbd_status	xhci_root_intr_transfer(struct usbd_xfer *);
usbd_status	xhci_root_intr_start(struct usbd_xfer *);
void		xhci_root_intr_abort(struct usbd_xfer *);
void		xhci_root_intr_done(struct usbd_xfer *);

usbd_status	xhci_device_ctrl_transfer(struct usbd_xfer *);
usbd_status	xhci_device_ctrl_start(struct usbd_xfer *);
void		xhci_device_ctrl_abort(struct usbd_xfer *);

usbd_status	xhci_device_generic_transfer(struct usbd_xfer *);
usbd_status	xhci_device_generic_start(struct usbd_xfer *);
void		xhci_device_generic_abort(struct usbd_xfer *);
void		xhci_device_generic_done(struct usbd_xfer *);

usbd_status	xhci_device_isoc_transfer(struct usbd_xfer *);
usbd_status	xhci_device_isoc_start(struct usbd_xfer *);

#define XHCI_INTR_ENDPT 1

const struct usbd_bus_methods xhci_bus_methods = {
	.open_pipe = xhci_pipe_open,
	.dev_setaddr = xhci_setaddr,
	.soft_intr = xhci_softintr,
	.do_poll = xhci_poll,
	.allocx = xhci_allocx,
	.freex = xhci_freex,
};

const struct usbd_pipe_methods xhci_root_ctrl_methods = {
	.transfer = xhci_root_ctrl_transfer,
	.start = xhci_root_ctrl_start,
	.abort = xhci_noop,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

const struct usbd_pipe_methods xhci_root_intr_methods = {
	.transfer = xhci_root_intr_transfer,
	.start = xhci_root_intr_start,
	.abort = xhci_root_intr_abort,
	.close = xhci_pipe_close,
	.done = xhci_root_intr_done,
};

const struct usbd_pipe_methods xhci_device_ctrl_methods = {
	.transfer = xhci_device_ctrl_transfer,
	.start = xhci_device_ctrl_start,
	.abort = xhci_device_ctrl_abort,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

const struct usbd_pipe_methods xhci_device_intr_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};

const struct usbd_pipe_methods xhci_device_bulk_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};

const struct usbd_pipe_methods xhci_device_isoc_methods = {
	.transfer = xhci_device_isoc_transfer,
	.start = xhci_device_isoc_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

#ifdef XHCI_DEBUG
static void
xhci_dump_trb(struct xhci_trb *trb)
{
	printf("trb=%p (0x%016llx 0x%08x 0x%b)\n", trb,
	    (long long)letoh64(trb->trb_paddr), letoh32(trb->trb_status),
	    (int)letoh32(trb->trb_flags), XHCI_TRB_FLAGS_BITMASK);
}
#endif

int	usbd_dma_contig_alloc(struct usbd_bus *, struct usbd_dma_info *,
	    void **, bus_size_t, bus_size_t, bus_size_t);
void	usbd_dma_contig_free(struct usbd_bus *, struct usbd_dma_info *);

int
usbd_dma_contig_alloc(struct usbd_bus *bus, struct usbd_dma_info *dma,
    void **kvap,
    bus_size_t size, bus_size_t alignment, bus_size_t boundary)
{
	int error;

	dma->tag = bus->dmatag;
	dma->size = size;

	error = bus_dmamap_create(dma->tag, size, 1, size, boundary,
	    BUS_DMA_NOWAIT, &dma->map);
	if (error != 0)
		return (error);

	error = bus_dmamem_alloc(dma->tag, size, alignment, boundary, &dma->seg,
	    1, &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
	if (error != 0)
		goto destroy;

	error = bus_dmamem_map(dma->tag, &dma->seg, 1, size, &dma->vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error != 0)
		goto free;

	error = bus_dmamap_load_raw(dma->tag, dma->map, &dma->seg, 1, size,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto unmap;

	bus_dmamap_sync(dma->tag, dma->map, 0, size, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	dma->paddr = dma->map->dm_segs[0].ds_addr;
	if (kvap != NULL)
		*kvap = dma->vaddr;

	return (0);

unmap:
	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
free:
	bus_dmamem_free(dma->tag, &dma->seg, 1);
destroy:
	bus_dmamap_destroy(dma->tag, dma->map);
	return (error);
}

void
usbd_dma_contig_free(struct usbd_bus *bus, struct usbd_dma_info *dma)
{
	if (dma->map != NULL) {
		bus_dmamap_sync(bus->dmatag, dma->map, 0, dma->size,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(bus->dmatag, dma->map);
		bus_dmamem_unmap(bus->dmatag, dma->vaddr, dma->size);
		bus_dmamem_free(bus->dmatag, &dma->seg, 1);
		bus_dmamap_destroy(bus->dmatag, dma->map);
		dma->map = NULL;
	}
}

int
xhci_init(struct xhci_softc *sc)
{
	uint32_t hcr;
	int npage, error;

	sc->sc_bus.usbrev = USBREV_3_0;
	sc->sc_bus.methods = &xhci_bus_methods;
	sc->sc_bus.pipe_size = sizeof(struct xhci_pipe);

	sc->sc_oper_off = XREAD1(sc, XHCI_CAPLENGTH);
	sc->sc_door_off = XREAD4(sc, XHCI_DBOFF);
	sc->sc_runt_off = XREAD4(sc, XHCI_RTSOFF);

	sc->sc_version = XREAD2(sc, XHCI_HCIVERSION);
	printf(", xHCI %x.%x\n", sc->sc_version >> 8, sc->sc_version & 0xff);

#ifdef XHCI_DEBUG
	printf("%s: CAPLENGTH=%#lx\n", DEVNAME(sc), sc->sc_oper_off);
	printf("%s: DOORBELL=%#lx\n", DEVNAME(sc), sc->sc_door_off);
	printf("%s: RUNTIME=%#lx\n", DEVNAME(sc), sc->sc_runt_off);
#endif

	error = xhci_reset(sc);
	if (error)
		return (error);

	if (xhcixfer == NULL) {
		xhcixfer = malloc(sizeof(struct pool), M_USBHC, M_NOWAIT);
		if (xhcixfer == NULL) {
			printf("%s: unable to allocate pool descriptor\n",
			    DEVNAME(sc));
			return (ENOMEM);
		}
		pool_init(xhcixfer, sizeof(struct xhci_xfer), 0, IPL_SOFTUSB,
		    0, "xhcixfer", NULL);
	}

	hcr = XREAD4(sc, XHCI_HCCPARAMS);
	sc->sc_ctxsize = XHCI_HCC_CSZ(hcr) ? 64 : 32;
	DPRINTF(("%s: %d bytes context\n", DEVNAME(sc), sc->sc_ctxsize));

#ifdef XHCI_DEBUG
	hcr = XOREAD4(sc, XHCI_PAGESIZE);
	printf("%s: supported page size 0x%08x\n", DEVNAME(sc), hcr);
#endif
	/* Use 4K for the moment since it's easier. */
	sc->sc_pagesize = 4096;

	/* Get port and device slot numbers. */
	hcr = XREAD4(sc, XHCI_HCSPARAMS1);
	sc->sc_noport = XHCI_HCS1_N_PORTS(hcr);
	sc->sc_noslot = XHCI_HCS1_DEVSLOT_MAX(hcr);
	DPRINTF(("%s: %d ports and %d slots\n", DEVNAME(sc), sc->sc_noport,
	    sc->sc_noslot));

	/*
	 * Setup Device Context Base Address Array.
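	 * The DCBAA is an array of (sc_noslot + 1) 64-bit bus addresses,
	 * one Output Device Context per slot; entry 0 is reserved and is
	 * later pointed at the scratchpad table, see xhci_scratchpad_alloc().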
	 */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_dcbaa.dma,
	    (void **)&sc->sc_dcbaa.segs, (sc->sc_noslot + 1) * sizeof(uint64_t),
	    XHCI_DCBAA_ALIGN, sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Setup command ring. */
	rw_init(&sc->sc_cmd_lock, "xhcicmd");
	error = xhci_ring_alloc(sc, &sc->sc_cmd_ring, XHCI_MAX_CMDS,
	    XHCI_CMDS_RING_ALIGN);
	if (error) {
		printf("%s: could not allocate command ring.\n", DEVNAME(sc));
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (error);
	}

	/* Setup one event ring and its segment table (ERST). */
	error = xhci_ring_alloc(sc, &sc->sc_evt_ring, XHCI_MAX_EVTS,
	    XHCI_EVTS_RING_ALIGN);
	if (error) {
		printf("%s: could not allocate event ring.\n", DEVNAME(sc));
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (error);
	}

	/* Allocate the required entry for the segment table. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_erst.dma,
	    (void **)&sc->sc_erst.segs, sizeof(struct xhci_erseg),
	    XHCI_ERST_ALIGN, XHCI_ERST_BOUNDARY);
	if (error) {
		printf("%s: could not allocate segment table.\n", DEVNAME(sc));
		xhci_ring_free(sc, &sc->sc_evt_ring);
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (ENOMEM);
	}

	/* Set our ring address and size in its corresponding segment. */
	sc->sc_erst.segs[0].er_addr = htole64(sc->sc_evt_ring.dma.paddr);
	sc->sc_erst.segs[0].er_size = htole32(XHCI_MAX_EVTS);
	sc->sc_erst.segs[0].er_rsvd = 0;
	bus_dmamap_sync(sc->sc_erst.dma.tag, sc->sc_erst.dma.map, 0,
	    sc->sc_erst.dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Get the number of scratch pages and configure them if necessary. */
	hcr = XREAD4(sc, XHCI_HCSPARAMS2);
	npage = XHCI_HCS2_SPB_MAX(hcr);
	DPRINTF(("%s: %u scratch pages, ETE=%u, IST=0x%x\n", DEVNAME(sc), npage,
	    XHCI_HCS2_ETE(hcr), XHCI_HCS2_IST(hcr)));

	if (npage > 0 && xhci_scratchpad_alloc(sc, npage)) {
		printf("%s: could not allocate scratchpad.\n", DEVNAME(sc));
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
		xhci_ring_free(sc, &sc->sc_evt_ring);
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (ENOMEM);
	}

	return (0);
}

void
xhci_config(struct xhci_softc *sc)
{
	uint64_t paddr;
	uint32_t hcr;

	/* Make sure to program a number of device slots we can handle. */
	if (sc->sc_noslot > USB_MAX_DEVICES)
		sc->sc_noslot = USB_MAX_DEVICES;
	hcr = XOREAD4(sc, XHCI_CONFIG) & ~XHCI_CONFIG_SLOTS_MASK;
	XOWRITE4(sc, XHCI_CONFIG, hcr | sc->sc_noslot);

	/* Set the device context base array address. */
	paddr = (uint64_t)sc->sc_dcbaa.dma.paddr;
	XOWRITE4(sc, XHCI_DCBAAP_LO, (uint32_t)paddr);
	XOWRITE4(sc, XHCI_DCBAAP_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: DCBAAP=%#x%#x\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_DCBAAP_HI), XOREAD4(sc, XHCI_DCBAAP_LO)));

	/*
	 * Set the command ring address.
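	 * The low bits of CRCR also carry the initial Ring Cycle State
	 * (RCS), which must match the cycle bit of the first command TRB
	 * we will write; xhci_ring_reset() starts with XHCI_TRB_CYCLE.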
	 */
	paddr = (uint64_t)sc->sc_cmd_ring.dma.paddr;
	XOWRITE4(sc, XHCI_CRCR_LO, ((uint32_t)paddr) | XHCI_CRCR_LO_RCS);
	XOWRITE4(sc, XHCI_CRCR_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: CRCR=%#x%#x (%016llx)\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_CRCR_HI), XOREAD4(sc, XHCI_CRCR_LO), paddr));

	/* Set the ERST count number to 1, since we use only one event ring. */
	XRWRITE4(sc, XHCI_ERSTSZ(0), XHCI_ERSTS_SET(1));

	/* Set the segment table address. */
	paddr = (uint64_t)sc->sc_erst.dma.paddr;
	XRWRITE4(sc, XHCI_ERSTBA_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERSTBA=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERSTBA_HI(0)), XRREAD4(sc, XHCI_ERSTBA_LO(0))));

	/* Set the ring dequeue address. */
	paddr = (uint64_t)sc->sc_evt_ring.dma.paddr;
	XRWRITE4(sc, XHCI_ERDP_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERDP=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERDP_HI(0)), XRREAD4(sc, XHCI_ERDP_LO(0))));

	/* Enable interrupts. */
	hcr = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), hcr | XHCI_IMAN_INTR_ENA);

	/* Set default interrupt moderation. */
	XRWRITE4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT);

	/* Allow event interrupt and start the controller. */
	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);

	DPRINTF(("%s: USBCMD=%#x\n", DEVNAME(sc), XOREAD4(sc, XHCI_USBCMD)));
	DPRINTF(("%s: IMAN=%#x\n", DEVNAME(sc), XRREAD4(sc, XHCI_IMAN(0))));
}

int
xhci_detach(struct device *self, int flags)
{
	struct xhci_softc *sc = (struct xhci_softc *)self;
	int rv;

	rv = config_detach_children(self, flags);
	if (rv != 0) {
		printf("%s: error while detaching %d\n", DEVNAME(sc), rv);
		return (rv);
	}

	/* Since the hardware might already be gone, ignore the errors. */
	xhci_command_abort(sc);

	xhci_reset(sc);

	/* Disable interrupts. */
	XRWRITE4(sc, XHCI_IMOD(0), 0);
	XRWRITE4(sc, XHCI_IMAN(0), 0);

	/* Clear the event ring address. */
	XRWRITE4(sc, XHCI_ERDP_LO(0), 0);
	XRWRITE4(sc, XHCI_ERDP_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTBA_LO(0), 0);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTSZ(0), 0);

	/* Clear the command ring address. */
	XOWRITE4(sc, XHCI_CRCR_LO, 0);
	XOWRITE4(sc, XHCI_CRCR_HI, 0);

	XOWRITE4(sc, XHCI_DCBAAP_LO, 0);
	XOWRITE4(sc, XHCI_DCBAAP_HI, 0);

	if (sc->sc_spad.npage > 0)
		xhci_scratchpad_free(sc);

	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
	xhci_ring_free(sc, &sc->sc_evt_ring);
	xhci_ring_free(sc, &sc->sc_cmd_ring);
	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);

	return (0);
}

int
xhci_activate(struct device *self, int act)
{
	struct xhci_softc *sc = (struct xhci_softc *)self;
	int rv = 0;

	switch (act) {
	case DVACT_RESUME:
		sc->sc_bus.use_polling++;

		xhci_reset(sc);
		xhci_ring_reset(sc, &sc->sc_cmd_ring);
		xhci_ring_reset(sc, &sc->sc_evt_ring);

		/*
		 * Renesas controllers, at least, need more time to resume.
		 */
		usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);

		xhci_config(sc);

		sc->sc_bus.use_polling--;
		rv = config_activate_children(self, act);
		break;
	case DVACT_POWERDOWN:
		rv = config_activate_children(self, act);
		xhci_reset(sc);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}

	return (rv);
}

int
xhci_reset(struct xhci_softc *sc)
{
	uint32_t hcr;
	int i;

	XOWRITE4(sc, XHCI_USBCMD, 0);	/* Halt controller */
	for (i = 0; i < 100; i++) {
		usb_delay_ms(&sc->sc_bus, 1);
		hcr = XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_HCH;
		if (hcr)
			break;
	}

	if (!hcr)
		printf("%s: halt timeout\n", DEVNAME(sc));

	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_HCRST);
	for (i = 0; i < 100; i++) {
		usb_delay_ms(&sc->sc_bus, 1);
		hcr = (XOREAD4(sc, XHCI_USBCMD) & XHCI_CMD_HCRST) |
		    (XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_CNR);
		if (!hcr)
			break;
	}

	if (hcr) {
		printf("%s: reset timeout\n", DEVNAME(sc));
		return (EIO);
	}

	return (0);
}

int
xhci_intr(void *v)
{
	struct xhci_softc *sc = v;

	if (sc->sc_dead)
		return (0);

	/* If we get an interrupt while polling, then just ignore it. */
	if (sc->sc_bus.use_polling) {
		DPRINTFN(16, ("xhci_intr: ignored interrupt while polling\n"));
		return (0);
	}

	return (xhci_intr1(sc));
}

int
xhci_intr1(struct xhci_softc *sc)
{
	uint32_t intrs;

	intrs = XOREAD4(sc, XHCI_USBSTS);
	if (intrs == 0xffffffff) {
		sc->sc_bus.dying = 1;
		sc->sc_dead = 1;
		return (0);
	}

	if ((intrs & XHCI_STS_EINT) == 0)
		return (0);

	sc->sc_bus.no_intrs++;

	if (intrs & XHCI_STS_HSE) {
		printf("%s: host system error\n", DEVNAME(sc));
		sc->sc_bus.dying = 1;
		return (1);
	}

	/* Acknowledge interrupts */
	XOWRITE4(sc, XHCI_USBSTS, intrs);
	intrs = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), intrs | XHCI_IMAN_INTR_PEND);

	usb_schedsoftintr(&sc->sc_bus);

	return (1);
}

void
xhci_poll(struct usbd_bus *bus)
{
	struct xhci_softc *sc = (struct xhci_softc *)bus;

	if (XOREAD4(sc, XHCI_USBSTS))
		xhci_intr1(sc);
}

void
xhci_softintr(void *v)
{
	struct xhci_softc *sc = v;

	if (sc->sc_bus.dying)
		return;

	sc->sc_bus.intr_context++;
	xhci_event_dequeue(sc);
	sc->sc_bus.intr_context--;
}

void
xhci_event_dequeue(struct xhci_softc *sc)
{
	struct xhci_trb *trb;
	uint64_t paddr;
	uint32_t status, flags;

	while ((trb = xhci_ring_consume(sc, &sc->sc_evt_ring)) != NULL) {
		paddr = letoh64(trb->trb_paddr);
		status = letoh32(trb->trb_status);
		flags = letoh32(trb->trb_flags);

		switch (flags & XHCI_TRB_TYPE_MASK) {
		case XHCI_EVT_XFER:
			xhci_event_xfer(sc, paddr, status, flags);
			break;
		case XHCI_EVT_CMD_COMPLETE:
			memcpy(&sc->sc_result_trb, trb, sizeof(*trb));
			xhci_event_command(sc, paddr);
			break;
		case XHCI_EVT_PORT_CHANGE:
			xhci_event_port_change(sc, paddr, status);
			break;
		case XHCI_EVT_HOST_CTRL:
			/* TODO */
			break;
		default:
#ifdef XHCI_DEBUG
			printf("event (%d): ", XHCI_TRB_TYPE(flags));
			xhci_dump_trb(trb);
#endif
			break;
		}
	}

	paddr = (uint64_t)DEQPTR(sc->sc_evt_ring);
	XRWRITE4(sc, XHCI_ERDP_LO(0), ((uint32_t)paddr) | XHCI_ERDP_LO_BUSY);
	XRWRITE4(sc,
	    XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));
}

void
xhci_skip_all(struct xhci_pipe *xp)
{
	struct usbd_xfer *xfer, *last;

	if (xp->skip) {
		/*
		 * Find the last transfer to skip; this is necessary
		 * because xhci_xfer_done() posts new transfers which
		 * we don't want to skip.
		 */
		last = SIMPLEQ_FIRST(&xp->pipe.queue);
		if (last == NULL)
			goto done;
		while ((xfer = SIMPLEQ_NEXT(last, next)) != NULL)
			last = xfer;

		do {
			xfer = SIMPLEQ_FIRST(&xp->pipe.queue);
			if (xfer == NULL)
				goto done;
			DPRINTF(("%s: skipping %p\n", __func__, xfer));
			xfer->status = USBD_NORMAL_COMPLETION;
			xhci_xfer_done(xfer);
		} while (xfer != last);
done:
		xp->skip = 0;
	}
}

void
xhci_event_xfer(struct xhci_softc *sc, uint64_t paddr, uint32_t status,
    uint32_t flags)
{
	struct xhci_pipe *xp;
	struct usbd_xfer *xfer;
	uint8_t dci, slot, code, xfertype;
	uint32_t remain;
	int trb_idx;

	slot = XHCI_TRB_GET_SLOT(flags);
	dci = XHCI_TRB_GET_EP(flags);
	if (slot > sc->sc_noslot) {
		DPRINTF(("%s: incorrect slot (%u)\n", DEVNAME(sc), slot));
		return;
	}

	xp = sc->sc_sdevs[slot].pipes[dci - 1];
	if (xp == NULL) {
		DPRINTF(("%s: incorrect dci (%u)\n", DEVNAME(sc), dci));
		return;
	}

	code = XHCI_TRB_GET_CODE(status);
	remain = XHCI_TRB_REMAIN(status);

	switch (code) {
	case XHCI_CODE_RING_UNDERRUN:
		DPRINTF(("%s: slot %u underrun with %zu TRB\n", DEVNAME(sc),
		    slot, xp->ring.ntrb - xp->free_trbs));
		xhci_skip_all(xp);
		return;
	case XHCI_CODE_RING_OVERRUN:
		DPRINTF(("%s: slot %u overrun with %zu TRB\n", DEVNAME(sc),
		    slot, xp->ring.ntrb - xp->free_trbs));
		xhci_skip_all(xp);
		return;
	case XHCI_CODE_MISSED_SRV:
		DPRINTF(("%s: slot %u missed srv with %zu TRB\n", DEVNAME(sc),
		    slot, xp->ring.ntrb - xp->free_trbs));
		xp->skip = 1;
		return;
	default:
		break;
	}

	trb_idx = (paddr - xp->ring.dma.paddr) / sizeof(struct xhci_trb);
	if (trb_idx < 0 || trb_idx >= xp->ring.ntrb) {
		printf("%s: wrong trb index (%u) max is %zu\n", DEVNAME(sc),
		    trb_idx, xp->ring.ntrb - 1);
		return;
	}

	xfer = xp->pending_xfers[trb_idx];
	if (xfer == NULL) {
		DPRINTF(("%s: NULL xfer pointer\n", DEVNAME(sc)));
		return;
	}

	if (remain > xfer->length)
		remain = xfer->length;

	xfertype = UE_GET_XFERTYPE(xfer->pipe->endpoint->edesc->bmAttributes);

	switch (xfertype) {
	case UE_BULK:
	case UE_INTERRUPT:
	case UE_CONTROL:
		if (xhci_event_xfer_generic(sc, xfer, xp, remain, trb_idx,
		    code, slot, dci))
			return;
		break;
	case UE_ISOCHRONOUS:
		if (xhci_event_xfer_isoc(xfer, xp, remain, trb_idx, code))
			return;
		break;
	default:
		panic("xhci_event_xfer: unknown xfer type %u", xfertype);
	}

	xhci_xfer_done(xfer);
}

uint32_t
xhci_xfer_length_generic(struct xhci_xfer *xx, struct xhci_pipe *xp,
    int trb_idx)
{
	int trb0_idx;
	uint32_t len = 0, type;

	trb0_idx =
	    ((xx->index + xp->ring.ntrb) - xx->ntrb) % (xp->ring.ntrb - 1);

	while (1) {
		type = letoh32(xp->ring.trbs[trb0_idx].trb_flags) &
		    XHCI_TRB_TYPE_MASK;
		if (type == XHCI_TRB_TYPE_NORMAL || type == XHCI_TRB_TYPE_DATA)
			len += XHCI_TRB_LEN(letoh32(
			    xp->ring.trbs[trb0_idx].trb_status));
		if (trb0_idx == trb_idx)
			break;
		if (++trb0_idx == xp->ring.ntrb)
			trb0_idx = 0;
	}
	return len;
}

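/*
 * Note: xhci_xfer_length_generic() walks a TD from its first TRB up to
 * the one that completed, summing the lengths of Normal and Data TRBs.
 * E.g. (hypothetical numbers): with ntrb = 8 (7 usable TRBs plus the
 * link TRB), a TD of 3 TRBs completing at index 1 started at
 * trb0_idx = ((1 + 8) - 3) % 7 = 6 and wrapped over the link TRB at 7.
 */
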
int
xhci_event_xfer_generic(struct xhci_softc *sc, struct usbd_xfer *xfer,
    struct xhci_pipe *xp, uint32_t remain, int trb_idx,
    uint8_t code, uint8_t slot, uint8_t dci)
{
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;

	switch (code) {
	case XHCI_CODE_SUCCESS:
		if (xfer->actlen == 0) {
			if (remain)
				xfer->actlen =
				    xhci_xfer_length_generic(xx, xp, trb_idx) -
				    remain;
			else
				xfer->actlen = xfer->length;
		}
		if (xfer->actlen)
			usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
			    usbd_xfer_isread(xfer) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		xfer->status = USBD_NORMAL_COMPLETION;
		break;
	case XHCI_CODE_SHORT_XFER:
		/*
		 * Use values from the transfer TRB instead of the status TRB.
		 */
		if (xfer->actlen == 0)
			xfer->actlen =
			    xhci_xfer_length_generic(xx, xp, trb_idx) - remain;
		/*
		 * If this is not the last TRB of a transfer, we should
		 * theoretically clear the IOC at the end of the chain
		 * but the HC might have already processed it before we
		 * had a chance to schedule the softinterrupt.
		 */
		if (xx->index != trb_idx) {
			DPRINTF(("%s: short xfer %p for %u\n",
			    DEVNAME(sc), xfer, xx->index));
			return (1);
		}
		if (xfer->actlen)
			usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
			    usbd_xfer_isread(xfer) ?
			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		xfer->status = USBD_NORMAL_COMPLETION;
		break;
	case XHCI_CODE_TXERR:
	case XHCI_CODE_SPLITERR:
		DPRINTF(("%s: txerr? code %d\n", DEVNAME(sc), code));
		xfer->status = USBD_IOERROR;
		break;
	case XHCI_CODE_STALL:
	case XHCI_CODE_BABBLE:
		DPRINTF(("%s: babble code %d\n", DEVNAME(sc), code));
		/* Prevent any timeout from kicking in. */
		timeout_del(&xfer->timeout_handle);
		usb_rem_task(xfer->device, &xfer->abort_task);

		/* We need to report this condition for umass(4). */
		if (code == XHCI_CODE_STALL)
			xp->halted = USBD_STALLED;
		else
			xp->halted = USBD_IOERROR;
		/*
		 * Since the stack might try to start a new transfer as
		 * soon as a pending one finishes, make sure the endpoint
		 * is fully reset before calling usb_transfer_complete().
		 */
		xp->aborted_xfer = xfer;
		xhci_cmd_reset_ep_async(sc, slot, dci);
		return (1);
	case XHCI_CODE_XFER_STOPPED:
	case XHCI_CODE_XFER_STOPINV:
		/* Endpoint stopped while processing a TD. */
		if (xfer == xp->aborted_xfer) {
			DPRINTF(("%s: stopped xfer=%p\n", __func__, xfer));
			return (1);
		}

		/* FALLTHROUGH */
	default:
		DPRINTF(("%s: unhandled code %d\n", DEVNAME(sc), code));
		xfer->status = USBD_IOERROR;
		xp->halted = 1;
		break;
	}

	return (0);
}

int
xhci_event_xfer_isoc(struct usbd_xfer *xfer, struct xhci_pipe *xp,
    uint32_t remain, int trb_idx, uint8_t code)
{
	struct usbd_xfer *skipxfer;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
	int trb0_idx, frame_idx = 0, skip_trb = 0;

	KASSERT(xx->index >= 0);

	switch (code) {
	case XHCI_CODE_SHORT_XFER:
		xp->trb_processed[trb_idx] = TRB_PROCESSED_SHORT;
		break;
	default:
		xp->trb_processed[trb_idx] = TRB_PROCESSED_YES;
		break;
	}

	trb0_idx =
	    ((xx->index + xp->ring.ntrb) - xx->ntrb) % (xp->ring.ntrb - 1);

	/*
	 * Find the corresponding frame index for this TRB.
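	 *
	 * Walk from the TD's first TRB up to the completed one, counting
	 * Isoch TRBs; a Normal TRB here is a continuation of the previous
	 * frame's Isoch TRB and does not start a new frame.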
	 */
	while (trb0_idx != trb_idx) {
		if ((letoh32(xp->ring.trbs[trb0_idx].trb_flags) &
		    XHCI_TRB_TYPE_MASK) == XHCI_TRB_TYPE_ISOCH)
			frame_idx++;
		if (trb0_idx++ == (xp->ring.ntrb - 1))
			trb0_idx = 0;
	}

	/*
	 * If we queued two TRBs for a frame and this is the second TRB,
	 * check if the first TRB needs accounting since it might not have
	 * raised an interrupt if the full data was received.
	 */
	if ((letoh32(xp->ring.trbs[trb_idx].trb_flags) & XHCI_TRB_TYPE_MASK) ==
	    XHCI_TRB_TYPE_NORMAL) {
		frame_idx--;
		if (trb_idx == 0)
			trb0_idx = xp->ring.ntrb - 2;
		else
			trb0_idx = trb_idx - 1;
		if (xp->trb_processed[trb0_idx] == TRB_PROCESSED_NO) {
			xfer->frlengths[frame_idx] = XHCI_TRB_LEN(letoh32(
			    xp->ring.trbs[trb0_idx].trb_status));
		} else if (xp->trb_processed[trb0_idx] == TRB_PROCESSED_SHORT) {
			skip_trb = 1;
		}
	}

	if (!skip_trb) {
		xfer->frlengths[frame_idx] +=
		    XHCI_TRB_LEN(letoh32(xp->ring.trbs[trb_idx].trb_status)) -
		    remain;
		xfer->actlen += xfer->frlengths[frame_idx];
	}

	if (xx->index != trb_idx)
		return (1);

	if (xp->skip) {
		while (1) {
			skipxfer = SIMPLEQ_FIRST(&xp->pipe.queue);
			if (skipxfer == xfer || skipxfer == NULL)
				break;
			DPRINTF(("%s: skipping %p\n", __func__, skipxfer));
			skipxfer->status = USBD_NORMAL_COMPLETION;
			xhci_xfer_done(skipxfer);
		}
		xp->skip = 0;
	}

	usb_syncmem(&xfer->dmabuf, 0, xfer->length,
	    usbd_xfer_isread(xfer) ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	xfer->status = USBD_NORMAL_COMPLETION;

	return (0);
}

void
xhci_event_command(struct xhci_softc *sc, uint64_t paddr)
{
	struct xhci_trb *trb;
	struct xhci_pipe *xp;
	uint32_t flags;
	uint8_t dci, slot;
	int trb_idx, status;

	trb_idx = (paddr - sc->sc_cmd_ring.dma.paddr) / sizeof(*trb);
	if (trb_idx < 0 || trb_idx >= sc->sc_cmd_ring.ntrb) {
		printf("%s: wrong trb index (%u) max is %zu\n", DEVNAME(sc),
		    trb_idx, sc->sc_cmd_ring.ntrb - 1);
		return;
	}

	trb = &sc->sc_cmd_ring.trbs[trb_idx];

	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	flags = letoh32(trb->trb_flags);

	slot = XHCI_TRB_GET_SLOT(flags);
	dci = XHCI_TRB_GET_EP(flags);

	switch (flags & XHCI_TRB_TYPE_MASK) {
	case XHCI_CMD_RESET_EP:
		xp = sc->sc_sdevs[slot].pipes[dci - 1];
		if (xp == NULL)
			break;

		/* Update the dequeue pointer past the last TRB. */
		xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
		    DEQPTR(xp->ring) | xp->ring.toggle);
		break;
	case XHCI_CMD_SET_TR_DEQ:
		xp = sc->sc_sdevs[slot].pipes[dci - 1];
		if (xp == NULL)
			break;

		status = xp->halted;
		xp->halted = 0;
		if (xp->aborted_xfer != NULL) {
			xp->aborted_xfer->status = status;
			xhci_xfer_done(xp->aborted_xfer);
			wakeup(xp);
		}
		break;
	case XHCI_CMD_CONFIG_EP:
	case XHCI_CMD_STOP_EP:
	case XHCI_CMD_DISABLE_SLOT:
	case XHCI_CMD_ENABLE_SLOT:
	case XHCI_CMD_ADDRESS_DEVICE:
	case XHCI_CMD_EVAL_CTX:
	case XHCI_CMD_NOOP:
		/*
		 * All these commands are synchronous.
		 *
		 * If TRBs differ, this could be a delayed result after we
		 * gave up waiting for the expected TRB due to timeout.
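		 * In that case sc_cmd_trb no longer points at this TRB
		 * and the delayed completion is simply dropped.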
		 */
		if (sc->sc_cmd_trb == trb) {
			sc->sc_cmd_trb = NULL;
			wakeup(&sc->sc_cmd_trb);
		}
		break;
	default:
		DPRINTF(("%s: unexpected command %x\n", DEVNAME(sc), flags));
	}
}

void
xhci_event_port_change(struct xhci_softc *sc, uint64_t paddr, uint32_t status)
{
	struct usbd_xfer *xfer = sc->sc_intrxfer;
	uint32_t port = XHCI_TRB_PORTID(paddr);
	uint8_t *p;

	if (XHCI_TRB_GET_CODE(status) != XHCI_CODE_SUCCESS) {
		DPRINTF(("%s: failed port status event\n", DEVNAME(sc)));
		return;
	}

	if (xfer == NULL)
		return;

	p = KERNADDR(&xfer->dmabuf, 0);
	memset(p, 0, xfer->length);

	p[port/8] |= 1 << (port%8);
	DPRINTF(("%s: port=%d change=0x%02x\n", DEVNAME(sc), port, *p));

	xfer->actlen = xfer->length;
	xfer->status = USBD_NORMAL_COMPLETION;

	usb_transfer_complete(xfer);
}

void
xhci_xfer_done(struct usbd_xfer *xfer)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
	int ntrb, i;

	splsoftassert(IPL_SOFTUSB);

#ifdef XHCI_DEBUG
	if (xx->index < 0 || xp->pending_xfers[xx->index] == NULL) {
		printf("%s: xfer=%p done (idx=%d, ntrb=%zd)\n", __func__,
		    xfer, xx->index, xx->ntrb);
	}
#endif

	if (xp->aborted_xfer == xfer)
		xp->aborted_xfer = NULL;

	for (ntrb = 0, i = xx->index; ntrb < xx->ntrb; ntrb++, i--) {
		xp->pending_xfers[i] = NULL;
		if (i == 0)
			i = (xp->ring.ntrb - 1);
	}
	xp->free_trbs += xx->ntrb;
	xp->free_trbs += xx->zerotd;
	xx->index = -1;
	xx->ntrb = 0;
	xx->zerotd = 0;

	timeout_del(&xfer->timeout_handle);
	usb_rem_task(xfer->device, &xfer->abort_task);
	usb_transfer_complete(xfer);
}

/*
 * Calculate the Device Context Index (DCI) for endpoints as stated
 * in section 4.5.1 of xHCI specification r1.1.
 */
static inline uint8_t
xhci_ed2dci(usb_endpoint_descriptor_t *ed)
{
	uint8_t dir;

	if (UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL)
		return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + 1);

	if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)
		dir = 1;
	else
		dir = 0;

	return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + dir);
}

usbd_status
xhci_pipe_open(struct usbd_pipe *pipe)
{
	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint8_t slot = 0, xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	int error;

	KASSERT(xp->slot == 0);

	if (sc->sc_bus.dying)
		return (USBD_IOERROR);

	/* Root Hub */
	if (pipe->device->depth == 0) {
		switch (ed->bEndpointAddress) {
		case USB_CONTROL_ENDPOINT:
			pipe->methods = &xhci_root_ctrl_methods;
			break;
		case UE_DIR_IN | XHCI_INTR_ENDPT:
			pipe->methods = &xhci_root_intr_methods;
			break;
		default:
			pipe->methods = NULL;
			return (USBD_INVAL);
		}
		return (USBD_NORMAL_COMPLETION);
	}

#if 0
	/* Issue a noop to check if the command ring is correctly configured. */
	xhci_cmd_noop(sc);
#endif

	switch (xfertype) {
	case UE_CONTROL:
		pipe->methods = &xhci_device_ctrl_methods;

		/*
		 * Get a slot and init the device's contexts.
1209 * 1210 * Since the control endpoint, represented as the default 1211 * pipe, is always opened first we are dealing with a 1212 * new device. Put a new slot in the ENABLED state. 1213 * 1214 */ 1215 error = xhci_cmd_slot_control(sc, &slot, 1); 1216 if (error || slot == 0 || slot > sc->sc_noslot) 1217 return (USBD_INVAL); 1218 1219 if (xhci_softdev_alloc(sc, slot)) { 1220 xhci_cmd_slot_control(sc, &slot, 0); 1221 return (USBD_NOMEM); 1222 } 1223 1224 break; 1225 case UE_ISOCHRONOUS: 1226 pipe->methods = &xhci_device_isoc_methods; 1227 break; 1228 case UE_BULK: 1229 pipe->methods = &xhci_device_bulk_methods; 1230 break; 1231 case UE_INTERRUPT: 1232 pipe->methods = &xhci_device_intr_methods; 1233 break; 1234 default: 1235 return (USBD_INVAL); 1236 } 1237 1238 /* 1239 * Our USBD Bus Interface is pipe-oriented but for most of the 1240 * operations we need to access a device context, so keep track 1241 * of the slot ID in every pipe. 1242 */ 1243 if (slot == 0) 1244 slot = ((struct xhci_pipe *)pipe->device->default_pipe)->slot; 1245 1246 xp->slot = slot; 1247 xp->dci = xhci_ed2dci(ed); 1248 1249 if (xhci_pipe_init(sc, pipe)) { 1250 xhci_cmd_slot_control(sc, &slot, 0); 1251 return (USBD_IOERROR); 1252 } 1253 1254 return (USBD_NORMAL_COMPLETION); 1255 } 1256 1257 /* 1258 * Set the maximum Endpoint Service Interface Time (ESIT) payload and 1259 * the average TRB buffer length for an endpoint. 1260 */ 1261 static inline uint32_t 1262 xhci_get_txinfo(struct xhci_softc *sc, struct usbd_pipe *pipe) 1263 { 1264 usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc; 1265 uint32_t mep, atl, mps = UGETW(ed->wMaxPacketSize); 1266 1267 switch (UE_GET_XFERTYPE(ed->bmAttributes)) { 1268 case UE_CONTROL: 1269 mep = 0; 1270 atl = 8; 1271 break; 1272 case UE_INTERRUPT: 1273 case UE_ISOCHRONOUS: 1274 if (pipe->device->speed == USB_SPEED_SUPER) { 1275 /* XXX Read the companion descriptor */ 1276 } 1277 1278 mep = (UE_GET_TRANS(mps) + 1) * UE_GET_SIZE(mps); 1279 atl = mep; 1280 break; 1281 case UE_BULK: 1282 default: 1283 mep = 0; 1284 atl = 0; 1285 } 1286 1287 return (XHCI_EPCTX_MAX_ESIT_PAYLOAD(mep) | XHCI_EPCTX_AVG_TRB_LEN(atl)); 1288 } 1289 1290 static inline uint32_t 1291 xhci_linear_interval(usb_endpoint_descriptor_t *ed) 1292 { 1293 uint32_t ival = min(max(1, ed->bInterval), 255); 1294 1295 return (fls(ival) - 1); 1296 } 1297 1298 static inline uint32_t 1299 xhci_exponential_interval(usb_endpoint_descriptor_t *ed) 1300 { 1301 uint32_t ival = min(max(1, ed->bInterval), 16); 1302 1303 return (ival - 1); 1304 } 1305 /* 1306 * Return interval for endpoint expressed in 2^(ival) * 125us. 1307 * 1308 * See section 6.2.3.6 of xHCI r1.1 Specification for more details. 1309 */ 1310 uint32_t 1311 xhci_pipe_interval(struct usbd_pipe *pipe) 1312 { 1313 usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc; 1314 uint8_t speed = pipe->device->speed; 1315 uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 1316 uint32_t ival; 1317 1318 if (xfertype == UE_CONTROL || xfertype == UE_BULK) { 1319 /* Control and Bulk endpoints never NAKs. 
		 */
		ival = 0;
	} else {
		switch (speed) {
		case USB_SPEED_FULL:
			if (xfertype == UE_ISOCHRONOUS) {
				/* Convert 1-2^(15)ms into 3-18 */
				ival = xhci_exponential_interval(ed) + 3;
				break;
			}
			/* FALLTHROUGH */
		case USB_SPEED_LOW:
			/* Convert 1-255ms into 3-10 */
			ival = xhci_linear_interval(ed) + 3;
			break;
		case USB_SPEED_HIGH:
		case USB_SPEED_SUPER:
		default:
			/* Convert 1-2^(15) * 125us into 0-15 */
			ival = xhci_exponential_interval(ed);
			break;
		}
	}

	KASSERT(ival <= 15);
	return (XHCI_EPCTX_SET_IVAL(ival));
}

uint32_t
xhci_pipe_maxburst(struct usbd_pipe *pipe)
{
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint32_t mps = UGETW(ed->wMaxPacketSize);
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint32_t maxb = 0;

	switch (pipe->device->speed) {
	case USB_SPEED_HIGH:
		if (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)
			maxb = UE_GET_TRANS(mps);
		break;
	case USB_SPEED_SUPER:
		/* XXX Read the companion descriptor */
	default:
		break;
	}

	return (maxb);
}

static inline uint32_t
xhci_last_valid_dci(struct xhci_pipe **pipes, struct xhci_pipe *ignore)
{
	struct xhci_pipe *lxp;
	int i;

	/* Find the last valid Endpoint Context. */
	for (i = 30; i >= 0; i--) {
		lxp = pipes[i];
		if (lxp != NULL && lxp != ignore)
			return XHCI_SCTX_DCI(lxp->dci);
	}

	return 0;
}

int
xhci_context_setup(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint32_t mps = UGETW(ed->wMaxPacketSize);
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint8_t speed, cerr = 0;
	uint32_t route = 0, rhport = 0;
	struct usbd_device *hub;

	/*
	 * Calculate the Route String. Assume that there is no hub with
	 * more than 15 ports and that they all have a depth < 6. See
	 * section 8.9 of USB 3.1 Specification for more details.
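	 *
	 * Each hub tier contributes one nibble, tier 1 in the lowest
	 * nibble. E.g. (hypothetical topology): a device on port 2 of a
	 * hub that hangs off port 3 of a tier-1 hub yields route = 0x23,
	 * with the tier-1 hub's root port recorded separately in rhport.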
	 */
	for (hub = pipe->device; hub->myhub->depth; hub = hub->myhub) {
		uint32_t port = hub->powersrc->portno;
		uint32_t depth = hub->myhub->depth;

		route |= port << (4 * (depth - 1));
	}

	/* Get Root Hub port */
	rhport = hub->powersrc->portno;

	switch (pipe->device->speed) {
	case USB_SPEED_LOW:
		speed = XHCI_SPEED_LOW;
		break;
	case USB_SPEED_FULL:
		speed = XHCI_SPEED_FULL;
		break;
	case USB_SPEED_HIGH:
		speed = XHCI_SPEED_HIGH;
		break;
	case USB_SPEED_SUPER:
		speed = XHCI_SPEED_SUPER;
		break;
	default:
		return (USBD_INVAL);
	}

	/* Setup the endpoint context */
	if (xfertype != UE_ISOCHRONOUS)
		cerr = 3;

	if ((ed->bEndpointAddress & UE_DIR_IN) || (xfertype == UE_CONTROL))
		xfertype |= 0x4;

	sdev->ep_ctx[xp->dci-1]->info_lo = htole32(xhci_pipe_interval(pipe));
	sdev->ep_ctx[xp->dci-1]->info_hi = htole32(
	    XHCI_EPCTX_SET_MPS(UE_GET_SIZE(mps)) |
	    XHCI_EPCTX_SET_MAXB(xhci_pipe_maxburst(pipe)) |
	    XHCI_EPCTX_SET_EPTYPE(xfertype) | XHCI_EPCTX_SET_CERR(cerr)
	);
	sdev->ep_ctx[xp->dci-1]->txinfo = htole32(xhci_get_txinfo(sc, pipe));
	sdev->ep_ctx[xp->dci-1]->deqp = htole64(
	    DEQPTR(xp->ring) | xp->ring.toggle
	);

	/* Unmask the new endpoint */
	sdev->input_ctx->drop_flags = 0;
	sdev->input_ctx->add_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));

	/* Setup the slot context */
	sdev->slot_ctx->info_lo = htole32(
	    xhci_last_valid_dci(sdev->pipes, NULL) | XHCI_SCTX_SPEED(speed) |
	    XHCI_SCTX_ROUTE(route)
	);
	sdev->slot_ctx->info_hi = htole32(XHCI_SCTX_RHPORT(rhport));
	sdev->slot_ctx->tt = 0;
	sdev->slot_ctx->state = 0;

/* XXX */
#define UHUB_IS_MTT(dev) (dev->ddesc.bDeviceProtocol == UDPROTO_HSHUBMTT)
	/*
	 * If we are opening the interrupt pipe of a hub, update its
	 * context before putting it in the CONFIGURED state.
	 */
	if (pipe->device->hub != NULL) {
		int nports = pipe->device->hub->nports;

		sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_HUB(1));
		sdev->slot_ctx->info_hi |= htole32(XHCI_SCTX_NPORTS(nports));

		if (UHUB_IS_MTT(pipe->device))
			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));

		sdev->slot_ctx->tt |= htole32(
		    XHCI_SCTX_TT_THINK_TIME(pipe->device->hub->ttthink)
		);
	}

	/*
	 * If this is a Low or Full Speed device below an external High
	 * Speed hub, it needs some TT love.
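	 *
	 * The Transaction Translator (TT) fields identify, by slot ID and
	 * port number, the High Speed hub that performs split transactions
	 * on behalf of this device.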
	 */
	if (speed < XHCI_SPEED_HIGH && pipe->device->myhsport != NULL) {
		struct usbd_device *hshub = pipe->device->myhsport->parent;
		uint8_t slot = ((struct xhci_pipe *)hshub->default_pipe)->slot;

		if (UHUB_IS_MTT(hshub))
			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));

		sdev->slot_ctx->tt |= htole32(
		    XHCI_SCTX_TT_HUB_SID(slot) |
		    XHCI_SCTX_TT_PORT_NUM(pipe->device->myhsport->portno)
		);
	}
#undef UHUB_IS_MTT

	/* Unmask the slot context */
	sdev->input_ctx->add_flags |= htole32(XHCI_INCTX_MASK_DCI(0));

	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
	    sc->sc_pagesize, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

int
xhci_pipe_init(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	int error;

#ifdef XHCI_DEBUG
	struct usbd_device *dev = pipe->device;
	printf("%s: pipe=%p addr=%d depth=%d port=%d speed=%d dev %d dci %u"
	    " (epAddr=0x%x)\n", __func__, pipe, dev->address, dev->depth,
	    dev->powersrc->portno, dev->speed, xp->slot, xp->dci,
	    pipe->endpoint->edesc->bEndpointAddress);
#endif

	if (xhci_ring_alloc(sc, &xp->ring, XHCI_MAX_XFER, XHCI_XFER_RING_ALIGN))
		return (ENOMEM);

	xp->free_trbs = xp->ring.ntrb;
	xp->halted = 0;

	sdev->pipes[xp->dci - 1] = xp;

	error = xhci_context_setup(sc, pipe);
	if (error)
		return (error);

	if (xp->dci == 1) {
		/*
		 * If we are opening the default pipe, the Slot should
		 * be in the ENABLED state. Issue an "Address Device"
		 * with BSR=1 to put the device in the DEFAULT state.
		 * We cannot jump directly to the ADDRESSED state with
		 * BSR=0 because some Low/Full speed devices won't accept
		 * a SET_ADDRESS command before we've read their device
		 * descriptor.
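		 *
		 * (BSR=1 makes the controller set up the slot without
		 * issuing a SET_ADDRESS request on the bus.)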
		 */
		error = xhci_cmd_set_address(sc, xp->slot,
		    sdev->ictx_dma.paddr, XHCI_TRB_BSR);
	} else {
		error = xhci_cmd_configure_ep(sc, xp->slot,
		    sdev->ictx_dma.paddr);
	}

	if (error) {
		xhci_ring_free(sc, &xp->ring);
		return (EIO);
	}

	return (0);
}

void
xhci_pipe_close(struct usbd_pipe *pipe)
{
	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];

	/* Root Hub */
	if (pipe->device->depth == 0)
		return;

	/* Mask the endpoint */
	sdev->input_ctx->drop_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));
	sdev->input_ctx->add_flags = 0;

	/* Update last valid Endpoint Context */
	sdev->slot_ctx->info_lo &= htole32(~XHCI_SCTX_DCI(31));
	sdev->slot_ctx->info_lo |= htole32(xhci_last_valid_dci(sdev->pipes, xp));

	/* Clear the Endpoint Context */
	memset(sdev->ep_ctx[xp->dci - 1], 0, sizeof(struct xhci_epctx));

	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
	    sc->sc_pagesize, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (xhci_cmd_configure_ep(sc, xp->slot, sdev->ictx_dma.paddr))
		DPRINTF(("%s: error clearing ep (%d)\n", DEVNAME(sc), xp->dci));

	xhci_ring_free(sc, &xp->ring);
	sdev->pipes[xp->dci - 1] = NULL;

	/*
	 * If we are closing the default pipe, the device is probably
	 * gone, so put its slot in the DISABLED state.
	 */
	if (xp->dci == 1) {
		xhci_cmd_slot_control(sc, &xp->slot, 0);
		xhci_softdev_free(sc, xp->slot);
	}
}

/*
 * Transition a device from DEFAULT to ADDRESSED Slot state; this hook
 * is needed for Low/Full speed devices.
 *
 * See section 4.5.3 of USB 3.1 Specification for more details.
 */
int
xhci_setaddr(struct usbd_device *dev, int addr)
{
	struct xhci_softc *sc = (struct xhci_softc *)dev->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)dev->default_pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	int error;

	/* Root Hub */
	if (dev->depth == 0)
		return (0);

	KASSERT(xp->dci == 1);

	error = xhci_context_setup(sc, dev->default_pipe);
	if (error)
		return (error);

	error = xhci_cmd_set_address(sc, xp->slot, sdev->ictx_dma.paddr, 0);

#ifdef XHCI_DEBUG
	if (error == 0) {
		struct xhci_sctx *sctx;
		uint8_t addr;

		bus_dmamap_sync(sdev->octx_dma.tag, sdev->octx_dma.map, 0,
		    sc->sc_pagesize, BUS_DMASYNC_POSTREAD);

		/* Get output slot context. */
		sctx = (struct xhci_sctx *)sdev->octx_dma.vaddr;
		addr = XHCI_SCTX_DEV_ADDR(letoh32(sctx->state));
		error = (addr == 0);

		printf("%s: dev %d addr %d\n", DEVNAME(sc), xp->slot, addr);
	}
#endif

	return (error);
}

struct usbd_xfer *
xhci_allocx(struct usbd_bus *bus)
{
	return (pool_get(xhcixfer, PR_NOWAIT | PR_ZERO));
}

void
xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
{
	pool_put(xhcixfer, xfer);
}

int
xhci_scratchpad_alloc(struct xhci_softc *sc, int npage)
{
	uint64_t *pte;
	int error, i;

	/*
	 * Allocate the required entry for the table.
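	 *
	 * The scratchpad table is an array of 64-bit bus addresses, one
	 * per scratchpad page, that the controller finds through entry 0
	 * of the DCBAA.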
	 */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.table_dma,
	    (void **)&pte, npage * sizeof(uint64_t), XHCI_SPAD_TABLE_ALIGN,
	    sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Allocate pages. XXX does not need to be contiguous. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.pages_dma,
	    NULL, npage * sc->sc_pagesize, sc->sc_pagesize, 0);
	if (error) {
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
		return (ENOMEM);
	}

	for (i = 0; i < npage; i++) {
		pte[i] = htole64(
		    sc->sc_spad.pages_dma.paddr + (i * sc->sc_pagesize)
		);
	}

	bus_dmamap_sync(sc->sc_spad.table_dma.tag, sc->sc_spad.table_dma.map, 0,
	    npage * sizeof(uint64_t), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/* Entry 0 points to the table of scratchpad pointers. */
	sc->sc_dcbaa.segs[0] = htole64(sc->sc_spad.table_dma.paddr);
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
	    sizeof(uint64_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->sc_spad.npage = npage;

	return (0);
}

void
xhci_scratchpad_free(struct xhci_softc *sc)
{
	sc->sc_dcbaa.segs[0] = 0;
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
	    sizeof(uint64_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.pages_dma);
	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
}

int
xhci_ring_alloc(struct xhci_softc *sc, struct xhci_ring *ring, size_t ntrb,
    size_t alignment)
{
	size_t size;
	int error;

	size = ntrb * sizeof(struct xhci_trb);

	error = usbd_dma_contig_alloc(&sc->sc_bus, &ring->dma,
	    (void **)&ring->trbs, size, alignment, XHCI_RING_BOUNDARY);
	if (error)
		return (error);

	ring->ntrb = ntrb;

	xhci_ring_reset(sc, ring);

	return (0);
}

void
xhci_ring_free(struct xhci_softc *sc, struct xhci_ring *ring)
{
	usbd_dma_contig_free(&sc->sc_bus, &ring->dma);
}

void
xhci_ring_reset(struct xhci_softc *sc, struct xhci_ring *ring)
{
	size_t size;

	size = ring->ntrb * sizeof(struct xhci_trb);

	memset(ring->trbs, 0, size);

	ring->index = 0;
	ring->toggle = XHCI_TRB_CYCLE;

	/*
	 * Since all our rings use only one segment, at least for
	 * the moment, link their tail to their head.
	 */
	if (ring != &sc->sc_evt_ring) {
		struct xhci_trb *trb = &ring->trbs[ring->ntrb - 1];

		trb->trb_paddr = htole64(ring->dma.paddr);
		trb->trb_flags = htole32(XHCI_TRB_TYPE_LINK | XHCI_TRB_LINKSEG |
		    XHCI_TRB_CYCLE);
		bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,
		    BUS_DMASYNC_PREWRITE);
	} else
		bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

struct xhci_trb*
xhci_ring_consume(struct xhci_softc *sc, struct xhci_ring *ring)
{
	struct xhci_trb *trb = &ring->trbs[ring->index];

	KASSERT(ring->index < ring->ntrb);

	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD);

	/*
	 * Make sure this TRB can be consumed.
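	 *
	 * The producer (the controller, for the event ring) flips the
	 * cycle bit of every TRB it writes; a TRB whose cycle bit does
	 * not match our current toggle has not been written yet.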
	 */
	if (ring->toggle != (letoh32(trb->trb_flags) & XHCI_TRB_CYCLE))
		return (NULL);

	ring->index++;

	if (ring->index == ring->ntrb) {
		ring->index = 0;
		ring->toggle ^= 1;
	}

	return (trb);
}

struct xhci_trb*
xhci_ring_produce(struct xhci_softc *sc, struct xhci_ring *ring)
{
	struct xhci_trb *lnk, *trb;

	KASSERT(ring->index < ring->ntrb);

	/* Setup the link TRB after the previous TRB is done. */
	if (ring->index == 0) {
		lnk = &ring->trbs[ring->ntrb - 1];
		trb = &ring->trbs[ring->ntrb - 2];

		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
		    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD |
		    BUS_DMASYNC_POSTWRITE);

		lnk->trb_flags &= htole32(~XHCI_TRB_CHAIN);
		if (letoh32(trb->trb_flags) & XHCI_TRB_CHAIN)
			lnk->trb_flags |= htole32(XHCI_TRB_CHAIN);

		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
		    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);

		lnk->trb_flags ^= htole32(XHCI_TRB_CYCLE);

		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
		    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);
	}

	trb = &ring->trbs[ring->index++];
	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);

	/* Toggle cycle state of the link TRB and skip it. */
	if (ring->index == (ring->ntrb - 1)) {
		ring->index = 0;
		ring->toggle ^= 1;
	}

	return (trb);
}

struct xhci_trb *
xhci_xfer_get_trb(struct xhci_softc *sc, struct usbd_xfer *xfer,
    uint8_t *togglep, int last)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;

	KASSERT(xp->free_trbs >= 1);
	xp->free_trbs--;
	*togglep = xp->ring.toggle;

	switch (last) {
	case -1:	/* This will be a zero-length TD. */
		xp->pending_xfers[xp->ring.index] = NULL;
		xx->zerotd += 1;
		break;
	case 0:		/* This will be in a chain. */
		xp->pending_xfers[xp->ring.index] = xfer;
		xx->index = -2;
		xx->ntrb += 1;
		break;
	case 1:
		/*
		 * This will terminate a chain.
		 */
		xp->pending_xfers[xp->ring.index] = xfer;
		xx->index = xp->ring.index;
		xx->ntrb += 1;
		break;
	}

	xp->trb_processed[xp->ring.index] = TRB_PROCESSED_NO;

	return (xhci_ring_produce(sc, &xp->ring));
}

int
xhci_command_submit(struct xhci_softc *sc, struct xhci_trb *trb0, int timeout)
{
	struct xhci_trb *trb;
	int s, error = 0;

	KASSERT(timeout == 0 || sc->sc_cmd_trb == NULL);

	trb0->trb_flags |= htole32(sc->sc_cmd_ring.toggle);

	trb = xhci_ring_produce(sc, &sc->sc_cmd_ring);
	if (trb == NULL)
		return (EAGAIN);
	trb->trb_paddr = trb0->trb_paddr;
	trb->trb_status = trb0->trb_status;
	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	trb->trb_flags = trb0->trb_flags;
	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	if (timeout == 0) {
		XDWRITE4(sc, XHCI_DOORBELL(0), 0);
		return (0);
	}

	rw_assert_wrlock(&sc->sc_cmd_lock);

	s = splusb();
	sc->sc_cmd_trb = trb;
	XDWRITE4(sc, XHCI_DOORBELL(0), 0);
	error = tsleep_nsec(&sc->sc_cmd_trb, PZERO, "xhcicmd", timeout);
	if (error) {
#ifdef XHCI_DEBUG
		printf("%s: tsleep() = %d\n", __func__, error);
		printf("cmd = %d ", XHCI_TRB_TYPE(letoh32(trb->trb_flags)));
		xhci_dump_trb(trb);
#endif
		KASSERT(sc->sc_cmd_trb == trb || sc->sc_cmd_trb == NULL);
		/*
		 * Just because the timeout expired does not mean that
		 * the TRB isn't active anymore!  We could get an
		 * interrupt from this TRB later on and then wonder what
		 * to do with it.  We'd rather abort it.
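		 *
		 * Aborting stops the command ring; the controller should
		 * then post a completion event for the aborted command,
		 * after which sc_cmd_trb can safely be reused.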
		 */
		xhci_command_abort(sc);
		sc->sc_cmd_trb = NULL;
		splx(s);
		return (error);
	}
	splx(s);

	memcpy(trb0, &sc->sc_result_trb, sizeof(struct xhci_trb));

	if (XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)) == XHCI_CODE_SUCCESS)
		return (0);

#ifdef XHCI_DEBUG
	printf("%s: event error code=%d, result=%d\n", DEVNAME(sc),
	    XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)),
	    XHCI_TRB_TYPE(letoh32(trb0->trb_flags)));
	xhci_dump_trb(trb0);
#endif
	return (EIO);
}

int
xhci_command_abort(struct xhci_softc *sc)
{
	uint32_t reg;
	int i;

	reg = XOREAD4(sc, XHCI_CRCR_LO);
	if ((reg & XHCI_CRCR_LO_CRR) == 0)
		return (0);

	XOWRITE4(sc, XHCI_CRCR_LO, reg | XHCI_CRCR_LO_CA);
	XOWRITE4(sc, XHCI_CRCR_HI, 0);

	for (i = 0; i < 2500; i++) {
		DELAY(100);
		reg = XOREAD4(sc, XHCI_CRCR_LO) & XHCI_CRCR_LO_CRR;
		if (!reg)
			break;
	}

	if (reg) {
		printf("%s: command ring abort timeout\n", DEVNAME(sc));
		return (1);
	}

	return (0);
}

int
xhci_cmd_configure_ep(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s dev %u\n", DEVNAME(sc), __func__, slot));

	trb.trb_paddr = htole64(addr);
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_CONFIG_EP
	);

	rw_enter_write(&sc->sc_cmd_lock);
	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
	rw_exit_write(&sc->sc_cmd_lock);
	return (error);
}

int
xhci_cmd_stop_ep(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));

	trb.trb_paddr = 0;
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_STOP_EP
	);

	rw_enter_write(&sc->sc_cmd_lock);
	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
	rw_exit_write(&sc->sc_cmd_lock);
	return (error);
}

void
xhci_cmd_reset_ep_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
{
	struct xhci_trb trb;

	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));

	trb.trb_paddr = 0;
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_RESET_EP
	);

	xhci_command_submit(sc, &trb, 0);
}

void
xhci_cmd_set_tr_deq_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci,
    uint64_t addr)
{
	struct xhci_trb trb;

	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));

	trb.trb_paddr = htole64(addr);
	trb.trb_status = 0;
	trb.trb_flags = htole32(
	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_SET_TR_DEQ
	);

	xhci_command_submit(sc, &trb, 0);
}

int
xhci_cmd_slot_control(struct xhci_softc *sc, uint8_t *slotp, int enable)
{
	struct xhci_trb trb;
	int error;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	trb.trb_paddr = 0;
	trb.trb_status = 0;
	if (enable)
		trb.trb_flags = htole32(XHCI_CMD_ENABLE_SLOT);
	else
		trb.trb_flags = htole32(
		    XHCI_TRB_SET_SLOT(*slotp) | XHCI_CMD_DISABLE_SLOT
		);

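	/*
	 * sc_cmd_lock serializes synchronous command submitters;
	 * xhci_command_submit() allows only one outstanding sc_cmd_trb.
	 */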
	rw_enter_write(&sc->sc_cmd_lock);
2054 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2055 	rw_exit_write(&sc->sc_cmd_lock);
2056 	if (error != 0)
2057 		return (EIO);
2058 
2059 	if (enable)
2060 		*slotp = XHCI_TRB_GET_SLOT(letoh32(trb.trb_flags));
2061 
2062 	return (0);
2063 }
2064 
2065 int
2066 xhci_cmd_set_address(struct xhci_softc *sc, uint8_t slot, uint64_t addr,
2067     uint32_t bsr)
2068 {
2069 	struct xhci_trb trb;
2070 	int error;
2071 
2072 	DPRINTF(("%s: %s BSR=%u\n", DEVNAME(sc), __func__, bsr ? 1 : 0));
2073 
2074 	trb.trb_paddr = htole64(addr);
2075 	trb.trb_status = 0;
2076 	trb.trb_flags = htole32(
2077 	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_ADDRESS_DEVICE | bsr
2078 	);
2079 
2080 	rw_enter_write(&sc->sc_cmd_lock);
2081 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2082 	rw_exit_write(&sc->sc_cmd_lock);
2083 	return (error);
2084 }
2085 
2086 #ifdef XHCI_DEBUG
2087 int
2088 xhci_cmd_noop(struct xhci_softc *sc)
2089 {
2090 	struct xhci_trb trb;
2091 	int error;
2092 
2093 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
2094 
2095 	trb.trb_paddr = 0;
2096 	trb.trb_status = 0;
2097 	trb.trb_flags = htole32(XHCI_CMD_NOOP);
2098 
2099 	rw_enter_write(&sc->sc_cmd_lock);
2100 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2101 	rw_exit_write(&sc->sc_cmd_lock);
2102 	return (error);
2103 }
2104 #endif
2105 
2106 int
2107 xhci_softdev_alloc(struct xhci_softc *sc, uint8_t slot)
2108 {
2109 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];
2110 	int i, error;
2111 	uint8_t *kva;
2112 
2113 	/*
2114 	 * Set up the input context. Even with a 64 byte context size,
2115 	 * it fits into the smallest supported page size, so use that.
2116 	 */
2117 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->ictx_dma,
2118 	    (void **)&kva, sc->sc_pagesize, XHCI_ICTX_ALIGN, sc->sc_pagesize);
2119 	if (error)
2120 		return (ENOMEM);
2121 
2122 	sdev->input_ctx = (struct xhci_inctx *)kva;
2123 	sdev->slot_ctx = (struct xhci_sctx *)(kva + sc->sc_ctxsize);
2124 	for (i = 0; i < 31; i++)
2125 		sdev->ep_ctx[i] =
2126 		    (struct xhci_epctx *)(kva + (i + 2) * sc->sc_ctxsize);
2127 
2128 	DPRINTF(("%s: dev %d, input=%p slot=%p ep0=%p\n", DEVNAME(sc),
2129 	    slot, sdev->input_ctx, sdev->slot_ctx, sdev->ep_ctx[0]));
2130 
2131 	/* Set up the output context. */
2132 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->octx_dma, NULL,
2133 	    sc->sc_pagesize, XHCI_OCTX_ALIGN, sc->sc_pagesize);
2134 	if (error) {
2135 		usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);
2136 		return (ENOMEM);
2137 	}
2138 
2139 	memset(&sdev->pipes, 0, sizeof(sdev->pipes));
2140 
2141 	DPRINTF(("%s: dev %d, setting DCBAA to 0x%016llx\n", DEVNAME(sc),
2142 	    slot, (long long)sdev->octx_dma.paddr));
2143 
2144 	sc->sc_dcbaa.segs[slot] = htole64(sdev->octx_dma.paddr);
2145 	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
2146 	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREREAD |
2147 	    BUS_DMASYNC_PREWRITE);
2148 
2149 	return (0);
2150 }
2151 
2152 void
2153 xhci_softdev_free(struct xhci_softc *sc, uint8_t slot)
2154 {
2155 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];
2156 
2157 	sc->sc_dcbaa.segs[slot] = 0;
2158 	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
2159 	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREREAD |
2160 	    BUS_DMASYNC_PREWRITE);
2161 
2162 	usbd_dma_contig_free(&sc->sc_bus, &sdev->octx_dma);
2163 	usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);
2164 
2165 	memset(sdev, 0, sizeof(struct xhci_soft_dev));
2166 }
2167 
2168 /* Root hub descriptors.
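 * Static descriptors for the emulated root hub, handed back piece by
 * piece by xhci_root_ctrl_start() below.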
 */
2169 const usb_device_descriptor_t xhci_devd = {
2170 	USB_DEVICE_DESCRIPTOR_SIZE,
2171 	UDESC_DEVICE,		/* type */
2172 	{0x00, 0x03},		/* USB version */
2173 	UDCLASS_HUB,		/* class */
2174 	UDSUBCLASS_HUB,		/* subclass */
2175 	UDPROTO_HSHUBSTT,	/* protocol */
2176 	9,			/* max packet */
2177 	{0},{0},{0x00,0x01},	/* device id */
2178 	1,2,0,			/* string indexes */
2179 	1			/* # of configurations */
2180 };
2181 
2182 const usb_config_descriptor_t xhci_confd = {
2183 	USB_CONFIG_DESCRIPTOR_SIZE,
2184 	UDESC_CONFIG,
2185 	{USB_CONFIG_DESCRIPTOR_SIZE +
2186 	 USB_INTERFACE_DESCRIPTOR_SIZE +
2187 	 USB_ENDPOINT_DESCRIPTOR_SIZE},
2188 	1,
2189 	1,
2190 	0,
2191 	UC_BUS_POWERED | UC_SELF_POWERED,
2192 	0			/* max power */
2193 };
2194 
2195 const usb_interface_descriptor_t xhci_ifcd = {
2196 	USB_INTERFACE_DESCRIPTOR_SIZE,
2197 	UDESC_INTERFACE,
2198 	0,
2199 	0,
2200 	1,
2201 	UICLASS_HUB,
2202 	UISUBCLASS_HUB,
2203 	UIPROTO_HSHUBSTT,
2204 	0
2205 };
2206 
2207 const usb_endpoint_descriptor_t xhci_endpd = {
2208 	USB_ENDPOINT_DESCRIPTOR_SIZE,
2209 	UDESC_ENDPOINT,
2210 	UE_DIR_IN | XHCI_INTR_ENDPT,
2211 	UE_INTERRUPT,
2212 	{2, 0},			/* max 15 ports */
2213 	255
2214 };
2215 
2216 const usb_endpoint_ss_comp_descriptor_t xhci_endpcd = {
2217 	USB_ENDPOINT_SS_COMP_DESCRIPTOR_SIZE,
2218 	UDESC_ENDPOINT_SS_COMP,
2219 	0,
2220 	0,
2221 	{0, 0}
2222 };
2223 
2224 const usb_hub_descriptor_t xhci_hubd = {
2225 	USB_HUB_DESCRIPTOR_SIZE,
2226 	UDESC_SS_HUB,
2227 	0,
2228 	{0,0},
2229 	0,
2230 	0,
2231 	{0},
2232 };
2233 
2234 void
2235 xhci_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
2236 {
2237 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2238 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
2239 	int error;
2240 
2241 	splsoftassert(IPL_SOFTUSB);
2242 
2243 	DPRINTF(("%s: xfer=%p status=%s err=%s actlen=%d len=%d idx=%d\n",
2244 	    __func__, xfer, usbd_errstr(xfer->status), usbd_errstr(status),
2245 	    xfer->actlen, xfer->length, ((struct xhci_xfer *)xfer)->index));
2246 
2247 	/* XXX The stack should not call abort() in this case. */
2248 	if (sc->sc_bus.dying || xfer->status == USBD_NOT_STARTED) {
2249 		xfer->status = status;
2250 		timeout_del(&xfer->timeout_handle);
2251 		usb_rem_task(xfer->device, &xfer->abort_task);
2252 		usb_transfer_complete(xfer);
2253 		return;
2254 	}
2255 
2256 	/* Transfer is already done. */
2257 	if (xfer->status != USBD_IN_PROGRESS) {
2258 		DPRINTF(("%s: already done\n", __func__));
2259 		return;
2260 	}
2261 
2262 	/* Prevent any timeout from kicking in. */
2263 	timeout_del(&xfer->timeout_handle);
2264 	usb_rem_task(xfer->device, &xfer->abort_task);
2265 
2266 	/* Indicate that we are aborting this transfer. */
2267 	xp->halted = status;
2268 	xp->aborted_xfer = xfer;
2269 
2270 	/* Stop the endpoint and wait until the hardware says so. */
2271 	if (xhci_cmd_stop_ep(sc, xp->slot, xp->dci)) {
2272 		DPRINTF(("%s: error stopping endpoint\n", DEVNAME(sc)));
2273 		/* Assume the device is gone. */
2274 		xp->halted = 0;
2275 		xp->aborted_xfer = NULL;
2276 		xfer->status = status;
2277 		usb_transfer_complete(xfer);
2278 		return;
2279 	}
2280 
2281 	/*
2282 	 * The transfer was already completed when we stopped the
2283 	 * endpoint; no need to move the dequeue pointer past its
2284 	 * TRBs.
2285 	 */
2286 	if (xp->aborted_xfer == NULL) {
2287 		DPRINTF(("%s: done before stopping the endpoint\n", __func__));
2288 		xp->halted = 0;
2289 		return;
2290 	}
2291 
2292 	/*
2293 	 * At this stage the endpoint has been stopped, so update its
2294 	 * dequeue pointer past the last TRB of the transfer.
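	 * xhci_cmd_set_tr_deq_async() points the hardware at
	 * DEQPTR(xp->ring), the ring's current enqueue position, ORing
	 * in the ring's cycle state (xp->ring.toggle).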
2295 * 2296 * Note: This assumes that only one transfer per endpoint has 2297 * pending TRBs on the ring. 2298 */ 2299 xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci, 2300 DEQPTR(xp->ring) | xp->ring.toggle); 2301 error = tsleep_nsec(xp, PZERO, "xhciab", XHCI_CMD_TIMEOUT); 2302 if (error) 2303 printf("%s: timeout aborting transfer\n", DEVNAME(sc)); 2304 } 2305 2306 void 2307 xhci_timeout(void *addr) 2308 { 2309 struct usbd_xfer *xfer = addr; 2310 struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; 2311 2312 if (sc->sc_bus.dying) { 2313 xhci_timeout_task(addr); 2314 return; 2315 } 2316 2317 usb_init_task(&xfer->abort_task, xhci_timeout_task, addr, 2318 USB_TASK_TYPE_ABORT); 2319 usb_add_task(xfer->device, &xfer->abort_task); 2320 } 2321 2322 void 2323 xhci_timeout_task(void *addr) 2324 { 2325 struct usbd_xfer *xfer = addr; 2326 int s; 2327 2328 s = splusb(); 2329 xhci_abort_xfer(xfer, USBD_TIMEOUT); 2330 splx(s); 2331 } 2332 2333 usbd_status 2334 xhci_root_ctrl_transfer(struct usbd_xfer *xfer) 2335 { 2336 usbd_status err; 2337 2338 err = usb_insert_transfer(xfer); 2339 if (err) 2340 return (err); 2341 2342 return (xhci_root_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue))); 2343 } 2344 2345 usbd_status 2346 xhci_root_ctrl_start(struct usbd_xfer *xfer) 2347 { 2348 struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; 2349 usb_port_status_t ps; 2350 usb_device_request_t *req; 2351 void *buf = NULL; 2352 usb_device_descriptor_t devd; 2353 usb_hub_descriptor_t hubd; 2354 usbd_status err; 2355 int s, len, value, index; 2356 int l, totlen = 0; 2357 int port, i; 2358 uint32_t v; 2359 2360 KASSERT(xfer->rqflags & URQ_REQUEST); 2361 2362 if (sc->sc_bus.dying) 2363 return (USBD_IOERROR); 2364 2365 req = &xfer->request; 2366 2367 DPRINTFN(4,("%s: type=0x%02x request=%02x\n", __func__, 2368 req->bmRequestType, req->bRequest)); 2369 2370 len = UGETW(req->wLength); 2371 value = UGETW(req->wValue); 2372 index = UGETW(req->wIndex); 2373 2374 if (len != 0) 2375 buf = KERNADDR(&xfer->dmabuf, 0); 2376 2377 #define C(x,y) ((x) | ((y) << 8)) 2378 switch(C(req->bRequest, req->bmRequestType)) { 2379 case C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE): 2380 case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE): 2381 case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT): 2382 /* 2383 * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops 2384 * for the integrated root hub. 2385 */ 2386 break; 2387 case C(UR_GET_CONFIG, UT_READ_DEVICE): 2388 if (len > 0) { 2389 *(uint8_t *)buf = sc->sc_conf; 2390 totlen = 1; 2391 } 2392 break; 2393 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE): 2394 DPRINTFN(8,("xhci_root_ctrl_start: wValue=0x%04x\n", value)); 2395 switch(value >> 8) { 2396 case UDESC_DEVICE: 2397 if ((value & 0xff) != 0) { 2398 err = USBD_IOERROR; 2399 goto ret; 2400 } 2401 devd = xhci_devd; 2402 USETW(devd.idVendor, sc->sc_id_vendor); 2403 totlen = l = min(len, USB_DEVICE_DESCRIPTOR_SIZE); 2404 memcpy(buf, &devd, l); 2405 break; 2406 /* 2407 * We can't really operate at another speed, but the spec says 2408 * we need this descriptor. 
2409 */ 2410 case UDESC_OTHER_SPEED_CONFIGURATION: 2411 case UDESC_CONFIG: 2412 if ((value & 0xff) != 0) { 2413 err = USBD_IOERROR; 2414 goto ret; 2415 } 2416 totlen = l = min(len, USB_CONFIG_DESCRIPTOR_SIZE); 2417 memcpy(buf, &xhci_confd, l); 2418 ((usb_config_descriptor_t *)buf)->bDescriptorType = 2419 value >> 8; 2420 buf = (char *)buf + l; 2421 len -= l; 2422 l = min(len, USB_INTERFACE_DESCRIPTOR_SIZE); 2423 totlen += l; 2424 memcpy(buf, &xhci_ifcd, l); 2425 buf = (char *)buf + l; 2426 len -= l; 2427 l = min(len, USB_ENDPOINT_DESCRIPTOR_SIZE); 2428 totlen += l; 2429 memcpy(buf, &xhci_endpd, l); 2430 break; 2431 case UDESC_STRING: 2432 if (len == 0) 2433 break; 2434 *(u_int8_t *)buf = 0; 2435 totlen = 1; 2436 switch (value & 0xff) { 2437 case 0: /* Language table */ 2438 totlen = usbd_str(buf, len, "\001"); 2439 break; 2440 case 1: /* Vendor */ 2441 totlen = usbd_str(buf, len, sc->sc_vendor); 2442 break; 2443 case 2: /* Product */ 2444 totlen = usbd_str(buf, len, "xHCI root hub"); 2445 break; 2446 } 2447 break; 2448 default: 2449 err = USBD_IOERROR; 2450 goto ret; 2451 } 2452 break; 2453 case C(UR_GET_INTERFACE, UT_READ_INTERFACE): 2454 if (len > 0) { 2455 *(uint8_t *)buf = 0; 2456 totlen = 1; 2457 } 2458 break; 2459 case C(UR_GET_STATUS, UT_READ_DEVICE): 2460 if (len > 1) { 2461 USETW(((usb_status_t *)buf)->wStatus,UDS_SELF_POWERED); 2462 totlen = 2; 2463 } 2464 break; 2465 case C(UR_GET_STATUS, UT_READ_INTERFACE): 2466 case C(UR_GET_STATUS, UT_READ_ENDPOINT): 2467 if (len > 1) { 2468 USETW(((usb_status_t *)buf)->wStatus, 0); 2469 totlen = 2; 2470 } 2471 break; 2472 case C(UR_SET_ADDRESS, UT_WRITE_DEVICE): 2473 if (value >= USB_MAX_DEVICES) { 2474 err = USBD_IOERROR; 2475 goto ret; 2476 } 2477 break; 2478 case C(UR_SET_CONFIG, UT_WRITE_DEVICE): 2479 if (value != 0 && value != 1) { 2480 err = USBD_IOERROR; 2481 goto ret; 2482 } 2483 sc->sc_conf = value; 2484 break; 2485 case C(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE): 2486 break; 2487 case C(UR_SET_FEATURE, UT_WRITE_DEVICE): 2488 case C(UR_SET_FEATURE, UT_WRITE_INTERFACE): 2489 case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT): 2490 err = USBD_IOERROR; 2491 goto ret; 2492 case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE): 2493 break; 2494 case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT): 2495 break; 2496 /* Hub requests */ 2497 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE): 2498 break; 2499 case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER): 2500 DPRINTFN(8, ("xhci_root_ctrl_start: UR_CLEAR_PORT_FEATURE " 2501 "port=%d feature=%d\n", index, value)); 2502 if (index < 1 || index > sc->sc_noport) { 2503 err = USBD_IOERROR; 2504 goto ret; 2505 } 2506 port = XHCI_PORTSC(index); 2507 v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR; 2508 switch (value) { 2509 case UHF_PORT_ENABLE: 2510 XOWRITE4(sc, port, v | XHCI_PS_PED); 2511 break; 2512 case UHF_PORT_SUSPEND: 2513 /* TODO */ 2514 break; 2515 case UHF_PORT_POWER: 2516 XOWRITE4(sc, port, v & ~XHCI_PS_PP); 2517 break; 2518 case UHF_PORT_INDICATOR: 2519 XOWRITE4(sc, port, v & ~XHCI_PS_SET_PIC(3)); 2520 break; 2521 case UHF_C_PORT_CONNECTION: 2522 XOWRITE4(sc, port, v | XHCI_PS_CSC); 2523 break; 2524 case UHF_C_PORT_ENABLE: 2525 XOWRITE4(sc, port, v | XHCI_PS_PEC); 2526 break; 2527 case UHF_C_PORT_SUSPEND: 2528 case UHF_C_PORT_LINK_STATE: 2529 XOWRITE4(sc, port, v | XHCI_PS_PLC); 2530 break; 2531 case UHF_C_PORT_OVER_CURRENT: 2532 XOWRITE4(sc, port, v | XHCI_PS_OCC); 2533 break; 2534 case UHF_C_PORT_RESET: 2535 XOWRITE4(sc, port, v | XHCI_PS_PRC); 2536 break; 2537 case UHF_C_BH_PORT_RESET: 2538 XOWRITE4(sc, port, v | XHCI_PS_WRC); 
2539 break; 2540 default: 2541 err = USBD_IOERROR; 2542 goto ret; 2543 } 2544 break; 2545 2546 case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE): 2547 if (len == 0) 2548 break; 2549 if ((value & 0xff) != 0) { 2550 err = USBD_IOERROR; 2551 goto ret; 2552 } 2553 v = XREAD4(sc, XHCI_HCCPARAMS); 2554 hubd = xhci_hubd; 2555 hubd.bNbrPorts = sc->sc_noport; 2556 USETW(hubd.wHubCharacteristics, 2557 (XHCI_HCC_PPC(v) ? UHD_PWR_INDIVIDUAL : UHD_PWR_GANGED) | 2558 (XHCI_HCC_PIND(v) ? UHD_PORT_IND : 0)); 2559 hubd.bPwrOn2PwrGood = 10; /* xHCI section 5.4.9 */ 2560 for (i = 1; i <= sc->sc_noport; i++) { 2561 v = XOREAD4(sc, XHCI_PORTSC(i)); 2562 if (v & XHCI_PS_DR) 2563 hubd.DeviceRemovable[i / 8] |= 1U << (i % 8); 2564 } 2565 hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i; 2566 l = min(len, hubd.bDescLength); 2567 totlen = l; 2568 memcpy(buf, &hubd, l); 2569 break; 2570 case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE): 2571 if (len != 16) { 2572 err = USBD_IOERROR; 2573 goto ret; 2574 } 2575 memset(buf, 0, len); 2576 totlen = len; 2577 break; 2578 case C(UR_GET_STATUS, UT_READ_CLASS_OTHER): 2579 DPRINTFN(8,("xhci_root_ctrl_start: get port status i=%d\n", 2580 index)); 2581 if (index < 1 || index > sc->sc_noport) { 2582 err = USBD_IOERROR; 2583 goto ret; 2584 } 2585 if (len != 4) { 2586 err = USBD_IOERROR; 2587 goto ret; 2588 } 2589 v = XOREAD4(sc, XHCI_PORTSC(index)); 2590 DPRINTFN(8,("xhci_root_ctrl_start: port status=0x%04x\n", v)); 2591 i = UPS_PORT_LS_SET(XHCI_PS_GET_PLS(v)); 2592 switch (XHCI_PS_SPEED(v)) { 2593 case XHCI_SPEED_FULL: 2594 i |= UPS_FULL_SPEED; 2595 break; 2596 case XHCI_SPEED_LOW: 2597 i |= UPS_LOW_SPEED; 2598 break; 2599 case XHCI_SPEED_HIGH: 2600 i |= UPS_HIGH_SPEED; 2601 break; 2602 case XHCI_SPEED_SUPER: 2603 default: 2604 break; 2605 } 2606 if (v & XHCI_PS_CCS) i |= UPS_CURRENT_CONNECT_STATUS; 2607 if (v & XHCI_PS_PED) i |= UPS_PORT_ENABLED; 2608 if (v & XHCI_PS_OCA) i |= UPS_OVERCURRENT_INDICATOR; 2609 if (v & XHCI_PS_PR) i |= UPS_RESET; 2610 if (v & XHCI_PS_PP) { 2611 if (XHCI_PS_SPEED(v) >= XHCI_SPEED_FULL && 2612 XHCI_PS_SPEED(v) <= XHCI_SPEED_HIGH) 2613 i |= UPS_PORT_POWER; 2614 else 2615 i |= UPS_PORT_POWER_SS; 2616 } 2617 USETW(ps.wPortStatus, i); 2618 i = 0; 2619 if (v & XHCI_PS_CSC) i |= UPS_C_CONNECT_STATUS; 2620 if (v & XHCI_PS_PEC) i |= UPS_C_PORT_ENABLED; 2621 if (v & XHCI_PS_OCC) i |= UPS_C_OVERCURRENT_INDICATOR; 2622 if (v & XHCI_PS_PRC) i |= UPS_C_PORT_RESET; 2623 if (v & XHCI_PS_WRC) i |= UPS_C_BH_PORT_RESET; 2624 if (v & XHCI_PS_PLC) i |= UPS_C_PORT_LINK_STATE; 2625 if (v & XHCI_PS_CEC) i |= UPS_C_PORT_CONFIG_ERROR; 2626 USETW(ps.wPortChange, i); 2627 l = min(len, sizeof ps); 2628 memcpy(buf, &ps, l); 2629 totlen = l; 2630 break; 2631 case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE): 2632 err = USBD_IOERROR; 2633 goto ret; 2634 case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE): 2635 break; 2636 case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER): 2637 2638 i = index >> 8; 2639 index &= 0x00ff; 2640 2641 if (index < 1 || index > sc->sc_noport) { 2642 err = USBD_IOERROR; 2643 goto ret; 2644 } 2645 port = XHCI_PORTSC(index); 2646 v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR; 2647 2648 switch (value) { 2649 case UHF_PORT_ENABLE: 2650 XOWRITE4(sc, port, v | XHCI_PS_PED); 2651 break; 2652 case UHF_PORT_SUSPEND: 2653 DPRINTFN(6, ("suspend port %u (LPM=%u)\n", index, i)); 2654 if (XHCI_PS_SPEED(v) == XHCI_SPEED_SUPER) { 2655 err = USBD_IOERROR; 2656 goto ret; 2657 } 2658 XOWRITE4(sc, port, v | 2659 XHCI_PS_SET_PLS(i ? 
2 /* LPM */ : 3) | XHCI_PS_LWS); 2660 break; 2661 case UHF_PORT_RESET: 2662 DPRINTFN(6, ("reset port %d\n", index)); 2663 XOWRITE4(sc, port, v | XHCI_PS_PR); 2664 break; 2665 case UHF_PORT_POWER: 2666 DPRINTFN(3, ("set port power %d\n", index)); 2667 XOWRITE4(sc, port, v | XHCI_PS_PP); 2668 break; 2669 case UHF_PORT_INDICATOR: 2670 DPRINTFN(3, ("set port indicator %d\n", index)); 2671 2672 v &= ~XHCI_PS_SET_PIC(3); 2673 v |= XHCI_PS_SET_PIC(1); 2674 2675 XOWRITE4(sc, port, v); 2676 break; 2677 case UHF_C_PORT_RESET: 2678 XOWRITE4(sc, port, v | XHCI_PS_PRC); 2679 break; 2680 case UHF_C_BH_PORT_RESET: 2681 XOWRITE4(sc, port, v | XHCI_PS_WRC); 2682 break; 2683 default: 2684 err = USBD_IOERROR; 2685 goto ret; 2686 } 2687 break; 2688 case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER): 2689 case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER): 2690 case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER): 2691 case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER): 2692 break; 2693 default: 2694 err = USBD_IOERROR; 2695 goto ret; 2696 } 2697 xfer->actlen = totlen; 2698 err = USBD_NORMAL_COMPLETION; 2699 ret: 2700 xfer->status = err; 2701 s = splusb(); 2702 usb_transfer_complete(xfer); 2703 splx(s); 2704 return (err); 2705 } 2706 2707 2708 void 2709 xhci_noop(struct usbd_xfer *xfer) 2710 { 2711 } 2712 2713 2714 usbd_status 2715 xhci_root_intr_transfer(struct usbd_xfer *xfer) 2716 { 2717 usbd_status err; 2718 2719 err = usb_insert_transfer(xfer); 2720 if (err) 2721 return (err); 2722 2723 return (xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue))); 2724 } 2725 2726 usbd_status 2727 xhci_root_intr_start(struct usbd_xfer *xfer) 2728 { 2729 struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; 2730 2731 if (sc->sc_bus.dying) 2732 return (USBD_IOERROR); 2733 2734 sc->sc_intrxfer = xfer; 2735 2736 return (USBD_IN_PROGRESS); 2737 } 2738 2739 void 2740 xhci_root_intr_abort(struct usbd_xfer *xfer) 2741 { 2742 struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; 2743 int s; 2744 2745 sc->sc_intrxfer = NULL; 2746 2747 xfer->status = USBD_CANCELLED; 2748 s = splusb(); 2749 usb_transfer_complete(xfer); 2750 splx(s); 2751 } 2752 2753 void 2754 xhci_root_intr_done(struct usbd_xfer *xfer) 2755 { 2756 } 2757 2758 /* 2759 * Number of packets remaining in the TD after the corresponding TRB. 2760 * 2761 * Section 4.11.2.4 of xHCI specification r1.1. 2762 */ 2763 static inline uint32_t 2764 xhci_xfer_tdsize(struct usbd_xfer *xfer, uint32_t remain, uint32_t len) 2765 { 2766 uint32_t npkt, mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize); 2767 2768 if (len == 0) 2769 return XHCI_TRB_TDREM(0); 2770 2771 npkt = howmany(remain - len, UE_GET_SIZE(mps)); 2772 if (npkt > 31) 2773 npkt = 31; 2774 2775 return XHCI_TRB_TDREM(npkt); 2776 } 2777 2778 /* 2779 * Transfer Burst Count (TBC) and Transfer Last Burst Packet Count (TLBPC). 2780 * 2781 * Section 4.11.2.3 of xHCI specification r1.1. 2782 */ 2783 static inline uint32_t 2784 xhci_xfer_tbc(struct usbd_xfer *xfer, uint32_t len, uint32_t *tlbpc) 2785 { 2786 uint32_t mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize); 2787 uint32_t maxb, tdpc, residue, tbc; 2788 2789 /* Transfer Descriptor Packet Count, section 4.14.1. 
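	 * e.g. a 3000 byte frame with a 1024 byte max packet size gives
	 * tdpc = howmany(3000, 1024) = 3; with a hypothetical max burst
	 * of 2 (3 packets per burst), tbc = howmany(3, 3) - 1 = 0 and,
	 * at SuperSpeed, tlbpc = maxb = 2 since 3 % 3 == 0.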
*/ 2790 tdpc = howmany(len, UE_GET_SIZE(mps)); 2791 if (tdpc == 0) 2792 tdpc = 1; 2793 2794 /* Transfer Burst Count */ 2795 maxb = xhci_pipe_maxburst(xfer->pipe); 2796 tbc = howmany(tdpc, maxb + 1) - 1; 2797 2798 /* Transfer Last Burst Packet Count */ 2799 if (xfer->device->speed == USB_SPEED_SUPER) { 2800 residue = tdpc % (maxb + 1); 2801 if (residue == 0) 2802 *tlbpc = maxb; 2803 else 2804 *tlbpc = residue - 1; 2805 } else { 2806 *tlbpc = tdpc - 1; 2807 } 2808 2809 return (tbc); 2810 } 2811 2812 usbd_status 2813 xhci_device_ctrl_transfer(struct usbd_xfer *xfer) 2814 { 2815 usbd_status err; 2816 2817 err = usb_insert_transfer(xfer); 2818 if (err) 2819 return (err); 2820 2821 return (xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue))); 2822 } 2823 2824 usbd_status 2825 xhci_device_ctrl_start(struct usbd_xfer *xfer) 2826 { 2827 struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; 2828 struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe; 2829 struct xhci_trb *trb0, *trb; 2830 uint32_t flags, len = UGETW(xfer->request.wLength); 2831 uint8_t toggle; 2832 int s; 2833 2834 KASSERT(xfer->rqflags & URQ_REQUEST); 2835 2836 if (sc->sc_bus.dying || xp->halted) 2837 return (USBD_IOERROR); 2838 2839 if (xp->free_trbs < 3) 2840 return (USBD_NOMEM); 2841 2842 if (len != 0) 2843 usb_syncmem(&xfer->dmabuf, 0, len, 2844 usbd_xfer_isread(xfer) ? 2845 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 2846 2847 /* We'll toggle the setup TRB once we're finished with the stages. */ 2848 trb0 = xhci_xfer_get_trb(sc, xfer, &toggle, 0); 2849 2850 flags = XHCI_TRB_TYPE_SETUP | XHCI_TRB_IDT | (toggle ^ 1); 2851 if (len != 0) { 2852 if (usbd_xfer_isread(xfer)) 2853 flags |= XHCI_TRB_TRT_IN; 2854 else 2855 flags |= XHCI_TRB_TRT_OUT; 2856 } 2857 2858 memcpy(&trb0->trb_paddr, &xfer->request, sizeof(trb0->trb_paddr)); 2859 trb0->trb_status = htole32(XHCI_TRB_INTR(0) | XHCI_TRB_LEN(8)); 2860 trb0->trb_flags = htole32(flags); 2861 bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map, 2862 TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb), 2863 BUS_DMASYNC_PREWRITE); 2864 2865 /* Data TRB */ 2866 if (len != 0) { 2867 trb = xhci_xfer_get_trb(sc, xfer, &toggle, 0); 2868 2869 flags = XHCI_TRB_TYPE_DATA | toggle; 2870 if (usbd_xfer_isread(xfer)) 2871 flags |= XHCI_TRB_DIR_IN | XHCI_TRB_ISP; 2872 2873 trb->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0)); 2874 trb->trb_status = htole32( 2875 XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) | 2876 xhci_xfer_tdsize(xfer, len, len) 2877 ); 2878 trb->trb_flags = htole32(flags); 2879 2880 bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map, 2881 TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb), 2882 BUS_DMASYNC_PREWRITE); 2883 } 2884 2885 /* Status TRB */ 2886 trb = xhci_xfer_get_trb(sc, xfer, &toggle, 1); 2887 2888 flags = XHCI_TRB_TYPE_STATUS | XHCI_TRB_IOC | toggle; 2889 if (len == 0 || !usbd_xfer_isread(xfer)) 2890 flags |= XHCI_TRB_DIR_IN; 2891 2892 trb->trb_paddr = 0; 2893 trb->trb_status = htole32(XHCI_TRB_INTR(0)); 2894 trb->trb_flags = htole32(flags); 2895 2896 bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map, 2897 TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb), 2898 BUS_DMASYNC_PREWRITE); 2899 2900 /* Setup TRB */ 2901 trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE); 2902 bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map, 2903 TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb), 2904 BUS_DMASYNC_PREWRITE); 2905 2906 s = splusb(); 2907 XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci); 2908 2909 xfer->status = USBD_IN_PROGRESS; 2910 if (xfer->timeout && !sc->sc_bus.use_polling) { 2911 
timeout_del(&xfer->timeout_handle); 2912 timeout_set(&xfer->timeout_handle, xhci_timeout, xfer); 2913 timeout_add_msec(&xfer->timeout_handle, xfer->timeout); 2914 } 2915 splx(s); 2916 2917 return (USBD_IN_PROGRESS); 2918 } 2919 2920 void 2921 xhci_device_ctrl_abort(struct usbd_xfer *xfer) 2922 { 2923 xhci_abort_xfer(xfer, USBD_CANCELLED); 2924 } 2925 2926 usbd_status 2927 xhci_device_generic_transfer(struct usbd_xfer *xfer) 2928 { 2929 usbd_status err; 2930 2931 err = usb_insert_transfer(xfer); 2932 if (err) 2933 return (err); 2934 2935 return (xhci_device_generic_start(SIMPLEQ_FIRST(&xfer->pipe->queue))); 2936 } 2937 2938 usbd_status 2939 xhci_device_generic_start(struct usbd_xfer *xfer) 2940 { 2941 struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus; 2942 struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe; 2943 struct xhci_trb *trb0, *trb; 2944 uint32_t len, remain, flags; 2945 uint32_t mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize); 2946 uint64_t paddr = DMAADDR(&xfer->dmabuf, 0); 2947 uint8_t toggle; 2948 int s, i, ntrb, zerotd = 0; 2949 2950 KASSERT(!(xfer->rqflags & URQ_REQUEST)); 2951 2952 if (sc->sc_bus.dying || xp->halted) 2953 return (USBD_IOERROR); 2954 2955 /* How many TRBs do we need for this transfer? */ 2956 ntrb = howmany(xfer->length, XHCI_TRB_MAXSIZE); 2957 2958 /* If the buffer crosses a 64k boundary, we need one more. */ 2959 len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1)); 2960 if (len < xfer->length) 2961 ntrb = howmany(xfer->length - len, XHCI_TRB_MAXSIZE) + 1; 2962 else 2963 len = xfer->length; 2964 2965 /* If we need to append a zero length packet, we need one more. */ 2966 if ((xfer->flags & USBD_FORCE_SHORT_XFER || xfer->length == 0) && 2967 (xfer->length % UE_GET_SIZE(mps) == 0)) 2968 zerotd = 1; 2969 2970 if (xp->free_trbs < (ntrb + zerotd)) 2971 return (USBD_NOMEM); 2972 2973 usb_syncmem(&xfer->dmabuf, 0, xfer->length, 2974 usbd_xfer_isread(xfer) ? 2975 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 2976 2977 /* We'll toggle the first TRB once we're finished with the chain. */ 2978 trb0 = xhci_xfer_get_trb(sc, xfer, &toggle, (ntrb == 1)); 2979 flags = XHCI_TRB_TYPE_NORMAL | (toggle ^ 1); 2980 if (usbd_xfer_isread(xfer)) 2981 flags |= XHCI_TRB_ISP; 2982 flags |= (ntrb == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN; 2983 2984 trb0->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0)); 2985 trb0->trb_status = htole32( 2986 XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) | 2987 xhci_xfer_tdsize(xfer, xfer->length, len) 2988 ); 2989 trb0->trb_flags = htole32(flags); 2990 bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map, 2991 TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb), 2992 BUS_DMASYNC_PREWRITE); 2993 2994 remain = xfer->length - len; 2995 paddr += len; 2996 2997 /* Chain more TRBs if needed. */ 2998 for (i = ntrb - 1; i > 0; i--) { 2999 len = min(remain, XHCI_TRB_MAXSIZE); 3000 3001 /* Next (or Last) TRB. */ 3002 trb = xhci_xfer_get_trb(sc, xfer, &toggle, (i == 1)); 3003 flags = XHCI_TRB_TYPE_NORMAL | toggle; 3004 if (usbd_xfer_isread(xfer)) 3005 flags |= XHCI_TRB_ISP; 3006 flags |= (i == 1) ? 
XHCI_TRB_IOC : XHCI_TRB_CHAIN;
3007 
3008 		trb->trb_paddr = htole64(paddr);
3009 		trb->trb_status = htole32(
3010 		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
3011 		    xhci_xfer_tdsize(xfer, remain, len)
3012 		);
3013 		trb->trb_flags = htole32(flags);
3014 
3015 		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3016 		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
3017 		    BUS_DMASYNC_PREWRITE);
3018 
3019 		remain -= len;
3020 		paddr += len;
3021 	}
3022 
3023 	/* Do we need to issue a zero length transfer? */
3024 	if (zerotd == 1) {
3025 		trb = xhci_xfer_get_trb(sc, xfer, &toggle, -1);
3026 		trb->trb_paddr = 0;
3027 		trb->trb_status = 0;
3028 		trb->trb_flags = htole32(XHCI_TRB_TYPE_NORMAL | XHCI_TRB_IOC | toggle);
3029 		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3030 		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
3031 		    BUS_DMASYNC_PREWRITE);
3032 	}
3033 
3034 	/* First TRB. */
3035 	trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE);
3036 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3037 	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
3038 	    BUS_DMASYNC_PREWRITE);
3039 
3040 	s = splusb();
3041 	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);
3042 
3043 	xfer->status = USBD_IN_PROGRESS;
3044 	if (xfer->timeout && !sc->sc_bus.use_polling) {
3045 		timeout_del(&xfer->timeout_handle);
3046 		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
3047 		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
3048 	}
3049 	splx(s);
3050 
3051 	return (USBD_IN_PROGRESS);
3052 }
3053 
3054 void
3055 xhci_device_generic_done(struct usbd_xfer *xfer)
3056 {
3057 	/* Only happens with interrupt transfers. */
3058 	if (xfer->pipe->repeat) {
3059 		xfer->actlen = 0;
3060 		xhci_device_generic_start(xfer);
3061 	}
3062 }
3063 
3064 void
3065 xhci_device_generic_abort(struct usbd_xfer *xfer)
3066 {
3067 	KASSERT(!xfer->pipe->repeat || xfer->pipe->intrxfer == xfer);
3068 
3069 	xhci_abort_xfer(xfer, USBD_CANCELLED);
3070 }
3071 
3072 usbd_status
3073 xhci_device_isoc_transfer(struct usbd_xfer *xfer)
3074 {
3075 	usbd_status err;
3076 
3077 	err = usb_insert_transfer(xfer);
3078 	if (err && err != USBD_IN_PROGRESS)
3079 		return (err);
3080 
3081 	return (xhci_device_isoc_start(xfer));
3082 }
3083 
3084 usbd_status
3085 xhci_device_isoc_start(struct usbd_xfer *xfer)
3086 {
3087 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
3088 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
3089 	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
3090 	struct xhci_trb *trb0, *trb;
3091 	uint32_t len, remain, flags;
3092 	uint64_t paddr;
3093 	uint32_t tbc, tlbpc;
3094 	int s, i, j, ntrb = xfer->nframes;
3095 	uint8_t toggle;
3096 
3097 	KASSERT(!(xfer->rqflags & URQ_REQUEST));
3098 
3099 	/*
3100 	 * To allow continuous transfers, xhci_device_isoc_transfer()
3101 	 * above starts all transfers immediately. However,
3102 	 * usbd_start_next() will still call this function when another
3103 	 * xfer completes, so check whether this xfer is already in
3104 	 * progress.
3105 	 */
3106 	if (xx->ntrb > 0)
3107 		return (USBD_IN_PROGRESS);
3108 
3109 	if (sc->sc_bus.dying || xp->halted)
3110 		return (USBD_IOERROR);
3111 
3112 	/* Why would you do that anyway? */
3113 	if (sc->sc_bus.use_polling)
3114 		return (USBD_INVAL);
3115 
3116 	paddr = DMAADDR(&xfer->dmabuf, 0);
3117 
3118 	/* How many TRBs do we need for all transfers? */
3119 	for (i = 0, ntrb = 0; i < xfer->nframes; i++) {
3120 		/* How many TRBs do we need for this transfer? */
3121 		ntrb += howmany(xfer->frlengths[i], XHCI_TRB_MAXSIZE);
3122 
3123 		/* If the buffer crosses a 64k boundary, we need one more.
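		 * e.g. with XHCI_TRB_MAXSIZE = 64k, a frame starting at
		 * paddr 0x1f800 leaves only 0x10000 - 0xf800 = 0x800
		 * bytes for its first TRB.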
*/ 3123 len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1)); 3124 if (len < xfer->frlengths[i]) 3125 ntrb++; 3126 3127 paddr += xfer->frlengths[i]; 3128 } 3129 3130 if (xp->free_trbs < ntrb) 3131 return (USBD_NOMEM); 3132 3133 usb_syncmem(&xfer->dmabuf, 0, xfer->length, 3134 usbd_xfer_isread(xfer) ? 3135 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 3136 3137 paddr = DMAADDR(&xfer->dmabuf, 0); 3138 3139 for (i = 0, trb0 = NULL; i < xfer->nframes; i++) { 3140 /* How many TRBs do we need for this transfer? */ 3141 ntrb = howmany(xfer->frlengths[i], XHCI_TRB_MAXSIZE); 3142 3143 /* If the buffer crosses a 64k boundary, we need one more. */ 3144 len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1)); 3145 if (len < xfer->frlengths[i]) 3146 ntrb++; 3147 else 3148 len = xfer->frlengths[i]; 3149 3150 KASSERT(ntrb < 3); 3151 3152 /* 3153 * We'll commit the first TRB once we're finished with the 3154 * chain. 3155 */ 3156 trb = xhci_xfer_get_trb(sc, xfer, &toggle, (ntrb == 1)); 3157 3158 DPRINTFN(4, ("%s:%d: ring %p trb0_idx %lu ntrb %d paddr %llx " 3159 "len %u\n", __func__, __LINE__, 3160 &xp->ring.trbs[0], (trb - &xp->ring.trbs[0]), ntrb, paddr, 3161 len)); 3162 3163 /* Record the first TRB so we can toggle later. */ 3164 if (trb0 == NULL) { 3165 trb0 = trb; 3166 toggle ^= 1; 3167 } 3168 3169 flags = XHCI_TRB_TYPE_ISOCH | XHCI_TRB_SIA | toggle; 3170 if (usbd_xfer_isread(xfer)) 3171 flags |= XHCI_TRB_ISP; 3172 flags |= (ntrb == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN; 3173 3174 tbc = xhci_xfer_tbc(xfer, xfer->frlengths[i], &tlbpc); 3175 flags |= XHCI_TRB_ISOC_TBC(tbc) | XHCI_TRB_ISOC_TLBPC(tlbpc); 3176 3177 trb->trb_paddr = htole64(paddr); 3178 trb->trb_status = htole32( 3179 XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) | 3180 xhci_xfer_tdsize(xfer, xfer->frlengths[i], len) 3181 ); 3182 trb->trb_flags = htole32(flags); 3183 3184 bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map, 3185 TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb), 3186 BUS_DMASYNC_PREWRITE); 3187 3188 remain = xfer->frlengths[i] - len; 3189 paddr += len; 3190 3191 /* Chain more TRBs if needed. */ 3192 for (j = ntrb - 1; j > 0; j--) { 3193 len = min(remain, XHCI_TRB_MAXSIZE); 3194 3195 /* Next (or Last) TRB. */ 3196 trb = xhci_xfer_get_trb(sc, xfer, &toggle, (j == 1)); 3197 flags = XHCI_TRB_TYPE_NORMAL | toggle; 3198 if (usbd_xfer_isread(xfer)) 3199 flags |= XHCI_TRB_ISP; 3200 flags |= (j == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN; 3201 DPRINTFN(3, ("%s:%d: ring %p trb0_idx %lu ntrb %d " 3202 "paddr %llx len %u\n", __func__, __LINE__, 3203 &xp->ring.trbs[0], (trb - &xp->ring.trbs[0]), ntrb, 3204 paddr, len)); 3205 3206 trb->trb_paddr = htole64(paddr); 3207 trb->trb_status = htole32( 3208 XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) | 3209 xhci_xfer_tdsize(xfer, remain, len) 3210 ); 3211 trb->trb_flags = htole32(flags); 3212 3213 bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map, 3214 TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb), 3215 BUS_DMASYNC_PREWRITE); 3216 3217 remain -= len; 3218 paddr += len; 3219 } 3220 3221 xfer->frlengths[i] = 0; 3222 } 3223 3224 /* First TRB. 
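	 * Committing the first TRB last, by flipping its cycle bit,
	 * hands the whole chain over to the controller in one step.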
*/ 3225 trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE); 3226 bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map, 3227 TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb), 3228 BUS_DMASYNC_PREWRITE); 3229 3230 s = splusb(); 3231 XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci); 3232 3233 xfer->status = USBD_IN_PROGRESS; 3234 3235 if (xfer->timeout) { 3236 timeout_del(&xfer->timeout_handle); 3237 timeout_set(&xfer->timeout_handle, xhci_timeout, xfer); 3238 timeout_add_msec(&xfer->timeout_handle, xfer->timeout); 3239 } 3240 splx(s); 3241 3242 return (USBD_IN_PROGRESS); 3243 } 3244