1 /* $NetBSD: dwc2.c,v 1.74 2020/05/15 06:15:42 skrll Exp $ */ 2 3 /*- 4 * Copyright (c) 2013 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Nick Hudson 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dwc2.c,v 1.74 2020/05/15 06:15:42 skrll Exp $");

#include "opt_usb.h"

#include <sys/param.h>

#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/select.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/endian.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>
#include <dev/usb/usbroothub.h>

#include <dwc2/dwc2.h>
#include <dwc2/dwc2var.h>

#include "dwc2_core.h"
#include "dwc2_hcd.h"

/*
 * Event counters are compiled in only when DWC2_COUNTERS is defined;
 * otherwise the macros expand to nothing.
 */
#ifdef DWC2_COUNTERS
#define	DWC2_EVCNT_ADD(a,b)	((void)((a).ev_count += (b)))
#else
#define	DWC2_EVCNT_ADD(a,b)	do { } while (/*CONSTCOND*/0)
#endif
#define	DWC2_EVCNT_INCR(a)	DWC2_EVCNT_ADD((a), 1)

#ifdef DWC2_DEBUG
#define	DPRINTFN(n,fmt,...) do {			\
	if (dwc2debug >= (n)) {			\
		printf("%s: " fmt,			\
		__FUNCTION__,## __VA_ARGS__);		\
	}						\
} while (0)
#define	DPRINTF(...)	DPRINTFN(1, __VA_ARGS__)
int dwc2debug = 0;

/*
 * Create the hw.dwc2 sysctl subtree with a single read/write "debug"
 * knob controlling the DPRINTF verbosity level above.
 */
SYSCTL_SETUP(sysctl_hw_dwc2_setup, "sysctl hw.dwc2 setup")
{
	int err;
	const struct sysctlnode *rnode;
	const struct sysctlnode *cnode;

	err = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "dwc2",
	    SYSCTL_DESCR("dwc2 global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL);

	if (err)
		goto fail;

	/* control debugging printfs */
	err = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &dwc2debug, sizeof(dwc2debug), CTL_CREATE, CTL_EOL);
	if (err)
		goto fail;

	return;
fail:
	aprint_error("%s: sysctl_createv failed (err = %d)\n", __func__, err);
}
#else
#define	DPRINTF(...) do { } while (0)
#define	DPRINTFN(...) do { } while (0)
#endif

Static usbd_status	dwc2_open(struct usbd_pipe *);
Static void		dwc2_poll(struct usbd_bus *);
Static void		dwc2_softintr(void *);

Static struct usbd_xfer *
			dwc2_allocx(struct usbd_bus *, unsigned int);
Static void		dwc2_freex(struct usbd_bus *, struct usbd_xfer *);
Static void		dwc2_get_lock(struct usbd_bus *, kmutex_t **);
Static bool		dwc2_dying(struct usbd_bus *);
Static int		dwc2_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
			    void *, int);

Static usbd_status	dwc2_root_intr_transfer(struct usbd_xfer *);
Static usbd_status	dwc2_root_intr_start(struct usbd_xfer *);
Static void		dwc2_root_intr_abort(struct usbd_xfer *);
Static void		dwc2_root_intr_close(struct usbd_pipe *);
Static void		dwc2_root_intr_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_ctrl_transfer(struct usbd_xfer *);
Static usbd_status	dwc2_device_ctrl_start(struct usbd_xfer *);
Static void		dwc2_device_ctrl_abort(struct usbd_xfer *);
Static void		dwc2_device_ctrl_close(struct usbd_pipe *);
Static void		dwc2_device_ctrl_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_bulk_transfer(struct usbd_xfer *);
Static void		dwc2_device_bulk_abort(struct usbd_xfer *);
Static void		dwc2_device_bulk_close(struct usbd_pipe *);
Static void		dwc2_device_bulk_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_intr_transfer(struct usbd_xfer *);
Static usbd_status	dwc2_device_intr_start(struct usbd_xfer *);
Static void		dwc2_device_intr_abort(struct usbd_xfer *);
Static void		dwc2_device_intr_close(struct usbd_pipe *);
Static void		dwc2_device_intr_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_isoc_transfer(struct usbd_xfer *);
Static void		dwc2_device_isoc_abort(struct usbd_xfer *);
Static void		dwc2_device_isoc_close(struct usbd_pipe *);
Static void		dwc2_device_isoc_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_start(struct usbd_xfer *);

Static void		dwc2_close_pipe(struct usbd_pipe *);
Static void		dwc2_abortx(struct usbd_xfer *);

Static void		dwc2_device_clear_toggle(struct usbd_pipe *);
Static void		dwc2_noop(struct usbd_pipe *pipe);

Static int		dwc2_interrupt(struct dwc2_softc *);
Static void		dwc2_rhc(void *);


/* Bandwidth accounting stubs -- intentionally empty on NetBSD. */
static inline void
dwc2_allocate_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
			    struct usbd_xfer *xfer)
{
}

static inline void
dwc2_free_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
			struct usbd_xfer *xfer)
{
}

/* Bus-level method table handed to the USB stack via sc_bus.ub_methods. */
Static const struct usbd_bus_methods dwc2_bus_methods = {
	.ubm_open =	dwc2_open,
	.ubm_softint =	dwc2_softintr,
	.ubm_dopoll =	dwc2_poll,
	.ubm_allocx =	dwc2_allocx,
	.ubm_freex =	dwc2_freex,
	.ubm_abortx =	dwc2_abortx,
	.ubm_dying =	dwc2_dying,
	.ubm_getlock =	dwc2_get_lock,
	.ubm_rhctrl =	dwc2_roothub_ctrl,
};

/* Root hub interrupt (status change) pipe methods. */
Static const struct usbd_pipe_methods dwc2_root_intr_methods = {
	.upm_transfer =	dwc2_root_intr_transfer,
	.upm_start =	dwc2_root_intr_start,
	.upm_abort =	dwc2_root_intr_abort,
	.upm_close =	dwc2_root_intr_close,
	.upm_cleartoggle =	dwc2_noop,
	.upm_done =	dwc2_root_intr_done,
};

/* Control pipe methods for real (non-root-hub) devices. */
Static const struct usbd_pipe_methods dwc2_device_ctrl_methods = {
	.upm_transfer =	dwc2_device_ctrl_transfer,
	.upm_start =	dwc2_device_ctrl_start,
	.upm_abort =	dwc2_device_ctrl_abort,
	.upm_close =	dwc2_device_ctrl_close,
	.upm_cleartoggle =	dwc2_noop,
	.upm_done =	dwc2_device_ctrl_done,
};

/* Interrupt pipe methods for real devices. */
Static const struct usbd_pipe_methods dwc2_device_intr_methods = {
	.upm_transfer =	dwc2_device_intr_transfer,
	.upm_start =	dwc2_device_intr_start,
	.upm_abort =	dwc2_device_intr_abort,
	.upm_close =	dwc2_device_intr_close,
	.upm_cleartoggle =	dwc2_device_clear_toggle,
	.upm_done =	dwc2_device_intr_done,
};

/* Bulk pipe methods; no upm_start -- transfer starts inline (up_serialise
 * is false for bulk pipes, see dwc2_open). */
Static const struct usbd_pipe_methods dwc2_device_bulk_methods = {
	.upm_transfer =	dwc2_device_bulk_transfer,
	.upm_abort =	dwc2_device_bulk_abort,
	.upm_close =	dwc2_device_bulk_close,
	.upm_cleartoggle =	dwc2_device_clear_toggle,
	.upm_done =	dwc2_device_bulk_done,
};

/* Isochronous pipe methods; also unserialised, like bulk. */
Static const struct usbd_pipe_methods dwc2_device_isoc_methods = {
	.upm_transfer =	dwc2_device_isoc_transfer,
	.upm_abort =	dwc2_device_isoc_abort,
	.upm_close =	dwc2_device_isoc_close,
	.upm_cleartoggle =	dwc2_noop,
	.upm_done =	dwc2_device_isoc_done,
};

/*
 * Allocate an xfer from the per-softc pool and attach a dwc2 HCD urb
 * sized for nframes iso descriptors.
 *
 * NOTE(review): the dwc2_hcd_urb_alloc() result is not checked here;
 * presumably GFP_KERNEL allocation cannot fail in this configuration --
 * confirm against dwc2_hcd.c.
 */
Static struct usbd_xfer *
dwc2_allocx(struct usbd_bus *bus, unsigned int nframes)
{
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
	struct dwc2_xfer *dxfer;

	DPRINTFN(10, "\n");

	DWC2_EVCNT_INCR(sc->sc_ev_xferpoolget);
	dxfer = pool_cache_get(sc->sc_xferpool, PR_WAITOK);
	if (dxfer != NULL) {
		memset(dxfer, 0, sizeof(*dxfer));
		dxfer->urb = dwc2_hcd_urb_alloc(sc->sc_hsotg,
		    nframes, GFP_KERNEL);
#ifdef DIAGNOSTIC
		dxfer->xfer.ux_state = XFER_BUSY;
#endif
	}
	return (struct usbd_xfer *)dxfer;
}

/* Release an xfer: free its HCD urb and return it to the pool. */
Static void
dwc2_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
{
	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);

	DPRINTFN(10, "\n");

#ifdef DIAGNOSTIC
	if (xfer->ux_state != XFER_BUSY &&
	    xfer->ux_status != USBD_NOT_STARTED) {
		DPRINTF("xfer=%p not busy, 0x%08x\n", xfer, xfer->ux_state);
	}
	xfer->ux_state = XFER_FREE;
#endif
	DWC2_EVCNT_INCR(sc->sc_ev_xferpoolput);
	dwc2_hcd_urb_free(sc->sc_hsotg, dxfer->urb, dxfer->urb->packet_count);
	pool_cache_put(sc->sc_xferpool, xfer);
}

/* Report whether the controller is being detached. */
Static bool
dwc2_dying(struct usbd_bus *bus)
{
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);

	return sc->sc_dying;
}

/* Hand the bus lock (sc_lock) to the USB stack. */
Static void
dwc2_get_lock(struct usbd_bus *bus, kmutex_t **lock)
{
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);

	*lock = &sc->sc_lock;
}

/*
 * Root hub change softint: complete the pending root-hub interrupt
 * xfer (if any) with the single-port status-change bitmap.
 */
Static void
dwc2_rhc(void *addr)
{
	struct dwc2_softc *sc = addr;
	struct usbd_xfer *xfer;
	u_char *p;

	DPRINTF("\n");
	mutex_enter(&sc->sc_lock);
	xfer = sc->sc_intrxfer;

	if (xfer == NULL) {
		/* Just ignore the change. */
		mutex_exit(&sc->sc_lock);
		return;

	}
	KASSERT(xfer->ux_status == USBD_IN_PROGRESS);

	/* set port bit */
	p = KERNADDR(&xfer->ux_dmabuf, 0);

	p[0] = 0x02;	/* we only have one port (1 << 1) */

	xfer->ux_actlen = xfer->ux_length;
	xfer->ux_status = USBD_NORMAL_COMPLETION;

	usb_transfer_complete(xfer);
	mutex_exit(&sc->sc_lock);
}

/*
 * Soft interrupt: drain sc_complete (filled by the hard interrupt path)
 * and call usb_transfer_complete on each claimed xfer.  Runs with the
 * bus lock held (or under polling).
 */
Static void
dwc2_softintr(void *v)
{
	struct usbd_bus *bus = v;
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
	struct dwc2_xfer *dxfer, *next;
	TAILQ_HEAD(, dwc2_xfer) claimed = TAILQ_HEAD_INITIALIZER(claimed);

	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));

	/*
	 * Grab all the xfers that have not been aborted or timed out.
	 * Do so under a single lock -- without dropping it to run
	 * usb_transfer_complete as we go -- so that dwc2_abortx won't
	 * remove next out from under us during iteration when we've
	 * dropped the lock.
	 */
	mutex_spin_enter(&hsotg->lock);
	TAILQ_FOREACH_SAFE(dxfer, &sc->sc_complete, xnext, next) {
		if (!usbd_xfer_trycomplete(&dxfer->xfer))
			/*
			 * The hard interrupt handler decided to
			 * complete the xfer, and put it on sc_complete
			 * to pass it to us in the soft interrupt
			 * handler, but in the time between hard
			 * interrupt and soft interrupt, the xfer was
			 * aborted or timed out and we lost the race.
			 */
			continue;
		KASSERT(dxfer->xfer.ux_status == USBD_IN_PROGRESS);
		KASSERT(dxfer->intr_status != USBD_CANCELLED);
		KASSERT(dxfer->intr_status != USBD_TIMEOUT);
		TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext);
		TAILQ_INSERT_TAIL(&claimed, dxfer, xnext);
	}
	mutex_spin_exit(&hsotg->lock);

	/* Now complete them. */
	while (!TAILQ_EMPTY(&claimed)) {
		dxfer = TAILQ_FIRST(&claimed);
		KASSERT(dxfer->xfer.ux_status == USBD_IN_PROGRESS);
		KASSERT(dxfer->intr_status != USBD_CANCELLED);
		KASSERT(dxfer->intr_status != USBD_TIMEOUT);
		TAILQ_REMOVE(&claimed, dxfer, xnext);

		dxfer->xfer.ux_status = dxfer->intr_status;
		usb_transfer_complete(&dxfer->xfer);
	}
}

/*
 * Open a pipe: select the method table for the endpoint type.  Root hub
 * endpoints are routed to usbroothub / the local root-intr methods; a
 * control pipe additionally gets a DMA buffer for the SETUP packet.
 */
usbd_status
dwc2_open(struct usbd_pipe *pipe)
{
	struct usbd_device *dev = pipe->up_dev;
	struct dwc2_softc *sc = DWC2_PIPE2SC(pipe);
	struct dwc2_pipe *dpipe = DWC2_PIPE2DPIPE(pipe);
	usb_endpoint_descriptor_t *ed = pipe->up_endpoint->ue_edesc;
	uint8_t addr = dev->ud_addr;
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	usbd_status err;

	DPRINTF("pipe %p addr %d xfertype %d dir %s\n", pipe, addr, xfertype,
	    UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN ? "in" : "out");

	if (sc->sc_dying) {
		return USBD_IOERROR;
	}

	if (addr == dev->ud_bus->ub_rhaddr) {
		switch (ed->bEndpointAddress) {
		case USB_CONTROL_ENDPOINT:
			pipe->up_methods = &roothub_ctrl_methods;
			break;
		case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
			pipe->up_methods = &dwc2_root_intr_methods;
			break;
		default:
			DPRINTF("bad bEndpointAddress 0x%02x\n",
			    ed->bEndpointAddress);
			return USBD_INVAL;
		}
		DPRINTF("root hub pipe open\n");
		return USBD_NORMAL_COMPLETION;
	}

	switch (xfertype) {
	case UE_CONTROL:
		pipe->up_methods = &dwc2_device_ctrl_methods;
		err = usb_allocmem(&sc->sc_bus, sizeof(usb_device_request_t),
		    0, USBMALLOC_COHERENT, &dpipe->req_dma);
		if (err)
			return err;
		break;
	case UE_INTERRUPT:
		pipe->up_methods = &dwc2_device_intr_methods;
		break;
	case UE_ISOCHRONOUS:
		pipe->up_serialise = false;
		pipe->up_methods = &dwc2_device_isoc_methods;
		break;
	case UE_BULK:
		pipe->up_serialise = false;
		pipe->up_methods = &dwc2_device_bulk_methods;
		break;
	default:
		DPRINTF("bad xfer type %d\n", xfertype);
		return USBD_INVAL;
	}

	/* QH */
	dpipe->priv = NULL;

	return USBD_NORMAL_COMPLETION;
}

/* Polled-mode entry: run the interrupt handler under the device spin lock. */
Static void
dwc2_poll(struct usbd_bus *bus)
{
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
	struct dwc2_hsotg *hsotg = sc->sc_hsotg;

	mutex_spin_enter(&hsotg->lock);
	dwc2_interrupt(sc);
	mutex_spin_exit(&hsotg->lock);
}

/*
 * Close a regular pipe.
 * Assumes that there are no pending transactions.
 */
Static void
dwc2_close_pipe(struct usbd_pipe *pipe)
{
	struct dwc2_softc *sc __diagused = pipe->up_dev->ud_bus->ub_hcpriv;

	KASSERT(mutex_owned(&sc->sc_lock));
}

/*
 * Abort a device request.
459 */ 460 Static void 461 dwc2_abortx(struct usbd_xfer *xfer) 462 { 463 struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer); 464 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 465 struct dwc2_hsotg *hsotg = sc->sc_hsotg; 466 struct dwc2_xfer *d; 467 int err; 468 469 DPRINTF("xfer %p pipe %p status 0x%08x", xfer, xfer->ux_pipe, 470 xfer->ux_status); 471 472 KASSERT(mutex_owned(&sc->sc_lock)); 473 ASSERT_SLEEPABLE(); 474 475 KASSERTMSG((xfer->ux_status == USBD_CANCELLED || 476 xfer->ux_status == USBD_TIMEOUT), 477 "bad abort status: %d", xfer->ux_status); 478 479 mutex_spin_enter(&hsotg->lock); 480 481 /* 482 * Check whether we aborted or timed out after the hardware 483 * completion interrupt determined that it's done but before 484 * the soft interrupt could actually complete it. If so, it's 485 * too late for the soft interrupt -- at this point we've 486 * already committed to abort it or time it out, so we need to 487 * take it off the softint's list of work in case the caller, 488 * say, frees the xfer before the softint runs. 489 * 490 * This logic is unusual among host controller drivers, and 491 * happens because dwc2 decides to complete xfers in the hard 492 * interrupt handler rather than in the soft interrupt handler, 493 * but usb_transfer_complete must be deferred to softint -- and 494 * we happened to swoop in between the hard interrupt and the 495 * soft interrupt. Other host controller drivers do almost all 496 * processing in the softint so there's no intermediate stage. 497 * 498 * Fortunately, this linear search to discern the intermediate 499 * stage is not likely to be a serious performance impact 500 * because it happens only on abort or timeout. 501 */ 502 TAILQ_FOREACH(d, &sc->sc_complete, xnext) { 503 if (d == dxfer) { 504 TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext); 505 break; 506 } 507 } 508 509 /* 510 * If we're dying, skip the hardware action and just notify the 511 * software that we're done. 
512 */ 513 if (sc->sc_dying) { 514 DPRINTFN(4, "xfer %p dying 0x%08x", xfer, xfer->ux_status); 515 goto dying; 516 } 517 518 /* 519 * HC Step 1: Handle the hardware. 520 */ 521 err = dwc2_hcd_urb_dequeue(hsotg, dxfer->urb); 522 if (err) { 523 DPRINTF("dwc2_hcd_urb_dequeue failed\n"); 524 } 525 526 dying: 527 mutex_spin_exit(&hsotg->lock); 528 529 /* 530 * Final Step: Notify completion to waiting xfers. 531 */ 532 usb_transfer_complete(xfer); 533 KASSERT(mutex_owned(&sc->sc_lock)); 534 } 535 536 Static void 537 dwc2_noop(struct usbd_pipe *pipe) 538 { 539 540 } 541 542 Static void 543 dwc2_device_clear_toggle(struct usbd_pipe *pipe) 544 { 545 546 DPRINTF("toggle %d -> 0", pipe->up_endpoint->ue_toggle); 547 } 548 549 /***********************************************************************/ 550 551 Static int 552 dwc2_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req, 553 void *buf, int buflen) 554 { 555 struct dwc2_softc *sc = bus->ub_hcpriv; 556 usbd_status err = USBD_IOERROR; 557 uint16_t len, value, index; 558 int totlen = 0; 559 560 if (sc->sc_dying) 561 return -1; 562 563 DPRINTFN(4, "type=0x%02x request=%02x\n", 564 req->bmRequestType, req->bRequest); 565 566 len = UGETW(req->wLength); 567 value = UGETW(req->wValue); 568 index = UGETW(req->wIndex); 569 570 #define C(x,y) ((x) | ((y) << 8)) 571 switch (C(req->bRequest, req->bmRequestType)) { 572 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE): 573 DPRINTFN(8, "wValue=0x%04x\n", value); 574 575 if (len == 0) 576 break; 577 switch (value) { 578 #define sd ((usb_string_descriptor_t *)buf) 579 case C(2, UDESC_STRING): 580 /* Product */ 581 totlen = usb_makestrdesc(sd, len, "DWC2 root hub"); 582 break; 583 #undef sd 584 default: 585 /* default from usbroothub */ 586 return buflen; 587 } 588 break; 589 590 case C(UR_GET_CONFIG, UT_READ_DEVICE): 591 case C(UR_GET_INTERFACE, UT_READ_INTERFACE): 592 case C(UR_GET_STATUS, UT_READ_INTERFACE): 593 case C(UR_GET_STATUS, UT_READ_ENDPOINT): 594 case C(UR_SET_ADDRESS, 
UT_WRITE_DEVICE): 595 case C(UR_SET_CONFIG, UT_WRITE_DEVICE): 596 /* default from usbroothub */ 597 DPRINTFN(4, "returning %d (usbroothub default)", buflen); 598 599 return buflen; 600 601 default: 602 /* Hub requests */ 603 err = dwc2_hcd_hub_control(sc->sc_hsotg, 604 C(req->bRequest, req->bmRequestType), value, index, 605 buf, len); 606 if (err) { 607 return -1; 608 } 609 totlen = len; 610 } 611 612 return totlen; 613 } 614 615 Static usbd_status 616 dwc2_root_intr_transfer(struct usbd_xfer *xfer) 617 { 618 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 619 usbd_status err; 620 621 DPRINTF("\n"); 622 623 /* Insert last in queue. */ 624 mutex_enter(&sc->sc_lock); 625 err = usb_insert_transfer(xfer); 626 mutex_exit(&sc->sc_lock); 627 if (err) 628 return err; 629 630 /* Pipe isn't running, start first */ 631 return dwc2_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 632 } 633 634 Static usbd_status 635 dwc2_root_intr_start(struct usbd_xfer *xfer) 636 { 637 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 638 const bool polling = sc->sc_bus.ub_usepolling; 639 640 DPRINTF("\n"); 641 642 if (sc->sc_dying) 643 return USBD_IOERROR; 644 645 if (!polling) 646 mutex_enter(&sc->sc_lock); 647 KASSERT(sc->sc_intrxfer == NULL); 648 sc->sc_intrxfer = xfer; 649 xfer->ux_status = USBD_IN_PROGRESS; 650 if (!polling) 651 mutex_exit(&sc->sc_lock); 652 653 return USBD_IN_PROGRESS; 654 } 655 656 /* Abort a root interrupt request. */ 657 Static void 658 dwc2_root_intr_abort(struct usbd_xfer *xfer) 659 { 660 struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer); 661 662 DPRINTF("xfer=%p\n", xfer); 663 664 KASSERT(mutex_owned(&sc->sc_lock)); 665 KASSERT(xfer->ux_pipe->up_intrxfer == xfer); 666 667 /* If xfer has already completed, nothing to do here. */ 668 if (sc->sc_intrxfer == NULL) 669 return; 670 671 /* 672 * Otherwise, sc->sc_intrxfer had better be this transfer. 673 * Cancel it. 
674 */ 675 KASSERT(sc->sc_intrxfer == xfer); 676 KASSERT(xfer->ux_status == USBD_IN_PROGRESS); 677 xfer->ux_status = USBD_CANCELLED; 678 usb_transfer_complete(xfer); 679 } 680 681 Static void 682 dwc2_root_intr_close(struct usbd_pipe *pipe) 683 { 684 struct dwc2_softc *sc __diagused = DWC2_PIPE2SC(pipe); 685 686 DPRINTF("\n"); 687 688 KASSERT(mutex_owned(&sc->sc_lock)); 689 690 /* 691 * Caller must guarantee the xfer has completed first, by 692 * closing the pipe only after normal completion or an abort. 693 */ 694 KASSERT(sc->sc_intrxfer == NULL); 695 } 696 697 Static void 698 dwc2_root_intr_done(struct usbd_xfer *xfer) 699 { 700 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 701 702 DPRINTF("\n"); 703 704 /* Claim the xfer so it doesn't get completed again. */ 705 KASSERT(sc->sc_intrxfer == xfer); 706 KASSERT(xfer->ux_status != USBD_IN_PROGRESS); 707 sc->sc_intrxfer = NULL; 708 } 709 710 /***********************************************************************/ 711 712 Static usbd_status 713 dwc2_device_ctrl_transfer(struct usbd_xfer *xfer) 714 { 715 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 716 usbd_status err; 717 718 DPRINTF("\n"); 719 720 /* Insert last in queue. 
*/ 721 mutex_enter(&sc->sc_lock); 722 err = usb_insert_transfer(xfer); 723 mutex_exit(&sc->sc_lock); 724 if (err) 725 return err; 726 727 /* Pipe isn't running, start first */ 728 return dwc2_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 729 } 730 731 Static usbd_status 732 dwc2_device_ctrl_start(struct usbd_xfer *xfer) 733 { 734 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 735 usbd_status err; 736 const bool polling = sc->sc_bus.ub_usepolling; 737 738 DPRINTF("\n"); 739 740 if (!polling) 741 mutex_enter(&sc->sc_lock); 742 xfer->ux_status = USBD_IN_PROGRESS; 743 err = dwc2_device_start(xfer); 744 if (!polling) 745 mutex_exit(&sc->sc_lock); 746 747 if (err) 748 return err; 749 750 return USBD_IN_PROGRESS; 751 } 752 753 Static void 754 dwc2_device_ctrl_abort(struct usbd_xfer *xfer) 755 { 756 struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer); 757 758 KASSERT(mutex_owned(&sc->sc_lock)); 759 760 DPRINTF("xfer=%p\n", xfer); 761 usbd_xfer_abort(xfer); 762 } 763 764 Static void 765 dwc2_device_ctrl_close(struct usbd_pipe *pipe) 766 { 767 struct dwc2_softc * const sc = DWC2_PIPE2SC(pipe); 768 struct dwc2_pipe * const dpipe = DWC2_PIPE2DPIPE(pipe); 769 770 DPRINTF("pipe=%p\n", pipe); 771 dwc2_close_pipe(pipe); 772 773 usb_freemem(&sc->sc_bus, &dpipe->req_dma); 774 } 775 776 Static void 777 dwc2_device_ctrl_done(struct usbd_xfer *xfer) 778 { 779 780 DPRINTF("xfer=%p\n", xfer); 781 } 782 783 /***********************************************************************/ 784 785 Static usbd_status 786 dwc2_device_bulk_transfer(struct usbd_xfer *xfer) 787 { 788 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 789 usbd_status err; 790 791 DPRINTF("xfer=%p\n", xfer); 792 793 /* Insert last in queue. 
*/ 794 mutex_enter(&sc->sc_lock); 795 err = usb_insert_transfer(xfer); 796 797 KASSERT(err == USBD_NORMAL_COMPLETION); 798 799 xfer->ux_status = USBD_IN_PROGRESS; 800 err = dwc2_device_start(xfer); 801 mutex_exit(&sc->sc_lock); 802 803 return err; 804 } 805 806 Static void 807 dwc2_device_bulk_abort(struct usbd_xfer *xfer) 808 { 809 struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer); 810 811 KASSERT(mutex_owned(&sc->sc_lock)); 812 813 DPRINTF("xfer=%p\n", xfer); 814 usbd_xfer_abort(xfer); 815 } 816 817 Static void 818 dwc2_device_bulk_close(struct usbd_pipe *pipe) 819 { 820 821 DPRINTF("pipe=%p\n", pipe); 822 823 dwc2_close_pipe(pipe); 824 } 825 826 Static void 827 dwc2_device_bulk_done(struct usbd_xfer *xfer) 828 { 829 830 DPRINTF("xfer=%p\n", xfer); 831 } 832 833 /***********************************************************************/ 834 835 Static usbd_status 836 dwc2_device_intr_transfer(struct usbd_xfer *xfer) 837 { 838 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 839 usbd_status err; 840 841 DPRINTF("xfer=%p\n", xfer); 842 843 /* Insert last in queue. */ 844 mutex_enter(&sc->sc_lock); 845 err = usb_insert_transfer(xfer); 846 mutex_exit(&sc->sc_lock); 847 if (err) 848 return err; 849 850 /* Pipe isn't running, start first */ 851 return dwc2_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue)); 852 } 853 854 Static usbd_status 855 dwc2_device_intr_start(struct usbd_xfer *xfer) 856 { 857 struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer) 858 struct usbd_device *dev = dpipe->pipe.up_dev; 859 struct dwc2_softc *sc = dev->ud_bus->ub_hcpriv; 860 usbd_status err; 861 const bool polling = sc->sc_bus.ub_usepolling; 862 863 if (!polling) 864 mutex_enter(&sc->sc_lock); 865 xfer->ux_status = USBD_IN_PROGRESS; 866 err = dwc2_device_start(xfer); 867 if (!polling) 868 mutex_exit(&sc->sc_lock); 869 870 if (err) 871 return err; 872 873 return USBD_IN_PROGRESS; 874 } 875 876 /* Abort a device interrupt request. 
*/ 877 Static void 878 dwc2_device_intr_abort(struct usbd_xfer *xfer) 879 { 880 struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer); 881 882 KASSERT(mutex_owned(&sc->sc_lock)); 883 KASSERT(xfer->ux_pipe->up_intrxfer == xfer); 884 885 DPRINTF("xfer=%p\n", xfer); 886 usbd_xfer_abort(xfer); 887 } 888 889 Static void 890 dwc2_device_intr_close(struct usbd_pipe *pipe) 891 { 892 893 DPRINTF("pipe=%p\n", pipe); 894 895 dwc2_close_pipe(pipe); 896 } 897 898 Static void 899 dwc2_device_intr_done(struct usbd_xfer *xfer) 900 { 901 902 DPRINTF("\n"); 903 } 904 905 /***********************************************************************/ 906 907 usbd_status 908 dwc2_device_isoc_transfer(struct usbd_xfer *xfer) 909 { 910 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 911 usbd_status err; 912 913 DPRINTF("xfer=%p\n", xfer); 914 915 /* Insert last in queue. */ 916 mutex_enter(&sc->sc_lock); 917 err = usb_insert_transfer(xfer); 918 919 KASSERT(err == USBD_NORMAL_COMPLETION); 920 921 xfer->ux_status = USBD_IN_PROGRESS; 922 err = dwc2_device_start(xfer); 923 mutex_exit(&sc->sc_lock); 924 925 return err; 926 } 927 928 void 929 dwc2_device_isoc_abort(struct usbd_xfer *xfer) 930 { 931 struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer); 932 KASSERT(mutex_owned(&sc->sc_lock)); 933 934 DPRINTF("xfer=%p\n", xfer); 935 usbd_xfer_abort(xfer); 936 } 937 938 void 939 dwc2_device_isoc_close(struct usbd_pipe *pipe) 940 { 941 DPRINTF("\n"); 942 943 dwc2_close_pipe(pipe); 944 } 945 946 void 947 dwc2_device_isoc_done(struct usbd_xfer *xfer) 948 { 949 950 DPRINTF("\n"); 951 } 952 953 954 usbd_status 955 dwc2_device_start(struct usbd_xfer *xfer) 956 { 957 struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer); 958 struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer); 959 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 960 struct dwc2_hsotg *hsotg = sc->sc_hsotg; 961 struct dwc2_hcd_urb *dwc2_urb; 962 963 struct usbd_device *dev = xfer->ux_pipe->up_dev; 964 usb_endpoint_descriptor_t *ed = 
xfer->ux_pipe->up_endpoint->ue_edesc; 965 uint8_t addr = dev->ud_addr; 966 uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 967 uint8_t epnum = UE_GET_ADDR(ed->bEndpointAddress); 968 uint8_t dir = UE_GET_DIR(ed->bEndpointAddress); 969 uint16_t mps = UE_GET_SIZE(UGETW(ed->wMaxPacketSize)); 970 uint32_t len; 971 972 uint32_t flags = 0; 973 uint32_t off = 0; 974 int retval, err; 975 int alloc_bandwidth = 0; 976 977 DPRINTFN(1, "xfer=%p pipe=%p\n", xfer, xfer->ux_pipe); 978 979 if (xfertype == UE_ISOCHRONOUS || 980 xfertype == UE_INTERRUPT) { 981 mutex_spin_enter(&hsotg->lock); 982 if (!dwc2_hcd_is_bandwidth_allocated(hsotg, xfer)) 983 alloc_bandwidth = 1; 984 mutex_spin_exit(&hsotg->lock); 985 } 986 987 /* 988 * For Control pipe the direction is from the request, all other 989 * transfers have been set correctly at pipe open time. 990 */ 991 if (xfertype == UE_CONTROL) { 992 usb_device_request_t *req = &xfer->ux_request; 993 994 DPRINTFN(3, "xfer=%p type=0x%02x request=0x%02x wValue=0x%04x " 995 "wIndex=0x%04x len=%d addr=%d endpt=%d dir=%s speed=%d " 996 "mps=%d\n", 997 xfer, req->bmRequestType, req->bRequest, UGETW(req->wValue), 998 UGETW(req->wIndex), UGETW(req->wLength), dev->ud_addr, 999 epnum, dir == UT_READ ? "in" :"out", dev->ud_speed, mps); 1000 1001 /* Copy request packet to our DMA buffer */ 1002 memcpy(KERNADDR(&dpipe->req_dma, 0), req, sizeof(*req)); 1003 usb_syncmem(&dpipe->req_dma, 0, sizeof(*req), 1004 BUS_DMASYNC_PREWRITE); 1005 len = UGETW(req->wLength); 1006 if ((req->bmRequestType & UT_READ) == UT_READ) { 1007 dir = UE_DIR_IN; 1008 } else { 1009 dir = UE_DIR_OUT; 1010 } 1011 1012 DPRINTFN(3, "req = %p dma = %" PRIxBUSADDR " len %d dir %s\n", 1013 KERNADDR(&dpipe->req_dma, 0), DMAADDR(&dpipe->req_dma, 0), 1014 len, dir == UE_DIR_IN ? 
"in" : "out"); 1015 } else if (xfertype == UE_ISOCHRONOUS) { 1016 DPRINTFN(3, "xfer=%p nframes=%d flags=%d addr=%d endpt=%d," 1017 " mps=%d dir %s\n", xfer, xfer->ux_nframes, xfer->ux_flags, addr, 1018 epnum, mps, dir == UT_READ ? "in" :"out"); 1019 1020 #ifdef DIAGNOSTIC 1021 len = 0; 1022 for (size_t i = 0; i < xfer->ux_nframes; i++) 1023 len += xfer->ux_frlengths[i]; 1024 if (len != xfer->ux_length) 1025 panic("len (%d) != xfer->ux_length (%d)", len, 1026 xfer->ux_length); 1027 #endif 1028 len = xfer->ux_length; 1029 } else { 1030 DPRINTFN(3, "xfer=%p len=%d flags=%d addr=%d endpt=%d," 1031 " mps=%d dir %s\n", xfer, xfer->ux_length, xfer->ux_flags, addr, 1032 epnum, mps, dir == UT_READ ? "in" :"out"); 1033 1034 len = xfer->ux_length; 1035 } 1036 1037 dwc2_urb = dxfer->urb; 1038 if (!dwc2_urb) 1039 return USBD_NOMEM; 1040 1041 KASSERT(dwc2_urb->packet_count == xfer->ux_nframes); 1042 memset(dwc2_urb, 0, sizeof(*dwc2_urb) + 1043 sizeof(dwc2_urb->iso_descs[0]) * dwc2_urb->packet_count); 1044 1045 dwc2_urb->priv = xfer; 1046 dwc2_urb->packet_count = xfer->ux_nframes; 1047 1048 dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, addr, epnum, xfertype, dir, 1049 mps); 1050 1051 if (xfertype == UE_CONTROL) { 1052 dwc2_urb->setup_usbdma = &dpipe->req_dma; 1053 dwc2_urb->setup_packet = KERNADDR(&dpipe->req_dma, 0); 1054 dwc2_urb->setup_dma = DMAADDR(&dpipe->req_dma, 0); 1055 } else { 1056 /* XXXNH - % mps required? */ 1057 if ((xfer->ux_flags & USBD_FORCE_SHORT_XFER) && (len % mps) == 0) 1058 flags |= URB_SEND_ZERO_PACKET; 1059 } 1060 flags |= URB_GIVEBACK_ASAP; 1061 1062 /* 1063 * control transfers with no data phase don't touch usbdma, but 1064 * everything else does. 1065 */ 1066 if (!(xfertype == UE_CONTROL && len == 0)) { 1067 dwc2_urb->usbdma = &xfer->ux_dmabuf; 1068 dwc2_urb->buf = KERNADDR(dwc2_urb->usbdma, 0); 1069 dwc2_urb->dma = DMAADDR(dwc2_urb->usbdma, 0); 1070 1071 usb_syncmem(&xfer->ux_dmabuf, 0, len, 1072 dir == UE_DIR_IN ? 
1073 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 1074 } 1075 dwc2_urb->length = len; 1076 dwc2_urb->flags = flags; 1077 dwc2_urb->status = -EINPROGRESS; 1078 1079 if (xfertype == UE_INTERRUPT || 1080 xfertype == UE_ISOCHRONOUS) { 1081 uint16_t ival; 1082 1083 if (xfertype == UE_INTERRUPT && 1084 dpipe->pipe.up_interval != USBD_DEFAULT_INTERVAL) { 1085 ival = dpipe->pipe.up_interval; 1086 } else { 1087 ival = ed->bInterval; 1088 } 1089 1090 if (ival < 1) { 1091 retval = -ENODEV; 1092 goto fail; 1093 } 1094 if (dev->ud_speed == USB_SPEED_HIGH || 1095 (dev->ud_speed == USB_SPEED_FULL && xfertype == UE_ISOCHRONOUS)) { 1096 if (ival > 16) { 1097 /* 1098 * illegal with HS/FS, but there were 1099 * documentation bugs in the spec 1100 */ 1101 ival = 256; 1102 } else { 1103 ival = (1 << (ival - 1)); 1104 } 1105 } else { 1106 if (xfertype == UE_INTERRUPT && ival < 10) 1107 ival = 10; 1108 } 1109 dwc2_urb->interval = ival; 1110 } 1111 1112 /* XXXNH bring down from callers?? */ 1113 // mutex_enter(&sc->sc_lock); 1114 1115 xfer->ux_actlen = 0; 1116 1117 KASSERT(xfertype != UE_ISOCHRONOUS || 1118 xfer->ux_nframes <= dwc2_urb->packet_count); 1119 KASSERTMSG(xfer->ux_nframes == 0 || xfertype == UE_ISOCHRONOUS, 1120 "nframes %d xfertype %d\n", xfer->ux_nframes, xfertype); 1121 1122 off = 0; 1123 for (size_t i = 0; i < xfer->ux_nframes; ++i) { 1124 DPRINTFN(3, "xfer=%p frame=%zd offset=%d length=%d\n", xfer, i, 1125 off, xfer->ux_frlengths[i]); 1126 1127 dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i, off, 1128 xfer->ux_frlengths[i]); 1129 off += xfer->ux_frlengths[i]; 1130 } 1131 1132 struct dwc2_qh *qh = dpipe->priv; 1133 struct dwc2_qtd *qtd; 1134 bool qh_allocated = false; 1135 1136 /* Create QH for the endpoint if it doesn't exist */ 1137 if (!qh) { 1138 qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, GFP_ATOMIC); 1139 if (!qh) { 1140 retval = -ENOMEM; 1141 goto fail; 1142 } 1143 dpipe->priv = qh; 1144 qh_allocated = true; 1145 } 1146 1147 qtd = pool_cache_get(sc->sc_qtdpool, 
	    PR_NOWAIT);
	if (!qtd) {
		retval = -ENOMEM;
		goto fail1;
	}
	memset(qtd, 0, sizeof(*qtd));

	/* might need to check cpu_intr_p */
	mutex_spin_enter(&hsotg->lock);
	retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
	if (retval)
		goto fail2;
	usbd_xfer_schedule_timeout(xfer);
	xfer->ux_status = USBD_IN_PROGRESS;

	if (alloc_bandwidth) {
		/* Reserve periodic bandwidth for interrupt/isoc endpoints. */
		dwc2_allocate_bus_bandwidth(hsotg,
		    dwc2_hcd_get_ep_bandwidth(hsotg, dpipe),
		    xfer);
	}

	mutex_spin_exit(&hsotg->lock);
//	mutex_exit(&sc->sc_lock);

	return USBD_IN_PROGRESS;

fail2:
	/* Enqueue failed: detach the xfer from the urb before freeing. */
	dwc2_urb->priv = NULL;
	mutex_spin_exit(&hsotg->lock);
	pool_cache_put(sc->sc_qtdpool, qtd);

fail1:
	/* Only tear down the QH if it was created by this call. */
	if (qh_allocated) {
		dpipe->priv = NULL;
		dwc2_hcd_qh_free(hsotg, qh);
	}
fail:

	/* Map the negative Linux-style errno to a usbd status code. */
	switch (retval) {
	case -EINVAL:
	case -ENODEV:
		err = USBD_INVAL;
		break;
	case -ENOMEM:
		err = USBD_NOMEM;
		break;
	default:
		err = USBD_IOERROR;
	}

	return err;

}

/*
 * Hardware interrupt entry point, registered by the bus attachment.
 * Takes the hsotg spin lock, bails out while the device is dying or
 * powered down, and otherwise dispatches to dwc2_interrupt().
 *
 * When the bus is polling, interrupts are acknowledged here by writing
 * the pending bits back to GINTSTS (NOTE(review): this looks like a
 * write-1-to-clear ack with no further processing — confirm against
 * the core databook) so that the poll loop does the actual work.
 */
int dwc2_intr(void *p)
{
	struct dwc2_softc *sc = p;
	struct dwc2_hsotg *hsotg;
	int ret = 0;

	if (sc == NULL)
		return 0;

	hsotg = sc->sc_hsotg;
	mutex_spin_enter(&hsotg->lock);

	if (sc->sc_dying || !device_has_power(sc->sc_dev))
		goto done;

	if (sc->sc_bus.ub_usepolling) {
		uint32_t intrs;

		intrs = dwc2_read_core_intr(hsotg);
		DWC2_WRITE_4(hsotg, GINTSTS, intrs);
	} else {
		ret = dwc2_interrupt(sc);
	}

done:
	mutex_spin_exit(&hsotg->lock);

	return ret;
}

/*
 * Dispatch one interrupt: run the host-controller handler when the HCD
 * side is enabled, then always run the common (core/OTG) handler.
 * Returns non-zero if either handler claimed the interrupt.
 * Caller holds hsotg->lock (see dwc2_intr()).
 */
int
dwc2_interrupt(struct dwc2_softc *sc)
{
	int ret = 0;

	if (sc->sc_hcdenabled) {
		ret |= dwc2_handle_hcd_intr(sc->sc_hsotg);
	}

	ret |= dwc2_handle_common_intr(sc->sc_hsotg);

	return ret;
}

/***********************************************************************/ 1246 1247 int 1248 dwc2_detach(struct dwc2_softc *sc, int flags) 1249 { 1250 int rv = 0; 1251 1252 if (sc->sc_child != NULL) 1253 rv = config_detach(sc->sc_child, flags); 1254 1255 return rv; 1256 } 1257 1258 bool 1259 dwc2_shutdown(device_t self, int flags) 1260 { 1261 struct dwc2_softc *sc = device_private(self); 1262 1263 sc = sc; 1264 1265 return true; 1266 } 1267 1268 void 1269 dwc2_childdet(device_t self, device_t child) 1270 { 1271 struct dwc2_softc *sc = device_private(self); 1272 1273 sc = sc; 1274 } 1275 1276 int 1277 dwc2_activate(device_t self, enum devact act) 1278 { 1279 struct dwc2_softc *sc = device_private(self); 1280 1281 sc = sc; 1282 1283 return 0; 1284 } 1285 1286 bool 1287 dwc2_resume(device_t dv, const pmf_qual_t *qual) 1288 { 1289 struct dwc2_softc *sc = device_private(dv); 1290 1291 sc = sc; 1292 1293 return true; 1294 } 1295 1296 bool 1297 dwc2_suspend(device_t dv, const pmf_qual_t *qual) 1298 { 1299 struct dwc2_softc *sc = device_private(dv); 1300 1301 sc = sc; 1302 1303 return true; 1304 } 1305 1306 /***********************************************************************/ 1307 int 1308 dwc2_init(struct dwc2_softc *sc) 1309 { 1310 int err = 0; 1311 1312 err = linux_workqueue_init(); 1313 if (err) 1314 return err; 1315 1316 sc->sc_bus.ub_hcpriv = sc; 1317 sc->sc_bus.ub_revision = USBREV_2_0; 1318 sc->sc_bus.ub_methods = &dwc2_bus_methods; 1319 sc->sc_bus.ub_pipesize = sizeof(struct dwc2_pipe); 1320 sc->sc_bus.ub_usedma = true; 1321 sc->sc_hcdenabled = false; 1322 1323 mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB); 1324 1325 TAILQ_INIT(&sc->sc_complete); 1326 1327 sc->sc_rhc_si = softint_establish(SOFTINT_USB | SOFTINT_MPSAFE, 1328 dwc2_rhc, sc); 1329 1330 sc->sc_xferpool = pool_cache_init(sizeof(struct dwc2_xfer), 0, 0, 0, 1331 "dwc2xfer", NULL, IPL_USB, NULL, NULL, NULL); 1332 sc->sc_qhpool = pool_cache_init(sizeof(struct dwc2_qh), 0, 0, 0, 1333 "dwc2qh", 
NULL, IPL_USB, NULL, NULL, NULL); 1334 sc->sc_qtdpool = pool_cache_init(sizeof(struct dwc2_qtd), 0, 0, 0, 1335 "dwc2qtd", NULL, IPL_USB, NULL, NULL, NULL); 1336 1337 sc->sc_hsotg = kmem_zalloc(sizeof(struct dwc2_hsotg), KM_SLEEP); 1338 sc->sc_hsotg->hsotg_sc = sc; 1339 sc->sc_hsotg->dev = sc->sc_dev; 1340 sc->sc_hcdenabled = true; 1341 1342 struct dwc2_hsotg *hsotg = sc->sc_hsotg; 1343 struct dwc2_core_params defparams; 1344 int retval; 1345 1346 if (sc->sc_params == NULL) { 1347 /* Default all params to autodetect */ 1348 dwc2_set_all_params(&defparams, -1); 1349 sc->sc_params = &defparams; 1350 1351 /* 1352 * Disable descriptor dma mode by default as the HW can support 1353 * it, but does not support it for SPLIT transactions. 1354 */ 1355 defparams.dma_desc_enable = 0; 1356 } 1357 hsotg->dr_mode = USB_DR_MODE_HOST; 1358 1359 /* Detect config values from hardware */ 1360 retval = dwc2_get_hwparams(hsotg); 1361 if (retval) { 1362 goto fail2; 1363 } 1364 1365 hsotg->core_params = kmem_zalloc(sizeof(*hsotg->core_params), KM_SLEEP); 1366 dwc2_set_all_params(hsotg->core_params, -1); 1367 1368 /* Validate parameter values */ 1369 dwc2_set_parameters(hsotg, sc->sc_params); 1370 1371 #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ 1372 IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) 1373 if (hsotg->dr_mode != USB_DR_MODE_HOST) { 1374 retval = dwc2_gadget_init(hsotg); 1375 if (retval) 1376 goto fail2; 1377 hsotg->gadget_enabled = 1; 1378 } 1379 #endif 1380 #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || \ 1381 IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) 1382 if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) { 1383 retval = dwc2_hcd_init(hsotg); 1384 if (retval) { 1385 if (hsotg->gadget_enabled) 1386 dwc2_hsotg_remove(hsotg); 1387 goto fail2; 1388 } 1389 hsotg->hcd_enabled = 1; 1390 } 1391 #endif 1392 1393 uint32_t snpsid = hsotg->hw_params.snpsid; 1394 aprint_verbose_dev(sc->sc_dev, "Core Release: %x.%x%x%x (snpsid=%x)\n", 1395 snpsid >> 12 & 0xf, snpsid >> 8 & 0xf, 1396 snpsid >> 4 & 0xf, snpsid & 
0xf, snpsid); 1397 1398 return 0; 1399 1400 fail2: 1401 err = -retval; 1402 kmem_free(sc->sc_hsotg, sizeof(struct dwc2_hsotg)); 1403 softint_disestablish(sc->sc_rhc_si); 1404 1405 return err; 1406 } 1407 1408 #if 0 1409 /* 1410 * curmode is a mode indication bit 0 = device, 1 = host 1411 */ 1412 static const char * const intnames[32] = { 1413 "curmode", "modemis", "otgint", "sof", 1414 "rxflvl", "nptxfemp", "ginnakeff", "goutnakeff", 1415 "ulpickint", "i2cint", "erlysusp", "usbsusp", 1416 "usbrst", "enumdone", "isooutdrop", "eopf", 1417 "restore_done", "epmis", "iepint", "oepint", 1418 "incompisoin", "incomplp", "fetsusp", "resetdet", 1419 "prtint", "hchint", "ptxfemp", "lpm", 1420 "conidstschng", "disconnint", "sessreqint", "wkupint" 1421 }; 1422 1423 1424 /***********************************************************************/ 1425 1426 #endif 1427 1428 void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr, 1429 int *hub_port) 1430 { 1431 struct usbd_xfer *xfer = context; 1432 struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer); 1433 struct usbd_device *dev = dpipe->pipe.up_dev; 1434 1435 *hub_addr = dev->ud_myhsport->up_parent->ud_addr; 1436 *hub_port = dev->ud_myhsport->up_portno; 1437 } 1438 1439 int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context) 1440 { 1441 struct usbd_xfer *xfer = context; 1442 struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer); 1443 struct usbd_device *dev = dpipe->pipe.up_dev; 1444 1445 return dev->ud_speed; 1446 } 1447 1448 /* 1449 * Sets the final status of an URB and returns it to the upper layer. Any 1450 * required cleanup of the URB is performed. 
 * Must be called with interrupt disabled and spinlock held
 */
void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
			int status)
{
	struct usbd_xfer *xfer;
	struct dwc2_xfer *dxfer;
	struct dwc2_softc *sc;
	usb_endpoint_descriptor_t *ed;
	uint8_t xfertype;

	KASSERT(mutex_owned(&hsotg->lock));

	/* Defensive checks: qtd, its urb and the urb's xfer must exist. */
	if (!qtd) {
		dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__);
		return;
	}

	if (!qtd->urb) {
		dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__);
		return;
	}

	xfer = qtd->urb->priv;
	if (!xfer) {
		dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__);
		return;
	}

	dxfer = DWC2_XFER2DXFER(xfer);
	sc = DWC2_XFER2SC(xfer);
	ed = xfer->ux_pipe->up_endpoint->ue_edesc;
	xfertype = UE_GET_XFERTYPE(ed->bmAttributes);

	struct dwc2_hcd_urb *urb = qtd->urb;
	xfer->ux_actlen = dwc2_hcd_urb_get_actual_length(urb);

	DPRINTFN(3, "xfer=%p actlen=%d\n", xfer, xfer->ux_actlen);

	if (xfertype == UE_ISOCHRONOUS) {
		/*
		 * For isoc, recompute the actual length as the sum of the
		 * per-frame actual lengths, filling in ux_frlengths[].
		 */
		xfer->ux_actlen = 0;
		for (size_t i = 0; i < xfer->ux_nframes; ++i) {
			xfer->ux_frlengths[i] =
			    dwc2_hcd_urb_get_iso_desc_actual_length(
				urb, i);
			DPRINTFN(1, "xfer=%p frame=%zu length=%d\n", xfer, i,
			    xfer->ux_frlengths[i]);
			xfer->ux_actlen += xfer->ux_frlengths[i];
		}
		DPRINTFN(1, "xfer=%p actlen=%d (isoc)\n", xfer,
		    xfer->ux_actlen);
	}

	if (xfertype == UE_ISOCHRONOUS && dbg_perio()) {
		for (size_t i = 0; i < xfer->ux_nframes; i++)
			dev_vdbg(hsotg->dev, " ISO Desc %zu status %d\n",
			    i, urb->iso_descs[i].status);
	}

	/*
	 * A short transfer is an error unless the caller allowed it with
	 * USBD_SHORT_XFER_OK.
	 */
	if (!status) {
		if (!(xfer->ux_flags & USBD_SHORT_XFER_OK) &&
		    xfer->ux_actlen < xfer->ux_length)
			status = -EIO;
	}

	/* Translate the negative Linux-style errno into a usbd status. */
	switch (status) {
	case 0:
		dxfer->intr_status = USBD_NORMAL_COMPLETION;
		break;
	case -EPIPE:
		dxfer->intr_status = USBD_STALLED;
		break;
	case -EPROTO:
		dxfer->intr_status = USBD_INVAL;
		break;
	case -EIO:
		dxfer->intr_status = USBD_IOERROR;
		break;
	case -EOVERFLOW:
		dxfer->intr_status = USBD_IOERROR;
		break;
	default:
		dxfer->intr_status = USBD_IOERROR;
		printf("%s: unknown error status %d\n", __func__, status);
	}

	if (dxfer->intr_status == USBD_NORMAL_COMPLETION) {
		/*
		 * control transfers with no data phase don't touch dmabuf, but
		 * everything else does.
		 */
		if (!(xfertype == UE_CONTROL &&
		    UGETW(xfer->ux_request.wLength) == 0) &&
		    xfer->ux_actlen > 0 /* XXX PR/53503 */
		    ) {
			int rd = usbd_xfer_isread(xfer);

			usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_actlen,
			    rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		}
	}

	/* Return the periodic bandwidth reserved at submission time. */
	if (xfertype == UE_ISOCHRONOUS ||
	    xfertype == UE_INTERRUPT) {
		struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);

		dwc2_free_bus_bandwidth(hsotg,
		    dwc2_hcd_get_ep_bandwidth(hsotg, dpipe),
		    xfer);
	}

	qtd->urb = NULL;
	KASSERT(mutex_owned(&hsotg->lock));

	/*
	 * Hand the xfer to the completion queue and kick the usb soft
	 * interrupt.  The lock is dropped around usb_schedsoftintr()
	 * and reacquired before returning, since the caller still
	 * expects to hold hsotg->lock.
	 */
	TAILQ_INSERT_TAIL(&sc->sc_complete, dxfer, xnext);

	mutex_spin_exit(&hsotg->lock);
	usb_schedsoftintr(&sc->sc_bus);
	mutex_spin_enter(&hsotg->lock);
}


/*
 * Start the host controller: leave low-power state and reinitialise
 * the hcd, unless the core is currently in device mode.
 */
int
_dwc2_hcd_start(struct dwc2_hsotg *hsotg)
{
	dev_dbg(hsotg->dev, "DWC OTG HCD START\n");

	mutex_spin_enter(&hsotg->lock);

	hsotg->lx_state = DWC2_L0;

	if (dwc2_is_device_mode(hsotg)) {
		mutex_spin_exit(&hsotg->lock);
		return 0;	/* why 0 ?? */
	}

	dwc2_hcd_reinit(hsotg);

	mutex_spin_exit(&hsotg->lock);
	return 0;
}

/* HNP (Host Negotiation Protocol) is not supported; always false. */
int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg)
{

	return false;
}