1 /* $OpenBSD: dwc2.c,v 1.58 2021/07/30 18:56:01 mglocker Exp $ */ 2 /* $NetBSD: dwc2.c,v 1.32 2014/09/02 23:26:20 macallan Exp $ */ 3 4 /*- 5 * Copyright (c) 2013 The NetBSD Foundation, Inc. 6 * All rights reserved. 7 * 8 * This code is derived from software contributed to The NetBSD Foundation 9 * by Nick Hudson 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 * POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 #include <sys/param.h> 34 #include <sys/systm.h> 35 #include <sys/malloc.h> 36 #include <sys/kernel.h> 37 #include <sys/device.h> 38 #include <sys/select.h> 39 #include <sys/proc.h> 40 #include <sys/queue.h> 41 #include <sys/endian.h> 42 43 #include <machine/bus.h> 44 45 #include <dev/usb/usb.h> 46 #include <dev/usb/usbdi.h> 47 #include <dev/usb/usbdivar.h> 48 #include <dev/usb/usb_mem.h> 49 50 #include <dev/usb/dwc2/dwc2.h> 51 #include <dev/usb/dwc2/dwc2var.h> 52 53 #include <dev/usb/dwc2/dwc2_core.h> 54 #include <dev/usb/dwc2/dwc2_hcd.h> 55 56 #ifdef DWC2_COUNTERS 57 #define DWC2_EVCNT_ADD(a,b) ((void)((a).ev_count += (b))) 58 #else 59 #define DWC2_EVCNT_ADD(a,b) do { } while (/*CONSTCOND*/0) 60 #endif 61 #define DWC2_EVCNT_INCR(a) DWC2_EVCNT_ADD((a), 1) 62 63 #ifdef DWC2_DEBUG 64 #define DPRINTFN(n,fmt,...) do { \ 65 if (dwc2debug >= (n)) { \ 66 printf("%s: " fmt, \ 67 __FUNCTION__,## __VA_ARGS__); \ 68 } \ 69 } while (0) 70 #define DPRINTF(...) DPRINTFN(1, __VA_ARGS__) 71 int dwc2debug = 0; 72 #else 73 #define DPRINTF(...) do { } while (0) 74 #define DPRINTFN(...) do { } while (0) 75 #endif 76 77 STATIC usbd_status dwc2_open(struct usbd_pipe *); 78 STATIC int dwc2_setaddr(struct usbd_device *, int); 79 STATIC void dwc2_poll(struct usbd_bus *); 80 STATIC void dwc2_softintr(void *); 81 82 STATIC struct usbd_xfer *dwc2_allocx(struct usbd_bus *); 83 STATIC void dwc2_freex(struct usbd_bus *, struct usbd_xfer *); 84 85 STATIC usbd_status dwc2_root_ctrl_transfer(struct usbd_xfer *); 86 STATIC usbd_status dwc2_root_ctrl_start(struct usbd_xfer *); 87 STATIC void dwc2_root_ctrl_abort(struct usbd_xfer *); 88 STATIC void dwc2_root_ctrl_close(struct usbd_pipe *); 89 STATIC void dwc2_root_ctrl_done(struct usbd_xfer *); 90 91 STATIC usbd_status dwc2_root_intr_transfer(struct usbd_xfer *); 92 STATIC usbd_status dwc2_root_intr_start(struct usbd_xfer *); 93 STATIC void dwc2_root_intr_abort(struct usbd_xfer *); 94 STATIC void dwc2_root_intr_close(struct usbd_pipe *); 95 STATIC void dwc2_root_intr_done(struct usbd_xfer *); 96 97 STATIC usbd_status dwc2_device_ctrl_transfer(struct usbd_xfer *); 98 STATIC usbd_status dwc2_device_ctrl_start(struct usbd_xfer *); 99 STATIC void dwc2_device_ctrl_abort(struct usbd_xfer *); 100 STATIC void dwc2_device_ctrl_close(struct usbd_pipe *); 101 STATIC void dwc2_device_ctrl_done(struct usbd_xfer *); 102 103 STATIC usbd_status dwc2_device_bulk_transfer(struct usbd_xfer *); 104 STATIC usbd_status dwc2_device_bulk_start(struct usbd_xfer *); 105 STATIC void dwc2_device_bulk_abort(struct usbd_xfer *); 106 STATIC void dwc2_device_bulk_close(struct usbd_pipe *); 107 STATIC void dwc2_device_bulk_done(struct usbd_xfer *); 108 109 STATIC usbd_status dwc2_device_intr_transfer(struct usbd_xfer *); 110 STATIC usbd_status dwc2_device_intr_start(struct usbd_xfer *); 111 STATIC void dwc2_device_intr_abort(struct usbd_xfer *); 112 STATIC void dwc2_device_intr_close(struct usbd_pipe *); 113 STATIC void dwc2_device_intr_done(struct usbd_xfer *); 114 115 STATIC usbd_status dwc2_device_isoc_transfer(struct usbd_xfer *); 116 STATIC usbd_status dwc2_device_isoc_start(struct usbd_xfer *); 117 STATIC void dwc2_device_isoc_abort(struct usbd_xfer *); 118 STATIC void dwc2_device_isoc_close(struct usbd_pipe *); 119 STATIC void dwc2_device_isoc_done(struct usbd_xfer *); 120 121 STATIC usbd_status dwc2_device_start(struct usbd_xfer *); 122 123 STATIC void dwc2_close_pipe(struct usbd_pipe *); 124 STATIC void dwc2_abort_xfer(struct usbd_xfer *, usbd_status); 125 126 STATIC void dwc2_device_clear_toggle(struct usbd_pipe *); 127 STATIC void dwc2_noop(struct usbd_pipe *pipe); 128 129 STATIC int dwc2_interrupt(struct dwc2_softc *); 130 STATIC void dwc2_rhc(void *); 131 132 STATIC void dwc2_timeout(void *); 133 STATIC void dwc2_timeout_task(void *); 134 135 static inline void 136 dwc2_allocate_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw, 137 struct usbd_xfer *xfer) 138 { 139 } 140 141 static inline void 142 dwc2_free_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw, 143 struct usbd_xfer *xfer) 144 { 145 } 146 147 #define DWC2_INTR_ENDPT 1 148 149 STATIC struct usbd_bus_methods dwc2_bus_methods = { 150 .open_pipe = dwc2_open, 151 .dev_setaddr = dwc2_setaddr, 152 .soft_intr = dwc2_softintr, 153 .do_poll = dwc2_poll, 154 .allocx = dwc2_allocx, 155 .freex = dwc2_freex, 156 }; 157 158 STATIC struct usbd_pipe_methods dwc2_root_ctrl_methods = { 159 .transfer = dwc2_root_ctrl_transfer, 160 .start = dwc2_root_ctrl_start, 161 .abort = dwc2_root_ctrl_abort, 162 .close = dwc2_root_ctrl_close, 163 .cleartoggle = dwc2_noop, 164 .done = dwc2_root_ctrl_done, 165 }; 166 167 STATIC struct usbd_pipe_methods dwc2_root_intr_methods = { 168 .transfer = dwc2_root_intr_transfer, 169 .start = dwc2_root_intr_start, 170 .abort = dwc2_root_intr_abort, 171 .close = dwc2_root_intr_close, 172 .cleartoggle = dwc2_noop, 173 .done = dwc2_root_intr_done, 174 }; 175 176 STATIC struct usbd_pipe_methods dwc2_device_ctrl_methods = { 177 .transfer = dwc2_device_ctrl_transfer, 178 .start = dwc2_device_ctrl_start, 179 .abort = dwc2_device_ctrl_abort, 180 .close = dwc2_device_ctrl_close, 181 .cleartoggle = dwc2_noop, 182 .done = dwc2_device_ctrl_done, 183 }; 184 185 STATIC struct usbd_pipe_methods dwc2_device_intr_methods = { 186 .transfer = dwc2_device_intr_transfer, 187 .start = dwc2_device_intr_start, 188 .abort = dwc2_device_intr_abort, 189 .close = dwc2_device_intr_close, 190 .cleartoggle = dwc2_device_clear_toggle, 191 .done = dwc2_device_intr_done, 192 }; 193 194 STATIC struct usbd_pipe_methods dwc2_device_bulk_methods = { 195 .transfer = dwc2_device_bulk_transfer, 196 .start = dwc2_device_bulk_start, 197 .abort = dwc2_device_bulk_abort, 198 .close = dwc2_device_bulk_close, 199 .cleartoggle = dwc2_device_clear_toggle, 200 .done = dwc2_device_bulk_done, 201 }; 202 203 STATIC struct usbd_pipe_methods dwc2_device_isoc_methods = { 204 .transfer = dwc2_device_isoc_transfer, 205 .start = dwc2_device_isoc_start, 206 .abort = dwc2_device_isoc_abort, 207 .close = dwc2_device_isoc_close, 208 .cleartoggle = dwc2_noop, 209 .done = dwc2_device_isoc_done, 210 }; 211 212 /* 213 * Work around the half configured control (default) pipe when setting 214 * the address of a device. 215 */ 216 STATIC int 217 dwc2_setaddr(struct usbd_device *dev, int addr) 218 { 219 if (usbd_set_address(dev, addr)) 220 return (1); 221 222 dev->address = addr; 223 224 /* 225 * Re-establish the default pipe with the new address and the 226 * new max packet size. 227 */ 228 dwc2_close_pipe(dev->default_pipe); 229 if (dwc2_open(dev->default_pipe)) 230 return (EINVAL); 231 232 return (0); 233 } 234 235 struct usbd_xfer * 236 dwc2_allocx(struct usbd_bus *bus) 237 { 238 struct dwc2_softc *sc = DWC2_BUS2SC(bus); 239 struct dwc2_xfer *dxfer; 240 241 DPRINTFN(10, "\n"); 242 243 DWC2_EVCNT_INCR(sc->sc_ev_xferpoolget); 244 dxfer = pool_get(&sc->sc_xferpool, PR_WAITOK); 245 if (dxfer != NULL) { 246 memset(dxfer, 0, sizeof(*dxfer)); 247 dxfer->urb = dwc2_hcd_urb_alloc(sc->sc_hsotg, 248 DWC2_MAXISOCPACKETS, M_NOWAIT); 249 #ifdef DIAGNOSTIC 250 dxfer->xfer.busy_free = XFER_ONQU; 251 #endif 252 } 253 return (struct usbd_xfer *)dxfer; 254 } 255 256 void 257 dwc2_freex(struct usbd_bus *bus, struct usbd_xfer *xfer) 258 { 259 struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer); 260 struct dwc2_softc *sc = DWC2_BUS2SC(bus); 261 262 DPRINTFN(10, "\n"); 263 264 #ifdef DIAGNOSTIC 265 if (xfer->busy_free != XFER_ONQU && 266 xfer->status != USBD_NOT_STARTED) { 267 DPRINTF("xfer=%p not busy, 0x%08x\n", xfer, xfer->busy_free); 268 } 269 xfer->busy_free = XFER_FREE; 270 #endif 271 DWC2_EVCNT_INCR(sc->sc_ev_xferpoolput); 272 dwc2_hcd_urb_free(sc->sc_hsotg, dxfer->urb, DWC2_MAXISOCPACKETS); 273 pool_put(&sc->sc_xferpool, xfer); 274 } 275 276 STATIC void 277 dwc2_rhc(void *addr) 278 { 279 struct dwc2_softc *sc = addr; 280 struct usbd_xfer *xfer; 281 u_char *p; 282 283 DPRINTF("\n"); 284 mtx_enter(&sc->sc_lock); 285 xfer = sc->sc_intrxfer; 286 287 if (xfer == NULL) { 288 /* Just ignore the change. */ 289 mtx_leave(&sc->sc_lock); 290 return; 291 292 } 293 294 /* set port bit */ 295 p = KERNADDR(&xfer->dmabuf, 0); 296 297 p[0] = 0x02; /* we only have one port (1 << 1) */ 298 299 xfer->actlen = xfer->length; 300 xfer->status = USBD_NORMAL_COMPLETION; 301 302 usb_transfer_complete(xfer); 303 mtx_leave(&sc->sc_lock); 304 } 305 306 STATIC void 307 dwc2_softintr(void *v) 308 { 309 struct usbd_bus *bus = v; 310 struct dwc2_softc *sc = DWC2_BUS2SC(bus); 311 struct dwc2_hsotg *hsotg = sc->sc_hsotg; 312 struct dwc2_xfer *dxfer, *next; 313 TAILQ_HEAD(, dwc2_xfer) claimed = TAILQ_HEAD_INITIALIZER(claimed); 314 315 /* 316 * Grab all the xfers that have not been aborted or timed out. 317 * Do so under a single lock -- without dropping it to run 318 * usb_transfer_complete as we go -- so that dwc2_abortx won't 319 * remove next out from under us during iteration when we've 320 * dropped the lock. 321 */ 322 mtx_enter(&hsotg->lock); 323 TAILQ_FOREACH_SAFE(dxfer, &sc->sc_complete, xnext, next) { 324 KASSERT(dxfer->xfer.status == USBD_IN_PROGRESS); 325 KASSERT(dxfer->intr_status != USBD_CANCELLED); 326 KASSERT(dxfer->intr_status != USBD_TIMEOUT); 327 TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext); 328 TAILQ_INSERT_TAIL(&claimed, dxfer, xnext); 329 } 330 mtx_leave(&hsotg->lock); 331 332 /* Now complete them. */ 333 while (!TAILQ_EMPTY(&claimed)) { 334 dxfer = TAILQ_FIRST(&claimed); 335 KASSERT(dxfer->xfer.status == USBD_IN_PROGRESS); 336 KASSERT(dxfer->intr_status != USBD_CANCELLED); 337 KASSERT(dxfer->intr_status != USBD_TIMEOUT); 338 TAILQ_REMOVE(&claimed, dxfer, xnext); 339 340 dxfer->xfer.status = dxfer->intr_status; 341 usb_transfer_complete(&dxfer->xfer); 342 } 343 } 344 345 STATIC void 346 dwc2_timeout(void *addr) 347 { 348 struct usbd_xfer *xfer = addr; 349 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 350 351 DPRINTF("xfer=%p\n", xfer); 352 353 if (sc->sc_bus.dying) { 354 dwc2_timeout_task(addr); 355 return; 356 } 357 358 /* Execute the abort in a process context. */ 359 usb_init_task(&xfer->abort_task, dwc2_timeout_task, addr, 360 USB_TASK_TYPE_ABORT); 361 usb_add_task(xfer->device, &xfer->abort_task); 362 } 363 364 STATIC void 365 dwc2_timeout_task(void *addr) 366 { 367 struct usbd_xfer *xfer = addr; 368 int s; 369 370 DPRINTF("xfer=%p\n", xfer); 371 372 s = splusb(); 373 dwc2_abort_xfer(xfer, USBD_TIMEOUT); 374 splx(s); 375 } 376 377 usbd_status 378 dwc2_open(struct usbd_pipe *pipe) 379 { 380 struct usbd_device *dev = pipe->device; 381 struct dwc2_softc *sc = DWC2_PIPE2SC(pipe); 382 struct dwc2_pipe *dpipe = DWC2_PIPE2DPIPE(pipe); 383 usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc; 384 uint8_t addr = dev->address; 385 uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 386 usbd_status err; 387 388 DPRINTF("pipe %p addr %d xfertype %d dir %s\n", pipe, addr, xfertype, 389 UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN ? "in" : "out"); 390 391 if (sc->sc_bus.dying) { 392 return USBD_IOERROR; 393 } 394 395 if (addr == sc->sc_addr) { 396 switch (ed->bEndpointAddress) { 397 case USB_CONTROL_ENDPOINT: 398 pipe->methods = &dwc2_root_ctrl_methods; 399 break; 400 case UE_DIR_IN | DWC2_INTR_ENDPT: 401 pipe->methods = &dwc2_root_intr_methods; 402 break; 403 default: 404 DPRINTF("bad bEndpointAddress 0x%02x\n", 405 ed->bEndpointAddress); 406 return USBD_INVAL; 407 } 408 DPRINTF("root hub pipe open\n"); 409 return USBD_NORMAL_COMPLETION; 410 } 411 412 switch (xfertype) { 413 case UE_CONTROL: 414 pipe->methods = &dwc2_device_ctrl_methods; 415 err = usb_allocmem(&sc->sc_bus, sizeof(usb_device_request_t), 416 0, USB_DMA_COHERENT, &dpipe->req_dma); 417 if (err) 418 return USBD_NOMEM; 419 break; 420 case UE_INTERRUPT: 421 pipe->methods = &dwc2_device_intr_methods; 422 break; 423 case UE_ISOCHRONOUS: 424 pipe->methods = &dwc2_device_isoc_methods; 425 break; 426 case UE_BULK: 427 pipe->methods = &dwc2_device_bulk_methods; 428 break; 429 default: 430 DPRINTF("bad xfer type %d\n", xfertype); 431 return USBD_INVAL; 432 } 433 434 /* QH */ 435 dpipe->priv = NULL; 436 437 return USBD_NORMAL_COMPLETION; 438 } 439 440 STATIC void 441 dwc2_poll(struct usbd_bus *bus) 442 { 443 struct dwc2_softc *sc = DWC2_BUS2SC(bus); 444 struct dwc2_hsotg *hsotg = sc->sc_hsotg; 445 446 mtx_enter(&hsotg->lock); 447 dwc2_interrupt(sc); 448 mtx_leave(&hsotg->lock); 449 } 450 451 /* 452 * Close a reqular pipe. 453 * Assumes that there are no pending transactions. 454 */ 455 STATIC void 456 dwc2_close_pipe(struct usbd_pipe *pipe) 457 { 458 /* nothing */ 459 } 460 461 /* 462 * Abort a device request. 463 */ 464 STATIC void 465 dwc2_abort_xfer(struct usbd_xfer *xfer, usbd_status status) 466 { 467 struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer); 468 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 469 struct dwc2_hsotg *hsotg = sc->sc_hsotg; 470 struct dwc2_xfer *d; 471 int err; 472 473 splsoftassert(IPL_SOFTUSB); 474 475 DPRINTF("xfer %p pipe %p status 0x%08x\n", xfer, xfer->pipe, 476 xfer->status); 477 478 /* XXX The stack should not call abort() in this case. */ 479 if (sc->sc_bus.dying || xfer->status == USBD_NOT_STARTED) { 480 xfer->status = status; 481 timeout_del(&xfer->timeout_handle); 482 usb_rem_task(xfer->device, &xfer->abort_task); 483 usb_transfer_complete(xfer); 484 return; 485 } 486 487 KASSERT(xfer->status != USBD_CANCELLED); 488 /* Transfer is already done. */ 489 if (xfer->status != USBD_IN_PROGRESS) { 490 DPRINTF("%s: already done \n", __func__); 491 return; 492 } 493 494 /* Prevent any timeout to kick in. */ 495 timeout_del(&xfer->timeout_handle); 496 usb_rem_task(xfer->device, &xfer->abort_task); 497 498 /* Claim the transfer status as cancelled. */ 499 xfer->status = USBD_CANCELLED; 500 501 KASSERTMSG((xfer->status == USBD_CANCELLED || 502 xfer->status == USBD_TIMEOUT), 503 "bad abort status: %d", xfer->status); 504 505 mtx_enter(&hsotg->lock); 506 507 /* 508 * Check whether we aborted or timed out after the hardware 509 * completion interrupt determined that it's done but before 510 * the soft interrupt could actually complete it. If so, it's 511 * too late for the soft interrupt -- at this point we've 512 * already committed to abort it or time it out, so we need to 513 * take it off the softint's list of work in case the caller, 514 * say, frees the xfer before the softint runs. 515 * 516 * This logic is unusual among host controller drivers, and 517 * happens because dwc2 decides to complete xfers in the hard 518 * interrupt handler rather than in the soft interrupt handler, 519 * but usb_transfer_complete must be deferred to softint -- and 520 * we happened to swoop in between the hard interrupt and the 521 * soft interrupt. Other host controller drivers do almost all 522 * processing in the softint so there's no intermediate stage. 523 * 524 * Fortunately, this linear search to discern the intermediate 525 * stage is not likely to be a serious performance impact 526 * because it happens only on abort or timeout. 527 */ 528 TAILQ_FOREACH(d, &sc->sc_complete, xnext) { 529 if (d == dxfer) { 530 TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext); 531 break; 532 } 533 } 534 535 /* 536 * HC Step 1: Handle the hardware. 537 */ 538 err = dwc2_hcd_urb_dequeue(hsotg, dxfer->urb); 539 if (err) { 540 DPRINTF("dwc2_hcd_urb_dequeue failed\n"); 541 } 542 543 mtx_leave(&hsotg->lock); 544 545 /* 546 * Final Step: Notify completion to waiting xfers. 547 */ 548 usb_transfer_complete(xfer); 549 } 550 551 STATIC void 552 dwc2_noop(struct usbd_pipe *pipe) 553 { 554 555 } 556 557 STATIC void 558 dwc2_device_clear_toggle(struct usbd_pipe *pipe) 559 { 560 561 DPRINTF("toggle %d -> 0", pipe->endpoint->savedtoggle); 562 } 563 564 /***********************************************************************/ 565 566 /* 567 * Data structures and routines to emulate the root hub. 568 */ 569 570 STATIC const usb_device_descriptor_t dwc2_devd = { 571 .bLength = sizeof(usb_device_descriptor_t), 572 .bDescriptorType = UDESC_DEVICE, 573 .bcdUSB = {0x00, 0x02}, 574 .bDeviceClass = UDCLASS_HUB, 575 .bDeviceSubClass = UDSUBCLASS_HUB, 576 .bDeviceProtocol = UDPROTO_HSHUBSTT, 577 .bMaxPacketSize = 64, 578 .bcdDevice = {0x00, 0x01}, 579 .iManufacturer = 1, 580 .iProduct = 2, 581 .bNumConfigurations = 1, 582 }; 583 584 struct dwc2_config_desc { 585 usb_config_descriptor_t confd; 586 usb_interface_descriptor_t ifcd; 587 usb_endpoint_descriptor_t endpd; 588 } __packed; 589 590 STATIC const struct dwc2_config_desc dwc2_confd = { 591 .confd = { 592 .bLength = USB_CONFIG_DESCRIPTOR_SIZE, 593 .bDescriptorType = UDESC_CONFIG, 594 .wTotalLength[0] = sizeof(dwc2_confd), 595 .bNumInterfaces = 1, 596 .bConfigurationValue = 1, 597 .iConfiguration = 0, 598 .bmAttributes = UC_BUS_POWERED | UC_SELF_POWERED, 599 .bMaxPower = 0, 600 }, 601 .ifcd = { 602 .bLength = USB_INTERFACE_DESCRIPTOR_SIZE, 603 .bDescriptorType = UDESC_INTERFACE, 604 .bInterfaceNumber = 0, 605 .bAlternateSetting = 0, 606 .bNumEndpoints = 1, 607 .bInterfaceClass = UICLASS_HUB, 608 .bInterfaceSubClass = UISUBCLASS_HUB, 609 .bInterfaceProtocol = UIPROTO_HSHUBSTT, 610 .iInterface = 0 611 }, 612 .endpd = { 613 .bLength = USB_ENDPOINT_DESCRIPTOR_SIZE, 614 .bDescriptorType = UDESC_ENDPOINT, 615 .bEndpointAddress = UE_DIR_IN | DWC2_INTR_ENDPT, 616 .bmAttributes = UE_INTERRUPT, 617 .wMaxPacketSize = {8, 0}, /* max packet */ 618 .bInterval = 255, 619 }, 620 }; 621 622 STATIC usbd_status 623 dwc2_root_ctrl_transfer(struct usbd_xfer *xfer) 624 { 625 usbd_status err; 626 627 err = usb_insert_transfer(xfer); 628 if (err) 629 return err; 630 631 return dwc2_root_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)); 632 } 633 634 STATIC usbd_status 635 dwc2_root_ctrl_start(struct usbd_xfer *xfer) 636 { 637 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 638 usb_device_request_t *req; 639 uint8_t *buf; 640 uint16_t len; 641 int value, index, l, s, totlen; 642 usbd_status err = USBD_IOERROR; 643 644 if (sc->sc_bus.dying) 645 return USBD_IOERROR; 646 647 req = &xfer->request; 648 649 DPRINTFN(4, "type=0x%02x request=%02x\n", 650 req->bmRequestType, req->bRequest); 651 652 len = UGETW(req->wLength); 653 value = UGETW(req->wValue); 654 index = UGETW(req->wIndex); 655 656 buf = len ? KERNADDR(&xfer->dmabuf, 0) : NULL; 657 658 totlen = 0; 659 660 #define C(x,y) ((x) | ((y) << 8)) 661 switch (C(req->bRequest, req->bmRequestType)) { 662 case C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE): 663 case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE): 664 case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT): 665 /* 666 * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops 667 * for the integrated root hub. 668 */ 669 break; 670 case C(UR_GET_CONFIG, UT_READ_DEVICE): 671 if (len > 0) { 672 *buf = sc->sc_conf; 673 totlen = 1; 674 } 675 break; 676 case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE): 677 DPRINTFN(8, "wValue=0x%04x\n", value); 678 679 if (len == 0) 680 break; 681 switch (value) { 682 case C(0, UDESC_DEVICE): 683 l = min(len, USB_DEVICE_DESCRIPTOR_SIZE); 684 // USETW(dwc2_devd.idVendor, sc->sc_id_vendor); 685 memcpy(buf, &dwc2_devd, l); 686 buf += l; 687 len -= l; 688 totlen += l; 689 690 break; 691 case C(0, UDESC_CONFIG): 692 l = min(len, sizeof(dwc2_confd)); 693 memcpy(buf, &dwc2_confd, l); 694 buf += l; 695 len -= l; 696 totlen += l; 697 698 break; 699 #define sd ((usb_string_descriptor_t *)buf) 700 case C(0, UDESC_STRING): 701 totlen = usbd_str(sd, len, "\001"); 702 break; 703 case C(1, UDESC_STRING): 704 totlen = usbd_str(sd, len, sc->sc_vendor); 705 break; 706 case C(2, UDESC_STRING): 707 totlen = usbd_str(sd, len, "DWC2 root hub"); 708 break; 709 #undef sd 710 default: 711 goto fail; 712 } 713 break; 714 case C(UR_GET_INTERFACE, UT_READ_INTERFACE): 715 if (len > 0) { 716 *buf = 0; 717 totlen = 1; 718 } 719 break; 720 case C(UR_GET_STATUS, UT_READ_DEVICE): 721 if (len > 1) { 722 USETW(((usb_status_t *)buf)->wStatus,UDS_SELF_POWERED); 723 totlen = 2; 724 } 725 break; 726 case C(UR_GET_STATUS, UT_READ_INTERFACE): 727 case C(UR_GET_STATUS, UT_READ_ENDPOINT): 728 if (len > 1) { 729 USETW(((usb_status_t *)buf)->wStatus, 0); 730 totlen = 2; 731 } 732 break; 733 case C(UR_SET_ADDRESS, UT_WRITE_DEVICE): 734 DPRINTF("UR_SET_ADDRESS, UT_WRITE_DEVICE: addr %d\n", 735 value); 736 if (value >= USB_MAX_DEVICES) 737 goto fail; 738 739 sc->sc_addr = value; 740 break; 741 case C(UR_SET_CONFIG, UT_WRITE_DEVICE): 742 if (value != 0 && value != 1) 743 goto fail; 744 745 sc->sc_conf = value; 746 break; 747 case C(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE): 748 break; 749 case C(UR_SET_FEATURE, UT_WRITE_DEVICE): 750 case C(UR_SET_FEATURE, UT_WRITE_INTERFACE): 751 case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT): 752 err = USBD_IOERROR; 753 goto fail; 754 case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE): 755 break; 756 case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT): 757 break; 758 default: 759 /* Hub requests - XXXNH len check? */ 760 err = dwc2_hcd_hub_control(sc->sc_hsotg, 761 C(req->bRequest, req->bmRequestType), value, index, 762 buf, len); 763 if (err) { 764 err = USBD_IOERROR; 765 goto fail; 766 } 767 totlen = len; 768 } 769 xfer->actlen = totlen; 770 err = USBD_NORMAL_COMPLETION; 771 772 fail: 773 s = splusb(); 774 xfer->status = err; 775 usb_transfer_complete(xfer); 776 splx(s); 777 778 return err; 779 } 780 781 STATIC void 782 dwc2_root_ctrl_abort(struct usbd_xfer *xfer) 783 { 784 DPRINTFN(10, "\n"); 785 786 /* Nothing to do, all transfers are synchronous. */ 787 } 788 789 STATIC void 790 dwc2_root_ctrl_close(struct usbd_pipe *pipe) 791 { 792 DPRINTFN(10, "\n"); 793 794 /* Nothing to do. */ 795 } 796 797 STATIC void 798 dwc2_root_ctrl_done(struct usbd_xfer *xfer) 799 { 800 DPRINTFN(10, "\n"); 801 802 /* Nothing to do. */ 803 } 804 805 STATIC usbd_status 806 dwc2_root_intr_transfer(struct usbd_xfer *xfer) 807 { 808 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 809 usbd_status err; 810 811 DPRINTF("\n"); 812 813 /* Insert last in queue. */ 814 mtx_enter(&sc->sc_lock); 815 err = usb_insert_transfer(xfer); 816 mtx_leave(&sc->sc_lock); 817 if (err) 818 return err; 819 820 /* Pipe isn't running, start first */ 821 return dwc2_root_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue)); 822 } 823 824 STATIC usbd_status 825 dwc2_root_intr_start(struct usbd_xfer *xfer) 826 { 827 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 828 const bool polling = sc->sc_bus.use_polling; 829 830 DPRINTF("\n"); 831 832 if (sc->sc_bus.dying) 833 return USBD_IOERROR; 834 835 if (!polling) 836 mtx_enter(&sc->sc_lock); 837 KASSERT(sc->sc_intrxfer == NULL); 838 sc->sc_intrxfer = xfer; 839 xfer->status = USBD_IN_PROGRESS; 840 if (!polling) 841 mtx_leave(&sc->sc_lock); 842 843 return USBD_IN_PROGRESS; 844 } 845 846 /* Abort a root interrupt request. */ 847 STATIC void 848 dwc2_root_intr_abort(struct usbd_xfer *xfer) 849 { 850 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 851 852 DPRINTF("xfer=%p\n", xfer); 853 854 /* If xfer has already completed, nothing to do here. */ 855 if (sc->sc_intrxfer == NULL) 856 return; 857 858 /* 859 * Otherwise, sc->sc_intrxfer had better be this transfer. 860 * Cancel it. 861 */ 862 KASSERT(sc->sc_intrxfer == xfer); 863 KASSERT(xfer->status == USBD_IN_PROGRESS); 864 xfer->status = USBD_CANCELLED; 865 usb_transfer_complete(xfer); 866 } 867 868 STATIC void 869 dwc2_root_intr_close(struct usbd_pipe *pipe) 870 { 871 struct dwc2_softc *sc = DWC2_PIPE2SC(pipe); 872 873 DPRINTF("\n"); 874 875 /* 876 * Caller must guarantee the xfer has completed first, by 877 * closing the pipe only after normal completion or an abort. 878 */ 879 if (sc->sc_intrxfer == NULL) 880 panic("%s: sc->sc_intrxfer == NULL", __func__); 881 } 882 883 STATIC void 884 dwc2_root_intr_done(struct usbd_xfer *xfer) 885 { 886 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 887 888 DPRINTF("\n"); 889 890 /* Claim the xfer so it doesn't get completed again. */ 891 KASSERT(sc->sc_intrxfer == xfer); 892 KASSERT(xfer->status != USBD_IN_PROGRESS); 893 sc->sc_intrxfer = NULL; 894 } 895 896 /***********************************************************************/ 897 898 STATIC usbd_status 899 dwc2_device_ctrl_transfer(struct usbd_xfer *xfer) 900 { 901 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 902 usbd_status err; 903 904 DPRINTF("\n"); 905 906 /* Insert last in queue. */ 907 mtx_enter(&sc->sc_lock); 908 err = usb_insert_transfer(xfer); 909 mtx_leave(&sc->sc_lock); 910 if (err) 911 return err; 912 913 /* Pipe isn't running, start first */ 914 return dwc2_device_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)); 915 } 916 917 STATIC usbd_status 918 dwc2_device_ctrl_start(struct usbd_xfer *xfer) 919 { 920 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 921 usbd_status err; 922 const bool polling = sc->sc_bus.use_polling; 923 924 DPRINTF("\n"); 925 926 if (!polling) 927 mtx_enter(&sc->sc_lock); 928 xfer->status = USBD_IN_PROGRESS; 929 err = dwc2_device_start(xfer); 930 if (!polling) 931 mtx_leave(&sc->sc_lock); 932 933 if (err) 934 return err; 935 936 return USBD_IN_PROGRESS; 937 } 938 939 STATIC void 940 dwc2_device_ctrl_abort(struct usbd_xfer *xfer) 941 { 942 DPRINTF("xfer=%p\n", xfer); 943 dwc2_abort_xfer(xfer, USBD_CANCELLED); 944 } 945 946 STATIC void 947 dwc2_device_ctrl_close(struct usbd_pipe *pipe) 948 { 949 struct dwc2_softc * const sc = DWC2_PIPE2SC(pipe); 950 struct dwc2_pipe * const dpipe = DWC2_PIPE2DPIPE(pipe); 951 952 DPRINTF("pipe=%p\n", pipe); 953 dwc2_close_pipe(pipe); 954 955 usb_freemem(&sc->sc_bus, &dpipe->req_dma); 956 } 957 958 STATIC void 959 dwc2_device_ctrl_done(struct usbd_xfer *xfer) 960 { 961 962 DPRINTF("xfer=%p\n", xfer); 963 } 964 965 /***********************************************************************/ 966 967 STATIC usbd_status 968 dwc2_device_bulk_transfer(struct usbd_xfer *xfer) 969 { 970 usbd_status err; 971 972 DPRINTF("xfer=%p\n", xfer); 973 974 /* Insert last in queue. */ 975 err = usb_insert_transfer(xfer); 976 if (err) 977 return err; 978 979 /* Pipe isn't running, start first */ 980 return dwc2_device_bulk_start(SIMPLEQ_FIRST(&xfer->pipe->queue)); 981 } 982 983 STATIC usbd_status 984 dwc2_device_bulk_start(struct usbd_xfer *xfer) 985 { 986 usbd_status err; 987 988 DPRINTF("xfer=%p\n", xfer); 989 990 xfer->status = USBD_IN_PROGRESS; 991 err = dwc2_device_start(xfer); 992 993 return err; 994 } 995 996 STATIC void 997 dwc2_device_bulk_abort(struct usbd_xfer *xfer) 998 { 999 DPRINTF("xfer=%p\n", xfer); 1000 1001 dwc2_abort_xfer(xfer, USBD_CANCELLED); 1002 } 1003 1004 STATIC void 1005 dwc2_device_bulk_close(struct usbd_pipe *pipe) 1006 { 1007 1008 DPRINTF("pipe=%p\n", pipe); 1009 1010 dwc2_close_pipe(pipe); 1011 } 1012 1013 STATIC void 1014 dwc2_device_bulk_done(struct usbd_xfer *xfer) 1015 { 1016 1017 DPRINTF("xfer=%p\n", xfer); 1018 } 1019 1020 /***********************************************************************/ 1021 1022 STATIC usbd_status 1023 dwc2_device_intr_transfer(struct usbd_xfer *xfer) 1024 { 1025 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 1026 usbd_status err; 1027 1028 DPRINTF("xfer=%p\n", xfer); 1029 1030 /* Insert last in queue. */ 1031 mtx_enter(&sc->sc_lock); 1032 err = usb_insert_transfer(xfer); 1033 mtx_leave(&sc->sc_lock); 1034 if (err) 1035 return err; 1036 1037 /* Pipe isn't running, start first */ 1038 return dwc2_device_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue)); 1039 } 1040 1041 STATIC usbd_status 1042 dwc2_device_intr_start(struct usbd_xfer *xfer) 1043 { 1044 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 1045 usbd_status err; 1046 const bool polling = sc->sc_bus.use_polling; 1047 1048 if (!polling) 1049 mtx_enter(&sc->sc_lock); 1050 xfer->status = USBD_IN_PROGRESS; 1051 err = dwc2_device_start(xfer); 1052 if (!polling) 1053 mtx_leave(&sc->sc_lock); 1054 1055 if (err) 1056 return err; 1057 1058 return USBD_IN_PROGRESS; 1059 } 1060 1061 /* Abort a device interrupt request. */ 1062 STATIC void 1063 dwc2_device_intr_abort(struct usbd_xfer *xfer) 1064 { 1065 KASSERT(xfer->pipe->intrxfer == xfer); 1066 1067 DPRINTF("xfer=%p\n", xfer); 1068 1069 dwc2_abort_xfer(xfer, USBD_CANCELLED); 1070 } 1071 1072 STATIC void 1073 dwc2_device_intr_close(struct usbd_pipe *pipe) 1074 { 1075 1076 DPRINTF("pipe=%p\n", pipe); 1077 1078 dwc2_close_pipe(pipe); 1079 } 1080 1081 STATIC void 1082 dwc2_device_intr_done(struct usbd_xfer *xfer) 1083 { 1084 1085 DPRINTF("\n"); 1086 1087 if (xfer->pipe->repeat) { 1088 xfer->status = USBD_IN_PROGRESS; 1089 dwc2_device_start(xfer); 1090 } 1091 } 1092 1093 /***********************************************************************/ 1094 1095 usbd_status 1096 dwc2_device_isoc_transfer(struct usbd_xfer *xfer) 1097 { 1098 usbd_status err; 1099 1100 DPRINTF("xfer=%p\n", xfer); 1101 1102 /* Insert last in queue. */ 1103 err = usb_insert_transfer(xfer); 1104 if (err) 1105 return err; 1106 1107 /* Pipe isn't running, start first */ 1108 return dwc2_device_isoc_start(SIMPLEQ_FIRST(&xfer->pipe->queue)); 1109 } 1110 1111 usbd_status 1112 dwc2_device_isoc_start(struct usbd_xfer *xfer) 1113 { 1114 struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer); 1115 struct dwc2_softc *sc = DWC2_DPIPE2SC(dpipe); 1116 usbd_status err; 1117 1118 /* Why would you do that anyway? */ 1119 if (sc->sc_bus.use_polling) 1120 return (USBD_INVAL); 1121 1122 xfer->status = USBD_IN_PROGRESS; 1123 err = dwc2_device_start(xfer); 1124 1125 return err; 1126 } 1127 1128 void 1129 dwc2_device_isoc_abort(struct usbd_xfer *xfer) 1130 { 1131 DPRINTF("xfer=%p\n", xfer); 1132 1133 dwc2_abort_xfer(xfer, USBD_CANCELLED); 1134 } 1135 1136 void 1137 dwc2_device_isoc_close(struct usbd_pipe *pipe) 1138 { 1139 DPRINTF("\n"); 1140 1141 dwc2_close_pipe(pipe); 1142 } 1143 1144 void 1145 dwc2_device_isoc_done(struct usbd_xfer *xfer) 1146 { 1147 1148 DPRINTF("\n"); 1149 } 1150 1151 1152 usbd_status 1153 dwc2_device_start(struct usbd_xfer *xfer) 1154 { 1155 struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer); 1156 struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer); 1157 struct dwc2_softc *sc = DWC2_XFER2SC(xfer); 1158 struct dwc2_hsotg *hsotg = sc->sc_hsotg; 1159 struct dwc2_hcd_urb *dwc2_urb; 1160 1161 struct usbd_device *dev = xfer->pipe->device; 1162 usb_endpoint_descriptor_t *ed = xfer->pipe->endpoint->edesc; 1163 uint8_t addr = dev->address; 1164 uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 1165 uint8_t epnum = UE_GET_ADDR(ed->bEndpointAddress); 1166 uint8_t dir = UE_GET_DIR(ed->bEndpointAddress); 1167 uint16_t mps = UE_GET_SIZE(UGETW(ed->wMaxPacketSize)); 1168 uint32_t len; 1169 1170 uint32_t flags = 0; 1171 uint32_t off = 0; 1172 int retval, err; 1173 int alloc_bandwidth = 0; 1174 1175 DPRINTFN(1, "xfer=%p pipe=%p\n", xfer, xfer->pipe); 1176 1177 if (xfertype == UE_ISOCHRONOUS || 1178 xfertype == UE_INTERRUPT) { 1179 mtx_enter(&hsotg->lock); 1180 if (!dwc2_hcd_is_bandwidth_allocated(hsotg, xfer)) 1181 alloc_bandwidth = 1; 1182 mtx_leave(&hsotg->lock); 1183 } 1184 1185 /* 1186 * For Control pipe the direction is from the request, all other 1187 * transfers have been set correctly at pipe open time. 1188 */ 1189 if (xfertype == UE_CONTROL) { 1190 usb_device_request_t *req = &xfer->request; 1191 1192 DPRINTFN(3, "xfer=%p type=0x%02x request=0x%02x wValue=0x%04x " 1193 "wIndex=0x%04x len=%d addr=%d endpt=%d dir=%s speed=%d " 1194 "mps=%d\n", 1195 xfer, req->bmRequestType, req->bRequest, UGETW(req->wValue), 1196 UGETW(req->wIndex), UGETW(req->wLength), dev->address, 1197 epnum, dir == UT_READ ? "in" :"out", dev->speed, mps); 1198 1199 /* Copy request packet to our DMA buffer */ 1200 memcpy(KERNADDR(&dpipe->req_dma, 0), req, sizeof(*req)); 1201 usb_syncmem(&dpipe->req_dma, 0, sizeof(*req), 1202 BUS_DMASYNC_PREWRITE); 1203 len = UGETW(req->wLength); 1204 if ((req->bmRequestType & UT_READ) == UT_READ) { 1205 dir = UE_DIR_IN; 1206 } else { 1207 dir = UE_DIR_OUT; 1208 } 1209 1210 DPRINTFN(3, "req = %p dma = %llx len %d dir %s\n", 1211 KERNADDR(&dpipe->req_dma, 0), 1212 (long long)DMAADDR(&dpipe->req_dma, 0), 1213 len, dir == UE_DIR_IN ? "in" : "out"); 1214 } else if (xfertype == UE_ISOCHRONOUS) { 1215 DPRINTFN(3, "xfer=%p nframes=%d flags=%d addr=%d endpt=%d," 1216 " mps=%d dir %s\n", xfer, xfer->nframes, xfer->flags, addr, 1217 epnum, mps, dir == UT_READ ? "in" :"out"); 1218 1219 #ifdef DIAGNOSTIC 1220 len = 0; 1221 for (size_t i = 0; i < xfer->nframes; i++) 1222 len += xfer->frlengths[i]; 1223 if (len != xfer->length) 1224 panic("len (%d) != xfer->length (%d)", len, 1225 xfer->length); 1226 #endif 1227 len = xfer->length; 1228 } else { 1229 DPRINTFN(3, "xfer=%p len=%d flags=%d addr=%d endpt=%d," 1230 " mps=%d dir %s\n", xfer, xfer->length, xfer->flags, addr, 1231 epnum, mps, dir == UT_READ ? "in" :"out"); 1232 1233 len = xfer->length; 1234 } 1235 1236 dwc2_urb = dxfer->urb; 1237 if (!dwc2_urb) 1238 return USBD_NOMEM; 1239 1240 // KASSERT(dwc2_urb->packet_count == xfer->nframes); 1241 memset(dwc2_urb, 0, sizeof(*dwc2_urb) + 1242 sizeof(dwc2_urb->iso_descs[0]) * DWC2_MAXISOCPACKETS); 1243 1244 dwc2_urb->priv = xfer; 1245 dwc2_urb->packet_count = xfer->nframes; 1246 1247 dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, addr, epnum, xfertype, dir, 1248 mps); 1249 1250 if (xfertype == UE_CONTROL) { 1251 dwc2_urb->setup_usbdma = &dpipe->req_dma; 1252 dwc2_urb->setup_packet = KERNADDR(&dpipe->req_dma, 0); 1253 dwc2_urb->setup_dma = DMAADDR(&dpipe->req_dma, 0); 1254 } else { 1255 /* XXXNH - % mps required? */ 1256 if ((xfer->flags & USBD_FORCE_SHORT_XFER) && (len % mps) == 0) 1257 flags |= URB_SEND_ZERO_PACKET; 1258 } 1259 flags |= URB_GIVEBACK_ASAP; 1260 1261 /* 1262 * control transfers with no data phase don't touch usbdma, but 1263 * everything else does. 1264 */ 1265 if (!(xfertype == UE_CONTROL && len == 0)) { 1266 dwc2_urb->usbdma = &xfer->dmabuf; 1267 dwc2_urb->buf = KERNADDR(dwc2_urb->usbdma, 0); 1268 dwc2_urb->dma = DMAADDR(dwc2_urb->usbdma, 0); 1269 1270 usb_syncmem(&xfer->dmabuf, 0, len, 1271 dir == UE_DIR_IN ? 1272 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 1273 } 1274 dwc2_urb->length = len; 1275 dwc2_urb->flags = flags; 1276 dwc2_urb->status = -EINPROGRESS; 1277 1278 if (xfertype == UE_INTERRUPT || 1279 xfertype == UE_ISOCHRONOUS) { 1280 uint16_t ival; 1281 1282 if (xfertype == UE_INTERRUPT && 1283 dpipe->pipe.interval != USBD_DEFAULT_INTERVAL) { 1284 ival = dpipe->pipe.interval; 1285 } else { 1286 ival = ed->bInterval; 1287 } 1288 1289 if (ival < 1) { 1290 retval = -ENODEV; 1291 goto fail; 1292 } 1293 if (dev->speed == USB_SPEED_HIGH || 1294 (dev->speed == USB_SPEED_FULL && xfertype == UE_ISOCHRONOUS)) { 1295 if (ival > 16) { 1296 /* 1297 * illegal with HS/FS, but there were 1298 * documentation bugs in the spec 1299 */ 1300 ival = 256; 1301 } else { 1302 ival = (1 << (ival - 1)); 1303 } 1304 } else { 1305 if (xfertype == UE_INTERRUPT && ival < 10) 1306 ival = 10; 1307 } 1308 dwc2_urb->interval = ival; 1309 } 1310 1311 /* XXXNH bring down from callers?? */ 1312 // mtx_enter(&sc->sc_lock); 1313 1314 xfer->actlen = 0; 1315 1316 KASSERT(xfertype != UE_ISOCHRONOUS || 1317 xfer->nframes <= DWC2_MAXISOCPACKETS); 1318 KASSERTMSG(xfer->nframes == 0 || xfertype == UE_ISOCHRONOUS, 1319 "nframes %d xfertype %d\n", xfer->nframes, xfertype); 1320 1321 off = 0; 1322 for (size_t i = 0; i < xfer->nframes; ++i) { 1323 DPRINTFN(3, "xfer=%p frame=%zu offset=%d length=%d\n", xfer, i, 1324 off, xfer->frlengths[i]); 1325 1326 dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i, off, 1327 xfer->frlengths[i]); 1328 off += xfer->frlengths[i]; 1329 } 1330 1331 struct dwc2_qh *qh = dpipe->priv; 1332 struct dwc2_qtd *qtd; 1333 bool qh_allocated = false; 1334 1335 /* Create QH for the endpoint if it doesn't exist */ 1336 if (!qh) { 1337 qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, M_NOWAIT); 1338 if (!qh) { 1339 retval = -ENOMEM; 1340 goto fail; 1341 } 1342 dpipe->priv = qh; 1343 qh_allocated = true; 1344 } 1345 1346 qtd = pool_get(&sc->sc_qtdpool, PR_NOWAIT); 1347 if (!qtd) { 1348 retval = -ENOMEM; 1349 goto fail1; 1350 } 1351 memset(qtd, 0, sizeof(*qtd)); 1352 1353 /* might need to check cpu_intr_p */ 1354 mtx_enter(&hsotg->lock); 1355 retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd); 1356 if (retval) 1357 goto fail2; 1358 if (xfer->timeout && !sc->sc_bus.use_polling) { 1359 timeout_set(&xfer->timeout_handle, dwc2_timeout, xfer); 1360 timeout_add_msec(&xfer->timeout_handle, xfer->timeout); 1361 } 1362 xfer->status = USBD_IN_PROGRESS; 1363 1364 if (alloc_bandwidth) { 1365 dwc2_allocate_bus_bandwidth(hsotg, 1366 dwc2_hcd_get_ep_bandwidth(hsotg, dpipe), 1367 xfer); 1368 } 1369 1370 mtx_leave(&hsotg->lock); 1371 // mtx_exit(&sc->sc_lock); 1372 1373 return USBD_IN_PROGRESS; 1374 1375 fail2: 1376 dwc2_urb->priv = NULL; 1377 mtx_leave(&hsotg->lock); 1378 pool_put(&sc->sc_qtdpool, qtd); 1379 1380 fail1: 1381 if (qh_allocated) { 1382 dpipe->priv = NULL; 1383 dwc2_hcd_qh_free(hsotg, qh); 1384 } 1385 fail: 1386 1387 switch (retval) { 1388 case -EINVAL: 1389 case -ENODEV: 1390 err = USBD_INVAL; 1391 break; 1392 case -ENOMEM: 1393 err = USBD_NOMEM; 1394 break; 1395 default: 1396 err = USBD_IOERROR; 1397 } 1398 1399 return err; 1400 1401 } 1402 1403 int dwc2_intr(void *p) 1404 { 1405 struct dwc2_softc *sc = p; 1406 struct dwc2_hsotg *hsotg; 1407 int ret = 0; 1408 1409 if (sc == NULL) 1410 return 0; 1411 1412 hsotg = sc->sc_hsotg; 1413 mtx_enter(&hsotg->lock); 1414 1415 if (sc->sc_bus.dying) 1416 goto done; 1417 1418 if (sc->sc_bus.use_polling) { 1419 uint32_t intrs; 1420 1421 intrs = dwc2_read_core_intr(hsotg); 1422 DWC2_WRITE_4(hsotg, GINTSTS, intrs); 1423 } else { 1424 ret = dwc2_interrupt(sc); 1425 } 1426 1427 done: 1428 mtx_leave(&hsotg->lock); 1429 1430 return ret; 1431 } 1432 1433 int 1434 dwc2_interrupt(struct dwc2_softc *sc) 1435 { 1436 int ret = 0; 1437 1438 if (sc->sc_hcdenabled) { 1439 ret |= dwc2_handle_hcd_intr(sc->sc_hsotg); 1440 } 1441 1442 ret |= dwc2_handle_common_intr(sc->sc_hsotg); 1443 1444 return ret; 1445 } 1446 1447 /***********************************************************************/ 1448 1449 int 1450 dwc2_detach(struct dwc2_softc *sc, int flags) 1451 { 1452 int rv = 0; 1453 1454 if (sc->sc_child != NULL) 1455 rv = config_detach(sc->sc_child, flags); 1456 1457 return rv; 1458 } 1459 1460 /***********************************************************************/ 1461 int 1462 dwc2_init(struct dwc2_softc *sc) 1463 { 1464 int err = 0; 1465 1466 sc->sc_bus.usbrev = USBREV_2_0; 1467 sc->sc_bus.methods = &dwc2_bus_methods; 1468 sc->sc_bus.pipe_size = sizeof(struct dwc2_pipe); 1469 sc->sc_hcdenabled = false; 1470 1471 mtx_init(&sc->sc_lock, IPL_SOFTUSB); 1472 1473 TAILQ_INIT(&sc->sc_complete); 1474 1475 sc->sc_rhc_si = softintr_establish(IPL_SOFTUSB, dwc2_rhc, sc); 1476 1477 pool_init(&sc->sc_xferpool, sizeof(struct dwc2_xfer), 0, IPL_USB, 0, 1478 "dwc2xfer", NULL); 1479 pool_init(&sc->sc_qhpool, sizeof(struct dwc2_qh), 0, IPL_USB, 0, 1480 "dwc2qh", NULL); 1481 pool_init(&sc->sc_qtdpool, sizeof(struct dwc2_qtd), 0, IPL_USB, 0, 1482 "dwc2qtd", NULL); 1483 1484 sc->sc_hsotg = malloc(sizeof(struct dwc2_hsotg), M_DEVBUF, 1485 M_ZERO | M_WAITOK); 1486 sc->sc_hsotg->hsotg_sc = sc; 1487 sc->sc_hsotg->dev = &sc->sc_bus.bdev; 1488 sc->sc_hcdenabled = true; 1489 1490 struct dwc2_hsotg *hsotg = sc->sc_hsotg; 1491 struct dwc2_core_params defparams; 1492 int retval; 1493 1494 if (sc->sc_params == NULL) { 1495 /* Default all params to autodetect */ 1496 dwc2_set_all_params(&defparams, -1); 1497 sc->sc_params = &defparams; 1498 1499 /* 1500 * Disable descriptor dma mode by default as the HW can support 1501 * it, but does not support it for SPLIT transactions. 1502 */ 1503 defparams.dma_desc_enable = 0; 1504 } 1505 hsotg->dr_mode = USB_DR_MODE_HOST; 1506 1507 /* 1508 * Reset before dwc2_get_hwparams() then it could get power-on real 1509 * reset value form registers. 1510 */ 1511 dwc2_core_reset(hsotg); 1512 usb_delay_ms(&sc->sc_bus, 500); 1513 1514 /* Detect config values from hardware */ 1515 retval = dwc2_get_hwparams(hsotg); 1516 if (retval) { 1517 goto fail2; 1518 } 1519 1520 hsotg->core_params = malloc(sizeof(*hsotg->core_params), M_DEVBUF, 1521 M_ZERO | M_WAITOK); 1522 dwc2_set_all_params(hsotg->core_params, -1); 1523 1524 /* Validate parameter values */ 1525 dwc2_set_parameters(hsotg, sc->sc_params); 1526 1527 #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ 1528 IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) 1529 if (hsotg->dr_mode != USB_DR_MODE_HOST) { 1530 retval = dwc2_gadget_init(hsotg); 1531 if (retval) 1532 goto fail2; 1533 hsotg->gadget_enabled = 1; 1534 } 1535 #endif 1536 #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || \ 1537 IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) 1538 if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) { 1539 retval = dwc2_hcd_init(hsotg); 1540 if (retval) { 1541 if (hsotg->gadget_enabled) 1542 dwc2_hsotg_remove(hsotg); 1543 goto fail2; 1544 } 1545 hsotg->hcd_enabled = 1; 1546 } 1547 #endif 1548 1549 #ifdef DWC2_DEBUG 1550 uint32_t snpsid = hsotg->hw_params.snpsid; 1551 dev_dbg(hsotg->dev, "Core Release: %x.%x%x%x (snpsid=%x)\n", 1552 snpsid >> 12 & 0xf, snpsid >> 8 & 0xf, 1553 snpsid >> 4 & 0xf, snpsid & 0xf, snpsid); 1554 #endif 1555 1556 return 0; 1557 1558 fail2: 1559 err = -retval; 1560 free(sc->sc_hsotg, M_DEVBUF, sizeof(struct dwc2_hsotg)); 1561 softintr_disestablish(sc->sc_rhc_si); 1562 1563 return err; 1564 } 1565 1566 #if 0 1567 /* 1568 * curmode is a mode indication bit 0 = device, 1 = host 1569 */ 1570 STATIC const char * const intnames[32] = { 1571 "curmode", "modemis", "otgint", "sof", 1572 "rxflvl", "nptxfemp", "ginnakeff", "goutnakeff", 1573 "ulpickint", "i2cint", "erlysusp", "usbsusp", 1574 "usbrst", "enumdone", "isooutdrop", "eopf", 1575 "restore_done", "epmis", "iepint", "oepint", 1576 "incompisoin", "incomplp", "fetsusp", "resetdet", 1577 "prtint", "hchint", "ptxfemp", "lpm", 1578 "conidstschng", "disconnint", "sessreqint", "wkupint" 1579 }; 1580 1581 1582 /***********************************************************************/ 1583 1584 #endif 1585 1586 1587 void 1588 dw_timeout(void *arg) 1589 { 1590 struct delayed_work *dw = arg; 1591 1592 task_set(&dw->work, dw->dw_fn, dw->dw_arg); 1593 task_add(dw->dw_wq, &dw->work); 1594 } 1595 1596 void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr, 1597 int *hub_port) 1598 { 1599 struct usbd_xfer *xfer = context; 1600 struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer); 1601 struct usbd_device *dev = dpipe->pipe.device; 1602 1603 *hub_addr = dev->myhsport->parent->address; 1604 *hub_port = dev->myhsport->portno; 1605 } 1606 1607 int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context) 1608 { 1609 struct usbd_xfer *xfer = context; 1610 struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer); 1611 struct usbd_device *dev = dpipe->pipe.device; 1612 1613 return dev->speed; 1614 } 1615 1616 /* 1617 * Sets the final status of an URB and returns it to the upper layer. Any 1618 * required cleanup of the URB is performed. 1619 * 1620 * Must be called with interrupt disabled and spinlock held 1621 */ 1622 void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, 1623 int status) 1624 { 1625 struct usbd_xfer *xfer; 1626 struct dwc2_xfer *dxfer; 1627 struct dwc2_softc *sc; 1628 usb_endpoint_descriptor_t *ed; 1629 uint8_t xfertype; 1630 1631 if (!qtd) { 1632 dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__); 1633 return; 1634 } 1635 1636 if (!qtd->urb) { 1637 dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__); 1638 return; 1639 } 1640 1641 xfer = qtd->urb->priv; 1642 if (!xfer) { 1643 dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__); 1644 return; 1645 } 1646 1647 dxfer = DWC2_XFER2DXFER(xfer); 1648 sc = DWC2_XFER2SC(xfer); 1649 ed = xfer->pipe->endpoint->edesc; 1650 xfertype = UE_GET_XFERTYPE(ed->bmAttributes); 1651 1652 struct dwc2_hcd_urb *urb = qtd->urb; 1653 xfer->actlen = dwc2_hcd_urb_get_actual_length(urb); 1654 1655 DPRINTFN(3, "xfer=%p actlen=%d\n", xfer, xfer->actlen); 1656 1657 if (xfertype == UE_ISOCHRONOUS) { 1658 xfer->actlen = 0; 1659 for (size_t i = 0; i < xfer->nframes; ++i) { 1660 xfer->frlengths[i] = 1661 dwc2_hcd_urb_get_iso_desc_actual_length( 1662 urb, i); 1663 DPRINTFN(1, "xfer=%p frame=%zu length=%d\n", xfer, i, 1664 xfer->frlengths[i]); 1665 xfer->actlen += xfer->frlengths[i]; 1666 } 1667 DPRINTFN(1, "xfer=%p actlen=%d (isoc)\n", xfer, xfer->actlen); 1668 } 1669 1670 if (xfertype == UE_ISOCHRONOUS && dbg_perio()) { 1671 for (size_t i = 0; i < xfer->nframes; i++) 1672 dev_vdbg(hsotg->dev, " ISO Desc %zu status %d\n", 1673 i, urb->iso_descs[i].status); 1674 } 1675 1676 if (!status) { 1677 if (!(xfer->flags & USBD_SHORT_XFER_OK) && 1678 xfer->actlen < xfer->length) 1679 status = -EIO; 1680 } 1681 1682 switch (status) { 1683 case 0: 1684 dxfer->intr_status = USBD_NORMAL_COMPLETION; 1685 break; 1686 case -EPIPE: 1687 dxfer->intr_status = USBD_STALLED; 1688 break; 1689 case -EPROTO: 1690 dxfer->intr_status = USBD_INVAL; 1691 break; 1692 case -EIO: 1693 dxfer->intr_status = USBD_IOERROR; 1694 break; 1695 case -EOVERFLOW: 1696 dxfer->intr_status = USBD_IOERROR; 1697 break; 1698 default: 1699 dxfer->intr_status = USBD_IOERROR; 1700 printf("%s: unknown error status %d\n", __func__, status); 1701 } 1702 1703 if (dxfer->intr_status == USBD_NORMAL_COMPLETION) { 1704 /* 1705 * control transfers with no data phase don't touch dmabuf, but 1706 * everything else does. 1707 */ 1708 if (!(xfertype == UE_CONTROL && 1709 xfer->length == 0) && 1710 xfer->actlen > 0 /* XXX PR/53503 */ 1711 ) { 1712 int rd = usbd_xfer_isread(xfer); 1713 1714 usb_syncmem(&xfer->dmabuf, 0, xfer->actlen, 1715 rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1716 } 1717 } 1718 1719 if (xfertype == UE_ISOCHRONOUS || 1720 xfertype == UE_INTERRUPT) { 1721 struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer); 1722 1723 dwc2_free_bus_bandwidth(hsotg, 1724 dwc2_hcd_get_ep_bandwidth(hsotg, dpipe), 1725 xfer); 1726 } 1727 1728 qtd->urb = NULL; 1729 timeout_del(&xfer->timeout_handle); 1730 usb_rem_task(xfer->device, &xfer->abort_task); 1731 MUTEX_ASSERT_LOCKED(&hsotg->lock); 1732 1733 TAILQ_INSERT_TAIL(&sc->sc_complete, dxfer, xnext); 1734 1735 mtx_leave(&hsotg->lock); 1736 usb_schedsoftintr(&sc->sc_bus); 1737 mtx_enter(&hsotg->lock); 1738 } 1739 1740 1741 int 1742 _dwc2_hcd_start(struct dwc2_hsotg *hsotg) 1743 { 1744 dev_dbg(hsotg->dev, "DWC OTG HCD START\n"); 1745 1746 mtx_enter(&hsotg->lock); 1747 1748 hsotg->lx_state = DWC2_L0; 1749 1750 if (dwc2_is_device_mode(hsotg)) { 1751 mtx_leave(&hsotg->lock); 1752 return 0; /* why 0 ?? */ 1753 } 1754 1755 dwc2_hcd_reinit(hsotg); 1756 1757 mtx_leave(&hsotg->lock); 1758 return 0; 1759 } 1760 1761 int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg) 1762 { 1763 1764 return false; 1765 } 1766