/*	$OpenBSD: dwc2.c,v 1.60 2022/04/12 19:41:11 naddy Exp $	*/
/*	$NetBSD: dwc2.c,v 1.32 2014/09/02 23:26:20 macallan Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nick Hudson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/select.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/endian.h>

#include <machine/bus.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>

#include <dev/usb/dwc2/dwc2.h>
#include <dev/usb/dwc2/dwc2var.h>

#include <dev/usb/dwc2/dwc2_core.h>
#include <dev/usb/dwc2/dwc2_hcd.h>

#ifdef DWC2_COUNTERS
#define	DWC2_EVCNT_ADD(a,b)	((void)((a).ev_count += (b)))
#else
#define	DWC2_EVCNT_ADD(a,b)	do { } while (/*CONSTCOND*/0)
#endif
#define	DWC2_EVCNT_INCR(a)	DWC2_EVCNT_ADD((a), 1)

#ifdef DWC2_DEBUG
#define	DPRINTFN(n,fmt,...) do {			\
	if (dwc2debug >= (n)) {				\
		printf("%s: " fmt,			\
		    __FUNCTION__,## __VA_ARGS__);	\
	}						\
} while (0)
#define	DPRINTF(...) DPRINTFN(1, __VA_ARGS__)
int dwc2debug = 0;
#else
#define	DPRINTF(...) do { } while (0)
#define	DPRINTFN(...) do { } while (0)
#endif

STATIC usbd_status	dwc2_open(struct usbd_pipe *);
STATIC int		dwc2_setaddr(struct usbd_device *, int);
STATIC void		dwc2_poll(struct usbd_bus *);
STATIC void		dwc2_softintr(void *);

STATIC struct usbd_xfer	*dwc2_allocx(struct usbd_bus *);
STATIC void		dwc2_freex(struct usbd_bus *, struct usbd_xfer *);

STATIC usbd_status	dwc2_root_ctrl_transfer(struct usbd_xfer *);
STATIC usbd_status	dwc2_root_ctrl_start(struct usbd_xfer *);
STATIC void		dwc2_root_ctrl_abort(struct usbd_xfer *);
STATIC void		dwc2_root_ctrl_close(struct usbd_pipe *);
STATIC void		dwc2_root_ctrl_done(struct usbd_xfer *);

STATIC usbd_status	dwc2_root_intr_transfer(struct usbd_xfer *);
STATIC usbd_status	dwc2_root_intr_start(struct usbd_xfer *);
STATIC void		dwc2_root_intr_abort(struct usbd_xfer *);
STATIC void		dwc2_root_intr_close(struct usbd_pipe *);
STATIC void		dwc2_root_intr_done(struct usbd_xfer *);

STATIC usbd_status	dwc2_device_ctrl_transfer(struct usbd_xfer *);
STATIC usbd_status	dwc2_device_ctrl_start(struct usbd_xfer *);
STATIC void		dwc2_device_ctrl_abort(struct usbd_xfer *);
STATIC void		dwc2_device_ctrl_close(struct usbd_pipe *);
STATIC void		dwc2_device_ctrl_done(struct usbd_xfer *);

STATIC usbd_status	dwc2_device_bulk_transfer(struct usbd_xfer *);
STATIC usbd_status	dwc2_device_bulk_start(struct usbd_xfer *);
STATIC void		dwc2_device_bulk_abort(struct usbd_xfer *);
STATIC void		dwc2_device_bulk_close(struct usbd_pipe *);
STATIC void		dwc2_device_bulk_done(struct usbd_xfer *);

STATIC usbd_status	dwc2_device_intr_transfer(struct usbd_xfer *);
STATIC usbd_status	dwc2_device_intr_start(struct usbd_xfer *);
STATIC void		dwc2_device_intr_abort(struct usbd_xfer *);
STATIC void		dwc2_device_intr_close(struct usbd_pipe *);
STATIC void		dwc2_device_intr_done(struct usbd_xfer *);

STATIC usbd_status	dwc2_device_isoc_transfer(struct usbd_xfer *);
STATIC usbd_status	dwc2_device_isoc_start(struct usbd_xfer *);
STATIC void		dwc2_device_isoc_abort(struct usbd_xfer *);
STATIC void		dwc2_device_isoc_close(struct usbd_pipe *);
STATIC void		dwc2_device_isoc_done(struct usbd_xfer *);

STATIC usbd_status	dwc2_device_start(struct usbd_xfer *);

STATIC void		dwc2_close_pipe(struct usbd_pipe *);
STATIC void		dwc2_abort_xfer(struct usbd_xfer *, usbd_status);

STATIC void		dwc2_device_clear_toggle(struct usbd_pipe *);
STATIC void		dwc2_noop(struct usbd_pipe *pipe);

STATIC int		dwc2_interrupt(struct dwc2_softc *);
STATIC void		dwc2_rhc(void *);

STATIC void		dwc2_timeout(void *);
STATIC void		dwc2_timeout_task(void *);

static inline void
dwc2_allocate_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
    struct usbd_xfer *xfer)
{
}

static inline void
dwc2_free_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
    struct usbd_xfer *xfer)
{
}

#define DWC2_INTR_ENDPT 1

STATIC const struct usbd_bus_methods dwc2_bus_methods = {
	.open_pipe =	dwc2_open,
	.dev_setaddr =	dwc2_setaddr,
	.soft_intr =	dwc2_softintr,
	.do_poll =	dwc2_poll,
	.allocx =	dwc2_allocx,
	.freex =	dwc2_freex,
};

STATIC const struct usbd_pipe_methods dwc2_root_ctrl_methods = {
	.transfer =	dwc2_root_ctrl_transfer,
	.start =	dwc2_root_ctrl_start,
	.abort =	dwc2_root_ctrl_abort,
	.close =	dwc2_root_ctrl_close,
	.cleartoggle =	dwc2_noop,
	.done =		dwc2_root_ctrl_done,
};

STATIC const struct usbd_pipe_methods dwc2_root_intr_methods = {
	.transfer =	dwc2_root_intr_transfer,
	.start =	dwc2_root_intr_start,
	.abort =	dwc2_root_intr_abort,
	.close =	dwc2_root_intr_close,
	.cleartoggle =	dwc2_noop,
	.done =		dwc2_root_intr_done,
};

STATIC const struct usbd_pipe_methods dwc2_device_ctrl_methods = {
	.transfer =	dwc2_device_ctrl_transfer,
	.start =	dwc2_device_ctrl_start,
	.abort =	dwc2_device_ctrl_abort,
	.close =	dwc2_device_ctrl_close,
	.cleartoggle =	dwc2_noop,
	.done =		dwc2_device_ctrl_done,
};

STATIC const struct usbd_pipe_methods dwc2_device_intr_methods = {
	.transfer =	dwc2_device_intr_transfer,
	.start =	dwc2_device_intr_start,
	.abort =	dwc2_device_intr_abort,
	.close =	dwc2_device_intr_close,
	.cleartoggle =	dwc2_device_clear_toggle,
	.done =		dwc2_device_intr_done,
};

STATIC const struct usbd_pipe_methods dwc2_device_bulk_methods = {
	.transfer =	dwc2_device_bulk_transfer,
	.start =	dwc2_device_bulk_start,
	.abort =	dwc2_device_bulk_abort,
	.close =	dwc2_device_bulk_close,
	.cleartoggle =	dwc2_device_clear_toggle,
	.done =		dwc2_device_bulk_done,
};

STATIC const struct usbd_pipe_methods dwc2_device_isoc_methods = {
	.transfer =	dwc2_device_isoc_transfer,
	.start =	dwc2_device_isoc_start,
	.abort =	dwc2_device_isoc_abort,
	.close =	dwc2_device_isoc_close,
	.cleartoggle =	dwc2_noop,
	.done =		dwc2_device_isoc_done,
};

/*
 * Work around the half configured control (default) pipe when setting
 * the address of a device.
 */
STATIC int
dwc2_setaddr(struct usbd_device *dev, int addr)
{
	if (usbd_set_address(dev, addr))
		return (1);

	dev->address = addr;

	/*
	 * Re-establish the default pipe with the new address and the
	 * new max packet size.
	 */
	dwc2_close_pipe(dev->default_pipe);
	if (dwc2_open(dev->default_pipe))
		return (EINVAL);

	return (0);
}

struct usbd_xfer *
dwc2_allocx(struct usbd_bus *bus)
{
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
	struct dwc2_xfer *dxfer;

	DPRINTFN(10, "\n");

	DWC2_EVCNT_INCR(sc->sc_ev_xferpoolget);
	dxfer = pool_get(&sc->sc_xferpool, PR_WAITOK);
	if (dxfer != NULL) {
		memset(dxfer, 0, sizeof(*dxfer));
		dxfer->urb = dwc2_hcd_urb_alloc(sc->sc_hsotg,
		    DWC2_MAXISOCPACKETS, M_NOWAIT);
#ifdef DIAGNOSTIC
		dxfer->xfer.busy_free = XFER_ONQU;
#endif
	}
	return (struct usbd_xfer *)dxfer;
}

void
dwc2_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
{
	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);

	DPRINTFN(10, "\n");

#ifdef DIAGNOSTIC
	if (xfer->busy_free != XFER_ONQU &&
	    xfer->status != USBD_NOT_STARTED) {
		DPRINTF("xfer=%p not busy, 0x%08x\n", xfer, xfer->busy_free);
	}
	xfer->busy_free = XFER_FREE;
#endif
	DWC2_EVCNT_INCR(sc->sc_ev_xferpoolput);
	dwc2_hcd_urb_free(sc->sc_hsotg, dxfer->urb, DWC2_MAXISOCPACKETS);
	pool_put(&sc->sc_xferpool, xfer);
}

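/*
 * Root hub status change soft interrupt: complete the pending root hub
 * interrupt transfer, reporting a change on the single port (bit 1 of
 * the port bitmap).
 */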
STATIC void
dwc2_rhc(void *addr)
{
	struct dwc2_softc *sc = addr;
	struct usbd_xfer *xfer;
	u_char *p;

	DPRINTF("\n");
	mtx_enter(&sc->sc_lock);
	xfer = sc->sc_intrxfer;

	if (xfer == NULL) {
		/* Just ignore the change. */
		mtx_leave(&sc->sc_lock);
		return;

	}

	/* set port bit */
	p = KERNADDR(&xfer->dmabuf, 0);

	p[0] = 0x02;	/* we only have one port (1 << 1) */

	xfer->actlen = xfer->length;
	xfer->status = USBD_NORMAL_COMPLETION;

	usb_transfer_complete(xfer);
	mtx_leave(&sc->sc_lock);
}

STATIC void
dwc2_softintr(void *v)
{
	struct usbd_bus *bus = v;
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
	struct dwc2_xfer *dxfer, *next;
	TAILQ_HEAD(, dwc2_xfer) claimed = TAILQ_HEAD_INITIALIZER(claimed);

	/*
	 * Grab all the xfers that have not been aborted or timed out.
	 * Do so under a single lock -- without dropping it to run
	 * usb_transfer_complete as we go -- so that dwc2_abortx won't
	 * remove next out from under us during iteration when we've
	 * dropped the lock.
	 */
	mtx_enter(&hsotg->lock);
	TAILQ_FOREACH_SAFE(dxfer, &sc->sc_complete, xnext, next) {
		KASSERT(dxfer->xfer.status == USBD_IN_PROGRESS);
		KASSERT(dxfer->intr_status != USBD_CANCELLED);
		KASSERT(dxfer->intr_status != USBD_TIMEOUT);
		TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext);
		TAILQ_INSERT_TAIL(&claimed, dxfer, xnext);
	}
	mtx_leave(&hsotg->lock);

	/* Now complete them. */
	while (!TAILQ_EMPTY(&claimed)) {
		dxfer = TAILQ_FIRST(&claimed);
		KASSERT(dxfer->xfer.status == USBD_IN_PROGRESS);
		KASSERT(dxfer->intr_status != USBD_CANCELLED);
		KASSERT(dxfer->intr_status != USBD_TIMEOUT);
		TAILQ_REMOVE(&claimed, dxfer, xnext);

		dxfer->xfer.status = dxfer->intr_status;
		usb_transfer_complete(&dxfer->xfer);
	}
}

STATIC void
dwc2_timeout(void *addr)
{
	struct usbd_xfer *xfer = addr;
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);

	DPRINTF("xfer=%p\n", xfer);

	if (sc->sc_bus.dying) {
		dwc2_timeout_task(addr);
		return;
	}

	/* Execute the abort in a process context. */
	usb_init_task(&xfer->abort_task, dwc2_timeout_task, addr,
	    USB_TASK_TYPE_ABORT);
	usb_add_task(xfer->device, &xfer->abort_task);
}

STATIC void
dwc2_timeout_task(void *addr)
{
	struct usbd_xfer *xfer = addr;
	int s;

	DPRINTF("xfer=%p\n", xfer);

	s = splusb();
	dwc2_abort_xfer(xfer, USBD_TIMEOUT);
	splx(s);
}

usbd_status
dwc2_open(struct usbd_pipe *pipe)
{
	struct usbd_device *dev = pipe->device;
	struct dwc2_softc *sc = DWC2_PIPE2SC(pipe);
	struct dwc2_pipe *dpipe = DWC2_PIPE2DPIPE(pipe);
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint8_t addr = dev->address;
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	usbd_status err;

	DPRINTF("pipe %p addr %d xfertype %d dir %s\n", pipe, addr, xfertype,
	    UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN ? "in" : "out");

	if (sc->sc_bus.dying) {
		return USBD_IOERROR;
	}

	if (addr == sc->sc_addr) {
		switch (ed->bEndpointAddress) {
		case USB_CONTROL_ENDPOINT:
			pipe->methods = &dwc2_root_ctrl_methods;
			break;
		case UE_DIR_IN | DWC2_INTR_ENDPT:
			pipe->methods = &dwc2_root_intr_methods;
			break;
		default:
			DPRINTF("bad bEndpointAddress 0x%02x\n",
			    ed->bEndpointAddress);
			return USBD_INVAL;
		}
		DPRINTF("root hub pipe open\n");
		return USBD_NORMAL_COMPLETION;
	}

	switch (xfertype) {
	case UE_CONTROL:
		pipe->methods = &dwc2_device_ctrl_methods;
		err = usb_allocmem(&sc->sc_bus, sizeof(usb_device_request_t),
		    0, USB_DMA_COHERENT, &dpipe->req_dma);
		if (err)
			return USBD_NOMEM;
		break;
	case UE_INTERRUPT:
		pipe->methods = &dwc2_device_intr_methods;
		break;
	case UE_ISOCHRONOUS:
		pipe->methods = &dwc2_device_isoc_methods;
		break;
	case UE_BULK:
		pipe->methods = &dwc2_device_bulk_methods;
		break;
	default:
		DPRINTF("bad xfer type %d\n", xfertype);
		return USBD_INVAL;
	}

	/* QH */
	dpipe->priv = NULL;

	return USBD_NORMAL_COMPLETION;
}

STATIC void
dwc2_poll(struct usbd_bus *bus)
{
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
	struct dwc2_hsotg *hsotg = sc->sc_hsotg;

	mtx_enter(&hsotg->lock);
	dwc2_interrupt(sc);
	mtx_leave(&hsotg->lock);
}

/*
 * Close a regular pipe.
 * Assumes that there are no pending transactions.
 */
STATIC void
dwc2_close_pipe(struct usbd_pipe *pipe)
{
	/* nothing */
}

/*
 * Abort a device request.
 */
STATIC void
dwc2_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
{
	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
	struct dwc2_xfer *d;
	int err;

	splsoftassert(IPL_SOFTUSB);

	DPRINTF("xfer %p pipe %p status 0x%08x\n", xfer, xfer->pipe,
	    xfer->status);

	/* XXX The stack should not call abort() in this case. */
	if (sc->sc_bus.dying || xfer->status == USBD_NOT_STARTED) {
		xfer->status = status;
		timeout_del(&xfer->timeout_handle);
		usb_rem_task(xfer->device, &xfer->abort_task);
		usb_transfer_complete(xfer);
		return;
	}

	KASSERT(xfer->status != USBD_CANCELLED);
	/* Transfer is already done. */
	if (xfer->status != USBD_IN_PROGRESS) {
		DPRINTF("%s: already done \n", __func__);
		return;
	}

	/* Prevent any timeout from kicking in. */
	timeout_del(&xfer->timeout_handle);
	usb_rem_task(xfer->device, &xfer->abort_task);

	/* Claim the transfer status as cancelled. */
	xfer->status = USBD_CANCELLED;

	KASSERTMSG((xfer->status == USBD_CANCELLED ||
	    xfer->status == USBD_TIMEOUT),
	    "bad abort status: %d", xfer->status);

	mtx_enter(&hsotg->lock);

	/*
	 * Check whether we aborted or timed out after the hardware
	 * completion interrupt determined that it's done but before
	 * the soft interrupt could actually complete it.  If so, it's
	 * too late for the soft interrupt -- at this point we've
	 * already committed to abort it or time it out, so we need to
	 * take it off the softint's list of work in case the caller,
	 * say, frees the xfer before the softint runs.
	 *
	 * This logic is unusual among host controller drivers, and
	 * happens because dwc2 decides to complete xfers in the hard
	 * interrupt handler rather than in the soft interrupt handler,
	 * but usb_transfer_complete must be deferred to softint -- and
	 * we happened to swoop in between the hard interrupt and the
	 * soft interrupt.  Other host controller drivers do almost all
	 * processing in the softint so there's no intermediate stage.
	 *
	 * Fortunately, this linear search to discern the intermediate
	 * stage is not likely to be a serious performance impact
	 * because it happens only on abort or timeout.
	 */
	TAILQ_FOREACH(d, &sc->sc_complete, xnext) {
		if (d == dxfer) {
			TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext);
			break;
		}
	}

	/*
	 * HC Step 1: Handle the hardware.
	 */
	err = dwc2_hcd_urb_dequeue(hsotg, dxfer->urb);
	if (err) {
		DPRINTF("dwc2_hcd_urb_dequeue failed\n");
	}

	mtx_leave(&hsotg->lock);

	/*
	 * Final Step: Notify completion to waiting xfers.
	 */
	usb_transfer_complete(xfer);
}

STATIC void
dwc2_noop(struct usbd_pipe *pipe)
{

}

STATIC void
dwc2_device_clear_toggle(struct usbd_pipe *pipe)
{

	DPRINTF("toggle %d -> 0", pipe->endpoint->savedtoggle);
}

/***********************************************************************/

/*
 * Data structures and routines to emulate the root hub.
 */

STATIC const usb_device_descriptor_t dwc2_devd = {
	.bLength = sizeof(usb_device_descriptor_t),
	.bDescriptorType = UDESC_DEVICE,
	.bcdUSB = {0x00, 0x02},
	.bDeviceClass = UDCLASS_HUB,
	.bDeviceSubClass = UDSUBCLASS_HUB,
	.bDeviceProtocol = UDPROTO_HSHUBSTT,
	.bMaxPacketSize = 64,
	.bcdDevice = {0x00, 0x01},
	.iManufacturer = 1,
	.iProduct = 2,
	.bNumConfigurations = 1,
};

struct dwc2_config_desc {
	usb_config_descriptor_t confd;
	usb_interface_descriptor_t ifcd;
	usb_endpoint_descriptor_t endpd;
} __packed;

STATIC const struct dwc2_config_desc dwc2_confd = {
	.confd = {
		.bLength = USB_CONFIG_DESCRIPTOR_SIZE,
		.bDescriptorType = UDESC_CONFIG,
		.wTotalLength[0] = sizeof(dwc2_confd),
		.bNumInterfaces = 1,
		.bConfigurationValue = 1,
		.iConfiguration = 0,
		.bmAttributes = UC_BUS_POWERED | UC_SELF_POWERED,
		.bMaxPower = 0,
	},
	.ifcd = {
		.bLength = USB_INTERFACE_DESCRIPTOR_SIZE,
		.bDescriptorType = UDESC_INTERFACE,
		.bInterfaceNumber = 0,
		.bAlternateSetting = 0,
		.bNumEndpoints = 1,
		.bInterfaceClass = UICLASS_HUB,
		.bInterfaceSubClass = UISUBCLASS_HUB,
		.bInterfaceProtocol = UIPROTO_HSHUBSTT,
		.iInterface = 0
	},
	.endpd = {
		.bLength = USB_ENDPOINT_DESCRIPTOR_SIZE,
		.bDescriptorType = UDESC_ENDPOINT,
		.bEndpointAddress = UE_DIR_IN | DWC2_INTR_ENDPT,
		.bmAttributes = UE_INTERRUPT,
		.wMaxPacketSize = {8, 0},	/* max packet */
		.bInterval = 255,
	},
};

STATIC usbd_status
dwc2_root_ctrl_transfer(struct usbd_xfer *xfer)
{
	usbd_status err;

	err = usb_insert_transfer(xfer);
	if (err)
		return err;

	return dwc2_root_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue));
}

STATIC usbd_status
dwc2_root_ctrl_start(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	usb_device_request_t *req;
	uint8_t *buf;
	uint16_t len;
	int value, index, l, s, totlen;
	usbd_status err = USBD_IOERROR;

	if (sc->sc_bus.dying)
		return USBD_IOERROR;

	req = &xfer->request;

	DPRINTFN(4, "type=0x%02x request=%02x\n",
	    req->bmRequestType, req->bRequest);

	len = UGETW(req->wLength);
	value = UGETW(req->wValue);
	index = UGETW(req->wIndex);

	buf = len ? KERNADDR(&xfer->dmabuf, 0) : NULL;

	totlen = 0;

#define C(x,y) ((x) | ((y) << 8))
	switch (C(req->bRequest, req->bmRequestType)) {
	case C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE):
	case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE):
	case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT):
		/*
		 * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops
		 * for the integrated root hub.
		 */
		break;
	case C(UR_GET_CONFIG, UT_READ_DEVICE):
		if (len > 0) {
			*buf = sc->sc_conf;
			totlen = 1;
		}
		break;
	case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
		DPRINTFN(8, "wValue=0x%04x\n", value);

		if (len == 0)
			break;
		switch (value) {
		case C(0, UDESC_DEVICE):
			l = min(len, USB_DEVICE_DESCRIPTOR_SIZE);
			memcpy(buf, &dwc2_devd, l);
			buf += l;
			len -= l;
			totlen += l;

			break;
		case C(0, UDESC_CONFIG):
			l = min(len, sizeof(dwc2_confd));
			memcpy(buf, &dwc2_confd, l);
			buf += l;
			len -= l;
			totlen += l;

			break;
#define sd ((usb_string_descriptor_t *)buf)
		case C(0, UDESC_STRING):
			totlen = usbd_str(sd, len, "\001");
			break;
		case C(1, UDESC_STRING):
			totlen = usbd_str(sd, len, sc->sc_vendor);
			break;
		case C(2, UDESC_STRING):
			totlen = usbd_str(sd, len, "DWC2 root hub");
			break;
#undef sd
		default:
			goto fail;
		}
		break;
	case C(UR_GET_INTERFACE, UT_READ_INTERFACE):
		if (len > 0) {
			*buf = 0;
			totlen = 1;
		}
		break;
	case C(UR_GET_STATUS, UT_READ_DEVICE):
		if (len > 1) {
			USETW(((usb_status_t *)buf)->wStatus,UDS_SELF_POWERED);
			totlen = 2;
		}
		break;
	case C(UR_GET_STATUS, UT_READ_INTERFACE):
	case C(UR_GET_STATUS, UT_READ_ENDPOINT):
		if (len > 1) {
			USETW(((usb_status_t *)buf)->wStatus, 0);
			totlen = 2;
		}
		break;
	case C(UR_SET_ADDRESS, UT_WRITE_DEVICE):
		DPRINTF("UR_SET_ADDRESS, UT_WRITE_DEVICE: addr %d\n",
		    value);
		if (value >= USB_MAX_DEVICES)
			goto fail;

		sc->sc_addr = value;
		break;
	case C(UR_SET_CONFIG, UT_WRITE_DEVICE):
		if (value != 0 && value != 1)
			goto fail;

		sc->sc_conf = value;
		break;
	case C(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE):
		break;
	case C(UR_SET_FEATURE, UT_WRITE_DEVICE):
	case C(UR_SET_FEATURE, UT_WRITE_INTERFACE):
	case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT):
		err = USBD_IOERROR;
		goto fail;
	case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE):
		break;
	case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT):
		break;
	default:
		/* Hub requests - XXXNH len check? */
		err = dwc2_hcd_hub_control(sc->sc_hsotg,
		    C(req->bRequest, req->bmRequestType), value, index,
		    buf, len);
		if (err) {
			err = USBD_IOERROR;
			goto fail;
		}
		totlen = len;
	}
	xfer->actlen = totlen;
	err = USBD_NORMAL_COMPLETION;

fail:
	s = splusb();
	xfer->status = err;
	usb_transfer_complete(xfer);
	splx(s);

	return err;
}

STATIC void
dwc2_root_ctrl_abort(struct usbd_xfer *xfer)
{
	DPRINTFN(10, "\n");

	/* Nothing to do, all transfers are synchronous. */
}

STATIC void
dwc2_root_ctrl_close(struct usbd_pipe *pipe)
{
	DPRINTFN(10, "\n");

	/* Nothing to do. */
}

STATIC void
dwc2_root_ctrl_done(struct usbd_xfer *xfer)
{
	DPRINTFN(10, "\n");

	/* Nothing to do. */
}

STATIC usbd_status
dwc2_root_intr_transfer(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	usbd_status err;

	DPRINTF("\n");

	/* Insert last in queue. */
	mtx_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mtx_leave(&sc->sc_lock);
	if (err)
		return err;

	/* Pipe isn't running, start first */
	return dwc2_root_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue));
}

STATIC usbd_status
dwc2_root_intr_start(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	const bool polling = sc->sc_bus.use_polling;

	DPRINTF("\n");

	if (sc->sc_bus.dying)
		return USBD_IOERROR;

	if (!polling)
		mtx_enter(&sc->sc_lock);
	KASSERT(sc->sc_intrxfer == NULL);
	sc->sc_intrxfer = xfer;
	xfer->status = USBD_IN_PROGRESS;
	if (!polling)
		mtx_leave(&sc->sc_lock);

	return USBD_IN_PROGRESS;
}

/* Abort a root interrupt request. */
STATIC void
dwc2_root_intr_abort(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);

	DPRINTF("xfer=%p\n", xfer);

	/* If xfer has already completed, nothing to do here. */
	if (sc->sc_intrxfer == NULL)
		return;

	/*
	 * Otherwise, sc->sc_intrxfer had better be this transfer.
	 * Cancel it.
	 */
	KASSERT(sc->sc_intrxfer == xfer);
	KASSERT(xfer->status == USBD_IN_PROGRESS);
	xfer->status = USBD_CANCELLED;
	usb_transfer_complete(xfer);
}

STATIC void
dwc2_root_intr_close(struct usbd_pipe *pipe)
{
	struct dwc2_softc *sc = DWC2_PIPE2SC(pipe);

	DPRINTF("\n");

	/*
	 * Caller must guarantee the xfer has completed first, by
	 * closing the pipe only after normal completion or an abort.
	 */
	if (sc->sc_intrxfer == NULL)
		panic("%s: sc->sc_intrxfer == NULL", __func__);
}

STATIC void
dwc2_root_intr_done(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);

	DPRINTF("\n");

	/* Claim the xfer so it doesn't get completed again. */
	KASSERT(sc->sc_intrxfer == xfer);
	KASSERT(xfer->status != USBD_IN_PROGRESS);
	sc->sc_intrxfer = NULL;
}

/***********************************************************************/

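/*
 * Methods for pipes to real devices.  transfer() queues the xfer and
 * start() hands it to dwc2_device_start(); completion is reported from
 * dwc2_host_complete() via the soft interrupt.
 */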
STATIC usbd_status
dwc2_device_ctrl_transfer(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	usbd_status err;

	DPRINTF("\n");

	/* Insert last in queue. */
	mtx_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mtx_leave(&sc->sc_lock);
	if (err)
		return err;

	/* Pipe isn't running, start first */
	return dwc2_device_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue));
}

STATIC usbd_status
dwc2_device_ctrl_start(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	usbd_status err;
	const bool polling = sc->sc_bus.use_polling;

	DPRINTF("\n");

	if (!polling)
		mtx_enter(&sc->sc_lock);
	xfer->status = USBD_IN_PROGRESS;
	err = dwc2_device_start(xfer);
	if (!polling)
		mtx_leave(&sc->sc_lock);

	if (err)
		return err;

	return USBD_IN_PROGRESS;
}

STATIC void
dwc2_device_ctrl_abort(struct usbd_xfer *xfer)
{
	DPRINTF("xfer=%p\n", xfer);
	dwc2_abort_xfer(xfer, USBD_CANCELLED);
}

STATIC void
dwc2_device_ctrl_close(struct usbd_pipe *pipe)
{
	struct dwc2_softc * const sc = DWC2_PIPE2SC(pipe);
	struct dwc2_pipe * const dpipe = DWC2_PIPE2DPIPE(pipe);

	DPRINTF("pipe=%p\n", pipe);
	dwc2_close_pipe(pipe);

	usb_freemem(&sc->sc_bus, &dpipe->req_dma);
}

STATIC void
dwc2_device_ctrl_done(struct usbd_xfer *xfer)
{

	DPRINTF("xfer=%p\n", xfer);
}

/***********************************************************************/

STATIC usbd_status
dwc2_device_bulk_transfer(struct usbd_xfer *xfer)
{
	usbd_status err;

	DPRINTF("xfer=%p\n", xfer);

	/* Insert last in queue. */
	err = usb_insert_transfer(xfer);
	if (err)
		return err;

	/* Pipe isn't running, start first */
	return dwc2_device_bulk_start(SIMPLEQ_FIRST(&xfer->pipe->queue));
}

STATIC usbd_status
dwc2_device_bulk_start(struct usbd_xfer *xfer)
{
	usbd_status err;

	DPRINTF("xfer=%p\n", xfer);

	xfer->status = USBD_IN_PROGRESS;
	err = dwc2_device_start(xfer);

	return err;
}

STATIC void
dwc2_device_bulk_abort(struct usbd_xfer *xfer)
{
	DPRINTF("xfer=%p\n", xfer);

	dwc2_abort_xfer(xfer, USBD_CANCELLED);
}

STATIC void
dwc2_device_bulk_close(struct usbd_pipe *pipe)
{

	DPRINTF("pipe=%p\n", pipe);

	dwc2_close_pipe(pipe);
}

STATIC void
dwc2_device_bulk_done(struct usbd_xfer *xfer)
{

	DPRINTF("xfer=%p\n", xfer);
}

/***********************************************************************/

STATIC usbd_status
dwc2_device_intr_transfer(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	usbd_status err;

	DPRINTF("xfer=%p\n", xfer);

	/* Insert last in queue. */
	mtx_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mtx_leave(&sc->sc_lock);
	if (err)
		return err;

	/* Pipe isn't running, start first */
	return dwc2_device_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue));
}

STATIC usbd_status
dwc2_device_intr_start(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	usbd_status err;
	const bool polling = sc->sc_bus.use_polling;

	if (!polling)
		mtx_enter(&sc->sc_lock);
	xfer->status = USBD_IN_PROGRESS;
	err = dwc2_device_start(xfer);
	if (!polling)
		mtx_leave(&sc->sc_lock);

	if (err)
		return err;

	return USBD_IN_PROGRESS;
}

/* Abort a device interrupt request. */
STATIC void
dwc2_device_intr_abort(struct usbd_xfer *xfer)
{
	KASSERT(xfer->pipe->intrxfer == xfer);

	DPRINTF("xfer=%p\n", xfer);

	dwc2_abort_xfer(xfer, USBD_CANCELLED);
}

STATIC void
dwc2_device_intr_close(struct usbd_pipe *pipe)
{

	DPRINTF("pipe=%p\n", pipe);

	dwc2_close_pipe(pipe);
}

STATIC void
dwc2_device_intr_done(struct usbd_xfer *xfer)
{

	DPRINTF("\n");

	if (xfer->pipe->repeat) {
		xfer->status = USBD_IN_PROGRESS;
		dwc2_device_start(xfer);
	}
}

/***********************************************************************/

usbd_status
dwc2_device_isoc_transfer(struct usbd_xfer *xfer)
{
	usbd_status err;

	DPRINTF("xfer=%p\n", xfer);

	/* Insert last in queue. */
	err = usb_insert_transfer(xfer);
	if (err)
		return err;

	/* Pipe isn't running, start first */
	return dwc2_device_isoc_start(SIMPLEQ_FIRST(&xfer->pipe->queue));
}

usbd_status
dwc2_device_isoc_start(struct usbd_xfer *xfer)
{
	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
	struct dwc2_softc *sc = DWC2_DPIPE2SC(dpipe);
	usbd_status err;

	/* Why would you do that anyway? */
	if (sc->sc_bus.use_polling)
		return (USBD_INVAL);

	xfer->status = USBD_IN_PROGRESS;
	err = dwc2_device_start(xfer);

	return err;
}

void
dwc2_device_isoc_abort(struct usbd_xfer *xfer)
{
	DPRINTF("xfer=%p\n", xfer);

	dwc2_abort_xfer(xfer, USBD_CANCELLED);
}

void
dwc2_device_isoc_close(struct usbd_pipe *pipe)
{
	DPRINTF("\n");

	dwc2_close_pipe(pipe);
}

void
dwc2_device_isoc_done(struct usbd_xfer *xfer)
{

	DPRINTF("\n");
}


usbd_status
dwc2_device_start(struct usbd_xfer *xfer)
{
	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
	struct dwc2_hcd_urb *dwc2_urb;

	struct usbd_device *dev = xfer->pipe->device;
	usb_endpoint_descriptor_t *ed = xfer->pipe->endpoint->edesc;
	uint8_t addr = dev->address;
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint8_t epnum = UE_GET_ADDR(ed->bEndpointAddress);
	uint8_t dir = UE_GET_DIR(ed->bEndpointAddress);
	uint16_t mps = UE_GET_SIZE(UGETW(ed->wMaxPacketSize));
	uint32_t len;

	uint32_t flags = 0;
	uint32_t off = 0;
	int retval, err;
	int alloc_bandwidth = 0;

	DPRINTFN(1, "xfer=%p pipe=%p\n", xfer, xfer->pipe);

	if (xfertype == UE_ISOCHRONOUS ||
	    xfertype == UE_INTERRUPT) {
		mtx_enter(&hsotg->lock);
		if (!dwc2_hcd_is_bandwidth_allocated(hsotg, xfer))
			alloc_bandwidth = 1;
		mtx_leave(&hsotg->lock);
	}

	/*
	 * For control pipes the direction comes from the request; all
	 * other transfer types were set correctly at pipe open time.
	 */
	if (xfertype == UE_CONTROL) {
		usb_device_request_t *req = &xfer->request;

		DPRINTFN(3, "xfer=%p type=0x%02x request=0x%02x wValue=0x%04x "
		    "wIndex=0x%04x len=%d addr=%d endpt=%d dir=%s speed=%d "
		    "mps=%d\n",
		    xfer, req->bmRequestType, req->bRequest, UGETW(req->wValue),
		    UGETW(req->wIndex), UGETW(req->wLength), dev->address,
		    epnum, dir == UT_READ ? "in" :"out", dev->speed, mps);

		/* Copy request packet to our DMA buffer */
		memcpy(KERNADDR(&dpipe->req_dma, 0), req, sizeof(*req));
		usb_syncmem(&dpipe->req_dma, 0, sizeof(*req),
		    BUS_DMASYNC_PREWRITE);
		len = UGETW(req->wLength);
		if ((req->bmRequestType & UT_READ) == UT_READ) {
			dir = UE_DIR_IN;
		} else {
			dir = UE_DIR_OUT;
		}

		DPRINTFN(3, "req = %p dma = %llx len %d dir %s\n",
		    KERNADDR(&dpipe->req_dma, 0),
		    (long long)DMAADDR(&dpipe->req_dma, 0),
		    len, dir == UE_DIR_IN ? "in" : "out");
	} else if (xfertype == UE_ISOCHRONOUS) {
		DPRINTFN(3, "xfer=%p nframes=%d flags=%d addr=%d endpt=%d,"
		    " mps=%d dir %s\n", xfer, xfer->nframes, xfer->flags, addr,
		    epnum, mps, dir == UT_READ ? "in" :"out");

#ifdef DIAGNOSTIC
		len = 0;
		for (size_t i = 0; i < xfer->nframes; i++)
			len += xfer->frlengths[i];
		if (len != xfer->length)
			panic("len (%d) != xfer->length (%d)", len,
			    xfer->length);
#endif
		len = xfer->length;
	} else {
		DPRINTFN(3, "xfer=%p len=%d flags=%d addr=%d endpt=%d,"
		    " mps=%d dir %s\n", xfer, xfer->length, xfer->flags, addr,
		    epnum, mps, dir == UT_READ ? "in" :"out");

		len = xfer->length;
	}

	dwc2_urb = dxfer->urb;
	if (!dwc2_urb)
		return USBD_NOMEM;

//	KASSERT(dwc2_urb->packet_count == xfer->nframes);
	memset(dwc2_urb, 0, sizeof(*dwc2_urb) +
	    sizeof(dwc2_urb->iso_descs[0]) * DWC2_MAXISOCPACKETS);

	dwc2_urb->priv = xfer;
	dwc2_urb->packet_count = xfer->nframes;

	dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, addr, epnum, xfertype, dir,
	    mps);

	if (xfertype == UE_CONTROL) {
		dwc2_urb->setup_usbdma = &dpipe->req_dma;
		dwc2_urb->setup_packet = KERNADDR(&dpipe->req_dma, 0);
		dwc2_urb->setup_dma = DMAADDR(&dpipe->req_dma, 0);
	} else {
		/* XXXNH - % mps required? */
		if ((xfer->flags & USBD_FORCE_SHORT_XFER) && (len % mps) == 0)
			flags |= URB_SEND_ZERO_PACKET;
	}
	flags |= URB_GIVEBACK_ASAP;

	/*
	 * control transfers with no data phase don't touch usbdma, but
	 * everything else does.
	 */
	if (!(xfertype == UE_CONTROL && len == 0)) {
		dwc2_urb->usbdma = &xfer->dmabuf;
		dwc2_urb->buf = KERNADDR(dwc2_urb->usbdma, 0);
		dwc2_urb->dma = DMAADDR(dwc2_urb->usbdma, 0);

		usb_syncmem(&xfer->dmabuf, 0, len,
		    dir == UE_DIR_IN ?
			BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
	}
	dwc2_urb->length = len;
	dwc2_urb->flags = flags;
	dwc2_urb->status = -EINPROGRESS;

	if (xfertype == UE_INTERRUPT ||
	    xfertype == UE_ISOCHRONOUS) {
		uint16_t ival;

		if (xfertype == UE_INTERRUPT &&
		    dpipe->pipe.interval != USBD_DEFAULT_INTERVAL) {
			ival = dpipe->pipe.interval;
		} else {
			ival = ed->bInterval;
		}

		if (ival < 1) {
			retval = -ENODEV;
			goto fail;
		}
		if (dev->speed == USB_SPEED_HIGH ||
		    (dev->speed == USB_SPEED_FULL && xfertype == UE_ISOCHRONOUS)) {
			if (ival > 16) {
				/*
				 * illegal with HS/FS, but there were
				 * documentation bugs in the spec
				 */
				ival = 256;
			} else {
				ival = (1 << (ival - 1));
			}
		} else {
			if (xfertype == UE_INTERRUPT && ival < 10)
				ival = 10;
		}
		dwc2_urb->interval = ival;
	}

	/* XXXNH bring down from callers?? */
//	mtx_enter(&sc->sc_lock);

	xfer->actlen = 0;

	KASSERT(xfertype != UE_ISOCHRONOUS ||
	    xfer->nframes <= DWC2_MAXISOCPACKETS);
	KASSERTMSG(xfer->nframes == 0 || xfertype == UE_ISOCHRONOUS,
	    "nframes %d xfertype %d\n", xfer->nframes, xfertype);

	off = 0;
	for (size_t i = 0; i < xfer->nframes; ++i) {
		DPRINTFN(3, "xfer=%p frame=%zu offset=%d length=%d\n", xfer, i,
		    off, xfer->frlengths[i]);

		dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i, off,
		    xfer->frlengths[i]);
		off += xfer->frlengths[i];
	}

	struct dwc2_qh *qh = dpipe->priv;
	struct dwc2_qtd *qtd;
	bool qh_allocated = false;

	/* Create QH for the endpoint if it doesn't exist */
	if (!qh) {
		qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, M_NOWAIT);
		if (!qh) {
			retval = -ENOMEM;
			goto fail;
		}
		dpipe->priv = qh;
		qh_allocated = true;
	}

	qtd = pool_get(&sc->sc_qtdpool, PR_NOWAIT);
	if (!qtd) {
		retval = -ENOMEM;
		goto fail1;
	}
	memset(qtd, 0, sizeof(*qtd));

	/* might need to check cpu_intr_p */
	mtx_enter(&hsotg->lock);
	retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
	if (retval)
		goto fail2;
	if (xfer->timeout && !sc->sc_bus.use_polling) {
		timeout_set(&xfer->timeout_handle, dwc2_timeout, xfer);
		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
	}
	xfer->status = USBD_IN_PROGRESS;

	if (alloc_bandwidth) {
		dwc2_allocate_bus_bandwidth(hsotg,
		    dwc2_hcd_get_ep_bandwidth(hsotg, dpipe),
		    xfer);
	}

	mtx_leave(&hsotg->lock);
//	mtx_exit(&sc->sc_lock);

	return USBD_IN_PROGRESS;

fail2:
	dwc2_urb->priv = NULL;
	mtx_leave(&hsotg->lock);
	pool_put(&sc->sc_qtdpool, qtd);

fail1:
	if (qh_allocated) {
		dpipe->priv = NULL;
		dwc2_hcd_qh_free(hsotg, qh);
	}
fail:

	switch (retval) {
	case -EINVAL:
	case -ENODEV:
		err = USBD_INVAL;
		break;
	case -ENOMEM:
		err = USBD_NOMEM;
		break;
	default:
		err = USBD_IOERROR;
	}

	return err;

}

int dwc2_intr(void *p)
{
	struct dwc2_softc *sc = p;
	struct dwc2_hsotg *hsotg;
	int ret = 0;

	if (sc == NULL)
		return 0;

	hsotg = sc->sc_hsotg;
	mtx_enter(&hsotg->lock);

	if (sc->sc_bus.dying)
		goto done;

	if (sc->sc_bus.use_polling) {
		uint32_t intrs;

		intrs = dwc2_read_core_intr(hsotg);
		DWC2_WRITE_4(hsotg, GINTSTS, intrs);
	} else {
		ret = dwc2_interrupt(sc);
	}

done:
	mtx_leave(&hsotg->lock);

	return ret;
}

int
dwc2_interrupt(struct dwc2_softc *sc)
{
	int ret = 0;

	if (sc->sc_hcdenabled) {
		ret |= dwc2_handle_hcd_intr(sc->sc_hsotg);
	}

	ret |= dwc2_handle_common_intr(sc->sc_hsotg);

	return ret;
}

/***********************************************************************/

int
dwc2_detach(struct dwc2_softc *sc, int flags)
{
	int rv = 0;

	if (sc->sc_child != NULL)
		rv = config_detach(sc->sc_child, flags);

	return rv;
}

/***********************************************************************/
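/*
 * Bus glue initialization: set up the usbd bus methods, the transfer
 * and QH/QTD pools and the root hub soft interrupt, allocate the
 * dwc2_hsotg core state, then reset the core and detect its hardware
 * parameters before initializing the HCD.
 */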
int
dwc2_init(struct dwc2_softc *sc)
{
	int err = 0;

	sc->sc_bus.usbrev = USBREV_2_0;
	sc->sc_bus.methods = &dwc2_bus_methods;
	sc->sc_bus.pipe_size = sizeof(struct dwc2_pipe);
	sc->sc_hcdenabled = false;

	mtx_init(&sc->sc_lock, IPL_SOFTUSB);

	TAILQ_INIT(&sc->sc_complete);

	sc->sc_rhc_si = softintr_establish(IPL_SOFTUSB, dwc2_rhc, sc);

	pool_init(&sc->sc_xferpool, sizeof(struct dwc2_xfer), 0, IPL_USB, 0,
	    "dwc2xfer", NULL);
	pool_init(&sc->sc_qhpool, sizeof(struct dwc2_qh), 0, IPL_USB, 0,
	    "dwc2qh", NULL);
	pool_init(&sc->sc_qtdpool, sizeof(struct dwc2_qtd), 0, IPL_USB, 0,
	    "dwc2qtd", NULL);

	sc->sc_hsotg = malloc(sizeof(struct dwc2_hsotg), M_USBHC,
	    M_ZERO | M_WAITOK);
	sc->sc_hsotg->hsotg_sc = sc;
	sc->sc_hsotg->dev = &sc->sc_bus.bdev;
	sc->sc_hcdenabled = true;

	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
	struct dwc2_core_params defparams;
	int retval;

	if (sc->sc_params == NULL) {
		/* Default all params to autodetect */
		dwc2_set_all_params(&defparams, -1);
		sc->sc_params = &defparams;

		/*
		 * Disable descriptor dma mode by default as the HW can support
		 * it, but does not support it for SPLIT transactions.
		 */
		defparams.dma_desc_enable = 0;
	}
	hsotg->dr_mode = USB_DR_MODE_HOST;

	/*
	 * Reset before dwc2_get_hwparams() so that it reads the power-on
	 * reset values from the registers.
	 */
	dwc2_core_reset(hsotg);
	usb_delay_ms(&sc->sc_bus, 500);

	/* Detect config values from hardware */
	retval = dwc2_get_hwparams(hsotg);
	if (retval) {
		goto fail2;
	}

	hsotg->core_params = malloc(sizeof(*hsotg->core_params), M_USBHC,
	    M_ZERO | M_WAITOK);
	dwc2_set_all_params(hsotg->core_params, -1);

	/* Validate parameter values */
	dwc2_set_parameters(hsotg, sc->sc_params);

#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
    IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
	if (hsotg->dr_mode != USB_DR_MODE_HOST) {
		retval = dwc2_gadget_init(hsotg);
		if (retval)
			goto fail2;
		hsotg->gadget_enabled = 1;
	}
#endif
#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || \
    IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
	if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) {
		retval = dwc2_hcd_init(hsotg);
		if (retval) {
			if (hsotg->gadget_enabled)
				dwc2_hsotg_remove(hsotg);
			goto fail2;
		}
		hsotg->hcd_enabled = 1;
	}
#endif

#ifdef DWC2_DEBUG
	uint32_t snpsid = hsotg->hw_params.snpsid;
	dev_dbg(hsotg->dev, "Core Release: %x.%x%x%x (snpsid=%x)\n",
	    snpsid >> 12 & 0xf, snpsid >> 8 & 0xf,
	    snpsid >> 4 & 0xf, snpsid & 0xf, snpsid);
#endif

	return 0;

fail2:
	err = -retval;
	free(sc->sc_hsotg, M_USBHC, sizeof(struct dwc2_hsotg));
	softintr_disestablish(sc->sc_rhc_si);

	return err;
}

#if 0
/*
 * curmode is a mode indication bit 0 = device, 1 = host
 */
STATIC const char * const intnames[32] = {
	"curmode", "modemis", "otgint", "sof",
	"rxflvl", "nptxfemp", "ginnakeff", "goutnakeff",
	"ulpickint", "i2cint", "erlysusp", "usbsusp",
	"usbrst", "enumdone", "isooutdrop", "eopf",
	"restore_done", "epmis", "iepint", "oepint",
	"incompisoin", "incomplp", "fetsusp", "resetdet",
	"prtint", "hchint", "ptxfemp", "lpm",
	"conidstschng", "disconnint", "sessreqint", "wkupint"
};


/***********************************************************************/

#endif

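/*
 * Timeout glue for the shared (Linux-derived) core's delayed work:
 * when the timeout fires, hand the work function off to the task queue
 * so it runs in process context.
 */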
void
dw_timeout(void *arg)
{
	struct delayed_work *dw = arg;

	task_set(&dw->work, dw->dw_fn, dw->dw_arg);
	task_add(dw->dw_wq, &dw->work);
}

void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr,
    int *hub_port)
{
	struct usbd_xfer *xfer = context;
	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
	struct usbd_device *dev = dpipe->pipe.device;

	*hub_addr = dev->myhsport->parent->address;
	*hub_port = dev->myhsport->portno;
}

int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context)
{
	struct usbd_xfer *xfer = context;
	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
	struct usbd_device *dev = dpipe->pipe.device;

	return dev->speed;
}

/*
 * Sets the final status of an URB and returns it to the upper layer. Any
 * required cleanup of the URB is performed.
 *
 * Must be called with interrupt disabled and spinlock held
 */
void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
    int status)
{
	struct usbd_xfer *xfer;
	struct dwc2_xfer *dxfer;
	struct dwc2_softc *sc;
	usb_endpoint_descriptor_t *ed;
	uint8_t xfertype;

	if (!qtd) {
		dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__);
		return;
	}

	if (!qtd->urb) {
		dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__);
		return;
	}

	xfer = qtd->urb->priv;
	if (!xfer) {
		dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__);
		return;
	}

	dxfer = DWC2_XFER2DXFER(xfer);
	sc = DWC2_XFER2SC(xfer);
	ed = xfer->pipe->endpoint->edesc;
	xfertype = UE_GET_XFERTYPE(ed->bmAttributes);

	struct dwc2_hcd_urb *urb = qtd->urb;
	xfer->actlen = dwc2_hcd_urb_get_actual_length(urb);

	DPRINTFN(3, "xfer=%p actlen=%d\n", xfer, xfer->actlen);

	if (xfertype == UE_ISOCHRONOUS) {
		xfer->actlen = 0;
		for (size_t i = 0; i < xfer->nframes; ++i) {
			xfer->frlengths[i] =
			    dwc2_hcd_urb_get_iso_desc_actual_length(
			    urb, i);
			DPRINTFN(1, "xfer=%p frame=%zu length=%d\n", xfer, i,
			    xfer->frlengths[i]);
			xfer->actlen += xfer->frlengths[i];
		}
		DPRINTFN(1, "xfer=%p actlen=%d (isoc)\n", xfer, xfer->actlen);
	}

	if (xfertype == UE_ISOCHRONOUS && dbg_perio()) {
		for (size_t i = 0; i < xfer->nframes; i++)
			dev_vdbg(hsotg->dev, " ISO Desc %zu status %d\n",
			    i, urb->iso_descs[i].status);
	}

	if (!status) {
		if (!(xfer->flags & USBD_SHORT_XFER_OK) &&
		    xfer->actlen < xfer->length)
			status = -EIO;
	}

	switch (status) {
	case 0:
		dxfer->intr_status = USBD_NORMAL_COMPLETION;
		break;
	case -EPIPE:
		dxfer->intr_status = USBD_STALLED;
		break;
	case -EPROTO:
		dxfer->intr_status = USBD_INVAL;
		break;
	case -EIO:
		dxfer->intr_status = USBD_IOERROR;
		break;
	case -EOVERFLOW:
		dxfer->intr_status = USBD_IOERROR;
		break;
	default:
		dxfer->intr_status = USBD_IOERROR;
		printf("%s: unknown error status %d\n", __func__, status);
	}

	if (dxfer->intr_status == USBD_NORMAL_COMPLETION) {
		/*
		 * control transfers with no data phase don't touch dmabuf, but
		 * everything else does.
		 */
		if (!(xfertype == UE_CONTROL &&
		    xfer->length == 0) &&
		    xfer->actlen > 0 /* XXX PR/53503 */
		    ) {
			int rd = usbd_xfer_isread(xfer);

			usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
			    rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		}
	}

	if (xfertype == UE_ISOCHRONOUS ||
	    xfertype == UE_INTERRUPT) {
		struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);

		dwc2_free_bus_bandwidth(hsotg,
		    dwc2_hcd_get_ep_bandwidth(hsotg, dpipe),
		    xfer);
	}

	qtd->urb = NULL;
	timeout_del(&xfer->timeout_handle);
	usb_rem_task(xfer->device, &xfer->abort_task);
	MUTEX_ASSERT_LOCKED(&hsotg->lock);

	TAILQ_INSERT_TAIL(&sc->sc_complete, dxfer, xnext);

	mtx_leave(&hsotg->lock);
	usb_schedsoftintr(&sc->sc_bus);
	mtx_enter(&hsotg->lock);
}


int
_dwc2_hcd_start(struct dwc2_hsotg *hsotg)
{
	dev_dbg(hsotg->dev, "DWC OTG HCD START\n");

	mtx_enter(&hsotg->lock);

	hsotg->lx_state = DWC2_L0;

	if (dwc2_is_device_mode(hsotg)) {
		mtx_leave(&hsotg->lock);
		return 0;	/* why 0 ?? */
	}

	dwc2_hcd_reinit(hsotg);

	mtx_leave(&hsotg->lock);
	return 0;
}

int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg)
{

	return false;
}