/*	$NetBSD: dwc2.c,v 1.44 2016/08/14 14:42:22 skrll Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nick Hudson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dwc2.c,v 1.44 2016/08/14 14:42:22 skrll Exp $");

#include "opt_usb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/select.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/cpu.h>

#include <machine/endian.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>
#include <dev/usb/usbroothub.h>

#include <dwc2/dwc2.h>
#include <dwc2/dwc2var.h>

#include "dwc2_core.h"
#include "dwc2_hcd.h"

#ifdef DWC2_COUNTERS
#define	DWC2_EVCNT_ADD(a,b)	((void)((a).ev_count += (b)))
#else
#define	DWC2_EVCNT_ADD(a,b)	do { } while (/*CONSTCOND*/0)
#endif
#define	DWC2_EVCNT_INCR(a)	DWC2_EVCNT_ADD((a), 1)

#ifdef DWC2_DEBUG
#define	DPRINTFN(n,fmt,...) do {			\
	if (dwc2debug >= (n)) {				\
		printf("%s: " fmt,			\
		    __FUNCTION__,## __VA_ARGS__);	\
	}						\
} while (0)
#define	DPRINTF(...) DPRINTFN(1, __VA_ARGS__)
int dwc2debug = 0;
#else
#define	DPRINTF(...) do { } while (0)
#define	DPRINTFN(...) do { } while (0)
#endif
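
/*
 * Note on the debug macros above (illustrative only): DPRINTFN(n, ...)
 * produces output only when the kernel is built with DWC2_DEBUG and the
 * global "dwc2debug" level is at least n, e.g.
 *
 *	DPRINTFN(10, "xfer=%p\n", xfer);	silent unless dwc2debug >= 10
 *	DPRINTF("pipe=%p\n", pipe);		shorthand for DPRINTFN(1, ...)
 *
 * DWC2_EVCNT_INCR()/DWC2_EVCNT_ADD() likewise compile away unless
 * DWC2_COUNTERS is defined.
 */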

Static usbd_status	dwc2_open(struct usbd_pipe *);
Static void		dwc2_poll(struct usbd_bus *);
Static void		dwc2_softintr(void *);

Static struct usbd_xfer *
			dwc2_allocx(struct usbd_bus *, unsigned int);
Static void		dwc2_freex(struct usbd_bus *, struct usbd_xfer *);
Static void		dwc2_get_lock(struct usbd_bus *, kmutex_t **);
Static int		dwc2_roothub_ctrl(struct usbd_bus *, usb_device_request_t *,
			    void *, int);

Static usbd_status	dwc2_root_intr_transfer(struct usbd_xfer *);
Static usbd_status	dwc2_root_intr_start(struct usbd_xfer *);
Static void		dwc2_root_intr_abort(struct usbd_xfer *);
Static void		dwc2_root_intr_close(struct usbd_pipe *);
Static void		dwc2_root_intr_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_ctrl_transfer(struct usbd_xfer *);
Static usbd_status	dwc2_device_ctrl_start(struct usbd_xfer *);
Static void		dwc2_device_ctrl_abort(struct usbd_xfer *);
Static void		dwc2_device_ctrl_close(struct usbd_pipe *);
Static void		dwc2_device_ctrl_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_bulk_transfer(struct usbd_xfer *);
Static void		dwc2_device_bulk_abort(struct usbd_xfer *);
Static void		dwc2_device_bulk_close(struct usbd_pipe *);
Static void		dwc2_device_bulk_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_intr_transfer(struct usbd_xfer *);
Static usbd_status	dwc2_device_intr_start(struct usbd_xfer *);
Static void		dwc2_device_intr_abort(struct usbd_xfer *);
Static void		dwc2_device_intr_close(struct usbd_pipe *);
Static void		dwc2_device_intr_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_isoc_transfer(struct usbd_xfer *);
Static void		dwc2_device_isoc_abort(struct usbd_xfer *);
Static void		dwc2_device_isoc_close(struct usbd_pipe *);
Static void		dwc2_device_isoc_done(struct usbd_xfer *);

Static usbd_status	dwc2_device_start(struct usbd_xfer *);

Static void		dwc2_close_pipe(struct usbd_pipe *);
Static void		dwc2_abort_xfer(struct usbd_xfer *, usbd_status);

Static void		dwc2_device_clear_toggle(struct usbd_pipe *);
Static void		dwc2_noop(struct usbd_pipe *pipe);

Static int		dwc2_interrupt(struct dwc2_softc *);
Static void		dwc2_rhc(void *);

Static void		dwc2_timeout(void *);
Static void		dwc2_timeout_task(void *);


static inline void
dwc2_allocate_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
			    struct usbd_xfer *xfer)
{
}

static inline void
dwc2_free_bus_bandwidth(struct dwc2_hsotg *hsotg, u16 bw,
			struct usbd_xfer *xfer)
{
}

Static const struct usbd_bus_methods dwc2_bus_methods = {
	.ubm_open =	dwc2_open,
	.ubm_softint =	dwc2_softintr,
	.ubm_dopoll =	dwc2_poll,
	.ubm_allocx =	dwc2_allocx,
	.ubm_freex =	dwc2_freex,
	.ubm_getlock =	dwc2_get_lock,
	.ubm_rhctrl =	dwc2_roothub_ctrl,
};

Static const struct usbd_pipe_methods dwc2_root_intr_methods = {
	.upm_transfer =	dwc2_root_intr_transfer,
	.upm_start =	dwc2_root_intr_start,
	.upm_abort =	dwc2_root_intr_abort,
	.upm_close =	dwc2_root_intr_close,
	.upm_cleartoggle =	dwc2_noop,
	.upm_done =	dwc2_root_intr_done,
};

Static const struct usbd_pipe_methods dwc2_device_ctrl_methods = {
	.upm_transfer =	dwc2_device_ctrl_transfer,
	.upm_start =	dwc2_device_ctrl_start,
	.upm_abort =	dwc2_device_ctrl_abort,
	.upm_close =	dwc2_device_ctrl_close,
	.upm_cleartoggle =	dwc2_noop,
	.upm_done =	dwc2_device_ctrl_done,
};

Static const struct usbd_pipe_methods dwc2_device_intr_methods = {
	.upm_transfer =	dwc2_device_intr_transfer,
	.upm_start =	dwc2_device_intr_start,
	.upm_abort =	dwc2_device_intr_abort,
	.upm_close =	dwc2_device_intr_close,
	.upm_cleartoggle =	dwc2_device_clear_toggle,
	.upm_done =	dwc2_device_intr_done,
};

Static const struct usbd_pipe_methods dwc2_device_bulk_methods = {
	.upm_transfer =	dwc2_device_bulk_transfer,
	.upm_abort =	dwc2_device_bulk_abort,
	.upm_close =	dwc2_device_bulk_close,
	.upm_cleartoggle =	dwc2_device_clear_toggle,
	.upm_done =	dwc2_device_bulk_done,
};

Static const struct usbd_pipe_methods dwc2_device_isoc_methods = {
	.upm_transfer =	dwc2_device_isoc_transfer,
	.upm_abort =	dwc2_device_isoc_abort,
	.upm_close =	dwc2_device_isoc_close,
	.upm_cleartoggle =	dwc2_noop,
	.upm_done =	dwc2_device_isoc_done,
};

struct usbd_xfer *
dwc2_allocx(struct usbd_bus *bus, unsigned int nframes)
{
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
	struct dwc2_xfer *dxfer;

	DPRINTFN(10, "\n");

	DWC2_EVCNT_INCR(sc->sc_ev_xferpoolget);
	dxfer = pool_cache_get(sc->sc_xferpool, PR_NOWAIT);
	if (dxfer != NULL) {
		memset(dxfer, 0, sizeof(*dxfer));

		dxfer->urb = dwc2_hcd_urb_alloc(sc->sc_hsotg,
		    nframes, GFP_KERNEL);

#ifdef DIAGNOSTIC
		dxfer->xfer.ux_state = XFER_BUSY;
#endif
	}
	return (struct usbd_xfer *)dxfer;
}

void
dwc2_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
{
	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);

	DPRINTFN(10, "\n");

#ifdef DIAGNOSTIC
	if (xfer->ux_state != XFER_BUSY) {
		DPRINTF("xfer=%p not busy, 0x%08x\n", xfer, xfer->ux_state);
	}
	xfer->ux_state = XFER_FREE;
#endif
	DWC2_EVCNT_INCR(sc->sc_ev_xferpoolput);
	dwc2_hcd_urb_free(sc->sc_hsotg, dxfer->urb, dxfer->urb->packet_count);
	pool_cache_put(sc->sc_xferpool, xfer);
}

Static void
dwc2_get_lock(struct usbd_bus *bus, kmutex_t **lock)
{
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);

	*lock = &sc->sc_lock;
}
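
/*
 * dwc2_rhc() is the handler for the sc_rhc_si soft interrupt established
 * in dwc2_init().  It completes the pending root-hub interrupt transfer
 * (sc_intrxfer) with the standard hub status-change bitmap; since this
 * controller exposes a single port, the buffer always reports 0x02,
 * i.e. bit 1 = "port 1 changed".
 */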

Static void
dwc2_rhc(void *addr)
{
	struct dwc2_softc *sc = addr;
	struct usbd_xfer *xfer;
	u_char *p;

	DPRINTF("\n");
	mutex_enter(&sc->sc_lock);
	xfer = sc->sc_intrxfer;

	if (xfer == NULL) {
		/* Just ignore the change. */
		mutex_exit(&sc->sc_lock);
		return;

	}
	/* set port bit */
	p = KERNADDR(&xfer->ux_dmabuf, 0);

	p[0] = 0x02;	/* we only have one port (1 << 1) */

	xfer->ux_actlen = xfer->ux_length;
	xfer->ux_status = USBD_NORMAL_COMPLETION;

	usb_transfer_complete(xfer);
	mutex_exit(&sc->sc_lock);
}

Static void
dwc2_softintr(void *v)
{
	struct usbd_bus *bus = v;
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
	struct dwc2_xfer *dxfer;

	KASSERT(sc->sc_bus.ub_usepolling || mutex_owned(&sc->sc_lock));

	mutex_spin_enter(&hsotg->lock);
	while ((dxfer = TAILQ_FIRST(&sc->sc_complete)) != NULL) {

		KASSERTMSG(!callout_pending(&dxfer->xfer.ux_callout),
		    "xfer %p pipe %p\n", dxfer, dxfer->xfer.ux_pipe);

		/*
		 * dwc2_abort_xfer will remove this transfer from the
		 * sc_complete queue
		 */
		/*XXXNH not tested */
		if (dxfer->xfer.ux_hcflags & UXFER_ABORTING) {
			cv_broadcast(&dxfer->xfer.ux_hccv);
			continue;
		}

		TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext);

		mutex_spin_exit(&hsotg->lock);
		usb_transfer_complete(&dxfer->xfer);
		mutex_spin_enter(&hsotg->lock);
	}
	mutex_spin_exit(&hsotg->lock);
}

/*
 * Transfer timeout.  The callout fires in soft interrupt context and
 * dwc2_abort_xfer() may sleep, so (unless the controller is dying) the
 * abort is handed to dwc2_timeout_task() on the USB task queue and run
 * in process context.
 */
Static void
dwc2_timeout(void *addr)
{
	struct usbd_xfer *xfer = addr;
	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
//	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);

	DPRINTF("dxfer=%p\n", dxfer);

	if (sc->sc_dying) {
		mutex_enter(&sc->sc_lock);
		dwc2_abort_xfer(&dxfer->xfer, USBD_TIMEOUT);
		mutex_exit(&sc->sc_lock);
		return;
	}

	/* Execute the abort in a process context. */
	usb_init_task(&dxfer->abort_task, dwc2_timeout_task, addr,
	    USB_TASKQ_MPSAFE);
	usb_add_task(dxfer->xfer.ux_pipe->up_dev, &dxfer->abort_task,
	    USB_TASKQ_HC);
}

Static void
dwc2_timeout_task(void *addr)
{
	struct usbd_xfer *xfer = addr;
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);

	DPRINTF("xfer=%p\n", xfer);

	mutex_enter(&sc->sc_lock);
	dwc2_abort_xfer(xfer, USBD_TIMEOUT);
	mutex_exit(&sc->sc_lock);
}

usbd_status
dwc2_open(struct usbd_pipe *pipe)
{
	struct usbd_device *dev = pipe->up_dev;
	struct dwc2_softc *sc = DWC2_PIPE2SC(pipe);
	struct dwc2_pipe *dpipe = DWC2_PIPE2DPIPE(pipe);
	usb_endpoint_descriptor_t *ed = pipe->up_endpoint->ue_edesc;
	uint8_t addr = dev->ud_addr;
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	usbd_status err;

	DPRINTF("pipe %p addr %d xfertype %d dir %s\n", pipe, addr, xfertype,
	    UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN ? "in" : "out");

	if (sc->sc_dying) {
		return USBD_IOERROR;
	}

	if (addr == dev->ud_bus->ub_rhaddr) {
		switch (ed->bEndpointAddress) {
		case USB_CONTROL_ENDPOINT:
			pipe->up_methods = &roothub_ctrl_methods;
			break;
		case UE_DIR_IN | USBROOTHUB_INTR_ENDPT:
			pipe->up_methods = &dwc2_root_intr_methods;
			break;
		default:
			DPRINTF("bad bEndpointAddress 0x%02x\n",
			    ed->bEndpointAddress);
			return USBD_INVAL;
		}
		DPRINTF("root hub pipe open\n");
		return USBD_NORMAL_COMPLETION;
	}

	switch (xfertype) {
	case UE_CONTROL:
		pipe->up_methods = &dwc2_device_ctrl_methods;
		err = usb_allocmem(&sc->sc_bus, sizeof(usb_device_request_t),
		    0, &dpipe->req_dma);
		if (err)
			return err;
		break;
	case UE_INTERRUPT:
		pipe->up_methods = &dwc2_device_intr_methods;
		break;
	case UE_ISOCHRONOUS:
		pipe->up_serialise = false;
		pipe->up_methods = &dwc2_device_isoc_methods;
		break;
	case UE_BULK:
		pipe->up_serialise = false;
		pipe->up_methods = &dwc2_device_bulk_methods;
		break;
	default:
		DPRINTF("bad xfer type %d\n", xfertype);
		return USBD_INVAL;
	}

	/* QH */
	dpipe->priv = NULL;

	return USBD_NORMAL_COMPLETION;
}

Static void
dwc2_poll(struct usbd_bus *bus)
{
	struct dwc2_softc *sc = DWC2_BUS2SC(bus);
	struct dwc2_hsotg *hsotg = sc->sc_hsotg;

	mutex_spin_enter(&hsotg->lock);
	dwc2_interrupt(sc);
	mutex_spin_exit(&hsotg->lock);
}

/*
 * Close a regular pipe.
 * Assumes that there are no pending transactions.
 */
Static void
dwc2_close_pipe(struct usbd_pipe *pipe)
{
#ifdef DIAGNOSTIC
	struct dwc2_softc *sc = pipe->up_dev->ud_bus->ub_hcpriv;
#endif

	KASSERT(mutex_owned(&sc->sc_lock));
}

/*
 * Abort a device request.
 */
Static void
dwc2_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
{
	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
	struct dwc2_xfer *d, *tmp;
	bool wake;
	int err;

	DPRINTF("xfer=%p\n", xfer);

	KASSERT(mutex_owned(&sc->sc_lock));
	KASSERT(!cpu_intr_p() && !cpu_softintr_p());

	if (sc->sc_dying) {
		xfer->ux_status = status;
		callout_stop(&xfer->ux_callout);
		usb_transfer_complete(xfer);
		return;
	}

	/*
	 * If an abort is already in progress then just wait for it to
	 * complete and return.
	 */
	if (xfer->ux_hcflags & UXFER_ABORTING) {
		xfer->ux_status = status;
		xfer->ux_hcflags |= UXFER_ABORTWAIT;
		while (xfer->ux_hcflags & UXFER_ABORTING)
			cv_wait(&xfer->ux_hccv, &sc->sc_lock);
		return;
	}

	/*
	 * Step 1: Make the stack ignore it and stop the callout.
	 */
	mutex_spin_enter(&hsotg->lock);
	xfer->ux_hcflags |= UXFER_ABORTING;

	xfer->ux_status = status;	/* make software ignore it */
	callout_stop(&xfer->ux_callout);

	/* XXXNH suboptimal */
	TAILQ_FOREACH_SAFE(d, &sc->sc_complete, xnext, tmp) {
		if (d == dxfer) {
			TAILQ_REMOVE(&sc->sc_complete, dxfer, xnext);
		}
	}

	err = dwc2_hcd_urb_dequeue(hsotg, dxfer->urb);
	if (err) {
		DPRINTF("dwc2_hcd_urb_dequeue failed\n");
	}

	mutex_spin_exit(&hsotg->lock);

	/*
	 * Step 2: Execute callback.
	 */
	wake = xfer->ux_hcflags & UXFER_ABORTWAIT;
	xfer->ux_hcflags &= ~(UXFER_ABORTING | UXFER_ABORTWAIT);

	usb_transfer_complete(xfer);
	if (wake) {
		cv_broadcast(&xfer->ux_hccv);
	}
}

Static void
dwc2_noop(struct usbd_pipe *pipe)
{

}

Static void
dwc2_device_clear_toggle(struct usbd_pipe *pipe)
{

	DPRINTF("toggle %d -> 0", pipe->up_endpoint->ue_toggle);
}

/***********************************************************************/

Static int
dwc2_roothub_ctrl(struct usbd_bus *bus, usb_device_request_t *req,
    void *buf, int buflen)
{
	struct dwc2_softc *sc = bus->ub_hcpriv;
	usbd_status err = USBD_IOERROR;
	uint16_t len, value, index;
	int totlen = 0;

	if (sc->sc_dying)
		return -1;

	DPRINTFN(4, "type=0x%02x request=%02x\n",
	    req->bmRequestType, req->bRequest);

	len = UGETW(req->wLength);
	value = UGETW(req->wValue);
	index = UGETW(req->wIndex);

#define C(x,y) ((x) | ((y) << 8))
	switch (C(req->bRequest, req->bmRequestType)) {
	case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
		DPRINTFN(8, "wValue=0x%04x\n", value);

		if (len == 0)
			break;
		switch (value) {
#define sd ((usb_string_descriptor_t *)buf)
		case C(1, UDESC_STRING):
			/* Vendor */
			//totlen = usb_makestrdesc(sd, len, sc->sc_vendor);
			break;
		case C(2, UDESC_STRING):
			/* Product */
			totlen = usb_makestrdesc(sd, len, "DWC2 root hub");
			break;
#undef sd
		default:
			/* default from usbroothub */
			return buflen;
		}
		break;

	case C(UR_GET_CONFIG, UT_READ_DEVICE):
	case C(UR_GET_INTERFACE, UT_READ_INTERFACE):
	case C(UR_GET_STATUS, UT_READ_INTERFACE):
	case C(UR_GET_STATUS, UT_READ_ENDPOINT):
	case C(UR_SET_ADDRESS, UT_WRITE_DEVICE):
	case C(UR_SET_CONFIG, UT_WRITE_DEVICE):
		/* default from usbroothub */
		DPRINTFN(4, "returning %d (usbroothub default)", buflen);

		return buflen;

	default:
		/* Hub requests */
		err = dwc2_hcd_hub_control(sc->sc_hsotg,
		    C(req->bRequest, req->bmRequestType), value, index,
		    buf, len);
		if (err) {
			return -1;
		}
		totlen = len;
	}

	return totlen;
}
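
/*
 * Root-hub interrupt pipe.  Only one transfer is ever outstanding; it is
 * remembered in sc_intrxfer and completed from dwc2_rhc() when a port
 * change is reported, or cleared again by the abort/close methods below.
 */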

Static usbd_status
dwc2_root_intr_transfer(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	usbd_status err;

	DPRINTF("\n");

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mutex_exit(&sc->sc_lock);
	if (err)
		return err;

	/* Pipe isn't running, start first */
	return dwc2_root_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
}

Static usbd_status
dwc2_root_intr_start(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);

	DPRINTF("\n");

	if (sc->sc_dying)
		return USBD_IOERROR;

	mutex_enter(&sc->sc_lock);
	KASSERT(sc->sc_intrxfer == NULL);
	sc->sc_intrxfer = xfer;
	mutex_exit(&sc->sc_lock);

	return USBD_IN_PROGRESS;
}

/* Abort a root interrupt request. */
Static void
dwc2_root_intr_abort(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);

	DPRINTF("xfer=%p\n", xfer);

	KASSERT(mutex_owned(&sc->sc_lock));
	KASSERT(xfer->ux_pipe->up_intrxfer == xfer);

	sc->sc_intrxfer = NULL;

	xfer->ux_status = USBD_CANCELLED;
	usb_transfer_complete(xfer);
}

Static void
dwc2_root_intr_close(struct usbd_pipe *pipe)
{
	struct dwc2_softc *sc = DWC2_PIPE2SC(pipe);

	DPRINTF("\n");

	KASSERT(mutex_owned(&sc->sc_lock));

	sc->sc_intrxfer = NULL;
}

Static void
dwc2_root_intr_done(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);

	KASSERT(sc->sc_intrxfer != NULL);
	sc->sc_intrxfer = NULL;
	DPRINTF("\n");
}

/***********************************************************************/

Static usbd_status
dwc2_device_ctrl_transfer(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	usbd_status err;

	DPRINTF("\n");

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mutex_exit(&sc->sc_lock);
	if (err)
		return err;

	/* Pipe isn't running, start first */
	return dwc2_device_ctrl_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
}

Static usbd_status
dwc2_device_ctrl_start(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	usbd_status err;

	DPRINTF("\n");

	mutex_enter(&sc->sc_lock);
	xfer->ux_status = USBD_IN_PROGRESS;
	err = dwc2_device_start(xfer);
	mutex_exit(&sc->sc_lock);

	if (err)
		return err;

	return USBD_IN_PROGRESS;
}

Static void
dwc2_device_ctrl_abort(struct usbd_xfer *xfer)
{
#ifdef DIAGNOSTIC
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
#endif
	KASSERT(mutex_owned(&sc->sc_lock));

	DPRINTF("xfer=%p\n", xfer);
	dwc2_abort_xfer(xfer, USBD_CANCELLED);
}

Static void
dwc2_device_ctrl_close(struct usbd_pipe *pipe)
{

	DPRINTF("pipe=%p\n", pipe);
	dwc2_close_pipe(pipe);
}

Static void
dwc2_device_ctrl_done(struct usbd_xfer *xfer)
{

	DPRINTF("xfer=%p\n", xfer);
}

/***********************************************************************/
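
/*
 * Bulk (and isochronous) pipes are opened with up_serialise = false in
 * dwc2_open(), so usb_insert_transfer() is expected never to defer the
 * transfer here; the KASSERT below relies on that, and the transfer is
 * handed straight to dwc2_device_start() instead of going through a
 * separate start method.
 */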

Static usbd_status
dwc2_device_bulk_transfer(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	usbd_status err;

	DPRINTF("xfer=%p\n", xfer);

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);

	KASSERT(err == USBD_NORMAL_COMPLETION);

	xfer->ux_status = USBD_IN_PROGRESS;
	err = dwc2_device_start(xfer);
	mutex_exit(&sc->sc_lock);

	return err;
}

Static void
dwc2_device_bulk_abort(struct usbd_xfer *xfer)
{
#ifdef DIAGNOSTIC
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
#endif
	KASSERT(mutex_owned(&sc->sc_lock));

	DPRINTF("xfer=%p\n", xfer);
	dwc2_abort_xfer(xfer, USBD_CANCELLED);
}

Static void
dwc2_device_bulk_close(struct usbd_pipe *pipe)
{

	DPRINTF("pipe=%p\n", pipe);

	dwc2_close_pipe(pipe);
}

Static void
dwc2_device_bulk_done(struct usbd_xfer *xfer)
{

	DPRINTF("xfer=%p\n", xfer);
}

/***********************************************************************/

Static usbd_status
dwc2_device_intr_transfer(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	usbd_status err;

	DPRINTF("xfer=%p\n", xfer);

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);
	mutex_exit(&sc->sc_lock);
	if (err)
		return err;

	/* Pipe isn't running, start first */
	return dwc2_device_intr_start(SIMPLEQ_FIRST(&xfer->ux_pipe->up_queue));
}

Static usbd_status
dwc2_device_intr_start(struct usbd_xfer *xfer)
{
	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
	struct usbd_device *dev = dpipe->pipe.up_dev;
	struct dwc2_softc *sc = dev->ud_bus->ub_hcpriv;
	usbd_status err;

	mutex_enter(&sc->sc_lock);
	xfer->ux_status = USBD_IN_PROGRESS;
	err = dwc2_device_start(xfer);
	mutex_exit(&sc->sc_lock);

	if (err)
		return err;

	return USBD_IN_PROGRESS;
}

/* Abort a device interrupt request. */
Static void
dwc2_device_intr_abort(struct usbd_xfer *xfer)
{
#ifdef DIAGNOSTIC
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
#endif

	KASSERT(mutex_owned(&sc->sc_lock));
	KASSERT(xfer->ux_pipe->up_intrxfer == xfer);

	DPRINTF("xfer=%p\n", xfer);

	dwc2_abort_xfer(xfer, USBD_CANCELLED);
}

Static void
dwc2_device_intr_close(struct usbd_pipe *pipe)
{

	DPRINTF("pipe=%p\n", pipe);

	dwc2_close_pipe(pipe);
}

Static void
dwc2_device_intr_done(struct usbd_xfer *xfer)
{

	DPRINTF("\n");
}

/***********************************************************************/

usbd_status
dwc2_device_isoc_transfer(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	usbd_status err;

	DPRINTF("xfer=%p\n", xfer);

	/* Insert last in queue. */
	mutex_enter(&sc->sc_lock);
	err = usb_insert_transfer(xfer);

	KASSERT(err == USBD_NORMAL_COMPLETION);

	xfer->ux_status = USBD_IN_PROGRESS;
	err = dwc2_device_start(xfer);
	mutex_exit(&sc->sc_lock);

	return err;
}

void
dwc2_device_isoc_abort(struct usbd_xfer *xfer)
{
	struct dwc2_softc *sc __diagused = DWC2_XFER2SC(xfer);
	KASSERT(mutex_owned(&sc->sc_lock));

	DPRINTF("xfer=%p\n", xfer);
	dwc2_abort_xfer(xfer, USBD_CANCELLED);
}

void
dwc2_device_isoc_close(struct usbd_pipe *pipe)
{
	DPRINTF("\n");

	dwc2_close_pipe(pipe);
}

void
dwc2_device_isoc_done(struct usbd_xfer *xfer)
{

	DPRINTF("\n");
}


usbd_status
dwc2_device_start(struct usbd_xfer *xfer)
{
	struct dwc2_xfer *dxfer = DWC2_XFER2DXFER(xfer);
	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
	struct dwc2_softc *sc = DWC2_XFER2SC(xfer);
	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
	struct dwc2_hcd_urb *dwc2_urb;

	struct usbd_device *dev = xfer->ux_pipe->up_dev;
	usb_endpoint_descriptor_t *ed = xfer->ux_pipe->up_endpoint->ue_edesc;
	uint8_t addr = dev->ud_addr;
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint8_t epnum = UE_GET_ADDR(ed->bEndpointAddress);
	uint8_t dir = UE_GET_DIR(ed->bEndpointAddress);
	uint16_t mps = UE_GET_SIZE(UGETW(ed->wMaxPacketSize));
	uint32_t len;

	uint32_t flags = 0;
	uint32_t off = 0;
	int retval, err;
	int alloc_bandwidth = 0;
	int i;

	DPRINTFN(1, "xfer=%p pipe=%p\n", xfer, xfer->ux_pipe);

	if (xfertype == UE_ISOCHRONOUS ||
	    xfertype == UE_INTERRUPT) {
		mutex_spin_enter(&hsotg->lock);
		if (!dwc2_hcd_is_bandwidth_allocated(hsotg, xfer))
			alloc_bandwidth = 1;
		mutex_spin_exit(&hsotg->lock);
	}

	/*
	 * For Control pipe the direction is from the request, all other
	 * transfers have been set correctly at pipe open time.
	 */
	if (xfertype == UE_CONTROL) {
		usb_device_request_t *req = &xfer->ux_request;

		DPRINTFN(3, "xfer=%p type=0x%02x request=0x%02x wValue=0x%04x "
		    "wIndex=0x%04x len=%d addr=%d endpt=%d dir=%s speed=%d "
		    "mps=%d\n",
		    xfer, req->bmRequestType, req->bRequest, UGETW(req->wValue),
		    UGETW(req->wIndex), UGETW(req->wLength), dev->ud_addr,
		    epnum, dir == UT_READ ? "in" :"out", dev->ud_speed, mps);

		/* Copy request packet to our DMA buffer */
		memcpy(KERNADDR(&dpipe->req_dma, 0), req, sizeof(*req));
		usb_syncmem(&dpipe->req_dma, 0, sizeof(*req),
		    BUS_DMASYNC_PREWRITE);
		len = UGETW(req->wLength);
		if ((req->bmRequestType & UT_READ) == UT_READ) {
			dir = UE_DIR_IN;
		} else {
			dir = UE_DIR_OUT;
		}

		DPRINTFN(3, "req = %p dma = %" PRIxBUSADDR " len %d dir %s\n",
		    KERNADDR(&dpipe->req_dma, 0), DMAADDR(&dpipe->req_dma, 0),
		    len, dir == UE_DIR_IN ? "in" : "out");
	} else {
		DPRINTFN(3, "xfer=%p len=%d flags=%d addr=%d endpt=%d,"
		    " mps=%d dir %s\n", xfer, xfer->ux_length, xfer->ux_flags, addr,
		    epnum, mps, dir == UT_READ ? "in" :"out");

		len = xfer->ux_length;
	}

	dwc2_urb = dxfer->urb;
	if (!dwc2_urb)
		return USBD_NOMEM;

	KASSERT(dwc2_urb->packet_count == xfer->ux_nframes);
	memset(dwc2_urb, 0, sizeof(*dwc2_urb) +
	    sizeof(dwc2_urb->iso_descs[0]) * dwc2_urb->packet_count);

	dwc2_urb->priv = xfer;
	dwc2_urb->packet_count = xfer->ux_nframes;

	dwc2_hcd_urb_set_pipeinfo(hsotg, dwc2_urb, addr, epnum, xfertype, dir,
	    mps);

	if (xfertype == UE_CONTROL) {
		dwc2_urb->setup_usbdma = &dpipe->req_dma;
		dwc2_urb->setup_packet = KERNADDR(&dpipe->req_dma, 0);
		dwc2_urb->setup_dma = DMAADDR(&dpipe->req_dma, 0);
	} else {
		/* XXXNH - % mps required? */
		if ((xfer->ux_flags & USBD_FORCE_SHORT_XFER) && (len % mps) == 0)
			flags |= URB_SEND_ZERO_PACKET;
	}
	flags |= URB_GIVEBACK_ASAP;

	/*
	 * control transfers with no data phase don't touch usbdma, but
	 * everything else does.
	 */
	if (!(xfertype == UE_CONTROL && len == 0)) {
		dwc2_urb->usbdma = &xfer->ux_dmabuf;
		dwc2_urb->buf = KERNADDR(dwc2_urb->usbdma, 0);
		dwc2_urb->dma = DMAADDR(dwc2_urb->usbdma, 0);
	}
	dwc2_urb->length = len;
	dwc2_urb->flags = flags;
	dwc2_urb->status = -EINPROGRESS;

	if (xfertype == UE_INTERRUPT ||
	    xfertype == UE_ISOCHRONOUS) {
		uint16_t ival;

		if (xfertype == UE_INTERRUPT &&
		    dpipe->pipe.up_interval != USBD_DEFAULT_INTERVAL) {
			ival = dpipe->pipe.up_interval;
		} else {
			ival = ed->bInterval;
		}

		if (ival < 1) {
			retval = -ENODEV;
			goto fail;
		}
		if (dev->ud_speed == USB_SPEED_HIGH ||
		    (dev->ud_speed == USB_SPEED_FULL && xfertype == UE_ISOCHRONOUS)) {
			if (ival > 16) {
				/*
				 * illegal with HS/FS, but there were
				 * documentation bugs in the spec
				 */
				ival = 256;
			} else {
				ival = (1 << (ival - 1));
			}
		} else {
			if (xfertype == UE_INTERRUPT && ival < 10)
				ival = 10;
		}
		dwc2_urb->interval = ival;
	}

	/* XXXNH bring down from callers?? */
//	mutex_enter(&sc->sc_lock);

	xfer->ux_actlen = 0;

	KASSERT(xfertype != UE_ISOCHRONOUS ||
	    xfer->ux_nframes <= dwc2_urb->packet_count);
	KASSERTMSG(xfer->ux_nframes == 0 || xfertype == UE_ISOCHRONOUS,
	    "nframes %d xfertype %d\n", xfer->ux_nframes, xfertype);

	for (off = i = 0; i < xfer->ux_nframes; ++i) {
		DPRINTFN(3, "xfer=%p frame=%d offset=%d length=%d\n", xfer, i,
		    off, xfer->ux_frlengths[i]);

		dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i, off,
		    xfer->ux_frlengths[i]);
		off += xfer->ux_frlengths[i];
	}

	struct dwc2_qh *qh = dpipe->priv;
	struct dwc2_qtd *qtd;
	bool qh_allocated = false;

	/* Create QH for the endpoint if it doesn't exist */
	if (!qh) {
		qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, GFP_ATOMIC);
		if (!qh) {
			retval = -ENOMEM;
			goto fail;
		}
		dpipe->priv = qh;
		qh_allocated = true;
	}

	qtd = pool_cache_get(sc->sc_qtdpool, PR_NOWAIT);
	if (!qtd) {
		retval = -ENOMEM;
		goto fail1;
	}
	memset(qtd, 0, sizeof(*qtd));

	/* might need to check cpu_intr_p */
	mutex_spin_enter(&hsotg->lock);

	if (xfer->ux_timeout && !sc->sc_bus.ub_usepolling) {
		callout_reset(&xfer->ux_callout, mstohz(xfer->ux_timeout),
		    dwc2_timeout, xfer);
	}
	retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, qh, qtd);
	if (retval)
		goto fail2;

	if (alloc_bandwidth) {
		dwc2_allocate_bus_bandwidth(hsotg,
		    dwc2_hcd_get_ep_bandwidth(hsotg, dpipe),
		    xfer);
	}

	mutex_spin_exit(&hsotg->lock);
//	mutex_exit(&sc->sc_lock);

	return USBD_IN_PROGRESS;

fail2:
	callout_stop(&xfer->ux_callout);
	dwc2_urb->priv = NULL;
	mutex_spin_exit(&hsotg->lock);
	pool_cache_put(sc->sc_qtdpool, qtd);

fail1:
	if (qh_allocated) {
		dpipe->priv = NULL;
		dwc2_hcd_qh_free(hsotg, qh);
	}
fail:

	switch (retval) {
	case -EINVAL:
	case -ENODEV:
		err = USBD_INVAL;
		break;
	case -ENOMEM:
		err = USBD_NOMEM;
		break;
	default:
		err = USBD_IOERROR;
	}

	return err;

}

int dwc2_intr(void *p)
{
	struct dwc2_softc *sc = p;
	struct dwc2_hsotg *hsotg;
	int ret = 0;

	if (sc == NULL)
		return 0;

	hsotg = sc->sc_hsotg;
	mutex_spin_enter(&hsotg->lock);

	if (sc->sc_dying || !device_has_power(sc->sc_dev))
		goto done;

	if (sc->sc_bus.ub_usepolling) {
		uint32_t intrs;

		intrs = dwc2_read_core_intr(hsotg);
		DWC2_WRITE_4(hsotg, GINTSTS, intrs);
	} else {
		ret = dwc2_interrupt(sc);
	}

done:
	mutex_spin_exit(&hsotg->lock);

	return ret;
}

int
dwc2_interrupt(struct dwc2_softc *sc)
{
	int ret = 0;

	if (sc->sc_hcdenabled) {
		ret |= dwc2_handle_hcd_intr(sc->sc_hsotg);
	}

	ret |= dwc2_handle_common_intr(sc->sc_hsotg);

	return ret;
}

/***********************************************************************/

int
dwc2_detach(struct dwc2_softc *sc, int flags)
{
	int rv = 0;

	if (sc->sc_child != NULL)
		rv = config_detach(sc->sc_child, flags);

	return rv;
}

bool
dwc2_shutdown(device_t self, int flags)
{
	struct dwc2_softc *sc = device_private(self);

	sc = sc;

	return true;
}

void
dwc2_childdet(device_t self, device_t child)
{
	struct dwc2_softc *sc = device_private(self);

	sc = sc;
}

int
dwc2_activate(device_t self, enum devact act)
{
	struct dwc2_softc *sc = device_private(self);

	sc = sc;

	return 0;
}

bool
dwc2_resume(device_t dv, const pmf_qual_t *qual)
{
	struct dwc2_softc *sc = device_private(dv);

	sc = sc;

	return true;
}

bool
dwc2_suspend(device_t dv, const pmf_qual_t *qual)
{
	struct dwc2_softc *sc = device_private(dv);

	sc = sc;

	return true;
}

/***********************************************************************/
int
dwc2_init(struct dwc2_softc *sc)
{
	int err = 0;

	sc->sc_bus.ub_hcpriv = sc;
	sc->sc_bus.ub_revision = USBREV_2_0;
	sc->sc_bus.ub_methods = &dwc2_bus_methods;
	sc->sc_bus.ub_pipesize = sizeof(struct dwc2_pipe);
	sc->sc_bus.ub_usedma = true;
	sc->sc_hcdenabled = false;

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SOFTUSB);

	TAILQ_INIT(&sc->sc_complete);

	sc->sc_rhc_si = softint_establish(SOFTINT_USB | SOFTINT_MPSAFE,
	    dwc2_rhc, sc);

	sc->sc_xferpool = pool_cache_init(sizeof(struct dwc2_xfer), 0, 0, 0,
	    "dwc2xfer", NULL, IPL_USB, NULL, NULL, NULL);
	sc->sc_qhpool = pool_cache_init(sizeof(struct dwc2_qh), 0, 0, 0,
	    "dwc2qh", NULL, IPL_USB, NULL, NULL, NULL);
	sc->sc_qtdpool = pool_cache_init(sizeof(struct dwc2_qtd), 0, 0, 0,
	    "dwc2qtd", NULL, IPL_USB, NULL, NULL, NULL);

	sc->sc_hsotg = kmem_zalloc(sizeof(struct dwc2_hsotg), KM_SLEEP);
	if (sc->sc_hsotg == NULL) {
		err = ENOMEM;
		goto fail1;
	}

	sc->sc_hsotg->hsotg_sc = sc;
	sc->sc_hsotg->dev = sc->sc_dev;
	sc->sc_hcdenabled = true;

	struct dwc2_hsotg *hsotg = sc->sc_hsotg;
	struct dwc2_core_params defparams;
	int retval;

	if (sc->sc_params == NULL) {
		/* Default all params to autodetect */
		dwc2_set_all_params(&defparams, -1);
		sc->sc_params = &defparams;

		/*
		 * Disable descriptor dma mode by default as the HW can support
		 * it, but does not support it for SPLIT transactions.
		 */
		defparams.dma_desc_enable = 0;
	}
	hsotg->dr_mode = USB_DR_MODE_HOST;

	/* Detect config values from hardware */
	retval = dwc2_get_hwparams(hsotg);
	if (retval) {
		goto fail2;
	}

	hsotg->core_params = kmem_zalloc(sizeof(*hsotg->core_params), KM_SLEEP);
	if (!hsotg->core_params) {
		retval = -ENOMEM;
		goto fail2;
	}

	dwc2_set_all_params(hsotg->core_params, -1);

	/* Validate parameter values */
	dwc2_set_parameters(hsotg, sc->sc_params);

#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
    IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
	if (hsotg->dr_mode != USB_DR_MODE_HOST) {
		retval = dwc2_gadget_init(hsotg);
		if (retval)
			goto fail2;
		hsotg->gadget_enabled = 1;
	}
#endif
#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || \
    IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
	if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) {
		retval = dwc2_hcd_init(hsotg);
		if (retval) {
			if (hsotg->gadget_enabled)
				dwc2_hsotg_remove(hsotg);
			goto fail2;
		}
		hsotg->hcd_enabled = 1;
	}
#endif

	return 0;

fail2:
	err = -retval;
	kmem_free(sc->sc_hsotg, sizeof(struct dwc2_hsotg));
fail1:
	softint_disestablish(sc->sc_rhc_si);

	return err;
}

#if 0
/*
 * curmode is a mode indication bit 0 = device, 1 = host
 */
static const char * const intnames[32] = {
	"curmode",	"modemis",	"otgint",	"sof",
	"rxflvl",	"nptxfemp",	"ginnakeff",	"goutnakeff",
	"ulpickint",	"i2cint",	"erlysusp",	"usbsusp",
	"usbrst",	"enumdone",	"isooutdrop",	"eopf",
	"restore_done",	"epmis",	"iepint",	"oepint",
	"incompisoin",	"incomplp",	"fetsusp",	"resetdet",
	"prtint",	"hchint",	"ptxfemp",	"lpm",
	"conidstschng",	"disconnint",	"sessreqint",	"wkupint"
};


/***********************************************************************/

#endif

void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context, int *hub_addr,
			int *hub_port)
{
	struct usbd_xfer *xfer = context;
	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
	struct usbd_device *dev = dpipe->pipe.up_dev;

	*hub_addr = dev->ud_myhsport->up_parent->ud_addr;
	*hub_port = dev->ud_myhsport->up_portno;
}

int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context)
{
	struct usbd_xfer *xfer = context;
	struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);
	struct usbd_device *dev = dpipe->pipe.up_dev;

	return dev->ud_speed;
}

/*
 * Sets the final status of an URB and returns it to the upper layer. Any
 * required cleanup of the URB is performed.
 *
 * Must be called with interrupt disabled and spinlock held
 */
void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
			int status)
{
	struct usbd_xfer *xfer;
	struct dwc2_xfer *dxfer;
	struct dwc2_softc *sc;
	usb_endpoint_descriptor_t *ed;
	uint8_t xfertype;

	if (!qtd) {
		dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__);
		return;
	}

	if (!qtd->urb) {
		dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__);
		return;
	}

	xfer = qtd->urb->priv;
	if (!xfer) {
		dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__);
		return;
	}

	dxfer = DWC2_XFER2DXFER(xfer);
	sc = DWC2_XFER2SC(xfer);
	ed = xfer->ux_pipe->up_endpoint->ue_edesc;
	xfertype = UE_GET_XFERTYPE(ed->bmAttributes);

	struct dwc2_hcd_urb *urb = qtd->urb;
	xfer->ux_actlen = dwc2_hcd_urb_get_actual_length(urb);

	DPRINTFN(3, "xfer=%p actlen=%d\n", xfer, xfer->ux_actlen);

	if (xfertype == UE_ISOCHRONOUS) {
		int i;

		xfer->ux_actlen = 0;
		for (i = 0; i < xfer->ux_nframes; ++i) {
			xfer->ux_frlengths[i] =
			    dwc2_hcd_urb_get_iso_desc_actual_length(
			    urb, i);
			xfer->ux_actlen += xfer->ux_frlengths[i];
		}
	}

	if (xfertype == UE_ISOCHRONOUS && dbg_perio()) {
		int i;

		for (i = 0; i < xfer->ux_nframes; i++)
			dev_vdbg(hsotg->dev, " ISO Desc %d status %d\n",
				 i, urb->iso_descs[i].status);
	}

	if (!status) {
		if (!(xfer->ux_flags & USBD_SHORT_XFER_OK) &&
		    xfer->ux_actlen < xfer->ux_length)
			status = -EIO;
	}

	switch (status) {
	case 0:
		xfer->ux_status = USBD_NORMAL_COMPLETION;
		break;
	case -EPIPE:
		xfer->ux_status = USBD_STALLED;
		break;
	case -ETIMEDOUT:
		xfer->ux_status = USBD_TIMEOUT;
		break;
	case -EPROTO:
		xfer->ux_status = USBD_INVAL;
		break;
	case -EIO:
		xfer->ux_status = USBD_IOERROR;
		break;
	case -EOVERFLOW:
		xfer->ux_status = USBD_IOERROR;
		break;
	default:
		xfer->ux_status = USBD_IOERROR;
		printf("%s: unknown error status %d\n", __func__, status);
	}

	if (xfer->ux_status == USBD_NORMAL_COMPLETION) {
		/*
		 * control transfers with no data phase don't touch dmabuf, but
		 * everything else does.
		 */
		if (!(xfertype == UE_CONTROL &&
		    UGETW(xfer->ux_request.wLength) == 0)) {
			int rd = usbd_xfer_isread(xfer);

			usb_syncmem(&xfer->ux_dmabuf, 0, xfer->ux_actlen,
			    rd ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		}
	}

	if (xfertype == UE_ISOCHRONOUS ||
	    xfertype == UE_INTERRUPT) {
		struct dwc2_pipe *dpipe = DWC2_XFER2DPIPE(xfer);

		dwc2_free_bus_bandwidth(hsotg,
		    dwc2_hcd_get_ep_bandwidth(hsotg, dpipe),
		    xfer);
	}

	qtd->urb = NULL;
	callout_stop(&xfer->ux_callout);

	KASSERT(mutex_owned(&hsotg->lock));

	TAILQ_INSERT_TAIL(&sc->sc_complete, dxfer, xnext);

	mutex_spin_exit(&hsotg->lock);
	usb_schedsoftintr(&sc->sc_bus);
	mutex_spin_enter(&hsotg->lock);
}


int
_dwc2_hcd_start(struct dwc2_hsotg *hsotg)
{
	dev_dbg(hsotg->dev, "DWC OTG HCD START\n");

	mutex_spin_enter(&hsotg->lock);

	hsotg->lx_state = DWC2_L0;

	if (dwc2_is_device_mode(hsotg)) {
		mutex_spin_exit(&hsotg->lock);
		return 0;	/* why 0 ?? */
	}

	dwc2_hcd_reinit(hsotg);

	mutex_spin_exit(&hsotg->lock);
	return 0;
}

int dwc2_host_is_b_hnp_enabled(struct dwc2_hsotg *hsotg)
{

	return false;
}