1 /* $NetBSD: usbdi.c,v 1.242 2022/04/06 22:01:45 mlelstv Exp $ */ 2 3 /* 4 * Copyright (c) 1998, 2012, 2015 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Lennart Augustsson (lennart@augustsson.net) at 9 * Carlstedt Research & Technology, Matthew R. Green (mrg@eterna.com.au), 10 * and Nick Hudson. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 
32 */ 33 34 #include <sys/cdefs.h> 35 __KERNEL_RCSID(0, "$NetBSD: usbdi.c,v 1.242 2022/04/06 22:01:45 mlelstv Exp $"); 36 37 #ifdef _KERNEL_OPT 38 #include "opt_usb.h" 39 #include "opt_compat_netbsd.h" 40 #include "usb_dma.h" 41 #endif 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/kernel.h> 46 #include <sys/device.h> 47 #include <sys/kmem.h> 48 #include <sys/proc.h> 49 #include <sys/bus.h> 50 #include <sys/cpu.h> 51 52 #include <dev/usb/usb.h> 53 #include <dev/usb/usbdi.h> 54 #include <dev/usb/usbdi_util.h> 55 #include <dev/usb/usbdivar.h> 56 #include <dev/usb/usb_mem.h> 57 #include <dev/usb/usb_quirks.h> 58 #include <dev/usb/usb_sdt.h> 59 #include <dev/usb/usbhist.h> 60 61 /* UTF-8 encoding stuff */ 62 #include <fs/unicode.h> 63 64 SDT_PROBE_DEFINE5(usb, device, pipe, open, 65 "struct usbd_interface *"/*iface*/, 66 "uint8_t"/*address*/, 67 "uint8_t"/*flags*/, 68 "int"/*ival*/, 69 "struct usbd_pipe *"/*pipe*/); 70 71 SDT_PROBE_DEFINE7(usb, device, pipe, open__intr, 72 "struct usbd_interface *"/*iface*/, 73 "uint8_t"/*address*/, 74 "uint8_t"/*flags*/, 75 "int"/*ival*/, 76 "usbd_callback"/*cb*/, 77 "void *"/*cookie*/, 78 "struct usbd_pipe *"/*pipe*/); 79 80 SDT_PROBE_DEFINE2(usb, device, pipe, transfer__start, 81 "struct usbd_pipe *"/*pipe*/, 82 "struct usbd_xfer *"/*xfer*/); 83 SDT_PROBE_DEFINE3(usb, device, pipe, transfer__done, 84 "struct usbd_pipe *"/*pipe*/, 85 "struct usbd_xfer *"/*xfer*/, 86 "usbd_status"/*err*/); 87 SDT_PROBE_DEFINE2(usb, device, pipe, start, 88 "struct usbd_pipe *"/*pipe*/, 89 "struct usbd_xfer *"/*xfer*/); 90 91 SDT_PROBE_DEFINE1(usb, device, pipe, close, "struct usbd_pipe *"/*pipe*/); 92 SDT_PROBE_DEFINE1(usb, device, pipe, abort__start, 93 "struct usbd_pipe *"/*pipe*/); 94 SDT_PROBE_DEFINE1(usb, device, pipe, abort__done, 95 "struct usbd_pipe *"/*pipe*/); 96 SDT_PROBE_DEFINE1(usb, device, pipe, clear__endpoint__stall, 97 "struct usbd_pipe *"/*pipe*/); 98 SDT_PROBE_DEFINE1(usb, device, pipe, 
clear__endpoint__toggle, 99 "struct usbd_pipe *"/*pipe*/); 100 101 SDT_PROBE_DEFINE5(usb, device, xfer, create, 102 "struct usbd_xfer *"/*xfer*/, 103 "struct usbd_pipe *"/*pipe*/, 104 "size_t"/*len*/, 105 "unsigned int"/*flags*/, 106 "unsigned int"/*nframes*/); 107 SDT_PROBE_DEFINE1(usb, device, xfer, start, "struct usbd_xfer *"/*xfer*/); 108 SDT_PROBE_DEFINE1(usb, device, xfer, preabort, "struct usbd_xfer *"/*xfer*/); 109 SDT_PROBE_DEFINE1(usb, device, xfer, abort, "struct usbd_xfer *"/*xfer*/); 110 SDT_PROBE_DEFINE1(usb, device, xfer, timeout, "struct usbd_xfer *"/*xfer*/); 111 SDT_PROBE_DEFINE2(usb, device, xfer, done, 112 "struct usbd_xfer *"/*xfer*/, 113 "usbd_status"/*status*/); 114 SDT_PROBE_DEFINE1(usb, device, xfer, destroy, "struct usbd_xfer *"/*xfer*/); 115 116 SDT_PROBE_DEFINE5(usb, device, request, start, 117 "struct usbd_device *"/*dev*/, 118 "usb_device_request_t *"/*req*/, 119 "size_t"/*len*/, 120 "int"/*flags*/, 121 "uint32_t"/*timeout*/); 122 123 SDT_PROBE_DEFINE7(usb, device, request, done, 124 "struct usbd_device *"/*dev*/, 125 "usb_device_request_t *"/*req*/, 126 "size_t"/*actlen*/, 127 "int"/*flags*/, 128 "uint32_t"/*timeout*/, 129 "void *"/*data*/, 130 "usbd_status"/*status*/); 131 132 Static void usbd_ar_pipe(struct usbd_pipe *); 133 Static void usbd_start_next(struct usbd_pipe *); 134 Static usbd_status usbd_open_pipe_ival 135 (struct usbd_interface *, uint8_t, uint8_t, struct usbd_pipe **, int); 136 static void *usbd_alloc_buffer(struct usbd_xfer *, uint32_t); 137 static void usbd_free_buffer(struct usbd_xfer *); 138 static struct usbd_xfer *usbd_alloc_xfer(struct usbd_device *, unsigned int); 139 static void usbd_free_xfer(struct usbd_xfer *); 140 static void usbd_request_async_cb(struct usbd_xfer *, void *, usbd_status); 141 static void usbd_xfer_timeout(void *); 142 static void usbd_xfer_timeout_task(void *); 143 static bool usbd_xfer_probe_timeout(struct usbd_xfer *); 144 static void usbd_xfer_cancel_timeout_async(struct usbd_xfer *); 
145 146 #if defined(USB_DEBUG) 147 void 148 usbd_dump_iface(struct usbd_interface *iface) 149 { 150 USBHIST_FUNC(); 151 USBHIST_CALLARGS(usbdebug, "iface %#jx", (uintptr_t)iface, 0, 0, 0); 152 153 if (iface == NULL) 154 return; 155 USBHIST_LOG(usbdebug, " device = %#jx idesc = %#jx index = %jd", 156 (uintptr_t)iface->ui_dev, (uintptr_t)iface->ui_idesc, 157 iface->ui_index, 0); 158 USBHIST_LOG(usbdebug, " altindex=%jd", 159 iface->ui_altindex, 0, 0, 0); 160 } 161 162 void 163 usbd_dump_device(struct usbd_device *dev) 164 { 165 USBHIST_FUNC(); 166 USBHIST_CALLARGS(usbdebug, "dev = %#jx", (uintptr_t)dev, 0, 0, 0); 167 168 if (dev == NULL) 169 return; 170 USBHIST_LOG(usbdebug, " bus = %#jx default_pipe = %#jx", 171 (uintptr_t)dev->ud_bus, (uintptr_t)dev->ud_pipe0, 0, 0); 172 USBHIST_LOG(usbdebug, " address = %jd config = %jd depth = %jd ", 173 dev->ud_addr, dev->ud_config, dev->ud_depth, 0); 174 USBHIST_LOG(usbdebug, " speed = %jd self_powered = %jd " 175 "power = %jd langid = %jd", 176 dev->ud_speed, dev->ud_selfpowered, dev->ud_power, dev->ud_langid); 177 } 178 179 void 180 usbd_dump_endpoint(struct usbd_endpoint *endp) 181 { 182 USBHIST_FUNC(); 183 USBHIST_CALLARGS(usbdebug, "endp = %#jx", (uintptr_t)endp, 0, 0, 0); 184 185 if (endp == NULL) 186 return; 187 USBHIST_LOG(usbdebug, " edesc = %#jx refcnt = %jd", 188 (uintptr_t)endp->ue_edesc, endp->ue_refcnt, 0, 0); 189 if (endp->ue_edesc) 190 USBHIST_LOG(usbdebug, " bEndpointAddress=0x%02jx", 191 endp->ue_edesc->bEndpointAddress, 0, 0, 0); 192 } 193 194 void 195 usbd_dump_queue(struct usbd_pipe *pipe) 196 { 197 struct usbd_xfer *xfer; 198 199 USBHIST_FUNC(); 200 USBHIST_CALLARGS(usbdebug, "pipe = %#jx", (uintptr_t)pipe, 0, 0, 0); 201 202 SIMPLEQ_FOREACH(xfer, &pipe->up_queue, ux_next) { 203 USBHIST_LOG(usbdebug, " xfer = %#jx", (uintptr_t)xfer, 204 0, 0, 0); 205 } 206 } 207 208 void 209 usbd_dump_pipe(struct usbd_pipe *pipe) 210 { 211 USBHIST_FUNC(); 212 USBHIST_CALLARGS(usbdebug, "pipe = %#jx", (uintptr_t)pipe, 0, 0, 
0); 213 214 if (pipe == NULL) 215 return; 216 usbd_dump_iface(pipe->up_iface); 217 usbd_dump_device(pipe->up_dev); 218 usbd_dump_endpoint(pipe->up_endpoint); 219 USBHIST_LOG(usbdebug, "(usbd_dump_pipe)", 0, 0, 0, 0); 220 USBHIST_LOG(usbdebug, " running = %jd aborting = %jd", 221 pipe->up_running, pipe->up_aborting, 0, 0); 222 USBHIST_LOG(usbdebug, " intrxfer = %#jx, repeat = %jd, " 223 "interval = %jd", (uintptr_t)pipe->up_intrxfer, pipe->up_repeat, 224 pipe->up_interval, 0); 225 } 226 #endif 227 228 usbd_status 229 usbd_open_pipe(struct usbd_interface *iface, uint8_t address, 230 uint8_t flags, struct usbd_pipe **pipe) 231 { 232 return (usbd_open_pipe_ival(iface, address, flags, pipe, 233 USBD_DEFAULT_INTERVAL)); 234 } 235 236 usbd_status 237 usbd_open_pipe_ival(struct usbd_interface *iface, uint8_t address, 238 uint8_t flags, struct usbd_pipe **pipe, int ival) 239 { 240 struct usbd_pipe *p = NULL; 241 struct usbd_endpoint *ep = NULL /* XXXGCC */; 242 bool piperef = false; 243 usbd_status err; 244 int i; 245 246 USBHIST_FUNC(); 247 USBHIST_CALLARGS(usbdebug, "iface = %#jx address = %#jx flags = %#jx", 248 (uintptr_t)iface, address, flags, 0); 249 250 /* 251 * Block usbd_set_interface so we have a snapshot of the 252 * interface endpoints. They will remain stable until we drop 253 * the reference in usbd_close_pipe (or on failure here). 254 */ 255 err = usbd_iface_piperef(iface); 256 if (err) 257 goto out; 258 piperef = true; 259 260 /* Find the endpoint at this address. */ 261 for (i = 0; i < iface->ui_idesc->bNumEndpoints; i++) { 262 ep = &iface->ui_endpoints[i]; 263 if (ep->ue_edesc == NULL) { 264 err = USBD_IOERROR; 265 goto out; 266 } 267 if (ep->ue_edesc->bEndpointAddress == address) 268 break; 269 } 270 if (i == iface->ui_idesc->bNumEndpoints) { 271 err = USBD_BAD_ADDRESS; 272 goto out; 273 } 274 275 /* Set up the pipe with this endpoint. 
*/ 276 err = usbd_setup_pipe_flags(iface->ui_dev, iface, ep, ival, &p, flags); 277 if (err) 278 goto out; 279 280 /* Success! */ 281 *pipe = p; 282 p = NULL; /* handed off to caller */ 283 piperef = false; /* handed off to pipe */ 284 SDT_PROBE5(usb, device, pipe, open, 285 iface, address, flags, ival, p); 286 err = USBD_NORMAL_COMPLETION; 287 288 out: if (p) 289 usbd_close_pipe(p); 290 if (piperef) 291 usbd_iface_pipeunref(iface); 292 return err; 293 } 294 295 usbd_status 296 usbd_open_pipe_intr(struct usbd_interface *iface, uint8_t address, 297 uint8_t flags, struct usbd_pipe **pipe, 298 void *priv, void *buffer, uint32_t len, 299 usbd_callback cb, int ival) 300 { 301 usbd_status err; 302 struct usbd_xfer *xfer; 303 struct usbd_pipe *ipipe; 304 305 USBHIST_FUNC(); 306 USBHIST_CALLARGS(usbdebug, "address = %#jx flags = %#jx len = %jd", 307 address, flags, len, 0); 308 309 err = usbd_open_pipe_ival(iface, address, 310 USBD_EXCLUSIVE_USE | (flags & USBD_MPSAFE), 311 &ipipe, ival); 312 if (err) 313 return err; 314 err = usbd_create_xfer(ipipe, len, flags, 0, &xfer); 315 if (err) 316 goto bad1; 317 318 usbd_setup_xfer(xfer, priv, buffer, len, flags, USBD_NO_TIMEOUT, cb); 319 ipipe->up_intrxfer = xfer; 320 ipipe->up_repeat = 1; 321 err = usbd_transfer(xfer); 322 *pipe = ipipe; 323 if (err != USBD_IN_PROGRESS) 324 goto bad3; 325 SDT_PROBE7(usb, device, pipe, open__intr, 326 iface, address, flags, ival, cb, priv, ipipe); 327 return USBD_NORMAL_COMPLETION; 328 329 bad3: 330 ipipe->up_intrxfer = NULL; 331 ipipe->up_repeat = 0; 332 333 usbd_destroy_xfer(xfer); 334 bad1: 335 usbd_close_pipe(ipipe); 336 return err; 337 } 338 339 void 340 usbd_close_pipe(struct usbd_pipe *pipe) 341 { 342 USBHIST_FUNC(); USBHIST_CALLED(usbdebug); 343 344 KASSERT(pipe != NULL); 345 346 usbd_lock_pipe(pipe); 347 SDT_PROBE1(usb, device, pipe, close, pipe); 348 if (!SIMPLEQ_EMPTY(&pipe->up_queue)) { 349 printf("WARNING: pipe closed with active xfers on addr %d\n", 350 pipe->up_dev->ud_addr); 351 
		/* Drain the queue before tearing the pipe down. */
		usbd_ar_pipe(pipe);
	}
	KASSERT(SIMPLEQ_EMPTY(&pipe->up_queue));
	pipe->up_methods->upm_close(pipe);
	usbd_unlock_pipe(pipe);

	/*
	 * Queue is empty and the HC has closed the endpoint; release
	 * everything the pipe owns, then the pipe memory itself.
	 */
	cv_destroy(&pipe->up_callingcv);
	if (pipe->up_intrxfer)
		usbd_destroy_xfer(pipe->up_intrxfer);
	usb_rem_task_wait(pipe->up_dev, &pipe->up_async_task, USB_TASKQ_DRIVER,
	    NULL);
	usbd_endpoint_release(pipe->up_dev, pipe->up_endpoint);
	if (pipe->up_iface)
		usbd_iface_pipeunref(pipe->up_iface);
	kmem_free(pipe, pipe->up_dev->ud_bus->ub_pipesize);
}

/*
 * usbd_transfer(xfer)
 *
 *	Submit xfer on its pipe.  For asynchronous transfers, returns
 *	USBD_IN_PROGRESS (or an error) immediately; completion is
 *	reported through the xfer callback.  For USBD_SYNCHRONOUS
 *	transfers, sleeps until the transfer is done and returns its
 *	final status.  For OUT transfers with a caller-supplied buffer,
 *	data is staged into the xfer's own buffer before submission.
 */
usbd_status
usbd_transfer(struct usbd_xfer *xfer)
{
	struct usbd_pipe *pipe = xfer->ux_pipe;
	usbd_status err;
	unsigned int size, flags;

	USBHIST_FUNC(); USBHIST_CALLARGS(usbdebug,
	    "xfer = %#jx, flags = %#jx, pipe = %#jx, running = %jd",
	    (uintptr_t)xfer, xfer->ux_flags, (uintptr_t)pipe, pipe->up_running);
	KASSERT(xfer->ux_status == USBD_NOT_STARTED);
	SDT_PROBE1(usb, device, xfer, start, xfer);

#ifdef USB_DEBUG
	if (usbdebug > 5)
		usbd_dump_queue(pipe);
#endif
	xfer->ux_done = 0;

	KASSERT(xfer->ux_length == 0 || xfer->ux_buf != NULL);

	size = xfer->ux_length;
	flags = xfer->ux_flags;

	if (size != 0) {
		/*
		 * Use the xfer buffer if none specified in transfer setup.
		 * isoc transfers always use the xfer buffer, i.e.
		 * ux_buffer is always NULL for isoc.
		 */
		if (xfer->ux_buffer == NULL) {
			xfer->ux_buffer = xfer->ux_buf;
		}

		/*
		 * If not using the xfer buffer copy data to the
		 * xfer buffer for OUT transfers of >0 length
		 */
		if (xfer->ux_buffer != xfer->ux_buf) {
			KASSERT(xfer->ux_buf);
			if (!usbd_xfer_isread(xfer)) {
				memcpy(xfer->ux_buf, xfer->ux_buffer, size);
			}
		}
	}

	usbd_lock_pipe(pipe);
	if (pipe->up_aborting) {
		/*
		 * XXX For synchronous transfers this is fine.  What to
		 * do for asynchronous transfers?  The callback is
		 * never run, not even with status USBD_CANCELLED.
		 */
		usbd_unlock_pipe(pipe);
		USBHIST_LOG(usbdebug, "<- done xfer %#jx, aborting",
		    (uintptr_t)xfer, 0, 0, 0);
		SDT_PROBE2(usb, device, xfer, done, xfer, USBD_CANCELLED);
		return USBD_CANCELLED;
	}

	/* xfer is not valid after the transfer method unless synchronous */
	SDT_PROBE2(usb, device, pipe, transfer__start, pipe, xfer);
	do {
#ifdef DIAGNOSTIC
		xfer->ux_state = XFER_ONQU;
#endif
		/*
		 * Enqueue first; hand the xfer to the HC only if the
		 * pipe is idle (or does not serialise transfers).
		 */
		SIMPLEQ_INSERT_TAIL(&pipe->up_queue, xfer, ux_next);
		if (pipe->up_running && pipe->up_serialise) {
			err = USBD_IN_PROGRESS;
		} else {
			pipe->up_running = 1;
			err = USBD_NORMAL_COMPLETION;
		}
		if (err)
			break;
		err = pipe->up_methods->upm_transfer(xfer);
	} while (0);
	SDT_PROBE3(usb, device, pipe, transfer__done, pipe, xfer, err);

	usbd_unlock_pipe(pipe);

	if (err != USBD_IN_PROGRESS && err) {
		/*
		 * The transfer made it onto the pipe queue, but didn't get
		 * accepted by the HCD for some reason.  It needs removing
		 * from the pipe queue.
		 */
		USBHIST_LOG(usbdebug, "xfer failed: %jd, reinserting",
		    err, 0, 0, 0);
		usbd_lock_pipe(pipe);
		SDT_PROBE1(usb, device, xfer, preabort, xfer);
#ifdef DIAGNOSTIC
		xfer->ux_state = XFER_BUSY;
#endif
		SIMPLEQ_REMOVE_HEAD(&pipe->up_queue, ux_next);
		if (pipe->up_serialise)
			usbd_start_next(pipe);
		usbd_unlock_pipe(pipe);
	}

	/* Asynchronous: report the submission status to the caller. */
	if (!(flags & USBD_SYNCHRONOUS)) {
		USBHIST_LOG(usbdebug, "<- done xfer %#jx, not sync (err %jd)",
		    (uintptr_t)xfer, err, 0, 0);
		KASSERTMSG(err != USBD_NORMAL_COMPLETION,
		    "asynchronous xfer %p completed synchronously", xfer);
		return err;
	}

	if (err != USBD_IN_PROGRESS) {
		USBHIST_LOG(usbdebug, "<- done xfer %#jx, sync (err %jd)",
		    (uintptr_t)xfer, err, 0, 0);
		SDT_PROBE2(usb, device, xfer, done, xfer, err);
		return err;
	}

	/* Sync transfer, wait for completion. */
	usbd_lock_pipe(pipe);
	while (!xfer->ux_done) {
		if (pipe->up_dev->ud_bus->ub_usepolling)
			panic("usbd_transfer: not done");
		USBHIST_LOG(usbdebug, "<- sleeping on xfer %#jx",
		    (uintptr_t)xfer, 0, 0, 0);

		err = 0;
		/* Interruptible wait only if the caller asked for it. */
		if ((flags & USBD_SYNCHRONOUS_SIG) != 0) {
			err = cv_wait_sig(&xfer->ux_cv, pipe->up_dev->ud_bus->ub_lock);
		} else {
			cv_wait(&xfer->ux_cv, pipe->up_dev->ud_bus->ub_lock);
		}
		if (err) {
			/* Signalled: abort the xfer if it hasn't finished. */
			if (!xfer->ux_done) {
				SDT_PROBE1(usb, device, xfer, abort, xfer);
				pipe->up_methods->upm_abort(xfer);
			}
			break;
		}
	}
	SDT_PROBE2(usb, device, xfer, done, xfer, xfer->ux_status);
	/* XXX Race to read xfer->ux_status? */
	usbd_unlock_pipe(pipe);
	return xfer->ux_status;
}

/* Like usbd_transfer(), but waits for completion. */
usbd_status
usbd_sync_transfer(struct usbd_xfer *xfer)
{
	xfer->ux_flags |= USBD_SYNCHRONOUS;
	return usbd_transfer(xfer);
}

/* Like usbd_transfer(), but waits for completion and listens for signals.
 */
usbd_status
usbd_sync_transfer_sig(struct usbd_xfer *xfer)
{
	xfer->ux_flags |= USBD_SYNCHRONOUS | USBD_SYNCHRONOUS_SIG;
	return usbd_transfer(xfer);
}

/*
 * usbd_alloc_buffer(xfer, size)
 *
 *	Allocate the xfer's internal transfer buffer: DMA-able memory
 *	if the bus uses DMA, plain kmem otherwise.  Returns the kernel
 *	virtual address or NULL on failure.  Caller frees with
 *	usbd_free_buffer().
 */
static void *
usbd_alloc_buffer(struct usbd_xfer *xfer, uint32_t size)
{
	KASSERT(xfer->ux_buf == NULL);
	KASSERT(size != 0);

	xfer->ux_bufsize = 0;
#if NUSB_DMA > 0
	struct usbd_bus *bus = xfer->ux_bus;

	if (bus->ub_usedma) {
		usb_dma_t *dmap = &xfer->ux_dmabuf;

		KASSERT((bus->ub_dmaflags & USBMALLOC_COHERENT) == 0);
		int err = usb_allocmem(bus->ub_dmatag, size, 0, bus->ub_dmaflags, dmap);
		if (err) {
			return NULL;
		}
		xfer->ux_buf = KERNADDR(&xfer->ux_dmabuf, 0);
		xfer->ux_bufsize = size;

		return xfer->ux_buf;
	}
#endif
	/* Non-DMA bus: a plain kernel allocation suffices. */
	KASSERT(xfer->ux_bus->ub_usedma == false);
	xfer->ux_buf = kmem_alloc(size, KM_SLEEP);
	xfer->ux_bufsize = size;
	return xfer->ux_buf;
}

/*
 * usbd_free_buffer(xfer)
 *
 *	Release the buffer allocated by usbd_alloc_buffer(), via the
 *	matching allocator (usb_freemem for DMA, kmem_free otherwise).
 */
static void
usbd_free_buffer(struct usbd_xfer *xfer)
{
	KASSERT(xfer->ux_buf != NULL);
	KASSERT(xfer->ux_bufsize != 0);

	void *buf = xfer->ux_buf;
	uint32_t size = xfer->ux_bufsize;

	/* Clear the fields first so the xfer never points at freed memory. */
	xfer->ux_buf = NULL;
	xfer->ux_bufsize = 0;

#if NUSB_DMA > 0
	struct usbd_bus *bus = xfer->ux_bus;

	if (bus->ub_usedma) {
		usb_dma_t *dmap = &xfer->ux_dmabuf;

		usb_freemem(dmap);
		return;
	}
#endif
	KASSERT(xfer->ux_bus->ub_usedma == false);

	kmem_free(buf, size);
}

/* Return the xfer's internal transfer buffer (may be NULL). */
void *
usbd_get_buffer(struct usbd_xfer *xfer)
{
	return xfer->ux_buf;
}

/* Return the device's default (control endpoint 0) pipe. */
struct usbd_pipe *
usbd_get_pipe0(struct usbd_device *dev)
{

	return dev->ud_pipe0;
}

/*
 * usbd_alloc_xfer(dev, nframes)
 *
 *	Allocate an xfer from the bus and initialize its timeout
 *	callout, completion condvar, and abort task.  Returns NULL on
 *	failure.  Must be called from a sleepable context.
 */
static struct usbd_xfer *
usbd_alloc_xfer(struct usbd_device *dev, unsigned int nframes)
{
	struct usbd_xfer *xfer;

	USBHIST_FUNC();

	ASSERT_SLEEPABLE();

	xfer = dev->ud_bus->ub_methods->ubm_allocx(dev->ud_bus, nframes);
	if (xfer == NULL)
		goto out;

	xfer->ux_bus = dev->ud_bus;
	callout_init(&xfer->ux_callout, CALLOUT_MPSAFE);
	callout_setfunc(&xfer->ux_callout, usbd_xfer_timeout, xfer);
	cv_init(&xfer->ux_cv, "usbxfer");
	usb_init_task(&xfer->ux_aborttask, usbd_xfer_timeout_task, xfer,
	    USB_TASKQ_MPSAFE);

out:
	USBHIST_CALLARGS(usbdebug, "returns %#jx", (uintptr_t)xfer, 0, 0, 0);

	return xfer;
}

/*
 * usbd_free_xfer(xfer)
 *
 *	Free an xfer: release its buffer (if any), quiesce its timeout
 *	callout and abort task, and hand it back to the bus.
 */
static void
usbd_free_xfer(struct usbd_xfer *xfer)
{
	USBHIST_FUNC();
	USBHIST_CALLARGS(usbdebug, "%#jx", (uintptr_t)xfer, 0, 0, 0);

	if (xfer->ux_buf) {
		usbd_free_buffer(xfer);
	}

	/* Wait for any straggling timeout to complete. */
	mutex_enter(xfer->ux_bus->ub_lock);
	xfer->ux_timeout_reset = false;	/* do not resuscitate */
	callout_halt(&xfer->ux_callout, xfer->ux_bus->ub_lock);
	usb_rem_task_wait(xfer->ux_pipe->up_dev, &xfer->ux_aborttask,
	    USB_TASKQ_HC, xfer->ux_bus->ub_lock);
	mutex_exit(xfer->ux_bus->ub_lock);

	cv_destroy(&xfer->ux_cv);
	xfer->ux_bus->ub_methods->ubm_freex(xfer->ux_bus, xfer);
}

/*
 * usbd_create_xfer(pipe, len, flags, nframes, xp)
 *
 *	Allocate and initialize an xfer for `pipe' with an internal
 *	buffer of `len' bytes (if len > 0) and `nframes' isoc frames.
 *	On success stores the xfer in *xp and returns 0; otherwise
 *	returns an errno value.  Caller frees with usbd_destroy_xfer().
 */
int
usbd_create_xfer(struct usbd_pipe *pipe, size_t len, unsigned int flags,
    unsigned int nframes, struct usbd_xfer **xp)
{
	KASSERT(xp != NULL);
	void *buf = NULL;

	struct usbd_xfer *xfer = usbd_alloc_xfer(pipe->up_dev, nframes);
	if (xfer == NULL)
		return ENOMEM;

	xfer->ux_pipe = pipe;
	xfer->ux_flags = flags;
	xfer->ux_nframes = nframes;
	xfer->ux_methods = pipe->up_methods;

	if (len) {
		buf = usbd_alloc_buffer(xfer, len);
		if (!buf) {
			usbd_free_xfer(xfer);
			return ENOMEM;
		}
	}

	/* Give the pipe methods a chance at per-xfer setup. */
	if (xfer->ux_methods->upm_init) {
		int err = xfer->ux_methods->upm_init(xfer);
		if (err) {
			usbd_free_xfer(xfer);
			return err;
		}
	}

	*xp = xfer;
	SDT_PROBE5(usb, device, xfer, create,
	    xfer, pipe, len, flags, nframes);
	return 0;
}

/*
 * usbd_destroy_xfer(xfer)
 *
 *	Counterpart of usbd_create_xfer(): run the pipe methods'
 *	per-xfer teardown and free the xfer.
 */
void
usbd_destroy_xfer(struct usbd_xfer *xfer)
{

	SDT_PROBE1(usb, device, xfer, destroy, xfer);
	if (xfer->ux_methods->upm_fini)
		xfer->ux_methods->upm_fini(xfer);

	usbd_free_xfer(xfer);
}

/*
 * usbd_setup_xfer(xfer, priv, buffer, length, flags, timeout, callback)
 *
 *	Prepare a (non-control, non-isoc) xfer for submission with
 *	usbd_transfer().  Resets status to USBD_NOT_STARTED and clears
 *	the request flag.
 */
void
usbd_setup_xfer(struct usbd_xfer *xfer, void *priv, void *buffer,
    uint32_t length, uint16_t flags, uint32_t timeout, usbd_callback callback)
{
	KASSERT(xfer->ux_pipe);

	xfer->ux_priv = priv;
	xfer->ux_buffer = buffer;
	xfer->ux_length = length;
	xfer->ux_actlen = 0;
	xfer->ux_flags = flags;
	xfer->ux_timeout = timeout;
	xfer->ux_status = USBD_NOT_STARTED;
	xfer->ux_callback = callback;
	xfer->ux_rqflags &= ~URQ_REQUEST;
	xfer->ux_nframes = 0;
}

/*
 * usbd_setup_default_xfer(xfer, dev, priv, timeout, req, buffer,
 *     length, flags, callback)
 *
 *	Prepare a control request on the default pipe: copies *req into
 *	the xfer and marks it as a request (URQ_REQUEST).
 */
void
usbd_setup_default_xfer(struct usbd_xfer *xfer, struct usbd_device *dev,
    void *priv, uint32_t timeout, usb_device_request_t *req, void *buffer,
    uint32_t length, uint16_t flags, usbd_callback callback)
{
	KASSERT(xfer->ux_pipe == dev->ud_pipe0);

	xfer->ux_priv = priv;
	xfer->ux_buffer = buffer;
	xfer->ux_length = length;
	xfer->ux_actlen = 0;
	xfer->ux_flags = flags;
	xfer->ux_timeout = timeout;
	xfer->ux_status = USBD_NOT_STARTED;
	xfer->ux_callback = callback;
	xfer->ux_request = *req;
	xfer->ux_rqflags |= URQ_REQUEST;
	xfer->ux_nframes = 0;
}

/*
 * usbd_setup_isoc_xfer(xfer, priv, frlengths, nframes, flags, callback)
 *
 *	Prepare an isochronous xfer.  ux_buffer stays NULL (isoc always
 *	uses the internal buffer) and ux_length is the sum of the
 *	per-frame lengths.
 */
void
usbd_setup_isoc_xfer(struct usbd_xfer *xfer, void *priv, uint16_t *frlengths,
    uint32_t nframes, uint16_t flags, usbd_callback callback)
{
	xfer->ux_priv = priv;
	xfer->ux_buffer = NULL;
	xfer->ux_length = 0;
	xfer->ux_actlen = 0;
	xfer->ux_flags = flags;
	xfer->ux_timeout = USBD_NO_TIMEOUT;
	xfer->ux_status = USBD_NOT_STARTED;
	xfer->ux_callback = callback;
	xfer->ux_rqflags &= ~URQ_REQUEST;
	xfer->ux_frlengths = frlengths;
	xfer->ux_nframes = nframes;

	for (size_t i = 0; i < xfer->ux_nframes; i++)
		xfer->ux_length += xfer->ux_frlengths[i];
}

/*
 * usbd_get_xfer_status(xfer, priv, buffer, count, status)
 *
 *	Report the xfer's private cookie, data buffer, actual length,
 *	and status.  Any out-parameter may be NULL to skip it.
 */
void
usbd_get_xfer_status(struct usbd_xfer *xfer, void **priv,
    void **buffer, uint32_t *count, usbd_status *status)
{
	if (priv != NULL)
		*priv = xfer->ux_priv;
	if (buffer != NULL)
		*buffer = xfer->ux_buffer;
	if (count != NULL)
		*count = xfer->ux_actlen;
	if (status != NULL)
		*status = xfer->ux_status;
}

/* Return the device's current configuration descriptor (or NULL). */
usb_config_descriptor_t *
usbd_get_config_descriptor(struct usbd_device *dev)
{
	KASSERT(dev != NULL);

	return dev->ud_cdesc;
}

/* Return the interface's descriptor. */
usb_interface_descriptor_t *
usbd_get_interface_descriptor(struct usbd_interface *iface)
{
	KASSERT(iface != NULL);

	return iface->ui_idesc;
}

/* Return the device descriptor stored in the device structure. */
usb_device_descriptor_t *
usbd_get_device_descriptor(struct usbd_device *dev)
{
	KASSERT(dev != NULL);

	return &dev->ud_ddesc;
}

/*
 * usbd_interface2endpoint_descriptor(iface, index)
 *
 *	Return the descriptor of the interface's index'th endpoint, or
 *	NULL if the index is out of range.
 */
usb_endpoint_descriptor_t *
usbd_interface2endpoint_descriptor(struct usbd_interface *iface, uint8_t index)
{

	if (index >= iface->ui_idesc->bNumEndpoints)
		return NULL;
	return iface->ui_endpoints[index].ue_edesc;
}

/* Some drivers may wish to abort requests on the default pipe, *
 * but there is no mechanism for getting a handle on it.
 */
void
usbd_abort_default_pipe(struct usbd_device *device)
{
	usbd_abort_pipe(device->ud_pipe0);
}

/*
 * usbd_abort_pipe(pipe)
 *
 *	Cancel all transfers on the pipe (suspend) and then allow new
 *	submissions again (resume).
 */
void
usbd_abort_pipe(struct usbd_pipe *pipe)
{

	usbd_suspend_pipe(pipe);
	usbd_resume_pipe(pipe);
}

/*
 * usbd_suspend_pipe(pipe)
 *
 *	Abort all pending transfers and leave the pipe in the aborting
 *	state, in which new submissions are rejected, until
 *	usbd_resume_pipe().
 */
void
usbd_suspend_pipe(struct usbd_pipe *pipe)
{

	usbd_lock_pipe(pipe);
	usbd_ar_pipe(pipe);
	usbd_unlock_pipe(pipe);
}

/*
 * usbd_resume_pipe(pipe)
 *
 *	Clear the aborting state set by usbd_suspend_pipe().  The queue
 *	must be empty at this point.
 */
void
usbd_resume_pipe(struct usbd_pipe *pipe)
{

	usbd_lock_pipe(pipe);
	KASSERT(SIMPLEQ_EMPTY(&pipe->up_queue));
	pipe->up_aborting = 0;
	usbd_unlock_pipe(pipe);
}

/*
 * usbd_clear_endpoint_stall(pipe)
 *
 *	Synchronously clear a halted endpoint: reset the HC-side data
 *	toggle and issue CLEAR_FEATURE(ENDPOINT_HALT) to the device.
 */
usbd_status
usbd_clear_endpoint_stall(struct usbd_pipe *pipe)
{
	struct usbd_device *dev = pipe->up_dev;
	usbd_status err;

	USBHIST_FUNC(); USBHIST_CALLED(usbdebug);
	SDT_PROBE1(usb, device, pipe, clear__endpoint__stall, pipe);

	/*
	 * Clearing an endpoint stall resets the endpoint toggle, so
	 * do the same to the HC toggle.
	 */
	SDT_PROBE1(usb, device, pipe, clear__endpoint__toggle, pipe);
	pipe->up_methods->upm_cleartoggle(pipe);

	err = usbd_clear_endpoint_feature(dev,
	    pipe->up_endpoint->ue_edesc->bEndpointAddress, UF_ENDPOINT_HALT);
#if 0
XXX should we do this?
	if (!err) {
		pipe->state = USBD_PIPE_ACTIVE;
		/* XXX activate pipe */
	}
#endif
	return err;
}

/*
 * usbd_clear_endpoint_stall_task(arg)
 *
 *	Task version of usbd_clear_endpoint_stall(), scheduled by
 *	usbd_clear_endpoint_stall_async().  The request result is
 *	intentionally ignored (best effort).
 */
void
usbd_clear_endpoint_stall_task(void *arg)
{
	struct usbd_pipe *pipe = arg;
	struct usbd_device *dev = pipe->up_dev;

	SDT_PROBE1(usb, device, pipe, clear__endpoint__stall, pipe);
	SDT_PROBE1(usb, device, pipe, clear__endpoint__toggle, pipe);
	pipe->up_methods->upm_cleartoggle(pipe);

	(void)usbd_clear_endpoint_feature(dev,
	    pipe->up_endpoint->ue_edesc->bEndpointAddress, UF_ENDPOINT_HALT);
}

/* Schedule the pipe's async task to clear an endpoint stall. */
void
usbd_clear_endpoint_stall_async(struct usbd_pipe *pipe)
{
	usb_add_task(pipe->up_dev, &pipe->up_async_task, USB_TASKQ_DRIVER);
}

/* Reset only the host-controller-side data toggle for the pipe. */
void
usbd_clear_endpoint_toggle(struct usbd_pipe *pipe)
{

	SDT_PROBE1(usb, device, pipe, clear__endpoint__toggle, pipe);
	pipe->up_methods->upm_cleartoggle(pipe);
}

/* Store the interface's endpoint count in *count. */
usbd_status
usbd_endpoint_count(struct usbd_interface *iface, uint8_t *count)
{
	KASSERT(iface != NULL);
	KASSERT(iface->ui_idesc != NULL);

	*count = iface->ui_idesc->bNumEndpoints;
	return USBD_NORMAL_COMPLETION;
}

/*
 * usbd_interface_count(dev, count)
 *
 *	Store the configured device's interface count in *count;
 *	USBD_NOT_CONFIGURED if the device has no configuration.
 */
usbd_status
usbd_interface_count(struct usbd_device *dev, uint8_t *count)
{

	if (dev->ud_cdesc == NULL)
		return USBD_NOT_CONFIGURED;
	*count = dev->ud_cdesc->bNumInterface;
	return USBD_NORMAL_COMPLETION;
}

/* Store the interface's owning device in *dev. */
void
usbd_interface2device_handle(struct usbd_interface *iface,
    struct usbd_device **dev)
{

	*dev = iface->ui_dev;
}

/*
 * usbd_device2interface_handle(dev, ifaceno, iface)
 *
 *	Look up interface number `ifaceno' of the configured device;
 *	USBD_NOT_CONFIGURED / USBD_INVAL on bad state or index.
 */
usbd_status
usbd_device2interface_handle(struct usbd_device *dev,
    uint8_t ifaceno, struct usbd_interface **iface)
{

	if (dev->ud_cdesc == NULL)
		return USBD_NOT_CONFIGURED;
	if (ifaceno >= dev->ud_cdesc->bNumInterface)
		return USBD_INVAL;
	*iface = &dev->ud_ifaces[ifaceno];
	return USBD_NORMAL_COMPLETION;
}

/* Return the pipe's owning device. */
struct usbd_device *
usbd_pipe2device_handle(struct usbd_pipe *pipe)
{
	KASSERT(pipe != NULL);

	return pipe->up_dev;
}

/* XXXX use altno */
/*
 * usbd_set_interface(iface, altidx)
 *
 *	Select alternate setting `altidx' of the interface: rebuild the
 *	in-kernel interface data and issue SET_INTERFACE to the device.
 *	Fails if any pipe currently holds a reference on the interface.
 */
usbd_status
usbd_set_interface(struct usbd_interface *iface, int altidx)
{
	bool locked = false;
	usb_device_request_t req;
	usbd_status err;

	USBHIST_FUNC();
	USBHIST_CALLARGS(usbdebug, "iface %#jx", (uintptr_t)iface, 0, 0, 0);

	err = usbd_iface_lock(iface);
	if (err)
		goto out;
	locked = true;

	err = usbd_fill_iface_data(iface->ui_dev, iface->ui_index, altidx);
	if (err)
		goto out;

	req.bmRequestType = UT_WRITE_INTERFACE;
	req.bRequest = UR_SET_INTERFACE;
	USETW(req.wValue, iface->ui_idesc->bAlternateSetting);
	USETW(req.wIndex, iface->ui_idesc->bInterfaceNumber);
	USETW(req.wLength, 0);
	err = usbd_do_request(iface->ui_dev, &req, 0);

out:	/* XXX back out iface data? */
	if (locked)
		usbd_iface_unlock(iface);
	return err;
}

/*
 * usbd_get_no_alts(cdesc, ifaceno)
 *
 *	Count the alternate settings of interface `ifaceno' by walking
 *	the raw configuration descriptor.  Malformed descriptors end
 *	the walk early; the count saturates at INT_MAX.
 */
int
usbd_get_no_alts(usb_config_descriptor_t *cdesc, int ifaceno)
{
	char *p = (char *)cdesc;
	char *end = p + UGETW(cdesc->wTotalLength);
	usb_descriptor_t *desc;
	usb_interface_descriptor_t *idesc;
	int n;

	for (n = 0; end - p >= sizeof(*desc); p += desc->bLength) {
		desc = (usb_descriptor_t *)p;
		/* Reject short or overlong descriptors. */
		if (desc->bLength < sizeof(*desc) || desc->bLength > end - p)
			break;
		if (desc->bDescriptorType != UDESC_INTERFACE)
			continue;
		if (desc->bLength < sizeof(*idesc))
			break;
		idesc = (usb_interface_descriptor_t *)desc;
		if (idesc->bInterfaceNumber == ifaceno) {
			n++;
			if (n == INT_MAX)
				break;
		}
	}
	return n;
}

/* Return the interface's current alternate-setting index. */
int
usbd_get_interface_altindex(struct usbd_interface *iface)
{
	return iface->ui_altindex;
}

/*
 * usbd_get_interface(iface, aiface)
 *
 *	Issue GET_INTERFACE to the device and store the one-byte
 *	current alternate setting in *aiface.
 */
usbd_status
usbd_get_interface(struct usbd_interface *iface, uint8_t *aiface)
{
	usb_device_request_t req;

	req.bmRequestType = UT_READ_INTERFACE;
	req.bRequest = UR_GET_INTERFACE;
	USETW(req.wValue, 0);
	USETW(req.wIndex, iface->ui_idesc->bInterfaceNumber);
	USETW(req.wLength, 1);
	return usbd_do_request(iface->ui_dev, &req, aiface);
}

/*** Internal routines ***/

/* Dequeue all pipe operations, called with bus lock held. */
Static void
usbd_ar_pipe(struct usbd_pipe *pipe)
{
	struct usbd_xfer *xfer;

	USBHIST_FUNC();
	USBHIST_CALLARGS(usbdebug, "pipe = %#jx", (uintptr_t)pipe, 0, 0, 0);
	SDT_PROBE1(usb, device, pipe, abort__start, pipe);

	ASSERT_SLEEPABLE();
	KASSERT(mutex_owned(pipe->up_dev->ud_bus->ub_lock));

	/*
	 * Allow only one thread at a time to abort the pipe, so we
	 * don't get confused if upm_abort drops the lock in the middle
	 * of the abort to wait for hardware completion softints to
	 * stop using the xfer before returning.
	 */
	KASSERTMSG(pipe->up_abortlwp == NULL, "pipe->up_abortlwp=%p",
	    pipe->up_abortlwp);
	pipe->up_abortlwp = curlwp;

#ifdef USB_DEBUG
	if (usbdebug > 5)
		usbd_dump_queue(pipe);
#endif
	/* Stop new activity and mark the pipe aborting. */
	pipe->up_repeat = 0;
	pipe->up_running = 0;
	pipe->up_aborting = 1;
	while ((xfer = SIMPLEQ_FIRST(&pipe->up_queue)) != NULL) {
		USBHIST_LOG(usbdebug, "pipe = %#jx xfer = %#jx "
		    "(methods = %#jx)", (uintptr_t)pipe, (uintptr_t)xfer,
		    (uintptr_t)pipe->up_methods, 0);
		if (xfer->ux_status == USBD_NOT_STARTED) {
			/* Never reached the HC: just dequeue it. */
			SDT_PROBE1(usb, device, xfer, preabort, xfer);
#ifdef DIAGNOSTIC
			xfer->ux_state = XFER_BUSY;
#endif
			SIMPLEQ_REMOVE_HEAD(&pipe->up_queue, ux_next);
		} else {
			/* Make the HC abort it (and invoke the callback).
			 */
			SDT_PROBE1(usb, device, xfer, abort, xfer);
			pipe->up_methods->upm_abort(xfer);
			/* Wait until the callback for this xfer has run. */
			while (pipe->up_callingxfer == xfer) {
				USBHIST_LOG(usbdebug, "wait for callback"
				    "pipe = %#jx xfer = %#jx",
				    (uintptr_t)pipe, (uintptr_t)xfer, 0, 0);
				cv_wait(&pipe->up_callingcv,
				    pipe->up_dev->ud_bus->ub_lock);
			}
			/* XXX only for non-0 usbd_clear_endpoint_stall(pipe); */
		}
	}

	/*
	 * There may be an xfer callback already in progress which was
	 * taken off the queue before we got to it.  We must wait for
	 * the callback to finish before returning control to the
	 * caller.
	 */
	while (pipe->up_callingxfer) {
		USBHIST_LOG(usbdebug, "wait for callback"
		    "pipe = %#jx xfer = %#jx",
		    (uintptr_t)pipe, (uintptr_t)pipe->up_callingxfer, 0, 0);
		cv_wait(&pipe->up_callingcv, pipe->up_dev->ud_bus->ub_lock);
	}

	KASSERT(mutex_owned(pipe->up_dev->ud_bus->ub_lock));
	KASSERTMSG(pipe->up_abortlwp == curlwp, "pipe->up_abortlwp=%p",
	    pipe->up_abortlwp);
	pipe->up_abortlwp = NULL;

	SDT_PROBE1(usb, device, pipe, abort__done, pipe);
}

/*
 * usb_transfer_complete(xfer)
 *
 *	Called with USB lock held (or with the bus polling) by the HC
 *	driver when xfer has finished: dequeue it (unless the pipe is
 *	repeating), copy data back for IN transfers, run the completion
 *	callback, and wake synchronous waiters.
 */
void
usb_transfer_complete(struct usbd_xfer *xfer)
{
	struct usbd_pipe *pipe = xfer->ux_pipe;
	struct usbd_bus *bus = pipe->up_dev->ud_bus;
	int sync = xfer->ux_flags & USBD_SYNCHRONOUS;
	int erred;
	int polling = bus->ub_usepolling;
	int repeat = pipe->up_repeat;

	USBHIST_FUNC();
	USBHIST_CALLARGS(usbdebug, "pipe = %#jx xfer = %#jx status = %jd "
	    "actlen = %jd", (uintptr_t)pipe, (uintptr_t)xfer, xfer->ux_status,
	    xfer->ux_actlen);

	KASSERT(polling || mutex_owned(pipe->up_dev->ud_bus->ub_lock));
	KASSERTMSG(xfer->ux_state == XFER_ONQU, "xfer %p state is %x", xfer,
	    xfer->ux_state);
	KASSERT(pipe != NULL);

	/*
	 * If device is known to miss out ack, then pretend that
	 * output timeout is a success.  Userland should handle
	 * the logic to verify that the operation succeeded.
	 */
	if (pipe->up_dev->ud_quirks &&
	    pipe->up_dev->ud_quirks->uq_flags & UQ_MISS_OUT_ACK &&
	    xfer->ux_status == USBD_TIMEOUT &&
	    !usbd_xfer_isread(xfer)) {
		USBHIST_LOG(usbdebug, "Possible output ack miss for xfer %#jx: "
		    "hiding write timeout to %jd.%jd for %ju bytes written",
		    (uintptr_t)xfer, curlwp->l_proc->p_pid, curlwp->l_lid,
		    xfer->ux_length);

		xfer->ux_status = USBD_NORMAL_COMPLETION;
		xfer->ux_actlen = xfer->ux_length;
	}

	erred = xfer->ux_status == USBD_CANCELLED ||
	    xfer->ux_status == USBD_TIMEOUT;

	if (!repeat) {
		/* Remove request from queue. */

		KASSERTMSG(!SIMPLEQ_EMPTY(&pipe->up_queue),
		    "pipe %p is empty, but xfer %p wants to complete", pipe,
		    xfer);
		KASSERTMSG(xfer == SIMPLEQ_FIRST(&pipe->up_queue),
		    "xfer %p is not start of queue (%p is at start)", xfer,
		    SIMPLEQ_FIRST(&pipe->up_queue));

#ifdef DIAGNOSTIC
		xfer->ux_state = XFER_BUSY;
#endif
		SIMPLEQ_REMOVE_HEAD(&pipe->up_queue, ux_next);
	}
	USBHIST_LOG(usbdebug, "xfer %#jx: repeat %jd new head = %#jx",
	    (uintptr_t)xfer, repeat, (uintptr_t)SIMPLEQ_FIRST(&pipe->up_queue),
	    0);

	/* Count completed transfers. */
	++pipe->up_dev->ud_bus->ub_stats.uds_requests
	    [pipe->up_endpoint->ue_edesc->bmAttributes & UE_XFERTYPE];

	xfer->ux_done = 1;
	/* A short transfer is an error unless the caller allowed it. */
	if (!xfer->ux_status && xfer->ux_actlen < xfer->ux_length &&
	    !(xfer->ux_flags & USBD_SHORT_XFER_OK)) {
		USBHIST_LOG(usbdebug, "short transfer %jd < %jd",
		    xfer->ux_actlen, xfer->ux_length, 0, 0);
		xfer->ux_status = USBD_SHORT_XFER;
	}

	USBHIST_LOG(usbdebug, "xfer %#jx doing done %#jx", (uintptr_t)xfer,
	    (uintptr_t)pipe->up_methods->upm_done, 0, 0);
	SDT_PROBE2(usb, device, xfer, done, xfer, xfer->ux_status);
	pipe->up_methods->upm_done(xfer);

	/* Copy staged data back to the caller's buffer for IN transfers. */
	if (xfer->ux_length != 0 && xfer->ux_buffer != xfer->ux_buf) {
		KDASSERTMSG(xfer->ux_actlen <= xfer->ux_length,
		    "actlen %d length %d",xfer->ux_actlen, xfer->ux_length);

		/* Only if IN transfer */
		if (usbd_xfer_isread(xfer)) {
			memcpy(xfer->ux_buffer, xfer->ux_buf, xfer->ux_actlen);
		}
	}

	USBHIST_LOG(usbdebug, "xfer %#jx doing callback %#jx status %jd",
	    (uintptr_t)xfer, (uintptr_t)xfer->ux_callback, xfer->ux_status, 0);

	if (xfer->ux_callback) {
		if (!polling) {
			/*
			 * Drop the bus lock for the callback, recording
			 * the xfer so usbd_ar_pipe can wait for us.
			 */
			KASSERT(pipe->up_callingxfer == NULL);
			pipe->up_callingxfer = xfer;
			mutex_exit(pipe->up_dev->ud_bus->ub_lock);
			if (!(pipe->up_flags & USBD_MPSAFE))
				KERNEL_LOCK(1, curlwp);
		}

		xfer->ux_callback(xfer, xfer->ux_priv, xfer->ux_status);

		if (!polling) {
			if (!(pipe->up_flags & USBD_MPSAFE))
				KERNEL_UNLOCK_ONE(curlwp);
			mutex_enter(pipe->up_dev->ud_bus->ub_lock);
			KASSERT(pipe->up_callingxfer == xfer);
			pipe->up_callingxfer = NULL;
			cv_broadcast(&pipe->up_callingcv);
		}
	}

	if (sync && !polling) {
		USBHIST_LOG(usbdebug, "<- done xfer %#jx, wakeup",
		    (uintptr_t)xfer, 0, 0, 0);
		cv_broadcast(&xfer->ux_cv);
	}

	if (repeat) {
		/* Repeating pipe: recycle the xfer for the next round. */
		xfer->ux_actlen = 0;
		xfer->ux_status = USBD_NOT_STARTED;
	} else {
1217 /* XXX should we stop the queue on all errors? */ 1218 if (erred && pipe->up_iface != NULL) /* not control pipe */ 1219 pipe->up_running = 0; 1220 } 1221 if (pipe->up_running && pipe->up_serialise) 1222 usbd_start_next(pipe); 1223 } 1224 1225 /* Called with USB lock held. */ 1226 void 1227 usbd_start_next(struct usbd_pipe *pipe) 1228 { 1229 struct usbd_xfer *xfer; 1230 usbd_status err; 1231 1232 USBHIST_FUNC(); 1233 1234 KASSERT(pipe != NULL); 1235 KASSERT(pipe->up_methods != NULL); 1236 KASSERT(pipe->up_methods->upm_start != NULL); 1237 KASSERT(pipe->up_serialise == true); 1238 1239 int polling = pipe->up_dev->ud_bus->ub_usepolling; 1240 KASSERT(polling || mutex_owned(pipe->up_dev->ud_bus->ub_lock)); 1241 1242 /* Get next request in queue. */ 1243 xfer = SIMPLEQ_FIRST(&pipe->up_queue); 1244 USBHIST_CALLARGS(usbdebug, "pipe = %#jx, xfer = %#jx", (uintptr_t)pipe, 1245 (uintptr_t)xfer, 0, 0); 1246 if (xfer == NULL) { 1247 pipe->up_running = 0; 1248 } else { 1249 SDT_PROBE2(usb, device, pipe, start, pipe, xfer); 1250 err = pipe->up_methods->upm_start(xfer); 1251 1252 if (err != USBD_IN_PROGRESS) { 1253 USBHIST_LOG(usbdebug, "error = %jd", err, 0, 0, 0); 1254 pipe->up_running = 0; 1255 /* XXX do what? 
*/ 1256 } 1257 } 1258 1259 KASSERT(polling || mutex_owned(pipe->up_dev->ud_bus->ub_lock)); 1260 } 1261 1262 usbd_status 1263 usbd_do_request(struct usbd_device *dev, usb_device_request_t *req, void *data) 1264 { 1265 1266 return usbd_do_request_flags(dev, req, data, 0, 0, 1267 USBD_DEFAULT_TIMEOUT); 1268 } 1269 1270 usbd_status 1271 usbd_do_request_flags(struct usbd_device *dev, usb_device_request_t *req, 1272 void *data, uint16_t flags, int *actlen, uint32_t timeout) 1273 { 1274 size_t len = UGETW(req->wLength); 1275 1276 return usbd_do_request_len(dev, req, len, data, flags, actlen, timeout); 1277 } 1278 1279 usbd_status 1280 usbd_do_request_len(struct usbd_device *dev, usb_device_request_t *req, 1281 size_t len, void *data, uint16_t flags, int *actlen, uint32_t timeout) 1282 { 1283 struct usbd_xfer *xfer; 1284 usbd_status err; 1285 1286 KASSERT(len >= UGETW(req->wLength)); 1287 1288 USBHIST_FUNC(); 1289 USBHIST_CALLARGS(usbdebug, "dev=%#jx req=%jx flags=%jx len=%jx", 1290 (uintptr_t)dev, (uintptr_t)req, flags, len); 1291 1292 ASSERT_SLEEPABLE(); 1293 1294 SDT_PROBE5(usb, device, request, start, 1295 dev, req, len, flags, timeout); 1296 1297 int error = usbd_create_xfer(dev->ud_pipe0, len, 0, 0, &xfer); 1298 if (error) { 1299 SDT_PROBE7(usb, device, request, done, 1300 dev, req, /*actlen*/0, flags, timeout, data, USBD_NOMEM); 1301 return USBD_NOMEM; 1302 } 1303 1304 usbd_setup_default_xfer(xfer, dev, 0, timeout, req, data, 1305 UGETW(req->wLength), flags, NULL); 1306 KASSERT(xfer->ux_pipe == dev->ud_pipe0); 1307 err = usbd_sync_transfer(xfer); 1308 #if defined(USB_DEBUG) || defined(DIAGNOSTIC) 1309 if (xfer->ux_actlen > xfer->ux_length) { 1310 USBHIST_LOG(usbdebug, "overrun addr = %jd type = 0x%02jx", 1311 dev->ud_addr, xfer->ux_request.bmRequestType, 0, 0); 1312 USBHIST_LOG(usbdebug, " req = 0x%02jx val = %jd " 1313 "index = %jd", 1314 xfer->ux_request.bRequest, UGETW(xfer->ux_request.wValue), 1315 UGETW(xfer->ux_request.wIndex), 0); 1316 USBHIST_LOG(usbdebug, " 
rlen = %jd length = %jd " 1317 "actlen = %jd", 1318 UGETW(xfer->ux_request.wLength), 1319 xfer->ux_length, xfer->ux_actlen, 0); 1320 } 1321 #endif 1322 if (actlen != NULL) 1323 *actlen = xfer->ux_actlen; 1324 1325 usbd_destroy_xfer(xfer); 1326 1327 SDT_PROBE7(usb, device, request, done, 1328 dev, req, xfer->ux_actlen, flags, timeout, data, err); 1329 1330 if (err) { 1331 USBHIST_LOG(usbdebug, "returning err = %jd", err, 0, 0, 0); 1332 } 1333 return err; 1334 } 1335 1336 static void 1337 usbd_request_async_cb(struct usbd_xfer *xfer, void *priv, usbd_status status) 1338 { 1339 usbd_destroy_xfer(xfer); 1340 } 1341 1342 /* 1343 * Execute a request without waiting for completion. 1344 * Can be used from interrupt context. 1345 */ 1346 usbd_status 1347 usbd_request_async(struct usbd_device *dev, struct usbd_xfer *xfer, 1348 usb_device_request_t *req, void *priv, usbd_callback callback) 1349 { 1350 usbd_status err; 1351 1352 if (callback == NULL) 1353 callback = usbd_request_async_cb; 1354 1355 usbd_setup_default_xfer(xfer, dev, priv, 1356 USBD_DEFAULT_TIMEOUT, req, NULL, UGETW(req->wLength), 0, 1357 callback); 1358 err = usbd_transfer(xfer); 1359 if (err != USBD_IN_PROGRESS) { 1360 usbd_destroy_xfer(xfer); 1361 return (err); 1362 } 1363 return (USBD_NORMAL_COMPLETION); 1364 } 1365 1366 const struct usbd_quirks * 1367 usbd_get_quirks(struct usbd_device *dev) 1368 { 1369 #ifdef DIAGNOSTIC 1370 if (dev == NULL) { 1371 printf("usbd_get_quirks: dev == NULL\n"); 1372 return 0; 1373 } 1374 #endif 1375 return dev->ud_quirks; 1376 } 1377 1378 /* XXX do periodic free() of free list */ 1379 1380 /* 1381 * Called from keyboard driver when in polling mode. 1382 */ 1383 void 1384 usbd_dopoll(struct usbd_interface *iface) 1385 { 1386 iface->ui_dev->ud_bus->ub_methods->ubm_dopoll(iface->ui_dev->ud_bus); 1387 } 1388 1389 /* 1390 * This is for keyboard driver as well, which only operates in polling 1391 * mode from the ask root, etc., prompt and from DDB. 
1392 */ 1393 void 1394 usbd_set_polling(struct usbd_device *dev, int on) 1395 { 1396 if (on) 1397 dev->ud_bus->ub_usepolling++; 1398 else 1399 dev->ud_bus->ub_usepolling--; 1400 1401 /* Kick the host controller when switching modes */ 1402 mutex_enter(dev->ud_bus->ub_lock); 1403 dev->ud_bus->ub_methods->ubm_softint(dev->ud_bus); 1404 mutex_exit(dev->ud_bus->ub_lock); 1405 } 1406 1407 1408 usb_endpoint_descriptor_t * 1409 usbd_get_endpoint_descriptor(struct usbd_interface *iface, uint8_t address) 1410 { 1411 struct usbd_endpoint *ep; 1412 int i; 1413 1414 for (i = 0; i < iface->ui_idesc->bNumEndpoints; i++) { 1415 ep = &iface->ui_endpoints[i]; 1416 if (ep->ue_edesc->bEndpointAddress == address) 1417 return iface->ui_endpoints[i].ue_edesc; 1418 } 1419 return NULL; 1420 } 1421 1422 /* 1423 * usbd_ratecheck() can limit the number of error messages that occurs. 1424 * When a device is unplugged it may take up to 0.25s for the hub driver 1425 * to notice it. If the driver continuously tries to do I/O operations 1426 * this can generate a large number of messages. 1427 */ 1428 int 1429 usbd_ratecheck(struct timeval *last) 1430 { 1431 static struct timeval errinterval = { 0, 250000 }; /* 0.25 s*/ 1432 1433 return ratecheck(last, &errinterval); 1434 } 1435 1436 /* 1437 * Search for a vendor/product pair in an array. The item size is 1438 * given as an argument. 
 */
const struct usb_devno *
usb_match_device(const struct usb_devno *tbl, u_int nentries, u_int sz,
    uint16_t vendor, uint16_t product)
{
	while (nentries-- > 0) {
		uint16_t tproduct = tbl->ud_product;
		if (tbl->ud_vendor == vendor &&
		    (tproduct == product || tproduct == USB_PRODUCT_ANY))
			return tbl;
		/* Step by the caller-supplied stride, not sizeof(*tbl). */
		tbl = (const struct usb_devno *)((const char *)tbl + sz);
	}
	return NULL;
}

/* Fetch string descriptor index si into buf, UTF-8 encoded. */
usbd_status
usbd_get_string(struct usbd_device *dev, int si, char *buf)
{
	return usbd_get_string0(dev, si, buf, 1);
}

/*
 * usbd_get_string0(dev, si, buf, unicode)
 *
 *	Fetch string descriptor index si from dev into buf, NUL
 *	terminated.  If unicode is nonzero, characters are encoded as
 *	UTF-8; otherwise (COMPAT_30 kernels only) non-ASCII characters
 *	are replaced with '?'.  On the first call, also selects and
 *	caches the device's default language id.
 *
 *	NOTE(review): buf is written without an explicit bound here --
 *	presumably callers size it for the maximum encoded descriptor
 *	length; verify against callers.
 */
usbd_status
usbd_get_string0(struct usbd_device *dev, int si, char *buf, int unicode)
{
	/* Some devices store UTF-16 code units byte-swapped (quirk). */
	int swap = dev->ud_quirks->uq_flags & UQ_SWAP_UNICODE;
	usb_string_descriptor_t us;
	char *s;
	int i, n;
	uint16_t c;
	usbd_status err;
	int size;

	USBHIST_FUNC(); USBHIST_CALLED(usbdebug);

	buf[0] = '\0';
	if (si == 0)
		return USBD_INVAL;
	if (dev->ud_quirks->uq_flags & UQ_NO_STRINGS)
		return USBD_STALLED;
	if (dev->ud_langid == USBD_NOLANG) {
		/* Set up default language */
		err = usbd_get_string_desc(dev, USB_LANGUAGE_TABLE, 0, &us,
		    &size);
		if (err || size < 4) {
			USBHIST_LOG(usbdebug, "getting lang failed, using 0",
			    0, 0, 0, 0);
			dev->ud_langid = 0; /* Well, just pick something then */
		} else {
			/* Pick the first language as the default. */
			dev->ud_langid = UGETW(us.bString[0]);
		}
	}
	err = usbd_get_string_desc(dev, si, dev->ud_langid, &us, &size);
	if (err)
		return err;
	s = buf;
	/* size counts bytes including the 2-byte header; n = code units. */
	n = size / 2 - 1;
	if (unicode) {
		for (i = 0; i < n; i++) {
			c = UGETW(us.bString[i]);
			if (swap)
				c = (c >> 8) | (c << 8);
			s += wput_utf8(s, 3, c);
		}
		*s++ = 0;
	}
#ifdef COMPAT_30
	else {
		/* Legacy 8-bit encoding: non-ASCII becomes '?'. */
		for (i = 0; i < n; i++) {
			c = UGETW(us.bString[i]);
			if (swap)
				c = (c >> 8) | (c << 8);
			*s++ = (c < 0x80) ? c : '?';
		}
		*s++ = 0;
	}
#endif
	return USBD_NORMAL_COMPLETION;
}

/*
 * usbd_xfer_trycomplete(xfer)
 *
 *	Try to claim xfer for completion.  Return true if successful,
 *	false if the xfer has been synchronously aborted or has timed
 *	out.
 *
 *	If this returns true, caller is responsible for setting
 *	xfer->ux_status and calling usb_transfer_complete.  To be used
 *	in a host controller interrupt handler.
 *
 *	Caller must either hold the bus lock or have the bus in polling
 *	mode.  If this succeeds, caller must proceed to call
 *	usb_complete_transfer under the bus lock or with polling
 *	enabled -- must not release and reacquire the bus lock in the
 *	meantime.  Failing to heed this rule may lead to catastrophe
 *	with abort or timeout.
 */
bool
usbd_xfer_trycomplete(struct usbd_xfer *xfer)
{
	struct usbd_bus *bus __diagused = xfer->ux_bus;

	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));

	/*
	 * If software has completed it, either by synchronous abort or
	 * by timeout, too late.
	 */
	if (xfer->ux_status != USBD_IN_PROGRESS)
		return false;

	/*
	 * We are completing the xfer.  Cancel the timeout if we can,
	 * but only asynchronously.  See usbd_xfer_cancel_timeout_async
	 * for why we need not wait for the callout or task here.
	 */
	usbd_xfer_cancel_timeout_async(xfer);

	/* Success!  Note: Caller must set xfer->ux_status afterward. */
	return true;
}

/*
 * usbd_xfer_abort(xfer)
 *
 *	Try to claim xfer to abort.  If successful, mark it completed
 *	with USBD_CANCELLED and call the bus-specific method to abort
 *	at the hardware level.
 *
 *	To be called in thread context from struct
 *	usbd_pipe_methods::upm_abort.
 *
 *	Caller must hold the bus lock.
 */
void
usbd_xfer_abort(struct usbd_xfer *xfer)
{
	struct usbd_bus *bus = xfer->ux_bus;

	KASSERT(mutex_owned(bus->ub_lock));

	/*
	 * If host controller interrupt or timer interrupt has
	 * completed it, too late.  But the xfer cannot be
	 * cancelled already -- only one caller can synchronously
	 * abort.
	 */
	KASSERT(xfer->ux_status != USBD_CANCELLED);
	if (xfer->ux_status != USBD_IN_PROGRESS)
		return;

	/*
	 * Cancel the timeout if we can, but only asynchronously; see
	 * usbd_xfer_cancel_timeout_async for why we need not wait for
	 * the callout or task here.
	 */
	usbd_xfer_cancel_timeout_async(xfer);

	/*
	 * We beat everyone else.  Claim the status as cancelled, do
	 * the bus-specific dance to abort the hardware, and complete
	 * the xfer.
	 */
	xfer->ux_status = USBD_CANCELLED;
	bus->ub_methods->ubm_abortx(xfer);
	usb_transfer_complete(xfer);
}

/*
 * usbd_xfer_timeout(xfer)
 *
 *	Called at IPL_SOFTCLOCK when too much time has elapsed waiting
 *	for xfer to complete.  Since we can't abort the xfer at
 *	IPL_SOFTCLOCK, defer to a usb_task to run it in thread context,
 *	unless the xfer has completed or aborted concurrently -- and if
 *	the xfer has also been resubmitted, take care of rescheduling
 *	the callout.
 */
static void
usbd_xfer_timeout(void *cookie)
{
	struct usbd_xfer *xfer = cookie;
	struct usbd_bus *bus = xfer->ux_bus;
	struct usbd_device *dev = xfer->ux_pipe->up_dev;

	/* Acquire the lock so we can transition the timeout state. */
	mutex_enter(bus->ub_lock);

	/*
	 * Use usbd_xfer_probe_timeout to check whether the timeout is
	 * still valid, or to reschedule the callout if necessary.  If
	 * it is still valid, schedule the task.
	 */
	if (usbd_xfer_probe_timeout(xfer))
		usb_add_task(dev, &xfer->ux_aborttask, USB_TASKQ_HC);

	/*
	 * Notify usbd_xfer_cancel_timeout_async that we may have
	 * scheduled the task.  This causes callout_invoking to return
	 * false in usbd_xfer_cancel_timeout_async so that it can tell
	 * which stage in the callout->task->abort process we're at.
	 */
	callout_ack(&xfer->ux_callout);

	/* All done -- release the lock. */
	mutex_exit(bus->ub_lock);
}

/*
 * usbd_xfer_timeout_task(xfer)
 *
 *	Called in thread context when too much time has elapsed waiting
 *	for xfer to complete.  Abort the xfer with USBD_TIMEOUT, unless
 *	it has completed or aborted concurrently -- and if the xfer has
 *	also been resubmitted, take care of rescheduling the callout.
 */
static void
usbd_xfer_timeout_task(void *cookie)
{
	struct usbd_xfer *xfer = cookie;
	struct usbd_bus *bus = xfer->ux_bus;

	/* Acquire the lock so we can transition the timeout state. */
	mutex_enter(bus->ub_lock);

	/*
	 * Use usbd_xfer_probe_timeout to check whether the timeout is
	 * still valid, or to reschedule the callout if necessary.  If
	 * it is not valid -- the timeout has been asynchronously
	 * cancelled, or the xfer has already been resubmitted -- then
	 * we're done here.
	 */
	if (!usbd_xfer_probe_timeout(xfer))
		goto out;

	/*
	 * May have completed or been aborted, but we're the only one
	 * who can time it out.  If it has completed or been aborted,
	 * no need to timeout.
	 */
	KASSERT(xfer->ux_status != USBD_TIMEOUT);
	if (xfer->ux_status != USBD_IN_PROGRESS)
		goto out;

	/*
	 * We beat everyone else.  Claim the status as timed out, do
	 * the bus-specific dance to abort the hardware, and complete
	 * the xfer.
	 */
	xfer->ux_status = USBD_TIMEOUT;
	bus->ub_methods->ubm_abortx(xfer);
	usb_transfer_complete(xfer);

out:	/* All done -- release the lock. */
	mutex_exit(bus->ub_lock);
}

/*
 * usbd_xfer_probe_timeout(xfer)
 *
 *	Probe the status of xfer's timeout.  Acknowledge and process a
 *	request to reschedule.  Return true if the timeout is still
 *	valid and the caller should take further action (queueing a
 *	task or aborting the xfer), false if it must stop here.
 */
static bool
usbd_xfer_probe_timeout(struct usbd_xfer *xfer)
{
	struct usbd_bus *bus = xfer->ux_bus;
	bool valid;

	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));

	/* The timeout must be set. */
	KASSERT(xfer->ux_timeout_set);

	/*
	 * Neither callout nor task may be pending; they execute
	 * alternately in lock step.
	 */
	KASSERT(!callout_pending(&xfer->ux_callout));
	KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask));

	/* There are a few cases... */
	if (bus->ub_methods->ubm_dying(bus)) {
		/* Host controller dying.  Drop it all on the floor. */
		xfer->ux_timeout_set = false;
		xfer->ux_timeout_reset = false;
		valid = false;
	} else if (xfer->ux_timeout_reset) {
		/*
		 * The xfer completed _and_ got resubmitted while we
		 * waited for the lock.  Acknowledge the request to
		 * reschedule, and reschedule it if there is a timeout
		 * and the bus is not polling.
		 */
		xfer->ux_timeout_reset = false;
		if (xfer->ux_timeout && !bus->ub_usepolling) {
			KASSERT(xfer->ux_timeout_set);
			callout_schedule(&xfer->ux_callout,
			    mstohz(xfer->ux_timeout));
		} else {
			/* No more callout or task scheduled. */
			xfer->ux_timeout_set = false;
		}
		valid = false;
	} else if (xfer->ux_status != USBD_IN_PROGRESS) {
		/*
		 * The xfer has completed by hardware completion or by
		 * software abort, and has not been resubmitted, so the
		 * timeout must be unset, and is no longer valid for
		 * the caller.
		 */
		xfer->ux_timeout_set = false;
		valid = false;
	} else {
		/*
		 * The xfer has not yet completed, so the timeout is
		 * valid.
		 */
		valid = true;
	}

	/* Any reset must have been processed. */
	KASSERT(!xfer->ux_timeout_reset);

	/*
	 * Either we claim the timeout is set, or the callout is idle.
	 * If the timeout is still set, we may be handing off to the
	 * task instead, so this is an if but not an iff.
	 */
	KASSERT(xfer->ux_timeout_set || !callout_pending(&xfer->ux_callout));

	/*
	 * The task must be idle now.
	 *
	 * - If the caller is the callout, _and_ the timeout is still
	 *   valid, the caller will schedule it, but it hasn't been
	 *   scheduled yet.  (If the timeout is not valid, the task
	 *   should not be scheduled.)
	 *
	 * - If the caller is the task, it cannot be scheduled again
	 *   until the callout runs again, which won't happen until we
	 *   next release the lock.
	 */
	KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask));

	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));

	return valid;
}

/*
 * usbd_xfer_schedule_timeout(xfer)
 *
 *	Ensure that xfer has a timeout.
 *	If the callout is already
 *	queued or the task is already running, request that they
 *	reschedule the callout.  If not, and if we're not polling,
 *	schedule the callout anew.
 *
 *	To be called in thread context from struct
 *	usbd_pipe_methods::upm_start.
 */
void
usbd_xfer_schedule_timeout(struct usbd_xfer *xfer)
{
	struct usbd_bus *bus = xfer->ux_bus;

	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));

	if (xfer->ux_timeout_set) {
		/*
		 * Callout or task has fired from a prior completed
		 * xfer but has not yet noticed that the xfer is done.
		 * Ask it to reschedule itself to ux_timeout.
		 * (The request is consumed by usbd_xfer_probe_timeout.)
		 */
		xfer->ux_timeout_reset = true;
	} else if (xfer->ux_timeout && !bus->ub_usepolling) {
		/* Callout is not scheduled.  Schedule it. */
		KASSERT(!callout_pending(&xfer->ux_callout));
		callout_schedule(&xfer->ux_callout, mstohz(xfer->ux_timeout));
		xfer->ux_timeout_set = true;
	}

	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
}

/*
 * usbd_xfer_cancel_timeout_async(xfer)
 *
 *	Cancel the callout and the task of xfer, which have not yet run
 *	to completion, but don't wait for the callout or task to finish
 *	running.
 *
 *	If they have already fired, at worst they are waiting for the
 *	bus lock.  They will see that the xfer is no longer in progress
 *	and give up, or they will see that the xfer has been
 *	resubmitted with a new timeout and reschedule the callout.
 *
 *	If a resubmitted request completed so fast that the callout
 *	didn't have time to process a timer reset, just cancel the
 *	timer reset.
 */
static void
usbd_xfer_cancel_timeout_async(struct usbd_xfer *xfer)
{
	struct usbd_bus *bus __diagused = xfer->ux_bus;

	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));

	/*
	 * If the timer wasn't running anyway, forget about it.  This
	 * can happen if we are completing an isochronous transfer
	 * which doesn't use the same timeout logic.
	 */
	if (!xfer->ux_timeout_set)
		return;

	/* Discard any pending reschedule request, then try to stop. */
	xfer->ux_timeout_reset = false;
	if (!callout_stop(&xfer->ux_callout)) {
		/*
		 * We stopped the callout before it ran.  The timeout
		 * is no longer set.
		 */
		xfer->ux_timeout_set = false;
	} else if (callout_invoking(&xfer->ux_callout)) {
		/*
		 * The callout has begun to run but it has not yet
		 * acquired the lock and called callout_ack.  The task
		 * cannot be queued yet, and the callout cannot have
		 * been rescheduled yet.
		 *
		 * By the time the callout acquires the lock, we will
		 * have transitioned from USBD_IN_PROGRESS to a
		 * completed status, and possibly also resubmitted the
		 * xfer and set xfer->ux_timeout_reset = true.  In both
		 * cases, the callout will DTRT, so no further action
		 * is needed here.
		 */
	} else if (usb_rem_task(xfer->ux_pipe->up_dev, &xfer->ux_aborttask)) {
		/*
		 * The callout had fired and scheduled the task, but we
		 * stopped the task before it could run.  The timeout
		 * is therefore no longer set -- the next resubmission
		 * of the xfer must schedule a new timeout.
		 *
		 * The callout should not be pending at this point:
		 * it is scheduled only under the lock, and only when
		 * xfer->ux_timeout_set is false, or by the callout or
		 * task itself when xfer->ux_timeout_reset is true.
		 */
		xfer->ux_timeout_set = false;
	}

	/*
	 * The callout cannot be scheduled and the task cannot be
	 * queued at this point.  Either we cancelled them, or they are
	 * already running and waiting for the bus lock.
	 */
	KASSERT(!callout_pending(&xfer->ux_callout));
	KASSERT(!usb_task_pending(xfer->ux_pipe->up_dev, &xfer->ux_aborttask));

	KASSERT(bus->ub_usepolling || mutex_owned(bus->ub_lock));
}