xref: /openbsd-src/sys/dev/usb/xhci.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /* $OpenBSD: xhci.c,v 1.16 2014/07/11 16:38:58 pirofti Exp $ */
2 
3 /*
4  * Copyright (c) 2014 Martin Pieuchot
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/kernel.h>
22 #include <sys/malloc.h>
23 #include <sys/device.h>
24 #include <sys/queue.h>
25 #include <sys/timeout.h>
26 #include <sys/pool.h>
27 
28 #include <machine/bus.h>
29 #include <machine/endian.h>
30 
31 #include <dev/usb/usb.h>
32 #include <dev/usb/usbdi.h>
33 #include <dev/usb/usbdivar.h>
34 #include <dev/usb/usb_mem.h>
35 
36 #include <dev/usb/xhcireg.h>
37 #include <dev/usb/xhcivar.h>
38 
/* Autoconf(9) glue: device class entry for xhci(4) controllers. */
struct cfdriver xhci_cd = {
	NULL, "xhci", DV_DULL
};
42 
#ifdef XHCI_DEBUG
/* Debug printf helpers, enabled when xhcidebug exceeds the given level. */
#define DPRINTF(x)	do { if (xhcidebug) printf x; } while(0)
#define DPRINTFN(n,x)	do { if (xhcidebug>(n)) printf x; } while (0)
int xhcidebug = 3;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

#define DEVNAME(sc)		((sc)->sc_bus.bdev.dv_xname)

/* Byte offset of a TRB within its ring, and its bus (DMA) address. */
#define TRBOFF(ring, trb)	((void *)(trb) - (void *)((ring).trbs))
#define TRBADDR(ring, trb)	DMAADDR(&(ring).dma, TRBOFF(ring, trb))

/* Transfer descriptor pool shared by all xhci(4) instances. */
struct pool *xhcixfer;
58 
/*
 * Per-pipe (endpoint) software state, allocated by the stack via
 * sc_bus.pipe_size and cast from the generic usbd_pipe.
 */
struct xhci_pipe {
	struct usbd_pipe	pipe;

	uint8_t			dci;	/* Device Context Index (1..31) */
	uint8_t			slot;	/* Device slot ID */
	struct xhci_ring	ring;	/* Transfer ring for this endpoint */

	/*
	 * XXX used to pass the xfer pointer back to the
	 * interrupt routine, better way?
	 */
	struct usbd_xfer	*pending_xfers[XHCI_MAX_TRANSFERS];
	int			 halted;	/* endpoint reset in progress */
	size_t			 free_trbs;	/* TRBs left in the ring */
};
74 
75 int	xhci_reset(struct xhci_softc *);
76 void	xhci_config(struct xhci_softc *);
77 int	xhci_intr1(struct xhci_softc *);
78 void	xhci_waitintr(struct xhci_softc *, struct usbd_xfer *);
79 void	xhci_event_dequeue(struct xhci_softc *);
80 void	xhci_event_xfer(struct xhci_softc *, uint64_t, uint32_t, uint32_t);
81 void	xhci_event_command(struct xhci_softc *, uint64_t);
82 void	xhci_event_port_change(struct xhci_softc *, uint64_t, uint32_t);
83 int	xhci_pipe_init(struct xhci_softc *, struct usbd_pipe *, uint32_t);
84 int	xhci_device_setup(struct xhci_softc *, struct usbd_device *, uint8_t);
85 int	xhci_scratchpad_alloc(struct xhci_softc *, int);
86 void	xhci_scratchpad_free(struct xhci_softc *);
87 int	xhci_softdev_alloc(struct xhci_softc *, uint8_t);
88 void	xhci_softdev_free(struct xhci_softc *, uint8_t);
89 int	xhci_ring_alloc(struct xhci_softc *, struct xhci_ring *, size_t);
90 void	xhci_ring_free(struct xhci_softc *, struct xhci_ring *);
91 void	xhci_ring_reset(struct xhci_softc *, struct xhci_ring *);
92 struct	xhci_trb *xhci_ring_dequeue(struct xhci_softc *, struct xhci_ring *,
93 	    int);
94 
95 struct	xhci_trb *xhci_xfer_get_trb(struct xhci_softc *, struct usbd_xfer*,
96 	    uint8_t *, int);
97 void	xhci_xfer_done(struct usbd_xfer *xfer);
98 /* xHCI command helpers. */
99 int	xhci_command_submit(struct xhci_softc *, struct xhci_trb *, int);
100 int	xhci_command_abort(struct xhci_softc *);
101 
102 void	xhci_cmd_reset_endpoint_async(struct xhci_softc *, uint8_t, uint8_t);
103 void	xhci_cmd_set_tr_deq_async(struct xhci_softc *, uint8_t, uint8_t, uint64_t);
104 int	xhci_cmd_configure_ep(struct xhci_softc *, uint8_t, uint64_t);
105 int	xhci_cmd_stop_ep(struct xhci_softc *, uint8_t, uint8_t);
106 int	xhci_cmd_slot_control(struct xhci_softc *, uint8_t *, int);
107 int	xhci_cmd_address_device(struct xhci_softc *,uint8_t,  uint64_t);
108 int	xhci_cmd_evaluate_ctx(struct xhci_softc *, uint8_t, uint64_t);
109 #ifdef XHCI_DEBUG
110 int	xhci_cmd_noop(struct xhci_softc *);
111 #endif
112 
113 /* XXX should be part of the Bus interface. */
114 void	xhci_abort_xfer(struct usbd_xfer *, usbd_status);
115 void	xhci_pipe_close(struct usbd_pipe *);
116 void	xhci_noop(struct usbd_xfer *);
117 
118 /* XXX these are common to all HC drivers and should be merged. */
119 void 	xhci_timeout(void *);
120 void 	xhci_timeout_task(void *);
121 
122 /* USBD Bus Interface. */
123 usbd_status	  xhci_pipe_open(struct usbd_pipe *);
124 void		  xhci_softintr(void *);
125 void		  xhci_poll(struct usbd_bus *);
126 struct usbd_xfer *xhci_allocx(struct usbd_bus *);
127 void		  xhci_freex(struct usbd_bus *, struct usbd_xfer *);
128 
129 usbd_status	  xhci_root_ctrl_transfer(struct usbd_xfer *);
130 usbd_status	  xhci_root_ctrl_start(struct usbd_xfer *);
131 
132 usbd_status	  xhci_root_intr_transfer(struct usbd_xfer *);
133 usbd_status	  xhci_root_intr_start(struct usbd_xfer *);
134 void		  xhci_root_intr_abort(struct usbd_xfer *);
135 void		  xhci_root_intr_done(struct usbd_xfer *);
136 
137 usbd_status	  xhci_device_ctrl_transfer(struct usbd_xfer *);
138 usbd_status	  xhci_device_ctrl_start(struct usbd_xfer *);
139 void		  xhci_device_ctrl_abort(struct usbd_xfer *);
140 
141 usbd_status	  xhci_device_generic_transfer(struct usbd_xfer *);
142 usbd_status	  xhci_device_generic_start(struct usbd_xfer *);
143 void		  xhci_device_generic_abort(struct usbd_xfer *);
144 void		  xhci_device_generic_done(struct usbd_xfer *);
145 
146 #define XHCI_INTR_ENDPT 1
147 
/* Bus-level operations handed to the USB stack. */
struct usbd_bus_methods xhci_bus_methods = {
	.open_pipe = xhci_pipe_open,
	.soft_intr = xhci_softintr,
	.do_poll = xhci_poll,
	.allocx = xhci_allocx,
	.freex = xhci_freex,
};

/* Root hub control endpoint: emulated in software. */
struct usbd_pipe_methods xhci_root_ctrl_methods = {
	.transfer = xhci_root_ctrl_transfer,
	.start = xhci_root_ctrl_start,
	.abort = xhci_noop,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

/* Root hub interrupt endpoint: reports port status changes. */
struct usbd_pipe_methods xhci_root_intr_methods = {
	.transfer = xhci_root_intr_transfer,
	.start = xhci_root_intr_start,
	.abort = xhci_root_intr_abort,
	.close = xhci_pipe_close,
	.done = xhci_root_intr_done,
};

/* Default (control) endpoint of attached devices. */
struct usbd_pipe_methods xhci_device_ctrl_methods = {
	.transfer = xhci_device_ctrl_transfer,
	.start = xhci_device_ctrl_start,
	.abort = xhci_device_ctrl_abort,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

#if notyet
struct usbd_pipe_methods xhci_device_isoc_methods = {
};
#endif

/* Bulk endpoints share the generic transfer path. */
struct usbd_pipe_methods xhci_device_bulk_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};

/* Interrupt endpoints, also using the generic transfer path. */
struct usbd_pipe_methods xhci_device_generic_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};
200 
#ifdef XHCI_DEBUG
/* Print the three fields of a TRB for debugging. */
static void
xhci_dump_trb(struct xhci_trb *trb)
{
	printf("trb=%p (0x%016llx 0x%08x 0x%08x)\n", trb,
	   (long long)trb->trb_paddr, trb->trb_status, trb->trb_flags);
}
#endif
209 
/*
 * One-time controller initialization: locate the register sets, reset
 * the hardware, then allocate the DCBAA, the command and event rings,
 * the event ring segment table (ERST) and scratchpad pages before
 * programming and starting the controller via xhci_config().
 * Returns 0 on success or an errno; on failure every resource
 * allocated so far is released.
 */
int
xhci_init(struct xhci_softc *sc)
{
	uint32_t hcr;
	int npage, error;

#ifdef XHCI_DEBUG
	uint16_t vers;

	vers = XREAD2(sc, XHCI_HCIVERSION);
	printf("%s: xHCI version %x.%x\n", DEVNAME(sc), vers >> 8, vers & 0xff);
#endif
	sc->sc_bus.usbrev = USBREV_3_0;
	sc->sc_bus.methods = &xhci_bus_methods;
	sc->sc_bus.pipe_size = sizeof(struct xhci_pipe);

	/* Offsets of the operational, doorbell and runtime register sets. */
	sc->sc_oper_off = XREAD1(sc, XHCI_CAPLENGTH);
	sc->sc_door_off = XREAD4(sc, XHCI_DBOFF);
	sc->sc_runt_off = XREAD4(sc, XHCI_RTSOFF);

#ifdef XHCI_DEBUG
	printf("%s: CAPLENGTH=%#lx\n", DEVNAME(sc), sc->sc_oper_off);
	printf("%s: DOORBELL=%#lx\n", DEVNAME(sc), sc->sc_door_off);
	printf("%s: RUNTIME=%#lx\n", DEVNAME(sc), sc->sc_runt_off);
#endif

	error = xhci_reset(sc);
	if (error)
		return (error);

	/* The xfer pool is shared; the first controller to attach creates it. */
	if (xhcixfer == NULL) {
		xhcixfer = malloc(sizeof(struct pool), M_DEVBUF, M_NOWAIT);
		if (xhcixfer == NULL) {
			printf("%s: unable to allocate pool descriptor\n",
			    DEVNAME(sc));
			return (ENOMEM);
		}
		pool_init(xhcixfer, sizeof(struct xhci_xfer), 0, 0, 0,
		    "xhcixfer", NULL);
	}

	/* Context structures are 32 or 64 bytes wide (CSZ capability bit). */
	hcr = XREAD4(sc, XHCI_HCCPARAMS);
	sc->sc_ctxsize = XHCI_HCC_CSZ(hcr) ? 64 : 32;
	DPRINTF(("%s: %d bytes context\n", DEVNAME(sc), sc->sc_ctxsize));

#ifdef XHCI_DEBUG
	hcr = XOREAD4(sc, XHCI_PAGESIZE);
	printf("%s: supported page size 0x%08x\n", DEVNAME(sc), hcr);
#endif
	/* Use 4K for the moment since it's easier. */
	sc->sc_pagesize = 4096;

	/* Get port and device slot numbers. */
	hcr = XREAD4(sc, XHCI_HCSPARAMS1);
	sc->sc_noport = XHCI_HCS1_N_PORTS(hcr);
	sc->sc_noslot = XHCI_HCS1_DEVSLOT_MAX(hcr);
	DPRINTF(("%s: %d ports and %d slots\n", DEVNAME(sc), sc->sc_noport,
	    sc->sc_noslot));

	/*
	 * Section 6.1 - Device Context Base Address Array
	 * shall be aligned to a 64 byte boundary.
	 */
	sc->sc_dcbaa.size = (sc->sc_noslot + 1) * sizeof(uint64_t);
	error = usb_allocmem(&sc->sc_bus, sc->sc_dcbaa.size, 64,
	    &sc->sc_dcbaa.dma);
	if (error)
		return (ENOMEM);
	sc->sc_dcbaa.segs = KERNADDR(&sc->sc_dcbaa.dma, 0);
	memset(sc->sc_dcbaa.segs, 0, sc->sc_dcbaa.size);
	usb_syncmem(&sc->sc_dcbaa.dma, 0, sc->sc_dcbaa.size,
	    BUS_DMASYNC_PREWRITE);

	/* Setup command ring. */
	error = xhci_ring_alloc(sc, &sc->sc_cmd_ring, XHCI_MAX_COMMANDS);
	if (error) {
		printf("%s: could not allocate command ring.\n", DEVNAME(sc));
		usb_freemem(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (error);
	}

	/* Setup one event ring and its segment table (ERST). */
	error = xhci_ring_alloc(sc, &sc->sc_evt_ring, XHCI_MAX_EVENTS);
	if (error) {
		printf("%s: could not allocate event ring.\n", DEVNAME(sc));
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usb_freemem(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (error);
	}

	/* Allocate the required entry for the segment table. */
	sc->sc_erst.size = 1 * sizeof(struct xhci_erseg);
	error = usb_allocmem(&sc->sc_bus, sc->sc_erst.size, 64,
	    &sc->sc_erst.dma);
	if (error) {
		printf("%s: could not allocate segment table.\n", DEVNAME(sc));
		xhci_ring_free(sc, &sc->sc_evt_ring);
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usb_freemem(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (ENOMEM);
	}
	sc->sc_erst.segs = KERNADDR(&sc->sc_erst.dma, 0);

	/* Set our ring address and size in its corresponding segment. */
	sc->sc_erst.segs[0].er_addr = htole64(DMAADDR(&sc->sc_evt_ring.dma, 0));
	sc->sc_erst.segs[0].er_size = htole32(XHCI_MAX_EVENTS);
	sc->sc_erst.segs[0].er_rsvd = 0;
	usb_syncmem(&sc->sc_erst.dma, 0, sc->sc_erst.size,
	   BUS_DMASYNC_PREWRITE);

	/* Get the number of scratch pages and configure them if necessary. */
	hcr = XREAD4(sc, XHCI_HCSPARAMS2);
	npage = XHCI_HCS2_SPB_MAX(hcr);
	DPRINTF(("%s: %d scratch pages\n", DEVNAME(sc), npage));

	if (npage > 0 && xhci_scratchpad_alloc(sc, npage)) {
		printf("%s: could not allocate scratchpad.\n", DEVNAME(sc));
		usb_freemem(&sc->sc_bus, &sc->sc_erst.dma);
		xhci_ring_free(sc, &sc->sc_evt_ring);
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usb_freemem(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (ENOMEM);
	}


	xhci_config(sc);

	return (0);
}
339 
/*
 * Program the controller with the addresses allocated in xhci_init()
 * (DCBAA, command ring, ERST, event ring dequeue pointer), enable
 * interrupts and finally start the controller (run/stop bit).
 * Also called on resume to reprogram the hardware from scratch.
 */
void
xhci_config(struct xhci_softc *sc)
{
	uint64_t paddr;
	uint32_t hcr;

	/* Make sure to program a number of device slots we can handle. */
	if (sc->sc_noslot > USB_MAX_DEVICES)
		sc->sc_noslot = USB_MAX_DEVICES;
	hcr = XOREAD4(sc, XHCI_CONFIG) & ~XHCI_CONFIG_SLOTS_MASK;
	XOWRITE4(sc, XHCI_CONFIG, hcr | sc->sc_noslot);

	/* Set the device context base array address. */
	paddr = (uint64_t)DMAADDR(&sc->sc_dcbaa.dma, 0);
	XOWRITE4(sc, XHCI_DCBAAP_LO, (uint32_t)paddr);
	XOWRITE4(sc, XHCI_DCBAAP_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: DCBAAP=%#x%#x\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_DCBAAP_HI), XOREAD4(sc, XHCI_DCBAAP_LO)));

	/* Set the command ring address (with the initial cycle state). */
	paddr = (uint64_t)DMAADDR(&sc->sc_cmd_ring.dma, 0);
	XOWRITE4(sc, XHCI_CRCR_LO, ((uint32_t)paddr) | XHCI_CRCR_LO_RCS);
	XOWRITE4(sc, XHCI_CRCR_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: CRCR=%#x%#x (%016llx)\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_CRCR_HI), XOREAD4(sc, XHCI_CRCR_LO), paddr));

	/* Set the ERST count number to 1, since we use only one event ring. */
	XRWRITE4(sc, XHCI_ERSTSZ(0), XHCI_ERSTS_SET(1));

	/* Set the segment table address. */
	paddr = (uint64_t)DMAADDR(&sc->sc_erst.dma, 0);
	XRWRITE4(sc, XHCI_ERSTBA_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERSTBA=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERSTBA_HI(0)), XRREAD4(sc, XHCI_ERSTBA_LO(0))));

	/* Set the ring dequeue address. */
	paddr = (uint64_t)DMAADDR(&sc->sc_evt_ring.dma, 0);
	XRWRITE4(sc, XHCI_ERDP_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERDP=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERDP_HI(0)), XRREAD4(sc, XHCI_ERDP_LO(0))));

	/* Enable interrupts. */
	hcr = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), hcr | XHCI_IMAN_INTR_ENA);

	/* Set default interrupt moderation. */
	XRWRITE4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT);

	/* Allow event interrupt and start the controller. */
	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);

	DPRINTF(("%s: USBCMD=%#x\n", DEVNAME(sc), XOREAD4(sc, XHCI_USBCMD)));
	DPRINTF(("%s: IMAN=%#x\n", DEVNAME(sc), XRREAD4(sc, XHCI_IMAN(0))));
}
400 
/*
 * Detach the controller: detach children first, then halt the
 * hardware, clear the programmed addresses and release the DMA
 * resources in the reverse order of xhci_init().
 */
int
xhci_detach(struct device *self, int flags)
{
	struct xhci_softc *sc = (struct xhci_softc *)self;
	int rv;

	rv = config_detach_children(self, flags);
	if (rv != 0) {
		printf("%s: error while detaching %d\n", DEVNAME(sc), rv);
		return (rv);
	}

	/* Since the hardware might already be gone, ignore the errors. */
	xhci_command_abort(sc);

	xhci_reset(sc);

	/* Disable interrupts. */
	XRWRITE4(sc, XHCI_IMOD(0), 0);
	XRWRITE4(sc, XHCI_IMAN(0), 0);

	/* Clear the event ring address. */
	XRWRITE4(sc, XHCI_ERDP_LO(0), 0);
	XRWRITE4(sc, XHCI_ERDP_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTBA_LO(0), 0);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTSZ(0), 0);

	/* Clear the command ring address. */
	XOWRITE4(sc, XHCI_CRCR_LO, 0);
	XOWRITE4(sc, XHCI_CRCR_HI, 0);

	XOWRITE4(sc, XHCI_DCBAAP_LO, 0);
	XOWRITE4(sc, XHCI_DCBAAP_HI, 0);

	if (sc->sc_spad.npage > 0)
		xhci_scratchpad_free(sc);

	usb_freemem(&sc->sc_bus, &sc->sc_erst.dma);
	xhci_ring_free(sc, &sc->sc_evt_ring);
	xhci_ring_free(sc, &sc->sc_cmd_ring);
	usb_freemem(&sc->sc_bus, &sc->sc_dcbaa.dma);

	return (0);
}
448 
449 int
450 xhci_activate(struct device *self, int act)
451 {
452 	struct xhci_softc *sc = (struct xhci_softc *)self;
453 	int rv = 0;
454 
455 	switch (act) {
456 	case DVACT_RESUME:
457 		sc->sc_bus.use_polling++;
458 
459 		xhci_reset(sc);
460 		xhci_ring_reset(sc, &sc->sc_cmd_ring);
461 		xhci_ring_reset(sc, &sc->sc_evt_ring);
462 		xhci_config(sc);
463 
464 		sc->sc_bus.use_polling--;
465 		rv = config_activate_children(self, act);
466 		break;
467 	case DVACT_POWERDOWN:
468 		rv = config_activate_children(self, act);
469 		xhci_reset(sc);
470 		break;
471 	default:
472 		rv = config_activate_children(self, act);
473 		break;
474 	}
475 
476 	return (rv);
477 }
478 
479 int
480 xhci_reset(struct xhci_softc *sc)
481 {
482 	uint32_t hcr;
483 	int i;
484 
485 	XOWRITE4(sc, XHCI_USBCMD, 0);	/* Halt controller */
486 	for (i = 0; i < 100; i++) {
487 		usb_delay_ms(&sc->sc_bus, 1);
488 		hcr = XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_HCH;
489 		if (hcr)
490 			break;
491 	}
492 
493 	if (!hcr)
494 		printf("%s: halt timeout\n", DEVNAME(sc));
495 
496 	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_HCRST);
497 	for (i = 0; i < 100; i++) {
498 		usb_delay_ms(&sc->sc_bus, 1);
499 		hcr = XOREAD4(sc, XHCI_USBCMD) & XHCI_STS_CNR;
500 		if (!hcr)
501 			break;
502 	}
503 
504 	if (hcr) {
505 		printf("%s: reset timeout\n", DEVNAME(sc));
506 		return (EIO);
507 	}
508 
509 	return (0);
510 }
511 
512 
513 int
514 xhci_intr(void *v)
515 {
516 	struct xhci_softc *sc = v;
517 
518 	if (sc == NULL || sc->sc_bus.dying)
519 		return (0);
520 
521 	/* If we get an interrupt while polling, then just ignore it. */
522 	if (sc->sc_bus.use_polling) {
523 		DPRINTFN(16, ("xhci_intr: ignored interrupt while polling\n"));
524 		return (0);
525 	}
526 
527 	return (xhci_intr1(sc));
528 }
529 
/*
 * Interrupt body shared by xhci_intr() and xhci_poll(): check USBSTS,
 * acknowledge the event and schedule the soft interrupt that will
 * drain the event ring.  Returns 1 if the interrupt was handled.
 */
int
xhci_intr1(struct xhci_softc *sc)
{
	uint32_t intrs;

	intrs = XOREAD4(sc, XHCI_USBSTS);
	if (intrs == 0xffffffff) {
		/* All-ones read: the device has been removed. */
		sc->sc_bus.dying = 1;
		return (0);
	}

	if ((intrs & XHCI_STS_EINT) == 0)
		return (0);

	sc->sc_bus.intr_context++;
	sc->sc_bus.no_intrs++;

	if (intrs & XHCI_STS_HSE) {
		/* Host system error: the controller is unusable. */
		printf("%s: host system error\n", DEVNAME(sc));
		sc->sc_bus.dying = 1;
		sc->sc_bus.intr_context--;
		return (1);
	}

	XOWRITE4(sc, XHCI_USBSTS, intrs); /* Acknowledge */
	usb_schedsoftintr(&sc->sc_bus);

	/* Acknowledge PCI interrupt */
	intrs = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), intrs | XHCI_IMAN_INTR_PEND);

	sc->sc_bus.intr_context--;

	return (1);
}
565 
566 void
567 xhci_poll(struct usbd_bus *bus)
568 {
569 	struct xhci_softc *sc = (struct xhci_softc *)bus;
570 
571 	if (XOREAD4(sc, XHCI_USBSTS))
572 		xhci_intr1(sc);
573 }
574 
/* Busy-wait for a transfer completion: not implemented yet (stub). */
void
xhci_waitintr(struct xhci_softc *sc, struct usbd_xfer *xfer)
{
	DPRINTF(("%s: stub\n", __func__));
}
580 
581 void
582 xhci_softintr(void *v)
583 {
584 	struct xhci_softc *sc = v;
585 
586 	if (sc->sc_bus.dying)
587 		return;
588 
589 	sc->sc_bus.intr_context++;
590 	xhci_event_dequeue(sc);
591 	sc->sc_bus.intr_context--;
592 }
593 
/*
 * Process every pending TRB on the event ring, dispatching by event
 * type, then write the new dequeue pointer back to the hardware.
 */
void
xhci_event_dequeue(struct xhci_softc *sc)
{
	struct xhci_trb *trb;
	uint64_t paddr;
	uint32_t status, flags;

	while ((trb = xhci_ring_dequeue(sc, &sc->sc_evt_ring, 1)) != NULL) {
		paddr = letoh64(trb->trb_paddr);
		status = letoh32(trb->trb_status);
		flags = letoh32(trb->trb_flags);

		switch (flags & XHCI_TRB_TYPE_MASK) {
		case XHCI_EVT_XFER:
			xhci_event_xfer(sc, paddr, status, flags);
			break;
		case XHCI_EVT_CMD_COMPLETE:
			/* Keep a copy: the ring slot may be reused. */
			memcpy(&sc->sc_result_trb, trb, sizeof(*trb));
			xhci_event_command(sc, paddr);
			break;
		case XHCI_EVT_PORT_CHANGE:
			xhci_event_port_change(sc, paddr, status);
			break;
		default:
#ifdef XHCI_DEBUG
			printf("event (%d): ", XHCI_TRB_TYPE(flags));
			xhci_dump_trb(trb);
#endif
			break;
		}

	}

	/* Tell the hardware how far we got (ERDP, with the busy bit). */
	paddr = (uint64_t)DMAADDR(&sc->sc_evt_ring.dma,
	    sizeof(struct xhci_trb) * sc->sc_evt_ring.index);
	XRWRITE4(sc, XHCI_ERDP_LO(0), ((uint32_t)paddr) | XHCI_ERDP_LO_BUSY);
	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));
}
632 
633 void
634 xhci_event_xfer(struct xhci_softc *sc, uint64_t paddr, uint32_t status,
635     uint32_t flags)
636 {
637 	struct xhci_pipe *xp;
638 	struct usbd_xfer *xfer;
639 	uint8_t dci, slot, code, remain;
640 	int trb_idx;
641 
642 	slot = XHCI_TRB_GET_SLOT(flags);
643 	dci = XHCI_TRB_GET_EP(flags);
644 	if (slot > sc->sc_noslot)
645 		return; /* XXX */
646 
647 	xp = sc->sc_sdevs[slot].pipes[dci - 1];
648 
649 	code = XHCI_TRB_GET_CODE(status);
650 	remain = XHCI_TRB_REMAIN(status);
651 
652 	trb_idx = (paddr - DMAADDR(&xp->ring.dma, 0)) / sizeof(struct xhci_trb);
653 	if (trb_idx < 0 || trb_idx >= xp->ring.ntrb) {
654 		printf("%s: wrong trb index (%d) max is %zu\n", DEVNAME(sc),
655 		    trb_idx, xp->ring.ntrb - 1);
656 		return;
657 	}
658 
659 	xfer = xp->pending_xfers[trb_idx];
660 	if (xfer == NULL) {
661 #if 1
662 		DPRINTF(("%s: dev %d dci=%d paddr=0x%016llx idx=%d remain=%u"
663 		    " code=%u\n", DEVNAME(sc), slot, dci, (long long)paddr,
664 		    trb_idx, remain, code));
665 #endif
666 		printf("%s: NULL xfer pointer\n", DEVNAME(sc));
667 		return;
668 	}
669 
670 	switch (code) {
671 	case XHCI_CODE_SUCCESS:
672 	case XHCI_CODE_SHORT_XFER:
673 		xfer->actlen = xfer->length - remain;
674 		xfer->status = USBD_NORMAL_COMPLETION;
675 		break;
676 	case XHCI_CODE_STALL:
677 	case XHCI_CODE_BABBLE:
678 		/*
679 		 * Since the stack might try to start a new transfer as
680 		 * soon as a pending one finishes, make sure the endpoint
681 		 * is fully reset before calling usb_transfer_complete().
682 		 */
683 		xp->halted = 1;
684 		xhci_cmd_reset_endpoint_async(sc, slot, dci);
685 		return;
686 	default:
687 #if 1
688 		DPRINTF(("%s: dev %d dci=%d paddr=0x%016llx idx=%d remain=%u"
689 		    " code=%u\n", DEVNAME(sc), slot, dci, (long long)paddr,
690 		    trb_idx, remain, code));
691 #endif
692 		DPRINTF(("%s: unhandled code %d\n", DEVNAME(sc), code));
693 		xfer->status = USBD_IOERROR;
694 		xp->halted = 1;
695 		break;
696 	}
697 
698 	xhci_xfer_done(xfer);
699 	usb_transfer_complete(xfer);
700 }
701 
702 void
703 xhci_event_command(struct xhci_softc *sc, uint64_t paddr)
704 {
705 	struct usbd_xfer *xfer;
706 	struct xhci_pipe *xp;
707 	uint32_t flags;
708 	uint8_t dci, slot;
709 	int i;
710 
711 	KASSERT(paddr == TRBADDR(sc->sc_cmd_ring, sc->sc_cmd_trb));
712 
713 	flags = letoh32(sc->sc_cmd_trb->trb_flags);
714 
715 	slot = XHCI_TRB_GET_SLOT(flags);
716 	dci = XHCI_TRB_GET_EP(flags);
717 	xp = sc->sc_sdevs[slot].pipes[dci - 1];
718 
719 	sc->sc_cmd_trb = NULL;
720 
721 	switch (flags & XHCI_TRB_TYPE_MASK) {
722 	case XHCI_CMD_RESET_EP:
723 		/*
724 		 * Clear the TRBs and reconfigure the dequeue pointer
725 		 * before declaring the endpoint ready.
726 		 */
727 		xhci_ring_reset(sc, &xp->ring);
728 		xp->free_trbs = xp->ring.ntrb;
729 		xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
730 		    DMAADDR(&xp->ring.dma, 0) | XHCI_EPCTX_DCS);
731 		break;
732 	case XHCI_CMD_SET_TR_DEQ:
733 		/*
734 		 * Now that the endpoint is in its initial state, we
735 		 * can finish all its pending transfers and let the
736 		 * stack play with it again.
737 		 */
738 		xp->halted = 0;
739 		for (i = 0; i < XHCI_MAX_TRANSFERS; i++) {
740 			xfer = xp->pending_xfers[i];
741 			if (xfer != NULL && xfer->done == 0) {
742 				xfer->status = USBD_IOERROR;
743 				usb_transfer_complete(xfer);
744 			}
745 			xp->pending_xfers[i] = NULL;
746 		}
747 		break;
748 	default:
749 		/* All other commands are synchronous. */
750 		wakeup(&sc->sc_cmd_trb);
751 		break;
752 	}
753 }
754 
755 void
756 xhci_event_port_change(struct xhci_softc *sc, uint64_t paddr, uint32_t status)
757 {
758 	struct usbd_xfer *xfer = sc->sc_intrxfer;
759 	uint32_t port = XHCI_TRB_PORTID(paddr);
760 	uint8_t *p;
761 
762 	if (XHCI_TRB_GET_CODE(status) != XHCI_CODE_SUCCESS) {
763 		DPRINTF(("failed port status event\n"));/* XXX can it happen? */
764 		return;
765 	}
766 
767 	if (xfer == NULL)
768 		return;
769 
770 	p = KERNADDR(&xfer->dmabuf, 0);
771 	memset(p, 0, xfer->length);
772 
773 	p[port/8] |= 1 << (port%8);
774 	DPRINTF(("%s: port=%d change=0x%02x\n", DEVNAME(sc), port, *p));
775 
776 	xfer->actlen = xfer->length;
777 	xfer->status = USBD_NORMAL_COMPLETION;
778 
779 	usb_transfer_complete(xfer);
780 }
781 
/*
 * Release the ring bookkeeping for a finished transfer: clear its
 * pending_xfers entries (walking backwards with ring wrap-around) and
 * return its TRBs to the free count.
 */
void
xhci_xfer_done(struct usbd_xfer *xfer)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
	int ntrb, i;

#ifdef XHCI_DEBUG
	if (xp->pending_xfers[xx->index] == NULL) {
		printf("%s: xfer=%p already done (index=%d)\n", __func__,
		    xfer, xx->index);
		return;
	}
#endif

	/* Walk back over this xfer's TRBs; i wraps to the ring's end. */
	for (ntrb = 0, i = xx->index; ntrb < xx->ntrb; ntrb++, i--) {
		xp->pending_xfers[i] = NULL;
		if (i == 0)
			i = (xp->ring.ntrb - 1);
	}
	xp->free_trbs += xx->ntrb;
	xx->index = -1;
	xx->ntrb = 0;
}
806 
807 static inline uint8_t
808 xhci_ed2dci(usb_endpoint_descriptor_t *ed)
809 {
810 	uint8_t dir;
811 
812 	if (UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL)
813 		return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + 1);
814 
815 	if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)
816 		dir = 1;
817 	else
818 		dir = 0;
819 
820 	return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + dir);
821 }
822 
/*
 * Open a pipe: pick the right methods for root hub endpoints, and for
 * real devices obtain a slot (for the default control pipe), resolve
 * the root hub port and initialize the endpoint via xhci_pipe_init().
 */
usbd_status
xhci_pipe_open(struct usbd_pipe *pipe)
{
	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint8_t slot = 0, xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	struct usbd_device *hub;
	uint32_t rhport = 0;
	int error;

	KASSERT(xp->slot == 0);

#ifdef XHCI_DEBUG
	struct usbd_device *dev = pipe->device;
	printf("%s: pipe=%p addr=%d depth=%d port=%d speed=%d\n", __func__,
	    pipe, dev->address, dev->depth, dev->powersrc->portno, dev->speed);
#endif

	if (sc->sc_bus.dying)
		return (USBD_IOERROR);

	/* Root Hub */
	if (pipe->device->depth == 0) {
		switch (ed->bEndpointAddress) {
		case USB_CONTROL_ENDPOINT:
			pipe->methods = &xhci_root_ctrl_methods;
			break;
		case UE_DIR_IN | XHCI_INTR_ENDPT:
			pipe->methods = &xhci_root_intr_methods;
			break;
		default:
			pipe->methods = NULL;
			DPRINTF(("%s: bad bEndpointAddress 0x%02x\n", __func__,
			    ed->bEndpointAddress));
			return (USBD_INVAL);
		}
		return (USBD_NORMAL_COMPLETION);
	}

#if 0
	/* Issue a noop to check if the command ring is correctly configured. */
	xhci_cmd_noop(sc);
#endif

	switch (xfertype) {
	case UE_CONTROL:
		pipe->methods = &xhci_device_ctrl_methods;

		/* Get a slot and init the device's contexts. */
		error = xhci_cmd_slot_control(sc, &slot, 1);
		if (error || slot == 0 || slot > sc->sc_noslot)
			return (USBD_INVAL);

		if (xhci_softdev_alloc(sc, slot))
			return (USBD_NOMEM);

		/* Get root hub port by walking up to the hub below the root. */
		for (hub = pipe->device; hub->myhub->depth; hub = hub->myhub)
			;
		rhport = hub->powersrc->portno;
		break;
	case UE_ISOCHRONOUS:
#if notyet
		pipe->methods = &xhci_device_isoc_methods;
		break;
#else
		DPRINTF(("%s: isochronous xfer not supported \n", __func__));
		return (USBD_INVAL);
#endif
	case UE_BULK:
		pipe->methods = &xhci_device_bulk_methods;
		break;
	case UE_INTERRUPT:
		pipe->methods = &xhci_device_generic_methods;
		break;
	default:
		DPRINTF(("%s: bad xfer type %d\n", __func__, xfertype));
		return (USBD_INVAL);
	}

	/* XXX Section nb? */
	xp->dci = xhci_ed2dci(ed);

	/* Non-default pipes inherit the slot from the default control pipe. */
	if (slot != 0)
		xp->slot = slot;
	else
		xp->slot = ((struct xhci_pipe *)pipe->device->default_pipe)->slot;

	if (xhci_pipe_init(sc, pipe, rhport))
		return (USBD_IOERROR);

	return (USBD_NORMAL_COMPLETION);
}
917 
918 static inline uint32_t
919 xhci_endpoint_txinfo(struct xhci_softc *sc, usb_endpoint_descriptor_t *ed)
920 {
921 	switch (ed->bmAttributes & UE_XFERTYPE) {
922 	case UE_CONTROL:
923 		return (XHCI_EPCTX_AVG_TRB_LEN(8));
924 	case UE_BULK:
925 		return (0);
926 	case UE_INTERRUPT:
927 	case UE_ISOCHRONOUS:
928 	default:
929 		break;
930 	}
931 
932 	DPRINTF(("%s: partial stub\n", __func__));
933 
934 	return (XHCI_EPCTX_MAX_ESIT_PAYLOAD(0) | XHCI_EPCTX_AVG_TRB_LEN(0));
935 }
936 
/*
 * Address a freshly slotted device: issue Address Device with the
 * input context, then read the assigned address back from the output
 * slot context.  Returns 0, a command error, or EINVAL if the
 * controller reported address 0.
 */
int
xhci_device_setup(struct xhci_softc *sc, struct usbd_device *dev, uint8_t slot)
{
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];
	struct xhci_sctx *sctx;
	uint8_t addr;
	int error;

	/*
	 * Issue only one Set address to set up the slot context and
	 * assign an address.
	 */
	error = xhci_cmd_address_device(sc, slot, DMAADDR(&sdev->ictx_dma, 0));
	if (error)
		return (error);

	usb_syncmem(&sdev->octx_dma, 0, sc->sc_pagesize,
	    BUS_DMASYNC_POSTREAD);

	/* Get output slot context. */
	sctx = KERNADDR(&sdev->octx_dma, 0);
	addr = XHCI_SCTX_DEV_ADDR(letoh32(sctx->state));
	if (addr == 0)
		return (EINVAL);

	DPRINTF(("%s: dev %d internal addr %d\n", DEVNAME(sc), slot, addr));

	return (0);
}
966 
967 
968 
/*
 * Initialize a pipe's transfer ring and fill in the input (endpoint
 * and slot) contexts, then either address the device (default pipe,
 * DCI 1) or issue a Configure Endpoint command.  Returns 0 on
 * success; on failure the ring is freed again.
 */
int
xhci_pipe_init(struct xhci_softc *sc, struct usbd_pipe *pipe, uint32_t port)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint8_t ival, speed, cerr = 0;
	uint32_t mps;
	int error;

	DPRINTF(("%s: dev %d dci %u (epAddr=0x%x)\n", DEVNAME(sc), xp->slot,
	    xp->dci, pipe->endpoint->edesc->bEndpointAddress));

	if (xhci_ring_alloc(sc, &xp->ring, XHCI_MAX_TRANSFERS))
		return (ENOMEM);

	xp->free_trbs = xp->ring.ntrb;
	xp->halted = 0;

	sdev->pipes[xp->dci - 1] = xp;

	/* Derive interval, context speed and max packet size from the bus speed. */
	switch (pipe->device->speed) {
	case USB_SPEED_LOW:
		ival= 3;
		speed = XHCI_SPEED_LOW;
		mps = USB_MAX_IPACKET;
		break;
	case USB_SPEED_FULL:
		ival = 3;
		speed = XHCI_SPEED_FULL;
		mps = 64;
		break;
	case USB_SPEED_HIGH:
		ival = min(3, ed->bInterval);
		speed = XHCI_SPEED_HIGH;
		mps = 64;
		break;
	case USB_SPEED_SUPER:
		ival = min(3, ed->bInterval);
		speed = XHCI_SPEED_SUPER;
		mps = 512;
		break;
	default:
		return (EINVAL);
	}

	/* XXX Until we fix wMaxPacketSize for ctrl ep depending on the speed */
	mps = max(mps, UGETW(ed->wMaxPacketSize));

	if (pipe->interval != USBD_DEFAULT_INTERVAL)
		ival = min(ival, pipe->interval);

	DPRINTF(("%s: speed %d mps %d rhport %d\n", DEVNAME(sc), speed, mps,
	    port));

	/* Setup the endpoint context */
	if (xfertype != UE_ISOCHRONOUS)
		cerr = 3;

	if (xfertype == UE_CONTROL || xfertype == UE_BULK)
		ival = 0;

	/* xHCI encodes IN endpoint types with bit 2 set. */
	if ((ed->bEndpointAddress & UE_DIR_IN) || (xfertype == UE_CONTROL))
		xfertype |= 0x4;

	sdev->ep_ctx[xp->dci-1]->info_lo = htole32(XHCI_EPCTX_SET_IVAL(ival));
	sdev->ep_ctx[xp->dci-1]->info_hi = htole32(
	    XHCI_EPCTX_SET_MPS(mps) | XHCI_EPCTX_SET_EPTYPE(xfertype) |
	    XHCI_EPCTX_SET_CERR(cerr) | XHCI_EPCTX_SET_MAXB(0)
	);
	sdev->ep_ctx[xp->dci-1]->txinfo = htole32(xhci_endpoint_txinfo(sc, ed));
	sdev->ep_ctx[xp->dci-1]->deqp = htole64(
	    DMAADDR(&xp->ring.dma, 0) | XHCI_EPCTX_DCS
	);

	/* Unmask the new endoint */
	sdev->input_ctx->drop_flags = 0;
	sdev->input_ctx->add_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));

	/* Setup the slot context */
	sdev->slot_ctx->info_lo = htole32(XHCI_SCTX_SET_DCI(xp->dci));
	sdev->slot_ctx->info_hi = 0;
	sdev->slot_ctx->tt = 0;
	sdev->slot_ctx->state = 0;

	/* Speed and root hub port only matter for the default control pipe. */
	if (UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) {
		sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_SET_SPEED(speed));
		sdev->slot_ctx->info_hi |= htole32(XHCI_SCTX_SET_RHPORT(port));
	}

	usb_syncmem(&sdev->ictx_dma, 0, sc->sc_pagesize, BUS_DMASYNC_PREWRITE);

	if (xp->dci == 1)
		error = xhci_device_setup(sc, pipe->device, xp->slot);
	else
		error = xhci_cmd_configure_ep(sc, xp->slot,
		    DMAADDR(&sdev->ictx_dma, 0));

	if (error) {
		xhci_ring_free(sc, &xp->ring);
		return (EIO);
	}

	usb_syncmem(&sdev->octx_dma, 0, sc->sc_pagesize, BUS_DMASYNC_POSTREAD);

	return (0);
}
1077 
1078 void
1079 xhci_pipe_close(struct usbd_pipe *pipe)
1080 {
1081 	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
1082 	struct xhci_pipe *lxp, *xp = (struct xhci_pipe *)pipe;
1083 	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
1084 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
1085 	int i;
1086 
1087 	/* Root Hub */
1088 	if (pipe->device->depth == 0)
1089 		return;
1090 
1091 	if (!xp->halted || xhci_cmd_stop_ep(sc, xp->slot, xp->dci))
1092 		DPRINTF(("%s: error stopping ep (%d)\n", DEVNAME(sc), xp->dci));
1093 
1094 	/* Mask the endpoint */
1095 	sdev->input_ctx->drop_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));
1096 	sdev->input_ctx->add_flags = 0;
1097 
1098 	/* Update last valid Endpoint Context */
1099 	for (i = 30; i >= 0; i--) {
1100 		lxp = sdev->pipes[i];
1101 		if (lxp != NULL && lxp != xp)
1102 			break;
1103 	}
1104 	sdev->slot_ctx->info_lo = htole32(XHCI_SCTX_SET_DCI(lxp->dci));
1105 
1106 	/* Clear the Endpoint Context */
1107 	memset(&sdev->ep_ctx[xp->dci - 1], 0, sizeof(struct xhci_epctx));
1108 
1109 	usb_syncmem(&sdev->ictx_dma, 0, sc->sc_pagesize, BUS_DMASYNC_PREWRITE);
1110 
1111 	if (xhci_cmd_configure_ep(sc, xp->slot, DMAADDR(&sdev->ictx_dma, 0)))
1112 		DPRINTF(("%s: error clearing ep (%d)\n", DEVNAME(sc), xp->dci));
1113 
1114 	xhci_ring_free(sc, &xp->ring);
1115 	sdev->pipes[xp->dci - 1] = NULL;
1116 
1117 	if (UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL) {
1118 		xhci_cmd_slot_control(sc, &xp->slot, 0);
1119 		xhci_softdev_free(sc, xp->slot);
1120 	}
1121 }
1122 
1123 struct usbd_xfer *
1124 xhci_allocx(struct usbd_bus *bus)
1125 {
1126 	struct xhci_xfer *xx;
1127 
1128 	xx = pool_get(xhcixfer, PR_NOWAIT | PR_ZERO);
1129 #ifdef DIAGNOSTIC
1130 	if (xx != NULL)
1131 		xx->xfer.busy_free = XFER_BUSY;
1132 #endif
1133 	return ((struct usbd_xfer *)xx);
1134 }
1135 
1136 void
1137 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
1138 {
1139 	struct xhci_xfer *xx = (struct xhci_xfer*)xfer;
1140 
1141 #ifdef DIAGNOSTIC
1142 	if (xfer->busy_free != XFER_BUSY) {
1143 		printf("%s: xfer=%p not busy, 0x%08x\n", __func__, xfer,
1144 		    xfer->busy_free);
1145 		return;
1146 	}
1147 #endif
1148 	pool_put(xhcixfer, xx);
1149 }
1150 
/*
 * Allocate the scratchpad buffers reserved for the controller's
 * private use: a table of ``npage'' page pointers plus the pages
 * themselves.  DCBAA entry 0 is pointed at the table.  Returns
 * ENOMEM if either allocation fails.
 */
int
xhci_scratchpad_alloc(struct xhci_softc *sc, int npage)
{
	uint64_t *pte;
	int error, i;

	/* Allocate the required entry for the table. */
	error = usb_allocmem(&sc->sc_bus, npage * sizeof(uint64_t), 64,
	    &sc->sc_spad.table_dma);
	if (error)
		return (ENOMEM);
	pte = KERNADDR(&sc->sc_spad.table_dma, 0);

	/* Allocate space for the pages. */
	error = usb_allocmem(&sc->sc_bus, npage * sc->sc_pagesize,
	    sc->sc_pagesize, &sc->sc_spad.pages_dma);
	if (error) {
		usb_freemem(&sc->sc_bus, &sc->sc_spad.table_dma);
		return (ENOMEM);
	}
	memset(KERNADDR(&sc->sc_spad.pages_dma, 0), 0, npage * sc->sc_pagesize);
	usb_syncmem(&sc->sc_spad.pages_dma, 0, npage * sc->sc_pagesize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Fill the table with the physical address of every page. */
	for (i = 0; i < npage; i++) {
		pte[i] = htole64(
		    DMAADDR(&sc->sc_spad.pages_dma, i * sc->sc_pagesize)
		);
	}
	usb_syncmem(&sc->sc_spad.table_dma, 0, npage * sizeof(uint64_t),
	    BUS_DMASYNC_PREWRITE);

	/*  Entry 0 points to the table of scratchpad pointers. */
	sc->sc_dcbaa.segs[0] = htole64(DMAADDR(&sc->sc_spad.table_dma, 0));
	usb_syncmem(&sc->sc_dcbaa.dma, 0, sizeof(uint64_t),
	    BUS_DMASYNC_PREWRITE);

	sc->sc_spad.npage = npage;

	return (0);
}
1192 
/*
 * Release the scratchpad pages and their pointer table, clearing
 * DCBAA entry 0 first so the controller no longer references them.
 */
void
xhci_scratchpad_free(struct xhci_softc *sc)
{
	sc->sc_dcbaa.segs[0] = 0;
	usb_syncmem(&sc->sc_dcbaa.dma, 0, sizeof(uint64_t),
	    BUS_DMASYNC_PREWRITE);

	usb_freemem(&sc->sc_bus, &sc->sc_spad.pages_dma);
	usb_freemem(&sc->sc_bus, &sc->sc_spad.table_dma);
}
1203 
1204 
1205 int
1206 xhci_ring_alloc(struct xhci_softc *sc, struct xhci_ring *ring, size_t ntrb)
1207 {
1208 	size_t size;
1209 
1210 	size = ntrb * sizeof(struct xhci_trb);
1211 
1212 	if (usb_allocmem(&sc->sc_bus, size, 16, &ring->dma) != 0)
1213 		return (ENOMEM);
1214 
1215 	ring->trbs = KERNADDR(&ring->dma, 0);
1216 	ring->ntrb = ntrb;
1217 
1218 	xhci_ring_reset(sc, ring);
1219 
1220 	return (0);
1221 }
1222 
/* Release the DMA memory backing a TRB ring. */
void
xhci_ring_free(struct xhci_softc *sc, struct xhci_ring *ring)
{
	usb_freemem(&sc->sc_bus, &ring->dma);
}
1228 
/*
 * Erase all TRBs of a ring and return it to its initial state:
 * index 0 and toggle (cycle state) set to XHCI_TRB_CYCLE.
 */
void
xhci_ring_reset(struct xhci_softc *sc, struct xhci_ring *ring)
{
	size_t size;

	size = ring->ntrb * sizeof(struct xhci_trb);

	memset(ring->trbs, 0, size);

	ring->index = 0;
	ring->toggle = XHCI_TRB_CYCLE;

	/*
	 * Since all our rings use only one segment, at least for
	 * the moment, link their tail to their head.
	 */
	if (ring != &sc->sc_evt_ring) {
		struct xhci_trb *trb = &ring->trbs[ring->ntrb - 1];

		/* Last TRB becomes a Link TRB pointing back to TRB 0. */
		trb->trb_paddr = htole64(DMAADDR(&ring->dma, 0));
		trb->trb_flags = htole32(XHCI_TRB_TYPE_LINK | XHCI_TRB_LINKSEG);
	}
	usb_syncmem(&ring->dma, 0, size, BUS_DMASYNC_PREWRITE);
}
1253 
/*
 * Return the TRB at the ring's current index and advance the index.
 *
 * With ``cons'' set the ring is read as a consumer: the TRB is only
 * handed out when its cycle bit matches the ring's toggle, otherwise
 * the producer has not written it yet and NULL is returned.  When the
 * index reaches the last TRB of the segment it wraps back to 0 and
 * the toggle is inverted; the last TRB's cycle bit is updated to the
 * current toggle on the way past.
 */
struct xhci_trb*
xhci_ring_dequeue(struct xhci_softc *sc, struct xhci_ring *ring, int cons)
{
	struct xhci_trb *trb;
	uint32_t idx = ring->index;

	KASSERT(idx < ring->ntrb);

	usb_syncmem(&ring->dma, idx * sizeof(struct xhci_trb),
	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD);

	trb = &ring->trbs[idx];

	/* Make sure this TRB can be consumed. */
	if (cons && ring->toggle != (letoh32(trb->trb_flags) & XHCI_TRB_CYCLE))
		return (NULL);
	idx++;

	if (idx < (ring->ntrb - 1)) {
		ring->index = idx;
	} else {
		/* Wrapping: flip the cycle bit of the final TRB. */
		if (ring->toggle)
			ring->trbs[idx].trb_flags |= htole32(XHCI_TRB_CYCLE);
		else
			ring->trbs[idx].trb_flags &= ~htole32(XHCI_TRB_CYCLE);

		usb_syncmem(&ring->dma, sizeof(struct xhci_trb) * idx,
		    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);

		ring->index = 0;
		ring->toggle ^= 1;
	}

	return (trb);
}
1289 
/*
 * Reserve the next TRB of the transfer ring for ``xfer''.  The TRB is
 * charged against the pipe's free TRB count, and when ``last'' is set
 * its ring index is recorded in the xfer so completion events can be
 * matched back to it.  *togglep receives the cycle bit value the
 * caller must put in trb_flags.
 */
struct xhci_trb *
xhci_xfer_get_trb(struct xhci_softc *sc, struct usbd_xfer* xfer,
    uint8_t *togglep, int last)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;

	KASSERT(xp->free_trbs >= 1);

	/* Associate this TRB to our xfer. */
	xp->pending_xfers[xp->ring.index] = xfer;
	xp->free_trbs--;

	/* -1 marks "not the last TRB of this transfer". */
	xx->index = (last) ? xp->ring.index : -1;
	xx->ntrb += 1;

	*togglep = xp->ring.toggle;
	return (xhci_ring_dequeue(sc, &xp->ring, 0));
}
1309 
/*
 * Put a command TRB on the command ring and ring doorbell 0.
 *
 * With a non-zero ``timeout'' (in ms) the caller sleeps until the
 * completion event arrives; *trb0 is then overwritten with the event
 * TRB and EIO is returned if its completion code is not SUCCESS.
 * With ``timeout'' == 0 the command is submitted asynchronously and
 * 0 is returned immediately.
 */
int
xhci_command_submit(struct xhci_softc *sc, struct xhci_trb *trb0, int timeout)
{
	struct xhci_trb *trb;
	int error = 0;

	/* Only one synchronous command may be in flight at a time. */
	KASSERT(sc->sc_cmd_trb == NULL);

	trb0->trb_flags |= htole32(sc->sc_cmd_ring.toggle);

	/* Copy the command into the next free slot of the ring. */
	trb = xhci_ring_dequeue(sc, &sc->sc_cmd_ring, 0);
	memcpy(trb, trb0, sizeof(struct xhci_trb));
	usb_syncmem(&sc->sc_cmd_ring.dma, TRBOFF(sc->sc_cmd_ring, trb),
	    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);

	sc->sc_cmd_trb = trb;
	XDWRITE4(sc, XHCI_DOORBELL(0), 0);

	if (timeout == 0)
		return (0);

	assertwaitok();

	/* Woken by the command completion event handler. */
	error = tsleep(&sc->sc_cmd_trb, PZERO, "xhcicmd",
	    (timeout*hz+999)/ 1000 + 1);
	if (error) {
#ifdef XHCI_DEBUG
		printf("%s: tsleep() = %d\n", __func__, error);
		printf("cmd = %d " ,XHCI_TRB_TYPE(letoh32(trb->trb_flags)));
		xhci_dump_trb(trb);
#endif
		sc->sc_cmd_trb = NULL;
		return (error);
	}

	/* Hand the completion event TRB back to the caller. */
	memcpy(trb0, &sc->sc_result_trb, sizeof(struct xhci_trb));

	if (XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)) != XHCI_CODE_SUCCESS) {
		printf("%s: event error code=%d\n", DEVNAME(sc),
		    XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)));
		error = EIO;
	}

#ifdef XHCI_DEBUG
	if (error) {
		printf("result = %d ", XHCI_TRB_TYPE(letoh32(trb0->trb_flags)));
		xhci_dump_trb(trb0);
	}
#endif
	return (error);
}
1361 
1362 int
1363 xhci_command_abort(struct xhci_softc *sc)
1364 {
1365 	uint32_t reg;
1366 	int i;
1367 
1368 	reg = XOREAD4(sc, XHCI_CRCR_LO);
1369 	if ((reg & XHCI_CRCR_LO_CRR) == 0)
1370 		return (0);
1371 
1372 	XOWRITE4(sc, XHCI_CRCR_LO, reg | XHCI_CRCR_LO_CA);
1373 	XOWRITE4(sc, XHCI_CRCR_HI, 0);
1374 
1375 	for (i = 0; i < 250; i++) {
1376 		usb_delay_ms(&sc->sc_bus, 1);
1377 		reg = XOREAD4(sc, XHCI_CRCR_LO) & XHCI_CRCR_LO_CRR;
1378 		if (!reg)
1379 			break;
1380 	}
1381 
1382 	if (reg) {
1383 		printf("%s: command ring abort timeout\n", DEVNAME(sc));
1384 		return (1);
1385 	}
1386 
1387 	return (0);
1388 }
1389 
1390 int
1391 xhci_cmd_configure_ep(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
1392 {
1393 	struct xhci_trb trb;
1394 
1395 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
1396 
1397 	trb.trb_paddr = htole64(addr);
1398 	trb.trb_status = 0;
1399 	trb.trb_flags = htole32(
1400 	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_CONFIG_EP
1401 	);
1402 
1403 	return (xhci_command_submit(sc, &trb, XHCI_COMMAND_TIMEOUT));
1404 }
1405 
1406 int
1407 xhci_cmd_stop_ep(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
1408 {
1409 	struct xhci_trb trb;
1410 
1411 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
1412 
1413 	trb.trb_paddr = 0;
1414 	trb.trb_status = 0;
1415 	trb.trb_flags = htole32(
1416 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_STOP_EP
1417 	);
1418 
1419 	return (xhci_command_submit(sc, &trb, XHCI_COMMAND_TIMEOUT));
1420 }
1421 
1422 void
1423 xhci_cmd_reset_endpoint_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
1424 {
1425 	struct xhci_trb trb;
1426 
1427 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
1428 
1429 	trb.trb_paddr = 0;
1430 	trb.trb_status = 0;
1431 	trb.trb_flags = htole32(
1432 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_RESET_EP
1433 	);
1434 
1435 	xhci_command_submit(sc, &trb, 0);
1436 }
1437 
1438 void
1439 xhci_cmd_set_tr_deq_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci,
1440    uint64_t addr)
1441 {
1442 	struct xhci_trb trb;
1443 
1444 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
1445 
1446 	trb.trb_paddr = htole64(addr);
1447 	trb.trb_status = 0;
1448 	trb.trb_flags = htole32(
1449 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_SET_TR_DEQ
1450 	);
1451 
1452 	xhci_command_submit(sc, &trb, 0);
1453 }
1454 
1455 int
1456 xhci_cmd_slot_control(struct xhci_softc *sc, uint8_t *slotp, int enable)
1457 {
1458 	struct xhci_trb trb;
1459 
1460 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
1461 
1462 	trb.trb_paddr = 0;
1463 	trb.trb_status = 0;
1464 	if (enable)
1465 		trb.trb_flags = htole32(XHCI_CMD_ENABLE_SLOT);
1466 	else
1467 		trb.trb_flags = htole32(
1468 			XHCI_TRB_SET_SLOT(*slotp) | XHCI_CMD_DISABLE_SLOT
1469 		);
1470 
1471 	if (xhci_command_submit(sc, &trb, XHCI_COMMAND_TIMEOUT))
1472 		return (EIO);
1473 
1474 	if (enable)
1475 		*slotp = XHCI_TRB_GET_SLOT(letoh32(trb.trb_flags));
1476 
1477 	return (0);
1478 }
1479 
1480 int
1481 xhci_cmd_address_device(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
1482 {
1483 	struct xhci_trb trb;
1484 
1485 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
1486 
1487 	trb.trb_paddr = htole64(addr);
1488 	trb.trb_status = 0;
1489 	trb.trb_flags = htole32(
1490 	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_ADDRESS_DEVICE
1491 	);
1492 
1493 	return (xhci_command_submit(sc, &trb, XHCI_COMMAND_TIMEOUT));
1494 }
1495 
1496 int
1497 xhci_cmd_evaluate_ctx(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
1498 {
1499 	struct xhci_trb trb;
1500 
1501 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
1502 
1503 	trb.trb_paddr = htole64(addr);
1504 	trb.trb_status = 0;
1505 	trb.trb_flags = htole32(
1506 	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_EVAL_CTX
1507 	);
1508 
1509 	return (xhci_command_submit(sc, &trb, XHCI_COMMAND_TIMEOUT));
1510 }
1511 
1512 #ifdef XHCI_DEBUG
/* Debug helper: issue a No Op command and wait for its completion. */
int
xhci_cmd_noop(struct xhci_softc *sc)
{
	struct xhci_trb trb;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	trb.trb_paddr = 0;
	trb.trb_status = 0;
	trb.trb_flags = htole32(XHCI_CMD_NOOP);

	return (xhci_command_submit(sc, &trb, XHCI_COMMAND_TIMEOUT));
}
1526 #endif
1527 
/*
 * Allocate the per-slot state: one page of input context (with
 * pointers into it cached for the slot context and the 31 endpoint
 * contexts) and one page of output context, whose physical address is
 * published in the DCBAA.  Returns ENOMEM if an allocation fails.
 */
int
xhci_softdev_alloc(struct xhci_softc *sc, uint8_t slot)
{
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];
	int i, error;

	/*
	 * Setup input context.  Even with 64 byte context size, it
	 * fits into the smallest supported page size, so use that.
	 */
	error = usb_allocmem(&sc->sc_bus, sc->sc_pagesize, sc->sc_pagesize,
	    &sdev->ictx_dma);
	if (error)
		return (ENOMEM);
	memset(KERNADDR(&sdev->ictx_dma, 0), 0, sc->sc_pagesize);

	/* Contexts are laid out back to back: input, slot, then EPs. */
	sdev->input_ctx = KERNADDR(&sdev->ictx_dma, 0);
	sdev->slot_ctx = KERNADDR(&sdev->ictx_dma, sc->sc_ctxsize);
	for (i = 0; i < 31; i++)
		sdev->ep_ctx[i] =
		   KERNADDR(&sdev->ictx_dma, (i + 2) * sc->sc_ctxsize);

	DPRINTF(("%s: dev %d, input=%p slot=%p ep0=%p\n", DEVNAME(sc),
	 slot, sdev->input_ctx, sdev->slot_ctx, sdev->ep_ctx[0]));

	/* Setup output context */
	error = usb_allocmem(&sc->sc_bus, sc->sc_pagesize, sc->sc_pagesize,
	    &sdev->octx_dma);
	if (error) {
		usb_freemem(&sc->sc_bus, &sdev->ictx_dma);
		return (ENOMEM);
	}
	memset(KERNADDR(&sdev->octx_dma, 0), 0, sc->sc_pagesize);

	memset(&sdev->pipes, 0, sizeof(sdev->pipes));

	DPRINTF(("%s: dev %d, setting DCBAA to 0x%016llx\n", DEVNAME(sc),
	    slot, (long long)DMAADDR(&sdev->octx_dma, 0)));

	/* Publish the output context for this slot. */
	sc->sc_dcbaa.segs[slot] = htole64(DMAADDR(&sdev->octx_dma, 0));
	usb_syncmem(&sc->sc_dcbaa.dma, slot * sizeof(uint64_t),
	    sizeof(uint64_t), BUS_DMASYNC_PREWRITE);

	return (0);
}
1573 
/*
 * Free the per-slot state allocated by xhci_softdev_alloc() and clear
 * the slot's DCBAA entry.
 */
void
xhci_softdev_free(struct xhci_softc *sc, uint8_t slot)
{
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];

	sc->sc_dcbaa.segs[slot] = 0;
	usb_syncmem(&sc->sc_dcbaa.dma, slot * sizeof(uint64_t),
	    sizeof(uint64_t), BUS_DMASYNC_PREWRITE);

	usb_freemem(&sc->sc_bus, &sdev->octx_dma);
	usb_freemem(&sc->sc_bus, &sdev->ictx_dma);

	memset(sdev, 0, sizeof(struct xhci_soft_dev));
}
1588 
/* Root hub descriptors. */
/*
 * Device descriptor for the emulated root hub.  Not const: idVendor
 * is patched at GET_DESCRIPTOR time in xhci_root_ctrl_start().
 */
usb_device_descriptor_t xhci_devd = {
	USB_DEVICE_DESCRIPTOR_SIZE,
	UDESC_DEVICE,		/* type */
	{0x00, 0x03},		/* USB version */
	UDCLASS_HUB,		/* class */
	UDSUBCLASS_HUB,		/* subclass */
	UDPROTO_HSHUBSTT,	/* protocol */
	9,			/* max packet */
	{0},{0},{0x00,0x01},	/* device id */
	1,2,0,			/* string indexes */
	1			/* # of configurations */
};
1602 
/* Configuration descriptor for the emulated root hub. */
const usb_config_descriptor_t xhci_confd = {
	USB_CONFIG_DESCRIPTOR_SIZE,
	UDESC_CONFIG,
	{USB_CONFIG_DESCRIPTOR_SIZE +
	 USB_INTERFACE_DESCRIPTOR_SIZE +
	 USB_ENDPOINT_DESCRIPTOR_SIZE},	/* total length */
	1,				/* # of interfaces */
	1,				/* configuration value */
	0,				/* configuration string index */
	UC_SELF_POWERED,
	0                      /* max power */
};
1615 
/* Interface descriptor for the emulated root hub. */
const usb_interface_descriptor_t xhci_ifcd = {
	USB_INTERFACE_DESCRIPTOR_SIZE,
	UDESC_INTERFACE,
	0,			/* interface number */
	0,			/* alternate setting */
	1,			/* # of endpoints */
	UICLASS_HUB,
	UISUBCLASS_HUB,
	UIPROTO_HSHUBSTT,	/* XXX */
	0			/* interface string index */
};
1627 
/* Interrupt endpoint descriptor for the emulated root hub. */
const usb_endpoint_descriptor_t xhci_endpd = {
	USB_ENDPOINT_DESCRIPTOR_SIZE,
	UDESC_ENDPOINT,
	UE_DIR_IN | XHCI_INTR_ENDPT,
	UE_INTERRUPT,
	{2, 0},                 /* max 15 ports */
	255			/* polling interval */
};
1636 
/* SuperSpeed endpoint companion descriptor for the root hub. */
const usb_endpoint_ss_comp_descriptor_t xhci_endpcd = {
	USB_ENDPOINT_SS_COMP_DESCRIPTOR_SIZE,
	UDESC_ENDPOINT_SS_COMP,
	0,			/* max burst */
	0,			/* attributes */
	{0, 0}			/* XXX */
};
1644 
/*
 * Hub descriptor template; per-controller fields (port count, power
 * characteristics, removable map) are filled in at request time in
 * xhci_root_ctrl_start().
 */
const usb_hub_descriptor_t xhci_hubd = {
	USB_HUB_DESCRIPTOR_SIZE,
	UDESC_SS_HUB,
	0,
	{0,0},
	0,
	0,
	{0},
};
1654 
/*
 * Abort an xfer: mark it with ``status'', cancel any pending timeout
 * or abort task and complete it.  Explicitly a partial stub — the
 * hardware side (stopping the endpoint, reclaiming the TRBs) is not
 * implemented yet.
 */
void
xhci_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
{
	int s;

	DPRINTF(("%s: partial stub\n", __func__));

	xhci_xfer_done(xfer);

	xfer->status = status;
	timeout_del(&xfer->timeout_handle);
	usb_rem_task(xfer->device, &xfer->abort_task);

	s = splusb();
	usb_transfer_complete(xfer);
	splx(s);
}
1672 
/*
 * Transfer timeout handler (timeout(9) context): defer the actual
 * abort to a USB task, except when the controller is dying, in which
 * case the abort is performed directly.
 */
void
xhci_timeout(void *addr)
{
	struct usbd_xfer *xfer = addr;
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;

	if (sc->sc_bus.dying) {
		xhci_timeout_task(addr);
		return;
	}

	usb_init_task(&xfer->abort_task, xhci_timeout_task, addr,
	    USB_TASK_TYPE_ABORT);
	usb_add_task(xfer->device, &xfer->abort_task);
}
1688 
/* Task context part of the timeout handling: abort the xfer. */
void
xhci_timeout_task(void *addr)
{
	struct usbd_xfer *xfer = addr;

	DPRINTF(("%s: xfer=%p\n", __func__, xfer));

	xhci_abort_xfer(xfer, USBD_TIMEOUT);
}
1698 
/*
 * Queue a root hub control transfer and start the first transfer of
 * the pipe's queue.
 */
usbd_status
xhci_root_ctrl_transfer(struct usbd_xfer *xfer)
{
	usbd_status err;

	err = usb_insert_transfer(xfer);
	if (err)
		return (err);

	return (xhci_root_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
}
1710 
/*
 * Emulate a control request to the root hub.  The request is decoded
 * and translated into reads/writes of the controller's operational
 * registers; the xfer is completed synchronously before returning.
 */
usbd_status
xhci_root_ctrl_start(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	usb_port_status_t ps;
	usb_device_request_t *req;
	void *buf = NULL;
	usb_hub_descriptor_t hubd;
	usbd_status err;
	int s, len, value, index;
	int l, totlen = 0;
	int port, i;
	uint32_t v;

	KASSERT(xfer->rqflags & URQ_REQUEST);

	if (sc->sc_bus.dying)
		return (USBD_IOERROR);

	req = &xfer->request;

	DPRINTFN(4,("%s: type=0x%02x request=%02x\n", __func__,
	    req->bmRequestType, req->bRequest));

	len = UGETW(req->wLength);
	value = UGETW(req->wValue);
	index = UGETW(req->wIndex);

	if (len != 0)
		buf = KERNADDR(&xfer->dmabuf, 0);

	/* Dispatch on (bRequest, bmRequestType) packed into one int. */
#define C(x,y) ((x) | ((y) << 8))
	switch(C(req->bRequest, req->bmRequestType)) {
	case C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE):
	case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE):
	case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT):
		/*
		 * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops
		 * for the integrated root hub.
		 */
		break;
	case C(UR_GET_CONFIG, UT_READ_DEVICE):
		if (len > 0) {
			*(uint8_t *)buf = sc->sc_conf;
			totlen = 1;
		}
		break;
	case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
		DPRINTFN(8,("xhci_root_ctrl_start: wValue=0x%04x\n", value));
		switch(value >> 8) {
		case UDESC_DEVICE:
			if ((value & 0xff) != 0) {
				err = USBD_IOERROR;
				goto ret;
			}
			totlen = l = min(len, USB_DEVICE_DESCRIPTOR_SIZE);
			USETW(xhci_devd.idVendor, sc->sc_id_vendor);
			memcpy(buf, &xhci_devd, l);
			break;
		/*
		 * We can't really operate at another speed, but the spec says
		 * we need this descriptor.
		 */
		case UDESC_OTHER_SPEED_CONFIGURATION:
		case UDESC_CONFIG:
			if ((value & 0xff) != 0) {
				err = USBD_IOERROR;
				goto ret;
			}
			/* Config, interface and endpoint descriptors back
			 * to back, each truncated to the remaining length. */
			totlen = l = min(len, USB_CONFIG_DESCRIPTOR_SIZE);
			memcpy(buf, &xhci_confd, l);
			((usb_config_descriptor_t *)buf)->bDescriptorType =
			    value >> 8;
			buf = (char *)buf + l;
			len -= l;
			l = min(len, USB_INTERFACE_DESCRIPTOR_SIZE);
			totlen += l;
			memcpy(buf, &xhci_ifcd, l);
			buf = (char *)buf + l;
			len -= l;
			l = min(len, USB_ENDPOINT_DESCRIPTOR_SIZE);
			totlen += l;
			memcpy(buf, &xhci_endpd, l);
			break;
		case UDESC_STRING:
			if (len == 0)
				break;
			*(u_int8_t *)buf = 0;
			totlen = 1;
			switch (value & 0xff) {
			case 0: /* Language table */
				totlen = usbd_str(buf, len, "\001");
				break;
			case 1: /* Vendor */
				totlen = usbd_str(buf, len, sc->sc_vendor);
				break;
			case 2: /* Product */
				totlen = usbd_str(buf, len, "xHCI root hub");
				break;
			}
			break;
		default:
			err = USBD_IOERROR;
			goto ret;
		}
		break;
	case C(UR_GET_INTERFACE, UT_READ_INTERFACE):
		if (len > 0) {
			*(uint8_t *)buf = 0;
			totlen = 1;
		}
		break;
	case C(UR_GET_STATUS, UT_READ_DEVICE):
		if (len > 1) {
			USETW(((usb_status_t *)buf)->wStatus,UDS_SELF_POWERED);
			totlen = 2;
		}
		break;
	case C(UR_GET_STATUS, UT_READ_INTERFACE):
	case C(UR_GET_STATUS, UT_READ_ENDPOINT):
		if (len > 1) {
			USETW(((usb_status_t *)buf)->wStatus, 0);
			totlen = 2;
		}
		break;
	case C(UR_SET_ADDRESS, UT_WRITE_DEVICE):
		if (value >= USB_MAX_DEVICES) {
			err = USBD_IOERROR;
			goto ret;
		}
		break;
	case C(UR_SET_CONFIG, UT_WRITE_DEVICE):
		if (value != 0 && value != 1) {
			err = USBD_IOERROR;
			goto ret;
		}
		sc->sc_conf = value;
		break;
	case C(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE):
		break;
	case C(UR_SET_FEATURE, UT_WRITE_DEVICE):
	case C(UR_SET_FEATURE, UT_WRITE_INTERFACE):
	case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT):
		err = USBD_IOERROR;
		goto ret;
	case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE):
		break;
	case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT):
		break;
	/* Hub requests */
	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
		break;
	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER):
		DPRINTFN(8, ("xhci_root_ctrl_start: UR_CLEAR_PORT_FEATURE "
		    "port=%d feature=%d\n", index, value));
		if (index < 1 || index > sc->sc_noport) {
			err = USBD_IOERROR;
			goto ret;
		}
		/* Read PORTSC with the write-one-to-clear bits masked out. */
		port = XHCI_PORTSC(index);
		v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR;
		switch (value) {
		case UHF_PORT_ENABLE:
			XOWRITE4(sc, port, v | XHCI_PS_PED);
			break;
		case UHF_PORT_SUSPEND:
			/* TODO */
			break;
		case UHF_PORT_POWER:
			XOWRITE4(sc, port, v & ~XHCI_PS_PP);
			break;
		case UHF_PORT_INDICATOR:
			XOWRITE4(sc, port, v & ~XHCI_PS_SET_PIC(3));
			break;
		case UHF_C_PORT_CONNECTION:
			XOWRITE4(sc, port, v | XHCI_PS_CSC);
			break;
		case UHF_C_PORT_ENABLE:
			XOWRITE4(sc, port, v | XHCI_PS_PEC);
			break;
		case UHF_C_PORT_SUSPEND:
			XOWRITE4(sc, port, v | XHCI_PS_PLC);
			break;
		case UHF_C_PORT_OVER_CURRENT:
			XOWRITE4(sc, port, v | XHCI_PS_OCC);
			break;
		case UHF_C_PORT_RESET:
			XOWRITE4(sc, port, v | XHCI_PS_PRC);
			break;
		default:
			err = USBD_IOERROR;
			goto ret;
		}
		break;

	case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
		if (len == 0)
			break;
		if ((value & 0xff) != 0) {
			err = USBD_IOERROR;
			goto ret;
		}
		/* Build the hub descriptor from the template. */
		v = XREAD4(sc, XHCI_HCCPARAMS);
		hubd = xhci_hubd;
		hubd.bNbrPorts = sc->sc_noport;
		USETW(hubd.wHubCharacteristics,
		    (XHCI_HCC_PPC(v) ? UHD_PWR_INDIVIDUAL : UHD_PWR_GANGED) |
		    (XHCI_HCC_PIND(v) ? UHD_PORT_IND : 0));
		hubd.bPwrOn2PwrGood = 10; /* xHCI section 5.4.9 */
		for (i = 1; i <= sc->sc_noport; i++) {
			v = XOREAD4(sc, XHCI_PORTSC(i));
			if (v & XHCI_PS_DR)
				hubd.DeviceRemovable[i / 8] |= 1U << (i % 8);
		}
		/* i is sc_noport + 1 after the loop. */
		hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
		l = min(len, hubd.bDescLength);
		totlen = l;
		memcpy(buf, &hubd, l);
		break;
	case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
		if (len != 16) {
			err = USBD_IOERROR;
			goto ret;
		}
		memset(buf, 0, len);
		totlen = len;
		break;
	case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
		DPRINTFN(8,("xhci_root_ctrl_start: get port status i=%d\n",
		    index));
		if (index < 1 || index > sc->sc_noport) {
			err = USBD_IOERROR;
			goto ret;
		}
		if (len != 4) {
			err = USBD_IOERROR;
			goto ret;
		}
		v = XOREAD4(sc, XHCI_PORTSC(index));
		DPRINTFN(8,("xhci_root_ctrl_start: port status=0x%04x\n", v));
		/* Translate PORTSC bits into wPortStatus/wPortChange. */
		switch (XHCI_PS_SPEED(v)) {
		case XHCI_SPEED_FULL:
			i = UPS_FULL_SPEED;
			break;
		case XHCI_SPEED_LOW:
			i = UPS_LOW_SPEED;
			break;
		case XHCI_SPEED_HIGH:
			i = UPS_HIGH_SPEED;
			break;
		case XHCI_SPEED_SUPER:
		default:
			i = UPS_SUPER_SPEED;
			break;
		}
		if (v & XHCI_PS_CCS)	i |= UPS_CURRENT_CONNECT_STATUS;
		if (v & XHCI_PS_PED)	i |= UPS_PORT_ENABLED;
		if (v & XHCI_PS_OCA)	i |= UPS_OVERCURRENT_INDICATOR;
		if (v & XHCI_PS_PR)	i |= UPS_RESET;
		if (v & XHCI_PS_PP)	i |= UPS_PORT_POWER;
		USETW(ps.wPortStatus, i);
		i = 0;
		if (v & XHCI_PS_CSC)    i |= UPS_C_CONNECT_STATUS;
		if (v & XHCI_PS_PEC)    i |= UPS_C_PORT_ENABLED;
		if (v & XHCI_PS_OCC)    i |= UPS_C_OVERCURRENT_INDICATOR;
		if (v & XHCI_PS_PRC)	i |= UPS_C_PORT_RESET;
		USETW(ps.wPortChange, i);
		l = min(len, sizeof ps);
		memcpy(buf, &ps, l);
		totlen = l;
		break;
	case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
		err = USBD_IOERROR;
		goto ret;
	case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
		break;
	case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER):

		/* High byte of wIndex selects LPM for PORT_SUSPEND. */
		i = index >> 8;
		index &= 0x00ff;

		if (index < 1 || index > sc->sc_noport) {
			err = USBD_IOERROR;
			goto ret;
		}
		port = XHCI_PORTSC(index);
		v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR;

		switch (value) {
		case UHF_PORT_ENABLE:
			XOWRITE4(sc, port, v | XHCI_PS_PED);
			break;
		case UHF_PORT_SUSPEND:
			DPRINTFN(6, ("suspend port %u (LPM=%u)\n", index, i));
			if (XHCI_PS_SPEED(v) == XHCI_SPEED_SUPER) {
				err = USBD_IOERROR;
				goto ret;
			}
			XOWRITE4(sc, port, v |
			    XHCI_PS_SET_PLS(i ? 2 /* LPM */ : 3) | XHCI_PS_LWS);
			break;
		case UHF_PORT_RESET:
			DPRINTFN(6, ("reset port %d\n", index));
			XOWRITE4(sc, port, v | XHCI_PS_PR);
			break;
		case UHF_PORT_POWER:
			DPRINTFN(3, ("set port power %d\n", index));
			XOWRITE4(sc, port, v | XHCI_PS_PP);
			break;
		case UHF_PORT_INDICATOR:
			DPRINTFN(3, ("set port indicator %d\n", index));

			v &= ~XHCI_PS_SET_PIC(3);
			v |= XHCI_PS_SET_PIC(1);

			XOWRITE4(sc, port, v);
			break;
		case UHF_C_PORT_RESET:
			XOWRITE4(sc, port, v | XHCI_PS_PRC);
			break;
		default:
			err = USBD_IOERROR;
			goto ret;
		}
		break;
	case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
	case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
	case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
	case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
		break;
	default:
		err = USBD_IOERROR;
		goto ret;
	}
	xfer->actlen = totlen;
	err = USBD_NORMAL_COMPLETION;
ret:
	xfer->status = err;
	s = splusb();
	usb_transfer_complete(xfer);
	splx(s);
	return (USBD_IN_PROGRESS);
}
2054 
2055 
/* Null pipe method, used where no action is required. */
void
xhci_noop(struct usbd_xfer *xfer)
{
}
2060 
2061 
/*
 * Queue a root hub interrupt transfer and start the first transfer of
 * the pipe's queue.
 */
usbd_status
xhci_root_intr_transfer(struct usbd_xfer *xfer)
{
	usbd_status err;

	err = usb_insert_transfer(xfer);
	if (err)
		return (err);

	return (xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
}
2073 
/*
 * Record the root hub interrupt xfer; it is completed later when a
 * port change is noticed.
 */
usbd_status
xhci_root_intr_start(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;

	if (sc->sc_bus.dying)
		return (USBD_IOERROR);

	sc->sc_intrxfer = xfer;

	return (USBD_IN_PROGRESS);
}
2086 
/* Cancel the pending root hub interrupt xfer. */
void
xhci_root_intr_abort(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	int s;

	sc->sc_intrxfer = NULL;

	xfer->status = USBD_CANCELLED;
	s = splusb();
	usb_transfer_complete(xfer);
	splx(s);
}
2100 
/* Nothing to clean up after a root hub interrupt xfer completes. */
void
xhci_root_intr_done(struct usbd_xfer *xfer)
{
}
2105 
/*
 * Queue a device control transfer and start the first transfer of
 * the pipe's queue.
 */
usbd_status
xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
{
	usbd_status err;

	err = usb_insert_transfer(xfer);
	if (err)
		return (err);

	return (xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
}
2117 
2118 usbd_status
2119 xhci_device_ctrl_start(struct usbd_xfer *xfer)
2120 {
2121 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2122 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
2123 	struct xhci_trb *trb0, *trb;
2124 	uint32_t len = UGETW(xfer->request.wLength);
2125 	uint8_t toggle0, toggle;
2126 
2127 	KASSERT(xfer->rqflags & URQ_REQUEST);
2128 
2129 	if (sc->sc_bus.dying || xp->halted)
2130 		return (USBD_IOERROR);
2131 
2132 	if (xp->free_trbs < 3)
2133 		return (USBD_NOMEM);
2134 
2135 	/* We'll do the setup TRB once we're finished with the other stages. */
2136 	trb0 = xhci_xfer_get_trb(sc, xfer, &toggle0, 0);
2137 
2138 	/* Data TRB */
2139 	if (len != 0) {
2140 		trb = xhci_xfer_get_trb(sc, xfer, &toggle, 0);
2141 		trb->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0));
2142 		trb->trb_status = htole32(
2143 		    XHCI_TRB_INTR(0) | XHCI_TRB_TDREM(1) | XHCI_TRB_LEN(len)
2144 		);
2145 		trb->trb_flags = htole32(XHCI_TRB_TYPE_DATA | toggle);
2146 
2147 		if (usbd_xfer_isread(xfer))
2148 			trb->trb_flags |= htole32(XHCI_TRB_DIR_IN|XHCI_TRB_ISP);
2149 
2150 	}
2151 
2152 	/* Status TRB */
2153 	trb = xhci_xfer_get_trb(sc, xfer, &toggle, 1);
2154 	trb->trb_paddr = 0;
2155 	trb->trb_status = htole32(XHCI_TRB_INTR(0));
2156 	trb->trb_flags = htole32(XHCI_TRB_TYPE_STATUS | XHCI_TRB_IOC | toggle);
2157 
2158 	if (len == 0 || !usbd_xfer_isread(xfer))
2159 		trb->trb_flags |= htole32(XHCI_TRB_DIR_IN);
2160 
2161 	/* Setup TRB */
2162 	trb0->trb_paddr = (uint64_t)*((uint64_t *)&xfer->request);
2163 	trb0->trb_status = htole32(XHCI_TRB_INTR(0) | XHCI_TRB_LEN(8));
2164 	trb0->trb_flags = htole32(XHCI_TRB_TYPE_SETUP | XHCI_TRB_IDT);
2165 
2166 	if (len != 0) {
2167 		if (usbd_xfer_isread(xfer))
2168 			trb0->trb_flags |= htole32(XHCI_TRB_TRT_IN);
2169 		else
2170 			trb0->trb_flags |= htole32(XHCI_TRB_TRT_OUT);
2171 	}
2172 
2173 	trb0->trb_flags |= htole32(toggle0);
2174 
2175 	usb_syncmem(&xp->ring.dma, 0, xp->ring.ntrb * sizeof(struct xhci_trb),
2176 	    BUS_DMASYNC_PREWRITE); /* XXX too big hammer? */
2177 	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);
2178 
2179 	xfer->status = USBD_IN_PROGRESS;
2180 
2181 	if (sc->sc_bus.use_polling)
2182 		xhci_waitintr(sc, xfer);
2183 #if notyet
2184 	else if (xfer->timeout) {
2185 		timeout_del(&xfer->timeout_handle);
2186 		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
2187 		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
2188 	}
2189 #endif
2190 
2191 	return (USBD_IN_PROGRESS);
2192 }
2193 
/*
 * Abort an outstanding control transfer; hands off to
 * xhci_abort_xfer() with status USBD_CANCELLED.
 */
void
xhci_device_ctrl_abort(struct usbd_xfer *xfer)
{
	xhci_abort_xfer(xfer, USBD_CANCELLED);
}
2199 
2200 usbd_status
2201 xhci_device_generic_transfer(struct usbd_xfer *xfer)
2202 {
2203 	usbd_status err;
2204 
2205 	err = usb_insert_transfer(xfer);
2206 	if (err)
2207 		return (err);
2208 
2209 	return (xhci_device_generic_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2210 }
2211 
/*
 * Queue a bulk/interrupt transfer: build a single Normal TRB covering
 * the xfer's DMA buffer and ring the slot's doorbell.
 *
 * Returns USBD_IN_PROGRESS on success, USBD_IOERROR if the controller
 * is dying or the endpoint is halted, USBD_NOMEM if the ring is full.
 */
usbd_status
xhci_device_generic_start(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_trb *trb;
	uint8_t toggle;

	/* Control requests go through xhci_device_ctrl_start() instead. */
	KASSERT(!(xfer->rqflags & URQ_REQUEST));

	if (sc->sc_bus.dying || xp->halted)
		return (USBD_IOERROR);

	/* The whole transfer fits in a single TRB. */
	if (xp->free_trbs < 1)
		return (USBD_NOMEM);

	/*
	 * NOTE(review): `toggle' presumably carries the ring's cycle-bit
	 * state and the final argument marks the last TRB of the TD —
	 * confirm against xhci_xfer_get_trb().
	 */
	trb = xhci_xfer_get_trb(sc, xfer, &toggle, 1);
	trb->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0));
	trb->trb_status = htole32(
	    XHCI_TRB_INTR(0) | XHCI_TRB_TDREM(1) | XHCI_TRB_LEN(xfer->length)
	);
	/* IOC: interrupt on completion; ISP: interrupt on short packet. */
	trb->trb_flags = htole32(
	    XHCI_TRB_TYPE_NORMAL | XHCI_TRB_ISP | XHCI_TRB_IOC | toggle
	);

	/* Flush just this TRB before notifying the controller. */
	usb_syncmem(&xp->ring.dma, ((void *)trb - (void *)xp->ring.trbs),
	    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);
	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);

	xfer->status = USBD_IN_PROGRESS;

	/* In polling mode, spin here until the transfer completes. */
	if (sc->sc_bus.use_polling)
		xhci_waitintr(sc, xfer);
#if notyet
	else if (xfer->timeout) {
		timeout_del(&xfer->timeout_handle);
		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
	}
#endif

	return (USBD_IN_PROGRESS);
}
2255 
2256 void
2257 xhci_device_generic_done(struct usbd_xfer *xfer)
2258 {
2259 	usb_syncmem(&xfer->dmabuf, 0, xfer->length, usbd_xfer_isread(xfer) ?
2260 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
2261 
2262 	/* Only happens with interrupt transfers. */
2263 	if (xfer->pipe->repeat)
2264 		xfer->status = xhci_device_generic_start(xfer);
2265 }
2266 
/*
 * Abort an outstanding bulk/interrupt transfer with status
 * USBD_CANCELLED.
 */
void
xhci_device_generic_abort(struct usbd_xfer *xfer)
{
	/* A repeating (interrupt) pipe may only abort its own intrxfer. */
	KASSERT(!xfer->pipe->repeat || xfer->pipe->intrxfer == xfer);

	xhci_abort_xfer(xfer, USBD_CANCELLED);
}
2274