1 /* $OpenBSD: xhci.c,v 1.120 2020/12/24 14:11:38 mglocker Exp $ */
2 
3 /*
4  * Copyright (c) 2014-2015 Martin Pieuchot
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/kernel.h>
22 #include <sys/malloc.h>
23 #include <sys/device.h>
24 #include <sys/queue.h>
25 #include <sys/timeout.h>
26 #include <sys/pool.h>
27 #include <sys/endian.h>
28 #include <sys/rwlock.h>
29 
30 #include <machine/bus.h>
31 
32 #include <dev/usb/usb.h>
33 #include <dev/usb/usbdi.h>
34 #include <dev/usb/usbdivar.h>
35 #include <dev/usb/usb_mem.h>
36 
37 #include <dev/usb/xhcireg.h>
38 #include <dev/usb/xhcivar.h>
39 
40 struct cfdriver xhci_cd = {
41 	NULL, "xhci", DV_DULL
42 };
43 
44 #ifdef XHCI_DEBUG
45 #define DPRINTF(x)	do { if (xhcidebug) printf x; } while (0)
46 #define DPRINTFN(n,x)	do { if (xhcidebug>(n)) printf x; } while (0)
47 int xhcidebug = 3;
48 #else
49 #define DPRINTF(x)
50 #define DPRINTFN(n,x)
51 #endif
52 
53 #define DEVNAME(sc)	((sc)->sc_bus.bdev.dv_xname)
54 
55 #define TRBOFF(r, trb)	((char *)(trb) - (char *)((r)->trbs))
56 #define DEQPTR(r)	((r).dma.paddr + (sizeof(struct xhci_trb) * (r).index))
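
/*
 * Example (an illustrative sketch, not compiled): struct xhci_trb is
 * 16 bytes, so for a ring whose DMA segment starts at bus address
 * 0x1000 and whose dequeue index is 5,
 *
 *	DEQPTR(r) == 0x1000 + 16 * 5 == 0x1050
 *
 * TRBOFF() goes the other way: it turns a TRB pointer back into a
 * byte offset from the start of the ring, which is the offset the
 * bus_dmamap_sync(9) calls below expect.
 */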
57 
58 struct pool *xhcixfer;
59 
60 struct xhci_pipe {
61 	struct usbd_pipe	pipe;
62 
63 	uint8_t			dci;
64 	uint8_t			slot;	/* Device slot ID */
65 	struct xhci_ring	ring;
66 
67 	/*
68 	 * XXX used to pass the xfer pointer back to the
69 	 * interrupt routine, better way?
70 	 */
71 	struct usbd_xfer	*pending_xfers[XHCI_MAX_XFER];
72 	struct usbd_xfer	*aborted_xfer;
73 	int			 halted;
74 	size_t			 free_trbs;
75 	int			 skip;
76 #define TRB_PROCESSED_NO	0
77 #define TRB_PROCESSED_YES 	1
78 #define TRB_PROCESSED_SHORT	2
79 	uint8_t			 trb_processed[XHCI_MAX_XFER];
80 };
81 
82 int	xhci_reset(struct xhci_softc *);
83 int	xhci_intr1(struct xhci_softc *);
84 void	xhci_event_dequeue(struct xhci_softc *);
85 void	xhci_event_xfer(struct xhci_softc *, uint64_t, uint32_t, uint32_t);
86 int	xhci_event_xfer_generic(struct xhci_softc *, struct usbd_xfer *,
87 	    struct xhci_pipe *, uint32_t, int, uint8_t, uint8_t, uint8_t);
88 int	xhci_event_xfer_isoc(struct usbd_xfer *, struct xhci_pipe *,
89 	    uint32_t, int, uint8_t);
90 void	xhci_event_command(struct xhci_softc *, uint64_t);
91 void	xhci_event_port_change(struct xhci_softc *, uint64_t, uint32_t);
92 int	xhci_pipe_init(struct xhci_softc *, struct usbd_pipe *);
93 int	xhci_context_setup(struct xhci_softc *, struct usbd_pipe *);
94 int	xhci_scratchpad_alloc(struct xhci_softc *, int);
95 void	xhci_scratchpad_free(struct xhci_softc *);
96 int	xhci_softdev_alloc(struct xhci_softc *, uint8_t);
97 void	xhci_softdev_free(struct xhci_softc *, uint8_t);
98 int	xhci_ring_alloc(struct xhci_softc *, struct xhci_ring *, size_t,
99 	    size_t);
100 void	xhci_ring_free(struct xhci_softc *, struct xhci_ring *);
101 void	xhci_ring_reset(struct xhci_softc *, struct xhci_ring *);
102 struct	xhci_trb *xhci_ring_consume(struct xhci_softc *, struct xhci_ring *);
103 struct	xhci_trb *xhci_ring_produce(struct xhci_softc *, struct xhci_ring *);
104 
105 struct	xhci_trb *xhci_xfer_get_trb(struct xhci_softc *, struct usbd_xfer*,
106 	    uint8_t *, int);
107 void	xhci_xfer_done(struct usbd_xfer *xfer);
108 /* xHCI command helpers. */
109 int	xhci_command_submit(struct xhci_softc *, struct xhci_trb *, int);
110 int	xhci_command_abort(struct xhci_softc *);
111 
112 void	xhci_cmd_reset_ep_async(struct xhci_softc *, uint8_t, uint8_t);
113 void	xhci_cmd_set_tr_deq_async(struct xhci_softc *, uint8_t, uint8_t, uint64_t);
114 int	xhci_cmd_configure_ep(struct xhci_softc *, uint8_t, uint64_t);
115 int	xhci_cmd_stop_ep(struct xhci_softc *, uint8_t, uint8_t);
116 int	xhci_cmd_slot_control(struct xhci_softc *, uint8_t *, int);
117 int	xhci_cmd_set_address(struct xhci_softc *, uint8_t, uint64_t, uint32_t);
118 int	xhci_cmd_evaluate_ctx(struct xhci_softc *, uint8_t, uint64_t);
119 #ifdef XHCI_DEBUG
120 int	xhci_cmd_noop(struct xhci_softc *);
121 #endif
122 
123 /* XXX should be part of the Bus interface. */
124 void	xhci_abort_xfer(struct usbd_xfer *, usbd_status);
125 void	xhci_pipe_close(struct usbd_pipe *);
126 void	xhci_noop(struct usbd_xfer *);
127 
128 void 	xhci_timeout(void *);
129 void	xhci_timeout_task(void *);
130 
131 /* USBD Bus Interface. */
132 usbd_status	  xhci_pipe_open(struct usbd_pipe *);
133 int		  xhci_setaddr(struct usbd_device *, int);
134 void		  xhci_softintr(void *);
135 void		  xhci_poll(struct usbd_bus *);
136 struct usbd_xfer *xhci_allocx(struct usbd_bus *);
137 void		  xhci_freex(struct usbd_bus *, struct usbd_xfer *);
138 
139 usbd_status	  xhci_root_ctrl_transfer(struct usbd_xfer *);
140 usbd_status	  xhci_root_ctrl_start(struct usbd_xfer *);
141 
142 usbd_status	  xhci_root_intr_transfer(struct usbd_xfer *);
143 usbd_status	  xhci_root_intr_start(struct usbd_xfer *);
144 void		  xhci_root_intr_abort(struct usbd_xfer *);
145 void		  xhci_root_intr_done(struct usbd_xfer *);
146 
147 usbd_status	  xhci_device_ctrl_transfer(struct usbd_xfer *);
148 usbd_status	  xhci_device_ctrl_start(struct usbd_xfer *);
149 void		  xhci_device_ctrl_abort(struct usbd_xfer *);
150 
151 usbd_status	  xhci_device_generic_transfer(struct usbd_xfer *);
152 usbd_status	  xhci_device_generic_start(struct usbd_xfer *);
153 void		  xhci_device_generic_abort(struct usbd_xfer *);
154 void		  xhci_device_generic_done(struct usbd_xfer *);
155 
156 usbd_status	  xhci_device_isoc_transfer(struct usbd_xfer *);
157 usbd_status	  xhci_device_isoc_start(struct usbd_xfer *);
158 
159 #define XHCI_INTR_ENDPT 1
160 
161 struct usbd_bus_methods xhci_bus_methods = {
162 	.open_pipe = xhci_pipe_open,
163 	.dev_setaddr = xhci_setaddr,
164 	.soft_intr = xhci_softintr,
165 	.do_poll = xhci_poll,
166 	.allocx = xhci_allocx,
167 	.freex = xhci_freex,
168 };
169 
170 struct usbd_pipe_methods xhci_root_ctrl_methods = {
171 	.transfer = xhci_root_ctrl_transfer,
172 	.start = xhci_root_ctrl_start,
173 	.abort = xhci_noop,
174 	.close = xhci_pipe_close,
175 	.done = xhci_noop,
176 };
177 
178 struct usbd_pipe_methods xhci_root_intr_methods = {
179 	.transfer = xhci_root_intr_transfer,
180 	.start = xhci_root_intr_start,
181 	.abort = xhci_root_intr_abort,
182 	.close = xhci_pipe_close,
183 	.done = xhci_root_intr_done,
184 };
185 
186 struct usbd_pipe_methods xhci_device_ctrl_methods = {
187 	.transfer = xhci_device_ctrl_transfer,
188 	.start = xhci_device_ctrl_start,
189 	.abort = xhci_device_ctrl_abort,
190 	.close = xhci_pipe_close,
191 	.done = xhci_noop,
192 };
193 
194 struct usbd_pipe_methods xhci_device_intr_methods = {
195 	.transfer = xhci_device_generic_transfer,
196 	.start = xhci_device_generic_start,
197 	.abort = xhci_device_generic_abort,
198 	.close = xhci_pipe_close,
199 	.done = xhci_device_generic_done,
200 };
201 
202 struct usbd_pipe_methods xhci_device_bulk_methods = {
203 	.transfer = xhci_device_generic_transfer,
204 	.start = xhci_device_generic_start,
205 	.abort = xhci_device_generic_abort,
206 	.close = xhci_pipe_close,
207 	.done = xhci_device_generic_done,
208 };
209 
210 struct usbd_pipe_methods xhci_device_isoc_methods = {
211 	.transfer = xhci_device_isoc_transfer,
212 	.start = xhci_device_isoc_start,
213 	.abort = xhci_device_generic_abort,
214 	.close = xhci_pipe_close,
215 	.done = xhci_noop,
216 };
217 
218 #ifdef XHCI_DEBUG
219 static void
220 xhci_dump_trb(struct xhci_trb *trb)
221 {
222 	printf("trb=%p (0x%016llx 0x%08x 0x%b)\n", trb,
223 	    (long long)letoh64(trb->trb_paddr), letoh32(trb->trb_status),
224 	    (int)letoh32(trb->trb_flags), XHCI_TRB_FLAGS_BITMASK);
225 }
226 #endif
227 
228 int	usbd_dma_contig_alloc(struct usbd_bus *, struct usbd_dma_info *,
229 	    void **, bus_size_t, bus_size_t, bus_size_t);
230 void	usbd_dma_contig_free(struct usbd_bus *, struct usbd_dma_info *);
231 
232 int
233 usbd_dma_contig_alloc(struct usbd_bus *bus, struct usbd_dma_info *dma,
234     void **kvap, bus_size_t size, bus_size_t alignment, bus_size_t boundary)
235 {
236 	int error;
237 
238 	dma->tag = bus->dmatag;
239 	dma->size = size;
240 
241 	error = bus_dmamap_create(dma->tag, size, 1, size, boundary,
242 	    BUS_DMA_NOWAIT, &dma->map);
243 	if (error != 0)
244 		return (error);
245 
246 	error = bus_dmamem_alloc(dma->tag, size, alignment, boundary, &dma->seg,
247 	    1, &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
248 	if (error != 0)
249 		goto destroy;
250 
251 	error = bus_dmamem_map(dma->tag, &dma->seg, 1, size, &dma->vaddr,
252 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
253 	if (error != 0)
254 		goto free;
255 
256 	error = bus_dmamap_load_raw(dma->tag, dma->map, &dma->seg, 1, size,
257 	    BUS_DMA_NOWAIT);
258 	if (error != 0)
259 		goto unmap;
260 
261 	bus_dmamap_sync(dma->tag, dma->map, 0, size, BUS_DMASYNC_PREREAD |
262 	    BUS_DMASYNC_PREWRITE);
263 
264 	dma->paddr = dma->map->dm_segs[0].ds_addr;
265 	if (kvap != NULL)
266 		*kvap = dma->vaddr;
267 
268 	return (0);
269 
270 unmap:
271 	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
272 free:
273 	bus_dmamem_free(dma->tag, &dma->seg, 1);
274 destroy:
275 	bus_dmamap_destroy(dma->tag, dma->map);
276 	return (error);
277 }
278 
279 void
280 usbd_dma_contig_free(struct usbd_bus *bus, struct usbd_dma_info *dma)
281 {
282 	if (dma->map != NULL) {
283 		bus_dmamap_sync(bus->dmatag, dma->map, 0, dma->size,
284 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
285 		bus_dmamap_unload(bus->dmatag, dma->map);
286 		bus_dmamem_unmap(bus->dmatag, dma->vaddr, dma->size);
287 		bus_dmamem_free(bus->dmatag, &dma->seg, 1);
288 		bus_dmamap_destroy(bus->dmatag, dma->map);
289 		dma->map = NULL;
290 	}
291 }
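
/*
 * Minimal usage sketch for the two helpers above (illustrative only;
 * the sizes are made up, the real callers are xhci_init() and
 * xhci_ring_alloc()):
 *
 *	struct usbd_dma_info dma;
 *	uint64_t *table;
 *
 *	if (usbd_dma_contig_alloc(&sc->sc_bus, &dma, (void **)&table,
 *	    8 * sizeof(uint64_t), XHCI_SPAD_TABLE_ALIGN,
 *	    sc->sc_pagesize) == 0) {
 *		(table is now zeroed and mapped thanks to BUS_DMA_ZERO,
 *		 and dma.paddr holds its bus address)
 *		usbd_dma_contig_free(&sc->sc_bus, &dma);
 *	}
 */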
292 
293 int
294 xhci_init(struct xhci_softc *sc)
295 {
296 	uint32_t hcr;
297 	int npage, error;
298 
299 	sc->sc_bus.usbrev = USBREV_3_0;
300 	sc->sc_bus.methods = &xhci_bus_methods;
301 	sc->sc_bus.pipe_size = sizeof(struct xhci_pipe);
302 
303 	sc->sc_oper_off = XREAD1(sc, XHCI_CAPLENGTH);
304 	sc->sc_door_off = XREAD4(sc, XHCI_DBOFF);
305 	sc->sc_runt_off = XREAD4(sc, XHCI_RTSOFF);
306 
307 	sc->sc_version = XREAD2(sc, XHCI_HCIVERSION);
308 	printf(", xHCI %x.%x\n", sc->sc_version >> 8, sc->sc_version & 0xff);
309 
310 #ifdef XHCI_DEBUG
311 	printf("%s: CAPLENGTH=%#lx\n", DEVNAME(sc), sc->sc_oper_off);
312 	printf("%s: DOORBELL=%#lx\n", DEVNAME(sc), sc->sc_door_off);
313 	printf("%s: RUNTIME=%#lx\n", DEVNAME(sc), sc->sc_runt_off);
314 #endif
315 
316 	error = xhci_reset(sc);
317 	if (error)
318 		return (error);
319 
320 	if (xhcixfer == NULL) {
321 		xhcixfer = malloc(sizeof(struct pool), M_DEVBUF, M_NOWAIT);
322 		if (xhcixfer == NULL) {
323 			printf("%s: unable to allocate pool descriptor\n",
324 			    DEVNAME(sc));
325 			return (ENOMEM);
326 		}
327 		pool_init(xhcixfer, sizeof(struct xhci_xfer), 0, IPL_SOFTUSB,
328 		    0, "xhcixfer", NULL);
329 	}
330 
331 	hcr = XREAD4(sc, XHCI_HCCPARAMS);
332 	sc->sc_ctxsize = XHCI_HCC_CSZ(hcr) ? 64 : 32;
333 	DPRINTF(("%s: %d bytes context\n", DEVNAME(sc), sc->sc_ctxsize));
334 
335 #ifdef XHCI_DEBUG
336 	hcr = XOREAD4(sc, XHCI_PAGESIZE);
337 	printf("%s: supported page size 0x%08x\n", DEVNAME(sc), hcr);
338 #endif
339 	/* Use 4K for the moment since it's easier. */
340 	sc->sc_pagesize = 4096;
341 
342 	/* Get port and device slot numbers. */
343 	hcr = XREAD4(sc, XHCI_HCSPARAMS1);
344 	sc->sc_noport = XHCI_HCS1_N_PORTS(hcr);
345 	sc->sc_noslot = XHCI_HCS1_DEVSLOT_MAX(hcr);
346 	DPRINTF(("%s: %d ports and %d slots\n", DEVNAME(sc), sc->sc_noport,
347 	    sc->sc_noslot));
348 
349 	/* Setup Device Context Base Address Array. */
350 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_dcbaa.dma,
351 	    (void **)&sc->sc_dcbaa.segs, (sc->sc_noslot + 1) * sizeof(uint64_t),
352 	    XHCI_DCBAA_ALIGN, sc->sc_pagesize);
353 	if (error)
354 		return (ENOMEM);
355 
356 	/* Setup command ring. */
357 	rw_init(&sc->sc_cmd_lock, "xhcicmd");
358 	error = xhci_ring_alloc(sc, &sc->sc_cmd_ring, XHCI_MAX_CMDS,
359 	    XHCI_CMDS_RING_ALIGN);
360 	if (error) {
361 		printf("%s: could not allocate command ring.\n", DEVNAME(sc));
362 		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
363 		return (error);
364 	}
365 
366 	/* Setup one event ring and its segment table (ERST). */
367 	error = xhci_ring_alloc(sc, &sc->sc_evt_ring, XHCI_MAX_EVTS,
368 	    XHCI_EVTS_RING_ALIGN);
369 	if (error) {
370 		printf("%s: could not allocate event ring.\n", DEVNAME(sc));
371 		xhci_ring_free(sc, &sc->sc_cmd_ring);
372 		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
373 		return (error);
374 	}
375 
376 	/* Allocate the required entry for the segment table. */
377 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_erst.dma,
378 	    (void **)&sc->sc_erst.segs, sizeof(struct xhci_erseg),
379 	    XHCI_ERST_ALIGN, XHCI_ERST_BOUNDARY);
380 	if (error) {
381 		printf("%s: could not allocate segment table.\n", DEVNAME(sc));
382 		xhci_ring_free(sc, &sc->sc_evt_ring);
383 		xhci_ring_free(sc, &sc->sc_cmd_ring);
384 		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
385 		return (ENOMEM);
386 	}
387 
388 	/* Set our ring address and size in its corresponding segment. */
389 	sc->sc_erst.segs[0].er_addr = htole64(sc->sc_evt_ring.dma.paddr);
390 	sc->sc_erst.segs[0].er_size = htole32(XHCI_MAX_EVTS);
391 	sc->sc_erst.segs[0].er_rsvd = 0;
392 	bus_dmamap_sync(sc->sc_erst.dma.tag, sc->sc_erst.dma.map, 0,
393 	    sc->sc_erst.dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
394 
395 	/* Get the number of scratch pages and configure them if necessary. */
396 	hcr = XREAD4(sc, XHCI_HCSPARAMS2);
397 	npage = XHCI_HCS2_SPB_MAX(hcr);
398 	DPRINTF(("%s: %u scratch pages, ETE=%u, IST=0x%x\n", DEVNAME(sc), npage,
399 	   XHCI_HCS2_ETE(hcr), XHCI_HCS2_IST(hcr)));
400 
401 	if (npage > 0 && xhci_scratchpad_alloc(sc, npage)) {
402 		printf("%s: could not allocate scratchpad.\n", DEVNAME(sc));
403 		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
404 		xhci_ring_free(sc, &sc->sc_evt_ring);
405 		xhci_ring_free(sc, &sc->sc_cmd_ring);
406 		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
407 		return (ENOMEM);
408 	}
409 
410 
411 	return (0);
412 }
413 
414 void
415 xhci_config(struct xhci_softc *sc)
416 {
417 	uint64_t paddr;
418 	uint32_t hcr;
419 
420 	/* Make sure to program a number of device slots we can handle. */
421 	if (sc->sc_noslot > USB_MAX_DEVICES)
422 		sc->sc_noslot = USB_MAX_DEVICES;
423 	hcr = XOREAD4(sc, XHCI_CONFIG) & ~XHCI_CONFIG_SLOTS_MASK;
424 	XOWRITE4(sc, XHCI_CONFIG, hcr | sc->sc_noslot);
425 
426 	/* Set the device context base array address. */
427 	paddr = (uint64_t)sc->sc_dcbaa.dma.paddr;
428 	XOWRITE4(sc, XHCI_DCBAAP_LO, (uint32_t)paddr);
429 	XOWRITE4(sc, XHCI_DCBAAP_HI, (uint32_t)(paddr >> 32));
430 
431 	DPRINTF(("%s: DCBAAP=%#x%#x\n", DEVNAME(sc),
432 	    XOREAD4(sc, XHCI_DCBAAP_HI), XOREAD4(sc, XHCI_DCBAAP_LO)));
433 
434 	/* Set the command ring address. */
435 	paddr = (uint64_t)sc->sc_cmd_ring.dma.paddr;
436 	XOWRITE4(sc, XHCI_CRCR_LO, ((uint32_t)paddr) | XHCI_CRCR_LO_RCS);
437 	XOWRITE4(sc, XHCI_CRCR_HI, (uint32_t)(paddr >> 32));
438 
439 	DPRINTF(("%s: CRCR=%#x%#x (%016llx)\n", DEVNAME(sc),
440 	    XOREAD4(sc, XHCI_CRCR_HI), XOREAD4(sc, XHCI_CRCR_LO), paddr));
441 
442 	/* Set the ERST count number to 1, since we use only one event ring. */
443 	XRWRITE4(sc, XHCI_ERSTSZ(0), XHCI_ERSTS_SET(1));
444 
445 	/* Set the segment table address. */
446 	paddr = (uint64_t)sc->sc_erst.dma.paddr;
447 	XRWRITE4(sc, XHCI_ERSTBA_LO(0), (uint32_t)paddr);
448 	XRWRITE4(sc, XHCI_ERSTBA_HI(0), (uint32_t)(paddr >> 32));
449 
450 	DPRINTF(("%s: ERSTBA=%#x%#x\n", DEVNAME(sc),
451 	    XRREAD4(sc, XHCI_ERSTBA_HI(0)), XRREAD4(sc, XHCI_ERSTBA_LO(0))));
452 
453 	/* Set the ring dequeue address. */
454 	paddr = (uint64_t)sc->sc_evt_ring.dma.paddr;
455 	XRWRITE4(sc, XHCI_ERDP_LO(0), (uint32_t)paddr);
456 	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));
457 
458 	DPRINTF(("%s: ERDP=%#x%#x\n", DEVNAME(sc),
459 	    XRREAD4(sc, XHCI_ERDP_HI(0)), XRREAD4(sc, XHCI_ERDP_LO(0))));
460 
461 	/* Enable interrupts. */
462 	hcr = XRREAD4(sc, XHCI_IMAN(0));
463 	XRWRITE4(sc, XHCI_IMAN(0), hcr | XHCI_IMAN_INTR_ENA);
464 
465 	/* Set default interrupt moderation. */
466 	XRWRITE4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT);
467 
468 	/* Allow event interrupt and start the controller. */
469 	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);
470 
471 	DPRINTF(("%s: USBCMD=%#x\n", DEVNAME(sc), XOREAD4(sc, XHCI_USBCMD)));
472 	DPRINTF(("%s: IMAN=%#x\n", DEVNAME(sc), XRREAD4(sc, XHCI_IMAN(0))));
473 }
474 
475 int
476 xhci_detach(struct device *self, int flags)
477 {
478 	struct xhci_softc *sc = (struct xhci_softc *)self;
479 	int rv;
480 
481 	rv = config_detach_children(self, flags);
482 	if (rv != 0) {
483 		printf("%s: error while detaching %d\n", DEVNAME(sc), rv);
484 		return (rv);
485 	}
486 
487 	/* Since the hardware might already be gone, ignore the errors. */
488 	xhci_command_abort(sc);
489 
490 	xhci_reset(sc);
491 
492 	/* Disable interrupts. */
493 	XRWRITE4(sc, XHCI_IMOD(0), 0);
494 	XRWRITE4(sc, XHCI_IMAN(0), 0);
495 
496 	/* Clear the event ring address. */
497 	XRWRITE4(sc, XHCI_ERDP_LO(0), 0);
498 	XRWRITE4(sc, XHCI_ERDP_HI(0), 0);
499 
500 	XRWRITE4(sc, XHCI_ERSTBA_LO(0), 0);
501 	XRWRITE4(sc, XHCI_ERSTBA_HI(0), 0);
502 
503 	XRWRITE4(sc, XHCI_ERSTSZ(0), 0);
504 
505 	/* Clear the command ring address. */
506 	XOWRITE4(sc, XHCI_CRCR_LO, 0);
507 	XOWRITE4(sc, XHCI_CRCR_HI, 0);
508 
509 	XOWRITE4(sc, XHCI_DCBAAP_LO, 0);
510 	XOWRITE4(sc, XHCI_DCBAAP_HI, 0);
511 
512 	if (sc->sc_spad.npage > 0)
513 		xhci_scratchpad_free(sc);
514 
515 	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
516 	xhci_ring_free(sc, &sc->sc_evt_ring);
517 	xhci_ring_free(sc, &sc->sc_cmd_ring);
518 	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
519 
520 	return (0);
521 }
522 
523 int
524 xhci_activate(struct device *self, int act)
525 {
526 	struct xhci_softc *sc = (struct xhci_softc *)self;
527 	int rv = 0;
528 
529 	switch (act) {
530 	case DVACT_RESUME:
531 		sc->sc_bus.use_polling++;
532 
533 		xhci_reset(sc);
534 		xhci_ring_reset(sc, &sc->sc_cmd_ring);
535 		xhci_ring_reset(sc, &sc->sc_evt_ring);
536 
537 		/* Renesas controllers, at least, need more time to resume. */
538 		usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);
539 
540 		xhci_config(sc);
541 
542 		sc->sc_bus.use_polling--;
543 		rv = config_activate_children(self, act);
544 		break;
545 	case DVACT_POWERDOWN:
546 		rv = config_activate_children(self, act);
547 		xhci_reset(sc);
548 		break;
549 	default:
550 		rv = config_activate_children(self, act);
551 		break;
552 	}
553 
554 	return (rv);
555 }
556 
557 int
558 xhci_reset(struct xhci_softc *sc)
559 {
560 	uint32_t hcr;
561 	int i;
562 
563 	XOWRITE4(sc, XHCI_USBCMD, 0);	/* Halt controller */
564 	for (i = 0; i < 100; i++) {
565 		usb_delay_ms(&sc->sc_bus, 1);
566 		hcr = XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_HCH;
567 		if (hcr)
568 			break;
569 	}
570 
571 	if (!hcr)
572 		printf("%s: halt timeout\n", DEVNAME(sc));
573 
574 	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_HCRST);
575 	for (i = 0; i < 100; i++) {
576 		usb_delay_ms(&sc->sc_bus, 1);
577 		hcr = (XOREAD4(sc, XHCI_USBCMD) & XHCI_CMD_HCRST) |
578 		    (XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_CNR);
579 		if (!hcr)
580 			break;
581 	}
582 
583 	if (hcr) {
584 		printf("%s: reset timeout\n", DEVNAME(sc));
585 		return (EIO);
586 	}
587 
588 	return (0);
589 }
590 
591 
592 int
593 xhci_intr(void *v)
594 {
595 	struct xhci_softc *sc = v;
596 
597 	if (sc == NULL || sc->sc_bus.dying)
598 		return (0);
599 
600 	/* If we get an interrupt while polling, then just ignore it. */
601 	if (sc->sc_bus.use_polling) {
602 		DPRINTFN(16, ("xhci_intr: ignored interrupt while polling\n"));
603 		return (0);
604 	}
605 
606 	return (xhci_intr1(sc));
607 }
608 
609 int
610 xhci_intr1(struct xhci_softc *sc)
611 {
612 	uint32_t intrs;
613 
614 	intrs = XOREAD4(sc, XHCI_USBSTS);
615 	if (intrs == 0xffffffff) {
616 		sc->sc_bus.dying = 1;
617 		return (0);
618 	}
619 
620 	if ((intrs & XHCI_STS_EINT) == 0)
621 		return (0);
622 
623 	sc->sc_bus.no_intrs++;
624 
625 	if (intrs & XHCI_STS_HSE) {
626 		printf("%s: host system error\n", DEVNAME(sc));
627 		sc->sc_bus.dying = 1;
628 		return (1);
629 	}
630 
631 	/* Acknowledge interrupts */
632 	XOWRITE4(sc, XHCI_USBSTS, intrs);
633 	intrs = XRREAD4(sc, XHCI_IMAN(0));
634 	XRWRITE4(sc, XHCI_IMAN(0), intrs | XHCI_IMAN_INTR_PEND);
635 
636 	usb_schedsoftintr(&sc->sc_bus);
637 
638 	return (1);
639 }
640 
641 void
642 xhci_poll(struct usbd_bus *bus)
643 {
644 	struct xhci_softc *sc = (struct xhci_softc *)bus;
645 
646 	if (XOREAD4(sc, XHCI_USBSTS))
647 		xhci_intr1(sc);
648 }
649 
650 void
651 xhci_softintr(void *v)
652 {
653 	struct xhci_softc *sc = v;
654 
655 	if (sc->sc_bus.dying)
656 		return;
657 
658 	sc->sc_bus.intr_context++;
659 	xhci_event_dequeue(sc);
660 	sc->sc_bus.intr_context--;
661 }
662 
663 void
664 xhci_event_dequeue(struct xhci_softc *sc)
665 {
666 	struct xhci_trb *trb;
667 	uint64_t paddr;
668 	uint32_t status, flags;
669 
670 	while ((trb = xhci_ring_consume(sc, &sc->sc_evt_ring)) != NULL) {
671 		paddr = letoh64(trb->trb_paddr);
672 		status = letoh32(trb->trb_status);
673 		flags = letoh32(trb->trb_flags);
674 
675 		switch (flags & XHCI_TRB_TYPE_MASK) {
676 		case XHCI_EVT_XFER:
677 			xhci_event_xfer(sc, paddr, status, flags);
678 			break;
679 		case XHCI_EVT_CMD_COMPLETE:
680 			memcpy(&sc->sc_result_trb, trb, sizeof(*trb));
681 			xhci_event_command(sc, paddr);
682 			break;
683 		case XHCI_EVT_PORT_CHANGE:
684 			xhci_event_port_change(sc, paddr, status);
685 			break;
686 		case XHCI_EVT_HOST_CTRL:
687 			/* TODO */
688 			break;
689 		default:
690 #ifdef XHCI_DEBUG
691 			printf("event (%d): ", XHCI_TRB_TYPE(flags));
692 			xhci_dump_trb(trb);
693 #endif
694 			break;
695 		}
696 
697 	}
698 
699 	paddr = (uint64_t)DEQPTR(sc->sc_evt_ring);
700 	XRWRITE4(sc, XHCI_ERDP_LO(0), ((uint32_t)paddr) | XHCI_ERDP_LO_BUSY);
701 	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));
702 }
703 
704 void
705 xhci_skip_all(struct xhci_pipe *xp)
706 {
707 	struct usbd_xfer *xfer, *last;
708 
709 	if (xp->skip) {
710 		/*
711 		 * Find the last transfer to skip; this is necessary
712 		 * because xhci_xfer_done() posts new transfers which we
713 		 * don't want to skip.
714 		 */
715 		last = SIMPLEQ_FIRST(&xp->pipe.queue);
716 		if (last == NULL)
717 			goto done;
718 		while ((xfer = SIMPLEQ_NEXT(last, next)) != NULL)
719 			last = xfer;
720 
721 		do {
722 			xfer = SIMPLEQ_FIRST(&xp->pipe.queue);
723 			if (xfer == NULL)
724 				goto done;
725 			DPRINTF(("%s: skipping %p\n", __func__, xfer));
726 			xfer->status = USBD_NORMAL_COMPLETION;
727 			xhci_xfer_done(xfer);
728 		} while (xfer != last);
729 	done:
730 		xp->skip = 0;
731 	}
732 }
733 
734 void
735 xhci_event_xfer(struct xhci_softc *sc, uint64_t paddr, uint32_t status,
736     uint32_t flags)
737 {
738 	struct xhci_pipe *xp;
739 	struct usbd_xfer *xfer;
740 	uint8_t dci, slot, code, xfertype;
741 	uint32_t remain;
742 	int trb_idx;
743 
744 	slot = XHCI_TRB_GET_SLOT(flags);
745 	dci = XHCI_TRB_GET_EP(flags);
746 	if (slot > sc->sc_noslot) {
747 		DPRINTF(("%s: incorrect slot (%u)\n", DEVNAME(sc), slot));
748 		return;
749 	}
750 
751 	xp = sc->sc_sdevs[slot].pipes[dci - 1];
752 	if (xp == NULL) {
753 		DPRINTF(("%s: incorrect dci (%u)\n", DEVNAME(sc), dci));
754 		return;
755 	}
756 
757 	code = XHCI_TRB_GET_CODE(status);
758 	remain = XHCI_TRB_REMAIN(status);
759 
760 	switch (code) {
761 	case XHCI_CODE_RING_UNDERRUN:
762 		DPRINTF(("%s: slot %u underrun with %zu TRB\n", DEVNAME(sc),
763 		    slot, xp->ring.ntrb - xp->free_trbs));
764 		xhci_skip_all(xp);
765 		return;
766 	case XHCI_CODE_RING_OVERRUN:
767 		DPRINTF(("%s: slot %u overrun with %zu TRB\n", DEVNAME(sc),
768 		    slot, xp->ring.ntrb - xp->free_trbs));
769 		xhci_skip_all(xp);
770 		return;
771 	case XHCI_CODE_MISSED_SRV:
772 		DPRINTF(("%s: slot %u missed srv with %zu TRB\n", DEVNAME(sc),
773 		    slot, xp->ring.ntrb - xp->free_trbs));
774 		xp->skip = 1;
775 		return;
776 	default:
777 		break;
778 	}
779 
780 	trb_idx = (paddr - xp->ring.dma.paddr) / sizeof(struct xhci_trb);
781 	if (trb_idx < 0 || trb_idx >= xp->ring.ntrb) {
782 		printf("%s: wrong trb index (%u) max is %zu\n", DEVNAME(sc),
783 		    trb_idx, xp->ring.ntrb - 1);
784 		return;
785 	}
786 
787 	xfer = xp->pending_xfers[trb_idx];
788 	if (xfer == NULL) {
789 		DPRINTF(("%s: NULL xfer pointer\n", DEVNAME(sc)));
790 		return;
791 	}
792 
793 	if (remain > xfer->length)
794 		remain = xfer->length;
795 
796 	xfertype = UE_GET_XFERTYPE(xfer->pipe->endpoint->edesc->bmAttributes);
797 
798 	switch (xfertype) {
799 	case UE_BULK:
800 	case UE_INTERRUPT:
801 	case UE_CONTROL:
802 		if (xhci_event_xfer_generic(sc, xfer, xp, remain, trb_idx,
803 		    code, slot, dci))
804 			return;
805 		break;
806 	case UE_ISOCHRONOUS:
807 		if (xhci_event_xfer_isoc(xfer, xp, remain, trb_idx, code))
808 			return;
809 		break;
810 	default:
811 		panic("xhci_event_xfer: unknown xfer type %u", xfertype);
812 	}
813 
814 	xhci_xfer_done(xfer);
815 }
816 
817 uint32_t
818 xhci_xfer_length_generic(struct xhci_xfer *xx, struct xhci_pipe *xp,
819     int trb_idx)
820 {
821 	int	 trb0_idx;
822 	uint32_t len = 0, type;
823 
824 	trb0_idx =
825 	    ((xx->index + xp->ring.ntrb) - xx->ntrb) % (xp->ring.ntrb - 1);
826 
827 	while (1) {
828 		type = letoh32(xp->ring.trbs[trb0_idx].trb_flags) &
829 		    XHCI_TRB_TYPE_MASK;
830 		if (type == XHCI_TRB_TYPE_NORMAL || type == XHCI_TRB_TYPE_DATA)
831 			len += XHCI_TRB_LEN(letoh32(
832 			    xp->ring.trbs[trb0_idx].trb_status));
833 		if (trb0_idx == trb_idx)
834 			break;
835 		if (++trb0_idx == xp->ring.ntrb)
836 			trb0_idx = 0;
837 	}
838 	return len;
839 }
840 
841 int
842 xhci_event_xfer_generic(struct xhci_softc *sc, struct usbd_xfer *xfer,
843     struct xhci_pipe *xp, uint32_t remain, int trb_idx,
844     uint8_t code, uint8_t slot, uint8_t dci)
845 {
846 	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
847 
848 	switch (code) {
849 	case XHCI_CODE_SUCCESS:
850 		if (xfer->actlen == 0) {
851 			if (remain)
852 				xfer->actlen =
853 				    xhci_xfer_length_generic(xx, xp, trb_idx) -
854 				    remain;
855 			else
856 				xfer->actlen = xfer->length;
857 		}
858 		if (xfer->actlen)
859 			usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
860 			    usbd_xfer_isread(xfer) ?
861 			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
862 		xfer->status = USBD_NORMAL_COMPLETION;
863 		break;
864 	case XHCI_CODE_SHORT_XFER:
865 		/*
866 		 * Use values from the transfer TRB instead of the status TRB.
867 		 */
868 		if (xfer->actlen == 0)
869 			xfer->actlen =
870 			    xhci_xfer_length_generic(xx, xp, trb_idx) - remain;
871 		/*
872 		 * If this is not the last TRB of a transfer, we should
873 		 * theoretically clear the IOC at the end of the chain
874 		 * but the HC might have already processed it before we
875 		 * had a chance to schedule the softinterrupt.
876 		 */
877 		if (xx->index != trb_idx) {
878 			DPRINTF(("%s: short xfer %p for %u\n",
879 			    DEVNAME(sc), xfer, xx->index));
880 			return (1);
881 		}
882 		if (xfer->actlen)
883 			usb_syncmem(&xfer->dmabuf, 0, xfer->actlen,
884 			    usbd_xfer_isread(xfer) ?
885 			    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
886 		xfer->status = USBD_NORMAL_COMPLETION;
887 		break;
888 	case XHCI_CODE_TXERR:
889 	case XHCI_CODE_SPLITERR:
890 		DPRINTF(("%s: txerr? code %d\n", DEVNAME(sc), code));
891 		xfer->status = USBD_IOERROR;
892 		break;
893 	case XHCI_CODE_STALL:
894 	case XHCI_CODE_BABBLE:
895 		DPRINTF(("%s: babble code %d\n", DEVNAME(sc), code));
896 		/* Prevent any timeout from kicking in. */
897 		timeout_del(&xfer->timeout_handle);
898 		usb_rem_task(xfer->device, &xfer->abort_task);
899 
900 		/* We need to report this condition for umass(4). */
901 		if (code == XHCI_CODE_STALL)
902 			xp->halted = USBD_STALLED;
903 		else
904 			xp->halted = USBD_IOERROR;
905 		/*
906 		 * Since the stack might try to start a new transfer as
907 		 * soon as a pending one finishes, make sure the endpoint
908 		 * is fully reset before calling usb_transfer_complete().
909 		 */
910 		xp->aborted_xfer = xfer;
911 		xhci_cmd_reset_ep_async(sc, slot, dci);
912 		return (1);
913 	case XHCI_CODE_XFER_STOPPED:
914 	case XHCI_CODE_XFER_STOPINV:
915 		/* Endpoint stopped while processing a TD. */
916 		if (xfer == xp->aborted_xfer) {
917 			DPRINTF(("%s: stopped xfer=%p\n", __func__, xfer));
918 			return (1);
919 		}
920 
921 		/* FALLTHROUGH */
922 	default:
923 		DPRINTF(("%s: unhandled code %d\n", DEVNAME(sc), code));
924 		xfer->status = USBD_IOERROR;
925 		xp->halted = 1;
926 		break;
927 	}
928 
929 	return (0);
930 }
931 
932 int
933 xhci_event_xfer_isoc(struct usbd_xfer *xfer, struct xhci_pipe *xp,
934     uint32_t remain, int trb_idx, uint8_t code)
935 {
936 	struct usbd_xfer *skipxfer;
937 	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
938 	int trb0_idx, frame_idx = 0, skip_trb = 0;
939 
940 	KASSERT(xx->index >= 0);
941 
942 	switch (code) {
943 	case XHCI_CODE_SHORT_XFER:
944 		xp->trb_processed[trb_idx] = TRB_PROCESSED_SHORT;
945 		break;
946 	default:
947 		xp->trb_processed[trb_idx] = TRB_PROCESSED_YES;
948 		break;
949 	}
950 
951 	trb0_idx =
952 	    ((xx->index + xp->ring.ntrb) - xx->ntrb) % (xp->ring.ntrb - 1);
953 
954 	/* Find the corresponding frame index for this TRB. */
955 	while (trb0_idx != trb_idx) {
956 		if ((letoh32(xp->ring.trbs[trb0_idx].trb_flags) &
957 		    XHCI_TRB_TYPE_MASK) == XHCI_TRB_TYPE_ISOCH)
958 			frame_idx++;
959 		if (trb0_idx++ == (xp->ring.ntrb - 1))
960 			trb0_idx = 0;
961 	}
962 
963 	/*
964 	 * If we queued two TRBs for a frame and this is the second TRB,
965 	 * check if the first TRB needs accounting since it might not have
966 	 * raised an interrupt if the full data was received.
967 	 */
968 	if ((letoh32(xp->ring.trbs[trb_idx].trb_flags) & XHCI_TRB_TYPE_MASK) ==
969 	    XHCI_TRB_TYPE_NORMAL) {
970 		frame_idx--;
971 		if (trb_idx == 0)
972 			trb0_idx = xp->ring.ntrb - 2;
973 		else
974 			trb0_idx = trb_idx - 1;
975 		if (xp->trb_processed[trb0_idx] == TRB_PROCESSED_NO) {
976 			xfer->frlengths[frame_idx] = XHCI_TRB_LEN(letoh32(
977 			    xp->ring.trbs[trb0_idx].trb_status));
978 		} else if (xp->trb_processed[trb0_idx] == TRB_PROCESSED_SHORT) {
979 			skip_trb = 1;
980 		}
981 	}
982 
983 	if (!skip_trb) {
984 		xfer->frlengths[frame_idx] +=
985 		    XHCI_TRB_LEN(letoh32(xp->ring.trbs[trb_idx].trb_status)) -
986 		    remain;
987 		xfer->actlen += xfer->frlengths[frame_idx];
988 	}
989 
990 	if (xx->index != trb_idx)
991 		return (1);
992 
993 	if (xp->skip) {
994 		while (1) {
995 			skipxfer = SIMPLEQ_FIRST(&xp->pipe.queue);
996 			if (skipxfer == xfer || skipxfer == NULL)
997 				break;
998 			DPRINTF(("%s: skipping %p\n", __func__, skipxfer));
999 			skipxfer->status = USBD_NORMAL_COMPLETION;
1000 			xhci_xfer_done(skipxfer);
1001 		}
1002 		xp->skip = 0;
1003 	}
1004 
1005 	usb_syncmem(&xfer->dmabuf, 0, xfer->length,
1006 	    usbd_xfer_isread(xfer) ?
1007 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1008 	xfer->status = USBD_NORMAL_COMPLETION;
1009 
1010 	return (0);
1011 }
1012 
1013 void
1014 xhci_event_command(struct xhci_softc *sc, uint64_t paddr)
1015 {
1016 	struct xhci_trb *trb;
1017 	struct xhci_pipe *xp;
1018 	uint32_t flags;
1019 	uint8_t dci, slot;
1020 	int trb_idx, status;
1021 
1022 	trb_idx = (paddr - sc->sc_cmd_ring.dma.paddr) / sizeof(*trb);
1023 	if (trb_idx < 0 || trb_idx >= sc->sc_cmd_ring.ntrb) {
1024 		printf("%s: wrong trb index (%u) max is %zu\n", DEVNAME(sc),
1025 		    trb_idx, sc->sc_cmd_ring.ntrb - 1);
1026 		return;
1027 	}
1028 
1029 	trb = &sc->sc_cmd_ring.trbs[trb_idx];
1030 
1031 	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
1032 	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
1033 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1034 
1035 	flags = letoh32(trb->trb_flags);
1036 
1037 	slot = XHCI_TRB_GET_SLOT(flags);
1038 	dci = XHCI_TRB_GET_EP(flags);
1039 
1040 	switch (flags & XHCI_TRB_TYPE_MASK) {
1041 	case XHCI_CMD_RESET_EP:
1042 		xp = sc->sc_sdevs[slot].pipes[dci - 1];
1043 		if (xp == NULL)
1044 			break;
1045 
1046 		/* Update the dequeue pointer past the last TRB. */
1047 		xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
1048 		    DEQPTR(xp->ring) | xp->ring.toggle);
1049 		break;
1050 	case XHCI_CMD_SET_TR_DEQ:
1051 		xp = sc->sc_sdevs[slot].pipes[dci - 1];
1052 		if (xp == NULL)
1053 			break;
1054 
1055 		status = xp->halted;
1056 		xp->halted = 0;
1057 		if (xp->aborted_xfer != NULL) {
1058 			xp->aborted_xfer->status = status;
1059 			xhci_xfer_done(xp->aborted_xfer);
1060 			wakeup(xp);
1061 		}
1062 		break;
1063 	case XHCI_CMD_CONFIG_EP:
1064 	case XHCI_CMD_STOP_EP:
1065 	case XHCI_CMD_DISABLE_SLOT:
1066 	case XHCI_CMD_ENABLE_SLOT:
1067 	case XHCI_CMD_ADDRESS_DEVICE:
1068 	case XHCI_CMD_EVAL_CTX:
1069 	case XHCI_CMD_NOOP:
1070 		/*
1071 		 * All these commands are synchronous.
1072 		 *
1073 		 * If TRBs differ, this could be a delayed result after we
1074 		 * gave up waiting for the expected TRB due to timeout.
1075 		 */
1076 		if (sc->sc_cmd_trb == trb) {
1077 			sc->sc_cmd_trb = NULL;
1078 			wakeup(&sc->sc_cmd_trb);
1079 		}
1080 		break;
1081 	default:
1082 		DPRINTF(("%s: unexpected command %x\n", DEVNAME(sc), flags));
1083 	}
1084 }
1085 
1086 void
1087 xhci_event_port_change(struct xhci_softc *sc, uint64_t paddr, uint32_t status)
1088 {
1089 	struct usbd_xfer *xfer = sc->sc_intrxfer;
1090 	uint32_t port = XHCI_TRB_PORTID(paddr);
1091 	uint8_t *p;
1092 
1093 	if (XHCI_TRB_GET_CODE(status) != XHCI_CODE_SUCCESS) {
1094 		DPRINTF(("%s: failed port status event\n", DEVNAME(sc)));
1095 		return;
1096 	}
1097 
1098 	if (xfer == NULL)
1099 		return;
1100 
1101 	p = KERNADDR(&xfer->dmabuf, 0);
1102 	memset(p, 0, xfer->length);
1103 
1104 	p[port/8] |= 1 << (port%8);
1105 	DPRINTF(("%s: port=%d change=0x%02x\n", DEVNAME(sc), port, *p));
1106 
1107 	xfer->actlen = xfer->length;
1108 	xfer->status = USBD_NORMAL_COMPLETION;
1109 
1110 	usb_transfer_complete(xfer);
1111 }
1112 
1113 void
1114 xhci_xfer_done(struct usbd_xfer *xfer)
1115 {
1116 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
1117 	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
1118 	int ntrb, i;
1119 
1120 	splsoftassert(IPL_SOFTUSB);
1121 
1122 #ifdef XHCI_DEBUG
1123 	if (xx->index < 0 || xp->pending_xfers[xx->index] == NULL) {
1124 		printf("%s: xfer=%p done (idx=%d, ntrb=%zd)\n", __func__,
1125 		    xfer, xx->index, xx->ntrb);
1126 	}
1127 #endif
1128 
1129 	if (xp->aborted_xfer == xfer)
1130 		xp->aborted_xfer = NULL;
1131 
1132 	for (ntrb = 0, i = xx->index; ntrb < xx->ntrb; ntrb++, i--) {
1133 		xp->pending_xfers[i] = NULL;
1134 		if (i == 0)
1135 			i = (xp->ring.ntrb - 1);
1136 	}
1137 	xp->free_trbs += xx->ntrb;
1138 	xp->free_trbs += xx->zerotd;
1139 	xx->index = -1;
1140 	xx->ntrb = 0;
1141 	xx->zerotd = 0;
1142 
1143 	timeout_del(&xfer->timeout_handle);
1144 	usb_rem_task(xfer->device, &xfer->abort_task);
1145 	usb_transfer_complete(xfer);
1146 }
1147 
1148 /*
1149  * Calculate the Device Context Index (DCI) for endpoints as stated
1150  * in section 4.5.1 of the xHCI specification r1.1.
1151  */
1152 static inline uint8_t
1153 xhci_ed2dci(usb_endpoint_descriptor_t *ed)
1154 {
1155 	uint8_t dir;
1156 
1157 	if (UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL)
1158 		return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + 1);
1159 
1160 	if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)
1161 		dir = 1;
1162 	else
1163 		dir = 0;
1164 
1165 	return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + dir);
1166 }
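
/*
 * Worked examples of the mapping above (per section 4.5.1 of the
 * xHCI specification): the default control endpoint (address 0)
 * always maps to DCI 1, a bulk OUT endpoint 0x02 maps to
 * 2 * 2 + 0 = 4 and the matching bulk IN endpoint 0x82 maps to
 * 2 * 2 + 1 = 5.
 */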
1167 
1168 usbd_status
1169 xhci_pipe_open(struct usbd_pipe *pipe)
1170 {
1171 	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
1172 	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
1173 	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
1174 	uint8_t slot = 0, xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
1175 	int error;
1176 
1177 	KASSERT(xp->slot == 0);
1178 
1179 	if (sc->sc_bus.dying)
1180 		return (USBD_IOERROR);
1181 
1182 	/* Root Hub */
1183 	if (pipe->device->depth == 0) {
1184 		switch (ed->bEndpointAddress) {
1185 		case USB_CONTROL_ENDPOINT:
1186 			pipe->methods = &xhci_root_ctrl_methods;
1187 			break;
1188 		case UE_DIR_IN | XHCI_INTR_ENDPT:
1189 			pipe->methods = &xhci_root_intr_methods;
1190 			break;
1191 		default:
1192 			pipe->methods = NULL;
1193 			return (USBD_INVAL);
1194 		}
1195 		return (USBD_NORMAL_COMPLETION);
1196 	}
1197 
1198 #if 0
1199 	/* Issue a noop to check if the command ring is correctly configured. */
1200 	xhci_cmd_noop(sc);
1201 #endif
1202 
1203 	switch (xfertype) {
1204 	case UE_CONTROL:
1205 		pipe->methods = &xhci_device_ctrl_methods;
1206 
1207 		/*
1208 		 * Get a slot and init the device's contexts.
1209 		 *
1210 		 * Since the control endpoint, represented as the default
1211 		 * pipe, is always opened first, we are dealing with a
1212 		 * new device.  Put a new slot in the ENABLED state.
1213 		 *
1214 		 */
1215 		error = xhci_cmd_slot_control(sc, &slot, 1);
1216 		if (error || slot == 0 || slot > sc->sc_noslot)
1217 			return (USBD_INVAL);
1218 
1219 		if (xhci_softdev_alloc(sc, slot)) {
1220 			xhci_cmd_slot_control(sc, &slot, 0);
1221 			return (USBD_NOMEM);
1222 		}
1223 
1224 		break;
1225 	case UE_ISOCHRONOUS:
1226 		pipe->methods = &xhci_device_isoc_methods;
1227 		break;
1228 	case UE_BULK:
1229 		pipe->methods = &xhci_device_bulk_methods;
1230 		break;
1231 	case UE_INTERRUPT:
1232 		pipe->methods = &xhci_device_intr_methods;
1233 		break;
1234 	default:
1235 		return (USBD_INVAL);
1236 	}
1237 
1238 	/*
1239 	 * Our USBD Bus Interface is pipe-oriented but for most of the
1240 	 * operations we need to access a device context, so keep track
1241 	 * of the slot ID in every pipe.
1242 	 */
1243 	if (slot == 0)
1244 		slot = ((struct xhci_pipe *)pipe->device->default_pipe)->slot;
1245 
1246 	xp->slot = slot;
1247 	xp->dci = xhci_ed2dci(ed);
1248 
1249 	if (xhci_pipe_init(sc, pipe)) {
1250 		xhci_cmd_slot_control(sc, &slot, 0);
1251 		return (USBD_IOERROR);
1252 	}
1253 
1254 	return (USBD_NORMAL_COMPLETION);
1255 }
1256 
1257 /*
1258  * Set the maximum Endpoint Service Interface Time (ESIT) payload and
1259  * the average TRB buffer length for an endpoint.
1260  */
1261 static inline uint32_t
1262 xhci_get_txinfo(struct xhci_softc *sc, struct usbd_pipe *pipe)
1263 {
1264 	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
1265 	uint32_t mep, atl, mps = UGETW(ed->wMaxPacketSize);
1266 
1267 	switch (UE_GET_XFERTYPE(ed->bmAttributes)) {
1268 	case UE_CONTROL:
1269 		mep = 0;
1270 		atl = 8;
1271 		break;
1272 	case UE_INTERRUPT:
1273 	case UE_ISOCHRONOUS:
1274 		if (pipe->device->speed == USB_SPEED_SUPER) {
1275 			/*  XXX Read the companion descriptor */
1276 		}
1277 
1278 		mep = (UE_GET_TRANS(mps) + 1) * UE_GET_SIZE(mps);
1279 		atl = mep;
1280 		break;
1281 	case UE_BULK:
1282 	default:
1283 		mep = 0;
1284 		atl = 0;
1285 	}
1286 
1287 	return (XHCI_EPCTX_MAX_ESIT_PAYLOAD(mep) | XHCI_EPCTX_AVG_TRB_LEN(atl));
1288 }
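
/*
 * Worked example for xhci_get_txinfo() (the descriptor values are
 * hypothetical): a high-speed isochronous endpoint advertising
 * wMaxPacketSize = 0x1400 encodes a 1024-byte packet with two
 * additional transactions per microframe, so
 *
 *	mep = (UE_GET_TRANS(mps) + 1) * UE_GET_SIZE(mps)
 *	    = (2 + 1) * 1024 = 3072
 *
 * bytes of Max ESIT Payload, and the same value is used as the
 * average TRB length.
 */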
1289 
1290 static inline uint32_t
1291 xhci_linear_interval(usb_endpoint_descriptor_t *ed)
1292 {
1293 	uint32_t ival = min(max(1, ed->bInterval), 255);
1294 
1295 	return (fls(ival) - 1);
1296 }
1297 
1298 static inline uint32_t
1299 xhci_exponential_interval(usb_endpoint_descriptor_t *ed)
1300 {
1301 	uint32_t ival = min(max(1, ed->bInterval), 16);
1302 
1303 	return (ival - 1);
1304 }
1305 /*
1306  * Return interval for endpoint expressed in 2^(ival) * 125us.
1307  *
1308  * See section 6.2.3.6 of xHCI r1.1 Specification for more details.
1309  */
1310 uint32_t
1311 xhci_pipe_interval(struct usbd_pipe *pipe)
1312 {
1313 	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
1314 	uint8_t speed = pipe->device->speed;
1315 	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
1316 	uint32_t ival;
1317 
1318 	if (xfertype == UE_CONTROL || xfertype == UE_BULK) {
1319 		/* Control and Bulk endpoints never NAK. */
1320 		ival = 0;
1321 	} else {
1322 		switch (speed) {
1323 		case USB_SPEED_FULL:
1324 			if (xfertype == UE_ISOCHRONOUS) {
1325 				/* Convert 1-2^(15)ms into 3-18 */
1326 				ival = xhci_exponential_interval(ed) + 3;
1327 				break;
1328 			}
1329 			/* FALLTHROUGH */
1330 		case USB_SPEED_LOW:
1331 			/* Convert 1-255ms into 3-10 */
1332 			ival = xhci_linear_interval(ed) + 3;
1333 			break;
1334 		case USB_SPEED_HIGH:
1335 		case USB_SPEED_SUPER:
1336 		default:
1337 			/* Convert 1-2^(15) * 125us into 0-15 */
1338 			ival = xhci_exponential_interval(ed);
1339 			break;
1340 		}
1341 	}
1342 
1343 	KASSERT(ival <= 15);
1344 	return (XHCI_EPCTX_SET_IVAL(ival));
1345 }
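
/*
 * Two worked examples for xhci_pipe_interval() (hypothetical
 * descriptors): a high-speed interrupt endpoint with bInterval = 4
 * gives ival = 4 - 1 = 3, i.e. 2^3 * 125us = 1ms; a full-speed
 * interrupt endpoint with bInterval = 32 (ms) gives
 * fls(32) - 1 + 3 = 8, i.e. 2^8 * 125us = 32ms.
 */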
1346 
1347 uint32_t
1348 xhci_pipe_maxburst(struct usbd_pipe *pipe)
1349 {
1350 	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
1351 	uint32_t mps = UGETW(ed->wMaxPacketSize);
1352 	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
1353 	uint32_t maxb = 0;
1354 
1355 	switch (pipe->device->speed) {
1356 	case USB_SPEED_HIGH:
1357 		if (xfertype == UE_ISOCHRONOUS || xfertype == UE_INTERRUPT)
1358 			maxb = UE_GET_TRANS(mps);
1359 		break;
1360 	case USB_SPEED_SUPER:
1361 		/*  XXX Read the companion descriptor */
1362 	default:
1363 		break;
1364 	}
1365 
1366 	return (maxb);
1367 }
1368 
1369 static inline uint32_t
1370 xhci_last_valid_dci(struct xhci_pipe **pipes, struct xhci_pipe *ignore)
1371 {
1372 	struct xhci_pipe *lxp;
1373 	int i;
1374 
1375 	/* Find the last valid Endpoint Context. */
1376 	for (i = 30; i >= 0; i--) {
1377 		lxp = pipes[i];
1378 		if (lxp != NULL && lxp != ignore)
1379 			return XHCI_SCTX_DCI(lxp->dci);
1380 	}
1381 
1382 	return 0;
1383 }
1384 
1385 int
1386 xhci_context_setup(struct xhci_softc *sc, struct usbd_pipe *pipe)
1387 {
1388 	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
1389 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
1390 	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
1391 	uint32_t mps = UGETW(ed->wMaxPacketSize);
1392 	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
1393 	uint8_t speed, cerr = 0;
1394 	uint32_t route = 0, rhport = 0;
1395 	struct usbd_device *hub;
1396 
1397 	/*
1398 	 * Calculate the Route String.  Assume that there is no hub with
1399 	 * more than 15 ports and that they all have a depth < 6.  See
1400 	 * section 8.9 of USB 3.1 Specification for more details.
1401 	 */
1402 	for (hub = pipe->device; hub->myhub->depth; hub = hub->myhub) {
1403 		uint32_t port = hub->powersrc->portno;
1404 		uint32_t depth = hub->myhub->depth;
1405 
1406 		route |= port << (4 * (depth - 1));
1407 	}
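
	/*
	 * Illustrative example: for a device on port 3 of an external
	 * hub that is itself plugged into a root port, the loop above
	 * runs once with port = 3 and depth = 1, giving route = 0x3;
	 * rhport below then becomes the root port the hub hangs off.
	 */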
1408 
1409 	/* Get Root Hub port */
1410 	rhport = hub->powersrc->portno;
1411 
1412 	switch (pipe->device->speed) {
1413 	case USB_SPEED_LOW:
1414 		speed = XHCI_SPEED_LOW;
1415 		break;
1416 	case USB_SPEED_FULL:
1417 		speed = XHCI_SPEED_FULL;
1418 		break;
1419 	case USB_SPEED_HIGH:
1420 		speed = XHCI_SPEED_HIGH;
1421 		break;
1422 	case USB_SPEED_SUPER:
1423 		speed = XHCI_SPEED_SUPER;
1424 		break;
1425 	default:
1426 		return (USBD_INVAL);
1427 	}
1428 
1429 	/* Setup the endpoint context */
1430 	if (xfertype != UE_ISOCHRONOUS)
1431 		cerr = 3;
1432 
1433 	if ((ed->bEndpointAddress & UE_DIR_IN) || (xfertype == UE_CONTROL))
1434 		xfertype |= 0x4;
1435 
1436 	sdev->ep_ctx[xp->dci-1]->info_lo = htole32(xhci_pipe_interval(pipe));
1437 	sdev->ep_ctx[xp->dci-1]->info_hi = htole32(
1438 	    XHCI_EPCTX_SET_MPS(UE_GET_SIZE(mps)) |
1439 	    XHCI_EPCTX_SET_MAXB(xhci_pipe_maxburst(pipe)) |
1440 	    XHCI_EPCTX_SET_EPTYPE(xfertype) | XHCI_EPCTX_SET_CERR(cerr)
1441 	);
1442 	sdev->ep_ctx[xp->dci-1]->txinfo = htole32(xhci_get_txinfo(sc, pipe));
1443 	sdev->ep_ctx[xp->dci-1]->deqp = htole64(
1444 	    DEQPTR(xp->ring) | xp->ring.toggle
1445 	);
1446 
1447 	/* Unmask the new endpoint */
1448 	sdev->input_ctx->drop_flags = 0;
1449 	sdev->input_ctx->add_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));
1450 
1451 	/* Setup the slot context */
1452 	sdev->slot_ctx->info_lo = htole32(
1453 	    xhci_last_valid_dci(sdev->pipes, NULL) | XHCI_SCTX_SPEED(speed) |
1454 	    XHCI_SCTX_ROUTE(route)
1455 	);
1456 	sdev->slot_ctx->info_hi = htole32(XHCI_SCTX_RHPORT(rhport));
1457 	sdev->slot_ctx->tt = 0;
1458 	sdev->slot_ctx->state = 0;
1459 
1460 /* XXX */
1461 #define UHUB_IS_MTT(dev) (dev->ddesc.bDeviceProtocol == UDPROTO_HSHUBMTT)
1462 	/*
1463 	 * If we are opening the interrupt pipe of a hub, update its
1464 	 * context before putting it in the CONFIGURED state.
1465 	 */
1466 	if (pipe->device->hub != NULL) {
1467 		int nports = pipe->device->hub->nports;
1468 
1469 		sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_HUB(1));
1470 		sdev->slot_ctx->info_hi |= htole32(XHCI_SCTX_NPORTS(nports));
1471 
1472 		if (UHUB_IS_MTT(pipe->device))
1473 			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));
1474 
1475 		sdev->slot_ctx->tt |= htole32(
1476 		    XHCI_SCTX_TT_THINK_TIME(pipe->device->hub->ttthink)
1477 		);
1478 	}
1479 
1480 	/*
1481 	 * If this is a Low or Full Speed device below an external High
1482 	 * Speed hub, it needs some TT love.
1483 	 */
1484 	if (speed < XHCI_SPEED_HIGH && pipe->device->myhsport != NULL) {
1485 		struct usbd_device *hshub = pipe->device->myhsport->parent;
1486 		uint8_t slot = ((struct xhci_pipe *)hshub->default_pipe)->slot;
1487 
1488 		if (UHUB_IS_MTT(hshub))
1489 			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));
1490 
1491 		sdev->slot_ctx->tt |= htole32(
1492 		    XHCI_SCTX_TT_HUB_SID(slot) |
1493 		    XHCI_SCTX_TT_PORT_NUM(pipe->device->myhsport->portno)
1494 		);
1495 	}
1496 #undef UHUB_IS_MTT
1497 
1498 	/* Unmask the slot context */
1499 	sdev->input_ctx->add_flags |= htole32(XHCI_INCTX_MASK_DCI(0));
1500 
1501 	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
1502 	    sc->sc_pagesize, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1503 
1504 	return (0);
1505 }
1506 
1507 int
1508 xhci_pipe_init(struct xhci_softc *sc, struct usbd_pipe *pipe)
1509 {
1510 	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
1511 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
1512 	int error;
1513 
1514 #ifdef XHCI_DEBUG
1515 	struct usbd_device *dev = pipe->device;
1516 	printf("%s: pipe=%p addr=%d depth=%d port=%d speed=%d dev %d dci %u"
1517 	    " (epAddr=0x%x)\n", __func__, pipe, dev->address, dev->depth,
1518 	    dev->powersrc->portno, dev->speed, xp->slot, xp->dci,
1519 	    pipe->endpoint->edesc->bEndpointAddress);
1520 #endif
1521 
1522 	if (xhci_ring_alloc(sc, &xp->ring, XHCI_MAX_XFER, XHCI_XFER_RING_ALIGN))
1523 		return (ENOMEM);
1524 
1525 	xp->free_trbs = xp->ring.ntrb;
1526 	xp->halted = 0;
1527 
1528 	sdev->pipes[xp->dci - 1] = xp;
1529 
1530 	error = xhci_context_setup(sc, pipe);
1531 	if (error)
1532 		return (error);
1533 
1534 	if (xp->dci == 1) {
1535 		/*
1536 		 * If we are opening the default pipe, the Slot should
1537 		 * be in the ENABLED state.  Issue an "Address Device"
1538 		 * with BSR=1 to put the device in the DEFAULT state.
1539 		 * We cannot jump directly to the ADDRESSED state with
1540 		 * BSR=0 because some Low/Full speed devices won't accept
1541 		 * a SET_ADDRESS command before we've read their device
1542 		 * descriptor.
1543 		 */
1544 		error = xhci_cmd_set_address(sc, xp->slot,
1545 		    sdev->ictx_dma.paddr, XHCI_TRB_BSR);
1546 	} else {
1547 		error = xhci_cmd_configure_ep(sc, xp->slot,
1548 		    sdev->ictx_dma.paddr);
1549 	}
1550 
1551 	if (error) {
1552 		xhci_ring_free(sc, &xp->ring);
1553 		return (EIO);
1554 	}
1555 
1556 	return (0);
1557 }
1558 
1559 void
1560 xhci_pipe_close(struct usbd_pipe *pipe)
1561 {
1562 	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
1563 	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
1564 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
1565 
1566 	/* Root Hub */
1567 	if (pipe->device->depth == 0)
1568 		return;
1569 
1570 	/* Mask the endpoint */
1571 	sdev->input_ctx->drop_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));
1572 	sdev->input_ctx->add_flags = 0;
1573 
1574 	/* Update last valid Endpoint Context */
1575 	sdev->slot_ctx->info_lo &= htole32(~XHCI_SCTX_DCI(31));
1576 	sdev->slot_ctx->info_lo |= htole32(xhci_last_valid_dci(sdev->pipes, xp));
1577 
1578 	/* Clear the Endpoint Context */
1579 	memset(sdev->ep_ctx[xp->dci - 1], 0, sizeof(struct xhci_epctx));
1580 
1581 	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
1582 	    sc->sc_pagesize, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1583 
1584 	if (xhci_cmd_configure_ep(sc, xp->slot, sdev->ictx_dma.paddr))
1585 		DPRINTF(("%s: error clearing ep (%d)\n", DEVNAME(sc), xp->dci));
1586 
1587 	xhci_ring_free(sc, &xp->ring);
1588 	sdev->pipes[xp->dci - 1] = NULL;
1589 
1590 	/*
1591 	 * If we are closing the default pipe, the device is probably
1592 	 * gone, so put its slot in the DISABLED state.
1593 	 */
1594 	if (xp->dci == 1) {
1595 		xhci_cmd_slot_control(sc, &xp->slot, 0);
1596 		xhci_softdev_free(sc, xp->slot);
1597 	}
1598 }
1599 
1600 /*
1601  * Transition a device from DEFAULT to ADDRESSED Slot state; this hook
1602  * is needed for Low/Full speed devices.
1603  *
1604  * See section 4.5.3 of USB 3.1 Specification for more details.
1605  */
1606 int
1607 xhci_setaddr(struct usbd_device *dev, int addr)
1608 {
1609 	struct xhci_softc *sc = (struct xhci_softc *)dev->bus;
1610 	struct xhci_pipe *xp = (struct xhci_pipe *)dev->default_pipe;
1611 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
1612 	int error;
1613 
1614 	/* Root Hub */
1615 	if (dev->depth == 0)
1616 		return (0);
1617 
1618 	KASSERT(xp->dci == 1);
1619 
1620 	error = xhci_context_setup(sc, dev->default_pipe);
1621 	if (error)
1622 		return (error);
1623 
1624 	error = xhci_cmd_set_address(sc, xp->slot, sdev->ictx_dma.paddr, 0);
1625 
1626 #ifdef XHCI_DEBUG
1627 	if (error == 0) {
1628 		struct xhci_sctx *sctx;
1629 		uint8_t addr;
1630 
1631 		bus_dmamap_sync(sdev->octx_dma.tag, sdev->octx_dma.map, 0,
1632 		    sc->sc_pagesize, BUS_DMASYNC_POSTREAD);
1633 
1634 		/* Get output slot context. */
1635 		sctx = (struct xhci_sctx *)sdev->octx_dma.vaddr;
1636 		addr = XHCI_SCTX_DEV_ADDR(letoh32(sctx->state));
1637 		error = (addr == 0);
1638 
1639 		printf("%s: dev %d addr %d\n", DEVNAME(sc), xp->slot, addr);
1640 	}
1641 #endif
1642 
1643 	return (error);
1644 }
1645 
1646 struct usbd_xfer *
1647 xhci_allocx(struct usbd_bus *bus)
1648 {
1649 	return (pool_get(xhcixfer, PR_NOWAIT | PR_ZERO));
1650 }
1651 
1652 void
1653 xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
1654 {
1655 	pool_put(xhcixfer, xfer);
1656 }
1657 
1658 int
1659 xhci_scratchpad_alloc(struct xhci_softc *sc, int npage)
1660 {
1661 	uint64_t *pte;
1662 	int error, i;
1663 
1664 	/* Allocate the required entry for the table. */
1665 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.table_dma,
1666 	    (void **)&pte, npage * sizeof(uint64_t), XHCI_SPAD_TABLE_ALIGN,
1667 	    sc->sc_pagesize);
1668 	if (error)
1669 		return (ENOMEM);
1670 
1671 	/* Allocate pages. XXX does not need to be contiguous. */
1672 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.pages_dma,
1673 	    NULL, npage * sc->sc_pagesize, sc->sc_pagesize, 0);
1674 	if (error) {
1675 		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
1676 		return (ENOMEM);
1677 	}
1678 
1679 	for (i = 0; i < npage; i++) {
1680 		pte[i] = htole64(
1681 		    sc->sc_spad.pages_dma.paddr + (i * sc->sc_pagesize)
1682 		);
1683 	}
1684 
1685 	bus_dmamap_sync(sc->sc_spad.table_dma.tag, sc->sc_spad.table_dma.map, 0,
1686 	    npage * sizeof(uint64_t), BUS_DMASYNC_PREREAD |
1687 	    BUS_DMASYNC_PREWRITE);
1688 
1689 	/* Entry 0 points to the table of scratchpad pointers. */
1690 	sc->sc_dcbaa.segs[0] = htole64(sc->sc_spad.table_dma.paddr);
1691 	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
1692 	    sizeof(uint64_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1693 
1694 	sc->sc_spad.npage = npage;
1695 
1696 	return (0);
1697 }
1698 
1699 void
1700 xhci_scratchpad_free(struct xhci_softc *sc)
1701 {
1702 	sc->sc_dcbaa.segs[0] = 0;
1703 	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
1704 	    sizeof(uint64_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1705 
1706 	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.pages_dma);
1707 	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
1708 }
1709 
1710 int
1711 xhci_ring_alloc(struct xhci_softc *sc, struct xhci_ring *ring, size_t ntrb,
1712     size_t alignment)
1713 {
1714 	size_t size;
1715 	int error;
1716 
1717 	size = ntrb * sizeof(struct xhci_trb);
1718 
1719 	error = usbd_dma_contig_alloc(&sc->sc_bus, &ring->dma,
1720 	    (void **)&ring->trbs, size, alignment, XHCI_RING_BOUNDARY);
1721 	if (error)
1722 		return (error);
1723 
1724 	ring->ntrb = ntrb;
1725 
1726 	xhci_ring_reset(sc, ring);
1727 
1728 	return (0);
1729 }
1730 
1731 void
1732 xhci_ring_free(struct xhci_softc *sc, struct xhci_ring *ring)
1733 {
1734 	usbd_dma_contig_free(&sc->sc_bus, &ring->dma);
1735 }
1736 
1737 void
1738 xhci_ring_reset(struct xhci_softc *sc, struct xhci_ring *ring)
1739 {
1740 	size_t size;
1741 
1742 	size = ring->ntrb * sizeof(struct xhci_trb);
1743 
1744 	memset(ring->trbs, 0, size);
1745 
1746 	ring->index = 0;
1747 	ring->toggle = XHCI_TRB_CYCLE;
1748 
1749 	/*
1750 	 * Since all our rings use only one segment, at least for
1751 	 * the moment, link their tail to their head.
1752 	 */
1753 	if (ring != &sc->sc_evt_ring) {
1754 		struct xhci_trb *trb = &ring->trbs[ring->ntrb - 1];
1755 
1756 		trb->trb_paddr = htole64(ring->dma.paddr);
1757 		trb->trb_flags = htole32(XHCI_TRB_TYPE_LINK | XHCI_TRB_LINKSEG |
1758 		    XHCI_TRB_CYCLE);
1759 		bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,
1760 		    BUS_DMASYNC_PREWRITE);
1761 	} else
1762 		bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,
1763 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1764 }
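
/*
 * Summary sketch of the cycle-bit protocol used by the ring code
 * above and below (a restatement, not additional logic): after a
 * reset all TRBs are zeroed, so their cycle bits read 0 while
 * ring->toggle starts at XHCI_TRB_CYCLE (1).  Producers write TRBs
 * with the current toggle, consumers only accept a TRB whose cycle
 * bit matches their own toggle, and each side flips its toggle
 * whenever it wraps past the end of the ring.
 */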
1765 
1766 struct xhci_trb*
1767 xhci_ring_consume(struct xhci_softc *sc, struct xhci_ring *ring)
1768 {
1769 	struct xhci_trb *trb = &ring->trbs[ring->index];
1770 
1771 	KASSERT(ring->index < ring->ntrb);
1772 
1773 	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
1774 	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD);
1775 
1776 	/* Make sure this TRB can be consumed. */
1777 	if (ring->toggle != (letoh32(trb->trb_flags) & XHCI_TRB_CYCLE))
1778 		return (NULL);
1779 
1780 	ring->index++;
1781 
1782 	if (ring->index == ring->ntrb) {
1783 		ring->index = 0;
1784 		ring->toggle ^= 1;
1785 	}
1786 
1787 	return (trb);
1788 }
1789 
1790 struct xhci_trb*
1791 xhci_ring_produce(struct xhci_softc *sc, struct xhci_ring *ring)
1792 {
1793 	struct xhci_trb *lnk, *trb;
1794 
1795 	KASSERT(ring->index < ring->ntrb);
1796 
1797 	/* Setup the link TRB after the previous TRB is done. */
1798 	if (ring->index == 0) {
1799 		lnk = &ring->trbs[ring->ntrb - 1];
1800 		trb = &ring->trbs[ring->ntrb - 2];
1801 
1802 		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
1803 		    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD |
1804 		    BUS_DMASYNC_POSTWRITE);
1805 
1806 		lnk->trb_flags &= htole32(~XHCI_TRB_CHAIN);
1807 		if (letoh32(trb->trb_flags) & XHCI_TRB_CHAIN)
1808 			lnk->trb_flags |= htole32(XHCI_TRB_CHAIN);
1809 
1810 		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
1811 		    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);
1812 
1813 		lnk->trb_flags ^= htole32(XHCI_TRB_CYCLE);
1814 
1815 		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
1816 		    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);
1817 	}
1818 
1819 	trb = &ring->trbs[ring->index++];
1820 	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
1821 	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD |
1822 	    BUS_DMASYNC_POSTWRITE);
1823 
1824 	/* Toggle cycle state of the link TRB and skip it. */
1825 	if (ring->index == (ring->ntrb - 1)) {
1826 		ring->index = 0;
1827 		ring->toggle ^= 1;
1828 	}
1829 
1830 	return (trb);
1831 }
1832 
1833 struct xhci_trb *
1834 xhci_xfer_get_trb(struct xhci_softc *sc, struct usbd_xfer *xfer,
1835     uint8_t *togglep, int last)
1836 {
1837 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
1838 	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
1839 
1840 	KASSERT(xp->free_trbs >= 1);
1841 	xp->free_trbs--;
1842 	*togglep = xp->ring.toggle;
1843 
1844 	switch (last) {
1845 	case -1:	/* This will be a zero-length TD. */
1846 		xp->pending_xfers[xp->ring.index] = NULL;
1847 		xx->zerotd += 1;
1848 		break;
1849 	case 0:		/* This will be in a chain. */
1850 		xp->pending_xfers[xp->ring.index] = xfer;
1851 		xx->index = -2;
1852 		xx->ntrb += 1;
1853 		break;
1854 	case 1:		/* This will terminate a chain. */
1855 		xp->pending_xfers[xp->ring.index] = xfer;
1856 		xx->index = xp->ring.index;
1857 		xx->ntrb += 1;
1858 		break;
1859 	}
1860 
1861 	xp->trb_processed[xp->ring.index] = TRB_PROCESSED_NO;
1862 
1863 	return (xhci_ring_produce(sc, &xp->ring));
1864 }
1865 
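/*
 * Submit a command TRB on the command ring.  With a non-zero timeout
 * the call is synchronous and sc_cmd_lock must be held, following the
 * pattern used by the xhci_cmd_* helpers below:
 *
 *	rw_enter_write(&sc->sc_cmd_lock);
 *	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
 *	rw_exit_write(&sc->sc_cmd_lock);
 *
 * With a timeout of 0 the doorbell is rung and the call returns
 * immediately without waiting for a completion event.
 */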
1866 int
1867 xhci_command_submit(struct xhci_softc *sc, struct xhci_trb *trb0, int timeout)
1868 {
1869 	struct xhci_trb *trb;
1870 	int s, error = 0;
1871 
1872 	KASSERT(timeout == 0 || sc->sc_cmd_trb == NULL);
1873 
1874 	trb0->trb_flags |= htole32(sc->sc_cmd_ring.toggle);
1875 
1876 	trb = xhci_ring_produce(sc, &sc->sc_cmd_ring);
1877 	if (trb == NULL)
1878 		return (EAGAIN);
1879 	trb->trb_paddr = trb0->trb_paddr;
1880 	trb->trb_status = trb0->trb_status;
1881 	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
1882 	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
1883 	    BUS_DMASYNC_PREWRITE);
1884 
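	/*
	 * Write the flags, which carry the cycle bit, only after the
	 * rest of the TRB has been synced.
	 */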
1885 	trb->trb_flags = trb0->trb_flags;
1886 	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
1887 	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
1888 	    BUS_DMASYNC_PREWRITE);
1889 
1890 	if (timeout == 0) {
1891 		XDWRITE4(sc, XHCI_DOORBELL(0), 0);
1892 		return (0);
1893 	}
1894 
1895 	rw_assert_wrlock(&sc->sc_cmd_lock);
1896 
1897 	s = splusb();
1898 	sc->sc_cmd_trb = trb;
1899 	XDWRITE4(sc, XHCI_DOORBELL(0), 0);
1900 	error = tsleep_nsec(&sc->sc_cmd_trb, PZERO, "xhcicmd", timeout);
1901 	if (error) {
1902 #ifdef XHCI_DEBUG
1903 		printf("%s: tsleep() = %d\n", __func__, error);
1904 		printf("cmd = %d ", XHCI_TRB_TYPE(letoh32(trb->trb_flags)));
1905 		xhci_dump_trb(trb);
1906 #endif
1907 		KASSERT(sc->sc_cmd_trb == trb || sc->sc_cmd_trb == NULL);
1908 		/*
1909 		 * Just because the timeout expired does not mean that the
1910 		 * TRB isn't active anymore! We could get an interrupt from
1911 		 * this TRB later on and then wonder what to do with it.
1912 		 * We'd rather abort it.
1913 		 */
1914 		xhci_command_abort(sc);
1915 		sc->sc_cmd_trb = NULL;
1916 		splx(s);
1917 		return (error);
1918 	}
1919 	splx(s);
1920 
1921 	memcpy(trb0, &sc->sc_result_trb, sizeof(struct xhci_trb));
1922 
1923 	if (XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)) == XHCI_CODE_SUCCESS)
1924 		return (0);
1925 
1926 #ifdef XHCI_DEBUG
1927 	printf("%s: event error code=%d, result=%d  \n", DEVNAME(sc),
1928 	    XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)),
1929 	    XHCI_TRB_TYPE(letoh32(trb0->trb_flags)));
1930 	xhci_dump_trb(trb0);
1931 #endif
1932 	return (EIO);
1933 }
1934 
1935 int
1936 xhci_command_abort(struct xhci_softc *sc)
1937 {
1938 	uint32_t reg;
1939 	int i;
1940 
1941 	reg = XOREAD4(sc, XHCI_CRCR_LO);
1942 	if ((reg & XHCI_CRCR_LO_CRR) == 0)
1943 		return (0);
1944 
1945 	XOWRITE4(sc, XHCI_CRCR_LO, reg | XHCI_CRCR_LO_CA);
1946 	XOWRITE4(sc, XHCI_CRCR_HI, 0);
1947 
1948 	for (i = 0; i < 2500; i++) {
1949 		DELAY(100);
1950 		reg = XOREAD4(sc, XHCI_CRCR_LO) & XHCI_CRCR_LO_CRR;
1951 		if (!reg)
1952 			break;
1953 	}
1954 
1955 	if (reg) {
1956 		printf("%s: command ring abort timeout\n", DEVNAME(sc));
1957 		return (1);
1958 	}
1959 
1960 	return (0);
1961 }
1962 
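/*
 * Wrappers building the individual xHCI commands.  The synchronous
 * ones serialize on sc_cmd_lock and wait for the completion event;
 * the *_async variants only ring the doorbell and do not wait.
 */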
1963 int
1964 xhci_cmd_configure_ep(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
1965 {
1966 	struct xhci_trb trb;
1967 	int error;
1968 
1969 	DPRINTF(("%s: %s dev %u\n", DEVNAME(sc), __func__, slot));
1970 
1971 	trb.trb_paddr = htole64(addr);
1972 	trb.trb_status = 0;
1973 	trb.trb_flags = htole32(
1974 	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_CONFIG_EP
1975 	);
1976 
1977 	rw_enter_write(&sc->sc_cmd_lock);
1978 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
1979 	rw_exit_write(&sc->sc_cmd_lock);
1980 	return (error);
1981 }
1982 
1983 int
1984 xhci_cmd_stop_ep(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
1985 {
1986 	struct xhci_trb trb;
1987 	int error;
1988 
1989 	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
1990 
1991 	trb.trb_paddr = 0;
1992 	trb.trb_status = 0;
1993 	trb.trb_flags = htole32(
1994 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_STOP_EP
1995 	);
1996 
1997 	rw_enter_write(&sc->sc_cmd_lock);
1998 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
1999 	rw_exit_write(&sc->sc_cmd_lock);
2000 	return (error);
2001 }
2002 
2003 void
2004 xhci_cmd_reset_ep_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
2005 {
2006 	struct xhci_trb trb;
2007 
2008 	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
2009 
2010 	trb.trb_paddr = 0;
2011 	trb.trb_status = 0;
2012 	trb.trb_flags = htole32(
2013 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_RESET_EP
2014 	);
2015 
2016 	xhci_command_submit(sc, &trb, 0);
2017 }
2018 
2019 void
2020 xhci_cmd_set_tr_deq_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci,
2021    uint64_t addr)
2022 {
2023 	struct xhci_trb trb;
2024 
2025 	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
2026 
2027 	trb.trb_paddr = htole64(addr);
2028 	trb.trb_status = 0;
2029 	trb.trb_flags = htole32(
2030 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_SET_TR_DEQ
2031 	);
2032 
2033 	xhci_command_submit(sc, &trb, 0);
2034 }
2035 
2036 int
2037 xhci_cmd_slot_control(struct xhci_softc *sc, uint8_t *slotp, int enable)
2038 {
2039 	struct xhci_trb trb;
2040 	int error;
2041 
2042 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
2043 
2044 	trb.trb_paddr = 0;
2045 	trb.trb_status = 0;
2046 	if (enable)
2047 		trb.trb_flags = htole32(XHCI_CMD_ENABLE_SLOT);
2048 	else
2049 		trb.trb_flags = htole32(
2050 			XHCI_TRB_SET_SLOT(*slotp) | XHCI_CMD_DISABLE_SLOT
2051 		);
2052 
2053 	rw_enter_write(&sc->sc_cmd_lock);
2054 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2055 	rw_exit_write(&sc->sc_cmd_lock);
2056 	if (error != 0)
2057 		return (EIO);
2058 
2059 	if (enable)
2060 		*slotp = XHCI_TRB_GET_SLOT(letoh32(trb.trb_flags));
2061 
2062 	return (0);
2063 }
2064 
2065 int
2066 xhci_cmd_set_address(struct xhci_softc *sc, uint8_t slot, uint64_t addr,
2067     uint32_t bsr)
2068 {
2069 	struct xhci_trb trb;
2070 	int error;
2071 
2072 	DPRINTF(("%s: %s BSR=%u\n", DEVNAME(sc), __func__, bsr ? 1 : 0));
2073 
2074 	trb.trb_paddr = htole64(addr);
2075 	trb.trb_status = 0;
2076 	trb.trb_flags = htole32(
2077 	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_ADDRESS_DEVICE | bsr
2078 	);
2079 
2080 	rw_enter_write(&sc->sc_cmd_lock);
2081 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2082 	rw_exit_write(&sc->sc_cmd_lock);
2083 	return (error);
2084 }
2085 
2086 int
2087 xhci_cmd_evaluate_ctx(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
2088 {
2089 	struct xhci_trb trb;
2090 	int error;
2091 
2092 	DPRINTF(("%s: %s dev %u\n", DEVNAME(sc), __func__, slot));
2093 
2094 	trb.trb_paddr = htole64(addr);
2095 	trb.trb_status = 0;
2096 	trb.trb_flags = htole32(
2097 	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_EVAL_CTX
2098 	);
2099 
2100 	rw_enter_write(&sc->sc_cmd_lock);
2101 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2102 	rw_exit_write(&sc->sc_cmd_lock);
2103 	return (error);
2104 }
2105 
2106 #ifdef XHCI_DEBUG
2107 int
2108 xhci_cmd_noop(struct xhci_softc *sc)
2109 {
2110 	struct xhci_trb trb;
2111 	int error;
2112 
2113 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
2114 
2115 	trb.trb_paddr = 0;
2116 	trb.trb_status = 0;
2117 	trb.trb_flags = htole32(XHCI_CMD_NOOP);
2118 
2119 	rw_enter_write(&sc->sc_cmd_lock);
2120 	error = xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT);
2121 	rw_exit_write(&sc->sc_cmd_lock);
2122 	return (error);
2123 }
2124 #endif
2125 
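/*
 * Per-slot software state.  The input context is laid out as the input
 * control context followed by the slot context and the 31 endpoint
 * contexts, each sc_ctxsize (32 or 64, depending on the controller)
 * bytes apart; hence the (i + 2) * sc_ctxsize offsets below.
 */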
2126 int
2127 xhci_softdev_alloc(struct xhci_softc *sc, uint8_t slot)
2128 {
2129 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];
2130 	int i, error;
2131 	uint8_t *kva;
2132 
2133 	/*
2134 	 * Set up the input context.  Even with a 64-byte context size, it
2135 	 * fits into the smallest supported page size, so use that.
2136 	 */
2137 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->ictx_dma,
2138 	    (void **)&kva, sc->sc_pagesize, XHCI_ICTX_ALIGN, sc->sc_pagesize);
2139 	if (error)
2140 		return (ENOMEM);
2141 
2142 	sdev->input_ctx = (struct xhci_inctx *)kva;
2143 	sdev->slot_ctx = (struct xhci_sctx *)(kva + sc->sc_ctxsize);
2144 	for (i = 0; i < 31; i++)
2145 		sdev->ep_ctx[i] =
2146 		    (struct xhci_epctx *)(kva + (i + 2) * sc->sc_ctxsize);
2147 
2148 	DPRINTF(("%s: dev %d, input=%p slot=%p ep0=%p\n", DEVNAME(sc),
2149 	    slot, sdev->input_ctx, sdev->slot_ctx, sdev->ep_ctx[0]));
2150 
2151 	/* Setup output context */
2152 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->octx_dma, NULL,
2153 	    sc->sc_pagesize, XHCI_OCTX_ALIGN, sc->sc_pagesize);
2154 	if (error) {
2155 		usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);
2156 		return (ENOMEM);
2157 	}
2158 
2159 	memset(&sdev->pipes, 0, sizeof(sdev->pipes));
2160 
2161 	DPRINTF(("%s: dev %d, setting DCBAA to 0x%016llx\n", DEVNAME(sc),
2162 	    slot, (long long)sdev->octx_dma.paddr));
2163 
2164 	sc->sc_dcbaa.segs[slot] = htole64(sdev->octx_dma.paddr);
2165 	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
2166 	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREREAD |
2167 	    BUS_DMASYNC_PREWRITE);
2168 
2169 	return (0);
2170 }
2171 
2172 void
2173 xhci_softdev_free(struct xhci_softc *sc, uint8_t slot)
2174 {
2175 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];
2176 
2177 	sc->sc_dcbaa.segs[slot] = 0;
2178 	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
2179 	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREREAD |
2180 	    BUS_DMASYNC_PREWRITE);
2181 
2182 	usbd_dma_contig_free(&sc->sc_bus, &sdev->octx_dma);
2183 	usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);
2184 
2185 	memset(sdev, 0, sizeof(struct xhci_soft_dev));
2186 }
2187 
2188 /* Root hub descriptors. */
2189 usb_device_descriptor_t xhci_devd = {
2190 	USB_DEVICE_DESCRIPTOR_SIZE,
2191 	UDESC_DEVICE,		/* type */
2192 	{0x00, 0x03},		/* USB version */
2193 	UDCLASS_HUB,		/* class */
2194 	UDSUBCLASS_HUB,		/* subclass */
2195 	UDPROTO_HSHUBSTT,	/* protocol */
2196 	9,			/* max packet */
2197 	{0},{0},{0x00,0x01},	/* device id */
2198 	1,2,0,			/* string indexes */
2199 	1			/* # of configurations */
2200 };
2201 
2202 const usb_config_descriptor_t xhci_confd = {
2203 	USB_CONFIG_DESCRIPTOR_SIZE,
2204 	UDESC_CONFIG,
2205 	{USB_CONFIG_DESCRIPTOR_SIZE +
2206 	 USB_INTERFACE_DESCRIPTOR_SIZE +
2207 	 USB_ENDPOINT_DESCRIPTOR_SIZE},
2208 	1,
2209 	1,
2210 	0,
2211 	UC_BUS_POWERED | UC_SELF_POWERED,
2212 	0                      /* max power */
2213 };
2214 
2215 const usb_interface_descriptor_t xhci_ifcd = {
2216 	USB_INTERFACE_DESCRIPTOR_SIZE,
2217 	UDESC_INTERFACE,
2218 	0,
2219 	0,
2220 	1,
2221 	UICLASS_HUB,
2222 	UISUBCLASS_HUB,
2223 	UIPROTO_HSHUBSTT,
2224 	0
2225 };
2226 
2227 const usb_endpoint_descriptor_t xhci_endpd = {
2228 	USB_ENDPOINT_DESCRIPTOR_SIZE,
2229 	UDESC_ENDPOINT,
2230 	UE_DIR_IN | XHCI_INTR_ENDPT,
2231 	UE_INTERRUPT,
2232 	{2, 0},                 /* max 15 ports */
2233 	255
2234 };
2235 
2236 const usb_endpoint_ss_comp_descriptor_t xhci_endpcd = {
2237 	USB_ENDPOINT_SS_COMP_DESCRIPTOR_SIZE,
2238 	UDESC_ENDPOINT_SS_COMP,
2239 	0,
2240 	0,
2241 	{0, 0}
2242 };
2243 
2244 const usb_hub_descriptor_t xhci_hubd = {
2245 	USB_HUB_DESCRIPTOR_SIZE,
2246 	UDESC_SS_HUB,
2247 	0,
2248 	{0,0},
2249 	0,
2250 	0,
2251 	{0},
2252 };
2253 
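/*
 * Abort an in-flight transfer: stop the endpoint and then, unless the
 * transfer completed while the endpoint was being stopped, move the
 * dequeue pointer past its TRBs.  The completion path is expected to
 * clear xp->aborted_xfer and wake us up.
 */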
2254 void
2255 xhci_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
2256 {
2257 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2258 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
2259 	int error;
2260 
2261 	splsoftassert(IPL_SOFTUSB);
2262 
2263 	DPRINTF(("%s: xfer=%p status=%s err=%s actlen=%d len=%d idx=%d\n",
2264 	    __func__, xfer, usbd_errstr(xfer->status), usbd_errstr(status),
2265 	    xfer->actlen, xfer->length, ((struct xhci_xfer *)xfer)->index));
2266 
2267 	/* XXX The stack should not call abort() in this case. */
2268 	if (sc->sc_bus.dying || xfer->status == USBD_NOT_STARTED) {
2269 		xfer->status = status;
2270 		timeout_del(&xfer->timeout_handle);
2271 		usb_rem_task(xfer->device, &xfer->abort_task);
2272 		usb_transfer_complete(xfer);
2273 		return;
2274 	}
2275 
2276 	/* Transfer is already done. */
2277 	if (xfer->status != USBD_IN_PROGRESS) {
2278 		DPRINTF(("%s: already done \n", __func__));
2279 		return;
2280 	}
2281 
2282 	/* Prevent any timeout from kicking in. */
2283 	timeout_del(&xfer->timeout_handle);
2284 	usb_rem_task(xfer->device, &xfer->abort_task);
2285 
2286 	/* Indicate that we are aborting this transfer. */
2287 	xp->halted = status;
2288 	xp->aborted_xfer = xfer;
2289 
2290 	/* Stop the endpoint and wait until the hardware acknowledges it. */
2291 	if (xhci_cmd_stop_ep(sc, xp->slot, xp->dci)) {
2292 		DPRINTF(("%s: error stopping endpoint\n", DEVNAME(sc)));
2293 		/* Assume the device is gone. */
2294 		xp->halted = 0;
2295 		xp->aborted_xfer = NULL;
2296 		xfer->status = status;
2297 		usb_transfer_complete(xfer);
2298 		return;
2299 	}
2300 
2301 	/*
2302 	 * The transfer was already completed when we stopped the
2303 	 * endpoint, no need to move the dequeue pointer past its
2304 	 * TRBs.
2305 	 */
2306 	if (xp->aborted_xfer == NULL) {
2307 		DPRINTF(("%s: done before stopping the endpoint\n", __func__));
2308 		xp->halted = 0;
2309 		return;
2310 	}
2311 
2312 	/*
2313 	 * At this stage the endpoint has been stopped, so update its
2314 	 * dequeue pointer past the last TRB of the transfer.
2315 	 *
2316 	 * Note: This assumes that only one transfer per endpoint has
2317 	 *	 pending TRBs on the ring.
2318 	 */
2319 	xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
2320 	    DEQPTR(xp->ring) | xp->ring.toggle);
2321 	error = tsleep_nsec(xp, PZERO, "xhciab", XHCI_CMD_TIMEOUT);
2322 	if (error)
2323 		printf("%s: timeout aborting transfer\n", DEVNAME(sc));
2324 }
2325 
2326 void
2327 xhci_timeout(void *addr)
2328 {
2329 	struct usbd_xfer *xfer = addr;
2330 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2331 
2332 	if (sc->sc_bus.dying) {
2333 		xhci_timeout_task(addr);
2334 		return;
2335 	}
2336 
2337 	usb_init_task(&xfer->abort_task, xhci_timeout_task, addr,
2338 	    USB_TASK_TYPE_ABORT);
2339 	usb_add_task(xfer->device, &xfer->abort_task);
2340 }
2341 
2342 void
2343 xhci_timeout_task(void *addr)
2344 {
2345 	struct usbd_xfer *xfer = addr;
2346 	int s;
2347 
2348 	s = splusb();
2349 	xhci_abort_xfer(xfer, USBD_TIMEOUT);
2350 	splx(s);
2351 }
2352 
2353 usbd_status
2354 xhci_root_ctrl_transfer(struct usbd_xfer *xfer)
2355 {
2356 	usbd_status err;
2357 
2358 	err = usb_insert_transfer(xfer);
2359 	if (err)
2360 		return (err);
2361 
2362 	return (xhci_root_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2363 }
2364 
2365 usbd_status
2366 xhci_root_ctrl_start(struct usbd_xfer *xfer)
2367 {
2368 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2369 	usb_port_status_t ps;
2370 	usb_device_request_t *req;
2371 	void *buf = NULL;
2372 	usb_hub_descriptor_t hubd;
2373 	usbd_status err;
2374 	int s, len, value, index;
2375 	int l, totlen = 0;
2376 	int port, i;
2377 	uint32_t v;
2378 
2379 	KASSERT(xfer->rqflags & URQ_REQUEST);
2380 
2381 	if (sc->sc_bus.dying)
2382 		return (USBD_IOERROR);
2383 
2384 	req = &xfer->request;
2385 
2386 	DPRINTFN(4,("%s: type=0x%02x request=%02x\n", __func__,
2387 	    req->bmRequestType, req->bRequest));
2388 
2389 	len = UGETW(req->wLength);
2390 	value = UGETW(req->wValue);
2391 	index = UGETW(req->wIndex);
2392 
2393 	if (len != 0)
2394 		buf = KERNADDR(&xfer->dmabuf, 0);
2395 
2396 #define C(x,y) ((x) | ((y) << 8))
2397 	switch (C(req->bRequest, req->bmRequestType)) {
2398 	case C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE):
2399 	case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE):
2400 	case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT):
2401 		/*
2402 		 * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops
2403 		 * for the integrated root hub.
2404 		 */
2405 		break;
2406 	case C(UR_GET_CONFIG, UT_READ_DEVICE):
2407 		if (len > 0) {
2408 			*(uint8_t *)buf = sc->sc_conf;
2409 			totlen = 1;
2410 		}
2411 		break;
2412 	case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
2413 		DPRINTFN(8,("xhci_root_ctrl_start: wValue=0x%04x\n", value));
2414 		switch (value >> 8) {
2415 		case UDESC_DEVICE:
2416 			if ((value & 0xff) != 0) {
2417 				err = USBD_IOERROR;
2418 				goto ret;
2419 			}
2420 			totlen = l = min(len, USB_DEVICE_DESCRIPTOR_SIZE);
2421 			USETW(xhci_devd.idVendor, sc->sc_id_vendor);
2422 			memcpy(buf, &xhci_devd, l);
2423 			break;
2424 		/*
2425 		 * We can't really operate at another speed, but the spec says
2426 		 * we need this descriptor.
2427 		 */
2428 		case UDESC_OTHER_SPEED_CONFIGURATION:
2429 		case UDESC_CONFIG:
2430 			if ((value & 0xff) != 0) {
2431 				err = USBD_IOERROR;
2432 				goto ret;
2433 			}
2434 			totlen = l = min(len, USB_CONFIG_DESCRIPTOR_SIZE);
2435 			memcpy(buf, &xhci_confd, l);
2436 			((usb_config_descriptor_t *)buf)->bDescriptorType =
2437 			    value >> 8;
2438 			buf = (char *)buf + l;
2439 			len -= l;
2440 			l = min(len, USB_INTERFACE_DESCRIPTOR_SIZE);
2441 			totlen += l;
2442 			memcpy(buf, &xhci_ifcd, l);
2443 			buf = (char *)buf + l;
2444 			len -= l;
2445 			l = min(len, USB_ENDPOINT_DESCRIPTOR_SIZE);
2446 			totlen += l;
2447 			memcpy(buf, &xhci_endpd, l);
2448 			break;
2449 		case UDESC_STRING:
2450 			if (len == 0)
2451 				break;
2452 			*(u_int8_t *)buf = 0;
2453 			totlen = 1;
2454 			switch (value & 0xff) {
2455 			case 0: /* Language table */
2456 				totlen = usbd_str(buf, len, "\001");
2457 				break;
2458 			case 1: /* Vendor */
2459 				totlen = usbd_str(buf, len, sc->sc_vendor);
2460 				break;
2461 			case 2: /* Product */
2462 				totlen = usbd_str(buf, len, "xHCI root hub");
2463 				break;
2464 			}
2465 			break;
2466 		default:
2467 			err = USBD_IOERROR;
2468 			goto ret;
2469 		}
2470 		break;
2471 	case C(UR_GET_INTERFACE, UT_READ_INTERFACE):
2472 		if (len > 0) {
2473 			*(uint8_t *)buf = 0;
2474 			totlen = 1;
2475 		}
2476 		break;
2477 	case C(UR_GET_STATUS, UT_READ_DEVICE):
2478 		if (len > 1) {
2479 			USETW(((usb_status_t *)buf)->wStatus, UDS_SELF_POWERED);
2480 			totlen = 2;
2481 		}
2482 		break;
2483 	case C(UR_GET_STATUS, UT_READ_INTERFACE):
2484 	case C(UR_GET_STATUS, UT_READ_ENDPOINT):
2485 		if (len > 1) {
2486 			USETW(((usb_status_t *)buf)->wStatus, 0);
2487 			totlen = 2;
2488 		}
2489 		break;
2490 	case C(UR_SET_ADDRESS, UT_WRITE_DEVICE):
2491 		if (value >= USB_MAX_DEVICES) {
2492 			err = USBD_IOERROR;
2493 			goto ret;
2494 		}
2495 		break;
2496 	case C(UR_SET_CONFIG, UT_WRITE_DEVICE):
2497 		if (value != 0 && value != 1) {
2498 			err = USBD_IOERROR;
2499 			goto ret;
2500 		}
2501 		sc->sc_conf = value;
2502 		break;
2503 	case C(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE):
2504 		break;
2505 	case C(UR_SET_FEATURE, UT_WRITE_DEVICE):
2506 	case C(UR_SET_FEATURE, UT_WRITE_INTERFACE):
2507 	case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT):
2508 		err = USBD_IOERROR;
2509 		goto ret;
2510 	case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE):
2511 		break;
2512 	case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT):
2513 		break;
2514 	/* Hub requests */
2515 	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
2516 		break;
2517 	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER):
2518 		DPRINTFN(8, ("xhci_root_ctrl_start: UR_CLEAR_PORT_FEATURE "
2519 		    "port=%d feature=%d\n", index, value));
2520 		if (index < 1 || index > sc->sc_noport) {
2521 			err = USBD_IOERROR;
2522 			goto ret;
2523 		}
2524 		port = XHCI_PORTSC(index);
2525 		v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR;
2526 		switch (value) {
2527 		case UHF_PORT_ENABLE:
2528 			XOWRITE4(sc, port, v | XHCI_PS_PED);
2529 			break;
2530 		case UHF_PORT_SUSPEND:
2531 			/* TODO */
2532 			break;
2533 		case UHF_PORT_POWER:
2534 			XOWRITE4(sc, port, v & ~XHCI_PS_PP);
2535 			break;
2536 		case UHF_PORT_INDICATOR:
2537 			XOWRITE4(sc, port, v & ~XHCI_PS_SET_PIC(3));
2538 			break;
2539 		case UHF_C_PORT_CONNECTION:
2540 			XOWRITE4(sc, port, v | XHCI_PS_CSC);
2541 			break;
2542 		case UHF_C_PORT_ENABLE:
2543 			XOWRITE4(sc, port, v | XHCI_PS_PEC);
2544 			break;
2545 		case UHF_C_PORT_SUSPEND:
2546 		case UHF_C_PORT_LINK_STATE:
2547 			XOWRITE4(sc, port, v | XHCI_PS_PLC);
2548 			break;
2549 		case UHF_C_PORT_OVER_CURRENT:
2550 			XOWRITE4(sc, port, v | XHCI_PS_OCC);
2551 			break;
2552 		case UHF_C_PORT_RESET:
2553 			XOWRITE4(sc, port, v | XHCI_PS_PRC);
2554 			break;
2555 		case UHF_C_BH_PORT_RESET:
2556 			XOWRITE4(sc, port, v | XHCI_PS_WRC);
2557 			break;
2558 		default:
2559 			err = USBD_IOERROR;
2560 			goto ret;
2561 		}
2562 		break;
2563 
2564 	case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
2565 		if (len == 0)
2566 			break;
2567 		if ((value & 0xff) != 0) {
2568 			err = USBD_IOERROR;
2569 			goto ret;
2570 		}
2571 		v = XREAD4(sc, XHCI_HCCPARAMS);
2572 		hubd = xhci_hubd;
2573 		hubd.bNbrPorts = sc->sc_noport;
2574 		USETW(hubd.wHubCharacteristics,
2575 		    (XHCI_HCC_PPC(v) ? UHD_PWR_INDIVIDUAL : UHD_PWR_GANGED) |
2576 		    (XHCI_HCC_PIND(v) ? UHD_PORT_IND : 0));
2577 		hubd.bPwrOn2PwrGood = 10; /* xHCI section 5.4.9 */
2578 		for (i = 1; i <= sc->sc_noport; i++) {
2579 			v = XOREAD4(sc, XHCI_PORTSC(i));
2580 			if (v & XHCI_PS_DR)
2581 				hubd.DeviceRemovable[i / 8] |= 1U << (i % 8);
2582 		}
2583 		hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
2584 		l = min(len, hubd.bDescLength);
2585 		totlen = l;
2586 		memcpy(buf, &hubd, l);
2587 		break;
2588 	case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
2589 		if (len != 16) {
2590 			err = USBD_IOERROR;
2591 			goto ret;
2592 		}
2593 		memset(buf, 0, len);
2594 		totlen = len;
2595 		break;
2596 	case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
2597 		DPRINTFN(8,("xhci_root_ctrl_start: get port status i=%d\n",
2598 		    index));
2599 		if (index < 1 || index > sc->sc_noport) {
2600 			err = USBD_IOERROR;
2601 			goto ret;
2602 		}
2603 		if (len != 4) {
2604 			err = USBD_IOERROR;
2605 			goto ret;
2606 		}
2607 		v = XOREAD4(sc, XHCI_PORTSC(index));
2608 		DPRINTFN(8,("xhci_root_ctrl_start: port status=0x%04x\n", v));
2609 		i = UPS_PORT_LS_SET(XHCI_PS_GET_PLS(v));
2610 		switch (XHCI_PS_SPEED(v)) {
2611 		case XHCI_SPEED_FULL:
2612 			i |= UPS_FULL_SPEED;
2613 			break;
2614 		case XHCI_SPEED_LOW:
2615 			i |= UPS_LOW_SPEED;
2616 			break;
2617 		case XHCI_SPEED_HIGH:
2618 			i |= UPS_HIGH_SPEED;
2619 			break;
2620 		case XHCI_SPEED_SUPER:
2621 		default:
2622 			break;
2623 		}
2624 		if (v & XHCI_PS_CCS)	i |= UPS_CURRENT_CONNECT_STATUS;
2625 		if (v & XHCI_PS_PED)	i |= UPS_PORT_ENABLED;
2626 		if (v & XHCI_PS_OCA)	i |= UPS_OVERCURRENT_INDICATOR;
2627 		if (v & XHCI_PS_PR)	i |= UPS_RESET;
2628 		if (v & XHCI_PS_PP) {
2629 			if (XHCI_PS_SPEED(v) >= XHCI_SPEED_FULL &&
2630 			    XHCI_PS_SPEED(v) <= XHCI_SPEED_HIGH)
2631 				i |= UPS_PORT_POWER;
2632 			else
2633 				i |= UPS_PORT_POWER_SS;
2634 		}
2635 		USETW(ps.wPortStatus, i);
2636 		i = 0;
2637 		if (v & XHCI_PS_CSC)    i |= UPS_C_CONNECT_STATUS;
2638 		if (v & XHCI_PS_PEC)    i |= UPS_C_PORT_ENABLED;
2639 		if (v & XHCI_PS_OCC)    i |= UPS_C_OVERCURRENT_INDICATOR;
2640 		if (v & XHCI_PS_PRC)	i |= UPS_C_PORT_RESET;
2641 		if (v & XHCI_PS_WRC)	i |= UPS_C_BH_PORT_RESET;
2642 		if (v & XHCI_PS_PLC)	i |= UPS_C_PORT_LINK_STATE;
2643 		if (v & XHCI_PS_CEC)	i |= UPS_C_PORT_CONFIG_ERROR;
2644 		USETW(ps.wPortChange, i);
2645 		l = min(len, sizeof ps);
2646 		memcpy(buf, &ps, l);
2647 		totlen = l;
2648 		break;
2649 	case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
2650 		err = USBD_IOERROR;
2651 		goto ret;
2652 	case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
2653 		break;
2654 	case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER):
2655 
2656 		i = index >> 8;
2657 		index &= 0x00ff;
2658 
2659 		if (index < 1 || index > sc->sc_noport) {
2660 			err = USBD_IOERROR;
2661 			goto ret;
2662 		}
2663 		port = XHCI_PORTSC(index);
2664 		v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR;
2665 
2666 		switch (value) {
2667 		case UHF_PORT_ENABLE:
2668 			XOWRITE4(sc, port, v | XHCI_PS_PED);
2669 			break;
2670 		case UHF_PORT_SUSPEND:
2671 			DPRINTFN(6, ("suspend port %u (LPM=%u)\n", index, i));
2672 			if (XHCI_PS_SPEED(v) == XHCI_SPEED_SUPER) {
2673 				err = USBD_IOERROR;
2674 				goto ret;
2675 			}
2676 			XOWRITE4(sc, port, v |
2677 			    XHCI_PS_SET_PLS(i ? 2 /* LPM */ : 3) | XHCI_PS_LWS);
2678 			break;
2679 		case UHF_PORT_RESET:
2680 			DPRINTFN(6, ("reset port %d\n", index));
2681 			XOWRITE4(sc, port, v | XHCI_PS_PR);
2682 			break;
2683 		case UHF_PORT_POWER:
2684 			DPRINTFN(3, ("set port power %d\n", index));
2685 			XOWRITE4(sc, port, v | XHCI_PS_PP);
2686 			break;
2687 		case UHF_PORT_INDICATOR:
2688 			DPRINTFN(3, ("set port indicator %d\n", index));
2689 
2690 			v &= ~XHCI_PS_SET_PIC(3);
2691 			v |= XHCI_PS_SET_PIC(1);
2692 
2693 			XOWRITE4(sc, port, v);
2694 			break;
2695 		case UHF_C_PORT_RESET:
2696 			XOWRITE4(sc, port, v | XHCI_PS_PRC);
2697 			break;
2698 		case UHF_C_BH_PORT_RESET:
2699 			XOWRITE4(sc, port, v | XHCI_PS_WRC);
2700 			break;
2701 		default:
2702 			err = USBD_IOERROR;
2703 			goto ret;
2704 		}
2705 		break;
2706 	case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
2707 	case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
2708 	case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
2709 	case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
2710 		break;
2711 	default:
2712 		err = USBD_IOERROR;
2713 		goto ret;
2714 	}
2715 	xfer->actlen = totlen;
2716 	err = USBD_NORMAL_COMPLETION;
2717 ret:
2718 	xfer->status = err;
2719 	s = splusb();
2720 	usb_transfer_complete(xfer);
2721 	splx(s);
2722 	return (err);
2723 }
2724 
2725 
2726 void
2727 xhci_noop(struct usbd_xfer *xfer)
2728 {
2729 }
2730 
2731 
2732 usbd_status
2733 xhci_root_intr_transfer(struct usbd_xfer *xfer)
2734 {
2735 	usbd_status err;
2736 
2737 	err = usb_insert_transfer(xfer);
2738 	if (err)
2739 		return (err);
2740 
2741 	return (xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2742 }
2743 
2744 usbd_status
2745 xhci_root_intr_start(struct usbd_xfer *xfer)
2746 {
2747 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2748 
2749 	if (sc->sc_bus.dying)
2750 		return (USBD_IOERROR);
2751 
2752 	sc->sc_intrxfer = xfer;
2753 
2754 	return (USBD_IN_PROGRESS);
2755 }
2756 
2757 void
2758 xhci_root_intr_abort(struct usbd_xfer *xfer)
2759 {
2760 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2761 	int s;
2762 
2763 	sc->sc_intrxfer = NULL;
2764 
2765 	xfer->status = USBD_CANCELLED;
2766 	s = splusb();
2767 	usb_transfer_complete(xfer);
2768 	splx(s);
2769 }
2770 
2771 void
2772 xhci_root_intr_done(struct usbd_xfer *xfer)
2773 {
2774 }
2775 
2776 /*
2777  * Number of packets remaining in the TD after the corresponding TRB.
2778  *
2779  * Section 4.11.2.4 of xHCI specification r1.1.
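 *
 * For example: a transfer with 4096 bytes remaining including this
 * TRB's 1024 bytes, at a 512-byte max packet size, leaves
 * (4096 - 1024) / 512 = 6 packets after it, so the TD SIZE field is 6
 * (the field saturates at 31).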
2780  */
2781 static inline uint32_t
2782 xhci_xfer_tdsize(struct usbd_xfer *xfer, uint32_t remain, uint32_t len)
2783 {
2784 	uint32_t npkt, mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
2785 
2786 	if (len == 0)
2787 		return XHCI_TRB_TDREM(0);
2788 
2789 	npkt = howmany(remain - len, UE_GET_SIZE(mps));
2790 	if (npkt > 31)
2791 		npkt = 31;
2792 
2793 	return XHCI_TRB_TDREM(npkt);
2794 }
2795 
2796 /*
2797  * Transfer Burst Count (TBC) and Transfer Last Burst Packet Count (TLBPC).
2798  *
2799  * Section 4.11.2.3 of xHCI specification r1.1.
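 *
 * For example, a SuperSpeed endpoint with max burst 3 (bursts of 4
 * packets) sending a 10-packet TD gets TBC = howmany(10, 4) - 1 = 2
 * and, since 10 % 4 = 2, TLBPC = 2 - 1 = 1.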
2800  */
2801 static inline uint32_t
2802 xhci_xfer_tbc(struct usbd_xfer *xfer, uint32_t len, uint32_t *tlbpc)
2803 {
2804 	uint32_t mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
2805 	uint32_t maxb, tdpc, residue, tbc;
2806 
2807 	/* Transfer Descriptor Packet Count, section 4.14.1. */
2808 	tdpc = howmany(len, UE_GET_SIZE(mps));
2809 	if (tdpc == 0)
2810 		tdpc = 1;
2811 
2812 	/* Transfer Burst Count */
2813 	maxb = xhci_pipe_maxburst(xfer->pipe);
2814 	tbc = howmany(tdpc, maxb + 1) - 1;
2815 
2816 	/* Transfer Last Burst Packet Count */
2817 	if (xfer->device->speed == USB_SPEED_SUPER) {
2818 		residue = tdpc % (maxb + 1);
2819 		if (residue == 0)
2820 			*tlbpc = maxb;
2821 		else
2822 			*tlbpc = residue - 1;
2823 	} else {
2824 		*tlbpc = tdpc - 1;
2825 	}
2826 
2827 	return (tbc);
2828 }
2829 
2830 usbd_status
2831 xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
2832 {
2833 	usbd_status err;
2834 
2835 	err = usb_insert_transfer(xfer);
2836 	if (err)
2837 		return (err);
2838 
2839 	return (xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2840 }
2841 
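/*
 * A control transfer is built out of up to three TRBs: Setup, an
 * optional Data stage and Status.  The Setup TRB is enqueued with an
 * inverted cycle bit and only toggled once the other stages are in
 * place, so the controller cannot race ahead of us.
 */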
2842 usbd_status
2843 xhci_device_ctrl_start(struct usbd_xfer *xfer)
2844 {
2845 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2846 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
2847 	struct xhci_trb *trb0, *trb;
2848 	uint32_t flags, len = UGETW(xfer->request.wLength);
2849 	uint8_t toggle;
2850 	int s;
2851 
2852 	KASSERT(xfer->rqflags & URQ_REQUEST);
2853 
2854 	if (sc->sc_bus.dying || xp->halted)
2855 		return (USBD_IOERROR);
2856 
2857 	if (xp->free_trbs < 3)
2858 		return (USBD_NOMEM);
2859 
2860 	if (len != 0)
2861 		usb_syncmem(&xfer->dmabuf, 0, len,
2862 		    usbd_xfer_isread(xfer) ?
2863 		    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
2864 
2865 	/* We'll toggle the setup TRB once we're finished with the stages. */
2866 	trb0 = xhci_xfer_get_trb(sc, xfer, &toggle, 0);
2867 
2868 	flags = XHCI_TRB_TYPE_SETUP | XHCI_TRB_IDT | (toggle ^ 1);
2869 	if (len != 0) {
2870 		if (usbd_xfer_isread(xfer))
2871 			flags |= XHCI_TRB_TRT_IN;
2872 		else
2873 			flags |= XHCI_TRB_TRT_OUT;
2874 	}
2875 
2876 	memcpy(&trb0->trb_paddr, &xfer->request, sizeof(trb0->trb_paddr));
2877 	trb0->trb_status = htole32(XHCI_TRB_INTR(0) | XHCI_TRB_LEN(8));
2878 	trb0->trb_flags = htole32(flags);
2879 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
2880 	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
2881 	    BUS_DMASYNC_PREWRITE);
2882 
2883 	/* Data TRB */
2884 	if (len != 0) {
2885 		trb = xhci_xfer_get_trb(sc, xfer, &toggle, 0);
2886 
2887 		flags = XHCI_TRB_TYPE_DATA | toggle;
2888 		if (usbd_xfer_isread(xfer))
2889 			flags |= XHCI_TRB_DIR_IN | XHCI_TRB_ISP;
2890 
2891 		trb->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0));
2892 		trb->trb_status = htole32(
2893 		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
2894 		    xhci_xfer_tdsize(xfer, len, len)
2895 		);
2896 		trb->trb_flags = htole32(flags);
2897 
2898 		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
2899 		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
2900 		    BUS_DMASYNC_PREWRITE);
2901 	}
2902 
2903 	/* Status TRB */
2904 	trb = xhci_xfer_get_trb(sc, xfer, &toggle, 1);
2905 
2906 	flags = XHCI_TRB_TYPE_STATUS | XHCI_TRB_IOC | toggle;
2907 	if (len == 0 || !usbd_xfer_isread(xfer))
2908 		flags |= XHCI_TRB_DIR_IN;
2909 
2910 	trb->trb_paddr = 0;
2911 	trb->trb_status = htole32(XHCI_TRB_INTR(0));
2912 	trb->trb_flags = htole32(flags);
2913 
2914 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
2915 	    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
2916 	    BUS_DMASYNC_PREWRITE);
2917 
2918 	/* Setup TRB */
2919 	trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE);
2920 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
2921 	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
2922 	    BUS_DMASYNC_PREWRITE);
2923 
2924 	s = splusb();
2925 	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);
2926 
2927 	xfer->status = USBD_IN_PROGRESS;
2928 	if (xfer->timeout && !sc->sc_bus.use_polling) {
2929 		timeout_del(&xfer->timeout_handle);
2930 		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
2931 		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
2932 	}
2933 	splx(s);
2934 
2935 	return (USBD_IN_PROGRESS);
2936 }
2937 
2938 void
2939 xhci_device_ctrl_abort(struct usbd_xfer *xfer)
2940 {
2941 	xhci_abort_xfer(xfer, USBD_CANCELLED);
2942 }
2943 
2944 usbd_status
2945 xhci_device_generic_transfer(struct usbd_xfer *xfer)
2946 {
2947 	usbd_status err;
2948 
2949 	err = usb_insert_transfer(xfer);
2950 	if (err)
2951 		return (err);
2952 
2953 	return (xhci_device_generic_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2954 }
2955 
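/*
 * Bulk and interrupt transfers are split into as many normal TRBs as
 * the buffer requires; a TRB's data may not cross a 64k boundary.  A
 * trailing zero-length TD is queued when the transfer length is a
 * multiple of the endpoint's packet size and USBD_FORCE_SHORT_XFER is
 * set (or the transfer is empty).  As with control transfers, the
 * first TRB's cycle bit is toggled last.
 */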
2956 usbd_status
2957 xhci_device_generic_start(struct usbd_xfer *xfer)
2958 {
2959 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2960 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
2961 	struct xhci_trb *trb0, *trb;
2962 	uint32_t len, remain, flags;
2963 	uint32_t mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
2964 	uint64_t paddr = DMAADDR(&xfer->dmabuf, 0);
2965 	uint8_t toggle;
2966 	int s, i, ntrb, zerotd = 0;
2967 
2968 	KASSERT(!(xfer->rqflags & URQ_REQUEST));
2969 
2970 	if (sc->sc_bus.dying || xp->halted)
2971 		return (USBD_IOERROR);
2972 
2973 	/* How many TRBs do we need for this transfer? */
2974 	ntrb = howmany(xfer->length, XHCI_TRB_MAXSIZE);
2975 
2976 	/* If the buffer crosses a 64k boundary, we need one more. */
2977 	len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1));
2978 	if (len < xfer->length)
2979 		ntrb = howmany(xfer->length - len, XHCI_TRB_MAXSIZE) + 1;
2980 	else
2981 		len = xfer->length;
2982 
2983 	/* If we need to append a zero length packet, we need one more. */
2984 	if ((xfer->flags & USBD_FORCE_SHORT_XFER || xfer->length == 0) &&
2985 	    (xfer->length % UE_GET_SIZE(mps) == 0))
2986 		zerotd = 1;
2987 
2988 	if (xp->free_trbs < (ntrb + zerotd))
2989 		return (USBD_NOMEM);
2990 
2991 	usb_syncmem(&xfer->dmabuf, 0, xfer->length,
2992 	    usbd_xfer_isread(xfer) ?
2993 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
2994 
2995 	/* We'll toggle the first TRB once we're finished with the chain. */
2996 	trb0 = xhci_xfer_get_trb(sc, xfer, &toggle, (ntrb == 1));
2997 	flags = XHCI_TRB_TYPE_NORMAL | (toggle ^ 1);
2998 	if (usbd_xfer_isread(xfer))
2999 		flags |= XHCI_TRB_ISP;
3000 	flags |= (ntrb == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;
3001 
3002 	trb0->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0));
3003 	trb0->trb_status = htole32(
3004 	    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
3005 	    xhci_xfer_tdsize(xfer, xfer->length, len)
3006 	);
3007 	trb0->trb_flags = htole32(flags);
3008 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3009 	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
3010 	    BUS_DMASYNC_PREWRITE);
3011 
3012 	remain = xfer->length - len;
3013 	paddr += len;
3014 
3015 	/* Chain more TRBs if needed. */
3016 	for (i = ntrb - 1; i > 0; i--) {
3017 		len = min(remain, XHCI_TRB_MAXSIZE);
3018 
3019 		/* Next (or Last) TRB. */
3020 		trb = xhci_xfer_get_trb(sc, xfer, &toggle, (i == 1));
3021 		flags = XHCI_TRB_TYPE_NORMAL | toggle;
3022 		if (usbd_xfer_isread(xfer))
3023 			flags |= XHCI_TRB_ISP;
3024 		flags |= (i == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;
3025 
3026 		trb->trb_paddr = htole64(paddr);
3027 		trb->trb_status = htole32(
3028 		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
3029 		    xhci_xfer_tdsize(xfer, remain, len)
3030 		);
3031 		trb->trb_flags = htole32(flags);
3032 
3033 		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3034 		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
3035 		    BUS_DMASYNC_PREWRITE);
3036 
3037 		remain -= len;
3038 		paddr += len;
3039 	}
3040 
3041 	/* Do we need to issue a zero length transfer? */
3042 	if (zerotd == 1) {
3043 		trb = xhci_xfer_get_trb(sc, xfer, &toggle, -1);
3044 		trb->trb_paddr = 0;
3045 		trb->trb_status = 0;
3046 		trb->trb_flags = htole32(XHCI_TRB_TYPE_NORMAL | XHCI_TRB_IOC | toggle);
3047 		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3048 		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
3049 		    BUS_DMASYNC_PREWRITE);
3050 	}
3051 
3052 	/* First TRB. */
3053 	trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE);
3054 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3055 	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
3056 	    BUS_DMASYNC_PREWRITE);
3057 
3058 	s = splusb();
3059 	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);
3060 
3061 	xfer->status = USBD_IN_PROGRESS;
3062 	if (xfer->timeout && !sc->sc_bus.use_polling) {
3063 		timeout_del(&xfer->timeout_handle);
3064 		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
3065 		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
3066 	}
3067 	splx(s);
3068 
3069 	return (USBD_IN_PROGRESS);
3070 }
3071 
3072 void
3073 xhci_device_generic_done(struct usbd_xfer *xfer)
3074 {
3075 	/* Only happens with interrupt transfers. */
3076 	if (xfer->pipe->repeat) {
3077 		xfer->actlen = 0;
3078 		xhci_device_generic_start(xfer);
3079 	}
3080 }
3081 
3082 void
3083 xhci_device_generic_abort(struct usbd_xfer *xfer)
3084 {
3085 	KASSERT(!xfer->pipe->repeat || xfer->pipe->intrxfer == xfer);
3086 
3087 	xhci_abort_xfer(xfer, USBD_CANCELLED);
3088 }
3089 
3090 usbd_status
3091 xhci_device_isoc_transfer(struct usbd_xfer *xfer)
3092 {
3093 	usbd_status err;
3094 
3095 	err = usb_insert_transfer(xfer);
3096 	if (err && err != USBD_IN_PROGRESS)
3097 		return (err);
3098 
3099 	return (xhci_device_isoc_start(xfer));
3100 }
3101 
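/*
 * Isochronous transfers queue one TD per frame; the first TRB of each
 * TD is an Isoch TRB carrying the burst counts (TBC/TLBPC) computed
 * above, and any continuation TRBs are plain Normal TRBs chained to it.
 */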
3102 usbd_status
3103 xhci_device_isoc_start(struct usbd_xfer *xfer)
3104 {
3105 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
3106 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
3107 	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
3108 	struct xhci_trb *trb0, *trb;
3109 	uint32_t len, remain, flags;
3110 	uint64_t paddr;
3111 	uint32_t tbc, tlbpc;
3112 	int s, i, j, ntrb = xfer->nframes;
3113 	uint8_t toggle;
3114 
3115 	KASSERT(!(xfer->rqflags & URQ_REQUEST));
3116 
3117 	/*
3118 	 * To allow continuous transfers, above we start all transfers
3119 	 * immediately. However, usbd_start_next will still call this
3120 	 * routine when another xfer completes. So, check whether this
3121 	 * transfer is already in progress.
3122 	 */
3123 	if (xx->ntrb > 0)
3124 		return (USBD_IN_PROGRESS);
3125 
3126 	if (sc->sc_bus.dying || xp->halted)
3127 		return (USBD_IOERROR);
3128 
3129 	/* Why would you do that anyway? */
3130 	if (sc->sc_bus.use_polling)
3131 		return (USBD_INVAL);
3132 
3133 	paddr = DMAADDR(&xfer->dmabuf, 0);
3134 
3135 	/* How many TRBs do we need for all transfers? */
3136 	for (i = 0, ntrb = 0; i < xfer->nframes; i++) {
3137 		/* How many TRBs do we need for this transfer? */
3138 		ntrb += howmany(xfer->frlengths[i], XHCI_TRB_MAXSIZE);
3139 
3140 		/* If the buffer crosses a 64k boundary, we need one more. */
3141 		len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1));
3142 		if (len < xfer->frlengths[i])
3143 			ntrb++;
3144 
3145 		paddr += xfer->frlengths[i];
3146 	}
3147 
3148 	if (xp->free_trbs < ntrb)
3149 		return (USBD_NOMEM);
3150 
3151 	usb_syncmem(&xfer->dmabuf, 0, xfer->length,
3152 	    usbd_xfer_isread(xfer) ?
3153 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
3154 
3155 	paddr = DMAADDR(&xfer->dmabuf, 0);
3156 
3157 	for (i = 0, trb0 = NULL; i < xfer->nframes; i++) {
3158 		/* How many TRBs do we need for this transfer? */
3159 		ntrb = howmany(xfer->frlengths[i], XHCI_TRB_MAXSIZE);
3160 
3161 		/* If the buffer crosses a 64k boundary, we need one more. */
3162 		len = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1));
3163 		if (len < xfer->frlengths[i])
3164 			ntrb++;
3165 		else
3166 			len = xfer->frlengths[i];
3167 
3168 		KASSERT(ntrb < 3);
3169 
3170 		/*
3171 		 * We'll commit the first TRB once we're finished with the
3172 		 * chain.
3173 		 */
3174 		trb = xhci_xfer_get_trb(sc, xfer, &toggle, (ntrb == 1));
3175 
3176 		DPRINTFN(4, ("%s:%d: ring %p trb0_idx %lu ntrb %d paddr %llx "
3177 		    "len %u\n", __func__, __LINE__,
3178 		    &xp->ring.trbs[0], (trb - &xp->ring.trbs[0]), ntrb, paddr,
3179 		    len));
3180 
3181 		/* Record the first TRB so we can toggle later. */
3182 		if (trb0 == NULL) {
3183 			trb0 = trb;
3184 			toggle ^= 1;
3185 		}
3186 
3187 		flags = XHCI_TRB_TYPE_ISOCH | XHCI_TRB_SIA | toggle;
3188 		if (usbd_xfer_isread(xfer))
3189 			flags |= XHCI_TRB_ISP;
3190 		flags |= (ntrb == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;
3191 
3192 		tbc = xhci_xfer_tbc(xfer, xfer->frlengths[i], &tlbpc);
3193 		flags |= XHCI_TRB_ISOC_TBC(tbc) | XHCI_TRB_ISOC_TLBPC(tlbpc);
3194 
3195 		trb->trb_paddr = htole64(paddr);
3196 		trb->trb_status = htole32(
3197 		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
3198 		    xhci_xfer_tdsize(xfer, xfer->frlengths[i], len)
3199 		);
3200 		trb->trb_flags = htole32(flags);
3201 
3202 		bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3203 		    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
3204 		    BUS_DMASYNC_PREWRITE);
3205 
3206 		remain = xfer->frlengths[i] - len;
3207 		paddr += len;
3208 
3209 		/* Chain more TRBs if needed. */
3210 		for (j = ntrb - 1; j > 0; j--) {
3211 			len = min(remain, XHCI_TRB_MAXSIZE);
3212 
3213 			/* Next (or Last) TRB. */
3214 			trb = xhci_xfer_get_trb(sc, xfer, &toggle, (j == 1));
3215 			flags = XHCI_TRB_TYPE_NORMAL | toggle;
3216 			if (usbd_xfer_isread(xfer))
3217 				flags |= XHCI_TRB_ISP;
3218 			flags |= (j == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;
3219 			DPRINTFN(3, ("%s:%d: ring %p trb0_idx %lu ntrb %d "
3220 			    "paddr %llx len %u\n", __func__, __LINE__,
3221 			    &xp->ring.trbs[0], (trb - &xp->ring.trbs[0]), ntrb,
3222 			    paddr, len));
3223 
3224 			trb->trb_paddr = htole64(paddr);
3225 			trb->trb_status = htole32(
3226 			    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
3227 			    xhci_xfer_tdsize(xfer, remain, len)
3228 			);
3229 			trb->trb_flags = htole32(flags);
3230 
3231 			bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3232 			    TRBOFF(&xp->ring, trb), sizeof(struct xhci_trb),
3233 			    BUS_DMASYNC_PREWRITE);
3234 
3235 			remain -= len;
3236 			paddr += len;
3237 		}
3238 
3239 		xfer->frlengths[i] = 0;
3240 	}
3241 
3242 	/* First TRB. */
3243 	trb0->trb_flags ^= htole32(XHCI_TRB_CYCLE);
3244 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
3245 	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb),
3246 	    BUS_DMASYNC_PREWRITE);
3247 
3248 	s = splusb();
3249 	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);
3250 
3251 	xfer->status = USBD_IN_PROGRESS;
3252 
3253 	if (xfer->timeout) {
3254 		timeout_del(&xfer->timeout_handle);
3255 		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
3256 		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
3257 	}
3258 	splx(s);
3259 
3260 	return (USBD_IN_PROGRESS);
3261 }
3262