xref: /openbsd-src/sys/dev/usb/xhci.c (revision 0b7734b3d77bb9b21afec6f4621cae6c805dbd45)
1 /* $OpenBSD: xhci.c,v 1.66 2015/12/02 09:23:23 mpi Exp $ */
2 
3 /*
4  * Copyright (c) 2014-2015 Martin Pieuchot
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/kernel.h>
22 #include <sys/malloc.h>
23 #include <sys/device.h>
24 #include <sys/queue.h>
25 #include <sys/timeout.h>
26 #include <sys/pool.h>
27 #include <sys/endian.h>
28 
29 #include <machine/bus.h>
30 
31 #include <dev/usb/usb.h>
32 #include <dev/usb/usbdi.h>
33 #include <dev/usb/usbdivar.h>
34 #include <dev/usb/usb_mem.h>
35 
36 #include <dev/usb/xhcireg.h>
37 #include <dev/usb/xhcivar.h>
38 
/* Autoconf glue: device class descriptor for the "xhci" driver. */
struct cfdriver xhci_cd = {
	NULL, "xhci", DV_DULL
};
42 
#ifdef XHCI_DEBUG
/* Debug printf wrappers; compiled out unless XHCI_DEBUG is defined. */
#define DPRINTF(x)	do { if (xhcidebug) printf x; } while(0)
#define DPRINTFN(n,x)	do { if (xhcidebug>(n)) printf x; } while (0)
int xhcidebug = 3;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

/* Device name used in diagnostic messages. */
#define DEVNAME(sc)	((sc)->sc_bus.bdev.dv_xname)

/* Byte offset of a TRB within its ring's TRB array. */
#define TRBOFF(r, trb)	((char *)(trb) - (char *)((r)->trbs))
/* DMA address corresponding to a ring's current index. */
#define DEQPTR(r)	((r).dma.paddr + (sizeof(struct xhci_trb) * (r).index))

/* Pool of xhci_xfer structures shared by all xHCI controllers. */
struct pool *xhcixfer;
58 
/* Per-pipe (endpoint) state, extending the generic usbd_pipe. */
struct xhci_pipe {
	struct usbd_pipe	pipe;

	uint8_t			dci;	/* Device Context Index */
	uint8_t			slot;	/* Device slot ID */
	struct xhci_ring	ring;	/* Transfer ring for this endpoint */

	/*
	 * XXX used to pass the xfer pointer back to the
	 * interrupt routine, better way?
	 */
	struct usbd_xfer	*pending_xfers[XHCI_MAX_XFER];
	struct usbd_xfer	*aborted_xfer;	/* xfer awaiting endpoint reset */
	int			 halted;	/* halt status to report to caller */
	size_t			 free_trbs;	/* TRBs still available on the ring */
};
75 
/* Internal routines: controller setup, event handling and resource mgmt. */
int	xhci_reset(struct xhci_softc *);
int	xhci_intr1(struct xhci_softc *);
void	xhci_waitintr(struct xhci_softc *, struct usbd_xfer *);
void	xhci_event_dequeue(struct xhci_softc *);
void	xhci_event_xfer(struct xhci_softc *, uint64_t, uint32_t, uint32_t);
void	xhci_event_command(struct xhci_softc *, uint64_t);
void	xhci_event_port_change(struct xhci_softc *, uint64_t, uint32_t);
int	xhci_pipe_init(struct xhci_softc *, struct usbd_pipe *);
void	xhci_context_setup(struct xhci_softc *, struct usbd_pipe *);
int	xhci_scratchpad_alloc(struct xhci_softc *, int);
void	xhci_scratchpad_free(struct xhci_softc *);
int	xhci_softdev_alloc(struct xhci_softc *, uint8_t);
void	xhci_softdev_free(struct xhci_softc *, uint8_t);
int	xhci_ring_alloc(struct xhci_softc *, struct xhci_ring *, size_t,
	    size_t);
void	xhci_ring_free(struct xhci_softc *, struct xhci_ring *);
void	xhci_ring_reset(struct xhci_softc *, struct xhci_ring *);
struct	xhci_trb *xhci_ring_consume(struct xhci_softc *, struct xhci_ring *);
struct	xhci_trb *xhci_ring_produce(struct xhci_softc *, struct xhci_ring *);

struct	xhci_trb *xhci_xfer_get_trb(struct xhci_softc *, struct usbd_xfer*,
	    uint8_t *, int);
void	xhci_xfer_done(struct usbd_xfer *xfer);
/* xHCI command helpers. */
int	xhci_command_submit(struct xhci_softc *, struct xhci_trb *, int);
int	xhci_command_abort(struct xhci_softc *);

void	xhci_cmd_reset_ep_async(struct xhci_softc *, uint8_t, uint8_t);
void	xhci_cmd_set_tr_deq_async(struct xhci_softc *, uint8_t, uint8_t, uint64_t);
int	xhci_cmd_configure_ep(struct xhci_softc *, uint8_t, uint64_t);
int	xhci_cmd_stop_ep(struct xhci_softc *, uint8_t, uint8_t);
int	xhci_cmd_slot_control(struct xhci_softc *, uint8_t *, int);
int	xhci_cmd_set_address(struct xhci_softc *, uint8_t,  uint64_t, uint32_t);
int	xhci_cmd_evaluate_ctx(struct xhci_softc *, uint8_t, uint64_t);
#ifdef XHCI_DEBUG
int	xhci_cmd_noop(struct xhci_softc *);
#endif

/* XXX should be part of the Bus interface. */
void	xhci_abort_xfer(struct usbd_xfer *, usbd_status);
void	xhci_pipe_close(struct usbd_pipe *);
void	xhci_noop(struct usbd_xfer *);

void 	xhci_timeout(void *);
void	xhci_timeout_task(void *);

/* USBD Bus Interface. */
usbd_status	  xhci_pipe_open(struct usbd_pipe *);
int		  xhci_setaddr(struct usbd_device *, int);
void		  xhci_softintr(void *);
void		  xhci_poll(struct usbd_bus *);
struct usbd_xfer *xhci_allocx(struct usbd_bus *);
void		  xhci_freex(struct usbd_bus *, struct usbd_xfer *);

usbd_status	  xhci_root_ctrl_transfer(struct usbd_xfer *);
usbd_status	  xhci_root_ctrl_start(struct usbd_xfer *);

usbd_status	  xhci_root_intr_transfer(struct usbd_xfer *);
usbd_status	  xhci_root_intr_start(struct usbd_xfer *);
void		  xhci_root_intr_abort(struct usbd_xfer *);
void		  xhci_root_intr_done(struct usbd_xfer *);

usbd_status	  xhci_device_ctrl_transfer(struct usbd_xfer *);
usbd_status	  xhci_device_ctrl_start(struct usbd_xfer *);
void		  xhci_device_ctrl_abort(struct usbd_xfer *);

usbd_status	  xhci_device_generic_transfer(struct usbd_xfer *);
usbd_status	  xhci_device_generic_start(struct usbd_xfer *);
void		  xhci_device_generic_abort(struct usbd_xfer *);
void		  xhci_device_generic_done(struct usbd_xfer *);

/* Endpoint number of the root hub interrupt pipe. */
#define XHCI_INTR_ENDPT 1
148 
/* Hooks used by the generic USB stack to drive this host controller. */
struct usbd_bus_methods xhci_bus_methods = {
	.open_pipe = xhci_pipe_open,
	.dev_setaddr = xhci_setaddr,
	.soft_intr = xhci_softintr,
	.do_poll = xhci_poll,
	.allocx = xhci_allocx,
	.freex = xhci_freex,
};

/* Root hub control endpoint. */
struct usbd_pipe_methods xhci_root_ctrl_methods = {
	.transfer = xhci_root_ctrl_transfer,
	.start = xhci_root_ctrl_start,
	.abort = xhci_noop,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

/* Root hub interrupt endpoint (port change reporting). */
struct usbd_pipe_methods xhci_root_intr_methods = {
	.transfer = xhci_root_intr_transfer,
	.start = xhci_root_intr_start,
	.abort = xhci_root_intr_abort,
	.close = xhci_pipe_close,
	.done = xhci_root_intr_done,
};

/* Device control endpoints. */
struct usbd_pipe_methods xhci_device_ctrl_methods = {
	.transfer = xhci_device_ctrl_transfer,
	.start = xhci_device_ctrl_start,
	.abort = xhci_device_ctrl_abort,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};

#if notyet
struct usbd_pipe_methods xhci_device_isoc_methods = {
};
#endif

/* Device bulk endpoints. */
struct usbd_pipe_methods xhci_device_bulk_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};

/* Device interrupt endpoints. */
struct usbd_pipe_methods xhci_device_generic_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};
202 
#ifdef XHCI_DEBUG
/* Pretty-print a TRB (parameter, status and flag words) for debugging. */
static void
xhci_dump_trb(struct xhci_trb *trb)
{
	/* %b decodes the flag word using the XHCI_TRB_FLAGS_BITMASK format. */
	printf("trb=%p (0x%016llx 0x%08x 0x%b)\n", trb,
	    (long long)letoh64(trb->trb_paddr), letoh32(trb->trb_status),
	    (int)letoh32(trb->trb_flags), XHCI_TRB_FLAGS_BITMASK);
}
#endif
212 
/* Helpers to allocate/free physically contiguous DMA-safe memory. */
int	usbd_dma_contig_alloc(struct usbd_bus *, struct usbd_dma_info *,
	    void **, bus_size_t, bus_size_t, bus_size_t);
void	usbd_dma_contig_free(struct usbd_bus *, struct usbd_dma_info *);
216 
217 int
218 usbd_dma_contig_alloc(struct usbd_bus *bus, struct usbd_dma_info *dma,
219     void **kvap, bus_size_t size, bus_size_t alignment, bus_size_t boundary)
220 {
221 	int error;
222 
223 	dma->tag = bus->dmatag;
224 	dma->size = size;
225 
226 	error = bus_dmamap_create(dma->tag, size, 1, size, boundary,
227 	    BUS_DMA_NOWAIT, &dma->map);
228 	if (error != 0)
229 		return (error);;
230 
231 	error = bus_dmamem_alloc(dma->tag, size, alignment, boundary, &dma->seg,
232 	    1, &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
233 	if (error != 0)
234 		goto destroy;
235 
236 	error = bus_dmamem_map(dma->tag, &dma->seg, 1, size, &dma->vaddr,
237 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
238 	if (error != 0)
239 		goto free;
240 
241 	error = bus_dmamap_load_raw(dma->tag, dma->map, &dma->seg, 1, size,
242 	    BUS_DMA_NOWAIT);
243 	if (error != 0)
244 		goto unmap;
245 
246 	bus_dmamap_sync(dma->tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
247 
248 	dma->paddr = dma->map->dm_segs[0].ds_addr;
249 	if (kvap != NULL)
250 		*kvap = dma->vaddr;
251 
252 	return (0);
253 
254 unmap:
255 	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
256 free:
257 	bus_dmamem_free(dma->tag, &dma->seg, 1);
258 destroy:
259 	bus_dmamap_destroy(dma->tag, dma->map);
260 	return (error);
261 }
262 
263 void
264 usbd_dma_contig_free(struct usbd_bus *bus, struct usbd_dma_info *dma)
265 {
266 	if (dma->map != NULL) {
267 		bus_dmamap_sync(bus->dmatag, dma->map, 0, dma->size,
268 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
269 		bus_dmamap_unload(bus->dmatag, dma->map);
270 		bus_dmamem_unmap(bus->dmatag, dma->vaddr, dma->size);
271 		bus_dmamem_free(bus->dmatag, &dma->seg, 1);
272 		bus_dmamap_destroy(bus->dmatag, dma->map);
273 		dma->map = NULL;
274 	}
275 }
276 
/*
 * Bus-independent initialization: reset the controller and allocate
 * the global structures shared with the hardware (DCBAA, command
 * ring, event ring, segment table and scratchpad pages).
 * Returns 0 on success or an errno value, releasing everything
 * allocated so far on failure.
 */
int
xhci_init(struct xhci_softc *sc)
{
	uint32_t hcr;
	int npage, error;

#ifdef XHCI_DEBUG
	uint16_t vers;

	vers = XREAD2(sc, XHCI_HCIVERSION);
	printf("%s: xHCI version %x.%x\n", DEVNAME(sc), vers >> 8, vers & 0xff);
#endif
	sc->sc_bus.usbrev = USBREV_3_0;
	sc->sc_bus.methods = &xhci_bus_methods;
	sc->sc_bus.pipe_size = sizeof(struct xhci_pipe);

	/* Locate the operational, doorbell and runtime register spaces. */
	sc->sc_oper_off = XREAD1(sc, XHCI_CAPLENGTH);
	sc->sc_door_off = XREAD4(sc, XHCI_DBOFF);
	sc->sc_runt_off = XREAD4(sc, XHCI_RTSOFF);

#ifdef XHCI_DEBUG
	printf("%s: CAPLENGTH=%#lx\n", DEVNAME(sc), sc->sc_oper_off);
	printf("%s: DOORBELL=%#lx\n", DEVNAME(sc), sc->sc_door_off);
	printf("%s: RUNTIME=%#lx\n", DEVNAME(sc), sc->sc_runt_off);
#endif

	error = xhci_reset(sc);
	if (error)
		return (error);

	/* Create the global xfer pool the first time a controller attaches. */
	if (xhcixfer == NULL) {
		xhcixfer = malloc(sizeof(struct pool), M_DEVBUF, M_NOWAIT);
		if (xhcixfer == NULL) {
			printf("%s: unable to allocate pool descriptor\n",
			    DEVNAME(sc));
			return (ENOMEM);
		}
		pool_init(xhcixfer, sizeof(struct xhci_xfer), 0, 0, 0,
		    "xhcixfer", NULL);
		pool_setipl(xhcixfer, IPL_SOFTUSB);
	}

	/* Device contexts are either 32 or 64 bytes wide depending on CSZ. */
	hcr = XREAD4(sc, XHCI_HCCPARAMS);
	sc->sc_ctxsize = XHCI_HCC_CSZ(hcr) ? 64 : 32;
	DPRINTF(("%s: %d bytes context\n", DEVNAME(sc), sc->sc_ctxsize));

#ifdef XHCI_DEBUG
	hcr = XOREAD4(sc, XHCI_PAGESIZE);
	printf("%s: supported page size 0x%08x\n", DEVNAME(sc), hcr);
#endif
	/* Use 4K for the moment since it's easier. */
	sc->sc_pagesize = 4096;

	/* Get port and device slot numbers. */
	hcr = XREAD4(sc, XHCI_HCSPARAMS1);
	sc->sc_noport = XHCI_HCS1_N_PORTS(hcr);
	sc->sc_noslot = XHCI_HCS1_DEVSLOT_MAX(hcr);
	DPRINTF(("%s: %d ports and %d slots\n", DEVNAME(sc), sc->sc_noport,
	    sc->sc_noslot));

	/* Setup Device Context Base Address Array. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_dcbaa.dma,
	    (void **)&sc->sc_dcbaa.segs, (sc->sc_noslot + 1) * sizeof(uint64_t),
	    XHCI_DCBAA_ALIGN, sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Setup command ring. */
	error = xhci_ring_alloc(sc, &sc->sc_cmd_ring, XHCI_MAX_CMDS,
	    XHCI_CMDS_RING_ALIGN);
	if (error) {
		printf("%s: could not allocate command ring.\n", DEVNAME(sc));
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (error);
	}

	/* Setup one event ring and its segment table (ERST). */
	error = xhci_ring_alloc(sc, &sc->sc_evt_ring, XHCI_MAX_EVTS,
	    XHCI_EVTS_RING_ALIGN);
	if (error) {
		printf("%s: could not allocate event ring.\n", DEVNAME(sc));
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (error);
	}

	/* Allocate the required entry for the segment table. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_erst.dma,
	    (void **)&sc->sc_erst.segs, sizeof(struct xhci_erseg),
	    XHCI_ERST_ALIGN, XHCI_ERST_BOUNDARY);
	if (error) {
		printf("%s: could not allocate segment table.\n", DEVNAME(sc));
		xhci_ring_free(sc, &sc->sc_evt_ring);
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (ENOMEM);
	}

	/* Set our ring address and size in its corresponding segment. */
	sc->sc_erst.segs[0].er_addr = htole64(sc->sc_evt_ring.dma.paddr);
	sc->sc_erst.segs[0].er_size = htole32(XHCI_MAX_EVTS);
	sc->sc_erst.segs[0].er_rsvd = 0;
	bus_dmamap_sync(sc->sc_erst.dma.tag, sc->sc_erst.dma.map, 0,
	    sc->sc_erst.dma.size, BUS_DMASYNC_PREWRITE);

	/* Get the number of scratch pages and configure them if necessary. */
	hcr = XREAD4(sc, XHCI_HCSPARAMS2);
	npage = XHCI_HCS2_SPB_MAX(hcr);
	DPRINTF(("%s: %d scratch pages\n", DEVNAME(sc), npage));

	if (npage > 0 && xhci_scratchpad_alloc(sc, npage)) {
		printf("%s: could not allocate scratchpad.\n", DEVNAME(sc));
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
		xhci_ring_free(sc, &sc->sc_evt_ring);
		xhci_ring_free(sc, &sc->sc_cmd_ring);
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
		return (ENOMEM);
	}

	return (0);
}
399 
/*
 * Program the controller with the addresses of the structures
 * allocated in xhci_init(), enable interrupts and start it.
 * Also called on resume to reprogram the hardware.
 */
void
xhci_config(struct xhci_softc *sc)
{
	uint64_t paddr;
	uint32_t hcr;

	/* Make sure to program a number of device slots we can handle. */
	if (sc->sc_noslot > USB_MAX_DEVICES)
		sc->sc_noslot = USB_MAX_DEVICES;
	hcr = XOREAD4(sc, XHCI_CONFIG) & ~XHCI_CONFIG_SLOTS_MASK;
	XOWRITE4(sc, XHCI_CONFIG, hcr | sc->sc_noslot);

	/* Set the device context base array address. */
	paddr = (uint64_t)sc->sc_dcbaa.dma.paddr;
	XOWRITE4(sc, XHCI_DCBAAP_LO, (uint32_t)paddr);
	XOWRITE4(sc, XHCI_DCBAAP_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: DCBAAP=%#x%#x\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_DCBAAP_HI), XOREAD4(sc, XHCI_DCBAAP_LO)));

	/* Set the command ring address, with the initial cycle state. */
	paddr = (uint64_t)sc->sc_cmd_ring.dma.paddr;
	XOWRITE4(sc, XHCI_CRCR_LO, ((uint32_t)paddr) | XHCI_CRCR_LO_RCS);
	XOWRITE4(sc, XHCI_CRCR_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: CRCR=%#x%#x (%016llx)\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_CRCR_HI), XOREAD4(sc, XHCI_CRCR_LO), paddr));

	/* Set the ERST count number to 1, since we use only one event ring. */
	XRWRITE4(sc, XHCI_ERSTSZ(0), XHCI_ERSTS_SET(1));

	/* Set the segment table address. */
	paddr = (uint64_t)sc->sc_erst.dma.paddr;
	XRWRITE4(sc, XHCI_ERSTBA_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERSTBA=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERSTBA_HI(0)), XRREAD4(sc, XHCI_ERSTBA_LO(0))));

	/* Set the ring dequeue address. */
	paddr = (uint64_t)sc->sc_evt_ring.dma.paddr;
	XRWRITE4(sc, XHCI_ERDP_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERDP=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERDP_HI(0)), XRREAD4(sc, XHCI_ERDP_LO(0))));

	/* Enable interrupts. */
	hcr = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), hcr | XHCI_IMAN_INTR_ENA);

	/* Set default interrupt moderation. */
	XRWRITE4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT);

	/* Allow event interrupt and start the controller. */
	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);

	DPRINTF(("%s: USBCMD=%#x\n", DEVNAME(sc), XOREAD4(sc, XHCI_USBCMD)));
	DPRINTF(("%s: IMAN=%#x\n", DEVNAME(sc), XRREAD4(sc, XHCI_IMAN(0))));
}
460 
/*
 * Detach child devices, then stop the controller and release the DMA
 * structures shared with the hardware (reverse order of xhci_init()).
 */
int
xhci_detach(struct device *self, int flags)
{
	struct xhci_softc *sc = (struct xhci_softc *)self;
	int rv;

	rv = config_detach_children(self, flags);
	if (rv != 0) {
		printf("%s: error while detaching %d\n", DEVNAME(sc), rv);
		return (rv);
	}

	/* Since the hardware might already be gone, ignore the errors. */
	xhci_command_abort(sc);

	xhci_reset(sc);

	/* Disable interrupts. */
	XRWRITE4(sc, XHCI_IMOD(0), 0);
	XRWRITE4(sc, XHCI_IMAN(0), 0);

	/* Clear the event ring address. */
	XRWRITE4(sc, XHCI_ERDP_LO(0), 0);
	XRWRITE4(sc, XHCI_ERDP_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTBA_LO(0), 0);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTSZ(0), 0);

	/* Clear the command ring address. */
	XOWRITE4(sc, XHCI_CRCR_LO, 0);
	XOWRITE4(sc, XHCI_CRCR_HI, 0);

	XOWRITE4(sc, XHCI_DCBAAP_LO, 0);
	XOWRITE4(sc, XHCI_DCBAAP_HI, 0);

	if (sc->sc_spad.npage > 0)
		xhci_scratchpad_free(sc);

	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
	xhci_ring_free(sc, &sc->sc_evt_ring);
	xhci_ring_free(sc, &sc->sc_cmd_ring);
	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);

	return (0);
}
508 
509 int
510 xhci_activate(struct device *self, int act)
511 {
512 	struct xhci_softc *sc = (struct xhci_softc *)self;
513 	int rv = 0;
514 
515 	switch (act) {
516 	case DVACT_RESUME:
517 		sc->sc_bus.use_polling++;
518 
519 		xhci_reset(sc);
520 		xhci_ring_reset(sc, &sc->sc_cmd_ring);
521 		xhci_ring_reset(sc, &sc->sc_evt_ring);
522 
523 		/* Renesas controllers, at least, need more time to resume. */
524 		usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);
525 
526 		xhci_config(sc);
527 
528 		sc->sc_bus.use_polling--;
529 		rv = config_activate_children(self, act);
530 		break;
531 	case DVACT_POWERDOWN:
532 		rv = config_activate_children(self, act);
533 		xhci_reset(sc);
534 		break;
535 	default:
536 		rv = config_activate_children(self, act);
537 		break;
538 	}
539 
540 	return (rv);
541 }
542 
/*
 * Halt then reset the controller.  Each step is polled for up to
 * 100ms; returns EIO if the reset does not complete in time.
 */
int
xhci_reset(struct xhci_softc *sc)
{
	uint32_t hcr;
	int i;

	XOWRITE4(sc, XHCI_USBCMD, 0);	/* Halt controller */
	for (i = 0; i < 100; i++) {
		usb_delay_ms(&sc->sc_bus, 1);
		hcr = XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_HCH;
		if (hcr)
			break;
	}

	if (!hcr)
		printf("%s: halt timeout\n", DEVNAME(sc));

	/* Reset, then wait for both HCRST and CNR to clear. */
	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_HCRST);
	for (i = 0; i < 100; i++) {
		usb_delay_ms(&sc->sc_bus, 1);
		hcr = (XOREAD4(sc, XHCI_USBCMD) & XHCI_CMD_HCRST) |
		    (XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_CNR);
		if (!hcr)
			break;
	}

	if (hcr) {
		printf("%s: reset timeout\n", DEVNAME(sc));
		return (EIO);
	}

	return (0);
}
576 
577 
/*
 * Hardware interrupt handler.  Returns 0 when the interrupt is not
 * for us (dying controller or polling mode), the result of
 * xhci_intr1() otherwise.
 */
int
xhci_intr(void *v)
{
	struct xhci_softc *sc = v;

	if (sc == NULL || sc->sc_bus.dying)
		return (0);

	/* If we get an interrupt while polling, then just ignore it. */
	if (sc->sc_bus.use_polling) {
		DPRINTFN(16, ("xhci_intr: ignored interrupt while polling\n"));
		return (0);
	}

	return (xhci_intr1(sc));
}
594 
/*
 * Interrupt servicing: acknowledge the status bits and schedule the
 * soft interrupt that will process the event ring.  Returns non-zero
 * if the interrupt was ours.
 */
int
xhci_intr1(struct xhci_softc *sc)
{
	uint32_t intrs;

	intrs = XOREAD4(sc, XHCI_USBSTS);
	/* All-ones means the device is gone (e.g. hot-unplugged). */
	if (intrs == 0xffffffff) {
		sc->sc_bus.dying = 1;
		return (0);
	}

	if ((intrs & XHCI_STS_EINT) == 0)
		return (0);

	sc->sc_bus.no_intrs++;

	if (intrs & XHCI_STS_HSE) {
		printf("%s: host system error\n", DEVNAME(sc));
		sc->sc_bus.dying = 1;
		return (1);
	}

	XOWRITE4(sc, XHCI_USBSTS, intrs); /* Acknowledge */
	usb_schedsoftintr(&sc->sc_bus);

	/* Acknowledge PCI interrupt */
	intrs = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), intrs | XHCI_IMAN_INTR_PEND);

	return (1);
}
626 
/* Polling-mode entry point: service the controller if any status bit is set. */
void
xhci_poll(struct usbd_bus *bus)
{
	struct xhci_softc *sc = (struct xhci_softc *)bus;

	if (XOREAD4(sc, XHCI_USBSTS))
		xhci_intr1(sc);
}
635 
/*
 * Busy-wait for the completion of ``xfer'', polling the interrupt
 * handler once per millisecond for up to the xfer's timeout.  If the
 * transfer does not complete in time it is failed with USBD_TIMEOUT.
 */
void
xhci_waitintr(struct xhci_softc *sc, struct usbd_xfer *xfer)
{
	int timo;

	for (timo = xfer->timeout; timo >= 0; timo--) {
		usb_delay_ms(&sc->sc_bus, 1);
		if (sc->sc_bus.dying)
			break;

		if (xfer->status != USBD_IN_PROGRESS)
			return;

		xhci_intr1(sc);
	}

	xfer->status = USBD_TIMEOUT;
	usb_transfer_complete(xfer);
}
655 
/* Soft interrupt handler: drain the event ring scheduled by xhci_intr1(). */
void
xhci_softintr(void *v)
{
	struct xhci_softc *sc = v;

	if (sc->sc_bus.dying)
		return;

	sc->sc_bus.intr_context++;
	xhci_event_dequeue(sc);
	sc->sc_bus.intr_context--;
}
668 
/*
 * Process all pending TRBs on the event ring, dispatching them by
 * type, then write the new dequeue pointer back to the controller.
 */
void
xhci_event_dequeue(struct xhci_softc *sc)
{
	struct xhci_trb *trb;
	uint64_t paddr;
	uint32_t status, flags;

	while ((trb = xhci_ring_consume(sc, &sc->sc_evt_ring)) != NULL) {
		paddr = letoh64(trb->trb_paddr);
		status = letoh32(trb->trb_status);
		flags = letoh32(trb->trb_flags);

		switch (flags & XHCI_TRB_TYPE_MASK) {
		case XHCI_EVT_XFER:
			xhci_event_xfer(sc, paddr, status, flags);
			break;
		case XHCI_EVT_CMD_COMPLETE:
			/* Keep a copy for the synchronous command waiters. */
			memcpy(&sc->sc_result_trb, trb, sizeof(*trb));
			xhci_event_command(sc, paddr);
			break;
		case XHCI_EVT_PORT_CHANGE:
			xhci_event_port_change(sc, paddr, status);
			break;
		default:
#ifdef XHCI_DEBUG
			printf("event (%d): ", XHCI_TRB_TYPE(flags));
			xhci_dump_trb(trb);
#endif
			break;
		}

	}

	/* Tell the controller how far we got. */
	paddr = (uint64_t)DEQPTR(sc->sc_evt_ring);
	XRWRITE4(sc, XHCI_ERDP_LO(0), ((uint32_t)paddr) | XHCI_ERDP_LO_BUSY);
	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));
}
706 
/*
 * Handle a transfer event TRB: locate the corresponding xfer from the
 * TRB address, translate the completion code into a usbd status and
 * complete the transfer (or start endpoint recovery on a halt).
 */
void
xhci_event_xfer(struct xhci_softc *sc, uint64_t paddr, uint32_t status,
    uint32_t flags)
{
	struct xhci_pipe *xp;
	struct usbd_xfer *xfer;
	struct xhci_xfer *xx;
	uint8_t dci, slot, code;
	uint32_t remain;
	int trb_idx;

	slot = XHCI_TRB_GET_SLOT(flags);
	dci = XHCI_TRB_GET_EP(flags);
	if (slot > sc->sc_noslot) {
		DPRINTF(("%s: incorrect slot (%u)\n", DEVNAME(sc), slot));
		return;
	}

	xp = sc->sc_sdevs[slot].pipes[dci - 1];
	if (xp == NULL)
		return;

	code = XHCI_TRB_GET_CODE(status);
	remain = XHCI_TRB_REMAIN(status);

	/* The event's TRB address identifies which pending xfer finished. */
	trb_idx = (paddr - xp->ring.dma.paddr) / sizeof(struct xhci_trb);
	if (trb_idx < 0 || trb_idx >= xp->ring.ntrb) {
		printf("%s: wrong trb index (%d) max is %zu\n", DEVNAME(sc),
		    trb_idx, xp->ring.ntrb - 1);
		return;
	}

	xfer = xp->pending_xfers[trb_idx];
	if (xfer == NULL) {
		printf("%s: NULL xfer pointer\n", DEVNAME(sc));
		return;
	}

	if (remain > xfer->length)
		remain = xfer->length;

	switch (code) {
	case XHCI_CODE_SUCCESS:
		/*
		 * This might be the last TRB of a TD that ended up
		 * with a Short Transfer condition, see below.
		 */
		if (xfer->actlen == 0)
			xfer->actlen = xfer->length - remain;

		xfer->status = USBD_NORMAL_COMPLETION;
		break;
	case XHCI_CODE_SHORT_XFER:
		xfer->actlen = xfer->length - remain;

		/*
		 * If this is not the last TRB of a transfer, we should
		 * theoretically clear the IOC at the end of the chain
		 * but the HC might have already processed it before we
		 * had a chance to schedule the softinterrupt.
		 */
		xx = (struct xhci_xfer *)xfer;
		if (xx->index != trb_idx)
			return;

		xfer->status = USBD_NORMAL_COMPLETION;
		break;
	case XHCI_CODE_TXERR:
	case XHCI_CODE_SPLITERR:
		xfer->status = USBD_IOERROR;
		break;
	case XHCI_CODE_STALL:
	case XHCI_CODE_BABBLE:
		/* Prevent any timeout to kick in. */
		timeout_del(&xfer->timeout_handle);
		usb_rem_task(xfer->device, &xfer->abort_task);

		/* We need to report this condition for umass(4). */
		if (code == XHCI_CODE_STALL)
			xp->halted = USBD_STALLED;
		else
			xp->halted = USBD_IOERROR;
		/*
		 * Since the stack might try to start a new transfer as
		 * soon as a pending one finishes, make sure the endpoint
		 * is fully reset before calling usb_transfer_complete().
		 */
		xp->aborted_xfer = xfer;
		xhci_cmd_reset_ep_async(sc, slot, dci);
		return;
	case XHCI_CODE_XFER_STOPPED:
	case XHCI_CODE_XFER_STOPINV:
		/* Endpoint stopped while processing a TD. */
		if (xfer == xp->aborted_xfer) {
			DPRINTF(("%s: stopped xfer=%p\n", __func__, xfer));
			return;
		}

		/* FALLTHROUGH */
	default:
		DPRINTF(("%s: unhandled code %d\n", DEVNAME(sc), code));
		xfer->status = USBD_IOERROR;
		xp->halted = 1;
		break;
	}

	xhci_xfer_done(xfer);
}
815 
/*
 * Handle a command completion event: the asynchronous endpoint
 * recovery commands are chained here, while synchronous commands
 * simply wake up their submitter.
 */
void
xhci_event_command(struct xhci_softc *sc, uint64_t paddr)
{
	struct xhci_trb *trb;
	struct xhci_pipe *xp;
	uint32_t flags;
	uint8_t dci, slot;
	int trb_idx, status;

	/* The event's TRB address identifies the completed command TRB. */
	trb_idx = (paddr - sc->sc_cmd_ring.dma.paddr) / sizeof(*trb);
	if (trb_idx < 0 || trb_idx >= sc->sc_cmd_ring.ntrb) {
		printf("%s: wrong trb index (%d) max is %zu\n", DEVNAME(sc),
		    trb_idx, sc->sc_cmd_ring.ntrb - 1);
		return;
	}

	trb = &sc->sc_cmd_ring.trbs[trb_idx];

	flags = letoh32(trb->trb_flags);

	slot = XHCI_TRB_GET_SLOT(flags);
	dci = XHCI_TRB_GET_EP(flags);

	switch (flags & XHCI_TRB_TYPE_MASK) {
	case XHCI_CMD_RESET_EP:
		xp = sc->sc_sdevs[slot].pipes[dci - 1];
		if (xp == NULL)
			break;

		/* Update the dequeue pointer past the last TRB. */
		xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
		    DEQPTR(xp->ring) | xp->ring.toggle);
		break;
	case XHCI_CMD_SET_TR_DEQ:
		xp = sc->sc_sdevs[slot].pipes[dci - 1];
		if (xp == NULL)
			break;

		/* Endpoint recovered: complete the xfer that caused it. */
		status = xp->halted;
		xp->halted = 0;
		if (xp->aborted_xfer != NULL) {
			xp->aborted_xfer->status = status;
			xhci_xfer_done(xp->aborted_xfer);
			wakeup(xp);
		}
		break;
	case XHCI_CMD_CONFIG_EP:
	case XHCI_CMD_STOP_EP:
	case XHCI_CMD_DISABLE_SLOT:
	case XHCI_CMD_ENABLE_SLOT:
	case XHCI_CMD_ADDRESS_DEVICE:
	case XHCI_CMD_EVAL_CTX:
	case XHCI_CMD_NOOP:
		/* All these commands are synchronous. */
		KASSERT(sc->sc_cmd_trb == trb);
		sc->sc_cmd_trb = NULL;
		wakeup(&sc->sc_cmd_trb);
		break;
	default:
		DPRINTF(("%s: unexpected command %x\n", DEVNAME(sc), flags));
	}
}
878 
/*
 * Handle a port status change event by setting the corresponding
 * bit in the root hub interrupt pipe's buffer and completing the
 * pending root hub interrupt transfer, if any.
 */
void
xhci_event_port_change(struct xhci_softc *sc, uint64_t paddr, uint32_t status)
{
	struct usbd_xfer *xfer = sc->sc_intrxfer;
	uint32_t port = XHCI_TRB_PORTID(paddr);
	uint8_t *p;

	if (XHCI_TRB_GET_CODE(status) != XHCI_CODE_SUCCESS) {
		DPRINTF(("%s: failed port status event\n", DEVNAME(sc)));
		return;
	}

	/* No interrupt transfer pending on the root hub. */
	if (xfer == NULL)
		return;

	p = KERNADDR(&xfer->dmabuf, 0);
	memset(p, 0, xfer->length);

	/* One bit per port, as in a hub status change bitmap. */
	p[port/8] |= 1 << (port%8);
	DPRINTF(("%s: port=%d change=0x%02x\n", DEVNAME(sc), port, *p));

	xfer->actlen = xfer->length;
	xfer->status = USBD_NORMAL_COMPLETION;

	usb_transfer_complete(xfer);
}
905 
/*
 * Release the TRBs consumed by a finished xfer, cancel its timeout
 * and abort task, and complete it.
 */
void
xhci_xfer_done(struct usbd_xfer *xfer)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
	int ntrb, i;

	splsoftassert(IPL_SOFTUSB);

#ifdef XHCI_DEBUG
	if (xx->index < 0 || xp->pending_xfers[xx->index] == NULL) {
		printf("%s: xfer=%p done (idx=%d, ntrb=%zd)\n", __func__,
		    xfer, xx->index, xx->ntrb);
	}
#endif

	if (xp->aborted_xfer == xfer)
		xp->aborted_xfer = NULL;

	/* Walk backwards from the last TRB, wrapping around the ring. */
	for (ntrb = 0, i = xx->index; ntrb < xx->ntrb; ntrb++, i--) {
		xp->pending_xfers[i] = NULL;
		if (i == 0)
			i = (xp->ring.ntrb - 1);
	}
	xp->free_trbs += xx->ntrb;
	xx->index = -1;
	xx->ntrb = 0;

	timeout_del(&xfer->timeout_handle);
	usb_rem_task(xfer->device, &xfer->abort_task);
	usb_transfer_complete(xfer);
}
938 
939 /*
940  * Calculate the Device Context Index (DCI) for endpoints as stated
941  * in section 4.5.1 of xHCI specification r1.1.
942  */
943 static inline uint8_t
944 xhci_ed2dci(usb_endpoint_descriptor_t *ed)
945 {
946 	uint8_t dir;
947 
948 	if (UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL)
949 		return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + 1);
950 
951 	if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)
952 		dir = 1;
953 	else
954 		dir = 0;
955 
956 	return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + dir);
957 }
958 
/*
 * Open a pipe: select the pipe methods matching the endpoint type,
 * allocate a device slot for new devices (control pipe of a freshly
 * attached device) and initialize the endpoint's contexts and ring.
 */
usbd_status
xhci_pipe_open(struct usbd_pipe *pipe)
{
	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint8_t slot = 0, xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	int error;

	KASSERT(xp->slot == 0);

	if (sc->sc_bus.dying)
		return (USBD_IOERROR);

	/* Root Hub */
	if (pipe->device->depth == 0) {
		switch (ed->bEndpointAddress) {
		case USB_CONTROL_ENDPOINT:
			pipe->methods = &xhci_root_ctrl_methods;
			break;
		case UE_DIR_IN | XHCI_INTR_ENDPT:
			pipe->methods = &xhci_root_intr_methods;
			break;
		default:
			pipe->methods = NULL;
			return (USBD_INVAL);
		}
		return (USBD_NORMAL_COMPLETION);
	}

#if 0
	/* Issue a noop to check if the command ring is correctly configured. */
	xhci_cmd_noop(sc);
#endif

	switch (xfertype) {
	case UE_CONTROL:
		pipe->methods = &xhci_device_ctrl_methods;

		/*
		 * Get a slot and init the device's contexts.
		 *
		 * Since the control endpoint, represented as the default
		 * pipe, is always opened first we are dealing with a
		 * new device.  Put a new slot in the ENABLED state.
		 */
		error = xhci_cmd_slot_control(sc, &slot, 1);
		if (error || slot == 0 || slot > sc->sc_noslot)
			return (USBD_INVAL);

		if (xhci_softdev_alloc(sc, slot)) {
			xhci_cmd_slot_control(sc, &slot, 0);
			return (USBD_NOMEM);
		}

		break;
	case UE_ISOCHRONOUS:
#if notyet
		pipe->methods = &xhci_device_isoc_methods;
		break;
#else
		DPRINTF(("%s: isochronous xfer not supported \n", __func__));
		return (USBD_INVAL);
#endif
	case UE_BULK:
		pipe->methods = &xhci_device_bulk_methods;
		break;
	case UE_INTERRUPT:
		pipe->methods = &xhci_device_generic_methods;
		break;
	default:
		return (USBD_INVAL);
	}

	/*
	 * Our USBD Bus Interface is pipe-oriented but for most of the
	 * operations we need to access a device context, so keep track
	 * of the slot ID in every pipe.
	 */
	if (slot == 0)
		slot = ((struct xhci_pipe *)pipe->device->default_pipe)->slot;

	xp->slot = slot;
	xp->dci = xhci_ed2dci(ed);

	if (xhci_pipe_init(sc, pipe)) {
		xhci_cmd_slot_control(sc, &slot, 0);
		return (USBD_IOERROR);
	}

	return (USBD_NORMAL_COMPLETION);
}
1052 
1053 /*
1054  * Set the maximum Endpoint Service Interface Time (ESIT) payload and
1055  * the average TRB buffer length for an endpoint.
1056  */
1057 static inline uint32_t
1058 xhci_get_txinfo(struct xhci_softc *sc, struct usbd_pipe *pipe)
1059 {
1060 	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
1061 	uint32_t mep, atl, mps = UGETW(ed->wMaxPacketSize);
1062 
1063 	switch (ed->bmAttributes & UE_XFERTYPE) {
1064 	case UE_CONTROL:
1065 		mep = 0;
1066 		atl = 8;
1067 		break;
1068 	case UE_INTERRUPT:
1069 	case UE_ISOCHRONOUS:
1070 		if (pipe->device->speed == USB_SPEED_SUPER) {
1071 			/*  XXX Read the companion descriptor */
1072 		}
1073 
1074 		mep = (UE_GET_TRANS(mps) | 0x1) * UE_GET_SIZE(mps);
1075 		atl = min(sc->sc_pagesize, mep);
1076 		break;
1077 	case UE_BULK:
1078 	default:
1079 		mep = 0;
1080 		atl = 0;
1081 	}
1082 
1083 	return (XHCI_EPCTX_MAX_ESIT_PAYLOAD(mep) | XHCI_EPCTX_AVG_TRB_LEN(atl));
1084 }
1085 
/*
 * Fill the input context for the endpoint backing `pipe': build its
 * endpoint context, refresh the slot context and flag both as modified
 * in the input control context so they can be handed to the controller
 * via an Address Device/Configure Endpoint/Evaluate Context command.
 */
void
xhci_context_setup(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint8_t ival, speed, cerr = 0;
	uint32_t mps, route = 0, rhport = 0;
	struct usbd_device *hub;

	/*
	 * Calculate the Route String.  Assume that there is no hub with
	 * more than 15 ports and that they all have a depth < 6.  See
	 * section 8.9 of USB 3.1 Specification for more details.
	 */
	for (hub = pipe->device; hub->myhub->depth; hub = hub->myhub) {
		uint32_t port = hub->powersrc->portno;
		uint32_t depth = hub->myhub->depth;

		/* One nibble per hub tier, deepest tier in the lowest bits. */
		route |= port << (4 * (depth - 1));
	}

	/* Get Root Hub port */
	rhport = hub->powersrc->portno;

	/* Derive the default interval, xHCI speed ID and packet size. */
	switch (pipe->device->speed) {
	case USB_SPEED_LOW:
		ival= 3;
		speed = XHCI_SPEED_LOW;
		mps = 8;
		break;
	case USB_SPEED_FULL:
		ival = 3;
		speed = XHCI_SPEED_FULL;
		mps = 8;
		break;
	case USB_SPEED_HIGH:
		ival = min(3, ed->bInterval);
		speed = XHCI_SPEED_HIGH;
		mps = 64;
		break;
	case USB_SPEED_SUPER:
		ival = min(3, ed->bInterval);
		speed = XHCI_SPEED_SUPER;
		mps = 512;
		break;
	default:
		/* Unknown speed: leave the contexts untouched. */
		return;
	}

	/* XXX Until we fix wMaxPacketSize for ctrl ep depending on the speed */
	mps = max(mps, UE_GET_SIZE(UGETW(ed->wMaxPacketSize)));

	if (pipe->interval != USBD_DEFAULT_INTERVAL)
		ival = min(ival, pipe->interval);

	/* Setup the endpoint context */
	if (xfertype != UE_ISOCHRONOUS)
		cerr = 3;

	/* Control and bulk endpoints have no service interval. */
	if (xfertype == UE_CONTROL || xfertype == UE_BULK)
		ival = 0;

	/* OR in 0x4 to select the IN flavor of the endpoint type. */
	if ((ed->bEndpointAddress & UE_DIR_IN) || (xfertype == UE_CONTROL))
		xfertype |= 0x4;

	sdev->ep_ctx[xp->dci-1]->info_lo = htole32(XHCI_EPCTX_SET_IVAL(ival));
	sdev->ep_ctx[xp->dci-1]->info_hi = htole32(
	    XHCI_EPCTX_SET_MPS(mps) | XHCI_EPCTX_SET_EPTYPE(xfertype) |
	    XHCI_EPCTX_SET_CERR(cerr) | XHCI_EPCTX_SET_MAXB(0)
	);
	sdev->ep_ctx[xp->dci-1]->txinfo = htole32(xhci_get_txinfo(sc, pipe));
	/* Dequeue pointer carries the ring's current cycle state bit. */
	sdev->ep_ctx[xp->dci-1]->deqp = htole64(
	    DEQPTR(xp->ring) | xp->ring.toggle
	);

	/* Unmask the new endpoint */
	sdev->input_ctx->drop_flags = 0;
	sdev->input_ctx->add_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));

	/* Setup the slot context */
	sdev->slot_ctx->info_lo = htole32(
	    XHCI_SCTX_DCI(xp->dci) | XHCI_SCTX_SPEED(speed) |
	    XHCI_SCTX_ROUTE(route)
	);
	sdev->slot_ctx->info_hi = htole32(XHCI_SCTX_RHPORT(rhport));
	sdev->slot_ctx->tt = 0;
	sdev->slot_ctx->state = 0;

/* XXX */
#define UHUB_IS_MTT(dev) (dev->ddesc.bDeviceProtocol == UDPROTO_HSHUBMTT)
	/*
	 * If we are opening the interrupt pipe of a hub, update its
	 * context before putting it in the CONFIGURED state.
	 */
	if (pipe->device->hub != NULL) {
		int nports = pipe->device->hub->nports;

		sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_HUB(1));
		sdev->slot_ctx->info_hi |= htole32(XHCI_SCTX_NPORTS(nports));

		if (UHUB_IS_MTT(pipe->device))
			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));

		sdev->slot_ctx->tt |= htole32(
		    XHCI_SCTX_TT_THINK_TIME(pipe->device->hub->ttthink)
		);
	}

	/*
	 * If this is a Low or Full Speed device below an external High
	 * Speed hub, it needs some TT love.
	 */
	if (speed < XHCI_SPEED_HIGH && pipe->device->myhsport != NULL) {
		struct usbd_device *hshub = pipe->device->myhsport->parent;
		uint8_t slot = ((struct xhci_pipe *)hshub->default_pipe)->slot;

		if (UHUB_IS_MTT(hshub))
			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));

		sdev->slot_ctx->tt |= htole32(
		    XHCI_SCTX_TT_HUB_SID(slot) |
		    XHCI_SCTX_TT_PORT_NUM(pipe->device->myhsport->portno)
		);
	}
#undef UHUB_IS_MTT

	/* Unmask the slot context */
	sdev->input_ctx->add_flags |= htole32(XHCI_INCTX_MASK_DCI(0));

	/* Make the updated input context visible to the controller. */
	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
	    sc->sc_pagesize, BUS_DMASYNC_PREWRITE);
}
1220 
1221 int
1222 xhci_pipe_init(struct xhci_softc *sc, struct usbd_pipe *pipe)
1223 {
1224 	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
1225 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
1226 	int error;
1227 
1228 #ifdef XHCI_DEBUG
1229 	struct usbd_device *dev = pipe->device;
1230 	printf("%s: pipe=%p addr=%d depth=%d port=%d speed=%d dev %d dci %u"
1231 	    " (epAddr=0x%x)\n", __func__, pipe, dev->address, dev->depth,
1232 	    dev->powersrc->portno, dev->speed, xp->slot, xp->dci,
1233 	    pipe->endpoint->edesc->bEndpointAddress);
1234 #endif
1235 
1236 	if (xhci_ring_alloc(sc, &xp->ring, XHCI_MAX_XFER, XHCI_XFER_RING_ALIGN))
1237 		return (ENOMEM);
1238 
1239 	xp->free_trbs = xp->ring.ntrb;
1240 	xp->halted = 0;
1241 
1242 	sdev->pipes[xp->dci - 1] = xp;
1243 
1244 	xhci_context_setup(sc, pipe);
1245 
1246 	if (xp->dci == 1) {
1247 		/*
1248 		 * If we are opening the default pipe, the Slot should
1249 		 * be in the ENABLED state.  Issue an "Address Device"
1250 		 * with BSR=1 to put the device in the DEFAULT state.
1251 		 * We cannot jump directly to the ADDRESSED state with
1252 		 * BSR=0 because some Low/Full speed devices wont accept
1253 		 * a SET_ADDRESS command before we've read their device
1254 		 * descriptor.
1255 		 */
1256 		error = xhci_cmd_set_address(sc, xp->slot,
1257 		    sdev->ictx_dma.paddr, XHCI_TRB_BSR);
1258 	} else {
1259 		error = xhci_cmd_configure_ep(sc, xp->slot,
1260 		    sdev->ictx_dma.paddr);
1261 	}
1262 
1263 	if (error) {
1264 		xhci_ring_free(sc, &xp->ring);
1265 		return (EIO);
1266 	}
1267 
1268 	return (0);
1269 }
1270 
/*
 * Close `pipe': drop its endpoint context from the device's input
 * context, tell the controller, and release the transfer ring.  If
 * the default pipe is being closed, disable and free the whole slot.
 */
void
xhci_pipe_close(struct usbd_pipe *pipe)
{
	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
	struct xhci_pipe *lxp, *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	int i;

	/* Root Hub */
	if (pipe->device->depth == 0)
		return;

	/* Mask the endpoint */
	sdev->input_ctx->drop_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));
	sdev->input_ctx->add_flags = 0;

	/* Update last valid Endpoint Context */
	for (i = 30; i >= 0; i--) {
		lxp = sdev->pipes[i];
		if (lxp != NULL && lxp != xp)
			break;
	}
	/*
	 * NOTE(review): if no pipe other than `xp' is open, the loop
	 * falls through with lxp == sdev->pipes[0] — normally the
	 * default pipe, possibly xp itself.  Verify it can never be
	 * NULL here.
	 */
	sdev->slot_ctx->info_lo = htole32(XHCI_SCTX_DCI(lxp->dci));

	/* Clear the Endpoint Context */
	memset(sdev->ep_ctx[xp->dci - 1], 0, sizeof(struct xhci_epctx));

	/* Flush the input context before handing it to the controller. */
	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
	    sc->sc_pagesize, BUS_DMASYNC_PREWRITE);

	if (xhci_cmd_configure_ep(sc, xp->slot, sdev->ictx_dma.paddr))
		DPRINTF(("%s: error clearing ep (%d)\n", DEVNAME(sc), xp->dci));

	xhci_ring_free(sc, &xp->ring);
	sdev->pipes[xp->dci - 1] = NULL;

	/*
	 * If we are closing the default pipe, the device is probably
	 * gone, so put its slot in the DISABLED state.
	 */
	if (xp->dci == 1) {
		xhci_cmd_slot_control(sc, &xp->slot, 0);
		xhci_softdev_free(sc, xp->slot);
	}
}
1316 
1317 /*
1318  * Transition a device from DEFAULT to ADDRESSED Slot state, this hook
1319  * is needed for Low/Full speed devices.
1320  *
1321  * See section 4.5.3 of USB 3.1 Specification for more details.
1322  */
int
xhci_setaddr(struct usbd_device *dev, int addr)
{
	struct xhci_softc *sc = (struct xhci_softc *)dev->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)dev->default_pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	int error;

	/* Root Hub */
	if (dev->depth == 0)
		return (0);

	/* Addressing is always done through the default pipe (DCI 1). */
	KASSERT(xp->dci == 1);

	/* Rebuild the input context, then let the xHC assign the address. */
	xhci_context_setup(sc, dev->default_pipe);

	error = xhci_cmd_set_address(sc, xp->slot, sdev->ictx_dma.paddr, 0);

#ifdef XHCI_DEBUG
	if (error == 0) {
		struct xhci_sctx *sctx;
		uint8_t addr;

		/* Pull the controller-written output context into view. */
		bus_dmamap_sync(sdev->octx_dma.tag, sdev->octx_dma.map, 0,
		    sc->sc_pagesize, BUS_DMASYNC_POSTREAD);

		/* Get output slot context. */
		sctx = (struct xhci_sctx *)sdev->octx_dma.vaddr;
		addr = XHCI_SCTX_DEV_ADDR(letoh32(sctx->state));
		/* A zero device address means the command had no effect. */
		error = (addr == 0);

		printf("%s: dev %d addr %d\n", DEVNAME(sc), xp->slot, addr);
	}
#endif

	return (error);
}
1360 
1361 struct usbd_xfer *
1362 xhci_allocx(struct usbd_bus *bus)
1363 {
1364 	return (pool_get(xhcixfer, PR_NOWAIT | PR_ZERO));
1365 }
1366 
/* Return an xfer to the shared xhci transfer pool. */
void
xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
{
	pool_put(xhcixfer, xfer);
}
1372 
/*
 * Allocate the `npage' scratchpad buffers the controller requires plus
 * the table of pointers to them, and publish the table through entry 0
 * of the DCBAA.  Returns 0 on success, ENOMEM otherwise.
 */
int
xhci_scratchpad_alloc(struct xhci_softc *sc, int npage)
{
	uint64_t *pte;
	int error, i;

	/* Allocate the required entry for the table. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.table_dma,
	    (void **)&pte, npage * sizeof(uint64_t), XHCI_SPAD_TABLE_ALIGN,
	    sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Allocate pages. XXX does not need to be contiguous. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.pages_dma,
	    NULL, npage * sc->sc_pagesize, sc->sc_pagesize, 0);
	if (error) {
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
		return (ENOMEM);
	}

	/* Fill the table with the physical address of each page. */
	for (i = 0; i < npage; i++) {
		pte[i] = htole64(
		    sc->sc_spad.pages_dma.paddr + (i * sc->sc_pagesize)
		);
	}

	bus_dmamap_sync(sc->sc_spad.table_dma.tag, sc->sc_spad.table_dma.map, 0,
	    npage * sizeof(uint64_t), BUS_DMASYNC_PREWRITE);

	/*  Entry 0 points to the table of scratchpad pointers. */
	sc->sc_dcbaa.segs[0] = htole64(sc->sc_spad.table_dma.paddr);
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
	    sizeof(uint64_t), BUS_DMASYNC_PREWRITE);

	sc->sc_spad.npage = npage;

	return (0);
}
1412 
/* Detach the scratchpad table from the DCBAA and free its memory. */
void
xhci_scratchpad_free(struct xhci_softc *sc)
{
	/* Clear DCBAA entry 0 before releasing the backing memory. */
	sc->sc_dcbaa.segs[0] = 0;
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
	    sizeof(uint64_t), BUS_DMASYNC_PREWRITE);

	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.pages_dma);
	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
}
1423 
1424 int
1425 xhci_ring_alloc(struct xhci_softc *sc, struct xhci_ring *ring, size_t ntrb,
1426     size_t alignment)
1427 {
1428 	size_t size;
1429 	int error;
1430 
1431 	size = ntrb * sizeof(struct xhci_trb);
1432 
1433 	error = usbd_dma_contig_alloc(&sc->sc_bus, &ring->dma,
1434 	    (void **)&ring->trbs, size, alignment, XHCI_RING_BOUNDARY);
1435 	if (error)
1436 		return (error);
1437 
1438 	ring->ntrb = ntrb;
1439 
1440 	xhci_ring_reset(sc, ring);
1441 
1442 	return (0);
1443 }
1444 
/* Release the DMA memory backing `ring'. */
void
xhci_ring_free(struct xhci_softc *sc, struct xhci_ring *ring)
{
	usbd_dma_contig_free(&sc->sc_bus, &ring->dma);
}
1450 
/*
 * Zero a ring and reset its producer/consumer state.  For all rings
 * except the event ring, re-create the link TRB that closes the single
 * segment onto itself.
 */
void
xhci_ring_reset(struct xhci_softc *sc, struct xhci_ring *ring)
{
	size_t size;

	size = ring->ntrb * sizeof(struct xhci_trb);

	memset(ring->trbs, 0, size);

	/* Restart at slot 0 with the cycle bit set. */
	ring->index = 0;
	ring->toggle = XHCI_TRB_CYCLE;

	/*
	 * Since all our rings use only one segment, at least for
	 * the moment, link their tail to their head.
	 */
	if (ring != &sc->sc_evt_ring) {
		struct xhci_trb *trb = &ring->trbs[ring->ntrb - 1];

		trb->trb_paddr = htole64(ring->dma.paddr);
		trb->trb_flags = htole32(XHCI_TRB_TYPE_LINK | XHCI_TRB_LINKSEG);
	}
	/* Flush the whole (zeroed + link TRB) ring to the controller. */
	bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,
	    BUS_DMASYNC_PREWRITE);
}
1476 
/*
 * Return the TRB at the ring's dequeue index if the controller has
 * handed it over (its cycle bit matches the expected toggle), and
 * advance past it; NULL if there is nothing to consume.
 */
struct xhci_trb*
xhci_ring_consume(struct xhci_softc *sc, struct xhci_ring *ring)
{
	struct xhci_trb *trb = &ring->trbs[ring->index];

	KASSERT(ring->index < ring->ntrb);

	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD);

	/* Make sure this TRB can be consumed. */
	if (ring->toggle != (letoh32(trb->trb_flags) & XHCI_TRB_CYCLE))
		return (NULL);

	ring->index++;

	/* Wrap around and flip the expected cycle state. */
	if (ring->index == ring->ntrb) {
		ring->index = 0;
		ring->toggle ^= 1;
	}

	return (trb);
}
1500 
/*
 * Return the TRB at the ring's enqueue index and advance.  When the
 * enqueue index reaches the trailing link TRB, hand the link TRB's
 * cycle bit to the controller and wrap to the start of the segment.
 */
struct xhci_trb*
xhci_ring_produce(struct xhci_softc *sc, struct xhci_ring *ring)
{
	struct xhci_trb *trb = &ring->trbs[ring->index];

	KASSERT(ring->index < ring->ntrb);

	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD);

	ring->index++;

	/* Toggle cycle state of the link TRB and skip it. */
	if (ring->index == (ring->ntrb - 1)) {
		struct xhci_trb *lnk = &ring->trbs[ring->index];

		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
		    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD);

		lnk->trb_flags ^= htole32(XHCI_TRB_CYCLE);

		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
		    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);

		ring->index = 0;
		ring->toggle ^= 1;
	}

	return (trb);
}
1531 
/*
 * Reserve the next TRB of the xfer's transfer ring, record the xfer so
 * the completion path can find it, and return the TRB slot.  The
 * ring's current cycle bit is returned through `togglep'.
 */
struct xhci_trb *
xhci_xfer_get_trb(struct xhci_softc *sc, struct usbd_xfer *xfer,
    uint8_t *togglep, int last)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;

	KASSERT(xp->free_trbs >= 1);

	/* Associate this TRB to our xfer. */
	xp->pending_xfers[xp->ring.index] = xfer;
	xp->free_trbs--;

	/*
	 * Only the last TRB of a transfer keeps its ring index;
	 * intermediate TRBs are tagged -2 — presumably so completion
	 * only fires on the final TRB (verify against the event path).
	 */
	xx->index = (last) ? xp->ring.index : -2;
	xx->ntrb += 1;

	*togglep = xp->ring.toggle;
	return (xhci_ring_produce(sc, &xp->ring));
}
1551 
/*
 * Queue `trb0' on the command ring and ring doorbell 0.  A zero
 * `timeout' makes the command fire-and-forget; otherwise sleep up to
 * `timeout' ms for the completion event and copy the result TRB back
 * into `trb0'.  Returns 0 on success, EAGAIN if the ring is full,
 * EIO on a failed command, or the tsleep() error on timeout.
 */
int
xhci_command_submit(struct xhci_softc *sc, struct xhci_trb *trb0, int timeout)
{
	struct xhci_trb *trb;
	int s, error = 0;

	/* Only one synchronous command may be pending at a time. */
	KASSERT(timeout == 0 || sc->sc_cmd_trb == NULL);

	/* Stamp the TRB with the producer cycle state. */
	trb0->trb_flags |= htole32(sc->sc_cmd_ring.toggle);

	trb = xhci_ring_produce(sc, &sc->sc_cmd_ring);
	if (trb == NULL)
		return (EAGAIN);
	memcpy(trb, trb0, sizeof(struct xhci_trb));
	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);


	if (timeout == 0) {
		XDWRITE4(sc, XHCI_DOORBELL(0), 0);
		return (0);
	}

	assertwaitok();

	s = splusb();
	sc->sc_cmd_trb = trb;
	XDWRITE4(sc, XHCI_DOORBELL(0), 0);
	/* Convert ms to ticks, rounding up, plus one tick of slack. */
	error = tsleep(&sc->sc_cmd_trb, PZERO, "xhcicmd",
	    (timeout*hz+999)/ 1000 + 1);
	if (error) {
#ifdef XHCI_DEBUG
		printf("%s: tsleep() = %d\n", __func__, error);
		printf("cmd = %d ", XHCI_TRB_TYPE(letoh32(trb->trb_flags)));
		xhci_dump_trb(trb);
#endif
		KASSERT(sc->sc_cmd_trb == trb);
		sc->sc_cmd_trb = NULL;
		splx(s);
		return (error);
	}
	splx(s);

	/* sc_result_trb is filled elsewhere — presumably by the event
	 * handler that also wakes us up; verify against that path. */
	memcpy(trb0, &sc->sc_result_trb, sizeof(struct xhci_trb));

	if (XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)) == XHCI_CODE_SUCCESS)
		return (0);

#ifdef XHCI_DEBUG
	printf("%s: event error code=%d, result=%d  \n", DEVNAME(sc),
	    XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)),
	    XHCI_TRB_TYPE(letoh32(trb0->trb_flags)));
	xhci_dump_trb(trb0);
#endif
	return (EIO);
}
1609 
1610 int
1611 xhci_command_abort(struct xhci_softc *sc)
1612 {
1613 	uint32_t reg;
1614 	int i;
1615 
1616 	reg = XOREAD4(sc, XHCI_CRCR_LO);
1617 	if ((reg & XHCI_CRCR_LO_CRR) == 0)
1618 		return (0);
1619 
1620 	XOWRITE4(sc, XHCI_CRCR_LO, reg | XHCI_CRCR_LO_CA);
1621 	XOWRITE4(sc, XHCI_CRCR_HI, 0);
1622 
1623 	for (i = 0; i < 250; i++) {
1624 		usb_delay_ms(&sc->sc_bus, 1);
1625 		reg = XOREAD4(sc, XHCI_CRCR_LO) & XHCI_CRCR_LO_CRR;
1626 		if (!reg)
1627 			break;
1628 	}
1629 
1630 	if (reg) {
1631 		printf("%s: command ring abort timeout\n", DEVNAME(sc));
1632 		return (1);
1633 	}
1634 
1635 	return (0);
1636 }
1637 
1638 int
1639 xhci_cmd_configure_ep(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
1640 {
1641 	struct xhci_trb trb;
1642 
1643 	DPRINTF(("%s: %s dev %u\n", DEVNAME(sc), __func__, slot));
1644 
1645 	trb.trb_paddr = htole64(addr);
1646 	trb.trb_status = 0;
1647 	trb.trb_flags = htole32(
1648 	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_CONFIG_EP
1649 	);
1650 
1651 	return (xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT));
1652 }
1653 
1654 int
1655 xhci_cmd_stop_ep(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
1656 {
1657 	struct xhci_trb trb;
1658 
1659 	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
1660 
1661 	trb.trb_paddr = 0;
1662 	trb.trb_status = 0;
1663 	trb.trb_flags = htole32(
1664 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_STOP_EP
1665 	);
1666 
1667 	return (xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT));
1668 }
1669 
1670 void
1671 xhci_cmd_reset_ep_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
1672 {
1673 	struct xhci_trb trb;
1674 
1675 	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
1676 
1677 	trb.trb_paddr = 0;
1678 	trb.trb_status = 0;
1679 	trb.trb_flags = htole32(
1680 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_RESET_EP
1681 	);
1682 
1683 	xhci_command_submit(sc, &trb, 0);
1684 }
1685 
1686 void
1687 xhci_cmd_set_tr_deq_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci,
1688    uint64_t addr)
1689 {
1690 	struct xhci_trb trb;
1691 
1692 	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
1693 
1694 	trb.trb_paddr = htole64(addr);
1695 	trb.trb_status = 0;
1696 	trb.trb_flags = htole32(
1697 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_SET_TR_DEQ
1698 	);
1699 
1700 	xhci_command_submit(sc, &trb, 0);
1701 }
1702 
1703 int
1704 xhci_cmd_slot_control(struct xhci_softc *sc, uint8_t *slotp, int enable)
1705 {
1706 	struct xhci_trb trb;
1707 
1708 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
1709 
1710 	trb.trb_paddr = 0;
1711 	trb.trb_status = 0;
1712 	if (enable)
1713 		trb.trb_flags = htole32(XHCI_CMD_ENABLE_SLOT);
1714 	else
1715 		trb.trb_flags = htole32(
1716 			XHCI_TRB_SET_SLOT(*slotp) | XHCI_CMD_DISABLE_SLOT
1717 		);
1718 
1719 	if (xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT))
1720 		return (EIO);
1721 
1722 	if (enable)
1723 		*slotp = XHCI_TRB_GET_SLOT(letoh32(trb.trb_flags));
1724 
1725 	return (0);
1726 }
1727 
1728 int
1729 xhci_cmd_set_address(struct xhci_softc *sc, uint8_t slot, uint64_t addr,
1730     uint32_t bsr)
1731 {
1732 	struct xhci_trb trb;
1733 
1734 	DPRINTF(("%s: %s BSR=%u\n", DEVNAME(sc), __func__, bsr ? 1 : 0));
1735 
1736 	trb.trb_paddr = htole64(addr);
1737 	trb.trb_status = 0;
1738 	trb.trb_flags = htole32(
1739 	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_ADDRESS_DEVICE | bsr
1740 	);
1741 
1742 	return (xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT));
1743 }
1744 
1745 int
1746 xhci_cmd_evaluate_ctx(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
1747 {
1748 	struct xhci_trb trb;
1749 
1750 	DPRINTF(("%s: %s dev %u\n", DEVNAME(sc), __func__, slot));
1751 
1752 	trb.trb_paddr = htole64(addr);
1753 	trb.trb_status = 0;
1754 	trb.trb_flags = htole32(
1755 	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_EVAL_CTX
1756 	);
1757 
1758 	return (xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT));
1759 }
1760 
1761 #ifdef XHCI_DEBUG
1762 int
1763 xhci_cmd_noop(struct xhci_softc *sc)
1764 {
1765 	struct xhci_trb trb;
1766 
1767 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
1768 
1769 	trb.trb_paddr = 0;
1770 	trb.trb_status = 0;
1771 	trb.trb_flags = htole32(XHCI_CMD_NOOP);
1772 
1773 	return (xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT));
1774 }
1775 #endif
1776 
/*
 * Allocate the per-slot software state: the input context (input
 * control + slot + 31 endpoint contexts) and the output context, and
 * publish the output context's address in the DCBAA entry for `slot'.
 */
int
xhci_softdev_alloc(struct xhci_softc *sc, uint8_t slot)
{
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];
	int i, error;
	uint8_t *kva;

	/*
	 * Setup input context.  Even with 64 byte context size, it
	 * fits into the smallest supported page size, so use that.
	 */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->ictx_dma,
	    (void **)&kva, sc->sc_pagesize, XHCI_ICTX_ALIGN, sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Carve the page: input control ctx, slot ctx, then 31 EP ctx. */
	sdev->input_ctx = (struct xhci_inctx *)kva;
	sdev->slot_ctx = (struct xhci_sctx *)(kva + sc->sc_ctxsize);
	for (i = 0; i < 31; i++)
		sdev->ep_ctx[i] =
		    (struct xhci_epctx *)(kva + (i + 2) * sc->sc_ctxsize);

	DPRINTF(("%s: dev %d, input=%p slot=%p ep0=%p\n", DEVNAME(sc),
	 slot, sdev->input_ctx, sdev->slot_ctx, sdev->ep_ctx[0]));

	/* Setup output context */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->octx_dma, NULL,
	    sc->sc_pagesize, XHCI_OCTX_ALIGN, sc->sc_pagesize);
	if (error) {
		usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);
		return (ENOMEM);
	}

	memset(&sdev->pipes, 0, sizeof(sdev->pipes));

	DPRINTF(("%s: dev %d, setting DCBAA to 0x%016llx\n", DEVNAME(sc),
	    slot, (long long)sdev->octx_dma.paddr));

	/* Point the slot's DCBAA entry at the output context. */
	sc->sc_dcbaa.segs[slot] = htole64(sdev->octx_dma.paddr);
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREWRITE);

	return (0);
}
1821 
/* Undo xhci_softdev_alloc(): clear the DCBAA entry, free contexts. */
void
xhci_softdev_free(struct xhci_softc *sc, uint8_t slot)
{
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];

	/* Detach the slot from the DCBAA before freeing its memory. */
	sc->sc_dcbaa.segs[slot] = 0;
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREWRITE);

	usbd_dma_contig_free(&sc->sc_bus, &sdev->octx_dma);
	usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);

	memset(sdev, 0, sizeof(struct xhci_soft_dev));
}
1836 
/* Root hub descriptors. */
/*
 * Not const: idVendor is patched at run time with sc_id_vendor
 * before being copied out in the root GET_DESCRIPTOR handler.
 */
usb_device_descriptor_t xhci_devd = {
	USB_DEVICE_DESCRIPTOR_SIZE,
	UDESC_DEVICE,		/* type */
	{0x00, 0x03},		/* USB version */
	UDCLASS_HUB,		/* class */
	UDSUBCLASS_HUB,		/* subclass */
	UDPROTO_HSHUBSTT,	/* protocol */
	9,			/* max packet */
	{0},{0},{0x00,0x01},	/* device id */
	1,2,0,			/* string indexes */
	1			/* # of configurations */
};

const usb_config_descriptor_t xhci_confd = {
	USB_CONFIG_DESCRIPTOR_SIZE,
	UDESC_CONFIG,
	{USB_CONFIG_DESCRIPTOR_SIZE +
	 USB_INTERFACE_DESCRIPTOR_SIZE +
	 USB_ENDPOINT_DESCRIPTOR_SIZE},
	1,
	1,
	0,
	UC_SELF_POWERED,
	0                      /* max power */
};

const usb_interface_descriptor_t xhci_ifcd = {
	USB_INTERFACE_DESCRIPTOR_SIZE,
	UDESC_INTERFACE,
	0,
	0,
	1,
	UICLASS_HUB,
	UISUBCLASS_HUB,
	UIPROTO_HSHUBSTT,
	0
};

/* The root hub's single endpoint: the interrupt status-change pipe. */
const usb_endpoint_descriptor_t xhci_endpd = {
	USB_ENDPOINT_DESCRIPTOR_SIZE,
	UDESC_ENDPOINT,
	UE_DIR_IN | XHCI_INTR_ENDPT,
	UE_INTERRUPT,
	{2, 0},                 /* max 15 ports */
	255
};

const usb_endpoint_ss_comp_descriptor_t xhci_endpcd = {
	USB_ENDPOINT_SS_COMP_DESCRIPTOR_SIZE,
	UDESC_ENDPOINT_SS_COMP,
	0,
	0,
	{0, 0}
};

/* Hub descriptor template; port count fields filled in elsewhere. */
const usb_hub_descriptor_t xhci_hubd = {
	USB_HUB_DESCRIPTOR_SIZE,
	UDESC_SS_HUB,
	0,
	{0,0},
	0,
	0,
	{0},
};
1902 
/*
 * Abort `xfer' with `status': cancel its timeout and abort task, stop
 * the endpoint and move the dequeue pointer past the transfer's TRBs.
 * Must be called at splsoftusb.
 */
void
xhci_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	int error;

	splsoftassert(IPL_SOFTUSB);

	DPRINTF(("%s: xfer=%p status=%s err=%s actlen=%d len=%d idx=%d\n",
	    __func__, xfer, usbd_errstr(xfer->status), usbd_errstr(status),
	    xfer->actlen, xfer->length, ((struct xhci_xfer *)xfer)->index));

	/* XXX The stack should not call abort() in this case. */
	if (sc->sc_bus.dying || xfer->status == USBD_NOT_STARTED) {
		xfer->status = status;
		timeout_del(&xfer->timeout_handle);
		usb_rem_task(xfer->device, &xfer->abort_task);
		usb_transfer_complete(xfer);
		return;
	}

	/* Transfer is already done. */
	if (xfer->status != USBD_IN_PROGRESS) {
		DPRINTF(("%s: already done \n", __func__));
		return;
	}

	/* Prevent any timeout to kick in. */
	timeout_del(&xfer->timeout_handle);
	usb_rem_task(xfer->device, &xfer->abort_task);

	/* Indicate that we are aborting this transfer. */
	xp->halted = status;
	xp->aborted_xfer = xfer;

	/* Stop the endpoint and wait until the hardware says so. */
	if (xhci_cmd_stop_ep(sc, xp->slot, xp->dci))
		DPRINTF(("%s: error stopping endpoint\n", DEVNAME(sc)));

	/*
	 * The transfer was already completed when we stopped the
	 * endpoint, no need to move the dequeue pointer past its
	 * TRBs.
	 */
	if (xp->aborted_xfer == NULL) {
		DPRINTF(("%s: done before stopping the endpoint\n", __func__));
		xp->halted = 0;
		return;
	}

	/*
	 * At this stage the endpoint has been stopped, so update its
	 * dequeue pointer past the last TRB of the transfer.
	 *
	 * Note: This assume that only one transfer per endpoint has
	 *	 pending TRBs on the ring.
	 */
	xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
	    DEQPTR(xp->ring) | xp->ring.toggle);
	/*
	 * Sleep until woken (presumably by the Set TR Dequeue
	 * completion path — verify); timeout converts ms to ticks.
	 */
	error = tsleep(xp, PZERO, "xhciab", (XHCI_CMD_TIMEOUT*hz+999)/1000 + 1);
	if (error)
		printf("%s: timeout aborting transfer\n", DEVNAME(sc));
}
1967 
1968 void
1969 xhci_timeout(void *addr)
1970 {
1971 	struct usbd_xfer *xfer = addr;
1972 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
1973 
1974 	if (sc->sc_bus.dying) {
1975 		xhci_timeout_task(addr);
1976 		return;
1977 	}
1978 
1979 	usb_init_task(&xfer->abort_task, xhci_timeout_task, addr,
1980 	    USB_TASK_TYPE_ABORT);
1981 	usb_add_task(xfer->device, &xfer->abort_task);
1982 }
1983 
/* Task callback: perform the timeout abort at splusb. */
void
xhci_timeout_task(void *addr)
{
	struct usbd_xfer *xfer = addr;
	int s;

	s = splusb();
	xhci_abort_xfer(xfer, USBD_TIMEOUT);
	splx(s);
}
1994 
/*
 * Queue a root hub control transfer and start the first xfer in the
 * pipe's queue; root hub requests are handled in software.
 */
usbd_status
xhci_root_ctrl_transfer(struct usbd_xfer *xfer)
{
	usbd_status err;

	/* Insert last in the queue. */
	err = usb_insert_transfer(xfer);
	if (err)
		return (err);

	/* Start the xfer at the head of the queue. */
	return (xhci_root_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
}
2006 
2007 usbd_status
2008 xhci_root_ctrl_start(struct usbd_xfer *xfer)
2009 {
2010 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2011 	usb_port_status_t ps;
2012 	usb_device_request_t *req;
2013 	void *buf = NULL;
2014 	usb_hub_descriptor_t hubd;
2015 	usbd_status err;
2016 	int s, len, value, index;
2017 	int l, totlen = 0;
2018 	int port, i;
2019 	uint32_t v;
2020 
2021 	KASSERT(xfer->rqflags & URQ_REQUEST);
2022 
2023 	if (sc->sc_bus.dying)
2024 		return (USBD_IOERROR);
2025 
2026 	req = &xfer->request;
2027 
2028 	DPRINTFN(4,("%s: type=0x%02x request=%02x\n", __func__,
2029 	    req->bmRequestType, req->bRequest));
2030 
2031 	len = UGETW(req->wLength);
2032 	value = UGETW(req->wValue);
2033 	index = UGETW(req->wIndex);
2034 
2035 	if (len != 0)
2036 		buf = KERNADDR(&xfer->dmabuf, 0);
2037 
2038 #define C(x,y) ((x) | ((y) << 8))
2039 	switch(C(req->bRequest, req->bmRequestType)) {
2040 	case C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE):
2041 	case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE):
2042 	case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT):
2043 		/*
2044 		 * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops
2045 		 * for the integrated root hub.
2046 		 */
2047 		break;
2048 	case C(UR_GET_CONFIG, UT_READ_DEVICE):
2049 		if (len > 0) {
2050 			*(uint8_t *)buf = sc->sc_conf;
2051 			totlen = 1;
2052 		}
2053 		break;
2054 	case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
2055 		DPRINTFN(8,("xhci_root_ctrl_start: wValue=0x%04x\n", value));
2056 		switch(value >> 8) {
2057 		case UDESC_DEVICE:
2058 			if ((value & 0xff) != 0) {
2059 				err = USBD_IOERROR;
2060 				goto ret;
2061 			}
2062 			totlen = l = min(len, USB_DEVICE_DESCRIPTOR_SIZE);
2063 			USETW(xhci_devd.idVendor, sc->sc_id_vendor);
2064 			memcpy(buf, &xhci_devd, l);
2065 			break;
2066 		/*
2067 		 * We can't really operate at another speed, but the spec says
2068 		 * we need this descriptor.
2069 		 */
2070 		case UDESC_OTHER_SPEED_CONFIGURATION:
2071 		case UDESC_CONFIG:
2072 			if ((value & 0xff) != 0) {
2073 				err = USBD_IOERROR;
2074 				goto ret;
2075 			}
2076 			totlen = l = min(len, USB_CONFIG_DESCRIPTOR_SIZE);
2077 			memcpy(buf, &xhci_confd, l);
2078 			((usb_config_descriptor_t *)buf)->bDescriptorType =
2079 			    value >> 8;
2080 			buf = (char *)buf + l;
2081 			len -= l;
2082 			l = min(len, USB_INTERFACE_DESCRIPTOR_SIZE);
2083 			totlen += l;
2084 			memcpy(buf, &xhci_ifcd, l);
2085 			buf = (char *)buf + l;
2086 			len -= l;
2087 			l = min(len, USB_ENDPOINT_DESCRIPTOR_SIZE);
2088 			totlen += l;
2089 			memcpy(buf, &xhci_endpd, l);
2090 			break;
2091 		case UDESC_STRING:
2092 			if (len == 0)
2093 				break;
2094 			*(u_int8_t *)buf = 0;
2095 			totlen = 1;
2096 			switch (value & 0xff) {
2097 			case 0: /* Language table */
2098 				totlen = usbd_str(buf, len, "\001");
2099 				break;
2100 			case 1: /* Vendor */
2101 				totlen = usbd_str(buf, len, sc->sc_vendor);
2102 				break;
2103 			case 2: /* Product */
2104 				totlen = usbd_str(buf, len, "xHCI root hub");
2105 				break;
2106 			}
2107 			break;
2108 		default:
2109 			err = USBD_IOERROR;
2110 			goto ret;
2111 		}
2112 		break;
2113 	case C(UR_GET_INTERFACE, UT_READ_INTERFACE):
2114 		if (len > 0) {
2115 			*(uint8_t *)buf = 0;
2116 			totlen = 1;
2117 		}
2118 		break;
2119 	case C(UR_GET_STATUS, UT_READ_DEVICE):
2120 		if (len > 1) {
2121 			USETW(((usb_status_t *)buf)->wStatus,UDS_SELF_POWERED);
2122 			totlen = 2;
2123 		}
2124 		break;
2125 	case C(UR_GET_STATUS, UT_READ_INTERFACE):
2126 	case C(UR_GET_STATUS, UT_READ_ENDPOINT):
2127 		if (len > 1) {
2128 			USETW(((usb_status_t *)buf)->wStatus, 0);
2129 			totlen = 2;
2130 		}
2131 		break;
2132 	case C(UR_SET_ADDRESS, UT_WRITE_DEVICE):
2133 		if (value >= USB_MAX_DEVICES) {
2134 			err = USBD_IOERROR;
2135 			goto ret;
2136 		}
2137 		break;
2138 	case C(UR_SET_CONFIG, UT_WRITE_DEVICE):
2139 		if (value != 0 && value != 1) {
2140 			err = USBD_IOERROR;
2141 			goto ret;
2142 		}
2143 		sc->sc_conf = value;
2144 		break;
2145 	case C(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE):
2146 		break;
2147 	case C(UR_SET_FEATURE, UT_WRITE_DEVICE):
2148 	case C(UR_SET_FEATURE, UT_WRITE_INTERFACE):
2149 	case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT):
2150 		err = USBD_IOERROR;
2151 		goto ret;
2152 	case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE):
2153 		break;
2154 	case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT):
2155 		break;
2156 	/* Hub requests */
2157 	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
2158 		break;
2159 	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER):
2160 		DPRINTFN(8, ("xhci_root_ctrl_start: UR_CLEAR_PORT_FEATURE "
2161 		    "port=%d feature=%d\n", index, value));
2162 		if (index < 1 || index > sc->sc_noport) {
2163 			err = USBD_IOERROR;
2164 			goto ret;
2165 		}
2166 		port = XHCI_PORTSC(index);
2167 		v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR;
2168 		switch (value) {
2169 		case UHF_PORT_ENABLE:
2170 			XOWRITE4(sc, port, v | XHCI_PS_PED);
2171 			break;
2172 		case UHF_PORT_SUSPEND:
2173 			/* TODO */
2174 			break;
2175 		case UHF_PORT_POWER:
2176 			XOWRITE4(sc, port, v & ~XHCI_PS_PP);
2177 			break;
2178 		case UHF_PORT_INDICATOR:
2179 			XOWRITE4(sc, port, v & ~XHCI_PS_SET_PIC(3));
2180 			break;
2181 		case UHF_C_PORT_CONNECTION:
2182 			XOWRITE4(sc, port, v | XHCI_PS_CSC);
2183 			break;
2184 		case UHF_C_PORT_ENABLE:
2185 			XOWRITE4(sc, port, v | XHCI_PS_PEC);
2186 			break;
2187 		case UHF_C_PORT_SUSPEND:
2188 		case UHF_C_PORT_LINK_STATE:
2189 			XOWRITE4(sc, port, v | XHCI_PS_PLC);
2190 			break;
2191 		case UHF_C_PORT_OVER_CURRENT:
2192 			XOWRITE4(sc, port, v | XHCI_PS_OCC);
2193 			break;
2194 		case UHF_C_PORT_RESET:
2195 			XOWRITE4(sc, port, v | XHCI_PS_PRC);
2196 			break;
2197 		case UHF_C_BH_PORT_RESET:
2198 			XOWRITE4(sc, port, v | XHCI_PS_WRC);
2199 			break;
2200 		default:
2201 			err = USBD_IOERROR;
2202 			goto ret;
2203 		}
2204 		break;
2205 
2206 	case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
2207 		if (len == 0)
2208 			break;
2209 		if ((value & 0xff) != 0) {
2210 			err = USBD_IOERROR;
2211 			goto ret;
2212 		}
2213 		v = XREAD4(sc, XHCI_HCCPARAMS);
2214 		hubd = xhci_hubd;
2215 		hubd.bNbrPorts = sc->sc_noport;
2216 		USETW(hubd.wHubCharacteristics,
2217 		    (XHCI_HCC_PPC(v) ? UHD_PWR_INDIVIDUAL : UHD_PWR_GANGED) |
2218 		    (XHCI_HCC_PIND(v) ? UHD_PORT_IND : 0));
2219 		hubd.bPwrOn2PwrGood = 10; /* xHCI section 5.4.9 */
2220 		for (i = 1; i <= sc->sc_noport; i++) {
2221 			v = XOREAD4(sc, XHCI_PORTSC(i));
2222 			if (v & XHCI_PS_DR)
2223 				hubd.DeviceRemovable[i / 8] |= 1U << (i % 8);
2224 		}
2225 		hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
2226 		l = min(len, hubd.bDescLength);
2227 		totlen = l;
2228 		memcpy(buf, &hubd, l);
2229 		break;
2230 	case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
2231 		if (len != 16) {
2232 			err = USBD_IOERROR;
2233 			goto ret;
2234 		}
2235 		memset(buf, 0, len);
2236 		totlen = len;
2237 		break;
2238 	case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
2239 		DPRINTFN(8,("xhci_root_ctrl_start: get port status i=%d\n",
2240 		    index));
2241 		if (index < 1 || index > sc->sc_noport) {
2242 			err = USBD_IOERROR;
2243 			goto ret;
2244 		}
2245 		if (len != 4) {
2246 			err = USBD_IOERROR;
2247 			goto ret;
2248 		}
2249 		v = XOREAD4(sc, XHCI_PORTSC(index));
2250 		DPRINTFN(8,("xhci_root_ctrl_start: port status=0x%04x\n", v));
2251 		i = UPS_PORT_LS_SET(XHCI_PS_GET_PLS(v));
2252 		switch (XHCI_PS_SPEED(v)) {
2253 		case XHCI_SPEED_FULL:
2254 			i |= UPS_FULL_SPEED;
2255 			break;
2256 		case XHCI_SPEED_LOW:
2257 			i |= UPS_LOW_SPEED;
2258 			break;
2259 		case XHCI_SPEED_HIGH:
2260 			i |= UPS_HIGH_SPEED;
2261 			break;
2262 		case XHCI_SPEED_SUPER:
2263 		default:
2264 			break;
2265 		}
2266 		if (v & XHCI_PS_CCS)	i |= UPS_CURRENT_CONNECT_STATUS;
2267 		if (v & XHCI_PS_PED)	i |= UPS_PORT_ENABLED;
2268 		if (v & XHCI_PS_OCA)	i |= UPS_OVERCURRENT_INDICATOR;
2269 		if (v & XHCI_PS_PR)	i |= UPS_RESET;
2270 		if (v & XHCI_PS_PP)	{
2271 			if (XHCI_PS_SPEED(v) >= XHCI_SPEED_FULL &&
2272 			    XHCI_PS_SPEED(v) <= XHCI_SPEED_HIGH)
2273 				i |= UPS_PORT_POWER;
2274 			else
2275 				i |= UPS_PORT_POWER_SS;
2276 		}
2277 		USETW(ps.wPortStatus, i);
2278 		i = 0;
2279 		if (v & XHCI_PS_CSC)    i |= UPS_C_CONNECT_STATUS;
2280 		if (v & XHCI_PS_PEC)    i |= UPS_C_PORT_ENABLED;
2281 		if (v & XHCI_PS_OCC)    i |= UPS_C_OVERCURRENT_INDICATOR;
2282 		if (v & XHCI_PS_PRC)	i |= UPS_C_PORT_RESET;
2283 		if (v & XHCI_PS_WRC)	i |= UPS_C_BH_PORT_RESET;
2284 		if (v & XHCI_PS_PLC)	i |= UPS_C_PORT_LINK_STATE;
2285 		if (v & XHCI_PS_CEC)	i |= UPS_C_PORT_CONFIG_ERROR;
2286 		USETW(ps.wPortChange, i);
2287 		l = min(len, sizeof ps);
2288 		memcpy(buf, &ps, l);
2289 		totlen = l;
2290 		break;
2291 	case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
2292 		err = USBD_IOERROR;
2293 		goto ret;
2294 	case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
2295 		break;
2296 	case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER):
2297 
2298 		i = index >> 8;
2299 		index &= 0x00ff;
2300 
2301 		if (index < 1 || index > sc->sc_noport) {
2302 			err = USBD_IOERROR;
2303 			goto ret;
2304 		}
2305 		port = XHCI_PORTSC(index);
2306 		v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR;
2307 
2308 		switch (value) {
2309 		case UHF_PORT_ENABLE:
2310 			XOWRITE4(sc, port, v | XHCI_PS_PED);
2311 			break;
2312 		case UHF_PORT_SUSPEND:
2313 			DPRINTFN(6, ("suspend port %u (LPM=%u)\n", index, i));
2314 			if (XHCI_PS_SPEED(v) == XHCI_SPEED_SUPER) {
2315 				err = USBD_IOERROR;
2316 				goto ret;
2317 			}
2318 			XOWRITE4(sc, port, v |
2319 			    XHCI_PS_SET_PLS(i ? 2 /* LPM */ : 3) | XHCI_PS_LWS);
2320 			break;
2321 		case UHF_PORT_RESET:
2322 			DPRINTFN(6, ("reset port %d\n", index));
2323 			XOWRITE4(sc, port, v | XHCI_PS_PR);
2324 			break;
2325 		case UHF_PORT_POWER:
2326 			DPRINTFN(3, ("set port power %d\n", index));
2327 			XOWRITE4(sc, port, v | XHCI_PS_PP);
2328 			break;
2329 		case UHF_PORT_INDICATOR:
2330 			DPRINTFN(3, ("set port indicator %d\n", index));
2331 
2332 			v &= ~XHCI_PS_SET_PIC(3);
2333 			v |= XHCI_PS_SET_PIC(1);
2334 
2335 			XOWRITE4(sc, port, v);
2336 			break;
2337 		case UHF_C_PORT_RESET:
2338 			XOWRITE4(sc, port, v | XHCI_PS_PRC);
2339 			break;
2340 		case UHF_C_BH_PORT_RESET:
2341 			XOWRITE4(sc, port, v | XHCI_PS_WRC);
2342 			break;
2343 		default:
2344 			err = USBD_IOERROR;
2345 			goto ret;
2346 		}
2347 		break;
2348 	case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
2349 	case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
2350 	case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
2351 	case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
2352 		break;
2353 	default:
2354 		err = USBD_IOERROR;
2355 		goto ret;
2356 	}
2357 	xfer->actlen = totlen;
2358 	err = USBD_NORMAL_COMPLETION;
2359 ret:
2360 	xfer->status = err;
2361 	s = splusb();
2362 	usb_transfer_complete(xfer);
2363 	splx(s);
2364 	return (USBD_IN_PROGRESS);
2365 }
2366 
2367 
/*
 * Intentionally empty pipe method; used where the generic USB stack
 * requires a callback but this driver has no per-transfer work to do.
 */
void
xhci_noop(struct usbd_xfer *xfer)
{
}
2372 
2373 
2374 usbd_status
2375 xhci_root_intr_transfer(struct usbd_xfer *xfer)
2376 {
2377 	usbd_status err;
2378 
2379 	err = usb_insert_transfer(xfer);
2380 	if (err)
2381 		return (err);
2382 
2383 	return (xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2384 }
2385 
2386 usbd_status
2387 xhci_root_intr_start(struct usbd_xfer *xfer)
2388 {
2389 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2390 
2391 	if (sc->sc_bus.dying)
2392 		return (USBD_IOERROR);
2393 
2394 	sc->sc_intrxfer = xfer;
2395 
2396 	return (USBD_IN_PROGRESS);
2397 }
2398 
2399 void
2400 xhci_root_intr_abort(struct usbd_xfer *xfer)
2401 {
2402 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2403 	int s;
2404 
2405 	sc->sc_intrxfer = NULL;
2406 
2407 	xfer->status = USBD_CANCELLED;
2408 	s = splusb();
2409 	usb_transfer_complete(xfer);
2410 	splx(s);
2411 }
2412 
/* Root hub interrupt transfers need no completion processing. */
void
xhci_root_intr_done(struct usbd_xfer *xfer)
{
}
2417 
2418 /* Number of packets remaining in the TD after the corresponding TRB. */
2419 static inline uint32_t
2420 xhci_xfer_tdsize(struct usbd_xfer *xfer, uint32_t remain, uint32_t len)
2421 {
2422 	uint32_t npkt, mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
2423 
2424 	if (len == 0)
2425 		return XHCI_TRB_TDREM(0);
2426 
2427 	npkt = (remain - len) / mps;
2428 	if (npkt > 31)
2429 		npkt = 31;
2430 
2431 	return XHCI_TRB_TDREM(npkt);
2432 }
2433 
2434 usbd_status
2435 xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
2436 {
2437 	usbd_status err;
2438 
2439 	err = usb_insert_transfer(xfer);
2440 	if (err)
2441 		return (err);
2442 
2443 	return (xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2444 }
2445 
2446 usbd_status
2447 xhci_device_ctrl_start(struct usbd_xfer *xfer)
2448 {
2449 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2450 	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
2451 	struct xhci_trb *trb0, *trb;
2452 	uint32_t flags, len = UGETW(xfer->request.wLength);
2453 	uint8_t toggle0, toggle;
2454 	int s;
2455 
2456 	KASSERT(xfer->rqflags & URQ_REQUEST);
2457 
2458 	if (sc->sc_bus.dying || xp->halted)
2459 		return (USBD_IOERROR);
2460 
2461 	if (xp->free_trbs < 3)
2462 		return (USBD_NOMEM);
2463 
2464 	/* We'll do the setup TRB once we're finished with the other stages. */
2465 	trb0 = xhci_xfer_get_trb(sc, xfer, &toggle0, 0);
2466 
2467 	/* Data TRB */
2468 	if (len != 0) {
2469 		trb = xhci_xfer_get_trb(sc, xfer, &toggle, 0);
2470 
2471 		flags = XHCI_TRB_TYPE_DATA | toggle;
2472 		if (usbd_xfer_isread(xfer))
2473 			flags |= XHCI_TRB_DIR_IN | XHCI_TRB_ISP;
2474 
2475 		trb->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0));
2476 		trb->trb_status = htole32(
2477 		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
2478 		    xhci_xfer_tdsize(xfer, len, len)
2479 		);
2480 		trb->trb_flags = htole32(flags);
2481 
2482 	}
2483 
2484 	/* Status TRB */
2485 	trb = xhci_xfer_get_trb(sc, xfer, &toggle, 1);
2486 
2487 	flags = XHCI_TRB_TYPE_STATUS | XHCI_TRB_IOC | toggle;
2488 	if (len == 0 || !usbd_xfer_isread(xfer))
2489 		flags |= XHCI_TRB_DIR_IN;
2490 
2491 	trb->trb_paddr = 0;
2492 	trb->trb_status = htole32(XHCI_TRB_INTR(0));
2493 	trb->trb_flags = htole32(flags);
2494 
2495 	/* Setup TRB */
2496 	flags = XHCI_TRB_TYPE_SETUP | XHCI_TRB_IDT | toggle0;
2497 	if (len != 0) {
2498 		if (usbd_xfer_isread(xfer))
2499 			flags |= XHCI_TRB_TRT_IN;
2500 		else
2501 			flags |= XHCI_TRB_TRT_OUT;
2502 	}
2503 
2504 	trb0->trb_paddr = (uint64_t)*((uint64_t *)&xfer->request);
2505 	trb0->trb_status = htole32(XHCI_TRB_INTR(0) | XHCI_TRB_LEN(8));
2506 	trb0->trb_flags = htole32(flags);
2507 
2508 	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
2509 	    TRBOFF(&xp->ring, trb0), 3 * sizeof(struct xhci_trb),
2510 	    BUS_DMASYNC_PREWRITE);
2511 
2512 	s = splusb();
2513 	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);
2514 
2515 	xfer->status = USBD_IN_PROGRESS;
2516 
2517 	if (sc->sc_bus.use_polling)
2518 		xhci_waitintr(sc, xfer);
2519 	else if (xfer->timeout) {
2520 		timeout_del(&xfer->timeout_handle);
2521 		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
2522 		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
2523 	}
2524 	splx(s);
2525 
2526 	return (USBD_IN_PROGRESS);
2527 }
2528 
/* Abort a pending control transfer; completes it as cancelled. */
void
xhci_device_ctrl_abort(struct usbd_xfer *xfer)
{
	xhci_abort_xfer(xfer, USBD_CANCELLED);
}
2534 
2535 usbd_status
2536 xhci_device_generic_transfer(struct usbd_xfer *xfer)
2537 {
2538 	usbd_status err;
2539 
2540 	err = usb_insert_transfer(xfer);
2541 	if (err)
2542 		return (err);
2543 
2544 	return (xhci_device_generic_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2545 }
2546 
/*
 * Build and submit the TRB chain for a bulk/interrupt transfer, then
 * ring the endpoint's doorbell.  The buffer is split into TRBs of at
 * most XHCI_TRB_MAXSIZE bytes; the first TRB is filled in last so its
 * cycle bit (toggle0) only becomes valid once the rest of the chain is
 * in place.  Returns USBD_IN_PROGRESS on success.
 */
usbd_status
xhci_device_generic_start(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_trb *trb0, *trb;
	uint32_t len, remain, flags;
	uint32_t len0, mps;
	uint64_t paddr = DMAADDR(&xfer->dmabuf, 0);
	uint8_t toggle0, toggle;	/* cycle-bit state for the TRBs */
	int s, i, ntrb;

	/* Control requests go through xhci_device_ctrl_start() instead. */
	KASSERT(!(xfer->rqflags & URQ_REQUEST));

	if (sc->sc_bus.dying || xp->halted)
		return (USBD_IOERROR);

	/* How many TRBs do we need for this transfer? */
	ntrb = (xfer->length + XHCI_TRB_MAXSIZE - 1) / XHCI_TRB_MAXSIZE;

	/* If the buffer crosses a 64k boundary, we need one more. */
	len0 = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1));
	if (len0 < xfer->length)
		ntrb++;
	else
		len0 = xfer->length;

	/* If we need to append a zero length packet, we need one more. */
	mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
	if ((xfer->flags & USBD_FORCE_SHORT_XFER || xfer->length == 0) &&
	    (xfer->length % mps == 0))
		ntrb++;

	if (xp->free_trbs < ntrb)
		return (USBD_NOMEM);

	/* We'll do the first TRB once we're finished with the chain. */
	trb0 = xhci_xfer_get_trb(sc, xfer, &toggle0, (ntrb == 1));

	/* Account for the bytes covered by the first TRB. */
	remain = xfer->length - len0;
	paddr += len0;
	len = min(remain, XHCI_TRB_MAXSIZE);

	/* Chain more TRBs if needed. */
	for (i = ntrb - 1; i > 0; i--) {
		/* Next (or Last) TRB. */
		trb = xhci_xfer_get_trb(sc, xfer, &toggle, (i == 1));
		flags = XHCI_TRB_TYPE_NORMAL | toggle;
		if (usbd_xfer_isread(xfer))
			flags |= XHCI_TRB_ISP;
		/* Last TRB interrupts on completion; others just chain. */
		flags |= (i == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;

		trb->trb_paddr = htole64(paddr);
		trb->trb_status = htole32(
		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
		    xhci_xfer_tdsize(xfer, remain, len)
		);
		trb->trb_flags = htole32(flags);

		remain -= len;
		paddr += len;
		len = min(remain, XHCI_TRB_MAXSIZE);
	}

	/* First TRB. */
	flags = XHCI_TRB_TYPE_NORMAL | toggle0;
	if (usbd_xfer_isread(xfer))
		flags |= XHCI_TRB_ISP;
	flags |= (ntrb == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;

	trb0->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0));
	trb0->trb_status = htole32(
	    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len0) |
	    xhci_xfer_tdsize(xfer, xfer->length, len0)
 	);
	trb0->trb_flags = htole32(flags);

	/* Flush the whole TD to memory before ringing the doorbell. */
	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb) * ntrb,
	    BUS_DMASYNC_PREWRITE);

	s = splusb();
	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);

	xfer->status = USBD_IN_PROGRESS;

	if (sc->sc_bus.use_polling)
		xhci_waitintr(sc, xfer);
	else if (xfer->timeout) {
		/* Arm the transfer timeout. */
		timeout_del(&xfer->timeout_handle);
		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
	}
	splx(s);

	return (USBD_IN_PROGRESS);
}
2644 
2645 void
2646 xhci_device_generic_done(struct usbd_xfer *xfer)
2647 {
2648 	usb_syncmem(&xfer->dmabuf, 0, xfer->length, usbd_xfer_isread(xfer) ?
2649 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
2650 
2651 	/* Only happens with interrupt transfers. */
2652 	if (xfer->pipe->repeat) {
2653 		xfer->actlen = 0;
2654 		xhci_device_generic_start(xfer);
2655 	}
2656 }
2657 
/* Abort a pending bulk/interrupt transfer; completes it as cancelled. */
void
xhci_device_generic_abort(struct usbd_xfer *xfer)
{
	/* A repeating pipe may only be aborted via its own intrxfer. */
	KASSERT(!xfer->pipe->repeat || xfer->pipe->intrxfer == xfer);

	xhci_abort_xfer(xfer, USBD_CANCELLED);
}
2665