xref: /openbsd-src/sys/dev/usb/xhci.c (revision f2da64fbbbf1b03f09f390ab01267c93dfd77c4c)
1 /* $OpenBSD: xhci.c,v 1.67 2016/09/15 02:00:17 dlg Exp $ */
2 
3 /*
4  * Copyright (c) 2014-2015 Martin Pieuchot
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <sys/param.h>
20 #include <sys/systm.h>
21 #include <sys/kernel.h>
22 #include <sys/malloc.h>
23 #include <sys/device.h>
24 #include <sys/queue.h>
25 #include <sys/timeout.h>
26 #include <sys/pool.h>
27 #include <sys/endian.h>
28 
29 #include <machine/bus.h>
30 
31 #include <dev/usb/usb.h>
32 #include <dev/usb/usbdi.h>
33 #include <dev/usb/usbdivar.h>
34 #include <dev/usb/usb_mem.h>
35 
36 #include <dev/usb/xhcireg.h>
37 #include <dev/usb/xhcivar.h>
38 
/* autoconf(9) glue for the xhci(4) driver. */
struct cfdriver xhci_cd = {
	NULL, "xhci", DV_DULL
};
42 
43 #ifdef XHCI_DEBUG
44 #define DPRINTF(x)	do { if (xhcidebug) printf x; } while(0)
45 #define DPRINTFN(n,x)	do { if (xhcidebug>(n)) printf x; } while (0)
46 int xhcidebug = 3;
47 #else
48 #define DPRINTF(x)
49 #define DPRINTFN(n,x)
50 #endif
51 
#define DEVNAME(sc)	((sc)->sc_bus.bdev.dv_xname)

/* Byte offset of a TRB within its ring / bus address of the current index. */
#define TRBOFF(r, trb)	((char *)(trb) - (char *)((r)->trbs))
#define DEQPTR(r)	((r).dma.paddr + (sizeof(struct xhci_trb) * (r).index))

/* Transfer pool, shared by all xhci(4) instances (see xhci_init()). */
struct pool *xhcixfer;
58 
struct xhci_pipe {
	struct usbd_pipe	pipe;

	uint8_t			dci;	/* Device Context Index, see xhci_ed2dci() */
	uint8_t			slot;	/* Device slot ID */
	struct xhci_ring	ring;	/* Transfer ring of this endpoint */

	/*
	 * XXX used to pass the xfer pointer back to the
	 * interrupt routine, better way?
	 */
	struct usbd_xfer	*pending_xfers[XHCI_MAX_XFER]; /* indexed by TRB */
	struct usbd_xfer	*aborted_xfer;	/* waiting for endpoint reset */
	int			 halted;	/* usbd_status reported once unhalted */
	size_t			 free_trbs;	/* TRBs still available in the ring */
};
75 
76 int	xhci_reset(struct xhci_softc *);
77 int	xhci_intr1(struct xhci_softc *);
78 void	xhci_waitintr(struct xhci_softc *, struct usbd_xfer *);
79 void	xhci_event_dequeue(struct xhci_softc *);
80 void	xhci_event_xfer(struct xhci_softc *, uint64_t, uint32_t, uint32_t);
81 void	xhci_event_command(struct xhci_softc *, uint64_t);
82 void	xhci_event_port_change(struct xhci_softc *, uint64_t, uint32_t);
83 int	xhci_pipe_init(struct xhci_softc *, struct usbd_pipe *);
84 void	xhci_context_setup(struct xhci_softc *, struct usbd_pipe *);
85 int	xhci_scratchpad_alloc(struct xhci_softc *, int);
86 void	xhci_scratchpad_free(struct xhci_softc *);
87 int	xhci_softdev_alloc(struct xhci_softc *, uint8_t);
88 void	xhci_softdev_free(struct xhci_softc *, uint8_t);
89 int	xhci_ring_alloc(struct xhci_softc *, struct xhci_ring *, size_t,
90 	    size_t);
91 void	xhci_ring_free(struct xhci_softc *, struct xhci_ring *);
92 void	xhci_ring_reset(struct xhci_softc *, struct xhci_ring *);
93 struct	xhci_trb *xhci_ring_consume(struct xhci_softc *, struct xhci_ring *);
94 struct	xhci_trb *xhci_ring_produce(struct xhci_softc *, struct xhci_ring *);
95 
96 struct	xhci_trb *xhci_xfer_get_trb(struct xhci_softc *, struct usbd_xfer*,
97 	    uint8_t *, int);
98 void	xhci_xfer_done(struct usbd_xfer *xfer);
99 /* xHCI command helpers. */
100 int	xhci_command_submit(struct xhci_softc *, struct xhci_trb *, int);
101 int	xhci_command_abort(struct xhci_softc *);
102 
103 void	xhci_cmd_reset_ep_async(struct xhci_softc *, uint8_t, uint8_t);
104 void	xhci_cmd_set_tr_deq_async(struct xhci_softc *, uint8_t, uint8_t, uint64_t);
105 int	xhci_cmd_configure_ep(struct xhci_softc *, uint8_t, uint64_t);
106 int	xhci_cmd_stop_ep(struct xhci_softc *, uint8_t, uint8_t);
107 int	xhci_cmd_slot_control(struct xhci_softc *, uint8_t *, int);
108 int	xhci_cmd_set_address(struct xhci_softc *, uint8_t,  uint64_t, uint32_t);
109 int	xhci_cmd_evaluate_ctx(struct xhci_softc *, uint8_t, uint64_t);
110 #ifdef XHCI_DEBUG
111 int	xhci_cmd_noop(struct xhci_softc *);
112 #endif
113 
114 /* XXX should be part of the Bus interface. */
115 void	xhci_abort_xfer(struct usbd_xfer *, usbd_status);
116 void	xhci_pipe_close(struct usbd_pipe *);
117 void	xhci_noop(struct usbd_xfer *);
118 
119 void 	xhci_timeout(void *);
120 void	xhci_timeout_task(void *);
121 
122 /* USBD Bus Interface. */
123 usbd_status	  xhci_pipe_open(struct usbd_pipe *);
124 int		  xhci_setaddr(struct usbd_device *, int);
125 void		  xhci_softintr(void *);
126 void		  xhci_poll(struct usbd_bus *);
127 struct usbd_xfer *xhci_allocx(struct usbd_bus *);
128 void		  xhci_freex(struct usbd_bus *, struct usbd_xfer *);
129 
130 usbd_status	  xhci_root_ctrl_transfer(struct usbd_xfer *);
131 usbd_status	  xhci_root_ctrl_start(struct usbd_xfer *);
132 
133 usbd_status	  xhci_root_intr_transfer(struct usbd_xfer *);
134 usbd_status	  xhci_root_intr_start(struct usbd_xfer *);
135 void		  xhci_root_intr_abort(struct usbd_xfer *);
136 void		  xhci_root_intr_done(struct usbd_xfer *);
137 
138 usbd_status	  xhci_device_ctrl_transfer(struct usbd_xfer *);
139 usbd_status	  xhci_device_ctrl_start(struct usbd_xfer *);
140 void		  xhci_device_ctrl_abort(struct usbd_xfer *);
141 
142 usbd_status	  xhci_device_generic_transfer(struct usbd_xfer *);
143 usbd_status	  xhci_device_generic_start(struct usbd_xfer *);
144 void		  xhci_device_generic_abort(struct usbd_xfer *);
145 void		  xhci_device_generic_done(struct usbd_xfer *);
146 
147 #define XHCI_INTR_ENDPT 1
148 
/* Bus-level operations exported to the USB stack. */
struct usbd_bus_methods xhci_bus_methods = {
	.open_pipe = xhci_pipe_open,
	.dev_setaddr = xhci_setaddr,
	.soft_intr = xhci_softintr,
	.do_poll = xhci_poll,
	.allocx = xhci_allocx,
	.freex = xhci_freex,
};
157 
/* Methods for the root hub control pipe. */
struct usbd_pipe_methods xhci_root_ctrl_methods = {
	.transfer = xhci_root_ctrl_transfer,
	.start = xhci_root_ctrl_start,
	.abort = xhci_noop,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};
165 
/* Methods for the root hub interrupt pipe. */
struct usbd_pipe_methods xhci_root_intr_methods = {
	.transfer = xhci_root_intr_transfer,
	.start = xhci_root_intr_start,
	.abort = xhci_root_intr_abort,
	.close = xhci_pipe_close,
	.done = xhci_root_intr_done,
};
173 
/* Methods for device control pipes. */
struct usbd_pipe_methods xhci_device_ctrl_methods = {
	.transfer = xhci_device_ctrl_transfer,
	.start = xhci_device_ctrl_start,
	.abort = xhci_device_ctrl_abort,
	.close = xhci_pipe_close,
	.done = xhci_noop,
};
181 
182 #if notyet
183 struct usbd_pipe_methods xhci_device_isoc_methods = {
184 };
185 #endif
186 
/* Methods for device bulk pipes (generic transfer path). */
struct usbd_pipe_methods xhci_device_bulk_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};
194 
/* Generic methods, used for device interrupt pipes (see xhci_pipe_open()). */
struct usbd_pipe_methods xhci_device_generic_methods = {
	.transfer = xhci_device_generic_transfer,
	.start = xhci_device_generic_start,
	.abort = xhci_device_generic_abort,
	.close = xhci_pipe_close,
	.done = xhci_device_generic_done,
};
202 
203 #ifdef XHCI_DEBUG
/* Print the three fields of a TRB (debug builds only). */
static void
xhci_dump_trb(struct xhci_trb *trb)
{
	printf("trb=%p (0x%016llx 0x%08x 0x%b)\n", trb,
	    (long long)letoh64(trb->trb_paddr), letoh32(trb->trb_status),
	    (int)letoh32(trb->trb_flags), XHCI_TRB_FLAGS_BITMASK);
}
211 #endif
212 
213 int	usbd_dma_contig_alloc(struct usbd_bus *, struct usbd_dma_info *,
214 	    void **, bus_size_t, bus_size_t, bus_size_t);
215 void	usbd_dma_contig_free(struct usbd_bus *, struct usbd_dma_info *);
216 
217 int
218 usbd_dma_contig_alloc(struct usbd_bus *bus, struct usbd_dma_info *dma,
219     void **kvap, bus_size_t size, bus_size_t alignment, bus_size_t boundary)
220 {
221 	int error;
222 
223 	dma->tag = bus->dmatag;
224 	dma->size = size;
225 
226 	error = bus_dmamap_create(dma->tag, size, 1, size, boundary,
227 	    BUS_DMA_NOWAIT, &dma->map);
228 	if (error != 0)
229 		return (error);;
230 
231 	error = bus_dmamem_alloc(dma->tag, size, alignment, boundary, &dma->seg,
232 	    1, &dma->nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO);
233 	if (error != 0)
234 		goto destroy;
235 
236 	error = bus_dmamem_map(dma->tag, &dma->seg, 1, size, &dma->vaddr,
237 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
238 	if (error != 0)
239 		goto free;
240 
241 	error = bus_dmamap_load_raw(dma->tag, dma->map, &dma->seg, 1, size,
242 	    BUS_DMA_NOWAIT);
243 	if (error != 0)
244 		goto unmap;
245 
246 	bus_dmamap_sync(dma->tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
247 
248 	dma->paddr = dma->map->dm_segs[0].ds_addr;
249 	if (kvap != NULL)
250 		*kvap = dma->vaddr;
251 
252 	return (0);
253 
254 unmap:
255 	bus_dmamem_unmap(dma->tag, dma->vaddr, size);
256 free:
257 	bus_dmamem_free(dma->tag, &dma->seg, 1);
258 destroy:
259 	bus_dmamap_destroy(dma->tag, dma->map);
260 	return (error);
261 }
262 
/*
 * Counterpart of usbd_dma_contig_alloc(): unload, unmap and release the
 * DMA memory.  Clearing dma->map afterwards makes a second call on the
 * same descriptor a no-op.
 */
void
usbd_dma_contig_free(struct usbd_bus *bus, struct usbd_dma_info *dma)
{
	if (dma->map != NULL) {
		bus_dmamap_sync(bus->dmatag, dma->map, 0, dma->size,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(bus->dmatag, dma->map);
		bus_dmamem_unmap(bus->dmatag, dma->vaddr, dma->size);
		bus_dmamem_free(bus->dmatag, &dma->seg, 1);
		bus_dmamap_destroy(bus->dmatag, dma->map);
		dma->map = NULL;
	}
}
276 
277 int
278 xhci_init(struct xhci_softc *sc)
279 {
280 	uint32_t hcr;
281 	int npage, error;
282 
283 #ifdef XHCI_DEBUG
284 	uint16_t vers;
285 
286 	vers = XREAD2(sc, XHCI_HCIVERSION);
287 	printf("%s: xHCI version %x.%x\n", DEVNAME(sc), vers >> 8, vers & 0xff);
288 #endif
289 	sc->sc_bus.usbrev = USBREV_3_0;
290 	sc->sc_bus.methods = &xhci_bus_methods;
291 	sc->sc_bus.pipe_size = sizeof(struct xhci_pipe);
292 
293 	sc->sc_oper_off = XREAD1(sc, XHCI_CAPLENGTH);
294 	sc->sc_door_off = XREAD4(sc, XHCI_DBOFF);
295 	sc->sc_runt_off = XREAD4(sc, XHCI_RTSOFF);
296 
297 #ifdef XHCI_DEBUG
298 	printf("%s: CAPLENGTH=%#lx\n", DEVNAME(sc), sc->sc_oper_off);
299 	printf("%s: DOORBELL=%#lx\n", DEVNAME(sc), sc->sc_door_off);
300 	printf("%s: RUNTIME=%#lx\n", DEVNAME(sc), sc->sc_runt_off);
301 #endif
302 
303 	error = xhci_reset(sc);
304 	if (error)
305 		return (error);
306 
307 	if (xhcixfer == NULL) {
308 		xhcixfer = malloc(sizeof(struct pool), M_DEVBUF, M_NOWAIT);
309 		if (xhcixfer == NULL) {
310 			printf("%s: unable to allocate pool descriptor\n",
311 			    DEVNAME(sc));
312 			return (ENOMEM);
313 		}
314 		pool_init(xhcixfer, sizeof(struct xhci_xfer), 0, IPL_SOFTUSB,
315 		    0, "xhcixfer", NULL);
316 	}
317 
318 	hcr = XREAD4(sc, XHCI_HCCPARAMS);
319 	sc->sc_ctxsize = XHCI_HCC_CSZ(hcr) ? 64 : 32;
320 	DPRINTF(("%s: %d bytes context\n", DEVNAME(sc), sc->sc_ctxsize));
321 
322 #ifdef XHCI_DEBUG
323 	hcr = XOREAD4(sc, XHCI_PAGESIZE);
324 	printf("%s: supported page size 0x%08x\n", DEVNAME(sc), hcr);
325 #endif
326 	/* Use 4K for the moment since it's easier. */
327 	sc->sc_pagesize = 4096;
328 
329 	/* Get port and device slot numbers. */
330 	hcr = XREAD4(sc, XHCI_HCSPARAMS1);
331 	sc->sc_noport = XHCI_HCS1_N_PORTS(hcr);
332 	sc->sc_noslot = XHCI_HCS1_DEVSLOT_MAX(hcr);
333 	DPRINTF(("%s: %d ports and %d slots\n", DEVNAME(sc), sc->sc_noport,
334 	    sc->sc_noslot));
335 
336 	/* Setup Device Context Base Address Array. */
337 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_dcbaa.dma,
338 	    (void **)&sc->sc_dcbaa.segs, (sc->sc_noslot + 1) * sizeof(uint64_t),
339 	    XHCI_DCBAA_ALIGN, sc->sc_pagesize);
340 	if (error)
341 		return (ENOMEM);
342 
343 	/* Setup command ring. */
344 	error = xhci_ring_alloc(sc, &sc->sc_cmd_ring, XHCI_MAX_CMDS,
345 	    XHCI_CMDS_RING_ALIGN);
346 	if (error) {
347 		printf("%s: could not allocate command ring.\n", DEVNAME(sc));
348 		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
349 		return (error);
350 	}
351 
352 	/* Setup one event ring and its segment table (ERST). */
353 	error = xhci_ring_alloc(sc, &sc->sc_evt_ring, XHCI_MAX_EVTS,
354 	    XHCI_EVTS_RING_ALIGN);
355 	if (error) {
356 		printf("%s: could not allocate event ring.\n", DEVNAME(sc));
357 		xhci_ring_free(sc, &sc->sc_cmd_ring);
358 		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
359 		return (error);
360 	}
361 
362 	/* Allocate the required entry for the segment table. */
363 	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_erst.dma,
364 	    (void **)&sc->sc_erst.segs, sizeof(struct xhci_erseg),
365 	    XHCI_ERST_ALIGN, XHCI_ERST_BOUNDARY);
366 	if (error) {
367 		printf("%s: could not allocate segment table.\n", DEVNAME(sc));
368 		xhci_ring_free(sc, &sc->sc_evt_ring);
369 		xhci_ring_free(sc, &sc->sc_cmd_ring);
370 		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
371 		return (ENOMEM);
372 	}
373 
374 	/* Set our ring address and size in its corresponding segment. */
375 	sc->sc_erst.segs[0].er_addr = htole64(sc->sc_evt_ring.dma.paddr);
376 	sc->sc_erst.segs[0].er_size = htole32(XHCI_MAX_EVTS);
377 	sc->sc_erst.segs[0].er_rsvd = 0;
378 	bus_dmamap_sync(sc->sc_erst.dma.tag, sc->sc_erst.dma.map, 0,
379 	    sc->sc_erst.dma.size, BUS_DMASYNC_PREWRITE);
380 
381 	/* Get the number of scratch pages and configure them if necessary. */
382 	hcr = XREAD4(sc, XHCI_HCSPARAMS2);
383 	npage = XHCI_HCS2_SPB_MAX(hcr);
384 	DPRINTF(("%s: %d scratch pages\n", DEVNAME(sc), npage));
385 
386 	if (npage > 0 && xhci_scratchpad_alloc(sc, npage)) {
387 		printf("%s: could not allocate scratchpad.\n", DEVNAME(sc));
388 		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
389 		xhci_ring_free(sc, &sc->sc_evt_ring);
390 		xhci_ring_free(sc, &sc->sc_cmd_ring);
391 		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);
392 		return (ENOMEM);
393 	}
394 
395 
396 	return (0);
397 }
398 
/*
 * Program the controller with the addresses of the data structures
 * allocated by xhci_init() (DCBAA, command ring, ERST, event ring),
 * enable interrupts and start it.  The register write order follows
 * this sequence and should not be changed.
 */
void
xhci_config(struct xhci_softc *sc)
{
	uint64_t paddr;
	uint32_t hcr;

	/* Make sure to program a number of device slots we can handle. */
	if (sc->sc_noslot > USB_MAX_DEVICES)
		sc->sc_noslot = USB_MAX_DEVICES;
	hcr = XOREAD4(sc, XHCI_CONFIG) & ~XHCI_CONFIG_SLOTS_MASK;
	XOWRITE4(sc, XHCI_CONFIG, hcr | sc->sc_noslot);

	/* Set the device context base array address. */
	paddr = (uint64_t)sc->sc_dcbaa.dma.paddr;
	XOWRITE4(sc, XHCI_DCBAAP_LO, (uint32_t)paddr);
	XOWRITE4(sc, XHCI_DCBAAP_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: DCBAAP=%#x%#x\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_DCBAAP_HI), XOREAD4(sc, XHCI_DCBAAP_LO)));

	/* Set the command ring address. */
	paddr = (uint64_t)sc->sc_cmd_ring.dma.paddr;
	XOWRITE4(sc, XHCI_CRCR_LO, ((uint32_t)paddr) | XHCI_CRCR_LO_RCS);
	XOWRITE4(sc, XHCI_CRCR_HI, (uint32_t)(paddr >> 32));

	DPRINTF(("%s: CRCR=%#x%#x (%016llx)\n", DEVNAME(sc),
	    XOREAD4(sc, XHCI_CRCR_HI), XOREAD4(sc, XHCI_CRCR_LO), paddr));

	/* Set the ERST count number to 1, since we use only one event ring. */
	XRWRITE4(sc, XHCI_ERSTSZ(0), XHCI_ERSTS_SET(1));

	/* Set the segment table address. */
	paddr = (uint64_t)sc->sc_erst.dma.paddr;
	XRWRITE4(sc, XHCI_ERSTBA_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERSTBA=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERSTBA_HI(0)), XRREAD4(sc, XHCI_ERSTBA_LO(0))));

	/* Set the ring dequeue address. */
	paddr = (uint64_t)sc->sc_evt_ring.dma.paddr;
	XRWRITE4(sc, XHCI_ERDP_LO(0), (uint32_t)paddr);
	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));

	DPRINTF(("%s: ERDP=%#x%#x\n", DEVNAME(sc),
	    XRREAD4(sc, XHCI_ERDP_HI(0)), XRREAD4(sc, XHCI_ERDP_LO(0))));

	/* Enable interrupts. */
	hcr = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), hcr | XHCI_IMAN_INTR_ENA);

	/* Set default interrupt moderation. */
	XRWRITE4(sc, XHCI_IMOD(0), XHCI_IMOD_DEFAULT);

	/* Allow event interrupt and start the controller. */
	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_INTE|XHCI_CMD_RS);

	DPRINTF(("%s: USBCMD=%#x\n", DEVNAME(sc), XOREAD4(sc, XHCI_USBCMD)));
	DPRINTF(("%s: IMAN=%#x\n", DEVNAME(sc), XRREAD4(sc, XHCI_IMAN(0))));
}
459 
/*
 * Detach children first, then stop the controller, clear every address
 * programmed in xhci_config() and release the data structures allocated
 * in xhci_init().  Register accesses are best-effort: the hardware may
 * already be gone.
 */
int
xhci_detach(struct device *self, int flags)
{
	struct xhci_softc *sc = (struct xhci_softc *)self;
	int rv;

	rv = config_detach_children(self, flags);
	if (rv != 0) {
		printf("%s: error while detaching %d\n", DEVNAME(sc), rv);
		return (rv);
	}

	/* Since the hardware might already be gone, ignore the errors. */
	xhci_command_abort(sc);

	xhci_reset(sc);

	/* Disable interrupts. */
	XRWRITE4(sc, XHCI_IMOD(0), 0);
	XRWRITE4(sc, XHCI_IMAN(0), 0);

	/* Clear the event ring address. */
	XRWRITE4(sc, XHCI_ERDP_LO(0), 0);
	XRWRITE4(sc, XHCI_ERDP_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTBA_LO(0), 0);
	XRWRITE4(sc, XHCI_ERSTBA_HI(0), 0);

	XRWRITE4(sc, XHCI_ERSTSZ(0), 0);

	/* Clear the command ring address. */
	XOWRITE4(sc, XHCI_CRCR_LO, 0);
	XOWRITE4(sc, XHCI_CRCR_HI, 0);

	XOWRITE4(sc, XHCI_DCBAAP_LO, 0);
	XOWRITE4(sc, XHCI_DCBAAP_HI, 0);

	if (sc->sc_spad.npage > 0)
		xhci_scratchpad_free(sc);

	/* Free in the reverse order of xhci_init(). */
	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_erst.dma);
	xhci_ring_free(sc, &sc->sc_evt_ring);
	xhci_ring_free(sc, &sc->sc_cmd_ring);
	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_dcbaa.dma);

	return (0);
}
507 
/*
 * Power management hook.  On resume the controller state is assumed
 * lost: reset it, reset both rings and reprogram everything before
 * letting the children resume.  On powerdown reset it after the
 * children have been suspended.
 */
int
xhci_activate(struct device *self, int act)
{
	struct xhci_softc *sc = (struct xhci_softc *)self;
	int rv = 0;

	switch (act) {
	case DVACT_RESUME:
		sc->sc_bus.use_polling++;

		xhci_reset(sc);
		xhci_ring_reset(sc, &sc->sc_cmd_ring);
		xhci_ring_reset(sc, &sc->sc_evt_ring);

		/* Renesas controllers, at least, need more time to resume. */
		usb_delay_ms(&sc->sc_bus, USB_RESUME_WAIT);

		xhci_config(sc);

		sc->sc_bus.use_polling--;
		rv = config_activate_children(self, act);
		break;
	case DVACT_POWERDOWN:
		rv = config_activate_children(self, act);
		xhci_reset(sc);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}

	return (rv);
}
541 
/*
 * Halt the controller, then issue a Host Controller Reset and wait for
 * it to complete.  Each step is polled for up to 100ms in 1ms steps.
 * Returns EIO if the reset did not complete; a halt timeout is only
 * reported, the reset is attempted anyway.
 */
int
xhci_reset(struct xhci_softc *sc)
{
	uint32_t hcr;
	int i;

	XOWRITE4(sc, XHCI_USBCMD, 0);	/* Halt controller */
	for (i = 0; i < 100; i++) {
		usb_delay_ms(&sc->sc_bus, 1);
		hcr = XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_HCH;
		if (hcr)
			break;
	}

	if (!hcr)
		printf("%s: halt timeout\n", DEVNAME(sc));

	XOWRITE4(sc, XHCI_USBCMD, XHCI_CMD_HCRST);
	for (i = 0; i < 100; i++) {
		usb_delay_ms(&sc->sc_bus, 1);
		/* Done when both the reset bit and CNR have cleared. */
		hcr = (XOREAD4(sc, XHCI_USBCMD) & XHCI_CMD_HCRST) |
		    (XOREAD4(sc, XHCI_USBSTS) & XHCI_STS_CNR);
		if (!hcr)
			break;
	}

	if (hcr) {
		printf("%s: reset timeout\n", DEVNAME(sc));
		return (EIO);
	}

	return (0);
}
575 
576 
577 int
578 xhci_intr(void *v)
579 {
580 	struct xhci_softc *sc = v;
581 
582 	if (sc == NULL || sc->sc_bus.dying)
583 		return (0);
584 
585 	/* If we get an interrupt while polling, then just ignore it. */
586 	if (sc->sc_bus.use_polling) {
587 		DPRINTFN(16, ("xhci_intr: ignored interrupt while polling\n"));
588 		return (0);
589 	}
590 
591 	return (xhci_intr1(sc));
592 }
593 
/*
 * Interrupt work proper: check that the event interrupt is ours,
 * acknowledge it and schedule the soft interrupt that will drain the
 * event ring.  Returns non-zero when the interrupt was claimed.
 */
int
xhci_intr1(struct xhci_softc *sc)
{
	uint32_t intrs;

	intrs = XOREAD4(sc, XHCI_USBSTS);
	if (intrs == 0xffffffff) {
		/* All bits set: the device has likely been detached. */
		sc->sc_bus.dying = 1;
		return (0);
	}

	if ((intrs & XHCI_STS_EINT) == 0)
		return (0);

	sc->sc_bus.no_intrs++;

	if (intrs & XHCI_STS_HSE) {
		printf("%s: host system error\n", DEVNAME(sc));
		sc->sc_bus.dying = 1;
		return (1);
	}

	XOWRITE4(sc, XHCI_USBSTS, intrs); /* Acknowledge */
	usb_schedsoftintr(&sc->sc_bus);

	/* Acknowledge PCI interrupt */
	intrs = XRREAD4(sc, XHCI_IMAN(0));
	XRWRITE4(sc, XHCI_IMAN(0), intrs | XHCI_IMAN_INTR_PEND);

	return (1);
}
625 
626 void
627 xhci_poll(struct usbd_bus *bus)
628 {
629 	struct xhci_softc *sc = (struct xhci_softc *)bus;
630 
631 	if (XOREAD4(sc, XHCI_USBSTS))
632 		xhci_intr1(sc);
633 }
634 
/*
 * Busy-wait for the completion of <xfer>, polling the controller once
 * per millisecond until the transfer's timeout expires.  If it does
 * not complete in time, it is completed with USBD_TIMEOUT.
 */
void
xhci_waitintr(struct xhci_softc *sc, struct usbd_xfer *xfer)
{
	int timo;

	for (timo = xfer->timeout; timo >= 0; timo--) {
		usb_delay_ms(&sc->sc_bus, 1);
		if (sc->sc_bus.dying)
			break;

		/* Completed by a previous xhci_intr1() pass. */
		if (xfer->status != USBD_IN_PROGRESS)
			return;

		xhci_intr1(sc);
	}

	xfer->status = USBD_TIMEOUT;
	usb_transfer_complete(xfer);
}
654 
655 void
656 xhci_softintr(void *v)
657 {
658 	struct xhci_softc *sc = v;
659 
660 	if (sc->sc_bus.dying)
661 		return;
662 
663 	sc->sc_bus.intr_context++;
664 	xhci_event_dequeue(sc);
665 	sc->sc_bus.intr_context--;
666 }
667 
/*
 * Consume every pending event from the event ring, dispatching each
 * one to its handler, then write the new dequeue pointer back to the
 * controller.
 */
void
xhci_event_dequeue(struct xhci_softc *sc)
{
	struct xhci_trb *trb;
	uint64_t paddr;
	uint32_t status, flags;

	while ((trb = xhci_ring_consume(sc, &sc->sc_evt_ring)) != NULL) {
		paddr = letoh64(trb->trb_paddr);
		status = letoh32(trb->trb_status);
		flags = letoh32(trb->trb_flags);

		switch (flags & XHCI_TRB_TYPE_MASK) {
		case XHCI_EVT_XFER:
			xhci_event_xfer(sc, paddr, status, flags);
			break;
		case XHCI_EVT_CMD_COMPLETE:
			/* Keep a copy for the synchronous command waiters. */
			memcpy(&sc->sc_result_trb, trb, sizeof(*trb));
			xhci_event_command(sc, paddr);
			break;
		case XHCI_EVT_PORT_CHANGE:
			xhci_event_port_change(sc, paddr, status);
			break;
		default:
#ifdef XHCI_DEBUG
			printf("event (%d): ", XHCI_TRB_TYPE(flags));
			xhci_dump_trb(trb);
#endif
			break;
		}

	}

	/* Tell the controller how far we got. */
	paddr = (uint64_t)DEQPTR(sc->sc_evt_ring);
	XRWRITE4(sc, XHCI_ERDP_LO(0), ((uint32_t)paddr) | XHCI_ERDP_LO_BUSY);
	XRWRITE4(sc, XHCI_ERDP_HI(0), (uint32_t)(paddr >> 32));
}
705 
/*
 * Handle a Transfer event: map it back to the pipe and pending xfer it
 * refers to, translate the completion code into a usbd_status and
 * complete the transfer.  Stalled or babbling endpoints are reset
 * asynchronously before completion is reported, see the STALL/BABBLE
 * case and xhci_event_command().
 */
void
xhci_event_xfer(struct xhci_softc *sc, uint64_t paddr, uint32_t status,
    uint32_t flags)
{
	struct xhci_pipe *xp;
	struct usbd_xfer *xfer;
	struct xhci_xfer *xx;
	uint8_t dci, slot, code;
	uint32_t remain;
	int trb_idx;

	slot = XHCI_TRB_GET_SLOT(flags);
	dci = XHCI_TRB_GET_EP(flags);
	if (slot > sc->sc_noslot) {
		DPRINTF(("%s: incorrect slot (%u)\n", DEVNAME(sc), slot));
		return;
	}

	xp = sc->sc_sdevs[slot].pipes[dci - 1];
	if (xp == NULL)
		return;

	code = XHCI_TRB_GET_CODE(status);
	remain = XHCI_TRB_REMAIN(status);

	/* Locate the TRB (hence the xfer) this event refers to. */
	trb_idx = (paddr - xp->ring.dma.paddr) / sizeof(struct xhci_trb);
	if (trb_idx < 0 || trb_idx >= xp->ring.ntrb) {
		printf("%s: wrong trb index (%d) max is %zu\n", DEVNAME(sc),
		    trb_idx, xp->ring.ntrb - 1);
		return;
	}

	xfer = xp->pending_xfers[trb_idx];
	if (xfer == NULL) {
		printf("%s: NULL xfer pointer\n", DEVNAME(sc));
		return;
	}

	if (remain > xfer->length)
		remain = xfer->length;

	switch (code) {
	case XHCI_CODE_SUCCESS:
		/*
		 * This might be the last TRB of a TD that ended up
		 * with a Short Transfer condition, see below.
		 */
		if (xfer->actlen == 0)
			xfer->actlen = xfer->length - remain;

		xfer->status = USBD_NORMAL_COMPLETION;
		break;
	case XHCI_CODE_SHORT_XFER:
		xfer->actlen = xfer->length - remain;

		/*
		 * If this is not the last TRB of a transfer, we should
		 * theoretically clear the IOC at the end of the chain
		 * but the HC might have already processed it before we
		 * had a chance to schedule the softinterrupt.
		 */
		xx = (struct xhci_xfer *)xfer;
		if (xx->index != trb_idx)
			return;

		xfer->status = USBD_NORMAL_COMPLETION;
		break;
	case XHCI_CODE_TXERR:
	case XHCI_CODE_SPLITERR:
		xfer->status = USBD_IOERROR;
		break;
	case XHCI_CODE_STALL:
	case XHCI_CODE_BABBLE:
		/* Prevent any timeout to kick in. */
		timeout_del(&xfer->timeout_handle);
		usb_rem_task(xfer->device, &xfer->abort_task);

		/* We need to report this condition for umass(4). */
		if (code == XHCI_CODE_STALL)
			xp->halted = USBD_STALLED;
		else
			xp->halted = USBD_IOERROR;
		/*
		 * Since the stack might try to start a new transfer as
		 * soon as a pending one finishes, make sure the endpoint
		 * is fully reset before calling usb_transfer_complete().
		 */
		xp->aborted_xfer = xfer;
		xhci_cmd_reset_ep_async(sc, slot, dci);
		return;
	case XHCI_CODE_XFER_STOPPED:
	case XHCI_CODE_XFER_STOPINV:
		/* Endpoint stopped while processing a TD. */
		if (xfer == xp->aborted_xfer) {
			DPRINTF(("%s: stopped xfer=%p\n", __func__, xfer));
		    	return;
		}

		/* FALLTHROUGH */
	default:
		DPRINTF(("%s: unhandled code %d\n", DEVNAME(sc), code));
		xfer->status = USBD_IOERROR;
		xp->halted = 1;
		break;
	}

	xhci_xfer_done(xfer);
}
814 
/*
 * Handle a Command Completion event.  For the asynchronous endpoint
 * recovery commands (Reset Endpoint, Set TR Dequeue) continue the
 * recovery sequence started in xhci_event_xfer(); all other commands
 * are submitted synchronously, so wake up the thread sleeping on
 * sc_cmd_trb.
 */
void
xhci_event_command(struct xhci_softc *sc, uint64_t paddr)
{
	struct xhci_trb *trb;
	struct xhci_pipe *xp;
	uint32_t flags;
	uint8_t dci, slot;
	int trb_idx, status;

	/* Locate the command TRB this event refers to. */
	trb_idx = (paddr - sc->sc_cmd_ring.dma.paddr) / sizeof(*trb);
	if (trb_idx < 0 || trb_idx >= sc->sc_cmd_ring.ntrb) {
		printf("%s: wrong trb index (%d) max is %zu\n", DEVNAME(sc),
		    trb_idx, sc->sc_cmd_ring.ntrb - 1);
		return;
	}

	trb = &sc->sc_cmd_ring.trbs[trb_idx];

	flags = letoh32(trb->trb_flags);

	slot = XHCI_TRB_GET_SLOT(flags);
	dci = XHCI_TRB_GET_EP(flags);

	switch (flags & XHCI_TRB_TYPE_MASK) {
	case XHCI_CMD_RESET_EP:
		xp = sc->sc_sdevs[slot].pipes[dci - 1];
		if (xp == NULL)
			break;

		/* Update the dequeue pointer past the last TRB. */
		xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
		    DEQPTR(xp->ring) | xp->ring.toggle);
		break;
	case XHCI_CMD_SET_TR_DEQ:
		xp = sc->sc_sdevs[slot].pipes[dci - 1];
		if (xp == NULL)
			break;

		/* Recovery done: complete the aborted xfer with the
		 * status recorded when the endpoint halted. */
		status = xp->halted;
		xp->halted = 0;
		if (xp->aborted_xfer != NULL) {
			xp->aborted_xfer->status = status;
			xhci_xfer_done(xp->aborted_xfer);
			wakeup(xp);
		}
		break;
	case XHCI_CMD_CONFIG_EP:
	case XHCI_CMD_STOP_EP:
	case XHCI_CMD_DISABLE_SLOT:
	case XHCI_CMD_ENABLE_SLOT:
	case XHCI_CMD_ADDRESS_DEVICE:
	case XHCI_CMD_EVAL_CTX:
	case XHCI_CMD_NOOP:
		/* All these commands are synchronous. */
		KASSERT(sc->sc_cmd_trb == trb);
		sc->sc_cmd_trb = NULL;
		wakeup(&sc->sc_cmd_trb);
		break;
	default:
		DPRINTF(("%s: unexpected command %x\n", DEVNAME(sc), flags));
	}
}
877 
878 void
879 xhci_event_port_change(struct xhci_softc *sc, uint64_t paddr, uint32_t status)
880 {
881 	struct usbd_xfer *xfer = sc->sc_intrxfer;
882 	uint32_t port = XHCI_TRB_PORTID(paddr);
883 	uint8_t *p;
884 
885 	if (XHCI_TRB_GET_CODE(status) != XHCI_CODE_SUCCESS) {
886 		DPRINTF(("%s: failed port status event\n", DEVNAME(sc)));
887 		return;
888 	}
889 
890 	if (xfer == NULL)
891 		return;
892 
893 	p = KERNADDR(&xfer->dmabuf, 0);
894 	memset(p, 0, xfer->length);
895 
896 	p[port/8] |= 1 << (port%8);
897 	DPRINTF(("%s: port=%d change=0x%02x\n", DEVNAME(sc), port, *p));
898 
899 	xfer->actlen = xfer->length;
900 	xfer->status = USBD_NORMAL_COMPLETION;
901 
902 	usb_transfer_complete(xfer);
903 }
904 
/*
 * Release the TRBs used by a completed transfer, unlink it from the
 * pipe's pending table, cancel its timeout and abort task, and call
 * usb_transfer_complete().
 */
void
xhci_xfer_done(struct usbd_xfer *xfer)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;
	int ntrb, i;

	splsoftassert(IPL_SOFTUSB);

#ifdef XHCI_DEBUG
	if (xx->index < 0 || xp->pending_xfers[xx->index] == NULL) {
		printf("%s: xfer=%p done (idx=%d, ntrb=%zd)\n", __func__,
		    xfer, xx->index, xx->ntrb);
	}
#endif

	if (xp->aborted_xfer == xfer)
		xp->aborted_xfer = NULL;

	/* Walk backwards over the TD's TRBs, wrapping at the ring start. */
	for (ntrb = 0, i = xx->index; ntrb < xx->ntrb; ntrb++, i--) {
		xp->pending_xfers[i] = NULL;
		if (i == 0)
			i = (xp->ring.ntrb - 1);
	}
	xp->free_trbs += xx->ntrb;
	xx->index = -1;
	xx->ntrb = 0;

	timeout_del(&xfer->timeout_handle);
	usb_rem_task(xfer->device, &xfer->abort_task);
	usb_transfer_complete(xfer);
}
937 
938 /*
939  * Calculate the Device Context Index (DCI) for endpoints as stated
940  * in section 4.5.1 of xHCI specification r1.1.
941  */
942 static inline uint8_t
943 xhci_ed2dci(usb_endpoint_descriptor_t *ed)
944 {
945 	uint8_t dir;
946 
947 	if (UE_GET_XFERTYPE(ed->bmAttributes) == UE_CONTROL)
948 		return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + 1);
949 
950 	if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN)
951 		dir = 1;
952 	else
953 		dir = 0;
954 
955 	return (UE_GET_ADDR(ed->bEndpointAddress) * 2 + dir);
956 }
957 
/*
 * USBD open_pipe method.  Select the methods matching the endpoint
 * type; for the default control pipe of a new device also obtain a
 * device slot, then initialize the endpoint context.  Root hub pipes
 * take a separate, software-only path.
 */
usbd_status
xhci_pipe_open(struct usbd_pipe *pipe)
{
	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint8_t slot = 0, xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	int error;

	KASSERT(xp->slot == 0);

	if (sc->sc_bus.dying)
		return (USBD_IOERROR);

	/* Root Hub */
	if (pipe->device->depth == 0) {
		switch (ed->bEndpointAddress) {
		case USB_CONTROL_ENDPOINT:
			pipe->methods = &xhci_root_ctrl_methods;
			break;
		case UE_DIR_IN | XHCI_INTR_ENDPT:
			pipe->methods = &xhci_root_intr_methods;
			break;
		default:
			pipe->methods = NULL;
			return (USBD_INVAL);
		}
		return (USBD_NORMAL_COMPLETION);
	}

#if 0
	/* Issue a noop to check if the command ring is correctly configured. */
	xhci_cmd_noop(sc);
#endif

	switch (xfertype) {
	case UE_CONTROL:
		pipe->methods = &xhci_device_ctrl_methods;

		/*
		 * Get a slot and init the device's contexts.
		 *
		 * Since the control endpoint, represented as the default
		 * pipe, is always opened first we are dealing with a
		 * new device.  Put a new slot in the ENABLED state.
		 *
		 */
		error = xhci_cmd_slot_control(sc, &slot, 1);
		if (error || slot == 0 || slot > sc->sc_noslot)
			return (USBD_INVAL);

		if (xhci_softdev_alloc(sc, slot)) {
			xhci_cmd_slot_control(sc, &slot, 0);
			return (USBD_NOMEM);
		}

		break;
	case UE_ISOCHRONOUS:
#if notyet
		pipe->methods = &xhci_device_isoc_methods;
		break;
#else
		DPRINTF(("%s: isochronous xfer not supported \n", __func__));
		return (USBD_INVAL);
#endif
	case UE_BULK:
		pipe->methods = &xhci_device_bulk_methods;
		break;
	case UE_INTERRUPT:
		pipe->methods = &xhci_device_generic_methods;
		break;
	default:
		return (USBD_INVAL);
	}

	/*
	 * Our USBD Bus Interface is pipe-oriented but for most of the
	 * operations we need to access a device context, so keep trace
	 * of the slot ID in every pipe.
	 */
	if (slot == 0)
		slot = ((struct xhci_pipe *)pipe->device->default_pipe)->slot;

	xp->slot = slot;
	xp->dci = xhci_ed2dci(ed);

	if (xhci_pipe_init(sc, pipe)) {
		xhci_cmd_slot_control(sc, &slot, 0);
		return (USBD_IOERROR);
	}

	return (USBD_NORMAL_COMPLETION);
}
1051 
1052 /*
1053  * Set the maximum Endpoint Service Interface Time (ESIT) payload and
1054  * the average TRB buffer length for an endpoint.
1055  */
static inline uint32_t
xhci_get_txinfo(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint32_t mep, atl, mps = UGETW(ed->wMaxPacketSize);

	switch (ed->bmAttributes & UE_XFERTYPE) {
	case UE_CONTROL:
		/* Control: no ESIT payload, 8 bytes covers a Setup stage. */
		mep = 0;
		atl = 8;
		break;
	case UE_INTERRUPT:
	case UE_ISOCHRONOUS:
		if (pipe->device->speed == USB_SPEED_SUPER) {
			/*  XXX Read the companion descriptor */
		}

		/*
		 * Derive the max ESIT payload from wMaxPacketSize's
		 * transaction-opportunity bits and packet size.
		 * NOTE(review): "| 0x1" (not "+ 1") maps trans counts
		 * 0/1/2 to multipliers 1/3/3 — confirm this is intended.
		 */
		mep = (UE_GET_TRANS(mps) | 0x1) * UE_GET_SIZE(mps);
		atl = min(sc->sc_pagesize, mep);
		break;
	case UE_BULK:
	default:
		/* Bulk endpoints have no service interval; leave both 0. */
		mep = 0;
		atl = 0;
	}

	return (XHCI_EPCTX_MAX_ESIT_PAYLOAD(mep) | XHCI_EPCTX_AVG_TRB_LEN(atl));
}
1084 
/*
 * Fill in the input context (slot context + one endpoint context) for
 * the endpoint behind "pipe" and flush it so the controller can read
 * it with the next Address Device / Configure Endpoint command.
 */
void
xhci_context_setup(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	usb_endpoint_descriptor_t *ed = pipe->endpoint->edesc;
	uint8_t xfertype = UE_GET_XFERTYPE(ed->bmAttributes);
	uint8_t ival, speed, cerr = 0;
	uint32_t mps, route = 0, rhport = 0;
	struct usbd_device *hub;

	/*
	 * Calculate the Route String.  Assume that there is no hub with
	 * more than 15 ports and that they all have a depth < 6.  See
	 * section 8.9 of USB 3.1 Specification for more details.
	 */
	for (hub = pipe->device; hub->myhub->depth; hub = hub->myhub) {
		uint32_t port = hub->powersrc->portno;
		uint32_t depth = hub->myhub->depth;

		/* 4 bits of route string per hub tier. */
		route |= port << (4 * (depth - 1));
	}

	/* Get Root Hub port */
	rhport = hub->powersrc->portno;

	/* Default interval, speed code and control-pipe packet size. */
	switch (pipe->device->speed) {
	case USB_SPEED_LOW:
		ival= 3;
		speed = XHCI_SPEED_LOW;
		mps = 8;
		break;
	case USB_SPEED_FULL:
		ival = 3;
		speed = XHCI_SPEED_FULL;
		mps = 8;
		break;
	case USB_SPEED_HIGH:
		ival = min(3, ed->bInterval);
		speed = XHCI_SPEED_HIGH;
		mps = 64;
		break;
	case USB_SPEED_SUPER:
		ival = min(3, ed->bInterval);
		speed = XHCI_SPEED_SUPER;
		mps = 512;
		break;
	default:
		/* Unknown speed: leave the contexts untouched. */
		return;
	}

	/* XXX Until we fix wMaxPacketSize for ctrl ep depending on the speed */
	mps = max(mps, UE_GET_SIZE(UGETW(ed->wMaxPacketSize)));

	if (pipe->interval != USBD_DEFAULT_INTERVAL)
		ival = min(ival, pipe->interval);

	/* Setup the endpoint context */
	if (xfertype != UE_ISOCHRONOUS)
		cerr = 3;	/* up to 3 retries; isoc has no retries */

	/* Interval only applies to periodic (intr/isoc) endpoints. */
	if (xfertype == UE_CONTROL || xfertype == UE_BULK)
		ival = 0;

	/*
	 * Convert the USB transfer type into the xHCI endpoint type
	 * code: IN endpoints and the (bidirectional) control endpoint
	 * are offset by 4.
	 */
	if ((ed->bEndpointAddress & UE_DIR_IN) || (xfertype == UE_CONTROL))
		xfertype |= 0x4;

	sdev->ep_ctx[xp->dci-1]->info_lo = htole32(XHCI_EPCTX_SET_IVAL(ival));
	sdev->ep_ctx[xp->dci-1]->info_hi = htole32(
	    XHCI_EPCTX_SET_MPS(mps) | XHCI_EPCTX_SET_EPTYPE(xfertype) |
	    XHCI_EPCTX_SET_CERR(cerr) | XHCI_EPCTX_SET_MAXB(0)
	);
	sdev->ep_ctx[xp->dci-1]->txinfo = htole32(xhci_get_txinfo(sc, pipe));
	/* Initial dequeue pointer carries the ring's cycle state bit. */
	sdev->ep_ctx[xp->dci-1]->deqp = htole64(
	    DEQPTR(xp->ring) | xp->ring.toggle
	);

	/* Unmask the new endpoint */
	sdev->input_ctx->drop_flags = 0;
	sdev->input_ctx->add_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));

	/* Setup the slot context */
	sdev->slot_ctx->info_lo = htole32(
	    XHCI_SCTX_DCI(xp->dci) | XHCI_SCTX_SPEED(speed) |
	    XHCI_SCTX_ROUTE(route)
	);
	sdev->slot_ctx->info_hi = htole32(XHCI_SCTX_RHPORT(rhport));
	sdev->slot_ctx->tt = 0;
	sdev->slot_ctx->state = 0;

/* XXX */
#define UHUB_IS_MTT(dev) (dev->ddesc.bDeviceProtocol == UDPROTO_HSHUBMTT)
	/*
	 * If we are opening the interrupt pipe of a hub, update its
	 * context before putting it in the CONFIGURED state.
	 */
	if (pipe->device->hub != NULL) {
		int nports = pipe->device->hub->nports;

		sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_HUB(1));
		sdev->slot_ctx->info_hi |= htole32(XHCI_SCTX_NPORTS(nports));

		if (UHUB_IS_MTT(pipe->device))
			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));

		sdev->slot_ctx->tt |= htole32(
		    XHCI_SCTX_TT_THINK_TIME(pipe->device->hub->ttthink)
		);
	}

	/*
	 * If this is a Low or Full Speed device below an external High
	 * Speed hub, it needs some TT love.
	 */
	if (speed < XHCI_SPEED_HIGH && pipe->device->myhsport != NULL) {
		struct usbd_device *hshub = pipe->device->myhsport->parent;
		uint8_t slot = ((struct xhci_pipe *)hshub->default_pipe)->slot;

		if (UHUB_IS_MTT(hshub))
			sdev->slot_ctx->info_lo |= htole32(XHCI_SCTX_MTT(1));

		sdev->slot_ctx->tt |= htole32(
		    XHCI_SCTX_TT_HUB_SID(slot) |
		    XHCI_SCTX_TT_PORT_NUM(pipe->device->myhsport->portno)
		);
	}
#undef UHUB_IS_MTT

	/* Unmask the slot context */
	sdev->input_ctx->add_flags |= htole32(XHCI_INCTX_MASK_DCI(0));

	/* Flush the whole input context before ringing the doorbell. */
	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
	    sc->sc_pagesize, BUS_DMASYNC_PREWRITE);
}
1219 
/*
 * Allocate a transfer ring for the pipe, fill in the input context and
 * tell the controller about it.  The default pipe (DCI 1) moves the
 * slot from ENABLED to DEFAULT; any other pipe issues a Configure
 * Endpoint command.  Returns 0 or ENOMEM/EIO.
 */
int
xhci_pipe_init(struct xhci_softc *sc, struct usbd_pipe *pipe)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	int error;

#ifdef XHCI_DEBUG
	struct usbd_device *dev = pipe->device;
	printf("%s: pipe=%p addr=%d depth=%d port=%d speed=%d dev %d dci %u"
	    " (epAddr=0x%x)\n", __func__, pipe, dev->address, dev->depth,
	    dev->powersrc->portno, dev->speed, xp->slot, xp->dci,
	    pipe->endpoint->edesc->bEndpointAddress);
#endif

	if (xhci_ring_alloc(sc, &xp->ring, XHCI_MAX_XFER, XHCI_XFER_RING_ALIGN))
		return (ENOMEM);

	xp->free_trbs = xp->ring.ntrb;
	xp->halted = 0;

	/* Register the pipe so events can be matched back to it. */
	sdev->pipes[xp->dci - 1] = xp;

	xhci_context_setup(sc, pipe);

	if (xp->dci == 1) {
		/*
		 * If we are opening the default pipe, the Slot should
		 * be in the ENABLED state.  Issue an "Address Device"
		 * with BSR=1 to put the device in the DEFAULT state.
		 * We cannot jump directly to the ADDRESSED state with
		 * BSR=0 because some Low/Full speed devices wont accept
		 * a SET_ADDRESS command before we've read their device
		 * descriptor.
		 */
		error = xhci_cmd_set_address(sc, xp->slot,
		    sdev->ictx_dma.paddr, XHCI_TRB_BSR);
	} else {
		error = xhci_cmd_configure_ep(sc, xp->slot,
		    sdev->ictx_dma.paddr);
	}

	if (error) {
		xhci_ring_free(sc, &xp->ring);
		return (EIO);
	}

	return (0);
}
1269 
/*
 * Close a pipe: drop its endpoint context from the device, free its
 * transfer ring and, for the default pipe, disable the whole slot.
 */
void
xhci_pipe_close(struct usbd_pipe *pipe)
{
	struct xhci_softc *sc = (struct xhci_softc *)pipe->device->bus;
	struct xhci_pipe *lxp, *xp = (struct xhci_pipe *)pipe;
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
	int i;

	/* Root Hub */
	if (pipe->device->depth == 0)
		return;

	/* Mask the endpoint */
	sdev->input_ctx->drop_flags = htole32(XHCI_INCTX_MASK_DCI(xp->dci));
	sdev->input_ctx->add_flags = 0;

	/*
	 * Update last valid Endpoint Context: find the highest DCI
	 * still open, ignoring the pipe being closed.
	 * NOTE(review): if no other pipe is open the loop falls through
	 * with lxp == sdev->pipes[0] (possibly xp itself) — confirm
	 * this is the intended Context Entries value.
	 */
	for (i = 30; i >= 0; i--) {
		lxp = sdev->pipes[i];
		if (lxp != NULL && lxp != xp)
			break;
	}
	sdev->slot_ctx->info_lo = htole32(XHCI_SCTX_DCI(lxp->dci));

	/* Clear the Endpoint Context */
	memset(sdev->ep_ctx[xp->dci - 1], 0, sizeof(struct xhci_epctx));

	/* Flush the input context before handing it to the controller. */
	bus_dmamap_sync(sdev->ictx_dma.tag, sdev->ictx_dma.map, 0,
	    sc->sc_pagesize, BUS_DMASYNC_PREWRITE);

	if (xhci_cmd_configure_ep(sc, xp->slot, sdev->ictx_dma.paddr))
		DPRINTF(("%s: error clearing ep (%d)\n", DEVNAME(sc), xp->dci));

	xhci_ring_free(sc, &xp->ring);
	sdev->pipes[xp->dci - 1] = NULL;

	/*
	 * If we are closing the default pipe, the device is probably
	 * gone, so put its slot in the DISABLED state.
	 */
	if (xp->dci == 1) {
		xhci_cmd_slot_control(sc, &xp->slot, 0);
		xhci_softdev_free(sc, xp->slot);
	}
}
1315 
1316 /*
1317  * Transition a device from DEFAULT to ADDRESSED Slot state, this hook
1318  * is needed for Low/Full speed devices.
1319  *
1320  * See section 4.5.3 of USB 3.1 Specification for more details.
1321  */
1322 int
1323 xhci_setaddr(struct usbd_device *dev, int addr)
1324 {
1325 	struct xhci_softc *sc = (struct xhci_softc *)dev->bus;
1326 	struct xhci_pipe *xp = (struct xhci_pipe *)dev->default_pipe;
1327 	struct xhci_soft_dev *sdev = &sc->sc_sdevs[xp->slot];
1328 	int error;
1329 
1330 	/* Root Hub */
1331 	if (dev->depth == 0)
1332 		return (0);
1333 
1334 	KASSERT(xp->dci == 1);
1335 
1336 	xhci_context_setup(sc, dev->default_pipe);
1337 
1338 	error = xhci_cmd_set_address(sc, xp->slot, sdev->ictx_dma.paddr, 0);
1339 
1340 #ifdef XHCI_DEBUG
1341 	if (error == 0) {
1342 		struct xhci_sctx *sctx;
1343 		uint8_t addr;
1344 
1345 		bus_dmamap_sync(sdev->octx_dma.tag, sdev->octx_dma.map, 0,
1346 		    sc->sc_pagesize, BUS_DMASYNC_POSTREAD);
1347 
1348 		/* Get output slot context. */
1349 		sctx = (struct xhci_sctx *)sdev->octx_dma.vaddr;
1350 		addr = XHCI_SCTX_DEV_ADDR(letoh32(sctx->state));
1351 		error = (addr == 0);
1352 
1353 		printf("%s: dev %d addr %d\n", DEVNAME(sc), xp->slot, addr);
1354 	}
1355 #endif
1356 
1357 	return (error);
1358 }
1359 
1360 struct usbd_xfer *
1361 xhci_allocx(struct usbd_bus *bus)
1362 {
1363 	return (pool_get(xhcixfer, PR_NOWAIT | PR_ZERO));
1364 }
1365 
void
xhci_freex(struct usbd_bus *bus, struct usbd_xfer *xfer)
{
	/* Return the transfer to the shared xhci transfer pool. */
	pool_put(xhcixfer, xfer);
}
1371 
/*
 * Allocate the controller's private scratchpad: "npage" pages plus a
 * table of pointers to them, and hook that table into DCBAA entry 0.
 * Returns 0 or ENOMEM.
 */
int
xhci_scratchpad_alloc(struct xhci_softc *sc, int npage)
{
	uint64_t *pte;
	int error, i;

	/* Allocate the required entry for the table. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.table_dma,
	    (void **)&pte, npage * sizeof(uint64_t), XHCI_SPAD_TABLE_ALIGN,
	    sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Allocate pages. XXX does not need to be contiguous. */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sc->sc_spad.pages_dma,
	    NULL, npage * sc->sc_pagesize, sc->sc_pagesize, 0);
	if (error) {
		usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
		return (ENOMEM);
	}

	/* Point each table entry at its scratchpad page. */
	for (i = 0; i < npage; i++) {
		pte[i] = htole64(
		    sc->sc_spad.pages_dma.paddr + (i * sc->sc_pagesize)
		);
	}

	/* Flush the table before the controller may read it. */
	bus_dmamap_sync(sc->sc_spad.table_dma.tag, sc->sc_spad.table_dma.map, 0,
	    npage * sizeof(uint64_t), BUS_DMASYNC_PREWRITE);

	/*  Entry 0 points to the table of scratchpad pointers. */
	sc->sc_dcbaa.segs[0] = htole64(sc->sc_spad.table_dma.paddr);
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
	    sizeof(uint64_t), BUS_DMASYNC_PREWRITE);

	sc->sc_spad.npage = npage;

	return (0);
}
1411 
/* Undo xhci_scratchpad_alloc(): clear DCBAA entry 0, then free DMA. */
void
xhci_scratchpad_free(struct xhci_softc *sc)
{
	/* Detach the scratchpad table from the controller first. */
	sc->sc_dcbaa.segs[0] = 0;
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map, 0,
	    sizeof(uint64_t), BUS_DMASYNC_PREWRITE);

	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.pages_dma);
	usbd_dma_contig_free(&sc->sc_bus, &sc->sc_spad.table_dma);
}
1422 
1423 int
1424 xhci_ring_alloc(struct xhci_softc *sc, struct xhci_ring *ring, size_t ntrb,
1425     size_t alignment)
1426 {
1427 	size_t size;
1428 	int error;
1429 
1430 	size = ntrb * sizeof(struct xhci_trb);
1431 
1432 	error = usbd_dma_contig_alloc(&sc->sc_bus, &ring->dma,
1433 	    (void **)&ring->trbs, size, alignment, XHCI_RING_BOUNDARY);
1434 	if (error)
1435 		return (error);
1436 
1437 	ring->ntrb = ntrb;
1438 
1439 	xhci_ring_reset(sc, ring);
1440 
1441 	return (0);
1442 }
1443 
/* Release the DMA memory backing a TRB ring. */
void
xhci_ring_free(struct xhci_softc *sc, struct xhci_ring *ring)
{
	usbd_dma_contig_free(&sc->sc_bus, &ring->dma);
}
1449 
/*
 * Reset a ring to its initial state: all TRBs zeroed, index at 0 and
 * producer/consumer cycle state set.
 */
void
xhci_ring_reset(struct xhci_softc *sc, struct xhci_ring *ring)
{
	size_t size;

	size = ring->ntrb * sizeof(struct xhci_trb);

	memset(ring->trbs, 0, size);

	ring->index = 0;
	ring->toggle = XHCI_TRB_CYCLE;

	/*
	 * Since all our rings use only one segment, at least for
	 * the moment, link their tail to their head.
	 * The event ring is produced by the controller and never gets
	 * a link TRB.
	 */
	if (ring != &sc->sc_evt_ring) {
		struct xhci_trb *trb = &ring->trbs[ring->ntrb - 1];

		trb->trb_paddr = htole64(ring->dma.paddr);
		trb->trb_flags = htole32(XHCI_TRB_TYPE_LINK | XHCI_TRB_LINKSEG);
	}
	/* Flush the whole ring for the controller. */
	bus_dmamap_sync(ring->dma.tag, ring->dma.map, 0, size,
	    BUS_DMASYNC_PREWRITE);
}
1475 
/*
 * Consume the next TRB from a ring the controller produces into (the
 * event ring).  Returns NULL when the TRB's cycle bit does not match
 * our expected state, i.e. when there is nothing new to consume.
 */
struct xhci_trb*
xhci_ring_consume(struct xhci_softc *sc, struct xhci_ring *ring)
{
	struct xhci_trb *trb = &ring->trbs[ring->index];

	KASSERT(ring->index < ring->ntrb);

	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD);

	/* Make sure this TRB can be consumed. */
	if (ring->toggle != (letoh32(trb->trb_flags) & XHCI_TRB_CYCLE))
		return (NULL);

	ring->index++;

	/* Wrap around and flip our expected cycle state. */
	if (ring->index == ring->ntrb) {
		ring->index = 0;
		ring->toggle ^= 1;
	}

	return (trb);
}
1499 
/*
 * Reserve the next TRB slot on a ring we produce into (command or
 * transfer ring) and advance the enqueue index.  The caller fills in
 * the returned TRB and syncs it.
 */
struct xhci_trb*
xhci_ring_produce(struct xhci_softc *sc, struct xhci_ring *ring)
{
	struct xhci_trb *trb = &ring->trbs[ring->index];

	KASSERT(ring->index < ring->ntrb);

	bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, trb),
	    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD);

	ring->index++;

	/* Toggle cycle state of the link TRB and skip it. */
	if (ring->index == (ring->ntrb - 1)) {
		struct xhci_trb *lnk = &ring->trbs[ring->index];

		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
		    sizeof(struct xhci_trb), BUS_DMASYNC_POSTREAD);

		/* Hand the link TRB to the controller with our cycle bit. */
		lnk->trb_flags ^= htole32(XHCI_TRB_CYCLE);

		bus_dmamap_sync(ring->dma.tag, ring->dma.map, TRBOFF(ring, lnk),
		    sizeof(struct xhci_trb), BUS_DMASYNC_PREWRITE);

		ring->index = 0;
		ring->toggle ^= 1;
	}

	return (trb);
}
1530 
/*
 * Reserve one TRB of the pipe's ring for "xfer" and return it.  The
 * ring's current cycle bit is reported through "togglep" so the caller
 * can set it on the TRB it builds.
 */
struct xhci_trb *
xhci_xfer_get_trb(struct xhci_softc *sc, struct usbd_xfer *xfer,
    uint8_t *togglep, int last)
{
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_xfer *xx = (struct xhci_xfer *)xfer;

	KASSERT(xp->free_trbs >= 1);

	/* Associate this TRB to our xfer. */
	xp->pending_xfers[xp->ring.index] = xfer;
	xp->free_trbs--;

	/*
	 * Remember the index of the final TRB so completion events can
	 * be matched; -2 presumably marks a non-final TRB — confirm
	 * against the event handler (not visible here).
	 */
	xx->index = (last) ? xp->ring.index : -2;
	xx->ntrb += 1;

	*togglep = xp->ring.toggle;
	return (xhci_ring_produce(sc, &xp->ring));
}
1550 
/*
 * Enqueue the command TRB "trb0" on the command ring and ring the
 * doorbell.  With timeout == 0 the command is fire-and-forget;
 * otherwise sleep up to "timeout" ms for the completion event, whose
 * TRB is copied back into *trb0.  Returns 0 on success, EAGAIN if no
 * ring slot is available, a tsleep() error, or EIO on a bad
 * completion code.
 */
int
xhci_command_submit(struct xhci_softc *sc, struct xhci_trb *trb0, int timeout)
{
	struct xhci_trb *trb;
	int s, error = 0;

	/* Only one synchronous command may be outstanding at a time. */
	KASSERT(timeout == 0 || sc->sc_cmd_trb == NULL);

	/* Stamp the TRB with the ring's current cycle bit. */
	trb0->trb_flags |= htole32(sc->sc_cmd_ring.toggle);

	trb = xhci_ring_produce(sc, &sc->sc_cmd_ring);
	if (trb == NULL)
		return (EAGAIN);
	memcpy(trb, trb0, sizeof(struct xhci_trb));
	bus_dmamap_sync(sc->sc_cmd_ring.dma.tag, sc->sc_cmd_ring.dma.map,
	    TRBOFF(&sc->sc_cmd_ring, trb), sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);


	if (timeout == 0) {
		/* Asynchronous: ring doorbell 0 and return immediately. */
		XDWRITE4(sc, XHCI_DOORBELL(0), 0);
		return (0);
	}

	assertwaitok();

	s = splusb();
	sc->sc_cmd_trb = trb;
	XDWRITE4(sc, XHCI_DOORBELL(0), 0);
	/* Convert ms to ticks, rounding up, plus one guard tick. */
	error = tsleep(&sc->sc_cmd_trb, PZERO, "xhcicmd",
	    (timeout*hz+999)/ 1000 + 1);
	if (error) {
#ifdef XHCI_DEBUG
		printf("%s: tsleep() = %d\n", __func__, error);
		printf("cmd = %d ", XHCI_TRB_TYPE(letoh32(trb->trb_flags)));
		xhci_dump_trb(trb);
#endif
		KASSERT(sc->sc_cmd_trb == trb);
		sc->sc_cmd_trb = NULL;
		splx(s);
		return (error);
	}
	splx(s);

	/* The interrupt path stored the completion event for us. */
	memcpy(trb0, &sc->sc_result_trb, sizeof(struct xhci_trb));

	if (XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)) == XHCI_CODE_SUCCESS)
		return (0);

#ifdef XHCI_DEBUG
	printf("%s: event error code=%d, result=%d  \n", DEVNAME(sc),
	    XHCI_TRB_GET_CODE(letoh32(trb0->trb_status)),
	    XHCI_TRB_TYPE(letoh32(trb0->trb_flags)));
	xhci_dump_trb(trb0);
#endif
	return (EIO);
}
1608 
/*
 * Abort the command currently executing, if any, by setting the
 * Command Abort bit and polling (up to 250ms) for the Command Ring
 * Running bit to clear.  Returns 0 on success, 1 on timeout.
 */
int
xhci_command_abort(struct xhci_softc *sc)
{
	uint32_t reg;
	int i;

	/* Nothing to do if the command ring is not running. */
	reg = XOREAD4(sc, XHCI_CRCR_LO);
	if ((reg & XHCI_CRCR_LO_CRR) == 0)
		return (0);

	XOWRITE4(sc, XHCI_CRCR_LO, reg | XHCI_CRCR_LO_CA);
	XOWRITE4(sc, XHCI_CRCR_HI, 0);

	/* Wait for CRR to clear, 1ms at a time. */
	for (i = 0; i < 250; i++) {
		usb_delay_ms(&sc->sc_bus, 1);
		reg = XOREAD4(sc, XHCI_CRCR_LO) & XHCI_CRCR_LO_CRR;
		if (!reg)
			break;
	}

	if (reg) {
		printf("%s: command ring abort timeout\n", DEVNAME(sc));
		return (1);
	}

	return (0);
}
1636 
1637 int
1638 xhci_cmd_configure_ep(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
1639 {
1640 	struct xhci_trb trb;
1641 
1642 	DPRINTF(("%s: %s dev %u\n", DEVNAME(sc), __func__, slot));
1643 
1644 	trb.trb_paddr = htole64(addr);
1645 	trb.trb_status = 0;
1646 	trb.trb_flags = htole32(
1647 	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_CONFIG_EP
1648 	);
1649 
1650 	return (xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT));
1651 }
1652 
1653 int
1654 xhci_cmd_stop_ep(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
1655 {
1656 	struct xhci_trb trb;
1657 
1658 	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
1659 
1660 	trb.trb_paddr = 0;
1661 	trb.trb_status = 0;
1662 	trb.trb_flags = htole32(
1663 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_STOP_EP
1664 	);
1665 
1666 	return (xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT));
1667 }
1668 
1669 void
1670 xhci_cmd_reset_ep_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci)
1671 {
1672 	struct xhci_trb trb;
1673 
1674 	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
1675 
1676 	trb.trb_paddr = 0;
1677 	trb.trb_status = 0;
1678 	trb.trb_flags = htole32(
1679 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_RESET_EP
1680 	);
1681 
1682 	xhci_command_submit(sc, &trb, 0);
1683 }
1684 
1685 void
1686 xhci_cmd_set_tr_deq_async(struct xhci_softc *sc, uint8_t slot, uint8_t dci,
1687    uint64_t addr)
1688 {
1689 	struct xhci_trb trb;
1690 
1691 	DPRINTF(("%s: %s dev %u dci %u\n", DEVNAME(sc), __func__, slot, dci));
1692 
1693 	trb.trb_paddr = htole64(addr);
1694 	trb.trb_status = 0;
1695 	trb.trb_flags = htole32(
1696 	    XHCI_TRB_SET_SLOT(slot) | XHCI_TRB_SET_EP(dci) | XHCI_CMD_SET_TR_DEQ
1697 	);
1698 
1699 	xhci_command_submit(sc, &trb, 0);
1700 }
1701 
/*
 * Enable or disable a device slot.  When enabling, the slot ID chosen
 * by the controller is returned through *slotp; when disabling, *slotp
 * identifies the slot to release.  Returns 0 or EIO.
 */
int
xhci_cmd_slot_control(struct xhci_softc *sc, uint8_t *slotp, int enable)
{
	struct xhci_trb trb;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	trb.trb_paddr = 0;
	trb.trb_status = 0;
	if (enable)
		trb.trb_flags = htole32(XHCI_CMD_ENABLE_SLOT);
	else
		trb.trb_flags = htole32(
			XHCI_TRB_SET_SLOT(*slotp) | XHCI_CMD_DISABLE_SLOT
		);

	if (xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT))
		return (EIO);

	/* On success "trb" now holds the completion event TRB. */
	if (enable)
		*slotp = XHCI_TRB_GET_SLOT(letoh32(trb.trb_flags));

	return (0);
}
1726 
1727 int
1728 xhci_cmd_set_address(struct xhci_softc *sc, uint8_t slot, uint64_t addr,
1729     uint32_t bsr)
1730 {
1731 	struct xhci_trb trb;
1732 
1733 	DPRINTF(("%s: %s BSR=%u\n", DEVNAME(sc), __func__, bsr ? 1 : 0));
1734 
1735 	trb.trb_paddr = htole64(addr);
1736 	trb.trb_status = 0;
1737 	trb.trb_flags = htole32(
1738 	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_ADDRESS_DEVICE | bsr
1739 	);
1740 
1741 	return (xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT));
1742 }
1743 
1744 int
1745 xhci_cmd_evaluate_ctx(struct xhci_softc *sc, uint8_t slot, uint64_t addr)
1746 {
1747 	struct xhci_trb trb;
1748 
1749 	DPRINTF(("%s: %s dev %u\n", DEVNAME(sc), __func__, slot));
1750 
1751 	trb.trb_paddr = htole64(addr);
1752 	trb.trb_status = 0;
1753 	trb.trb_flags = htole32(
1754 	    XHCI_TRB_SET_SLOT(slot) | XHCI_CMD_EVAL_CTX
1755 	);
1756 
1757 	return (xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT));
1758 }
1759 
1760 #ifdef XHCI_DEBUG
1761 int
1762 xhci_cmd_noop(struct xhci_softc *sc)
1763 {
1764 	struct xhci_trb trb;
1765 
1766 	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
1767 
1768 	trb.trb_paddr = 0;
1769 	trb.trb_status = 0;
1770 	trb.trb_flags = htole32(XHCI_CMD_NOOP);
1771 
1772 	return (xhci_command_submit(sc, &trb, XHCI_CMD_TIMEOUT));
1773 }
1774 #endif
1775 
/*
 * Allocate the per-slot software state: the input context (carved into
 * input/slot/endpoint contexts), the output context, and the DCBAA
 * entry pointing at the latter.  Returns 0 or ENOMEM.
 */
int
xhci_softdev_alloc(struct xhci_softc *sc, uint8_t slot)
{
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];
	int i, error;
	uint8_t *kva;

	/*
	 * Setup input context.  Even with 64 byte context size, it
	 * fits into the smallest supported page size, so use that.
	 */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->ictx_dma,
	    (void **)&kva, sc->sc_pagesize, XHCI_ICTX_ALIGN, sc->sc_pagesize);
	if (error)
		return (ENOMEM);

	/* Carve the page: input control ctx, slot ctx, then 31 ep ctxs. */
	sdev->input_ctx = (struct xhci_inctx *)kva;
	sdev->slot_ctx = (struct xhci_sctx *)(kva + sc->sc_ctxsize);
	for (i = 0; i < 31; i++)
		sdev->ep_ctx[i] =
		    (struct xhci_epctx *)(kva + (i + 2) * sc->sc_ctxsize);

	DPRINTF(("%s: dev %d, input=%p slot=%p ep0=%p\n", DEVNAME(sc),
	 slot, sdev->input_ctx, sdev->slot_ctx, sdev->ep_ctx[0]));

	/* Setup output context */
	error = usbd_dma_contig_alloc(&sc->sc_bus, &sdev->octx_dma, NULL,
	    sc->sc_pagesize, XHCI_OCTX_ALIGN, sc->sc_pagesize);
	if (error) {
		usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);
		return (ENOMEM);
	}

	memset(&sdev->pipes, 0, sizeof(sdev->pipes));

	DPRINTF(("%s: dev %d, setting DCBAA to 0x%016llx\n", DEVNAME(sc),
	    slot, (long long)sdev->octx_dma.paddr));

	/* Publish the output context in the DCBAA for this slot. */
	sc->sc_dcbaa.segs[slot] = htole64(sdev->octx_dma.paddr);
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREWRITE);

	return (0);
}
1820 
/* Tear down per-slot state: detach from the DCBAA, free contexts. */
void
xhci_softdev_free(struct xhci_softc *sc, uint8_t slot)
{
	struct xhci_soft_dev *sdev = &sc->sc_sdevs[slot];

	/* Clear the DCBAA entry before freeing the memory behind it. */
	sc->sc_dcbaa.segs[slot] = 0;
	bus_dmamap_sync(sc->sc_dcbaa.dma.tag, sc->sc_dcbaa.dma.map,
	    slot * sizeof(uint64_t), sizeof(uint64_t), BUS_DMASYNC_PREWRITE);

	usbd_dma_contig_free(&sc->sc_bus, &sdev->octx_dma);
	usbd_dma_contig_free(&sc->sc_bus, &sdev->ictx_dma);

	memset(sdev, 0, sizeof(struct xhci_soft_dev));
}
1835 
1836 /* Root hub descriptors. */
/* Not "const": idVendor is patched at attach time (see the
 * UR_GET_DESCRIPTOR handler, which does USETW(xhci_devd.idVendor, ...)). */
usb_device_descriptor_t xhci_devd = {
	USB_DEVICE_DESCRIPTOR_SIZE,
	UDESC_DEVICE,		/* type */
	{0x00, 0x03},		/* USB version (3.0, little-endian BCD) */
	UDCLASS_HUB,		/* class */
	UDSUBCLASS_HUB,		/* subclass */
	UDPROTO_HSHUBSTT,	/* protocol */
	9,			/* max packet */
	{0},{0},{0x00,0x01},	/* device id */
	1,2,0,			/* string indexes */
	1			/* # of configurations */
};
1849 
/* Configuration descriptor of the emulated root hub. */
const usb_config_descriptor_t xhci_confd = {
	USB_CONFIG_DESCRIPTOR_SIZE,
	UDESC_CONFIG,
	{USB_CONFIG_DESCRIPTOR_SIZE +
	 USB_INTERFACE_DESCRIPTOR_SIZE +
	 USB_ENDPOINT_DESCRIPTOR_SIZE},	/* wTotalLength */
	1,			/* bNumInterface */
	1,			/* bConfigurationValue */
	0,			/* iConfiguration */
	UC_SELF_POWERED,
	0                      /* max power */
};
1862 
/* Interface descriptor for the root hub's single interface. */
const usb_interface_descriptor_t xhci_ifcd = {
	USB_INTERFACE_DESCRIPTOR_SIZE,
	UDESC_INTERFACE,
	0,			/* bInterfaceNumber */
	0,			/* bAlternateSetting */
	1,			/* bNumEndpoints */
	UICLASS_HUB,
	UISUBCLASS_HUB,
	UIPROTO_HSHUBSTT,
	0			/* iInterface */
};
1874 
/* Status-change (interrupt IN) endpoint of the root hub. */
const usb_endpoint_descriptor_t xhci_endpd = {
	USB_ENDPOINT_DESCRIPTOR_SIZE,
	UDESC_ENDPOINT,
	UE_DIR_IN | XHCI_INTR_ENDPT,
	UE_INTERRUPT,
	{2, 0},                 /* max 15 ports */
	255			/* bInterval */
};
1883 
/* SuperSpeed endpoint companion descriptor for the interrupt endpoint. */
const usb_endpoint_ss_comp_descriptor_t xhci_endpcd = {
	USB_ENDPOINT_SS_COMP_DESCRIPTOR_SIZE,
	UDESC_ENDPOINT_SS_COMP,
	0,			/* bMaxBurst */
	0,			/* bmAttributes */
	{0, 0}			/* wBytesPerInterval */
};
1891 
/* Hub descriptor template.  NOTE(review): the zeroed fields (port
 * count, characteristics) appear to be filled in per-request by the
 * root hub control handler — confirm in xhci_root_ctrl_start(). */
const usb_hub_descriptor_t xhci_hubd = {
	USB_HUB_DESCRIPTOR_SIZE,
	UDESC_SS_HUB,
	0,
	{0,0},
	0,
	0,
	{0},
};
1901 
/*
 * Abort a pending transfer: stop the endpoint, then move its dequeue
 * pointer past the transfer's TRBs.  Must be called at splsoftusb();
 * may sleep waiting for the Set TR Dequeue completion.
 */
void
xhci_abort_xfer(struct usbd_xfer *xfer, usbd_status status)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	int error;

	splsoftassert(IPL_SOFTUSB);

	DPRINTF(("%s: xfer=%p status=%s err=%s actlen=%d len=%d idx=%d\n",
	    __func__, xfer, usbd_errstr(xfer->status), usbd_errstr(status),
	    xfer->actlen, xfer->length, ((struct xhci_xfer *)xfer)->index));

	/* XXX The stack should not call abort() in this case. */
	if (sc->sc_bus.dying || xfer->status == USBD_NOT_STARTED) {
		xfer->status = status;
		timeout_del(&xfer->timeout_handle);
		usb_rem_task(xfer->device, &xfer->abort_task);
		usb_transfer_complete(xfer);
		return;
	}

	/* Transfer is already done. */
	if (xfer->status != USBD_IN_PROGRESS) {
		DPRINTF(("%s: already done \n", __func__));
		return;
	}

	/* Prevent any timeout to kick in. */
	timeout_del(&xfer->timeout_handle);
	usb_rem_task(xfer->device, &xfer->abort_task);

	/* Indicate that we are aborting this transfer. */
	/* "halted" doubles as the abort status reported on completion. */
	xp->halted = status;
	xp->aborted_xfer = xfer;

	/* Stop the endpoint and wait until the hardware says so. */
	if (xhci_cmd_stop_ep(sc, xp->slot, xp->dci))
		DPRINTF(("%s: error stopping endpoint\n", DEVNAME(sc)));

	/*
	 * The transfer was already completed when we stopped the
	 * endpoint, no need to move the dequeue pointer past its
	 * TRBs.  (aborted_xfer is presumably cleared by the event
	 * handling path — not visible in this file section.)
	 */
	if (xp->aborted_xfer == NULL) {
		DPRINTF(("%s: done before stopping the endpoint\n", __func__));
		xp->halted = 0;
		return;
	}

	/*
	 * At this stage the endpoint has been stopped, so update its
	 * dequeue pointer past the last TRB of the transfer.
	 *
	 * Note: This assume that only one transfer per endpoint has
	 *	 pending TRBs on the ring.
	 */
	xhci_cmd_set_tr_deq_async(sc, xp->slot, xp->dci,
	    DEQPTR(xp->ring) | xp->ring.toggle);
	error = tsleep(xp, PZERO, "xhciab", (XHCI_CMD_TIMEOUT*hz+999)/1000 + 1);
	if (error)
		printf("%s: timeout aborting transfer\n", DEVNAME(sc));
}
1966 
1967 void
1968 xhci_timeout(void *addr)
1969 {
1970 	struct usbd_xfer *xfer = addr;
1971 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
1972 
1973 	if (sc->sc_bus.dying) {
1974 		xhci_timeout_task(addr);
1975 		return;
1976 	}
1977 
1978 	usb_init_task(&xfer->abort_task, xhci_timeout_task, addr,
1979 	    USB_TASK_TYPE_ABORT);
1980 	usb_add_task(xfer->device, &xfer->abort_task);
1981 }
1982 
/* Task-thread half of the timeout: abort the transfer at splusb(). */
void
xhci_timeout_task(void *addr)
{
	struct usbd_xfer *xfer = addr;
	int s;

	s = splusb();
	xhci_abort_xfer(xfer, USBD_TIMEOUT);
	splx(s);
}
1993 
/*
 * Queue a control transfer on the root hub pipe and start the one at
 * the head of the queue.
 */
usbd_status
xhci_root_ctrl_transfer(struct usbd_xfer *xfer)
{
	usbd_status err;

	err = usb_insert_transfer(xfer);
	if (err)
		return (err);

	return (xhci_root_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
}
2005 
2006 usbd_status
2007 xhci_root_ctrl_start(struct usbd_xfer *xfer)
2008 {
2009 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2010 	usb_port_status_t ps;
2011 	usb_device_request_t *req;
2012 	void *buf = NULL;
2013 	usb_hub_descriptor_t hubd;
2014 	usbd_status err;
2015 	int s, len, value, index;
2016 	int l, totlen = 0;
2017 	int port, i;
2018 	uint32_t v;
2019 
2020 	KASSERT(xfer->rqflags & URQ_REQUEST);
2021 
2022 	if (sc->sc_bus.dying)
2023 		return (USBD_IOERROR);
2024 
2025 	req = &xfer->request;
2026 
2027 	DPRINTFN(4,("%s: type=0x%02x request=%02x\n", __func__,
2028 	    req->bmRequestType, req->bRequest));
2029 
2030 	len = UGETW(req->wLength);
2031 	value = UGETW(req->wValue);
2032 	index = UGETW(req->wIndex);
2033 
2034 	if (len != 0)
2035 		buf = KERNADDR(&xfer->dmabuf, 0);
2036 
2037 #define C(x,y) ((x) | ((y) << 8))
2038 	switch(C(req->bRequest, req->bmRequestType)) {
2039 	case C(UR_CLEAR_FEATURE, UT_WRITE_DEVICE):
2040 	case C(UR_CLEAR_FEATURE, UT_WRITE_INTERFACE):
2041 	case C(UR_CLEAR_FEATURE, UT_WRITE_ENDPOINT):
2042 		/*
2043 		 * DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT are no-ops
2044 		 * for the integrated root hub.
2045 		 */
2046 		break;
2047 	case C(UR_GET_CONFIG, UT_READ_DEVICE):
2048 		if (len > 0) {
2049 			*(uint8_t *)buf = sc->sc_conf;
2050 			totlen = 1;
2051 		}
2052 		break;
2053 	case C(UR_GET_DESCRIPTOR, UT_READ_DEVICE):
2054 		DPRINTFN(8,("xhci_root_ctrl_start: wValue=0x%04x\n", value));
2055 		switch(value >> 8) {
2056 		case UDESC_DEVICE:
2057 			if ((value & 0xff) != 0) {
2058 				err = USBD_IOERROR;
2059 				goto ret;
2060 			}
2061 			totlen = l = min(len, USB_DEVICE_DESCRIPTOR_SIZE);
2062 			USETW(xhci_devd.idVendor, sc->sc_id_vendor);
2063 			memcpy(buf, &xhci_devd, l);
2064 			break;
2065 		/*
2066 		 * We can't really operate at another speed, but the spec says
2067 		 * we need this descriptor.
2068 		 */
2069 		case UDESC_OTHER_SPEED_CONFIGURATION:
2070 		case UDESC_CONFIG:
2071 			if ((value & 0xff) != 0) {
2072 				err = USBD_IOERROR;
2073 				goto ret;
2074 			}
2075 			totlen = l = min(len, USB_CONFIG_DESCRIPTOR_SIZE);
2076 			memcpy(buf, &xhci_confd, l);
2077 			((usb_config_descriptor_t *)buf)->bDescriptorType =
2078 			    value >> 8;
2079 			buf = (char *)buf + l;
2080 			len -= l;
2081 			l = min(len, USB_INTERFACE_DESCRIPTOR_SIZE);
2082 			totlen += l;
2083 			memcpy(buf, &xhci_ifcd, l);
2084 			buf = (char *)buf + l;
2085 			len -= l;
2086 			l = min(len, USB_ENDPOINT_DESCRIPTOR_SIZE);
2087 			totlen += l;
2088 			memcpy(buf, &xhci_endpd, l);
2089 			break;
2090 		case UDESC_STRING:
2091 			if (len == 0)
2092 				break;
2093 			*(u_int8_t *)buf = 0;
2094 			totlen = 1;
2095 			switch (value & 0xff) {
2096 			case 0: /* Language table */
2097 				totlen = usbd_str(buf, len, "\001");
2098 				break;
2099 			case 1: /* Vendor */
2100 				totlen = usbd_str(buf, len, sc->sc_vendor);
2101 				break;
2102 			case 2: /* Product */
2103 				totlen = usbd_str(buf, len, "xHCI root hub");
2104 				break;
2105 			}
2106 			break;
2107 		default:
2108 			err = USBD_IOERROR;
2109 			goto ret;
2110 		}
2111 		break;
2112 	case C(UR_GET_INTERFACE, UT_READ_INTERFACE):
2113 		if (len > 0) {
2114 			*(uint8_t *)buf = 0;
2115 			totlen = 1;
2116 		}
2117 		break;
2118 	case C(UR_GET_STATUS, UT_READ_DEVICE):
2119 		if (len > 1) {
2120 			USETW(((usb_status_t *)buf)->wStatus,UDS_SELF_POWERED);
2121 			totlen = 2;
2122 		}
2123 		break;
2124 	case C(UR_GET_STATUS, UT_READ_INTERFACE):
2125 	case C(UR_GET_STATUS, UT_READ_ENDPOINT):
2126 		if (len > 1) {
2127 			USETW(((usb_status_t *)buf)->wStatus, 0);
2128 			totlen = 2;
2129 		}
2130 		break;
2131 	case C(UR_SET_ADDRESS, UT_WRITE_DEVICE):
2132 		if (value >= USB_MAX_DEVICES) {
2133 			err = USBD_IOERROR;
2134 			goto ret;
2135 		}
2136 		break;
2137 	case C(UR_SET_CONFIG, UT_WRITE_DEVICE):
2138 		if (value != 0 && value != 1) {
2139 			err = USBD_IOERROR;
2140 			goto ret;
2141 		}
2142 		sc->sc_conf = value;
2143 		break;
2144 	case C(UR_SET_DESCRIPTOR, UT_WRITE_DEVICE):
2145 		break;
2146 	case C(UR_SET_FEATURE, UT_WRITE_DEVICE):
2147 	case C(UR_SET_FEATURE, UT_WRITE_INTERFACE):
2148 	case C(UR_SET_FEATURE, UT_WRITE_ENDPOINT):
2149 		err = USBD_IOERROR;
2150 		goto ret;
2151 	case C(UR_SET_INTERFACE, UT_WRITE_INTERFACE):
2152 		break;
2153 	case C(UR_SYNCH_FRAME, UT_WRITE_ENDPOINT):
2154 		break;
2155 	/* Hub requests */
2156 	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_DEVICE):
2157 		break;
2158 	case C(UR_CLEAR_FEATURE, UT_WRITE_CLASS_OTHER):
2159 		DPRINTFN(8, ("xhci_root_ctrl_start: UR_CLEAR_PORT_FEATURE "
2160 		    "port=%d feature=%d\n", index, value));
2161 		if (index < 1 || index > sc->sc_noport) {
2162 			err = USBD_IOERROR;
2163 			goto ret;
2164 		}
2165 		port = XHCI_PORTSC(index);
2166 		v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR;
2167 		switch (value) {
2168 		case UHF_PORT_ENABLE:
2169 			XOWRITE4(sc, port, v | XHCI_PS_PED);
2170 			break;
2171 		case UHF_PORT_SUSPEND:
2172 			/* TODO */
2173 			break;
2174 		case UHF_PORT_POWER:
2175 			XOWRITE4(sc, port, v & ~XHCI_PS_PP);
2176 			break;
2177 		case UHF_PORT_INDICATOR:
2178 			XOWRITE4(sc, port, v & ~XHCI_PS_SET_PIC(3));
2179 			break;
2180 		case UHF_C_PORT_CONNECTION:
2181 			XOWRITE4(sc, port, v | XHCI_PS_CSC);
2182 			break;
2183 		case UHF_C_PORT_ENABLE:
2184 			XOWRITE4(sc, port, v | XHCI_PS_PEC);
2185 			break;
2186 		case UHF_C_PORT_SUSPEND:
2187 		case UHF_C_PORT_LINK_STATE:
2188 			XOWRITE4(sc, port, v | XHCI_PS_PLC);
2189 			break;
2190 		case UHF_C_PORT_OVER_CURRENT:
2191 			XOWRITE4(sc, port, v | XHCI_PS_OCC);
2192 			break;
2193 		case UHF_C_PORT_RESET:
2194 			XOWRITE4(sc, port, v | XHCI_PS_PRC);
2195 			break;
2196 		case UHF_C_BH_PORT_RESET:
2197 			XOWRITE4(sc, port, v | XHCI_PS_WRC);
2198 			break;
2199 		default:
2200 			err = USBD_IOERROR;
2201 			goto ret;
2202 		}
2203 		break;
2204 
2205 	case C(UR_GET_DESCRIPTOR, UT_READ_CLASS_DEVICE):
2206 		if (len == 0)
2207 			break;
2208 		if ((value & 0xff) != 0) {
2209 			err = USBD_IOERROR;
2210 			goto ret;
2211 		}
2212 		v = XREAD4(sc, XHCI_HCCPARAMS);
2213 		hubd = xhci_hubd;
2214 		hubd.bNbrPorts = sc->sc_noport;
2215 		USETW(hubd.wHubCharacteristics,
2216 		    (XHCI_HCC_PPC(v) ? UHD_PWR_INDIVIDUAL : UHD_PWR_GANGED) |
2217 		    (XHCI_HCC_PIND(v) ? UHD_PORT_IND : 0));
2218 		hubd.bPwrOn2PwrGood = 10; /* xHCI section 5.4.9 */
2219 		for (i = 1; i <= sc->sc_noport; i++) {
2220 			v = XOREAD4(sc, XHCI_PORTSC(i));
2221 			if (v & XHCI_PS_DR)
2222 				hubd.DeviceRemovable[i / 8] |= 1U << (i % 8);
2223 		}
2224 		hubd.bDescLength = USB_HUB_DESCRIPTOR_SIZE + i;
2225 		l = min(len, hubd.bDescLength);
2226 		totlen = l;
2227 		memcpy(buf, &hubd, l);
2228 		break;
2229 	case C(UR_GET_STATUS, UT_READ_CLASS_DEVICE):
2230 		if (len != 16) {
2231 			err = USBD_IOERROR;
2232 			goto ret;
2233 		}
2234 		memset(buf, 0, len);
2235 		totlen = len;
2236 		break;
2237 	case C(UR_GET_STATUS, UT_READ_CLASS_OTHER):
2238 		DPRINTFN(8,("xhci_root_ctrl_start: get port status i=%d\n",
2239 		    index));
2240 		if (index < 1 || index > sc->sc_noport) {
2241 			err = USBD_IOERROR;
2242 			goto ret;
2243 		}
2244 		if (len != 4) {
2245 			err = USBD_IOERROR;
2246 			goto ret;
2247 		}
2248 		v = XOREAD4(sc, XHCI_PORTSC(index));
2249 		DPRINTFN(8,("xhci_root_ctrl_start: port status=0x%04x\n", v));
2250 		i = UPS_PORT_LS_SET(XHCI_PS_GET_PLS(v));
2251 		switch (XHCI_PS_SPEED(v)) {
2252 		case XHCI_SPEED_FULL:
2253 			i |= UPS_FULL_SPEED;
2254 			break;
2255 		case XHCI_SPEED_LOW:
2256 			i |= UPS_LOW_SPEED;
2257 			break;
2258 		case XHCI_SPEED_HIGH:
2259 			i |= UPS_HIGH_SPEED;
2260 			break;
2261 		case XHCI_SPEED_SUPER:
2262 		default:
2263 			break;
2264 		}
2265 		if (v & XHCI_PS_CCS)	i |= UPS_CURRENT_CONNECT_STATUS;
2266 		if (v & XHCI_PS_PED)	i |= UPS_PORT_ENABLED;
2267 		if (v & XHCI_PS_OCA)	i |= UPS_OVERCURRENT_INDICATOR;
2268 		if (v & XHCI_PS_PR)	i |= UPS_RESET;
2269 		if (v & XHCI_PS_PP)	{
2270 			if (XHCI_PS_SPEED(v) >= XHCI_SPEED_FULL &&
2271 			    XHCI_PS_SPEED(v) <= XHCI_SPEED_HIGH)
2272 				i |= UPS_PORT_POWER;
2273 			else
2274 				i |= UPS_PORT_POWER_SS;
2275 		}
2276 		USETW(ps.wPortStatus, i);
2277 		i = 0;
2278 		if (v & XHCI_PS_CSC)    i |= UPS_C_CONNECT_STATUS;
2279 		if (v & XHCI_PS_PEC)    i |= UPS_C_PORT_ENABLED;
2280 		if (v & XHCI_PS_OCC)    i |= UPS_C_OVERCURRENT_INDICATOR;
2281 		if (v & XHCI_PS_PRC)	i |= UPS_C_PORT_RESET;
2282 		if (v & XHCI_PS_WRC)	i |= UPS_C_BH_PORT_RESET;
2283 		if (v & XHCI_PS_PLC)	i |= UPS_C_PORT_LINK_STATE;
2284 		if (v & XHCI_PS_CEC)	i |= UPS_C_PORT_CONFIG_ERROR;
2285 		USETW(ps.wPortChange, i);
2286 		l = min(len, sizeof ps);
2287 		memcpy(buf, &ps, l);
2288 		totlen = l;
2289 		break;
2290 	case C(UR_SET_DESCRIPTOR, UT_WRITE_CLASS_DEVICE):
2291 		err = USBD_IOERROR;
2292 		goto ret;
2293 	case C(UR_SET_FEATURE, UT_WRITE_CLASS_DEVICE):
2294 		break;
2295 	case C(UR_SET_FEATURE, UT_WRITE_CLASS_OTHER):
2296 
2297 		i = index >> 8;
2298 		index &= 0x00ff;
2299 
2300 		if (index < 1 || index > sc->sc_noport) {
2301 			err = USBD_IOERROR;
2302 			goto ret;
2303 		}
2304 		port = XHCI_PORTSC(index);
2305 		v = XOREAD4(sc, port) & ~XHCI_PS_CLEAR;
2306 
2307 		switch (value) {
2308 		case UHF_PORT_ENABLE:
2309 			XOWRITE4(sc, port, v | XHCI_PS_PED);
2310 			break;
2311 		case UHF_PORT_SUSPEND:
2312 			DPRINTFN(6, ("suspend port %u (LPM=%u)\n", index, i));
2313 			if (XHCI_PS_SPEED(v) == XHCI_SPEED_SUPER) {
2314 				err = USBD_IOERROR;
2315 				goto ret;
2316 			}
2317 			XOWRITE4(sc, port, v |
2318 			    XHCI_PS_SET_PLS(i ? 2 /* LPM */ : 3) | XHCI_PS_LWS);
2319 			break;
2320 		case UHF_PORT_RESET:
2321 			DPRINTFN(6, ("reset port %d\n", index));
2322 			XOWRITE4(sc, port, v | XHCI_PS_PR);
2323 			break;
2324 		case UHF_PORT_POWER:
2325 			DPRINTFN(3, ("set port power %d\n", index));
2326 			XOWRITE4(sc, port, v | XHCI_PS_PP);
2327 			break;
2328 		case UHF_PORT_INDICATOR:
2329 			DPRINTFN(3, ("set port indicator %d\n", index));
2330 
2331 			v &= ~XHCI_PS_SET_PIC(3);
2332 			v |= XHCI_PS_SET_PIC(1);
2333 
2334 			XOWRITE4(sc, port, v);
2335 			break;
2336 		case UHF_C_PORT_RESET:
2337 			XOWRITE4(sc, port, v | XHCI_PS_PRC);
2338 			break;
2339 		case UHF_C_BH_PORT_RESET:
2340 			XOWRITE4(sc, port, v | XHCI_PS_WRC);
2341 			break;
2342 		default:
2343 			err = USBD_IOERROR;
2344 			goto ret;
2345 		}
2346 		break;
2347 	case C(UR_CLEAR_TT_BUFFER, UT_WRITE_CLASS_OTHER):
2348 	case C(UR_RESET_TT, UT_WRITE_CLASS_OTHER):
2349 	case C(UR_GET_TT_STATE, UT_READ_CLASS_OTHER):
2350 	case C(UR_STOP_TT, UT_WRITE_CLASS_OTHER):
2351 		break;
2352 	default:
2353 		err = USBD_IOERROR;
2354 		goto ret;
2355 	}
2356 	xfer->actlen = totlen;
2357 	err = USBD_NORMAL_COMPLETION;
2358 ret:
2359 	xfer->status = err;
2360 	s = splusb();
2361 	usb_transfer_complete(xfer);
2362 	splx(s);
2363 	return (USBD_IN_PROGRESS);
2364 }
2365 
2366 
/*
 * Pipe method that intentionally does nothing; installed where the
 * generic USB stack requires a callback but no work is needed.
 */
void
xhci_noop(struct usbd_xfer *xfer)
{
}
2371 
2372 
2373 usbd_status
2374 xhci_root_intr_transfer(struct usbd_xfer *xfer)
2375 {
2376 	usbd_status err;
2377 
2378 	err = usb_insert_transfer(xfer);
2379 	if (err)
2380 		return (err);
2381 
2382 	return (xhci_root_intr_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2383 }
2384 
2385 usbd_status
2386 xhci_root_intr_start(struct usbd_xfer *xfer)
2387 {
2388 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2389 
2390 	if (sc->sc_bus.dying)
2391 		return (USBD_IOERROR);
2392 
2393 	sc->sc_intrxfer = xfer;
2394 
2395 	return (USBD_IN_PROGRESS);
2396 }
2397 
2398 void
2399 xhci_root_intr_abort(struct usbd_xfer *xfer)
2400 {
2401 	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
2402 	int s;
2403 
2404 	sc->sc_intrxfer = NULL;
2405 
2406 	xfer->status = USBD_CANCELLED;
2407 	s = splusb();
2408 	usb_transfer_complete(xfer);
2409 	splx(s);
2410 }
2411 
/* Root hub interrupt transfer completion hook; nothing to clean up. */
void
xhci_root_intr_done(struct usbd_xfer *xfer)
{
}
2416 
2417 /* Number of packets remaining in the TD after the corresponding TRB. */
2418 static inline uint32_t
2419 xhci_xfer_tdsize(struct usbd_xfer *xfer, uint32_t remain, uint32_t len)
2420 {
2421 	uint32_t npkt, mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
2422 
2423 	if (len == 0)
2424 		return XHCI_TRB_TDREM(0);
2425 
2426 	npkt = (remain - len) / mps;
2427 	if (npkt > 31)
2428 		npkt = 31;
2429 
2430 	return XHCI_TRB_TDREM(npkt);
2431 }
2432 
2433 usbd_status
2434 xhci_device_ctrl_transfer(struct usbd_xfer *xfer)
2435 {
2436 	usbd_status err;
2437 
2438 	err = usb_insert_transfer(xfer);
2439 	if (err)
2440 		return (err);
2441 
2442 	return (xhci_device_ctrl_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2443 }
2444 
/*
 * Enqueue a control transfer on the pipe's transfer ring: a Setup TRB,
 * an optional Data TRB and a Status TRB, then ring the slot doorbell.
 * Returns USBD_IN_PROGRESS; completion is reported asynchronously (or
 * synchronously when the bus is polling).
 */
usbd_status
xhci_device_ctrl_start(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_trb *trb0, *trb;
	uint32_t flags, len = UGETW(xfer->request.wLength);
	uint8_t toggle0, toggle;
	int s;

	KASSERT(xfer->rqflags & URQ_REQUEST);

	if (sc->sc_bus.dying || xp->halted)
		return (USBD_IOERROR);

	/* A control TD uses at most 3 TRBs: Setup, Data and Status. */
	if (xp->free_trbs < 3)
		return (USBD_NOMEM);

	/*
	 * We'll do the setup TRB once we're finished with the other stages.
	 * NOTE(review): presumably so its cycle bit (toggle0) only hands the
	 * TD to the controller after all TRBs are valid — confirm against
	 * xhci_xfer_get_trb().
	 */
	trb0 = xhci_xfer_get_trb(sc, xfer, &toggle0, 0);

	/* Data TRB, only present when the request carries a payload. */
	if (len != 0) {
		trb = xhci_xfer_get_trb(sc, xfer, &toggle, 0);

		flags = XHCI_TRB_TYPE_DATA | toggle;
		if (usbd_xfer_isread(xfer))
			flags |= XHCI_TRB_DIR_IN | XHCI_TRB_ISP;

		trb->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0));
		trb->trb_status = htole32(
		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
		    xhci_xfer_tdsize(xfer, len, len)
		);
		trb->trb_flags = htole32(flags);

	}

	/* Status TRB; only this TRB requests a completion interrupt (IOC). */
	trb = xhci_xfer_get_trb(sc, xfer, &toggle, 1);

	/* The status stage direction is the opposite of the data stage. */
	flags = XHCI_TRB_TYPE_STATUS | XHCI_TRB_IOC | toggle;
	if (len == 0 || !usbd_xfer_isread(xfer))
		flags |= XHCI_TRB_DIR_IN;

	trb->trb_paddr = 0;
	trb->trb_status = htole32(XHCI_TRB_INTR(0));
	trb->trb_flags = htole32(flags);

	/* Setup TRB */
	flags = XHCI_TRB_TYPE_SETUP | XHCI_TRB_IDT | toggle0;
	if (len != 0) {
		if (usbd_xfer_isread(xfer))
			flags |= XHCI_TRB_TRT_IN;
		else
			flags |= XHCI_TRB_TRT_OUT;
	}

	/*
	 * With IDT set, the 8-byte setup packet is stored in the TRB
	 * itself: copy its bytes verbatim, no byte swapping.
	 */
	trb0->trb_paddr = (uint64_t)*((uint64_t *)&xfer->request);
	trb0->trb_status = htole32(XHCI_TRB_INTR(0) | XHCI_TRB_LEN(8));
	trb0->trb_flags = htole32(flags);

	/* Flush all three TRBs to memory before ringing the doorbell. */
	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb0), 3 * sizeof(struct xhci_trb),
	    BUS_DMASYNC_PREWRITE);

	s = splusb();
	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);

	xfer->status = USBD_IN_PROGRESS;

	if (sc->sc_bus.use_polling)
		xhci_waitintr(sc, xfer);
	else if (xfer->timeout) {
		/* (Re)arm the per-transfer timeout. */
		timeout_del(&xfer->timeout_handle);
		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
	}
	splx(s);

	return (USBD_IN_PROGRESS);
}
2527 
/* Abort a pending control transfer, completing it as cancelled. */
void
xhci_device_ctrl_abort(struct usbd_xfer *xfer)
{
	xhci_abort_xfer(xfer, USBD_CANCELLED);
}
2533 
2534 usbd_status
2535 xhci_device_generic_transfer(struct usbd_xfer *xfer)
2536 {
2537 	usbd_status err;
2538 
2539 	err = usb_insert_transfer(xfer);
2540 	if (err)
2541 		return (err);
2542 
2543 	return (xhci_device_generic_start(SIMPLEQ_FIRST(&xfer->pipe->queue)));
2544 }
2545 
/*
 * Enqueue a bulk or interrupt transfer: split the buffer into a chain
 * of Normal TRBs (at most XHCI_TRB_MAXSIZE bytes each, never crossing
 * a 64k boundary in the first TRB) and ring the endpoint doorbell.
 * Returns USBD_IN_PROGRESS; completion is reported asynchronously (or
 * synchronously when the bus is polling).
 */
usbd_status
xhci_device_generic_start(struct usbd_xfer *xfer)
{
	struct xhci_softc *sc = (struct xhci_softc *)xfer->device->bus;
	struct xhci_pipe *xp = (struct xhci_pipe *)xfer->pipe;
	struct xhci_trb *trb0, *trb;
	uint32_t len, remain, flags;
	uint32_t len0, mps;
	uint64_t paddr = DMAADDR(&xfer->dmabuf, 0);
	uint8_t toggle0, toggle;
	int s, i, ntrb;

	KASSERT(!(xfer->rqflags & URQ_REQUEST));

	if (sc->sc_bus.dying || xp->halted)
		return (USBD_IOERROR);

	/* How many TRBs do we need for this transfer? */
	ntrb = (xfer->length + XHCI_TRB_MAXSIZE - 1) / XHCI_TRB_MAXSIZE;

	/* If the buffer crosses a 64k boundary, we need one more. */
	len0 = XHCI_TRB_MAXSIZE - (paddr & (XHCI_TRB_MAXSIZE - 1));
	if (len0 < xfer->length)
		ntrb++;
	else
		len0 = xfer->length;

	/*
	 * If we need to append a zero length packet, we need one more.
	 * The extra TRB is produced by the chain loop below: once
	 * `remain' reaches 0 the final TRB gets len == 0.
	 */
	mps = UGETW(xfer->pipe->endpoint->edesc->wMaxPacketSize);
	if ((xfer->flags & USBD_FORCE_SHORT_XFER || xfer->length == 0) &&
	    (xfer->length % mps == 0))
		ntrb++;

	if (xp->free_trbs < ntrb)
		return (USBD_NOMEM);

	/*
	 * We'll do the first TRB once we're finished with the chain.
	 * NOTE(review): presumably so its cycle bit (toggle0) only hands
	 * the TD to the controller after the chained TRBs are valid —
	 * confirm against xhci_xfer_get_trb().
	 */
	trb0 = xhci_xfer_get_trb(sc, xfer, &toggle0, (ntrb == 1));

	remain = xfer->length - len0;
	paddr += len0;
	len = min(remain, XHCI_TRB_MAXSIZE);

	/* Chain more TRBs if needed. */
	for (i = ntrb - 1; i > 0; i--) {
		/* Next (or Last) TRB; only the last one interrupts (IOC). */
		trb = xhci_xfer_get_trb(sc, xfer, &toggle, (i == 1));
		flags = XHCI_TRB_TYPE_NORMAL | toggle;
		if (usbd_xfer_isread(xfer))
			flags |= XHCI_TRB_ISP;
		flags |= (i == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;

		trb->trb_paddr = htole64(paddr);
		trb->trb_status = htole32(
		    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len) |
		    xhci_xfer_tdsize(xfer, remain, len)
		);
		trb->trb_flags = htole32(flags);

		remain -= len;
		paddr += len;
		len = min(remain, XHCI_TRB_MAXSIZE);
	}

	/* First TRB. */
	flags = XHCI_TRB_TYPE_NORMAL | toggle0;
	if (usbd_xfer_isread(xfer))
		flags |= XHCI_TRB_ISP;
	flags |= (ntrb == 1) ? XHCI_TRB_IOC : XHCI_TRB_CHAIN;

	trb0->trb_paddr = htole64(DMAADDR(&xfer->dmabuf, 0));
	trb0->trb_status = htole32(
	    XHCI_TRB_INTR(0) | XHCI_TRB_LEN(len0) |
	    xhci_xfer_tdsize(xfer, xfer->length, len0)
 	);
	trb0->trb_flags = htole32(flags);

	/* Flush the whole TRB chain to memory before the doorbell. */
	bus_dmamap_sync(xp->ring.dma.tag, xp->ring.dma.map,
	    TRBOFF(&xp->ring, trb0), sizeof(struct xhci_trb) * ntrb,
	    BUS_DMASYNC_PREWRITE);

	s = splusb();
	XDWRITE4(sc, XHCI_DOORBELL(xp->slot), xp->dci);

	xfer->status = USBD_IN_PROGRESS;

	if (sc->sc_bus.use_polling)
		xhci_waitintr(sc, xfer);
	else if (xfer->timeout) {
		/* (Re)arm the per-transfer timeout. */
		timeout_del(&xfer->timeout_handle);
		timeout_set(&xfer->timeout_handle, xhci_timeout, xfer);
		timeout_add_msec(&xfer->timeout_handle, xfer->timeout);
	}
	splx(s);

	return (USBD_IN_PROGRESS);
}
2643 
2644 void
2645 xhci_device_generic_done(struct usbd_xfer *xfer)
2646 {
2647 	usb_syncmem(&xfer->dmabuf, 0, xfer->length, usbd_xfer_isread(xfer) ?
2648 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
2649 
2650 	/* Only happens with interrupt transfers. */
2651 	if (xfer->pipe->repeat) {
2652 		xfer->actlen = 0;
2653 		xhci_device_generic_start(xfer);
2654 	}
2655 }
2656 
/* Abort a pending bulk/interrupt transfer, completing it as cancelled. */
void
xhci_device_generic_abort(struct usbd_xfer *xfer)
{
	/* A repeating pipe must only be aborted through its own intrxfer. */
	KASSERT(!xfer->pipe->repeat || xfer->pipe->intrxfer == xfer);

	xhci_abort_xfer(xfer, USBD_CANCELLED);
}
2664