xref: /netbsd-src/sys/external/bsd/dwc2/dist/dwc2_hcdqueue.c (revision 6a493d6bc668897c91594964a732d38505b70cbb)
1 /*	$NetBSD: dwc2_hcdqueue.c,v 1.6 2013/11/24 12:25:19 skrll Exp $	*/
2 
3 /*
4  * hcd_queue.c - DesignWare HS OTG Controller host queuing routines
5  *
6  * Copyright (C) 2004-2013 Synopsys, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. The names of the above-listed copyright holders may not be used
18  *    to endorse or promote products derived from this software without
19  *    specific prior written permission.
20  *
21  * ALTERNATIVELY, this software may be distributed under the terms of the
22  * GNU General Public License ("GPL") as published by the Free Software
23  * Foundation; either version 2 of the License, or (at your option) any
24  * later version.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * This file contains the functions to manage Queue Heads and Queue
41  * Transfer Descriptors for Host mode
42  */
43 
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: dwc2_hcdqueue.c,v 1.6 2013/11/24 12:25:19 skrll Exp $");
46 
47 #include <sys/types.h>
48 #include <sys/kmem.h>
49 #include <sys/pool.h>
50 
51 #include <dev/usb/usb.h>
52 #include <dev/usb/usbdi.h>
53 #include <dev/usb/usbdivar.h>
54 #include <dev/usb/usb_mem.h>
55 
56 #include <machine/param.h>
57 
58 #include <linux/kernel.h>
59 
60 #include <dwc2/dwc2.h>
61 #include <dwc2/dwc2var.h>
62 
63 #include "dwc2_core.h"
64 #include "dwc2_hcd.h"
65 
66 static u32 dwc2_calc_bus_time(struct dwc2_hsotg *, int, int, int, int);
67 
/**
 * dwc2_qh_init() - Initializes a QH structure
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 * @urb:   Holds the information about the device/endpoint needed to initialize
 *         the QH
 *
 * Fills in endpoint type/direction, data toggle and max packet size from
 * the URB's pipe info.  For periodic (INT/ISOC) endpoints the bus time and
 * the first (micro)frame scheduling parameters are computed once here and
 * cached in the QH.
 */
/* Number of (micro)frames of slack before the first scheduled slot */
#define SCHEDULE_SLOP 10
static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			 struct dwc2_hcd_urb *urb)
{
	int dev_speed, hub_addr, hub_port;
	const char *speed, *type;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Initialize QH */
	qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
	qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;

	qh->data_toggle = DWC2_HC_PID_DATA0;
	qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
	INIT_LIST_HEAD(&qh->qtd_list);
	INIT_LIST_HEAD(&qh->qh_list_entry);

	/* FS/LS Endpoint on HS Hub, NOT virtual root hub */
	dev_speed = dwc2_host_get_speed(hsotg, urb->priv);

	dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);

	/*
	 * A LS/FS device behind a high-speed hub's TT needs split
	 * transactions.  NOTE(review): hub addresses 0 and 1 are excluded --
	 * presumably they denote "no hub" / the virtual root hub; confirm
	 * against dwc2_host_hub_info().
	 */
	if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) &&
	    hub_addr != 0 && hub_addr != 1) {
		dev_vdbg(hsotg->dev,
			 "QH init: EP %d: TT found at hub addr %d, for port %d\n",
			 dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr,
			 hub_port);
		qh->do_split = 1;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
	    qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
		/* Compute scheduling parameters once and save them */
		u32 hprt, prtspd;

		/* Todo: Account for split transfers in the bus time */
		int bytecount =
			dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);

		/* Split transactions run their wire segment at high speed */
		qh->usecs = dwc2_calc_bus_time(hsotg, qh->do_split ?
				USB_SPEED_HIGH : dev_speed, qh->ep_is_in,
				qh->ep_type == USB_ENDPOINT_XFER_ISOC,
				bytecount);
		/* Start in a slightly future (micro)frame */
		qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
						     SCHEDULE_SLOP);
		qh->interval = urb->interval;
#if 0
		/* Increase interrupt polling rate for debugging */
		if (qh->ep_type == USB_ENDPOINT_XFER_INT)
			qh->interval = 8;
#endif
		hprt = DWC2_READ_4(hsotg, HPRT0);
		prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
		/*
		 * LS/FS device on a HS root port: the endpoint interval is
		 * expressed in frames while the schedule runs in microframes,
		 * so scale by 8 and align the schedule to the last microframe
		 * of a frame (|= 0x7), which is where start splits go out.
		 */
		if (prtspd == HPRT0_SPD_HIGH_SPEED &&
		    (dev_speed == USB_SPEED_LOW ||
		     dev_speed == USB_SPEED_FULL)) {
			qh->interval *= 8;
			qh->sched_frame |= 0x7;
			qh->start_split_frame = qh->sched_frame;
		}
		dev_dbg(hsotg->dev, "interval=%d\n", qh->interval);
	}

	/* Everything below only emits verbose debug output */
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n");
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh);
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n",
		 dwc2_hcd_get_dev_addr(&urb->pipe_info));
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n",
		 dwc2_hcd_get_ep_num(&urb->pipe_info),
		 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");

	qh->dev_speed = dev_speed;

	switch (dev_speed) {
	case USB_SPEED_LOW:
		speed = "low";
		break;
	case USB_SPEED_FULL:
		speed = "full";
		break;
	case USB_SPEED_HIGH:
		speed = "high";
		break;
	default:
		speed = "?";
		break;
	}
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed);

	switch (qh->ep_type) {
	case USB_ENDPOINT_XFER_ISOC:
		type = "isochronous";
		break;
	case USB_ENDPOINT_XFER_INT:
		type = "interrupt";
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		type = "control";
		break;
	case USB_ENDPOINT_XFER_BULK:
		type = "bulk";
		break;
	default:
		type = "?";
		break;
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type);

	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n",
			 qh->usecs);
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n",
			 qh->interval);
	}
}
195 
196 /**
197  * dwc2_hcd_qh_create() - Allocates and initializes a QH
198  *
199  * @hsotg:     The HCD state structure for the DWC OTG controller
200  * @urb:       Holds the information about the device/endpoint needed
201  *             to initialize the QH
202  * @mem_flags: Flag to do atomic allocation if needed
203  *
204  * Return: Pointer to the newly allocated QH, or NULL on error
205  */
206 static struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
207 					  struct dwc2_hcd_urb *urb,
208 					  gfp_t mem_flags)
209 {
210 	struct dwc2_softc *sc = hsotg->hsotg_sc;
211 	struct dwc2_qh *qh;
212 
213 	if (!urb->priv)
214 		return NULL;
215 
216 	/* Allocate memory */
217 	qh = pool_cache_get(sc->sc_qhpool, PR_NOWAIT);
218 	if (!qh)
219 		return NULL;
220 
221 	memset(qh, 0, sizeof(*qh));
222 	dwc2_qh_init(hsotg, qh, urb);
223 
224 	if (hsotg->core_params->dma_desc_enable > 0 &&
225 	    dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
226 		dwc2_hcd_qh_free(hsotg, qh);
227 		return NULL;
228 	}
229 
230 	return qh;
231 }
232 
233 /**
234  * dwc2_hcd_qh_free() - Frees the QH
235  *
236  * @hsotg: HCD instance
237  * @qh:    The QH to free
238  *
239  * QH should already be removed from the list. QTD list should already be empty
240  * if called from URB Dequeue.
241  *
242  * Must NOT be called with interrupt disabled or spinlock held
243  */
244 void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
245 {
246 	struct dwc2_softc *sc = hsotg->hsotg_sc;
247 
248 	if (hsotg->core_params->dma_desc_enable > 0) {
249 		dwc2_hcd_qh_free_ddma(hsotg, qh);
250 	} else if (qh->dw_align_buf) {
251 		/* XXXNH */
252 		usb_freemem(&hsotg->hsotg_sc->sc_bus, &qh->dw_align_buf_usbdma);
253 	}
254 
255 	pool_cache_put(sc->sc_qhpool, qh);
256 }
257 
258 /**
259  * dwc2_periodic_channel_available() - Checks that a channel is available for a
260  * periodic transfer
261  *
262  * @hsotg: The HCD state structure for the DWC OTG controller
263  *
264  * Return: 0 if successful, negative error code otherwise
265  */
266 static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
267 {
268 	/*
269 	 * Currently assuming that there is a dedicated host channel for
270 	 * each periodic transaction plus at least one host channel for
271 	 * non-periodic transactions
272 	 */
273 	int status;
274 	int num_channels;
275 
276 	num_channels = hsotg->core_params->host_channels;
277 	if (hsotg->periodic_channels + hsotg->non_periodic_channels <
278 								num_channels
279 	    && hsotg->periodic_channels < num_channels - 1) {
280 		status = 0;
281 	} else {
282 		dev_dbg(hsotg->dev,
283 			"%s: Total channels: %d, Periodic: %d, "
284 			"Non-periodic: %d\n", __func__, num_channels,
285 			hsotg->periodic_channels, hsotg->non_periodic_channels);
286 		status = -ENOSPC;
287 	}
288 
289 	return status;
290 }
291 
292 /**
293  * dwc2_check_periodic_bandwidth() - Checks that there is sufficient bandwidth
294  * for the specified QH in the periodic schedule
295  *
296  * @hsotg: The HCD state structure for the DWC OTG controller
297  * @qh:    QH containing periodic bandwidth required
298  *
299  * Return: 0 if successful, negative error code otherwise
300  *
301  * For simplicity, this calculation assumes that all the transfers in the
302  * periodic schedule may occur in the same (micro)frame
303  */
304 static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
305 					 struct dwc2_qh *qh)
306 {
307 	int status;
308 	s16 max_claimed_usecs;
309 
310 	status = 0;
311 
312 	if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
313 		/*
314 		 * High speed mode
315 		 * Max periodic usecs is 80% x 125 usec = 100 usec
316 		 */
317 		max_claimed_usecs = 100 - qh->usecs;
318 	} else {
319 		/*
320 		 * Full speed mode
321 		 * Max periodic usecs is 90% x 1000 usec = 900 usec
322 		 */
323 		max_claimed_usecs = 900 - qh->usecs;
324 	}
325 
326 	if (hsotg->periodic_usecs > max_claimed_usecs) {
327 		dev_err(hsotg->dev,
328 			"%s: already claimed usecs %d, required usecs %d\n",
329 			__func__, hsotg->periodic_usecs, qh->usecs);
330 		status = -ENOSPC;
331 	}
332 
333 	return status;
334 }
335 
/**
 * Microframe scheduler:
 * - the remaining time of each microframe is tracked in hsotg->frame_usecs
 * - each QH records the time it has claimed in qh->frame_usecs
 * - when a QH surrenders its slot, the time is donated back
 */
/*
 * Usable usecs per microframe: 100 (80% of 125) for uframes 0-5.
 * NOTE(review): uframes 6-7 get 30/0 -- presumably reserving the tail of
 * the frame for other traffic; confirm the rationale upstream.
 */
static const unsigned short max_uframe_usecs[] = {
	100, 100, 100, 100, 100, 100, 30, 0
};
345 
346 void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
347 {
348 	int i;
349 
350 	for (i = 0; i < 8; i++)
351 		hsotg->frame_usecs[i] = max_uframe_usecs[i];
352 }
353 
354 static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
355 {
356 	unsigned short utime = qh->usecs;
357 	int i;
358 
359 	for (i = 0; i < 8; i++) {
360 		/* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */
361 		if (utime <= hsotg->frame_usecs[i]) {
362 			hsotg->frame_usecs[i] -= utime;
363 			qh->frame_usecs[i] += utime;
364 			return i;
365 		}
366 	}
367 	return -1;
368 }
369 
/*
 * use this for FS apps that can span multiple uframes
 *
 * Tries to find a run of consecutive microframes starting at index i whose
 * combined remaining time covers qh->usecs, claiming time greedily from
 * each.  Returns the starting microframe index, or -1 if no fit was found.
 */
static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned short utime = qh->usecs;	/* total time still needed */
	unsigned short xtime;			/* time accumulated from i..j */
	int t_left;
	int i;
	int j;
	int k;

	for (i = 0; i < 8; i++) {
		/* Candidate start slot must have some time left */
		if (hsotg->frame_usecs[i] <= 0)
			continue;

		/*
		 * we need n consecutive slots so use j as a start slot
		 * j plus j+1 must be enough time (for now)
		 */
		xtime = hsotg->frame_usecs[i];
		for (j = i + 1; j < 8; j++) {
			/*
			 * if we add this frame remaining time to xtime we may
			 * be OK, if not we need to test j for a complete frame
			 */
			if (xtime + hsotg->frame_usecs[j] < utime) {
				if (hsotg->frame_usecs[j] <
							max_uframe_usecs[j])
					continue;
			}
			if (xtime >= utime) {
				/* Enough accumulated: claim time from slots
				 * i..k until the requirement is satisfied */
				t_left = utime;
				for (k = i; k < 8; k++) {
					t_left -= hsotg->frame_usecs[k];
					if (t_left <= 0) {
						/* Partial claim of slot k;
						 * leave the remainder */
						qh->frame_usecs[k] +=
							hsotg->frame_usecs[k]
								+ t_left;
						hsotg->frame_usecs[k] = -t_left;
						return i;
					} else {
						/* Consume slot k entirely */
						qh->frame_usecs[k] +=
							hsotg->frame_usecs[k];
						hsotg->frame_usecs[k] = 0;
					}
				}
			}
			/* add the frame time to x time */
			xtime += hsotg->frame_usecs[j];
			/* we must have a fully available next frame or break */
			if (xtime < utime &&
			   hsotg->frame_usecs[j] == max_uframe_usecs[j])
				continue;
			/*
			 * NOTE(review): the comment above says "or break",
			 * but this `continue` is the last statement of the
			 * loop body, so it behaves identically to falling
			 * through -- a partially-used slot never terminates
			 * the inner scan as apparently intended.  Confirm
			 * against upstream scheduler fixes before changing.
			 */
		}
	}
	return -1;
}
428 
429 static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
430 {
431 	int ret;
432 
433 	if (qh->dev_speed == USB_SPEED_HIGH) {
434 		/* if this is a hs transaction we need a full frame */
435 		ret = dwc2_find_single_uframe(hsotg, qh);
436 	} else {
437 		/*
438 		 * if this is a fs transaction we may need a sequence
439 		 * of frames
440 		 */
441 		ret = dwc2_find_multi_uframe(hsotg, qh);
442 	}
443 	return ret;
444 }
445 
446 /**
447  * dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a
448  * host channel is large enough to handle the maximum data transfer in a single
449  * (micro)frame for a periodic transfer
450  *
451  * @hsotg: The HCD state structure for the DWC OTG controller
452  * @qh:    QH for a periodic endpoint
453  *
454  * Return: 0 if successful, negative error code otherwise
455  */
456 static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
457 				    struct dwc2_qh *qh)
458 {
459 	u32 max_xfer_size;
460 	u32 max_channel_xfer_size;
461 	int status = 0;
462 
463 	max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
464 	max_channel_xfer_size = hsotg->core_params->max_transfer_size;
465 
466 	if (max_xfer_size > max_channel_xfer_size) {
467 		dev_err(hsotg->dev,
468 			"%s: Periodic xfer length %d > max xfer length for channel %d\n",
469 			__func__, max_xfer_size, max_channel_xfer_size);
470 		status = -ENOSPC;
471 	}
472 
473 	return status;
474 }
475 
/**
 * dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in
 * the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer. The QH should already contain the
 *         scheduling information.
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * With the microframe scheduler (uframe_sched > 0) a concrete microframe
 * slot is reserved; otherwise a whole channel plus bandwidth check is used.
 */
static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;

	if (hsotg->core_params->uframe_sched > 0) {
		int frame = -1;

		/* dwc2_find_uframe() returns the claimed slot 0-7, or -1 */
		status = dwc2_find_uframe(hsotg, qh);
		/*
		 * NOTE(review): slot 0 maps to frame 7 and slot N (N>0) to
		 * frame N-1, i.e. the slot index minus one modulo 8 --
		 * presumably to issue one microframe ahead of the claimed
		 * slot; confirm against the interrupt scheduling code.
		 */
		if (status == 0)
			frame = 7;
		else if (status > 0)
			frame = status - 1;

		/* Set the new frame up */
		if (frame > -1) {
			qh->sched_frame &= ~0x7;
			qh->sched_frame |= (frame & 7);
		}

		/* Normalize: anything but the -1 failure becomes success */
		if (status != -1)
			status = 0;
	} else {
		/* Legacy scheduler: dedicated channel per periodic transfer */
		status = dwc2_periodic_channel_available(hsotg);
		if (status) {
			dev_info(hsotg->dev,
				 "%s: No host channel available for periodic transfer\n",
				 __func__);
			return status;
		}

		status = dwc2_check_periodic_bandwidth(hsotg, qh);
	}

	/* NOTE(review): on uframe-sched failure status is -1 here, which is
	 * not a conventional errno value -- callers only test for nonzero. */
	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Insufficient periodic bandwidth for periodic transfer\n",
			__func__);
		return status;
	}

	status = dwc2_check_max_xfer_size(hsotg, qh);
	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Channel max transfer size too small for periodic transfer\n",
			__func__);
		return status;
	}

	if (hsotg->core_params->dma_desc_enable > 0)
		/* Don't rely on SOF and start in ready schedule */
		list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
	else
		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->periodic_sched_inactive);

	if (hsotg->core_params->uframe_sched <= 0)
		/* Reserve periodic channel */
		hsotg->periodic_channels++;

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs += qh->usecs;

	return status;
}
551 
552 /**
553  * dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
554  * from the periodic schedule
555  *
556  * @hsotg: The HCD state structure for the DWC OTG controller
557  * @qh:	   QH for the periodic transfer
558  */
559 static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
560 				     struct dwc2_qh *qh)
561 {
562 	int i;
563 
564 	list_del_init(&qh->qh_list_entry);
565 
566 	/* Update claimed usecs per (micro)frame */
567 	hsotg->periodic_usecs -= qh->usecs;
568 
569 	if (hsotg->core_params->uframe_sched > 0) {
570 		for (i = 0; i < 8; i++) {
571 			hsotg->frame_usecs[i] += qh->frame_usecs[i];
572 			qh->frame_usecs[i] = 0;
573 		}
574 	} else {
575 		/* Release periodic channel reservation */
576 		hsotg->periodic_channels--;
577 	}
578 }
579 
580 /**
581  * dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic
582  * schedule if it is not already in the schedule. If the QH is already in
583  * the schedule, no action is taken.
584  *
585  * @hsotg: The HCD state structure for the DWC OTG controller
586  * @qh:    The QH to add
587  *
588  * Return: 0 if successful, negative error code otherwise
589  */
590 int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
591 {
592 	int status = 0;
593 	u32 intr_mask;
594 
595 	if (dbg_qh(qh))
596 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
597 
598 	if (!list_empty(&qh->qh_list_entry))
599 		/* QH already in a schedule */
600 		return status;
601 
602 	/* Add the new QH to the appropriate schedule */
603 	if (dwc2_qh_is_non_per(qh)) {
604 		/* Always start in inactive schedule */
605 		list_add_tail(&qh->qh_list_entry,
606 			      &hsotg->non_periodic_sched_inactive);
607 	} else {
608 		status = dwc2_schedule_periodic(hsotg, qh);
609 		if (status == 0) {
610 			if (!hsotg->periodic_qh_count) {
611 				intr_mask = DWC2_READ_4(hsotg, GINTMSK);
612 				intr_mask |= GINTSTS_SOF;
613 				DWC2_WRITE_4(hsotg, GINTMSK, intr_mask);
614 			}
615 			hsotg->periodic_qh_count++;
616 		}
617 	}
618 
619 	return status;
620 }
621 
622 /**
623  * dwc2_hcd_qh_unlink() - Removes a QH from either the non-periodic or periodic
624  * schedule. Memory is not freed.
625  *
626  * @hsotg: The HCD state structure
627  * @qh:    QH to remove from schedule
628  */
629 void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
630 {
631 	u32 intr_mask;
632 
633 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
634 
635 	if (list_empty(&qh->qh_list_entry))
636 		/* QH is not in a schedule */
637 		return;
638 
639 	if (dwc2_qh_is_non_per(qh)) {
640 		if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
641 			hsotg->non_periodic_qh_ptr =
642 					hsotg->non_periodic_qh_ptr->next;
643 		list_del_init(&qh->qh_list_entry);
644 	} else {
645 		dwc2_deschedule_periodic(hsotg, qh);
646 		hsotg->periodic_qh_count--;
647 		if (!hsotg->periodic_qh_count) {
648 			intr_mask = DWC2_READ_4(hsotg, GINTMSK);
649 			intr_mask &= ~GINTSTS_SOF;
650 			DWC2_WRITE_4(hsotg, GINTMSK, intr_mask);
651 		}
652 	}
653 }
654 
/*
 * Schedule the next continuing periodic split transfer
 *
 * @hsotg: HCD state
 * @qh:    QH for the split transfer
 * @frame_number: the current frame number
 * @sched_next_periodic_split: nonzero to schedule the continuing
 *	(complete) split near the current frame; zero to schedule the next
 *	start split a full interval after the previous one.
 *
 * All frame arithmetic is modular, via dwc2_frame_num_inc() and
 * dwc2_frame_num_le().
 */
static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh, u16 frame_number,
				      int sched_next_periodic_split)
{
	u16 incr;

	if (sched_next_periodic_split) {
		/* Continue the split in the current frame by default */
		qh->sched_frame = frame_number;
		incr = dwc2_frame_num_inc(qh->start_split_frame, 1);
		if (dwc2_frame_num_le(frame_number, incr)) {
			/*
			 * Allow one frame to elapse after start split
			 * microframe before scheduling complete split, but
			 * DON'T if we are doing the next start split in the
			 * same frame for an ISOC out
			 */
			if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
			    qh->ep_is_in != 0) {
				qh->sched_frame =
					dwc2_frame_num_inc(qh->sched_frame, 1);
			}
		}
	} else {
		/*
		 * Next start split: one interval after the previous start,
		 * but never in the past; align to the last microframe of the
		 * frame (|= 0x7), where start splits are issued.
		 */
		qh->sched_frame = dwc2_frame_num_inc(qh->start_split_frame,
						     qh->interval);
		if (dwc2_frame_num_le(qh->sched_frame, frame_number))
			qh->sched_frame = frame_number;
		qh->sched_frame |= 0x7;
		qh->start_split_frame = qh->sched_frame;
	}
}
689 
/*
 * Deactivates a QH. For non-periodic QHs, removes the QH from the active
 * non-periodic schedule. The QH is added to the inactive non-periodic
 * schedule if any QTDs are still attached to the QH.
 *
 * For periodic QHs, the QH is removed from the periodic queued schedule. If
 * there are any QTDs still attached to the QH, the QH is added to either the
 * periodic inactive schedule or the periodic ready schedule and its next
 * scheduled frame is calculated. The QH is placed in the ready schedule if
 * the scheduled frame has been reached already. Otherwise it's placed in the
 * inactive schedule. If there are no QTDs attached to the QH, the QH is
 * completely removed from the periodic schedule.
 */
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			    int sched_next_periodic_split)
{
	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (dwc2_qh_is_non_per(qh)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		if (!list_empty(&qh->qtd_list))
			/* Add back to inactive non-periodic schedule */
			dwc2_hcd_qh_add(hsotg, qh);
	} else {
		u16 frame_number = dwc2_hcd_get_frame_number(hsotg);

		/* Compute the QH's next scheduled (micro)frame */
		if (qh->do_split) {
			dwc2_sched_periodic_split(hsotg, qh, frame_number,
						  sched_next_periodic_split);
		} else {
			/* Advance by one interval, but never into the past */
			qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame,
							     qh->interval);
			if (dwc2_frame_num_le(qh->sched_frame, frame_number))
				qh->sched_frame = frame_number;
		}

		if (list_empty(&qh->qtd_list)) {
			/* Nothing left to transfer: drop from the schedule */
			dwc2_hcd_qh_unlink(hsotg, qh);
		} else {
			/*
			 * Remove from periodic_sched_queued and move to
			 * appropriate queue
			 *
			 * Ready if the scheduled frame has arrived; note the
			 * legacy (non-uframe) scheduler requires an exact
			 * frame match rather than less-or-equal.
			 */
			if ((hsotg->core_params->uframe_sched > 0 &&
			     dwc2_frame_num_le(qh->sched_frame, frame_number))
			 || (hsotg->core_params->uframe_sched <= 0 &&
			     qh->sched_frame == frame_number))
				list_move(&qh->qh_list_entry,
					  &hsotg->periodic_sched_ready);
			else
				list_move(&qh->qh_list_entry,
					  &hsotg->periodic_sched_inactive);
		}
	}
}
746 
747 /**
748  * dwc2_hcd_qtd_init() - Initializes a QTD structure
749  *
750  * @qtd: The QTD to initialize
751  * @urb: The associated URB
752  */
753 void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
754 {
755 	qtd->urb = urb;
756 	if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
757 			USB_ENDPOINT_XFER_CONTROL) {
758 		/*
759 		 * The only time the QTD data toggle is used is on the data
760 		 * phase of control transfers. This phase always starts with
761 		 * DATA1.
762 		 */
763 		qtd->data_toggle = DWC2_HC_PID_DATA1;
764 		qtd->control_phase = DWC2_CONTROL_SETUP;
765 	}
766 
767 	/* Start split */
768 	qtd->complete_split = 0;
769 	qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
770 	qtd->isoc_split_offset = 0;
771 	qtd->in_process = 0;
772 
773 	/* Store the qtd ptr in the urb to reference the QTD */
774 	urb->qtd = qtd;
775 }
776 
/**
 * dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
 *
 * @hsotg:     The DWC HCD structure
 * @qtd:       The QTD to add
 * @qh:        Out parameter to return queue head
 * @mem_flags: Flag to do atomic alloc if needed
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Finds the correct QH to place the QTD into. If it does not find a QH, it
 * will create a new QH. If the QH to which the QTD is added is not currently
 * scheduled, it is placed into the proper schedule based on its EP type.
 *
 * Takes hsotg->lock internally around the schedule manipulation, so it must
 * be called without that lock held.
 */
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
		     struct dwc2_qh **qh, gfp_t mem_flags)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	unsigned long flags;
	int allocated = 0;	/* nonzero iff *qh was created here (we own
				 * it on the failure path) */
	int retval;

	/*
	 * Get the QH which holds the QTD-list to insert to. Create QH if it
	 * doesn't exist.
	 */
	if (*qh == NULL) {
		*qh = dwc2_hcd_qh_create(hsotg, urb, mem_flags);
		if (*qh == NULL)
			return -ENOMEM;
		allocated = 1;
	}

	spin_lock_irqsave(&hsotg->lock, flags);

	retval = dwc2_hcd_qh_add(hsotg, *qh);
	if (retval)
		goto fail;

	qtd->qh = *qh;
	list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list);
	spin_unlock_irqrestore(&hsotg->lock, flags);

	return 0;

fail:
	/* Only tear down a QH we allocated ourselves above */
	if (allocated) {
		struct dwc2_qtd *qtd2, *qtd2_tmp;
		struct dwc2_qh *qh_tmp = *qh;

		*qh = NULL;
		dwc2_hcd_qh_unlink(hsotg, qh_tmp);

		/* Free each QTD in the QH's QTD list */
		list_for_each_entry_safe(qtd2, qtd2_tmp, &qh_tmp->qtd_list,
					 qtd_list_entry)
			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh_tmp);

		/* dwc2_hcd_qh_free() must not run with the spinlock held */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		dwc2_hcd_qh_free(hsotg, qh_tmp);
	} else {
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}

	return retval;
}
843 
844 void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
845 				  struct dwc2_qtd *qtd,
846 				  struct dwc2_qh *qh)
847 {
848 	struct dwc2_softc *sc = hsotg->hsotg_sc;
849 
850 	list_del_init(&qtd->qtd_list_entry);
851  	pool_cache_put(sc->sc_qtdpool, qtd);
852 }
853 
854 #define BITSTUFFTIME(bytecount)	((8 * 7 * (bytecount)) / 6)
855 #define HS_HOST_DELAY		5	/* nanoseconds */
856 #define FS_LS_HOST_DELAY	1000	/* nanoseconds */
857 #define HUB_LS_SETUP		333	/* nanoseconds */
858 
859 static u32 dwc2_calc_bus_time(struct dwc2_hsotg *hsotg, int speed, int is_in,
860 			      int is_isoc, int bytecount)
861 {
862 	unsigned long retval;
863 
864 	switch (speed) {
865 	case USB_SPEED_HIGH:
866 		if (is_isoc)
867 			retval =
868 			    ((38 * 8 * 2083) +
869 			     (2083 * (3 + BITSTUFFTIME(bytecount)))) / 1000 +
870 			    HS_HOST_DELAY;
871 		else
872 			retval =
873 			    ((55 * 8 * 2083) +
874 			     (2083 * (3 + BITSTUFFTIME(bytecount)))) / 1000 +
875 			    HS_HOST_DELAY;
876 		break;
877 	case USB_SPEED_FULL:
878 		if (is_isoc) {
879 			retval =
880 			    (8354 * (31 + 10 * BITSTUFFTIME(bytecount))) / 1000;
881 			if (is_in)
882 				retval = 7268 + FS_LS_HOST_DELAY + retval;
883 			else
884 				retval = 6265 + FS_LS_HOST_DELAY + retval;
885 		} else {
886 			retval =
887 			    (8354 * (31 + 10 * BITSTUFFTIME(bytecount))) / 1000;
888 			retval = 9107 + FS_LS_HOST_DELAY + retval;
889 		}
890 		break;
891 	case USB_SPEED_LOW:
892 		if (is_in) {
893 			retval =
894 			    (67667 * (31 + 10 * BITSTUFFTIME(bytecount))) /
895 			    1000;
896 			retval =
897 			    64060 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
898 			    retval;
899 		} else {
900 			retval =
901 			    (66700 * (31 + 10 * BITSTUFFTIME(bytecount))) /
902 			    1000;
903 			retval =
904 			    64107 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
905 			    retval;
906 		}
907 		break;
908 	default:
909 		dev_warn(hsotg->dev, "Unknown device speed\n");
910 		retval = -1;
911 	}
912 
913 	return NS_TO_US(retval);
914 }
915