/*	$NetBSD: dwc2_hcdqueue.c,v 1.10 2014/07/03 07:18:42 skrll Exp $	*/

/*
 * hcd_queue.c - DesignWare HS OTG Controller host queuing routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the functions to manage Queue Heads and Queue
 * Transfer Descriptors for Host mode
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dwc2_hcdqueue.c,v 1.10 2014/07/03 07:18:42 skrll Exp $");

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/pool.h>

#include <dev/usb/usb.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdivar.h>
#include <dev/usb/usb_mem.h>

#include <machine/param.h>

#include <linux/kernel.h>

#include <dwc2/dwc2.h>
#include <dwc2/dwc2var.h>

#include "dwc2_core.h"
#include "dwc2_hcd.h"

static u32 dwc2_calc_bus_time(struct dwc2_hsotg *, int, int, int, int);

/**
 * dwc2_qh_init() - Initializes a QH structure
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 * @urb:   Holds the information about the device/endpoint needed to initialize
 *         the QH
 */
#define SCHEDULE_SLOP 10
static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			 struct dwc2_hcd_urb *urb)
{
	int dev_speed, hub_addr, hub_port;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Initialize QH */
	qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
	qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;

	qh->data_toggle = DWC2_HC_PID_DATA0;
	qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
	INIT_LIST_HEAD(&qh->qtd_list);
	INIT_LIST_HEAD(&qh->qh_list_entry);

	/* FS/LS Endpoint on HS Hub, NOT virtual root hub */
	dev_speed = dwc2_host_get_speed(hsotg, urb->priv);

	dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);

	if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) &&
	    hub_addr != 0 && hub_addr != 1) {
		dev_vdbg(hsotg->dev,
			 "QH init: EP %d: TT found at hub addr %d, for port %d\n",
			 dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr,
			 hub_port);
		qh->do_split = 1;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
	    qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
		/* Compute scheduling parameters once and save them */
		u32 hprt, prtspd;

		/* Todo: Account for split transfers in the bus time */
		int bytecount =
			dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);

		qh->usecs = dwc2_calc_bus_time(hsotg, qh->do_split ?
				USB_SPEED_HIGH : dev_speed, qh->ep_is_in,
				qh->ep_type == USB_ENDPOINT_XFER_ISOC,
				bytecount);
		/* Start in a slightly future (micro)frame */
		qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
						     SCHEDULE_SLOP);
		qh->interval = urb->interval;
#if 0
		/* Increase interrupt polling rate for debugging */
		if (qh->ep_type == USB_ENDPOINT_XFER_INT)
			qh->interval = 8;
#endif
		hprt = DWC2_READ_4(hsotg, HPRT0);
		prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
		if (prtspd == HPRT0_SPD_HIGH_SPEED &&
		    (dev_speed == USB_SPEED_LOW ||
		     dev_speed == USB_SPEED_FULL)) {
			qh->interval *= 8;
			qh->sched_frame |= 0x7;
			qh->start_split_frame = qh->sched_frame;
		}
		dev_dbg(hsotg->dev, "interval=%d\n", qh->interval);
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n");
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh);
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n",
		 dwc2_hcd_get_dev_addr(&urb->pipe_info));
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n",
		 dwc2_hcd_get_ep_num(&urb->pipe_info),
		 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");

	qh->dev_speed = dev_speed;

#ifdef DWC2_DEBUG
	const char *speed, *type;
	switch (dev_speed) {
	case USB_SPEED_LOW:
		speed = "low";
		break;
	case USB_SPEED_FULL:
		speed = "full";
		break;
	case USB_SPEED_HIGH:
		speed = "high";
		break;
	default:
		speed = "?";
		break;
	}
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed);

	switch (qh->ep_type) {
	case USB_ENDPOINT_XFER_ISOC:
		type = "isochronous";
		break;
	case USB_ENDPOINT_XFER_INT:
		type = "interrupt";
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		type = "control";
		break;
	case USB_ENDPOINT_XFER_BULK:
		type = "bulk";
		break;
	default:
		type = "?";
		break;
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type);
#endif

	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n",
			 qh->usecs);
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n",
			 qh->interval);
	}
}
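
/*
 * Editor's note: an illustrative sketch, not part of the driver, of the
 * interval/sched_frame arithmetic above for a full-speed interrupt
 * endpoint behind a high-speed root port.  The function name is
 * hypothetical and the 14-bit (micro)frame wrap mask is an assumption
 * made only for this example; the driver's own dwc2_frame_num_inc() is
 * authoritative.
 */
#if 0
static u16 dwc2_qh_init_sched_example(u16 frame_number)
{
	const u16 frame_mask = 0x3fff;	/* assumed (micro)frame wrap */
	int interval = 4;		/* urb->interval, in frames */
	u16 sched_frame;

	/* Start SCHEDULE_SLOP (micro)frames in the future */
	sched_frame = (frame_number + SCHEDULE_SLOP) & frame_mask;

	/* FS device on a HS root port: 1 frame = 8 microframes ... */
	interval *= 8;			/* poll every 32 microframes */

	/* ... and issue the start split in the last microframe (|= 0x7) */
	sched_frame |= 0x7;

	return sched_frame;
}
#endif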

/**
 * dwc2_hcd_qh_create() - Allocates and initializes a QH
 *
 * @hsotg:     The HCD state structure for the DWC OTG controller
 * @urb:       Holds the information about the device/endpoint needed
 *             to initialize the QH
 * @mem_flags: Flag to do atomic allocation if needed
 *
 * Return: Pointer to the newly allocated QH, or NULL on error
 */
static struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
					  struct dwc2_hcd_urb *urb,
					  gfp_t mem_flags)
{
	struct dwc2_softc *sc = hsotg->hsotg_sc;
	struct dwc2_qh *qh;

	if (!urb->priv)
		return NULL;

	/* Allocate memory */
	qh = pool_cache_get(sc->sc_qhpool, PR_NOWAIT);
	if (!qh)
		return NULL;

	memset(qh, 0, sizeof(*qh));
	dwc2_qh_init(hsotg, qh, urb);

	if (hsotg->core_params->dma_desc_enable > 0 &&
	    dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
		dwc2_hcd_qh_free(hsotg, qh);
		return NULL;
	}

	return qh;
}

/**
 * dwc2_hcd_qh_free() - Frees the QH
 *
 * @hsotg: HCD instance
 * @qh:    The QH to free
 *
 * QH should already be removed from the list. QTD list should already be empty
 * if called from URB Dequeue.
 *
 * Must NOT be called with interrupts disabled or a spinlock held
 */
void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	struct dwc2_softc *sc = hsotg->hsotg_sc;

	if (hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hcd_qh_free_ddma(hsotg, qh);
	} else if (qh->dw_align_buf) {
		/* XXXNH */
		usb_freemem(&hsotg->hsotg_sc->sc_bus, &qh->dw_align_buf_usbdma);
	}

	pool_cache_put(sc->sc_qhpool, qh);
}

/**
 * dwc2_periodic_channel_available() - Checks that a channel is available for a
 * periodic transfer
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
{
	/*
	 * Currently assuming that there is a dedicated host channel for
	 * each periodic transaction plus at least one host channel for
	 * non-periodic transactions
	 */
	int status;
	int num_channels;

	num_channels = hsotg->core_params->host_channels;
	if (hsotg->periodic_channels + hsotg->non_periodic_channels <
								num_channels
	    && hsotg->periodic_channels < num_channels - 1) {
		status = 0;
	} else {
		dev_dbg(hsotg->dev,
			"%s: Total channels: %d, Periodic: %d, "
			"Non-periodic: %d\n", __func__, num_channels,
			hsotg->periodic_channels, hsotg->non_periodic_channels);
		status = -ENOSPC;
	}

	return status;
}

/**
 * dwc2_check_periodic_bandwidth() - Checks that there is sufficient bandwidth
 * for the specified QH in the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH containing periodic bandwidth required
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * For simplicity, this calculation assumes that all the transfers in the
 * periodic schedule may occur in the same (micro)frame
 */
static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
					 struct dwc2_qh *qh)
{
	int status;
	s16 max_claimed_usecs;

	status = 0;

	if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
		/*
		 * High speed mode
		 * Max periodic usecs is 80% x 125 usec = 100 usec
		 */
		max_claimed_usecs = 100 - qh->usecs;
	} else {
		/*
		 * Full speed mode
		 * Max periodic usecs is 90% x 1000 usec = 900 usec
		 */
		max_claimed_usecs = 900 - qh->usecs;
	}

	if (hsotg->periodic_usecs > max_claimed_usecs) {
		dev_err(hsotg->dev,
			"%s: already claimed usecs %d, required usecs %d\n",
			__func__, hsotg->periodic_usecs, qh->usecs);
		status = -ENOSPC;
	}

	return status;
}
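
/*
 * Editor's note: a worked example, not part of the driver, of the budget
 * check above.  In high-speed mode at most 100 us of each 125 us
 * microframe may be claimed for periodic traffic, so a QH that needs
 * 30 us only fits while the already-claimed total is at most 70 us.
 * The function name below is hypothetical and never compiled.
 */
#if 0
static int dwc2_periodic_bandwidth_example(void)
{
	int periodic_usecs = 80;	/* already claimed (hsotg->periodic_usecs) */
	int qh_usecs = 30;		/* required by the new QH (qh->usecs) */

	/* 80 > 100 - 30, so this request would be rejected with -ENOSPC */
	return periodic_usecs > 100 - qh_usecs ? -ENOSPC : 0;
}
#endif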

/**
 * Microframe scheduler
 * track the total use in hsotg->frame_usecs
 * keep each qh use in qh->frame_usecs
 * when surrendering the qh, donate the time back
 */
static const unsigned short max_uframe_usecs[] = {
	100, 100, 100, 100, 100, 100, 30, 0
};

void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
{
	int i;

	for (i = 0; i < 8; i++)
		hsotg->frame_usecs[i] = max_uframe_usecs[i];
}

static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned short utime = qh->usecs;
	int i;

	for (i = 0; i < 8; i++) {
		/* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */
		if (utime <= hsotg->frame_usecs[i]) {
			hsotg->frame_usecs[i] -= utime;
			qh->frame_usecs[i] += utime;
			return i;
		}
	}
	return -ENOSPC;
}
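
/*
 * Editor's note: an illustrative sketch, not part of the driver, of the
 * bookkeeping done by dwc2_find_single_uframe() above and undone by
 * dwc2_deschedule_periodic() below, using local arrays in place of the
 * hsotg and qh fields.  The function name is hypothetical and the block
 * is never compiled.
 */
#if 0
static void dwc2_uframe_budget_example(void)
{
	unsigned short frame_usecs[8];		  /* stands in for hsotg->frame_usecs */
	unsigned short qh_frame_usecs[8] = { 0 }; /* stands in for qh->frame_usecs */
	unsigned short utime = 25;		  /* qh->usecs */
	int i;

	for (i = 0; i < 8; i++)
		frame_usecs[i] = max_uframe_usecs[i];

	/* Claim: microframe 0 has 100 us free, so the QH takes 25 us of it */
	frame_usecs[0] -= utime;		/* 75 us left in microframe 0 */
	qh_frame_usecs[0] += utime;

	/* Release on deschedule: donate the time back to the budget */
	frame_usecs[0] += qh_frame_usecs[0];
	qh_frame_usecs[0] = 0;
}
#endif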

/*
 * use this for FS apps that can span multiple uframes
 */
static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned short utime = qh->usecs;
	unsigned short xtime;
	int t_left;
	int i;
	int j;
	int k;

	for (i = 0; i < 8; i++) {
		if (hsotg->frame_usecs[i] <= 0)
			continue;

		/*
		 * we need a run of consecutive slots: start at slot i and
		 * keep adding the following slots' remaining time until
		 * there is enough (for now)
		 */
		xtime = hsotg->frame_usecs[i];
		for (j = i + 1; j < 8; j++) {
			/*
			 * if adding this frame's remaining time to xtime may
			 * be enough we are OK, if not we need to test j for a
			 * complete frame
			 */
			if (xtime + hsotg->frame_usecs[j] < utime) {
				if (hsotg->frame_usecs[j] <
							max_uframe_usecs[j])
					continue;
			}
			if (xtime >= utime) {
				t_left = utime;
				for (k = i; k < 8; k++) {
					t_left -= hsotg->frame_usecs[k];
					if (t_left <= 0) {
						qh->frame_usecs[k] +=
							hsotg->frame_usecs[k]
								+ t_left;
						hsotg->frame_usecs[k] = -t_left;
						return i;
					} else {
						qh->frame_usecs[k] +=
							hsotg->frame_usecs[k];
						hsotg->frame_usecs[k] = 0;
					}
				}
			}
			/* add the frame time to xtime */
			xtime += hsotg->frame_usecs[j];
			/* we must have a fully available next frame or break */
			if (xtime < utime &&
			   hsotg->frame_usecs[j] == max_uframe_usecs[j])
				continue;
		}
	}
	return -ENOSPC;
}

static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int ret;

	if (qh->dev_speed == USB_SPEED_HIGH) {
		/* if this is a hs transaction we need a full frame */
		ret = dwc2_find_single_uframe(hsotg, qh);
	} else {
		/*
		 * if this is a fs transaction we may need a sequence
		 * of frames
		 */
		ret = dwc2_find_multi_uframe(hsotg, qh);
	}
	return ret;
}

/**
 * dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a
 * host channel is large enough to handle the maximum data transfer in a single
 * (micro)frame for a periodic transfer
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for a periodic endpoint
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh)
{
	u32 max_xfer_size;
	u32 max_channel_xfer_size;
	int status = 0;

	max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
	max_channel_xfer_size = hsotg->core_params->max_transfer_size;

	if (max_xfer_size > max_channel_xfer_size) {
		dev_err(hsotg->dev,
			"%s: Periodic xfer length %d > max xfer length for channel %d\n",
			__func__, max_xfer_size, max_channel_xfer_size);
		status = -ENOSPC;
	}

	return status;
}

/**
 * dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in
 * the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer. The QH should already contain the
 *         scheduling information.
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;

	if (hsotg->core_params->uframe_sched > 0) {
		int frame = -1;

		status = dwc2_find_uframe(hsotg, qh);
		if (status == 0)
			frame = 7;
		else if (status > 0)
			frame = status - 1;

		/* Set the new frame up */
		if (frame >= 0) {
			qh->sched_frame &= ~0x7;
			qh->sched_frame |= (frame & 7);
		}

		if (status > 0)
			status = 0;
	} else {
		status = dwc2_periodic_channel_available(hsotg);
		if (status) {
			dev_info(hsotg->dev,
				 "%s: No host channel available for periodic transfer\n",
				 __func__);
			return status;
		}

		status = dwc2_check_periodic_bandwidth(hsotg, qh);
	}

	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Insufficient periodic bandwidth for periodic transfer\n",
			__func__);
		return status;
	}

	status = dwc2_check_max_xfer_size(hsotg, qh);
	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Channel max transfer size too small for periodic transfer\n",
			__func__);
		return status;
	}

	if (hsotg->core_params->dma_desc_enable > 0)
		/* Don't rely on SOF and start in ready schedule */
		list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
	else
		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->periodic_sched_inactive);

	if (hsotg->core_params->uframe_sched <= 0)
		/* Reserve periodic channel */
		hsotg->periodic_channels++;

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs += qh->usecs;

	return status;
}

/**
 * dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
 * from the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:	   QH for the periodic transfer
 */
static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
				     struct dwc2_qh *qh)
{
	int i;

	list_del_init(&qh->qh_list_entry);

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs -= qh->usecs;

	if (hsotg->core_params->uframe_sched > 0) {
		for (i = 0; i < 8; i++) {
			hsotg->frame_usecs[i] += qh->frame_usecs[i];
			qh->frame_usecs[i] = 0;
		}
	} else {
		/* Release periodic channel reservation */
		hsotg->periodic_channels--;
	}
}

/**
 * dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic
 * schedule if it is not already in the schedule. If the QH is already in
 * the schedule, no action is taken.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to add
 *
 * Return: 0 if successful, negative error code otherwise
 */
int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;
	u32 intr_mask;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!list_empty(&qh->qh_list_entry))
		/* QH already in a schedule */
		return 0;

	/* Add the new QH to the appropriate schedule */
	if (dwc2_qh_is_non_per(qh)) {
		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->non_periodic_sched_inactive);
		return 0;
	}
	status = dwc2_schedule_periodic(hsotg, qh);
	if (status)
		return status;
	if (!hsotg->periodic_qh_count) {
		intr_mask = DWC2_READ_4(hsotg, GINTMSK);
		intr_mask |= GINTSTS_SOF;
		DWC2_WRITE_4(hsotg, GINTMSK, intr_mask);
	}
	hsotg->periodic_qh_count++;

	return 0;
}

/**
 * dwc2_hcd_qh_unlink() - Removes a QH from either the non-periodic or periodic
 * schedule. Memory is not freed.
 *
 * @hsotg: The HCD state structure
 * @qh:    QH to remove from schedule
 */
void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	u32 intr_mask;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (list_empty(&qh->qh_list_entry))
		/* QH is not in a schedule */
		return;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
			hsotg->non_periodic_qh_ptr =
					hsotg->non_periodic_qh_ptr->next;
		list_del_init(&qh->qh_list_entry);
		return;
	}
	dwc2_deschedule_periodic(hsotg, qh);
	hsotg->periodic_qh_count--;
	if (!hsotg->periodic_qh_count) {
		intr_mask = DWC2_READ_4(hsotg, GINTMSK);
		intr_mask &= ~GINTSTS_SOF;
		DWC2_WRITE_4(hsotg, GINTMSK, intr_mask);
	}
}

/*
 * Schedule the next continuing periodic split transfer
 */
static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh, u16 frame_number,
				      int sched_next_periodic_split)
{
	u16 incr;

	if (sched_next_periodic_split) {
		qh->sched_frame = frame_number;
		incr = dwc2_frame_num_inc(qh->start_split_frame, 1);
		if (dwc2_frame_num_le(frame_number, incr)) {
			/*
			 * Allow one frame to elapse after start split
			 * microframe before scheduling complete split, but
			 * DON'T if we are doing the next start split in the
			 * same frame for an ISOC out
			 */
			if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
			    qh->ep_is_in != 0) {
				qh->sched_frame =
					dwc2_frame_num_inc(qh->sched_frame, 1);
			}
		}
	} else {
		qh->sched_frame = dwc2_frame_num_inc(qh->start_split_frame,
						     qh->interval);
		if (dwc2_frame_num_le(qh->sched_frame, frame_number))
			qh->sched_frame = frame_number;
		qh->sched_frame |= 0x7;
		qh->start_split_frame = qh->sched_frame;
	}
}

/*
 * Deactivates a QH. For non-periodic QHs, removes the QH from the active
 * non-periodic schedule. The QH is added to the inactive non-periodic
 * schedule if any QTDs are still attached to the QH.
 *
 * For periodic QHs, the QH is removed from the periodic queued schedule. If
 * there are any QTDs still attached to the QH, the QH is added to either the
 * periodic inactive schedule or the periodic ready schedule and its next
 * scheduled frame is calculated. The QH is placed in the ready schedule if
 * the scheduled frame has been reached already. Otherwise it's placed in the
 * inactive schedule. If there are no QTDs attached to the QH, the QH is
 * completely removed from the periodic schedule.
 */
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			    int sched_next_periodic_split)
{
	u16 frame_number;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (dwc2_qh_is_non_per(qh)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		if (!list_empty(&qh->qtd_list))
			/* Add back to inactive non-periodic schedule */
			dwc2_hcd_qh_add(hsotg, qh);
		return;
	}

	frame_number = dwc2_hcd_get_frame_number(hsotg);

	if (qh->do_split) {
		dwc2_sched_periodic_split(hsotg, qh, frame_number,
					  sched_next_periodic_split);
	} else {
		qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame,
						     qh->interval);
		if (dwc2_frame_num_le(qh->sched_frame, frame_number))
			qh->sched_frame = frame_number;
	}

	if (list_empty(&qh->qtd_list)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		return;
	}
	/*
	 * Remove from periodic_sched_queued and move to
	 * appropriate queue
	 */
	if ((hsotg->core_params->uframe_sched > 0 &&
	     dwc2_frame_num_le(qh->sched_frame, frame_number)) ||
	    (hsotg->core_params->uframe_sched <= 0 &&
	     qh->sched_frame == frame_number))
		list_move(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
	else
		list_move(&qh->qh_list_entry, &hsotg->periodic_sched_inactive);
}

/**
 * dwc2_hcd_qtd_init() - Initializes a QTD structure
 *
 * @qtd: The QTD to initialize
 * @urb: The associated URB
 */
void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
{
	qtd->urb = urb;
	if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
			USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * The only time the QTD data toggle is used is on the data
		 * phase of control transfers. This phase always starts with
		 * DATA1.
		 */
		qtd->data_toggle = DWC2_HC_PID_DATA1;
		qtd->control_phase = DWC2_CONTROL_SETUP;
	}

	/* Start split */
	qtd->complete_split = 0;
	qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
	qtd->isoc_split_offset = 0;
	qtd->in_process = 0;

	/* Store the qtd ptr in the urb to reference the QTD */
	urb->qtd = qtd;
}

/**
 * dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
 *
 * @hsotg:     The DWC HCD structure
 * @qtd:       The QTD to add
 * @qh:        Out parameter to return queue head
 * @mem_flags: Flag to do atomic alloc if needed
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Finds the correct QH to place the QTD into. If it does not find a QH, it
 * will create a new QH. If the QH to which the QTD is added is not currently
 * scheduled, it is placed into the proper schedule based on its EP type.
 *
 * HCD lock must be held and interrupts must be disabled on entry
 */
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
		     struct dwc2_qh **qh, gfp_t mem_flags)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	int allocated = 0;
	int retval;

	/*
	 * Get the QH which holds the QTD-list to insert to. Create QH if it
	 * doesn't exist.
	 */
	if (*qh == NULL) {
		*qh = dwc2_hcd_qh_create(hsotg, urb, mem_flags);
		if (*qh == NULL)
			return -ENOMEM;
		allocated = 1;
	}

	retval = dwc2_hcd_qh_add(hsotg, *qh);
	if (retval)
		goto fail;

	qtd->qh = *qh;
	list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list);

	return 0;

fail:
	if (allocated) {
		struct dwc2_qtd *qtd2, *qtd2_tmp;
		struct dwc2_qh *qh_tmp = *qh;

		*qh = NULL;
		dwc2_hcd_qh_unlink(hsotg, qh_tmp);

		/* Free each QTD in the QH's QTD list */
		list_for_each_entry_safe(qtd2, qtd2_tmp, &qh_tmp->qtd_list,
					 qtd_list_entry)
			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh_tmp);

		dwc2_hcd_qh_free(hsotg, qh_tmp);
	}

	return retval;
}

void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
				  struct dwc2_qtd *qtd,
				  struct dwc2_qh *qh)
{
	struct dwc2_softc *sc = hsotg->hsotg_sc;

	list_del_init(&qtd->qtd_list_entry);
	pool_cache_put(sc->sc_qtdpool, qtd);
}

#define BITSTUFFTIME(bytecount)	((8 * 7 * (bytecount)) / 6)
#define HS_HOST_DELAY		5	/* nanoseconds */
#define FS_LS_HOST_DELAY	1000	/* nanoseconds */
#define HUB_LS_SETUP		333	/* nanoseconds */

static u32 dwc2_calc_bus_time(struct dwc2_hsotg *hsotg, int speed, int is_in,
			      int is_isoc, int bytecount)
{
	unsigned long retval;

	switch (speed) {
	case USB_SPEED_HIGH:
		if (is_isoc)
			retval =
			    ((38 * 8 * 2083) +
			     (2083 * (3 + BITSTUFFTIME(bytecount)))) / 1000 +
			    HS_HOST_DELAY;
		else
			retval =
			    ((55 * 8 * 2083) +
			     (2083 * (3 + BITSTUFFTIME(bytecount)))) / 1000 +
			    HS_HOST_DELAY;
		break;
	case USB_SPEED_FULL:
		if (is_isoc) {
			retval =
			    (8354 * (31 + 10 * BITSTUFFTIME(bytecount))) / 1000;
			if (is_in)
				retval = 7268 + FS_LS_HOST_DELAY + retval;
			else
				retval = 6265 + FS_LS_HOST_DELAY + retval;
		} else {
			retval =
			    (8354 * (31 + 10 * BITSTUFFTIME(bytecount))) / 1000;
			retval = 9107 + FS_LS_HOST_DELAY + retval;
		}
		break;
	case USB_SPEED_LOW:
		if (is_in) {
			retval =
			    (67667 * (31 + 10 * BITSTUFFTIME(bytecount))) /
			    1000;
			retval =
			    64060 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
			    retval;
		} else {
			retval =
			    (66700 * (31 + 10 * BITSTUFFTIME(bytecount))) /
			    1000;
			retval =
			    64107 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
			    retval;
		}
		break;
	default:
		dev_warn(hsotg->dev, "Unknown device speed\n");
		retval = -1;
	}

	return NS_TO_US(retval);
}
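
/*
 * Editor's note: a worked example, not part of the driver, of the
 * high-speed non-isochronous branch of dwc2_calc_bus_time() for a
 * 64-byte packet.  The nanosecond figure is converted to microseconds
 * by NS_TO_US() in the function above.  The function name below is
 * hypothetical and never compiled.
 */
#if 0
static unsigned long dwc2_bus_time_example(void)
{
	int bytecount = 64;

	/*
	 * BITSTUFFTIME(64) = (8 * 7 * 64) / 6 = 597
	 * ((55 * 8 * 2083) + (2083 * (3 + 597))) / 1000 + HS_HOST_DELAY
	 *   = (916520 + 1249800) / 1000 + 5
	 *   = 2171 nanoseconds
	 */
	return ((55 * 8 * 2083) +
		(2083 * (3 + BITSTUFFTIME(bytecount)))) / 1000 + HS_HOST_DELAY;
}
#endif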