1 /*	$NetBSD: dwc2_hcdqueue.c,v 1.16 2021/12/21 09:51:22 skrll Exp $	*/
2 
3 /*
4  * hcd_queue.c - DesignWare HS OTG Controller host queuing routines
5  *
6  * Copyright (C) 2004-2013 Synopsys, Inc.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions, and the following disclaimer,
13  *    without modification.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. The names of the above-listed copyright holders may not be used
18  *    to endorse or promote products derived from this software without
19  *    specific prior written permission.
20  *
21  * ALTERNATIVELY, this software may be distributed under the terms of the
22  * GNU General Public License ("GPL") as published by the Free Software
23  * Foundation; either version 2 of the License, or (at your option) any
24  * later version.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
27  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
28  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
30  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * This file contains the functions to manage Queue Heads and Queue
41  * Transfer Descriptors for Host mode
42  */
43 
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: dwc2_hcdqueue.c,v 1.16 2021/12/21 09:51:22 skrll Exp $");
46 
47 #include <sys/types.h>
48 #include <sys/kmem.h>
49 #include <sys/pool.h>
50 
51 #include <dev/usb/usb.h>
52 #include <dev/usb/usbdi.h>
53 #include <dev/usb/usbdivar.h>
54 #include <dev/usb/usb_mem.h>
55 
56 #include <machine/param.h>
57 
58 #include <linux/kernel.h>
59 
60 #include <dwc2/dwc2.h>
61 #include <dwc2/dwc2var.h>
62 
63 #include "dwc2_core.h"
64 #include "dwc2_hcd.h"
65 
66 static u32 dwc2_calc_bus_time(struct dwc2_hsotg *, int, int, int, int);
67 static void dwc2_wait_timer_fn(void *);
68 
69 /* If we get a NAK, wait this long before retrying */
70 #define DWC2_RETRY_WAIT_DELAY 1	/* msec */
71 
72 /**
73  * dwc2_qh_init() - Initializes a QH structure
74  *
75  * @hsotg: The HCD state structure for the DWC OTG controller
76  * @qh:    The QH to init
77  * @urb:   Holds the information about the device/endpoint needed to initialize
78  *         the QH
79  */
80 #define SCHEDULE_SLOP 10
81 static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
82 			 struct dwc2_hcd_urb *urb)
83 {
84 	int dev_speed, hub_addr, hub_port;
85 
86 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
87 
88 	/* Initialize QH */
89 	qh->hsotg = hsotg;
90 	/* XXX timer_setup(&qh->wait_timer, dwc2_wait_timer_fn, 0); */
91 	callout_init(&qh->wait_timer, 0);
92 	callout_setfunc(&qh->wait_timer, dwc2_wait_timer_fn, qh);
93 	qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
94 	qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;
95 
96 	qh->data_toggle = DWC2_HC_PID_DATA0;
97 	qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
98 	INIT_LIST_HEAD(&qh->qtd_list);
99 	INIT_LIST_HEAD(&qh->qh_list_entry);
100 
101 	/* FS/LS Endpoint on HS Hub, NOT virtual root hub */
102 	dev_speed = dwc2_host_get_speed(hsotg, urb->priv);
103 
104 	dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);
105 	qh->nak_frame = 0xffff;
106 
107 	if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) &&
108 	    hub_addr != 0 && hub_addr != 1) {
109 		dev_vdbg(hsotg->dev,
110 			 "QH init: EP %d: TT found at hub addr %d, for port %d\n",
111 			 dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr,
112 			 hub_port);
113 		qh->do_split = 1;
114 	}
115 
116 	if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
117 	    qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
118 		/* Compute scheduling parameters once and save them */
119 		u32 hprt, prtspd;
120 
121 		/* Todo: Account for split transfers in the bus time */
122 		int bytecount =
123 			dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);
124 
125 		qh->usecs = dwc2_calc_bus_time(hsotg, qh->do_split ?
126 				USB_SPEED_HIGH : dev_speed, qh->ep_is_in,
127 				qh->ep_type == USB_ENDPOINT_XFER_ISOC,
128 				bytecount);
129 
130 		/* Ensure frame_number matches the controller's current frame */
131 		hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
132 		/* Start in a slightly future (micro)frame */
133 		qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
134 						     SCHEDULE_SLOP);
135 		qh->interval = urb->interval;
136 #if 0
137 		/* Increase interrupt polling rate for debugging */
138 		if (qh->ep_type == USB_ENDPOINT_XFER_INT)
139 			qh->interval = 8;
140 #endif
141 		hprt = DWC2_READ_4(hsotg, HPRT0);
142 		prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
143 		if (prtspd == HPRT0_SPD_HIGH_SPEED &&
144 		    (dev_speed == USB_SPEED_LOW ||
145 		     dev_speed == USB_SPEED_FULL)) {
146 			qh->interval *= 8;
147 			qh->sched_frame |= 0x7;
148 			qh->start_split_frame = qh->sched_frame;
149 		}
150 		dev_dbg(hsotg->dev, "interval=%d\n", qh->interval);
151 	}
152 
153 	dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n");
154 	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh);
155 	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n",
156 		 dwc2_hcd_get_dev_addr(&urb->pipe_info));
157 	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n",
158 		 dwc2_hcd_get_ep_num(&urb->pipe_info),
159 		 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
160 
161 	qh->dev_speed = dev_speed;
162 
163 #ifdef DWC2_DEBUG
164 	const char *speed, *type;
165 	switch (dev_speed) {
166 	case USB_SPEED_LOW:
167 		speed = "low";
168 		break;
169 	case USB_SPEED_FULL:
170 		speed = "full";
171 		break;
172 	case USB_SPEED_HIGH:
173 		speed = "high";
174 		break;
175 	default:
176 		speed = "?";
177 		break;
178 	}
179 	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed);
180 
181 	switch (qh->ep_type) {
182 	case USB_ENDPOINT_XFER_ISOC:
183 		type = "isochronous";
184 		break;
185 	case USB_ENDPOINT_XFER_INT:
186 		type = "interrupt";
187 		break;
188 	case USB_ENDPOINT_XFER_CONTROL:
189 		type = "control";
190 		break;
191 	case USB_ENDPOINT_XFER_BULK:
192 		type = "bulk";
193 		break;
194 	default:
195 		type = "?";
196 		break;
197 	}
198 
199 	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type);
200 #endif
201 
202 	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
203 		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n",
204 			 qh->usecs);
205 		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n",
206 			 qh->interval);
207 	}
208 }
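
/*
 * Worked example (illustrative numbers, not from a specific device): for a
 * full-speed or low-speed periodic endpoint behind a hub on a high-speed
 * root port, the interval supplied in the URB is in frames while the
 * high-speed schedule runs in microframes.  With urb->interval == 6,
 * dwc2_qh_init() above sets qh->interval = 6 * 8 = 48 microframes, forces
 * qh->sched_frame to the last microframe of its frame (sched_frame |= 0x7)
 * and records that as qh->start_split_frame for the split handling.
 */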
209 
210 /**
211  * dwc2_hcd_qh_create() - Allocates and initializes a QH
212  *
213  * @hsotg:     The HCD state structure for the DWC OTG controller
214  * @urb:       Holds the information about the device/endpoint needed
215  *             to initialize the QH
216  * @mem_flags: Flag to do atomic allocation if needed
217  *
218  * Return: Pointer to the newly allocated QH, or NULL on error
219  */
220 struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
221 					  struct dwc2_hcd_urb *urb,
222 					  gfp_t mem_flags)
223 {
224 	struct dwc2_softc *sc = hsotg->hsotg_sc;
225 	struct dwc2_qh *qh;
226 
227 	if (!urb->priv)
228 		return NULL;
229 
230 	/* Allocate memory */
231 	qh = pool_cache_get(sc->sc_qhpool, PR_NOWAIT);
232 	if (!qh)
233 		return NULL;
234 
235 	memset(qh, 0, sizeof(*qh));
236 	dwc2_qh_init(hsotg, qh, urb);
237 
238 	if (hsotg->core_params->dma_desc_enable > 0 &&
239 	    dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
240 		dwc2_hcd_qh_free(hsotg, qh);
241 		return NULL;
242 	}
243 
244 	return qh;
245 }
246 
247 /**
248  * dwc2_hcd_qh_free() - Frees the QH
249  *
250  * @hsotg: HCD instance
251  * @qh:    The QH to free
252  *
253  * QH should already be removed from the list. QTD list should already be empty
254  * if called from URB Dequeue.
255  *
256  * Must NOT be called with interrupt disabled or spinlock held
257  */
258 void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
259 {
260 	struct dwc2_softc *sc = hsotg->hsotg_sc;
261 
262 	/*
263 	 * We don't hold the lock here, so it is safe to wait for the wait
264 	 * timer to finish.  By this point wait_timer_cancel should already
265 	 * have been set, so even if the callout is still pending it won't
266 	 * do anything, but we want it to finish before we free the QH's
267 	 * memory.
268 	 */
269 	/* XXX del_timer_sync(&qh->wait_timer); */
270 	callout_destroy(&qh->wait_timer);	/* XXX need to callout_halt() first? */
271 
272 	if (qh->desc_list) {
273 		dwc2_hcd_qh_free_ddma(hsotg, qh);
274 	} else if (qh->dw_align_buf) {
275 		usb_freemem(&qh->dw_align_buf_usbdma);
276  		qh->dw_align_buf_dma = (dma_addr_t)0;
277 	}
278 
279 	pool_cache_put(sc->sc_qhpool, qh);
280 }
281 
282 /**
283  * dwc2_periodic_channel_available() - Checks that a channel is available for a
284  * periodic transfer
285  *
286  * @hsotg: The HCD state structure for the DWC OTG controller
287  *
288  * Return: 0 if successful, negative error code otherwise
289  */
290 static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
291 {
292 	/*
293 	 * Currently assuming that there is a dedicated host channel for
294 	 * each periodic transaction plus at least one host channel for
295 	 * non-periodic transactions
296 	 */
297 	int status;
298 	int num_channels;
299 
300 	num_channels = hsotg->core_params->host_channels;
301 	if (hsotg->periodic_channels + hsotg->non_periodic_channels <
302 								num_channels
303 	    && hsotg->periodic_channels < num_channels - 1) {
304 		status = 0;
305 	} else {
306 		dev_dbg(hsotg->dev,
307 			"%s: Total channels: %d, Periodic: %d, "
308 			"Non-periodic: %d\n", __func__, num_channels,
309 			hsotg->periodic_channels, hsotg->non_periodic_channels);
310 		status = -ENOSPC;
311 	}
312 
313 	return status;
314 }
315 
316 /**
317  * dwc2_check_periodic_bandwidth() - Checks that there is sufficient bandwidth
318  * for the specified QH in the periodic schedule
319  *
320  * @hsotg: The HCD state structure for the DWC OTG controller
321  * @qh:    QH containing periodic bandwidth required
322  *
323  * Return: 0 if successful, negative error code otherwise
324  *
325  * For simplicity, this calculation assumes that all the transfers in the
326  * periodic schedule may occur in the same (micro)frame
327  */
328 static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
329 					 struct dwc2_qh *qh)
330 {
331 	int status;
332 	s16 max_claimed_usecs;
333 
334 	status = 0;
335 
336 	if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
337 		/*
338 		 * High speed mode
339 		 * Max periodic usecs is 80% x 125 usec = 100 usec
340 		 */
341 		max_claimed_usecs = 100 - qh->usecs;
342 	} else {
343 		/*
344 		 * Full speed mode
345 		 * Max periodic usecs is 90% x 1000 usec = 900 usec
346 		 */
347 		max_claimed_usecs = 900 - qh->usecs;
348 	}
349 
350 	if (hsotg->periodic_usecs > max_claimed_usecs) {
351 		dev_err(hsotg->dev,
352 			"%s: already claimed usecs %d, required usecs %d\n",
353 			__func__, hsotg->periodic_usecs, qh->usecs);
354 		status = -ENOSPC;
355 	}
356 
357 	return status;
358 }
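
/*
 * Worked example (illustrative numbers): on a high-speed schedule the
 * budget is 80% of a 125 us microframe, i.e. 100 us.  If QHs already
 * claim hsotg->periodic_usecs = 60 us and a new QH needs qh->usecs = 50,
 * then max_claimed_usecs = 100 - 50 = 50 and 60 > 50, so the check above
 * fails with -ENOSPC.  With qh->usecs = 30 the limit would be 70 and the
 * reservation would succeed.  The full-speed budget works the same way
 * against the 900 us limit.
 */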
359 
360 /**
361  * Microframe scheduler
362  * Tracks the total bus time in use in hsotg->frame_usecs and the time
363  * claimed by each QH in qh->frame_usecs.  When a QH gives up its slots,
364  * the time is donated back to hsotg->frame_usecs.
365  */
366 static const unsigned short max_uframe_usecs[] = {
367 	100, 100, 100, 100, 100, 100, 30, 0
368 };
369 
370 void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
371 {
372 	int i;
373 
374 	for (i = 0; i < 8; i++)
375 		hsotg->frame_usecs[i] = max_uframe_usecs[i];
376 }
377 
378 static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
379 {
380 	unsigned short utime = qh->usecs;
381 	int i;
382 
383 	for (i = 0; i < 8; i++) {
384 		/* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */
385 		if (utime <= hsotg->frame_usecs[i]) {
386 			hsotg->frame_usecs[i] -= utime;
387 			qh->frame_usecs[i] += utime;
388 			return i;
389 		}
390 	}
391 	return -ENOSPC;
392 }
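
/*
 * Example (illustrative): with a freshly initialized budget of
 * { 100, 100, 100, 100, 100, 100, 30, 0 } us, a QH needing 100 us claims
 * all of microframe 0 and dwc2_find_single_uframe() returns 0; a second
 * QH needing 40 us then lands in microframe 1, leaving 60 us there.
 * Nothing is ever placed in microframe 7, whose budget is 0.
 */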
393 
394 /*
395  * Use this for FS transfers that may need to span multiple microframes
396  */
397 static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
398 {
399 	unsigned short utime = qh->usecs;
400 	unsigned short xtime;
401 	int t_left;
402 	int i;
403 	int j;
404 	int k;
405 
406 	for (i = 0; i < 8; i++) {
407 		if (hsotg->frame_usecs[i] <= 0)
408 			continue;
409 
410 		/*
411 		 * We need enough time in consecutive microframes: start at
412 		 * slot i and accumulate slots i..j until they cover utime.
413 		 */
414 		xtime = hsotg->frame_usecs[i];
415 		for (j = i + 1; j < 8; j++) {
416 			/*
417 			 * If adding slot j's remaining time still isn't enough,
418 			 * only count j when it is a completely free microframe.
419 			 */
420 			if (xtime + hsotg->frame_usecs[j] < utime) {
421 				if (hsotg->frame_usecs[j] <
422 							max_uframe_usecs[j])
423 					continue;
424 			}
425 			if (xtime >= utime) {
426 				t_left = utime;
427 				for (k = i; k < 8; k++) {
428 					t_left -= hsotg->frame_usecs[k];
429 					if (t_left <= 0) {
430 						qh->frame_usecs[k] +=
431 							hsotg->frame_usecs[k]
432 								+ t_left;
433 						hsotg->frame_usecs[k] = -t_left;
434 						return i;
435 					} else {
436 						qh->frame_usecs[k] +=
437 							hsotg->frame_usecs[k];
438 						hsotg->frame_usecs[k] = 0;
439 					}
440 				}
441 			}
442 			/* add this microframe's remaining time to xtime */
443 			xtime += hsotg->frame_usecs[j];
444 			/* we must have a fully available next frame or break */
445 			if (xtime < utime &&
446 			   hsotg->frame_usecs[j] == max_uframe_usecs[j])
447 				continue;
448 		}
449 	}
450 	return -ENOSPC;
451 }
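
/*
 * Example (illustrative): with every microframe at its initial budget, a
 * full-speed QH needing 150 us starts at i = 0 with xtime = 100.  Once
 * xtime reaches 200 >= 150, the inner loop hands out the time: microframe
 * 0 donates its full 100 us, microframe 1 donates 50 us (keeping 50), and
 * the function returns 0, the first microframe of the sequence.
 */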
452 
453 static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
454 {
455 	int ret;
456 
457 	if (qh->dev_speed == USB_SPEED_HIGH) {
458 		/* an HS transaction must fit within a single microframe */
459 		ret = dwc2_find_single_uframe(hsotg, qh);
460 	} else {
461 		/*
462 		 * an FS transaction may need to span a sequence of
463 		 * microframes
464 		 */
465 		ret = dwc2_find_multi_uframe(hsotg, qh);
466 	}
467 	return ret;
468 }
469 
470 /**
471  * dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a
472  * host channel is large enough to handle the maximum data transfer in a single
473  * (micro)frame for a periodic transfer
474  *
475  * @hsotg: The HCD state structure for the DWC OTG controller
476  * @qh:    QH for a periodic endpoint
477  *
478  * Return: 0 if successful, negative error code otherwise
479  */
480 static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
481 				    struct dwc2_qh *qh)
482 {
483 	u32 max_xfer_size;
484 	u32 max_channel_xfer_size;
485 	int status = 0;
486 
487 	max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
488 	max_channel_xfer_size = hsotg->core_params->max_transfer_size;
489 
490 	if (max_xfer_size > max_channel_xfer_size) {
491 		dev_err(hsotg->dev,
492 			"%s: Periodic xfer length %d > max xfer length for channel %d\n",
493 			__func__, max_xfer_size, max_channel_xfer_size);
494 		status = -ENOSPC;
495 	}
496 
497 	return status;
498 }
499 
500 /**
501  * dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in
502  * the periodic schedule
503  *
504  * @hsotg: The HCD state structure for the DWC OTG controller
505  * @qh:    QH for the periodic transfer. The QH should already contain the
506  *         scheduling information.
507  *
508  * Return: 0 if successful, negative error code otherwise
509  */
510 static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
511 {
512 	int status;
513 
514 	if (hsotg->core_params->uframe_sched > 0) {
515 		int frame = -1;
516 
517 		status = dwc2_find_uframe(hsotg, qh);
518 		if (status == 0)
519 			frame = 7;
520 		else if (status > 0)
521 			frame = status - 1;
522 
523 		/* Set the new frame up */
524 		if (frame >= 0) {
525 			qh->sched_frame &= ~0x7;
526 			qh->sched_frame |= (frame & 7);
527 		}
528 
529 		if (status > 0)
530 			status = 0;
531 	} else {
532 		status = dwc2_periodic_channel_available(hsotg);
533 		if (status) {
534 			dev_info(hsotg->dev,
535 				 "%s: No host channel available for periodic transfer\n",
536 				 __func__);
537 			return status;
538 		}
539 
540 		status = dwc2_check_periodic_bandwidth(hsotg, qh);
541 	}
542 
543 	if (status) {
544 		dev_dbg(hsotg->dev,
545 			"%s: Insufficient periodic bandwidth for periodic transfer\n",
546 			__func__);
547 		return status;
548 	}
549 
550 	status = dwc2_check_max_xfer_size(hsotg, qh);
551 	if (status) {
552 		dev_dbg(hsotg->dev,
553 			"%s: Channel max transfer size too small for periodic transfer\n",
554 			__func__);
555 		return status;
556 	}
557 
558 	if (hsotg->core_params->dma_desc_enable > 0)
559 		/* Don't rely on SOF and start in ready schedule */
560 		list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
561 	else
562 		/* Always start in inactive schedule */
563 		list_add_tail(&qh->qh_list_entry,
564 			      &hsotg->periodic_sched_inactive);
565 
566 	if (hsotg->core_params->uframe_sched <= 0)
567 		/* Reserve periodic channel */
568 		hsotg->periodic_channels++;
569 
570 	/* Update claimed usecs per (micro)frame */
571 	hsotg->periodic_usecs += qh->usecs;
572 
573 	return status;
574 }
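
/*
 * Example (illustrative numbers): if dwc2_find_uframe() claims slot 3, the
 * code above picks frame = 2, so a sched_frame of 0x1f0 becomes 0x1f2; a
 * claim of slot 0 maps to frame 7 instead.  A negative result skips the
 * remapping and is eventually returned to the caller as -ENOSPC.
 */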
575 
576 /**
577  * dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
578  * from the periodic schedule
579  *
580  * @hsotg: The HCD state structure for the DWC OTG controller
581  * @qh:	   QH for the periodic transfer
582  */
583 static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
584 				     struct dwc2_qh *qh)
585 {
586 	int i;
587 
588 	list_del_init(&qh->qh_list_entry);
589 
590 	/* Update claimed usecs per (micro)frame */
591 	hsotg->periodic_usecs -= qh->usecs;
592 
593 	if (hsotg->core_params->uframe_sched > 0) {
594 		for (i = 0; i < 8; i++) {
595 			hsotg->frame_usecs[i] += qh->frame_usecs[i];
596 			qh->frame_usecs[i] = 0;
597 		}
598 	} else {
599 		/* Release periodic channel reservation */
600 		hsotg->periodic_channels--;
601 	}
602 }
603 
604 /**
605  * dwc2_wait_timer_fn() - Timer function to re-queue after waiting
606  *
607  * As per the spec, a NAK indicates that "a function is temporarily unable to
608  * transmit or receive data, but will eventually be able to do so without need
609  * of host intervention".
610  *
611  * That means that when we encounter a NAK we're supposed to retry.
612  *
613  * ...but if we retry right away (from the interrupt handler that saw the NAK)
614  * then we can end up with an interrupt storm (if the other side keeps NAKing
615  * us) because on slow enough CPUs it could take us longer to get out of the
616  * interrupt routine than it takes for the device to send another NAK.  That
617  * leads to a constant stream of NAK interrupts and the CPU locks up.
618  *
619  * ...so instead of retrying right away in the case of a NAK we'll set a timer
620  * to retry some time later.  This function handles that timer and moves the
621  * qh back to the "inactive" list, then queues transactions.
622  *
623  * @arg: The QH whose wait timer has expired
624  */
625 static void dwc2_wait_timer_fn(void *arg)
626 {
627 	struct dwc2_qh *qh = arg;
628 	struct dwc2_hsotg *hsotg = qh->hsotg;
629 	unsigned long flags;
630 
631 	spin_lock_irqsave(&hsotg->lock, flags);
632 
633 	/*
634 	 * We'll set wait_timer_cancel to true if we want to cancel this
635 	 * operation in dwc2_hcd_qh_unlink().
636 	 */
637 	if (!qh->wait_timer_cancel) {
638 		enum dwc2_transaction_type tr_type;
639 
640 		qh->want_wait = false;
641 
642 		list_move(&qh->qh_list_entry,
643 			  &hsotg->non_periodic_sched_inactive);
644 
645 		tr_type = dwc2_hcd_select_transactions(hsotg);
646 		if (tr_type != DWC2_TRANSACTION_NONE)
647 			dwc2_hcd_queue_transactions(hsotg, tr_type);
648 	}
649 
650 	spin_unlock_irqrestore(&hsotg->lock, flags);
651 }
652 
653 /**
654  * dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic
655  * schedule if it is not already in the schedule. If the QH is already in
656  * the schedule, no action is taken.
657  *
658  * @hsotg: The HCD state structure for the DWC OTG controller
659  * @qh:    The QH to add
660  *
661  * Return: 0 if successful, negative error code otherwise
662  */
663 int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
664 {
665 	int status;
666 	u32 intr_mask;
667 
668 	if (dbg_qh(qh))
669 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
670 
671 	if (!list_empty(&qh->qh_list_entry))
672 		/* QH already in a schedule */
673 		return 0;
674 
675 	if (!dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number) &&
676 			!hsotg->frame_number) {
677 		dev_dbg(hsotg->dev,
678 				"reset frame number counter\n");
679 		qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
680 				SCHEDULE_SLOP);
681 	}
682 
683 	/* Add the new QH to the appropriate schedule */
684 	if (dwc2_qh_is_non_per(qh)) {
685 		if (qh->want_wait) {
686 			list_add_tail(&qh->qh_list_entry,
687 				      &hsotg->non_periodic_sched_waiting);
688 			qh->wait_timer_cancel = false;
689 			/* XXX mod_timer(&qh->wait_timer,
690 				  jiffies + DWC2_RETRY_WAIT_DELAY + 1); */
691 			callout_schedule(&qh->wait_timer,
692 			    mstohz(DWC2_RETRY_WAIT_DELAY));
693 		} else {
694 			list_add_tail(&qh->qh_list_entry,
695 				      &hsotg->non_periodic_sched_inactive);
696 		}
697 		return 0;
698 	}
699 
700 	status = dwc2_schedule_periodic(hsotg, qh);
701 	if (status)
702 		return status;
703 	if (!hsotg->periodic_qh_count) {
704 		intr_mask = DWC2_READ_4(hsotg, GINTMSK);
705 		intr_mask |= GINTSTS_SOF;
706 		DWC2_WRITE_4(hsotg, GINTMSK, intr_mask);
707 	}
708 	hsotg->periodic_qh_count++;
709 
710 	return 0;
711 }
712 
713 /**
714  * dwc2_hcd_qh_unlink() - Removes a QH from either the non-periodic or periodic
715  * schedule. Memory is not freed.
716  *
717  * @hsotg: The HCD state structure
718  * @qh:    QH to remove from schedule
719  */
720 void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
721 {
722 	u32 intr_mask;
723 
724 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
725 
726 	/* If the wait_timer is pending, this will stop it from acting */
727 	qh->wait_timer_cancel = true;
728 
729 	if (list_empty(&qh->qh_list_entry))
730 		/* QH is not in a schedule */
731 		return;
732 
733 	if (dwc2_qh_is_non_per(qh)) {
734 		if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
735 			hsotg->non_periodic_qh_ptr =
736 					hsotg->non_periodic_qh_ptr->next;
737 		list_del_init(&qh->qh_list_entry);
738 		return;
739 	}
740 
741 	dwc2_deschedule_periodic(hsotg, qh);
742 	hsotg->periodic_qh_count--;
743 	if (!hsotg->periodic_qh_count) {
744 		intr_mask = DWC2_READ_4(hsotg, GINTMSK);
745 		intr_mask &= ~GINTSTS_SOF;
746 		DWC2_WRITE_4(hsotg, GINTMSK, intr_mask);
747 	}
748 }
749 
750 /*
751  * Schedule the next continuing periodic split transfer
752  */
753 static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
754 				      struct dwc2_qh *qh, u16 frame_number,
755 				      int sched_next_periodic_split)
756 {
757 	u16 incr;
758 
759 	if (sched_next_periodic_split) {
760 		qh->sched_frame = frame_number;
761 		incr = dwc2_frame_num_inc(qh->start_split_frame, 1);
762 		if (dwc2_frame_num_le(frame_number, incr)) {
763 			/*
764 			 * Allow one frame to elapse after start split
765 			 * microframe before scheduling complete split, but
766 			 * DON'T if we are doing the next start split in the
767 			 * same frame for an ISOC out
768 			 */
769 			if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
770 			    qh->ep_is_in != 0) {
771 				qh->sched_frame =
772 					dwc2_frame_num_inc(qh->sched_frame, 1);
773 			}
774 		}
775 	} else {
776 		qh->sched_frame = dwc2_frame_num_inc(qh->start_split_frame,
777 						     qh->interval);
778 		if (dwc2_frame_num_le(qh->sched_frame, frame_number))
779 			qh->sched_frame = frame_number;
780 		qh->sched_frame |= 0x7;
781 		qh->start_split_frame = qh->sched_frame;
782 	}
783 }
784 
785 /*
786  * Deactivates a QH. For non-periodic QHs, removes the QH from the active
787  * non-periodic schedule. The QH is added to the inactive non-periodic
788  * schedule if any QTDs are still attached to the QH.
789  *
790  * For periodic QHs, the QH is removed from the periodic queued schedule. If
791  * there are any QTDs still attached to the QH, the QH is added to either the
792  * periodic inactive schedule or the periodic ready schedule and its next
793  * scheduled frame is calculated. The QH is placed in the ready schedule if
794  * the scheduled frame has been reached already. Otherwise it's placed in the
795  * inactive schedule. If there are no QTDs attached to the QH, the QH is
796  * completely removed from the periodic schedule.
797  */
798 void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
799 			    int sched_next_periodic_split)
800 {
801 	u16 frame_number;
802 
803 	if (dbg_qh(qh))
804 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
805 
806 	if (dwc2_qh_is_non_per(qh)) {
807 		dwc2_hcd_qh_unlink(hsotg, qh);
808 		if (!list_empty(&qh->qtd_list))
809 			/* Add back to inactive/waiting non-periodic schedule */
810 			dwc2_hcd_qh_add(hsotg, qh);
811 		return;
812 	}
813 
814 	frame_number = dwc2_hcd_get_frame_number(hsotg);
815 
816 	if (qh->do_split) {
817 		dwc2_sched_periodic_split(hsotg, qh, frame_number,
818 					  sched_next_periodic_split);
819 	} else {
820 		qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame,
821 						     qh->interval);
822 		if (dwc2_frame_num_le(qh->sched_frame, frame_number))
823 			qh->sched_frame = frame_number;
824 	}
825 
826 	if (list_empty(&qh->qtd_list)) {
827 		dwc2_hcd_qh_unlink(hsotg, qh);
828 		return;
829 	}
830 	/*
831 	 * Remove from periodic_sched_queued and move to
832 	 * appropriate queue
833 	 */
834 	if ((hsotg->core_params->uframe_sched > 0 &&
835 	     dwc2_frame_num_le(qh->sched_frame, frame_number)) ||
836 	    (hsotg->core_params->uframe_sched <= 0 &&
837 	     qh->sched_frame == frame_number))
838 		list_move(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
839 	else
840 		list_move(&qh->qh_list_entry, &hsotg->periodic_sched_inactive);
841 }
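
/*
 * Example (illustrative): frame numbers wrap, which is why the
 * rescheduling above uses dwc2_frame_num_inc()/dwc2_frame_num_le()
 * rather than plain addition and comparison.  Assuming the usual 0x3fff
 * frame-number mask, a periodic QH with interval 0x20 and sched_frame
 * 0x3ff0 is moved to 0x0010, and if that (micro)frame has already passed
 * the code falls back to the current frame_number.
 */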
842 
843 /**
844  * dwc2_hcd_qtd_init() - Initializes a QTD structure
845  *
846  * @qtd: The QTD to initialize
847  * @urb: The associated URB
848  */
849 void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
850 {
851 	qtd->urb = urb;
852 	if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
853 			USB_ENDPOINT_XFER_CONTROL) {
854 		/*
855 		 * The only time the QTD data toggle is used is on the data
856 		 * phase of control transfers. This phase always starts with
857 		 * DATA1.
858 		 */
859 		qtd->data_toggle = DWC2_HC_PID_DATA1;
860 		qtd->control_phase = DWC2_CONTROL_SETUP;
861 	}
862 
863 	/* Start split */
864 	qtd->complete_split = 0;
865 	qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
866 	qtd->isoc_split_offset = 0;
867 	qtd->in_process = 0;
868 
869 	/* Store the qtd ptr in the urb to reference the QTD */
870 	urb->qtd = qtd;
871 }
872 
873 /**
874  * dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
875  *			Caller must hold driver lock.
876  *
877  * @hsotg:        The DWC HCD structure
878  * @qtd:          The QTD to add
879  * @qh:           Queue head to add qtd to
880  *
881  * Return: 0 if successful, negative error code otherwise
882  *
883  * If the QH to which the QTD is added is not currently scheduled, it is placed
884  * into the proper schedule based on its EP type.
885  */
886 int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
887 		     struct dwc2_qh *qh)
888 {
889 
890 	KASSERT(mutex_owned(&hsotg->lock));
891 	int retval;
892 
893 	if (unlikely(!qh)) {
894 		dev_err(hsotg->dev, "%s: Invalid QH\n", __func__);
895 		retval = -EINVAL;
896 		goto fail;
897 	}
898 
899 	retval = dwc2_hcd_qh_add(hsotg, qh);
900 	if (retval)
901 		goto fail;
902 
903 	qtd->qh = qh;
904 	list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);
905 
906 	return 0;
907 fail:
908 	return retval;
909 }
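
/*
 * Rough usage sketch (assumption, for illustration only): the real enqueue
 * path lives in dwc2_hcd_urb_enqueue() and differs in detail, but a caller
 * of dwc2_hcd_qtd_add() is expected to look roughly like the hypothetical
 * helper below -- take a QTD from the softc pool, initialize it against
 * its URB, then add it while holding the driver lock.
 */
#if 0
/* dwc2_example_enqueue() is a made-up name; sc, urb and qh are assumed
 * to come from the caller's context. */
static int
dwc2_example_enqueue(struct dwc2_hsotg *hsotg, struct dwc2_hcd_urb *urb,
		     struct dwc2_qh *qh)
{
	struct dwc2_softc *sc = hsotg->hsotg_sc;
	struct dwc2_qtd *qtd;
	unsigned long flags;
	int retval;

	qtd = pool_cache_get(sc->sc_qtdpool, PR_NOWAIT);
	if (qtd == NULL)
		return -ENOMEM;
	memset(qtd, 0, sizeof(*qtd));
	dwc2_hcd_qtd_init(qtd, urb);

	spin_lock_irqsave(&hsotg->lock, flags);
	retval = dwc2_hcd_qtd_add(hsotg, qtd, qh);
	spin_unlock_irqrestore(&hsotg->lock, flags);

	/* On failure the QTD was never linked, so just return it. */
	if (retval)
		pool_cache_put(sc->sc_qtdpool, qtd);
	return retval;
}
#endif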
910 
911 void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg,
912 				  struct dwc2_qtd *qtd,
913 				  struct dwc2_qh *qh)
914 {
915 	struct dwc2_softc *sc = hsotg->hsotg_sc;
916 
917 	list_del_init(&qtd->qtd_list_entry);
918  	pool_cache_put(sc->sc_qtdpool, qtd);
919 }
920 
921 #define BITSTUFFTIME(bytecount)	((8 * 7 * (bytecount)) / 6)
922 #define HS_HOST_DELAY		5	/* nanoseconds */
923 #define FS_LS_HOST_DELAY	1000	/* nanoseconds */
924 #define HUB_LS_SETUP		333	/* nanoseconds */
925 
926 static u32 dwc2_calc_bus_time(struct dwc2_hsotg *hsotg, int speed, int is_in,
927 			      int is_isoc, int bytecount)
928 {
929 	unsigned long retval;
930 
931 	switch (speed) {
932 	case USB_SPEED_HIGH:
933 		if (is_isoc)
934 			retval =
935 			    ((38 * 8 * 2083) +
936 			     (2083 * (3 + BITSTUFFTIME(bytecount)))) / 1000 +
937 			    HS_HOST_DELAY;
938 		else
939 			retval =
940 			    ((55 * 8 * 2083) +
941 			     (2083 * (3 + BITSTUFFTIME(bytecount)))) / 1000 +
942 			    HS_HOST_DELAY;
943 		break;
944 	case USB_SPEED_FULL:
945 		if (is_isoc) {
946 			retval =
947 			    (8354 * (31 + 10 * BITSTUFFTIME(bytecount))) / 1000;
948 			if (is_in)
949 				retval = 7268 + FS_LS_HOST_DELAY + retval;
950 			else
951 				retval = 6265 + FS_LS_HOST_DELAY + retval;
952 		} else {
953 			retval =
954 			    (8354 * (31 + 10 * BITSTUFFTIME(bytecount))) / 1000;
955 			retval = 9107 + FS_LS_HOST_DELAY + retval;
956 		}
957 		break;
958 	case USB_SPEED_LOW:
959 		if (is_in) {
960 			retval =
961 			    (67667 * (31 + 10 * BITSTUFFTIME(bytecount))) /
962 			    1000;
963 			retval =
964 			    64060 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
965 			    retval;
966 		} else {
967 			retval =
968 			    (66700 * (31 + 10 * BITSTUFFTIME(bytecount))) /
969 			    1000;
970 			retval =
971 			    64107 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
972 			    retval;
973 		}
974 		break;
975 	default:
976 		dev_warn(hsotg->dev, "Unknown device speed\n");
977 		retval = -1;
978 	}
979 
980 	return NS_TO_US(retval);
981 }
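
/*
 * Worked example (illustrative): a 64-byte full-speed interrupt IN
 * transfer gives BITSTUFFTIME(64) = (8 * 7 * 64) / 6 = 597, so the
 * non-isochronous full-speed branch computes
 * (8354 * (31 + 10 * 597)) / 1000 = 50132 ns, plus 9107 ns and
 * FS_LS_HOST_DELAY (1000 ns), for roughly 60 us after NS_TO_US().
 */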
982