xref: /netbsd-src/sys/dev/ata/ata_subr.c (revision d90047b5d07facf36e6c01dcc0bded8997ce9cc2)
/*	$NetBSD: ata_subr.c,v 1.11 2020/05/02 19:09:56 thorpej Exp $	*/

/*
 * Copyright (c) 1998, 2001 Manuel Bouyer.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ata_subr.c,v 1.11 2020/05/02 19:09:56 thorpej Exp $");

#include "opt_ata.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/errno.h>
#include <sys/ataio.h>
#include <sys/kmem.h>
#include <sys/intr.h>
#include <sys/bus.h>
#include <sys/once.h>
#include <sys/bitops.h>

#define ATABUS_PRIVATE

#include <dev/ata/ataconf.h>
#include <dev/ata/atareg.h>
#include <dev/ata/atavar.h>
#include <dev/ic/wdcvar.h>	/* for PIOBM */

#define DEBUG_FUNCS  0x08
#define DEBUG_PROBE  0x10
#define DEBUG_DETACH 0x20
#define	DEBUG_XFERS  0x40
#ifdef ATADEBUG
extern int atadebug_mask;
#define ATADEBUG_PRINT(args, level) \
	if (atadebug_mask & (level)) \
		printf args
#else
#define ATADEBUG_PRINT(args, level)
#endif

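/*
 * ata_queue_reset:
 *
 *	Return a queue to its empty state: no queued or active xfers,
 *	no freeze, and all command slots marked available (the low
 *	queue_openings bits of queue_xfers_avail are set).
 */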
static void
ata_queue_reset(struct ata_queue *chq)
{
	/* make sure that we can use polled commands */
	SIMPLEQ_INIT(&chq->queue_xfer);
	TAILQ_INIT(&chq->active_xfers);
	chq->queue_freeze = 0;
	chq->queue_active = 0;
	chq->active_xfers_used = 0;
	chq->queue_xfers_avail = __BIT(chq->queue_openings) - 1;
}

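/*
 * ata_queue_hwslot_to_xfer:
 *
 *	Map a hardware command slot (e.g. an NCQ tag reported by the
 *	controller) back to the active xfer occupying that slot.  The
 *	slot must be in range and marked active in active_xfers_used.
 *
 *	Illustrative sketch only ("slot" and the surrounding completion
 *	handler are hypothetical, not part of this file); a controller
 *	driver's completion path might do something like:
 *
 *		xfer = ata_queue_hwslot_to_xfer(chp, slot);
 *		xfer->ops->c_intr(chp, xfer, 1);
 */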
struct ata_xfer *
ata_queue_hwslot_to_xfer(struct ata_channel *chp, int hwslot)
{
	struct ata_queue *chq = chp->ch_queue;
	struct ata_xfer *xfer = NULL;

	ata_channel_lock(chp);

	KASSERTMSG(hwslot < chq->queue_openings, "hwslot %d >= openings %d",
	    hwslot, chq->queue_openings);
	KASSERTMSG((chq->active_xfers_used & __BIT(hwslot)) != 0,
	    "hwslot %d not active", hwslot);

	/* Usually the first entry will be the one */
	TAILQ_FOREACH(xfer, &chq->active_xfers, c_activechain) {
		if (xfer->c_slot == hwslot)
			break;
	}

	ata_channel_unlock(chp);

	KASSERTMSG((xfer != NULL),
	    "%s: xfer with slot %d not found (active %x)", __func__,
	    hwslot, chq->active_xfers_used);

	return xfer;
}

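/*
 * ata_queue_get_active_xfer_locked:
 *
 *	Return the first active non-NCQ xfer, or NULL if there is none
 *	or the head of the active list is an NCQ command.  The channel
 *	lock must be held by the caller.
 */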
struct ata_xfer *
ata_queue_get_active_xfer_locked(struct ata_channel *chp)
{
	struct ata_xfer *xfer;

	KASSERT(mutex_owned(&chp->ch_lock));
	xfer = TAILQ_FIRST(&chp->ch_queue->active_xfers);

	if (xfer && ISSET(xfer->c_flags, C_NCQ)) {
		/* Spurious call, never return NCQ xfer from this interface */
		xfer = NULL;
	}

	return xfer;
}

/*
 * This interface is meant to be used only when there is exactly one
 * outstanding command and no information about which slot triggered it.
 * The ata_queue_hwslot_to_xfer() interface is preferred in all NCQ cases.
 */
struct ata_xfer *
ata_queue_get_active_xfer(struct ata_channel *chp)
{
	struct ata_xfer *xfer = NULL;

	ata_channel_lock(chp);
	xfer = ata_queue_get_active_xfer_locked(chp);
	ata_channel_unlock(chp);

	return xfer;
}

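/*
 * ata_queue_drive_active_xfer:
 *
 *	Return the active xfer belonging to the given drive.  The caller
 *	must know such an xfer exists; the lookup asserts if none is found.
 */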
struct ata_xfer *
ata_queue_drive_active_xfer(struct ata_channel *chp, int drive)
{
	struct ata_xfer *xfer = NULL;

	ata_channel_lock(chp);

	TAILQ_FOREACH(xfer, &chp->ch_queue->active_xfers, c_activechain) {
		if (xfer->c_drive == drive)
			break;
	}
	KASSERT(xfer != NULL);

	ata_channel_unlock(chp);

	return xfer;
}

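/*
 * ata_queue_alloc:
 *
 *	Allocate and initialize a command queue with the given number of
 *	openings, clamped to the 1..ATA_MAX_OPENINGS range.
 *
 *	Illustrative sketch only (the 32-slot depth is merely an example
 *	of what an NCQ-capable attachment might request):
 *
 *		chp->ch_queue = ata_queue_alloc(32);
 */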
struct ata_queue *
ata_queue_alloc(uint8_t openings)
{
	if (openings == 0)
		openings = 1;

	if (openings > ATA_MAX_OPENINGS)
		openings = ATA_MAX_OPENINGS;

	struct ata_queue *chq = kmem_zalloc(sizeof(*chq), KM_SLEEP);

	chq->queue_openings = openings;
	ata_queue_reset(chq);

	cv_init(&chq->queue_drain, "atdrn");
	cv_init(&chq->queue_idle, "qidl");

	cv_init(&chq->c_active, "ataact");
	cv_init(&chq->c_cmd_finish, "atafin");

	return chq;
}

void
ata_queue_free(struct ata_queue *chq)
{
	cv_destroy(&chq->queue_drain);
	cv_destroy(&chq->queue_idle);

	cv_destroy(&chq->c_active);
	cv_destroy(&chq->c_cmd_finish);

	kmem_free(chq, sizeof(*chq));
}

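/*
 * ata_channel_init:
 *
 *	Initialize the channel lock, the channel thread idle cv and the
 *	timeout callout.  If the attachment code did not provide a queue,
 *	a single-opening (non-NCQ) queue is allocated here.
 */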
void
ata_channel_init(struct ata_channel *chp)
{
	mutex_init(&chp->ch_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&chp->ch_thr_idle, "atath");

	callout_init(&chp->c_timo_callout, 0); 	/* XXX MPSAFE */

	/* Optionally setup the queue, too */
	if (chp->ch_queue == NULL) {
		chp->ch_queue = ata_queue_alloc(1);
	}
}

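/*
 * ata_channel_destroy:
 *
 *	Counterpart of ata_channel_init(): free the queue (if any), halt
 *	and destroy the timeout callout, and tear down the lock and cv.
 */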
void
ata_channel_destroy(struct ata_channel *chp)
{
	if (chp->ch_queue != NULL) {
		ata_queue_free(chp->ch_queue);
		chp->ch_queue = NULL;
	}

	mutex_enter(&chp->ch_lock);
	callout_halt(&chp->c_timo_callout, &chp->ch_lock);
	callout_destroy(&chp->c_timo_callout);
	mutex_exit(&chp->ch_lock);

	mutex_destroy(&chp->ch_lock);
	cv_destroy(&chp->ch_thr_idle);
}

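/*
 * ata_timeout:
 *
 *	Callout handler fired when the last enqueued command did not
 *	complete in time.  Unless error recovery is already in progress,
 *	every active xfer is marked C_TIMEOU and completed through its
 *	c_intr op with irq == 0, so the handler treats it as a timeout.
 */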
void
ata_timeout(void *v)
{
	struct ata_channel *chp = v;
	struct ata_queue *chq = chp->ch_queue;
	struct ata_xfer *xfer, *nxfer;
	int s;

	s = splbio();				/* XXX MPSAFE */

	callout_ack(&chp->c_timo_callout);

	if (chp->ch_flags & ATACH_RECOVERING) {
		/* Do nothing, recovery will requeue the xfers */
		goto out;
	}

	/*
	 * If there is a timeout, it means the last enqueued command
	 * timed out, and thus all commands timed out.
	 * XXX locking
	 */
	TAILQ_FOREACH_SAFE(xfer, &chq->active_xfers, c_activechain, nxfer) {
		ATADEBUG_PRINT(("%s: slot %d\n", __func__, xfer->c_slot),
		    DEBUG_FUNCS|DEBUG_XFERS);

		if (ata_timo_xfer_check(xfer)) {
			/* Already logged */
			continue;
		}

		/* Mark as timed out. Do not print anything, wd(4) will. */
		xfer->c_flags |= C_TIMEOU;
		xfer->ops->c_intr(xfer->c_chp, xfer, 0);
	}

out:
	splx(s);
}

void
ata_channel_lock(struct ata_channel *chp)
{
	mutex_enter(&chp->ch_lock);
}

void
ata_channel_unlock(struct ata_channel *chp)
{
	mutex_exit(&chp->ch_lock);
}

void
ata_channel_lock_owned(struct ata_channel *chp)
{
	KASSERT(mutex_owned(&chp->ch_lock));
}

#ifdef ATADEBUG
void
atachannel_debug(struct ata_channel *chp)
{
	struct ata_queue *chq = chp->ch_queue;

	printf("  ch %s flags 0x%x ndrives %d\n",
	    device_xname(chp->atabus), chp->ch_flags, chp->ch_ndrives);
	printf("  que: flags 0x%x avail 0x%x used 0x%x\n",
	    chq->queue_flags, chq->queue_xfers_avail, chq->active_xfers_used);
	printf("        act %d freez %d open %u\n",
	    chq->queue_active, chq->queue_freeze, chq->queue_openings);
}
#endif /* ATADEBUG */

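/*
 * ata_queue_alloc_slot:
 *
 *	Reserve a free command slot, limited to the lower of the queue's
 *	and the drive's openings.  Returns false if no slot is available.
 *
 *	Worked example (illustrative numbers only): with queue_openings
 *	32 and drv_openings 31, mask is __BIT(31) - 1 = 0x7fffffff; if
 *	queue_xfers_avail is 0xfffffffc, ffs32() of the masked value is
 *	3, so slot 2 is handed out and bit 2 is cleared from the mask.
 */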
bool
ata_queue_alloc_slot(struct ata_channel *chp, uint8_t *c_slot,
    uint8_t drv_openings)
{
	struct ata_queue *chq = chp->ch_queue;
	uint32_t avail, mask;

	KASSERT(mutex_owned(&chp->ch_lock));
	KASSERT(chq->queue_active < chq->queue_openings);

	ATADEBUG_PRINT(("%s: channel %d qavail 0x%x qact %d",
	    __func__, chp->ch_channel,
	    chq->queue_xfers_avail, chq->queue_active),
	    DEBUG_XFERS);

	mask = __BIT(MIN(chq->queue_openings, drv_openings)) - 1;

	avail = ffs32(chq->queue_xfers_avail & mask);
	if (avail == 0)
		return false;

	KASSERT(avail > 0);
	KASSERT(avail <= drv_openings);

	*c_slot = avail - 1;
	chq->queue_xfers_avail &= ~__BIT(*c_slot);

	KASSERT((chq->active_xfers_used & __BIT(*c_slot)) == 0);
	return true;
}

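/*
 * ata_queue_free_slot:
 *
 *	Return a previously allocated slot to the available mask.  The
 *	slot must no longer be marked active.
 */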
void
ata_queue_free_slot(struct ata_channel *chp, uint8_t c_slot)
{
	struct ata_queue *chq = chp->ch_queue;

	KASSERT(mutex_owned(&chp->ch_lock));

	KASSERT((chq->active_xfers_used & __BIT(c_slot)) == 0);
	KASSERT((chq->queue_xfers_avail & __BIT(c_slot)) == 0);

	chq->queue_xfers_avail |= __BIT(c_slot);
}

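/*
 * ata_queue_hold / ata_queue_unhold:
 *
 *	Temporarily park the set of active xfers (e.g. while the channel
 *	is being reset or recovered) and later restore it.  Only the
 *	bookkeeping mask moves; the xfers stay on the active list.
 */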
343 ata_queue_hold(struct ata_channel *chp)
344 {
345 	struct ata_queue *chq = chp->ch_queue;
346 
347 	KASSERT(mutex_owned(&chp->ch_lock));
348 
349 	chq->queue_hold |= chq->active_xfers_used;
350 	chq->active_xfers_used = 0;
351 }
352 
353 void
354 ata_queue_unhold(struct ata_channel *chp)
355 {
356 	struct ata_queue *chq = chp->ch_queue;
357 
358 	KASSERT(mutex_owned(&chp->ch_lock));
359 
360 	chq->active_xfers_used |= chq->queue_hold;
361 	chq->queue_hold = 0;
362 }
363 
364 /*
365  * Must be called with interrupts blocked.
366  */
367 uint32_t
368 ata_queue_active(struct ata_channel *chp)
369 {
370 	struct ata_queue *chq = chp->ch_queue;
371 
372 	if (chp->ch_flags & ATACH_DETACHED)
373 		return 0;
374 
375 	return chq->active_xfers_used;
376 }
377 
378 uint8_t
379 ata_queue_openings(struct ata_channel *chp)
380 {
381 	return chp->ch_queue->queue_openings;
382 }
383