/*	$NetBSD: ld_sdmmc.c,v 1.38 2020/05/24 17:26:18 riastradh Exp $	*/

/*
 * Copyright (c) 2008 KIYOHARA Takashi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_sdmmc.c,v 1.38 2020/05/24 17:26:18 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_sdmmc.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/bus.h>
#include <sys/callout.h>	/* struct callout, callout_*() */
#include <sys/endian.h>
#include <sys/dkio.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/kmem.h>		/* kmem_asprintf(), kmem_free() */
#include <sys/kthread.h>
#include <sys/syslog.h>
#include <sys/module.h>
#include <sys/pcq.h>

#include <dev/ldvar.h>

#include <dev/sdmmc/sdmmcvar.h>

#include "ioconf.h"

#ifdef LD_SDMMC_DEBUG
#define DPRINTF(s)	printf s
#else
#define DPRINTF(s)	__nothing
#endif

#define	LD_SDMMC_IORETRIES	5	/* number of retries before giving up */
#define	RECOVERYTIME		(hz / 2) /* time to wait before retrying a cmd */

#define	LD_SDMMC_MAXQUEUECNT	4	/* number of queued bio requests */
#define	LD_SDMMC_MAXTASKCNT	8	/* number of tasks in task pool */
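
/*
 * Note: the task pool is larger than the bio queue limit.  The
 * apparent intent (inferred from the sizing, not documented here) is
 * that discard and cache-sync requests can still obtain a task while
 * the full LD_SDMMC_MAXQUEUECNT of bio transfers is outstanding.
 */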

struct ld_sdmmc_softc;

struct ld_sdmmc_task {
	struct sdmmc_task task;
	struct ld_sdmmc_softc *task_sc;

	struct buf *task_bp;
	int task_retries;	/* number of xfer retries */
	struct callout task_restart_ch;

	bool task_poll;
	int *task_errorp;

	TAILQ_ENTRY(ld_sdmmc_task) task_entry;
};

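/*
 * Locking notes (summarizing the code below): sc_lock protects
 * sc_freeq, sc_xferq, sc_busy, sc_dying, and each task's bookkeeping
 * fields.  sc_cv is broadcast when a synchronous task completes and
 * when sc_busy drops to zero, so that detach can wait for in-flight
 * waiters to drain.
 */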
struct ld_sdmmc_softc {
	struct ld_softc sc_ld;
	int sc_hwunit;
	char *sc_typename;
	struct sdmmc_function *sc_sf;

	kmutex_t sc_lock;
	kcondvar_t sc_cv;
	TAILQ_HEAD(, ld_sdmmc_task) sc_freeq;
	TAILQ_HEAD(, ld_sdmmc_task) sc_xferq;
	unsigned sc_busy;
	bool sc_dying;

	struct evcnt sc_ev_discard;	/* discard counter */
	struct evcnt sc_ev_discarderr;	/* discard error counter */
	struct evcnt sc_ev_discardbusy;	/* discard busy counter */
	struct evcnt sc_ev_cachesyncbusy; /* cache sync busy counter */

	struct ld_sdmmc_task sc_task[LD_SDMMC_MAXTASKCNT];
};

static int ld_sdmmc_match(device_t, cfdata_t, void *);
static void ld_sdmmc_attach(device_t, device_t, void *);
static int ld_sdmmc_detach(device_t, int);

static int ld_sdmmc_dump(struct ld_softc *, void *, int, int);
static int ld_sdmmc_start(struct ld_softc *, struct buf *);
static void ld_sdmmc_restart(void *);
static int ld_sdmmc_discard(struct ld_softc *, struct buf *);
static int ld_sdmmc_ioctl(struct ld_softc *, u_long, void *, int32_t, bool);

static void ld_sdmmc_doattach(void *);
static void ld_sdmmc_dobio(void *);
static void ld_sdmmc_dodiscard(void *);

CFATTACH_DECL_NEW(ld_sdmmc, sizeof(struct ld_sdmmc_softc),
    ld_sdmmc_match, ld_sdmmc_attach, ld_sdmmc_detach, NULL);

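/*
 * Task pool protocol: tasks move between sc_freeq and sc_xferq under
 * sc_lock.  ld_sdmmc_task_get() fails once sc_dying is set, so no new
 * work can be queued while detach is in progress.
 */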
static struct ld_sdmmc_task *
ld_sdmmc_task_get(struct ld_sdmmc_softc *sc)
{
	struct ld_sdmmc_task *task;

	KASSERT(mutex_owned(&sc->sc_lock));

	if (sc->sc_dying || (task = TAILQ_FIRST(&sc->sc_freeq)) == NULL)
		return NULL;
	TAILQ_REMOVE(&sc->sc_freeq, task, task_entry);
	TAILQ_INSERT_TAIL(&sc->sc_xferq, task, task_entry);
	KASSERT(task->task_bp == NULL);
	KASSERT(task->task_errorp == NULL);

	return task;
}

static void
ld_sdmmc_task_put(struct ld_sdmmc_softc *sc, struct ld_sdmmc_task *task)
{

	KASSERT(mutex_owned(&sc->sc_lock));

	TAILQ_REMOVE(&sc->sc_xferq, task, task_entry);
	TAILQ_INSERT_TAIL(&sc->sc_freeq, task, task_entry);
	task->task_bp = NULL;
	task->task_errorp = NULL;
}

static void
ld_sdmmc_task_cancel(struct ld_sdmmc_softc *sc, struct ld_sdmmc_task *task)
{
	struct buf *bp;
	int *errorp;

	KASSERT(mutex_owned(&sc->sc_lock));
	KASSERT(sc->sc_dying);

	/*
	 * Either the callout or the task may be pending, but not both.
	 * First, determine whether the callout is pending.
	 */
	if (callout_pending(&task->task_restart_ch) ||
	    callout_invoking(&task->task_restart_ch)) {
		/*
		 * The callout either is pending, or just started but
		 * is waiting for us to release the lock.  At this
		 * point, it will notice sc->sc_dying and give up, so
		 * just wait for it to complete and then we will
		 * release everything.
		 */
		callout_halt(&task->task_restart_ch, &sc->sc_lock);
	} else {
		/*
		 * If the callout is still running, it has just
		 * scheduled the task, so after we wait for the callout
		 * to finish running, the task is either pending or
		 * running.  If the task is already running, it will
		 * notice sc->sc_dying and give up; otherwise we have
		 * to release everything.
		 */
		callout_halt(&task->task_restart_ch, &sc->sc_lock);
		if (!sdmmc_del_task(sc->sc_sf->sc, &task->task, &sc->sc_lock))
			return;	/* task already started, let it clean up */
	}

	/*
	 * It is our responsibility to clean up.  Move the task from
	 * xferq back to freeq and make sure to notify anyone waiting
	 * that it's finished.
	 */
	bp = task->task_bp;
	errorp = task->task_errorp;
	ld_sdmmc_task_put(sc, task);

	/*
	 * If the task was for an asynchronous I/O xfer, fail the I/O
	 * xfer, with the softc lock dropped since this is a callback
	 * into arbitrary other subsystems.
	 */
	if (bp) {
		mutex_exit(&sc->sc_lock);
		/*
		 * XXX We assume that the same sequence works for bio
		 * and discard -- that lddiscardend is just the same as
		 * setting bp->b_resid = bp->b_bcount in the event of
		 * error and then calling lddone.
		 */
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		lddone(&sc->sc_ld, bp);
		mutex_enter(&sc->sc_lock);
	}

	/*
	 * If the task was for a synchronous operation (cachesync),
	 * then just set the error indicator and wake up the waiter.
	 */
	if (errorp) {
		*errorp = ENXIO;
		cv_broadcast(&sc->sc_cv);
	}
}

/* ARGSUSED */
static int
ld_sdmmc_match(device_t parent, cfdata_t match, void *aux)
{
	struct sdmmc_softc *sdmsc = device_private(parent);

	if (ISSET(sdmsc->sc_flags, SMF_MEM_MODE))
		return 1;
	return 0;
}

/* ARGSUSED */
static void
ld_sdmmc_attach(device_t parent, device_t self, void *aux)
{
	struct ld_sdmmc_softc *sc = device_private(self);
	struct sdmmc_attach_args *sa = aux;
	struct ld_softc *ld = &sc->sc_ld;
	struct ld_sdmmc_task *task;
	struct lwp *lwp;
	int i;

	ld->sc_dv = self;

	aprint_normal(": <0x%02x:0x%04x:%s:0x%02x:0x%08x:0x%03x>\n",
	    sa->sf->cid.mid, sa->sf->cid.oid, sa->sf->cid.pnm,
	    sa->sf->cid.rev, sa->sf->cid.psn, sa->sf->cid.mdt);
	aprint_naive("\n");

	sc->sc_typename = kmem_asprintf("0x%02x:0x%04x:%s",
	    sa->sf->cid.mid, sa->sf->cid.oid, sa->sf->cid.pnm);

	evcnt_attach_dynamic(&sc->sc_ev_discard, EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "sdmmc discard count");
	evcnt_attach_dynamic(&sc->sc_ev_discarderr, EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "sdmmc discard errors");
	evcnt_attach_dynamic(&sc->sc_ev_discardbusy, EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "sdmmc discard busy");
	evcnt_attach_dynamic(&sc->sc_ev_cachesyncbusy, EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "sdmmc cache sync busy");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SDMMC);
	cv_init(&sc->sc_cv, "ldsdmmc");
	TAILQ_INIT(&sc->sc_freeq);
	TAILQ_INIT(&sc->sc_xferq);
	sc->sc_dying = false;

	const int ntask = __arraycount(sc->sc_task);
	for (i = 0; i < ntask; i++) {
		task = &sc->sc_task[i];
		task->task_sc = sc;
		callout_init(&task->task_restart_ch, CALLOUT_MPSAFE);
		TAILQ_INSERT_TAIL(&sc->sc_freeq, task, task_entry);
	}

	sc->sc_hwunit = 0;	/* always 0? */
	sc->sc_sf = sa->sf;

	ld->sc_flags = LDF_ENABLED | LDF_MPSAFE;
	ld->sc_secperunit = sc->sc_sf->csd.capacity;
	ld->sc_secsize = SDMMC_SECTOR_SIZE;
	ld->sc_maxxfer = MAXPHYS;
	ld->sc_maxqueuecnt = LD_SDMMC_MAXQUEUECNT;
	ld->sc_dump = ld_sdmmc_dump;
	ld->sc_start = ld_sdmmc_start;
	ld->sc_discard = ld_sdmmc_discard;
	ld->sc_ioctl = ld_sdmmc_ioctl;
	ld->sc_typename = sc->sc_typename;

	/*
	 * Defer attachment of ld + disk subsystem to a thread.
	 *
	 * This is necessary because wedge autodiscover needs to
	 * open and call into the ld driver, which could deadlock
	 * when the sdmmc driver isn't ready in early bootstrap.
	 *
	 * Don't mark the thread as MPSAFE to keep aprint output sane.
	 */
	config_pending_incr(self);
	if (kthread_create(PRI_NONE, 0, NULL,
	    ld_sdmmc_doattach, sc, &lwp, "%sattach", device_xname(self))) {
		aprint_error_dev(self, "couldn't create thread\n");
	}
}

static void
ld_sdmmc_doattach(void *arg)
{
	struct ld_sdmmc_softc *sc = (struct ld_sdmmc_softc *)arg;
	struct ld_softc *ld = &sc->sc_ld;
	struct sdmmc_softc *ssc = device_private(device_parent(ld->sc_dv));
	const u_int cache_size = sc->sc_sf->ext_csd.cache_size;
	char buf[sizeof("9999 KB")];

	ldattach(ld, BUFQ_DISK_DEFAULT_STRAT);
	aprint_normal_dev(ld->sc_dv, "%d-bit width,", sc->sc_sf->width);
	if (ssc->sc_transfer_mode != NULL)
		aprint_normal(" %s,", ssc->sc_transfer_mode);
	if (cache_size > 0) {
		format_bytes(buf, sizeof(buf), cache_size);
		aprint_normal(" %s cache%s,", buf,
		    ISSET(sc->sc_sf->flags, SFF_CACHE_ENABLED) ? "" :
		    " (disabled)");
	}
	/* sc_busclk is in kHz; print in MHz if the clock is 1 MHz or more. */
	if ((ssc->sc_busclk / 1000) != 0)
		aprint_normal(" %u.%03u MHz\n",
		    ssc->sc_busclk / 1000, ssc->sc_busclk % 1000);
	else
		aprint_normal(" %u kHz\n", ssc->sc_busclk);
	config_pending_decr(ld->sc_dv);
	kthread_exit(0);
}

static int
ld_sdmmc_detach(device_t dev, int flags)
{
	struct ld_sdmmc_softc *sc = device_private(dev);
	struct ld_softc *ld = &sc->sc_ld;
	struct ld_sdmmc_task *task;
	int rv, i;

	/*
	 * Block new xfers, abort all pending tasks, and wait for all
	 * pending waiters to notice that we're gone.
	 */
	mutex_enter(&sc->sc_lock);
	sc->sc_dying = true;
	while ((task = TAILQ_FIRST(&sc->sc_xferq)) != NULL)
		ld_sdmmc_task_cancel(sc, task);
	while (sc->sc_busy)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	mutex_exit(&sc->sc_lock);

	/* Do the ld detach dance.  */
	if ((rv = ldbegindetach(ld, flags)) != 0) {
		/* Detach failed -- back out.  */
		mutex_enter(&sc->sc_lock);
		sc->sc_dying = false;
		mutex_exit(&sc->sc_lock);
		return rv;
	}
	ldenddetach(ld);

	KASSERT(TAILQ_EMPTY(&sc->sc_xferq));

	for (i = 0; i < __arraycount(sc->sc_task); i++)
		callout_destroy(&sc->sc_task[i].task_restart_ch);

	cv_destroy(&sc->sc_cv);
	mutex_destroy(&sc->sc_lock);

	evcnt_detach(&sc->sc_ev_discard);
	evcnt_detach(&sc->sc_ev_discarderr);
	evcnt_detach(&sc->sc_ev_discardbusy);
	evcnt_detach(&sc->sc_ev_cachesyncbusy);
	kmem_free(sc->sc_typename, strlen(sc->sc_typename) + 1);

	return 0;
}

static int
ld_sdmmc_start(struct ld_softc *ld, struct buf *bp)
{
	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);
	struct ld_sdmmc_task *task;
	int error;

	mutex_enter(&sc->sc_lock);
	if ((task = ld_sdmmc_task_get(sc)) == NULL) {
		error = EAGAIN;
		goto out;
	}

	task->task_bp = bp;
	task->task_retries = 0;
	sdmmc_init_task(&task->task, ld_sdmmc_dobio, task);

	sdmmc_add_task(sc->sc_sf->sc, &task->task);

	/* Success!  The xfer is now queued.  */
	error = 0;

out:	mutex_exit(&sc->sc_lock);
	return error;
}

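/*
 * Callout handler for I/O retries: requeue the task on the sdmmc task
 * thread once RECOVERYTIME has elapsed.  callout_ack() clears the
 * "invoking" state that ld_sdmmc_task_cancel() tests, and sc_dying is
 * rechecked under sc_lock so that no task is queued during detach.
 */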
static void
ld_sdmmc_restart(void *arg)
{
	struct ld_sdmmc_task *task = (struct ld_sdmmc_task *)arg;
	struct ld_sdmmc_softc *sc = task->task_sc;
	struct buf *bp = task->task_bp;

	/* Reset the residual count before retrying the whole transfer. */
	bp->b_resid = bp->b_bcount;

	mutex_enter(&sc->sc_lock);
	callout_ack(&task->task_restart_ch);
	if (!sc->sc_dying)
		sdmmc_add_task(sc->sc_sf->sc, &task->task);
	mutex_exit(&sc->sc_lock);
}

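/*
 * Runs on the sdmmc task thread.  On error, the transfer is retried up
 * to LD_SDMMC_IORETRIES times, RECOVERYTIME ticks apart, before the
 * bio is failed for good.
 */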
static void
ld_sdmmc_dobio(void *arg)
{
	struct ld_sdmmc_task *task = (struct ld_sdmmc_task *)arg;
	struct ld_sdmmc_softc *sc = task->task_sc;
	struct buf *bp = task->task_bp;
	int error;

	/*
	 * I/O operation
	 */
	DPRINTF(("%s: I/O operation (dir=%s, blkno=0x%jx, bcnt=0x%x)\n",
	    device_xname(sc->sc_ld.sc_dv), bp->b_flags & B_READ ? "IN" : "OUT",
	    (uintmax_t)bp->b_rawblkno, bp->b_bcount));

	/* Bounds check; b_rawblkno and csd.capacity both count sectors. */
	if (bp->b_rawblkno >= sc->sc_sf->csd.capacity) {
		/* trying to read or write past end of device */
		aprint_error_dev(sc->sc_ld.sc_dv,
		    "blkno 0x%" PRIx64 " exceeds capacity %d\n",
		    bp->b_rawblkno, sc->sc_sf->csd.capacity);
		bp->b_error = EINVAL;
		bp->b_resid = bp->b_bcount;

		goto done;
	}

	if (bp->b_flags & B_READ)
		error = sdmmc_mem_read_block(sc->sc_sf, bp->b_rawblkno,
		    bp->b_data, bp->b_bcount);
	else
		error = sdmmc_mem_write_block(sc->sc_sf, bp->b_rawblkno,
		    bp->b_data, bp->b_bcount);
	if (error) {
		if (task->task_retries < LD_SDMMC_IORETRIES) {
			struct dk_softc *dksc = &sc->sc_ld.sc_dksc;
			struct cfdriver *cd = device_cfdriver(dksc->sc_dev);

			diskerr(bp, cd->cd_name, "error", LOG_PRINTF, 0,
				dksc->sc_dkdev.dk_label);
			printf(", retrying\n");
			task->task_retries++;
			mutex_enter(&sc->sc_lock);
			if (sc->sc_dying) {
				bp->b_resid = bp->b_bcount;
				bp->b_error = error;
				goto done_locked;
			} else {
				callout_reset(&task->task_restart_ch,
				    RECOVERYTIME, ld_sdmmc_restart, task);
			}
			mutex_exit(&sc->sc_lock);
			return;
		}
		bp->b_error = error;
		bp->b_resid = bp->b_bcount;
	} else {
		bp->b_resid = 0;
	}

done:
	/* Dissociate the task from the I/O xfer and release it.  */
	mutex_enter(&sc->sc_lock);
done_locked:
	ld_sdmmc_task_put(sc, task);
	mutex_exit(&sc->sc_lock);

	lddone(&sc->sc_ld, bp);
}

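/*
 * Crash-dump hook, called from ld(4) to write out a kernel core.  It
 * bypasses the task queue and writes synchronously, presumably because
 * the task thread cannot be relied upon at dump time.
 */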
static int
ld_sdmmc_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);

	return sdmmc_mem_write_block(sc->sc_sf, blkno, data,
	    blkcnt * ld->sc_secsize);
}

static void
ld_sdmmc_dodiscard(void *arg)
{
	struct ld_sdmmc_task *task = arg;
	struct ld_sdmmc_softc *sc = task->task_sc;
	struct buf *bp = task->task_bp;
	uint32_t sblkno, nblks;
	int error;

	/* starting block and number of blocks to erase */
	sblkno = bp->b_rawblkno;
	nblks  = howmany(bp->b_bcount, sc->sc_ld.sc_secsize);

	/* An error from discard is non-fatal */
	error = sdmmc_mem_discard(sc->sc_sf, sblkno, sblkno + nblks - 1);

	/* Count error or success and release the task.  */
	mutex_enter(&sc->sc_lock);
	if (error)
		sc->sc_ev_discarderr.ev_count++;
	else
		sc->sc_ev_discard.ev_count++;
	ld_sdmmc_task_put(sc, task);
	mutex_exit(&sc->sc_lock);

	/* Record the error and notify the xfer of completion.  */
	if (error)
		bp->b_error = error;
	lddiscardend(&sc->sc_ld, bp);
}

static int
ld_sdmmc_discard(struct ld_softc *ld, struct buf *bp)
{
	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);
	struct ld_sdmmc_task *task;
	int error;

	mutex_enter(&sc->sc_lock);

	/* Acquire a free task, or drop the request altogether.  */
	if ((task = ld_sdmmc_task_get(sc)) == NULL) {
		sc->sc_ev_discardbusy.ev_count++;
		error = EBUSY;
		goto out;
	}

	/* Set up the task and schedule it.  */
	task->task_bp = bp;
	sdmmc_init_task(&task->task, ld_sdmmc_dodiscard, task);

	sdmmc_add_task(sc->sc_sf->sc, &task->task);

	/* Success!  The request is queued.  */
	error = 0;

out:	mutex_exit(&sc->sc_lock);
	return error;
}

static void
ld_sdmmc_docachesync(void *arg)
{
	struct ld_sdmmc_task *task = arg;
	struct ld_sdmmc_softc *sc = task->task_sc;
	int error;

	/* Flush the cache.  */
	error = sdmmc_mem_flush_cache(sc->sc_sf, task->task_poll);

	mutex_enter(&sc->sc_lock);

	/* Notify the other thread that we're done; pass on the error.  */
	*task->task_errorp = error;
	cv_broadcast(&sc->sc_cv);

	/* Release the task.  */
	ld_sdmmc_task_put(sc, task);

	mutex_exit(&sc->sc_lock);
}

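/*
 * Synchronous DIOCCACHESYNC path.  "error" doubles as the completion
 * flag: it starts at -1, a value no errno uses, and either the task or
 * the cancel path on detach stores a real errno and broadcasts sc_cv.
 * The sc_busy count keeps the softc alive until the wait completes.
 */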
static int
ld_sdmmc_cachesync(struct ld_softc *ld, bool poll)
{
	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);
	struct ld_sdmmc_task *task;
	int error = -1;		/* sentinel: not yet completed */

	mutex_enter(&sc->sc_lock);

	/* Acquire a free task, or fail with EBUSY.  */
	if ((task = ld_sdmmc_task_get(sc)) == NULL) {
		sc->sc_ev_cachesyncbusy.ev_count++;
		error = EBUSY;
		goto out;
	}

	/* Set up the task and schedule it.  */
	task->task_poll = poll;
	task->task_errorp = &error;
	sdmmc_init_task(&task->task, ld_sdmmc_docachesync, task);

	sdmmc_add_task(sc->sc_sf->sc, &task->task);

	/*
	 * Wait for the task to complete.  If the device is yanked,
	 * detach will notify us.  Keep the busy count up until we're
	 * done waiting so that the softc doesn't go away until we're
	 * done.
	 */
	sc->sc_busy++;
	KASSERT(sc->sc_busy <= LD_SDMMC_MAXTASKCNT);
	while (error == -1)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	if (--sc->sc_busy == 0)
		cv_broadcast(&sc->sc_cv);

out:	mutex_exit(&sc->sc_lock);
	return error;
}

static int
ld_sdmmc_ioctl(struct ld_softc *ld, u_long cmd, void *addr, int32_t flag,
    bool poll)
{

	switch (cmd) {
	case DIOCCACHESYNC:
		return ld_sdmmc_cachesync(ld, poll);
	default:
		return EPASSTHROUGH;
	}
}

MODULE(MODULE_CLASS_DRIVER, ld_sdmmc, "ld");

#ifdef _MODULE
/*
 * XXX Don't allow ioconf.c to redefine the "struct cfdriver ld_cd";
 * XXX it will be defined in the common-code module.
 */
#undef  CFDRIVER_DECL
#define CFDRIVER_DECL(name, class, attr)
#include "ioconf.c"
#endif

static int
ld_sdmmc_modcmd(modcmd_t cmd, void *opaque)
{
#ifdef _MODULE
	/*
	 * We ignore the cfdriver_vec[] that ioconf provides, since
	 * the cfdrivers are attached already.
	 */
	static struct cfdriver * const no_cfdriver_vec[] = { NULL };
#endif
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(no_cfdriver_vec,
		    cfattach_ioconf_ld_sdmmc, cfdata_ioconf_ld_sdmmc);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(no_cfdriver_vec,
		    cfattach_ioconf_ld_sdmmc, cfdata_ioconf_ld_sdmmc);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}