xref: /netbsd-src/sys/dev/sdmmc/ld_sdmmc.c (revision a60f4468d86362f15c5006ff57f9b85a370903c8)
1*a60f4468Sjmcneill /*	$NetBSD: ld_sdmmc.c,v 1.44 2024/10/18 11:03:52 jmcneill Exp $	*/
2e0297d1eSnonaka 
3e0297d1eSnonaka /*
4e0297d1eSnonaka  * Copyright (c) 2008 KIYOHARA Takashi
5e0297d1eSnonaka  * All rights reserved.
6e0297d1eSnonaka  *
7e0297d1eSnonaka  * Redistribution and use in source and binary forms, with or without
8e0297d1eSnonaka  * modification, are permitted provided that the following conditions
9e0297d1eSnonaka  * are met:
10e0297d1eSnonaka  * 1. Redistributions of source code must retain the above copyright
11e0297d1eSnonaka  *    notice, this list of conditions and the following disclaimer.
12e0297d1eSnonaka  * 2. Redistributions in binary form must reproduce the above copyright
13e0297d1eSnonaka  *    notice, this list of conditions and the following disclaimer in the
14e0297d1eSnonaka  *    documentation and/or other materials provided with the distribution.
15e0297d1eSnonaka  *
16e0297d1eSnonaka  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17e0297d1eSnonaka  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18e0297d1eSnonaka  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19e0297d1eSnonaka  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20e0297d1eSnonaka  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21e0297d1eSnonaka  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22e0297d1eSnonaka  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23e0297d1eSnonaka  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24e0297d1eSnonaka  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
25e0297d1eSnonaka  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26e0297d1eSnonaka  * POSSIBILITY OF SUCH DAMAGE.
27e0297d1eSnonaka  *
28e0297d1eSnonaka  */
29e0297d1eSnonaka 
30e0297d1eSnonaka #include <sys/cdefs.h>
31*a60f4468Sjmcneill __KERNEL_RCSID(0, "$NetBSD: ld_sdmmc.c,v 1.44 2024/10/18 11:03:52 jmcneill Exp $");
32e0297d1eSnonaka 
335e2f4552Smatt #ifdef _KERNEL_OPT
345e2f4552Smatt #include "opt_sdmmc.h"
355e2f4552Smatt #endif
36e0297d1eSnonaka 
37e0297d1eSnonaka #include <sys/param.h>
38bdc07cffSriastradh #include <sys/types.h>
39bdc07cffSriastradh 
40e0297d1eSnonaka #include <sys/buf.h>
41e0297d1eSnonaka #include <sys/bufq.h>
42e0297d1eSnonaka #include <sys/bus.h>
43bdc07cffSriastradh #include <sys/device.h>
44e0297d1eSnonaka #include <sys/disk.h>
45b42e1be6Smartin #include <sys/disklabel.h>
46bdc07cffSriastradh #include <sys/dkio.h>
47bdc07cffSriastradh #include <sys/endian.h>
48bdc07cffSriastradh #include <sys/kernel.h>
49487ce959Sriastradh #include <sys/kmem.h>
50bdc07cffSriastradh #include <sys/kthread.h>
51bdc07cffSriastradh #include <sys/module.h>
52bdc07cffSriastradh #include <sys/syslog.h>
53bdc07cffSriastradh #include <sys/systm.h>
54e0297d1eSnonaka 
55e0297d1eSnonaka #include <dev/ldvar.h>
56e0297d1eSnonaka 
57e0297d1eSnonaka #include <dev/sdmmc/sdmmcvar.h>
58e0297d1eSnonaka 
59916bdfa5Spgoyette #include "ioconf.h"
60916bdfa5Spgoyette 
61cb216b0dSjmcneill #ifdef LD_SDMMC_DEBUG
62e0297d1eSnonaka #define DPRINTF(s)	printf s
63e0297d1eSnonaka #else
640723c0bfSriastradh #define DPRINTF(s)	__nothing
65e0297d1eSnonaka #endif
66e0297d1eSnonaka 
678ad0b042Skiyohara #define	LD_SDMMC_IORETRIES	5	/* number of retries before giving up */
688ad0b042Skiyohara #define	RECOVERYTIME		hz/2	/* time to wait before retrying a cmd */
698ad0b042Skiyohara 
7082e84007Sjmcneill #define	LD_SDMMC_MAXQUEUECNT	4	/* number of queued bio requests */
7182e84007Sjmcneill #define	LD_SDMMC_MAXTASKCNT	8	/* number of tasks in task pool */
7282e84007Sjmcneill 
73e0297d1eSnonaka struct ld_sdmmc_softc;
74e0297d1eSnonaka 
/*
 * Per-request task state.  Wraps an sdmmc_task for submission to the
 * sdmmc host controller's task thread, plus bookkeeping for I/O retry
 * and for handing results back to synchronous waiters.
 */
struct ld_sdmmc_task {
	struct sdmmc_task task;		/* embedded sdmmc task queue entry */
	struct ld_sdmmc_softc *task_sc;	/* back-pointer to owning softc */

	struct buf *task_bp;		/* bio/discard request, or NULL */
	int task_retries; /* number of xfer retry */
	struct callout task_restart_ch;	/* delayed-retry callout */

	bool task_poll;		/* cachesync: poll flag for flush_cache */
	int *task_errorp;	/* cachesync: where to store the result */

	TAILQ_ENTRY(ld_sdmmc_task) task_entry;	/* on sc_freeq or sc_xferq */
};
88e0297d1eSnonaka 
struct ld_sdmmc_softc {
	struct ld_softc sc_ld;		/* generic ld(4) state; must be first */
	int sc_hwunit;			/* hardware unit; always 0 here */
	char *sc_typename;		/* from kmem_asprintf; freed in detach */
	struct sdmmc_function *sc_sf;	/* the card function we drive */

	kmutex_t sc_lock;		/* serializes the queues/flags below */
	kcondvar_t sc_cv;		/* signalled on completion/cancel */
	TAILQ_HEAD(, ld_sdmmc_task) sc_freeq;	/* tasks available for use */
	TAILQ_HEAD(, ld_sdmmc_task) sc_xferq;	/* tasks queued or in flight */
	unsigned sc_busy;		/* # of synchronous waiters in flight */
	bool sc_dying;			/* set at detach; refuse new tasks */

	struct evcnt sc_ev_discard;	/* discard counter */
	struct evcnt sc_ev_discarderr;	/* discard error counter */
	struct evcnt sc_ev_discardbusy;	/* discard busy counter */
	struct evcnt sc_ev_cachesyncbusy; /* cache sync busy counter */
	/*
	 * NOTE(review): sc_ev_cachesyncbusy is incremented in
	 * ld_sdmmc_cachesync() but never evcnt_attach_dynamic()ed in
	 * attach (and never detached) -- confirm whether it should be
	 * registered like the discard counters.
	 */

	struct ld_sdmmc_task sc_task[LD_SDMMC_MAXTASKCNT];
};
109e0297d1eSnonaka 
1103261738bScegger static int ld_sdmmc_match(device_t, cfdata_t, void *);
111e0297d1eSnonaka static void ld_sdmmc_attach(device_t, device_t, void *);
112e0297d1eSnonaka static int ld_sdmmc_detach(device_t, int);
113e0297d1eSnonaka 
114e0297d1eSnonaka static int ld_sdmmc_dump(struct ld_softc *, void *, int, int);
115e0297d1eSnonaka static int ld_sdmmc_start(struct ld_softc *, struct buf *);
1168ad0b042Skiyohara static void ld_sdmmc_restart(void *);
117aaf4d313Smlelstv static int ld_sdmmc_discard(struct ld_softc *, struct buf *);
1183eecdf69Sjmcneill static int ld_sdmmc_ioctl(struct ld_softc *, u_long, void *, int32_t, bool);
119e0297d1eSnonaka 
12084b5216cSnonaka static void ld_sdmmc_doattach(void *);
121e0297d1eSnonaka static void ld_sdmmc_dobio(void *);
12282e84007Sjmcneill static void ld_sdmmc_dodiscard(void *);
123e0297d1eSnonaka 
124e0297d1eSnonaka CFATTACH_DECL_NEW(ld_sdmmc, sizeof(struct ld_sdmmc_softc),
125e0297d1eSnonaka     ld_sdmmc_match, ld_sdmmc_attach, ld_sdmmc_detach, NULL);
126e0297d1eSnonaka 
1270723c0bfSriastradh static struct ld_sdmmc_task *
1280723c0bfSriastradh ld_sdmmc_task_get(struct ld_sdmmc_softc *sc)
1290723c0bfSriastradh {
1300723c0bfSriastradh 	struct ld_sdmmc_task *task;
1310723c0bfSriastradh 
1320723c0bfSriastradh 	KASSERT(mutex_owned(&sc->sc_lock));
1330723c0bfSriastradh 
1340723c0bfSriastradh 	if (sc->sc_dying || (task = TAILQ_FIRST(&sc->sc_freeq)) == NULL)
1350723c0bfSriastradh 		return NULL;
1360723c0bfSriastradh 	TAILQ_REMOVE(&sc->sc_freeq, task, task_entry);
1370723c0bfSriastradh 	TAILQ_INSERT_TAIL(&sc->sc_xferq, task, task_entry);
1380723c0bfSriastradh 	KASSERT(task->task_bp == NULL);
1390723c0bfSriastradh 	KASSERT(task->task_errorp == NULL);
1400723c0bfSriastradh 
1410723c0bfSriastradh 	return task;
1420723c0bfSriastradh }
1430723c0bfSriastradh 
1440723c0bfSriastradh static void
1450723c0bfSriastradh ld_sdmmc_task_put(struct ld_sdmmc_softc *sc, struct ld_sdmmc_task *task)
1460723c0bfSriastradh {
1470723c0bfSriastradh 
1480723c0bfSriastradh 	KASSERT(mutex_owned(&sc->sc_lock));
1490723c0bfSriastradh 
1500723c0bfSriastradh 	TAILQ_REMOVE(&sc->sc_xferq, task, task_entry);
1510723c0bfSriastradh 	TAILQ_INSERT_TAIL(&sc->sc_freeq, task, task_entry);
1520723c0bfSriastradh 	task->task_bp = NULL;
1530723c0bfSriastradh 	task->task_errorp = NULL;
1540723c0bfSriastradh }
1550723c0bfSriastradh 
/*
 * Cancel one pending task during detach: stop its restart callout,
 * try to pull it off the host controller's task queue, and fail the
 * associated bio or synchronous waiter with ENXIO.
 *
 * Caller must hold sc_lock, and sc_dying must already be set so that
 * a concurrently-running task/callout knows to give up on its own.
 */
static void
ld_sdmmc_task_cancel(struct ld_sdmmc_softc *sc, struct ld_sdmmc_task *task)
{
	struct buf *bp;
	int *errorp;

	KASSERT(mutex_owned(&sc->sc_lock));
	KASSERT(sc->sc_dying);

	/*
	 * Either the callout or the task may be pending, but not both.
	 * First, determine whether the callout is pending.
	 */
	if (callout_pending(&task->task_restart_ch) ||
	    callout_invoking(&task->task_restart_ch)) {
		/*
		 * The callout either is pending, or just started but
		 * is waiting for us to release the lock.  At this
		 * point, it will notice sc->sc_dying and give up, so
		 * just wait for it to complete and then we will
		 * release everything.
		 */
		callout_halt(&task->task_restart_ch, &sc->sc_lock);
	} else {
		/*
		 * If the callout is running, it has just scheduled, so
		 * after we wait for the callout to finish running, the
		 * task is either pending or running.  If the task is
		 * already running, it will notice sc->sc_dying and
		 * give up; otherwise we have to release everything.
		 */
		callout_halt(&task->task_restart_ch, &sc->sc_lock);
		if (!sdmmc_del_task(sc->sc_sf->sc, &task->task, &sc->sc_lock))
			return; /* task already started, let it clean up */
	}

	/*
	 * It is our responsibility to clean up.  Move it from xferq
	 * back to freeq and make sure to notify anyone waiting that
	 * it's finished.
	 */
	bp = task->task_bp;
	errorp = task->task_errorp;
	ld_sdmmc_task_put(sc, task);

	/*
	 * If the task was for an asynchronous I/O xfer, fail the I/O
	 * xfer, with the softc lock dropped since this is a callback
	 * into arbitrary other subsystems.
	 */
	if (bp) {
		mutex_exit(&sc->sc_lock);
		/*
		 * XXX We assume that the same sequence works for bio
		 * and discard -- that lddiscardend is just the same as
		 * setting bp->b_resid = bp->b_bcount in the event of
		 * error and then calling lddone.
		 */
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		lddone(&sc->sc_ld, bp);
		mutex_enter(&sc->sc_lock);
	}

	/*
	 * If the task was for a synchronous operation (cachesync),
	 * then just set the error indicator and wake up the waiter.
	 */
	if (errorp) {
		*errorp = ENXIO;
		cv_broadcast(&sc->sc_cv);
	}
}
229e0297d1eSnonaka 
230e0297d1eSnonaka /* ARGSUSED */
231e0297d1eSnonaka static int
2323261738bScegger ld_sdmmc_match(device_t parent, cfdata_t match, void *aux)
233e0297d1eSnonaka {
234e0297d1eSnonaka 	struct sdmmc_softc *sdmsc = device_private(parent);
235e0297d1eSnonaka 
236e0297d1eSnonaka 	if (ISSET(sdmsc->sc_flags, SMF_MEM_MODE))
237e0297d1eSnonaka 		return 1;
238e0297d1eSnonaka 	return 0;
239e0297d1eSnonaka }
240e0297d1eSnonaka 
/*
 * Autoconf attach: print card identity, initialize the task pool and
 * locking, fill in the ld(4) callbacks, and defer the ldattach() call
 * to a kernel thread (see comment below for why).
 */
/* ARGSUSED */
static void
ld_sdmmc_attach(device_t parent, device_t self, void *aux)
{
	struct ld_sdmmc_softc *sc = device_private(self);
	struct sdmmc_attach_args *sa = aux;
	struct ld_softc *ld = &sc->sc_ld;
	struct ld_sdmmc_task *task;
	struct lwp *lwp;
	const char *cardtype;
	int i;

	ld->sc_dv = self;

	/* Announce the CID: manufacturer, OEM, product name, rev, serial, date. */
	aprint_normal(": <0x%02x:0x%04x:%s:0x%02x:0x%08x:0x%03x>\n",
	    sa->sf->cid.mid, sa->sf->cid.oid, sa->sf->cid.pnm,
	    sa->sf->cid.rev, sa->sf->cid.psn, sa->sf->cid.mdt);
	aprint_naive("\n");

	if (ISSET(sa->sf->sc->sc_flags, SMF_SD_MODE)) {
		cardtype = "SD card";
	} else {
		cardtype = "MMC";
	}
	/* Freed in ld_sdmmc_detach(). */
	sc->sc_typename = kmem_asprintf("%s 0x%02x:0x%04x:%s",
	    cardtype, sa->sf->cid.mid, sa->sf->cid.oid, sa->sf->cid.pnm);

	/*
	 * Register discard statistics.  NOTE(review): sc_ev_cachesyncbusy
	 * is used later but never attached here -- confirm intent.
	 */
	evcnt_attach_dynamic(&sc->sc_ev_discard, EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "sdmmc discard count");
	evcnt_attach_dynamic(&sc->sc_ev_discarderr, EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "sdmmc discard errors");
	evcnt_attach_dynamic(&sc->sc_ev_discardbusy, EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "sdmmc discard busy");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SDMMC);
	cv_init(&sc->sc_cv, "ldsdmmc");
	TAILQ_INIT(&sc->sc_freeq);
	TAILQ_INIT(&sc->sc_xferq);
	sc->sc_dying = false;

	/* All tasks start out free. */
	const int ntask = __arraycount(sc->sc_task);
	for (i = 0; i < ntask; i++) {
		task = &sc->sc_task[i];
		task->task_sc = sc;
		callout_init(&task->task_restart_ch, CALLOUT_MPSAFE);
		TAILQ_INSERT_TAIL(&sc->sc_freeq, task, task_entry);
	}

	sc->sc_hwunit = 0;	/* always 0? */
	sc->sc_sf = sa->sf;

	/* Hook up the generic ld(4) layer. */
	ld->sc_flags = LDF_ENABLED | LDF_MPSAFE;
	ld->sc_secperunit = sc->sc_sf->csd.capacity;
	ld->sc_secsize = SDMMC_SECTOR_SIZE;
	ld->sc_maxxfer = MAXPHYS;
	ld->sc_maxqueuecnt = LD_SDMMC_MAXQUEUECNT;
	ld->sc_dump = ld_sdmmc_dump;
	ld->sc_start = ld_sdmmc_start;
	ld->sc_discard = ld_sdmmc_discard;
	ld->sc_ioctl = ld_sdmmc_ioctl;
	ld->sc_typename = sc->sc_typename;

	/*
	 * Defer attachment of ld + disk subsystem to a thread.
	 *
	 * This is necessary because wedge autodiscover needs to
	 * open and call into the ld driver, which could deadlock
	 * when the sdmmc driver isn't ready in early bootstrap.
	 *
	 * Don't mark thread as MPSAFE to keep aprint output sane.
	 */
	config_pending_incr(self);
	if (kthread_create(PRI_NONE, 0, NULL,
	    ld_sdmmc_doattach, sc, &lwp, "%sattach", device_xname(self))) {
		aprint_error_dev(self, "couldn't create thread\n");
	}
}
31884b5216cSnonaka 
/*
 * Deferred attach thread body: attach the ld(4)/disk subsystem, print
 * the bus width, transfer mode, cache, and clock summary line, then
 * release the config_pending hold taken in ld_sdmmc_attach() and exit.
 */
static void
ld_sdmmc_doattach(void *arg)
{
	struct ld_sdmmc_softc *sc = (struct ld_sdmmc_softc *)arg;
	struct ld_softc *ld = &sc->sc_ld;
	struct sdmmc_softc *ssc = device_private(device_parent(ld->sc_dv));
	const u_int emmc_cache_size = sc->sc_sf->ext_csd.cache_size;
	const bool sd_cache = sc->sc_sf->ssr.cache;
	char buf[sizeof("9999 KB")];

	ldattach(ld, BUFQ_DISK_DEFAULT_STRAT);
	aprint_normal_dev(ld->sc_dv, "%d-bit width,", sc->sc_sf->width);
	if (ssc->sc_transfer_mode != NULL)
		aprint_normal(" %s,", ssc->sc_transfer_mode);
	/* eMMC advertises a cache size; SD only a cache-present bit. */
	if (emmc_cache_size > 0) {
		format_bytes(buf, sizeof(buf), emmc_cache_size);
		aprint_normal(" %s cache%s,", buf,
		    ISSET(sc->sc_sf->flags, SFF_CACHE_ENABLED) ? "" :
		    " (disabled)");
	} else if (sd_cache) {
		aprint_normal(" Cache%s,",
		    ISSET(sc->sc_sf->flags, SFF_CACHE_ENABLED) ? "" :
		    " (disabled)");
	}
	/* sc_busclk is in KHz; print MHz when >= 1000. */
	if ((ssc->sc_busclk / 1000) != 0)
		aprint_normal(" %u.%03u MHz\n",
		    ssc->sc_busclk / 1000, ssc->sc_busclk % 1000);
	else
		aprint_normal(" %u KHz\n", ssc->sc_busclk % 1000);
	config_pending_decr(ld->sc_dv);
	kthread_exit(0);
}
351e0297d1eSnonaka 
/*
 * Autoconf detach: refuse or commit via ldbegindetach(), cancel all
 * outstanding tasks, drain synchronous waiters, then tear down the
 * disk, callouts, locks, event counters, and the typename string.
 */
static int
ld_sdmmc_detach(device_t dev, int flags)
{
	struct ld_sdmmc_softc *sc = device_private(dev);
	struct ld_softc *ld = &sc->sc_ld;
	struct ld_sdmmc_task *task;
	int error, i;

	/*
	 * Block new xfers, or fail if the disk is still open and the
	 * detach isn't forced.  After this point, we are committed to
	 * detaching.
	 */
	error = ldbegindetach(ld, flags);
	if (error)
		return error;

	/*
	 * Abort all pending tasks, and wait for all pending waiters to
	 * notice that we're gone.
	 */
	mutex_enter(&sc->sc_lock);
	sc->sc_dying = true;
	while ((task = TAILQ_FIRST(&sc->sc_xferq)) != NULL)
		ld_sdmmc_task_cancel(sc, task);
	while (sc->sc_busy)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	mutex_exit(&sc->sc_lock);

	/* Done!  Destroy the disk.  */
	ldenddetach(ld);

	KASSERT(TAILQ_EMPTY(&sc->sc_xferq));

	for (i = 0; i < __arraycount(sc->sc_task); i++)
		callout_destroy(&sc->sc_task[i].task_restart_ch);

	cv_destroy(&sc->sc_cv);
	mutex_destroy(&sc->sc_lock);

	/* Only the three counters attached in ld_sdmmc_attach(). */
	evcnt_detach(&sc->sc_ev_discard);
	evcnt_detach(&sc->sc_ev_discarderr);
	evcnt_detach(&sc->sc_ev_discardbusy);
	kmem_free(sc->sc_typename, strlen(sc->sc_typename) + 1);

	return 0;
}
399e0297d1eSnonaka 
400e0297d1eSnonaka static int
401e0297d1eSnonaka ld_sdmmc_start(struct ld_softc *ld, struct buf *bp)
402e0297d1eSnonaka {
403e0297d1eSnonaka 	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);
4040723c0bfSriastradh 	struct ld_sdmmc_task *task;
4050723c0bfSriastradh 	int error;
4069866aa05Smlelstv 
4070723c0bfSriastradh 	mutex_enter(&sc->sc_lock);
4080723c0bfSriastradh 	if ((task = ld_sdmmc_task_get(sc)) == NULL) {
4090723c0bfSriastradh 		error = EAGAIN;
4100723c0bfSriastradh 		goto out;
4110723c0bfSriastradh 	}
412e0297d1eSnonaka 
413e0297d1eSnonaka 	task->task_bp = bp;
4148ad0b042Skiyohara 	task->task_retries = 0;
415e0297d1eSnonaka 	sdmmc_init_task(&task->task, ld_sdmmc_dobio, task);
416e0297d1eSnonaka 
417e0297d1eSnonaka 	sdmmc_add_task(sc->sc_sf->sc, &task->task);
418e0297d1eSnonaka 
4190723c0bfSriastradh 	/* Success!  The xfer is now queued.  */
4200723c0bfSriastradh 	error = 0;
4210723c0bfSriastradh 
4220723c0bfSriastradh out:	mutex_exit(&sc->sc_lock);
4230723c0bfSriastradh 	return error;
424e0297d1eSnonaka }
425e0297d1eSnonaka 
/*
 * Callout handler: retry a failed bio after RECOVERYTIME by requeueing
 * its task on the sdmmc task thread, unless the device is detaching
 * (in which case ld_sdmmc_task_cancel() will clean up after us).
 */
static void
ld_sdmmc_restart(void *arg)
{
	struct ld_sdmmc_task *task = (struct ld_sdmmc_task *)arg;
	struct ld_sdmmc_softc *sc = task->task_sc;
	struct buf *bp = task->task_bp;

	/* Reset the residual so the retried xfer starts from scratch. */
	bp->b_resid = bp->b_bcount;

	mutex_enter(&sc->sc_lock);
	callout_ack(&task->task_restart_ch);
	if (!sc->sc_dying)
		sdmmc_add_task(sc->sc_sf->sc, &task->task);
	mutex_exit(&sc->sc_lock);
}
4418ad0b042Skiyohara 
4428ad0b042Skiyohara static void
443e0297d1eSnonaka ld_sdmmc_dobio(void *arg)
444e0297d1eSnonaka {
445e0297d1eSnonaka 	struct ld_sdmmc_task *task = (struct ld_sdmmc_task *)arg;
446e0297d1eSnonaka 	struct ld_sdmmc_softc *sc = task->task_sc;
447e0297d1eSnonaka 	struct buf *bp = task->task_bp;
448ab5c3234Smlelstv 	int error;
449e0297d1eSnonaka 
450e0297d1eSnonaka 	/*
451e0297d1eSnonaka 	 * I/O operation
452e0297d1eSnonaka 	 */
453e0297d1eSnonaka 	DPRINTF(("%s: I/O operation (dir=%s, blkno=0x%jx, bcnt=0x%x)\n",
454e0297d1eSnonaka 	    device_xname(sc->sc_ld.sc_dv), bp->b_flags & B_READ ? "IN" : "OUT",
455e0297d1eSnonaka 	    bp->b_rawblkno, bp->b_bcount));
456e0297d1eSnonaka 
457e0297d1eSnonaka 	/* is everything done in terms of blocks? */
458e0297d1eSnonaka 	if (bp->b_rawblkno >= sc->sc_sf->csd.capacity) {
459e0297d1eSnonaka 		/* trying to read or write past end of device */
460b3a64839Smlelstv 		aprint_error_dev(sc->sc_ld.sc_dv,
461b3a64839Smlelstv 		    "blkno 0x%" PRIu64 " exceeds capacity %d\n",
462b3a64839Smlelstv 		    bp->b_rawblkno, sc->sc_sf->csd.capacity);
463b3a64839Smlelstv 		bp->b_error = EINVAL;
464e0297d1eSnonaka 		bp->b_resid = bp->b_bcount;
46571dda274Sjmcneill 
46671dda274Sjmcneill 		goto done;
467e0297d1eSnonaka 	}
468e0297d1eSnonaka 
469e0297d1eSnonaka 	if (bp->b_flags & B_READ)
470e0297d1eSnonaka 		error = sdmmc_mem_read_block(sc->sc_sf, bp->b_rawblkno,
471e0297d1eSnonaka 		    bp->b_data, bp->b_bcount);
472e0297d1eSnonaka 	else
473e0297d1eSnonaka 		error = sdmmc_mem_write_block(sc->sc_sf, bp->b_rawblkno,
474e0297d1eSnonaka 		    bp->b_data, bp->b_bcount);
475e0297d1eSnonaka 	if (error) {
4768ad0b042Skiyohara 		if (task->task_retries < LD_SDMMC_IORETRIES) {
4778ad0b042Skiyohara 			struct dk_softc *dksc = &sc->sc_ld.sc_dksc;
4788ad0b042Skiyohara 			struct cfdriver *cd = device_cfdriver(dksc->sc_dev);
4798ad0b042Skiyohara 
4808ad0b042Skiyohara 			diskerr(bp, cd->cd_name, "error", LOG_PRINTF, 0,
4818ad0b042Skiyohara 				dksc->sc_dkdev.dk_label);
4828ad0b042Skiyohara 			printf(", retrying\n");
4838ad0b042Skiyohara 			task->task_retries++;
4840723c0bfSriastradh 			mutex_enter(&sc->sc_lock);
4850723c0bfSriastradh 			if (sc->sc_dying) {
4860723c0bfSriastradh 				bp->b_resid = bp->b_bcount;
4870723c0bfSriastradh 				bp->b_error = error;
4880723c0bfSriastradh 				goto done_locked;
4890723c0bfSriastradh 			} else {
4900723c0bfSriastradh 				callout_reset(&task->task_restart_ch,
4910723c0bfSriastradh 				    RECOVERYTIME, ld_sdmmc_restart, task);
4920723c0bfSriastradh 			}
4930723c0bfSriastradh 			mutex_exit(&sc->sc_lock);
4948ad0b042Skiyohara 			return;
4958ad0b042Skiyohara 		}
496b3a64839Smlelstv 		bp->b_error = error;
497e0297d1eSnonaka 		bp->b_resid = bp->b_bcount;
498e0297d1eSnonaka 	} else {
499e0297d1eSnonaka 		bp->b_resid = 0;
500e0297d1eSnonaka 	}
501e0297d1eSnonaka 
50271dda274Sjmcneill done:
5030723c0bfSriastradh 	/* Dissociate the task from the I/O xfer and release it.  */
5040723c0bfSriastradh 	mutex_enter(&sc->sc_lock);
5050723c0bfSriastradh done_locked:
5060723c0bfSriastradh 	ld_sdmmc_task_put(sc, task);
5070723c0bfSriastradh 	mutex_exit(&sc->sc_lock);
5088ad0b042Skiyohara 
509e0297d1eSnonaka 	lddone(&sc->sc_ld, bp);
510e0297d1eSnonaka }
511e0297d1eSnonaka 
512e0297d1eSnonaka static int
513e0297d1eSnonaka ld_sdmmc_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
514e0297d1eSnonaka {
515e0297d1eSnonaka 	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);
516e0297d1eSnonaka 
517e0297d1eSnonaka 	return sdmmc_mem_write_block(sc->sc_sf, blkno, data,
518e0297d1eSnonaka 	    blkcnt * ld->sc_secsize);
519e0297d1eSnonaka }
520916bdfa5Spgoyette 
52182e84007Sjmcneill static void
52282e84007Sjmcneill ld_sdmmc_dodiscard(void *arg)
52382e84007Sjmcneill {
52482e84007Sjmcneill 	struct ld_sdmmc_task *task = arg;
52582e84007Sjmcneill 	struct ld_sdmmc_softc *sc = task->task_sc;
526aaf4d313Smlelstv 	struct buf *bp = task->task_bp;
527aaf4d313Smlelstv 	uint32_t sblkno, nblks;
52882e84007Sjmcneill 	int error;
52982e84007Sjmcneill 
530aaf4d313Smlelstv 	/* first and last block to erase */
531aaf4d313Smlelstv 	sblkno = bp->b_rawblkno;
532aaf4d313Smlelstv 	nblks  = howmany(bp->b_bcount, sc->sc_ld.sc_secsize);
533aaf4d313Smlelstv 
53482e84007Sjmcneill 	/* An error from discard is non-fatal */
535aaf4d313Smlelstv 	error = sdmmc_mem_discard(sc->sc_sf, sblkno, sblkno + nblks - 1);
5360723c0bfSriastradh 
5370723c0bfSriastradh 	/* Count error or success and release the task.  */
5380723c0bfSriastradh 	mutex_enter(&sc->sc_lock);
5390723c0bfSriastradh 	if (error)
54082e84007Sjmcneill 		sc->sc_ev_discarderr.ev_count++;
54182e84007Sjmcneill 	else
54282e84007Sjmcneill 		sc->sc_ev_discard.ev_count++;
5430723c0bfSriastradh 	ld_sdmmc_task_put(sc, task);
5440723c0bfSriastradh 	mutex_exit(&sc->sc_lock);
545aaf4d313Smlelstv 
5460723c0bfSriastradh 	/* Record the error and notify the xfer of completion.  */
547aaf4d313Smlelstv 	if (error)
548aaf4d313Smlelstv 		bp->b_error = error;
549aaf4d313Smlelstv 	lddiscardend(&sc->sc_ld, bp);
55082e84007Sjmcneill }
55182e84007Sjmcneill 
552087ef2d9Sjmcneill static int
553aaf4d313Smlelstv ld_sdmmc_discard(struct ld_softc *ld, struct buf *bp)
554087ef2d9Sjmcneill {
555087ef2d9Sjmcneill 	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);
5560723c0bfSriastradh 	struct ld_sdmmc_task *task;
5570723c0bfSriastradh 	int error;
558087ef2d9Sjmcneill 
5590723c0bfSriastradh 	mutex_enter(&sc->sc_lock);
5600723c0bfSriastradh 
5610723c0bfSriastradh 	/* Acquire a free task, or drop the request altogether.  */
5620723c0bfSriastradh 	if ((task = ld_sdmmc_task_get(sc)) == NULL) {
56382e84007Sjmcneill 		sc->sc_ev_discardbusy.ev_count++;
5640723c0bfSriastradh 		error = EBUSY;
5650723c0bfSriastradh 		goto out;
56682e84007Sjmcneill 	}
56782e84007Sjmcneill 
5680723c0bfSriastradh 	/* Set up the task and schedule it.  */
569aaf4d313Smlelstv 	task->task_bp = bp;
57082e84007Sjmcneill 	sdmmc_init_task(&task->task, ld_sdmmc_dodiscard, task);
57182e84007Sjmcneill 
57282e84007Sjmcneill 	sdmmc_add_task(sc->sc_sf->sc, &task->task);
57382e84007Sjmcneill 
5740723c0bfSriastradh 	/* Success!  The request is queued.  */
5750723c0bfSriastradh 	error = 0;
5760723c0bfSriastradh 
5770723c0bfSriastradh out:	mutex_exit(&sc->sc_lock);
5780723c0bfSriastradh 	return error;
579087ef2d9Sjmcneill }
580087ef2d9Sjmcneill 
581d991bdc4Sjmcneill static void
582d991bdc4Sjmcneill ld_sdmmc_docachesync(void *arg)
583d991bdc4Sjmcneill {
584d991bdc4Sjmcneill 	struct ld_sdmmc_task *task = arg;
585d991bdc4Sjmcneill 	struct ld_sdmmc_softc *sc = task->task_sc;
5860723c0bfSriastradh 	int error;
587d991bdc4Sjmcneill 
5880723c0bfSriastradh 	/* Flush the cache.  */
5890723c0bfSriastradh 	error = sdmmc_mem_flush_cache(sc->sc_sf, task->task_poll);
590d991bdc4Sjmcneill 
5910723c0bfSriastradh 	mutex_enter(&sc->sc_lock);
5920723c0bfSriastradh 
5930723c0bfSriastradh 	/* Notify the other thread that we're done; pass on the error.  */
5940723c0bfSriastradh 	*task->task_errorp = error;
5950723c0bfSriastradh 	cv_broadcast(&sc->sc_cv);
5960723c0bfSriastradh 
5970723c0bfSriastradh 	/* Release the task.  */
5980723c0bfSriastradh 	ld_sdmmc_task_put(sc, task);
5990723c0bfSriastradh 
6000723c0bfSriastradh 	mutex_exit(&sc->sc_lock);
601d991bdc4Sjmcneill }
602d991bdc4Sjmcneill 
/*
 * Synchronously flush the device cache.  Queues a task and waits for
 * ld_sdmmc_docachesync() (or detach) to report a result; "error"
 * starts at the sentinel -1, which no completion path produces, so
 * the wait loop terminates exactly when a result has been posted.
 */
static int
ld_sdmmc_cachesync(struct ld_softc *ld, bool poll)
{
	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);
	struct sdmmc_softc *sdmmc = device_private(device_parent(ld->sc_dv));
	struct ld_sdmmc_task *task;
	int error = -1;

	/*
	 * If we come here through the sdmmc discovery task, we can't
	 * wait for a new task because the new task can't even begin
	 * until the sdmmc discovery task has completed.
	 *
	 * XXX This is wrong, because there may already be queued I/O
	 * tasks ahead of us.  Fixing this properly requires doing
	 * discovery in a separate thread.  But this should avoid the
	 * deadlock of PR kern/57870 (https://gnats.NetBSD.org/57870)
	 * until we do split that up.
	 */
	if (curlwp == sdmmc->sc_tskq_lwp)
		return sdmmc_mem_flush_cache(sc->sc_sf, poll);

	mutex_enter(&sc->sc_lock);

	/* Acquire a free task, or fail with EBUSY.  */
	if ((task = ld_sdmmc_task_get(sc)) == NULL) {
		sc->sc_ev_cachesyncbusy.ev_count++;
		error = EBUSY;
		goto out;
	}

	/* Set up the task and schedule it.  */
	task->task_poll = poll;
	task->task_errorp = &error;
	sdmmc_init_task(&task->task, ld_sdmmc_docachesync, task);

	sdmmc_add_task(sc->sc_sf->sc, &task->task);

	/*
	 * Wait for the task to complete.  If the device is yanked,
	 * detach will notify us.  Keep the busy count up until we're
	 * done waiting so that the softc doesn't go away until we're
	 * done.
	 */
	sc->sc_busy++;
	KASSERT(sc->sc_busy <= LD_SDMMC_MAXTASKCNT);
	while (error == -1)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	if (--sc->sc_busy == 0)
		cv_broadcast(&sc->sc_cv);

out:	mutex_exit(&sc->sc_lock);
	return error;
}
657d991bdc4Sjmcneill 
6583eecdf69Sjmcneill static int
6593eecdf69Sjmcneill ld_sdmmc_ioctl(struct ld_softc *ld, u_long cmd, void *addr, int32_t flag,
6603eecdf69Sjmcneill     bool poll)
6613eecdf69Sjmcneill {
6623eecdf69Sjmcneill 
6633eecdf69Sjmcneill 	switch (cmd) {
6643eecdf69Sjmcneill 	case DIOCCACHESYNC:
665d991bdc4Sjmcneill 		return ld_sdmmc_cachesync(ld, poll);
6663eecdf69Sjmcneill 	default:
6673eecdf69Sjmcneill 		return EPASSTHROUGH;
6683eecdf69Sjmcneill 	}
6693eecdf69Sjmcneill }
6703eecdf69Sjmcneill 
671916bdfa5Spgoyette MODULE(MODULE_CLASS_DRIVER, ld_sdmmc, "ld");
672916bdfa5Spgoyette 
673916bdfa5Spgoyette #ifdef _MODULE
674916bdfa5Spgoyette /*
675916bdfa5Spgoyette  * XXX Don't allow ioconf.c to redefine the "struct cfdriver ld_cd"
676916bdfa5Spgoyette  * XXX it will be defined in the common-code module
677916bdfa5Spgoyette  */
678916bdfa5Spgoyette #undef  CFDRIVER_DECL
679916bdfa5Spgoyette #define CFDRIVER_DECL(name, class, attr)
680916bdfa5Spgoyette #include "ioconf.c"
681916bdfa5Spgoyette #endif
682916bdfa5Spgoyette 
/*
 * Module control: init/fini the autoconf glue when built as a module;
 * a no-op (returns 0) when built into the kernel.
 */
static int
ld_sdmmc_modcmd(modcmd_t cmd, void *opaque)
{
#ifdef _MODULE
	/*
	 * We ignore the cfdriver_vec[] that ioconf provides, since
	 * the cfdrivers are attached already.
	 */
	static struct cfdriver * const no_cfdriver_vec[] = { NULL };
#endif
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(no_cfdriver_vec,
		    cfattach_ioconf_ld_sdmmc, cfdata_ioconf_ld_sdmmc);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(no_cfdriver_vec,
		    cfattach_ioconf_ld_sdmmc, cfdata_ioconf_ld_sdmmc);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}
713