/*	$NetBSD: ld_sdmmc.c,v 1.45 2025/04/13 02:34:03 rin Exp $	*/

/*
 * Copyright (c) 2008 KIYOHARA Takashi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_sdmmc.c,v 1.45 2025/04/13 02:34:03 rin Exp $");

#ifdef _KERNEL_OPT
#include "opt_sdmmc.h"
#endif

#include <sys/param.h>
#include <sys/types.h>

#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/dkio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <dev/ldvar.h>

#include <dev/sdmmc/sdmmcvar.h>

#include "ioconf.h"

#ifdef LD_SDMMC_DEBUG
#define DPRINTF(s)	printf s
#else
#define DPRINTF(s)	__nothing
#endif

#define	LD_SDMMC_IORETRIES	5	/* number of retries before giving up */
#define	RECOVERYTIME		hz/2	/* time to wait before retrying a cmd */

#define	LD_SDMMC_MAXQUEUECNT	4	/* number of queued bio requests */
#define	LD_SDMMC_MAXTASKCNT	8	/* number of tasks in task pool */

struct ld_sdmmc_softc;

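/*
 * Transfers and management operations are handed to the sdmmc task
 * thread as ld_sdmmc_tasks.  A fixed pool of these lives in the softc;
 * ld_sdmmc_task_get() moves one from sc_freeq to sc_xferq and
 * ld_sdmmc_task_put() returns it when the operation completes.
 */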
struct ld_sdmmc_task {
	struct sdmmc_task task;
	struct ld_sdmmc_softc *task_sc;

	struct buf *task_bp;
	int task_retries; /* number of xfer retries */
	struct callout task_restart_ch;

	bool task_poll;
	int *task_errorp;

	TAILQ_ENTRY(ld_sdmmc_task) task_entry;
};

struct ld_sdmmc_softc {
	struct ld_softc sc_ld;
	int sc_hwunit;
	char *sc_typename;
	struct sdmmc_function *sc_sf;

	kmutex_t sc_lock;
	kcondvar_t sc_cv;
	TAILQ_HEAD(, ld_sdmmc_task) sc_freeq;
	TAILQ_HEAD(, ld_sdmmc_task) sc_xferq;
	unsigned sc_busy;
	bool sc_dying;

	struct evcnt sc_ev_discard;	/* discard counter */
	struct evcnt sc_ev_discarderr;	/* discard error counter */
	struct evcnt sc_ev_discardbusy;	/* discard busy counter */
	struct evcnt sc_ev_cachesyncbusy; /* cache sync busy counter */

	struct ld_sdmmc_task sc_task[LD_SDMMC_MAXTASKCNT];
};

static int ld_sdmmc_match(device_t, cfdata_t, void *);
static void ld_sdmmc_attach(device_t, device_t, void *);
static int ld_sdmmc_detach(device_t, int);

static int ld_sdmmc_dump(struct ld_softc *, void *, daddr_t, int);
static int ld_sdmmc_start(struct ld_softc *, struct buf *);
static void ld_sdmmc_restart(void *);
static int ld_sdmmc_discard(struct ld_softc *, struct buf *);
static int ld_sdmmc_ioctl(struct ld_softc *, u_long, void *, int32_t, bool);

static void ld_sdmmc_doattach(void *);
static void ld_sdmmc_dobio(void *);
static void ld_sdmmc_dodiscard(void *);

CFATTACH_DECL_NEW(ld_sdmmc, sizeof(struct ld_sdmmc_softc),
    ld_sdmmc_match, ld_sdmmc_attach, ld_sdmmc_detach, NULL);

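/*
 * ld_sdmmc_task_get:
 *
 *	Grab a task off the free list and move it to the xfer queue, or
 *	return NULL if none are free or the device is detaching.  The
 *	caller must hold sc_lock.
 */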
static struct ld_sdmmc_task *
ld_sdmmc_task_get(struct ld_sdmmc_softc *sc)
{
	struct ld_sdmmc_task *task;

	KASSERT(mutex_owned(&sc->sc_lock));

	if (sc->sc_dying || (task = TAILQ_FIRST(&sc->sc_freeq)) == NULL)
		return NULL;
	TAILQ_REMOVE(&sc->sc_freeq, task, task_entry);
	TAILQ_INSERT_TAIL(&sc->sc_xferq, task, task_entry);
	KASSERT(task->task_bp == NULL);
	KASSERT(task->task_errorp == NULL);

	return task;
}

static void
ld_sdmmc_task_put(struct ld_sdmmc_softc *sc, struct ld_sdmmc_task *task)
{

	KASSERT(mutex_owned(&sc->sc_lock));

	TAILQ_REMOVE(&sc->sc_xferq, task, task_entry);
	TAILQ_INSERT_TAIL(&sc->sc_freeq, task, task_entry);
	task->task_bp = NULL;
	task->task_errorp = NULL;
}

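/*
 * ld_sdmmc_task_cancel:
 *
 *	Cancel a queued task during detach: halt its restart callout,
 *	pull it off the sdmmc task queue if it has not started yet, and
 *	fail any associated bio or synchronous waiter with ENXIO.  The
 *	caller must hold sc_lock and have set sc_dying.
 */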
static void
ld_sdmmc_task_cancel(struct ld_sdmmc_softc *sc, struct ld_sdmmc_task *task)
{
	struct buf *bp;
	int *errorp;

	KASSERT(mutex_owned(&sc->sc_lock));
	KASSERT(sc->sc_dying);

	/*
	 * Either the callout or the task may be pending, but not both.
	 * First, determine whether the callout is pending.
	 */
	if (callout_pending(&task->task_restart_ch) ||
	    callout_invoking(&task->task_restart_ch)) {
		/*
		 * The callout either is pending, or just started but
		 * is waiting for us to release the lock.  At this
		 * point, it will notice sc->sc_dying and give up, so
		 * just wait for it to complete and then we will
		 * release everything.
		 */
		callout_halt(&task->task_restart_ch, &sc->sc_lock);
	} else {
		/*
		 * If the callout is running, it has just scheduled, so
		 * after we wait for the callout to finish running, the
		 * task is either pending or running.  If the task is
		 * already running, it will notice sc->sc_dying and
		 * give up; otherwise we have to release everything.
		 */
		callout_halt(&task->task_restart_ch, &sc->sc_lock);
		if (!sdmmc_del_task(sc->sc_sf->sc, &task->task, &sc->sc_lock))
			return; /* task already started, let it clean up */
	}

	/*
	 * It is our responsibility to clean up.  Move it from xferq
	 * back to freeq and make sure to notify anyone waiting that
	 * it's finished.
	 */
	bp = task->task_bp;
	errorp = task->task_errorp;
	ld_sdmmc_task_put(sc, task);

	/*
	 * If the task was for an asynchronous I/O xfer, fail the I/O
	 * xfer, with the softc lock dropped since this is a callback
	 * into arbitrary other subsystems.
	 */
	if (bp) {
		mutex_exit(&sc->sc_lock);
		/*
		 * XXX We assume that the same sequence works for bio
		 * and discard -- that lddiscardend is just the same as
		 * setting bp->b_resid = bp->b_bcount in the event of
		 * error and then calling lddone.
		 */
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		lddone(&sc->sc_ld, bp);
		mutex_enter(&sc->sc_lock);
	}

	/*
	 * If the task was for a synchronous operation (cachesync),
	 * then just set the error indicator and wake up the waiter.
	 */
	if (errorp) {
		*errorp = ENXIO;
		cv_broadcast(&sc->sc_cv);
	}
}

/* ARGSUSED */
static int
ld_sdmmc_match(device_t parent, cfdata_t match, void *aux)
{
	struct sdmmc_softc *sdmsc = device_private(parent);

	if (ISSET(sdmsc->sc_flags, SMF_MEM_MODE))
		return 1;
	return 0;
}

/* ARGSUSED */
static void
ld_sdmmc_attach(device_t parent, device_t self, void *aux)
{
	struct ld_sdmmc_softc *sc = device_private(self);
	struct sdmmc_attach_args *sa = aux;
	struct ld_softc *ld = &sc->sc_ld;
	struct ld_sdmmc_task *task;
	struct lwp *lwp;
	const char *cardtype;
	int i;

	ld->sc_dv = self;

	aprint_normal(": <0x%02x:0x%04x:%s:0x%02x:0x%08x:0x%03x>\n",
	    sa->sf->cid.mid, sa->sf->cid.oid, sa->sf->cid.pnm,
	    sa->sf->cid.rev, sa->sf->cid.psn, sa->sf->cid.mdt);
	aprint_naive("\n");

	if (ISSET(sa->sf->sc->sc_flags, SMF_SD_MODE)) {
		cardtype = "SD card";
	} else {
		cardtype = "MMC";
	}
	sc->sc_typename = kmem_asprintf("%s 0x%02x:0x%04x:%s",
	    cardtype, sa->sf->cid.mid, sa->sf->cid.oid, sa->sf->cid.pnm);

	evcnt_attach_dynamic(&sc->sc_ev_discard, EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "sdmmc discard count");
	evcnt_attach_dynamic(&sc->sc_ev_discarderr, EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "sdmmc discard errors");
	evcnt_attach_dynamic(&sc->sc_ev_discardbusy, EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "sdmmc discard busy");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SDMMC);
	cv_init(&sc->sc_cv, "ldsdmmc");
	TAILQ_INIT(&sc->sc_freeq);
	TAILQ_INIT(&sc->sc_xferq);
	sc->sc_dying = false;

	const int ntask = __arraycount(sc->sc_task);
	for (i = 0; i < ntask; i++) {
		task = &sc->sc_task[i];
		task->task_sc = sc;
		callout_init(&task->task_restart_ch, CALLOUT_MPSAFE);
		TAILQ_INSERT_TAIL(&sc->sc_freeq, task, task_entry);
	}

	sc->sc_hwunit = 0;	/* always 0? */
	sc->sc_sf = sa->sf;

	ld->sc_flags = LDF_ENABLED | LDF_MPSAFE;
	ld->sc_secperunit = sc->sc_sf->csd.capacity;
	ld->sc_secsize = SDMMC_SECTOR_SIZE;
	ld->sc_maxxfer = MAXPHYS;
	ld->sc_maxqueuecnt = LD_SDMMC_MAXQUEUECNT;
	ld->sc_dump = ld_sdmmc_dump;
	ld->sc_start = ld_sdmmc_start;
	ld->sc_discard = ld_sdmmc_discard;
	ld->sc_ioctl = ld_sdmmc_ioctl;
	ld->sc_typename = sc->sc_typename;

	/*
	 * Defer attachment of ld + disk subsystem to a thread.
	 *
	 * This is necessary because wedge autodiscover needs to
	 * open and call into the ld driver, which could deadlock
	 * when the sdmmc driver isn't ready in early bootstrap.
	 *
	 * Don't mark thread as MPSAFE to keep aprint output sane.
	 */
	config_pending_incr(self);
	if (kthread_create(PRI_NONE, 0, NULL,
	    ld_sdmmc_doattach, sc, &lwp, "%sattach", device_xname(self))) {
		aprint_error_dev(self, "couldn't create thread\n");
	}
}

static void
ld_sdmmc_doattach(void *arg)
{
	struct ld_sdmmc_softc *sc = (struct ld_sdmmc_softc *)arg;
	struct ld_softc *ld = &sc->sc_ld;
	struct sdmmc_softc *ssc = device_private(device_parent(ld->sc_dv));
	const u_int emmc_cache_size = sc->sc_sf->ext_csd.cache_size;
	const bool sd_cache = sc->sc_sf->ssr.cache;
	char buf[sizeof("9999 KB")];

	ldattach(ld, BUFQ_DISK_DEFAULT_STRAT);
	aprint_normal_dev(ld->sc_dv, "%d-bit width,", sc->sc_sf->width);
	if (ssc->sc_transfer_mode != NULL)
		aprint_normal(" %s,", ssc->sc_transfer_mode);
	if (emmc_cache_size > 0) {
		format_bytes(buf, sizeof(buf), emmc_cache_size);
		aprint_normal(" %s cache%s,", buf,
		    ISSET(sc->sc_sf->flags, SFF_CACHE_ENABLED) ? "" :
		    " (disabled)");
	} else if (sd_cache) {
		aprint_normal(" Cache%s,",
		    ISSET(sc->sc_sf->flags, SFF_CACHE_ENABLED) ? "" :
		    " (disabled)");
	}
	if ((ssc->sc_busclk / 1000) != 0)
		aprint_normal(" %u.%03u MHz\n",
		    ssc->sc_busclk / 1000, ssc->sc_busclk % 1000);
	else
		aprint_normal(" %u KHz\n", ssc->sc_busclk % 1000);
	config_pending_decr(ld->sc_dv);
	kthread_exit(0);
}

static int
ld_sdmmc_detach(device_t dev, int flags)
{
	struct ld_sdmmc_softc *sc = device_private(dev);
	struct ld_softc *ld = &sc->sc_ld;
	struct ld_sdmmc_task *task;
	int error, i;

	/*
	 * Block new xfers, or fail if the disk is still open and the
	 * detach isn't forced.  After this point, we are committed to
	 * detaching.
	 */
	error = ldbegindetach(ld, flags);
	if (error)
		return error;

	/*
	 * Abort all pending tasks, and wait for all pending waiters to
	 * notice that we're gone.
	 */
	mutex_enter(&sc->sc_lock);
	sc->sc_dying = true;
	while ((task = TAILQ_FIRST(&sc->sc_xferq)) != NULL)
		ld_sdmmc_task_cancel(sc, task);
	while (sc->sc_busy)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	mutex_exit(&sc->sc_lock);

	/* Done!  Destroy the disk.  */
	ldenddetach(ld);

	KASSERT(TAILQ_EMPTY(&sc->sc_xferq));

	for (i = 0; i < __arraycount(sc->sc_task); i++)
		callout_destroy(&sc->sc_task[i].task_restart_ch);

	cv_destroy(&sc->sc_cv);
	mutex_destroy(&sc->sc_lock);

	evcnt_detach(&sc->sc_ev_discard);
	evcnt_detach(&sc->sc_ev_discarderr);
	evcnt_detach(&sc->sc_ev_discardbusy);
	kmem_free(sc->sc_typename, strlen(sc->sc_typename) + 1);

	return 0;
}

static int
ld_sdmmc_start(struct ld_softc *ld, struct buf *bp)
{
	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);
	struct ld_sdmmc_task *task;
	int error;

	mutex_enter(&sc->sc_lock);
	if ((task = ld_sdmmc_task_get(sc)) == NULL) {
		error = EAGAIN;
		goto out;
	}

	task->task_bp = bp;
	task->task_retries = 0;
	sdmmc_init_task(&task->task, ld_sdmmc_dobio, task);

	sdmmc_add_task(sc->sc_sf->sc, &task->task);

	/* Success!  The xfer is now queued.  */
	error = 0;

out:	mutex_exit(&sc->sc_lock);
	return error;
}

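/*
 * ld_sdmmc_restart:
 *
 *	Callout handler: requeue a failed xfer on the sdmmc task queue
 *	after RECOVERYTIME has elapsed, unless the device is detaching.
 */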
static void
ld_sdmmc_restart(void *arg)
{
	struct ld_sdmmc_task *task = (struct ld_sdmmc_task *)arg;
	struct ld_sdmmc_softc *sc = task->task_sc;
	struct buf *bp = task->task_bp;

	bp->b_resid = bp->b_bcount;

	mutex_enter(&sc->sc_lock);
	callout_ack(&task->task_restart_ch);
	if (!sc->sc_dying)
		sdmmc_add_task(sc->sc_sf->sc, &task->task);
	mutex_exit(&sc->sc_lock);
}

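/*
 * ld_sdmmc_dobio:
 *
 *	Task handler for a bio xfer: issue the read or write, retry up
 *	to LD_SDMMC_IORETRIES times on error, and finally complete the
 *	buf with lddone().
 */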
static void
ld_sdmmc_dobio(void *arg)
{
	struct ld_sdmmc_task *task = (struct ld_sdmmc_task *)arg;
	struct ld_sdmmc_softc *sc = task->task_sc;
	struct buf *bp = task->task_bp;
	int error;

	/*
	 * I/O operation
	 */
	DPRINTF(("%s: I/O operation (dir=%s, blkno=0x%jx, bcnt=0x%x)\n",
	    device_xname(sc->sc_ld.sc_dv), bp->b_flags & B_READ ? "IN" : "OUT",
	    bp->b_rawblkno, bp->b_bcount));

	/* is everything done in terms of blocks? */
	if (bp->b_rawblkno >= sc->sc_sf->csd.capacity) {
		/* trying to read or write past end of device */
		aprint_error_dev(sc->sc_ld.sc_dv,
		    "blkno 0x%" PRIx64 " exceeds capacity %d\n",
		    bp->b_rawblkno, sc->sc_sf->csd.capacity);
		bp->b_error = EINVAL;
		bp->b_resid = bp->b_bcount;

		goto done;
	}

	if (bp->b_flags & B_READ)
		error = sdmmc_mem_read_block(sc->sc_sf, bp->b_rawblkno,
		    bp->b_data, bp->b_bcount);
	else
		error = sdmmc_mem_write_block(sc->sc_sf, bp->b_rawblkno,
		    bp->b_data, bp->b_bcount);
	if (error) {
		if (task->task_retries < LD_SDMMC_IORETRIES) {
			struct dk_softc *dksc = &sc->sc_ld.sc_dksc;
			struct cfdriver *cd = device_cfdriver(dksc->sc_dev);

			diskerr(bp, cd->cd_name, "error", LOG_PRINTF, 0,
				dksc->sc_dkdev.dk_label);
			printf(", retrying\n");
			task->task_retries++;
			mutex_enter(&sc->sc_lock);
			if (sc->sc_dying) {
				bp->b_resid = bp->b_bcount;
				bp->b_error = error;
				goto done_locked;
			} else {
				callout_reset(&task->task_restart_ch,
				    RECOVERYTIME, ld_sdmmc_restart, task);
			}
			mutex_exit(&sc->sc_lock);
			return;
		}
		bp->b_error = error;
		bp->b_resid = bp->b_bcount;
	} else {
		bp->b_resid = 0;
	}

done:
	/* Dissociate the task from the I/O xfer and release it.  */
	mutex_enter(&sc->sc_lock);
done_locked:
	ld_sdmmc_task_put(sc, task);
	mutex_exit(&sc->sc_lock);

	lddone(&sc->sc_ld, bp);
}

static int
ld_sdmmc_dump(struct ld_softc *ld, void *data, daddr_t blkno, int blkcnt)
{
	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);

	if (blkno + blkcnt - 1 >= sc->sc_sf->csd.capacity)
		return EIO;

	return sdmmc_mem_write_block(sc->sc_sf, blkno, data,
	    blkcnt * ld->sc_secsize);
}

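/*
 * ld_sdmmc_dodiscard:
 *
 *	Task handler for a discard request: erase the block range named
 *	by the buf, bump the event counters, and complete the request
 *	with lddiscardend().
 */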
static void
ld_sdmmc_dodiscard(void *arg)
{
	struct ld_sdmmc_task *task = arg;
	struct ld_sdmmc_softc *sc = task->task_sc;
	struct buf *bp = task->task_bp;
	uint32_t sblkno, nblks;
	int error;

	/* compute the range of blocks to erase */
	sblkno = bp->b_rawblkno;
	nblks  = howmany(bp->b_bcount, sc->sc_ld.sc_secsize);

	/* An error from discard is non-fatal */
	error = sdmmc_mem_discard(sc->sc_sf, sblkno, sblkno + nblks - 1);

	/* Count error or success and release the task.  */
	mutex_enter(&sc->sc_lock);
	if (error)
		sc->sc_ev_discarderr.ev_count++;
	else
		sc->sc_ev_discard.ev_count++;
	ld_sdmmc_task_put(sc, task);
	mutex_exit(&sc->sc_lock);

	/* Record the error and notify the xfer of completion.  */
	if (error)
		bp->b_error = error;
	lddiscardend(&sc->sc_ld, bp);
}

static int
ld_sdmmc_discard(struct ld_softc *ld, struct buf *bp)
{
	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);
	struct ld_sdmmc_task *task;
	int error;

	mutex_enter(&sc->sc_lock);

	/* Acquire a free task, or drop the request altogether.  */
	if ((task = ld_sdmmc_task_get(sc)) == NULL) {
		sc->sc_ev_discardbusy.ev_count++;
		error = EBUSY;
		goto out;
	}

	/* Set up the task and schedule it.  */
	task->task_bp = bp;
	sdmmc_init_task(&task->task, ld_sdmmc_dodiscard, task);

	sdmmc_add_task(sc->sc_sf->sc, &task->task);

	/* Success!  The request is queued.  */
	error = 0;

out:	mutex_exit(&sc->sc_lock);
	return error;
}

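/*
 * ld_sdmmc_docachesync:
 *
 *	Task handler for a cache flush: flush the card's cache and hand
 *	the result back to the waiter in ld_sdmmc_cachesync() through
 *	task_errorp.
 */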
static void
ld_sdmmc_docachesync(void *arg)
{
	struct ld_sdmmc_task *task = arg;
	struct ld_sdmmc_softc *sc = task->task_sc;
	int error;

	/* Flush the cache.  */
	error = sdmmc_mem_flush_cache(sc->sc_sf, task->task_poll);

	mutex_enter(&sc->sc_lock);

	/* Notify the other thread that we're done; pass on the error.  */
	*task->task_errorp = error;
	cv_broadcast(&sc->sc_cv);

	/* Release the task.  */
	ld_sdmmc_task_put(sc, task);

	mutex_exit(&sc->sc_lock);
}

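/*
 * ld_sdmmc_cachesync:
 *
 *	Queue a cache flush task and wait for it to complete, keeping
 *	sc_busy raised so the softc stays around until the wait is
 *	over.  Reached via ld_sdmmc_ioctl() for DIOCCACHESYNC.
 */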
static int
ld_sdmmc_cachesync(struct ld_softc *ld, bool poll)
{
	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);
	struct sdmmc_softc *sdmmc = device_private(device_parent(ld->sc_dv));
	struct ld_sdmmc_task *task;
	int error = -1;

	/*
	 * If we come here through the sdmmc discovery task, we can't
	 * wait for a new task because the new task can't even begin
	 * until the sdmmc discovery task has completed.
	 *
	 * XXX This is wrong, because there may already be queued I/O
	 * tasks ahead of us.  Fixing this properly requires doing
	 * discovery in a separate thread.  But this should avoid the
	 * deadlock of PR kern/57870 (https://gnats.NetBSD.org/57870)
	 * until we do split that up.
	 */
	if (curlwp == sdmmc->sc_tskq_lwp)
		return sdmmc_mem_flush_cache(sc->sc_sf, poll);

	mutex_enter(&sc->sc_lock);

	/* Acquire a free task, or fail with EBUSY.  */
	if ((task = ld_sdmmc_task_get(sc)) == NULL) {
		sc->sc_ev_cachesyncbusy.ev_count++;
		error = EBUSY;
		goto out;
	}

	/* Set up the task and schedule it.  */
	task->task_poll = poll;
	task->task_errorp = &error;
	sdmmc_init_task(&task->task, ld_sdmmc_docachesync, task);

	sdmmc_add_task(sc->sc_sf->sc, &task->task);

	/*
	 * Wait for the task to complete.  If the device is yanked,
	 * detach will notify us.  Keep the busy count up until we're
	 * done waiting so that the softc doesn't go away until we're
	 * done.
	 */
	sc->sc_busy++;
	KASSERT(sc->sc_busy <= LD_SDMMC_MAXTASKCNT);
	while (error == -1)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	if (--sc->sc_busy == 0)
		cv_broadcast(&sc->sc_cv);

out:	mutex_exit(&sc->sc_lock);
	return error;
}

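/*
 * ld_sdmmc_ioctl:
 *
 *	Disk ioctls forwarded from ld(4).  Only DIOCCACHESYNC is handled
 *	here (e.g. a userland ioctl(fd, DIOCCACHESYNC, &force) on the
 *	raw ld device); everything else is passed back to ld(4) with
 *	EPASSTHROUGH.
 */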
static int
ld_sdmmc_ioctl(struct ld_softc *ld, u_long cmd, void *addr, int32_t flag,
    bool poll)
{

	switch (cmd) {
	case DIOCCACHESYNC:
		return ld_sdmmc_cachesync(ld, poll);
	default:
		return EPASSTHROUGH;
	}
}

MODULE(MODULE_CLASS_DRIVER, ld_sdmmc, "ld");

#ifdef _MODULE
/*
 * XXX Don't allow ioconf.c to redefine the "struct cfdriver ld_cd"
 * XXX it will be defined in the common-code module
 */
#undef  CFDRIVER_DECL
#define CFDRIVER_DECL(name, class, attr)
#include "ioconf.c"
#endif

static int
ld_sdmmc_modcmd(modcmd_t cmd, void *opaque)
{
#ifdef _MODULE
	/*
	 * We ignore the cfdriver_vec[] that ioconf provides, since
	 * the cfdrivers are attached already.
	 */
	static struct cfdriver * const no_cfdriver_vec[] = { NULL };
#endif
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(no_cfdriver_vec,
		    cfattach_ioconf_ld_sdmmc, cfdata_ioconf_ld_sdmmc);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(no_cfdriver_vec,
		    cfattach_ioconf_ld_sdmmc, cfdata_ioconf_ld_sdmmc);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}