/*	$NetBSD: ld_sdmmc.c,v 1.41 2020/08/02 01:17:56 riastradh Exp $	*/

/*
 * Copyright (c) 2008 KIYOHARA Takashi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_sdmmc.c,v 1.41 2020/08/02 01:17:56 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_sdmmc.h"
#endif

#include <sys/param.h>
#include <sys/types.h>

#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/dkio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <dev/ldvar.h>

#include <dev/sdmmc/sdmmcvar.h>

#include "ioconf.h"

#ifdef LD_SDMMC_DEBUG
#define DPRINTF(s)	printf s
#else
#define DPRINTF(s)	__nothing
#endif

#define LD_SDMMC_IORETRIES	5	/* number of retries before giving up */
#define RECOVERYTIME		hz/2	/* time to wait before retrying a cmd */

#define LD_SDMMC_MAXQUEUECNT	4	/* number of queued bio requests */
#define LD_SDMMC_MAXTASKCNT	8	/* number of tasks in task pool */

struct ld_sdmmc_softc;

struct ld_sdmmc_task {
	struct sdmmc_task task;
	struct ld_sdmmc_softc *task_sc;

	struct buf *task_bp;
	int task_retries;	/* number of xfer retry */
	struct callout task_restart_ch;

	bool task_poll;
	int *task_errorp;

	TAILQ_ENTRY(ld_sdmmc_task) task_entry;
};

struct ld_sdmmc_softc {
	struct ld_softc sc_ld;
	int sc_hwunit;
	char *sc_typename;
	struct sdmmc_function *sc_sf;

	kmutex_t sc_lock;
	kcondvar_t sc_cv;
	TAILQ_HEAD(, ld_sdmmc_task) sc_freeq;
	TAILQ_HEAD(, ld_sdmmc_task) sc_xferq;
	unsigned sc_busy;
	bool sc_dying;

	struct evcnt sc_ev_discard;	/* discard counter */
	struct evcnt sc_ev_discarderr;	/* discard error counter */
	struct evcnt sc_ev_discardbusy;	/* discard busy counter */
	struct evcnt sc_ev_cachesyncbusy; /* cache sync busy counter */

	struct ld_sdmmc_task sc_task[LD_SDMMC_MAXTASKCNT];
};

static int ld_sdmmc_match(device_t, cfdata_t, void *);
static void ld_sdmmc_attach(device_t, device_t, void *);
static int ld_sdmmc_detach(device_t, int);

static int ld_sdmmc_dump(struct ld_softc *, void *, int, int);
static int ld_sdmmc_start(struct ld_softc *, struct buf *);
static void ld_sdmmc_restart(void *);
static int ld_sdmmc_discard(struct ld_softc *, struct buf *);
static int ld_sdmmc_ioctl(struct ld_softc *, u_long, void *, int32_t, bool);

static void ld_sdmmc_doattach(void *);
static void ld_sdmmc_dobio(void *);
static void ld_sdmmc_dodiscard(void *);

CFATTACH_DECL_NEW(ld_sdmmc, sizeof(struct ld_sdmmc_softc),
    ld_sdmmc_match, ld_sdmmc_attach, ld_sdmmc_detach, NULL);

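/*
 * Acquire a free task from the free list, move it to the transfer
 * queue, and return it.  Returns NULL if the device is detaching or
 * all tasks are in use.  Caller must hold sc_lock.
 */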
static struct ld_sdmmc_task *
ld_sdmmc_task_get(struct ld_sdmmc_softc *sc)
{
	struct ld_sdmmc_task *task;

	KASSERT(mutex_owned(&sc->sc_lock));

	if (sc->sc_dying || (task = TAILQ_FIRST(&sc->sc_freeq)) == NULL)
		return NULL;
	TAILQ_REMOVE(&sc->sc_freeq, task, task_entry);
	TAILQ_INSERT_TAIL(&sc->sc_xferq, task, task_entry);
	KASSERT(task->task_bp == NULL);
	KASSERT(task->task_errorp == NULL);

	return task;
}

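/*
 * Return a task from the transfer queue to the free list and clear
 * its per-xfer state.  Caller must hold sc_lock.
 */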
static void
ld_sdmmc_task_put(struct ld_sdmmc_softc *sc, struct ld_sdmmc_task *task)
{

	KASSERT(mutex_owned(&sc->sc_lock));

	TAILQ_REMOVE(&sc->sc_xferq, task, task_entry);
	TAILQ_INSERT_TAIL(&sc->sc_freeq, task, task_entry);
	task->task_bp = NULL;
	task->task_errorp = NULL;
}

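/*
 * Cancel a pending task at detach time: halt its restart callout,
 * pull it off the sdmmc task queue if it has not started yet, and
 * fail the associated bio or synchronous waiter with ENXIO.  If the
 * task already started, it cleans up after itself.  Caller must hold
 * sc_lock with sc_dying set.
 */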
static void
ld_sdmmc_task_cancel(struct ld_sdmmc_softc *sc, struct ld_sdmmc_task *task)
{
	struct buf *bp;
	int *errorp;

	KASSERT(mutex_owned(&sc->sc_lock));
	KASSERT(sc->sc_dying);

	/*
	 * Either the callout or the task may be pending, but not both.
	 * First, determine whether the callout is pending.
	 */
	if (callout_pending(&task->task_restart_ch) ||
	    callout_invoking(&task->task_restart_ch)) {
		/*
		 * The callout either is pending, or just started but
		 * is waiting for us to release the lock.  At this
		 * point, it will notice sc->sc_dying and give up, so
		 * just wait for it to complete and then we will
		 * release everything.
		 */
		callout_halt(&task->task_restart_ch, &sc->sc_lock);
	} else {
		/*
		 * If the callout is running, it has just scheduled, so
		 * after we wait for the callout to finish running, the
		 * task is either pending or running.  If the task is
		 * already running, it will notice sc->sc_dying and
		 * give up; otherwise we have to release everything.
		 */
		callout_halt(&task->task_restart_ch, &sc->sc_lock);
		if (!sdmmc_del_task(sc->sc_sf->sc, &task->task, &sc->sc_lock))
			return;	/* task already started, let it clean up */
	}

	/*
	 * It is our responsibility to clean up.  Move it from xferq
	 * back to freeq and make sure to notify anyone waiting that
	 * it's finished.
	 */
	bp = task->task_bp;
	errorp = task->task_errorp;
	ld_sdmmc_task_put(sc, task);

	/*
	 * If the task was for an asynchronous I/O xfer, fail the I/O
	 * xfer, with the softc lock dropped since this is a callback
	 * into arbitrary other subsystems.
	 */
	if (bp) {
		mutex_exit(&sc->sc_lock);
		/*
		 * XXX We assume that the same sequence works for bio
		 * and discard -- that lddiscardend is just the same as
		 * setting bp->b_resid = bp->b_bcount in the event of
		 * error and then calling lddone.
		 */
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		lddone(&sc->sc_ld, bp);
		mutex_enter(&sc->sc_lock);
	}

	/*
	 * If the task was for a synchronous operation (cachesync),
	 * then just set the error indicator and wake up the waiter.
	 */
	if (errorp) {
		*errorp = ENXIO;
		cv_broadcast(&sc->sc_cv);
	}
}

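/*
 * Match any sdmmc function in memory mode (SD/MMC memory cards, as
 * opposed to SDIO).
 */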
/* ARGSUSED */
static int
ld_sdmmc_match(device_t parent, cfdata_t match, void *aux)
{
	struct sdmmc_softc *sdmsc = device_private(parent);

	if (ISSET(sdmsc->sc_flags, SMF_MEM_MODE))
		return 1;
	return 0;
}

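/*
 * Attach: announce the card CID, set up the task pool, lock, and
 * ld(4) parameters, then defer ldattach() to a kthread running
 * ld_sdmmc_doattach().
 */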
/* ARGSUSED */
static void
ld_sdmmc_attach(device_t parent, device_t self, void *aux)
{
	struct ld_sdmmc_softc *sc = device_private(self);
	struct sdmmc_attach_args *sa = aux;
	struct ld_softc *ld = &sc->sc_ld;
	struct ld_sdmmc_task *task;
	struct lwp *lwp;
	int i;

	ld->sc_dv = self;

	aprint_normal(": <0x%02x:0x%04x:%s:0x%02x:0x%08x:0x%03x>\n",
	    sa->sf->cid.mid, sa->sf->cid.oid, sa->sf->cid.pnm,
	    sa->sf->cid.rev, sa->sf->cid.psn, sa->sf->cid.mdt);
	aprint_naive("\n");

	sc->sc_typename = kmem_asprintf("0x%02x:0x%04x:%s",
	    sa->sf->cid.mid, sa->sf->cid.oid, sa->sf->cid.pnm);

	evcnt_attach_dynamic(&sc->sc_ev_discard, EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "sdmmc discard count");
	evcnt_attach_dynamic(&sc->sc_ev_discarderr, EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "sdmmc discard errors");
	evcnt_attach_dynamic(&sc->sc_ev_discardbusy, EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "sdmmc discard busy");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SDMMC);
	cv_init(&sc->sc_cv, "ldsdmmc");
	TAILQ_INIT(&sc->sc_freeq);
	TAILQ_INIT(&sc->sc_xferq);
	sc->sc_dying = false;

	const int ntask = __arraycount(sc->sc_task);
	for (i = 0; i < ntask; i++) {
		task = &sc->sc_task[i];
		task->task_sc = sc;
		callout_init(&task->task_restart_ch, CALLOUT_MPSAFE);
		TAILQ_INSERT_TAIL(&sc->sc_freeq, task, task_entry);
	}

	sc->sc_hwunit = 0;	/* always 0? */
	sc->sc_sf = sa->sf;

	ld->sc_flags = LDF_ENABLED | LDF_MPSAFE;
	ld->sc_secperunit = sc->sc_sf->csd.capacity;
	ld->sc_secsize = SDMMC_SECTOR_SIZE;
	ld->sc_maxxfer = MAXPHYS;
	ld->sc_maxqueuecnt = LD_SDMMC_MAXQUEUECNT;
	ld->sc_dump = ld_sdmmc_dump;
	ld->sc_start = ld_sdmmc_start;
	ld->sc_discard = ld_sdmmc_discard;
	ld->sc_ioctl = ld_sdmmc_ioctl;
	ld->sc_typename = sc->sc_typename;

	/*
	 * Defer attachment of ld + disk subsystem to a thread.
	 *
	 * This is necessary because wedge autodiscover needs to
	 * open and call into the ld driver, which could deadlock
	 * when the sdmmc driver isn't ready in early bootstrap.
	 *
	 * Don't mark thread as MPSAFE to keep aprint output sane.
	 */
	config_pending_incr(self);
	if (kthread_create(PRI_NONE, 0, NULL,
	    ld_sdmmc_doattach, sc, &lwp, "%sattach", device_xname(self))) {
		aprint_error_dev(self, "couldn't create thread\n");
	}
}

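/*
 * Deferred attach: register the disk with ld(4) and print the bus
 * width, transfer mode, cache size, and bus clock.
 */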
static void
ld_sdmmc_doattach(void *arg)
{
	struct ld_sdmmc_softc *sc = (struct ld_sdmmc_softc *)arg;
	struct ld_softc *ld = &sc->sc_ld;
	struct sdmmc_softc *ssc = device_private(device_parent(ld->sc_dv));
	const u_int cache_size = sc->sc_sf->ext_csd.cache_size;
	char buf[sizeof("9999 KB")];

	ldattach(ld, BUFQ_DISK_DEFAULT_STRAT);
	aprint_normal_dev(ld->sc_dv, "%d-bit width,", sc->sc_sf->width);
	if (ssc->sc_transfer_mode != NULL)
		aprint_normal(" %s,", ssc->sc_transfer_mode);
	if (cache_size > 0) {
		format_bytes(buf, sizeof(buf), cache_size);
		aprint_normal(" %s cache%s,", buf,
		    ISSET(sc->sc_sf->flags, SFF_CACHE_ENABLED) ? "" :
		    " (disabled)");
	}
	if ((ssc->sc_busclk / 1000) != 0)
		aprint_normal(" %u.%03u MHz\n",
		    ssc->sc_busclk / 1000, ssc->sc_busclk % 1000);
	else
		aprint_normal(" %u KHz\n", ssc->sc_busclk % 1000);
	config_pending_decr(ld->sc_dv);
	kthread_exit(0);
}

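/*
 * Detach: block new xfers, cancel anything pending, wait for waiters
 * to drain, then tear down the disk and free our resources.
 */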
static int
ld_sdmmc_detach(device_t dev, int flags)
{
	struct ld_sdmmc_softc *sc = device_private(dev);
	struct ld_softc *ld = &sc->sc_ld;
	struct ld_sdmmc_task *task;
	int error, i;

	/*
	 * Block new xfers, or fail if the disk is still open and the
	 * detach isn't forced.  After this point, we are committed to
	 * detaching.
	 */
	error = ldbegindetach(ld, flags);
	if (error)
		return error;

	/*
	 * Abort all pending tasks, and wait for all pending waiters to
	 * notice that we're gone.
	 */
	mutex_enter(&sc->sc_lock);
	sc->sc_dying = true;
	while ((task = TAILQ_FIRST(&sc->sc_xferq)) != NULL)
		ld_sdmmc_task_cancel(sc, task);
	while (sc->sc_busy)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	mutex_exit(&sc->sc_lock);

	/* Done! Destroy the disk. */
	ldenddetach(ld);

	KASSERT(TAILQ_EMPTY(&sc->sc_xferq));

	for (i = 0; i < __arraycount(sc->sc_task); i++)
		callout_destroy(&sc->sc_task[i].task_restart_ch);

	cv_destroy(&sc->sc_cv);
	mutex_destroy(&sc->sc_lock);

	evcnt_detach(&sc->sc_ev_discard);
	evcnt_detach(&sc->sc_ev_discarderr);
	evcnt_detach(&sc->sc_ev_discardbusy);
	kmem_free(sc->sc_typename, strlen(sc->sc_typename) + 1);

	return 0;
}

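/*
 * ld(4) start routine: grab a task for the bio and queue it on the
 * sdmmc task thread, or return EAGAIN when no task is free.
 */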
static int
ld_sdmmc_start(struct ld_softc *ld, struct buf *bp)
{
	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);
	struct ld_sdmmc_task *task;
	int error;

	mutex_enter(&sc->sc_lock);
	if ((task = ld_sdmmc_task_get(sc)) == NULL) {
		error = EAGAIN;
		goto out;
	}

	task->task_bp = bp;
	task->task_retries = 0;
	sdmmc_init_task(&task->task, ld_sdmmc_dobio, task);

	sdmmc_add_task(sc->sc_sf->sc, &task->task);

	/* Success! The xfer is now queued. */
	error = 0;

out:	mutex_exit(&sc->sc_lock);
	return error;
}

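/*
 * Callout handler: retry a failed bio by requeueing its task on the
 * sdmmc task thread, unless the device is detaching.
 */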
static void
ld_sdmmc_restart(void *arg)
{
	struct ld_sdmmc_task *task = (struct ld_sdmmc_task *)arg;
	struct ld_sdmmc_softc *sc = task->task_sc;
	struct buf *bp = task->task_bp;

	bp->b_resid = bp->b_bcount;

	mutex_enter(&sc->sc_lock);
	callout_ack(&task->task_restart_ch);
	if (!sc->sc_dying)
		sdmmc_add_task(sc->sc_sf->sc, &task->task);
	mutex_exit(&sc->sc_lock);
}

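/*
 * Task handler for reads and writes: validate the block range,
 * perform the transfer, and retry up to LD_SDMMC_IORETRIES times on
 * error before completing the bio with lddone().
 */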
static void
ld_sdmmc_dobio(void *arg)
{
	struct ld_sdmmc_task *task = (struct ld_sdmmc_task *)arg;
	struct ld_sdmmc_softc *sc = task->task_sc;
	struct buf *bp = task->task_bp;
	int error;

	/*
	 * I/O operation
	 */
	DPRINTF(("%s: I/O operation (dir=%s, blkno=0x%jx, bcnt=0x%x)\n",
	    device_xname(sc->sc_ld.sc_dv), bp->b_flags & B_READ ? "IN" : "OUT",
	    bp->b_rawblkno, bp->b_bcount));

	/* is everything done in terms of blocks? */
	if (bp->b_rawblkno >= sc->sc_sf->csd.capacity) {
		/* trying to read or write past end of device */
		aprint_error_dev(sc->sc_ld.sc_dv,
		    "blkno 0x%" PRIu64 " exceeds capacity %d\n",
		    bp->b_rawblkno, sc->sc_sf->csd.capacity);
		bp->b_error = EINVAL;
		bp->b_resid = bp->b_bcount;

		goto done;
	}

	if (bp->b_flags & B_READ)
		error = sdmmc_mem_read_block(sc->sc_sf, bp->b_rawblkno,
		    bp->b_data, bp->b_bcount);
	else
		error = sdmmc_mem_write_block(sc->sc_sf, bp->b_rawblkno,
		    bp->b_data, bp->b_bcount);
	if (error) {
		if (task->task_retries < LD_SDMMC_IORETRIES) {
			struct dk_softc *dksc = &sc->sc_ld.sc_dksc;
			struct cfdriver *cd = device_cfdriver(dksc->sc_dev);

			diskerr(bp, cd->cd_name, "error", LOG_PRINTF, 0,
			    dksc->sc_dkdev.dk_label);
			printf(", retrying\n");
			task->task_retries++;
			mutex_enter(&sc->sc_lock);
			if (sc->sc_dying) {
				bp->b_resid = bp->b_bcount;
				bp->b_error = error;
				goto done_locked;
			} else {
				callout_reset(&task->task_restart_ch,
				    RECOVERYTIME, ld_sdmmc_restart, task);
			}
			mutex_exit(&sc->sc_lock);
			return;
		}
		bp->b_error = error;
		bp->b_resid = bp->b_bcount;
	} else {
		bp->b_resid = 0;
	}

done:
	/* Dissociate the task from the I/O xfer and release it. */
	mutex_enter(&sc->sc_lock);
done_locked:
	ld_sdmmc_task_put(sc, task);
	mutex_exit(&sc->sc_lock);

	lddone(&sc->sc_ld, bp);
}

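/*
 * Crash dump hook: write blocks directly, bypassing the task queue.
 */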
static int
ld_sdmmc_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);

	return sdmmc_mem_write_block(sc->sc_sf, blkno, data,
	    blkcnt * ld->sc_secsize);
}

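/*
 * Task handler for discard: erase the block range covered by the bio
 * with sdmmc_mem_discard() and complete it with lddiscardend().
 */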
static void
ld_sdmmc_dodiscard(void *arg)
{
	struct ld_sdmmc_task *task = arg;
	struct ld_sdmmc_softc *sc = task->task_sc;
	struct buf *bp = task->task_bp;
	uint32_t sblkno, nblks;
	int error;

	/* first and last block to erase */
	sblkno = bp->b_rawblkno;
	nblks = howmany(bp->b_bcount, sc->sc_ld.sc_secsize);

	/* An error from discard is non-fatal */
	error = sdmmc_mem_discard(sc->sc_sf, sblkno, sblkno + nblks - 1);

	/* Count error or success and release the task. */
	mutex_enter(&sc->sc_lock);
	if (error)
		sc->sc_ev_discarderr.ev_count++;
	else
		sc->sc_ev_discard.ev_count++;
	ld_sdmmc_task_put(sc, task);
	mutex_exit(&sc->sc_lock);

	/* Record the error and notify the xfer of completion. */
	if (error)
		bp->b_error = error;
	lddiscardend(&sc->sc_ld, bp);
}

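/*
 * ld(4) discard routine: queue the request on the sdmmc task thread,
 * or fail with EBUSY when no task is free.
 */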
static int
ld_sdmmc_discard(struct ld_softc *ld, struct buf *bp)
{
	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);
	struct ld_sdmmc_task *task;
	int error;

	mutex_enter(&sc->sc_lock);

	/* Acquire a free task, or drop the request altogether. */
	if ((task = ld_sdmmc_task_get(sc)) == NULL) {
		sc->sc_ev_discardbusy.ev_count++;
		error = EBUSY;
		goto out;
	}

	/* Set up the task and schedule it. */
	task->task_bp = bp;
	sdmmc_init_task(&task->task, ld_sdmmc_dodiscard, task);

	sdmmc_add_task(sc->sc_sf->sc, &task->task);

	/* Success! The request is queued. */
	error = 0;

out:	mutex_exit(&sc->sc_lock);
	return error;
}

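/*
 * Task handler for cache sync: flush the device cache and wake the
 * thread waiting in ld_sdmmc_cachesync().
 */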
static void
ld_sdmmc_docachesync(void *arg)
{
	struct ld_sdmmc_task *task = arg;
	struct ld_sdmmc_softc *sc = task->task_sc;
	int error;

	/* Flush the cache. */
	error = sdmmc_mem_flush_cache(sc->sc_sf, task->task_poll);

	mutex_enter(&sc->sc_lock);

	/* Notify the other thread that we're done; pass on the error. */
	*task->task_errorp = error;
	cv_broadcast(&sc->sc_cv);

	/* Release the task. */
	ld_sdmmc_task_put(sc, task);

	mutex_exit(&sc->sc_lock);
}

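/*
 * Queue a cache flush on the sdmmc task thread and wait for it to
 * finish; detach cancels the wait with ENXIO if the device goes away.
 */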
static int
ld_sdmmc_cachesync(struct ld_softc *ld, bool poll)
{
	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);
	struct ld_sdmmc_task *task;
	int error = -1;

	mutex_enter(&sc->sc_lock);

	/* Acquire a free task, or fail with EBUSY. */
	if ((task = ld_sdmmc_task_get(sc)) == NULL) {
		sc->sc_ev_cachesyncbusy.ev_count++;
		error = EBUSY;
		goto out;
	}

	/* Set up the task and schedule it. */
	task->task_poll = poll;
	task->task_errorp = &error;
	sdmmc_init_task(&task->task, ld_sdmmc_docachesync, task);

	sdmmc_add_task(sc->sc_sf->sc, &task->task);

	/*
	 * Wait for the task to complete.  If the device is yanked,
	 * detach will notify us.  Keep the busy count up until we're
	 * done waiting so that the softc doesn't go away until we're
	 * done.
	 */
	sc->sc_busy++;
	KASSERT(sc->sc_busy <= LD_SDMMC_MAXTASKCNT);
	while (error == -1)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	if (--sc->sc_busy == 0)
		cv_broadcast(&sc->sc_cv);

out:	mutex_exit(&sc->sc_lock);
	return error;
}

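/*
 * ld(4) ioctl hook: handle DIOCCACHESYNC here and pass everything
 * else back as EPASSTHROUGH.
 */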
static int
ld_sdmmc_ioctl(struct ld_softc *ld, u_long cmd, void *addr, int32_t flag,
    bool poll)
{

	switch (cmd) {
	case DIOCCACHESYNC:
		return ld_sdmmc_cachesync(ld, poll);
	default:
		return EPASSTHROUGH;
	}
}

MODULE(MODULE_CLASS_DRIVER, ld_sdmmc, "ld");

#ifdef _MODULE
/*
 * XXX Don't allow ioconf.c to redefine the "struct cfdriver ld_cd"
 * XXX it will be defined in the common-code module
 */
#undef CFDRIVER_DECL
#define CFDRIVER_DECL(name, class, attr)
#include "ioconf.c"
#endif

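/*
 * Module control: register or unregister the driver's cfattach/cfdata
 * when built as a module; a no-op when built into the kernel.
 */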
static int
ld_sdmmc_modcmd(modcmd_t cmd, void *opaque)
{
#ifdef _MODULE
	/*
	 * We ignore the cfdriver_vec[] that ioconf provides, since
	 * the cfdrivers are attached already.
	 */
	static struct cfdriver * const no_cfdriver_vec[] = { NULL };
#endif
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(no_cfdriver_vec,
		    cfattach_ioconf_ld_sdmmc, cfdata_ioconf_ld_sdmmc);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(no_cfdriver_vec,
		    cfattach_ioconf_ld_sdmmc, cfdata_ioconf_ld_sdmmc);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}