/*	$NetBSD: ld_sdmmc.c,v 1.44 2024/10/18 11:03:52 jmcneill Exp $	*/

/*
 * Copyright (c) 2008 KIYOHARA Takashi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_sdmmc.c,v 1.44 2024/10/18 11:03:52 jmcneill Exp $");

#ifdef _KERNEL_OPT
#include "opt_sdmmc.h"
#endif

#include <sys/param.h>
#include <sys/types.h>

#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/dkio.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <dev/ldvar.h>

#include <dev/sdmmc/sdmmcvar.h>

#include "ioconf.h"

#ifdef LD_SDMMC_DEBUG
#define DPRINTF(s)	printf s
#else
#define DPRINTF(s)	__nothing
#endif

#define	LD_SDMMC_IORETRIES	5	/* number of retries before giving up */
#define	RECOVERYTIME		hz/2	/* time to wait before retrying a cmd */

#define	LD_SDMMC_MAXQUEUECNT	4	/* number of queued bio requests */
#define	LD_SDMMC_MAXTASKCNT	8	/* number of tasks in task pool */

struct ld_sdmmc_softc;

struct ld_sdmmc_task {
	struct sdmmc_task task;
	struct ld_sdmmc_softc *task_sc;

	struct buf *task_bp;
	int task_retries;		/* number of xfer retries */
	struct callout task_restart_ch;

	bool task_poll;
	int *task_errorp;

	TAILQ_ENTRY(ld_sdmmc_task) task_entry;
};

struct ld_sdmmc_softc {
	struct ld_softc sc_ld;
	int sc_hwunit;
	char *sc_typename;
	struct sdmmc_function *sc_sf;

	kmutex_t sc_lock;
	kcondvar_t sc_cv;
	TAILQ_HEAD(, ld_sdmmc_task) sc_freeq;
	TAILQ_HEAD(, ld_sdmmc_task) sc_xferq;
	unsigned sc_busy;
	bool sc_dying;

	struct evcnt sc_ev_discard;	/* discard counter */
	struct evcnt sc_ev_discarderr;	/* discard error counter */
	struct evcnt sc_ev_discardbusy;	/* discard busy counter */
	struct evcnt sc_ev_cachesyncbusy; /* cache sync busy counter */

	struct ld_sdmmc_task sc_task[LD_SDMMC_MAXTASKCNT];
};

static int ld_sdmmc_match(device_t, cfdata_t, void *);
static void ld_sdmmc_attach(device_t, device_t, void *);
static int ld_sdmmc_detach(device_t, int);

static int ld_sdmmc_dump(struct ld_softc *, void *, int, int);
static int ld_sdmmc_start(struct ld_softc *, struct buf *);
static void ld_sdmmc_restart(void *);
static int ld_sdmmc_discard(struct ld_softc *, struct buf *);
static int ld_sdmmc_ioctl(struct ld_softc *, u_long, void *, int32_t, bool);

static void ld_sdmmc_doattach(void *);
static void ld_sdmmc_dobio(void *);
static void ld_sdmmc_dodiscard(void *);

CFATTACH_DECL_NEW(ld_sdmmc, sizeof(struct ld_sdmmc_softc),
    ld_sdmmc_match, ld_sdmmc_attach, ld_sdmmc_detach, NULL);

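/*
 * Task pool accessors.  Tasks migrate between sc_freeq (idle) and
 * sc_xferq (in flight); both queues are covered by sc_lock.  Once
 * sc_dying is set, no new task can be acquired.
 */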
static struct ld_sdmmc_task *
ld_sdmmc_task_get(struct ld_sdmmc_softc *sc)
{
	struct ld_sdmmc_task *task;

	KASSERT(mutex_owned(&sc->sc_lock));

	if (sc->sc_dying || (task = TAILQ_FIRST(&sc->sc_freeq)) == NULL)
		return NULL;
	TAILQ_REMOVE(&sc->sc_freeq, task, task_entry);
	TAILQ_INSERT_TAIL(&sc->sc_xferq, task, task_entry);
	KASSERT(task->task_bp == NULL);
	KASSERT(task->task_errorp == NULL);

	return task;
}

static void
ld_sdmmc_task_put(struct ld_sdmmc_softc *sc, struct ld_sdmmc_task *task)
{

	KASSERT(mutex_owned(&sc->sc_lock));

	TAILQ_REMOVE(&sc->sc_xferq, task, task_entry);
	TAILQ_INSERT_TAIL(&sc->sc_freeq, task, task_entry);
	task->task_bp = NULL;
	task->task_errorp = NULL;
}

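/*
 * Cancel a task that is pending either in the restart callout or on
 * the sdmmc task queue.  Called only during detach, with sc_lock held
 * and sc_dying already set, to fail outstanding requests with ENXIO.
 */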
static void
ld_sdmmc_task_cancel(struct ld_sdmmc_softc *sc, struct ld_sdmmc_task *task)
{
	struct buf *bp;
	int *errorp;

	KASSERT(mutex_owned(&sc->sc_lock));
	KASSERT(sc->sc_dying);

	/*
	 * Either the callout or the task may be pending, but not both.
	 * First, determine whether the callout is pending.
	 */
	if (callout_pending(&task->task_restart_ch) ||
	    callout_invoking(&task->task_restart_ch)) {
		/*
		 * The callout either is pending, or just started but
		 * is waiting for us to release the lock.  At this
		 * point, it will notice sc->sc_dying and give up, so
		 * just wait for it to complete and then we will
		 * release everything.
		 */
		callout_halt(&task->task_restart_ch, &sc->sc_lock);
	} else {
		/*
		 * If the callout is running, it has just scheduled, so
		 * after we wait for the callout to finish running, the
		 * task is either pending or running.  If the task is
		 * already running, it will notice sc->sc_dying and
		 * give up; otherwise we have to release everything.
		 */
		callout_halt(&task->task_restart_ch, &sc->sc_lock);
		if (!sdmmc_del_task(sc->sc_sf->sc, &task->task, &sc->sc_lock))
			return;	/* task already started, let it clean up */
	}

	/*
	 * It is our responsibility to clean up.  Move it from xferq
	 * back to freeq and make sure to notify anyone waiting that
	 * it's finished.
	 */
	bp = task->task_bp;
	errorp = task->task_errorp;
	ld_sdmmc_task_put(sc, task);

	/*
	 * If the task was for an asynchronous I/O xfer, fail the I/O
	 * xfer, with the softc lock dropped since this is a callback
	 * into arbitrary other subsystems.
	 */
	if (bp) {
		mutex_exit(&sc->sc_lock);
		/*
		 * XXX We assume that the same sequence works for bio
		 * and discard -- that lddiscardend is just the same as
		 * setting bp->b_resid = bp->b_bcount in the event of
		 * error and then calling lddone.
		 */
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		lddone(&sc->sc_ld, bp);
		mutex_enter(&sc->sc_lock);
	}

	/*
	 * If the task was for a synchronous operation (cachesync),
	 * then just set the error indicator and wake up the waiter.
	 */
	if (errorp) {
		*errorp = ENXIO;
		cv_broadcast(&sc->sc_cv);
	}
}

/* ARGSUSED */
static int
ld_sdmmc_match(device_t parent, cfdata_t match, void *aux)
{
	struct sdmmc_softc *sdmsc = device_private(parent);

	if (ISSET(sdmsc->sc_flags, SMF_MEM_MODE))
		return 1;
	return 0;
}

/* ARGSUSED */
static void
ld_sdmmc_attach(device_t parent, device_t self, void *aux)
{
	struct ld_sdmmc_softc *sc = device_private(self);
	struct sdmmc_attach_args *sa = aux;
	struct ld_softc *ld = &sc->sc_ld;
	struct ld_sdmmc_task *task;
	struct lwp *lwp;
	const char *cardtype;
	int i;

	ld->sc_dv = self;

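	/*
	 * Print the raw CID as the attach banner: manufacturer ID, OEM
	 * ID, product name, revision, serial number, and manufacturing
	 * date, e.g. "<0x03:0x5344:SD32G:0x80:0x00001234:0x123>"
	 * (illustrative values, not taken from a real card).
	 */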
	aprint_normal(": <0x%02x:0x%04x:%s:0x%02x:0x%08x:0x%03x>\n",
	    sa->sf->cid.mid, sa->sf->cid.oid, sa->sf->cid.pnm,
	    sa->sf->cid.rev, sa->sf->cid.psn, sa->sf->cid.mdt);
	aprint_naive("\n");

	if (ISSET(sa->sf->sc->sc_flags, SMF_SD_MODE)) {
		cardtype = "SD card";
	} else {
		cardtype = "MMC";
	}
	sc->sc_typename = kmem_asprintf("%s 0x%02x:0x%04x:%s",
	    cardtype, sa->sf->cid.mid, sa->sf->cid.oid, sa->sf->cid.pnm);

	evcnt_attach_dynamic(&sc->sc_ev_discard, EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "sdmmc discard count");
	evcnt_attach_dynamic(&sc->sc_ev_discarderr, EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "sdmmc discard errors");
	evcnt_attach_dynamic(&sc->sc_ev_discardbusy, EVCNT_TYPE_MISC,
	    NULL, device_xname(self), "sdmmc discard busy");

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_SDMMC);
	cv_init(&sc->sc_cv, "ldsdmmc");
	TAILQ_INIT(&sc->sc_freeq);
	TAILQ_INIT(&sc->sc_xferq);
	sc->sc_dying = false;

	const int ntask = __arraycount(sc->sc_task);
	for (i = 0; i < ntask; i++) {
		task = &sc->sc_task[i];
		task->task_sc = sc;
		callout_init(&task->task_restart_ch, CALLOUT_MPSAFE);
		TAILQ_INSERT_TAIL(&sc->sc_freeq, task, task_entry);
	}

	sc->sc_hwunit = 0;	/* always 0? */
	sc->sc_sf = sa->sf;

	ld->sc_flags = LDF_ENABLED | LDF_MPSAFE;
	ld->sc_secperunit = sc->sc_sf->csd.capacity;
	ld->sc_secsize = SDMMC_SECTOR_SIZE;
	ld->sc_maxxfer = MAXPHYS;
	ld->sc_maxqueuecnt = LD_SDMMC_MAXQUEUECNT;
	ld->sc_dump = ld_sdmmc_dump;
	ld->sc_start = ld_sdmmc_start;
	ld->sc_discard = ld_sdmmc_discard;
	ld->sc_ioctl = ld_sdmmc_ioctl;
	ld->sc_typename = sc->sc_typename;

	/*
	 * Defer attachment of ld + disk subsystem to a thread.
	 *
	 * This is necessary because wedge autodiscover needs to
	 * open and call into the ld driver, which could deadlock
	 * when the sdmmc driver isn't ready in early bootstrap.
	 *
	 * Don't mark thread as MPSAFE to keep aprint output sane.
	 */
	config_pending_incr(self);
	if (kthread_create(PRI_NONE, 0, NULL,
	    ld_sdmmc_doattach, sc, &lwp, "%sattach", device_xname(self))) {
		aprint_error_dev(self, "couldn't create thread\n");
	}
}

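/*
 * Finish attachment in thread context: register with the ld(4)/disk
 * layer and print the bus width, transfer mode, cache state, and bus
 * clock, then let autoconfiguration proceed.
 */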
static void
ld_sdmmc_doattach(void *arg)
{
	struct ld_sdmmc_softc *sc = (struct ld_sdmmc_softc *)arg;
	struct ld_softc *ld = &sc->sc_ld;
	struct sdmmc_softc *ssc = device_private(device_parent(ld->sc_dv));
	const u_int emmc_cache_size = sc->sc_sf->ext_csd.cache_size;
	const bool sd_cache = sc->sc_sf->ssr.cache;
	char buf[sizeof("9999 KB")];

	ldattach(ld, BUFQ_DISK_DEFAULT_STRAT);
	aprint_normal_dev(ld->sc_dv, "%d-bit width,", sc->sc_sf->width);
	if (ssc->sc_transfer_mode != NULL)
		aprint_normal(" %s,", ssc->sc_transfer_mode);
	if (emmc_cache_size > 0) {
		format_bytes(buf, sizeof(buf), emmc_cache_size);
		aprint_normal(" %s cache%s,", buf,
		    ISSET(sc->sc_sf->flags, SFF_CACHE_ENABLED) ? "" :
		    " (disabled)");
	} else if (sd_cache) {
		aprint_normal(" Cache%s,",
		    ISSET(sc->sc_sf->flags, SFF_CACHE_ENABLED) ? "" :
		    " (disabled)");
	}
	if ((ssc->sc_busclk / 1000) != 0)
		aprint_normal(" %u.%03u MHz\n",
		    ssc->sc_busclk / 1000, ssc->sc_busclk % 1000);
	else
		aprint_normal(" %u KHz\n", ssc->sc_busclk % 1000);
	config_pending_decr(ld->sc_dv);
	kthread_exit(0);
}

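/*
 * Detach the device: block new xfers, cancel everything outstanding
 * (failing it with ENXIO), wait for waiters to drain, then tear down
 * the disk and all per-softc resources.
 */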
static int
ld_sdmmc_detach(device_t dev, int flags)
{
	struct ld_sdmmc_softc *sc = device_private(dev);
	struct ld_softc *ld = &sc->sc_ld;
	struct ld_sdmmc_task *task;
	int error, i;

	/*
	 * Block new xfers, or fail if the disk is still open and the
	 * detach isn't forced.  After this point, we are committed to
	 * detaching.
	 */
	error = ldbegindetach(ld, flags);
	if (error)
		return error;

	/*
	 * Abort all pending tasks, and wait for all pending waiters to
	 * notice that we're gone.
	 */
	mutex_enter(&sc->sc_lock);
	sc->sc_dying = true;
	while ((task = TAILQ_FIRST(&sc->sc_xferq)) != NULL)
		ld_sdmmc_task_cancel(sc, task);
	while (sc->sc_busy)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	mutex_exit(&sc->sc_lock);

	/* Done!  Destroy the disk.  */
	ldenddetach(ld);

	KASSERT(TAILQ_EMPTY(&sc->sc_xferq));

	for (i = 0; i < __arraycount(sc->sc_task); i++)
		callout_destroy(&sc->sc_task[i].task_restart_ch);

	cv_destroy(&sc->sc_cv);
	mutex_destroy(&sc->sc_lock);

	evcnt_detach(&sc->sc_ev_discard);
	evcnt_detach(&sc->sc_ev_discarderr);
	evcnt_detach(&sc->sc_ev_discardbusy);
	kmem_free(sc->sc_typename, strlen(sc->sc_typename) + 1);

	return 0;
}

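/*
 * ld(4) strategy entry point: grab a free task for the buf and hand it
 * to the sdmmc task thread, or return EAGAIN so the ld layer can retry
 * the request later when all tasks are busy.
 */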
static int
ld_sdmmc_start(struct ld_softc *ld, struct buf *bp)
{
	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);
	struct ld_sdmmc_task *task;
	int error;

	mutex_enter(&sc->sc_lock);
	if ((task = ld_sdmmc_task_get(sc)) == NULL) {
		error = EAGAIN;
		goto out;
	}

	task->task_bp = bp;
	task->task_retries = 0;
	sdmmc_init_task(&task->task, ld_sdmmc_dobio, task);

	sdmmc_add_task(sc->sc_sf->sc, &task->task);

	/* Success!  The xfer is now queued.  */
	error = 0;

out:	mutex_exit(&sc->sc_lock);
	return error;
}

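/*
 * Callout handler that requeues a failed xfer after RECOVERYTIME,
 * unless the device is being detached.
 */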
static void
ld_sdmmc_restart(void *arg)
{
	struct ld_sdmmc_task *task = (struct ld_sdmmc_task *)arg;
	struct ld_sdmmc_softc *sc = task->task_sc;
	struct buf *bp = task->task_bp;

	bp->b_resid = bp->b_bcount;

	mutex_enter(&sc->sc_lock);
	callout_ack(&task->task_restart_ch);
	if (!sc->sc_dying)
		sdmmc_add_task(sc->sc_sf->sc, &task->task);
	mutex_exit(&sc->sc_lock);
}

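/*
 * Execute one bio in sdmmc task-thread context: bounds-check the
 * request, issue the read or write, and retry failed xfers up to
 * LD_SDMMC_IORETRIES times via the restart callout.
 */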
static void
ld_sdmmc_dobio(void *arg)
{
	struct ld_sdmmc_task *task = (struct ld_sdmmc_task *)arg;
	struct ld_sdmmc_softc *sc = task->task_sc;
	struct buf *bp = task->task_bp;
	int error;

	/*
	 * I/O operation
	 */
	DPRINTF(("%s: I/O operation (dir=%s, blkno=0x%jx, bcnt=0x%x)\n",
	    device_xname(sc->sc_ld.sc_dv), bp->b_flags & B_READ ? "IN" : "OUT",
	    bp->b_rawblkno, bp->b_bcount));

	/* is everything done in terms of blocks? */
	if (bp->b_rawblkno >= sc->sc_sf->csd.capacity) {
		/* trying to read or write past end of device */
		aprint_error_dev(sc->sc_ld.sc_dv,
		    "blkno %" PRIu64 " exceeds capacity %d\n",
		    bp->b_rawblkno, sc->sc_sf->csd.capacity);
		bp->b_error = EINVAL;
		bp->b_resid = bp->b_bcount;

		goto done;
	}

	if (bp->b_flags & B_READ)
		error = sdmmc_mem_read_block(sc->sc_sf, bp->b_rawblkno,
		    bp->b_data, bp->b_bcount);
	else
		error = sdmmc_mem_write_block(sc->sc_sf, bp->b_rawblkno,
		    bp->b_data, bp->b_bcount);
	if (error) {
		if (task->task_retries < LD_SDMMC_IORETRIES) {
			struct dk_softc *dksc = &sc->sc_ld.sc_dksc;
			struct cfdriver *cd = device_cfdriver(dksc->sc_dev);

			diskerr(bp, cd->cd_name, "error", LOG_PRINTF, 0,
			    dksc->sc_dkdev.dk_label);
			printf(", retrying\n");
			task->task_retries++;
			mutex_enter(&sc->sc_lock);
			if (sc->sc_dying) {
				bp->b_resid = bp->b_bcount;
				bp->b_error = error;
				goto done_locked;
			} else {
				callout_reset(&task->task_restart_ch,
				    RECOVERYTIME, ld_sdmmc_restart, task);
			}
			mutex_exit(&sc->sc_lock);
			return;
		}
		bp->b_error = error;
		bp->b_resid = bp->b_bcount;
	} else {
		bp->b_resid = 0;
	}

done:
	/* Dissociate the task from the I/O xfer and release it.  */
	mutex_enter(&sc->sc_lock);
done_locked:
	ld_sdmmc_task_put(sc, task);
	mutex_exit(&sc->sc_lock);

	lddone(&sc->sc_ld, bp);
}

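/*
 * Crash-dump hook: write blocks directly, bypassing the task queue,
 * since dumps run in polled context with no other threads scheduled.
 */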
static int
ld_sdmmc_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);

	return sdmmc_mem_write_block(sc->sc_sf, blkno, data,
	    blkcnt * ld->sc_secsize);
}

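/*
 * Perform a discard (erase) of a block range in task-thread context.
 * Errors are counted in the event counters and reported back through
 * bp->b_error.
 */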
static void
ld_sdmmc_dodiscard(void *arg)
{
	struct ld_sdmmc_task *task = arg;
	struct ld_sdmmc_softc *sc = task->task_sc;
	struct buf *bp = task->task_bp;
	uint32_t sblkno, nblks;
	int error;

	/* range of blocks to erase: first block and block count */
	sblkno = bp->b_rawblkno;
	nblks = howmany(bp->b_bcount, sc->sc_ld.sc_secsize);

	/* An error from discard is non-fatal */
	error = sdmmc_mem_discard(sc->sc_sf, sblkno, sblkno + nblks - 1);

	/* Count error or success and release the task.  */
	mutex_enter(&sc->sc_lock);
	if (error)
		sc->sc_ev_discarderr.ev_count++;
	else
		sc->sc_ev_discard.ev_count++;
	ld_sdmmc_task_put(sc, task);
	mutex_exit(&sc->sc_lock);

	/* Record the error and notify the xfer of completion.  */
	if (error)
		bp->b_error = error;
	lddiscardend(&sc->sc_ld, bp);
}

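/*
 * ld(4) discard entry point: queue an asynchronous discard of the
 * range described by bp, or fail with EBUSY when no task is free.
 */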
static int
ld_sdmmc_discard(struct ld_softc *ld, struct buf *bp)
{
	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);
	struct ld_sdmmc_task *task;
	int error;

	mutex_enter(&sc->sc_lock);

	/* Acquire a free task, or drop the request altogether.  */
	if ((task = ld_sdmmc_task_get(sc)) == NULL) {
		sc->sc_ev_discardbusy.ev_count++;
		error = EBUSY;
		goto out;
	}

	/* Set up the task and schedule it.  */
	task->task_bp = bp;
	sdmmc_init_task(&task->task, ld_sdmmc_dodiscard, task);

	sdmmc_add_task(sc->sc_sf->sc, &task->task);

	/* Success!  The request is queued.  */
	error = 0;

out:	mutex_exit(&sc->sc_lock);
	return error;
}

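/*
 * Flush the device write cache in task-thread context and wake the
 * thread sleeping in ld_sdmmc_cachesync() with the result.
 */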
static void
ld_sdmmc_docachesync(void *arg)
{
	struct ld_sdmmc_task *task = arg;
	struct ld_sdmmc_softc *sc = task->task_sc;
	int error;

	/* Flush the cache.  */
	error = sdmmc_mem_flush_cache(sc->sc_sf, task->task_poll);

	mutex_enter(&sc->sc_lock);

	/* Notify the other thread that we're done; pass on the error.  */
	*task->task_errorp = error;
	cv_broadcast(&sc->sc_cv);

	/* Release the task.  */
	ld_sdmmc_task_put(sc, task);

	mutex_exit(&sc->sc_lock);
}

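/*
 * Synchronously flush the device cache: queue a cachesync task and
 * wait for it to complete, bumping sc_busy so the softc stays alive
 * while we sleep.
 */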
static int
ld_sdmmc_cachesync(struct ld_softc *ld, bool poll)
{
	struct ld_sdmmc_softc *sc = device_private(ld->sc_dv);
	struct sdmmc_softc *sdmmc = device_private(device_parent(ld->sc_dv));
	struct ld_sdmmc_task *task;
	int error = -1;

	/*
	 * If we come here through the sdmmc discovery task, we can't
	 * wait for a new task because the new task can't even begin
	 * until the sdmmc discovery task has completed.
	 *
	 * XXX This is wrong, because there may already be queued I/O
	 * tasks ahead of us.  Fixing this properly requires doing
	 * discovery in a separate thread.  But this should avoid the
	 * deadlock of PR kern/57870 (https://gnats.NetBSD.org/57870)
	 * until we do split that up.
	 */
	if (curlwp == sdmmc->sc_tskq_lwp)
		return sdmmc_mem_flush_cache(sc->sc_sf, poll);

	mutex_enter(&sc->sc_lock);

	/* Acquire a free task, or fail with EBUSY.  */
	if ((task = ld_sdmmc_task_get(sc)) == NULL) {
		sc->sc_ev_cachesyncbusy.ev_count++;
		error = EBUSY;
		goto out;
	}

	/* Set up the task and schedule it.  */
	task->task_poll = poll;
	task->task_errorp = &error;
	sdmmc_init_task(&task->task, ld_sdmmc_docachesync, task);

	sdmmc_add_task(sc->sc_sf->sc, &task->task);

	/*
	 * Wait for the task to complete.  If the device is yanked,
	 * detach will notify us.  Keep the busy count up until we're
	 * done waiting so that the softc doesn't go away until we're
	 * done.
	 */
	sc->sc_busy++;
	KASSERT(sc->sc_busy <= LD_SDMMC_MAXTASKCNT);
	while (error == -1)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	if (--sc->sc_busy == 0)
		cv_broadcast(&sc->sc_cv);

out:	mutex_exit(&sc->sc_lock);
	return error;
}

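/*
 * ld(4) ioctl hook.  Only DIOCCACHESYNC is handled here; everything
 * else is passed back to the generic disk code.  A userland caller
 * would reach this through the disk device, e.g. (sketch, error
 * handling omitted):
 *
 *	int force = 0;
 *	ioctl(fd, DIOCCACHESYNC, &force);
 */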
static int
ld_sdmmc_ioctl(struct ld_softc *ld, u_long cmd, void *addr, int32_t flag,
    bool poll)
{

	switch (cmd) {
	case DIOCCACHESYNC:
		return ld_sdmmc_cachesync(ld, poll);
	default:
		return EPASSTHROUGH;
	}
}

MODULE(MODULE_CLASS_DRIVER, ld_sdmmc, "ld");

#ifdef _MODULE
/*
 * XXX Don't allow ioconf.c to redefine the "struct cfdriver ld_cd"
 * XXX it will be defined in the common-code module
 */
#undef CFDRIVER_DECL
#define CFDRIVER_DECL(name, class, attr)
#include "ioconf.c"
#endif

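/*
 * Module control: wire up (or tear down) the cfattach/cfdata tables
 * generated by ioconf when this driver is loaded as a module.
 */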
static int
ld_sdmmc_modcmd(modcmd_t cmd, void *opaque)
{
#ifdef _MODULE
	/*
	 * We ignore the cfdriver_vec[] that ioconf provides, since
	 * the cfdrivers are attached already.
	 */
	static struct cfdriver * const no_cfdriver_vec[] = { NULL };
#endif
	int error = 0;

#ifdef _MODULE
	switch (cmd) {
	case MODULE_CMD_INIT:
		error = config_init_component(no_cfdriver_vec,
		    cfattach_ioconf_ld_sdmmc, cfdata_ioconf_ld_sdmmc);
		break;
	case MODULE_CMD_FINI:
		error = config_fini_component(no_cfdriver_vec,
		    cfattach_ioconf_ld_sdmmc, cfdata_ioconf_ld_sdmmc);
		break;
	default:
		error = ENOTTY;
		break;
	}
#endif

	return error;
}