/*	$NetBSD: mmemcard.c,v 1.3 2003/07/15 01:31:40 lukem Exp $	*/
2
3 /*-
4 * Copyright (c) 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by ITOH Yasufumi.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: mmemcard.c,v 1.3 2003/07/15 01:31:40 lukem Exp $");
41
42 #include <sys/param.h>
43 #include <sys/buf.h>
44 #include <sys/device.h>
45 #include <sys/disklabel.h>
46 #include <sys/disk.h>
47 #include <sys/kernel.h>
48 #include <sys/malloc.h>
49 #include <sys/proc.h>
50 #include <sys/stat.h>
51 #include <sys/systm.h>
52 #include <sys/vnode.h>
53 #include <sys/conf.h>
54
55 #include <dreamcast/dev/maple/maple.h>
56 #include <dreamcast/dev/maple/mapleconf.h>
57
/*
 * Maximum data payload of a single maple access:
 * 255 longwords per frame minus the 8-byte request header.
 */
#define MMEM_MAXACCSIZE	1012	/* (255*4) - 8  =  253*32 / 8 */

/*
 * Decoded form of the 32-bit memory-card function definition word
 * returned by maple_get_function_data() (see mmemattach()).
 */
struct mmem_funcdef {	/* XXX assuming little-endian structure packing */
	unsigned unused	: 8,
		 ra	: 4,	/* number of access / read */
		 wa	: 4,	/* number of access / write */
		 bb	: 8,	/* block size / 32 - 1 */
		 pt	: 8;	/* number of partition - 1 */
};

/* request payload for MAPLE_COMMAND_BREAD / GETLASTERR */
struct mmem_request_read_data {
	u_int32_t	func_code;
	u_int8_t	pt;
	u_int8_t	phase;
	u_int16_t	block;
};

/* response payload for MAPLE_COMMAND_BREAD */
struct mmem_response_read_data {
	u_int32_t	func_code;	/* function code (big endian) */
	u_int32_t	blkno;		/* 512byte block number (big endian) */
	u_int8_t	data[MMEM_MAXACCSIZE];
};

/* request payload for MAPLE_COMMAND_BWRITE */
struct mmem_request_write_data {
	u_int32_t	func_code;
	u_int8_t	pt;
	u_int8_t	phase;		/* 0, 1, 2, 3: for each 128 byte */
	u_int16_t	block;
	u_int8_t	data[MMEM_MAXACCSIZE];
};
/* actual size of a write request: 8-byte header + one access worth of data */
#define MMEM_SIZE_REQW(sc)	((sc)->sc_waccsz + 8)

/* request payload for MAPLE_COMMAND_GETMINFO */
struct mmem_request_get_media_info {
	u_int32_t	func_code;
	u_int32_t	pt;		/* pt (1 byte) and unused 3 bytes */
};

/*
 * Media layout reported by the card.  Only maxblk/minblk are
 * interpreted by this driver (label size and read/write clipping);
 * the remaining fields are printed at attach time only.
 */
struct mmem_media_info {
	u_int16_t	maxblk, minblk;
	u_int16_t	infpos;
	u_int16_t	fatpos, fatsz;
	u_int16_t	dirpos, dirsz;
	u_int16_t	icon;
	u_int16_t	datasz;
	u_int16_t	rsvd[3];
};

/* response payload for MAPLE_COMMAND_GETMINFO */
struct mmem_response_media_info {
	u_int32_t	func_code;	/* function code (big endian) */
	struct mmem_media_info info;
};
109
/*
 * Per-card software state.  One softc serves all partitions of a
 * card; per-partition state lives in the sc_pt array.  Only one
 * transfer is in flight at a time (sc_bp), driven by the state
 * machine in mmem_intr().
 */
struct mmem_softc {
	struct device	sc_dev;

	struct device	*sc_parent;	/* maple bus */
	struct maple_unit *sc_unit;
	struct maple_devinfo *sc_devinfo;

	enum mmem_stat {
		MMEM_INIT,	/* during initialization */
		MMEM_INIT2,	/* during initialization */
		MMEM_IDLE,	/* init done, not in I/O */
		MMEM_READ,	/* in read operation */
		MMEM_WRITE1,	/* in write operation (read and compare) */
		MMEM_WRITE2,	/* in write operation (write) */
		MMEM_DETACH	/* detaching */
	} sc_stat;

	int		sc_npt;		/* number of partitions */
	int		sc_bsize;	/* block size */
	int		sc_wacc;	/* number of write access per block */
	int		sc_waccsz;	/* size of a write access */
	int		sc_racc;	/* number of read access per block */
	int		sc_raccsz;	/* size of a read access */

	struct mmem_pt {
		int		pt_flags;
#define MMEM_PT_OK	1	/* partition is alive */
		struct disk	pt_dk;		/* disk(9) */
		struct mmem_media_info pt_info;	/* geometry per part */

		char		pt_name[16 /* see device.h */ + 4 /* ".255" */];
	} *sc_pt;

	/* write request buffer (only one is used at a time) */
	union {
		struct mmem_request_read_data	req_read;
		struct mmem_request_write_data	req_write;
		struct mmem_request_get_media_info req_minfo;
	} sc_req;
#define sc_reqr	sc_req.req_read
#define sc_reqw	sc_req.req_write
#define sc_reqm	sc_req.req_minfo

	/* pending buffers */
	struct bufq_state sc_q;

	/* current I/O access */
	struct buf	*sc_bp;		/* buffer being transferred */
	int		sc_cnt;		/* remaining blocks of the transfer */
	char		*sc_iobuf;	/* current position in bp->b_data */
	int		sc_retry;	/* per-block retry counter */
#define MMEM_MAXRETRY	12
};
163
/*
 * minor number layout (mmemdetach() depends on this layout):
 *
 * 19 18 17 16 15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
 * |---------------------|  |---------------------|  |---------|
 *          unit                    part             disklabel partition
 */
#define MMEM_PART(diskunit)	((diskunit) & 0xff)
#define MMEM_UNIT(diskunit)	((diskunit) >> 8)
#define MMEM_DISKMINOR(unit, part, disklabel_partition) \
	DISKMINOR(((unit) << 8) | (part), (disklabel_partition))

/* autoconf / internal helpers */
static int	mmemmatch __P((struct device *, struct cfdata *, void *));
static void	mmemattach __P((struct device *, struct device *, void *));
static void	mmem_defaultlabel __P((struct mmem_softc *, struct mmem_pt *,
		    struct disklabel *));
static int	mmemdetach __P((struct device *, int));
static void	mmem_intr __P((void *, struct maple_response *, int, int));
static void	mmem_printerror __P((const char *, int, int, u_int32_t));
static void	mmemstart __P((struct mmem_softc *));
static void	mmemstart_bp __P((struct mmem_softc *));
static void	mmemstart_write2 __P((struct mmem_softc *));
static void	mmemdone __P((struct mmem_softc *, struct mmem_pt *, int));

/* device-switch entry points */
dev_type_open(mmemopen);
dev_type_close(mmemclose);
dev_type_read(mmemread);
dev_type_write(mmemwrite);
dev_type_ioctl(mmemioctl);
dev_type_strategy(mmemstrategy);

/* block device switch (no dump, no size) */
const struct bdevsw mmem_bdevsw = {
	mmemopen, mmemclose, mmemstrategy, mmemioctl, nodump,
	nosize, D_DISK
};

/* character (raw) device switch */
const struct cdevsw mmem_cdevsw = {
	mmemopen, mmemclose, mmemread, mmemwrite, mmemioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

CFATTACH_DECL(mmem, sizeof(struct mmem_softc),
    mmemmatch, mmemattach, mmemdetach, NULL);

extern struct cfdriver mmem_cd;

/* disk(9) driver glue: only the strategy routine is provided */
struct dkdriver mmemdkdriver = { mmemstrategy };
211
212 static int
213 mmemmatch(parent, cf, aux)
214 struct device *parent;
215 struct cfdata *cf;
216 void *aux;
217 {
218 struct maple_attach_args *ma = aux;
219
220 return (ma->ma_function == MAPLE_FN_MEMCARD ? MAPLE_MATCH_FUNC : 0);
221 }
222
/*
 * Autoconf attach: decode the card's function definition word,
 * sanity-check the access geometry, allocate per-partition state,
 * and kick off the asynchronous init sequence (GETMINFO on
 * partition 0; the rest is driven from mmem_intr()).
 */
static void
mmemattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct mmem_softc *sc = (void *) self;
	struct maple_attach_args *ma = aux;
	int i;
	union {
		u_int32_t v;
		struct mmem_funcdef s;
	} funcdef;

	sc->sc_parent = parent;
	sc->sc_unit = ma->ma_unit;
	sc->sc_devinfo = ma->ma_devinfo;

	/* decode the function definition word (see struct mmem_funcdef) */
	funcdef.v = maple_get_function_data(ma->ma_devinfo, MAPLE_FN_MEMCARD);
	printf(": Memory card\n");
	printf("%s: %d part, %d bytes/block, ",
	    sc->sc_dev.dv_xname,
	    sc->sc_npt = funcdef.s.pt + 1,
	    sc->sc_bsize = (funcdef.s.bb + 1) << 5);
	if ((sc->sc_wacc = funcdef.s.wa) == 0)
		printf("no write, ");
	else
		printf("%d acc/write, ", sc->sc_wacc);
	if ((sc->sc_racc = funcdef.s.ra) == 0)
		printf("no read\n");
	else
		printf("%d acc/read\n", sc->sc_racc);

	/*
	 * start init sequence
	 */
	sc->sc_stat = MMEM_INIT;
	bufq_alloc(&sc->sc_q, BUFQ_DISKSORT|BUFQ_SORT_RAWBLOCK);

	/* check consistency: each access must divide a block evenly */
	if (sc->sc_wacc != 0) {
		sc->sc_waccsz = sc->sc_bsize / sc->sc_wacc;
		if (sc->sc_bsize != sc->sc_waccsz * sc->sc_wacc) {
			printf("%s: write access isn't equally divided\n",
			    sc->sc_dev.dv_xname);
			sc->sc_wacc = 0;	/* no write */
		} else if (sc->sc_waccsz > MMEM_MAXACCSIZE) {
			printf("%s: write access size is too large\n",
			    sc->sc_dev.dv_xname);
			sc->sc_wacc = 0;	/* no write */
		}
	}
	if (sc->sc_racc != 0) {
		sc->sc_raccsz = sc->sc_bsize / sc->sc_racc;
		if (sc->sc_bsize != sc->sc_raccsz * sc->sc_racc) {
			printf("%s: read access isn't equally divided\n",
			    sc->sc_dev.dv_xname);
			sc->sc_racc = 0;	/* no read */
		} else if (sc->sc_raccsz > MMEM_MAXACCSIZE) {
			printf("%s: read access size is too large\n",
			    sc->sc_dev.dv_xname);
			sc->sc_racc = 0;	/* no read */
		}
	}
	if (sc->sc_wacc == 0 && sc->sc_racc == 0) {
		/* nothing usable; sc_pt stays NULL (mmemdetach handles it) */
		printf("%s: device doesn't support read nor write\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* per-part structure */
	sc->sc_pt = malloc(sizeof(struct mmem_pt) * sc->sc_npt, M_DEVBUF,
	    M_WAITOK|M_ZERO);

	for (i = 0; i < sc->sc_npt; i++) {
		sprintf(sc->sc_pt[i].pt_name, "%s.%d", sc->sc_dev.dv_xname, i);
	}

	/* all responses for this function arrive at mmem_intr() */
	maple_set_callback(parent, sc->sc_unit, MAPLE_FN_MEMCARD,
	    mmem_intr, sc);

	/*
	 * get capacity (start from partition 0)
	 */
	sc->sc_reqm.func_code = htonl(MAPLE_FUNC(MAPLE_FN_MEMCARD));
	sc->sc_reqm.pt = 0;
	maple_command(sc->sc_parent, sc->sc_unit, MAPLE_FN_MEMCARD,
	    MAPLE_COMMAND_GETMINFO, sizeof sc->sc_reqm / 4, &sc->sc_reqm, 0);
}
311
/*
 * Autoconf detach: abort pending and in-flight I/O with EIO,
 * revoke all vnodes referring to this unit's minors, and free
 * per-partition state.  Never fails (always returns 0).
 */
static int
mmemdetach(self, flags)
	struct device *self;
	int flags;
{
	struct mmem_softc *sc = (struct mmem_softc *) self;
	struct buf *bp;
	int i;
	int minor_l, minor_h;

	sc->sc_stat = MMEM_DETACH;	/* just in case */

	/*
	 * kill pending I/O
	 */
	if ((bp = sc->sc_bp) != NULL) {
		/* current transfer, if any */
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}
	while ((bp = BUFQ_GET(&sc->sc_q)) != NULL) {
		/* queued transfers */
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}
	bufq_free(&sc->sc_q);

	/*
	 * revoke vnodes
	 */
#ifdef __HAVE_OLD_DISKLABEL
 #error This code assumes DISKUNIT() is contiguous in minor number.
#endif
	/* whole minor range of this unit (see minor layout comment above) */
	minor_l = MMEM_DISKMINOR(self->dv_unit, 0, 0);
	minor_h = MMEM_DISKMINOR(self->dv_unit, sc->sc_npt - 1,
	    MAXPARTITIONS - 1);
	vdevgone(bdevsw_lookup_major(&mmem_bdevsw), minor_l, minor_h, VBLK);
	vdevgone(cdevsw_lookup_major(&mmem_cdevsw), minor_l, minor_h, VCHR);

	/*
	 * free per-partition structure
	 * (sc_pt is NULL if attach bailed out early)
	 */
	if (sc->sc_pt) {
		/*
		 * detach disks
		 */
		for (i = 0; i < sc->sc_npt; i++) {
			if (sc->sc_pt[i].pt_flags & MMEM_PT_OK)
				disk_detach(&sc->sc_pt[i].pt_dk);
		}
		free(sc->sc_pt, M_DEVBUF);
	}

	return 0;
}
369
370 /* fake disklabel */
371 static void
372 mmem_defaultlabel(sc, pt, d)
373 struct mmem_softc *sc;
374 struct mmem_pt *pt;
375 struct disklabel *d;
376 {
377
378 bzero(d, sizeof *d);
379
380 #if 0
381 d->d_type = DTYPE_FLOPPY; /* XXX? */
382 #endif
383 strncpy(d->d_typename, sc->sc_devinfo->di_product_name,
384 sizeof d->d_typename);
385 strcpy(d->d_packname, "fictitious");
386 d->d_secsize = sc->sc_bsize;
387 d->d_ntracks = 1; /* XXX */
388 d->d_nsectors = d->d_secpercyl = 8; /* XXX */
389 d->d_secperunit = pt->pt_info.maxblk - pt->pt_info.minblk + 1;
390 d->d_ncylinders = d->d_secperunit / d->d_secpercyl;
391 d->d_rpm = 1; /* when 4 acc/write */
392
393 d->d_npartitions = RAW_PART + 1;
394 d->d_partitions[RAW_PART].p_size = d->d_secperunit;
395
396 d->d_magic = d->d_magic2 = DISKMAGIC;
397 d->d_checksum = dkcksum(d);
398 }
399
/*
 * called back from maple bus driver
 *
 * Single response callback for this unit; dispatches on the softc
 * state: geometry probing (MMEM_INIT/INIT2), multi-phase block read
 * (MMEM_READ), read-and-compare before write (MMEM_WRITE1), and the
 * write itself (MMEM_WRITE2).
 */
static void
mmem_intr(dev, response, sz, flags)
	void *dev;
	struct maple_response *response;
	int sz, flags;
{
	struct mmem_softc *sc = dev;
	/* the same payload, viewed as either response type */
	struct mmem_response_read_data *r = (void *) response->data;
	struct mmem_response_media_info *rm = (void *) response->data;
	struct buf *bp;
	int part;
	struct mmem_pt *pt;
	char pbuf[9];
	int off;

	switch (sc->sc_stat) {
	case MMEM_INIT:
		/* checking part geometry */
		part = sc->sc_reqm.pt;
		pt = &sc->sc_pt[part];
		switch ((maple_response_t) response->response_code) {
		case MAPLE_RESPONSE_DATATRF:
			/* got media info; record it and attach the disk */
			pt->pt_info = rm->info;
			format_bytes(pbuf, sizeof(pbuf),
			    (u_int64_t)
				((pt->pt_info.maxblk - pt->pt_info.minblk + 1)
				 * sc->sc_bsize));
			printf("%s: %s, blk %d %d, inf %d, fat %d %d, dir %d %d, icon %d, data %d\n",
			    pt->pt_name,
			    pbuf,
			    pt->pt_info.maxblk, pt->pt_info.minblk,
			    pt->pt_info.infpos,
			    pt->pt_info.fatpos, pt->pt_info.fatsz,
			    pt->pt_info.dirpos, pt->pt_info.dirsz,
			    pt->pt_info.icon,
			    pt->pt_info.datasz);

			pt->pt_dk.dk_driver = &mmemdkdriver;
			pt->pt_dk.dk_name = pt->pt_name;
			disk_attach(&pt->pt_dk);

			mmem_defaultlabel(sc, pt, pt->pt_dk.dk_label);

			/* this partition is active */
			pt->pt_flags = MMEM_PT_OK;

			break;
		default:
			/* probe failed; partition stays inactive (flags 0) */
			printf("%s: init: unexpected response %#x, sz %d\n",
			    pt->pt_name, ntohl(response->response_code), sz);
			break;
		}
		if (++part == sc->sc_npt) {
#if 1
			/*
			 * XXX Read a block and discard the contents (only to
			 * turn off the access indicator on Visual Memory).
			 */
			pt = &sc->sc_pt[0];
			sc->sc_reqr.func_code =
			    htonl(MAPLE_FUNC(MAPLE_FN_MEMCARD));
			sc->sc_reqr.pt = 0;
			sc->sc_reqr.block = htons(pt->pt_info.minblk);
			sc->sc_reqr.phase = 0;
			maple_command(sc->sc_parent, sc->sc_unit,
			    MAPLE_FN_MEMCARD, MAPLE_COMMAND_BREAD,
			    sizeof sc->sc_reqr / 4, &sc->sc_reqr, 0);
			sc->sc_stat = MMEM_INIT2;
#else
			sc->sc_stat = MMEM_IDLE;	/* init done */
#endif
		} else {
			/* probe the next partition's geometry */
			sc->sc_reqm.pt = part;
			maple_command(sc->sc_parent, sc->sc_unit,
			    MAPLE_FN_MEMCARD, MAPLE_COMMAND_GETMINFO,
			    sizeof sc->sc_reqm / 4, &sc->sc_reqm, 0);
		}
		break;

	case MMEM_INIT2:
		/* XXX just discard (response to the dummy read above) */
		sc->sc_stat = MMEM_IDLE;	/* init done */
		break;

	case MMEM_READ:
		bp = sc->sc_bp;

		switch ((maple_response_t) response->response_code) {
		case MAPLE_RESPONSE_DATATRF:		/* read done */
			/* copy this phase's fragment into the transfer buf */
			off = sc->sc_raccsz * sc->sc_reqr.phase;
			bcopy(r->data + off, sc->sc_iobuf + off, sc->sc_raccsz);

			if (++sc->sc_reqr.phase == sc->sc_racc) {
				/* all phase done */
				pt = &sc->sc_pt[sc->sc_reqr.pt];
				mmemdone(sc, pt, 0);
			} else {
				/* go next phase */
				maple_command(sc->sc_parent, sc->sc_unit,
				    MAPLE_FN_MEMCARD, MAPLE_COMMAND_BREAD,
				    sizeof sc->sc_reqr / 4, &sc->sc_reqr, 0);
			}
			break;
		case MAPLE_RESPONSE_FILEERR:
			mmem_printerror(sc->sc_pt[sc->sc_reqr.pt].pt_name,
			    1, bp->b_rawblkno,
			    r->func_code /* XXX */);
			mmemstart_bp(sc);		/* retry */
			break;
		default:
			printf("%s: read: unexpected response %#x %#x, sz %d\n",
			    sc->sc_pt[sc->sc_reqr.pt].pt_name,
			    ntohl(response->response_code),
			    ntohl(r->func_code), sz);
			mmemstart_bp(sc);		/* retry */
			break;
		}
		break;

	case MMEM_WRITE1:	/* read before write / verify after write */
		bp = sc->sc_bp;

		switch ((maple_response_t) response->response_code) {
		case MAPLE_RESPONSE_DATATRF:		/* read done */
			off = sc->sc_raccsz * sc->sc_reqr.phase;
			if (bcmp(r->data + off, sc->sc_iobuf + off,
			    sc->sc_raccsz)) {
				/*
				 * data differ, start writing
				 */
				mmemstart_write2(sc);
			} else if (++sc->sc_reqr.phase == sc->sc_racc) {
				/*
				 * all phase done and compared equal
				 */
				pt = &sc->sc_pt[sc->sc_reqr.pt];
				mmemdone(sc, pt, 0);
			} else {
				/* go next phase */
				maple_command(sc->sc_parent, sc->sc_unit,
				    MAPLE_FN_MEMCARD, MAPLE_COMMAND_BREAD,
				    sizeof sc->sc_reqr / 4, &sc->sc_reqr, 0);
			}
			break;
		case MAPLE_RESPONSE_FILEERR:
			/* can't read back; write unconditionally */
			mmem_printerror(sc->sc_pt[sc->sc_reqr.pt].pt_name,
			    1, bp->b_rawblkno,
			    r->func_code /* XXX */);
			mmemstart_write2(sc);		/* start writing */
			break;
		default:
			printf("%s: verify: unexpected response %#x %#x, sz %d\n",
			    sc->sc_pt[sc->sc_reqr.pt].pt_name,
			    ntohl(response->response_code),
			    ntohl(r->func_code), sz);
			mmemstart_write2(sc);		/* start writing */
			break;
		}
		break;

	case MMEM_WRITE2:	/* write */
		bp = sc->sc_bp;

		switch ((maple_response_t) response->response_code) {
		case MAPLE_RESPONSE_OK:			/* write done */
			if (sc->sc_reqw.phase == sc->sc_wacc) {
				/*
				 * All phases written and the GETLASTERR
				 * check below came back OK; reread the
				 * block to verify (MMEM_WRITE1).
				 */
				mmemstart_bp(sc);	/* start verify */
			} else if (++sc->sc_reqw.phase == sc->sc_wacc) {
				/* last phase done; check error */
				maple_command(sc->sc_parent, sc->sc_unit,
				    MAPLE_FN_MEMCARD, MAPLE_COMMAND_GETLASTERR,
				    2 /* no data */ , &sc->sc_reqw,
				    MAPLE_FLAG_CMD_PERIODIC_TIMING);
			} else {
				/* go next phase */
				bcopy(sc->sc_iobuf
				      + sc->sc_waccsz * sc->sc_reqw.phase,
				    sc->sc_reqw.data, sc->sc_waccsz);
				maple_command(sc->sc_parent, sc->sc_unit,
				    MAPLE_FN_MEMCARD, MAPLE_COMMAND_BWRITE,
				    MMEM_SIZE_REQW(sc) / 4, &sc->sc_reqw,
				    MAPLE_FLAG_CMD_PERIODIC_TIMING);
			}
			break;
		case MAPLE_RESPONSE_FILEERR:
			mmem_printerror(sc->sc_pt[sc->sc_reqw.pt].pt_name,
			    0, bp->b_rawblkno,
			    r->func_code /* XXX */);
			mmemstart_write2(sc);		/* retry writing */
			break;
		default:
			printf("%s: write: unexpected response %#x, %#x, sz %d\n",
			    sc->sc_pt[sc->sc_reqw.pt].pt_name,
			    ntohl(response->response_code),
			    ntohl(r->func_code), sz);
			mmemstart_write2(sc);		/* retry writing */
			break;
		}
		break;

	default:
		break;
	}
}
608
609 static void
610 mmem_printerror(head, rd, blk, code)
611 const char *head;
612 int rd; /* 1: read, 0: write */
613 int blk;
614 u_int32_t code;
615 {
616
617 printf("%s: error %sing blk %d:", head, rd? "read" : "writ", blk);
618 NTOHL(code);
619 if (code & 1)
620 printf(" PT error");
621 if (code & 2)
622 printf(" Phase error");
623 if (code & 4)
624 printf(" Block error");
625 if (code & 010)
626 printf(" Write error");
627 if (code & 020)
628 printf(" Length error");
629 if (code & 040)
630 printf(" CRC error");
631 if (code & ~077)
632 printf(" Unknown error %#x", code & ~077);
633 printf("\n");
634 }
635
/*
 * Open a partition (both block and character minors).  Fails with
 * ENXIO while initialization is still in progress, for nonexistent
 * units/partitions, and for partitions whose geometry probe failed
 * (pt_flags == 0).
 */
int
mmemopen(dev, flags, devtype, p)
	dev_t dev;
	int flags, devtype;
	struct proc *p;
{
	int diskunit, unit, part, labelpart;
	struct mmem_softc *sc;
	struct mmem_pt *pt;

	/* split the minor: card unit, card partition, disklabel partition */
	diskunit = DISKUNIT(dev);
	unit = MMEM_UNIT(diskunit);
	part = MMEM_PART(diskunit);
	labelpart = DISKPART(dev);
	if ((sc = device_lookup(&mmem_cd, unit)) == NULL
	    || sc->sc_stat == MMEM_INIT
	    || sc->sc_stat == MMEM_INIT2
	    || part >= sc->sc_npt || (pt = &sc->sc_pt[part])->pt_flags == 0)
		return ENXIO;

	/* record the open in the appropriate disk(9) open mask */
	switch (devtype) {
	case S_IFCHR:
		pt->pt_dk.dk_copenmask |= (1 << labelpart);
		break;
	case S_IFBLK:
		pt->pt_dk.dk_bopenmask |= (1 << labelpart);
		break;
	}

	return 0;
}
667
/*
 * Close a partition: clear the open-mask bit for this disklabel
 * partition.  Always succeeds.
 *
 * NOTE(review): unlike mmemopen(), no unit/partition validation is
 * done here (cd_devs[unit] is dereferenced directly) — presumably
 * the upper layers only call close on a device that was opened
 * successfully; confirm before relying on this elsewhere.
 */
int
mmemclose(dev, flags, devtype, p)
	dev_t dev;
	int flags, devtype;
	struct proc *p;
{
	int diskunit, unit, part, labelpart;
	struct mmem_softc *sc;
	struct mmem_pt *pt;

	diskunit = DISKUNIT(dev);
	unit = MMEM_UNIT(diskunit);
	part = MMEM_PART(diskunit);
	sc = mmem_cd.cd_devs[unit];
	pt = &sc->sc_pt[part];
	labelpart = DISKPART(dev);

	/* clear the open bit in the matching disk(9) open mask */
	switch (devtype) {
	case S_IFCHR:
		pt->pt_dk.dk_copenmask &= ~(1 << labelpart);
		break;
	case S_IFBLK:
		pt->pt_dk.dk_bopenmask &= ~(1 << labelpart);
		break;
	}

	return 0;
}
696
/*
 * Block I/O entry point: validate the request, convert the block
 * number to card blocks, clip the transfer to the partition, queue
 * the buffer, and start the state machine if it is idle.
 */
void
mmemstrategy(bp)
	struct buf *bp;
{
	int diskunit, unit, part, labelpart;
	struct mmem_softc *sc;
	struct mmem_pt *pt;
	daddr_t off, nblk, cnt;

	diskunit = DISKUNIT(bp->b_dev);
	unit = MMEM_UNIT(diskunit);
	part = MMEM_PART(diskunit);
	if ((sc = device_lookup(&mmem_cd, unit)) == NULL
	    || sc->sc_stat == MMEM_INIT
	    || sc->sc_stat == MMEM_INIT2
	    || part >= sc->sc_npt || (pt = &sc->sc_pt[part])->pt_flags == 0)
		goto inval;

#if 0
	printf("%s: mmemstrategy: blkno %d, count %ld\n",
	    pt->pt_name, bp->b_blkno, bp->b_bcount);
#endif

	/* direction must be supported by the card */
	if (bp->b_flags & B_READ) {
		if (sc->sc_racc == 0)
			goto inval;		/* no read */
	} else if (sc->sc_wacc == 0) {
		bp->b_error = EROFS;		/* no write */
		goto bad;
	}

	/*
	 * reject block numbers whose byte offset would overflow daddr_t,
	 * and transfers that aren't a multiple of the card block size
	 */
	if (bp->b_blkno & ~(~(daddr_t)0 >> (DEV_BSHIFT + 1 /* sign bit */))
	    || (bp->b_bcount % sc->sc_bsize) != 0)
		goto inval;

	cnt = howmany(bp->b_bcount, sc->sc_bsize);
	if (cnt == 0)
		goto done;	/* no work */

	/* convert from DEV_BSIZE units to card blocks */
	off = bp->b_blkno * DEV_BSIZE / sc->sc_bsize;

	/* offset to disklabel partition */
	labelpart = DISKPART(bp->b_dev);
	if (labelpart == RAW_PART) {
		/* raw partition: whole unit, no offset */
		nblk = pt->pt_info.maxblk - pt->pt_info.minblk + 1;
	} else {
		/* off becomes absolute; nblk becomes end of partition */
		off +=
		    nblk = pt->pt_dk.dk_label->d_partitions[labelpart].p_offset;
		nblk += pt->pt_dk.dk_label->d_partitions[labelpart].p_size;
	}

	/* deal with the EOF condition: clip, or reject past-the-end */
	if (off + cnt > nblk) {
		if (off >= nblk) {
			if (off == nblk)
				goto done;	/* exactly at EOF: zero I/O */
			goto inval;
		}
		cnt = nblk - off;
		bp->b_resid = bp->b_bcount - (cnt * sc->sc_bsize);
	}

	bp->b_rawblkno = off;

	/* queue this transfer */
	BUFQ_PUT(&sc->sc_q, bp);

	if (sc->sc_stat == MMEM_IDLE)
		mmemstart(sc);

	return;

inval:	bp->b_error = EINVAL;
bad:	bp->b_flags |= B_ERROR;
done:	bp->b_resid = bp->b_bcount;
	biodone(bp);
}
774
/*
 * start I/O operations
 *
 * Take the next buffer off the queue and begin its transfer.
 * With an empty queue, go idle and re-enable the periodic removal
 * ping that is disabled for the duration of I/O.
 */
static void
mmemstart(sc)
	struct mmem_softc *sc;
{
	struct buf *bp;
	struct mmem_pt *pt;
	int s;

	if ((bp = BUFQ_GET(&sc->sc_q)) == NULL) {
		sc->sc_stat = MMEM_IDLE;
		maple_enable_unit_ping(sc->sc_parent, sc->sc_unit,
		    MAPLE_FN_MEMCARD, 1);
		return;
	}

	/* set up the per-transfer cursor (b_resid holds any clipped tail) */
	sc->sc_bp = bp;
	sc->sc_cnt = howmany(bp->b_bcount - bp->b_resid, sc->sc_bsize);
	KASSERT(sc->sc_cnt);
	sc->sc_iobuf = bp->b_data;
	sc->sc_retry = 0;

	pt = &sc->sc_pt[MMEM_PART(DISKUNIT(bp->b_dev))];
	s = splbio();
	disk_busy(&pt->pt_dk);
	splx(s);

	/*
	 * I/O access will fail if the removal detection (by maple driver)
	 * occurs before finishing the I/O, so disable it.
	 * We are sending commands, and the removal detection is still alive.
	 */
	maple_enable_unit_ping(sc->sc_parent, sc->sc_unit, MAPLE_FN_MEMCARD, 0);

	mmemstart_bp(sc);
}
813
814 /*
815 * start/retry a specified I/O operation
816 */
817 static void
818 mmemstart_bp(sc)
819 struct mmem_softc *sc;
820 {
821 struct buf *bp;
822 int diskunit, part;
823 struct mmem_pt *pt;
824
825 bp = sc->sc_bp;
826 diskunit = DISKUNIT(bp->b_dev);
827 part = MMEM_PART(diskunit);
828 pt = &sc->sc_pt[part];
829
830 /* handle retry */
831 if (sc->sc_retry++ > MMEM_MAXRETRY) {
832 /* retry count exceeded */
833 mmemdone(sc, pt, EIO);
834 return;
835 }
836
837 /*
838 * Start the first phase (phase# = 0).
839 */
840 /* start read */
841 sc->sc_stat = (bp->b_flags & B_READ) ? MMEM_READ : MMEM_WRITE1;
842 sc->sc_reqr.func_code = htonl(MAPLE_FUNC(MAPLE_FN_MEMCARD));
843 sc->sc_reqr.pt = part;
844 sc->sc_reqr.block = htons(bp->b_rawblkno);
845 sc->sc_reqr.phase = 0; /* first phase */
846 maple_command(sc->sc_parent, sc->sc_unit, MAPLE_FN_MEMCARD,
847 MAPLE_COMMAND_BREAD, sizeof sc->sc_reqr / 4, &sc->sc_reqr, 0);
848 }
849
/*
 * Start writing the current block (phase 0).  Entered from the
 * MMEM_WRITE1 path when the on-card data differ from the buffer,
 * and to retry a failed write.  The retry budget keeps two retries
 * in reserve for the verify read that follows a successful write.
 */
static void
mmemstart_write2(sc)
	struct mmem_softc *sc;
{
	struct buf *bp;
	int diskunit, part;
	struct mmem_pt *pt;

	bp = sc->sc_bp;
	diskunit = DISKUNIT(bp->b_dev);
	part = MMEM_PART(diskunit);
	pt = &sc->sc_pt[part];

	/* handle retry */
	if (sc->sc_retry++ > MMEM_MAXRETRY - 2 /* spare for verify read */) {
		/* retry count exceeded */
		mmemdone(sc, pt, EIO);
		return;
	}

	/*
	 * Start the first phase (phase# = 0).
	 */
	/* start write */
	sc->sc_stat = MMEM_WRITE2;
	sc->sc_reqw.func_code = htonl(MAPLE_FUNC(MAPLE_FN_MEMCARD));
	sc->sc_reqw.pt = part;
	sc->sc_reqw.block = htons(bp->b_rawblkno);
	sc->sc_reqw.phase = 0;		/* first phase */
	bcopy(sc->sc_iobuf /* + sc->sc_waccsz * phase */,
	    sc->sc_reqw.data, sc->sc_waccsz);
	maple_command(sc->sc_parent, sc->sc_unit, MAPLE_FN_MEMCARD,
	    MAPLE_COMMAND_BWRITE, MMEM_SIZE_REQW(sc) / 4, &sc->sc_reqw,
	    MAPLE_FLAG_CMD_PERIODIC_TIMING);
}
885
/*
 * One block of the current transfer finished (err == 0) or failed
 * permanently (err != 0).  Advance to the next block, or complete
 * the buffer and start the next queued transfer.
 */
static void
mmemdone(sc, pt, err)
	struct mmem_softc *sc;
	struct mmem_pt *pt;
	int err;
{
	struct buf *bp = sc->sc_bp;
	int s;
	int bcnt;

	KASSERT(bp);

	if (err) {
		/* partial transfer: report how much completed */
		bcnt = sc->sc_iobuf - bp->b_data;
		bp->b_resid = bp->b_bcount - bcnt;

		/* raise error if no block is read */
		if (bcnt == 0) {
			bp->b_error = err;
			bp->b_flags |= B_ERROR;
		}
		goto term_xfer;
	}

	sc->sc_iobuf += sc->sc_bsize;
	if (--sc->sc_cnt == 0) {
	term_xfer:
		/* terminate current transfer (also reached via goto above) */
		sc->sc_bp = NULL;
		s = splbio();
		disk_unbusy(&pt->pt_dk, sc->sc_iobuf - bp->b_data,
		    sc->sc_stat == MMEM_READ);
		biodone(bp);
		splx(s);

		/* go next transfer */
		mmemstart(sc);
	} else {
		/* go next block */
		bp->b_rawblkno++;
		sc->sc_retry = 0;
		mmemstart_bp(sc);
	}
}
930
931 int
932 mmemread(dev, uio, flags)
933 dev_t dev;
934 struct uio *uio;
935 int flags;
936 {
937
938 return (physio(mmemstrategy, NULL, dev, B_READ, minphys, uio));
939 }
940
941 int
942 mmemwrite(dev, uio, flags)
943 dev_t dev;
944 struct uio *uio;
945 int flags;
946 {
947
948 return (physio(mmemstrategy, NULL, dev, B_WRITE, minphys, uio));
949 }
950
951 int
952 mmemioctl(dev, cmd, data, flag, p)
953 dev_t dev;
954 u_long cmd;
955 caddr_t data;
956 int flag;
957 struct proc *p;
958 {
959 int diskunit, unit, part;
960 struct mmem_softc *sc;
961 struct mmem_pt *pt;
962
963 diskunit = DISKUNIT(dev);
964 unit = MMEM_UNIT(diskunit);
965 part = MMEM_PART(diskunit);
966 sc = mmem_cd.cd_devs[unit];
967 pt = &sc->sc_pt[part];
968
969 switch (cmd) {
970 case DIOCGDINFO:
971 *(struct disklabel *)data = *pt->pt_dk.dk_label; /* XXX */
972 break;
973
974 default:
975 /* generic maple ioctl */
976 return maple_unit_ioctl(sc->sc_parent, sc->sc_unit, cmd, data,
977 flag, p);
978 }
979
980 return 0;
981 }
982