/*	$NetBSD: mmemcard.c,v 1.4 2004/10/28 07:07:36 yamt Exp $	*/

/*-
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by ITOH Yasufumi.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mmemcard.c,v 1.4 2004/10/28 07:07:36 yamt Exp $");

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/conf.h>

#include <dreamcast/dev/maple/maple.h>
#include <dreamcast/dev/maple/mapleconf.h>

#define MMEM_MAXACCSIZE	1012	/* (255*4) - 8  =  253*32 / 8 */
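
/*
 * Note: the 1012-byte limit presumably reflects the Maple transfer length
 * field, which counts 32-bit words and is at most 255 (255*4 bytes), minus
 * the 8-byte func_code/block header of the block read/write packets below.
 */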

struct mmem_funcdef {	/* XXX assuming little-endian structure packing */
	unsigned unused	: 8,
		 ra	: 4,	/* number of access / read */
		 wa	: 4,	/* number of access / write */
		 bb	: 8,	/* block size / 32 - 1 */
		 pt	: 8;	/* number of partition - 1 */
};
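
/*
 * This bitfield overlays the 32-bit function data word obtained from the
 * device via maple_get_function_data(); see the union in mmemattach().
 */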

struct mmem_request_read_data {
	u_int32_t	func_code;
	u_int8_t	pt;
	u_int8_t	phase;
	u_int16_t	block;
};

struct mmem_response_read_data {
	u_int32_t	func_code;	/* function code (big endian) */
	u_int32_t	blkno;		/* 512byte block number (big endian) */
	u_int8_t	data[MMEM_MAXACCSIZE];
};

struct mmem_request_write_data {
	u_int32_t	func_code;
	u_int8_t	pt;
	u_int8_t	phase;		/* 0, 1, 2, 3: for each 128 byte */
	u_int16_t	block;
	u_int8_t	data[MMEM_MAXACCSIZE];
};
#define MMEM_SIZE_REQW(sc)	((sc)->sc_waccsz + 8)
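
/*
 * MMEM_SIZE_REQW is the size of one block-write packet: the 8-byte header
 * (func_code, pt, phase, block) plus one access worth of data
 * (sc_waccsz bytes).
 */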

struct mmem_request_get_media_info {
	u_int32_t	func_code;
	u_int32_t	pt;		/* pt (1 byte) and unused 3 bytes */
};

struct mmem_media_info {
	u_int16_t	maxblk, minblk;
	u_int16_t	infpos;
	u_int16_t	fatpos, fatsz;
	u_int16_t	dirpos, dirsz;
	u_int16_t	icon;
	u_int16_t	datasz;
	u_int16_t	rsvd[3];
};

struct mmem_response_media_info {
	u_int32_t	func_code;	/* function code (big endian) */
	struct mmem_media_info info;
};

struct mmem_softc {
	struct device	sc_dev;

	struct device	*sc_parent;
	struct maple_unit *sc_unit;
	struct maple_devinfo *sc_devinfo;

	enum mmem_stat {
		MMEM_INIT,	/* during initialization */
		MMEM_INIT2,	/* during initialization */
		MMEM_IDLE,	/* init done, not in I/O */
		MMEM_READ,	/* in read operation */
		MMEM_WRITE1,	/* in write operation (read and compare) */
		MMEM_WRITE2,	/* in write operation (write) */
		MMEM_DETACH	/* detaching */
	} sc_stat;

	int		sc_npt;		/* number of partitions */
	int		sc_bsize;	/* block size */
	int		sc_wacc;	/* number of write access per block */
	int		sc_waccsz;	/* size of a write access */
	int		sc_racc;	/* number of read access per block */
	int		sc_raccsz;	/* size of a read access */

	struct mmem_pt {
		int	pt_flags;
#define MMEM_PT_OK	1	/* partition is alive */
		struct disk pt_dk;		/* disk(9) */
		struct mmem_media_info pt_info;	/* geometry per part */

		char	pt_name[16 /* see device.h */ + 4 /* ".255" */];
	} *sc_pt;

	/* write request buffer (only one is used at a time) */
	union {
		struct mmem_request_read_data		req_read;
		struct mmem_request_write_data		req_write;
		struct mmem_request_get_media_info	req_minfo;
	} sc_req;
#define sc_reqr	sc_req.req_read
#define sc_reqw	sc_req.req_write
#define sc_reqm	sc_req.req_minfo

	/* pending buffers */
	struct bufq_state sc_q;

	/* current I/O access */
	struct buf	*sc_bp;
	int		sc_cnt;
	char		*sc_iobuf;
	int		sc_retry;
#define MMEM_MAXRETRY	12
};

/*
 * minor number layout (mmemdetach() depends on this layout):
 *
 *  19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
 * |---------------------| |---------------------| |---------|
 *          unit                     part           disklabel partition
 */
#define MMEM_PART(diskunit)	((diskunit) & 0xff)
#define MMEM_UNIT(diskunit)	((diskunit) >> 8)
#define MMEM_DISKMINOR(unit, part, disklabel_partition) \
	DISKMINOR(((unit) << 8) | (part), (disklabel_partition))
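
/*
 * For example, MMEM_DISKMINOR(1, 2, RAW_PART) would address the raw
 * disklabel partition of card partition 2 on unit mmem1 (illustration
 * only; the actual encoding comes from DISKMINOR above).
 */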

static int	mmemmatch __P((struct device *, struct cfdata *, void *));
static void	mmemattach __P((struct device *, struct device *, void *));
static void	mmem_defaultlabel __P((struct mmem_softc *, struct mmem_pt *,
		    struct disklabel *));
static int	mmemdetach __P((struct device *, int));
static void	mmem_intr __P((void *, struct maple_response *, int, int));
static void	mmem_printerror __P((const char *, int, int, u_int32_t));
static void	mmemstart __P((struct mmem_softc *));
static void	mmemstart_bp __P((struct mmem_softc *));
static void	mmemstart_write2 __P((struct mmem_softc *));
static void	mmemdone __P((struct mmem_softc *, struct mmem_pt *, int));

dev_type_open(mmemopen);
dev_type_close(mmemclose);
dev_type_read(mmemread);
dev_type_write(mmemwrite);
dev_type_ioctl(mmemioctl);
dev_type_strategy(mmemstrategy);

const struct bdevsw mmem_bdevsw = {
	mmemopen, mmemclose, mmemstrategy, mmemioctl, nodump,
	nosize, D_DISK
};

const struct cdevsw mmem_cdevsw = {
	mmemopen, mmemclose, mmemread, mmemwrite, mmemioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_DISK
};

CFATTACH_DECL(mmem, sizeof(struct mmem_softc),
    mmemmatch, mmemattach, mmemdetach, NULL);

extern struct cfdriver mmem_cd;

struct dkdriver mmemdkdriver = { mmemstrategy };

static int
mmemmatch(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct maple_attach_args *ma = aux;

	return (ma->ma_function == MAPLE_FN_MEMCARD ? MAPLE_MATCH_FUNC : 0);
}

static void
mmemattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct mmem_softc *sc = (void *) self;
	struct maple_attach_args *ma = aux;
	int i;
	union {
		u_int32_t v;
		struct mmem_funcdef s;
	} funcdef;

	sc->sc_parent = parent;
	sc->sc_unit = ma->ma_unit;
	sc->sc_devinfo = ma->ma_devinfo;

	funcdef.v = maple_get_function_data(ma->ma_devinfo, MAPLE_FN_MEMCARD);
	printf(": Memory card\n");
	printf("%s: %d part, %d bytes/block, ",
	    sc->sc_dev.dv_xname,
	    sc->sc_npt = funcdef.s.pt + 1,
	    sc->sc_bsize = (funcdef.s.bb + 1) << 5);
	if ((sc->sc_wacc = funcdef.s.wa) == 0)
		printf("no write, ");
	else
		printf("%d acc/write, ", sc->sc_wacc);
	if ((sc->sc_racc = funcdef.s.ra) == 0)
		printf("no read\n");
	else
		printf("%d acc/read\n", sc->sc_racc);

	/*
	 * start init sequence
	 */
	sc->sc_stat = MMEM_INIT;
	bufq_alloc(&sc->sc_q, BUFQ_DISKSORT|BUFQ_SORT_RAWBLOCK);

	/* check consistency */
	if (sc->sc_wacc != 0) {
		sc->sc_waccsz = sc->sc_bsize / sc->sc_wacc;
		if (sc->sc_bsize != sc->sc_waccsz * sc->sc_wacc) {
			printf("%s: write access isn't equally divided\n",
			    sc->sc_dev.dv_xname);
			sc->sc_wacc = 0;	/* no write */
		} else if (sc->sc_waccsz > MMEM_MAXACCSIZE) {
			printf("%s: write access size is too large\n",
			    sc->sc_dev.dv_xname);
			sc->sc_wacc = 0;	/* no write */
		}
	}
	if (sc->sc_racc != 0) {
		sc->sc_raccsz = sc->sc_bsize / sc->sc_racc;
		if (sc->sc_bsize != sc->sc_raccsz * sc->sc_racc) {
			printf("%s: read access isn't equally divided\n",
			    sc->sc_dev.dv_xname);
			sc->sc_racc = 0;	/* no read */
		} else if (sc->sc_raccsz > MMEM_MAXACCSIZE) {
			printf("%s: read access size is too large\n",
			    sc->sc_dev.dv_xname);
			sc->sc_racc = 0;	/* no read */
		}
	}
	if (sc->sc_wacc == 0 && sc->sc_racc == 0) {
		printf("%s: device supports neither read nor write\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* per-part structure */
	sc->sc_pt = malloc(sizeof(struct mmem_pt) * sc->sc_npt, M_DEVBUF,
	    M_WAITOK|M_ZERO);

	for (i = 0; i < sc->sc_npt; i++) {
		sprintf(sc->sc_pt[i].pt_name, "%s.%d", sc->sc_dev.dv_xname, i);
	}

	maple_set_callback(parent, sc->sc_unit, MAPLE_FN_MEMCARD,
	    mmem_intr, sc);

	/*
	 * get capacity (start from partition 0)
	 */
	sc->sc_reqm.func_code = htonl(MAPLE_FUNC(MAPLE_FN_MEMCARD));
	sc->sc_reqm.pt = 0;
	maple_command(sc->sc_parent, sc->sc_unit, MAPLE_FN_MEMCARD,
	    MAPLE_COMMAND_GETMINFO, sizeof sc->sc_reqm / 4, &sc->sc_reqm, 0);
}

static int
mmemdetach(self, flags)
	struct device *self;
	int flags;
{
	struct mmem_softc *sc = (struct mmem_softc *) self;
	struct buf *bp;
	int i;
	int minor_l, minor_h;

	sc->sc_stat = MMEM_DETACH;	/* just in case */

	/*
	 * kill pending I/O
	 */
	if ((bp = sc->sc_bp) != NULL) {
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}
	while ((bp = BUFQ_GET(&sc->sc_q)) != NULL) {
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
	}
	bufq_free(&sc->sc_q);

	/*
	 * revoke vnodes
	 */
#ifdef __HAVE_OLD_DISKLABEL
#error This code assumes DISKUNIT() is contiguous in minor number.
#endif
	minor_l = MMEM_DISKMINOR(self->dv_unit, 0, 0);
	minor_h = MMEM_DISKMINOR(self->dv_unit, sc->sc_npt - 1,
	    MAXPARTITIONS - 1);
	vdevgone(bdevsw_lookup_major(&mmem_bdevsw), minor_l, minor_h, VBLK);
	vdevgone(cdevsw_lookup_major(&mmem_cdevsw), minor_l, minor_h, VCHR);

	/*
	 * free per-partition structure
	 */
	if (sc->sc_pt) {
		/*
		 * detach disks
		 */
		for (i = 0; i < sc->sc_npt; i++) {
			if (sc->sc_pt[i].pt_flags & MMEM_PT_OK)
				disk_detach(&sc->sc_pt[i].pt_dk);
		}
		free(sc->sc_pt, M_DEVBUF);
	}

	return 0;
}

/* fake disklabel */
static void
mmem_defaultlabel(sc, pt, d)
	struct mmem_softc *sc;
	struct mmem_pt *pt;
	struct disklabel *d;
{

	bzero(d, sizeof *d);

#if 0
	d->d_type = DTYPE_FLOPPY;		/* XXX? */
#endif
	strncpy(d->d_typename, sc->sc_devinfo->di_product_name,
	    sizeof d->d_typename);
	strcpy(d->d_packname, "fictitious");
	d->d_secsize = sc->sc_bsize;
	d->d_ntracks = 1;			/* XXX */
	d->d_nsectors = d->d_secpercyl = 8;	/* XXX */
	d->d_secperunit = pt->pt_info.maxblk - pt->pt_info.minblk + 1;
	d->d_ncylinders = d->d_secperunit / d->d_secpercyl;
	d->d_rpm = 1;				/* when 4 acc/write */

	d->d_npartitions = RAW_PART + 1;
	d->d_partitions[RAW_PART].p_size = d->d_secperunit;

	d->d_magic = d->d_magic2 = DISKMAGIC;
	d->d_checksum = dkcksum(d);
}

/*
 * called back from maple bus driver
 */
static void
mmem_intr(dev, response, sz, flags)
	void *dev;
	struct maple_response *response;
	int sz, flags;
{
	struct mmem_softc *sc = dev;
	struct mmem_response_read_data *r = (void *) response->data;
	struct mmem_response_media_info *rm = (void *) response->data;
	struct buf *bp;
	int part;
	struct mmem_pt *pt;
	char pbuf[9];
	int off;

	switch (sc->sc_stat) {
	case MMEM_INIT:
		/* checking part geometry */
		part = sc->sc_reqm.pt;
		pt = &sc->sc_pt[part];
		switch ((maple_response_t) response->response_code) {
		case MAPLE_RESPONSE_DATATRF:
			pt->pt_info = rm->info;
			format_bytes(pbuf, sizeof(pbuf),
			    (u_int64_t)
			    ((pt->pt_info.maxblk - pt->pt_info.minblk + 1)
			    * sc->sc_bsize));
			printf("%s: %s, blk %d %d, inf %d, fat %d %d, dir %d %d, icon %d, data %d\n",
			    pt->pt_name,
			    pbuf,
			    pt->pt_info.maxblk, pt->pt_info.minblk,
			    pt->pt_info.infpos,
			    pt->pt_info.fatpos, pt->pt_info.fatsz,
			    pt->pt_info.dirpos, pt->pt_info.dirsz,
			    pt->pt_info.icon,
			    pt->pt_info.datasz);

			pt->pt_dk.dk_driver = &mmemdkdriver;
			pt->pt_dk.dk_name = pt->pt_name;
			disk_attach(&pt->pt_dk);

			mmem_defaultlabel(sc, pt, pt->pt_dk.dk_label);

			/* this partition is active */
			pt->pt_flags = MMEM_PT_OK;

			break;
		default:
			printf("%s: init: unexpected response %#x, sz %d\n",
			    pt->pt_name, ntohl(response->response_code), sz);
			break;
		}
		if (++part == sc->sc_npt) {
#if 1
			/*
			 * XXX Read a block and discard the contents (only to
			 * turn off the access indicator on Visual Memory).
			 */
			pt = &sc->sc_pt[0];
			sc->sc_reqr.func_code =
			    htonl(MAPLE_FUNC(MAPLE_FN_MEMCARD));
			sc->sc_reqr.pt = 0;
			sc->sc_reqr.block = htons(pt->pt_info.minblk);
			sc->sc_reqr.phase = 0;
			maple_command(sc->sc_parent, sc->sc_unit,
			    MAPLE_FN_MEMCARD, MAPLE_COMMAND_BREAD,
			    sizeof sc->sc_reqr / 4, &sc->sc_reqr, 0);
			sc->sc_stat = MMEM_INIT2;
#else
			sc->sc_stat = MMEM_IDLE;	/* init done */
#endif
		} else {
			sc->sc_reqm.pt = part;
			maple_command(sc->sc_parent, sc->sc_unit,
			    MAPLE_FN_MEMCARD, MAPLE_COMMAND_GETMINFO,
			    sizeof sc->sc_reqm / 4, &sc->sc_reqm, 0);
		}
		break;

	case MMEM_INIT2:
		/* XXX just discard */
		sc->sc_stat = MMEM_IDLE;	/* init done */
		break;

	case MMEM_READ:
		bp = sc->sc_bp;

		switch ((maple_response_t) response->response_code) {
		case MAPLE_RESPONSE_DATATRF:		/* read done */
			off = sc->sc_raccsz * sc->sc_reqr.phase;
			bcopy(r->data + off, sc->sc_iobuf + off, sc->sc_raccsz);

			if (++sc->sc_reqr.phase == sc->sc_racc) {
				/* all phase done */
				pt = &sc->sc_pt[sc->sc_reqr.pt];
				mmemdone(sc, pt, 0);
			} else {
				/* go next phase */
				maple_command(sc->sc_parent, sc->sc_unit,
				    MAPLE_FN_MEMCARD, MAPLE_COMMAND_BREAD,
				    sizeof sc->sc_reqr / 4, &sc->sc_reqr, 0);
			}
			break;
		case MAPLE_RESPONSE_FILEERR:
			mmem_printerror(sc->sc_pt[sc->sc_reqr.pt].pt_name,
			    1, bp->b_rawblkno,
			    r->func_code /* XXX */);
			mmemstart_bp(sc);		/* retry */
			break;
		default:
			printf("%s: read: unexpected response %#x %#x, sz %d\n",
			    sc->sc_pt[sc->sc_reqr.pt].pt_name,
			    ntohl(response->response_code),
			    ntohl(r->func_code), sz);
			mmemstart_bp(sc);		/* retry */
			break;
		}
		break;

	case MMEM_WRITE1:	/* read before write / verify after write */
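		/*
		 * Write path: the block is first read back and compared
		 * with the new data (this state); the actual write
		 * (MMEM_WRITE2) is issued only when they differ, and after
		 * writing, the same read path runs again to verify the
		 * result.
		 */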
		bp = sc->sc_bp;

		switch ((maple_response_t) response->response_code) {
		case MAPLE_RESPONSE_DATATRF:		/* read done */
			off = sc->sc_raccsz * sc->sc_reqr.phase;
			if (bcmp(r->data + off, sc->sc_iobuf + off,
			    sc->sc_raccsz)) {
				/*
				 * data differ, start writing
				 */
				mmemstart_write2(sc);
			} else if (++sc->sc_reqr.phase == sc->sc_racc) {
				/*
				 * all phase done and compared equal
				 */
				pt = &sc->sc_pt[sc->sc_reqr.pt];
				mmemdone(sc, pt, 0);
			} else {
				/* go next phase */
				maple_command(sc->sc_parent, sc->sc_unit,
				    MAPLE_FN_MEMCARD, MAPLE_COMMAND_BREAD,
				    sizeof sc->sc_reqr / 4, &sc->sc_reqr, 0);
			}
			break;
		case MAPLE_RESPONSE_FILEERR:
			mmem_printerror(sc->sc_pt[sc->sc_reqr.pt].pt_name,
			    1, bp->b_rawblkno,
			    r->func_code /* XXX */);
			mmemstart_write2(sc);		/* start writing */
			break;
		default:
			printf("%s: verify: unexpected response %#x %#x, sz %d\n",
			    sc->sc_pt[sc->sc_reqr.pt].pt_name,
			    ntohl(response->response_code),
			    ntohl(r->func_code), sz);
			mmemstart_write2(sc);		/* start writing */
			break;
		}
		break;

	case MMEM_WRITE2:	/* write */
		bp = sc->sc_bp;

		switch ((maple_response_t) response->response_code) {
		case MAPLE_RESPONSE_OK:			/* write done */
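			/*
			 * sc_reqw.phase counts completed write accesses.
			 * When it reaches sc_wacc, GETLASTERR is issued
			 * (leaving phase == sc_wacc), so the next OK
			 * response takes the first branch below and goes
			 * back to the verify read via mmemstart_bp().
			 */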
			if (sc->sc_reqw.phase == sc->sc_wacc) {
				/* all phase done */
				mmemstart_bp(sc);	/* start verify */
			} else if (++sc->sc_reqw.phase == sc->sc_wacc) {
				/* check error */
				maple_command(sc->sc_parent, sc->sc_unit,
				    MAPLE_FN_MEMCARD, MAPLE_COMMAND_GETLASTERR,
				    2 /* no data */ , &sc->sc_reqw,
				    MAPLE_FLAG_CMD_PERIODIC_TIMING);
			} else {
				/* go next phase */
				bcopy(sc->sc_iobuf
				    + sc->sc_waccsz * sc->sc_reqw.phase,
				    sc->sc_reqw.data, sc->sc_waccsz);
				maple_command(sc->sc_parent, sc->sc_unit,
				    MAPLE_FN_MEMCARD, MAPLE_COMMAND_BWRITE,
				    MMEM_SIZE_REQW(sc) / 4, &sc->sc_reqw,
				    MAPLE_FLAG_CMD_PERIODIC_TIMING);
			}
			break;
		case MAPLE_RESPONSE_FILEERR:
			mmem_printerror(sc->sc_pt[sc->sc_reqw.pt].pt_name,
			    0, bp->b_rawblkno,
			    r->func_code /* XXX */);
			mmemstart_write2(sc);		/* retry writing */
			break;
		default:
			printf("%s: write: unexpected response %#x, %#x, sz %d\n",
			    sc->sc_pt[sc->sc_reqw.pt].pt_name,
			    ntohl(response->response_code),
			    ntohl(r->func_code), sz);
			mmemstart_write2(sc);		/* retry writing */
			break;
		}
		break;

	default:
		break;
	}
}

static void
mmem_printerror(head, rd, blk, code)
	const char *head;
	int rd;		/* 1: read, 0: write */
	int blk;
	u_int32_t code;
{

	printf("%s: error %sing blk %d:", head, rd? "read" : "writ", blk);
	NTOHL(code);
	if (code & 1)
		printf(" PT error");
	if (code & 2)
		printf(" Phase error");
	if (code & 4)
		printf(" Block error");
	if (code & 010)
		printf(" Write error");
	if (code & 020)
		printf(" Length error");
	if (code & 040)
		printf(" CRC error");
	if (code & ~077)
		printf(" Unknown error %#x", code & ~077);
	printf("\n");
}

int
mmemopen(dev, flags, devtype, p)
	dev_t dev;
	int flags, devtype;
	struct proc *p;
{
	int diskunit, unit, part, labelpart;
	struct mmem_softc *sc;
	struct mmem_pt *pt;

	diskunit = DISKUNIT(dev);
	unit = MMEM_UNIT(diskunit);
	part = MMEM_PART(diskunit);
	labelpart = DISKPART(dev);
	if ((sc = device_lookup(&mmem_cd, unit)) == NULL
	    || sc->sc_stat == MMEM_INIT
	    || sc->sc_stat == MMEM_INIT2
	    || part >= sc->sc_npt || (pt = &sc->sc_pt[part])->pt_flags == 0)
		return ENXIO;

	switch (devtype) {
	case S_IFCHR:
		pt->pt_dk.dk_copenmask |= (1 << labelpart);
		break;
	case S_IFBLK:
		pt->pt_dk.dk_bopenmask |= (1 << labelpart);
		break;
	}

	return 0;
}

int
mmemclose(dev, flags, devtype, p)
	dev_t dev;
	int flags, devtype;
	struct proc *p;
{
	int diskunit, unit, part, labelpart;
	struct mmem_softc *sc;
	struct mmem_pt *pt;

	diskunit = DISKUNIT(dev);
	unit = MMEM_UNIT(diskunit);
	part = MMEM_PART(diskunit);
	sc = mmem_cd.cd_devs[unit];
	pt = &sc->sc_pt[part];
	labelpart = DISKPART(dev);

	switch (devtype) {
	case S_IFCHR:
		pt->pt_dk.dk_copenmask &= ~(1 << labelpart);
		break;
	case S_IFBLK:
		pt->pt_dk.dk_bopenmask &= ~(1 << labelpart);
		break;
	}

	return 0;
}

void
mmemstrategy(bp)
	struct buf *bp;
{
	int diskunit, unit, part, labelpart;
	struct mmem_softc *sc;
	struct mmem_pt *pt;
	daddr_t off, nblk, cnt;

	diskunit = DISKUNIT(bp->b_dev);
	unit = MMEM_UNIT(diskunit);
	part = MMEM_PART(diskunit);
	if ((sc = device_lookup(&mmem_cd, unit)) == NULL
	    || sc->sc_stat == MMEM_INIT
	    || sc->sc_stat == MMEM_INIT2
	    || part >= sc->sc_npt || (pt = &sc->sc_pt[part])->pt_flags == 0)
		goto inval;

#if 0
	printf("%s: mmemstrategy: blkno %d, count %ld\n",
	    pt->pt_name, bp->b_blkno, bp->b_bcount);
#endif

	if (bp->b_flags & B_READ) {
		if (sc->sc_racc == 0)
			goto inval;		/* no read */
	} else if (sc->sc_wacc == 0) {
		bp->b_error = EROFS;		/* no write */
		goto bad;
	}

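	/*
	 * Reject block numbers whose byte offset would overflow a signed
	 * daddr_t, and transfers that are not a multiple of the card's
	 * block size.
	 */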
	if (bp->b_blkno & ~(~(daddr_t)0 >> (DEV_BSHIFT + 1 /* sign bit */))
	    || (bp->b_bcount % sc->sc_bsize) != 0)
		goto inval;

	cnt = howmany(bp->b_bcount, sc->sc_bsize);
	if (cnt == 0)
		goto done;		/* no work */

	off = bp->b_blkno * DEV_BSIZE / sc->sc_bsize;

	/* offset to disklabel partition */
	labelpart = DISKPART(bp->b_dev);
	if (labelpart == RAW_PART) {
		nblk = pt->pt_info.maxblk - pt->pt_info.minblk + 1;
	} else {
		off +=
		    nblk = pt->pt_dk.dk_label->d_partitions[labelpart].p_offset;
		nblk += pt->pt_dk.dk_label->d_partitions[labelpart].p_size;
	}
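
	/*
	 * At this point off is the starting card block of the transfer and
	 * nblk is the first block past the end of the selected partition,
	 * both counted in the same units.
	 */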

	/* deal with the EOF condition */
	if (off + cnt > nblk) {
		if (off >= nblk) {
			if (off == nblk)
				goto done;
			goto inval;
		}
		cnt = nblk - off;
		bp->b_resid = bp->b_bcount - (cnt * sc->sc_bsize);
	}

	bp->b_rawblkno = off;

	/* queue this transfer */
	BUFQ_PUT(&sc->sc_q, bp);

	if (sc->sc_stat == MMEM_IDLE)
		mmemstart(sc);

	return;

inval:	bp->b_error = EINVAL;
bad:	bp->b_flags |= B_ERROR;
done:	bp->b_resid = bp->b_bcount;
	biodone(bp);
}

/*
 * start I/O operations
 */
static void
mmemstart(sc)
	struct mmem_softc *sc;
{
	struct buf *bp;
	struct mmem_pt *pt;
	int s;

	if ((bp = BUFQ_GET(&sc->sc_q)) == NULL) {
		sc->sc_stat = MMEM_IDLE;
		maple_enable_unit_ping(sc->sc_parent, sc->sc_unit,
		    MAPLE_FN_MEMCARD, 1);
		return;
	}

	sc->sc_bp = bp;
	sc->sc_cnt = howmany(bp->b_bcount - bp->b_resid, sc->sc_bsize);
	KASSERT(sc->sc_cnt);
	sc->sc_iobuf = bp->b_data;
	sc->sc_retry = 0;

	pt = &sc->sc_pt[MMEM_PART(DISKUNIT(bp->b_dev))];
	s = splbio();
	disk_busy(&pt->pt_dk);
	splx(s);

	/*
	 * Disable the periodic removal-detection ping while the transfer
	 * is in progress: if the maple driver declared the unit removed
	 * before the I/O finished, the access would fail.  Since we keep
	 * sending commands anyway, removal of the card is still noticed.
	 */
	maple_enable_unit_ping(sc->sc_parent, sc->sc_unit, MAPLE_FN_MEMCARD, 0);

	mmemstart_bp(sc);
}

/*
 * start/retry a specified I/O operation
 */
static void
mmemstart_bp(sc)
	struct mmem_softc *sc;
{
	struct buf *bp;
	int diskunit, part;
	struct mmem_pt *pt;

	bp = sc->sc_bp;
	diskunit = DISKUNIT(bp->b_dev);
	part = MMEM_PART(diskunit);
	pt = &sc->sc_pt[part];

	/* handle retry */
	if (sc->sc_retry++ > MMEM_MAXRETRY) {
		/* retry count exceeded */
		mmemdone(sc, pt, EIO);
		return;
	}

	/*
	 * Start the first phase (phase# = 0).
	 */
	/* start read */
	sc->sc_stat = (bp->b_flags & B_READ) ? MMEM_READ : MMEM_WRITE1;
	sc->sc_reqr.func_code = htonl(MAPLE_FUNC(MAPLE_FN_MEMCARD));
	sc->sc_reqr.pt = part;
	sc->sc_reqr.block = htons(bp->b_rawblkno);
	sc->sc_reqr.phase = 0;		/* first phase */
	maple_command(sc->sc_parent, sc->sc_unit, MAPLE_FN_MEMCARD,
	    MAPLE_COMMAND_BREAD, sizeof sc->sc_reqr / 4, &sc->sc_reqr, 0);
}

static void
mmemstart_write2(sc)
	struct mmem_softc *sc;
{
	struct buf *bp;
	int diskunit, part;
	struct mmem_pt *pt;

	bp = sc->sc_bp;
	diskunit = DISKUNIT(bp->b_dev);
	part = MMEM_PART(diskunit);
	pt = &sc->sc_pt[part];

	/* handle retry */
	if (sc->sc_retry++ > MMEM_MAXRETRY - 2 /* spare for verify read */) {
		/* retry count exceeded */
		mmemdone(sc, pt, EIO);
		return;
	}

	/*
	 * Start the first phase (phase# = 0).
	 */
	/* start write */
	sc->sc_stat = MMEM_WRITE2;
	sc->sc_reqw.func_code = htonl(MAPLE_FUNC(MAPLE_FN_MEMCARD));
	sc->sc_reqw.pt = part;
	sc->sc_reqw.block = htons(bp->b_rawblkno);
	sc->sc_reqw.phase = 0;		/* first phase */
	bcopy(sc->sc_iobuf /* + sc->sc_waccsz * phase */,
	    sc->sc_reqw.data, sc->sc_waccsz);
	maple_command(sc->sc_parent, sc->sc_unit, MAPLE_FN_MEMCARD,
	    MAPLE_COMMAND_BWRITE, MMEM_SIZE_REQW(sc) / 4, &sc->sc_reqw,
	    MAPLE_FLAG_CMD_PERIODIC_TIMING);
}

static void
mmemdone(sc, pt, err)
	struct mmem_softc *sc;
	struct mmem_pt *pt;
	int err;
{
	struct buf *bp = sc->sc_bp;
	int s;
	int bcnt;

	KASSERT(bp);

	if (err) {
		bcnt = sc->sc_iobuf - bp->b_data;
		bp->b_resid = bp->b_bcount - bcnt;

		/* raise error if no block is read */
		if (bcnt == 0) {
			bp->b_error = err;
			bp->b_flags |= B_ERROR;
		}
		goto term_xfer;
	}

	sc->sc_iobuf += sc->sc_bsize;
	if (--sc->sc_cnt == 0) {
	term_xfer:
		/* terminate current transfer */
		sc->sc_bp = NULL;
		s = splbio();
		disk_unbusy(&pt->pt_dk, sc->sc_iobuf - bp->b_data,
		    sc->sc_stat == MMEM_READ);
		biodone(bp);
		splx(s);

		/* go next transfer */
		mmemstart(sc);
	} else {
		/* go next block */
		bp->b_rawblkno++;
		sc->sc_retry = 0;
		mmemstart_bp(sc);
	}
}

int
mmemread(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{

	return (physio(mmemstrategy, NULL, dev, B_READ, minphys, uio));
}

int
mmemwrite(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{

	return (physio(mmemstrategy, NULL, dev, B_WRITE, minphys, uio));
}

int
mmemioctl(dev, cmd, data, flag, p)
	dev_t dev;
	u_long cmd;
	caddr_t data;
	int flag;
	struct proc *p;
{
	int diskunit, unit, part;
	struct mmem_softc *sc;
	struct mmem_pt *pt;

	diskunit = DISKUNIT(dev);
	unit = MMEM_UNIT(diskunit);
	part = MMEM_PART(diskunit);
	sc = mmem_cd.cd_devs[unit];
	pt = &sc->sc_pt[part];

	switch (cmd) {
	case DIOCGDINFO:
		*(struct disklabel *)data = *pt->pt_dk.dk_label;	/* XXX */
		break;

	default:
		/* generic maple ioctl */
		return maple_unit_ioctl(sc->sc_parent, sc->sc_unit, cmd, data,
		    flag, p);
	}

	return 0;
}