/*	$NetBSD: ed_mca.c,v 1.8.2.2 2001/09/26 15:28:14 fvdl Exp $	*/

/*
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jaromir Dolecek.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Disk goo for MCA ESDI controller driver.
 */

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/kthread.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <miscfs/specfs/specdev.h>

#include <machine/intr.h>
#include <machine/bus.h>

#include <dev/mca/mcavar.h>

#include <dev/mca/edcreg.h>
#include <dev/mca/edvar.h>
#include <dev/mca/edcvar.h>

/* #define WDCDEBUG */

#ifdef WDCDEBUG
#define WDCDEBUG_PRINT(args, level)	printf args
#else
#define WDCDEBUG_PRINT(args, level)
#endif

#define	EDLABELDEV(dev)	(MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART))

/* XXX: these should go elsewhere */
cdev_decl(edmca);
bdev_decl(edmca);

static int ed_mca_probe __P((struct device *, struct cfdata *, void *));
static void ed_mca_attach __P((struct device *, struct device *, void *));

struct cfattach ed_mca_ca = {
	sizeof(struct ed_softc), ed_mca_probe, ed_mca_attach
};

extern struct cfdriver ed_cd;

static int ed_get_params __P((struct ed_softc *));
static int ed_lock __P((struct ed_softc *));
static void ed_unlock __P((struct ed_softc *));
static void edgetdisklabel __P((struct vnode *));
static void edgetdefaultlabel __P((struct ed_softc *, struct disklabel *));
static void ed_shutdown __P((void*));
static void __edstart __P((struct ed_softc*, struct buf *));
static void bad144intern __P((struct ed_softc *));
static void edworker __P((void *));
static void ed_spawn_worker __P((void *));
static void edmcadone __P((struct ed_softc *, struct buf *));
static void ed_bio __P((struct ed_softc *, int, int));
static void ed_bio_done __P((struct ed_softc *));

static struct dkdriver eddkdriver = { edmcastrategy };

/*
 * Just check if it's possible to identify the disk.
 */
static int
ed_mca_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	u_int16_t cmd_args[2];
	struct edc_mca_softc *sc = (void *) parent;
	struct ed_attach_args *eda = (void *) aux;
	int found = 1;

	/*
	 * Get Device Configuration (09).
	 */
	cmd_args[0] = 14;	/* Options: 00s110, s: 0=Physical 1=Pseudo */
	cmd_args[1] = 0;
	if (edc_run_cmd(sc, CMD_GET_DEV_CONF, eda->sc_devno, cmd_args, 2, 0, 1))
		found = 0;

	return (found);
}

static void
ed_mca_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct ed_softc *ed = (void *) self;
	struct edc_mca_softc *sc = (void *) parent;
	struct ed_attach_args *eda = (void *) aux;
	char pbuf[8];
	int error, nsegs;

	ed->edc_softc = sc;
	ed->sc_dmat = eda->sc_dmat;
	ed->sc_devno = eda->sc_devno;
	edc_add_disk(sc, ed, eda->sc_devno);

	BUFQ_INIT(&ed->sc_q);
	simple_lock_init(&ed->sc_q_lock);
	lockinit(&ed->sc_lock, PRIBIO | PCATCH, "edlck", 0, 0);

	if (ed_get_params(ed)) {
		printf(": IDENTIFY failed, no disk found\n");
		return;
	}

	format_bytes(pbuf, sizeof(pbuf),
	    (u_int64_t) ed->sc_capacity * DEV_BSIZE);
	printf(": %s, %u cyl, %u head, %u sec, 512 bytes/sect x %u sectors\n",
	    pbuf,
	    ed->cyl, ed->heads, ed->sectors,
	    ed->sc_capacity);

	printf("%s: %u spares/cyl, %s, %s, %s, %s, %s\n",
	    ed->sc_dev.dv_xname, ed->spares,
	    (ed->drv_flags & (1 << 0)) ? "NoRetries" : "Retries",
	    (ed->drv_flags & (1 << 1)) ? "Removable" : "Fixed",
	    (ed->drv_flags & (1 << 2)) ? "SkewedFormat" : "NoSkew",
	    (ed->drv_flags & (1 << 3)) ? "ZeroDefect" : "Defects",
	    (ed->drv_flags & (1 << 4)) ? "InvalidSecondary" : "SecondaryOK"
	    );

	/* Create a DMA map for mapping individual transfer bufs */
	if ((error = bus_dmamap_create(ed->sc_dmat, 65536, 1,
	    65536, 65536, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &ed->dmamap_xfer)) != 0) {
		printf("%s: unable to create xfer DMA map, error=%d\n",
		    ed->sc_dev.dv_xname, error);
		return;
	}

	/*
	 * Allocate DMA memory used in case where passed buf isn't
	 * physically contiguous.
	 */
	ed->sc_dmam_sz = MAXPHYS;
	if ((error = bus_dmamem_alloc(ed->sc_dmat, ed->sc_dmam_sz,
	    ed->sc_dmam_sz, 65536, ed->sc_dmam, 1, &nsegs,
	    BUS_DMA_WAITOK|BUS_DMA_STREAMING)) != 0) {
		printf("%s: unable to allocate DMA memory for xfer, errno=%d\n",
		    ed->sc_dev.dv_xname, error);
		bus_dmamap_destroy(ed->sc_dmat, ed->dmamap_xfer);
		return;
	}
	/*
	 * Map the memory.
	 */
	if ((error = bus_dmamem_map(ed->sc_dmat, ed->sc_dmam, 1,
	    ed->sc_dmam_sz, &ed->sc_dmamkva, BUS_DMA_WAITOK)) != 0) {
		printf("%s: unable to map DMA memory, error=%d\n",
		    ed->sc_dev.dv_xname, error);
		bus_dmamem_free(ed->sc_dmat, ed->sc_dmam, 1);
		bus_dmamap_destroy(ed->sc_dmat, ed->dmamap_xfer);
		return;
	}


	/*
	 * Initialize and attach the disk structure.
	 */
	ed->sc_dk.dk_driver = &eddkdriver;
	ed->sc_dk.dk_name = ed->sc_dev.dv_xname;
	disk_attach(&ed->sc_dk);
#if 0
	wd->sc_wdc_bio.lp = wd->sc_dk.dk_label;
#endif
	ed->sc_sdhook = shutdownhook_establish(ed_shutdown, ed);
	if (ed->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    ed->sc_dev.dv_xname);
#if NRND > 0
	rnd_attach_source(&ed->rnd_source, ed->sc_dev.dv_xname,
	    RND_TYPE_DISK, 0);
#endif

	config_pending_incr();
	kthread_create(ed_spawn_worker, (void *) ed);

	ed->sc_flags |= EDF_INIT;
}

void
ed_spawn_worker(arg)
	void *arg;
{
	struct ed_softc *ed = (struct ed_softc *) arg;
	int error;

	/* Now, everything is ready, start a kthread */
	if ((error = kthread_create1(edworker, ed, &ed->sc_worker,
	    "%s", ed->sc_dev.dv_xname))) {
		printf("%s: cannot spawn worker thread: errno=%d\n",
		    ed->sc_dev.dv_xname, error);
		panic("ed_spawn_worker");
	}
}

/*
 * Read/write routine for a buffer.  Validates the arguments and schedules the
 * transfer.  Does not wait for the transfer to complete.
 */
void
edmcastrategy(bp)
	struct buf *bp;
{
	struct ed_softc *wd;
	struct disklabel *lp;
	daddr_t blkno;
	int s;
	dev_t rdev;

	wd = vdev_privdata(bp->b_devvp);
	rdev = vdev_rdev(bp->b_devvp);
	lp = wd->sc_dk.dk_label;

	WDCDEBUG_PRINT(("edmcastrategy (%s)\n", wd->sc_dev.dv_xname),
	    DEBUG_XFERS);

	/* Valid request? */
	if (bp->b_blkno < 0 ||
	    (bp->b_bcount % lp->d_secsize) != 0 ||
	    (bp->b_bcount / lp->d_secsize) >= (1 << NBBY)) {
		bp->b_error = EINVAL;
		goto bad;
	}

	/* If device invalidated (e.g. media change, door open), error. */
	if ((wd->sc_flags & WDF_LOADED) == 0) {
		bp->b_error = EIO;
		goto bad;
	}

	/* If it's a null transfer, return immediately. */
	if (bp->b_bcount == 0)
		goto done;

	/*
	 * Do bounds checking and adjust the transfer; on error, finish the
	 * buf.  If past the end of the partition, just return.
	 */
	if (DISKPART(rdev) != RAW_PART &&
	    (bp->b_flags & B_DKLABEL) == 0 &&
	    bounds_check_with_label(bp, wd->sc_dk.dk_label,
	    (wd->sc_flags & (WDF_WLABEL|WDF_LABELLING)) != 0) <= 0)
		goto done;

	/*
	 * Now convert the block number to absolute and put it in
	 * terms of the device's logical block size.
	 */
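	/*
	 * (With the default label d_secsize equals DEV_BSIZE, so this is
	 * usually an identity mapping.)
	 */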
	if (lp->d_secsize >= DEV_BSIZE)
		blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

	if (DISKPART(rdev) != RAW_PART &&
	    (bp->b_flags & B_DKLABEL) == 0)
		blkno +=
		    lp->d_partitions[DISKPART(rdev)].p_offset;

	bp->b_rawblkno = blkno;

	/* Queue transfer on drive, activate drive and controller if idle. */
	s = splbio();
	simple_lock(&wd->sc_q_lock);
	disksort_blkno(&wd->sc_q, bp);
	simple_unlock(&wd->sc_q_lock);

	/* Ring the worker thread */
	wd->sc_flags |= EDF_PROCESS_QUEUE;
	wakeup_one(&wd->sc_q);

	splx(s);
	return;
bad:
	bp->b_flags |= B_ERROR;
done:
	/* Toss transfer; we're done early. */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}

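/*
 * Start a single read/write transfer described by sc_data, sc_rawblkno,
 * sc_bcount and sc_read.  The 'async' and 'poll' arguments are passed on
 * to edc_run_cmd(); as used here, the worker thread issues the command
 * asynchronously and sleeps until edc_intr() signals completion, while
 * edmcadump() requests polled, synchronous execution (annotation based on
 * the callers in this file).
 */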
static void
ed_bio(struct ed_softc *ed, int async, int poll)
{
	u_int16_t cmd_args[4];
	int error=0;
	u_int16_t track;
	u_int16_t cyl;
	u_int8_t head;
	u_int8_t sector;

	/* Get physical bus mapping for buf. */
	if (bus_dmamap_load(ed->sc_dmat, ed->dmamap_xfer,
	    ed->sc_data, ed->sc_bcount, NULL,
	    BUS_DMA_WAITOK|BUS_DMA_STREAMING) != 0) {

		/*
		 * Use our DMA safe memory to get data to/from device.
		 */
		if ((error = bus_dmamap_load(ed->sc_dmat, ed->dmamap_xfer,
		    ed->sc_dmamkva, ed->sc_bcount, NULL,
		    BUS_DMA_WAITOK|BUS_DMA_STREAMING)) != 0) {
			printf("%s: unable to load raw data for xfer, errno=%d\n",
			    ed->sc_dev.dv_xname, error);
			goto out;
		}
		ed->sc_flags |= EDF_BOUNCEBUF;

		/* If data write, copy the data to our bounce buffer. */
		if (!ed->sc_read)
			memcpy(ed->sc_dmamkva, ed->sc_data, ed->sc_bcount);
	}

	ed->sc_flags |= EDF_DMAMAP_LOADED;

	track = ed->sc_rawblkno / ed->sectors;
	head = track % ed->heads;
	cyl = track / ed->heads;
	sector = ed->sc_rawblkno % ed->sectors;

	WDCDEBUG_PRINT(("ed_bio %s: map: %u %u %u\n", ed->sc_dev.dv_xname,
	    cyl, sector, head),
	    DEBUG_XFERS);

	mca_disk_busy();

	/* Read or Write Data command */
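	/*
	 * Command argument layout (as assembled below): cmd_args[1] is the
	 * sector count, cmd_args[2] packs the low 5 cylinder bits (bits
	 * 11-15), the head (bits 5-10) and the sector (bits 0-4), and
	 * cmd_args[3] carries the remaining high cylinder bits.
	 */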
	cmd_args[0] = 2;	/* Options 0000010 */
	cmd_args[1] = ed->sc_bcount / DEV_BSIZE;
	cmd_args[2] = ((cyl & 0x1f) << 11) | (head << 5) | sector;
	cmd_args[3] = ((cyl & 0x3E0) >> 5);
	if (edc_run_cmd(ed->edc_softc,
	    (ed->sc_read) ? CMD_READ_DATA : CMD_WRITE_DATA,
	    ed->sc_devno, cmd_args, 4, async, poll)) {
		printf("%s: data i/o command failed\n", ed->sc_dev.dv_xname);
		mca_disk_unbusy();
		error = EIO;
	}

out:
	if (error)
		ed->sc_error = error;
}

static void
__edstart(ed, bp)
	struct ed_softc *ed;
	struct buf *bp;
{
	WDCDEBUG_PRINT(("__edstart %s (%s): %lu %lu %u\n", ed->sc_dev.dv_xname,
	    (bp->b_flags & B_READ) ? "read" : "write",
	    bp->b_bcount, bp->b_resid, bp->b_rawblkno),
	    DEBUG_XFERS);

	/* Instrumentation. */
	disk_busy(&ed->sc_dk);
	ed->sc_flags |= EDF_DK_BUSY;

	ed->sc_data = bp->b_data;
	ed->sc_rawblkno = bp->b_rawblkno;
	ed->sc_bcount = bp->b_bcount;
	ed->sc_read = bp->b_flags & B_READ;
	ed_bio(ed, 1, 0);
}

static void
ed_bio_done(ed)
	struct ed_softc *ed;
{
	/*
	 * If read transfer finished without error and using a bounce
	 * buffer, copy the data to buf.
	 */
	if (ed->sc_error == 0 && (ed->sc_flags & EDF_BOUNCEBUF) && ed->sc_read)
		memcpy(ed->sc_data, ed->sc_dmamkva, ed->sc_bcount);
	ed->sc_flags &= ~EDF_BOUNCEBUF;

	/* Unload buf from DMA map */
	if (ed->sc_flags & EDF_DMAMAP_LOADED) {
		bus_dmamap_unload(ed->sc_dmat, ed->dmamap_xfer);
		ed->sc_flags &= ~EDF_DMAMAP_LOADED;
	}

	mca_disk_unbusy();
}

static void
edmcadone(ed, bp)
	struct ed_softc *ed;
	struct buf *bp;
{
	WDCDEBUG_PRINT(("eddone %s\n", ed->sc_dev.dv_xname),
	    DEBUG_XFERS);

	if (ed->sc_error) {
		bp->b_error = ed->sc_error;
		bp->b_flags |= B_ERROR;
	} else {
		/* Set resid, most commonly to zero. */
		bp->b_resid = ed->sc_status_block[SB_RESBLKCNT_IDX] * DEV_BSIZE;
	}

	ed_bio_done(ed);

	/* If disk was busied, unbusy it now */
	if (ed->sc_flags & EDF_DK_BUSY) {
		disk_unbusy(&ed->sc_dk, (bp->b_bcount - bp->b_resid));
		ed->sc_flags &= ~EDF_DK_BUSY;
	}

#if NRND > 0
	rnd_add_uint32(&ed->rnd_source, bp->b_blkno);
#endif
	biodone(bp);
}

int
edmcaread(devvp, uio, flags)
	struct vnode *devvp;
	struct uio *uio;
	int flags;
{
	WDCDEBUG_PRINT(("edread\n"), DEBUG_XFERS);
	return (physio(edmcastrategy, NULL, devvp, B_READ, minphys, uio));
}

int
edmcawrite(devvp, uio, flags)
	struct vnode *devvp;
	struct uio *uio;
	int flags;
{
	WDCDEBUG_PRINT(("edwrite\n"), DEBUG_XFERS);
	return (physio(edmcastrategy, NULL, devvp, B_WRITE, minphys, uio));
}

/*
 * Wait interruptibly for an exclusive lock.
 */
static int
ed_lock(ed)
	struct ed_softc *ed;
{
	int error;
	int s;

	WDCDEBUG_PRINT(("ed_lock\n"), DEBUG_FUNCS);

	s = splbio();
	error = lockmgr(&ed->sc_lock, LK_EXCLUSIVE, NULL);
	splx(s);

	return (error);
}

/*
 * Unlock and wake up any waiters.
 */
static void
ed_unlock(ed)
	struct ed_softc *ed;
{
	WDCDEBUG_PRINT(("ed_unlock\n"), DEBUG_FUNCS);

	(void) lockmgr(&ed->sc_lock, LK_RELEASE, NULL);
}

int
edmcaopen(devvp, flag, fmt, p)
	struct vnode *devvp;
	int flag, fmt;
	struct proc *p;
{
	struct ed_softc *wd;
	int part, error;
	dev_t rdev;

	WDCDEBUG_PRINT(("edopen\n"), DEBUG_FUNCS);
	rdev = vdev_rdev(devvp);
	wd = device_lookup(&ed_cd, DISKUNIT(rdev));
	if (wd == NULL || (wd->sc_flags & EDF_INIT) == 0)
		return (ENXIO);

	vdev_setprivdata(devvp, wd);

	if ((error = ed_lock(wd)) != 0)
		goto bad4;

	if (wd->sc_dk.dk_openmask != 0) {
		/*
		 * If any partition is open, but the disk has been invalidated,
		 * disallow further opens.
		 */
		if ((wd->sc_flags & WDF_LOADED) == 0) {
			error = EIO;
			goto bad3;
		}
	} else {
		if ((wd->sc_flags & WDF_LOADED) == 0) {
			wd->sc_flags |= WDF_LOADED;

			/* Load the physical device parameters. */
			ed_get_params(wd);

			/* Load the partition info if not already loaded. */
			edgetdisklabel(devvp);
		}
	}

	part = DISKPART(rdev);

	/* Check that the partition exists. */
	if (part != RAW_PART &&
	    (part >= wd->sc_dk.dk_label->d_npartitions ||
	     wd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
		error = ENXIO;
		goto bad;
	}

	/* Record this open in the per-partition open masks. */
	switch (fmt) {
	case S_IFCHR:
		wd->sc_dk.dk_copenmask |= (1 << part);
		break;
	case S_IFBLK:
		wd->sc_dk.dk_bopenmask |= (1 << part);
		break;
	}
	wd->sc_dk.dk_openmask =
	    wd->sc_dk.dk_copenmask | wd->sc_dk.dk_bopenmask;

	ed_unlock(wd);
	return 0;

bad:
	if (wd->sc_dk.dk_openmask == 0) {
	}

bad3:
	ed_unlock(wd);
bad4:
	return (error);
}

int
edmcaclose(devvp, flag, fmt, p)
	struct vnode *devvp;
	int flag, fmt;
	struct proc *p;
{
	struct ed_softc *wd;
	int part;
	int error;

	wd = vdev_privdata(devvp);
	part = DISKPART(vdev_rdev(devvp));


	WDCDEBUG_PRINT(("edmcaclose\n"), DEBUG_FUNCS);
	if ((error = ed_lock(wd)) != 0)
		return error;

	switch (fmt) {
	case S_IFCHR:
		wd->sc_dk.dk_copenmask &= ~(1 << part);
		break;
	case S_IFBLK:
		wd->sc_dk.dk_bopenmask &= ~(1 << part);
		break;
	}
	wd->sc_dk.dk_openmask =
	    wd->sc_dk.dk_copenmask | wd->sc_dk.dk_bopenmask;

	if (wd->sc_dk.dk_openmask == 0) {
#if 0
		wd_flushcache(wd, AT_WAIT);
#endif
		/* XXXX Must wait for I/O to complete! */

		if (! (wd->sc_flags & WDF_KLABEL))
			wd->sc_flags &= ~WDF_LOADED;
	}

	ed_unlock(wd);

	return 0;
}

static void
edgetdefaultlabel(wd, lp)
	struct ed_softc *wd;
	struct disklabel *lp;
{
	WDCDEBUG_PRINT(("edgetdefaultlabel\n"), DEBUG_FUNCS);
	memset(lp, 0, sizeof(struct disklabel));

	lp->d_secsize = DEV_BSIZE;
	lp->d_ntracks = wd->heads;
	lp->d_nsectors = wd->sectors;
	lp->d_ncylinders = wd->cyl;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;

	lp->d_type = DTYPE_ESDI;

	strncpy(lp->d_typename, "ESDI", 16);
	strncpy(lp->d_packname, "fictitious", 16);
	lp->d_secperunit = wd->sc_capacity;
	lp->d_rpm = 3600;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size =
	    lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);
}

/*
 * Fabricate a default disk label, and try to read the correct one.
 */
static void
edgetdisklabel(devvp)
	struct vnode *devvp;
{
	struct ed_softc *wd;
	struct disklabel *lp;
	char *errstring;

	WDCDEBUG_PRINT(("edgetdisklabel\n"), DEBUG_FUNCS);

	wd = vdev_privdata(devvp);
	lp = wd->sc_dk.dk_label;

	memset(wd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));

	edgetdefaultlabel(wd, lp);

#if 0
	wd->sc_badsect[0] = -1;

	if (wd->drvp->state > RECAL)
		wd->drvp->drive_flags |= DRIVE_RESET;
#endif
	errstring = readdisklabel(devvp,
	    edmcastrategy, lp, wd->sc_dk.dk_cpulabel);
	if (errstring) {
		/*
		 * This probably happened because the drive's default
		 * geometry doesn't match the DOS geometry.  We
		 * assume the DOS geometry is now in the label and try
		 * again.  XXX This is a kluge.
		 */
#if 0
		if (wd->drvp->state > RECAL)
			wd->drvp->drive_flags |= DRIVE_RESET;
#endif
		errstring = readdisklabel(devvp,
		    edmcastrategy, lp, wd->sc_dk.dk_cpulabel);
	}
	if (errstring) {
		printf("%s: %s\n", wd->sc_dev.dv_xname, errstring);
		return;
	}

#if 0
	if (wd->drvp->state > RECAL)
		wd->drvp->drive_flags |= DRIVE_RESET;
#endif
#ifdef HAS_BAD144_HANDLING
	if ((lp->d_flags & D_BADSECT) != 0)
		bad144intern(wd);
#endif
}

int
edmcaioctl(devvp, xfer, addr, flag, p)
	struct vnode *devvp;
	u_long xfer;
	caddr_t addr;
	int flag;
	struct proc *p;
{
	struct ed_softc *wd;
	int error;
#ifdef __HAVE_OLD_DISKLABEL
	struct disklabel newlabel;
#endif

	WDCDEBUG_PRINT(("edioctl\n"), DEBUG_FUNCS);

	wd = vdev_privdata(devvp);

	if ((wd->sc_flags & WDF_LOADED) == 0)
		return EIO;

	switch (xfer) {
#ifdef HAS_BAD144_HANDLING
	case DIOCSBAD:
		if ((flag & FWRITE) == 0)
			return EBADF;
		wd->sc_dk.dk_cpulabel->bad = *(struct dkbad *)addr;
		wd->sc_dk.dk_label->d_flags |= D_BADSECT;
		bad144intern(wd);
		return 0;
#endif

	case DIOCGDINFO:
		*(struct disklabel *)addr = *(wd->sc_dk.dk_label);
		return 0;
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
		newlabel = *(wd->sc_dk.dk_label);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
		return 0;
#endif

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = wd->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &wd->sc_dk.dk_label->d_partitions[DISKPART(vdev_rdev(devvp))];
		return 0;

	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:
#endif
	{
		struct disklabel *lp;

#ifdef __HAVE_OLD_DISKLABEL
		if (xfer == ODIOCSDINFO || xfer == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, addr, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)addr;

		if ((flag & FWRITE) == 0)
			return EBADF;

		if ((error = ed_lock(wd)) != 0)
			return error;
		wd->sc_flags |= WDF_LABELLING;

		error = setdisklabel(wd->sc_dk.dk_label,
		    lp, /*wd->sc_dk.dk_openmask : */0,
		    wd->sc_dk.dk_cpulabel);
		if (error == 0) {
#if 0
			if (wd->drvp->state > RECAL)
				wd->drvp->drive_flags |= DRIVE_RESET;
#endif
			if (xfer == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
			    || xfer == ODIOCWDINFO
#endif
			    )
				error = writedisklabel(devvp,
				    edmcastrategy, wd->sc_dk.dk_label,
				    wd->sc_dk.dk_cpulabel);
		}

		wd->sc_flags &= ~WDF_LABELLING;
		ed_unlock(wd);
		return error;
	}

	case DIOCKLABEL:
		if (*(int *)addr)
			wd->sc_flags |= WDF_KLABEL;
		else
			wd->sc_flags &= ~WDF_KLABEL;
		return 0;

	case DIOCWLABEL:
		if ((flag & FWRITE) == 0)
			return EBADF;
		if (*(int *)addr)
			wd->sc_flags |= WDF_WLABEL;
		else
			wd->sc_flags &= ~WDF_WLABEL;
		return 0;

	case DIOCGDEFLABEL:
		edgetdefaultlabel(wd, (struct disklabel *)addr);
		return 0;
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		edgetdefaultlabel(wd, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
		return 0;
#endif

#ifdef notyet
	case DIOCWFORMAT:
		if ((flag & FWRITE) == 0)
			return EBADF;
		{
		register struct format_op *fop;
		struct iovec aiov;
		struct uio auio;

		fop = (struct format_op *)addr;
		aiov.iov_base = fop->df_buf;
		aiov.iov_len = fop->df_count;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = fop->df_count;
		auio.uio_segflg = 0;
		auio.uio_offset =
		    fop->df_startblk * wd->sc_dk.dk_label->d_secsize;
		auio.uio_procp = p;
		error = physio(wdformat, NULL, dev, B_WRITE, minphys,
		    &auio);
		fop->df_count -= auio.uio_resid;
		fop->df_reg[0] = wdc->sc_status;
		fop->df_reg[1] = wdc->sc_error;
		return error;
		}
#endif

	default:
		return ENOTTY;
	}

#ifdef DIAGNOSTIC
	panic("edioctl: impossible");
#endif
}

#if 0
#ifdef B_FORMAT
int
edmcaformat(struct buf *bp)
{

	bp->b_flags |= B_FORMAT;
	return edmcastrategy(bp);
}
#endif
#endif

int
edmcasize(dev)
	dev_t dev;
{
	struct ed_softc *wd;
	struct vnode *vp;
	int part, omask;
	int size;

	WDCDEBUG_PRINT(("edsize\n"), DEBUG_FUNCS);

	wd = device_lookup(&ed_cd, DISKUNIT(dev));
	if (wd == NULL)
		return (-1);

	part = DISKPART(dev);
	omask = wd->sc_dk.dk_openmask & (1 << part);

	/* XXXDEVVP */

	if (omask == 0) {
		if (bdevvp(dev, &vp) != 0)
			return (-1);
		if (edmcaopen(vp, 0, S_IFBLK, NULL) != 0) {
			vrele(vp);
			return (-1);
		}
	}
	if (wd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = wd->sc_dk.dk_label->d_partitions[part].p_size *
		    (wd->sc_dk.dk_label->d_secsize / DEV_BSIZE);

	if (omask == 0) {
		if (edmcaclose(vp, 0, S_IFBLK, NULL) != 0)
			size = -1;
		vrele(vp);
	}
	return (size);
}

/* #define WD_DUMP_NOT_TRUSTED if you just want to watch */
static int eddoingadump = 0;
static int eddumprecalibrated = 0;
static int eddumpmulti = 1;

/*
 * Dump core after a system crash.
 */
int
edmcadump(dev, blkno, va, size)
	dev_t dev;
	daddr_t blkno;
	caddr_t va;
	size_t size;
{
	struct ed_softc *ed;	/* disk unit to do the I/O */
	struct disklabel *lp;	/* disk's disklabel */
	int part;
	int nblks;		/* total number of sectors left to write */

	/* Check if recursive dump; if so, punt. */
	if (eddoingadump)
		return EFAULT;
	eddoingadump = 1;

	ed = device_lookup(&ed_cd, DISKUNIT(dev));
	if (ed == NULL)
		return (ENXIO);

	part = DISKPART(dev);

	/* Make sure it was initialized. */
	if ((ed->sc_flags & EDF_INIT) == 0)
		return ENXIO;

	/* Convert to disk sectors.  Request must be a multiple of size. */
	lp = ed->sc_dk.dk_label;
	if ((size % lp->d_secsize) != 0)
		return EFAULT;
	nblks = size / lp->d_secsize;
	blkno = blkno / (lp->d_secsize / DEV_BSIZE);

	/* Check transfer bounds against partition size. */
	if ((blkno < 0) || ((blkno + nblks) > lp->d_partitions[part].p_size))
		return EINVAL;

	/* Offset block number to start of partition. */
	blkno += lp->d_partitions[part].p_offset;

	/* Recalibrate, if first dump transfer. */
	if (eddumprecalibrated == 0) {
		eddumprecalibrated = 1;
		eddumpmulti = 8;
#if 0
		wd->drvp->state = RESET;
#endif
	}

	while (nblks > 0) {
		ed->sc_data = va;
		ed->sc_rawblkno = blkno;
		ed->sc_bcount = min(nblks, eddumpmulti) * lp->d_secsize;
		ed->sc_read = 0;

		ed_bio(ed, 0, 1);
		if (ed->sc_error)
			return (ed->sc_error);

		ed_bio_done(ed);

		/* Update pointers before adjusting the remaining count. */
		blkno += min(nblks, eddumpmulti);
		va += min(nblks, eddumpmulti) * lp->d_secsize;
		nblks -= min(nblks, eddumpmulti);
	}

	eddoingadump = 0;
	return (0);
}

#ifdef HAS_BAD144_HANDLING
/*
 * Internalize the bad sector table.
 */
static void
bad144intern(wd)
	struct ed_softc *wd;
{
	struct dkbad *bt = &wd->sc_dk.dk_cpulabel->bad;
	struct disklabel *lp = wd->sc_dk.dk_label;
	int i = 0;

	WDCDEBUG_PRINT(("bad144intern\n"), DEBUG_XFERS);

	for (; i < NBT_BAD; i++) {
		if (bt->bt_bad[i].bt_cyl == 0xffff)
			break;
		wd->sc_badsect[i] =
		    bt->bt_bad[i].bt_cyl * lp->d_secpercyl +
		    (bt->bt_bad[i].bt_trksec >> 8) * lp->d_nsectors +
		    (bt->bt_bad[i].bt_trksec & 0xff);
	}
	for (; i < NBT_BAD+1; i++)
		wd->sc_badsect[i] = -1;
}
#endif

static int
ed_get_params(ed)
	struct ed_softc *ed;
{
	u_int16_t cmd_args[2];

	/*
	 * Get Device Configuration (09).
	 */
	cmd_args[0] = 14;	/* Options: 00s110, s: 0=Physical 1=Pseudo */
	cmd_args[1] = 0;
	if (edc_run_cmd(ed->edc_softc, CMD_GET_DEV_CONF, ed->sc_devno,
	    cmd_args, 2, 0, 1))
		return (1);

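	/*
	 * Decode the returned status block: word 1 holds the spare-sector
	 * count and the drive flag bits reported in ed_mca_attach()
	 * (retries, removable, skewed format, zero defect, invalid
	 * secondary), words 2 and 3 the RBA (relative block address) count.
	 */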
	ed->spares = ed->sc_status_block[1] >> 8;
	ed->drv_flags = ed->sc_status_block[1] & 0x1f;
	ed->rba = ed->sc_status_block[2] |
	    (ed->sc_status_block[3] << 16);
	/*
	 * Instead of using:
	 *	ed->cyl = ed->sc_status_block[4];
	 *	ed->heads = ed->sc_status_block[5] & 0xff;
	 *	ed->sectors = ed->sc_status_block[5] >> 8;
	 * we fabricate the numbers from the RBA count, so that the number
	 * of sectors is 32 and the number of heads 64.  This seems to be
	 * necessary for the integrated ESDI controller.
	 */
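	/*
	 * Example (hypothetical numbers): a drive reporting 409600 RBAs
	 * gets 32 sectors, 64 heads and 409600 / (64 * 32) = 200 cylinders,
	 * with sc_capacity = 409600 sectors (200 MB at 512 bytes/sector).
	 */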
	ed->sectors = 32;
	ed->heads = 64;
	ed->cyl = ed->rba / (ed->heads * ed->sectors);
	ed->sc_capacity = ed->rba;

	return (0);
}

/*
 * Our shutdown hook.  We only attempt to park the disk's heads.
 */
void
ed_shutdown(arg)
	void *arg;
{
#if 0
	struct ed_softc *ed = arg;
	u_int16_t cmd_args[2];

	/* Issue Park Head command */
	cmd_args[0] = 6;	/* Options: 000110 */
	cmd_args[1] = 0;
	(void) edc_run_cmd(ed->edc_softc, CMD_PARK_HEAD, ed->sc_devno,
	    cmd_args, 2, 0);
#endif
}

/*
 * Main worker thread function.
 */
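/*
 * edmcastrategy() queues bufs on sc_q, sets EDF_PROCESS_QUEUE and wakes
 * this thread; we drain the queue, start each transfer via __edstart(),
 * sleep until edc_intr() signals completion, and finish the buf with
 * edmcadone().
 */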
void
edworker(arg)
	void *arg;
{
	struct ed_softc *ed = (struct ed_softc *) arg;
	struct buf *bp;
	int s;

	config_pending_decr();

	for(;;) {
		/* Wait until awakened */
		(void) tsleep(&ed->sc_q, PRIBIO, "edidle", 0);

		if ((ed->sc_flags & EDF_PROCESS_QUEUE) == 0)
			panic("edworker: expecting process queue");
		ed->sc_flags &= ~EDF_PROCESS_QUEUE;

		for(;;) {
			/* Is there a buf for us ? */
			simple_lock(&ed->sc_q_lock);
			if ((bp = BUFQ_FIRST(&ed->sc_q)) == NULL) {
				simple_unlock(&ed->sc_q_lock);
				break;
			}
			BUFQ_REMOVE(&ed->sc_q, bp);
			simple_unlock(&ed->sc_q_lock);

			/* Schedule i/o operation */
			ed->sc_error = 0;
			s = splbio();
			__edstart(ed, bp);

			/*
			 * Wait until the command executes; edc_intr() wakes
			 * us up.
			 */
			if (ed->sc_error == 0)
				(void)tsleep(&ed->edc_softc, PRIBIO, "edwrk",0);

			/* Handle i/o results */
			edmcadone(ed, bp);
			splx(s);
		}
	}
}