/*	$NetBSD: ed_mca.c,v 1.9 2001/11/13 07:46:25 lukem Exp $	*/

/*
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jaromir Dolecek.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Disk goo for MCA ESDI controller driver.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ed_mca.c,v 1.9 2001/11/13 07:46:25 lukem Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/kthread.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <machine/intr.h>
#include <machine/bus.h>

#include <dev/mca/mcavar.h>

#include <dev/mca/edcreg.h>
#include <dev/mca/edvar.h>
#include <dev/mca/edcvar.h>

/* #define WDCDEBUG */

#ifdef WDCDEBUG
#define WDCDEBUG_PRINT(args, level)	printf args
#else
#define WDCDEBUG_PRINT(args, level)
#endif
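
/* Device used for reading/writing the on-disk label: same unit, raw partition. */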
#define	EDLABELDEV(dev)	(MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART))

/* XXX: these should go elsewhere */
cdev_decl(edmca);
bdev_decl(edmca);

static int  ed_mca_probe  __P((struct device *, struct cfdata *, void *));
static void ed_mca_attach __P((struct device *, struct device *, void *));

struct cfattach ed_mca_ca = {
	sizeof(struct ed_softc), ed_mca_probe, ed_mca_attach
};

extern struct cfdriver ed_cd;

static int  ed_get_params __P((struct ed_softc *));
static int  ed_lock __P((struct ed_softc *));
static void ed_unlock __P((struct ed_softc *));
static void edgetdisklabel __P((struct ed_softc *));
static void edgetdefaultlabel __P((struct ed_softc *, struct disklabel *));
static void ed_shutdown __P((void*));
static void __edstart __P((struct ed_softc*, struct buf *));
static void bad144intern __P((struct ed_softc *));
static void edworker __P((void *));
static void ed_spawn_worker __P((void *));
static void edmcadone __P((struct ed_softc *, struct buf *));
static void ed_bio __P((struct ed_softc *, int, int));
static void ed_bio_done __P((struct ed_softc *));
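
/* Disk driver glue; only the strategy routine is filled in here. */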
static struct dkdriver eddkdriver = { edmcastrategy };

/*
 * Just check if it's possible to identify the disk.
 */
static int
ed_mca_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	u_int16_t cmd_args[2];
	struct edc_mca_softc *sc = (void *) parent;
	struct ed_attach_args *eda = (void *) aux;
	int found = 1;

	/*
	 * Get Device Configuration (09).
	 */
	cmd_args[0] = 14;	/* Options: 00s110, s: 0=Physical 1=Pseudo */
	cmd_args[1] = 0;
	if (edc_run_cmd(sc, CMD_GET_DEV_CONF, eda->sc_devno, cmd_args, 2, 0, 1))
		found = 0;

	return (found);
}

static void
ed_mca_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct ed_softc *ed = (void *) self;
	struct edc_mca_softc *sc = (void *) parent;
	struct ed_attach_args *eda = (void *) aux;
	char pbuf[8];
	int error, nsegs;

	ed->edc_softc = sc;
	ed->sc_dmat = eda->sc_dmat;
	ed->sc_devno = eda->sc_devno;
	edc_add_disk(sc, ed, eda->sc_devno);

	BUFQ_INIT(&ed->sc_q);
	simple_lock_init(&ed->sc_q_lock);
	lockinit(&ed->sc_lock, PRIBIO | PCATCH, "edlck", 0, 0);
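
	/* Identify the drive; bail out if it doesn't answer. */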
	if (ed_get_params(ed)) {
		printf(": IDENTIFY failed, no disk found\n");
		return;
	}

	format_bytes(pbuf, sizeof(pbuf),
	    (u_int64_t) ed->sc_capacity * DEV_BSIZE);
	printf(": %s, %u cyl, %u head, %u sec, 512 bytes/sect x %u sectors\n",
	    pbuf,
	    ed->cyl, ed->heads, ed->sectors,
	    ed->sc_capacity);

	printf("%s: %u spares/cyl, %s, %s, %s, %s, %s\n",
	    ed->sc_dev.dv_xname, ed->spares,
	    (ed->drv_flags & (1 << 0)) ? "NoRetries" : "Retries",
	    (ed->drv_flags & (1 << 1)) ? "Removable" : "Fixed",
	    (ed->drv_flags & (1 << 2)) ? "SkewedFormat" : "NoSkew",
	    (ed->drv_flags & (1 << 3)) ? "ZeroDefect" : "Defects",
	    (ed->drv_flags & (1 << 4)) ? "InvalidSecondary" : "SecondaryOK"
	    );

	/* Create a DMA map for mapping individual transfer bufs */
	if ((error = bus_dmamap_create(ed->sc_dmat, 65536, 1,
	    65536, 65536, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
	    &ed->dmamap_xfer)) != 0) {
		printf("%s: unable to create xfer DMA map, error=%d\n",
		    ed->sc_dev.dv_xname, error);
		return;
	}

	/*
	 * Allocate DMA memory to be used as a bounce buffer when the
	 * passed buf isn't physically contiguous.
	 */
	ed->sc_dmam_sz = MAXPHYS;
	if ((error = bus_dmamem_alloc(ed->sc_dmat, ed->sc_dmam_sz,
	    ed->sc_dmam_sz, 65536, ed->sc_dmam, 1, &nsegs,
	    BUS_DMA_WAITOK|BUS_DMA_STREAMING)) != 0) {
		printf("%s: unable to allocate DMA memory for xfer, errno=%d\n",
		    ed->sc_dev.dv_xname, error);
		bus_dmamap_destroy(ed->sc_dmat, ed->dmamap_xfer);
		return;
	}
	/*
	 * Map the memory.
	 */
	if ((error = bus_dmamem_map(ed->sc_dmat, ed->sc_dmam, 1,
	    ed->sc_dmam_sz, &ed->sc_dmamkva, BUS_DMA_WAITOK)) != 0) {
		printf("%s: unable to map DMA memory, error=%d\n",
		    ed->sc_dev.dv_xname, error);
		bus_dmamem_free(ed->sc_dmat, ed->sc_dmam, 1);
		bus_dmamap_destroy(ed->sc_dmat, ed->dmamap_xfer);
		return;
	}

	/*
	 * Initialize and attach the disk structure.
	 */
	ed->sc_dk.dk_driver = &eddkdriver;
	ed->sc_dk.dk_name = ed->sc_dev.dv_xname;
	disk_attach(&ed->sc_dk);
#if 0
	wd->sc_wdc_bio.lp = wd->sc_dk.dk_label;
#endif
	ed->sc_sdhook = shutdownhook_establish(ed_shutdown, ed);
	if (ed->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    ed->sc_dev.dv_xname);
#if NRND > 0
	rnd_attach_source(&ed->rnd_source, ed->sc_dev.dv_xname,
	    RND_TYPE_DISK, 0);
#endif

	config_pending_incr();
	kthread_create(ed_spawn_worker, (void *) ed);

	ed->sc_flags |= EDF_INIT;
}

void
ed_spawn_worker(arg)
	void *arg;
{
	struct ed_softc *ed = (struct ed_softc *) arg;
	int error;

	/* Everything is ready; start the worker kthread. */
	if ((error = kthread_create1(edworker, ed, &ed->sc_worker,
	    "%s", ed->sc_dev.dv_xname))) {
		printf("%s: cannot spawn worker thread: errno=%d\n",
		    ed->sc_dev.dv_xname, error);
		panic("ed_spawn_worker");
	}
}

/*
 * Read/write routine for a buffer.  Validates the arguments and schedules the
 * transfer.  Does not wait for the transfer to complete.
 */
void
edmcastrategy(bp)
	struct buf *bp;
{
	struct ed_softc *wd = device_lookup(&ed_cd, DISKUNIT(bp->b_dev));
	struct disklabel *lp = wd->sc_dk.dk_label;
	daddr_t blkno;
	int s;

	WDCDEBUG_PRINT(("edmcastrategy (%s)\n", wd->sc_dev.dv_xname),
	    DEBUG_XFERS);

	/* Valid request? */
	if (bp->b_blkno < 0 ||
	    (bp->b_bcount % lp->d_secsize) != 0 ||
	    (bp->b_bcount / lp->d_secsize) >= (1 << NBBY)) {
		bp->b_error = EINVAL;
		goto bad;
	}

	/* If device invalidated (e.g. media change, door open), error. */
	if ((wd->sc_flags & WDF_LOADED) == 0) {
		bp->b_error = EIO;
		goto bad;
	}

	/* If it's a null transfer, return immediately. */
	if (bp->b_bcount == 0)
		goto done;

	/*
	 * Do bounds checking and adjust the transfer.  If there's an error,
	 * process it; if it's past the end of the partition, just return.
	 */
	if (DISKPART(bp->b_dev) != RAW_PART &&
	    bounds_check_with_label(bp, wd->sc_dk.dk_label,
	    (wd->sc_flags & (WDF_WLABEL|WDF_LABELLING)) != 0) <= 0)
		goto done;

	/*
	 * Now convert the block number to absolute and put it in
	 * terms of the device's logical block size.
	 */
	if (lp->d_secsize >= DEV_BSIZE)
		blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

	if (DISKPART(bp->b_dev) != RAW_PART)
		blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset;

	bp->b_rawblkno = blkno;

	/* Queue the transfer on the drive's buf queue. */
	s = splbio();
	simple_lock(&wd->sc_q_lock);
	disksort_blkno(&wd->sc_q, bp);
	simple_unlock(&wd->sc_q_lock);

	/* Ring the worker thread */
	wd->sc_flags |= EDF_PROCESS_QUEUE;
	wakeup_one(&wd->sc_q);

	splx(s);
	return;
bad:
	bp->b_flags |= B_ERROR;
done:
	/* Toss transfer; we're done early. */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}
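
/*
 * Issue a single Read/Write Data command for the transfer currently
 * described by sc_data, sc_rawblkno, sc_bcount and sc_read.  The async
 * and poll arguments are passed through to edc_run_cmd().
 */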
static void
ed_bio(struct ed_softc *ed, int async, int poll)
{
	u_int16_t cmd_args[4];
	int error = 0;
	u_int16_t track;
	u_int16_t cyl;
	u_int8_t head;
	u_int8_t sector;

	/* Get physical bus mapping for buf. */
	if (bus_dmamap_load(ed->sc_dmat, ed->dmamap_xfer,
	    ed->sc_data, ed->sc_bcount, NULL,
	    BUS_DMA_WAITOK|BUS_DMA_STREAMING) != 0) {

		/*
		 * Use our DMA safe memory to get data to/from device.
		 */
		if ((error = bus_dmamap_load(ed->sc_dmat, ed->dmamap_xfer,
		    ed->sc_dmamkva, ed->sc_bcount, NULL,
		    BUS_DMA_WAITOK|BUS_DMA_STREAMING)) != 0) {
			printf("%s: unable to load raw data for xfer, errno=%d\n",
			    ed->sc_dev.dv_xname, error);
			goto out;
		}
		ed->sc_flags |= EDF_BOUNCEBUF;

		/* If data write, copy the data to our bounce buffer. */
		if (!ed->sc_read)
			memcpy(ed->sc_dmamkva, ed->sc_data, ed->sc_bcount);
	}

	ed->sc_flags |= EDF_DMAMAP_LOADED;

	track = ed->sc_rawblkno / ed->sectors;
	head = track % ed->heads;
	cyl = track / ed->heads;
	sector = ed->sc_rawblkno % ed->sectors;

	WDCDEBUG_PRINT(("ed_bio %s: map: %u %u %u\n", ed->sc_dev.dv_xname,
	    cyl, sector, head),
	    DEBUG_XFERS);

	mca_disk_busy();
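
	/*
	 * Build the packed CHS address: cmd_args[2] holds the low five
	 * cylinder bits in bits 11-15, the head in bits 5-10 and the
	 * sector in bits 0-4; cmd_args[3] holds cylinder bits 5-9.
	 */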
	/* Read or Write Data command */
	cmd_args[0] = 2;	/* Options 0000010 */
	cmd_args[1] = ed->sc_bcount / DEV_BSIZE;
	cmd_args[2] = ((cyl & 0x1f) << 11) | (head << 5) | sector;
	cmd_args[3] = ((cyl & 0x3E0) >> 5);
	if (edc_run_cmd(ed->edc_softc,
	    (ed->sc_read) ? CMD_READ_DATA : CMD_WRITE_DATA,
	    ed->sc_devno, cmd_args, 4, async, poll)) {
		printf("%s: data i/o command failed\n", ed->sc_dev.dv_xname);
		mca_disk_unbusy();
		error = EIO;
	}

out:
	if (error)
		ed->sc_error = error;
}

static void
__edstart(ed, bp)
	struct ed_softc *ed;
	struct buf *bp;
{
	WDCDEBUG_PRINT(("__edstart %s (%s): %lu %lu %u\n", ed->sc_dev.dv_xname,
	    (bp->b_flags & B_READ) ? "read" : "write",
	    bp->b_bcount, bp->b_resid, bp->b_rawblkno),
	    DEBUG_XFERS);

	/* Instrumentation. */
	disk_busy(&ed->sc_dk);
	ed->sc_flags |= EDF_DK_BUSY;

	ed->sc_data = bp->b_data;
	ed->sc_rawblkno = bp->b_rawblkno;
	ed->sc_bcount = bp->b_bcount;
	ed->sc_read = bp->b_flags & B_READ;
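
	/*
	 * Fire off the transfer asynchronously; edc_intr() wakes the
	 * worker thread when the command completes.
	 */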
	ed_bio(ed, 1, 0);
}

static void
ed_bio_done(ed)
	struct ed_softc *ed;
{
	/*
	 * If read transfer finished without error and using a bounce
	 * buffer, copy the data to buf.
	 */
	if (ed->sc_error == 0 && (ed->sc_flags & EDF_BOUNCEBUF) && ed->sc_read)
		memcpy(ed->sc_data, ed->sc_dmamkva, ed->sc_bcount);
	ed->sc_flags &= ~EDF_BOUNCEBUF;

	/* Unload buf from DMA map */
	if (ed->sc_flags & EDF_DMAMAP_LOADED) {
		bus_dmamap_unload(ed->sc_dmat, ed->dmamap_xfer);
		ed->sc_flags &= ~EDF_DMAMAP_LOADED;
	}

	mca_disk_unbusy();
}

static void
edmcadone(ed, bp)
	struct ed_softc *ed;
	struct buf *bp;
{
	WDCDEBUG_PRINT(("eddone %s\n", ed->sc_dev.dv_xname),
	    DEBUG_XFERS);

	if (ed->sc_error) {
		bp->b_error = ed->sc_error;
		bp->b_flags |= B_ERROR;
	} else {
		/* Set resid, most commonly to zero. */
		bp->b_resid = ed->sc_status_block[SB_RESBLKCNT_IDX] * DEV_BSIZE;
	}

	ed_bio_done(ed);

	/* If disk was busied, unbusy it now */
	if (ed->sc_flags & EDF_DK_BUSY) {
		disk_unbusy(&ed->sc_dk, (bp->b_bcount - bp->b_resid));
		ed->sc_flags &= ~EDF_DK_BUSY;
	}

#if NRND > 0
	rnd_add_uint32(&ed->rnd_source, bp->b_blkno);
#endif
	biodone(bp);
}

int
edmcaread(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{
	WDCDEBUG_PRINT(("edread\n"), DEBUG_XFERS);
	return (physio(edmcastrategy, NULL, dev, B_READ, minphys, uio));
}

int
edmcawrite(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{
	WDCDEBUG_PRINT(("edwrite\n"), DEBUG_XFERS);
	return (physio(edmcastrategy, NULL, dev, B_WRITE, minphys, uio));
}

/*
 * Wait interruptibly for an exclusive lock.
 */
static int
ed_lock(ed)
	struct ed_softc *ed;
{
	int error;
	int s;

	WDCDEBUG_PRINT(("ed_lock\n"), DEBUG_FUNCS);

	s = splbio();
	error = lockmgr(&ed->sc_lock, LK_EXCLUSIVE, NULL);
	splx(s);

	return (error);
}

/*
 * Unlock and wake up any waiters.
 */
static void
ed_unlock(ed)
	struct ed_softc *ed;
{
	WDCDEBUG_PRINT(("ed_unlock\n"), DEBUG_FUNCS);

	(void) lockmgr(&ed->sc_lock, LK_RELEASE, NULL);
}

int
edmcaopen(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
	struct proc *p;
{
	struct ed_softc *wd;
	int part, error;

	WDCDEBUG_PRINT(("edopen\n"), DEBUG_FUNCS);
	wd = device_lookup(&ed_cd, DISKUNIT(dev));
	if (wd == NULL || (wd->sc_flags & EDF_INIT) == 0)
		return (ENXIO);

	if ((error = ed_lock(wd)) != 0)
		goto bad4;

	if (wd->sc_dk.dk_openmask != 0) {
		/*
		 * If any partition is open, but the disk has been invalidated,
		 * disallow further opens.
		 */
		if ((wd->sc_flags & WDF_LOADED) == 0) {
			error = EIO;
			goto bad3;
		}
	} else {
		if ((wd->sc_flags & WDF_LOADED) == 0) {
			wd->sc_flags |= WDF_LOADED;

			/* Load the physical device parameters. */
			ed_get_params(wd);

			/* Load the partition info if not already loaded. */
			edgetdisklabel(wd);
		}
	}

	part = DISKPART(dev);

	/* Check that the partition exists. */
	if (part != RAW_PART &&
	    (part >= wd->sc_dk.dk_label->d_npartitions ||
	     wd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
		error = ENXIO;
		goto bad;
	}

	/* Record this open in the appropriate open mask. */
	switch (fmt) {
	case S_IFCHR:
		wd->sc_dk.dk_copenmask |= (1 << part);
		break;
	case S_IFBLK:
		wd->sc_dk.dk_bopenmask |= (1 << part);
		break;
	}
	wd->sc_dk.dk_openmask =
	    wd->sc_dk.dk_copenmask | wd->sc_dk.dk_bopenmask;

	ed_unlock(wd);
	return 0;

bad:
	if (wd->sc_dk.dk_openmask == 0) {
	}

bad3:
	ed_unlock(wd);
bad4:
	return (error);
}

int
edmcaclose(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
	struct proc *p;
{
	struct ed_softc *wd = device_lookup(&ed_cd, DISKUNIT(dev));
	int part = DISKPART(dev);
	int error;

	WDCDEBUG_PRINT(("edmcaclose\n"), DEBUG_FUNCS);
	if ((error = ed_lock(wd)) != 0)
		return error;

	switch (fmt) {
	case S_IFCHR:
		wd->sc_dk.dk_copenmask &= ~(1 << part);
		break;
	case S_IFBLK:
		wd->sc_dk.dk_bopenmask &= ~(1 << part);
		break;
	}
	wd->sc_dk.dk_openmask =
	    wd->sc_dk.dk_copenmask | wd->sc_dk.dk_bopenmask;

	if (wd->sc_dk.dk_openmask == 0) {
#if 0
		wd_flushcache(wd, AT_WAIT);
#endif
		/* XXXX Must wait for I/O to complete! */

		if (! (wd->sc_flags & WDF_KLABEL))
			wd->sc_flags &= ~WDF_LOADED;
	}

	ed_unlock(wd);

	return 0;
}

static void
edgetdefaultlabel(wd, lp)
	struct ed_softc *wd;
	struct disklabel *lp;
{
	WDCDEBUG_PRINT(("edgetdefaultlabel\n"), DEBUG_FUNCS);
	memset(lp, 0, sizeof(struct disklabel));

	lp->d_secsize = DEV_BSIZE;
	lp->d_ntracks = wd->heads;
	lp->d_nsectors = wd->sectors;
	lp->d_ncylinders = wd->cyl;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;

	lp->d_type = DTYPE_ESDI;

	strncpy(lp->d_typename, "ESDI", 16);
	strncpy(lp->d_packname, "fictitious", 16);
	lp->d_secperunit = wd->sc_capacity;
	lp->d_rpm = 3600;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size =
	    lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);
}

/*
 * Fabricate a default disk label, and try to read the correct one.
 */
static void
edgetdisklabel(wd)
	struct ed_softc *wd;
{
	struct disklabel *lp = wd->sc_dk.dk_label;
	char *errstring;

	WDCDEBUG_PRINT(("edgetdisklabel\n"), DEBUG_FUNCS);

	memset(wd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));

	edgetdefaultlabel(wd, lp);

#if 0
	wd->sc_badsect[0] = -1;

	if (wd->drvp->state > RECAL)
		wd->drvp->drive_flags |= DRIVE_RESET;
#endif
	errstring = readdisklabel(MAKEDISKDEV(0, wd->sc_dev.dv_unit, RAW_PART),
	    edmcastrategy, lp, wd->sc_dk.dk_cpulabel);
	if (errstring) {
		/*
		 * This probably happened because the drive's default
		 * geometry doesn't match the DOS geometry.  We
		 * assume the DOS geometry is now in the label and try
		 * again.  XXX This is a kluge.
		 */
#if 0
		if (wd->drvp->state > RECAL)
			wd->drvp->drive_flags |= DRIVE_RESET;
#endif
		errstring = readdisklabel(MAKEDISKDEV(0, wd->sc_dev.dv_unit,
		    RAW_PART), edmcastrategy, lp, wd->sc_dk.dk_cpulabel);
	}
	if (errstring) {
		printf("%s: %s\n", wd->sc_dev.dv_xname, errstring);
		return;
	}

#if 0
	if (wd->drvp->state > RECAL)
		wd->drvp->drive_flags |= DRIVE_RESET;
#endif
#ifdef HAS_BAD144_HANDLING
	if ((lp->d_flags & D_BADSECT) != 0)
		bad144intern(wd);
#endif
}

int
edmcaioctl(dev, xfer, addr, flag, p)
	dev_t dev;
	u_long xfer;
	caddr_t addr;
	int flag;
	struct proc *p;
{
	struct ed_softc *wd = device_lookup(&ed_cd, DISKUNIT(dev));
	int error;
#ifdef __HAVE_OLD_DISKLABEL
	struct disklabel newlabel;
#endif

	WDCDEBUG_PRINT(("edioctl\n"), DEBUG_FUNCS);

	if ((wd->sc_flags & WDF_LOADED) == 0)
		return EIO;

	switch (xfer) {
#ifdef HAS_BAD144_HANDLING
	case DIOCSBAD:
		if ((flag & FWRITE) == 0)
			return EBADF;
		wd->sc_dk.dk_cpulabel->bad = *(struct dkbad *)addr;
		wd->sc_dk.dk_label->d_flags |= D_BADSECT;
		bad144intern(wd);
		return 0;
#endif

	case DIOCGDINFO:
		*(struct disklabel *)addr = *(wd->sc_dk.dk_label);
		return 0;
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
		newlabel = *(wd->sc_dk.dk_label);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
		return 0;
#endif

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = wd->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &wd->sc_dk.dk_label->d_partitions[DISKPART(dev)];
		return 0;

	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:
#endif
	{
		struct disklabel *lp;

#ifdef __HAVE_OLD_DISKLABEL
		if (xfer == ODIOCSDINFO || xfer == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, addr, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)addr;

		if ((flag & FWRITE) == 0)
			return EBADF;

		if ((error = ed_lock(wd)) != 0)
			return error;
		wd->sc_flags |= WDF_LABELLING;

		error = setdisklabel(wd->sc_dk.dk_label,
		    lp, /*wd->sc_dk.dk_openmask : */0,
		    wd->sc_dk.dk_cpulabel);
		if (error == 0) {
#if 0
			if (wd->drvp->state > RECAL)
				wd->drvp->drive_flags |= DRIVE_RESET;
#endif
			if (xfer == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
			    || xfer == ODIOCWDINFO
#endif
			    )
				error = writedisklabel(EDLABELDEV(dev),
				    edmcastrategy, wd->sc_dk.dk_label,
				    wd->sc_dk.dk_cpulabel);
		}

		wd->sc_flags &= ~WDF_LABELLING;
		ed_unlock(wd);
		return error;
	}

	case DIOCKLABEL:
		if (*(int *)addr)
			wd->sc_flags |= WDF_KLABEL;
		else
			wd->sc_flags &= ~WDF_KLABEL;
		return 0;

	case DIOCWLABEL:
		if ((flag & FWRITE) == 0)
			return EBADF;
		if (*(int *)addr)
			wd->sc_flags |= WDF_WLABEL;
		else
			wd->sc_flags &= ~WDF_WLABEL;
		return 0;

	case DIOCGDEFLABEL:
		edgetdefaultlabel(wd, (struct disklabel *)addr);
		return 0;
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		edgetdefaultlabel(wd, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
		return 0;
#endif

#ifdef notyet
	case DIOCWFORMAT:
		if ((flag & FWRITE) == 0)
			return EBADF;
		{
		register struct format_op *fop;
		struct iovec aiov;
		struct uio auio;

		fop = (struct format_op *)addr;
		aiov.iov_base = fop->df_buf;
		aiov.iov_len = fop->df_count;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = fop->df_count;
		auio.uio_segflg = 0;
		auio.uio_offset =
		    fop->df_startblk * wd->sc_dk.dk_label->d_secsize;
		auio.uio_procp = p;
		error = physio(wdformat, NULL, dev, B_WRITE, minphys,
		    &auio);
		fop->df_count -= auio.uio_resid;
		fop->df_reg[0] = wdc->sc_status;
		fop->df_reg[1] = wdc->sc_error;
		return error;
		}
#endif

	default:
		return ENOTTY;
	}

#ifdef DIAGNOSTIC
	panic("edioctl: impossible");
#endif
}

#if 0
#ifdef B_FORMAT
int
edmcaformat(struct buf *bp)
{

	bp->b_flags |= B_FORMAT;
	return edmcastrategy(bp);
}
#endif
#endif

int
edmcasize(dev)
	dev_t dev;
{
	struct ed_softc *wd;
	int part, omask;
	int size;

	WDCDEBUG_PRINT(("edsize\n"), DEBUG_FUNCS);

	wd = device_lookup(&ed_cd, DISKUNIT(dev));
	if (wd == NULL)
		return (-1);

	part = DISKPART(dev);
	omask = wd->sc_dk.dk_openmask & (1 << part);

	if (omask == 0 && edmcaopen(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);
	if (wd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = wd->sc_dk.dk_label->d_partitions[part].p_size *
		    (wd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
	if (omask == 0 && edmcaclose(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);
	return (size);
}

/* #define WD_DUMP_NOT_TRUSTED	if you just want to watch */
static int eddoingadump = 0;
static int eddumprecalibrated = 0;
static int eddumpmulti = 1;

/*
 * Dump core after a system crash.
 */
int
edmcadump(dev, blkno, va, size)
	dev_t dev;
	daddr_t blkno;
	caddr_t va;
	size_t size;
{
	struct ed_softc *ed;	/* disk unit to do the I/O */
	struct disklabel *lp;	/* disk's disklabel */
	int part;
	int nblks;		/* total number of sectors left to write */

	/* Check if recursive dump; if so, punt. */
	if (eddoingadump)
		return EFAULT;
	eddoingadump = 1;

	ed = device_lookup(&ed_cd, DISKUNIT(dev));
	if (ed == NULL)
		return (ENXIO);

	part = DISKPART(dev);

	/* Make sure it was initialized. */
	if ((ed->sc_flags & EDF_INIT) == 0)
		return ENXIO;

	/*
	 * Convert to disk sectors.  The request must be a multiple of
	 * the sector size.
	 */
	lp = ed->sc_dk.dk_label;
	if ((size % lp->d_secsize) != 0)
		return EFAULT;
	nblks = size / lp->d_secsize;
	blkno = blkno / (lp->d_secsize / DEV_BSIZE);

	/* Check transfer bounds against partition size. */
	if ((blkno < 0) || ((blkno + nblks) > lp->d_partitions[part].p_size))
		return EINVAL;

	/* Offset block number to start of partition. */
	blkno += lp->d_partitions[part].p_offset;

	/* Recalibrate, if first dump transfer. */
	if (eddumprecalibrated == 0) {
		eddumprecalibrated = 1;
		eddumpmulti = 8;
#if 0
		wd->drvp->state = RESET;
#endif
	}
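
	/*
	 * Write the dump in chunks of at most eddumpmulti blocks, using
	 * synchronous polled I/O.
	 */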
	while (nblks > 0) {
		ed->sc_data = va;
		ed->sc_rawblkno = blkno;
		ed->sc_bcount = min(nblks, eddumpmulti) * lp->d_secsize;
		ed->sc_read = 0;

		ed_bio(ed, 0, 1);
		if (ed->sc_error)
			return (ed->sc_error);

		ed_bio_done(ed);

		/*
		 * Update for the next chunk; adjust nblks last, so that
		 * the same chunk size is used for blkno, va and nblks.
		 */
		blkno += min(nblks, eddumpmulti);
		va += min(nblks, eddumpmulti) * lp->d_secsize;
		nblks -= min(nblks, eddumpmulti);
	}

	eddoingadump = 0;
	return (0);
}

#ifdef HAS_BAD144_HANDLING
/*
 * Internalize the bad sector table.
 */
static void
bad144intern(wd)
	struct ed_softc *wd;
{
	struct dkbad *bt = &wd->sc_dk.dk_cpulabel->bad;
	struct disklabel *lp = wd->sc_dk.dk_label;
	int i = 0;

	WDCDEBUG_PRINT(("bad144intern\n"), DEBUG_XFERS);

	for (; i < NBT_BAD; i++) {
		if (bt->bt_bad[i].bt_cyl == 0xffff)
			break;
		wd->sc_badsect[i] =
		    bt->bt_bad[i].bt_cyl * lp->d_secpercyl +
		    (bt->bt_bad[i].bt_trksec >> 8) * lp->d_nsectors +
		    (bt->bt_bad[i].bt_trksec & 0xff);
	}
	for (; i < NBT_BAD+1; i++)
		wd->sc_badsect[i] = -1;
}
#endif

static int
ed_get_params(ed)
	struct ed_softc *ed;
{
	u_int16_t cmd_args[2];

	/*
	 * Get Device Configuration (09).
	 */
	cmd_args[0] = 14;	/* Options: 00s110, s: 0=Physical 1=Pseudo */
	cmd_args[1] = 0;
	if (edc_run_cmd(ed->edc_softc, CMD_GET_DEV_CONF, ed->sc_devno,
	    cmd_args, 2, 0, 1))
		return (1);
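
	/*
	 * The returned status block holds the spares-per-cylinder count
	 * and the drive flag bits in word 1, and the 32-bit RBA (sector)
	 * count in words 2 and 3.
	 */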
	ed->spares = ed->sc_status_block[1] >> 8;
	ed->drv_flags = ed->sc_status_block[1] & 0x1f;
	ed->rba = ed->sc_status_block[2] |
	    (ed->sc_status_block[3] << 16);
	/*
	 * Instead of using:
	 *	ed->cyl = ed->sc_status_block[4];
	 *	ed->heads = ed->sc_status_block[5] & 0xff;
	 *	ed->sectors = ed->sc_status_block[5] >> 8;
	 * we fabricate the geometry from the RBA count, so that the
	 * number of sectors per track is 32 and the number of heads
	 * is 64.  This seems to be necessary for the integrated ESDI
	 * controller.
	 */
	ed->sectors = 32;
	ed->heads = 64;
	ed->cyl = ed->rba / (ed->heads * ed->sectors);
	ed->sc_capacity = ed->rba;

	return (0);
}

/*
 * Our shutdown hook.  We only attempt to park the disk's heads.
 */
void
ed_shutdown(arg)
	void *arg;
{
#if 0
	struct ed_softc *ed = arg;
	u_int16_t cmd_args[2];

	/* Issue Park Head command */
	cmd_args[0] = 6;	/* Options: 000110 */
	cmd_args[1] = 0;
	(void) edc_run_cmd(ed->edc_softc, CMD_PARK_HEAD, ed->sc_devno,
	    cmd_args, 2, 0);
#endif
}

/*
 * Main worker thread function.
 */
void
edworker(arg)
	void *arg;
{
	struct ed_softc *ed = (struct ed_softc *) arg;
	struct buf *bp;
	int s;

	config_pending_decr();

	for(;;) {
		/* Wait until awakened */
		(void) tsleep(&ed->sc_q, PRIBIO, "edidle", 0);

		if ((ed->sc_flags & EDF_PROCESS_QUEUE) == 0)
			panic("edworker: expecting process queue");
		ed->sc_flags &= ~EDF_PROCESS_QUEUE;

		for(;;) {
			/* Is there a buf for us? */
			simple_lock(&ed->sc_q_lock);
			if ((bp = BUFQ_FIRST(&ed->sc_q)) == NULL) {
				simple_unlock(&ed->sc_q_lock);
				break;
			}
			BUFQ_REMOVE(&ed->sc_q, bp);
			simple_unlock(&ed->sc_q_lock);

			/* Schedule i/o operation */
			ed->sc_error = 0;
			s = splbio();
			__edstart(ed, bp);

			/*
			 * Wait until the command executes; edc_intr() wakes
			 * us up.
			 */
			if (ed->sc_error == 0)
				(void) tsleep(&ed->edc_softc, PRIBIO, "edwrk", 0);

			/* Handle i/o results */
			edmcadone(ed, bp);
			splx(s);
		}
	}
}