/*	$NetBSD: ed_mca.c,v 1.4 2001/04/22 11:52:18 jdolecek Exp $	*/

/*
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jaromir Dolecek.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Disk goo for MCA ESDI controller driver.
 */

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/buf.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/disklabel.h>
#include <sys/disk.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/kthread.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <machine/intr.h>
#include <machine/bus.h>

#include <dev/mca/mcavar.h>

#include <dev/mca/edcreg.h>
#include <dev/mca/edvar.h>
#include <dev/mca/edcvar.h>

/* #define WDCDEBUG */

#ifdef WDCDEBUG
#define WDCDEBUG_PRINT(args, level)	printf args
#else
#define WDCDEBUG_PRINT(args, level)
#endif

#define	EDLABELDEV(dev)	(MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART))

/* XXX: these should go elsewhere */
cdev_decl(edmca);
bdev_decl(edmca);

static int  ed_mca_probe  __P((struct device *, struct cfdata *, void *));
static void ed_mca_attach __P((struct device *, struct device *, void *));

struct cfattach ed_mca_ca = {
	sizeof(struct ed_softc), ed_mca_probe, ed_mca_attach
};

extern struct cfdriver ed_cd;

static int	ed_get_params __P((struct ed_softc *));
static int	ed_lock __P((struct ed_softc *));
static void	ed_unlock __P((struct ed_softc *));
static void	edgetdisklabel __P((struct ed_softc *));
static void	edgetdefaultlabel __P((struct ed_softc *, struct disklabel *));
static void	ed_shutdown __P((void*));
static void	__edstart __P((struct ed_softc*, struct buf *));
static void	bad144intern __P((struct ed_softc *));
static void	edworker __P((void *));
static void	ed_spawn_worker __P((void *));
static void	edmcadone __P((struct ed_softc *));

static struct dkdriver eddkdriver = { edmcastrategy };

/*
 * Just check if it's possible to identify the disk.
 */
static int
ed_mca_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	u_int16_t cmd_args[2];
	struct edc_mca_softc *sc = (void *) parent;
	struct ed_attach_args *eda = (void *) aux;

	/*
	 * Get Device Configuration (09).
	 */
	cmd_args[0] = 6;	/* Options: 00s110, s: 0=Physical 1=Pseudo */
	cmd_args[1] = 0;
	if (edc_run_cmd(sc, CMD_GET_DEV_CONF, eda->sc_devno, cmd_args, 2, 0))
		return (0);

	return (1);
}

static void
ed_mca_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct ed_softc *ed = (void *) self;
	struct edc_mca_softc *sc = (void *) parent;
	struct ed_attach_args *eda = (void *) aux;
	char pbuf[8];
	int error, nsegs;

	ed->edc_softc = sc;
	ed->sc_dmat = eda->sc_dmat;
	ed->sc_devno = eda->sc_devno;
	edc_add_disk(sc, ed, eda->sc_devno);

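	/*
	 * sc_q holds buffers queued for transfer and is protected by
	 * sc_q_lock; sc_lock (taken via ed_lock()/ed_unlock()) serializes
	 * open/close and disklabel operations.
	 */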
	BUFQ_INIT(&ed->sc_q);
	spinlockinit(&ed->sc_q_lock, "edbqlock", 0);
	lockinit(&ed->sc_lock, PRIBIO | PCATCH, "edlck", 0, 0);

	if (ed_get_params(ed)) {
154 printf(": IDENTIFY failed, no disk found\n");
		return;
	}

	format_bytes(pbuf, sizeof(pbuf),
	    (u_int64_t) ed->sc_capacity * DEV_BSIZE);
	printf(": %s, %u cyl, %u head, %u sec, 512 bytes/sect x %u sectors\n",
	    pbuf,
	    ed->cyl, ed->heads, ed->sectors,
	    ed->sc_capacity);

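	/*
	 * Report the drive flag bits returned by Get Device Configuration:
	 * bit 0 means retries are disabled, bit 1 removable media, bit 2
	 * skewed format, bit 3 no defects, bit 4 invalid secondary defect
	 * map.
	 */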
165 printf("%s: %u spares/cyl, %s.%s.%s.%s.%s\n",
166 ed->sc_dev.dv_xname, ed->spares,
167 (ed->drv_flags & (1 << 0)) ? "NoRetries" : "Retries",
168 (ed->drv_flags & (1 << 1)) ? "Removable" : "Fixed",
169 (ed->drv_flags & (1 << 2)) ? "SkewedFormat" : "NoSkew",
170 (ed->drv_flags & (1 << 3)) ? "ZeroDefect" : "Defects",
171 (ed->drv_flags & (1 << 4)) ? "InvalidSecondary" : "SeconOK"
172 );
173
174 /* Create a DMA map for mapping individual transfer bufs */
175 if ((error = bus_dmamap_create(ed->sc_dmat, 65536, 1,
176 65536, 65536, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
177 &ed->dmamap_xfer)) != 0) {
178 printf("%s: unable to create xfer DMA map, error=%d\n",
179 ed->sc_dev.dv_xname, error);
180 return;
181 }
182
	/*
	 * Allocate DMA memory to be used when the passed buf isn't
	 * physically contiguous.
	 */
	ed->sc_dmam_sz = MAXPHYS;
	if ((error = bus_dmamem_alloc(ed->sc_dmat, ed->sc_dmam_sz,
	    ed->sc_dmam_sz, 65536, ed->sc_dmam, 1, &nsegs,
	    BUS_DMA_WAITOK|BUS_DMA_STREAMING)) != 0) {
		printf("%s: unable to allocate DMA memory for xfer, errno=%d\n",
		    ed->sc_dev.dv_xname, error);
		bus_dmamap_destroy(ed->sc_dmat, ed->dmamap_xfer);
		return;
	}
	/*
	 * Map the memory.
	 */
	if ((error = bus_dmamem_map(ed->sc_dmat, ed->sc_dmam, 1,
	    ed->sc_dmam_sz, &ed->sc_dmamkva, BUS_DMA_WAITOK)) != 0) {
		printf("%s: unable to map DMA memory, error=%d\n",
		    ed->sc_dev.dv_xname, error);
		bus_dmamem_free(ed->sc_dmat, ed->sc_dmam, 1);
		bus_dmamap_destroy(ed->sc_dmat, ed->dmamap_xfer);
		return;
	}
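
	/*
	 * sc_dmamkva now maps a MAXPHYS-sized DMA-safe bounce buffer;
	 * __edstart() copies data through it whenever a transfer buffer
	 * cannot be loaded into the DMA map directly.
	 */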


	/*
	 * Initialize and attach the disk structure.
	 */
	ed->sc_dk.dk_driver = &eddkdriver;
	ed->sc_dk.dk_name = ed->sc_dev.dv_xname;
	disk_attach(&ed->sc_dk);
#if 0
	wd->sc_wdc_bio.lp = wd->sc_dk.dk_label;
#endif
	ed->sc_sdhook = shutdownhook_establish(ed_shutdown, ed);
	if (ed->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    ed->sc_dev.dv_xname);
#if NRND > 0
	rnd_attach_source(&ed->rnd_source, ed->sc_dev.dv_xname,
	    RND_TYPE_DISK, 0);
#endif

	config_pending_incr();
	kthread_create(ed_spawn_worker, (void *) ed);
}

void
ed_spawn_worker(arg)
	void *arg;
{
	struct ed_softc *ed = (struct ed_softc *) arg;
	int error;

	/* Now, everything is ready, start a kthread */
	if ((error = kthread_create1(edworker, ed, &ed->sc_worker,
	    "%s", ed->sc_dev.dv_xname))) {
		printf("%s: cannot spawn worker thread: errno=%d\n",
		    ed->sc_dev.dv_xname, error);
		panic("ed_spawn_worker");
	}
}

/*
 * Read/write routine for a buffer. Validates the arguments and schedules the
 * transfer. Does not wait for the transfer to complete.
 */
void
edmcastrategy(bp)
	struct buf *bp;
{
	struct ed_softc *wd = device_lookup(&ed_cd, DISKUNIT(bp->b_dev));
	struct disklabel *lp = wd->sc_dk.dk_label;
	daddr_t blkno;
	int s;

	WDCDEBUG_PRINT(("edmcastrategy (%s)\n", wd->sc_dev.dv_xname),
	    DEBUG_XFERS);

	/* Valid request? */
	if (bp->b_blkno < 0 ||
	    (bp->b_bcount % lp->d_secsize) != 0 ||
	    (bp->b_bcount / lp->d_secsize) >= (1 << NBBY)) {
		bp->b_error = EINVAL;
		goto bad;
	}

	/* If device invalidated (e.g. media change, door open), error. */
	if ((wd->sc_flags & WDF_LOADED) == 0) {
		bp->b_error = EIO;
		goto bad;
	}

	/* If it's a null transfer, return immediately. */
	if (bp->b_bcount == 0)
		goto done;

	/*
	 * Do bounds checking and adjust the transfer; if there's an error,
	 * process it.  If it's past the end of the partition, just return.
	 */
	if (DISKPART(bp->b_dev) != RAW_PART &&
	    bounds_check_with_label(bp, wd->sc_dk.dk_label,
	    (wd->sc_flags & (WDF_WLABEL|WDF_LABELLING)) != 0) <= 0)
		goto done;

	/*
	 * Now convert the block number to absolute and put it in
	 * terms of the device's logical block size.
	 */
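	/*
	 * (With the default 512-byte sectors the conversion is a no-op;
	 * a label with e.g. 1024-byte sectors would halve the block
	 * number.)
	 */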
	if (lp->d_secsize >= DEV_BSIZE)
		blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

	if (DISKPART(bp->b_dev) != RAW_PART)
		blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset;

	bp->b_rawblkno = blkno;

	/* Queue transfer on drive, activate drive and controller if idle. */
	s = splbio();
	simple_lock(&wd->sc_q_lock);
	disksort_blkno(&wd->sc_q, bp);
	simple_unlock(&wd->sc_q_lock);

	/* Ring the worker thread */
	wd->sc_flags |= EDF_PROCESS_QUEUE;
	wakeup_one(&wd->sc_q);

	splx(s);
	return;
bad:
	bp->b_flags |= B_ERROR;
done:
	/* Toss transfer; we're done early. */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}

static void
__edstart(ed, bp)
	struct ed_softc *ed;
	struct buf *bp;
{
	u_int16_t cmd_args[4];
	int error=0;
	u_int16_t track;
	u_int16_t cyl;
	u_int8_t head;
	u_int8_t sector;

	WDCDEBUG_PRINT(("__edstart %s (%s): %lu %lu %u\n", ed->sc_dev.dv_xname,
	    (bp->b_flags & B_READ) ? "read" : "write",
	    bp->b_bcount, bp->b_resid, bp->b_rawblkno),
	    DEBUG_XFERS);

	ed->sc_bp = bp;

	/* Get physical bus mapping for buf. */
	if (bus_dmamap_load(ed->sc_dmat, ed->dmamap_xfer,
	    bp->b_data, bp->b_bcount, NULL,
	    BUS_DMA_WAITOK|BUS_DMA_STREAMING) != 0) {

		/*
		 * Use our DMA safe memory to get data to/from device.
		 */
		if ((error = bus_dmamap_load(ed->sc_dmat, ed->dmamap_xfer,
		    ed->sc_dmamkva, bp->b_bcount, NULL,
		    BUS_DMA_WAITOK|BUS_DMA_STREAMING)) != 0) {
			printf("%s: unable to load raw data for xfer, errno=%d\n",
			    ed->sc_dev.dv_xname, error);
			goto out;
		}
		ed->sc_flags |= EDF_BOUNCEBUF;

		/* If data write, copy the data to our bounce buffer. */
		if ((bp->b_flags & B_READ) == 0)
			memcpy(ed->sc_dmamkva, bp->b_data, bp->b_bcount);
	}

	ed->sc_flags |= EDF_DMAMAP_LOADED;

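	/*
	 * Convert the absolute block number into cylinder/head/sector
	 * terms using the drive geometry established by ed_get_params().
	 */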
	track = bp->b_rawblkno / ed->sectors;
	head = track % ed->heads;
	cyl = track / ed->heads;
	sector = bp->b_rawblkno % ed->sectors;

	WDCDEBUG_PRINT(("__edstart %s: map: %u %u %u\n", ed->sc_dev.dv_xname,
	    cyl, sector, head),
	    DEBUG_XFERS);

	/* Instrumentation. */
	disk_busy(&ed->sc_dk);
	ed->sc_flags |= EDF_DK_BUSY;
	mca_disk_busy();

	/* Read or Write Data command */
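	/*
	 * cmd_args[1] is the sector count; cmd_args[2] packs the low five
	 * cylinder bits (bits 11-15), the head (bits 5-10) and the sector
	 * (bits 0-4); cmd_args[3] carries the remaining cylinder bits.
	 */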
	cmd_args[0] = 2;	/* Options 0000010 */
	cmd_args[1] = bp->b_bcount / DEV_BSIZE;
	cmd_args[2] = ((cyl & 0x1f) << 11) | (head << 5) | sector;
	cmd_args[3] = ((cyl & 0x3E0) >> 5);
	if (edc_run_cmd(ed->edc_softc,
	    (bp->b_flags & B_READ) ? CMD_READ_DATA : CMD_WRITE_DATA,
	    ed->sc_devno, cmd_args, 4, 1)) {
		printf("%s: data i/o command failed\n", ed->sc_dev.dv_xname);
		error = EIO;
	}

out:
	if (error)
		ed->sc_error = error;
}


static void
edmcadone(ed)
	struct ed_softc *ed;
{
	struct buf *bp = ed->sc_bp;

	WDCDEBUG_PRINT(("eddone %s\n", ed->sc_dev.dv_xname),
	    DEBUG_XFERS);

	if (ed->sc_error) {
		bp->b_error = ed->sc_error;
		bp->b_flags |= B_ERROR;
	} else {
		/* Set resid, most commonly to zero. */
		bp->b_resid = ed->sc_status_block[SB_RESBLKCNT_IDX] * DEV_BSIZE;
	}

	/*
	 * If read transfer finished without error and using a bounce
	 * buffer, copy the data to buf.
	 */
	if ((bp->b_flags & B_ERROR) == 0 && (ed->sc_flags & EDF_BOUNCEBUF)
	    && (bp->b_flags & B_READ)) {
		memcpy(bp->b_data, ed->sc_dmamkva, bp->b_bcount);
	}
	ed->sc_flags &= ~EDF_BOUNCEBUF;

	/* Unload buf from DMA map */
	if (ed->sc_flags & EDF_DMAMAP_LOADED) {
		bus_dmamap_unload(ed->sc_dmat, ed->dmamap_xfer);
		ed->sc_flags &= ~EDF_DMAMAP_LOADED;
	}

	/* If disk was busied, unbusy it now */
	if (ed->sc_flags & EDF_DK_BUSY) {
		disk_unbusy(&ed->sc_dk, (bp->b_bcount - bp->b_resid));
		ed->sc_flags &= ~EDF_DK_BUSY;
		mca_disk_unbusy();
	}

	ed->sc_flags &= ~EDF_IODONE;

#if NRND > 0
	rnd_add_uint32(&ed->rnd_source, bp->b_blkno);
#endif
	biodone(bp);
}

int
edmcaread(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{
	WDCDEBUG_PRINT(("edread\n"), DEBUG_XFERS);
	return (physio(edmcastrategy, NULL, dev, B_READ, minphys, uio));
}

int
edmcawrite(dev, uio, flags)
	dev_t dev;
	struct uio *uio;
	int flags;
{
	WDCDEBUG_PRINT(("edwrite\n"), DEBUG_XFERS);
	return (physio(edmcastrategy, NULL, dev, B_WRITE, minphys, uio));
}

/*
 * Wait interruptibly for an exclusive lock.
 */
static int
ed_lock(ed)
	struct ed_softc *ed;
{
	int error;
	int s;

	WDCDEBUG_PRINT(("ed_lock\n"), DEBUG_FUNCS);

	s = splbio();
	error = lockmgr(&ed->sc_lock, LK_EXCLUSIVE, NULL);
	splx(s);

	return (error);
}

/*
 * Unlock and wake up any waiters.
 */
static void
ed_unlock(ed)
	struct ed_softc *ed;
{
	WDCDEBUG_PRINT(("ed_unlock\n"), DEBUG_FUNCS);

	(void) lockmgr(&ed->sc_lock, LK_RELEASE, NULL);
}

int
edmcaopen(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
	struct proc *p;
{
	struct ed_softc *wd;
	int part, error;

	WDCDEBUG_PRINT(("edopen\n"), DEBUG_FUNCS);
	wd = device_lookup(&ed_cd, DISKUNIT(dev));
	if (wd == NULL)
		return (ENXIO);

	if ((error = ed_lock(wd)) != 0)
		goto bad4;

	if (wd->sc_dk.dk_openmask != 0) {
		/*
		 * If any partition is open, but the disk has been invalidated,
		 * disallow further opens.
		 */
		if ((wd->sc_flags & WDF_LOADED) == 0) {
			error = EIO;
			goto bad3;
		}
	} else {
		if ((wd->sc_flags & WDF_LOADED) == 0) {
			wd->sc_flags |= WDF_LOADED;

			/* Load the physical device parameters. */
			ed_get_params(wd);

			/* Load the partition info if not already loaded. */
			edgetdisklabel(wd);
		}
	}

	part = DISKPART(dev);

	/* Check that the partition exists. */
	if (part != RAW_PART &&
	    (part >= wd->sc_dk.dk_label->d_npartitions ||
	     wd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
		error = ENXIO;
		goto bad;
	}

	/* Ensure only one open at a time. */
	switch (fmt) {
	case S_IFCHR:
		wd->sc_dk.dk_copenmask |= (1 << part);
		break;
	case S_IFBLK:
		wd->sc_dk.dk_bopenmask |= (1 << part);
		break;
	}
	wd->sc_dk.dk_openmask =
	    wd->sc_dk.dk_copenmask | wd->sc_dk.dk_bopenmask;

	ed_unlock(wd);
	return 0;

bad:
	if (wd->sc_dk.dk_openmask == 0) {
	}

bad3:
	ed_unlock(wd);
bad4:
	return (error);
}

int
edmcaclose(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
	struct proc *p;
{
	struct ed_softc *wd = device_lookup(&ed_cd, DISKUNIT(dev));
	int part = DISKPART(dev);
	int error;

	WDCDEBUG_PRINT(("edmcaclose\n"), DEBUG_FUNCS);
	if ((error = ed_lock(wd)) != 0)
		return error;

	switch (fmt) {
	case S_IFCHR:
		wd->sc_dk.dk_copenmask &= ~(1 << part);
		break;
	case S_IFBLK:
		wd->sc_dk.dk_bopenmask &= ~(1 << part);
		break;
	}
	wd->sc_dk.dk_openmask =
	    wd->sc_dk.dk_copenmask | wd->sc_dk.dk_bopenmask;

	if (wd->sc_dk.dk_openmask == 0) {
#if 0
		wd_flushcache(wd, AT_WAIT);
#endif
		/* XXXX Must wait for I/O to complete! */

		if (! (wd->sc_flags & WDF_KLABEL))
			wd->sc_flags &= ~WDF_LOADED;
	}

	ed_unlock(wd);

	return 0;
}

static void
edgetdefaultlabel(wd, lp)
	struct ed_softc *wd;
	struct disklabel *lp;
{
	WDCDEBUG_PRINT(("edgetdefaultlabel\n"), DEBUG_FUNCS);
	memset(lp, 0, sizeof(struct disklabel));

	lp->d_secsize = DEV_BSIZE;
	lp->d_ntracks = wd->heads;
	lp->d_nsectors = wd->sectors;
	lp->d_ncylinders = wd->cyl;
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;

	lp->d_type = DTYPE_ESDI;

	strncpy(lp->d_typename, "ESDI", 16);
	strncpy(lp->d_packname, "fictitious", 16);
	lp->d_secperunit = wd->sc_capacity;
	lp->d_rpm = 3600;
	lp->d_interleave = 1;
	lp->d_flags = 0;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size =
	    lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(lp);
}

/*
 * Fabricate a default disk label, and try to read the correct one.
 */
static void
edgetdisklabel(wd)
	struct ed_softc *wd;
{
	struct disklabel *lp = wd->sc_dk.dk_label;
	char *errstring;

	WDCDEBUG_PRINT(("edgetdisklabel\n"), DEBUG_FUNCS);

	memset(wd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));

	edgetdefaultlabel(wd, lp);

#if 0
	wd->sc_badsect[0] = -1;

	if (wd->drvp->state > RECAL)
		wd->drvp->drive_flags |= DRIVE_RESET;
#endif
	errstring = readdisklabel(MAKEDISKDEV(0, wd->sc_dev.dv_unit, RAW_PART),
	    edmcastrategy, lp, wd->sc_dk.dk_cpulabel);
	if (errstring) {
		/*
		 * This probably happened because the drive's default
		 * geometry doesn't match the DOS geometry.  We
		 * assume the DOS geometry is now in the label and try
		 * again.  XXX This is a kluge.
		 */
#if 0
		if (wd->drvp->state > RECAL)
			wd->drvp->drive_flags |= DRIVE_RESET;
#endif
		errstring = readdisklabel(MAKEDISKDEV(0, wd->sc_dev.dv_unit,
		    RAW_PART), edmcastrategy, lp, wd->sc_dk.dk_cpulabel);
	}
	if (errstring) {
		printf("%s: %s\n", wd->sc_dev.dv_xname, errstring);
		return;
	}

#if 0
	if (wd->drvp->state > RECAL)
		wd->drvp->drive_flags |= DRIVE_RESET;
#endif
#ifdef HAS_BAD144_HANDLING
	if ((lp->d_flags & D_BADSECT) != 0)
		bad144intern(wd);
#endif
}

int
edmcaioctl(dev, xfer, addr, flag, p)
	dev_t dev;
	u_long xfer;
	caddr_t addr;
	int flag;
	struct proc *p;
{
	struct ed_softc *wd = device_lookup(&ed_cd, DISKUNIT(dev));
	int error;
#ifdef __HAVE_OLD_DISKLABEL
	struct disklabel newlabel;
#endif

	WDCDEBUG_PRINT(("edioctl\n"), DEBUG_FUNCS);

	if ((wd->sc_flags & WDF_LOADED) == 0)
		return EIO;

	switch (xfer) {
#ifdef HAS_BAD144_HANDLING
	case DIOCSBAD:
		if ((flag & FWRITE) == 0)
			return EBADF;
		wd->sc_dk.dk_cpulabel->bad = *(struct dkbad *)addr;
		wd->sc_dk.dk_label->d_flags |= D_BADSECT;
		bad144intern(wd);
		return 0;
#endif

	case DIOCGDINFO:
		*(struct disklabel *)addr = *(wd->sc_dk.dk_label);
		return 0;
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
		newlabel = *(wd->sc_dk.dk_label);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
		return 0;
#endif

	case DIOCGPART:
		((struct partinfo *)addr)->disklab = wd->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &wd->sc_dk.dk_label->d_partitions[DISKPART(dev)];
		return 0;

	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:
#endif
	{
		struct disklabel *lp;

#ifdef __HAVE_OLD_DISKLABEL
		if (xfer == ODIOCSDINFO || xfer == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, addr, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)addr;

		if ((flag & FWRITE) == 0)
			return EBADF;

		if ((error = ed_lock(wd)) != 0)
			return error;
		wd->sc_flags |= WDF_LABELLING;

		error = setdisklabel(wd->sc_dk.dk_label,
		    lp, /*wd->sc_dk.dk_openmask : */0,
		    wd->sc_dk.dk_cpulabel);
		if (error == 0) {
#if 0
			if (wd->drvp->state > RECAL)
				wd->drvp->drive_flags |= DRIVE_RESET;
#endif
			if (xfer == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
			    || xfer == ODIOCWDINFO
#endif
			    )
				error = writedisklabel(EDLABELDEV(dev),
				    edmcastrategy, wd->sc_dk.dk_label,
				    wd->sc_dk.dk_cpulabel);
		}

		wd->sc_flags &= ~WDF_LABELLING;
		ed_unlock(wd);
		return error;
	}

	case DIOCKLABEL:
		if (*(int *)addr)
			wd->sc_flags |= WDF_KLABEL;
		else
			wd->sc_flags &= ~WDF_KLABEL;
		return 0;

	case DIOCWLABEL:
		if ((flag & FWRITE) == 0)
			return EBADF;
		if (*(int *)addr)
			wd->sc_flags |= WDF_WLABEL;
		else
			wd->sc_flags &= ~WDF_WLABEL;
		return 0;

	case DIOCGDEFLABEL:
		edgetdefaultlabel(wd, (struct disklabel *)addr);
		return 0;
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		edgetdefaultlabel(wd, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
		return 0;
#endif

#ifdef notyet
	case DIOCWFORMAT:
		if ((flag & FWRITE) == 0)
			return EBADF;
	{
		register struct format_op *fop;
		struct iovec aiov;
		struct uio auio;

		fop = (struct format_op *)addr;
		aiov.iov_base = fop->df_buf;
		aiov.iov_len = fop->df_count;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = fop->df_count;
		auio.uio_segflg = 0;
		auio.uio_offset =
		    fop->df_startblk * wd->sc_dk.dk_label->d_secsize;
		auio.uio_procp = p;
		error = physio(wdformat, NULL, dev, B_WRITE, minphys,
		    &auio);
		fop->df_count -= auio.uio_resid;
		fop->df_reg[0] = wdc->sc_status;
		fop->df_reg[1] = wdc->sc_error;
		return error;
	}
#endif

	default:
		return ENOTTY;
	}

#ifdef DIAGNOSTIC
	panic("edioctl: impossible");
#endif
}

#if 0
#ifdef B_FORMAT
int
edmcaformat(struct buf *bp)
{

	bp->b_flags |= B_FORMAT;
	return edmcastrategy(bp);
}
#endif
#endif

int
edmcasize(dev)
	dev_t dev;
{
	struct ed_softc *wd;
	int part, omask;
	int size;

	WDCDEBUG_PRINT(("edsize\n"), DEBUG_FUNCS);

	wd = device_lookup(&ed_cd, DISKUNIT(dev));
	if (wd == NULL)
		return (-1);

	part = DISKPART(dev);
	omask = wd->sc_dk.dk_openmask & (1 << part);

	if (omask == 0 && edmcaopen(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);
	if (wd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = wd->sc_dk.dk_label->d_partitions[part].p_size *
		    (wd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
	if (omask == 0 && edmcaclose(dev, 0, S_IFBLK, NULL) != 0)
		return (-1);
	return (size);
}

/* #define WD_DUMP_NOT_TRUSTED if you just want to watch */
static int wddoingadump = 0;
static int wddumprecalibrated = 0;

/*
 * Dump core after a system crash.
 */
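/*
 * XXX The actual device transfer below is still disabled (#if 0); the
 * routine only validates the request and ends up returning ESPIPE.
 */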
int
edmcadump(dev, blkno, va, size)
	dev_t dev;
	daddr_t blkno;
	caddr_t va;
	size_t size;
{
	struct ed_softc *wd;	/* disk unit to do the I/O */
	struct disklabel *lp;	/* disk's disklabel */
	int part;		/* , err */
	int nblks;		/* total number of sectors left to write */

	/* Check if recursive dump; if so, punt. */
	if (wddoingadump)
		return EFAULT;
	wddoingadump = 1;

	wd = device_lookup(&ed_cd, DISKUNIT(dev));
	if (wd == NULL)
		return (ENXIO);

	part = DISKPART(dev);

#if 0
	/* Make sure it was initialized. */
	if (wd->drvp->state < READY)
		return ENXIO;
#endif

	/* Convert to disk sectors.  The request must be a multiple of the sector size. */
	lp = wd->sc_dk.dk_label;
	if ((size % lp->d_secsize) != 0)
		return EFAULT;
	nblks = size / lp->d_secsize;
	blkno = blkno / (lp->d_secsize / DEV_BSIZE);

	/* Check transfer bounds against partition size. */
	if ((blkno < 0) || ((blkno + nblks) > lp->d_partitions[part].p_size))
		return EINVAL;

	/* Offset block number to start of partition. */
	blkno += lp->d_partitions[part].p_offset;

	/* Recalibrate, if first dump transfer. */
	if (wddumprecalibrated == 0) {
		wddumprecalibrated = 1;
#if 0
		wd->drvp->state = RESET;
#endif
	}

	while (nblks > 0) {
#if 0
		wd->sc_wdc_bio.blkno = blkno;
		wd->sc_wdc_bio.flags = ATA_POLL;
		wd->sc_wdc_bio.bcount = lp->d_secsize;
		wd->sc_wdc_bio.databuf = va;
#ifndef WD_DUMP_NOT_TRUSTED
		switch (wdc_ata_bio(wd->drvp, &wd->sc_wdc_bio)) {
		case WDC_TRY_AGAIN:
			panic("wddump: try again");
			break;
		case WDC_QUEUED:
			panic("wddump: polled command has been queued");
			break;
		case WDC_COMPLETE:
			break;
		}
		if (err != 0) {
			printf("\n");
			return err;
		}
#else	/* WD_DUMP_NOT_TRUSTED */
		/* Let's just talk about this first... */
		printf("ed%d: dump addr 0x%x, cylin %d, head %d, sector %d\n",
		    unit, va, cylin, head, sector);
		delay(500 * 1000);	/* half a second */
#endif
#endif	/* 0 */

		/* update block count */
		nblks -= 1;
		blkno += 1;
		va += lp->d_secsize;
	}

	wddoingadump = 0;
	return (ESPIPE);
}

#ifdef HAS_BAD144_HANDLING
/*
 * Internalize the bad sector table.
 */
static void
bad144intern(wd)
	struct ed_softc *wd;
{
	struct dkbad *bt = &wd->sc_dk.dk_cpulabel->bad;
	struct disklabel *lp = wd->sc_dk.dk_label;
	int i = 0;

	WDCDEBUG_PRINT(("bad144intern\n"), DEBUG_XFERS);

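	/*
	 * Each entry gives the cylinder, track and sector of a bad block;
	 * the table is terminated by a cylinder number of 0xffff.  Convert
	 * each entry to an absolute sector number.
	 */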
	for (; i < NBT_BAD; i++) {
		if (bt->bt_bad[i].bt_cyl == 0xffff)
			break;
		wd->sc_badsect[i] =
		    bt->bt_bad[i].bt_cyl * lp->d_secpercyl +
		    (bt->bt_bad[i].bt_trksec >> 8) * lp->d_nsectors +
		    (bt->bt_bad[i].bt_trksec & 0xff);
	}
	for (; i < NBT_BAD+1; i++)
		wd->sc_badsect[i] = -1;
}
#endif

static int
ed_get_params(ed)
	struct ed_softc *ed;
{
	u_int16_t cmd_args[2];

	/*
	 * Get Device Configuration (09).
	 */
	cmd_args[0] = 6;	/* Options: 00s110, s: 0=Physical 1=Pseudo */
	cmd_args[1] = 0;
	if (edc_run_cmd(ed->edc_softc, CMD_GET_DEV_CONF, ed->sc_devno, cmd_args, 2, 0))
		return (1);

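	/*
	 * Parse the returned status block: word 1 holds the spares-per-
	 * cylinder count and the drive flag bits, words 2 and 3 the
	 * relative block address (RBA) count.
	 */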
	ed->spares = ed->sc_status_block[1] >> 8;
	ed->drv_flags = ed->sc_status_block[1] & 0x1f;
	ed->rba = ed->sc_status_block[2] |
	    (ed->sc_status_block[3] << 16);
	/* Instead of using:
	 *	ed->cyl = ed->sc_status_block[4];
	 *	ed->heads = ed->sc_status_block[5] & 0xff;
	 *	ed->sectors = ed->sc_status_block[5] >> 8;
	 * we fabricate the geometry from the RBA count, so that the
	 * number of sectors is 32 and the number of heads is 64.  This
	 * seems to be necessary for the integrated ESDI controller.
	 */
	ed->sectors = 32;
	ed->heads = 64;
	ed->cyl = ed->rba / (ed->heads * ed->sectors);
	ed->sc_capacity = ed->rba;

	return (0);
}

/*
 * Our shutdown hook.  We only attempt to park the disk's heads.
 */
void
ed_shutdown(arg)
	void *arg;
{
#if 0
	struct ed_softc *ed = arg;
	u_int16_t cmd_args[2];

	/* Issue Park Head command */
	cmd_args[0] = 6;	/* Options: 000110 */
	cmd_args[1] = 0;
	(void) edc_run_cmd(ed->edc_softc, CMD_PARK_HEAD, ed->sc_devno,
	    cmd_args, 2, 0);
#endif
}

/*
 * Main worker thread function.
 */
void
edworker(arg)
	void *arg;
{
	struct ed_softc *ed = (struct ed_softc *) arg;
	struct buf *bp;
	int s;

	config_pending_decr();

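	/*
	 * Loop forever: sleep on sc_q until edmcastrategy() queues a buf
	 * and wakes us, then drain the queue, starting each transfer and
	 * waiting for the controller to finish before completing the buf.
	 */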
	for(;;) {
		/* Wait until awakened */
		(void) tsleep(&ed->sc_q, PRIBIO, "edidle", 0);

		if ((ed->sc_flags & EDF_PROCESS_QUEUE) == 0)
			panic("edworker: expecting process queue");
		ed->sc_flags &= ~EDF_PROCESS_QUEUE;

		for(;;) {
			/* Is there a buf for us ? */
			simple_lock(&ed->sc_q_lock);
			if ((bp = BUFQ_FIRST(&ed->sc_q)) == NULL) {
				simple_unlock(&ed->sc_q_lock);
				break;
			}
			BUFQ_REMOVE(&ed->sc_q, bp);
			simple_unlock(&ed->sc_q_lock);

			/* Schedule i/o operation */
			ed->sc_error = 0;
			s = splbio();
			__edstart(ed, bp);
			splx(s);

			/*
			 * Wait until the command executes; edc_intr() wakes
			 * us up.
			 */
			if (ed->sc_error == 0
			    && (ed->sc_flags & EDF_IODONE) == 0) {
				(void)tsleep(&ed->edc_softc, PRIBIO, "edwrk",0);
				edc_cmd_wait(ed->edc_softc, ed->sc_devno, 5);
			}

			/* Handle i/o results */
			s = splbio();
			edmcadone(ed);
			splx(s);
		}
	}
}