      1 /*	$NetBSD: ed_mca.c,v 1.8 2001/09/03 18:13:13 sommerfeld Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001 The NetBSD Foundation, Inc.
      5  *
      6  * This code is derived from software contributed to The NetBSD Foundation
      7  * by Jaromir Dolecek.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *        This product includes software developed by the NetBSD
     20  *        Foundation, Inc. and its contributors.
     21  * 4. The name of the author may not be used to endorse or promote products
     22  *    derived from this software without specific prior written permission.
     23  *
     24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     25  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     26  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     27  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     28  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     29  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     30  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     31  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     32  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     33  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     34  */
     35 
     36 /*
     37  * Disk goo for MCA ESDI controller driver.
     38  */
     39 
     40 #include "rnd.h"
     41 
     42 #include <sys/param.h>
     43 #include <sys/systm.h>
     44 #include <sys/kernel.h>
     45 #include <sys/conf.h>
     46 #include <sys/file.h>
     47 #include <sys/stat.h>
     48 #include <sys/ioctl.h>
     49 #include <sys/buf.h>
     50 #include <sys/uio.h>
     51 #include <sys/malloc.h>
     52 #include <sys/device.h>
     53 #include <sys/disklabel.h>
     54 #include <sys/disk.h>
     55 #include <sys/syslog.h>
     56 #include <sys/proc.h>
     57 #include <sys/vnode.h>
     58 #include <sys/kthread.h>
     59 #if NRND > 0
     60 #include <sys/rnd.h>
     61 #endif
     62 
     63 #include <machine/intr.h>
     64 #include <machine/bus.h>
     65 
     66 #include <dev/mca/mcavar.h>
     67 
     68 #include <dev/mca/edcreg.h>
     69 #include <dev/mca/edvar.h>
     70 #include <dev/mca/edcvar.h>
     71 
     72 /* #define WDCDEBUG */
     73 
     74 #ifdef WDCDEBUG
     75 #define WDCDEBUG_PRINT(args, level)  printf args
     76 #else
     77 #define WDCDEBUG_PRINT(args, level)
     78 #endif
     79 
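/* The device used to access the on-disk label: the raw partition of the unit. */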
     80 #define	EDLABELDEV(dev) (MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART))
     81 
     82 /* XXX: these should go elsewhere */
     83 cdev_decl(edmca);
     84 bdev_decl(edmca);
     85 
     86 static int     ed_mca_probe   __P((struct device *, struct cfdata *, void *));
     87 static void    ed_mca_attach  __P((struct device *, struct device *, void *));
     88 
     89 struct cfattach ed_mca_ca = {
     90 	sizeof(struct ed_softc), ed_mca_probe, ed_mca_attach
     91 };
     92 
     93 extern struct cfdriver ed_cd;
     94 
     95 static int	ed_get_params __P((struct ed_softc *));
     96 static int	ed_lock	__P((struct ed_softc *));
     97 static void	ed_unlock	__P((struct ed_softc *));
     98 static void	edgetdisklabel	__P((struct ed_softc *));
     99 static void	edgetdefaultlabel __P((struct ed_softc *, struct disklabel *));
    100 static void	ed_shutdown __P((void*));
    101 static void	__edstart __P((struct ed_softc*, struct buf *));
    102 static void	bad144intern __P((struct ed_softc *));
    103 static void	edworker __P((void *));
    104 static void	ed_spawn_worker __P((void *));
    105 static void	edmcadone __P((struct ed_softc *, struct buf *));
    106 static void	ed_bio __P((struct ed_softc *, int, int));
    107 static void	ed_bio_done __P((struct ed_softc *));
    108 
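/* disk(9) driver glue; only the strategy routine is filled in here. */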
    109 static struct dkdriver eddkdriver = { edmcastrategy };
    110 
    111 /*
    112  * Just check if it's possible to identify the disk.
    113  */
    114 static int
    115 ed_mca_probe(parent, match, aux)
    116 	struct device *parent;
    117 	struct cfdata *match;
    118 	void *aux;
    119 {
    120 	u_int16_t cmd_args[2];
    121 	struct edc_mca_softc *sc = (void *) parent;
    122 	struct ed_attach_args *eda = (void *) aux;
    123 	int found = 1;
    124 
    125 	/*
    126 	 * Get Device Configuration (09).
    127 	 */
    128 	cmd_args[0] = 14;	/* Options: 00s110, s: 0=Physical 1=Pseudo */
    129 	cmd_args[1] = 0;
    130 	if (edc_run_cmd(sc, CMD_GET_DEV_CONF, eda->sc_devno, cmd_args, 2, 0, 1))
    131 		found = 0;
    132 
    133 	return (found);
    134 }
    135 
    136 static void
    137 ed_mca_attach(parent, self, aux)
    138 	struct device *parent, *self;
    139 	void *aux;
    140 {
    141 	struct ed_softc *ed = (void *) self;
    142 	struct edc_mca_softc *sc = (void *) parent;
    143 	struct ed_attach_args *eda = (void *) aux;
    144 	char pbuf[8];
    145 	int error, nsegs;
    146 
    147 	ed->edc_softc = sc;
    148 	ed->sc_dmat = eda->sc_dmat;
    149 	ed->sc_devno = eda->sc_devno;
    150 	edc_add_disk(sc, ed, eda->sc_devno);
    151 
    152 	BUFQ_INIT(&ed->sc_q);
    153 	simple_lock_init(&ed->sc_q_lock);
    154 	lockinit(&ed->sc_lock, PRIBIO | PCATCH, "edlck", 0, 0);
    155 
    156 	if (ed_get_params(ed)) {
		printf(": GET DEVICE CONFIGURATION failed, no disk found\n");
    158 		return;
    159 	}
    160 
    161 	format_bytes(pbuf, sizeof(pbuf),
    162 		(u_int64_t) ed->sc_capacity * DEV_BSIZE);
    163 	printf(": %s, %u cyl, %u head, %u sec, 512 bytes/sect x %u sectors\n",
    164 		pbuf,
    165 		ed->cyl, ed->heads, ed->sectors,
    166 		ed->sc_capacity);
    167 
    168 	printf("%s: %u spares/cyl, %s, %s, %s, %s, %s\n",
    169 		ed->sc_dev.dv_xname, ed->spares,
    170 		(ed->drv_flags & (1 << 0)) ? "NoRetries" : "Retries",
    171 		(ed->drv_flags & (1 << 1)) ? "Removable" : "Fixed",
    172 		(ed->drv_flags & (1 << 2)) ? "SkewedFormat" : "NoSkew",
    173 		(ed->drv_flags & (1 << 3)) ? "ZeroDefect" : "Defects",
    174 		(ed->drv_flags & (1 << 4)) ? "InvalidSecondary" : "SecondaryOK"
    175 		);
    176 
    177 	/* Create a DMA map for mapping individual transfer bufs */
    178 	if ((error = bus_dmamap_create(ed->sc_dmat, 65536, 1,
    179 		65536, 65536, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
    180 		&ed->dmamap_xfer)) != 0) {
    181 		printf("%s: unable to create xfer DMA map, error=%d\n",
    182 			ed->sc_dev.dv_xname, error);
    183 		return;
    184 	}
    185 
	/*
	 * Allocate DMA-safe memory to use as a bounce buffer when the
	 * buf passed in isn't physically contiguous.
	 */
    190 	ed->sc_dmam_sz = MAXPHYS;
    191 	if ((error = bus_dmamem_alloc(ed->sc_dmat, ed->sc_dmam_sz,
    192 		ed->sc_dmam_sz, 65536, ed->sc_dmam, 1, &nsegs,
    193 		BUS_DMA_WAITOK|BUS_DMA_STREAMING)) != 0) {
    194 		printf("%s: unable to allocate DMA memory for xfer, errno=%d\n",
    195 				ed->sc_dev.dv_xname, error);
    196 		bus_dmamap_destroy(ed->sc_dmat, ed->dmamap_xfer);
    197 		return;
    198 	}
    199 	/*
    200 	 * Map the memory.
    201 	 */
    202 	if ((error = bus_dmamem_map(ed->sc_dmat, ed->sc_dmam, 1,
    203 		ed->sc_dmam_sz, &ed->sc_dmamkva, BUS_DMA_WAITOK)) != 0) {
    204 		printf("%s: unable to map DMA memory, error=%d\n",
    205 			ed->sc_dev.dv_xname, error);
    206 		bus_dmamem_free(ed->sc_dmat, ed->sc_dmam, 1);
    207 		bus_dmamap_destroy(ed->sc_dmat, ed->dmamap_xfer);
    208 		return;
    209 	}
    210 
    211 
    212 	/*
    213 	 * Initialize and attach the disk structure.
    214 	 */
    215 	ed->sc_dk.dk_driver = &eddkdriver;
    216 	ed->sc_dk.dk_name = ed->sc_dev.dv_xname;
    217 	disk_attach(&ed->sc_dk);
    218 #if 0
    219 	wd->sc_wdc_bio.lp = wd->sc_dk.dk_label;
    220 #endif
    221 	ed->sc_sdhook = shutdownhook_establish(ed_shutdown, ed);
    222 	if (ed->sc_sdhook == NULL)
    223 		printf("%s: WARNING: unable to establish shutdown hook\n",
    224 		    ed->sc_dev.dv_xname);
    225 #if NRND > 0
    226 	rnd_attach_source(&ed->rnd_source, ed->sc_dev.dv_xname,
    227 			  RND_TYPE_DISK, 0);
    228 #endif
    229 
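	/*
	 * The worker thread cannot be created until kernel threads are
	 * available, so kthread_create() arranges for ed_spawn_worker()
	 * to run later; config_pending_incr() keeps autoconfiguration
	 * pending until the worker thread starts and decrements it.
	 */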
    230 	config_pending_incr();
    231 	kthread_create(ed_spawn_worker, (void *) ed);
    232 
    233 	ed->sc_flags |= EDF_INIT;
    234 }
    235 
static void
    237 ed_spawn_worker(arg)
    238 	void *arg;
    239 {
    240 	struct ed_softc *ed = (struct ed_softc *) arg;
    241 	int error;
    242 
    243 	/* Now, everything is ready, start a kthread */
    244 	if ((error = kthread_create1(edworker, ed, &ed->sc_worker,
    245 			"%s", ed->sc_dev.dv_xname))) {
    246 		printf("%s: cannot spawn worker thread: errno=%d\n",
    247 			ed->sc_dev.dv_xname, error);
    248 		panic("ed_spawn_worker");
    249 	}
    250 }
    251 
    252 /*
    253  * Read/write routine for a buffer.  Validates the arguments and schedules the
    254  * transfer.  Does not wait for the transfer to complete.
    255  */
    256 void
    257 edmcastrategy(bp)
    258 	struct buf *bp;
    259 {
    260 	struct ed_softc *wd = device_lookup(&ed_cd, DISKUNIT(bp->b_dev));
    261 	struct disklabel *lp = wd->sc_dk.dk_label;
    262 	daddr_t blkno;
    263 	int s;
    264 
    265 	WDCDEBUG_PRINT(("edmcastrategy (%s)\n", wd->sc_dev.dv_xname),
    266 	    DEBUG_XFERS);
    267 
    268 	/* Valid request?  */
    269 	if (bp->b_blkno < 0 ||
    270 	    (bp->b_bcount % lp->d_secsize) != 0 ||
    271 	    (bp->b_bcount / lp->d_secsize) >= (1 << NBBY)) {
    272 		bp->b_error = EINVAL;
    273 		goto bad;
    274 	}
    275 
    276 	/* If device invalidated (e.g. media change, door open), error. */
    277 	if ((wd->sc_flags & WDF_LOADED) == 0) {
    278 		bp->b_error = EIO;
    279 		goto bad;
    280 	}
    281 
    282 	/* If it's a null transfer, return immediately. */
    283 	if (bp->b_bcount == 0)
    284 		goto done;
    285 
	/*
	 * Do bounds checking and adjust the transfer if necessary.
	 * If the check fails or the request is entirely past the end
	 * of the partition, the transfer is finished below.
	 */
    290 	if (DISKPART(bp->b_dev) != RAW_PART &&
    291 	    bounds_check_with_label(bp, wd->sc_dk.dk_label,
    292 	    (wd->sc_flags & (WDF_WLABEL|WDF_LABELLING)) != 0) <= 0)
    293 		goto done;
    294 
    295 	/*
    296 	 * Now convert the block number to absolute and put it in
    297 	 * terms of the device's logical block size.
    298 	 */
    299 	if (lp->d_secsize >= DEV_BSIZE)
    300 		blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
    301 	else
    302 		blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);
    303 
    304 	if (DISKPART(bp->b_dev) != RAW_PART)
    305 		blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset;
    306 
    307 	bp->b_rawblkno = blkno;
    308 
    309 	/* Queue transfer on drive, activate drive and controller if idle. */
    310 	s = splbio();
    311 	simple_lock(&wd->sc_q_lock);
    312 	disksort_blkno(&wd->sc_q, bp);
    313 	simple_unlock(&wd->sc_q_lock);
    314 
    315 	/* Ring the worker thread */
    316 	wd->sc_flags |= EDF_PROCESS_QUEUE;
    317 	wakeup_one(&wd->sc_q);
    318 
    319 	splx(s);
    320 	return;
    321 bad:
    322 	bp->b_flags |= B_ERROR;
    323 done:
    324 	/* Toss transfer; we're done early. */
    325 	bp->b_resid = bp->b_bcount;
    326 	biodone(bp);
    327 }
    328 
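/*
 * Issue a single data transfer using the parameters already stored in
 * the softc (sc_data, sc_rawblkno, sc_bcount, sc_read).  The async and
 * poll flags are passed through to edc_run_cmd(); any error is recorded
 * in sc_error for the caller to examine.
 */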
    329 static void
    330 ed_bio(struct ed_softc *ed, int async, int poll)
    331 {
    332 	u_int16_t cmd_args[4];
    333 	int error=0;
    334 	u_int16_t track;
    335 	u_int16_t cyl;
    336 	u_int8_t head;
    337 	u_int8_t sector;
    338 
    339 	/* Get physical bus mapping for buf. */
    340 	if (bus_dmamap_load(ed->sc_dmat, ed->dmamap_xfer,
    341 			ed->sc_data, ed->sc_bcount, NULL,
    342 			BUS_DMA_WAITOK|BUS_DMA_STREAMING) != 0) {
    343 
    344 		/*
    345 		 * Use our DMA safe memory to get data to/from device.
    346 		 */
    347 		if ((error = bus_dmamap_load(ed->sc_dmat, ed->dmamap_xfer,
    348 			ed->sc_dmamkva, ed->sc_bcount, NULL,
    349 			BUS_DMA_WAITOK|BUS_DMA_STREAMING)) != 0) {
    350 			printf("%s: unable to load raw data for xfer, errno=%d\n",
    351 				ed->sc_dev.dv_xname, error);
    352 			goto out;
    353 		}
    354 		ed->sc_flags |= EDF_BOUNCEBUF;
    355 
    356 		/* If data write, copy the data to our bounce buffer. */
    357 		if (!ed->sc_read)
    358 			memcpy(ed->sc_dmamkva, ed->sc_data, ed->sc_bcount);
    359 	}
    360 
    361 	ed->sc_flags |= EDF_DMAMAP_LOADED;
    362 
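	/*
	 * Convert the absolute block number to cylinder/head/sector:
	 * the sector number varies fastest, then the head, then the cylinder.
	 */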
    363 	track = ed->sc_rawblkno / ed->sectors;
    364 	head = track % ed->heads;
    365 	cyl = track / ed->heads;
    366 	sector = ed->sc_rawblkno % ed->sectors;
    367 
	WDCDEBUG_PRINT(("ed_bio %s: map: %u %u %u\n", ed->sc_dev.dv_xname,
    369 		cyl, sector, head),
    370 	    DEBUG_XFERS);
    371 
    372 	mca_disk_busy();
    373 
    374 	/* Read or Write Data command */
    375 	cmd_args[0] = 2;	/* Options 0000010 */
    376 	cmd_args[1] = ed->sc_bcount / DEV_BSIZE;
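	/*
	 * Pack the CHS address: bits 0-4 hold the sector, bits 5-10 the
	 * head, bits 11-15 the low five cylinder bits; the remaining
	 * cylinder bits go into the next command word.
	 */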
    377 	cmd_args[2] = ((cyl & 0x1f) << 11) | (head << 5) | sector;
    378 	cmd_args[3] = ((cyl & 0x3E0) >> 5);
    379 	if (edc_run_cmd(ed->edc_softc,
    380 			(ed->sc_read) ? CMD_READ_DATA : CMD_WRITE_DATA,
    381 			ed->sc_devno, cmd_args, 4, async, poll)) {
    382 		printf("%s: data i/o command failed\n", ed->sc_dev.dv_xname);
    383 		mca_disk_unbusy();
    384 		error = EIO;
    385 	}
    386 
    387     out:
    388 	if (error)
    389 		ed->sc_error = error;
    390 }
    391 
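/*
 * Start the i/o described by the given buf: mark the disk busy, copy
 * the transfer parameters into the softc and fire off ed_bio()
 * asynchronously.  Completion is handled by the worker thread.
 */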
    392 static void
    393 __edstart(ed, bp)
    394 	struct ed_softc *ed;
    395 	struct buf *bp;
    396 {
    397 	WDCDEBUG_PRINT(("__edstart %s (%s): %lu %lu %u\n", ed->sc_dev.dv_xname,
    398 		(bp->b_flags & B_READ) ? "read" : "write",
    399 		bp->b_bcount, bp->b_resid, bp->b_rawblkno),
    400 	    DEBUG_XFERS);
    401 
    402 	/* Instrumentation. */
    403 	disk_busy(&ed->sc_dk);
    404 	ed->sc_flags |= EDF_DK_BUSY;
    405 
    406 	ed->sc_data = bp->b_data;
    407 	ed->sc_rawblkno = bp->b_rawblkno;
    408 	ed->sc_bcount = bp->b_bcount;
    409 	ed->sc_read = bp->b_flags & B_READ;
    410 	ed_bio(ed, 1, 0);
    411 }
    412 
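/*
 * Clean up after a transfer started by ed_bio(): copy data back from
 * the bounce buffer if one was used, unload the DMA map and mark the
 * controller idle again.
 */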
    413 static void
    414 ed_bio_done(ed)
    415 	struct ed_softc *ed;
    416 {
    417 	/*
    418 	 * If read transfer finished without error and using a bounce
    419 	 * buffer, copy the data to buf.
    420 	 */
    421 	if (ed->sc_error == 0 && (ed->sc_flags & EDF_BOUNCEBUF) && ed->sc_read)
    422 		memcpy(ed->sc_data, ed->sc_dmamkva, ed->sc_bcount);
    423 	ed->sc_flags &= ~EDF_BOUNCEBUF;
    424 
    425 	/* Unload buf from DMA map */
    426 	if (ed->sc_flags & EDF_DMAMAP_LOADED) {
    427 		bus_dmamap_unload(ed->sc_dmat, ed->dmamap_xfer);
    428 		ed->sc_flags &= ~EDF_DMAMAP_LOADED;
    429 	}
    430 
    431 	mca_disk_unbusy();
    432 }
    433 
    434 static void
    435 edmcadone(ed, bp)
    436 	struct ed_softc *ed;
    437 	struct buf *bp;
    438 {
    439 	WDCDEBUG_PRINT(("eddone %s\n", ed->sc_dev.dv_xname),
    440 	    DEBUG_XFERS);
    441 
    442 	if (ed->sc_error) {
    443 		bp->b_error = ed->sc_error;
    444 		bp->b_flags |= B_ERROR;
    445 	} else {
    446 		/* Set resid, most commonly to zero. */
    447 		bp->b_resid = ed->sc_status_block[SB_RESBLKCNT_IDX] * DEV_BSIZE;
    448 	}
    449 
    450 	ed_bio_done(ed);
    451 
    452 	/* If disk was busied, unbusy it now */
    453 	if (ed->sc_flags & EDF_DK_BUSY) {
    454 		disk_unbusy(&ed->sc_dk, (bp->b_bcount - bp->b_resid));
    455 		ed->sc_flags &= ~EDF_DK_BUSY;
    456 	}
    457 
    458 #if NRND > 0
    459 	rnd_add_uint32(&ed->rnd_source, bp->b_blkno);
    460 #endif
    461 	biodone(bp);
    462 }
    463 
    464 int
    465 edmcaread(dev, uio, flags)
    466 	dev_t dev;
    467 	struct uio *uio;
    468 	int flags;
    469 {
    470 	WDCDEBUG_PRINT(("edread\n"), DEBUG_XFERS);
    471 	return (physio(edmcastrategy, NULL, dev, B_READ, minphys, uio));
    472 }
    473 
    474 int
    475 edmcawrite(dev, uio, flags)
    476 	dev_t dev;
    477 	struct uio *uio;
    478 	int flags;
    479 {
    480 	WDCDEBUG_PRINT(("edwrite\n"), DEBUG_XFERS);
    481 	return (physio(edmcastrategy, NULL, dev, B_WRITE, minphys, uio));
    482 }
    483 
    484 /*
    485  * Wait interruptibly for an exclusive lock.
    486  */
    487 static int
    488 ed_lock(ed)
    489 	struct ed_softc *ed;
    490 {
    491 	int error;
    492 	int s;
    493 
    494 	WDCDEBUG_PRINT(("ed_lock\n"), DEBUG_FUNCS);
    495 
    496 	s = splbio();
    497 	error = lockmgr(&ed->sc_lock, LK_EXCLUSIVE, NULL);
    498 	splx(s);
    499 
    500 	return (error);
    501 }
    502 
    503 /*
    504  * Unlock and wake up any waiters.
    505  */
    506 static void
    507 ed_unlock(ed)
    508 	struct ed_softc *ed;
    509 {
    510 	WDCDEBUG_PRINT(("ed_unlock\n"), DEBUG_FUNCS);
    511 
    512 	(void) lockmgr(&ed->sc_lock, LK_RELEASE, NULL);
    513 }
    514 
    515 int
    516 edmcaopen(dev, flag, fmt, p)
    517 	dev_t dev;
    518 	int flag, fmt;
    519 	struct proc *p;
    520 {
    521 	struct ed_softc *wd;
    522 	int part, error;
    523 
    524 	WDCDEBUG_PRINT(("edopen\n"), DEBUG_FUNCS);
    525 	wd = device_lookup(&ed_cd, DISKUNIT(dev));
    526 	if (wd == NULL || (wd->sc_flags & EDF_INIT) == 0)
    527 		return (ENXIO);
    528 
    529 	if ((error = ed_lock(wd)) != 0)
    530 		goto bad4;
    531 
    532 	if (wd->sc_dk.dk_openmask != 0) {
    533 		/*
    534 		 * If any partition is open, but the disk has been invalidated,
    535 		 * disallow further opens.
    536 		 */
    537 		if ((wd->sc_flags & WDF_LOADED) == 0) {
    538 			error = EIO;
    539 			goto bad3;
    540 		}
    541 	} else {
    542 		if ((wd->sc_flags & WDF_LOADED) == 0) {
    543 			wd->sc_flags |= WDF_LOADED;
    544 
    545 			/* Load the physical device parameters. */
    546 			ed_get_params(wd);
    547 
    548 			/* Load the partition info if not already loaded. */
    549 			edgetdisklabel(wd);
    550 		}
    551 	}
    552 
    553 	part = DISKPART(dev);
    554 
    555 	/* Check that the partition exists. */
    556 	if (part != RAW_PART &&
    557 	    (part >= wd->sc_dk.dk_label->d_npartitions ||
    558 	     wd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
    559 		error = ENXIO;
    560 		goto bad;
    561 	}
    562 
	/* Record this open in the appropriate per-format open mask. */
    564 	switch (fmt) {
    565 	case S_IFCHR:
    566 		wd->sc_dk.dk_copenmask |= (1 << part);
    567 		break;
    568 	case S_IFBLK:
    569 		wd->sc_dk.dk_bopenmask |= (1 << part);
    570 		break;
    571 	}
    572 	wd->sc_dk.dk_openmask =
    573 	    wd->sc_dk.dk_copenmask | wd->sc_dk.dk_bopenmask;
    574 
    575 	ed_unlock(wd);
    576 	return 0;
    577 
    578 bad:
    579 	if (wd->sc_dk.dk_openmask == 0) {
    580 	}
    581 
    582 bad3:
    583 	ed_unlock(wd);
    584 bad4:
    585 	return (error);
    586 }
    587 
    588 int
    589 edmcaclose(dev, flag, fmt, p)
    590 	dev_t dev;
    591 	int flag, fmt;
    592 	struct proc *p;
    593 {
    594 	struct ed_softc *wd = device_lookup(&ed_cd, DISKUNIT(dev));
    595 	int part = DISKPART(dev);
    596 	int error;
    597 
    598 	WDCDEBUG_PRINT(("edmcaclose\n"), DEBUG_FUNCS);
    599 	if ((error = ed_lock(wd)) != 0)
    600 		return error;
    601 
    602 	switch (fmt) {
    603 	case S_IFCHR:
    604 		wd->sc_dk.dk_copenmask &= ~(1 << part);
    605 		break;
    606 	case S_IFBLK:
    607 		wd->sc_dk.dk_bopenmask &= ~(1 << part);
    608 		break;
    609 	}
    610 	wd->sc_dk.dk_openmask =
    611 	    wd->sc_dk.dk_copenmask | wd->sc_dk.dk_bopenmask;
    612 
    613 	if (wd->sc_dk.dk_openmask == 0) {
    614 #if 0
    615 		wd_flushcache(wd, AT_WAIT);
    616 #endif
    617 		/* XXXX Must wait for I/O to complete! */
    618 
    619 		if (! (wd->sc_flags & WDF_KLABEL))
    620 			wd->sc_flags &= ~WDF_LOADED;
    621 	}
    622 
    623 	ed_unlock(wd);
    624 
    625 	return 0;
    626 }
    627 
    628 static void
    629 edgetdefaultlabel(wd, lp)
    630 	struct ed_softc *wd;
    631 	struct disklabel *lp;
    632 {
    633 	WDCDEBUG_PRINT(("edgetdefaultlabel\n"), DEBUG_FUNCS);
    634 	memset(lp, 0, sizeof(struct disklabel));
    635 
    636 	lp->d_secsize = DEV_BSIZE;
    637 	lp->d_ntracks = wd->heads;
    638 	lp->d_nsectors = wd->sectors;
    639 	lp->d_ncylinders = wd->cyl;
    640 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
    641 
    642 	lp->d_type = DTYPE_ESDI;
    643 
    644 	strncpy(lp->d_typename, "ESDI", 16);
    645 	strncpy(lp->d_packname, "fictitious", 16);
    646 	lp->d_secperunit = wd->sc_capacity;
    647 	lp->d_rpm = 3600;
    648 	lp->d_interleave = 1;
    649 	lp->d_flags = 0;
    650 
    651 	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size =
	    lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
    654 	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
    655 	lp->d_npartitions = RAW_PART + 1;
    656 
    657 	lp->d_magic = DISKMAGIC;
    658 	lp->d_magic2 = DISKMAGIC;
    659 	lp->d_checksum = dkcksum(lp);
    660 }
    661 
    662 /*
    663  * Fabricate a default disk label, and try to read the correct one.
    664  */
    665 static void
    666 edgetdisklabel(wd)
    667 	struct ed_softc *wd;
    668 {
    669 	struct disklabel *lp = wd->sc_dk.dk_label;
    670 	char *errstring;
    671 
    672 	WDCDEBUG_PRINT(("edgetdisklabel\n"), DEBUG_FUNCS);
    673 
    674 	memset(wd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));
    675 
    676 	edgetdefaultlabel(wd, lp);
    677 
    678 #if 0
    679 	wd->sc_badsect[0] = -1;
    680 
    681 	if (wd->drvp->state > RECAL)
    682 		wd->drvp->drive_flags |= DRIVE_RESET;
    683 #endif
    684 	errstring = readdisklabel(MAKEDISKDEV(0, wd->sc_dev.dv_unit, RAW_PART),
    685 	    edmcastrategy, lp, wd->sc_dk.dk_cpulabel);
    686 	if (errstring) {
    687 		/*
    688 		 * This probably happened because the drive's default
    689 		 * geometry doesn't match the DOS geometry.  We
    690 		 * assume the DOS geometry is now in the label and try
    691 		 * again.  XXX This is a kluge.
    692 		 */
    693 #if 0
    694 		if (wd->drvp->state > RECAL)
    695 			wd->drvp->drive_flags |= DRIVE_RESET;
    696 #endif
    697 		errstring = readdisklabel(MAKEDISKDEV(0, wd->sc_dev.dv_unit,
    698 		    RAW_PART), edmcastrategy, lp, wd->sc_dk.dk_cpulabel);
    699 	}
    700 	if (errstring) {
    701 		printf("%s: %s\n", wd->sc_dev.dv_xname, errstring);
    702 		return;
    703 	}
    704 
    705 #if 0
    706 	if (wd->drvp->state > RECAL)
    707 		wd->drvp->drive_flags |= DRIVE_RESET;
    708 #endif
    709 #ifdef HAS_BAD144_HANDLING
    710 	if ((lp->d_flags & D_BADSECT) != 0)
    711 		bad144intern(wd);
    712 #endif
    713 }
    714 
    715 int
    716 edmcaioctl(dev, xfer, addr, flag, p)
    717 	dev_t dev;
    718 	u_long xfer;
    719 	caddr_t addr;
    720 	int flag;
    721 	struct proc *p;
    722 {
    723 	struct ed_softc *wd = device_lookup(&ed_cd, DISKUNIT(dev));
    724 	int error;
    725 #ifdef __HAVE_OLD_DISKLABEL
    726 	struct disklabel newlabel;
    727 #endif
    728 
    729 	WDCDEBUG_PRINT(("edioctl\n"), DEBUG_FUNCS);
    730 
    731 	if ((wd->sc_flags & WDF_LOADED) == 0)
    732 		return EIO;
    733 
    734 	switch (xfer) {
    735 #ifdef HAS_BAD144_HANDLING
    736 	case DIOCSBAD:
    737 		if ((flag & FWRITE) == 0)
    738 			return EBADF;
    739 		wd->sc_dk.dk_cpulabel->bad = *(struct dkbad *)addr;
    740 		wd->sc_dk.dk_label->d_flags |= D_BADSECT;
    741 		bad144intern(wd);
    742 		return 0;
    743 #endif
    744 
    745 	case DIOCGDINFO:
    746 		*(struct disklabel *)addr = *(wd->sc_dk.dk_label);
    747 		return 0;
    748 #ifdef __HAVE_OLD_DISKLABEL
    749 	case ODIOCGDINFO:
    750 		newlabel = *(wd->sc_dk.dk_label);
    751 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
    752 			return ENOTTY;
    753 		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
    754 		return 0;
    755 #endif
    756 
    757 	case DIOCGPART:
    758 		((struct partinfo *)addr)->disklab = wd->sc_dk.dk_label;
    759 		((struct partinfo *)addr)->part =
    760 		    &wd->sc_dk.dk_label->d_partitions[DISKPART(dev)];
    761 		return 0;
    762 
    763 	case DIOCWDINFO:
    764 	case DIOCSDINFO:
    765 #ifdef __HAVE_OLD_DISKLABEL
    766 	case ODIOCWDINFO:
    767 	case ODIOCSDINFO:
    768 #endif
    769 	{
    770 		struct disklabel *lp;
    771 
    772 #ifdef __HAVE_OLD_DISKLABEL
    773 		if (xfer == ODIOCSDINFO || xfer == ODIOCWDINFO) {
    774 			memset(&newlabel, 0, sizeof newlabel);
    775 			memcpy(&newlabel, addr, sizeof (struct olddisklabel));
    776 			lp = &newlabel;
    777 		} else
    778 #endif
    779 		lp = (struct disklabel *)addr;
    780 
    781 		if ((flag & FWRITE) == 0)
    782 			return EBADF;
    783 
    784 		if ((error = ed_lock(wd)) != 0)
    785 			return error;
    786 		wd->sc_flags |= WDF_LABELLING;
    787 
    788 		error = setdisklabel(wd->sc_dk.dk_label,
    789 		    lp, /*wd->sc_dk.dk_openmask : */0,
    790 		    wd->sc_dk.dk_cpulabel);
    791 		if (error == 0) {
    792 #if 0
    793 			if (wd->drvp->state > RECAL)
    794 				wd->drvp->drive_flags |= DRIVE_RESET;
    795 #endif
    796 			if (xfer == DIOCWDINFO
    797 #ifdef __HAVE_OLD_DISKLABEL
    798 			    || xfer == ODIOCWDINFO
    799 #endif
    800 			    )
    801 				error = writedisklabel(EDLABELDEV(dev),
    802 				    edmcastrategy, wd->sc_dk.dk_label,
    803 				    wd->sc_dk.dk_cpulabel);
    804 		}
    805 
    806 		wd->sc_flags &= ~WDF_LABELLING;
    807 		ed_unlock(wd);
    808 		return error;
    809 	}
    810 
    811 	case DIOCKLABEL:
    812 		if (*(int *)addr)
    813 			wd->sc_flags |= WDF_KLABEL;
    814 		else
    815 			wd->sc_flags &= ~WDF_KLABEL;
    816 		return 0;
    817 
    818 	case DIOCWLABEL:
    819 		if ((flag & FWRITE) == 0)
    820 			return EBADF;
    821 		if (*(int *)addr)
    822 			wd->sc_flags |= WDF_WLABEL;
    823 		else
    824 			wd->sc_flags &= ~WDF_WLABEL;
    825 		return 0;
    826 
    827 	case DIOCGDEFLABEL:
    828 		edgetdefaultlabel(wd, (struct disklabel *)addr);
    829 		return 0;
    830 #ifdef __HAVE_OLD_DISKLABEL
    831 	case ODIOCGDEFLABEL:
    832 		edgetdefaultlabel(wd, &newlabel);
    833 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
    834 			return ENOTTY;
    835 		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
    836 		return 0;
    837 #endif
    838 
    839 #ifdef notyet
    840 	case DIOCWFORMAT:
    841 		if ((flag & FWRITE) == 0)
    842 			return EBADF;
    843 		{
    844 		register struct format_op *fop;
    845 		struct iovec aiov;
    846 		struct uio auio;
    847 
    848 		fop = (struct format_op *)addr;
    849 		aiov.iov_base = fop->df_buf;
    850 		aiov.iov_len = fop->df_count;
    851 		auio.uio_iov = &aiov;
    852 		auio.uio_iovcnt = 1;
    853 		auio.uio_resid = fop->df_count;
    854 		auio.uio_segflg = 0;
    855 		auio.uio_offset =
    856 			fop->df_startblk * wd->sc_dk.dk_label->d_secsize;
    857 		auio.uio_procp = p;
    858 		error = physio(wdformat, NULL, dev, B_WRITE, minphys,
    859 		    &auio);
    860 		fop->df_count -= auio.uio_resid;
    861 		fop->df_reg[0] = wdc->sc_status;
    862 		fop->df_reg[1] = wdc->sc_error;
    863 		return error;
    864 		}
    865 #endif
    866 
    867 	default:
    868 		return ENOTTY;
    869 	}
    870 
    871 #ifdef DIAGNOSTIC
    872 	panic("edioctl: impossible");
    873 #endif
    874 }
    875 
    876 #if 0
    877 #ifdef B_FORMAT
    878 int
    879 edmcaformat(struct buf *bp)
    880 {
    881 
    882 	bp->b_flags |= B_FORMAT;
    883 	return edmcastrategy(bp);
    884 }
    885 #endif
    886 #endif
    887 
    888 int
    889 edmcasize(dev)
    890 	dev_t dev;
    891 {
    892 	struct ed_softc *wd;
    893 	int part, omask;
    894 	int size;
    895 
    896 	WDCDEBUG_PRINT(("edsize\n"), DEBUG_FUNCS);
    897 
    898 	wd = device_lookup(&ed_cd, DISKUNIT(dev));
    899 	if (wd == NULL)
    900 		return (-1);
    901 
    902 	part = DISKPART(dev);
    903 	omask = wd->sc_dk.dk_openmask & (1 << part);
    904 
    905 	if (omask == 0 && edmcaopen(dev, 0, S_IFBLK, NULL) != 0)
    906 		return (-1);
    907 	if (wd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
    908 		size = -1;
    909 	else
    910 		size = wd->sc_dk.dk_label->d_partitions[part].p_size *
    911 		    (wd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
    912 	if (omask == 0 && edmcaclose(dev, 0, S_IFBLK, NULL) != 0)
    913 		return (-1);
    914 	return (size);
    915 }
    916 
    917 /* #define WD_DUMP_NOT_TRUSTED if you just want to watch */
    918 static int eddoingadump = 0;
    919 static int eddumprecalibrated = 0;
    920 static int eddumpmulti = 1;
    921 
    922 /*
    923  * Dump core after a system crash.
    924  */
    925 int
    926 edmcadump(dev, blkno, va, size)
    927 	dev_t dev;
    928 	daddr_t blkno;
    929 	caddr_t va;
    930 	size_t size;
    931 {
    932 	struct ed_softc *ed;	/* disk unit to do the I/O */
    933 	struct disklabel *lp;   /* disk's disklabel */
    934 	int part;
	int nblks;	/* total number of sectors left to write */
	int count;	/* number of sectors in the current chunk */
    936 
    937 	/* Check if recursive dump; if so, punt. */
    938 	if (eddoingadump)
    939 		return EFAULT;
    940 	eddoingadump = 1;
    941 
    942 	ed = device_lookup(&ed_cd, DISKUNIT(dev));
    943 	if (ed == NULL)
    944 		return (ENXIO);
    945 
    946 	part = DISKPART(dev);
    947 
    948 	/* Make sure it was initialized. */
    949 	if ((ed->sc_flags & EDF_INIT) == 0)
    950 		return ENXIO;
    951 
    952 	/* Convert to disk sectors.  Request must be a multiple of size. */
    953 	lp = ed->sc_dk.dk_label;
    954 	if ((size % lp->d_secsize) != 0)
    955 		return EFAULT;
    956 	nblks = size / lp->d_secsize;
    957 	blkno = blkno / (lp->d_secsize / DEV_BSIZE);
    958 
    959 	/* Check transfer bounds against partition size. */
    960 	if ((blkno < 0) || ((blkno + nblks) > lp->d_partitions[part].p_size))
    961 		return EINVAL;
    962 
    963 	/* Offset block number to start of partition. */
    964 	blkno += lp->d_partitions[part].p_offset;
    965 
    966 	/* Recalibrate, if first dump transfer. */
    967 	if (eddumprecalibrated == 0) {
    968 		eddumprecalibrated = 1;
    969 		eddumpmulti = 8;
    970 #if 0
    971 		wd->drvp->state = RESET;
    972 #endif
    973 	}
    974 
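	/*
	 * Transfer the dump in chunks of at most eddumpmulti sectors,
	 * running each command polled since interrupts cannot be relied
	 * upon at dump time.
	 */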
    975 	while (nblks > 0) {
    976 		ed->sc_data = va;
    977 		ed->sc_rawblkno = blkno;
    978 		ed->sc_bcount = min(nblks, eddumpmulti) * lp->d_secsize;
    979 		ed->sc_read = 0;
    980 
    981 		ed_bio(ed, 0, 1);
    982 		if (ed->sc_error)
    983 			return (ed->sc_error);
    984 
    985 		ed_bio_done(ed);
    986 
		/*
		 * Advance to the next chunk.  Compute the chunk size once,
		 * before nblks is decremented, so that blkno and va advance
		 * by the number of sectors actually written.
		 */
		count = min(nblks, eddumpmulti);
		nblks -= count;
		blkno += count;
		va += count * lp->d_secsize;
    991 	}
    992 
    993 	eddoingadump = 0;
    994 	return (0);
    995 }
    996 
    997 #ifdef HAS_BAD144_HANDLING
    998 /*
    999  * Internalize the bad sector table.
   1000  */
   1001 static void
   1002 bad144intern(wd)
   1003 	struct ed_softc *wd;
   1004 {
   1005 	struct dkbad *bt = &wd->sc_dk.dk_cpulabel->bad;
   1006 	struct disklabel *lp = wd->sc_dk.dk_label;
   1007 	int i = 0;
   1008 
   1009 	WDCDEBUG_PRINT(("bad144intern\n"), DEBUG_XFERS);
   1010 
   1011 	for (; i < NBT_BAD; i++) {
   1012 		if (bt->bt_bad[i].bt_cyl == 0xffff)
   1013 			break;
   1014 		wd->sc_badsect[i] =
   1015 		    bt->bt_bad[i].bt_cyl * lp->d_secpercyl +
   1016 		    (bt->bt_bad[i].bt_trksec >> 8) * lp->d_nsectors +
   1017 		    (bt->bt_bad[i].bt_trksec & 0xff);
   1018 	}
   1019 	for (; i < NBT_BAD+1; i++)
   1020 		wd->sc_badsect[i] = -1;
   1021 }
   1022 #endif
   1023 
   1024 static int
   1025 ed_get_params(ed)
   1026 	struct ed_softc *ed;
   1027 {
   1028 	u_int16_t cmd_args[2];
   1029 
   1030 	/*
   1031 	 * Get Device Configuration (09).
   1032 	 */
   1033 	cmd_args[0] = 14;	/* Options: 00s110, s: 0=Physical 1=Pseudo */
   1034 	cmd_args[1] = 0;
   1035 	if (edc_run_cmd(ed->edc_softc, CMD_GET_DEV_CONF, ed->sc_devno,
   1036 	    cmd_args, 2, 0, 1))
   1037 		return (1);
   1038 
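	/*
	 * Parse the returned status block: word 1 carries the spare
	 * sector count and the drive flag bits, words 2 and 3 the
	 * 32-bit relative block address (RBA) count.
	 */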
   1039 	ed->spares = ed->sc_status_block[1] >> 8;
   1040 	ed->drv_flags = ed->sc_status_block[1] & 0x1f;
   1041 	ed->rba = ed->sc_status_block[2] |
   1042 		(ed->sc_status_block[3] << 16);
	/*
	 * Instead of using the geometry reported in the status block:
	 *	ed->cyl = ed->sc_status_block[4];
	 *	ed->heads = ed->sc_status_block[5] & 0xff;
	 *	ed->sectors = ed->sc_status_block[5] >> 8;
	 * we fabricate a geometry from the RBA count, using 32 sectors
	 * per track and 64 heads.  This seems to be necessary for the
	 * integrated ESDI controller.
	 */
   1051 	ed->sectors = 32;
   1052 	ed->heads = 64;
   1053 	ed->cyl = ed->rba / (ed->heads * ed->sectors);
   1054 	ed->sc_capacity = ed->rba;
   1055 
   1056 	return (0);
   1057 }
   1058 
   1059 /*
 * Our shutdown hook.  We only attempt to park the disk's heads.
   1061  */
static void
   1063 ed_shutdown(arg)
   1064 	void *arg;
   1065 {
   1066 #if 0
   1067 	struct ed_softc *ed = arg;
   1068 	u_int16_t cmd_args[2];
   1069 
   1070 	/* Issue Park Head command */
   1071 	cmd_args[0] = 6;	/* Options: 000110 */
   1072 	cmd_args[1] = 0;
   1073 	(void) edc_run_cmd(ed->edc_softc, CMD_PARK_HEAD, ed->sc_devno,
   1074 			cmd_args, 2, 0);
   1075 #endif
   1076 }
   1077 
   1078 /*
   1079  * Main worker thread function.
   1080  */
static void
   1082 edworker(arg)
   1083 	void *arg;
   1084 {
   1085 	struct ed_softc *ed = (struct ed_softc *) arg;
   1086 	struct buf *bp;
   1087 	int s;
   1088 
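	/* The worker thread is running; pair with config_pending_incr() in attach. */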
   1089 	config_pending_decr();
   1090 
   1091 	for(;;) {
   1092 		/* Wait until awakened */
   1093 		(void) tsleep(&ed->sc_q, PRIBIO, "edidle", 0);
   1094 
   1095 		if ((ed->sc_flags & EDF_PROCESS_QUEUE) == 0)
   1096 			panic("edworker: expecting process queue");
   1097 		ed->sc_flags &= ~EDF_PROCESS_QUEUE;
   1098 
   1099 		for(;;) {
   1100 			/* Is there a buf for us ? */
   1101 			simple_lock(&ed->sc_q_lock);
   1102 			if ((bp = BUFQ_FIRST(&ed->sc_q)) == NULL) {
   1103 				simple_unlock(&ed->sc_q_lock);
   1104 				break;
   1105 			}
   1106 			BUFQ_REMOVE(&ed->sc_q, bp);
   1107 			simple_unlock(&ed->sc_q_lock);
   1108 
   1109 			/* Schedule i/o operation */
   1110 			ed->sc_error = 0;
   1111 			s = splbio();
   1112 			__edstart(ed, bp);
   1113 
   1114 			/*
   1115 			 * Wait until the command executes; edc_intr() wakes
   1116 			 * us up.
   1117 			 */
   1118 			if (ed->sc_error == 0)
   1119 				(void)tsleep(&ed->edc_softc, PRIBIO, "edwrk",0);
   1120 
   1121 			/* Handle i/o results */
   1122 			edmcadone(ed, bp);
   1123 			splx(s);
   1124 		}
   1125 	}
   1126 }
   1127