Home | History | Annotate | Line # | Download | only in mca
ed_mca.c revision 1.8.2.1
      1 /*	$NetBSD: ed_mca.c,v 1.8.2.1 2001/09/07 04:45:27 thorpej Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2001 The NetBSD Foundation, Inc.
      5  *
      6  * This code is derived from software contributed to The NetBSD Foundation
      7  * by Jaromir Dolecek.
      8  *
      9  * Redistribution and use in source and binary forms, with or without
     10  * modification, are permitted provided that the following conditions
     11  * are met:
     12  * 1. Redistributions of source code must retain the above copyright
     13  *    notice, this list of conditions and the following disclaimer.
     14  * 2. Redistributions in binary form must reproduce the above copyright
     15  *    notice, this list of conditions and the following disclaimer in the
     16  *    documentation and/or other materials provided with the distribution.
     17  * 3. All advertising materials mentioning features or use of this software
     18  *    must display the following acknowledgement:
     19  *        This product includes software developed by the NetBSD
     20  *        Foundation, Inc. and its contributors.
     21  * 4. The name of the author may not be used to endorse or promote products
     22  *    derived from this software without specific prior written permission.
     23  *
     24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     25  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     26  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     27  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     28  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     29  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     30  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     31  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     32  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     33  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     34  */
     35 
     36 /*
     37  * Disk goo for MCA ESDI controller driver.
     38  */
     39 
     40 #include "rnd.h"
     41 
     42 #include <sys/param.h>
     43 #include <sys/systm.h>
     44 #include <sys/kernel.h>
     45 #include <sys/conf.h>
     46 #include <sys/file.h>
     47 #include <sys/stat.h>
     48 #include <sys/ioctl.h>
     49 #include <sys/buf.h>
     50 #include <sys/uio.h>
     51 #include <sys/malloc.h>
     52 #include <sys/device.h>
     53 #include <sys/disklabel.h>
     54 #include <sys/disk.h>
     55 #include <sys/syslog.h>
     56 #include <sys/proc.h>
     57 #include <sys/vnode.h>
     58 #include <sys/kthread.h>
     59 #if NRND > 0
     60 #include <sys/rnd.h>
     61 #endif
     62 
     63 #include <miscfs/specfs/specdev.h>
     64 
     65 #include <machine/intr.h>
     66 #include <machine/bus.h>
     67 
     68 #include <dev/mca/mcavar.h>
     69 
     70 #include <dev/mca/edcreg.h>
     71 #include <dev/mca/edvar.h>
     72 #include <dev/mca/edcvar.h>
     73 
     74 /* #define WDCDEBUG */
     75 
     76 #ifdef WDCDEBUG
     77 #define WDCDEBUG_PRINT(args, level)  printf args
     78 #else
     79 #define WDCDEBUG_PRINT(args, level)
     80 #endif
     81 
     82 #define	EDLABELDEV(dev) (MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART))
     83 
     84 /* XXX: these should go elsewhere */
     85 cdev_decl(edmca);
     86 bdev_decl(edmca);
     87 
     88 static int     ed_mca_probe   __P((struct device *, struct cfdata *, void *));
     89 static void    ed_mca_attach  __P((struct device *, struct device *, void *));
     90 
     91 struct cfattach ed_mca_ca = {
     92 	sizeof(struct ed_softc), ed_mca_probe, ed_mca_attach
     93 };
     94 
     95 extern struct cfdriver ed_cd;
     96 
     97 static int	ed_get_params __P((struct ed_softc *));
     98 static int	ed_lock	__P((struct ed_softc *));
     99 static void	ed_unlock	__P((struct ed_softc *));
    100 static void	edgetdisklabel	__P((struct vnode *));
    101 static void	edgetdefaultlabel __P((struct ed_softc *, struct disklabel *));
    102 static void	ed_shutdown __P((void*));
    103 static void	__edstart __P((struct ed_softc*, struct buf *));
    104 static void	bad144intern __P((struct ed_softc *));
    105 static void	edworker __P((void *));
    106 static void	ed_spawn_worker __P((void *));
    107 static void	edmcadone __P((struct ed_softc *, struct buf *));
    108 static void	ed_bio __P((struct ed_softc *, int, int));
    109 static void	ed_bio_done __P((struct ed_softc *));
    110 
    111 static struct dkdriver eddkdriver = { edmcastrategy };
    112 
    113 /*
    114  * Just check if it's possible to identify the disk.
    115  */
    116 static int
    117 ed_mca_probe(parent, match, aux)
    118 	struct device *parent;
    119 	struct cfdata *match;
    120 	void *aux;
    121 {
    122 	u_int16_t cmd_args[2];
    123 	struct edc_mca_softc *sc = (void *) parent;
    124 	struct ed_attach_args *eda = (void *) aux;
    125 	int found = 1;
    126 
    127 	/*
    128 	 * Get Device Configuration (09).
    129 	 */
    130 	cmd_args[0] = 14;	/* Options: 00s110, s: 0=Physical 1=Pseudo */
    131 	cmd_args[1] = 0;
    132 	if (edc_run_cmd(sc, CMD_GET_DEV_CONF, eda->sc_devno, cmd_args, 2, 0, 1))
    133 		found = 0;
    134 
    135 	return (found);
    136 }
    137 
/*
 * Attach an ESDI disk: register it with its controller, read the drive
 * geometry, set up DMA resources (map + bounce buffer), attach the
 * generic disk structure and arrange for a worker kthread to be
 * spawned once kthreads are available.
 */
static void
ed_mca_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct ed_softc *ed = (void *) self;
	struct edc_mca_softc *sc = (void *) parent;
	struct ed_attach_args *eda = (void *) aux;
	char pbuf[8];
	int error, nsegs;

	/* Link this disk to its controller and remember its DMA tag. */
	ed->edc_softc = sc;
	ed->sc_dmat = eda->sc_dmat;
	ed->sc_devno = eda->sc_devno;
	edc_add_disk(sc, ed, eda->sc_devno);

	/* Transfer queue plus the locks protecting queue and softc. */
	BUFQ_INIT(&ed->sc_q);
	simple_lock_init(&ed->sc_q_lock);
	lockinit(&ed->sc_lock, PRIBIO | PCATCH, "edlck", 0, 0);

	if (ed_get_params(ed)) {
		printf(": IDENTIFY failed, no disk found\n");
		return;
	}

	/* Announce capacity and geometry as reported by ed_get_params(). */
	format_bytes(pbuf, sizeof(pbuf),
		(u_int64_t) ed->sc_capacity * DEV_BSIZE);
	printf(": %s, %u cyl, %u head, %u sec, 512 bytes/sect x %u sectors\n",
		pbuf,
		ed->cyl, ed->heads, ed->sectors,
		ed->sc_capacity);

	/* Decode the drive flag bits saved by ed_get_params(). */
	printf("%s: %u spares/cyl, %s, %s, %s, %s, %s\n",
		ed->sc_dev.dv_xname, ed->spares,
		(ed->drv_flags & (1 << 0)) ? "NoRetries" : "Retries",
		(ed->drv_flags & (1 << 1)) ? "Removable" : "Fixed",
		(ed->drv_flags & (1 << 2)) ? "SkewedFormat" : "NoSkew",
		(ed->drv_flags & (1 << 3)) ? "ZeroDefect" : "Defects",
		(ed->drv_flags & (1 << 4)) ? "InvalidSecondary" : "SecondaryOK"
		);

	/* Create a DMA map for mapping individual transfer bufs */
	if ((error = bus_dmamap_create(ed->sc_dmat, 65536, 1,
		65536, 65536, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
		&ed->dmamap_xfer)) != 0) {
		printf("%s: unable to create xfer DMA map, error=%d\n",
			ed->sc_dev.dv_xname, error);
		return;
	}

	/*
	 * Allocate DMA memory used in case where passed buf isn't
	 * physically contiguous.
	 */
	ed->sc_dmam_sz = MAXPHYS;
	if ((error = bus_dmamem_alloc(ed->sc_dmat, ed->sc_dmam_sz,
		ed->sc_dmam_sz, 65536, ed->sc_dmam, 1, &nsegs,
		BUS_DMA_WAITOK|BUS_DMA_STREAMING)) != 0) {
		printf("%s: unable to allocate DMA memory for xfer, errno=%d\n",
				ed->sc_dev.dv_xname, error);
		bus_dmamap_destroy(ed->sc_dmat, ed->dmamap_xfer);
		return;
	}
	/*
	 * Map the memory.
	 */
	if ((error = bus_dmamem_map(ed->sc_dmat, ed->sc_dmam, 1,
		ed->sc_dmam_sz, &ed->sc_dmamkva, BUS_DMA_WAITOK)) != 0) {
		printf("%s: unable to map DMA memory, error=%d\n",
			ed->sc_dev.dv_xname, error);
		bus_dmamem_free(ed->sc_dmat, ed->sc_dmam, 1);
		bus_dmamap_destroy(ed->sc_dmat, ed->dmamap_xfer);
		return;
	}


	/*
	 * Initialize and attach the disk structure.
	 */
	ed->sc_dk.dk_driver = &eddkdriver;
	ed->sc_dk.dk_name = ed->sc_dev.dv_xname;
	disk_attach(&ed->sc_dk);
#if 0
	wd->sc_wdc_bio.lp = wd->sc_dk.dk_label;
#endif
	ed->sc_sdhook = shutdownhook_establish(ed_shutdown, ed);
	if (ed->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    ed->sc_dev.dv_xname);
#if NRND > 0
	/* Seed the entropy pool from disk activity. */
	rnd_attach_source(&ed->rnd_source, ed->sc_dev.dv_xname,
			  RND_TYPE_DISK, 0);
#endif

	/*
	 * Defer spawning the worker: kthread_create() queues the hook
	 * until the scheduler is running; config_pending_incr() keeps
	 * autoconfiguration from finishing before the thread exists.
	 */
	config_pending_incr();
	kthread_create(ed_spawn_worker, (void *) ed);

	ed->sc_flags |= EDF_INIT;
}
    237 
    238 void
    239 ed_spawn_worker(arg)
    240 	void *arg;
    241 {
    242 	struct ed_softc *ed = (struct ed_softc *) arg;
    243 	int error;
    244 
    245 	/* Now, everything is ready, start a kthread */
    246 	if ((error = kthread_create1(edworker, ed, &ed->sc_worker,
    247 			"%s", ed->sc_dev.dv_xname))) {
    248 		printf("%s: cannot spawn worker thread: errno=%d\n",
    249 			ed->sc_dev.dv_xname, error);
    250 		panic("ed_spawn_worker");
    251 	}
    252 }
    253 
/*
 * Read/write routine for a buffer.  Validates the arguments and schedules the
 * transfer.  Does not wait for the transfer to complete.
 */
void
edmcastrategy(bp)
	struct buf *bp;
{
	/* The softc was stashed in the device vnode by edmcaopen(). */
	struct ed_softc *wd = bp->b_devvp->v_devcookie;
	struct disklabel *lp = wd->sc_dk.dk_label;
	daddr_t blkno;
	int s;

	WDCDEBUG_PRINT(("edmcastrategy (%s)\n", wd->sc_dev.dv_xname),
	    DEBUG_XFERS);

	/*
	 * Valid request?  Must be a whole number of sectors; the
	 * (1 << NBBY) sector-count cap presumably matches the width of
	 * the controller's block count field -- TODO confirm against
	 * the EDC command format.
	 */
	if (bp->b_blkno < 0 ||
	    (bp->b_bcount % lp->d_secsize) != 0 ||
	    (bp->b_bcount / lp->d_secsize) >= (1 << NBBY)) {
		bp->b_error = EINVAL;
		goto bad;
	}

	/* If device invalidated (e.g. media change, door open), error. */
	if ((wd->sc_flags & WDF_LOADED) == 0) {
		bp->b_error = EIO;
		goto bad;
	}

	/* If it's a null transfer, return immediately. */
	if (bp->b_bcount == 0)
		goto done;

	/*
	 * Do bounds checking, adjust transfer. if error, process.
	 * If end of partition, just return.
	 */
	if (DISKPART(bp->b_devvp->v_rdev) != RAW_PART &&
	    (bp->b_flags & B_DKLABEL) == 0 &&
	    bounds_check_with_label(bp, wd->sc_dk.dk_label,
	    (wd->sc_flags & (WDF_WLABEL|WDF_LABELLING)) != 0) <= 0)
		goto done;

	/*
	 * Now convert the block number to absolute and put it in
	 * terms of the device's logical block size.
	 */
	if (lp->d_secsize >= DEV_BSIZE)
		blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

	/* Add the partition offset unless this is the raw partition. */
	if (DISKPART(bp->b_devvp->v_rdev) != RAW_PART &&
	    (bp->b_flags & B_DKLABEL) == 0)
		blkno +=
		    lp->d_partitions[DISKPART(bp->b_devvp->v_rdev)].p_offset;

	bp->b_rawblkno = blkno;

	/* Queue transfer on drive, activate drive and controller if idle. */
	s = splbio();
	simple_lock(&wd->sc_q_lock);
	disksort_blkno(&wd->sc_q, bp);
	simple_unlock(&wd->sc_q_lock);

	/* Ring the worker thread */
	wd->sc_flags |= EDF_PROCESS_QUEUE;
	wakeup_one(&wd->sc_q);

	splx(s);
	return;
bad:
	bp->b_flags |= B_ERROR;
done:
	/* Toss transfer; we're done early. */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}
    333 
/*
 * Issue the transfer described by sc_data/sc_bcount/sc_rawblkno/sc_read
 * to the controller.  'async' and 'poll' are passed straight through to
 * edc_run_cmd().  On failure, sc_error is set; on success the caller is
 * expected to finish up via ed_bio_done()/edmcadone().
 */
static void
ed_bio(struct ed_softc *ed, int async, int poll)
{
	u_int16_t cmd_args[4];
	int error=0;
	u_int16_t track;
	u_int16_t cyl;
	u_int8_t head;
	u_int8_t sector;

	/*
	 * Get physical bus mapping for buf.  If the buf's pages can't be
	 * mapped directly (e.g. not DMA-reachable), fall back to the
	 * driver's pre-allocated bounce buffer.
	 */
	if (bus_dmamap_load(ed->sc_dmat, ed->dmamap_xfer,
			ed->sc_data, ed->sc_bcount, NULL,
			BUS_DMA_WAITOK|BUS_DMA_STREAMING) != 0) {

		/*
		 * Use our DMA safe memory to get data to/from device.
		 */
		if ((error = bus_dmamap_load(ed->sc_dmat, ed->dmamap_xfer,
			ed->sc_dmamkva, ed->sc_bcount, NULL,
			BUS_DMA_WAITOK|BUS_DMA_STREAMING)) != 0) {
			printf("%s: unable to load raw data for xfer, errno=%d\n",
				ed->sc_dev.dv_xname, error);
			goto out;
		}
		ed->sc_flags |= EDF_BOUNCEBUF;

		/* If data write, copy the data to our bounce buffer. */
		if (!ed->sc_read)
			memcpy(ed->sc_dmamkva, ed->sc_data, ed->sc_bcount);
	}

	ed->sc_flags |= EDF_DMAMAP_LOADED;

	/* Convert the absolute sector number to cylinder/head/sector. */
	track = ed->sc_rawblkno / ed->sectors;
	head = track % ed->heads;
	cyl = track / ed->heads;
	sector = ed->sc_rawblkno % ed->sectors;

	WDCDEBUG_PRINT(("__edstart %s: map: %u %u %u\n", ed->sc_dev.dv_xname,
		cyl, sector, head),
	    DEBUG_XFERS);

	mca_disk_busy();

	/* Read or Write Data command */
	cmd_args[0] = 2;	/* Options 0000010 */
	cmd_args[1] = ed->sc_bcount / DEV_BSIZE;
	/* RBA packing: low 5 cylinder bits | head | sector, then the
	 * remaining cylinder bits in the next word. */
	cmd_args[2] = ((cyl & 0x1f) << 11) | (head << 5) | sector;
	cmd_args[3] = ((cyl & 0x3E0) >> 5);
	if (edc_run_cmd(ed->edc_softc,
			(ed->sc_read) ? CMD_READ_DATA : CMD_WRITE_DATA,
			ed->sc_devno, cmd_args, 4, async, poll)) {
		printf("%s: data i/o command failed\n", ed->sc_dev.dv_xname);
		mca_disk_unbusy();
		error = EIO;
	}

    out:
	if (error)
		ed->sc_error = error;
}
    396 
/*
 * Start the transfer for a queued buf: record the transfer parameters
 * in the softc, mark the disk busy for statistics, and issue the
 * command asynchronously via ed_bio().  Completion is presumably
 * handled by the worker thread via edmcadone() -- edworker() is not
 * visible in this chunk, so confirm there.
 */
static void
__edstart(ed, bp)
	struct ed_softc *ed;
	struct buf *bp;
{
	WDCDEBUG_PRINT(("__edstart %s (%s): %lu %lu %u\n", ed->sc_dev.dv_xname,
		(bp->b_flags & B_READ) ? "read" : "write",
		bp->b_bcount, bp->b_resid, bp->b_rawblkno),
	    DEBUG_XFERS);

	/* Instrumentation. */
	disk_busy(&ed->sc_dk);
	ed->sc_flags |= EDF_DK_BUSY;

	/* Snapshot the buf's transfer parameters for ed_bio(). */
	ed->sc_data = bp->b_data;
	ed->sc_rawblkno = bp->b_rawblkno;
	ed->sc_bcount = bp->b_bcount;
	ed->sc_read = bp->b_flags & B_READ;
	ed_bio(ed, 1, 0);	/* async, no polling */
}
    417 
/*
 * Per-transfer DMA cleanup: copy a successful read out of the bounce
 * buffer (if one was used), unload the DMA map, and drop the MCA disk
 * busy count taken in ed_bio().
 */
static void
ed_bio_done(ed)
	struct ed_softc *ed;
{
	/*
	 * If read transfer finished without error and using a bounce
	 * buffer, copy the data to buf.
	 */
	if (ed->sc_error == 0 && (ed->sc_flags & EDF_BOUNCEBUF) && ed->sc_read)
		memcpy(ed->sc_data, ed->sc_dmamkva, ed->sc_bcount);
	ed->sc_flags &= ~EDF_BOUNCEBUF;

	/* Unload buf from DMA map */
	if (ed->sc_flags & EDF_DMAMAP_LOADED) {
		bus_dmamap_unload(ed->sc_dmat, ed->dmamap_xfer);
		ed->sc_flags &= ~EDF_DMAMAP_LOADED;
	}

	mca_disk_unbusy();
}
    438 
/*
 * Finish a queued transfer: propagate error or residual count to the
 * buf, release DMA resources, update disk statistics and the entropy
 * pool, and wake the buf's waiters via biodone().
 */
static void
edmcadone(ed, bp)
	struct ed_softc *ed;
	struct buf *bp;
{
	WDCDEBUG_PRINT(("eddone %s\n", ed->sc_dev.dv_xname),
	    DEBUG_XFERS);

	if (ed->sc_error) {
		bp->b_error = ed->sc_error;
		bp->b_flags |= B_ERROR;
	} else {
		/* Set resid, most commonly to zero.  The residual block
		 * count comes from the controller's status block. */
		bp->b_resid = ed->sc_status_block[SB_RESBLKCNT_IDX] * DEV_BSIZE;
	}

	ed_bio_done(ed);

	/* If disk was busied, unbusy it now */
	if (ed->sc_flags & EDF_DK_BUSY) {
		disk_unbusy(&ed->sc_dk, (bp->b_bcount - bp->b_resid));
		ed->sc_flags &= ~EDF_DK_BUSY;
	}

#if NRND > 0
	rnd_add_uint32(&ed->rnd_source, bp->b_blkno);
#endif
	biodone(bp);
}
    468 
    469 int
    470 edmcaread(devvp, uio, flags)
    471 	struct vnode *devvp;
    472 	struct uio *uio;
    473 	int flags;
    474 {
    475 	WDCDEBUG_PRINT(("edread\n"), DEBUG_XFERS);
    476 	return (physio(edmcastrategy, NULL, devvp, B_READ, minphys, uio));
    477 }
    478 
    479 int
    480 edmcawrite(devvp, uio, flags)
    481 	struct vnode *devvp;
    482 	struct uio *uio;
    483 	int flags;
    484 {
    485 	WDCDEBUG_PRINT(("edwrite\n"), DEBUG_XFERS);
    486 	return (physio(edmcastrategy, NULL, devvp, B_WRITE, minphys, uio));
    487 }
    488 
    489 /*
    490  * Wait interruptibly for an exclusive lock.
    491  */
    492 static int
    493 ed_lock(ed)
    494 	struct ed_softc *ed;
    495 {
    496 	int error;
    497 	int s;
    498 
    499 	WDCDEBUG_PRINT(("ed_lock\n"), DEBUG_FUNCS);
    500 
    501 	s = splbio();
    502 	error = lockmgr(&ed->sc_lock, LK_EXCLUSIVE, NULL);
    503 	splx(s);
    504 
    505 	return (error);
    506 }
    507 
    508 /*
    509  * Unlock and wake up any waiters.
    510  */
    511 static void
    512 ed_unlock(ed)
    513 	struct ed_softc *ed;
    514 {
    515 	WDCDEBUG_PRINT(("ed_unlock\n"), DEBUG_FUNCS);
    516 
    517 	(void) lockmgr(&ed->sc_lock, LK_RELEASE, NULL);
    518 }
    519 
    520 int
    521 edmcaopen(devvp, flag, fmt, p)
    522 	struct vnode *devvp;
    523 	int flag, fmt;
    524 	struct proc *p;
    525 {
    526 	struct ed_softc *wd;
    527 	int part, error;
    528 
    529 	WDCDEBUG_PRINT(("edopen\n"), DEBUG_FUNCS);
    530 	wd = device_lookup(&ed_cd, DISKUNIT(devvp->v_rdev));
    531 	if (wd == NULL || (wd->sc_flags & EDF_INIT) == 0)
    532 		return (ENXIO);
    533 
    534 	devvp->v_devcookie = wd;
    535 
    536 	if ((error = ed_lock(wd)) != 0)
    537 		goto bad4;
    538 
    539 	if (wd->sc_dk.dk_openmask != 0) {
    540 		/*
    541 		 * If any partition is open, but the disk has been invalidated,
    542 		 * disallow further opens.
    543 		 */
    544 		if ((wd->sc_flags & WDF_LOADED) == 0) {
    545 			error = EIO;
    546 			goto bad3;
    547 		}
    548 	} else {
    549 		if ((wd->sc_flags & WDF_LOADED) == 0) {
    550 			wd->sc_flags |= WDF_LOADED;
    551 
    552 			/* Load the physical device parameters. */
    553 			ed_get_params(wd);
    554 
    555 			/* Load the partition info if not already loaded. */
    556 			edgetdisklabel(devvp);
    557 		}
    558 	}
    559 
    560 	part = DISKPART(devvp->v_rdev);
    561 
    562 	/* Check that the partition exists. */
    563 	if (part != RAW_PART &&
    564 	    (part >= wd->sc_dk.dk_label->d_npartitions ||
    565 	     wd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
    566 		error = ENXIO;
    567 		goto bad;
    568 	}
    569 
    570 	/* Insure only one open at a time. */
    571 	switch (fmt) {
    572 	case S_IFCHR:
    573 		wd->sc_dk.dk_copenmask |= (1 << part);
    574 		break;
    575 	case S_IFBLK:
    576 		wd->sc_dk.dk_bopenmask |= (1 << part);
    577 		break;
    578 	}
    579 	wd->sc_dk.dk_openmask =
    580 	    wd->sc_dk.dk_copenmask | wd->sc_dk.dk_bopenmask;
    581 
    582 	ed_unlock(wd);
    583 	return 0;
    584 
    585 bad:
    586 	if (wd->sc_dk.dk_openmask == 0) {
    587 	}
    588 
    589 bad3:
    590 	ed_unlock(wd);
    591 bad4:
    592 	return (error);
    593 }
    594 
/*
 * Close a partition: clear its bit in the open masks.  When the last
 * partition is closed, forget the in-core disklabel unless it was
 * pinned with DIOCKLABEL (WDF_KLABEL).
 */
int
edmcaclose(devvp, flag, fmt, p)
	struct vnode *devvp;
	int flag, fmt;
	struct proc *p;
{
	struct ed_softc *wd = devvp->v_devcookie;
	int part = DISKPART(devvp->v_rdev);
	int error;

	WDCDEBUG_PRINT(("edmcaclose\n"), DEBUG_FUNCS);
	if ((error = ed_lock(wd)) != 0)
		return error;

	switch (fmt) {
	case S_IFCHR:
		wd->sc_dk.dk_copenmask &= ~(1 << part);
		break;
	case S_IFBLK:
		wd->sc_dk.dk_bopenmask &= ~(1 << part);
		break;
	}
	wd->sc_dk.dk_openmask =
	    wd->sc_dk.dk_copenmask | wd->sc_dk.dk_bopenmask;

	if (wd->sc_dk.dk_openmask == 0) {
#if 0
		wd_flushcache(wd, AT_WAIT);
#endif
		/* XXXX Must wait for I/O to complete! */

		if (! (wd->sc_flags & WDF_KLABEL))
			wd->sc_flags &= ~WDF_LOADED;
	}

	ed_unlock(wd);

	return 0;
}
    634 
    635 static void
    636 edgetdefaultlabel(wd, lp)
    637 	struct ed_softc *wd;
    638 	struct disklabel *lp;
    639 {
    640 	WDCDEBUG_PRINT(("edgetdefaultlabel\n"), DEBUG_FUNCS);
    641 	memset(lp, 0, sizeof(struct disklabel));
    642 
    643 	lp->d_secsize = DEV_BSIZE;
    644 	lp->d_ntracks = wd->heads;
    645 	lp->d_nsectors = wd->sectors;
    646 	lp->d_ncylinders = wd->cyl;
    647 	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;
    648 
    649 	lp->d_type = DTYPE_ESDI;
    650 
    651 	strncpy(lp->d_typename, "ESDI", 16);
    652 	strncpy(lp->d_packname, "fictitious", 16);
    653 	lp->d_secperunit = wd->sc_capacity;
    654 	lp->d_rpm = 3600;
    655 	lp->d_interleave = 1;
    656 	lp->d_flags = 0;
    657 
    658 	lp->d_partitions[RAW_PART].p_offset = 0;
    659 	lp->d_partitions[RAW_PART].p_size =
    660 	lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
    661 	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
    662 	lp->d_npartitions = RAW_PART + 1;
    663 
    664 	lp->d_magic = DISKMAGIC;
    665 	lp->d_magic2 = DISKMAGIC;
    666 	lp->d_checksum = dkcksum(lp);
    667 }
    668 
/*
 * Fabricate a default disk label, and try to read the correct one.
 */
static void
edgetdisklabel(devvp)
	struct vnode *devvp;
{
	struct ed_softc *wd = devvp->v_devcookie;
	struct disklabel *lp = wd->sc_dk.dk_label;
	char *errstring;

	WDCDEBUG_PRINT(("edgetdisklabel\n"), DEBUG_FUNCS);

	memset(wd->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));

	/* Start from a label covering the whole disk. */
	edgetdefaultlabel(wd, lp);

#if 0
	wd->sc_badsect[0] = -1;

	if (wd->drvp->state > RECAL)
		wd->drvp->drive_flags |= DRIVE_RESET;
#endif
	/* Try to read the on-disk label; on failure, retry once. */
	errstring = readdisklabel(devvp,
	    edmcastrategy, lp, wd->sc_dk.dk_cpulabel);
	if (errstring) {
		/*
		 * This probably happened because the drive's default
		 * geometry doesn't match the DOS geometry.  We
		 * assume the DOS geometry is now in the label and try
		 * again.  XXX This is a kluge.
		 */
#if 0
		if (wd->drvp->state > RECAL)
			wd->drvp->drive_flags |= DRIVE_RESET;
#endif
		errstring = readdisklabel(devvp,
		    edmcastrategy, lp, wd->sc_dk.dk_cpulabel);
	}
	if (errstring) {
		printf("%s: %s\n", wd->sc_dev.dv_xname, errstring);
		return;
	}

#if 0
	if (wd->drvp->state > RECAL)
		wd->drvp->drive_flags |= DRIVE_RESET;
#endif
#ifdef HAS_BAD144_HANDLING
	/* Internalize the bad-sector table if the label says one exists. */
	if ((lp->d_flags & D_BADSECT) != 0)
		bad144intern(wd);
#endif
}
    722 
/*
 * Ioctl entry point: disklabel get/set/write, label write-protect and
 * keep-label toggles, and (under HAS_BAD144_HANDLING) bad-sector table
 * updates.  Requires the drive parameters to be loaded (WDF_LOADED).
 */
int
edmcaioctl(devvp, xfer, addr, flag, p)
	struct vnode *devvp;
	u_long xfer;
	caddr_t addr;
	int flag;
	struct proc *p;
{
	struct ed_softc *wd = devvp->v_devcookie;
	int error;
#ifdef __HAVE_OLD_DISKLABEL
	struct disklabel newlabel;
#endif

	WDCDEBUG_PRINT(("edioctl\n"), DEBUG_FUNCS);

	if ((wd->sc_flags & WDF_LOADED) == 0)
		return EIO;

	switch (xfer) {
#ifdef HAS_BAD144_HANDLING
	case DIOCSBAD:
		/* Install a new bad-sector table (requires write access). */
		if ((flag & FWRITE) == 0)
			return EBADF;
		wd->sc_dk.dk_cpulabel->bad = *(struct dkbad *)addr;
		wd->sc_dk.dk_label->d_flags |= D_BADSECT;
		bad144intern(wd);
		return 0;
#endif

	case DIOCGDINFO:
		/* Copy out the current in-core disklabel. */
		*(struct disklabel *)addr = *(wd->sc_dk.dk_label);
		return 0;
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
		/* Old-format label: only if it fits in OLDMAXPARTITIONS. */
		newlabel = *(wd->sc_dk.dk_label);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
		return 0;
#endif

	case DIOCGPART:
		/* Return pointers to the label and this device's partition. */
		((struct partinfo *)addr)->disklab = wd->sc_dk.dk_label;
		((struct partinfo *)addr)->part =
		    &wd->sc_dk.dk_label->d_partitions[DISKPART(devvp->v_rdev)];
		return 0;

	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:
#endif
	{
		struct disklabel *lp;

#ifdef __HAVE_OLD_DISKLABEL
		if (xfer == ODIOCSDINFO || xfer == ODIOCWDINFO) {
			/* Convert an old-format label in place. */
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, addr, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)addr;

		if ((flag & FWRITE) == 0)
			return EBADF;

		/* Serialize label updates and mark labelling in progress. */
		if ((error = ed_lock(wd)) != 0)
			return error;
		wd->sc_flags |= WDF_LABELLING;

		error = setdisklabel(wd->sc_dk.dk_label,
		    lp, /*wd->sc_dk.dk_openmask : */0,
		    wd->sc_dk.dk_cpulabel);
		if (error == 0) {
#if 0
			if (wd->drvp->state > RECAL)
				wd->drvp->drive_flags |= DRIVE_RESET;
#endif
			/* DIOCWDINFO additionally writes the label to disk. */
			if (xfer == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
			    || xfer == ODIOCWDINFO
#endif
			    )
				error = writedisklabel(devvp,
				    edmcastrategy, wd->sc_dk.dk_label,
				    wd->sc_dk.dk_cpulabel);
		}

		wd->sc_flags &= ~WDF_LABELLING;
		ed_unlock(wd);
		return error;
	}

	case DIOCKLABEL:
		/* Keep (or drop) the in-core label across last close. */
		if (*(int *)addr)
			wd->sc_flags |= WDF_KLABEL;
		else
			wd->sc_flags &= ~WDF_KLABEL;
		return 0;

	case DIOCWLABEL:
		/* Allow (or forbid) writes to the label area. */
		if ((flag & FWRITE) == 0)
			return EBADF;
		if (*(int *)addr)
			wd->sc_flags |= WDF_WLABEL;
		else
			wd->sc_flags &= ~WDF_WLABEL;
		return 0;

	case DIOCGDEFLABEL:
		edgetdefaultlabel(wd, (struct disklabel *)addr);
		return 0;
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		edgetdefaultlabel(wd, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
		return 0;
#endif

#ifdef notyet
	/* NOTE(review): this disabled block still references 'dev' and
	 * 'wdc', which do not exist in this function -- it would need
	 * rework before being enabled. */
	case DIOCWFORMAT:
		if ((flag & FWRITE) == 0)
			return EBADF;
		{
		register struct format_op *fop;
		struct iovec aiov;
		struct uio auio;

		fop = (struct format_op *)addr;
		aiov.iov_base = fop->df_buf;
		aiov.iov_len = fop->df_count;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = fop->df_count;
		auio.uio_segflg = 0;
		auio.uio_offset =
			fop->df_startblk * wd->sc_dk.dk_label->d_secsize;
		auio.uio_procp = p;
		error = physio(wdformat, NULL, dev, B_WRITE, minphys,
		    &auio);
		fop->df_count -= auio.uio_resid;
		fop->df_reg[0] = wdc->sc_status;
		fop->df_reg[1] = wdc->sc_error;
		return error;
		}
#endif

	default:
		return ENOTTY;
	}

#ifdef DIAGNOSTIC
	panic("edioctl: impossible");
#endif
}
    883 
    884 #if 0
    885 #ifdef B_FORMAT
    886 int
    887 edmcaformat(struct buf *bp)
    888 {
    889 
    890 	bp->b_flags |= B_FORMAT;
    891 	return edmcastrategy(bp);
    892 }
    893 #endif
    894 #endif
    895 
/*
 * Return the size of a swap partition in DEV_BSIZE units, or -1 if the
 * partition isn't FS_SWAP or the device can't be opened.  If the
 * partition isn't already open, it is opened temporarily via a vnode.
 */
int
edmcasize(dev)
	dev_t dev;
{
	struct ed_softc *wd;
	struct vnode *vp;
	int part, omask;
	int size;

	WDCDEBUG_PRINT(("edsize\n"), DEBUG_FUNCS);

	wd = device_lookup(&ed_cd, DISKUNIT(dev));
	if (wd == NULL)
		return (-1);

	part = DISKPART(dev);
	omask = wd->sc_dk.dk_openmask & (1 << part);

	/* XXXDEVVP */

	/* Not open yet: do a temporary open to get a valid label. */
	if (omask == 0) {
		if (bdevvp(dev, &vp) != 0)
			return (-1);
		if (edmcaopen(vp, 0, S_IFBLK, NULL) != 0) {
			vrele(vp);
			return (-1);
		}
	}
	if (wd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
		size = -1;
	else
		size = wd->sc_dk.dk_label->d_partitions[part].p_size *
		    (wd->sc_dk.dk_label->d_secsize / DEV_BSIZE);

	/* Undo the temporary open. */
	if (omask == 0) {
		if (edmcaclose(vp, 0, S_IFBLK, NULL) != 0)
			size = -1;
		vrele(vp);
	}
	return (size);
}
    937 
    938 /* #define WD_DUMP_NOT_TRUSTED if you just want to watch */
    939 static int eddoingadump = 0;
    940 static int eddumprecalibrated = 0;
    941 static int eddumpmulti = 1;
    942 
    943 /*
    944  * Dump core after a system crash.
    945  */
    946 int
    947 edmcadump(dev, blkno, va, size)
    948 	dev_t dev;
    949 	daddr_t blkno;
    950 	caddr_t va;
    951 	size_t size;
    952 {
    953 	struct ed_softc *ed;	/* disk unit to do the I/O */
    954 	struct disklabel *lp;   /* disk's disklabel */
    955 	int part;
    956 	int nblks;	/* total number of sectors left to write */
    957 
    958 	/* Check if recursive dump; if so, punt. */
    959 	if (eddoingadump)
    960 		return EFAULT;
    961 	eddoingadump = 1;
    962 
    963 	ed = device_lookup(&ed_cd, DISKUNIT(dev));
    964 	if (ed == NULL)
    965 		return (ENXIO);
    966 
    967 	part = DISKPART(dev);
    968 
    969 	/* Make sure it was initialized. */
    970 	if ((ed->sc_flags & EDF_INIT) == 0)
    971 		return ENXIO;
    972 
    973 	/* Convert to disk sectors.  Request must be a multiple of size. */
    974 	lp = ed->sc_dk.dk_label;
    975 	if ((size % lp->d_secsize) != 0)
    976 		return EFAULT;
    977 	nblks = size / lp->d_secsize;
    978 	blkno = blkno / (lp->d_secsize / DEV_BSIZE);
    979 
    980 	/* Check transfer bounds against partition size. */
    981 	if ((blkno < 0) || ((blkno + nblks) > lp->d_partitions[part].p_size))
    982 		return EINVAL;
    983 
    984 	/* Offset block number to start of partition. */
    985 	blkno += lp->d_partitions[part].p_offset;
    986 
    987 	/* Recalibrate, if first dump transfer. */
    988 	if (eddumprecalibrated == 0) {
    989 		eddumprecalibrated = 1;
    990 		eddumpmulti = 8;
    991 #if 0
    992 		wd->drvp->state = RESET;
    993 #endif
    994 	}
    995 
    996 	while (nblks > 0) {
    997 		ed->sc_data = va;
    998 		ed->sc_rawblkno = blkno;
    999 		ed->sc_bcount = min(nblks, eddumpmulti) * lp->d_secsize;
   1000 		ed->sc_read = 0;
   1001 
   1002 		ed_bio(ed, 0, 1);
   1003 		if (ed->sc_error)
   1004 			return (ed->sc_error);
   1005 
   1006 		ed_bio_done(ed);
   1007 
   1008 		/* update block count */
   1009 		nblks -= min(nblks, eddumpmulti);
   1010 		blkno += min(nblks, eddumpmulti);
   1011 		va += min(nblks, eddumpmulti) * lp->d_secsize;
   1012 	}
   1013 
   1014 	eddoingadump = 0;
   1015 	return (0);
   1016 }
   1017 
   1018 #ifdef HAS_BAD144_HANDLING
   1019 /*
   1020  * Internalize the bad sector table.
   1021  */
   1022 static void
   1023 bad144intern(wd)
   1024 	struct ed_softc *wd;
   1025 {
   1026 	struct dkbad *bt = &wd->sc_dk.dk_cpulabel->bad;
   1027 	struct disklabel *lp = wd->sc_dk.dk_label;
   1028 	int i = 0;
   1029 
   1030 	WDCDEBUG_PRINT(("bad144intern\n"), DEBUG_XFERS);
   1031 
   1032 	for (; i < NBT_BAD; i++) {
   1033 		if (bt->bt_bad[i].bt_cyl == 0xffff)
   1034 			break;
   1035 		wd->sc_badsect[i] =
   1036 		    bt->bt_bad[i].bt_cyl * lp->d_secpercyl +
   1037 		    (bt->bt_bad[i].bt_trksec >> 8) * lp->d_nsectors +
   1038 		    (bt->bt_bad[i].bt_trksec & 0xff);
   1039 	}
   1040 	for (; i < NBT_BAD+1; i++)
   1041 		wd->sc_badsect[i] = -1;
   1042 }
   1043 #endif
   1044 
   1045 static int
   1046 ed_get_params(ed)
   1047 	struct ed_softc *ed;
   1048 {
   1049 	u_int16_t cmd_args[2];
   1050 
   1051 	/*
   1052 	 * Get Device Configuration (09).
   1053 	 */
   1054 	cmd_args[0] = 14;	/* Options: 00s110, s: 0=Physical 1=Pseudo */
   1055 	cmd_args[1] = 0;
   1056 	if (edc_run_cmd(ed->edc_softc, CMD_GET_DEV_CONF, ed->sc_devno,
   1057 	    cmd_args, 2, 0, 1))
   1058 		return (1);
   1059 
   1060 	ed->spares = ed->sc_status_block[1] >> 8;
   1061 	ed->drv_flags = ed->sc_status_block[1] & 0x1f;
   1062 	ed->rba = ed->sc_status_block[2] |
   1063 		(ed->sc_status_block[3] << 16);
   1064 	/* Instead of using:
   1065 		ed->cyl = ed->sc_status_block[4];
   1066 		ed->heads = ed->sc_status_block[5] & 0xff;
   1067 		ed->sectors = ed->sc_status_block[5] >> 8;
   1068 	 * we fabricate the numbers from RBA count, so that
   1069 	 * number of sectors is 32 and heads 64. This seems
   1070 	 * to be necessary for integrated ESDI controller.
   1071 	 */
   1072 	ed->sectors = 32;
   1073 	ed->heads = 64;
   1074 	ed->cyl = ed->rba / (ed->heads * ed->sectors);
   1075 	ed->sc_capacity = ed->rba;
   1076 
   1077 	return (0);
   1078 }
   1079 
   1080 /*
   1081  * Our shutdown hook. We attempt to park disk's head only.
   1082  */
   1083 void
   1084 ed_shutdown(arg)
   1085 	void *arg;
   1086 {
   1087 #if 0
   1088 	struct ed_softc *ed = arg;
   1089 	u_int16_t cmd_args[2];
   1090 
   1091 	/* Issue Park Head command */
   1092 	cmd_args[0] = 6;	/* Options: 000110 */
   1093 	cmd_args[1] = 0;
   1094 	(void) edc_run_cmd(ed->edc_softc, CMD_PARK_HEAD, ed->sc_devno,
   1095 			cmd_args, 2, 0);
   1096 #endif
   1097 }
   1098 
   1099 /*
   1100  * Main worker thread function.
   1101  */
   1102 void
   1103 edworker(arg)
   1104 	void *arg;
   1105 {
   1106 	struct ed_softc *ed = (struct ed_softc *) arg;
   1107 	struct buf *bp;
   1108 	int s;
   1109 
   1110 	config_pending_decr();
   1111 
   1112 	for(;;) {
   1113 		/* Wait until awakened */
   1114 		(void) tsleep(&ed->sc_q, PRIBIO, "edidle", 0);
   1115 
   1116 		if ((ed->sc_flags & EDF_PROCESS_QUEUE) == 0)
   1117 			panic("edworker: expecting process queue");
   1118 		ed->sc_flags &= ~EDF_PROCESS_QUEUE;
   1119 
   1120 		for(;;) {
   1121 			/* Is there a buf for us ? */
   1122 			simple_lock(&ed->sc_q_lock);
   1123 			if ((bp = BUFQ_FIRST(&ed->sc_q)) == NULL) {
   1124 				simple_unlock(&ed->sc_q_lock);
   1125 				break;
   1126 			}
   1127 			BUFQ_REMOVE(&ed->sc_q, bp);
   1128 			simple_unlock(&ed->sc_q_lock);
   1129 
   1130 			/* Schedule i/o operation */
   1131 			ed->sc_error = 0;
   1132 			s = splbio();
   1133 			__edstart(ed, bp);
   1134 
   1135 			/*
   1136 			 * Wait until the command executes; edc_intr() wakes
   1137 			 * us up.
   1138 			 */
   1139 			if (ed->sc_error == 0)
   1140 				(void)tsleep(&ed->edc_softc, PRIBIO, "edwrk",0);
   1141 
   1142 			/* Handle i/o results */
   1143 			edmcadone(ed, bp);
   1144 			splx(s);
   1145 		}
   1146 	}
   1147 }
   1148