/*	$NetBSD: ld_iop.c,v 1.37 2017/02/27 21:32:33 jdolecek Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * I2O front-end for ld(4) driver, supporting random block storage class
 * devices.  Currently, this doesn't handle anything more complex than
 * fixed direct-access devices.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_iop.c,v 1.37 2017/02/27 21:32:33 jdolecek Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/endian.h>
#include <sys/dkio.h>
#include <sys/disk.h>
#include <sys/proc.h>

#include <sys/bus.h>

#include <dev/ldvar.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopvar.h>

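/*
 * Command timeout, in milliseconds (30 seconds).  iop_msg_post() takes
 * its timeout in milliseconds; the attach code scales this by a further
 * 1000 when programming the DDM's timeout base parameters, which appear
 * to be expressed in microseconds.
 */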
#define	LD_IOP_TIMEOUT		30*1000

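/*
 * sc_flags bits: LD_IOP_CLAIMED means we hold a primary-user claim on
 * the target; LD_IOP_NEW_EVTMASK means the DDM has acknowledged a
 * change to our event mask (see ld_iop_unconfig()).
 */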
#define	LD_IOP_CLAIMED		0x01
#define	LD_IOP_NEW_EVTMASK	0x02

struct ld_iop_softc {
	struct	ld_softc sc_ld;			/* generic ld(4) state; must be first */
	struct	iop_initiator sc_ii;		/* initiator for block I/O */
	struct	iop_initiator sc_eventii;	/* initiator for event notifications */
	int	sc_flags;			/* LD_IOP_* flags */
};

static void	ld_iop_adjqparam(device_t, int);
static void	ld_iop_attach(device_t, device_t, void *);
static int	ld_iop_detach(device_t, int);
static int	ld_iop_dump(struct ld_softc *, void *, int, int);
static int	ld_iop_flush(struct ld_softc *, bool);
static int	ld_iop_ioctl(struct ld_softc *, u_long, void *, int32_t, bool);
static void	ld_iop_intr(device_t, struct iop_msg *, void *);
static void	ld_iop_intr_event(device_t, struct iop_msg *, void *);
static int	ld_iop_match(device_t, cfdata_t, void *);
static int	ld_iop_start(struct ld_softc *, struct buf *);
static void	ld_iop_unconfig(struct ld_iop_softc *, int);

CFATTACH_DECL_NEW(ld_iop, sizeof(struct ld_iop_softc),
    ld_iop_match, ld_iop_attach, ld_iop_detach, NULL);

/* Printable names for the I2O RBS detailed status codes. */
static const char * const ld_iop_errors[] = {
	"success",
	"media error",
	"access error",
	"device failure",
	"device not ready",
	"media not present",
	"media locked",
	"media failure",
	"protocol failure",
	"bus failure",
	"access violation",
	"media write protected",
	"device reset",
	"volume changed, waiting for acknowledgement",
	"timeout",
};

static int
ld_iop_match(device_t parent, cfdata_t match, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	return (ia->ia_class == I2O_CLASS_RANDOM_BLOCK_STORAGE);
}

static void
ld_iop_attach(device_t parent, device_t self, void *aux)
{
	struct iop_attach_args *ia = aux;
	struct ld_iop_softc *sc = device_private(self);
	struct iop_softc *iop = device_private(parent);
	struct ld_softc *ld = &sc->sc_ld;
	int rv, evreg, enable;
	const char *typestr, *fixedstr;
	u_int cachesz;
	u_int32_t timeoutbase, rwvtimeoutbase, rwvtimeout;
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		union {
			struct	i2o_param_rbs_cache_control cc;
			struct	i2o_param_rbs_device_info bdi;
		} p;
	} __packed param;

	ld->sc_dv = self;
	evreg = 0;

	/* Register us as an initiator. */
	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = ld_iop_intr;
	sc->sc_ii.ii_adjqparam = ld_iop_adjqparam;
	sc->sc_ii.ii_flags = 0;
	sc->sc_ii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_ii);

	/* Register another initiator to handle events from the device. */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = ld_iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_eventii);

	rv = iop_util_eventreg(iop, &sc->sc_eventii,
	    I2O_EVENT_GEN_EVENT_MASK_MODIFIED |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		aprint_error_dev(self, "unable to register for events\n");
		goto bad;
	}
	evreg = 1;

	/*
	 * Start out with one queued command.  The `iop' driver will adjust
	 * the queue parameters once we're up and running, via
	 * ld_iop_adjqparam().
	 */
	ld->sc_maxqueuecnt = 1;

	ld->sc_maxxfer = IOP_MAX_XFER;
	ld->sc_dump = ld_iop_dump;
	ld->sc_ioctl = ld_iop_ioctl;
	ld->sc_start = ld_iop_start;

	/* Say what the device is. */
	printf(":");
	iop_print_ident(iop, ia->ia_tid);

	/*
	 * Claim the device so that we don't get any nasty surprises.  Allow
	 * failure.
	 */
	rv = iop_util_claim(iop, &sc->sc_ii, 0,
	    I2O_UTIL_CLAIM_CAPACITY_SENSITIVE |
	    I2O_UTIL_CLAIM_NO_PEER_SERVICE |
	    I2O_UTIL_CLAIM_NO_MANAGEMENT_SERVICE |
	    I2O_UTIL_CLAIM_PRIMARY_USER);
	sc->sc_flags = rv ? 0 : LD_IOP_CLAIMED;

	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_DEVICE_INFO,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	ld->sc_secsize = le32toh(param.p.bdi.blocksize);
	ld->sc_secperunit = (int)
	    (le64toh(param.p.bdi.capacity) / ld->sc_secsize);

	switch (param.p.bdi.type) {
	case I2O_RBS_TYPE_DIRECT:
		typestr = "direct access";
		enable = 1;
		break;
	case I2O_RBS_TYPE_WORM:
		typestr = "WORM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_CDROM:
		typestr = "CD-ROM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_OPTICAL:
		typestr = "optical";
		enable = 0;
		break;
	default:
		typestr = "unknown";
		enable = 0;
		break;
	}

	if ((le32toh(param.p.bdi.capabilities) & I2O_RBS_CAP_REMOVABLE_MEDIA)
	    != 0) {
		/* ld->sc_flags = LDF_REMOVABLE; */
		fixedstr = "removable";
		enable = 0;
	} else
		fixedstr = "fixed";

	printf(" %s, %s", typestr, fixedstr);

	/*
	 * Determine if the device has a private cache.  If so, print the
	 * cache size.  Even if the device doesn't appear to have a cache,
	 * we perform a flush at shutdown.
	 */
	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_CACHE_CONTROL,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	if ((cachesz = le32toh(param.p.cc.totalcachesize)) != 0)
		printf(", %ukB cache", cachesz >> 10);

	printf("\n");

	/*
	 * Configure the DDM's timeout functions to time out all commands
	 * after 30 seconds.  The timeout base parameters appear to be
	 * expressed in microseconds, hence the further scaling of the
	 * millisecond LD_IOP_TIMEOUT value by 1000.
	 */
	timeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeout = 0;

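	/*
	 * Failure of any of these sets is not fatal; the return values
	 * are ignored and the DDM keeps its defaults.
	 */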
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &timeoutbase, sizeof(timeoutbase),
	    I2O_PARAM_RBS_OPERATION_timeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeoutbase, sizeof(rwvtimeoutbase),
	    I2O_PARAM_RBS_OPERATION_rwvtimeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeout, sizeof(rwvtimeout),
	    I2O_PARAM_RBS_OPERATION_rwvtimeout);

	if (enable)
		ld->sc_flags |= LDF_ENABLED;
	else
		aprint_error_dev(self, "device not yet supported\n");

	ldattach(ld, BUFQ_DISK_DEFAULT_STRAT);
	return;

 bad:
	ld_iop_unconfig(sc, evreg);
}

static void
ld_iop_unconfig(struct ld_iop_softc *sc, int evreg)
{
	struct iop_softc *iop;

	iop = device_private(device_parent(sc->sc_ld.sc_dv));

	if ((sc->sc_flags & LD_IOP_CLAIMED) != 0)
		iop_util_claim(iop, &sc->sc_ii, 1,
		    I2O_UTIL_CLAIM_PRIMARY_USER);

	if (evreg) {
		/*
		 * Mask off events, and wait up to 5 seconds for a reply.
		 * Note that some adapters won't reply to this (XXX We
		 * should check the event capabilities).
		 */
		mutex_spin_enter(&iop->sc_intrlock);
		sc->sc_flags &= ~LD_IOP_NEW_EVTMASK;
		mutex_spin_exit(&iop->sc_intrlock);

		iop_util_eventreg(iop, &sc->sc_eventii,
		    I2O_EVENT_GEN_EVENT_MASK_MODIFIED);

		mutex_spin_enter(&iop->sc_intrlock);
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			cv_timedwait(&sc->sc_eventii.ii_cv,
			    &iop->sc_intrlock, hz * 5);
		mutex_spin_exit(&iop->sc_intrlock);
	}

	iop_initiator_unregister(iop, &sc->sc_eventii);
	iop_initiator_unregister(iop, &sc->sc_ii);
}

static int
ld_iop_detach(device_t self, int flags)
{
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv;

	sc = device_private(self);
	iop = device_private(device_parent(self));

	if ((rv = ldbegindetach(&sc->sc_ld, flags)) != 0)
		return (rv);

	/*
	 * Abort any requests queued with the IOP, but allow requests that
	 * are already in progress to complete.
	 */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		iop_util_abort(iop, &sc->sc_ii, 0, 0,
		    I2O_UTIL_ABORT_WILD | I2O_UTIL_ABORT_CLEAN);

	ldenddetach(&sc->sc_ld);

	/* Un-claim the target, and un-register our initiators. */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		ld_iop_unconfig(sc, 1);

	return (0);
}

static int
ld_iop_start(struct ld_softc *ld, struct buf *bp)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_read *mf;
	u_int rv, flags, write;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = device_private(ld->sc_dv);
	iop = device_private(device_parent(ld->sc_dv));

	im = iop_msg_alloc(iop, 0);
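	/* Stash the buf; ld_iop_intr() recovers it from im_dvcontext. */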
	im->im_dvcontext = bp;

	write = ((bp->b_flags & B_READ) == 0);
	ba = (u_int64_t)bp->b_rawblkno * ld->sc_secsize;

	/*
	 * Write through the cache when performing synchronous writes.  When
	 * performing a read, we don't request that the DDM cache the data,
	 * as there's little advantage to it.
	 */
	if (write) {
		if ((bp->b_flags & B_ASYNC) == 0)
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WT;
		else
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WB;
	} else
		flags = 0;

	/*
	 * Fill the message frame.  We can use the block_read structure for
	 * both reads and writes, as it's almost identical to the
	 * block_write structure.
	 */
	mf = (struct i2o_rbs_block_read *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_read);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid,
	    write ? I2O_RBS_BLOCK_WRITE : I2O_RBS_BLOCK_READ);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = flags | (1 << 16);		/* flags, plus a time multiplier of 1 */
	mf->datasize = bp->b_bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	/* Map the data transfer and enqueue the command. */
	rv = iop_msg_map_bio(iop, im, mb, bp->b_data, bp->b_bcount, write);
	if (rv == 0) {
		if ((rv = iop_post(iop, mb)) != 0) {
			iop_msg_unmap(iop, im);
			iop_msg_free(iop, im);
		}
	}
	return (rv);
}

static int
ld_iop_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_write *mf;
	int rv, bcount;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = device_private(ld->sc_dv);
	iop = device_private(device_parent(ld->sc_dv));
	bcount = blkcnt * ld->sc_secsize;
	ba = (u_int64_t)blkno * ld->sc_secsize;
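	/*
	 * Dumps run with the system in an unknown state, so the message
	 * is polled for completion rather than interrupt driven.
	 */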
	im = iop_msg_alloc(iop, IM_POLL);

	mf = (struct i2o_rbs_block_write *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_write);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_BLOCK_WRITE);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = I2O_RBS_BLOCK_WRITE_CACHE_WT | (1 << 16);
	mf->datasize = bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	if ((rv = iop_msg_map(iop, im, mb, data, bcount, 1, NULL)) != 0) {
		iop_msg_free(iop, im);
		return (rv);
	}

	rv = iop_msg_post(iop, im, mb, LD_IOP_TIMEOUT * 2);
	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	return (rv);
}

static int
ld_iop_flush(struct ld_softc *ld, bool poll)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_cache_flush mf;
	int rv;

	sc = device_private(ld->sc_dv);
	iop = device_private(device_parent(ld->sc_dv));
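	/* XXX The poll argument is ignored; we always sleep for the reply. */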
	im = iop_msg_alloc(iop, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_rbs_cache_flush);
	mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_CACHE_FLUSH);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = 1 << 16;			/* time multiplier */

	/* Ancient disks will return an error here. */
	rv = iop_msg_post(iop, im, &mf, LD_IOP_TIMEOUT * 2);
	iop_msg_free(iop, im);
	return (rv);
}

static int
ld_iop_ioctl(struct ld_softc *ld, u_long cmd, void *addr, int32_t flag, bool poll)
{
	int error;

	switch (cmd) {
	case DIOCCACHESYNC:
		error = ld_iop_flush(ld, poll);
		break;

	default:
		error = EPASSTHROUGH;
		break;
	}

	return error;
}

static void
ld_iop_intr(device_t dv, struct iop_msg *im, void *reply)
{
	struct i2o_rbs_reply *rb;
	struct buf *bp;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int err, detail;
	const char *errstr;

	rb = reply;
	bp = im->im_dvcontext;
	sc = device_private(dv);
	iop = device_private(device_parent(dv));

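	/*
	 * A reply with I2O_MSGFLAGS_FAIL set is a transport-level failure
	 * frame; otherwise, a non-success request status indicates a
	 * per-command error, decoded via ld_iop_errors[].
	 */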
	err = ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0);

	if (!err && rb->reqstatus != I2O_STATUS_SUCCESS) {
		detail = le16toh(rb->detail);
		if (detail >= __arraycount(ld_iop_errors))
			errstr = "<unknown>";
		else
			errstr = ld_iop_errors[detail];
		aprint_error_dev(dv, "error 0x%04x: %s\n", detail, errstr);
		err = 1;
	}

	if (err) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else
		bp->b_resid = bp->b_bcount - le32toh(rb->transfercount);

	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	lddone(&sc->sc_ld, bp);
}

static void
ld_iop_intr_event(device_t dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	sc = device_private(dv);

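	/*
	 * An event-mask-modified notification is the acknowledgement that
	 * ld_iop_unconfig() sleeps on; record it and wake the waiter.
	 */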
	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED) {
		iop = device_private(device_parent(dv));
		mutex_spin_enter(&iop->sc_intrlock);
		sc->sc_flags |= LD_IOP_NEW_EVTMASK;
		cv_broadcast(&sc->sc_eventii.ii_cv);
		mutex_spin_exit(&iop->sc_intrlock);
		return;
	}

	printf("%s: event 0x%08x received\n", device_xname(dv), event);
}

static void
ld_iop_adjqparam(device_t dv, int mpi)
{
	struct ld_iop_softc *sc = device_private(dv);
	struct iop_softc *iop = device_private(device_parent(dv));
	struct ld_softc *ld = &sc->sc_ld;

	/*
	 * AMI controllers seem to lose the plot if you hand off lots of
	 * queued commands.
	 */
	if (le16toh(I2O_ORG_AMI) == iop->sc_status.orgid && mpi > 64)
		mpi = 64;

	ldadjqparam(ld, mpi);
}