/*	$NetBSD: ld_iop.c,v 1.23.8.2 2007/05/27 00:17:17 ad Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * I2O front-end for ld(4) driver, supporting random block storage class
 * devices.  Currently, this doesn't handle anything more complex than
 * fixed direct-access devices.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_iop.c,v 1.23.8.2 2007/05/27 00:17:17 ad Exp $");

#include "opt_i2o.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/endian.h>
#include <sys/dkio.h>
#include <sys/disk.h>
#include <sys/proc.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <machine/bus.h>

#include <dev/ldvar.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopvar.h>

#define	LD_IOP_TIMEOUT		30*1000

#define	LD_IOP_CLAIMED		0x01
#define	LD_IOP_NEW_EVTMASK	0x02
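/*
 * sc_flags bits: LD_IOP_CLAIMED records that the target was successfully
 * claimed as primary user; LD_IOP_NEW_EVTMASK records that the DDM has
 * acknowledged an event mask update (ld_iop_unconfig() waits for it).
 */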

struct ld_iop_softc {
	struct	ld_softc sc_ld;
	struct	iop_initiator sc_ii;
	struct	iop_initiator sc_eventii;
	int	sc_flags;
};
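/*
 * Note that sc_ld must remain the first member: the ld(4) midlayer hands
 * back struct ld_softc pointers which this driver casts to ld_iop_softc
 * (see ld_iop_start() and ld_iop_dump()).
 */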

static void	ld_iop_adjqparam(struct device *, int);
static void	ld_iop_attach(struct device *, struct device *, void *);
static int	ld_iop_detach(struct device *, int);
static int	ld_iop_dump(struct ld_softc *, void *, int, int);
static int	ld_iop_flush(struct ld_softc *);
static void	ld_iop_intr(struct device *, struct iop_msg *, void *);
static void	ld_iop_intr_event(struct device *, struct iop_msg *, void *);
static int	ld_iop_match(struct device *, struct cfdata *, void *);
static int	ld_iop_start(struct ld_softc *, struct buf *);
static void	ld_iop_unconfig(struct ld_iop_softc *, int);

CFATTACH_DECL(ld_iop, sizeof(struct ld_iop_softc),
    ld_iop_match, ld_iop_attach, ld_iop_detach, NULL);

static const char * const ld_iop_errors[] = {
	"success",
	"media error",
	"access error",
	"device failure",
	"device not ready",
	"media not present",
	"media locked",
	"media failure",
	"protocol failure",
	"bus failure",
	"access violation",
	"media write protected",
	"device reset",
	"volume changed, waiting for acknowledgement",
	"timeout",
};
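/*
 * Indexed by the detailed status code from an RBS reply (rb->detail);
 * ld_iop_intr() range-checks the code before indexing.
 */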

static int
ld_iop_match(struct device *parent, struct cfdata *match,
    void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	return (ia->ia_class == I2O_CLASS_RANDOM_BLOCK_STORAGE);
}

static void
ld_iop_attach(struct device *parent, struct device *self, void *aux)
{
	struct iop_attach_args *ia;
	struct ld_softc *ld;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv, evreg, enable;
	const char *typestr, *fixedstr;
	u_int cachesz;
	u_int32_t timeoutbase, rwvtimeoutbase, rwvtimeout;
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		union {
			struct	i2o_param_rbs_cache_control cc;
			struct	i2o_param_rbs_device_info bdi;
		} p;
	} __attribute__ ((__packed__)) param;
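	/*
	 * The parameter buffer is filled in directly by the IOP, so it
	 * must be packed to match the wire format exactly.
	 */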

	sc = device_private(self);
	ld = &sc->sc_ld;
	iop = device_private(parent);
	ia = (struct iop_attach_args *)aux;
	evreg = 0;

	/* Register us as an initiator. */
	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = ld_iop_intr;
	sc->sc_ii.ii_adjqparam = ld_iop_adjqparam;
	sc->sc_ii.ii_flags = 0;
	sc->sc_ii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_ii);

	/* Register another initiator to handle events from the device. */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = ld_iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_eventii);

	rv = iop_util_eventreg(iop, &sc->sc_eventii,
	    I2O_EVENT_GEN_EVENT_MASK_MODIFIED |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n", self->dv_xname);
		goto bad;
	}
	evreg = 1;

	/*
	 * Start out with one queued command.  The `iop' driver will adjust
	 * the queue parameters once we're up and running.
	 */
	ld->sc_maxqueuecnt = 1;

	ld->sc_maxxfer = IOP_MAX_XFER;
	ld->sc_dump = ld_iop_dump;
	ld->sc_flush = ld_iop_flush;
	ld->sc_start = ld_iop_start;

	/* Say what the device is. */
	printf(":");
	iop_print_ident(iop, ia->ia_tid);

	/*
	 * Claim the device so that we don't get any nasty surprises.  Allow
	 * failure.
	 */
	rv = iop_util_claim(iop, &sc->sc_ii, 0,
	    I2O_UTIL_CLAIM_CAPACITY_SENSITIVE |
	    I2O_UTIL_CLAIM_NO_PEER_SERVICE |
	    I2O_UTIL_CLAIM_NO_MANAGEMENT_SERVICE |
	    I2O_UTIL_CLAIM_PRIMARY_USER);
	sc->sc_flags = rv ? 0 : LD_IOP_CLAIMED;

	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_DEVICE_INFO,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	ld->sc_secsize = le32toh(param.p.bdi.blocksize);
	ld->sc_secperunit = (int)
	    (le64toh(param.p.bdi.capacity) / ld->sc_secsize);

	switch (param.p.bdi.type) {
	case I2O_RBS_TYPE_DIRECT:
		typestr = "direct access";
		enable = 1;
		break;
	case I2O_RBS_TYPE_WORM:
		typestr = "WORM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_CDROM:
		typestr = "CD-ROM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_OPTICAL:
		typestr = "optical";
		enable = 0;
		break;
	default:
		typestr = "unknown";
		enable = 0;
		break;
	}

	if ((le32toh(param.p.bdi.capabilities) & I2O_RBS_CAP_REMOVABLE_MEDIA)
	    != 0) {
		/* ld->sc_flags = LDF_REMOVABLE; */
		fixedstr = "removable";
		enable = 0;
	} else
		fixedstr = "fixed";

	printf(" %s, %s", typestr, fixedstr);

	/*
	 * Determine if the device has a private cache.  If so, print the
	 * cache size.  Even if the device doesn't appear to have a cache,
	 * we perform a flush at shutdown.
	 */
	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_CACHE_CONTROL,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	if ((cachesz = le32toh(param.p.cc.totalcachesize)) != 0)
		printf(", %dkB cache", cachesz >> 10);

	printf("\n");

	/*
	 * Configure the DDM's timeout functions to time out all commands
	 * after 30 seconds.
	 */
	timeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeout = 0;

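	/*
	 * LD_IOP_TIMEOUT is in milliseconds; the extra factor of 1000 in
	 * the base values above suggests that the OPERATION parameter
	 * group expects finer-grained units (an assumption; the I2O spec
	 * defines the exact units).  rwvtimeout is left at zero,
	 * presumably so that only the base timeout governs
	 * read/write/verify commands.
	 */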
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &timeoutbase, sizeof(timeoutbase),
	    I2O_PARAM_RBS_OPERATION_timeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeoutbase, sizeof(rwvtimeoutbase),
	    I2O_PARAM_RBS_OPERATION_rwvtimeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeout, sizeof(rwvtimeout),
	    I2O_PARAM_RBS_OPERATION_rwvtimeout);

	if (enable)
		ld->sc_flags |= LDF_ENABLED;
	else
		printf("%s: device not yet supported\n", self->dv_xname);

	ldattach(ld);
	return;

 bad:
	ld_iop_unconfig(sc, evreg);
}

static void
ld_iop_unconfig(struct ld_iop_softc *sc, int evreg)
{
	struct iop_softc *iop;

	iop = (struct iop_softc *)device_parent(&sc->sc_ld.sc_dv);

	if ((sc->sc_flags & LD_IOP_CLAIMED) != 0)
		iop_util_claim(iop, &sc->sc_ii, 1,
		    I2O_UTIL_CLAIM_PRIMARY_USER);

	if (evreg) {
		/*
		 * Mask off events, and wait up to 5 seconds for a reply.
		 * Note that some adapters won't reply to this (XXX We
		 * should check the event capabilities).
		 */
		mutex_spin_enter(&iop->sc_intrlock);
		sc->sc_flags &= ~LD_IOP_NEW_EVTMASK;
		mutex_spin_exit(&iop->sc_intrlock);

		iop_util_eventreg(iop, &sc->sc_eventii,
		    I2O_EVENT_GEN_EVENT_MASK_MODIFIED);

		mutex_spin_enter(&iop->sc_intrlock);
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			cv_timedwait(&sc->sc_eventii.ii_cv,
			    &iop->sc_intrlock, hz * 5);
		mutex_spin_exit(&iop->sc_intrlock);
	}

	iop_initiator_unregister(iop, &sc->sc_eventii);
	iop_initiator_unregister(iop, &sc->sc_ii);
}

static int
ld_iop_detach(struct device *self, int flags)
{
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv;

	sc = device_private(self);
	iop = device_private(device_parent(self));

	if ((rv = ldbegindetach(&sc->sc_ld, flags)) != 0)
		return (rv);

	/*
	 * Abort any requests queued with the IOP, but allow requests that
	 * are already in progress to complete.
	 */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		iop_util_abort(iop, &sc->sc_ii, 0, 0,
		    I2O_UTIL_ABORT_WILD | I2O_UTIL_ABORT_CLEAN);

	ldenddetach(&sc->sc_ld);

	/* Un-claim the target, and un-register our initiators. */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		ld_iop_unconfig(sc, 1);

	return (0);
}

static int
ld_iop_start(struct ld_softc *ld, struct buf *bp)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_read *mf;
	u_int rv, flags, write;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)device_parent(&ld->sc_dv);

	im = iop_msg_alloc(iop, 0);
	im->im_dvcontext = bp;

	write = ((bp->b_flags & B_READ) == 0);
	ba = (u_int64_t)bp->b_rawblkno * ld->sc_secsize;

	/*
	 * Write through the cache when performing synchronous writes.  When
	 * performing a read, we don't request that the DDM cache the data,
	 * as there's little advantage to it.
	 */
	if (write) {
		if ((bp->b_flags & B_ASYNC) == 0)
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WT;
		else
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WB;
	} else
		flags = 0;

	/*
	 * Fill the message frame.  We can use the block_read structure for
	 * both reads and writes, as it's almost identical to the
	 * block_write structure.
	 */
	mf = (struct i2o_rbs_block_read *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_read);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid,
	    write ? I2O_RBS_BLOCK_WRITE : I2O_RBS_BLOCK_READ);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = flags | (1 << 16);		/* flags & time multiplier */
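	/*
	 * The 1 << 16 above sets the TimeMultiplier byte (bits 16-23) to
	 * 1, which is assumed to leave the DDM's timeout base unscaled.
	 */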
	mf->datasize = bp->b_bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	/* Map the data transfer and enqueue the command. */
	rv = iop_msg_map_bio(iop, im, mb, bp->b_data, bp->b_bcount, write);
	if (rv == 0) {
		if ((rv = iop_post(iop, mb)) != 0) {
			iop_msg_unmap(iop, im);
			iop_msg_free(iop, im);
		}
	}
	return (rv);
}

static int
ld_iop_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_write *mf;
	int rv, bcount;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)device_parent(&ld->sc_dv);
	bcount = blkcnt * ld->sc_secsize;
	ba = (u_int64_t)blkno * ld->sc_secsize;
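	/*
	 * Dumps run in a polling context (typically after a panic, with
	 * interrupts disabled), so the message is allocated with IM_POLL
	 * and iop_msg_post() spins for the completion below.
	 */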
	im = iop_msg_alloc(iop, IM_POLL);

	mf = (struct i2o_rbs_block_write *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_write);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_BLOCK_WRITE);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = I2O_RBS_BLOCK_WRITE_CACHE_WT | (1 << 16);
	mf->datasize = bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	if ((rv = iop_msg_map(iop, im, mb, data, bcount, 1, NULL)) != 0) {
		iop_msg_free(iop, im);
		return (rv);
	}

	rv = iop_msg_post(iop, im, mb, LD_IOP_TIMEOUT * 2);
	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	return (rv);
}

static int
ld_iop_flush(struct ld_softc *ld)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_cache_flush mf;
	int rv;

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)device_parent(&ld->sc_dv);
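	/* IM_WAIT: iop_msg_post() will sleep until the reply arrives. */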
	im = iop_msg_alloc(iop, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_rbs_cache_flush);
	mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_CACHE_FLUSH);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = 1 << 16;			/* time multiplier */

	/* Ancient disks will return an error here. */
	rv = iop_msg_post(iop, im, &mf, LD_IOP_TIMEOUT * 2);
	iop_msg_free(iop, im);
	return (rv);
}

static void
ld_iop_intr(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_rbs_reply *rb;
	struct buf *bp;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int err, detail;
	const char *errstr;

	rb = reply;
	bp = im->im_dvcontext;
	sc = (struct ld_iop_softc *)dv;
	iop = (struct iop_softc *)device_parent(dv);

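	/*
	 * A set I2O_MSGFLAGS_FAIL bit means the IOP could not process the
	 * message at all; otherwise, check the request status and map any
	 * detailed status code to a human-readable string.
	 */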
	err = ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0);

	if (!err && rb->reqstatus != I2O_STATUS_SUCCESS) {
		detail = le16toh(rb->detail);
		if (detail >= __arraycount(ld_iop_errors))
			errstr = "<unknown>";
		else
			errstr = ld_iop_errors[detail];
		printf("%s: error 0x%04x: %s\n", dv->dv_xname, detail, errstr);
		err = 1;
	}

	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	lddone(&sc->sc_ld, bp, (err ? EIO : 0));
}

static void
ld_iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	sc = (struct ld_iop_softc *)dv;

	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED) {
		iop = device_private(device_parent(dv));
		mutex_spin_enter(&iop->sc_intrlock);
		sc->sc_flags |= LD_IOP_NEW_EVTMASK;
		cv_broadcast(&sc->sc_eventii.ii_cv);
		mutex_spin_exit(&iop->sc_intrlock);
		return;
	}

	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

static void
ld_iop_adjqparam(struct device *dv, int mpi)
{
	struct iop_softc *iop;

	/*
	 * AMI controllers seem to lose the plot if you hand off lots of
	 * queued commands.
	 */
	iop = (struct iop_softc *)device_parent(dv);
	if (le16toh(I2O_ORG_AMI) == iop->sc_status.orgid && mpi > 64)
		mpi = 64;

	ldadjqparam((struct ld_softc *)dv, mpi);
}