/*	$NetBSD: ld_iop.c,v 1.29 2008/05/10 14:52:55 simonb Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * I2O front-end for the ld(4) driver, supporting random block storage
 * class devices.  Currently, this doesn't handle anything more complex
 * than fixed direct-access devices.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_iop.c,v 1.29 2008/05/10 14:52:55 simonb Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/bufq.h>
#include <sys/endian.h>
#include <sys/dkio.h>
#include <sys/disk.h>
#include <sys/proc.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <sys/bus.h>

#include <dev/ldvar.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopvar.h>

#define	LD_IOP_TIMEOUT		(30 * 1000)	/* milliseconds */

#define	LD_IOP_CLAIMED		0x01	/* claimed the target device */
#define	LD_IOP_NEW_EVTMASK	0x02	/* event mask change acknowledged */

struct ld_iop_softc {
	struct	ld_softc sc_ld;			/* generic ld(4) state */
	struct	iop_initiator sc_ii;		/* initiator for block I/O */
	struct	iop_initiator sc_eventii;	/* initiator for event replies */
	int	sc_flags;			/* LD_IOP_* flags */
};
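
/*
 * Note that sc_ld must remain the first member of ld_iop_softc: the
 * generic ld(4) layer hands the routines below a struct ld_softc
 * pointer, which they cast back to a struct ld_iop_softc pointer.
 */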

static void	ld_iop_adjqparam(struct device *, int);
static void	ld_iop_attach(struct device *, struct device *, void *);
static int	ld_iop_detach(struct device *, int);
static int	ld_iop_dump(struct ld_softc *, void *, int, int);
static int	ld_iop_flush(struct ld_softc *);
static void	ld_iop_intr(struct device *, struct iop_msg *, void *);
static void	ld_iop_intr_event(struct device *, struct iop_msg *, void *);
static int	ld_iop_match(struct device *, struct cfdata *, void *);
static int	ld_iop_start(struct ld_softc *, struct buf *);
static void	ld_iop_unconfig(struct ld_iop_softc *, int);

CFATTACH_DECL(ld_iop, sizeof(struct ld_iop_softc),
    ld_iop_match, ld_iop_attach, ld_iop_detach, NULL);

static const char * const ld_iop_errors[] = {
	"success",
	"media error",
	"access error",
	"device failure",
	"device not ready",
	"media not present",
	"media locked",
	"media failure",
	"protocol failure",
	"bus failure",
	"access violation",
	"media write protected",
	"device reset",
	"volume changed, waiting for acknowledgement",
	"timeout",
};
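
/*
 * The strings above are indexed by the detailed status code returned in
 * an RBS reply; ld_iop_intr() prints "<unknown>" for codes that fall
 * outside the table.
 */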

static int
ld_iop_match(struct device *parent, struct cfdata *match,
    void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	return (ia->ia_class == I2O_CLASS_RANDOM_BLOCK_STORAGE);
}

static void
ld_iop_attach(struct device *parent, struct device *self, void *aux)
{
	struct iop_attach_args *ia;
	struct ld_softc *ld;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv, evreg, enable;
	const char *typestr, *fixedstr;
	u_int cachesz;
	u_int32_t timeoutbase, rwvtimeoutbase, rwvtimeout;
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		union {
			struct	i2o_param_rbs_cache_control cc;
			struct	i2o_param_rbs_device_info bdi;
		} p;
	} __attribute__ ((__packed__)) param;
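	/*
	 * The wrapper above mirrors the layout of a parameter group reply:
	 * the operation and read result headers, followed by the group
	 * payload; it is packed so that no compiler padding creeps in.
	 */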

	sc = device_private(self);
	ld = &sc->sc_ld;
	iop = device_private(parent);
	ia = (struct iop_attach_args *)aux;
	evreg = 0;

	/* Register us as an initiator. */
	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = ld_iop_intr;
	sc->sc_ii.ii_adjqparam = ld_iop_adjqparam;
	sc->sc_ii.ii_flags = 0;
	sc->sc_ii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_ii);

	/* Register another initiator to handle events from the device. */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = ld_iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_eventii);

	rv = iop_util_eventreg(iop, &sc->sc_eventii,
	    I2O_EVENT_GEN_EVENT_MASK_MODIFIED |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		aprint_error_dev(self, "unable to register for events\n");
		goto bad;
	}
	evreg = 1;

	/*
	 * Start out with one queued command.  The `iop' driver will adjust
	 * the queue parameters once we're up and running.
	 */
	ld->sc_maxqueuecnt = 1;

	ld->sc_maxxfer = IOP_MAX_XFER;
	ld->sc_dump = ld_iop_dump;
	ld->sc_flush = ld_iop_flush;
	ld->sc_start = ld_iop_start;

	/* Say what the device is. */
	printf(":");
	iop_print_ident(iop, ia->ia_tid);

	/*
	 * Claim the device so that we don't get any nasty surprises.  Allow
	 * failure.
	 */
	rv = iop_util_claim(iop, &sc->sc_ii, 0,
	    I2O_UTIL_CLAIM_CAPACITY_SENSITIVE |
	    I2O_UTIL_CLAIM_NO_PEER_SERVICE |
	    I2O_UTIL_CLAIM_NO_MANAGEMENT_SERVICE |
	    I2O_UTIL_CLAIM_PRIMARY_USER);
	sc->sc_flags = rv ? 0 : LD_IOP_CLAIMED;
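
	/*
	 * The claim result is recorded in sc_flags so that
	 * ld_iop_unconfig() only releases a claim we actually hold.
	 */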

	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_DEVICE_INFO,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	ld->sc_secsize = le32toh(param.p.bdi.blocksize);
	ld->sc_secperunit = (int)
	    (le64toh(param.p.bdi.capacity) / ld->sc_secsize);

	switch (param.p.bdi.type) {
	case I2O_RBS_TYPE_DIRECT:
		typestr = "direct access";
		enable = 1;
		break;
	case I2O_RBS_TYPE_WORM:
		typestr = "WORM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_CDROM:
		typestr = "CD-ROM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_OPTICAL:
		typestr = "optical";
		enable = 0;
		break;
	default:
		typestr = "unknown";
		enable = 0;
		break;
	}

	if ((le32toh(param.p.bdi.capabilities) & I2O_RBS_CAP_REMOVABLE_MEDIA)
	    != 0) {
		/* ld->sc_flags = LDF_REMOVABLE; */
		fixedstr = "removable";
		enable = 0;
	} else
		fixedstr = "fixed";

	printf(" %s, %s", typestr, fixedstr);

	/*
	 * Determine if the device has a private cache.  If so, print the
	 * cache size.  Even if the device doesn't appear to have a cache,
	 * we perform a flush at shutdown.
	 */
	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_CACHE_CONTROL,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	if ((cachesz = le32toh(param.p.cc.totalcachesize)) != 0)
		printf(", %ukB cache", cachesz >> 10);

	printf("\n");

	/*
	 * Configure the DDM's timeout functions to time out all commands
	 * after 30 seconds.  The parameter fields appear to take
	 * microseconds, hence the conversion from the millisecond
	 * LD_IOP_TIMEOUT.
	 */
	timeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeout = 0;

	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &timeoutbase, sizeof(timeoutbase),
	    I2O_PARAM_RBS_OPERATION_timeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeoutbase, sizeof(rwvtimeoutbase),
	    I2O_PARAM_RBS_OPERATION_rwvtimeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeout, sizeof(rwvtimeout),
	    I2O_PARAM_RBS_OPERATION_rwvtimeout);

	if (enable)
		ld->sc_flags |= LDF_ENABLED;
	else
		aprint_error_dev(self, "device not yet supported\n");

	ldattach(ld);
	return;

 bad:
	ld_iop_unconfig(sc, evreg);
}

static void
ld_iop_unconfig(struct ld_iop_softc *sc, int evreg)
{
	struct iop_softc *iop;

	iop = (struct iop_softc *)device_parent(&sc->sc_ld.sc_dv);

	if ((sc->sc_flags & LD_IOP_CLAIMED) != 0)
		iop_util_claim(iop, &sc->sc_ii, 1,
		    I2O_UTIL_CLAIM_PRIMARY_USER);

	if (evreg) {
		/*
		 * Mask off events, and wait up to 5 seconds for a reply.
		 * Note that some adapters won't reply to this (XXX We
		 * should check the event capabilities).
		 */
		mutex_spin_enter(&iop->sc_intrlock);
		sc->sc_flags &= ~LD_IOP_NEW_EVTMASK;
		mutex_spin_exit(&iop->sc_intrlock);

		iop_util_eventreg(iop, &sc->sc_eventii,
		    I2O_EVENT_GEN_EVENT_MASK_MODIFIED);

		mutex_spin_enter(&iop->sc_intrlock);
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			cv_timedwait(&sc->sc_eventii.ii_cv,
			    &iop->sc_intrlock, hz * 5);
		mutex_spin_exit(&iop->sc_intrlock);
	}

	iop_initiator_unregister(iop, &sc->sc_eventii);
	iop_initiator_unregister(iop, &sc->sc_ii);
}

static int
ld_iop_detach(struct device *self, int flags)
{
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv;

	sc = device_private(self);
	iop = device_private(device_parent(self));

	if ((rv = ldbegindetach(&sc->sc_ld, flags)) != 0)
		return (rv);

	/*
	 * Abort any requests queued with the IOP, but allow requests that
	 * are already in progress to complete.
	 */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		iop_util_abort(iop, &sc->sc_ii, 0, 0,
		    I2O_UTIL_ABORT_WILD | I2O_UTIL_ABORT_CLEAN);

	ldenddetach(&sc->sc_ld);

	/* Un-claim the target, and un-register our initiators. */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		ld_iop_unconfig(sc, 1);

	return (0);
}

static int
ld_iop_start(struct ld_softc *ld, struct buf *bp)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_read *mf;
	u_int rv, flags, write;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)device_parent(&ld->sc_dv);

	im = iop_msg_alloc(iop, 0);
	im->im_dvcontext = bp;

	write = ((bp->b_flags & B_READ) == 0);
	ba = (u_int64_t)bp->b_rawblkno * ld->sc_secsize;

	/*
	 * Write through the cache when performing synchronous writes.  When
	 * performing a read, we don't request that the DDM cache the data,
	 * as there's little advantage to it.
	 */
	if (write) {
		if ((bp->b_flags & B_ASYNC) == 0)
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WT;
		else
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WB;
	} else
		flags = 0;

	/*
	 * Fill the message frame.  We can use the block_read structure for
	 * both reads and writes, as it's almost identical to the
	 * block_write structure.
	 */
	mf = (struct i2o_rbs_block_read *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_read);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid,
	    write ? I2O_RBS_BLOCK_WRITE : I2O_RBS_BLOCK_READ);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = flags | (1 << 16);		/* flags & time multiplier */
	mf->datasize = bp->b_bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);
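
	/*
	 * The 64-bit byte offset is split across two 32-bit fields.  For
	 * example, with 512 byte sectors a request at b_rawblkno 0x01000000
	 * gives ba = 0x200000000: lowoffset 0, highoffset 2.
	 */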

	/* Map the data transfer and enqueue the command. */
	rv = iop_msg_map_bio(iop, im, mb, bp->b_data, bp->b_bcount, write);
	if (rv == 0) {
		if ((rv = iop_post(iop, mb)) != 0) {
			iop_msg_unmap(iop, im);
			iop_msg_free(iop, im);
		}
	}
	return (rv);
}

static int
ld_iop_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_write *mf;
	int rv, bcount;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)device_parent(&ld->sc_dv);
	bcount = blkcnt * ld->sc_secsize;
	ba = (u_int64_t)blkno * ld->sc_secsize;
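	/* Dumps are written with interrupts disabled, so poll for
	   completion rather than waiting for an interrupt. */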
	im = iop_msg_alloc(iop, IM_POLL);

	mf = (struct i2o_rbs_block_write *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_write);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_BLOCK_WRITE);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = I2O_RBS_BLOCK_WRITE_CACHE_WT | (1 << 16);
	mf->datasize = bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	if ((rv = iop_msg_map(iop, im, mb, data, bcount, 1, NULL)) != 0) {
		iop_msg_free(iop, im);
		return (rv);
	}

	rv = iop_msg_post(iop, im, mb, LD_IOP_TIMEOUT * 2);
	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	return (rv);
}

static int
ld_iop_flush(struct ld_softc *ld)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_cache_flush mf;
	int rv;

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)device_parent(&ld->sc_dv);
	im = iop_msg_alloc(iop, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_rbs_cache_flush);
	mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_CACHE_FLUSH);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = 1 << 16;			/* time multiplier */

	/* Ancient disks will return an error here. */
	rv = iop_msg_post(iop, im, &mf, LD_IOP_TIMEOUT * 2);
	iop_msg_free(iop, im);
	return (rv);
}

static void
ld_iop_intr(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_rbs_reply *rb;
	struct buf *bp;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int err, detail;
	const char *errstr;

	rb = reply;
	bp = im->im_dvcontext;
	sc = (struct ld_iop_softc *)dv;
	iop = (struct iop_softc *)device_parent(dv);

	err = ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0);

	if (!err && rb->reqstatus != I2O_STATUS_SUCCESS) {
		detail = le16toh(rb->detail);
		if (detail >= __arraycount(ld_iop_errors))
			errstr = "<unknown>";
		else
			errstr = ld_iop_errors[detail];
		aprint_error_dev(dv, "error 0x%04x: %s\n", detail, errstr);
		err = 1;
	}

	if (err) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else
		bp->b_resid = bp->b_bcount - le32toh(rb->transfercount);

	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	lddone(&sc->sc_ld, bp);
}

static void
ld_iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	sc = (struct ld_iop_softc *)dv;

	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED) {
		iop = device_private(device_parent(dv));
		mutex_spin_enter(&iop->sc_intrlock);
		sc->sc_flags |= LD_IOP_NEW_EVTMASK;
		cv_broadcast(&sc->sc_eventii.ii_cv);
		mutex_spin_exit(&iop->sc_intrlock);
		return;
	}

	printf("%s: event 0x%08x received\n", device_xname(dv), event);
}

static void
ld_iop_adjqparam(struct device *dv, int mpi)
{
	struct iop_softc *iop;

	/*
	 * AMI controllers seem to lose the plot if you hand off lots of
	 * queued commands.
	 */
	iop = (struct iop_softc *)device_parent(dv);
	if (le16toh(I2O_ORG_AMI) == iop->sc_status.orgid && mpi > 64)
		mpi = 64;

	ldadjqparam((struct ld_softc *)dv, mpi);
}