/*	$NetBSD: ld_iop.c,v 1.5.2.3 2001/08/24 00:09:10 nathanw Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * I2O front-end for ld(4) driver, supporting random block storage class
 * devices.  Currently, this doesn't handle anything more complex than
 * fixed direct-access devices.
 */

#include "opt_i2o.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/dkio.h>
#include <sys/disk.h>
#include <sys/proc.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <machine/bus.h>

#include <dev/ldvar.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopvar.h>

#define	LD_IOP_TIMEOUT		30*1000

#define	LD_IOP_CLAIMED		0x01
#define	LD_IOP_NEW_EVTMASK	0x02

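/*
 * Per-unit softc.  The generic ld_softc must come first, since the ld(4)
 * framework and this driver cast between the two.  Each unit registers two
 * initiators with the IOP: sc_ii receives block I/O replies, and sc_eventii
 * receives utility-class event notifications.  sc_flags holds the LD_IOP_*
 * bits defined above.
 */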
struct ld_iop_softc {
	struct	ld_softc sc_ld;
	struct	iop_initiator sc_ii;
	struct	iop_initiator sc_eventii;
	int	sc_flags;
};

static void	ld_iop_adjqparam(struct device *, int);
static void	ld_iop_attach(struct device *, struct device *, void *);
static int	ld_iop_detach(struct device *, int);
static int	ld_iop_dump(struct ld_softc *, void *, int, int);
static int	ld_iop_flush(struct ld_softc *);
static void	ld_iop_intr(struct device *, struct iop_msg *, void *);
static void	ld_iop_intr_event(struct device *, struct iop_msg *, void *);
static int	ld_iop_match(struct device *, struct cfdata *, void *);
static int	ld_iop_start(struct ld_softc *, struct buf *);
static void	ld_iop_unconfig(struct ld_iop_softc *, int);

struct cfattach ld_iop_ca = {
	sizeof(struct ld_iop_softc),
	ld_iop_match,
	ld_iop_attach,
	ld_iop_detach
};

#ifdef I2OVERBOSE
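/*
 * Human-readable strings for the RBS detailed status codes, indexed by the
 * value returned in a reply's `detail' field (see ld_iop_intr()).
 */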
static const char * const ld_iop_errors[] = {
	"success",
	"media error",
	"access error",
	"device failure",
	"device not ready",
	"media not present",
	"media locked",
	"media failure",
	"protocol failure",
	"bus failure",
	"access violation",
	"media write protected",
	"device reset",
	"volume changed, waiting for acknowledgement",
	"timeout",
};
#endif

static int
ld_iop_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	return (ia->ia_class == I2O_CLASS_RANDOM_BLOCK_STORAGE);
}

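/*
 * Attach a unit: register both initiators, subscribe to generic events,
 * claim the device, read its parameter groups to determine block size,
 * capacity and cache size, set the DDM command timeouts, and hand the unit
 * over to the ld(4) framework.
 */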
static void
ld_iop_attach(struct device *parent, struct device *self, void *aux)
{
	struct iop_attach_args *ia;
	struct ld_softc *ld;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv, evreg, enable;
	char *typestr, *fixedstr;
	u_int cachesz;
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		union {
			struct	i2o_param_rbs_cache_control cc;
			struct	i2o_param_rbs_device_info bdi;
			struct	i2o_param_rbs_operation op;
		} p;
	} __attribute__ ((__packed__)) param;

	sc = (struct ld_iop_softc *)self;
	ld = &sc->sc_ld;
	iop = (struct iop_softc *)parent;
	ia = (struct iop_attach_args *)aux;
	evreg = 0;

	/* Register us as an initiator. */
	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = ld_iop_intr;
	sc->sc_ii.ii_adjqparam = ld_iop_adjqparam;
	sc->sc_ii.ii_flags = 0;
	sc->sc_ii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_ii);

	/* Register another initiator to handle events from the device. */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = ld_iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_eventii);

	rv = iop_util_eventreg(iop, &sc->sc_eventii,
	    I2O_EVENT_GEN_EVENT_MASK_MODIFIED |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n", self->dv_xname);
		goto bad;
	}
	evreg = 1;

	/*
	 * Start out with one queued command.  The `iop' driver will adjust
	 * the queue parameters once we're up and running.
	 */
	ld->sc_maxqueuecnt = 1;

	ld->sc_maxxfer = IOP_MAX_XFER;
	ld->sc_dump = ld_iop_dump;
	ld->sc_flush = ld_iop_flush;
	ld->sc_start = ld_iop_start;

	/* Say what the device is. */
	printf(":");
	iop_print_ident(iop, ia->ia_tid);

	/*
	 * Claim the device so that we don't get any nasty surprises.  Allow
	 * failure.
	 */
	rv = iop_util_claim(iop, &sc->sc_ii, 0,
	    I2O_UTIL_CLAIM_CAPACITY_SENSITIVE |
	    I2O_UTIL_CLAIM_NO_PEER_SERVICE |
	    I2O_UTIL_CLAIM_NO_MANAGEMENT_SERVICE |
	    I2O_UTIL_CLAIM_PRIMARY_USER);
	sc->sc_flags = rv ? 0 : LD_IOP_CLAIMED;

	rv = iop_param_op(iop, ia->ia_tid, NULL, 0, I2O_PARAM_RBS_DEVICE_INFO,
	    &param, sizeof(param));
	if (rv != 0) {
		printf("%s: unable to get parameters (0x%04x; %d)\n",
		   ld->sc_dv.dv_xname, I2O_PARAM_RBS_DEVICE_INFO, rv);
		goto bad;
	}

	ld->sc_secsize = le32toh(param.p.bdi.blocksize);
	ld->sc_secperunit = (int)
	    (le64toh(param.p.bdi.capacity) / ld->sc_secsize);

	switch (param.p.bdi.type) {
	case I2O_RBS_TYPE_DIRECT:
		typestr = "direct access";
		enable = 1;
		break;
	case I2O_RBS_TYPE_WORM:
		typestr = "WORM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_CDROM:
		typestr = "CD-ROM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_OPTICAL:
		typestr = "optical";
		enable = 0;
		break;
	default:
		typestr = "unknown";
		enable = 0;
		break;
	}

	if ((le32toh(param.p.bdi.capabilities) & I2O_RBS_CAP_REMOVEABLE_MEDIA)
	    != 0) {
		/* ld->sc_flags = LDF_REMOVEABLE; */
		fixedstr = "removeable";
		enable = 0;
	} else
		fixedstr = "fixed";

	printf(" %s, %s", typestr, fixedstr);

	/*
	 * Determine if the device has a private cache.  If so, print the
	 * cache size.  Even if the device doesn't appear to have a cache,
	 * we perform a flush at shutdown.
	 */
	rv = iop_param_op(iop, ia->ia_tid, NULL, 0,
	    I2O_PARAM_RBS_CACHE_CONTROL, &param, sizeof(param));
	if (rv != 0) {
		printf("%s: unable to get parameters (0x%04x; %d)\n",
		   ld->sc_dv.dv_xname, I2O_PARAM_RBS_CACHE_CONTROL, rv);
		goto bad;
	}

	if ((cachesz = le32toh(param.p.cc.totalcachesize)) != 0)
		printf(", %dkB cache", cachesz >> 10);

	printf("\n");

	/*
	 * Configure the DDM's timeout functions to time out all commands
	 * after 30 seconds.
	 */
	rv = iop_param_op(iop, ia->ia_tid, NULL, 0, I2O_PARAM_RBS_OPERATION,
	    &param, sizeof(param));
	if (rv != 0) {
		printf("%s: unable to get parameters (0x%04x; %d)\n",
		   ld->sc_dv.dv_xname, I2O_PARAM_RBS_OPERATION, rv);
		goto bad;
	}

	param.p.op.timeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	param.p.op.rwvtimeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	param.p.op.rwvtimeout = 0;

	rv = iop_param_op(iop, ia->ia_tid, NULL, 1, I2O_PARAM_RBS_OPERATION,
	    &param, sizeof(param));
#ifdef notdef
	/*
	 * Intel RAID adapters don't like the above, but do post a
	 * `parameter changed' event.  Perhaps we're doing something
	 * wrong...
	 */
	if (rv != 0) {
		printf("%s: unable to set parameters (0x%04x; %d)\n",
		   ld->sc_dv.dv_xname, I2O_PARAM_RBS_OPERATION, rv);
		goto bad;
	}
#endif

	if (enable)
		ld->sc_flags |= LDF_ENABLED;
	else
		printf("%s: device not yet supported\n", self->dv_xname);

	ldattach(ld);
	return;

 bad:
	ld_iop_unconfig(sc, evreg);
}

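/*
 * Undo the work done by ld_iop_attach(): release the claim on the device
 * (if we got one), unregister from event notification, and unregister both
 * initiators.  Called from the attach error path and from detach.
 */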
static void
ld_iop_unconfig(struct ld_iop_softc *sc, int evreg)
{
	struct iop_softc *iop;
	int s;

	iop = (struct iop_softc *)sc->sc_ld.sc_dv.dv_parent;

	if ((sc->sc_flags & LD_IOP_CLAIMED) != 0)
		iop_util_claim(iop, &sc->sc_ii, 1,
		    I2O_UTIL_CLAIM_PRIMARY_USER);

	if (evreg) {
		/*
		 * Mask off events, and wait up to 5 seconds for a reply.
		 * Note that some adapters won't reply to this (XXX We
		 * should check the event capabilities).
		 */
		sc->sc_flags &= ~LD_IOP_NEW_EVTMASK;
		iop_util_eventreg(iop, &sc->sc_eventii,
		    I2O_EVENT_GEN_EVENT_MASK_MODIFIED);
		s = splbio();
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			tsleep(&sc->sc_eventii, PRIBIO, "ld_iopevt", hz * 5);
		splx(s);
#ifdef I2ODEBUG
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			printf("%s: didn't reply to event unregister\n",
			    sc->sc_ld.sc_dv.dv_xname);
#endif
	}

	iop_initiator_unregister(iop, &sc->sc_eventii);
	iop_initiator_unregister(iop, &sc->sc_ii);
}

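/*
 * Detach a unit: let ld(4) drain and shut down the disk, abort any requests
 * still queued with the IOP (requests already in progress are allowed to
 * complete), then release our claim and initiator registrations.
 */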
static int
ld_iop_detach(struct device *self, int flags)
{
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv;

	sc = (struct ld_iop_softc *)self;
	iop = (struct iop_softc *)self->dv_parent;

	if ((rv = ldbegindetach(&sc->sc_ld, flags)) != 0)
		return (rv);

	/*
	 * Abort any requests queued with the IOP, but allow requests that
	 * are already in progress to complete.
	 */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		iop_util_abort(iop, &sc->sc_ii, 0, 0,
		    I2O_UTIL_ABORT_WILD | I2O_UTIL_ABORT_CLEAN);

	ldenddetach(&sc->sc_ld);

	/* Un-claim the target, and un-register our initiators. */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		ld_iop_unconfig(sc, 1);

	return (0);
}

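/*
 * Start one buf: build an I2O block read/write message frame on the stack,
 * map the data buffer for DMA, and post the frame to the IOP.  The request
 * completes asynchronously via ld_iop_intr().
 */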
static int
ld_iop_start(struct ld_softc *ld, struct buf *bp)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_read *mf;
	u_int rv, flags, write;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;

	im = iop_msg_alloc(iop, 0);
	im->im_dvcontext = bp;

	write = ((bp->b_flags & B_READ) == 0);
	ba = (u_int64_t)bp->b_rawblkno * ld->sc_secsize;

	/*
	 * Write through the cache when performing synchronous writes.  When
	 * performing a read, we don't request that the DDM cache the data,
	 * as there's little advantage to it.
	 */
	if (write) {
		if ((bp->b_flags & B_ASYNC) == 0)
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WT;
		else
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WB;
	} else
		flags = 0;

	/*
	 * Fill the message frame.  We can use the block_read structure for
	 * both reads and writes, as it's almost identical to the
	 * block_write structure.
	 */
	mf = (struct i2o_rbs_block_read *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_read);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid,
	    write ? I2O_RBS_BLOCK_WRITE : I2O_RBS_BLOCK_READ);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = flags | (1 << 16);		/* flags & time multiplier */
	mf->datasize = bp->b_bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	/* Map the data transfer and enqueue the command. */
	rv = iop_msg_map_bio(iop, im, mb, bp->b_data, bp->b_bcount, write);
	if (rv == 0) {
		if ((rv = iop_post(iop, mb)) != 0) {
			iop_msg_unmap(iop, im);
			iop_msg_free(iop, im);
		}
	}
	return (rv);
}

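/*
 * Dump routine for crash dumps: write `blkcnt' blocks from `data' starting
 * at block `blkno'.  The message is posted with IM_POLL rather than
 * sleeping, since this may run after a panic, and the write is forced
 * through the cache (write-through).
 */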
static int
ld_iop_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_write *mf;
	int rv, bcount;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;
	bcount = blkcnt * ld->sc_secsize;
	ba = (u_int64_t)blkno * ld->sc_secsize;
	im = iop_msg_alloc(iop, IM_POLL);

	mf = (struct i2o_rbs_block_write *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_write);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_BLOCK_WRITE);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = I2O_RBS_BLOCK_WRITE_CACHE_WT | (1 << 16);
	mf->datasize = bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	if ((rv = iop_msg_map(iop, im, mb, data, bcount, 1, NULL)) != 0) {
		iop_msg_free(iop, im);
		return (rv);
	}

	rv = iop_msg_post(iop, im, mb, LD_IOP_TIMEOUT * 2);
	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	return (rv);
}

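/*
 * Flush the device's write cache: issue an I2O_RBS_CACHE_FLUSH and sleep
 * for the reply (IM_WAIT).  ld(4) invokes this through the sc_flush hook
 * set up in ld_iop_attach().
 */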
static int
ld_iop_flush(struct ld_softc *ld)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_cache_flush mf;
	int rv;

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;
	im = iop_msg_alloc(iop, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_rbs_cache_flush);
	mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_CACHE_FLUSH);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = 1 << 16;			/* time multiplier */

	/* XXX Ancient disks will return an error here. */
	rv = iop_msg_post(iop, im, &mf, LD_IOP_TIMEOUT * 2);
	iop_msg_free(iop, im);
	return (rv);
}

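/*
 * Completion handler for block read/write replies.  Decode the reply
 * status, report any detailed error, set up b_error/b_resid accordingly,
 * and hand the buf back to ld(4) via lddone().
 */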
static void
ld_iop_intr(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_rbs_reply *rb;
	struct buf *bp;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int err, detail;
#ifdef I2OVERBOSE
	const char *errstr;
#endif

	rb = reply;
	bp = im->im_dvcontext;
	sc = (struct ld_iop_softc *)dv;
	iop = (struct iop_softc *)dv->dv_parent;

	err = ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0);

	if (!err && rb->reqstatus != I2O_STATUS_SUCCESS) {
		detail = le16toh(rb->detail);
#ifdef I2OVERBOSE
		if (detail >= sizeof(ld_iop_errors) / sizeof(ld_iop_errors[0]))
			errstr = "<unknown>";
		else
			errstr = ld_iop_errors[detail];
		printf("%s: error 0x%04x: %s\n", dv->dv_xname, detail, errstr);
#else
		printf("%s: error 0x%04x\n", dv->dv_xname, detail);
#endif
		err = 1;
	}

	if (err) {
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else
		bp->b_resid = bp->b_bcount - le32toh(rb->transfercount);

	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	lddone(&sc->sc_ld, bp);
}

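/*
 * Handler for event notifications delivered to the utility initiator.  An
 * `event mask modified' event means the unregister request issued by
 * ld_iop_unconfig() took effect, so record that and wake the sleeper;
 * anything else is simply logged.
 */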
static void
ld_iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct ld_iop_softc *sc;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	sc = (struct ld_iop_softc *)dv;

	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED) {
		sc->sc_flags |= LD_IOP_NEW_EVTMASK;
		wakeup(&sc->sc_eventii);
#ifndef I2ODEBUG
		return;
#endif
	}

	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

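/*
 * Callback from iop(4) when the number of messages we may have queued with
 * the IOP changes; pass the new limit through to ld(4).
 */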
static void
ld_iop_adjqparam(struct device *dv, int mpi)
{
	struct iop_softc *iop;

	/*
	 * AMI controllers seem to lose the plot if you hand off lots of
	 * queued commands.
	 */
	iop = (struct iop_softc *)dv->dv_parent;
	if (le16toh(I2O_ORG_AMI) == iop->sc_status.orgid && mpi > 64)
		mpi = 64;

	ldadjqparam((struct ld_softc *)dv, mpi);
}