/*	$NetBSD: ld_iop.c,v 1.2 2000/12/03 13:17:03 ad Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * I2O front-end for ld(4) driver, supporting random block storage class
 * devices.  Currently, this doesn't handle anything more complex than
 * fixed direct-access devices.
 */

#include "opt_i2o.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/dkio.h>
#include <sys/disk.h>
#include <sys/proc.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <machine/bus.h>

#include <dev/ldvar.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopvar.h>

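/*
 * Maximum number of outstanding requests per unit, and the base command
 * timeout handed to the DDM below.  The RBS operation timeout parameters
 * are presumably expressed in microseconds, which would make this the
 * 10 second limit mentioned in ld_iop_attach().
 */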
#define	LD_IOP_MAXQUEUECNT	64		/* XXX */
#define	LD_IOP_TIMEOUT		10*1000*1000

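/*
 * Per-unit software state.  The generic ld(4) softc must come first (the
 * code casts between the two), followed by two IOP initiator handles: one
 * for block I/O completions and one for event notifications.  sc_claimed
 * records whether we claimed the target; sc_tid is the target's TID.
 */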
struct ld_iop_softc {
	struct	ld_softc sc_ld;
	struct	iop_initiator sc_ii;
	struct	iop_initiator sc_eventii;
	int	sc_claimed;
	u_int	sc_tid;
};

static void	ld_iop_attach(struct device *, struct device *, void *);
static int	ld_iop_detach(struct device *, int);
static int	ld_iop_dump(struct ld_softc *, void *, int, int);
static int	ld_iop_flush(struct ld_softc *);
static void	ld_iop_intr(struct device *, struct iop_msg *, void *);
static void	ld_iop_intr_event(struct device *, struct iop_msg *, void *);
static int	ld_iop_start(struct ld_softc *, struct buf *);
static int	ld_iop_match(struct device *, struct cfdata *, void *);

struct cfattach ld_iop_ca = {
	sizeof(struct ld_iop_softc),
	ld_iop_match,
	ld_iop_attach,
	ld_iop_detach
};

#ifdef I2OVERBOSE
static const char *ld_iop_errors[] = {
	"success",
	"media error",
	"failure communicating with device",
	"device failure",
	"device is not ready",
	"media not present",
	"media locked by another user",
	"media failure",
	"failure communicating to device",
	"device bus failure",
	"device locked by another user",
	"device write protected",
	"device reset",
	"volume has changed, waiting for acknowledgement",
};
#endif

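/*
 * Autoconfiguration match routine: accept any target of the I2O random
 * block storage class.  The attach routine sorts out sub-types that we
 * can't yet drive.
 */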
static int
ld_iop_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	return (ia->ia_class == I2O_CLASS_RANDOM_BLOCK_STORAGE);
}

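/*
 * Attach a unit: register the two initiators with the IOP, claim the
 * target, read its identity, block size, capacity and cache parameters,
 * set the DDM command timeouts, and hand the result to ldattach().
 */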
static void
ld_iop_attach(struct device *parent, struct device *self, void *aux)
{
	struct iop_attach_args *ia;
	struct ld_softc *ld;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv, evreg, enable;
	char ident[64 + 1], *typestr, *fixedstr;
	u_int cachesz;
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		union {
			struct	i2o_param_rbs_cache_control cc;
			struct	i2o_param_rbs_device_info bdi;
			struct	i2o_param_device_identity di;
			struct	i2o_param_rbs_operation op;
		} p;
	} param;

	sc = (struct ld_iop_softc *)self;
	ld = &sc->sc_ld;
	iop = (struct iop_softc *)parent;
	ia = (struct iop_attach_args *)aux;
	sc->sc_tid = ia->ia_tid;
	evreg = 0;

	/* Register us as an initiator. */
	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = ld_iop_intr;
	sc->sc_ii.ii_flags = 0;
	sc->sc_ii.ii_tid = ia->ia_tid;
	if (iop_initiator_register(iop, &sc->sc_ii) != 0) {
		printf("%s: unable to register initiator\n", self->dv_xname);
		return;
	}

	/* Register another initiator to handle events from the device. */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = ld_iop_intr_event;
	sc->sc_eventii.ii_flags = II_DISCARD | II_UTILITY;
	sc->sc_eventii.ii_tid = ia->ia_tid;
	if (iop_initiator_register(iop, &sc->sc_eventii) != 0) {
		printf("%s: unable to register initiator\n", self->dv_xname);
		goto bad;
	}
	if (iop_util_eventreg(iop, &sc->sc_eventii, 0xffffffff)) {
		printf("%s: unable to register for events\n", self->dv_xname);
		goto bad;
	}
	evreg = 1;

	ld->sc_maxxfer = IOP_MAX_XFER;
	ld->sc_maxqueuecnt = LD_IOP_MAXQUEUECNT;
	ld->sc_dump = ld_iop_dump;
	ld->sc_flush = ld_iop_flush;
	ld->sc_start = ld_iop_start;

	/* Say what the device is. */
	printf(": ");
	if (iop_param_op(iop, ia->ia_tid, 0, I2O_PARAM_DEVICE_IDENTITY, &param,
	    sizeof(param)) == 0) {
		iop_strvis(iop, param.p.di.vendorinfo,
		    sizeof(param.p.di.vendorinfo), ident, sizeof(ident));
		printf("<%s, ", ident);
		iop_strvis(iop, param.p.di.productinfo,
		    sizeof(param.p.di.productinfo), ident, sizeof(ident));
		printf("%s, ", ident);
		iop_strvis(iop, param.p.di.revlevel,
		    sizeof(param.p.di.revlevel), ident, sizeof(ident));
		printf("%s> ", ident);
	}

	/*
	 * Claim the device so that we don't get any nasty surprises.  Allow
	 * failure.
	 */
	sc->sc_claimed = !iop_util_claim(iop, &sc->sc_ii, 0,
	    I2O_UTIL_CLAIM_CAPACITY_SENSITIVE |
	    I2O_UTIL_CLAIM_NO_PEER_SERVICE |
	    I2O_UTIL_CLAIM_NO_MANAGEMENT_SERVICE |
	    I2O_UTIL_CLAIM_PRIMARY_USER);

	rv = iop_param_op(iop, ia->ia_tid, 0, I2O_PARAM_RBS_DEVICE_INFO,
	    &param, sizeof(param));
	if (rv != 0) {
		printf("%s: unable to get parameters (0x%04x; %d)\n",
		   ld->sc_dv.dv_xname, I2O_PARAM_RBS_DEVICE_INFO, rv);
		goto bad;
	}

	ld->sc_secsize = le32toh(param.p.bdi.blocksize);
	ld->sc_secperunit = (int)
	    (le64toh(param.p.bdi.capacity) / ld->sc_secsize);

	/* Build synthetic geometry. */
	if (ld->sc_secperunit <= 528 * 2048)		/* 528MB */
		ld->sc_nheads = 16;
	else if (ld->sc_secperunit <= 1024 * 2048)	/* 1GB */
		ld->sc_nheads = 32;
	else if (ld->sc_secperunit <= 21504 * 2048)	/* 21GB */
		ld->sc_nheads = 64;
	else if (ld->sc_secperunit <= 43008 * 2048)	/* 42GB */
		ld->sc_nheads = 128;
	else
		ld->sc_nheads = 255;

	ld->sc_nsectors = 63;
	ld->sc_ncylinders = ld->sc_secperunit /
	    (ld->sc_nheads * ld->sc_nsectors);

	switch (param.p.bdi.type) {
	case I2O_RBS_TYPE_DIRECT:
		typestr = "direct access";
		enable = 1;
		break;
	case I2O_RBS_TYPE_WORM:
		typestr = "WORM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_CDROM:
		typestr = "cdrom";
		enable = 0;
		break;
	case I2O_RBS_TYPE_OPTICAL:
		typestr = "optical";
		enable = 0;
		break;
	default:
		typestr = "unknown";
		enable = 0;
		break;
	}

	if ((le32toh(param.p.bdi.capabilities) & I2O_RBS_CAP_REMOVEABLE_MEDIA)
	    != 0) {
		/* ld->sc_flags = LDF_REMOVEABLE; */
		fixedstr = "removable";
		enable = 0;
	} else
		fixedstr = "fixed";

	printf("%s, %s", typestr, fixedstr);

	/*
	 * Determine if the device has a private cache.  If so, print the
	 * cache size.  Even if the device doesn't appear to have a cache,
	 * we perform a flush at shutdown, as it is still valid to do so.
	 */
	rv = iop_param_op(iop, ia->ia_tid, 0, I2O_PARAM_RBS_CACHE_CONTROL,
	    &param, sizeof(param));
	if (rv != 0) {
		printf("%s: unable to get parameters (0x%04x; %d)\n",
		   ld->sc_dv.dv_xname, I2O_PARAM_RBS_CACHE_CONTROL, rv);
		goto bad;
	}

	if ((cachesz = le32toh(param.p.cc.totalcachesize)) != 0)
		printf(", %dkB cache", cachesz >> 10);

	printf("\n");

	/*
	 * Configure the DDM's timeout functions to time out all commands
	 * after 10 seconds.
	 */
	rv = iop_param_op(iop, ia->ia_tid, 0, I2O_PARAM_RBS_OPERATION,
	    &param, sizeof(param));
	if (rv != 0) {
		printf("%s: unable to get parameters (0x%04x; %d)\n",
		   ld->sc_dv.dv_xname, I2O_PARAM_RBS_OPERATION, rv);
		goto bad;
	}

	param.p.op.timeoutbase = htole32(LD_IOP_TIMEOUT);
	param.p.op.rwvtimeoutbase = htole32(LD_IOP_TIMEOUT);
	param.p.op.rwvtimeout = 0;

	rv = iop_param_op(iop, ia->ia_tid, 1, I2O_PARAM_RBS_OPERATION,
	    &param, sizeof(param));
	if (rv != 0) {
		printf("%s: unable to set parameters (0x%04x; %d)\n",
		   ld->sc_dv.dv_xname, I2O_PARAM_RBS_OPERATION, rv);
		goto bad;
	}

	if (enable)
		ld->sc_flags |= LDF_ENABLED;
	else
		printf("%s: device not yet supported\n", self->dv_xname);

	ldattach(ld);
	return;

bad:
	if (sc->sc_claimed)
		iop_util_claim(iop, &sc->sc_ii, 1,
		    I2O_UTIL_CLAIM_PRIMARY_USER);
	if (evreg)
		iop_util_eventreg(iop, &sc->sc_eventii, 0);
	if (sc->sc_eventii.ii_intr != NULL)
		iop_initiator_unregister(iop, &sc->sc_eventii);
	iop_initiator_unregister(iop, &sc->sc_ii);
}

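/*
 * Detach a unit: drain outstanding requests, abort anything still queued
 * with the IOP, then release the claim and the initiator registrations.
 */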
static int
ld_iop_detach(struct device *self, int flags)
{
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int s, rv;

	sc = (struct ld_iop_softc *)self;

	/* XXX */
	if ((flags & DETACH_FORCE) == 0 && sc->sc_ld.sc_dk.dk_openmask != 0)
		return (EBUSY);
	s = splbio();
	sc->sc_ld.sc_flags |= LDF_DRAIN;
	splx(s);

	iop = (struct iop_softc *)self->dv_parent;

	/*
	 * Abort any requests queued with the IOP, but allow requests that
	 * are already in progress to complete.
	 */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		iop_util_abort(iop, &sc->sc_ii, 0, 0,
		    I2O_UTIL_ABORT_WILD | I2O_UTIL_ABORT_CLEAN);

	lddetach(&sc->sc_ld);

	/* Un-claim the target, and un-register us as an initiator. */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0) {
		if (sc->sc_claimed) {
			rv = iop_util_claim(iop, &sc->sc_ii, 1,
			    I2O_UTIL_CLAIM_PRIMARY_USER);
			if (rv != 0)
				return (rv);
		}
		iop_util_eventreg(iop, &sc->sc_eventii, 0);
		iop_initiator_unregister(iop, &sc->sc_eventii);
		iop_initiator_unregister(iop, &sc->sc_ii);
	}

	return (0);
}

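/*
 * Start a block transfer on behalf of ld(4): build an RBS block read or
 * write message for the buffer, map the data transfer and enqueue the
 * message with the IOP.  Completion is reported through ld_iop_intr().
 */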
static int
ld_iop_start(struct ld_softc *ld, struct buf *bp)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_read *mb;
	int rv, flags, write;
	u_int64_t ba;

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;

	im = NULL;
	if ((rv = iop_msg_alloc(iop, &sc->sc_ii, &im, IM_NOWAIT)) != 0)
		goto bad;
	im->im_dvcontext = bp;

	write = ((bp->b_flags & B_READ) == 0);
	ba = (u_int64_t)bp->b_rawblkno * ld->sc_secsize;

	/*
	 * Write through the cache when performing synchronous writes.  When
	 * performing a read, we don't request that the DDM cache the data,
	 * as there's little advantage to it.
	 */
	if (write) {
		if ((bp->b_flags & B_ASYNC) == 0)
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WT;
		else
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WB;
	} else
		flags = 0;

	/*
	 * Fill the message frame.  We can use the block_read structure for
	 * both reads and writes, as it's almost identical to the
	 * block_write structure.
	 */
	mb = (struct i2o_rbs_block_read *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_rbs_block_read);
	mb->msgfunc = I2O_MSGFUNC(sc->sc_tid,
	    write ? I2O_RBS_BLOCK_WRITE : I2O_RBS_BLOCK_READ);
	mb->msgictx = sc->sc_ii.ii_ictx;
	mb->msgtctx = im->im_tctx;
	mb->flags = flags | (1 << 16);		/* flags & time multiplier */
	mb->datasize = bp->b_bcount;
	mb->lowoffset = (u_int32_t)ba;
	mb->highoffset = (u_int32_t)(ba >> 32);

	/* Map the data transfer. */
	if ((rv = iop_msg_map(iop, im, bp->b_data, bp->b_bcount, write)) != 0)
		goto bad;

	/* Enqueue the command. */
	iop_msg_enqueue(iop, im, 0);
	return (0);

bad:
	if (im != NULL)
		iop_msg_free(iop, &sc->sc_ii, im);
	return (rv);
}

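/*
 * Crash dump routine: synchronously write blkcnt blocks of dump data at
 * blkno, waiting for the IOP rather than taking a completion interrupt,
 * since the dump path cannot rely on interrupts being available.
 */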
static int
ld_iop_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_write *mb;
	int rv, bcount;
	u_int64_t ba;

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;
	bcount = blkcnt * ld->sc_secsize;
	ba = (u_int64_t)blkno * ld->sc_secsize;

	rv = iop_msg_alloc(iop, &sc->sc_ii, &im, IM_NOWAIT | IM_NOINTR);
	if (rv != 0)
		return (rv);

	mb = (struct i2o_rbs_block_write *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_rbs_block_write);
	mb->msgfunc = I2O_MSGFUNC(sc->sc_tid, I2O_RBS_BLOCK_WRITE);
	mb->msgictx = sc->sc_ii.ii_ictx;
	mb->msgtctx = im->im_tctx;
	mb->flags = I2O_RBS_BLOCK_WRITE_CACHE_WT | (1 << 16);
	mb->datasize = bcount;
	mb->lowoffset = (u_int32_t)ba;
	mb->highoffset = (u_int32_t)(ba >> 32);

	if ((rv = iop_msg_map(iop, im, data, bcount, 1)) != 0) {
		iop_msg_free(iop, &sc->sc_ii, im);
		return (rv);
	}

	rv = (iop_msg_send(iop, im, 5000) != 0 ? EIO : 0);
	iop_msg_unmap(iop, im);
	iop_msg_free(iop, &sc->sc_ii, im);
	return (rv);
}

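/*
 * Flush the cache on the target device by sending an RBS cache flush
 * message and waiting synchronously for the reply.
 */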
static int
ld_iop_flush(struct ld_softc *ld)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_cache_flush *mb;
	int rv;

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;

	rv = iop_msg_alloc(iop, &sc->sc_ii, &im, IM_NOWAIT | IM_NOINTR);
	if (rv != 0)
		return (rv);

	mb = (struct i2o_rbs_cache_flush *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_rbs_cache_flush);
	mb->msgfunc = I2O_MSGFUNC(sc->sc_tid, I2O_RBS_CACHE_FLUSH);
	mb->msgictx = sc->sc_ii.ii_ictx;
	mb->msgtctx = im->im_tctx;
	mb->flags = 1 << 16;			/* time multiplier */

	rv = iop_msg_send(iop, im, 10000);
	iop_msg_free(iop, &sc->sc_ii, im);
	return (rv);
}

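/*
 * Interrupt handler for block I/O replies: report any error carried in the
 * reply frame, release the message wrapper and hand the buffer back to
 * ld(4) via lddone().
 */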
static void
ld_iop_intr(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_rbs_reply *rb;
	struct buf *bp;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
#ifdef I2OVERBOSE
	int detail;
	const char *errstr;
#endif

	rb = reply;
	bp = im->im_dvcontext;
	sc = (struct ld_iop_softc *)dv;
	iop = (struct iop_softc *)dv->dv_parent;

#ifdef I2OVERBOSE
	if (rb->reqstatus != I2O_STATUS_SUCCESS) {
		detail = le16toh(rb->detail);
		if (detail >= sizeof(ld_iop_errors) / sizeof(ld_iop_errors[0]))
			errstr = "unknown error";
		else
			errstr = ld_iop_errors[detail];
		printf("%s: %s\n", dv->dv_xname, errstr);
#else
	if (rb->reqstatus != I2O_STATUS_SUCCESS) {
#endif
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
#ifndef notyet
		bp->b_resid = bp->b_bcount;
	} else
		bp->b_resid = 0;
#else
	}
	bp->b_resid = bp->b_bcount - le32toh(rb->transfercount);
#endif

	iop_msg_unmap(iop, im);
	iop_msg_free(iop, &sc->sc_ii, im);
	lddone(&sc->sc_ld, bp);
}

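/*
 * Interrupt handler for event notifications from the target.  Events are
 * only reported; no recovery action is taken here yet.
 */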
static void
ld_iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;
	event = le32toh(rb->event);

#ifndef I2ODEBUG
	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED)
		return;
#endif

	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}