/*	$NetBSD: iop.c,v 1.68.12.4 2008/09/28 10:40:21 mjf Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.68.12.4 2008/09/28 10:40:21 mjf Exp $");

#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>
#include <sys/kauth.h>
#include <sys/bus.h>

#include <uvm/uvm_extern.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#include "locators.h"

#define POLL(ms, cond)				\
do {						\
	int xi;					\
	for (xi = (ms) * 10; xi; xi--) {	\
		if (cond)			\
			break;			\
		DELAY(100);			\
	}					\
} while (/* CONSTCOND */0);

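/*
 * Usage sketch (added commentary, not in the original): POLL() spins in
 * 100us steps, so the first argument is an upper bound in milliseconds.
 * A caller waiting up to 50ms for a device status bit might do (register
 * name illustrative only):
 *
 *	POLL(50, (iop_inl(sc, IOP_REG_STATUS) & 1) != 0);
 *
 * The condition is re-evaluated on each iteration, which is why callers
 * below fold bus_dmamap_sync() calls into the condition expression.
 */
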
#ifdef I2ODEBUG
#define DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#define IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)
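
/*
 * Note (added commentary): the "+ 1" allows for a transfer that does not
 * start on a page boundary, and so can touch one more page than a
 * page-aligned transfer of the same size.
 */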

#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)
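
/*
 * Note (added commentary): a transaction context is split in two: the
 * low IOP_TCTX_SHIFT bits index the message wrapper in sc->sc_ims, and
 * the upper bits are matched against the wrapper's im_tctx so that stale
 * or corrupt contexts in replies can be rejected (see iop_handle_reply()).
 */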

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long	iop_ictxhash;
static void	*iop_sdh;
static struct	i2o_systab *iop_systab;
static int	iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

static struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
	const char *ic_caption;
} const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		"executive"
	},
	{
		I2O_CLASS_DDM,
		0,
		"device driver module"
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"random block storage"
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"sequential storage"
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		"LAN port"
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		"WAN port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		"fibrechannel port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		"fibrechannel peripheral"
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		"SCSI peripheral"
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		"ATE port"
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		"ATE peripheral"
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		"floppy controller"
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		"floppy device"
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		"bus adapter port"
	},
};
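
/*
 * Note (added commentary): these flags drive the two-pass attach in
 * iop_reconfigure(): classes marked IC_CONFIGURE | IC_PRIORITY are
 * attached first, e.g.
 *
 *	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
 *	    IC_CONFIGURE | IC_PRIORITY);
 *
 * and the remaining IC_CONFIGURE classes are attached in a second pass.
 */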

static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t	iop_inl_msg(struct iop_softc *, int);
static inline void	iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *, size_t);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);

static void	iop_adjqparam(struct iop_softc *, int);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
				struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_sys_enable(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
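
/*
 * Note (added commentary): reads are preceded by a full barrier so they
 * observe any earlier posted writes, and writes are followed by a write
 * barrier to keep register writes ordered with respect to one another.
 */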

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs, maj;
	u_int32_t mask;
	char ident[64];
	device_t dev = &sc->sc_dv;

	state = 0;

	printf("I2O adapter");

	mutex_init(&sc->sc_intrlock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&sc->sc_conflock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_confcv, "iopconf");

	if (iop_ictxhashtbl == NULL) {
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    true, &iop_ictxhash);
	}

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		aprint_error_dev(&sc->sc_dv, "cannot create scratch dmamap\n");
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(&sc->sc_dv, "cannot alloc scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		aprint_error_dev(&sc->sc_dv, "cannot map scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(&sc->sc_dv, "cannot load scratch dmamap\n");
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Avoid syncing the reply map until it's set up. */
	sc->sc_curib = 0x123;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		aprint_error_dev(&sc->sc_dv, "not responding (reset)\n");
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		aprint_error_dev(&sc->sc_dv, "not responding (get status)\n");
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n",
	    device_xname(&sc->sc_dv),
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", device_xname(&sc->sc_dv));
	printf("%s: mem  %04x %04x %08x\n", device_xname(&sc->sc_dv),
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o  %04x %04x %08x\n", device_xname(&sc->sc_dv),
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		aprint_error_dev(&sc->sc_dv, "frame size too small (%d)\n",
		    sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		aprint_error_dev(&sc->sc_dv, "memory allocation failure\n");
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv,
			    "couldn't create dmamap (%d)\n", rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
		cv_init(&im->im_cv, "iopmsg");
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to init outbound FIFO\n");
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", device_xname(&sc->sc_dv),
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    device_xname(&sc->sc_dv), sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	maj = cdevsw_lookup_major(&iop_cdevsw);
	device_register_name(makedev(maj, device_unit(dev)), dev, true,
	     DEV_OTHER, device_xname(dev));

	return;

 bail_out3:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
 bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;
	int locs[IOPCF_NLOCS];

	sc = device_private(self);
	mutex_enter(&sc->sc_conflock);

	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", device_xname(&sc->sc_dv));

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", device_xname(&sc->sc_dv));
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				aprint_error_dev(&sc->sc_dv, "unable to retrieve status\n");
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0) {
			mutex_exit(&sc->sc_conflock);
			return;
		}

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);
		if (iop_systab == NULL) {
			mutex_exit(&sc->sc_conflock);
			return;
		}

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = device_unit(&iop->sc_dv) + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to set system table\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}
	if (iop_sys_enable(sc) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to enable system\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to register for events\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	locs[IOPCF_TID] = I2O_TID_IOP;
	config_found_sm_loc(self, "iop", locs, &ia, iop_print,
		config_stdsubmatch);

	/*
	 * Start device configuration.
	 */
	if ((rv = iop_reconfigure(sc, 0)) == -1)
		aprint_error_dev(&sc->sc_dv, "configure failed (%d)\n", rv);

	sc->sc_flags |= IOP_ONLINE;
	rv = kthread_create(PRI_NONE, 0, NULL, iop_reconf_thread, sc,
	    &sc->sc_reconf_thread, "%s", device_xname(&sc->sc_dv));
	mutex_exit(&sc->sc_conflock);
	if (rv != 0) {
		aprint_error_dev(&sc->sc_dv,
		    "unable to create reconfiguration thread (%d)\n", rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct lwp *l;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;
	l = curlwp;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    device_xname(&sc->sc_dv), chgind));

		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    device_xname(&sc->sc_dv), le32toh(lct.changeindicator), rv));

		mutex_enter(&sc->sc_conflock);
		if (rv == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
		}
		(void)cv_timedwait(&sc->sc_confcv, &sc->sc_conflock, hz * 5);
		mutex_exit(&sc->sc_conflock);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	KASSERT(mutex_owned(&sc->sc_conflock));

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", device_xname(&sc->sc_dv),
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				aprint_error_dev(&sc->sc_dv, "bus scan failed\n");
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", device_xname(&sc->sc_dv)));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", device_xname(&sc->sc_dv), sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", device_xname(&sc->sc_dv)));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", device_xname(&sc->sc_dv)));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (sc->sc_tidmap == NULL)
		return (ENOMEM);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	}
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
			config_detach(ii->ii_dv, DETACH_FORCE);
			continue;
		}

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			aprint_error_dev(&sc->sc_dv, "%s failed reconfigure (%d)\n",
			    device_xname(ii->ii_dv), rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    device_xname(ii->ii_dv));
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(&sc->sc_dv, "iop", locs, &ia,
					 iop_print, config_stdsubmatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, device_xname(dv));
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo, size_t l)
{
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		snprintf(devinfo, l, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, l);
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup_private(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREWRITE);
	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_POSTWRITE);
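	/*
	 * Note (added commentary): the PREWRITE sync, the CPU write (the
	 * memset above), and the PREREAD|POSTWRITE sync together push the
	 * cleared status block out to memory the IOP can see before the
	 * frame is posted; the POSTREAD syncs in the poll loop below then
	 * make the IOP's answer visible to the CPU.
	 */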

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	for (i = 100; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			kpause("iopstat", false, hz / 10, NULL);
	}

	if (st->syncbyte != 0xff) {
		aprint_error_dev(&sc->sc_dv, "STATUS_GET timed out\n");
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_dmamap->dm_segs[0].ds_addr;
	mb[0] += 2 << 16;
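	/*
	 * Note (added commentary): mb[0] is the message flags word, whose
	 * upper 16 bits hold the frame size in 32-bit words; adding
	 * "2 << 16" accounts for the two SGL words appended above.
	 */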

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREWRITE);
	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_POSTWRITE);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		aprint_error_dev(&sc->sc_dv, "outbound FIFO init failed (%d)\n",
		    le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA alloc = %d\n",
			   rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA map = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA create = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA load = %d\n", rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;

		/* Now safe to sync the reply map. */
		sc->sc_curib = 0;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	uvm_lwp_hold(curlwp);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	uvm_lwp_rele(curlwp);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", device_xname(&sc->sc_dv),
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Post a SYS_ENABLE message to the adapter.
 */
int
iop_sys_enable(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, 30000);
	if (rv == 0) {
		if ((im->im_flags & IM_FAIL) != 0)
			rv = ENXIO;
		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
			rv = 0;
		else
			rv = EIO;
	}

	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		uvm_lwp_hold(curlwp);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		uvm_lwp_rele(curlwp);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    device_xname(&sc->sc_dv), tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
	      int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(&sc->sc_dv, "FIELD_SET failed for tid %d group %d\n",
		    tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	uvm_lwp_hold(curlwp);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(&sc->sc_dv, "TABLE_CLEAR failed for tid %d group %d\n",
		    tid, group);

	iop_msg_unmap(sc, im);
	uvm_lwp_rele(curlwp);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(&sc->sc_dv, "ADD_ROW failed for tid %d group %d row %d\n",
		    tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}
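
/*
 * Usage sketch (added commentary): iop_shutdown() above uses this to
 * quiesce an IOP, polling for completion with a 5 second timeout:
 *
 *	iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
 *	    0, 5000);
 *
 * A zero "async" argument selects IM_POLL; non-zero sleeps via IM_WAIT.
 */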
   1470 
   1471 /*
   1472  * Post the system table to the IOP.
   1473  */
   1474 static int
   1475 iop_systab_set(struct iop_softc *sc)
   1476 {
   1477 	struct i2o_exec_sys_tab_set *mf;
   1478 	struct iop_msg *im;
   1479 	bus_space_handle_t bsh;
   1480 	bus_addr_t boo;
   1481 	u_int32_t mema[2], ioa[2];
   1482 	int rv;
   1483 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
   1484 
   1485 	im = iop_msg_alloc(sc, IM_WAIT);
   1486 
   1487 	mf = (struct i2o_exec_sys_tab_set *)mb;
   1488 	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
   1489 	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
   1490 	mf->msgictx = IOP_ICTX;
   1491 	mf->msgtctx = im->im_tctx;
   1492 	mf->iopid = (device_unit(&sc->sc_dv) + 2) << 12;
   1493 	mf->segnumber = 0;
   1494 
   1495 	mema[1] = sc->sc_status.desiredprivmemsize;
   1496 	ioa[1] = sc->sc_status.desiredpriviosize;
   1497 
   1498 	if (mema[1] != 0) {
   1499 		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
   1500 		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
   1501 		mema[0] = htole32(boo);
   1502 		if (rv != 0) {
   1503 			aprint_error_dev(&sc->sc_dv, "can't alloc priv mem space, err = %d\n", rv);
   1504 			mema[0] = 0;
   1505 			mema[1] = 0;
   1506 		}
   1507 	}
   1508 
   1509 	if (ioa[1] != 0) {
   1510 		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
   1511 		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
   1512 		ioa[0] = htole32(boo);
   1513 		if (rv != 0) {
   1514 			aprint_error_dev(&sc->sc_dv, "can't alloc priv i/o space, err = %d\n", rv);
   1515 			ioa[0] = 0;
   1516 			ioa[1] = 0;
   1517 		}
   1518 	}
   1519 
   1520 	uvm_lwp_hold(curlwp);
   1521 	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
   1522 	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
   1523 	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
   1524 	rv = iop_msg_post(sc, im, mb, 5000);
   1525 	iop_msg_unmap(sc, im);
   1526 	iop_msg_free(sc, im);
   1527 	uvm_lwp_rele(curlwp);
   1528 	return (rv);
   1529 }
   1530 
   1531 /*
   1532  * Reset the IOP.  Must be called with interrupts disabled.
   1533  */
   1534 static int
   1535 iop_reset(struct iop_softc *sc)
   1536 {
   1537 	u_int32_t mfa, *sw;
   1538 	struct i2o_exec_iop_reset mf;
   1539 	int rv;
   1540 	paddr_t pa;
   1541 
   1542 	sw = (u_int32_t *)sc->sc_scr;
   1543 	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;
   1544 
   1545 	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
   1546 	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
   1547 	mf.reserved[0] = 0;
   1548 	mf.reserved[1] = 0;
   1549 	mf.reserved[2] = 0;
   1550 	mf.reserved[3] = 0;
   1551 	mf.statuslow = (u_int32_t)pa;
   1552 	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);
   1553 
   1554 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
   1555 	    BUS_DMASYNC_PREWRITE);
   1556 	*sw = htole32(0);
   1557 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
   1558 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
   1559 
   1560 	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
   1561 		return (rv);
   1562 
   1563 	POLL(2500,
   1564 	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
   1565 	    BUS_DMASYNC_POSTREAD), *sw != 0));
   1566 	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
   1567 		aprint_error_dev(&sc->sc_dv, "reset rejected, status 0x%x\n",
   1568 		    le32toh(*sw));
   1569 		return (EIO);
   1570 	}
   1571 
   1572 	/*
   1573 	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
   1574 	 * the inbound queue to become responsive.
   1575 	 */
   1576 	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
   1577 	if (mfa == IOP_MFA_EMPTY) {
   1578 		aprint_error_dev(&sc->sc_dv, "reset failed\n");
   1579 		return (EIO);
   1580 	}
   1581 
   1582 	iop_release_mfa(sc, mfa);
   1583 	return (0);
   1584 }
   1585 
   1586 /*
   1587  * Register a new initiator.  Must be called with the configuration lock
   1588  * held.
   1589  */
   1590 void
   1591 iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
   1592 {
   1593 	static int ictxgen;
   1594 
   1595 	/* 0 is reserved (by us) for system messages. */
   1596 	ii->ii_ictx = ++ictxgen;
   1597 
   1598 	/*
   1599 	 * `Utility initiators' don't make it onto the per-IOP initiator list
   1600 	 * (which is used only for configuration), but do get one slot on
   1601 	 * the inbound queue.
   1602 	 */
   1603 	if ((ii->ii_flags & II_UTILITY) == 0) {
   1604 		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
   1605 		sc->sc_nii++;
   1606 	} else
   1607 		sc->sc_nuii++;
   1608 
   1609 	cv_init(&ii->ii_cv, "iopevt");
   1610 
   1611 	mutex_spin_enter(&sc->sc_intrlock);
   1612 	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
   1613 	mutex_spin_exit(&sc->sc_intrlock);
   1614 }
   1615 
   1616 /*
   1617  * Unregister an initiator.  Must be called with the configuration lock
   1618  * held.
   1619  */
   1620 void
   1621 iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
   1622 {
   1623 
   1624 	if ((ii->ii_flags & II_UTILITY) == 0) {
   1625 		LIST_REMOVE(ii, ii_list);
   1626 		sc->sc_nii--;
   1627 	} else
   1628 		sc->sc_nuii--;
   1629 
   1630 	mutex_spin_enter(&sc->sc_intrlock);
   1631 	LIST_REMOVE(ii, ii_hash);
   1632 	mutex_spin_exit(&sc->sc_intrlock);
   1633 
   1634 	cv_destroy(&ii->ii_cv);
   1635 }
   1636 
   1637 /*
   1638  * Handle a reply frame from the IOP.
   1639  */
   1640 static int
   1641 iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
   1642 {
   1643 	struct iop_msg *im;
   1644 	struct i2o_reply *rb;
   1645 	struct i2o_fault_notify *fn;
   1646 	struct iop_initiator *ii;
   1647 	u_int off, ictx, tctx, status, size;
   1648 
   1649 	KASSERT(mutex_owned(&sc->sc_intrlock));
   1650 
   1651 	off = (int)(rmfa - sc->sc_rep_phys);
   1652 	rb = (struct i2o_reply *)((char *)sc->sc_rep + off);
   1653 
   1654 	/* Perform reply queue DMA synchronisation. */
   1655 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
   1656 	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
   1657 
   1658 #ifdef I2ODEBUG
   1659 	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
   1660 		panic("iop_handle_reply: 64-bit reply");
   1661 #endif
   1662 	/*
   1663 	 * Find the initiator.
   1664 	 */
   1665 	ictx = le32toh(rb->msgictx);
   1666 	if (ictx == IOP_ICTX)
   1667 		ii = NULL;
   1668 	else {
   1669 		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
   1670 		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
   1671 			if (ii->ii_ictx == ictx)
   1672 				break;
   1673 		if (ii == NULL) {
   1674 #ifdef I2ODEBUG
   1675 			iop_reply_print(sc, rb);
   1676 #endif
   1677 			aprint_error_dev(&sc->sc_dv, "WARNING: bad ictx returned (%x)\n",
   1678 			    ictx);
   1679 			return (-1);
   1680 		}
   1681 	}
   1682 
   1683 	/*
   1684 	 * If we received a transport failure notice, we've got to dig the
   1685 	 * transaction context (if any) out of the original message frame,
   1686 	 * and then release the original MFA back to the inbound FIFO.
   1687 	 */
   1688 	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
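         		/*
         		 * The request status is immaterial for a transport
         		 * failure; for wrapper-tracked initiators the error is
         		 * flagged via IM_FAIL below.
         		 */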
   1689 		status = I2O_STATUS_SUCCESS;
   1690 
   1691 		fn = (struct i2o_fault_notify *)rb;
   1692 		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
   1693 		iop_release_mfa(sc, fn->lowmfa);
   1694 		iop_tfn_print(sc, fn);
   1695 	} else {
   1696 		status = rb->reqstatus;
   1697 		tctx = le32toh(rb->msgtctx);
   1698 	}
   1699 
   1700 	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
   1701 		/*
   1702 		 * This initiator tracks state using message wrappers.
   1703 		 *
   1704 		 * Find the originating message wrapper, and if requested
   1705 		 * notify the initiator.
   1706 		 */
   1707 		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
   1708 		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
   1709 		    (im->im_flags & IM_ALLOCED) == 0 ||
   1710 		    tctx != im->im_tctx) {
    1711 			aprint_error_dev(&sc->sc_dv,
         			    "WARNING: bad tctx returned (0x%08x, %p)\n",
         			    tctx, im);
   1712 			if (im != NULL)
   1713 				aprint_error_dev(&sc->sc_dv, "flags=0x%08x tctx=0x%08x\n",
   1714 				    im->im_flags, im->im_tctx);
   1715 #ifdef I2ODEBUG
   1716 			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
   1717 				iop_reply_print(sc, rb);
   1718 #endif
   1719 			return (-1);
   1720 		}
   1721 
   1722 		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
   1723 			im->im_flags |= IM_FAIL;
   1724 
   1725 #ifdef I2ODEBUG
   1726 		if ((im->im_flags & IM_REPLIED) != 0)
   1727 			panic("%s: dup reply", device_xname(&sc->sc_dv));
   1728 #endif
   1729 		im->im_flags |= IM_REPLIED;
   1730 
   1731 #ifdef I2ODEBUG
   1732 		if (status != I2O_STATUS_SUCCESS)
   1733 			iop_reply_print(sc, rb);
   1734 #endif
   1735 		im->im_reqstatus = status;
   1736 		im->im_detstatus = le16toh(rb->detail);
   1737 
   1738 		/* Copy the reply frame, if requested. */
   1739 		if (im->im_rb != NULL) {
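         			/*
         			 * The upper 16 bits of msgflags hold the reply
         			 * size in 32-bit words; >> 14 with the bottom
         			 * two bits masked off yields a byte count.
         			 */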
   1740 			size = (le32toh(rb->msgflags) >> 14) & ~3;
   1741 #ifdef I2ODEBUG
   1742 			if (size > sc->sc_framesize)
   1743 				panic("iop_handle_reply: reply too large");
   1744 #endif
   1745 			memcpy(im->im_rb, rb, size);
   1746 		}
   1747 
   1748 		/* Notify the initiator. */
   1749 		if ((im->im_flags & IM_WAIT) != 0)
   1750 			cv_broadcast(&im->im_cv);
   1751 		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
   1752 			if (ii != NULL) {
   1753 				mutex_spin_exit(&sc->sc_intrlock);
   1754 				(*ii->ii_intr)(ii->ii_dv, im, rb);
   1755 				mutex_spin_enter(&sc->sc_intrlock);
   1756 			}
   1757 		}
   1758 	} else {
   1759 		/*
   1760 		 * This initiator discards message wrappers.
   1761 		 *
   1762 		 * Simply pass the reply frame to the initiator.
   1763 		 */
   1764 		if (ii != NULL) {
   1765 			mutex_spin_exit(&sc->sc_intrlock);
   1766 			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
   1767 			mutex_spin_enter(&sc->sc_intrlock);
   1768 		}
   1769 	}
   1770 
   1771 	return (status);
   1772 }
   1773 
   1774 /*
   1775  * Handle an interrupt from the IOP.
   1776  */
   1777 int
   1778 iop_intr(void *arg)
   1779 {
   1780 	struct iop_softc *sc;
   1781 	u_int32_t rmfa;
   1782 
   1783 	sc = arg;
   1784 
   1785 	mutex_spin_enter(&sc->sc_intrlock);
   1786 
   1787 	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) {
   1788 		mutex_spin_exit(&sc->sc_intrlock);
   1789 		return (0);
   1790 	}
   1791 
   1792 	for (;;) {
   1793 		/* Double read to account for IOP bug. */
   1794 		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
   1795 			rmfa = iop_inl(sc, IOP_REG_OFIFO);
   1796 			if (rmfa == IOP_MFA_EMPTY)
   1797 				break;
   1798 		}
   1799 		iop_handle_reply(sc, rmfa);
   1800 		iop_outl(sc, IOP_REG_OFIFO, rmfa);
   1801 	}
   1802 
   1803 	mutex_spin_exit(&sc->sc_intrlock);
   1804 	return (1);
   1805 }
   1806 
   1807 /*
   1808  * Handle an event signalled by the executive.
   1809  */
   1810 static void
   1811 iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
   1812 {
   1813 	struct i2o_util_event_register_reply *rb;
   1814 	u_int event;
   1815 
   1816 	rb = reply;
   1817 
   1818 	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
   1819 		return;
   1820 
   1821 	event = le32toh(rb->event);
   1822 	printf("%s: event 0x%08x received\n", device_xname(dv), event);
   1823 }
   1824 
   1825 /*
   1826  * Allocate a message wrapper.
   1827  */
   1828 struct iop_msg *
   1829 iop_msg_alloc(struct iop_softc *sc, int flags)
   1830 {
   1831 	struct iop_msg *im;
   1832 	static u_int tctxgen;
   1833 	int i;
   1834 
   1835 #ifdef I2ODEBUG
   1836 	if ((flags & IM_SYSMASK) != 0)
   1837 		panic("iop_msg_alloc: system flags specified");
   1838 #endif
   1839 
   1840 	mutex_spin_enter(&sc->sc_intrlock);
   1841 	im = SLIST_FIRST(&sc->sc_im_freelist);
   1842 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
   1843 	if (im == NULL)
   1844 		panic("iop_msg_alloc: no free wrappers");
   1845 #endif
   1846 	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
   1847 	mutex_spin_exit(&sc->sc_intrlock);
   1848 
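         	/*
         	 * A wrapper's transaction context is its fixed index into
         	 * sc_ims (the low IOP_TCTX_SHIFT bits) combined with a rolling
         	 * generation count in the upper bits; iop_handle_reply() uses
         	 * this to detect stale or corrupt contexts echoed by the IOP.
         	 */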
   1849 	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
   1850 	tctxgen += (1 << IOP_TCTX_SHIFT);
   1851 	im->im_flags = flags | IM_ALLOCED;
   1852 	im->im_rb = NULL;
   1853 	i = 0;
   1854 	do {
   1855 		im->im_xfer[i++].ix_size = 0;
   1856 	} while (i < IOP_MAX_MSG_XFERS);
   1857 
   1858 	return (im);
   1859 }
   1860 
   1861 /*
   1862  * Free a message wrapper.
   1863  */
   1864 void
   1865 iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
   1866 {
   1867 
   1868 #ifdef I2ODEBUG
   1869 	if ((im->im_flags & IM_ALLOCED) == 0)
   1870 		panic("iop_msg_free: wrapper not allocated");
   1871 #endif
   1872 
   1873 	im->im_flags = 0;
   1874 	mutex_spin_enter(&sc->sc_intrlock);
   1875 	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
   1876 	mutex_spin_exit(&sc->sc_intrlock);
   1877 }
   1878 
   1879 /*
   1880  * Map a data transfer.  Write a scatter-gather list into the message frame.
   1881  */
   1882 int
   1883 iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
   1884 	    void *xferaddr, int xfersize, int out, struct proc *up)
   1885 {
   1886 	bus_dmamap_t dm;
   1887 	bus_dma_segment_t *ds;
   1888 	struct iop_xfer *ix;
   1889 	u_int rv, i, nsegs, flg, off, xn;
   1890 	u_int32_t *p;
   1891 
   1892 	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
   1893 		if (ix->ix_size == 0)
   1894 			break;
   1895 
   1896 #ifdef I2ODEBUG
   1897 	if (xfersize == 0)
   1898 		panic("iop_msg_map: null transfer");
   1899 	if (xfersize > IOP_MAX_XFER)
   1900 		panic("iop_msg_map: transfer too large");
   1901 	if (xn == IOP_MAX_MSG_XFERS)
   1902 		panic("iop_msg_map: too many xfers");
   1903 #endif
   1904 
   1905 	/*
   1906 	 * Only the first DMA map is static.
   1907 	 */
   1908 	if (xn != 0) {
   1909 		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
   1910 		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
   1911 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
   1912 		if (rv != 0)
   1913 			return (rv);
   1914 	}
   1915 
   1916 	dm = ix->ix_map;
   1917 	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
   1918 	    (up == NULL ? BUS_DMA_NOWAIT : 0));
   1919 	if (rv != 0)
   1920 		goto bad;
   1921 
   1922 	/*
   1923 	 * How many SIMPLE SG elements can we fit in this message?
   1924 	 */
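         	/* (Each SIMPLE element is two words: flags|length, address.) */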
   1925 	off = mb[0] >> 16;
   1926 	p = mb + off;
   1927 	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
   1928 
   1929 	if (dm->dm_nsegs > nsegs) {
   1930 		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
   1931 		rv = EFBIG;
   1932 		DPRINTF(("iop_msg_map: too many segs\n"));
   1933 		goto bad;
   1934 	}
   1935 
   1936 	nsegs = dm->dm_nsegs;
   1937 	xfersize = 0;
   1938 
   1939 	/*
   1940 	 * Write out the SG list.
   1941 	 */
   1942 	if (out)
   1943 		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
   1944 	else
   1945 		flg = I2O_SGL_SIMPLE;
   1946 
   1947 	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
   1948 		p[0] = (u_int32_t)ds->ds_len | flg;
   1949 		p[1] = (u_int32_t)ds->ds_addr;
   1950 		xfersize += ds->ds_len;
   1951 	}
   1952 
   1953 	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
   1954 	p[1] = (u_int32_t)ds->ds_addr;
   1955 	xfersize += ds->ds_len;
   1956 
   1957 	/* Fix up the transfer record, and sync the map. */
   1958 	ix->ix_flags = (out ? IX_OUT : IX_IN);
   1959 	ix->ix_size = xfersize;
   1960 	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
   1961 	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
   1962 
   1963 	/*
   1964 	 * If this is the first xfer we've mapped for this message, adjust
   1965 	 * the SGL offset field in the message header.
   1966 	 */
   1967 	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
   1968 		mb[0] += (mb[0] >> 12) & 0xf0;
   1969 		im->im_flags |= IM_SGLOFFADJ;
   1970 	}
   1971 	mb[0] += (nsegs << 17);
   1972 	return (0);
   1973 
   1974  bad:
    1975 	if (xn != 0)
   1976 		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
   1977 	return (rv);
   1978 }
   1979 
   1980 /*
    1981  * Map a block I/O data transfer (which differs in that there is at most
    1982  * one transfer per message, and PAGE_LIST addressing may be used).  Write
    1983  * a scatter-gather list into the message frame.
   1984  */
   1985 int
   1986 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
   1987 		void *xferaddr, int xfersize, int out)
   1988 {
   1989 	bus_dma_segment_t *ds;
   1990 	bus_dmamap_t dm;
   1991 	struct iop_xfer *ix;
   1992 	u_int rv, i, nsegs, off, slen, tlen, flg;
   1993 	paddr_t saddr, eaddr;
   1994 	u_int32_t *p;
   1995 
   1996 #ifdef I2ODEBUG
   1997 	if (xfersize == 0)
   1998 		panic("iop_msg_map_bio: null transfer");
   1999 	if (xfersize > IOP_MAX_XFER)
   2000 		panic("iop_msg_map_bio: transfer too large");
   2001 	if ((im->im_flags & IM_SGLOFFADJ) != 0)
   2002 		panic("iop_msg_map_bio: SGLOFFADJ");
   2003 #endif
   2004 
   2005 	ix = im->im_xfer;
   2006 	dm = ix->ix_map;
   2007 	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
   2008 	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
   2009 	if (rv != 0)
   2010 		return (rv);
   2011 
   2012 	off = mb[0] >> 16;
   2013 	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
   2014 
   2015 	/*
   2016 	 * If the transfer is highly fragmented and won't fit using SIMPLE
   2017 	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
   2018 	 * potentially more efficient, both for us and the IOP.
   2019 	 */
   2020 	if (dm->dm_nsegs > nsegs) {
   2021 		nsegs = 1;
   2022 		p = mb + off + 1;
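         		/* mb[off] is left for the PAGE_LIST header, set below. */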
   2023 
   2024 		/* XXX This should be done with a bus_space flag. */
   2025 		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
   2026 			slen = ds->ds_len;
   2027 			saddr = ds->ds_addr;
   2028 
   2029 			while (slen > 0) {
   2030 				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
   2031 				tlen = min(eaddr - saddr, slen);
   2032 				slen -= tlen;
   2033 				*p++ = le32toh(saddr);
   2034 				saddr = eaddr;
   2035 				nsegs++;
   2036 			}
   2037 		}
   2038 
   2039 		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
   2040 		    I2O_SGL_END;
   2041 		if (out)
   2042 			mb[off] |= I2O_SGL_DATA_OUT;
   2043 	} else {
   2044 		p = mb + off;
   2045 		nsegs = dm->dm_nsegs;
   2046 
   2047 		if (out)
   2048 			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
   2049 		else
   2050 			flg = I2O_SGL_SIMPLE;
   2051 
   2052 		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
   2053 			p[0] = (u_int32_t)ds->ds_len | flg;
   2054 			p[1] = (u_int32_t)ds->ds_addr;
   2055 		}
   2056 
   2057 		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
   2058 		    I2O_SGL_END;
   2059 		p[1] = (u_int32_t)ds->ds_addr;
   2060 		nsegs <<= 1;
   2061 	}
   2062 
   2063 	/* Fix up the transfer record, and sync the map. */
   2064 	ix->ix_flags = (out ? IX_OUT : IX_IN);
   2065 	ix->ix_size = xfersize;
   2066 	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
   2067 	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
   2068 
   2069 	/*
   2070 	 * Adjust the SGL offset and total message size fields.  We don't
   2071 	 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
   2072 	 */
   2073 	mb[0] += ((off << 4) + (nsegs << 16));
   2074 	return (0);
   2075 }
   2076 
   2077 /*
   2078  * Unmap all data transfers associated with a message wrapper.
   2079  */
   2080 void
   2081 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
   2082 {
   2083 	struct iop_xfer *ix;
   2084 	int i;
   2085 
   2086 #ifdef I2ODEBUG
   2087 	if (im->im_xfer[0].ix_size == 0)
   2088 		panic("iop_msg_unmap: no transfers mapped");
   2089 #endif
   2090 
   2091 	for (ix = im->im_xfer, i = 0;;) {
   2092 		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
   2093 		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
   2094 		    BUS_DMASYNC_POSTREAD);
   2095 		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
   2096 
   2097 		/* Only the first DMA map is static. */
   2098 		if (i != 0)
   2099 			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
   2100 		if ((++ix)->ix_size == 0)
   2101 			break;
   2102 		if (++i >= IOP_MAX_MSG_XFERS)
   2103 			break;
   2104 	}
   2105 }
   2106 
   2107 /*
   2108  * Post a message frame to the IOP's inbound queue.
   2109  */
   2110 int
   2111 iop_post(struct iop_softc *sc, u_int32_t *mb)
   2112 {
   2113 	u_int32_t mfa;
   2114 
   2115 #ifdef I2ODEBUG
   2116 	if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
   2117 		panic("iop_post: frame too large");
   2118 #endif
   2119 
   2120 	mutex_spin_enter(&sc->sc_intrlock);
   2121 
    2122 	/* Allocate an MFA from the IOP's inbound FIFO. */
   2123 	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
   2124 		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
   2125 			mutex_spin_exit(&sc->sc_intrlock);
   2126 			aprint_error_dev(&sc->sc_dv, "mfa not forthcoming\n");
   2127 			return (EAGAIN);
   2128 		}
   2129 
   2130 	/* Perform reply buffer DMA synchronisation. */
   2131 	if (sc->sc_rep_size != 0) {
   2132 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
   2133 		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);
   2134 	}
   2135 
   2136 	/* Copy out the message frame. */
   2137 	bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
   2138 	    mb[0] >> 16);
   2139 	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
   2140 	    (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
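         	/*
         	 * (The region write above takes a length in 32-bit words; the
         	 * barrier takes one in bytes.  Both derive from the size field
         	 * in mb[0].)
         	 */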
   2141 
   2142 	/* Post the MFA back to the IOP. */
   2143 	iop_outl(sc, IOP_REG_IFIFO, mfa);
   2144 
   2145 	mutex_spin_exit(&sc->sc_intrlock);
   2146 	return (0);
   2147 }
   2148 
   2149 /*
   2150  * Post a message to the IOP and deal with completion.
   2151  */
   2152 int
   2153 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
   2154 {
   2155 	u_int32_t *mb;
   2156 	int rv;
   2157 
   2158 	mb = xmb;
   2159 
   2160 	/* Terminate the scatter/gather list chain. */
   2161 	if ((im->im_flags & IM_SGLOFFADJ) != 0)
   2162 		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
   2163 
   2164 	if ((rv = iop_post(sc, mb)) != 0)
   2165 		return (rv);
   2166 
   2167 	if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
   2168 		if ((im->im_flags & IM_POLL) != 0)
   2169 			iop_msg_poll(sc, im, timo);
   2170 		else
   2171 			iop_msg_wait(sc, im, timo);
   2172 
   2173 		mutex_spin_enter(&sc->sc_intrlock);
   2174 		if ((im->im_flags & IM_REPLIED) != 0) {
   2175 			if ((im->im_flags & IM_NOSTATUS) != 0)
   2176 				rv = 0;
   2177 			else if ((im->im_flags & IM_FAIL) != 0)
   2178 				rv = ENXIO;
   2179 			else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
   2180 				rv = EIO;
   2181 			else
   2182 				rv = 0;
   2183 		} else
   2184 			rv = EBUSY;
   2185 		mutex_spin_exit(&sc->sc_intrlock);
   2186 	} else
   2187 		rv = 0;
   2188 
   2189 	return (rv);
   2190 }
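
         /*
          * Putting the pieces together, a synchronous transaction runs roughly
          * as follows (a sketch modelled on iop_util_claim() below, not a
          * recipe).  `mf' is whichever frame structure from <dev/i2o/i2o.h>
          * matches the function being issued:
          *
          *	im = iop_msg_alloc(sc, IM_WAIT);
          *	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
          *	mf.msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_CLAIM);
          *	mf.msgictx = ii->ii_ictx;
          *	mf.msgtctx = im->im_tctx;
          *	mf.flags = flags;
          *	rv = iop_msg_post(sc, im, &mf, 5000);
          *	iop_msg_free(sc, im);
          */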
   2191 
   2192 /*
   2193  * Spin until the specified message is replied to.
   2194  */
   2195 static void
   2196 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
   2197 {
   2198 	u_int32_t rmfa;
   2199 
   2200 	mutex_spin_enter(&sc->sc_intrlock);
   2201 
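         	/* `timo' is given in milliseconds; poll in 100us steps. */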
   2202 	for (timo *= 10; timo != 0; timo--) {
   2203 		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
   2204 			/* Double read to account for IOP bug. */
   2205 			rmfa = iop_inl(sc, IOP_REG_OFIFO);
   2206 			if (rmfa == IOP_MFA_EMPTY)
   2207 				rmfa = iop_inl(sc, IOP_REG_OFIFO);
   2208 			if (rmfa != IOP_MFA_EMPTY) {
   2209 				iop_handle_reply(sc, rmfa);
   2210 
   2211 				/*
   2212 				 * Return the reply frame to the IOP's
   2213 				 * outbound FIFO.
   2214 				 */
   2215 				iop_outl(sc, IOP_REG_OFIFO, rmfa);
   2216 			}
   2217 		}
   2218 		if ((im->im_flags & IM_REPLIED) != 0)
   2219 			break;
   2220 		mutex_spin_exit(&sc->sc_intrlock);
   2221 		DELAY(100);
   2222 		mutex_spin_enter(&sc->sc_intrlock);
   2223 	}
   2224 
   2225 	if (timo == 0) {
   2226 #ifdef I2ODEBUG
   2227 		printf("%s: poll - no reply\n", device_xname(&sc->sc_dv));
   2228 		if (iop_status_get(sc, 1) != 0)
   2229 			printf("iop_msg_poll: unable to retrieve status\n");
   2230 		else
   2231 			printf("iop_msg_poll: IOP state = %d\n",
   2232 			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
   2233 #endif
   2234 	}
   2235 
   2236 	mutex_spin_exit(&sc->sc_intrlock);
   2237 }
   2238 
   2239 /*
   2240  * Sleep until the specified message is replied to.
   2241  */
   2242 static void
   2243 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
   2244 {
   2245 	int rv;
   2246 
   2247 	mutex_spin_enter(&sc->sc_intrlock);
   2248 	if ((im->im_flags & IM_REPLIED) != 0) {
   2249 		mutex_spin_exit(&sc->sc_intrlock);
   2250 		return;
   2251 	}
   2252 	rv = cv_timedwait(&im->im_cv, &sc->sc_intrlock, mstohz(timo));
   2253 	mutex_spin_exit(&sc->sc_intrlock);
   2254 
   2255 #ifdef I2ODEBUG
   2256 	if (rv != 0) {
    2257 		printf("iop_msg_wait: cv_timedwait() == %d\n", rv);
   2258 		if (iop_status_get(sc, 0) != 0)
   2259 			printf("iop_msg_wait: unable to retrieve status\n");
   2260 		else
   2261 			printf("iop_msg_wait: IOP state = %d\n",
   2262 			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
   2263 	}
   2264 #endif
   2265 }
   2266 
   2267 /*
   2268  * Release an unused message frame back to the IOP's inbound fifo.
   2269  */
   2270 static void
   2271 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
   2272 {
   2273 
   2274 	/* Use the frame to issue a no-op. */
   2275 	iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
   2276 	iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
   2277 	iop_outl_msg(sc, mfa + 8, 0);
   2278 	iop_outl_msg(sc, mfa + 12, 0);
   2279 
   2280 	iop_outl(sc, IOP_REG_IFIFO, mfa);
   2281 }
   2282 
   2283 #ifdef I2ODEBUG
   2284 /*
   2285  * Dump a reply frame header.
   2286  */
   2287 static void
   2288 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
   2289 {
   2290 	u_int function, detail;
   2291 	const char *statusstr;
   2292 
   2293 	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
   2294 	detail = le16toh(rb->detail);
   2295 
   2296 	printf("%s: reply:\n", device_xname(&sc->sc_dv));
   2297 
   2298 	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
   2299 		statusstr = iop_status[rb->reqstatus];
   2300 	else
   2301 		statusstr = "undefined error code";
   2302 
   2303 	printf("%s:   function=0x%02x status=0x%02x (%s)\n",
   2304 	    device_xname(&sc->sc_dv), function, rb->reqstatus, statusstr);
   2305 	printf("%s:   detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
   2306 	    device_xname(&sc->sc_dv), detail, le32toh(rb->msgictx),
   2307 	    le32toh(rb->msgtctx));
   2308 	printf("%s:   tidi=%d tidt=%d flags=0x%02x\n", device_xname(&sc->sc_dv),
   2309 	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
   2310 	    (le32toh(rb->msgflags) >> 8) & 0xff);
   2311 }
   2312 #endif
   2313 
   2314 /*
   2315  * Dump a transport failure reply.
   2316  */
   2317 static void
   2318 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
   2319 {
   2320 
   2321 	printf("%s: WARNING: transport failure:\n", device_xname(&sc->sc_dv));
   2322 
   2323 	printf("%s:  ictx=0x%08x tctx=0x%08x\n", device_xname(&sc->sc_dv),
   2324 	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
   2325 	printf("%s:  failurecode=0x%02x severity=0x%02x\n",
   2326 	    device_xname(&sc->sc_dv), fn->failurecode, fn->severity);
   2327 	printf("%s:  highestver=0x%02x lowestver=0x%02x\n",
   2328 	    device_xname(&sc->sc_dv), fn->highestver, fn->lowestver);
   2329 }
   2330 
   2331 /*
   2332  * Translate an I2O ASCII field into a C string.
   2333  */
   2334 void
   2335 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
   2336 {
   2337 	int hc, lc, i, nit;
   2338 
   2339 	dlen--;
   2340 	lc = 0;
   2341 	hc = 0;
   2342 	i = 0;
   2343 
   2344 	/*
   2345 	 * DPT use NUL as a space, whereas AMI use it as a terminator.  The
   2346 	 * spec has nothing to say about it.  Since AMI fields are usually
    2347 	 * filled with junk after the terminator, treat NUL as a terminator
         	 * unless the IOP reports DPT as its organisation ID.
    2348 	 */
   2349 	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
   2350 
   2351 	while (slen-- != 0 && dlen-- != 0) {
   2352 		if (nit && *src == '\0')
   2353 			break;
   2354 		else if (*src <= 0x20 || *src >= 0x7f) {
   2355 			if (hc)
   2356 				dst[i++] = ' ';
   2357 		} else {
   2358 			hc = 1;
   2359 			dst[i++] = *src;
   2360 			lc = i;
   2361 		}
   2362 		src++;
   2363 	}
   2364 
   2365 	dst[lc] = '\0';
   2366 }
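
         /*
          * For example, an AMI-style field { 'R', 'A', 'I', 'D', '\0', <junk> }
          * yields "RAID", while a DPT-style field { 'D', 'P', 'T', ' ', ' ' }
          * yields "DPT": trailing padding is trimmed via `lc'.
          */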
   2367 
   2368 /*
   2369  * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
   2370  */
   2371 int
   2372 iop_print_ident(struct iop_softc *sc, int tid)
   2373 {
   2374 	struct {
   2375 		struct	i2o_param_op_results pr;
   2376 		struct	i2o_param_read_results prr;
   2377 		struct	i2o_param_device_identity di;
   2378 	} __packed p;
   2379 	char buf[32];
   2380 	int rv;
   2381 
   2382 	rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
   2383 	    sizeof(p), NULL);
   2384 	if (rv != 0)
   2385 		return (rv);
   2386 
   2387 	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
   2388 	    sizeof(buf));
   2389 	printf(" <%s, ", buf);
   2390 	iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
   2391 	    sizeof(buf));
   2392 	printf("%s, ", buf);
   2393 	iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
   2394 	printf("%s>", buf);
   2395 
   2396 	return (0);
   2397 }
   2398 
   2399 /*
   2400  * Claim or unclaim the specified TID.
   2401  */
   2402 int
   2403 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
   2404 	       int flags)
   2405 {
   2406 	struct iop_msg *im;
   2407 	struct i2o_util_claim mf;
   2408 	int rv, func;
   2409 
   2410 	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
   2411 	im = iop_msg_alloc(sc, IM_WAIT);
   2412 
   2413 	/* We can use the same structure, as they're identical. */
   2414 	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
   2415 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
   2416 	mf.msgictx = ii->ii_ictx;
   2417 	mf.msgtctx = im->im_tctx;
   2418 	mf.flags = flags;
   2419 
   2420 	rv = iop_msg_post(sc, im, &mf, 5000);
   2421 	iop_msg_free(sc, im);
   2422 	return (rv);
   2423 }
   2424 
   2425 /*
   2426  * Perform an abort.
   2427  */
         int
    2428 iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    2429 	       int tctxabort, int flags)
   2430 {
   2431 	struct iop_msg *im;
   2432 	struct i2o_util_abort mf;
   2433 	int rv;
   2434 
   2435 	im = iop_msg_alloc(sc, IM_WAIT);
   2436 
   2437 	mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
   2438 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
   2439 	mf.msgictx = ii->ii_ictx;
   2440 	mf.msgtctx = im->im_tctx;
   2441 	mf.flags = (func << 24) | flags;
   2442 	mf.tctxabort = tctxabort;
   2443 
   2444 	rv = iop_msg_post(sc, im, &mf, 5000);
   2445 	iop_msg_free(sc, im);
   2446 	return (rv);
   2447 }
   2448 
   2449 /*
   2450  * Enable or disable reception of events for the specified device.
   2451  */
         int
    2452 iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
   2453 {
   2454 	struct i2o_util_event_register mf;
   2455 
   2456 	mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
   2457 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
   2458 	mf.msgictx = ii->ii_ictx;
   2459 	mf.msgtctx = 0;
   2460 	mf.eventmask = mask;
   2461 
    2462 	/*
         	 * This message is replied to only when events are signalled;
         	 * the reply is then delivered through the registering
         	 * initiator's ii_intr handler (cf. iop_intr_event() above).
         	 */
   2463 	return (iop_post(sc, (u_int32_t *)&mf));
   2464 }
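
         /*
          * Character device interface (/dev/iopN), used by management tools
          * such as iopctl(8) to read the status record, LCT and TID map, to
          * force a reconfiguration, and to pass raw messages to the IOP.
          */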
   2465 
   2466 int
   2467 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
   2468 {
   2469 	struct iop_softc *sc;
   2470 
   2471 	if ((sc = device_lookup_private(&iop_cd, minor(dev))) == NULL)
   2472 		return (ENXIO);
   2473 	if ((sc->sc_flags & IOP_ONLINE) == 0)
   2474 		return (ENXIO);
   2475 	if ((sc->sc_flags & IOP_OPEN) != 0)
   2476 		return (EBUSY);
   2477 	sc->sc_flags |= IOP_OPEN;
   2478 
   2479 	return (0);
   2480 }
   2481 
   2482 int
   2483 iopclose(dev_t dev, int flag, int mode,
   2484     struct lwp *l)
   2485 {
   2486 	struct iop_softc *sc;
   2487 
   2488 	sc = device_lookup_private(&iop_cd, minor(dev));
   2489 	sc->sc_flags &= ~IOP_OPEN;
   2490 
   2491 	return (0);
   2492 }
   2493 
   2494 int
   2495 iopioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
   2496 {
   2497 	struct iop_softc *sc;
   2498 	struct iovec *iov;
   2499 	int rv, i;
   2500 
   2501 	sc = device_lookup_private(&iop_cd, minor(dev));
   2502 	rv = 0;
   2503 
   2504 	switch (cmd) {
   2505 	case IOPIOCPT:
   2506 		rv = kauth_authorize_device_passthru(l->l_cred, dev,
   2507 		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
   2508 		if (rv)
   2509 			return (rv);
   2510 
   2511 		return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));
   2512 
   2513 	case IOPIOCGSTATUS:
   2514 		iov = (struct iovec *)data;
   2515 		i = sizeof(struct i2o_status);
   2516 		if (i > iov->iov_len)
   2517 			i = iov->iov_len;
   2518 		else
   2519 			iov->iov_len = i;
   2520 		if ((rv = iop_status_get(sc, 0)) == 0)
   2521 			rv = copyout(&sc->sc_status, iov->iov_base, i);
   2522 		return (rv);
   2523 
   2524 	case IOPIOCGLCT:
   2525 	case IOPIOCGTIDMAP:
   2526 	case IOPIOCRECONFIG:
   2527 		break;
   2528 
   2529 	default:
   2530 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
   2531 		printf("%s: unknown ioctl %lx\n", device_xname(&sc->sc_dv), cmd);
   2532 #endif
   2533 		return (ENOTTY);
   2534 	}
   2535 
   2536 	mutex_enter(&sc->sc_conflock);
   2537 
   2538 	switch (cmd) {
   2539 	case IOPIOCGLCT:
   2540 		iov = (struct iovec *)data;
   2541 		i = le16toh(sc->sc_lct->tablesize) << 2;
   2542 		if (i > iov->iov_len)
   2543 			i = iov->iov_len;
   2544 		else
   2545 			iov->iov_len = i;
   2546 		rv = copyout(sc->sc_lct, iov->iov_base, i);
   2547 		break;
   2548 
   2549 	case IOPIOCRECONFIG:
   2550 		rv = iop_reconfigure(sc, 0);
   2551 		break;
   2552 
   2553 	case IOPIOCGTIDMAP:
   2554 		iov = (struct iovec *)data;
   2555 		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
   2556 		if (i > iov->iov_len)
   2557 			i = iov->iov_len;
   2558 		else
   2559 			iov->iov_len = i;
   2560 		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
   2561 		break;
   2562 	}
   2563 
   2564 	mutex_exit(&sc->sc_conflock);
   2565 	return (rv);
   2566 }
   2567 
   2568 static int
   2569 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
   2570 {
   2571 	struct iop_msg *im;
   2572 	struct i2o_msg *mf;
   2573 	struct ioppt_buf *ptb;
   2574 	int rv, i, mapped;
   2575 
   2576 	mf = NULL;
   2577 	im = NULL;
    2578 	mapped = 0;		/* set once iop_msg_map() succeeds */
   2579 
   2580 	if (pt->pt_msglen > sc->sc_framesize ||
   2581 	    pt->pt_msglen < sizeof(struct i2o_msg) ||
   2582 	    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
   2583 	    pt->pt_nbufs < 0 ||
   2584 #if 0
   2585 	    pt->pt_replylen < 0 ||
   2586 #endif
    2587 	    pt->pt_timo < 1000 || pt->pt_timo > 5 * 60 * 1000)
   2588 		return (EINVAL);
   2589 
   2590 	for (i = 0; i < pt->pt_nbufs; i++)
   2591 		if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
   2592 			rv = ENOMEM;
   2593 			goto bad;
   2594 		}
   2595 
   2596 	mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
   2597 	if (mf == NULL)
   2598 		return (ENOMEM);
   2599 
   2600 	if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
   2601 		goto bad;
   2602 
   2603 	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
   2604 	im->im_rb = (struct i2o_reply *)mf;
   2605 	mf->msgictx = IOP_ICTX;
   2606 	mf->msgtctx = im->im_tctx;
   2607 
   2608 	for (i = 0; i < pt->pt_nbufs; i++) {
   2609 		ptb = &pt->pt_bufs[i];
   2610 		rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
   2611 		    ptb->ptb_datalen, ptb->ptb_out != 0, p);
   2612 		if (rv != 0)
   2613 			goto bad;
   2614 		mapped = 1;
   2615 	}
   2616 
   2617 	if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
   2618 		goto bad;
   2619 
   2620 	i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
   2621 	if (i > sc->sc_framesize)
   2622 		i = sc->sc_framesize;
   2623 	if (i > pt->pt_replylen)
   2624 		i = pt->pt_replylen;
   2625 	rv = copyout(im->im_rb, pt->pt_reply, i);
   2626 
   2627  bad:
   2628 	if (mapped != 0)
   2629 		iop_msg_unmap(sc, im);
   2630 	if (im != NULL)
   2631 		iop_msg_free(sc, im);
   2632 	if (mf != NULL)
   2633 		free(mf, M_DEVBUF);
   2634 	return (rv);
   2635 }
   2636