/*	$NetBSD: iop.c,v 1.88 2017/10/28 04:53:55 riastradh Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.88 2017/10/28 04:53:55 riastradh Exp $");

#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>
#include <sys/kauth.h>
#include <sys/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#include "ioconf.h"
#include "locators.h"

#define POLL(ms, cond)				\
do {						\
	int xi;					\
	for (xi = (ms) * 10; xi; xi--) {	\
		if (cond)			\
			break;			\
		DELAY(100);			\
	}					\
} while (/* CONSTCOND */0);
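/*
 * Usage sketch (simplified from the real call site in iop_reset() below,
 * which also performs a DMA sync inside the condition): spin for up to
 * 2.5 seconds waiting for the IOP to acknowledge a reset.
 *
 *	POLL(2500, *sw != 0);
 *
 * The condition is re-evaluated every 100us, so `ms' bounds the total
 * busy-wait in milliseconds.
 */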

#ifdef I2ODEBUG
#define DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#define IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)
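/*
 * The "+ 1" above allows for a transfer that does not begin on a page
 * boundary: IOP_MAX_XFER bytes can then touch one more page than a
 * page-aligned transfer of the same size would.
 */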

#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)
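/*
 * A transaction context encodes the owning message wrapper's index in
 * its low IOP_TCTX_SHIFT bits; iop_handle_reply() recovers the wrapper
 * with `sc->sc_ims + (tctx & IOP_TCTX_MASK)'.  The bits above the mask
 * vary between allocations and act as a staleness check: a reply whose
 * tctx no longer matches the wrapper's im_tctx is rejected.
 */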

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long	iop_ictxhash;
static void	*iop_sdh;
static struct	i2o_systab *iop_systab;
static int	iop_systab_size;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	.d_open = iopopen,
	.d_close = iopclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = iopioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02
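/*
 * IC_CONFIGURE marks classes we attempt to autoconfigure; IC_PRIORITY
 * additionally marks those matched in the first of the two passes made
 * by iop_reconfigure(), so that high-level devices (e.g. block storage)
 * are configured before the ports beneath them, letting their claims
 * mask aliased devices.
 */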

static struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
	const char *ic_caption;
} const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		"executive"
	},
	{
		I2O_CLASS_DDM,
		0,
		"device driver module"
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"random block storage"
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"sequential storage"
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		"LAN port"
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		"WAN port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		"fibrechannel port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		"fibrechannel peripheral"
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		"SCSI peripheral"
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		"ATE port"
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		"ATE peripheral"
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		"floppy controller"
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		"floppy device"
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		"bus adapter port"
	},
};

#ifdef I2ODEBUG
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t	iop_inl_msg(struct iop_softc *, int);
static inline void	iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(device_t);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *, size_t);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);

static void	iop_adjqparam(struct iop_softc *, int);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(device_t, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
				struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_sys_enable(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
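/*
 * Note the barrier discipline in the accessors above: each read is
 * preceded by a full read/write barrier, so the register access cannot
 * be satisfied by, or reordered around, earlier accesses; each write is
 * followed by a write barrier to push it out before anything that
 * depends on it.
 */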

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	mutex_init(&sc->sc_intrlock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&sc->sc_conflock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_confcv, "iopconf");

	if (iop_ictxhashtbl == NULL) {
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    true, &iop_ictxhash);
	}

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		aprint_error_dev(sc->sc_dev, "cannot create scratch dmamap\n");
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(sc->sc_dev, "cannot alloc scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		aprint_error_dev(sc->sc_dev, "cannot map scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "cannot load scratch dmamap\n");
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Avoid syncing the reply map until it's set up. */
	sc->sc_curib = 0x123;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "not responding (reset)\n");
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		aprint_error_dev(sc->sc_dev, "not responding (get status)\n");
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n",
	    device_xname(sc->sc_dev),
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", device_xname(sc->sc_dev));
	printf("%s: mem  %04x %04x %08x\n", device_xname(sc->sc_dev),
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o  %04x %04x %08x\n", device_xname(sc->sc_dev),
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		aprint_error_dev(sc->sc_dev, "frame size too small (%d)\n",
		    sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		aprint_error_dev(sc->sc_dev, "memory allocation failure\n");
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't create dmamap (%d)\n", rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
		cv_init(&im->im_cv, "iopmsg");
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to init outbound FIFO\n");
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts(sc->sc_dev, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", device_xname(sc->sc_dev),
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    device_xname(sc->sc_dev), sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	return;

 bail_out3:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
 bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(device_t self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;
	int locs[IOPCF_NLOCS];

	sc = device_private(self);
	mutex_enter(&sc->sc_conflock);

	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", device_xname(sc->sc_dev));

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", device_xname(sc->sc_dev));
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				aprint_error_dev(sc->sc_dev, "unable to retrieve status\n");
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0) {
			mutex_exit(&sc->sc_conflock);
			return;
		}

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = device_unit(iop->sc_dev) + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to set system table\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}
	if (iop_sys_enable(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to enable system\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "unable to register for events\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	locs[IOPCF_TID] = I2O_TID_IOP;
	config_found_sm_loc(self, "iop", locs, &ia, iop_print,
		config_stdsubmatch);

	/*
	 * Start device configuration.
	 */
	if ((rv = iop_reconfigure(sc, 0)) == -1)
		aprint_error_dev(sc->sc_dev, "configure failed (%d)\n", rv);


	sc->sc_flags |= IOP_ONLINE;
	rv = kthread_create(PRI_NONE, 0, NULL, iop_reconf_thread, sc,
	    &sc->sc_reconf_thread, "%s", device_xname(sc->sc_dev));
	mutex_exit(&sc->sc_conflock);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create reconfiguration thread (%d)\n", rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    device_xname(sc->sc_dev), chgind));

		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    device_xname(sc->sc_dev), le32toh(lct.changeindicator), rv));

		mutex_enter(&sc->sc_conflock);
		if (rv == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
		}
		(void)cv_timedwait(&sc->sc_confcv, &sc->sc_conflock, hz * 5);
		mutex_exit(&sc->sc_conflock);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	KASSERT(mutex_owned(&sc->sc_conflock));

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", device_xname(sc->sc_dev),
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				aprint_error_dev(sc->sc_dev, "bus scan failed\n");
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", device_xname(sc->sc_dev)));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", device_xname(sc->sc_dev), sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", device_xname(sc->sc_dev)));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", device_xname(sc->sc_dev)));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	}
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
			config_detach(ii->ii_dv, DETACH_FORCE);
			continue;
		}

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			aprint_error_dev(sc->sc_dev, "%s failed reconfigure (%d)\n",
			    device_xname(ii->ii_dv), rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	device_t dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    device_xname(ii->ii_dv));
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(sc->sc_dev, "iop", locs, &ia,
					 iop_print, config_stdsubmatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, device_xname(dv));
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo, size_t l)
{
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		snprintf(devinfo, l, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, l);
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup_private(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_POSTWRITE);
	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	for (i = 100; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			kpause("iopstat", false, hz / 10, NULL);
	}

	if (st->syncbyte != 0xff) {
		aprint_error_dev(sc->sc_dev, "STATUS_GET timed out\n");
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_dmamap->dm_segs[0].ds_addr;
	mb[0] += 2 << 16;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE);
	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		aprint_error_dev(sc->sc_dev, "outbound FIFO init failed (%d)\n",
		    le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA alloc = %d\n",
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA map = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA create = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA load = %d\n", rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;

		/* Now safe to sync the reply map. */
		sc->sc_curib = 0;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}
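/*
 * Note that the "MFAs" handed to the IOP above are bus addresses within
 * the contiguous sc_rep region; iop_handle_reply() converts a returned
 * MFA back into a reply-frame pointer by subtracting sc_rep_phys.
 */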

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", device_xname(sc->sc_dev),
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
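	/*
	 * tablesize is in 32-bit words and covers the LCT header, which
	 * itself declares one entry inline; hence one entry size is added
	 * back below before dividing out the entry count.
	 */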
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Post a SYS_ENABLE message to the adapter.
 */
int
iop_sys_enable(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, 30000);
	if (rv == 0) {
		if ((im->im_flags & IM_FAIL) != 0)
			rv = ENXIO;
		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
			rv = 0;
		else
			rv = EIO;
	}

	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    device_xname(sc->sc_dev), tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}
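/*
 * Usage sketch for the synchronous case, assuming the parameter-group
 * structures declared in <dev/i2o/i2o.h> (the buffer layout here is
 * illustrative; real callers shape it to match the group being read):
 *
 *	struct {
 *		struct	i2o_param_op_results pr;
 *		struct	i2o_param_read_results prr;
 *		struct	i2o_param_device_identity di;
 *	} __packed p;
 *
 *	rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY,
 *	    &p, sizeof(p), NULL);
 */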

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
	      int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(sc->sc_dev, "FIELD_SET failed for tid %d group %d\n",
		    tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(sc->sc_dev, "TABLE_CLEAR failed for tid %d group %d\n",
		    tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(sc->sc_dev, "ADD_ROW failed for tid %d group %d row %d\n",
		    tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}
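/*
 * For example, iop_shutdown() above quiesces each IOP with:
 *
 *	iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
 *	    0, 5000);
 */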

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (device_unit(sc->sc_dev) + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "can't alloc priv mem space, err = %d\n", rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "can't alloc priv i/o space, err = %d\n", rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE);
	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		aprint_error_dev(sc->sc_dev, "reset rejected, status 0x%x\n",
		    le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		aprint_error_dev(sc->sc_dev, "reset failed\n");
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	cv_init(&ii->ii_cv, "iopevt");

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);
}
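/*
 * For a registration example, see iop_config_interrupts() above: the
 * per-IOP event handler is registered with II_NOTCTX | II_UTILITY, so
 * it stays off the configuration list (while still consuming an
 * inbound queue slot) and its replies arrive without message wrappers.
 */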
   1605 
   1606 /*
   1607  * Unregister an initiator.  Must be called with the configuration lock
   1608  * held.
   1609  */
   1610 void
   1611 iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
   1612 {
   1613 
   1614 	if ((ii->ii_flags & II_UTILITY) == 0) {
   1615 		LIST_REMOVE(ii, ii_list);
   1616 		sc->sc_nii--;
   1617 	} else
   1618 		sc->sc_nuii--;
   1619 
   1620 	mutex_spin_enter(&sc->sc_intrlock);
   1621 	LIST_REMOVE(ii, ii_hash);
   1622 	mutex_spin_exit(&sc->sc_intrlock);
   1623 
   1624 	cv_destroy(&ii->ii_cv);
   1625 }
   1626 
   1627 /*
   1628  * Handle a reply frame from the IOP.
   1629  */
   1630 static int
   1631 iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
   1632 {
   1633 	struct iop_msg *im;
   1634 	struct i2o_reply *rb;
   1635 	struct i2o_fault_notify *fn;
   1636 	struct iop_initiator *ii;
   1637 	u_int off, ictx, tctx, status, size;
   1638 
   1639 	KASSERT(mutex_owned(&sc->sc_intrlock));
   1640 
   1641 	off = (int)(rmfa - sc->sc_rep_phys);
   1642 	rb = (struct i2o_reply *)((char *)sc->sc_rep + off);
   1643 
   1644 	/* Perform reply queue DMA synchronisation. */
   1645 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
   1646 	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
   1647 
   1648 #ifdef I2ODEBUG
   1649 	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
   1650 		panic("iop_handle_reply: 64-bit reply");
   1651 #endif
   1652 	/*
   1653 	 * Find the initiator.
   1654 	 */
   1655 	ictx = le32toh(rb->msgictx);
   1656 	if (ictx == IOP_ICTX)
   1657 		ii = NULL;
   1658 	else {
   1659 		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
   1660 		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
   1661 			if (ii->ii_ictx == ictx)
   1662 				break;
   1663 		if (ii == NULL) {
   1664 #ifdef I2ODEBUG
   1665 			iop_reply_print(sc, rb);
   1666 #endif
    1667 			aprint_error_dev(sc->sc_dev,
    1668 			    "WARNING: bad ictx returned (%x)\n", ictx);
   1669 			return (-1);
   1670 		}
   1671 	}
   1672 
   1673 	/*
   1674 	 * If we received a transport failure notice, we've got to dig the
   1675 	 * transaction context (if any) out of the original message frame,
   1676 	 * and then release the original MFA back to the inbound FIFO.
   1677 	 */
   1678 	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
   1679 		status = I2O_STATUS_SUCCESS;
   1680 
   1681 		fn = (struct i2o_fault_notify *)rb;
   1682 		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
   1683 		iop_release_mfa(sc, fn->lowmfa);
   1684 		iop_tfn_print(sc, fn);
   1685 	} else {
   1686 		status = rb->reqstatus;
   1687 		tctx = le32toh(rb->msgtctx);
   1688 	}
   1689 
   1690 	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
   1691 		/*
   1692 		 * This initiator tracks state using message wrappers.
   1693 		 *
   1694 		 * Find the originating message wrapper, and if requested
   1695 		 * notify the initiator.
   1696 		 */
   1697 		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
    1698 		if ((tctx & IOP_TCTX_MASK) >= sc->sc_maxib ||
    1699 		    (im->im_flags & IM_ALLOCED) == 0 ||
    1700 		    tctx != im->im_tctx) {
    1701 			aprint_error_dev(sc->sc_dev,
    1702 			    "WARNING: bad tctx returned (0x%08x, %p)\n", tctx, im);
    1703 			aprint_error_dev(sc->sc_dev, "flags=0x%08x tctx=0x%08x\n",
    1704 			    im->im_flags, im->im_tctx);
   1705 #ifdef I2ODEBUG
   1706 			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
   1707 				iop_reply_print(sc, rb);
   1708 #endif
   1709 			return (-1);
   1710 		}
   1711 
   1712 		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
   1713 			im->im_flags |= IM_FAIL;
   1714 
   1715 #ifdef I2ODEBUG
   1716 		if ((im->im_flags & IM_REPLIED) != 0)
   1717 			panic("%s: dup reply", device_xname(sc->sc_dev));
   1718 #endif
   1719 		im->im_flags |= IM_REPLIED;
   1720 
   1721 #ifdef I2ODEBUG
   1722 		if (status != I2O_STATUS_SUCCESS)
   1723 			iop_reply_print(sc, rb);
   1724 #endif
   1725 		im->im_reqstatus = status;
   1726 		im->im_detstatus = le16toh(rb->detail);
   1727 
   1728 		/* Copy the reply frame, if requested. */
   1729 		if (im->im_rb != NULL) {
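         			/*
         			 * msgflags carries the reply size in 32-bit
         			 * words in its upper 16 bits; shifting right
         			 * by 14 and masking off the low two bits
         			 * yields the size in bytes.
         			 */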
   1730 			size = (le32toh(rb->msgflags) >> 14) & ~3;
   1731 #ifdef I2ODEBUG
   1732 			if (size > sc->sc_framesize)
   1733 				panic("iop_handle_reply: reply too large");
   1734 #endif
   1735 			memcpy(im->im_rb, rb, size);
   1736 		}
   1737 
   1738 		/* Notify the initiator. */
   1739 		if ((im->im_flags & IM_WAIT) != 0)
   1740 			cv_broadcast(&im->im_cv);
   1741 		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
   1742 			if (ii != NULL) {
   1743 				mutex_spin_exit(&sc->sc_intrlock);
   1744 				(*ii->ii_intr)(ii->ii_dv, im, rb);
   1745 				mutex_spin_enter(&sc->sc_intrlock);
   1746 			}
   1747 		}
   1748 	} else {
   1749 		/*
   1750 		 * This initiator discards message wrappers.
   1751 		 *
   1752 		 * Simply pass the reply frame to the initiator.
   1753 		 */
   1754 		if (ii != NULL) {
   1755 			mutex_spin_exit(&sc->sc_intrlock);
   1756 			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
   1757 			mutex_spin_enter(&sc->sc_intrlock);
   1758 		}
   1759 	}
   1760 
   1761 	return (status);
   1762 }
   1763 
   1764 /*
   1765  * Handle an interrupt from the IOP.
   1766  */
   1767 int
   1768 iop_intr(void *arg)
   1769 {
   1770 	struct iop_softc *sc;
   1771 	u_int32_t rmfa;
   1772 
   1773 	sc = arg;
   1774 
   1775 	mutex_spin_enter(&sc->sc_intrlock);
   1776 
   1777 	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) {
   1778 		mutex_spin_exit(&sc->sc_intrlock);
   1779 		return (0);
   1780 	}
   1781 
   1782 	for (;;) {
   1783 		/* Double read to account for IOP bug. */
   1784 		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
   1785 			rmfa = iop_inl(sc, IOP_REG_OFIFO);
   1786 			if (rmfa == IOP_MFA_EMPTY)
   1787 				break;
   1788 		}
   1789 		iop_handle_reply(sc, rmfa);
   1790 		iop_outl(sc, IOP_REG_OFIFO, rmfa);
   1791 	}
   1792 
   1793 	mutex_spin_exit(&sc->sc_intrlock);
   1794 	return (1);
   1795 }
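
         /*
          * A rough sketch of how a bus front-end is expected to hook this
          * handler up (PCI shown; "pa" and "ih" come from the usual PCI
          * attach plumbing, and the sc_ih member is assumed, not defined in
          * this file):
          *
          *	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_BIO,
          *	    iop_intr, sc);
          *
          * The handler returns 1 if it consumed an interrupt and 0 if the
          * interrupt was not ours, as shared-interrupt handlers must.
          */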
   1796 
   1797 /*
   1798  * Handle an event signalled by the executive.
   1799  */
   1800 static void
   1801 iop_intr_event(device_t dv, struct iop_msg *im, void *reply)
   1802 {
   1803 	struct i2o_util_event_register_reply *rb;
   1804 	u_int event;
   1805 
   1806 	rb = reply;
   1807 
   1808 	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
   1809 		return;
   1810 
   1811 	event = le32toh(rb->event);
   1812 	printf("%s: event 0x%08x received\n", device_xname(dv), event);
   1813 }
   1814 
   1815 /*
   1816  * Allocate a message wrapper.
   1817  */
   1818 struct iop_msg *
   1819 iop_msg_alloc(struct iop_softc *sc, int flags)
   1820 {
   1821 	struct iop_msg *im;
   1822 	static u_int tctxgen;
   1823 	int i;
   1824 
   1825 #ifdef I2ODEBUG
   1826 	if ((flags & IM_SYSMASK) != 0)
   1827 		panic("iop_msg_alloc: system flags specified");
   1828 #endif
   1829 
   1830 	mutex_spin_enter(&sc->sc_intrlock);
   1831 	im = SLIST_FIRST(&sc->sc_im_freelist);
   1832 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
   1833 	if (im == NULL)
   1834 		panic("iop_msg_alloc: no free wrappers");
   1835 #endif
   1836 	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
   1837 	mutex_spin_exit(&sc->sc_intrlock);
   1838 
   1839 	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
   1840 	tctxgen += (1 << IOP_TCTX_SHIFT);
   1841 	im->im_flags = flags | IM_ALLOCED;
   1842 	im->im_rb = NULL;
   1843 	i = 0;
   1844 	do {
   1845 		im->im_xfer[i++].ix_size = 0;
   1846 	} while (i < IOP_MAX_MSG_XFERS);
   1847 
   1848 	return (im);
   1849 }
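
         /*
          * The transaction context handed out above packs two values into one
          * word: the wrapper's index within sc_ims in the low IOP_TCTX_SHIFT
          * bits, and a generation count in the bits above them:
          *
          *	im_tctx = index | (generation << IOP_TCTX_SHIFT)
          *
          * iop_handle_reply() recovers the wrapper from the low bits and then
          * requires the whole value to match im_tctx, so a late reply aimed
          * at a wrapper that has since been freed and reused is rejected
          * rather than misdelivered.
          */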
   1850 
   1851 /*
   1852  * Free a message wrapper.
   1853  */
   1854 void
   1855 iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
   1856 {
   1857 
   1858 #ifdef I2ODEBUG
   1859 	if ((im->im_flags & IM_ALLOCED) == 0)
   1860 		panic("iop_msg_free: wrapper not allocated");
   1861 #endif
   1862 
   1863 	im->im_flags = 0;
   1864 	mutex_spin_enter(&sc->sc_intrlock);
   1865 	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
   1866 	mutex_spin_exit(&sc->sc_intrlock);
   1867 }
   1868 
   1869 /*
   1870  * Map a data transfer.  Write a scatter-gather list into the message frame.
   1871  */
   1872 int
   1873 iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
   1874 	    void *xferaddr, int xfersize, int out, struct proc *up)
   1875 {
   1876 	bus_dmamap_t dm;
   1877 	bus_dma_segment_t *ds;
   1878 	struct iop_xfer *ix;
   1879 	u_int rv, i, nsegs, flg, off, xn;
   1880 	u_int32_t *p;
   1881 
   1882 	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
   1883 		if (ix->ix_size == 0)
   1884 			break;
   1885 
   1886 #ifdef I2ODEBUG
   1887 	if (xfersize == 0)
   1888 		panic("iop_msg_map: null transfer");
   1889 	if (xfersize > IOP_MAX_XFER)
   1890 		panic("iop_msg_map: transfer too large");
   1891 	if (xn == IOP_MAX_MSG_XFERS)
   1892 		panic("iop_msg_map: too many xfers");
   1893 #endif
   1894 
   1895 	/*
   1896 	 * Only the first DMA map is static.
   1897 	 */
   1898 	if (xn != 0) {
   1899 		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
   1900 		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
   1901 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
   1902 		if (rv != 0)
   1903 			return (rv);
   1904 	}
   1905 
   1906 	dm = ix->ix_map;
   1907 	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
   1908 	    (up == NULL ? BUS_DMA_NOWAIT : 0));
   1909 	if (rv != 0)
   1910 		goto bad;
   1911 
   1912 	/*
   1913 	 * How many SIMPLE SG elements can we fit in this message?
   1914 	 */
   1915 	off = mb[0] >> 16;
   1916 	p = mb + off;
   1917 	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
   1918 
   1919 	if (dm->dm_nsegs > nsegs) {
   1920 		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
   1921 		rv = EFBIG;
   1922 		DPRINTF(("iop_msg_map: too many segs\n"));
   1923 		goto bad;
   1924 	}
   1925 
   1926 	nsegs = dm->dm_nsegs;
   1927 	xfersize = 0;
   1928 
   1929 	/*
   1930 	 * Write out the SG list.
   1931 	 */
   1932 	if (out)
   1933 		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
   1934 	else
   1935 		flg = I2O_SGL_SIMPLE;
   1936 
   1937 	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
   1938 		p[0] = (u_int32_t)ds->ds_len | flg;
   1939 		p[1] = (u_int32_t)ds->ds_addr;
   1940 		xfersize += ds->ds_len;
   1941 	}
   1942 
   1943 	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
   1944 	p[1] = (u_int32_t)ds->ds_addr;
   1945 	xfersize += ds->ds_len;
   1946 
   1947 	/* Fix up the transfer record, and sync the map. */
   1948 	ix->ix_flags = (out ? IX_OUT : IX_IN);
   1949 	ix->ix_size = xfersize;
   1950 	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
   1951 	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
   1952 
   1953 	/*
   1954 	 * If this is the first xfer we've mapped for this message, adjust
   1955 	 * the SGL offset field in the message header.
   1956 	 */
   1957 	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
   1958 		mb[0] += (mb[0] >> 12) & 0xf0;
   1959 		im->im_flags |= IM_SGLOFFADJ;
   1960 	}
   1961 	mb[0] += (nsegs << 17);
   1962 	return (0);
   1963 
   1964  bad:
    1965 	if (xn != 0)
   1966 		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
   1967 	return (rv);
   1968 }
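
         /*
          * For illustration, the SIMPLE SGL emitted above for a hypothetical
          * two-segment 4.5KB write would look like this (addresses invented):
          *
          *	p[0] = 0x1000 | I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
          *	p[1] = 0x00234000;
          *	p[2] = 0x0200 | I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT |
          *	    I2O_SGL_END_BUFFER;
          *	p[3] = 0x00236000;
          *
          * I2O_SGL_END is ORed into the final element later, by
          * iop_msg_post(), once no further transfers can be mapped.
          */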
   1969 
   1970 /*
   1971  * Map a block I/O data transfer (different in that there's only one per
    1972  * message maximum, and PAGE addressing may be used).  Write a
    1973  * scatter-gather list into the message frame.
   1974  */
   1975 int
   1976 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
   1977 		void *xferaddr, int xfersize, int out)
   1978 {
   1979 	bus_dma_segment_t *ds;
   1980 	bus_dmamap_t dm;
   1981 	struct iop_xfer *ix;
   1982 	u_int rv, i, nsegs, off, slen, tlen, flg;
   1983 	paddr_t saddr, eaddr;
   1984 	u_int32_t *p;
   1985 
   1986 #ifdef I2ODEBUG
   1987 	if (xfersize == 0)
   1988 		panic("iop_msg_map_bio: null transfer");
   1989 	if (xfersize > IOP_MAX_XFER)
   1990 		panic("iop_msg_map_bio: transfer too large");
   1991 	if ((im->im_flags & IM_SGLOFFADJ) != 0)
   1992 		panic("iop_msg_map_bio: SGLOFFADJ");
   1993 #endif
   1994 
   1995 	ix = im->im_xfer;
   1996 	dm = ix->ix_map;
   1997 	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
   1998 	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
   1999 	if (rv != 0)
   2000 		return (rv);
   2001 
   2002 	off = mb[0] >> 16;
   2003 	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
   2004 
   2005 	/*
   2006 	 * If the transfer is highly fragmented and won't fit using SIMPLE
   2007 	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
   2008 	 * potentially more efficient, both for us and the IOP.
   2009 	 */
   2010 	if (dm->dm_nsegs > nsegs) {
   2011 		nsegs = 1;
   2012 		p = mb + off + 1;
   2013 
   2014 		/* XXX This should be done with a bus_space flag. */
   2015 		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
   2016 			slen = ds->ds_len;
   2017 			saddr = ds->ds_addr;
   2018 
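         			/*
         			 * Chop the segment at page boundaries: each
         			 * PAGE_LIST entry covers at most the rest of
         			 * one page.
         			 */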
   2019 			while (slen > 0) {
   2020 				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
   2021 				tlen = min(eaddr - saddr, slen);
   2022 				slen -= tlen;
    2023 				*p++ = htole32(saddr);
   2024 				saddr = eaddr;
   2025 				nsegs++;
   2026 			}
   2027 		}
   2028 
   2029 		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
   2030 		    I2O_SGL_END;
   2031 		if (out)
   2032 			mb[off] |= I2O_SGL_DATA_OUT;
   2033 	} else {
   2034 		p = mb + off;
   2035 		nsegs = dm->dm_nsegs;
   2036 
   2037 		if (out)
   2038 			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
   2039 		else
   2040 			flg = I2O_SGL_SIMPLE;
   2041 
   2042 		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
   2043 			p[0] = (u_int32_t)ds->ds_len | flg;
   2044 			p[1] = (u_int32_t)ds->ds_addr;
   2045 		}
   2046 
   2047 		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
   2048 		    I2O_SGL_END;
   2049 		p[1] = (u_int32_t)ds->ds_addr;
   2050 		nsegs <<= 1;
   2051 	}
   2052 
   2053 	/* Fix up the transfer record, and sync the map. */
   2054 	ix->ix_flags = (out ? IX_OUT : IX_IN);
   2055 	ix->ix_size = xfersize;
   2056 	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
   2057 	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
   2058 
   2059 	/*
   2060 	 * Adjust the SGL offset and total message size fields.  We don't
   2061 	 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
   2062 	 */
   2063 	mb[0] += ((off << 4) + (nsegs << 16));
   2064 	return (0);
   2065 }
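
         /*
          * A hypothetical fragmented 12KB transfer that falls back to the
          * PAGE_LIST form above would be encoded as one header word plus one
          * physical address per page (addresses invented):
          *
          *	mb[off + 0] = 0x3000 | I2O_SGL_PAGE_LIST |
          *	    I2O_SGL_END_BUFFER | I2O_SGL_END;
          *	mb[off + 1] = 0x00234000;
          *	mb[off + 2] = 0x00777000;
          *	mb[off + 3] = 0x00abc000;
          */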
   2066 
   2067 /*
   2068  * Unmap all data transfers associated with a message wrapper.
   2069  */
   2070 void
   2071 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
   2072 {
   2073 	struct iop_xfer *ix;
   2074 	int i;
   2075 
   2076 #ifdef I2ODEBUG
   2077 	if (im->im_xfer[0].ix_size == 0)
   2078 		panic("iop_msg_unmap: no transfers mapped");
   2079 #endif
   2080 
   2081 	for (ix = im->im_xfer, i = 0;;) {
   2082 		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
   2083 		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
   2084 		    BUS_DMASYNC_POSTREAD);
   2085 		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
   2086 
   2087 		/* Only the first DMA map is static. */
   2088 		if (i != 0)
   2089 			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
   2090 		if (++i >= IOP_MAX_MSG_XFERS)
   2091 			break;
   2092 		if ((++ix)->ix_size == 0)
   2093 			break;
   2094 	}
   2095 }
   2096 
   2097 /*
   2098  * Post a message frame to the IOP's inbound queue.
   2099  */
   2100 int
   2101 iop_post(struct iop_softc *sc, u_int32_t *mb)
   2102 {
   2103 	u_int32_t mfa;
   2104 
   2105 #ifdef I2ODEBUG
   2106 	if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
   2107 		panic("iop_post: frame too large");
   2108 #endif
   2109 
   2110 	mutex_spin_enter(&sc->sc_intrlock);
   2111 
   2112 	/* Allocate a slot with the IOP. */
   2113 	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
   2114 		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
   2115 			mutex_spin_exit(&sc->sc_intrlock);
   2116 			aprint_error_dev(sc->sc_dev, "mfa not forthcoming\n");
   2117 			return (EAGAIN);
   2118 		}
   2119 
   2120 	/* Perform reply buffer DMA synchronisation. */
   2121 	if (sc->sc_rep_size != 0) {
   2122 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
   2123 		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);
   2124 	}
   2125 
   2126 	/* Copy out the message frame. */
   2127 	bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
   2128 	    mb[0] >> 16);
   2129 	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
   2130 	    (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
   2131 
   2132 	/* Post the MFA back to the IOP. */
   2133 	iop_outl(sc, IOP_REG_IFIFO, mfa);
   2134 
   2135 	mutex_spin_exit(&sc->sc_intrlock);
   2136 	return (0);
   2137 }
   2138 
   2139 /*
   2140  * Post a message to the IOP and deal with completion.
   2141  */
   2142 int
   2143 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
   2144 {
   2145 	u_int32_t *mb;
   2146 	int rv;
   2147 
   2148 	mb = xmb;
   2149 
   2150 	/* Terminate the scatter/gather list chain. */
   2151 	if ((im->im_flags & IM_SGLOFFADJ) != 0)
   2152 		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
   2153 
   2154 	if ((rv = iop_post(sc, mb)) != 0)
   2155 		return (rv);
   2156 
   2157 	if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
   2158 		if ((im->im_flags & IM_POLL) != 0)
   2159 			iop_msg_poll(sc, im, timo);
   2160 		else
   2161 			iop_msg_wait(sc, im, timo);
   2162 
   2163 		mutex_spin_enter(&sc->sc_intrlock);
   2164 		if ((im->im_flags & IM_REPLIED) != 0) {
   2165 			if ((im->im_flags & IM_NOSTATUS) != 0)
   2166 				rv = 0;
   2167 			else if ((im->im_flags & IM_FAIL) != 0)
   2168 				rv = ENXIO;
   2169 			else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
   2170 				rv = EIO;
   2171 			else
   2172 				rv = 0;
   2173 		} else
   2174 			rv = EBUSY;
   2175 		mutex_spin_exit(&sc->sc_intrlock);
   2176 	} else
   2177 		rv = 0;
   2178 
   2179 	return (rv);
   2180 }
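
         /*
          * The iop_util_*() functions below show the canonical calling
          * sequence; in outline, a synchronous transaction is:
          *
          *	im = iop_msg_alloc(sc, IM_WAIT);
          *	(build a message frame mf, with mf.msgtctx = im->im_tctx)
          *	rv = iop_msg_post(sc, im, &mf, 5000);
          *	iop_msg_free(sc, im);
          *
          * With IM_POLL the reply is spun for rather than slept on; see
          * iop_msg_poll() below.
          */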
   2181 
   2182 /*
   2183  * Spin until the specified message is replied to.
   2184  */
   2185 static void
   2186 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
   2187 {
   2188 	u_int32_t rmfa;
   2189 
   2190 	mutex_spin_enter(&sc->sc_intrlock);
   2191 
   2192 	for (timo *= 10; timo != 0; timo--) {
   2193 		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
   2194 			/* Double read to account for IOP bug. */
   2195 			rmfa = iop_inl(sc, IOP_REG_OFIFO);
   2196 			if (rmfa == IOP_MFA_EMPTY)
   2197 				rmfa = iop_inl(sc, IOP_REG_OFIFO);
   2198 			if (rmfa != IOP_MFA_EMPTY) {
   2199 				iop_handle_reply(sc, rmfa);
   2200 
   2201 				/*
   2202 				 * Return the reply frame to the IOP's
   2203 				 * outbound FIFO.
   2204 				 */
   2205 				iop_outl(sc, IOP_REG_OFIFO, rmfa);
   2206 			}
   2207 		}
   2208 		if ((im->im_flags & IM_REPLIED) != 0)
   2209 			break;
   2210 		mutex_spin_exit(&sc->sc_intrlock);
   2211 		DELAY(100);
   2212 		mutex_spin_enter(&sc->sc_intrlock);
   2213 	}
   2214 
   2215 	if (timo == 0) {
   2216 #ifdef I2ODEBUG
   2217 		printf("%s: poll - no reply\n", device_xname(sc->sc_dev));
   2218 		if (iop_status_get(sc, 1) != 0)
   2219 			printf("iop_msg_poll: unable to retrieve status\n");
   2220 		else
   2221 			printf("iop_msg_poll: IOP state = %d\n",
   2222 			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
   2223 #endif
   2224 	}
   2225 
   2226 	mutex_spin_exit(&sc->sc_intrlock);
   2227 }
   2228 
   2229 /*
   2230  * Sleep until the specified message is replied to.
   2231  */
   2232 static void
   2233 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
   2234 {
   2235 	int rv;
   2236 
   2237 	mutex_spin_enter(&sc->sc_intrlock);
   2238 	if ((im->im_flags & IM_REPLIED) != 0) {
   2239 		mutex_spin_exit(&sc->sc_intrlock);
   2240 		return;
   2241 	}
   2242 	rv = cv_timedwait(&im->im_cv, &sc->sc_intrlock, mstohz(timo));
   2243 	mutex_spin_exit(&sc->sc_intrlock);
   2244 
   2245 #ifdef I2ODEBUG
   2246 	if (rv != 0) {
   2247 		printf("iop_msg_wait: tsleep() == %d\n", rv);
   2248 		if (iop_status_get(sc, 0) != 0)
   2249 			printf("%s: unable to retrieve status\n", __func__);
   2250 		else
   2251 			printf("%s: IOP state = %d\n", __func__,
   2252 			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
   2253 	}
   2254 #else
   2255 	__USE(rv);
   2256 #endif
   2257 }
   2258 
   2259 /*
   2260  * Release an unused message frame back to the IOP's inbound fifo.
   2261  */
   2262 static void
   2263 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
   2264 {
   2265 
   2266 	/* Use the frame to issue a no-op. */
   2267 	iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
   2268 	iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
   2269 	iop_outl_msg(sc, mfa + 8, 0);
   2270 	iop_outl_msg(sc, mfa + 12, 0);
   2271 
   2272 	iop_outl(sc, IOP_REG_IFIFO, mfa);
   2273 }
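
         /*
          * The first word written above is the standard I2O message header:
          * the frame size in 32-bit words (4, the minimal NOP) in the high
          * 16 bits and the version/offset field in the low 16 bits, matching
          * the mb[0] layout that iop_post() and iop_msg_map() manipulate.
          */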
   2274 
   2275 #ifdef I2ODEBUG
   2276 /*
   2277  * Dump a reply frame header.
   2278  */
   2279 static void
   2280 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
   2281 {
   2282 	u_int function, detail;
   2283 	const char *statusstr;
   2284 
   2285 	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
   2286 	detail = le16toh(rb->detail);
   2287 
   2288 	printf("%s: reply:\n", device_xname(sc->sc_dev));
   2289 
   2290 	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
   2291 		statusstr = iop_status[rb->reqstatus];
   2292 	else
   2293 		statusstr = "undefined error code";
   2294 
   2295 	printf("%s:   function=0x%02x status=0x%02x (%s)\n",
   2296 	    device_xname(sc->sc_dev), function, rb->reqstatus, statusstr);
   2297 	printf("%s:   detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
   2298 	    device_xname(sc->sc_dev), detail, le32toh(rb->msgictx),
   2299 	    le32toh(rb->msgtctx));
   2300 	printf("%s:   tidi=%d tidt=%d flags=0x%02x\n", device_xname(sc->sc_dev),
   2301 	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
   2302 	    (le32toh(rb->msgflags) >> 8) & 0xff);
   2303 }
   2304 #endif
   2305 
   2306 /*
   2307  * Dump a transport failure reply.
   2308  */
   2309 static void
   2310 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
   2311 {
   2312 
   2313 	printf("%s: WARNING: transport failure:\n", device_xname(sc->sc_dev));
   2314 
   2315 	printf("%s:  ictx=0x%08x tctx=0x%08x\n", device_xname(sc->sc_dev),
   2316 	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
   2317 	printf("%s:  failurecode=0x%02x severity=0x%02x\n",
   2318 	    device_xname(sc->sc_dev), fn->failurecode, fn->severity);
   2319 	printf("%s:  highestver=0x%02x lowestver=0x%02x\n",
   2320 	    device_xname(sc->sc_dev), fn->highestver, fn->lowestver);
   2321 }
   2322 
   2323 /*
   2324  * Translate an I2O ASCII field into a C string.
   2325  */
   2326 void
   2327 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
   2328 {
   2329 	int hc, lc, i, nit;
   2330 
   2331 	dlen--;
   2332 	lc = 0;
   2333 	hc = 0;
   2334 	i = 0;
   2335 
   2336 	/*
    2337 	 * DPT use NUL as a space, whereas AMI use it as a terminator.  The
    2338 	 * spec has nothing to say about it.  Since AMI fields are usually
    2339 	 * filled with junk after the terminator, stop at the first NUL
         	 * unless the IOP is DPT's.
   2340 	 */
   2341 	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
   2342 
   2343 	while (slen-- != 0 && dlen-- != 0) {
   2344 		if (nit && *src == '\0')
   2345 			break;
   2346 		else if (*src <= 0x20 || *src >= 0x7f) {
   2347 			if (hc)
   2348 				dst[i++] = ' ';
   2349 		} else {
   2350 			hc = 1;
   2351 			dst[i++] = *src;
   2352 			lc = i;
   2353 		}
   2354 		src++;
   2355 	}
   2356 
   2357 	dst[lc] = '\0';
   2358 }
   2359 
   2360 /*
   2361  * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
   2362  */
   2363 int
   2364 iop_print_ident(struct iop_softc *sc, int tid)
   2365 {
   2366 	struct {
   2367 		struct	i2o_param_op_results pr;
   2368 		struct	i2o_param_read_results prr;
   2369 		struct	i2o_param_device_identity di;
   2370 	} __packed p;
   2371 	char buf[32];
   2372 	int rv;
   2373 
   2374 	rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
   2375 	    sizeof(p), NULL);
   2376 	if (rv != 0)
   2377 		return (rv);
   2378 
   2379 	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
   2380 	    sizeof(buf));
   2381 	printf(" <%s, ", buf);
   2382 	iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
   2383 	    sizeof(buf));
   2384 	printf("%s, ", buf);
   2385 	iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
   2386 	printf("%s>", buf);
   2387 
   2388 	return (0);
   2389 }
   2390 
   2391 /*
   2392  * Claim or unclaim the specified TID.
   2393  */
   2394 int
   2395 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
   2396 	       int flags)
   2397 {
   2398 	struct iop_msg *im;
   2399 	struct i2o_util_claim mf;
   2400 	int rv, func;
   2401 
   2402 	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
   2403 	im = iop_msg_alloc(sc, IM_WAIT);
   2404 
   2405 	/* We can use the same structure, as they're identical. */
   2406 	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
   2407 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
   2408 	mf.msgictx = ii->ii_ictx;
   2409 	mf.msgtctx = im->im_tctx;
   2410 	mf.flags = flags;
   2411 
   2412 	rv = iop_msg_post(sc, im, &mf, 5000);
   2413 	iop_msg_free(sc, im);
   2414 	return (rv);
   2415 }
   2416 
   2417 /*
   2418  * Perform an abort.
   2419  */
    2420 int
         iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    2421 		   int tctxabort, int flags)
   2422 {
   2423 	struct iop_msg *im;
   2424 	struct i2o_util_abort mf;
   2425 	int rv;
   2426 
   2427 	im = iop_msg_alloc(sc, IM_WAIT);
   2428 
   2429 	mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
   2430 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
   2431 	mf.msgictx = ii->ii_ictx;
   2432 	mf.msgtctx = im->im_tctx;
   2433 	mf.flags = (func << 24) | flags;
   2434 	mf.tctxabort = tctxabort;
   2435 
   2436 	rv = iop_msg_post(sc, im, &mf, 5000);
   2437 	iop_msg_free(sc, im);
   2438 	return (rv);
   2439 }
   2440 
   2441 /*
   2442  * Enable or disable reception of events for the specified device.
   2443  */
    2444 int
         iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
   2445 {
   2446 	struct i2o_util_event_register mf;
   2447 
   2448 	mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
   2449 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
   2450 	mf.msgictx = ii->ii_ictx;
   2451 	mf.msgtctx = 0;
   2452 	mf.eventmask = mask;
   2453 
   2454 	/* This message is replied to only when events are signalled. */
   2455 	return (iop_post(sc, (u_int32_t *)&mf));
   2456 }
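
         /*
          * Example: an event-capable initiator enables delivery with, say
          * (mask value purely illustrative; each bit selects an event class):
          *
          *	iop_util_eventreg(sc, ii, 0xffffffff);
          *
          * Replies then arrive through ii->ii_intr whenever events fire;
          * iop_intr_event() above is this driver's handler for such replies.
          */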
   2457 
   2458 int
   2459 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
   2460 {
   2461 	struct iop_softc *sc;
   2462 
   2463 	if ((sc = device_lookup_private(&iop_cd, minor(dev))) == NULL)
   2464 		return (ENXIO);
   2465 	if ((sc->sc_flags & IOP_ONLINE) == 0)
   2466 		return (ENXIO);
   2467 	if ((sc->sc_flags & IOP_OPEN) != 0)
   2468 		return (EBUSY);
   2469 	sc->sc_flags |= IOP_OPEN;
   2470 
   2471 	return (0);
   2472 }
   2473 
   2474 int
   2475 iopclose(dev_t dev, int flag, int mode,
   2476     struct lwp *l)
   2477 {
   2478 	struct iop_softc *sc;
   2479 
   2480 	sc = device_lookup_private(&iop_cd, minor(dev));
   2481 	sc->sc_flags &= ~IOP_OPEN;
   2482 
   2483 	return (0);
   2484 }
   2485 
   2486 int
   2487 iopioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
   2488 {
   2489 	struct iop_softc *sc;
   2490 	struct iovec *iov;
   2491 	int rv, i;
   2492 
   2493 	sc = device_lookup_private(&iop_cd, minor(dev));
   2494 	rv = 0;
   2495 
   2496 	switch (cmd) {
   2497 	case IOPIOCPT:
   2498 		rv = kauth_authorize_device_passthru(l->l_cred, dev,
   2499 		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
   2500 		if (rv)
   2501 			return (rv);
   2502 
   2503 		return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));
   2504 
   2505 	case IOPIOCGSTATUS:
   2506 		iov = (struct iovec *)data;
   2507 		i = sizeof(struct i2o_status);
   2508 		if (i > iov->iov_len)
   2509 			i = iov->iov_len;
   2510 		else
   2511 			iov->iov_len = i;
   2512 		if ((rv = iop_status_get(sc, 0)) == 0)
   2513 			rv = copyout(&sc->sc_status, iov->iov_base, i);
   2514 		return (rv);
   2515 
   2516 	case IOPIOCGLCT:
   2517 	case IOPIOCGTIDMAP:
   2518 	case IOPIOCRECONFIG:
   2519 		break;
   2520 
   2521 	default:
   2522 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
   2523 		printf("%s: unknown ioctl %lx\n", device_xname(sc->sc_dev), cmd);
   2524 #endif
   2525 		return (ENOTTY);
   2526 	}
   2527 
   2528 	mutex_enter(&sc->sc_conflock);
   2529 
   2530 	switch (cmd) {
   2531 	case IOPIOCGLCT:
   2532 		iov = (struct iovec *)data;
   2533 		i = le16toh(sc->sc_lct->tablesize) << 2;
   2534 		if (i > iov->iov_len)
   2535 			i = iov->iov_len;
   2536 		else
   2537 			iov->iov_len = i;
   2538 		rv = copyout(sc->sc_lct, iov->iov_base, i);
   2539 		break;
   2540 
   2541 	case IOPIOCRECONFIG:
   2542 		rv = iop_reconfigure(sc, 0);
   2543 		break;
   2544 
   2545 	case IOPIOCGTIDMAP:
   2546 		iov = (struct iovec *)data;
   2547 		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
   2548 		if (i > iov->iov_len)
   2549 			i = iov->iov_len;
   2550 		else
   2551 			iov->iov_len = i;
   2552 		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
   2553 		break;
   2554 	}
   2555 
   2556 	mutex_exit(&sc->sc_conflock);
   2557 	return (rv);
   2558 }
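
         /*
          * User-space sketch of the iovec-based ioctls (illustrative only;
          * the iopctl(8) utility is the intended consumer):
          *
          *	struct i2o_status st;
          *	struct iovec iov = { .iov_base = &st, .iov_len = sizeof(st) };
          *
          *	if (ioctl(fd, IOPIOCGSTATUS, &iov) == 0)
          *		printf("IOP state %d\n",
          *		    (le32toh(st.segnumber) >> 16) & 0xff);
          *
          * On return, iov_len is clipped to the number of bytes actually
          * copied out, as the handler above shows.
          */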
   2559 
   2560 static int
   2561 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
   2562 {
   2563 	struct iop_msg *im;
   2564 	struct i2o_msg *mf;
   2565 	struct ioppt_buf *ptb;
   2566 	int rv, i, mapped;
   2567 
   2568 	mf = NULL;
   2569 	im = NULL;
    2570 	mapped = 0;
   2571 
   2572 	if (pt->pt_msglen > sc->sc_framesize ||
   2573 	    pt->pt_msglen < sizeof(struct i2o_msg) ||
   2574 	    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
   2575 	    pt->pt_nbufs < 0 ||
   2576 #if 0
   2577 	    pt->pt_replylen < 0 ||
   2578 #endif
    2579 	    pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
   2580 		return (EINVAL);
   2581 
   2582 	for (i = 0; i < pt->pt_nbufs; i++)
   2583 		if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
   2584 			rv = ENOMEM;
   2585 			goto bad;
   2586 		}
   2587 
   2588 	mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
   2589 	if (mf == NULL)
   2590 		return (ENOMEM);
   2591 
   2592 	if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
   2593 		goto bad;
   2594 
   2595 	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
   2596 	im->im_rb = (struct i2o_reply *)mf;
   2597 	mf->msgictx = IOP_ICTX;
   2598 	mf->msgtctx = im->im_tctx;
   2599 
   2600 	for (i = 0; i < pt->pt_nbufs; i++) {
   2601 		ptb = &pt->pt_bufs[i];
   2602 		rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
   2603 		    ptb->ptb_datalen, ptb->ptb_out != 0, p);
   2604 		if (rv != 0)
   2605 			goto bad;
   2606 		mapped = 1;
   2607 	}
   2608 
   2609 	if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
   2610 		goto bad;
   2611 
   2612 	i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
   2613 	if (i > sc->sc_framesize)
   2614 		i = sc->sc_framesize;
   2615 	if (i > pt->pt_replylen)
   2616 		i = pt->pt_replylen;
   2617 	rv = copyout(im->im_rb, pt->pt_reply, i);
   2618 
   2619  bad:
   2620 	if (mapped != 0)
   2621 		iop_msg_unmap(sc, im);
   2622 	if (im != NULL)
   2623 		iop_msg_free(sc, im);
   2624 	if (mf != NULL)
   2625 		free(mf, M_DEVBUF);
   2626 	return (rv);
   2627 }
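
         /*
          * User-space sketch of IOPIOCPT (illustrative only; the limits are
          * those validated above):
          *
          *	struct ioppt pt;
          *
          *	pt.pt_msg = mf;				(frame to send)
          *	pt.pt_msglen = msglen;
          *	pt.pt_reply = rbuf;			(reply buffer)
          *	pt.pt_replylen = sizeof(rbuf);
          *	pt.pt_timo = 5000;			(1000..300000 ms)
          *	pt.pt_nbufs = 1;
          *	pt.pt_bufs[0].ptb_data = data;
          *	pt.pt_bufs[0].ptb_datalen = datalen;
          *	pt.pt_bufs[0].ptb_out = 1;		(host to IOP)
          *	rv = ioctl(fd, IOPIOCPT, &pt);
          */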
   2628