icp.c revision 1.14
      1 /*	$NetBSD: icp.c,v 1.14 2005/02/27 00:27:01 perry Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Andrew Doran, and by Jason R. Thorpe of Wasabi Systems, Inc.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *        This product includes software developed by the NetBSD
     21  *        Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 
     39 /*
     40  * Copyright (c) 1999, 2000 Niklas Hallqvist.  All rights reserved.
     41  *
     42  * Redistribution and use in source and binary forms, with or without
     43  * modification, are permitted provided that the following conditions
     44  * are met:
     45  * 1. Redistributions of source code must retain the above copyright
     46  *    notice, this list of conditions and the following disclaimer.
     47  * 2. Redistributions in binary form must reproduce the above copyright
     48  *    notice, this list of conditions and the following disclaimer in the
     49  *    documentation and/or other materials provided with the distribution.
     50  * 3. All advertising materials mentioning features or use of this software
     51  *    must display the following acknowledgement:
     52  *	This product includes software developed by Niklas Hallqvist.
     53  * 4. The name of the author may not be used to endorse or promote products
     54  *    derived from this software without specific prior written permission.
     55  *
     56  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     57  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     58  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     59  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     60  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
      61  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     62  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     63  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     64  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     65  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     66  *
     67  * from OpenBSD: gdt_common.c,v 1.12 2001/07/04 06:43:18 niklas Exp
     68  */
     69 
     70 /*
      71  * This driver would not have been written were it not for the hardware donations
      72  * from both ICP-Vortex and ko.neT.  I want to thank them for their support.
     73  *
     74  * Re-worked for NetBSD by Andrew Doran.  Test hardware kindly supplied by
     75  * Intel.
     76  *
     77  * Support for the ICP-Vortex management tools added by
     78  * Jason R. Thorpe of Wasabi Systems, Inc., based on code
     79  * provided by Achim Leubner <achim.leubner (at) intel.com>.
     80  *
     81  * Additional support for dynamic rescan of cacheservice drives by
     82  * Jason R. Thorpe of Wasabi Systems, Inc.
     83  */
     84 
     85 #include <sys/cdefs.h>
     86 __KERNEL_RCSID(0, "$NetBSD: icp.c,v 1.14 2005/02/27 00:27:01 perry Exp $");
     87 
     88 #include <sys/param.h>
     89 #include <sys/systm.h>
     90 #include <sys/kernel.h>
     91 #include <sys/device.h>
     92 #include <sys/queue.h>
     93 #include <sys/proc.h>
     94 #include <sys/buf.h>
     95 #include <sys/endian.h>
     96 #include <sys/malloc.h>
     97 #include <sys/disk.h>
     98 
     99 #include <uvm/uvm_extern.h>
    100 
    101 #include <machine/bswap.h>
    102 #include <machine/bus.h>
    103 
    104 #include <dev/pci/pcireg.h>
    105 #include <dev/pci/pcivar.h>
    106 #include <dev/pci/pcidevs.h>
    107 
    108 #include <dev/ic/icpreg.h>
    109 #include <dev/ic/icpvar.h>
    110 
    111 #include <dev/scsipi/scsipi_all.h>
    112 #include <dev/scsipi/scsiconf.h>
    113 
    114 #include "locators.h"
    115 
    116 int	icp_async_event(struct icp_softc *, int);
    117 void	icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic);
    118 void	icp_chain(struct icp_softc *);
    119 int	icp_print(void *, const char *);
    120 int	icp_submatch(struct device *, struct cfdata *,
    121 		     const locdesc_t *, void *);
    122 void	icp_watchdog(void *);
    123 void	icp_ucmd_intr(struct icp_ccb *);
    124 void	icp_recompute_openings(struct icp_softc *);
    125 
    126 int	icp_count;	/* total # of controllers, for ioctl interface */
    127 
    128 /*
    129  * Statistics for the ioctl interface to query.
    130  *
    131  * XXX Global.  They should probably be made per-controller
    132  * XXX at some point.
    133  */
    134 gdt_statist_t icp_stats;
    135 
    136 int
    137 icp_init(struct icp_softc *icp, const char *intrstr)
    138 {
    139 	struct icp_attach_args icpa;
    140 	struct icp_binfo binfo;
    141 	struct icp_ccb *ic;
    142 	u_int16_t cdev_cnt;
    143 	int i, j, state, feat, nsegs, rv;
    144 	int help[2];
    145 	locdesc_t *ldesc = (void *)help; /* XXX */
    146 
    147 	state = 0;
    148 
    149 	if (intrstr != NULL)
    150 		aprint_normal("%s: interrupting at %s\n", icp->icp_dv.dv_xname,
    151 		    intrstr);
    152 
    153 	SIMPLEQ_INIT(&icp->icp_ccb_queue);
    154 	SIMPLEQ_INIT(&icp->icp_ccb_freelist);
    155 	SIMPLEQ_INIT(&icp->icp_ucmd_queue);
    156 	callout_init(&icp->icp_wdog_callout);
    157 
    158 	/*
    159 	 * Allocate a scratch area.
    160 	 */
    161 	if (bus_dmamap_create(icp->icp_dmat, ICP_SCRATCH_SIZE, 1,
    162 	    ICP_SCRATCH_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
    163 	    &icp->icp_scr_dmamap) != 0) {
    164 		aprint_error("%s: cannot create scratch dmamap\n",
    165 		    icp->icp_dv.dv_xname);
    166 		return (1);
    167 	}
    168 	state++;
    169 
    170 	if (bus_dmamem_alloc(icp->icp_dmat, ICP_SCRATCH_SIZE, PAGE_SIZE, 0,
    171 	    icp->icp_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
    172 		aprint_error("%s: cannot alloc scratch dmamem\n",
    173 		    icp->icp_dv.dv_xname);
    174 		goto bail_out;
    175 	}
    176 	state++;
    177 
    178 	if (bus_dmamem_map(icp->icp_dmat, icp->icp_scr_seg, nsegs,
    179 	    ICP_SCRATCH_SIZE, &icp->icp_scr, 0)) {
    180 		aprint_error("%s: cannot map scratch dmamem\n",
    181 		    icp->icp_dv.dv_xname);
    182 		goto bail_out;
    183 	}
    184 	state++;
    185 
    186 	if (bus_dmamap_load(icp->icp_dmat, icp->icp_scr_dmamap, icp->icp_scr,
    187 	    ICP_SCRATCH_SIZE, NULL, BUS_DMA_NOWAIT)) {
    188 		aprint_error("%s: cannot load scratch dmamap\n",
    189 		    icp->icp_dv.dv_xname);
    190 		goto bail_out;
    191 	}
    192 	state++;
    193 
    194 	/*
    195 	 * Allocate and initialize the command control blocks.
    196 	 */
    197 	ic = malloc(sizeof(*ic) * ICP_NCCBS, M_DEVBUF, M_NOWAIT | M_ZERO);
    198 	if ((icp->icp_ccbs = ic) == NULL) {
    199 		aprint_error("%s: malloc() failed\n", icp->icp_dv.dv_xname);
    200 		goto bail_out;
    201 	}
    202 	state++;
    203 
    204 	for (i = 0; i < ICP_NCCBS; i++, ic++) {
    205 		/*
    206 		 * The first two command indexes have special meanings, so
    207 		 * we can't use them.
    208 		 */
    209 		ic->ic_ident = i + 2;
    210 		rv = bus_dmamap_create(icp->icp_dmat, ICP_MAX_XFER,
    211 		    ICP_MAXSG, ICP_MAX_XFER, 0,
    212 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
    213 		    &ic->ic_xfer_map);
    214 		if (rv != 0)
    215 			break;
    216 		icp->icp_nccbs++;
    217 		icp_ccb_free(icp, ic);
    218 	}
    219 #ifdef DIAGNOSTIC
    220 	if (icp->icp_nccbs != ICP_NCCBS)
    221 		aprint_error("%s: %d/%d CCBs usable\n", icp->icp_dv.dv_xname,
    222 		    icp->icp_nccbs, ICP_NCCBS);
    223 #endif
    224 
    225 	/*
     226 	 * Initialize the controller.
    227 	 */
    228 	if (!icp_cmd(icp, ICP_SCREENSERVICE, ICP_INIT, 0, 0, 0)) {
    229 		aprint_error("%s: screen service init error %d\n",
    230 		    icp->icp_dv.dv_xname, icp->icp_status);
    231 		goto bail_out;
    232 	}
    233 
    234 	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
    235 		aprint_error("%s: cache service init error %d\n",
    236 		    icp->icp_dv.dv_xname, icp->icp_status);
    237 		goto bail_out;
    238 	}
    239 
    240 	icp_cmd(icp, ICP_CACHESERVICE, ICP_UNFREEZE_IO, 0, 0, 0);
    241 
    242 	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_MOUNT, 0xffff, 1, 0)) {
    243 		aprint_error("%s: cache service mount error %d\n",
    244 		    icp->icp_dv.dv_xname, icp->icp_status);
    245 		goto bail_out;
    246 	}
    247 
    248 	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
    249 		aprint_error("%s: cache service post-mount init error %d\n",
    250 		    icp->icp_dv.dv_xname, icp->icp_status);
    251 		goto bail_out;
    252 	}
    253 	cdev_cnt = (u_int16_t)icp->icp_info;
    254 	icp->icp_fw_vers = icp->icp_service;
    255 
    256 	if (!icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_INIT, 0, 0, 0)) {
    257 		aprint_error("%s: raw service init error %d\n",
    258 		    icp->icp_dv.dv_xname, icp->icp_status);
    259 		goto bail_out;
    260 	}
    261 
    262 	/*
    263 	 * Set/get raw service features (scatter/gather).
    264 	 */
    265 	feat = 0;
    266 	if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_SET_FEAT, ICP_SCATTER_GATHER,
    267 	    0, 0))
    268 		if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_GET_FEAT, 0, 0, 0))
    269 			feat = icp->icp_info;
    270 
    271 	if ((feat & ICP_SCATTER_GATHER) == 0) {
    272 #ifdef DIAGNOSTIC
    273 		aprint_normal(
    274 		    "%s: scatter/gather not supported (raw service)\n",
    275 		    icp->icp_dv.dv_xname);
    276 #endif
    277 	} else
    278 		icp->icp_features |= ICP_FEAT_RAWSERVICE;
    279 
    280 	/*
    281 	 * Set/get cache service features (scatter/gather).
    282 	 */
    283 	feat = 0;
    284 	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_SET_FEAT, 0,
    285 	    ICP_SCATTER_GATHER, 0))
    286 		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_GET_FEAT, 0, 0, 0))
    287 			feat = icp->icp_info;
    288 
    289 	if ((feat & ICP_SCATTER_GATHER) == 0) {
    290 #ifdef DIAGNOSTIC
    291 		aprint_normal(
    292 		    "%s: scatter/gather not supported (cache service)\n",
    293 		    icp->icp_dv.dv_xname);
    294 #endif
    295 	} else
    296 		icp->icp_features |= ICP_FEAT_CACHESERVICE;
    297 
    298 	/*
    299 	 * Pull some information from the board and dump.
    300 	 */
    301 	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, ICP_BOARD_INFO,
    302 	    ICP_INVALID_CHANNEL, sizeof(struct icp_binfo))) {
     303 		aprint_error("%s: unable to retrieve board info\n",
    304 		    icp->icp_dv.dv_xname);
    305 		goto bail_out;
    306 	}
    307 	memcpy(&binfo, icp->icp_scr, sizeof(binfo));
    308 
    309 	aprint_normal(
    310 	    "%s: model <%s>, firmware <%s>, %d channel(s), %dMB memory\n",
    311 	    icp->icp_dv.dv_xname, binfo.bi_type_string, binfo.bi_raid_string,
    312 	    binfo.bi_chan_count, le32toh(binfo.bi_memsize) >> 20);
    313 
    314 	/*
    315 	 * Determine the number of devices, and number of openings per
    316 	 * device.
    317 	 */
    318 	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
    319 		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
    320 			if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, j, 0,
    321 			    0))
    322 				continue;
    323 
    324 			icp->icp_cdr[j].cd_size = icp->icp_info;
    325 			if (icp->icp_cdr[j].cd_size != 0)
    326 				icp->icp_ndevs++;
    327 
    328 			if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, j, 0,
    329 			    0))
    330 				icp->icp_cdr[j].cd_type = icp->icp_info;
    331 		}
    332 	}
    333 
    334 	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
    335 		icp->icp_nchan = binfo.bi_chan_count;
    336 		icp->icp_ndevs += icp->icp_nchan;
    337 	}
    338 
    339 	icp_recompute_openings(icp);
    340 
    341 	/*
    342 	 * Attach SCSI channels.
    343 	 */
    344 	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
    345 		struct icp_ioc_version *iv;
    346 		struct icp_rawioc *ri;
    347 		struct icp_getch *gc;
    348 
    349 		iv = (struct icp_ioc_version *)icp->icp_scr;
    350 		iv->iv_version = htole32(ICP_IOC_NEWEST);
    351 		iv->iv_listents = ICP_MAXBUS;
    352 		iv->iv_firstchan = 0;
    353 		iv->iv_lastchan = ICP_MAXBUS - 1;
    354 		iv->iv_listoffset = htole32(sizeof(*iv));
    355 
    356 		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
    357 		    ICP_IOCHAN_RAW_DESC, ICP_INVALID_CHANNEL,
    358 		    sizeof(*iv) + ICP_MAXBUS * sizeof(*ri))) {
    359 			ri = (struct icp_rawioc *)(iv + 1);
    360 			for (j = 0; j < binfo.bi_chan_count; j++, ri++)
    361 				icp->icp_bus_id[j] = ri->ri_procid;
    362 		} else {
    363 			/*
    364 			 * Fall back to the old method.
    365 			 */
    366 			gc = (struct icp_getch *)icp->icp_scr;
    367 
    368 			for (j = 0; j < binfo.bi_chan_count; j++) {
    369 				if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
    370 				    ICP_SCSI_CHAN_CNT | ICP_L_CTRL_PATTERN,
    371 				    ICP_IO_CHANNEL | ICP_INVALID_CHANNEL,
    372 				    sizeof(*gc))) {
    373 				    	aprint_error(
     374 					    "%s: unable to get chan info\n",
    375 				    	    icp->icp_dv.dv_xname);
    376 					goto bail_out;
    377 				}
    378 				icp->icp_bus_id[j] = gc->gc_scsiid;
    379 			}
    380 		}
    381 
    382 		for (j = 0; j < binfo.bi_chan_count; j++) {
    383 			if (icp->icp_bus_id[j] > ICP_MAXID_FC)
    384 				icp->icp_bus_id[j] = ICP_MAXID_FC;
    385 
    386 			icpa.icpa_unit = j + ICPA_UNIT_SCSI;
    387 
    388 			ldesc->len = 1;
    389 			ldesc->locs[ICPCF_UNIT] = j + ICPA_UNIT_SCSI;
    390 
    391 			icp->icp_children[icpa.icpa_unit] =
    392 				config_found_sm_loc(&icp->icp_dv, "icp", ldesc,
    393 					&icpa, icp_print, icp_submatch);
    394 		}
    395 	}
    396 
    397 	/*
    398 	 * Attach cache devices.
    399 	 */
    400 	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
    401 		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
    402 			if (icp->icp_cdr[j].cd_size == 0)
    403 				continue;
    404 
    405 			icpa.icpa_unit = j;
    406 
    407 			ldesc->len = 1;
    408 			ldesc->locs[ICPCF_UNIT] = j;
    409 
    410 			icp->icp_children[icpa.icpa_unit] =
    411 			    config_found_sm_loc(&icp->icp_dv, "icp", ldesc,
    412 				&icpa, icp_print, icp_submatch);
    413 		}
    414 	}
    415 
    416 	/*
    417 	 * Start the watchdog.
    418 	 */
    419 	icp_watchdog(icp);
    420 
    421 	/*
    422 	 * Count the controller, and we're done!
    423 	 */
    424 	icp_count++;
    425 
    426 	return (0);
    427 
    428  bail_out:
    429 	if (state > 4)
    430 		for (j = 0; j < i; j++)
    431 			bus_dmamap_destroy(icp->icp_dmat,
    432 			    icp->icp_ccbs[j].ic_xfer_map);
    433  	if (state > 3)
    434 		free(icp->icp_ccbs, M_DEVBUF);
    435 	if (state > 2)
    436 		bus_dmamap_unload(icp->icp_dmat, icp->icp_scr_dmamap);
    437 	if (state > 1)
    438 		bus_dmamem_unmap(icp->icp_dmat, icp->icp_scr,
    439 		    ICP_SCRATCH_SIZE);
    440 	if (state > 0)
    441 		bus_dmamem_free(icp->icp_dmat, icp->icp_scr_seg, nsegs);
    442 	bus_dmamap_destroy(icp->icp_dmat, icp->icp_scr_dmamap);
    443 
    444 	return (1);
    445 }
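
/*
 * Illustrative sketch (not part of the driver): a bus front-end is expected
 * to fill in the bus-specific softc members (icp_dmat, the register access
 * handles and the icp_* method pointers), establish its interrupt handler
 * on icp_intr(), and then hand over to icp_init(), which prints its own
 * diagnostics on failure:
 *
 *	if (icp_init(icp, intrstr) != 0)
 *		return;
 */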
    446 
    447 void
    448 icp_register_servicecb(struct icp_softc *icp, int unit,
    449     const struct icp_servicecb *cb)
    450 {
    451 
    452 	icp->icp_servicecb[unit] = cb;
    453 }
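
/*
 * Illustrative sketch only: a child device attached via config_found_sm_loc()
 * below is expected to register its service callbacks from its own attach
 * routine, so that icp_recompute_openings() can notify it of changes.  The
 * structure initializer and handler name here are hypothetical; only the
 * iscb_openings member is used by this file:
 *
 *	static const struct icp_servicecb example_servicecb = {
 *		.iscb_openings = example_adjust_openings,
 *	};
 *
 *	icp_register_servicecb(icp, icpa->icpa_unit, &example_servicecb);
 */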
    454 
    455 void
    456 icp_rescan(struct icp_softc *icp, int unit)
    457 {
    458 	struct icp_attach_args icpa;
    459 	u_int newsize, newtype;
    460 	int help[2];
    461 	locdesc_t *ldesc = (void *)help; /* XXX */
    462 
    463 	/*
     464 	 * NOTE: It is very important that the queue be frozen and that
     465 	 * no commands be running when this is called.  The ioctl mutex
     466 	 * must also be held.
    467 	 */
    468 
    469 	KASSERT(icp->icp_qfreeze != 0);
    470 	KASSERT(icp->icp_running == 0);
    471 	KASSERT(unit < ICP_MAX_HDRIVES);
    472 
    473 	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, unit, 0, 0)) {
    474 #ifdef ICP_DEBUG
    475 		printf("%s: rescan: unit %d ICP_INFO failed -> 0x%04x\n",
    476 		    icp->icp_dv.dv_xname, unit, icp->icp_status);
    477 #endif
    478 		goto gone;
    479 	}
    480 	if ((newsize = icp->icp_info) == 0) {
    481 #ifdef ICP_DEBUG
    482 		printf("%s: rescan: unit %d has zero size\n",
    483 		    icp->icp_dv.dv_xname, unit);
    484 #endif
    485  gone:
    486 		/*
    487 		 * Host drive is no longer present; detach if a child
    488 		 * is currently there.
    489 		 */
    490 		if (icp->icp_cdr[unit].cd_size != 0)
    491 			icp->icp_ndevs--;
    492 		icp->icp_cdr[unit].cd_size = 0;
    493 		if (icp->icp_children[unit] != NULL) {
    494 			(void) config_detach(icp->icp_children[unit],
    495 			    DETACH_FORCE);
    496 			icp->icp_children[unit] = NULL;
    497 		}
    498 		return;
    499 	}
    500 
    501 	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, unit, 0, 0))
    502 		newtype = icp->icp_info;
    503 	else {
    504 #ifdef ICP_DEBUG
    505 		printf("%s: rescan: unit %d ICP_DEVTYPE failed\n",
    506 		    icp->icp_dv.dv_xname, unit);
    507 #endif
    508 		newtype = 0;	/* XXX? */
    509 	}
    510 
    511 #ifdef ICP_DEBUG
    512 	printf("%s: rescan: unit %d old %u/%u, new %u/%u\n",
    513 	    icp->icp_dv.dv_xname, unit, icp->icp_cdr[unit].cd_size,
    514 	    icp->icp_cdr[unit].cd_type, newsize, newtype);
    515 #endif
    516 
    517 	/*
    518 	 * If the type or size changed, detach any old child (if it exists)
    519 	 * and attach a new one.
    520 	 */
    521 	if (icp->icp_children[unit] == NULL ||
    522 	    newsize != icp->icp_cdr[unit].cd_size ||
    523 	    newtype != icp->icp_cdr[unit].cd_type) {
    524 		if (icp->icp_cdr[unit].cd_size == 0)
    525 			icp->icp_ndevs++;
    526 		icp->icp_cdr[unit].cd_size = newsize;
    527 		icp->icp_cdr[unit].cd_type = newtype;
    528 		if (icp->icp_children[unit] != NULL)
    529 			(void) config_detach(icp->icp_children[unit],
    530 			    DETACH_FORCE);
    531 
    532 		icpa.icpa_unit = unit;
    533 
    534 		ldesc->len = 1;
    535 		ldesc->locs[ICPCF_UNIT] = unit;
    536 
    537 		icp->icp_children[unit] = config_found_sm_loc(&icp->icp_dv,
    538 			"icp", ldesc, &icpa, icp_print, icp_submatch);
    539 	}
    540 
    541 	icp_recompute_openings(icp);
    542 }
    543 
    544 void
    545 icp_rescan_all(struct icp_softc *icp)
    546 {
    547 	int unit;
    548 	u_int16_t cdev_cnt;
    549 
    550 	/*
    551 	 * This is the old method of rescanning the host drives.  We
    552 	 * start by reinitializing the cache service.
    553 	 */
    554 	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
    555 		printf("%s: unable to re-initialize cache service for rescan\n",
    556 		    icp->icp_dv.dv_xname);
    557 		return;
    558 	}
    559 	cdev_cnt = (u_int16_t) icp->icp_info;
    560 
    561 	/* For each host drive, do the new-style rescan. */
    562 	for (unit = 0; unit < cdev_cnt && unit < ICP_MAX_HDRIVES; unit++)
    563 		icp_rescan(icp, unit);
    564 
    565 	/* Now detach anything in the slots after cdev_cnt. */
    566 	for (; unit < ICP_MAX_HDRIVES; unit++) {
    567 		if (icp->icp_cdr[unit].cd_size != 0) {
    568 #ifdef ICP_DEBUG
    569 			printf("%s: rescan all: unit %d < new cdev_cnt (%d)\n",
    570 			    icp->icp_dv.dv_xname, unit, cdev_cnt);
    571 #endif
    572 			icp->icp_ndevs--;
    573 			icp->icp_cdr[unit].cd_size = 0;
    574 			if (icp->icp_children[unit] != NULL) {
    575 				(void) config_detach(icp->icp_children[unit],
    576 				    DETACH_FORCE);
    577 				icp->icp_children[unit] = NULL;
    578 			}
    579 		}
    580 	}
    581 
    582 	icp_recompute_openings(icp);
    583 }
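
/*
 * A minimal sketch of the discipline required by the assertions in
 * icp_rescan(): the caller holds the ioctl mutex, freezes and drains the
 * command queue, performs the rescan, and thaws the queue again.  Error
 * handling is omitted:
 *
 *	if (icp_freeze(icp) == 0) {
 *		icp_rescan_all(icp);
 *		icp_unfreeze(icp);
 *	}
 */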
    584 
    585 void
    586 icp_recompute_openings(struct icp_softc *icp)
    587 {
    588 	int unit, openings;
    589 
    590 	if (icp->icp_ndevs != 0)
    591 		openings =
    592 		    (icp->icp_nccbs - ICP_NCCB_RESERVE) / icp->icp_ndevs;
    593 	else
    594 		openings = 0;
    595 	if (openings == icp->icp_openings)
    596 		return;
    597 	icp->icp_openings = openings;
    598 
    599 #ifdef ICP_DEBUG
    600 	printf("%s: %d device%s, %d openings per device\n",
    601 	    icp->icp_dv.dv_xname, icp->icp_ndevs,
    602 	    icp->icp_ndevs == 1 ? "" : "s", icp->icp_openings);
    603 #endif
    604 
    605 	for (unit = 0; unit < ICP_MAX_HDRIVES + ICP_MAXBUS; unit++) {
    606 		if (icp->icp_children[unit] != NULL)
    607 			(*icp->icp_servicecb[unit]->iscb_openings)(
    608 			    icp->icp_children[unit], icp->icp_openings);
    609 	}
    610 }
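
/*
 * For example, with a hypothetical pool of 96 usable CCBs, an
 * ICP_NCCB_RESERVE of 4 and four devices present, each child would be
 * offered (96 - 4) / 4 = 23 openings.
 */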
    611 
    612 void
    613 icp_watchdog(void *cookie)
    614 {
    615 	struct icp_softc *icp;
    616 	int s;
    617 
    618 	icp = cookie;
    619 
    620 	s = splbio();
    621 	icp_intr(icp);
    622 	if (ICP_HAS_WORK(icp))
    623 		icp_ccb_enqueue(icp, NULL);
    624 	splx(s);
    625 
    626 	callout_reset(&icp->icp_wdog_callout, hz * ICP_WATCHDOG_FREQ,
    627 	    icp_watchdog, icp);
    628 }
    629 
    630 int
    631 icp_print(void *aux, const char *pnp)
    632 {
    633 	struct icp_attach_args *icpa;
    634 	const char *str;
    635 
    636 	icpa = (struct icp_attach_args *)aux;
    637 
    638 	if (pnp != NULL) {
    639 		if (icpa->icpa_unit < ICPA_UNIT_SCSI)
    640 			str = "block device";
    641 		else
    642 			str = "SCSI channel";
    643 		aprint_normal("%s at %s", str, pnp);
    644 	}
    645 	aprint_normal(" unit %d", icpa->icpa_unit);
    646 
    647 	return (UNCONF);
    648 }
    649 
    650 int
    651 icp_submatch(struct device *parent, struct cfdata *cf,
    652 	     const locdesc_t *ldesc, void *aux)
    653 {
    654 
    655 	if (cf->cf_loc[ICPCF_UNIT] != ICPCF_UNIT_DEFAULT &&
    656 	    cf->cf_loc[ICPCF_UNIT] != ldesc->locs[ICPCF_UNIT])
    657 		return (0);
    658 
    659 	return (config_match(parent, cf, aux));
    660 }
    661 
    662 int
    663 icp_async_event(struct icp_softc *icp, int service)
    664 {
    665 
    666 	if (service == ICP_SCREENSERVICE) {
    667 		if (icp->icp_status == ICP_S_MSG_REQUEST) {
    668 			/* XXX */
    669 		}
    670 	} else {
    671 		if ((icp->icp_fw_vers & 0xff) >= 0x1a) {
    672 			icp->icp_evt.size = 0;
    673 			icp->icp_evt.eu.async.ionode = icp->icp_dv.dv_unit;
    674 			icp->icp_evt.eu.async.status = icp->icp_status;
    675 			/*
    676 			 * Severity and event string are filled in by the
    677 			 * hardware interface interrupt handler.
    678 			 */
    679 			printf("%s: %s\n", icp->icp_dv.dv_xname,
    680 			    icp->icp_evt.event_string);
    681 		} else {
    682 			icp->icp_evt.size = sizeof(icp->icp_evt.eu.async);
    683 			icp->icp_evt.eu.async.ionode = icp->icp_dv.dv_unit;
    684 			icp->icp_evt.eu.async.service = service;
    685 			icp->icp_evt.eu.async.status = icp->icp_status;
    686 			icp->icp_evt.eu.async.info = icp->icp_info;
    687 			/* XXXJRT FIX THIS */
    688 			*(u_int32_t *) icp->icp_evt.eu.async.scsi_coord =
    689 			    icp->icp_info2;
    690 		}
    691 		icp_store_event(icp, GDT_ES_ASYNC, service, &icp->icp_evt);
    692 	}
    693 
    694 	return (0);
    695 }
    696 
    697 int
    698 icp_intr(void *cookie)
    699 {
    700 	struct icp_softc *icp;
    701 	struct icp_intr_ctx ctx;
    702 	struct icp_ccb *ic;
    703 
    704 	icp = cookie;
    705 
    706 	ctx.istatus = (*icp->icp_get_status)(icp);
    707 	if (!ctx.istatus) {
    708 		icp->icp_status = ICP_S_NO_STATUS;
    709 		return (0);
    710 	}
    711 
    712 	(*icp->icp_intr)(icp, &ctx);
    713 
    714 	icp->icp_status = ctx.cmd_status;
    715 	icp->icp_service = ctx.service;
    716 	icp->icp_info = ctx.info;
    717 	icp->icp_info2 = ctx.info2;
    718 
    719 	switch (ctx.istatus) {
    720 	case ICP_ASYNCINDEX:
    721 		icp_async_event(icp, ctx.service);
    722 		return (1);
    723 
    724 	case ICP_SPEZINDEX:
    725 		printf("%s: uninitialized or unknown service (%d/%d)\n",
    726 		    icp->icp_dv.dv_xname, ctx.info, ctx.info2);
    727 		icp->icp_evt.size = sizeof(icp->icp_evt.eu.driver);
    728 		icp->icp_evt.eu.driver.ionode = icp->icp_dv.dv_unit;
    729 		icp_store_event(icp, GDT_ES_DRIVER, 4, &icp->icp_evt);
    730 		return (1);
    731 	}
    732 
     733 	if ((ctx.istatus - 2) >= icp->icp_nccbs)
    734 		panic("icp_intr: bad command index returned");
    735 
    736 	ic = &icp->icp_ccbs[ctx.istatus - 2];
    737 	ic->ic_status = icp->icp_status;
    738 
    739 	if ((ic->ic_flags & IC_ALLOCED) == 0) {
    740 		/* XXX ICP's "iir" driver just sends an event here. */
    741 		panic("icp_intr: inactive CCB identified");
    742 	}
    743 
    744 	/*
    745 	 * Try to protect ourselves from the running command count already
    746 	 * being 0 (e.g. if a polled command times out).
    747 	 */
    748 	KDASSERT(icp->icp_running != 0);
    749 	if (--icp->icp_running == 0 &&
    750 	    (icp->icp_flags & ICP_F_WAIT_FREEZE) != 0) {
    751 		icp->icp_flags &= ~ICP_F_WAIT_FREEZE;
    752 		wakeup(&icp->icp_qfreeze);
    753 	}
    754 
    755 	switch (icp->icp_status) {
    756 	case ICP_S_BSY:
    757 #ifdef ICP_DEBUG
    758 		printf("%s: ICP_S_BSY received\n", icp->icp_dv.dv_xname);
    759 #endif
    760 		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
    761 			SIMPLEQ_INSERT_HEAD(&icp->icp_ucmd_queue, ic, ic_chain);
    762 		else
    763 			SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_queue, ic, ic_chain);
    764 		break;
    765 
    766 	default:
    767 		ic->ic_flags |= IC_COMPLETE;
    768 
    769 		if ((ic->ic_flags & IC_WAITING) != 0)
    770 			wakeup(ic);
    771 		else if (ic->ic_intr != NULL)
    772 			(*ic->ic_intr)(ic);
    773 
    774 		if (ICP_HAS_WORK(icp))
    775 			icp_ccb_enqueue(icp, NULL);
    776 
    777 		break;
    778 	}
    779 
    780 	return (1);
    781 }
    782 
    783 struct icp_ucmd_ctx {
    784 	gdt_ucmd_t *iu_ucmd;
    785 	u_int32_t iu_cnt;
    786 };
    787 
    788 void
    789 icp_ucmd_intr(struct icp_ccb *ic)
    790 {
    791 	struct icp_softc *icp = (void *) ic->ic_dv;
    792 	struct icp_ucmd_ctx *iu = ic->ic_context;
    793 	gdt_ucmd_t *ucmd = iu->iu_ucmd;
    794 
    795 	ucmd->status = icp->icp_status;
    796 	ucmd->info = icp->icp_info;
    797 
    798 	if (iu->iu_cnt != 0) {
    799 		bus_dmamap_sync(icp->icp_dmat,
    800 		    icp->icp_scr_dmamap,
    801 		    ICP_SCRATCH_UCMD, iu->iu_cnt,
    802 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    803 		memcpy(ucmd->data,
    804 		    icp->icp_scr + ICP_SCRATCH_UCMD, iu->iu_cnt);
    805 	}
    806 
    807 	icp->icp_ucmd_ccb = NULL;
    808 
    809 	ic->ic_flags |= IC_COMPLETE;
    810 	wakeup(ic);
    811 }
    812 
    813 /*
    814  * NOTE: We assume that it is safe to sleep here!
    815  */
    816 int
    817 icp_cmd(struct icp_softc *icp, u_int8_t service, u_int16_t opcode,
    818 	u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
    819 {
    820 	struct icp_ioctlcmd *icmd;
    821 	struct icp_cachecmd *cc;
    822 	struct icp_rawcmd *rc;
    823 	int retries, rv;
    824 	struct icp_ccb *ic;
    825 
    826 	retries = ICP_RETRIES;
    827 
    828 	do {
    829 		ic = icp_ccb_alloc_wait(icp);
    830 		memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
    831 		ic->ic_cmd.cmd_opcode = htole16(opcode);
    832 
    833 		switch (service) {
    834 		case ICP_CACHESERVICE:
    835 			if (opcode == ICP_IOCTL) {
    836 				icmd = &ic->ic_cmd.cmd_packet.ic;
    837 				icmd->ic_subfunc = htole16(arg1);
    838 				icmd->ic_channel = htole32(arg2);
    839 				icmd->ic_bufsize = htole32(arg3);
    840 				icmd->ic_addr =
    841 				    htole32(icp->icp_scr_seg[0].ds_addr);
    842 
    843 				bus_dmamap_sync(icp->icp_dmat,
    844 				    icp->icp_scr_dmamap, 0, arg3,
    845 				    BUS_DMASYNC_PREWRITE |
    846 				    BUS_DMASYNC_PREREAD);
    847 			} else {
    848 				cc = &ic->ic_cmd.cmd_packet.cc;
    849 				cc->cc_deviceno = htole16(arg1);
    850 				cc->cc_blockno = htole32(arg2);
    851 			}
    852 			break;
    853 
    854 		case ICP_SCSIRAWSERVICE:
    855 			rc = &ic->ic_cmd.cmd_packet.rc;
    856 			rc->rc_direction = htole32(arg1);
    857 			rc->rc_bus = arg2;
    858 			rc->rc_target = arg3;
    859 			rc->rc_lun = arg3 >> 8;
    860 			break;
    861 		}
    862 
    863 		ic->ic_service = service;
    864 		ic->ic_cmdlen = sizeof(ic->ic_cmd);
    865 		rv = icp_ccb_poll(icp, ic, 10000);
    866 
    867 		switch (service) {
    868 		case ICP_CACHESERVICE:
    869 			if (opcode == ICP_IOCTL) {
    870 				bus_dmamap_sync(icp->icp_dmat,
    871 				    icp->icp_scr_dmamap, 0, arg3,
    872 				    BUS_DMASYNC_POSTWRITE |
    873 				    BUS_DMASYNC_POSTREAD);
    874 			}
    875 			break;
    876 		}
    877 
    878 		icp_ccb_free(icp, ic);
    879 	} while (rv != 0 && --retries > 0);
    880 
    881 	return (icp->icp_status == ICP_S_OK);
    882 }
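
/*
 * Typical internal usage, as in the attach and rescan paths above: issue a
 * cache service information request and, if it succeeds, pick the result
 * up from icp->icp_info:
 *
 *	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, unit, 0, 0))
 *		size = icp->icp_info;
 */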
    883 
    884 int
    885 icp_ucmd(struct icp_softc *icp, gdt_ucmd_t *ucmd)
    886 {
    887 	struct icp_ccb *ic;
    888 	struct icp_ucmd_ctx iu;
    889 	u_int32_t cnt;
    890 	int error;
    891 
    892 	if (ucmd->service == ICP_CACHESERVICE) {
    893 		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
    894 			cnt = ucmd->command.cmd_packet.ic.ic_bufsize;
    895 			if (cnt > GDT_SCRATCH_SZ) {
    896 				printf("%s: scratch buffer too small (%d/%d)\n",
    897 				    icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt);
    898 				return (EINVAL);
    899 			}
    900 		} else {
    901 			cnt = ucmd->command.cmd_packet.cc.cc_blockcnt *
    902 			    ICP_SECTOR_SIZE;
    903 			if (cnt > GDT_SCRATCH_SZ) {
    904 				printf("%s: scratch buffer too small (%d/%d)\n",
    905 				    icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt);
    906 				return (EINVAL);
    907 			}
    908 		}
    909 	} else {
    910 		cnt = ucmd->command.cmd_packet.rc.rc_sdlen +
    911 		    ucmd->command.cmd_packet.rc.rc_sense_len;
    912 		if (cnt > GDT_SCRATCH_SZ) {
    913 			printf("%s: scratch buffer too small (%d/%d)\n",
    914 			    icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt);
    915 			return (EINVAL);
    916 		}
    917 	}
    918 
    919 	iu.iu_ucmd = ucmd;
    920 	iu.iu_cnt = cnt;
    921 
    922 	ic = icp_ccb_alloc_wait(icp);
    923 	memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
    924 	ic->ic_cmd.cmd_opcode = htole16(ucmd->command.cmd_opcode);
    925 
    926 	if (ucmd->service == ICP_CACHESERVICE) {
    927 		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
    928 			struct icp_ioctlcmd *icmd, *uicmd;
    929 
    930 			icmd = &ic->ic_cmd.cmd_packet.ic;
    931 			uicmd = &ucmd->command.cmd_packet.ic;
    932 
    933 			icmd->ic_subfunc = htole16(uicmd->ic_subfunc);
    934 			icmd->ic_channel = htole32(uicmd->ic_channel);
    935 			icmd->ic_bufsize = htole32(uicmd->ic_bufsize);
    936 			icmd->ic_addr =
    937 			    htole32(icp->icp_scr_seg[0].ds_addr +
    938 				    ICP_SCRATCH_UCMD);
    939 		} else {
    940 			struct icp_cachecmd *cc, *ucc;
    941 
    942 			cc = &ic->ic_cmd.cmd_packet.cc;
    943 			ucc = &ucmd->command.cmd_packet.cc;
    944 
    945 			cc->cc_deviceno = htole16(ucc->cc_deviceno);
    946 			cc->cc_blockno = htole32(ucc->cc_blockno);
    947 			cc->cc_blockcnt = htole32(ucc->cc_blockcnt);
    948 			cc->cc_addr = htole32(0xffffffffU);
    949 			cc->cc_nsgent = htole32(1);
    950 			cc->cc_sg[0].sg_addr =
    951 			    htole32(icp->icp_scr_seg[0].ds_addr +
    952 				    ICP_SCRATCH_UCMD);
    953 			cc->cc_sg[0].sg_len = htole32(cnt);
    954 		}
    955 	} else {
    956 		struct icp_rawcmd *rc, *urc;
    957 
    958 		rc = &ic->ic_cmd.cmd_packet.rc;
    959 		urc = &ucmd->command.cmd_packet.rc;
    960 
    961 		rc->rc_direction = htole32(urc->rc_direction);
    962 		rc->rc_sdata = htole32(0xffffffffU);
    963 		rc->rc_sdlen = htole32(urc->rc_sdlen);
    964 		rc->rc_clen = htole32(urc->rc_clen);
    965 		memcpy(rc->rc_cdb, urc->rc_cdb, sizeof(rc->rc_cdb));
    966 		rc->rc_target = urc->rc_target;
    967 		rc->rc_lun = urc->rc_lun;
    968 		rc->rc_bus = urc->rc_bus;
    969 		rc->rc_sense_len = htole32(urc->rc_sense_len);
    970 		rc->rc_sense_addr =
    971 		    htole32(icp->icp_scr_seg[0].ds_addr +
    972 			    ICP_SCRATCH_UCMD + urc->rc_sdlen);
    973 		rc->rc_nsgent = htole32(1);
    974 		rc->rc_sg[0].sg_addr =
    975 		    htole32(icp->icp_scr_seg[0].ds_addr + ICP_SCRATCH_UCMD);
    976 		rc->rc_sg[0].sg_len = htole32(cnt - urc->rc_sense_len);
    977 	}
    978 
    979 	ic->ic_service = ucmd->service;
    980 	ic->ic_cmdlen = sizeof(ic->ic_cmd);
    981 	ic->ic_context = &iu;
    982 
    983 	/*
    984 	 * XXX What units are ucmd->timeout in?  Until we know, we
    985 	 * XXX just pull a number out of thin air.
    986 	 */
    987 	if (__predict_false((error = icp_ccb_wait_user(icp, ic, 30000)) != 0))
    988 		printf("%s: error %d waiting for ucmd to complete\n",
    989 		    icp->icp_dv.dv_xname, error);
    990 
    991 	/* icp_ucmd_intr() has updated ucmd. */
    992 	icp_ccb_free(icp, ic);
    993 
    994 	return (error);
    995 }
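
/*
 * Rough sketch of a caller (e.g. the management ioctl path) handing a user
 * command to icp_ucmd().  The field names match those consumed above; the
 * opcode and values are placeholders:
 *
 *	ucmd->service = ICP_CACHESERVICE;
 *	ucmd->command.cmd_opcode = opcode;
 *	ucmd->command.cmd_packet.cc.cc_deviceno = unit;
 *	ucmd->command.cmd_packet.cc.cc_blockno = blkno;
 *	ucmd->command.cmd_packet.cc.cc_blockcnt = 1;
 *	error = icp_ucmd(icp, ucmd);
 *
 * On return, ucmd->status and ucmd->info have been filled in, and for
 * transfers ucmd->data holds the result copied back from the scratch area.
 */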
    996 
    997 struct icp_ccb *
    998 icp_ccb_alloc(struct icp_softc *icp)
    999 {
   1000 	struct icp_ccb *ic;
   1001 	int s;
   1002 
   1003 	s = splbio();
   1004 	if (__predict_false((ic =
   1005 			     SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL)) {
   1006 		splx(s);
   1007 		return (NULL);
   1008 	}
   1009 	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
   1010 	splx(s);
   1011 
   1012 	ic->ic_flags = IC_ALLOCED;
   1013 	return (ic);
   1014 }
   1015 
   1016 struct icp_ccb *
   1017 icp_ccb_alloc_wait(struct icp_softc *icp)
   1018 {
   1019 	struct icp_ccb *ic;
   1020 	int s;
   1021 
   1022 	s = splbio();
   1023 	while ((ic = SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL) {
   1024 		icp->icp_flags |= ICP_F_WAIT_CCB;
   1025 		(void) tsleep(&icp->icp_ccb_freelist, PRIBIO, "icpccb", 0);
   1026 	}
   1027 	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
   1028 	splx(s);
   1029 
   1030 	ic->ic_flags = IC_ALLOCED;
   1031 	return (ic);
   1032 }
   1033 
   1034 void
   1035 icp_ccb_free(struct icp_softc *icp, struct icp_ccb *ic)
   1036 {
   1037 	int s;
   1038 
   1039 	s = splbio();
   1040 	ic->ic_flags = 0;
   1041 	ic->ic_intr = NULL;
   1042 	SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_freelist, ic, ic_chain);
   1043 	if (__predict_false((icp->icp_flags & ICP_F_WAIT_CCB) != 0)) {
   1044 		icp->icp_flags &= ~ICP_F_WAIT_CCB;
   1045 		wakeup(&icp->icp_ccb_freelist);
   1046 	}
   1047 	splx(s);
   1048 }
   1049 
   1050 void
   1051 icp_ccb_enqueue(struct icp_softc *icp, struct icp_ccb *ic)
   1052 {
   1053 	int s;
   1054 
   1055 	s = splbio();
   1056 
   1057 	if (ic != NULL) {
   1058 		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
   1059 			SIMPLEQ_INSERT_TAIL(&icp->icp_ucmd_queue, ic, ic_chain);
   1060 		else
   1061 			SIMPLEQ_INSERT_TAIL(&icp->icp_ccb_queue, ic, ic_chain);
   1062 	}
   1063 
   1064 	for (; icp->icp_qfreeze == 0;) {
   1065 		if (__predict_false((ic =
   1066 			    SIMPLEQ_FIRST(&icp->icp_ucmd_queue)) != NULL)) {
   1067 			struct icp_ucmd_ctx *iu = ic->ic_context;
   1068 			gdt_ucmd_t *ucmd = iu->iu_ucmd;
   1069 
   1070 			/*
   1071 			 * All user-generated commands share the same
   1072 			 * scratch space, so if one is already running,
   1073 			 * we have to stall the command queue.
   1074 			 */
   1075 			if (icp->icp_ucmd_ccb != NULL)
   1076 				break;
   1077 			if ((*icp->icp_test_busy)(icp))
   1078 				break;
   1079 			icp->icp_ucmd_ccb = ic;
   1080 
   1081 			if (iu->iu_cnt != 0) {
   1082 				memcpy(icp->icp_scr + ICP_SCRATCH_UCMD,
   1083 				    ucmd->data, iu->iu_cnt);
   1084 				bus_dmamap_sync(icp->icp_dmat,
   1085 				    icp->icp_scr_dmamap,
   1086 				    ICP_SCRATCH_UCMD, iu->iu_cnt,
   1087 				    BUS_DMASYNC_PREREAD |
   1088 				    BUS_DMASYNC_PREWRITE);
   1089 			}
   1090 		} else if (__predict_true((ic =
   1091 				SIMPLEQ_FIRST(&icp->icp_ccb_queue)) != NULL)) {
   1092 			if ((*icp->icp_test_busy)(icp))
   1093 				break;
   1094 		} else {
   1095 			/* no command found */
   1096 			break;
   1097 		}
   1098 		icp_ccb_submit(icp, ic);
   1099 		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
   1100 			SIMPLEQ_REMOVE_HEAD(&icp->icp_ucmd_queue, ic_chain);
   1101 		else
   1102 			SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_queue, ic_chain);
   1103 	}
   1104 
   1105 	splx(s);
   1106 }
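
/*
 * Note that icp_ccb_enqueue(icp, NULL) is also used (by icp_intr(),
 * icp_watchdog(), icp_freeze() and icp_unfreeze()) simply to restart
 * submission of already-queued work without adding a new CCB.
 */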
   1107 
   1108 int
   1109 icp_ccb_map(struct icp_softc *icp, struct icp_ccb *ic, void *data, int size,
   1110 	    int dir)
   1111 {
   1112 	struct icp_sg *sg;
   1113 	int nsegs, i, rv;
   1114 	bus_dmamap_t xfer;
   1115 
   1116 	xfer = ic->ic_xfer_map;
   1117 
   1118 	rv = bus_dmamap_load(icp->icp_dmat, xfer, data, size, NULL,
   1119 	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
   1120 	    ((dir & IC_XFER_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
   1121 	if (rv != 0)
   1122 		return (rv);
   1123 
   1124 	nsegs = xfer->dm_nsegs;
   1125 	ic->ic_xfer_size = size;
   1126 	ic->ic_nsgent = nsegs;
   1127 	ic->ic_flags |= dir;
   1128 	sg = ic->ic_sg;
   1129 
   1130 	if (sg != NULL) {
   1131 		for (i = 0; i < nsegs; i++, sg++) {
   1132 			sg->sg_addr = htole32(xfer->dm_segs[i].ds_addr);
   1133 			sg->sg_len = htole32(xfer->dm_segs[i].ds_len);
   1134 		}
   1135 	} else if (nsegs > 1)
   1136 		panic("icp_ccb_map: no SG list specified, but nsegs > 1");
   1137 
   1138 	if ((dir & IC_XFER_OUT) != 0)
   1139 		i = BUS_DMASYNC_PREWRITE;
   1140 	else /* if ((dir & IC_XFER_IN) != 0) */
   1141 		i = BUS_DMASYNC_PREREAD;
   1142 
   1143 	bus_dmamap_sync(icp->icp_dmat, xfer, 0, ic->ic_xfer_size, i);
   1144 	return (0);
   1145 }
   1146 
   1147 void
   1148 icp_ccb_unmap(struct icp_softc *icp, struct icp_ccb *ic)
   1149 {
   1150 	int i;
   1151 
   1152 	if ((ic->ic_flags & IC_XFER_OUT) != 0)
   1153 		i = BUS_DMASYNC_POSTWRITE;
   1154 	else /* if ((ic->ic_flags & IC_XFER_IN) != 0) */
   1155 		i = BUS_DMASYNC_POSTREAD;
   1156 
   1157 	bus_dmamap_sync(icp->icp_dmat, ic->ic_xfer_map, 0, ic->ic_xfer_size, i);
   1158 	bus_dmamap_unload(icp->icp_dmat, ic->ic_xfer_map);
   1159 }
   1160 
   1161 int
   1162 icp_ccb_poll(struct icp_softc *icp, struct icp_ccb *ic, int timo)
   1163 {
   1164 	int s, rv;
   1165 
   1166 	s = splbio();
   1167 
   1168 	for (timo = ICP_BUSY_WAIT_MS * 100; timo != 0; timo--) {
   1169 		if (!(*icp->icp_test_busy)(icp))
   1170 			break;
   1171 		DELAY(10);
   1172 	}
    1173 	if (timo == 0) {
    1174 		printf("%s: submit: busy\n", icp->icp_dv.dv_xname);
 		splx(s);
    1175 		return (EAGAIN);
    1176 	}
   1177 
   1178 	icp_ccb_submit(icp, ic);
   1179 
   1180 	if (cold) {
   1181 		for (timo *= 10; timo != 0; timo--) {
   1182 			DELAY(100);
   1183 			icp_intr(icp);
   1184 			if ((ic->ic_flags & IC_COMPLETE) != 0)
   1185 				break;
   1186 		}
   1187 	} else {
   1188 		ic->ic_flags |= IC_WAITING;
   1189 		while ((ic->ic_flags & IC_COMPLETE) == 0) {
   1190 			if ((rv = tsleep(ic, PRIBIO, "icpwccb",
   1191 					 mstohz(timo))) != 0) {
   1192 				timo = 0;
   1193 				break;
   1194 			}
   1195 		}
   1196 	}
   1197 
   1198 	if (timo != 0) {
   1199 		if (ic->ic_status != ICP_S_OK) {
   1200 #ifdef ICP_DEBUG
   1201 			printf("%s: request failed; status=0x%04x\n",
   1202 			    icp->icp_dv.dv_xname, ic->ic_status);
   1203 #endif
   1204 			rv = EIO;
   1205 		} else
   1206 			rv = 0;
   1207 	} else {
   1208 		printf("%s: command timed out\n", icp->icp_dv.dv_xname);
   1209 		rv = EIO;
   1210 	}
   1211 
   1212 	while ((*icp->icp_test_busy)(icp) != 0)
   1213 		DELAY(10);
   1214 
   1215 	splx(s);
   1216 
   1217 	return (rv);
   1218 }
   1219 
   1220 int
   1221 icp_ccb_wait(struct icp_softc *icp, struct icp_ccb *ic, int timo)
   1222 {
   1223 	int s, rv;
   1224 
   1225 	ic->ic_flags |= IC_WAITING;
   1226 
   1227 	s = splbio();
   1228 	icp_ccb_enqueue(icp, ic);
   1229 	while ((ic->ic_flags & IC_COMPLETE) == 0) {
   1230 		if ((rv = tsleep(ic, PRIBIO, "icpwccb", mstohz(timo))) != 0) {
   1231 			splx(s);
   1232 			return (rv);
   1233 		}
   1234 	}
   1235 	splx(s);
   1236 
   1237 	if (ic->ic_status != ICP_S_OK) {
   1238 		printf("%s: command failed; status=%x\n", icp->icp_dv.dv_xname,
   1239 		    ic->ic_status);
   1240 		return (EIO);
   1241 	}
   1242 
   1243 	return (0);
   1244 }
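
/*
 * Putting the pieces together, a synchronous transfer through the CCB layer
 * has roughly the following shape (a sketch only; a real caller also fills
 * in the service-specific command packet and scatter/gather list):
 *
 *	ic = icp_ccb_alloc_wait(icp);
 *	... build ic->ic_cmd, set ic->ic_service and ic->ic_cmdlen ...
 *	if (icp_ccb_map(icp, ic, data, datasize, IC_XFER_IN) == 0) {
 *		rv = icp_ccb_wait(icp, ic, timeout_ms);
 *		icp_ccb_unmap(icp, ic);
 *	}
 *	icp_ccb_free(icp, ic);
 */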
   1245 
   1246 int
   1247 icp_ccb_wait_user(struct icp_softc *icp, struct icp_ccb *ic, int timo)
   1248 {
   1249 	int s, rv;
   1250 
   1251 	ic->ic_dv = &icp->icp_dv;
   1252 	ic->ic_intr = icp_ucmd_intr;
   1253 	ic->ic_flags |= IC_UCMD;
   1254 
   1255 	s = splbio();
   1256 	icp_ccb_enqueue(icp, ic);
   1257 	while ((ic->ic_flags & IC_COMPLETE) == 0) {
   1258 		if ((rv = tsleep(ic, PRIBIO, "icpwuccb", mstohz(timo))) != 0) {
   1259 			splx(s);
   1260 			return (rv);
   1261 		}
   1262 	}
   1263 	splx(s);
   1264 
   1265 	return (0);
   1266 }
   1267 
   1268 void
   1269 icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic)
   1270 {
   1271 
   1272 	ic->ic_cmdlen = (ic->ic_cmdlen + 3) & ~3;
   1273 
   1274 	(*icp->icp_set_sema0)(icp);
   1275 	DELAY(10);
   1276 
   1277 	ic->ic_cmd.cmd_boardnode = htole32(ICP_LOCALBOARD);
   1278 	ic->ic_cmd.cmd_cmdindex = htole32(ic->ic_ident);
   1279 
   1280 	icp->icp_running++;
   1281 
   1282 	(*icp->icp_copy_cmd)(icp, ic);
   1283 	(*icp->icp_release_event)(icp, ic);
   1284 }
   1285 
   1286 int
   1287 icp_freeze(struct icp_softc *icp)
   1288 {
   1289 	int s, error = 0;
   1290 
   1291 	s = splbio();
   1292 	if (icp->icp_qfreeze++ == 0) {
   1293 		while (icp->icp_running != 0) {
   1294 			icp->icp_flags |= ICP_F_WAIT_FREEZE;
   1295 			error = tsleep(&icp->icp_qfreeze, PRIBIO|PCATCH,
   1296 			    "icpqfrz", 0);
   1297 			if (error != 0 && --icp->icp_qfreeze == 0 &&
   1298 			    ICP_HAS_WORK(icp)) {
   1299 				icp_ccb_enqueue(icp, NULL);
   1300 				break;
   1301 			}
   1302 		}
   1303 	}
   1304 	splx(s);
   1305 
   1306 	return (error);
   1307 }
   1308 
   1309 void
   1310 icp_unfreeze(struct icp_softc *icp)
   1311 {
   1312 	int s;
   1313 
   1314 	s = splbio();
   1315 	KDASSERT(icp->icp_qfreeze != 0);
   1316 	if (--icp->icp_qfreeze == 0 && ICP_HAS_WORK(icp))
   1317 		icp_ccb_enqueue(icp, NULL);
   1318 	splx(s);
   1319 }
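
/*
 * icp_freeze() returns non-zero only when the wait for outstanding commands
 * to drain is interrupted by a signal (the sleep uses PCATCH); a return of
 * zero means the queue is frozen and no commands are running.
 */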
   1320 
   1321 /* XXX Global - should be per-controller? XXX */
   1322 static gdt_evt_str icp_event_buffer[ICP_MAX_EVENTS];
   1323 static int icp_event_oldidx;
   1324 static int icp_event_lastidx;
   1325 
   1326 gdt_evt_str *
   1327 icp_store_event(struct icp_softc *icp, u_int16_t source, u_int16_t idx,
   1328     gdt_evt_data *evt)
   1329 {
   1330 	gdt_evt_str *e;
   1331 
   1332 	/* no source == no event */
   1333 	if (source == 0)
   1334 		return (NULL);
   1335 
   1336 	e = &icp_event_buffer[icp_event_lastidx];
   1337 	if (e->event_source == source && e->event_idx == idx &&
   1338 	    ((evt->size != 0 && e->event_data.size != 0 &&
   1339 	      memcmp(&e->event_data.eu, &evt->eu, evt->size) == 0) ||
   1340 	     (evt->size == 0 && e->event_data.size == 0 &&
   1341 	      strcmp((char *) e->event_data.event_string,
   1342 	      	     (char *) evt->event_string) == 0))) {
   1343 		e->last_stamp = time.tv_sec;
   1344 		e->same_count++;
   1345 	} else {
   1346 		if (icp_event_buffer[icp_event_lastidx].event_source != 0) {
   1347 			icp_event_lastidx++;
   1348 			if (icp_event_lastidx == ICP_MAX_EVENTS)
   1349 				icp_event_lastidx = 0;
   1350 			if (icp_event_lastidx == icp_event_oldidx) {
   1351 				icp_event_oldidx++;
   1352 				if (icp_event_oldidx == ICP_MAX_EVENTS)
   1353 					icp_event_oldidx = 0;
   1354 			}
   1355 		}
   1356 		e = &icp_event_buffer[icp_event_lastidx];
   1357 		e->event_source = source;
   1358 		e->event_idx = idx;
   1359 		e->first_stamp = e->last_stamp = time.tv_sec;
   1360 		e->same_count = 1;
   1361 		e->event_data = *evt;
   1362 		e->application = 0;
   1363 	}
   1364 	return (e);
   1365 }
   1366 
   1367 int
   1368 icp_read_event(struct icp_softc *icp, int handle, gdt_evt_str *estr)
   1369 {
   1370 	gdt_evt_str *e;
   1371 	int eindex, s;
   1372 
   1373 	s = splbio();
   1374 
   1375 	if (handle == -1)
   1376 		eindex = icp_event_oldidx;
   1377 	else
   1378 		eindex = handle;
   1379 
   1380 	estr->event_source = 0;
   1381 
   1382 	if (eindex < 0 || eindex >= ICP_MAX_EVENTS) {
   1383 		splx(s);
   1384 		return (eindex);
   1385 	}
   1386 
   1387 	e = &icp_event_buffer[eindex];
   1388 	if (e->event_source != 0) {
   1389 		if (eindex != icp_event_lastidx) {
   1390 			eindex++;
   1391 			if (eindex == ICP_MAX_EVENTS)
   1392 				eindex = 0;
   1393 		} else
   1394 			eindex = -1;
   1395 		memcpy(estr, e, sizeof(gdt_evt_str));
   1396 	}
   1397 
   1398 	splx(s);
   1399 
   1400 	return (eindex);
   1401 }
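
/*
 * Sketch of how a consumer walks the buffered events: start with a handle
 * of -1 (the oldest event) and feed each returned handle back in until no
 * event is returned or the handle comes back as -1:
 *
 *	h = -1;
 *	do {
 *		h = icp_read_event(icp, h, &estr);
 *		if (estr.event_source == 0)
 *			break;
 *		... consume estr ...
 *	} while (h != -1);
 */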
   1402 
   1403 void
   1404 icp_readapp_event(struct icp_softc *icp, u_int8_t application,
   1405     gdt_evt_str *estr)
   1406 {
   1407 	gdt_evt_str *e;
   1408 	int found = 0, eindex, s;
   1409 
   1410 	s = splbio();
   1411 
   1412 	eindex = icp_event_oldidx;
   1413 	for (;;) {
   1414 		e = &icp_event_buffer[eindex];
   1415 		if (e->event_source == 0)
   1416 			break;
   1417 		if ((e->application & application) == 0) {
   1418 			e->application |= application;
   1419 			found = 1;
   1420 			break;
   1421 		}
   1422 		if (eindex == icp_event_lastidx)
   1423 			break;
   1424 		eindex++;
   1425 		if (eindex == ICP_MAX_EVENTS)
   1426 			eindex = 0;
   1427 	}
   1428 	if (found)
   1429 		memcpy(estr, e, sizeof(gdt_evt_str));
   1430 	else
   1431 		estr->event_source = 0;
   1432 
   1433 	splx(s);
   1434 }
   1435 
   1436 void
   1437 icp_clear_events(struct icp_softc *icp)
   1438 {
   1439 	int s;
   1440 
   1441 	s = splbio();
   1442 	icp_event_oldidx = icp_event_lastidx = 0;
   1443 	memset(icp_event_buffer, 0, sizeof(icp_event_buffer));
   1444 	splx(s);
   1445 }
   1446