      1 /*	$NetBSD: icp.c,v 1.34.4.1 2021/03/22 02:01:00 thorpej Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Andrew Doran, and by Jason R. Thorpe of Wasabi Systems, Inc.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Copyright (c) 1999, 2000 Niklas Hallqvist.  All rights reserved.
     34  *
     35  * Redistribution and use in source and binary forms, with or without
     36  * modification, are permitted provided that the following conditions
     37  * are met:
     38  * 1. Redistributions of source code must retain the above copyright
     39  *    notice, this list of conditions and the following disclaimer.
     40  * 2. Redistributions in binary form must reproduce the above copyright
     41  *    notice, this list of conditions and the following disclaimer in the
     42  *    documentation and/or other materials provided with the distribution.
     43  * 3. All advertising materials mentioning features or use of this software
     44  *    must display the following acknowledgement:
     45  *	This product includes software developed by Niklas Hallqvist.
     46  * 4. The name of the author may not be used to endorse or promote products
     47  *    derived from this software without specific prior written permission.
     48  *
     49  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     50  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     51  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     52  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     53  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
      54  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     55  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     56  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     57  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     58  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     59  *
     60  * from OpenBSD: gdt_common.c,v 1.12 2001/07/04 06:43:18 niklas Exp
     61  */
     62 
     63 /*
      64  * This driver would not have been written were it not for the hardware
      65  * donations from both ICP-Vortex and ko.neT.  I thank them for their support.
     66  *
     67  * Re-worked for NetBSD by Andrew Doran.  Test hardware kindly supplied by
     68  * Intel.
     69  *
     70  * Support for the ICP-Vortex management tools added by
     71  * Jason R. Thorpe of Wasabi Systems, Inc., based on code
      72  * provided by Achim Leubner <achim.leubner@intel.com>.
     73  *
     74  * Additional support for dynamic rescan of cacheservice drives by
     75  * Jason R. Thorpe of Wasabi Systems, Inc.
     76  */
     77 
     78 #include <sys/cdefs.h>
     79 __KERNEL_RCSID(0, "$NetBSD: icp.c,v 1.34.4.1 2021/03/22 02:01:00 thorpej Exp $");
     80 
     81 #include <sys/param.h>
     82 #include <sys/systm.h>
     83 #include <sys/kernel.h>
     84 #include <sys/device.h>
     85 #include <sys/queue.h>
     86 #include <sys/proc.h>
     87 #include <sys/buf.h>
     88 #include <sys/endian.h>
     89 #include <sys/malloc.h>
     90 #include <sys/disk.h>
     91 
     92 #include <sys/bswap.h>
     93 #include <sys/bus.h>
     94 
     95 #include <dev/pci/pcireg.h>
     96 #include <dev/pci/pcivar.h>
     97 #include <dev/pci/pcidevs.h>
     98 
     99 #include <dev/ic/icpreg.h>
    100 #include <dev/ic/icpvar.h>
    101 
    102 #include <dev/scsipi/scsipi_all.h>
    103 #include <dev/scsipi/scsiconf.h>
    104 
    105 #include "locators.h"
    106 
    107 int	icp_async_event(struct icp_softc *, int);
    108 void	icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic);
    109 void	icp_chain(struct icp_softc *);
    110 int	icp_print(void *, const char *);
    111 void	icp_watchdog(void *);
    112 void	icp_ucmd_intr(struct icp_ccb *);
    113 void	icp_recompute_openings(struct icp_softc *);
    114 
    115 int	icp_count;	/* total # of controllers, for ioctl interface */
    116 
    117 /*
    118  * Statistics for the ioctl interface to query.
    119  *
    120  * XXX Global.  They should probably be made per-controller
    121  * XXX at some point.
    122  */
    123 gdt_statist_t icp_stats;
    124 
    125 int
    126 icp_init(struct icp_softc *icp, const char *intrstr)
    127 {
    128 	struct icp_attach_args icpa;
    129 	struct icp_binfo binfo;
    130 	struct icp_ccb *ic;
    131 	u_int16_t cdev_cnt;
    132 	int i, j, state, feat, nsegs, rv;
    133 	int locs[ICPCF_NLOCS];
    134 
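         	/*
         	 * "state" records how far resource allocation has progressed,
         	 * so that bail_out can unwind exactly what was acquired:
         	 * 1 = scratch DMA map created, 2 = scratch memory allocated,
         	 * 3 = scratch memory mapped, 4 = scratch DMA map loaded,
         	 * 5 = CCB array allocated.
         	 */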
    135 	state = 0;
    136 
    137 	if (intrstr != NULL)
    138 		aprint_normal_dev(icp->icp_dv, "interrupting at %s\n",
    139 		    intrstr);
    140 
    141 	SIMPLEQ_INIT(&icp->icp_ccb_queue);
    142 	SIMPLEQ_INIT(&icp->icp_ccb_freelist);
    143 	SIMPLEQ_INIT(&icp->icp_ucmd_queue);
    144 	callout_init(&icp->icp_wdog_callout, 0);
    145 
    146 	/*
    147 	 * Allocate a scratch area.
    148 	 */
    149 	if (bus_dmamap_create(icp->icp_dmat, ICP_SCRATCH_SIZE, 1,
    150 	    ICP_SCRATCH_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
    151 	    &icp->icp_scr_dmamap) != 0) {
    152 		aprint_error_dev(icp->icp_dv, "cannot create scratch dmamap\n");
    153 		return (1);
    154 	}
    155 	state++;
    156 
    157 	if (bus_dmamem_alloc(icp->icp_dmat, ICP_SCRATCH_SIZE, PAGE_SIZE, 0,
    158 	    icp->icp_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
    159 		aprint_error_dev(icp->icp_dv, "cannot alloc scratch dmamem\n");
    160 		goto bail_out;
    161 	}
    162 	state++;
    163 
    164 	if (bus_dmamem_map(icp->icp_dmat, icp->icp_scr_seg, nsegs,
    165 	    ICP_SCRATCH_SIZE, &icp->icp_scr, 0)) {
    166 		aprint_error_dev(icp->icp_dv, "cannot map scratch dmamem\n");
    167 		goto bail_out;
    168 	}
    169 	state++;
    170 
    171 	if (bus_dmamap_load(icp->icp_dmat, icp->icp_scr_dmamap, icp->icp_scr,
    172 	    ICP_SCRATCH_SIZE, NULL, BUS_DMA_NOWAIT)) {
    173 		aprint_error_dev(icp->icp_dv, "cannot load scratch dmamap\n");
    174 		goto bail_out;
    175 	}
    176 	state++;
    177 
    178 	/*
    179 	 * Allocate and initialize the command control blocks.
    180 	 */
    181 	ic = malloc(sizeof(*ic) * ICP_NCCBS, M_DEVBUF, M_WAITOK | M_ZERO);
    182 	icp->icp_ccbs = ic;
    183 	state++;
    184 
    185 	for (i = 0; i < ICP_NCCBS; i++, ic++) {
    186 		/*
    187 		 * The first two command indexes have special meanings, so
    188 		 * we can't use them.
    189 		 */
    190 		ic->ic_ident = i + 2;
    191 		rv = bus_dmamap_create(icp->icp_dmat, ICP_MAX_XFER,
    192 		    ICP_MAXSG, ICP_MAX_XFER, 0,
    193 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
    194 		    &ic->ic_xfer_map);
    195 		if (rv != 0)
    196 			break;
    197 		icp->icp_nccbs++;
    198 		icp_ccb_free(icp, ic);
    199 	}
    200 #ifdef DIAGNOSTIC
    201 	if (icp->icp_nccbs != ICP_NCCBS)
    202 		aprint_error_dev(icp->icp_dv, "%d/%d CCBs usable\n",
    203 		    icp->icp_nccbs, ICP_NCCBS);
    204 #endif
    205 
    206 	/*
    207 	 * Initialize the controller.
    208 	 */
    209 	if (!icp_cmd(icp, ICP_SCREENSERVICE, ICP_INIT, 0, 0, 0)) {
    210 		aprint_error_dev(icp->icp_dv, "screen service init error %d\n",
    211 		    icp->icp_status);
    212 		goto bail_out;
    213 	}
    214 
    215 	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
    216 		aprint_error_dev(icp->icp_dv, "cache service init error %d\n",
    217 		    icp->icp_status);
    218 		goto bail_out;
    219 	}
    220 
    221 	icp_cmd(icp, ICP_CACHESERVICE, ICP_UNFREEZE_IO, 0, 0, 0);
    222 
    223 	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_MOUNT, 0xffff, 1, 0)) {
    224 		aprint_error_dev(icp->icp_dv, "cache service mount error %d\n",
    225 		    icp->icp_status);
    226 		goto bail_out;
    227 	}
    228 
    229 	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
    230 		aprint_error_dev(icp->icp_dv, "cache service post-mount init error %d\n",
    231 		    icp->icp_status);
    232 		goto bail_out;
    233 	}
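         	/*
         	 * The post-mount init reports the host drive count in icp_info
         	 * and the firmware version in icp_service.
         	 */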
    234 	cdev_cnt = (u_int16_t)icp->icp_info;
    235 	icp->icp_fw_vers = icp->icp_service;
    236 
    237 	if (!icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_INIT, 0, 0, 0)) {
    238 		aprint_error_dev(icp->icp_dv, "raw service init error %d\n",
    239 		    icp->icp_status);
    240 		goto bail_out;
    241 	}
    242 
    243 	/*
    244 	 * Set/get raw service features (scatter/gather).
    245 	 */
    246 	feat = 0;
    247 	if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_SET_FEAT, ICP_SCATTER_GATHER,
    248 	    0, 0))
    249 		if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_GET_FEAT, 0, 0, 0))
    250 			feat = icp->icp_info;
    251 
    252 	if ((feat & ICP_SCATTER_GATHER) == 0) {
    253 #ifdef DIAGNOSTIC
    254 		aprint_normal_dev(icp->icp_dv,
    255 		    "scatter/gather not supported (raw service)\n");
    256 #endif
    257 	} else
    258 		icp->icp_features |= ICP_FEAT_RAWSERVICE;
    259 
    260 	/*
    261 	 * Set/get cache service features (scatter/gather).
    262 	 */
    263 	feat = 0;
    264 	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_SET_FEAT, 0,
    265 	    ICP_SCATTER_GATHER, 0))
    266 		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_GET_FEAT, 0, 0, 0))
    267 			feat = icp->icp_info;
    268 
    269 	if ((feat & ICP_SCATTER_GATHER) == 0) {
    270 #ifdef DIAGNOSTIC
    271 		aprint_normal_dev(icp->icp_dv,
    272 		    "scatter/gather not supported (cache service)\n");
    273 #endif
    274 	} else
    275 		icp->icp_features |= ICP_FEAT_CACHESERVICE;
    276 
    277 	/*
    278 	 * Pull some information from the board and dump.
    279 	 */
    280 	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, ICP_BOARD_INFO,
    281 	    ICP_INVALID_CHANNEL, sizeof(struct icp_binfo))) {
     282 		aprint_error_dev(icp->icp_dv, "unable to retrieve board info\n");
    283 		goto bail_out;
    284 	}
    285 	memcpy(&binfo, icp->icp_scr, sizeof(binfo));
    286 
    287 	aprint_normal_dev(icp->icp_dv,
    288 	    "model <%s>, firmware <%s>, %d channel(s), %dMB memory\n",
    289 	    binfo.bi_type_string, binfo.bi_raid_string,
    290 	    binfo.bi_chan_count, le32toh(binfo.bi_memsize) >> 20);
    291 
    292 	/*
    293 	 * Determine the number of devices, and number of openings per
    294 	 * device.
    295 	 */
    296 	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
    297 		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
    298 			if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, j, 0,
    299 			    0))
    300 				continue;
    301 
    302 			icp->icp_cdr[j].cd_size = icp->icp_info;
    303 			if (icp->icp_cdr[j].cd_size != 0)
    304 				icp->icp_ndevs++;
    305 
    306 			if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, j, 0,
    307 			    0))
    308 				icp->icp_cdr[j].cd_type = icp->icp_info;
    309 		}
    310 	}
    311 
    312 	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
    313 		icp->icp_nchan = binfo.bi_chan_count;
    314 		icp->icp_ndevs += icp->icp_nchan;
    315 	}
    316 
    317 	icp_recompute_openings(icp);
    318 
    319 	/*
    320 	 * Attach SCSI channels.
    321 	 */
    322 	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
    323 		struct icp_ioc_version *iv;
    324 		struct icp_rawioc *ri;
    325 		struct icp_getch *gc;
    326 
    327 		iv = (struct icp_ioc_version *)icp->icp_scr;
    328 		iv->iv_version = htole32(ICP_IOC_NEWEST);
    329 		iv->iv_listents = ICP_MAXBUS;
    330 		iv->iv_firstchan = 0;
    331 		iv->iv_lastchan = ICP_MAXBUS - 1;
    332 		iv->iv_listoffset = htole32(sizeof(*iv));
    333 
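         		/*
         		 * Fetch the raw I/O channel descriptor list; each entry
         		 * reports the controller's own ID on that channel.
         		 */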
    334 		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
    335 		    ICP_IOCHAN_RAW_DESC, ICP_INVALID_CHANNEL,
    336 		    sizeof(*iv) + ICP_MAXBUS * sizeof(*ri))) {
    337 			ri = (struct icp_rawioc *)(iv + 1);
    338 			for (j = 0; j < binfo.bi_chan_count; j++, ri++)
    339 				icp->icp_bus_id[j] = ri->ri_procid;
    340 		} else {
    341 			/*
    342 			 * Fall back to the old method.
    343 			 */
    344 			gc = (struct icp_getch *)icp->icp_scr;
    345 
    346 			for (j = 0; j < binfo.bi_chan_count; j++) {
    347 				if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
    348 				    ICP_SCSI_CHAN_CNT | ICP_L_CTRL_PATTERN,
    349 				    ICP_IO_CHANNEL | ICP_INVALID_CHANNEL,
    350 				    sizeof(*gc))) {
     351 					aprint_error_dev(icp->icp_dv,
     352 					    "unable to get channel info\n");
    353 					goto bail_out;
    354 				}
    355 				icp->icp_bus_id[j] = gc->gc_scsiid;
    356 			}
    357 		}
    358 
    359 		for (j = 0; j < binfo.bi_chan_count; j++) {
    360 			if (icp->icp_bus_id[j] > ICP_MAXID_FC)
    361 				icp->icp_bus_id[j] = ICP_MAXID_FC;
    362 
    363 			icpa.icpa_unit = j + ICPA_UNIT_SCSI;
    364 
    365 			locs[ICPCF_UNIT] = j + ICPA_UNIT_SCSI;
    366 
    367 			icp->icp_children[icpa.icpa_unit] =
    368 			    config_found(icp->icp_dv, &icpa, icp_print,
    369 					 CFARG_SUBMATCH, config_stdsubmatch,
    370 					 CFARG_IATTR, "icp",
    371 					 CFARG_LOCATORS, locs,
    372 					 CFARG_EOL);
    373 		}
    374 	}
    375 
    376 	/*
    377 	 * Attach cache devices.
    378 	 */
    379 	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
    380 		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
    381 			if (icp->icp_cdr[j].cd_size == 0)
    382 				continue;
    383 
    384 			icpa.icpa_unit = j;
    385 
    386 			locs[ICPCF_UNIT] = j;
    387 
    388 			icp->icp_children[icpa.icpa_unit] =
    389 			    config_found(icp->icp_dv, &icpa, icp_print,
    390 					 CFARG_SUBMATCH, config_stdsubmatch,
    391 					 CFARG_IATTR, "icp",
    392 					 CFARG_LOCATORS, locs,
    393 					 CFARG_EOL);
    394 		}
    395 	}
    396 
    397 	/*
    398 	 * Start the watchdog.
    399 	 */
    400 	icp_watchdog(icp);
    401 
    402 	/*
    403 	 * Count the controller, and we're done!
    404 	 */
    405 	if (icp_count++ == 0)
    406 		mutex_init(&icp_ioctl_mutex, MUTEX_DEFAULT, IPL_NONE);
    407 
    408 	return (0);
    409 
    410  bail_out:
    411 	if (state > 4)
    412 		for (j = 0; j < i; j++)
    413 			bus_dmamap_destroy(icp->icp_dmat,
    414 			    icp->icp_ccbs[j].ic_xfer_map);
     415 	if (state > 3)
    416 		free(icp->icp_ccbs, M_DEVBUF);
    417 	if (state > 2)
    418 		bus_dmamap_unload(icp->icp_dmat, icp->icp_scr_dmamap);
    419 	if (state > 1)
    420 		bus_dmamem_unmap(icp->icp_dmat, icp->icp_scr,
    421 		    ICP_SCRATCH_SIZE);
    422 	if (state > 0)
    423 		bus_dmamem_free(icp->icp_dmat, icp->icp_scr_seg, nsegs);
    424 	bus_dmamap_destroy(icp->icp_dmat, icp->icp_scr_dmamap);
    425 
    426 	return (1);
    427 }
    428 
    429 void
    430 icp_register_servicecb(struct icp_softc *icp, int unit,
    431     const struct icp_servicecb *cb)
    432 {
    433 
    434 	icp->icp_servicecb[unit] = cb;
    435 }
    436 
    437 void
    438 icp_rescan(struct icp_softc *icp, int unit)
    439 {
    440 	struct icp_attach_args icpa;
    441 	u_int newsize, newtype;
    442 	int locs[ICPCF_NLOCS];
    443 
    444 	/*
     445 	 * NOTE: It is very important that the queue be frozen and that no
     446 	 * commands be running when this is called.  The ioctl mutex must
     447 	 * also be held.
    448 	 */
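         	/*
         	 * A caller is therefore expected to do something roughly like
         	 * the following (a sketch only; see the management ioctl code
         	 * for the real sequence):
         	 *
         	 *	mutex_enter(&icp_ioctl_mutex);
         	 *	if (icp_freeze(icp) == 0) {
         	 *		icp_rescan(icp, unit);
         	 *		icp_unfreeze(icp);
         	 *	}
         	 *	mutex_exit(&icp_ioctl_mutex);
         	 */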
    449 
    450 	KASSERT(icp->icp_qfreeze != 0);
    451 	KASSERT(icp->icp_running == 0);
    452 	KASSERT(unit < ICP_MAX_HDRIVES);
    453 
    454 	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, unit, 0, 0)) {
    455 #ifdef ICP_DEBUG
    456 		printf("%s: rescan: unit %d ICP_INFO failed -> 0x%04x\n",
    457 		    device_xname(icp->icp_dv), unit, icp->icp_status);
    458 #endif
    459 		goto gone;
    460 	}
    461 	if ((newsize = icp->icp_info) == 0) {
    462 #ifdef ICP_DEBUG
    463 		printf("%s: rescan: unit %d has zero size\n",
    464 		    device_xname(icp->icp_dv), unit);
    465 #endif
    466  gone:
    467 		/*
    468 		 * Host drive is no longer present; detach if a child
    469 		 * is currently there.
    470 		 */
    471 		if (icp->icp_cdr[unit].cd_size != 0)
    472 			icp->icp_ndevs--;
    473 		icp->icp_cdr[unit].cd_size = 0;
    474 		if (icp->icp_children[unit] != NULL) {
    475 			(void) config_detach(icp->icp_children[unit],
    476 			    DETACH_FORCE);
    477 			icp->icp_children[unit] = NULL;
    478 		}
    479 		return;
    480 	}
    481 
    482 	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, unit, 0, 0))
    483 		newtype = icp->icp_info;
    484 	else {
    485 #ifdef ICP_DEBUG
    486 		printf("%s: rescan: unit %d ICP_DEVTYPE failed\n",
    487 		    device_xname(icp->icp_dv), unit);
    488 #endif
    489 		newtype = 0;	/* XXX? */
    490 	}
    491 
    492 #ifdef ICP_DEBUG
    493 	printf("%s: rescan: unit %d old %u/%u, new %u/%u\n",
    494 	    device_xname(icp->icp_dv), unit, icp->icp_cdr[unit].cd_size,
    495 	    icp->icp_cdr[unit].cd_type, newsize, newtype);
    496 #endif
    497 
    498 	/*
    499 	 * If the type or size changed, detach any old child (if it exists)
    500 	 * and attach a new one.
    501 	 */
    502 	if (icp->icp_children[unit] == NULL ||
    503 	    newsize != icp->icp_cdr[unit].cd_size ||
    504 	    newtype != icp->icp_cdr[unit].cd_type) {
    505 		if (icp->icp_cdr[unit].cd_size == 0)
    506 			icp->icp_ndevs++;
    507 		icp->icp_cdr[unit].cd_size = newsize;
    508 		icp->icp_cdr[unit].cd_type = newtype;
    509 		if (icp->icp_children[unit] != NULL)
    510 			(void) config_detach(icp->icp_children[unit],
    511 			    DETACH_FORCE);
    512 
    513 		icpa.icpa_unit = unit;
    514 
    515 		locs[ICPCF_UNIT] = unit;
    516 
    517 		icp->icp_children[unit] =
    518 		    config_found(icp->icp_dv, &icpa, icp_print,
    519 				 CFARG_SUBMATCH, config_stdsubmatch,
    520 				 CFARG_IATTR, "icp",
    521 				 CFARG_LOCATORS, locs,
    522 				 CFARG_EOL);
    523 	}
    524 
    525 	icp_recompute_openings(icp);
    526 }
    527 
    528 void
    529 icp_rescan_all(struct icp_softc *icp)
    530 {
    531 	int unit;
    532 	u_int16_t cdev_cnt;
    533 
    534 	/*
    535 	 * This is the old method of rescanning the host drives.  We
    536 	 * start by reinitializing the cache service.
    537 	 */
    538 	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
    539 		printf("%s: unable to re-initialize cache service for rescan\n",
    540 		    device_xname(icp->icp_dv));
    541 		return;
    542 	}
    543 	cdev_cnt = (u_int16_t) icp->icp_info;
    544 
    545 	/* For each host drive, do the new-style rescan. */
    546 	for (unit = 0; unit < cdev_cnt && unit < ICP_MAX_HDRIVES; unit++)
    547 		icp_rescan(icp, unit);
    548 
    549 	/* Now detach anything in the slots after cdev_cnt. */
    550 	for (; unit < ICP_MAX_HDRIVES; unit++) {
    551 		if (icp->icp_cdr[unit].cd_size != 0) {
    552 #ifdef ICP_DEBUG
    553 			printf("%s: rescan all: unit %d < new cdev_cnt (%d)\n",
    554 			    device_xname(icp->icp_dv), unit, cdev_cnt);
    555 #endif
    556 			icp->icp_ndevs--;
    557 			icp->icp_cdr[unit].cd_size = 0;
    558 			if (icp->icp_children[unit] != NULL) {
    559 				(void) config_detach(icp->icp_children[unit],
    560 				    DETACH_FORCE);
    561 				icp->icp_children[unit] = NULL;
    562 			}
    563 		}
    564 	}
    565 
    566 	icp_recompute_openings(icp);
    567 }
    568 
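         /*
          * Divide the usable CCBs evenly among the attached devices:
          * openings = (icp_nccbs - ICP_NCCB_RESERVE) / icp_ndevs.
          * Children are notified through their iscb_openings callbacks.
          */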
    569 void
    570 icp_recompute_openings(struct icp_softc *icp)
    571 {
    572 	int unit, openings;
    573 
    574 	if (icp->icp_ndevs != 0)
    575 		openings =
    576 		    (icp->icp_nccbs - ICP_NCCB_RESERVE) / icp->icp_ndevs;
    577 	else
    578 		openings = 0;
    579 	if (openings == icp->icp_openings)
    580 		return;
    581 	icp->icp_openings = openings;
    582 
    583 #ifdef ICP_DEBUG
    584 	printf("%s: %d device%s, %d openings per device\n",
    585 	    device_xname(icp->icp_dv), icp->icp_ndevs,
    586 	    icp->icp_ndevs == 1 ? "" : "s", icp->icp_openings);
    587 #endif
    588 
    589 	for (unit = 0; unit < ICP_MAX_HDRIVES + ICP_MAXBUS; unit++) {
    590 		if (icp->icp_children[unit] != NULL)
    591 			(*icp->icp_servicecb[unit]->iscb_openings)(
    592 			    icp->icp_children[unit], icp->icp_openings);
    593 	}
    594 }
    595 
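         /*
          * Watchdog: periodically mop up any completions the interrupt
          * handler may have missed and restart the command queue if work
          * is pending.
          */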
    596 void
    597 icp_watchdog(void *cookie)
    598 {
    599 	struct icp_softc *icp;
    600 	int s;
    601 
    602 	icp = cookie;
    603 
    604 	s = splbio();
    605 	icp_intr(icp);
    606 	if (ICP_HAS_WORK(icp))
    607 		icp_ccb_enqueue(icp, NULL);
    608 	splx(s);
    609 
    610 	callout_reset(&icp->icp_wdog_callout, hz * ICP_WATCHDOG_FREQ,
    611 	    icp_watchdog, icp);
    612 }
    613 
    614 int
    615 icp_print(void *aux, const char *pnp)
    616 {
    617 	struct icp_attach_args *icpa;
    618 	const char *str;
    619 
    620 	icpa = (struct icp_attach_args *)aux;
    621 
    622 	if (pnp != NULL) {
    623 		if (icpa->icpa_unit < ICPA_UNIT_SCSI)
    624 			str = "block device";
    625 		else
    626 			str = "SCSI channel";
    627 		aprint_normal("%s at %s", str, pnp);
    628 	}
    629 	aprint_normal(" unit %d", icpa->icpa_unit);
    630 
    631 	return (UNCONF);
    632 }
    633 
    634 int
    635 icp_async_event(struct icp_softc *icp, int service)
    636 {
    637 
    638 	if (service == ICP_SCREENSERVICE) {
    639 		if (icp->icp_status == ICP_S_MSG_REQUEST) {
    640 			/* XXX */
    641 		}
    642 	} else {
    643 		if ((icp->icp_fw_vers & 0xff) >= 0x1a) {
    644 			icp->icp_evt.size = 0;
    645 			icp->icp_evt.eu.async.ionode =
    646 			    device_unit(icp->icp_dv);
    647 			icp->icp_evt.eu.async.status = icp->icp_status;
    648 			/*
    649 			 * Severity and event string are filled in by the
    650 			 * hardware interface interrupt handler.
    651 			 */
    652 			printf("%s: %s\n", device_xname(icp->icp_dv),
    653 			    icp->icp_evt.event_string);
    654 		} else {
    655 			icp->icp_evt.size = sizeof(icp->icp_evt.eu.async);
    656 			icp->icp_evt.eu.async.ionode =
    657 			    device_unit(icp->icp_dv);
    658 			icp->icp_evt.eu.async.service = service;
    659 			icp->icp_evt.eu.async.status = icp->icp_status;
    660 			icp->icp_evt.eu.async.info = icp->icp_info;
    661 			/* XXXJRT FIX THIS */
    662 			*(u_int32_t *) icp->icp_evt.eu.async.scsi_coord =
    663 			    icp->icp_info2;
    664 		}
    665 		icp_store_event(icp, GDT_ES_ASYNC, service, &icp->icp_evt);
    666 	}
    667 
    668 	return (0);
    669 }
    670 
    671 int
    672 icp_intr(void *cookie)
    673 {
    674 	struct icp_softc *icp;
    675 	struct icp_intr_ctx ctx;
    676 	struct icp_ccb *ic;
    677 
    678 	icp = cookie;
    679 
    680 	ctx.istatus = (*icp->icp_get_status)(icp);
    681 	if (!ctx.istatus) {
    682 		icp->icp_status = ICP_S_NO_STATUS;
    683 		return (0);
    684 	}
    685 
    686 	(*icp->icp_intr)(icp, &ctx);
    687 
    688 	icp->icp_status = ctx.cmd_status;
    689 	icp->icp_service = ctx.service;
    690 	icp->icp_info = ctx.info;
    691 	icp->icp_info2 = ctx.info2;
    692 
    693 	switch (ctx.istatus) {
    694 	case ICP_ASYNCINDEX:
    695 		icp_async_event(icp, ctx.service);
    696 		return (1);
    697 
    698 	case ICP_SPEZINDEX:
    699 		aprint_error_dev(icp->icp_dv, "uninitialized or unknown service (%d/%d)\n",
    700 		    ctx.info, ctx.info2);
    701 		icp->icp_evt.size = sizeof(icp->icp_evt.eu.driver);
    702 		icp->icp_evt.eu.driver.ionode = device_unit(icp->icp_dv);
    703 		icp_store_event(icp, GDT_ES_DRIVER, 4, &icp->icp_evt);
    704 		return (1);
    705 	}
    706 
     707 	if ((ctx.istatus - 2) >= icp->icp_nccbs)
    708 		panic("icp_intr: bad command index returned");
    709 
    710 	ic = &icp->icp_ccbs[ctx.istatus - 2];
    711 	ic->ic_status = icp->icp_status;
    712 
    713 	if ((ic->ic_flags & IC_ALLOCED) == 0) {
    714 		/* XXX ICP's "iir" driver just sends an event here. */
    715 		panic("icp_intr: inactive CCB identified");
    716 	}
    717 
    718 	/*
    719 	 * Try to protect ourselves from the running command count already
    720 	 * being 0 (e.g. if a polled command times out).
    721 	 */
    722 	KDASSERT(icp->icp_running != 0);
    723 	if (--icp->icp_running == 0 &&
    724 	    (icp->icp_flags & ICP_F_WAIT_FREEZE) != 0) {
    725 		icp->icp_flags &= ~ICP_F_WAIT_FREEZE;
    726 		wakeup(&icp->icp_qfreeze);
    727 	}
    728 
    729 	switch (icp->icp_status) {
    730 	case ICP_S_BSY:
    731 #ifdef ICP_DEBUG
    732 		printf("%s: ICP_S_BSY received\n", device_xname(icp->icp_dv));
    733 #endif
    734 		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
    735 			SIMPLEQ_INSERT_HEAD(&icp->icp_ucmd_queue, ic, ic_chain);
    736 		else
    737 			SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_queue, ic, ic_chain);
    738 		break;
    739 
    740 	default:
    741 		ic->ic_flags |= IC_COMPLETE;
    742 
    743 		if ((ic->ic_flags & IC_WAITING) != 0)
    744 			wakeup(ic);
    745 		else if (ic->ic_intr != NULL)
    746 			(*ic->ic_intr)(ic);
    747 
    748 		if (ICP_HAS_WORK(icp))
    749 			icp_ccb_enqueue(icp, NULL);
    750 
    751 		break;
    752 	}
    753 
    754 	return (1);
    755 }
    756 
    757 struct icp_ucmd_ctx {
    758 	gdt_ucmd_t *iu_ucmd;
    759 	u_int32_t iu_cnt;
    760 };
    761 
    762 void
    763 icp_ucmd_intr(struct icp_ccb *ic)
    764 {
    765 	struct icp_softc *icp = device_private(ic->ic_dv);
    766 	struct icp_ucmd_ctx *iu = ic->ic_context;
    767 	gdt_ucmd_t *ucmd = iu->iu_ucmd;
    768 
    769 	ucmd->status = icp->icp_status;
    770 	ucmd->info = icp->icp_info;
    771 
    772 	if (iu->iu_cnt != 0) {
    773 		bus_dmamap_sync(icp->icp_dmat,
    774 		    icp->icp_scr_dmamap,
    775 		    ICP_SCRATCH_UCMD, iu->iu_cnt,
    776 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    777 		memcpy(ucmd->data,
    778 		    (char *)icp->icp_scr + ICP_SCRATCH_UCMD, iu->iu_cnt);
    779 	}
    780 
    781 	icp->icp_ucmd_ccb = NULL;
    782 
    783 	ic->ic_flags |= IC_COMPLETE;
    784 	wakeup(ic);
    785 }
    786 
    787 /*
    788  * NOTE: We assume that it is safe to sleep here!
    789  */
    790 int
    791 icp_cmd(struct icp_softc *icp, u_int8_t service, u_int16_t opcode,
    792 	u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
    793 {
    794 	struct icp_ioctlcmd *icmd;
    795 	struct icp_cachecmd *cc;
    796 	struct icp_rawcmd *rc;
    797 	int retries, rv;
    798 	struct icp_ccb *ic;
    799 
    800 	retries = ICP_RETRIES;
    801 
    802 	do {
    803 		ic = icp_ccb_alloc_wait(icp);
    804 		memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
    805 		ic->ic_cmd.cmd_opcode = htole16(opcode);
    806 
    807 		switch (service) {
    808 		case ICP_CACHESERVICE:
    809 			if (opcode == ICP_IOCTL) {
    810 				icmd = &ic->ic_cmd.cmd_packet.ic;
    811 				icmd->ic_subfunc = htole16(arg1);
    812 				icmd->ic_channel = htole32(arg2);
    813 				icmd->ic_bufsize = htole32(arg3);
    814 				icmd->ic_addr =
    815 				    htole32(icp->icp_scr_seg[0].ds_addr);
    816 
    817 				bus_dmamap_sync(icp->icp_dmat,
    818 				    icp->icp_scr_dmamap, 0, arg3,
    819 				    BUS_DMASYNC_PREWRITE |
    820 				    BUS_DMASYNC_PREREAD);
    821 			} else {
    822 				cc = &ic->ic_cmd.cmd_packet.cc;
    823 				cc->cc_deviceno = htole16(arg1);
    824 				cc->cc_blockno = htole32(arg2);
    825 			}
    826 			break;
    827 
    828 		case ICP_SCSIRAWSERVICE:
    829 			rc = &ic->ic_cmd.cmd_packet.rc;
    830 			rc->rc_direction = htole32(arg1);
    831 			rc->rc_bus = arg2;
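         			/* arg3 packs the target ID (low byte) and the LUN (next byte). */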
    832 			rc->rc_target = arg3;
    833 			rc->rc_lun = arg3 >> 8;
    834 			break;
    835 		}
    836 
    837 		ic->ic_service = service;
    838 		ic->ic_cmdlen = sizeof(ic->ic_cmd);
    839 		rv = icp_ccb_poll(icp, ic, 10000);
    840 
    841 		switch (service) {
    842 		case ICP_CACHESERVICE:
    843 			if (opcode == ICP_IOCTL) {
    844 				bus_dmamap_sync(icp->icp_dmat,
    845 				    icp->icp_scr_dmamap, 0, arg3,
    846 				    BUS_DMASYNC_POSTWRITE |
    847 				    BUS_DMASYNC_POSTREAD);
    848 			}
    849 			break;
    850 		}
    851 
    852 		icp_ccb_free(icp, ic);
    853 	} while (rv != 0 && --retries > 0);
    854 
    855 	return (icp->icp_status == ICP_S_OK);
    856 }
    857 
    858 int
    859 icp_ucmd(struct icp_softc *icp, gdt_ucmd_t *ucmd)
    860 {
    861 	struct icp_ccb *ic;
    862 	struct icp_ucmd_ctx iu;
    863 	u_int32_t cnt;
    864 	int error;
    865 
    866 	if (ucmd->service == ICP_CACHESERVICE) {
    867 		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
    868 			cnt = ucmd->command.cmd_packet.ic.ic_bufsize;
    869 			if (cnt > GDT_SCRATCH_SZ) {
    870 				aprint_error_dev(icp->icp_dv, "scratch buffer too small (%d/%d)\n",
    871 				    GDT_SCRATCH_SZ, cnt);
    872 				return (EINVAL);
    873 			}
    874 		} else {
    875 			cnt = ucmd->command.cmd_packet.cc.cc_blockcnt *
    876 			    ICP_SECTOR_SIZE;
    877 			if (cnt > GDT_SCRATCH_SZ) {
    878 				aprint_error_dev(icp->icp_dv, "scratch buffer too small (%d/%d)\n",
    879 				    GDT_SCRATCH_SZ, cnt);
    880 				return (EINVAL);
    881 			}
    882 		}
    883 	} else {
    884 		cnt = ucmd->command.cmd_packet.rc.rc_sdlen +
    885 		    ucmd->command.cmd_packet.rc.rc_sense_len;
    886 		if (cnt > GDT_SCRATCH_SZ) {
    887 			aprint_error_dev(icp->icp_dv, "scratch buffer too small (%d/%d)\n",
    888 			    GDT_SCRATCH_SZ, cnt);
    889 			return (EINVAL);
    890 		}
    891 	}
    892 
    893 	iu.iu_ucmd = ucmd;
    894 	iu.iu_cnt = cnt;
    895 
    896 	ic = icp_ccb_alloc_wait(icp);
    897 	memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
    898 	ic->ic_cmd.cmd_opcode = htole16(ucmd->command.cmd_opcode);
    899 
    900 	if (ucmd->service == ICP_CACHESERVICE) {
    901 		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
    902 			struct icp_ioctlcmd *icmd, *uicmd;
    903 
    904 			icmd = &ic->ic_cmd.cmd_packet.ic;
    905 			uicmd = &ucmd->command.cmd_packet.ic;
    906 
    907 			icmd->ic_subfunc = htole16(uicmd->ic_subfunc);
    908 			icmd->ic_channel = htole32(uicmd->ic_channel);
    909 			icmd->ic_bufsize = htole32(uicmd->ic_bufsize);
    910 			icmd->ic_addr =
    911 			    htole32(icp->icp_scr_seg[0].ds_addr +
    912 				    ICP_SCRATCH_UCMD);
    913 		} else {
    914 			struct icp_cachecmd *cc, *ucc;
    915 
    916 			cc = &ic->ic_cmd.cmd_packet.cc;
    917 			ucc = &ucmd->command.cmd_packet.cc;
    918 
    919 			cc->cc_deviceno = htole16(ucc->cc_deviceno);
    920 			cc->cc_blockno = htole32(ucc->cc_blockno);
    921 			cc->cc_blockcnt = htole32(ucc->cc_blockcnt);
    922 			cc->cc_addr = htole32(0xffffffffU);
    923 			cc->cc_nsgent = htole32(1);
    924 			cc->cc_sg[0].sg_addr =
    925 			    htole32(icp->icp_scr_seg[0].ds_addr +
    926 				    ICP_SCRATCH_UCMD);
    927 			cc->cc_sg[0].sg_len = htole32(cnt);
    928 		}
    929 	} else {
    930 		struct icp_rawcmd *rc, *urc;
    931 
    932 		rc = &ic->ic_cmd.cmd_packet.rc;
    933 		urc = &ucmd->command.cmd_packet.rc;
    934 
    935 		rc->rc_direction = htole32(urc->rc_direction);
    936 		rc->rc_sdata = htole32(0xffffffffU);
    937 		rc->rc_sdlen = htole32(urc->rc_sdlen);
    938 		rc->rc_clen = htole32(urc->rc_clen);
    939 		memcpy(rc->rc_cdb, urc->rc_cdb, sizeof(rc->rc_cdb));
    940 		rc->rc_target = urc->rc_target;
    941 		rc->rc_lun = urc->rc_lun;
    942 		rc->rc_bus = urc->rc_bus;
    943 		rc->rc_sense_len = htole32(urc->rc_sense_len);
    944 		rc->rc_sense_addr =
    945 		    htole32(icp->icp_scr_seg[0].ds_addr +
    946 			    ICP_SCRATCH_UCMD + urc->rc_sdlen);
    947 		rc->rc_nsgent = htole32(1);
    948 		rc->rc_sg[0].sg_addr =
    949 		    htole32(icp->icp_scr_seg[0].ds_addr + ICP_SCRATCH_UCMD);
    950 		rc->rc_sg[0].sg_len = htole32(cnt - urc->rc_sense_len);
    951 	}
    952 
    953 	ic->ic_service = ucmd->service;
    954 	ic->ic_cmdlen = sizeof(ic->ic_cmd);
    955 	ic->ic_context = &iu;
    956 
    957 	/*
    958 	 * XXX What units are ucmd->timeout in?  Until we know, we
    959 	 * XXX just pull a number out of thin air.
    960 	 */
    961 	if (__predict_false((error = icp_ccb_wait_user(icp, ic, 30000)) != 0))
    962 		aprint_error_dev(icp->icp_dv, "error %d waiting for ucmd to complete\n",
    963 		    error);
    964 
    965 	/* icp_ucmd_intr() has updated ucmd. */
    966 	icp_ccb_free(icp, ic);
    967 
    968 	return (error);
    969 }
    970 
    971 struct icp_ccb *
    972 icp_ccb_alloc(struct icp_softc *icp)
    973 {
    974 	struct icp_ccb *ic;
    975 	int s;
    976 
    977 	s = splbio();
    978 	if (__predict_false((ic =
    979 			     SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL)) {
    980 		splx(s);
    981 		return (NULL);
    982 	}
    983 	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
    984 	splx(s);
    985 
    986 	ic->ic_flags = IC_ALLOCED;
    987 	return (ic);
    988 }
    989 
    990 struct icp_ccb *
    991 icp_ccb_alloc_wait(struct icp_softc *icp)
    992 {
    993 	struct icp_ccb *ic;
    994 	int s;
    995 
    996 	s = splbio();
    997 	while ((ic = SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL) {
    998 		icp->icp_flags |= ICP_F_WAIT_CCB;
    999 		(void) tsleep(&icp->icp_ccb_freelist, PRIBIO, "icpccb", 0);
   1000 	}
   1001 	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
   1002 	splx(s);
   1003 
   1004 	ic->ic_flags = IC_ALLOCED;
   1005 	return (ic);
   1006 }
   1007 
   1008 void
   1009 icp_ccb_free(struct icp_softc *icp, struct icp_ccb *ic)
   1010 {
   1011 	int s;
   1012 
   1013 	s = splbio();
   1014 	ic->ic_flags = 0;
   1015 	ic->ic_intr = NULL;
   1016 	SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_freelist, ic, ic_chain);
   1017 	if (__predict_false((icp->icp_flags & ICP_F_WAIT_CCB) != 0)) {
   1018 		icp->icp_flags &= ~ICP_F_WAIT_CCB;
   1019 		wakeup(&icp->icp_ccb_freelist);
   1020 	}
   1021 	splx(s);
   1022 }
   1023 
   1024 void
   1025 icp_ccb_enqueue(struct icp_softc *icp, struct icp_ccb *ic)
   1026 {
   1027 	int s;
   1028 
   1029 	s = splbio();
   1030 
   1031 	if (ic != NULL) {
   1032 		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
   1033 			SIMPLEQ_INSERT_TAIL(&icp->icp_ucmd_queue, ic, ic_chain);
   1034 		else
   1035 			SIMPLEQ_INSERT_TAIL(&icp->icp_ccb_queue, ic, ic_chain);
   1036 	}
   1037 
    1038 	while (icp->icp_qfreeze == 0) {
   1039 		if (__predict_false((ic =
   1040 			    SIMPLEQ_FIRST(&icp->icp_ucmd_queue)) != NULL)) {
   1041 			struct icp_ucmd_ctx *iu = ic->ic_context;
   1042 			gdt_ucmd_t *ucmd = iu->iu_ucmd;
   1043 
   1044 			/*
   1045 			 * All user-generated commands share the same
   1046 			 * scratch space, so if one is already running,
   1047 			 * we have to stall the command queue.
   1048 			 */
   1049 			if (icp->icp_ucmd_ccb != NULL)
   1050 				break;
   1051 			if ((*icp->icp_test_busy)(icp))
   1052 				break;
   1053 			icp->icp_ucmd_ccb = ic;
   1054 
   1055 			if (iu->iu_cnt != 0) {
   1056 				memcpy((char *)icp->icp_scr + ICP_SCRATCH_UCMD,
   1057 				    ucmd->data, iu->iu_cnt);
   1058 				bus_dmamap_sync(icp->icp_dmat,
   1059 				    icp->icp_scr_dmamap,
   1060 				    ICP_SCRATCH_UCMD, iu->iu_cnt,
   1061 				    BUS_DMASYNC_PREREAD |
   1062 				    BUS_DMASYNC_PREWRITE);
   1063 			}
   1064 		} else if (__predict_true((ic =
   1065 				SIMPLEQ_FIRST(&icp->icp_ccb_queue)) != NULL)) {
   1066 			if ((*icp->icp_test_busy)(icp))
   1067 				break;
   1068 		} else {
   1069 			/* no command found */
   1070 			break;
   1071 		}
   1072 		icp_ccb_submit(icp, ic);
   1073 		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
   1074 			SIMPLEQ_REMOVE_HEAD(&icp->icp_ucmd_queue, ic_chain);
   1075 		else
   1076 			SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_queue, ic_chain);
   1077 	}
   1078 
   1079 	splx(s);
   1080 }
   1081 
   1082 int
   1083 icp_ccb_map(struct icp_softc *icp, struct icp_ccb *ic, void *data, int size,
   1084 	    int dir)
   1085 {
   1086 	struct icp_sg *sg;
   1087 	int nsegs, i, rv;
   1088 	bus_dmamap_t xfer;
   1089 
   1090 	xfer = ic->ic_xfer_map;
   1091 
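         	/*
         	 * Load the transfer map and translate the resulting DMA segments
         	 * into the controller's little-endian scatter/gather format.
         	 * Single-segment transfers may omit the SG list.
         	 */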
   1092 	rv = bus_dmamap_load(icp->icp_dmat, xfer, data, size, NULL,
   1093 	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
   1094 	    ((dir & IC_XFER_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
   1095 	if (rv != 0)
   1096 		return (rv);
   1097 
   1098 	nsegs = xfer->dm_nsegs;
   1099 	ic->ic_xfer_size = size;
   1100 	ic->ic_nsgent = nsegs;
   1101 	ic->ic_flags |= dir;
   1102 	sg = ic->ic_sg;
   1103 
   1104 	if (sg != NULL) {
   1105 		for (i = 0; i < nsegs; i++, sg++) {
   1106 			sg->sg_addr = htole32(xfer->dm_segs[i].ds_addr);
   1107 			sg->sg_len = htole32(xfer->dm_segs[i].ds_len);
   1108 		}
   1109 	} else if (nsegs > 1)
   1110 		panic("icp_ccb_map: no SG list specified, but nsegs > 1");
   1111 
   1112 	if ((dir & IC_XFER_OUT) != 0)
   1113 		i = BUS_DMASYNC_PREWRITE;
   1114 	else /* if ((dir & IC_XFER_IN) != 0) */
   1115 		i = BUS_DMASYNC_PREREAD;
   1116 
   1117 	bus_dmamap_sync(icp->icp_dmat, xfer, 0, ic->ic_xfer_size, i);
   1118 	return (0);
   1119 }
   1120 
   1121 void
   1122 icp_ccb_unmap(struct icp_softc *icp, struct icp_ccb *ic)
   1123 {
   1124 	int i;
   1125 
   1126 	if ((ic->ic_flags & IC_XFER_OUT) != 0)
   1127 		i = BUS_DMASYNC_POSTWRITE;
   1128 	else /* if ((ic->ic_flags & IC_XFER_IN) != 0) */
   1129 		i = BUS_DMASYNC_POSTREAD;
   1130 
   1131 	bus_dmamap_sync(icp->icp_dmat, ic->ic_xfer_map, 0, ic->ic_xfer_size, i);
   1132 	bus_dmamap_unload(icp->icp_dmat, ic->ic_xfer_map);
   1133 }
   1134 
   1135 int
   1136 icp_ccb_poll(struct icp_softc *icp, struct icp_ccb *ic, int timo)
   1137 {
   1138 	int s, rv;
   1139 
   1140 	s = splbio();
   1141 
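         	/*
         	 * Spin in 10us steps for at most ICP_BUSY_WAIT_MS ms.  Note that
         	 * this reuses "timo" as the spin counter; the caller's
         	 * millisecond argument is overwritten here.
         	 */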
   1142 	for (timo = ICP_BUSY_WAIT_MS * 100; timo != 0; timo--) {
   1143 		if (!(*icp->icp_test_busy)(icp))
   1144 			break;
   1145 		DELAY(10);
   1146 	}
    1147 	if (timo == 0) {
    1148 		printf("%s: submit: busy\n", device_xname(icp->icp_dv));
         		splx(s);	/* don't leak the raised IPL on this path */
    1149 		return (EAGAIN);
    1150 	}
   1151 
   1152 	icp_ccb_submit(icp, ic);
   1153 
   1154 	if (cold) {
   1155 		for (timo *= 10; timo != 0; timo--) {
   1156 			DELAY(100);
   1157 			icp_intr(icp);
   1158 			if ((ic->ic_flags & IC_COMPLETE) != 0)
   1159 				break;
   1160 		}
   1161 	} else {
   1162 		ic->ic_flags |= IC_WAITING;
   1163 		while ((ic->ic_flags & IC_COMPLETE) == 0) {
   1164 			if ((rv = tsleep(ic, PRIBIO, "icpwccb",
   1165 					 mstohz(timo))) != 0) {
   1166 				timo = 0;
   1167 				break;
   1168 			}
   1169 		}
   1170 	}
   1171 
   1172 	if (timo != 0) {
   1173 		if (ic->ic_status != ICP_S_OK) {
   1174 #ifdef ICP_DEBUG
   1175 			printf("%s: request failed; status=0x%04x\n",
   1176 			    device_xname(icp->icp_dv), ic->ic_status);
   1177 #endif
   1178 			rv = EIO;
   1179 		} else
   1180 			rv = 0;
   1181 	} else {
   1182 		aprint_error_dev(icp->icp_dv, "command timed out\n");
   1183 		rv = EIO;
   1184 	}
   1185 
   1186 	while ((*icp->icp_test_busy)(icp) != 0)
   1187 		DELAY(10);
   1188 
   1189 	splx(s);
   1190 
   1191 	return (rv);
   1192 }
   1193 
   1194 int
   1195 icp_ccb_wait(struct icp_softc *icp, struct icp_ccb *ic, int timo)
   1196 {
   1197 	int s, rv;
   1198 
   1199 	ic->ic_flags |= IC_WAITING;
   1200 
   1201 	s = splbio();
   1202 	icp_ccb_enqueue(icp, ic);
   1203 	while ((ic->ic_flags & IC_COMPLETE) == 0) {
   1204 		if ((rv = tsleep(ic, PRIBIO, "icpwccb", mstohz(timo))) != 0) {
   1205 			splx(s);
   1206 			return (rv);
   1207 		}
   1208 	}
   1209 	splx(s);
   1210 
   1211 	if (ic->ic_status != ICP_S_OK) {
   1212 		aprint_error_dev(icp->icp_dv, "command failed; status=%x\n",
   1213 		    ic->ic_status);
   1214 		return (EIO);
   1215 	}
   1216 
   1217 	return (0);
   1218 }
   1219 
   1220 int
   1221 icp_ccb_wait_user(struct icp_softc *icp, struct icp_ccb *ic, int timo)
   1222 {
   1223 	int s, rv;
   1224 
   1225 	ic->ic_dv = icp->icp_dv;
   1226 	ic->ic_intr = icp_ucmd_intr;
   1227 	ic->ic_flags |= IC_UCMD;
   1228 
   1229 	s = splbio();
   1230 	icp_ccb_enqueue(icp, ic);
   1231 	while ((ic->ic_flags & IC_COMPLETE) == 0) {
   1232 		if ((rv = tsleep(ic, PRIBIO, "icpwuccb", mstohz(timo))) != 0) {
   1233 			splx(s);
   1234 			return (rv);
   1235 		}
   1236 	}
   1237 	splx(s);
   1238 
   1239 	return (0);
   1240 }
   1241 
   1242 void
   1243 icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic)
   1244 {
   1245 
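         	/* Round the command length up to a multiple of 4 bytes. */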
   1246 	ic->ic_cmdlen = (ic->ic_cmdlen + 3) & ~3;
   1247 
   1248 	(*icp->icp_set_sema0)(icp);
   1249 	DELAY(10);
   1250 
   1251 	ic->ic_cmd.cmd_boardnode = htole32(ICP_LOCALBOARD);
   1252 	ic->ic_cmd.cmd_cmdindex = htole32(ic->ic_ident);
   1253 
   1254 	icp->icp_running++;
   1255 
   1256 	(*icp->icp_copy_cmd)(icp, ic);
   1257 	(*icp->icp_release_event)(icp, ic);
   1258 }
   1259 
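         /*
          * Freeze the command queue: bump the freeze count and, for the first
          * freezer, wait for all running commands to drain.  An interrupted
          * sleep backs the freeze out again.  Paired with icp_unfreeze().
          */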
   1260 int
   1261 icp_freeze(struct icp_softc *icp)
   1262 {
   1263 	int s, error = 0;
   1264 
   1265 	s = splbio();
   1266 	if (icp->icp_qfreeze++ == 0) {
   1267 		while (icp->icp_running != 0) {
   1268 			icp->icp_flags |= ICP_F_WAIT_FREEZE;
   1269 			error = tsleep(&icp->icp_qfreeze, PRIBIO|PCATCH,
   1270 			    "icpqfrz", 0);
   1271 			if (error != 0 && --icp->icp_qfreeze == 0 &&
   1272 			    ICP_HAS_WORK(icp)) {
   1273 				icp_ccb_enqueue(icp, NULL);
   1274 				break;
   1275 			}
   1276 		}
   1277 	}
   1278 	splx(s);
   1279 
   1280 	return (error);
   1281 }
   1282 
   1283 void
   1284 icp_unfreeze(struct icp_softc *icp)
   1285 {
   1286 	int s;
   1287 
   1288 	s = splbio();
   1289 	KDASSERT(icp->icp_qfreeze != 0);
   1290 	if (--icp->icp_qfreeze == 0 && ICP_HAS_WORK(icp))
   1291 		icp_ccb_enqueue(icp, NULL);
   1292 	splx(s);
   1293 }
   1294 
   1295 /* XXX Global - should be per-controller? XXX */
   1296 static gdt_evt_str icp_event_buffer[ICP_MAX_EVENTS];
   1297 static int icp_event_oldidx;
   1298 static int icp_event_lastidx;
   1299 
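         /*
          * The event buffer is a ring: icp_event_oldidx is the oldest stored
          * entry, icp_event_lastidx the newest.  Consecutive identical events
          * are coalesced by bumping same_count on the existing entry.
          */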
   1300 gdt_evt_str *
   1301 icp_store_event(struct icp_softc *icp, u_int16_t source, u_int16_t idx,
   1302     gdt_evt_data *evt)
   1303 {
   1304 	gdt_evt_str *e;
   1305 
   1306 	/* no source == no event */
   1307 	if (source == 0)
   1308 		return (NULL);
   1309 
   1310 	e = &icp_event_buffer[icp_event_lastidx];
   1311 	if (e->event_source == source && e->event_idx == idx &&
   1312 	    ((evt->size != 0 && e->event_data.size != 0 &&
   1313 	      memcmp(&e->event_data.eu, &evt->eu, evt->size) == 0) ||
   1314 	     (evt->size == 0 && e->event_data.size == 0 &&
   1315 	      strcmp((char *) e->event_data.event_string,
   1316 	      	     (char *) evt->event_string) == 0))) {
   1317 		e->last_stamp = time_second;
   1318 		e->same_count++;
   1319 	} else {
   1320 		if (icp_event_buffer[icp_event_lastidx].event_source != 0) {
   1321 			icp_event_lastidx++;
   1322 			if (icp_event_lastidx == ICP_MAX_EVENTS)
   1323 				icp_event_lastidx = 0;
   1324 			if (icp_event_lastidx == icp_event_oldidx) {
   1325 				icp_event_oldidx++;
   1326 				if (icp_event_oldidx == ICP_MAX_EVENTS)
   1327 					icp_event_oldidx = 0;
   1328 			}
   1329 		}
   1330 		e = &icp_event_buffer[icp_event_lastidx];
   1331 		e->event_source = source;
   1332 		e->event_idx = idx;
   1333 		e->first_stamp = e->last_stamp = time_second;
   1334 		e->same_count = 1;
   1335 		e->event_data = *evt;
   1336 		e->application = 0;
   1337 	}
   1338 	return (e);
   1339 }
   1340 
   1341 int
   1342 icp_read_event(struct icp_softc *icp, int handle, gdt_evt_str *estr)
   1343 {
   1344 	gdt_evt_str *e;
   1345 	int eindex, s;
   1346 
   1347 	s = splbio();
   1348 
   1349 	if (handle == -1)
   1350 		eindex = icp_event_oldidx;
   1351 	else
   1352 		eindex = handle;
   1353 
   1354 	estr->event_source = 0;
   1355 
   1356 	if (eindex < 0 || eindex >= ICP_MAX_EVENTS) {
   1357 		splx(s);
   1358 		return (eindex);
   1359 	}
   1360 
   1361 	e = &icp_event_buffer[eindex];
   1362 	if (e->event_source != 0) {
   1363 		if (eindex != icp_event_lastidx) {
   1364 			eindex++;
   1365 			if (eindex == ICP_MAX_EVENTS)
   1366 				eindex = 0;
   1367 		} else
   1368 			eindex = -1;
   1369 		memcpy(estr, e, sizeof(gdt_evt_str));
   1370 	}
   1371 
   1372 	splx(s);
   1373 
   1374 	return (eindex);
   1375 }
   1376 
   1377 void
   1378 icp_readapp_event(struct icp_softc *icp, u_int8_t application,
   1379     gdt_evt_str *estr)
   1380 {
   1381 	gdt_evt_str *e;
   1382 	int found = 0, eindex, s;
   1383 
   1384 	s = splbio();
   1385 
   1386 	eindex = icp_event_oldidx;
   1387 	for (;;) {
   1388 		e = &icp_event_buffer[eindex];
   1389 		if (e->event_source == 0)
   1390 			break;
   1391 		if ((e->application & application) == 0) {
   1392 			e->application |= application;
   1393 			found = 1;
   1394 			break;
   1395 		}
   1396 		if (eindex == icp_event_lastidx)
   1397 			break;
   1398 		eindex++;
   1399 		if (eindex == ICP_MAX_EVENTS)
   1400 			eindex = 0;
   1401 	}
   1402 	if (found)
   1403 		memcpy(estr, e, sizeof(gdt_evt_str));
   1404 	else
   1405 		estr->event_source = 0;
   1406 
   1407 	splx(s);
   1408 }
   1409 
   1410 void
   1411 icp_clear_events(struct icp_softc *icp)
   1412 {
   1413 	int s;
   1414 
   1415 	s = splbio();
   1416 	icp_event_oldidx = icp_event_lastidx = 0;
   1417 	memset(icp_event_buffer, 0, sizeof(icp_event_buffer));
   1418 	splx(s);
   1419 }
   1420