/*	$NetBSD: icp.c,v 1.35 2021/04/24 23:36:55 thorpej Exp $	*/

/*-
 * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, and by Jason R. Thorpe of Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1999, 2000 Niklas Hallqvist.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Niklas Hallqvist.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from OpenBSD: gdt_common.c,v 1.12 2001/07/04 06:43:18 niklas Exp
 */

/*
 * This driver would not have been written if it were not for the hardware
 * donations from both ICP-Vortex and öko.neT.  I want to thank them for
 * their support.
 *
 * Re-worked for NetBSD by Andrew Doran.  Test hardware kindly supplied by
 * Intel.
 *
 * Support for the ICP-Vortex management tools added by
 * Jason R. Thorpe of Wasabi Systems, Inc., based on code
 * provided by Achim Leubner <achim.leubner@intel.com>.
 *
 * Additional support for dynamic rescan of cacheservice drives by
 * Jason R. Thorpe of Wasabi Systems, Inc.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: icp.c,v 1.35 2021/04/24 23:36:55 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/disk.h>

#include <sys/bswap.h>
#include <sys/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/icpreg.h>
#include <dev/ic/icpvar.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include "locators.h"

int	icp_async_event(struct icp_softc *, int);
void	icp_ccb_submit(struct icp_softc *, struct icp_ccb *);
void	icp_chain(struct icp_softc *);
int	icp_print(void *, const char *);
void	icp_watchdog(void *);
void	icp_ucmd_intr(struct icp_ccb *);
void	icp_recompute_openings(struct icp_softc *);

int icp_count;	/* total # of controllers, for ioctl interface */

/*
 * Statistics for the ioctl interface to query.
 *
 * XXX Global.  They should probably be made per-controller
 * XXX at some point.
 */
gdt_statist_t icp_stats;

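/*
 * Common attachment glue, called by a bus front-end once it has mapped
 * the controller registers, filled in the hardware access methods in
 * the softc, and established the interrupt handler.  A rough sketch of
 * the expected calling sequence; details vary by front-end (see e.g.
 * icp_pci.c for the real thing):
 *
 *	icp->icp_dv = self;
 *	icp->icp_dmat = pa->pa_dmat;
 *	icp->icp_get_status = ...;		(and the other methods)
 *	pci_intr_establish(pc, ih, IPL_BIO, icp_intr, icp);
 *	if (icp_init(icp, intrstr) != 0)
 *		return;				(attach failed)
 */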
int
icp_init(struct icp_softc *icp, const char *intrstr)
{
	struct icp_attach_args icpa;
	struct icp_binfo binfo;
	struct icp_ccb *ic;
	u_int16_t cdev_cnt;
	int i, j, state, feat, nsegs, rv;
	int locs[ICPCF_NLOCS];

	state = 0;

	if (intrstr != NULL)
		aprint_normal_dev(icp->icp_dv, "interrupting at %s\n",
		    intrstr);

	SIMPLEQ_INIT(&icp->icp_ccb_queue);
	SIMPLEQ_INIT(&icp->icp_ccb_freelist);
	SIMPLEQ_INIT(&icp->icp_ucmd_queue);
	callout_init(&icp->icp_wdog_callout, 0);

	/*
	 * Allocate a scratch area.
	 */
	if (bus_dmamap_create(icp->icp_dmat, ICP_SCRATCH_SIZE, 1,
	    ICP_SCRATCH_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &icp->icp_scr_dmamap) != 0) {
		aprint_error_dev(icp->icp_dv, "cannot create scratch dmamap\n");
		return (1);
	}
	state++;

	if (bus_dmamem_alloc(icp->icp_dmat, ICP_SCRATCH_SIZE, PAGE_SIZE, 0,
	    icp->icp_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(icp->icp_dv, "cannot alloc scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(icp->icp_dmat, icp->icp_scr_seg, nsegs,
	    ICP_SCRATCH_SIZE, &icp->icp_scr, 0)) {
		aprint_error_dev(icp->icp_dv, "cannot map scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(icp->icp_dmat, icp->icp_scr_dmamap, icp->icp_scr,
	    ICP_SCRATCH_SIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(icp->icp_dv, "cannot load scratch dmamap\n");
		goto bail_out;
	}
	state++;
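
	/*
	 * At this point the scratch area is fully set up, via the usual
	 * four-step bus_dma(9) sequence: create the DMA map, allocate
	 * the memory, map it into kernel virtual address space, and load
	 * it into the map.  The "state" counter records how far we got,
	 * so bail_out below can unwind in reverse order.
	 */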

	/*
	 * Allocate and initialize the command control blocks.
	 */
	ic = malloc(sizeof(*ic) * ICP_NCCBS, M_DEVBUF, M_WAITOK | M_ZERO);
	icp->icp_ccbs = ic;
	state++;

	for (i = 0; i < ICP_NCCBS; i++, ic++) {
		/*
		 * The first two command indexes have special meanings
		 * (ICP_ASYNCINDEX and ICP_SPEZINDEX; see icp_intr()), so
		 * we can't use them.
		 */
		ic->ic_ident = i + 2;
		rv = bus_dmamap_create(icp->icp_dmat, ICP_MAX_XFER,
		    ICP_MAXSG, ICP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ic->ic_xfer_map);
		if (rv != 0)
			break;
		icp->icp_nccbs++;
		icp_ccb_free(icp, ic);
	}
#ifdef DIAGNOSTIC
	if (icp->icp_nccbs != ICP_NCCBS)
		aprint_error_dev(icp->icp_dv, "%d/%d CCBs usable\n",
		    icp->icp_nccbs, ICP_NCCBS);
#endif

	/*
	 * Initialize the controller.
	 */
	if (!icp_cmd(icp, ICP_SCREENSERVICE, ICP_INIT, 0, 0, 0)) {
		aprint_error_dev(icp->icp_dv, "screen service init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		aprint_error_dev(icp->icp_dv, "cache service init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	icp_cmd(icp, ICP_CACHESERVICE, ICP_UNFREEZE_IO, 0, 0, 0);

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_MOUNT, 0xffff, 1, 0)) {
		aprint_error_dev(icp->icp_dv, "cache service mount error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		aprint_error_dev(icp->icp_dv,
		    "cache service post-mount init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}
	cdev_cnt = (u_int16_t)icp->icp_info;
	icp->icp_fw_vers = icp->icp_service;

	if (!icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_INIT, 0, 0, 0)) {
		aprint_error_dev(icp->icp_dv, "raw service init error %d\n",
		    icp->icp_status);
		goto bail_out;
	}

	/*
	 * Set/get raw service features (scatter/gather).
	 */
	feat = 0;
	if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_SET_FEAT, ICP_SCATTER_GATHER,
	    0, 0))
		if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_GET_FEAT, 0, 0, 0))
			feat = icp->icp_info;

	if ((feat & ICP_SCATTER_GATHER) == 0) {
#ifdef DIAGNOSTIC
		aprint_normal_dev(icp->icp_dv,
		    "scatter/gather not supported (raw service)\n");
#endif
	} else
		icp->icp_features |= ICP_FEAT_RAWSERVICE;

	/*
	 * Set/get cache service features (scatter/gather).
	 */
	feat = 0;
	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_SET_FEAT, 0,
	    ICP_SCATTER_GATHER, 0))
		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_GET_FEAT, 0, 0, 0))
			feat = icp->icp_info;

	if ((feat & ICP_SCATTER_GATHER) == 0) {
#ifdef DIAGNOSTIC
		aprint_normal_dev(icp->icp_dv,
		    "scatter/gather not supported (cache service)\n");
#endif
	} else
		icp->icp_features |= ICP_FEAT_CACHESERVICE;

	/*
	 * Pull some information from the board and dump.
	 */
	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, ICP_BOARD_INFO,
	    ICP_INVALID_CHANNEL, sizeof(struct icp_binfo))) {
		aprint_error_dev(icp->icp_dv, "unable to retrieve board info\n");
		goto bail_out;
	}
	memcpy(&binfo, icp->icp_scr, sizeof(binfo));

	aprint_normal_dev(icp->icp_dv,
	    "model <%s>, firmware <%s>, %d channel(s), %dMB memory\n",
	    binfo.bi_type_string, binfo.bi_raid_string,
	    binfo.bi_chan_count, le32toh(binfo.bi_memsize) >> 20);

	/*
	 * Determine the number of devices, and number of openings per
	 * device.
	 */
	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
			if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, j, 0,
			    0))
				continue;

			icp->icp_cdr[j].cd_size = icp->icp_info;
			if (icp->icp_cdr[j].cd_size != 0)
				icp->icp_ndevs++;

			if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, j, 0,
			    0))
				icp->icp_cdr[j].cd_type = icp->icp_info;
		}
	}

	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
		icp->icp_nchan = binfo.bi_chan_count;
		icp->icp_ndevs += icp->icp_nchan;
	}

	icp_recompute_openings(icp);

	/*
	 * Attach SCSI channels.
	 */
	if (icp->icp_features & ICP_FEAT_RAWSERVICE) {
		struct icp_ioc_version *iv;
		struct icp_rawioc *ri;
		struct icp_getch *gc;

		iv = (struct icp_ioc_version *)icp->icp_scr;
		iv->iv_version = htole32(ICP_IOC_NEWEST);
		iv->iv_listents = ICP_MAXBUS;
		iv->iv_firstchan = 0;
		iv->iv_lastchan = ICP_MAXBUS - 1;
		iv->iv_listoffset = htole32(sizeof(*iv));

		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
		    ICP_IOCHAN_RAW_DESC, ICP_INVALID_CHANNEL,
		    sizeof(*iv) + ICP_MAXBUS * sizeof(*ri))) {
			ri = (struct icp_rawioc *)(iv + 1);
			for (j = 0; j < binfo.bi_chan_count; j++, ri++)
				icp->icp_bus_id[j] = ri->ri_procid;
		} else {
			/*
			 * Fall back to the old method.
			 */
			gc = (struct icp_getch *)icp->icp_scr;

			for (j = 0; j < binfo.bi_chan_count; j++) {
				if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
				    ICP_SCSI_CHAN_CNT | ICP_L_CTRL_PATTERN,
				    ICP_IO_CHANNEL | ICP_INVALID_CHANNEL,
				    sizeof(*gc))) {
					aprint_error_dev(icp->icp_dv,
					    "unable to get chan info\n");
					goto bail_out;
				}
				icp->icp_bus_id[j] = gc->gc_scsiid;
			}
		}

		for (j = 0; j < binfo.bi_chan_count; j++) {
			if (icp->icp_bus_id[j] > ICP_MAXID_FC)
				icp->icp_bus_id[j] = ICP_MAXID_FC;

			icpa.icpa_unit = j + ICPA_UNIT_SCSI;

			locs[ICPCF_UNIT] = j + ICPA_UNIT_SCSI;

			icp->icp_children[icpa.icpa_unit] =
			    config_found(icp->icp_dv, &icpa, icp_print,
				CFARG_SUBMATCH, config_stdsubmatch,
				CFARG_LOCATORS, locs,
				CFARG_EOL);
		}
	}

	/*
	 * Attach cache devices.
	 */
	if (icp->icp_features & ICP_FEAT_CACHESERVICE) {
		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
			if (icp->icp_cdr[j].cd_size == 0)
				continue;

			icpa.icpa_unit = j;

			locs[ICPCF_UNIT] = j;

			icp->icp_children[icpa.icpa_unit] =
			    config_found(icp->icp_dv, &icpa, icp_print,
				CFARG_SUBMATCH, config_stdsubmatch,
				CFARG_LOCATORS, locs,
				CFARG_EOL);
		}
	}

	/*
	 * Start the watchdog.
	 */
	icp_watchdog(icp);

	/*
	 * Count the controller, and we're done!
	 */
	if (icp_count++ == 0)
		mutex_init(&icp_ioctl_mutex, MUTEX_DEFAULT, IPL_NONE);

	return (0);

 bail_out:
	/*
	 * Unwind in the reverse order of the state++ steps above; the
	 * CCB array and its transfer maps exist only once state > 4.
	 */
	if (state > 4) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(icp->icp_dmat,
			    icp->icp_ccbs[j].ic_xfer_map);
		free(icp->icp_ccbs, M_DEVBUF);
	}
	if (state > 3)
		bus_dmamap_unload(icp->icp_dmat, icp->icp_scr_dmamap);
	if (state > 2)
		bus_dmamem_unmap(icp->icp_dmat, icp->icp_scr,
		    ICP_SCRATCH_SIZE);
	if (state > 1)
		bus_dmamem_free(icp->icp_dmat, icp->icp_scr_seg, nsegs);
	bus_dmamap_destroy(icp->icp_dmat, icp->icp_scr_dmamap);

	return (1);
}

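/*
 * Register the per-unit service callbacks for a child device.  The
 * only callback at present is iscb_openings, which
 * icp_recompute_openings() uses to tell each child how many commands
 * it may have outstanding.
 */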
void
icp_register_servicecb(struct icp_softc *icp, int unit,
    const struct icp_servicecb *cb)
{

	icp->icp_servicecb[unit] = cb;
}

void
icp_rescan(struct icp_softc *icp, int unit)
{
	struct icp_attach_args icpa;
	u_int newsize, newtype;
	int locs[ICPCF_NLOCS];

	/*
	 * NOTE: It is very important that the queue be frozen and not
	 * commands running when this is called.  The ioctl mutex must
	 * also be held.
	 */

	KASSERT(icp->icp_qfreeze != 0);
	KASSERT(icp->icp_running == 0);
	KASSERT(unit < ICP_MAX_HDRIVES);

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, unit, 0, 0)) {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d ICP_INFO failed -> 0x%04x\n",
		    device_xname(icp->icp_dv), unit, icp->icp_status);
#endif
		goto gone;
	}
	if ((newsize = icp->icp_info) == 0) {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d has zero size\n",
		    device_xname(icp->icp_dv), unit);
#endif
 gone:
		/*
		 * Host drive is no longer present; detach if a child
		 * is currently there.
		 */
		if (icp->icp_cdr[unit].cd_size != 0)
			icp->icp_ndevs--;
		icp->icp_cdr[unit].cd_size = 0;
		if (icp->icp_children[unit] != NULL) {
			(void) config_detach(icp->icp_children[unit],
			    DETACH_FORCE);
			icp->icp_children[unit] = NULL;
		}
		return;
	}

	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, unit, 0, 0))
		newtype = icp->icp_info;
	else {
#ifdef ICP_DEBUG
		printf("%s: rescan: unit %d ICP_DEVTYPE failed\n",
		    device_xname(icp->icp_dv), unit);
#endif
		newtype = 0;	/* XXX? */
	}

#ifdef ICP_DEBUG
	printf("%s: rescan: unit %d old %u/%u, new %u/%u\n",
	    device_xname(icp->icp_dv), unit, icp->icp_cdr[unit].cd_size,
	    icp->icp_cdr[unit].cd_type, newsize, newtype);
#endif

	/*
	 * If the type or size changed, detach any old child (if it exists)
	 * and attach a new one.
	 */
	if (icp->icp_children[unit] == NULL ||
	    newsize != icp->icp_cdr[unit].cd_size ||
	    newtype != icp->icp_cdr[unit].cd_type) {
		if (icp->icp_cdr[unit].cd_size == 0)
			icp->icp_ndevs++;
		icp->icp_cdr[unit].cd_size = newsize;
		icp->icp_cdr[unit].cd_type = newtype;
		if (icp->icp_children[unit] != NULL)
			(void) config_detach(icp->icp_children[unit],
			    DETACH_FORCE);

		icpa.icpa_unit = unit;

		locs[ICPCF_UNIT] = unit;

		icp->icp_children[unit] =
		    config_found(icp->icp_dv, &icpa, icp_print,
			CFARG_SUBMATCH, config_stdsubmatch,
			CFARG_LOCATORS, locs,
			CFARG_EOL);
	}

	icp_recompute_openings(icp);
}

void
icp_rescan_all(struct icp_softc *icp)
{
	int unit;
	u_int16_t cdev_cnt;

	/*
	 * This is the old method of rescanning the host drives.  We
	 * start by reinitializing the cache service.
	 */
	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		printf("%s: unable to re-initialize cache service for rescan\n",
		    device_xname(icp->icp_dv));
		return;
	}
	cdev_cnt = (u_int16_t) icp->icp_info;

	/* For each host drive, do the new-style rescan. */
	for (unit = 0; unit < cdev_cnt && unit < ICP_MAX_HDRIVES; unit++)
		icp_rescan(icp, unit);

	/* Now detach anything in the slots after cdev_cnt. */
	for (; unit < ICP_MAX_HDRIVES; unit++) {
		if (icp->icp_cdr[unit].cd_size != 0) {
#ifdef ICP_DEBUG
			printf("%s: rescan all: unit %d < new cdev_cnt (%d)\n",
			    device_xname(icp->icp_dv), unit, cdev_cnt);
#endif
			icp->icp_ndevs--;
			icp->icp_cdr[unit].cd_size = 0;
			if (icp->icp_children[unit] != NULL) {
				(void) config_detach(icp->icp_children[unit],
				    DETACH_FORCE);
				icp->icp_children[unit] = NULL;
			}
		}
	}

	icp_recompute_openings(icp);
}

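/*
 * Divide the usable CCBs, less a small reserve, evenly among the
 * attached devices.  For example (numbers purely illustrative): with
 * 96 usable CCBs, a reserve of 4 and 4 devices, each child would be
 * offered (96 - 4) / 4 = 23 openings.
 */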
void
icp_recompute_openings(struct icp_softc *icp)
{
	int unit, openings;

	if (icp->icp_ndevs != 0)
		openings =
		    (icp->icp_nccbs - ICP_NCCB_RESERVE) / icp->icp_ndevs;
	else
		openings = 0;
	if (openings == icp->icp_openings)
		return;
	icp->icp_openings = openings;

#ifdef ICP_DEBUG
	printf("%s: %d device%s, %d openings per device\n",
	    device_xname(icp->icp_dv), icp->icp_ndevs,
	    icp->icp_ndevs == 1 ? "" : "s", icp->icp_openings);
#endif

	for (unit = 0; unit < ICP_MAX_HDRIVES + ICP_MAXBUS; unit++) {
		if (icp->icp_children[unit] != NULL)
			(*icp->icp_servicecb[unit]->iscb_openings)(
			    icp->icp_children[unit], icp->icp_openings);
	}
}

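/*
 * Watchdog: runs once every ICP_WATCHDOG_FREQ seconds.  It polls the
 * controller via icp_intr() to pick up any completions whose
 * interrupts were lost, and restarts the CCB queue if work is still
 * pending.
 */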
void
icp_watchdog(void *cookie)
{
	struct icp_softc *icp;
	int s;

	icp = cookie;

	s = splbio();
	icp_intr(icp);
	if (ICP_HAS_WORK(icp))
		icp_ccb_enqueue(icp, NULL);
	splx(s);

	callout_reset(&icp->icp_wdog_callout, hz * ICP_WATCHDOG_FREQ,
	    icp_watchdog, icp);
}

int
icp_print(void *aux, const char *pnp)
{
	struct icp_attach_args *icpa;
	const char *str;

	icpa = (struct icp_attach_args *)aux;

	if (pnp != NULL) {
		if (icpa->icpa_unit < ICPA_UNIT_SCSI)
			str = "block device";
		else
			str = "SCSI channel";
		aprint_normal("%s at %s", str, pnp);
	}
	aprint_normal(" unit %d", icpa->icpa_unit);

	return (UNCONF);
}

int
icp_async_event(struct icp_softc *icp, int service)
{

	if (service == ICP_SCREENSERVICE) {
		if (icp->icp_status == ICP_S_MSG_REQUEST) {
			/* XXX */
		}
	} else {
		if ((icp->icp_fw_vers & 0xff) >= 0x1a) {
			icp->icp_evt.size = 0;
			icp->icp_evt.eu.async.ionode =
			    device_unit(icp->icp_dv);
			icp->icp_evt.eu.async.status = icp->icp_status;
			/*
			 * Severity and event string are filled in by the
			 * hardware interface interrupt handler.
			 */
			printf("%s: %s\n", device_xname(icp->icp_dv),
			    icp->icp_evt.event_string);
		} else {
			icp->icp_evt.size = sizeof(icp->icp_evt.eu.async);
			icp->icp_evt.eu.async.ionode =
			    device_unit(icp->icp_dv);
			icp->icp_evt.eu.async.service = service;
			icp->icp_evt.eu.async.status = icp->icp_status;
			icp->icp_evt.eu.async.info = icp->icp_info;
			/* XXXJRT FIX THIS */
			*(u_int32_t *) icp->icp_evt.eu.async.scsi_coord =
			    icp->icp_info2;
		}
		icp_store_event(icp, GDT_ES_ASYNC, service, &icp->icp_evt);
	}

	return (0);
}

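/*
 * Interrupt handler.  The status index read from the controller is 0
 * when no status is pending; ICP_ASYNCINDEX and ICP_SPEZINDEX mark an
 * asynchronous event and a driver error, respectively; any other
 * value identifies a completed CCB, whose array index is the status
 * index minus 2 (matching the ic_ident assignment in icp_init()).
 */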
int
icp_intr(void *cookie)
{
	struct icp_softc *icp;
	struct icp_intr_ctx ctx;
	struct icp_ccb *ic;

	icp = cookie;

	ctx.istatus = (*icp->icp_get_status)(icp);
	if (!ctx.istatus) {
		icp->icp_status = ICP_S_NO_STATUS;
		return (0);
	}

	(*icp->icp_intr)(icp, &ctx);

	icp->icp_status = ctx.cmd_status;
	icp->icp_service = ctx.service;
	icp->icp_info = ctx.info;
	icp->icp_info2 = ctx.info2;

	switch (ctx.istatus) {
	case ICP_ASYNCINDEX:
		icp_async_event(icp, ctx.service);
		return (1);

	case ICP_SPEZINDEX:
		aprint_error_dev(icp->icp_dv,
		    "uninitialized or unknown service (%d/%d)\n",
		    ctx.info, ctx.info2);
		icp->icp_evt.size = sizeof(icp->icp_evt.eu.driver);
		icp->icp_evt.eu.driver.ionode = device_unit(icp->icp_dv);
		icp_store_event(icp, GDT_ES_DRIVER, 4, &icp->icp_evt);
		return (1);
	}

	if ((ctx.istatus - 2) >= icp->icp_nccbs)
		panic("icp_intr: bad command index returned");

	ic = &icp->icp_ccbs[ctx.istatus - 2];
	ic->ic_status = icp->icp_status;

	if ((ic->ic_flags & IC_ALLOCED) == 0) {
		/* XXX ICP's "iir" driver just sends an event here. */
		panic("icp_intr: inactive CCB identified");
	}

	/*
	 * Try to protect ourselves from the running command count already
	 * being 0 (e.g. if a polled command times out).
	 */
	KDASSERT(icp->icp_running != 0);
	if (--icp->icp_running == 0 &&
	    (icp->icp_flags & ICP_F_WAIT_FREEZE) != 0) {
		icp->icp_flags &= ~ICP_F_WAIT_FREEZE;
		wakeup(&icp->icp_qfreeze);
	}

	switch (icp->icp_status) {
	case ICP_S_BSY:
#ifdef ICP_DEBUG
		printf("%s: ICP_S_BSY received\n", device_xname(icp->icp_dv));
#endif
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_INSERT_HEAD(&icp->icp_ucmd_queue, ic, ic_chain);
		else
			SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_queue, ic, ic_chain);
		break;

	default:
		ic->ic_flags |= IC_COMPLETE;

		if ((ic->ic_flags & IC_WAITING) != 0)
			wakeup(ic);
		else if (ic->ic_intr != NULL)
			(*ic->ic_intr)(ic);

		if (ICP_HAS_WORK(icp))
			icp_ccb_enqueue(icp, NULL);

		break;
	}

	return (1);
}

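/*
 * Context handed from icp_ucmd() to icp_ucmd_intr() via ic_context:
 * the user command being executed, and the number of bytes staged for
 * it in the ICP_SCRATCH_UCMD area.
 */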
struct icp_ucmd_ctx {
	gdt_ucmd_t *iu_ucmd;
	u_int32_t iu_cnt;
};

void
icp_ucmd_intr(struct icp_ccb *ic)
{
	struct icp_softc *icp = device_private(ic->ic_dv);
	struct icp_ucmd_ctx *iu = ic->ic_context;
	gdt_ucmd_t *ucmd = iu->iu_ucmd;

	ucmd->status = icp->icp_status;
	ucmd->info = icp->icp_info;

	if (iu->iu_cnt != 0) {
		bus_dmamap_sync(icp->icp_dmat,
		    icp->icp_scr_dmamap,
		    ICP_SCRATCH_UCMD, iu->iu_cnt,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		memcpy(ucmd->data,
		    (char *)icp->icp_scr + ICP_SCRATCH_UCMD, iu->iu_cnt);
	}

	icp->icp_ucmd_ccb = NULL;

	ic->ic_flags |= IC_COMPLETE;
	wakeup(ic);
}

/*
 * NOTE: We assume that it is safe to sleep here!
 */
int
icp_cmd(struct icp_softc *icp, u_int8_t service, u_int16_t opcode,
    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
	struct icp_ioctlcmd *icmd;
	struct icp_cachecmd *cc;
	struct icp_rawcmd *rc;
	int retries, rv;
	struct icp_ccb *ic;

	retries = ICP_RETRIES;

	do {
		ic = icp_ccb_alloc_wait(icp);
		memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
		ic->ic_cmd.cmd_opcode = htole16(opcode);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				icmd = &ic->ic_cmd.cmd_packet.ic;
				icmd->ic_subfunc = htole16(arg1);
				icmd->ic_channel = htole32(arg2);
				icmd->ic_bufsize = htole32(arg3);
				icmd->ic_addr =
				    htole32(icp->icp_scr_seg[0].ds_addr);

				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_PREWRITE |
				    BUS_DMASYNC_PREREAD);
			} else {
				cc = &ic->ic_cmd.cmd_packet.cc;
				cc->cc_deviceno = htole16(arg1);
				cc->cc_blockno = htole32(arg2);
			}
			break;

		case ICP_SCSIRAWSERVICE:
			rc = &ic->ic_cmd.cmd_packet.rc;
			rc->rc_direction = htole32(arg1);
			rc->rc_bus = arg2;
			rc->rc_target = arg3;
			rc->rc_lun = arg3 >> 8;
			break;
		}

		ic->ic_service = service;
		ic->ic_cmdlen = sizeof(ic->ic_cmd);
		rv = icp_ccb_poll(icp, ic, 10000);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_POSTWRITE |
				    BUS_DMASYNC_POSTREAD);
			}
			break;
		}

		icp_ccb_free(icp, ic);
	} while (rv != 0 && --retries > 0);

	return (icp->icp_status == ICP_S_OK);
}

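/*
 * Execute a user command (from the ioctl interface).  Transfer data
 * and sense data are staged through the shared ICP_SCRATCH_UCMD
 * region, which is why only one user command may be outstanding at a
 * time; icp_ccb_enqueue() enforces this.
 */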
int
icp_ucmd(struct icp_softc *icp, gdt_ucmd_t *ucmd)
{
	struct icp_ccb *ic;
	struct icp_ucmd_ctx iu;
	u_int32_t cnt;
	int error;

	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			cnt = ucmd->command.cmd_packet.ic.ic_bufsize;
			if (cnt > GDT_SCRATCH_SZ) {
				aprint_error_dev(icp->icp_dv,
				    "scratch buffer too small (%d/%d)\n",
				    GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		} else {
			cnt = ucmd->command.cmd_packet.cc.cc_blockcnt *
			    ICP_SECTOR_SIZE;
			if (cnt > GDT_SCRATCH_SZ) {
				aprint_error_dev(icp->icp_dv,
				    "scratch buffer too small (%d/%d)\n",
				    GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		}
	} else {
		cnt = ucmd->command.cmd_packet.rc.rc_sdlen +
		    ucmd->command.cmd_packet.rc.rc_sense_len;
		if (cnt > GDT_SCRATCH_SZ) {
			aprint_error_dev(icp->icp_dv,
			    "scratch buffer too small (%d/%d)\n",
			    GDT_SCRATCH_SZ, cnt);
			return (EINVAL);
		}
	}

	iu.iu_ucmd = ucmd;
	iu.iu_cnt = cnt;

	ic = icp_ccb_alloc_wait(icp);
	memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
	ic->ic_cmd.cmd_opcode = htole16(ucmd->command.cmd_opcode);

	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			struct icp_ioctlcmd *icmd, *uicmd;

			icmd = &ic->ic_cmd.cmd_packet.ic;
			uicmd = &ucmd->command.cmd_packet.ic;

			icmd->ic_subfunc = htole16(uicmd->ic_subfunc);
			icmd->ic_channel = htole32(uicmd->ic_channel);
			icmd->ic_bufsize = htole32(uicmd->ic_bufsize);
			icmd->ic_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
			    ICP_SCRATCH_UCMD);
		} else {
			struct icp_cachecmd *cc, *ucc;

			cc = &ic->ic_cmd.cmd_packet.cc;
			ucc = &ucmd->command.cmd_packet.cc;

			cc->cc_deviceno = htole16(ucc->cc_deviceno);
			cc->cc_blockno = htole32(ucc->cc_blockno);
			cc->cc_blockcnt = htole32(ucc->cc_blockcnt);
			cc->cc_addr = htole32(0xffffffffU);
			cc->cc_nsgent = htole32(1);
			cc->cc_sg[0].sg_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
			    ICP_SCRATCH_UCMD);
			cc->cc_sg[0].sg_len = htole32(cnt);
		}
	} else {
		struct icp_rawcmd *rc, *urc;

		rc = &ic->ic_cmd.cmd_packet.rc;
		urc = &ucmd->command.cmd_packet.rc;

		rc->rc_direction = htole32(urc->rc_direction);
		rc->rc_sdata = htole32(0xffffffffU);
		rc->rc_sdlen = htole32(urc->rc_sdlen);
		rc->rc_clen = htole32(urc->rc_clen);
		memcpy(rc->rc_cdb, urc->rc_cdb, sizeof(rc->rc_cdb));
		rc->rc_target = urc->rc_target;
		rc->rc_lun = urc->rc_lun;
		rc->rc_bus = urc->rc_bus;
		rc->rc_sense_len = htole32(urc->rc_sense_len);
		rc->rc_sense_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr +
		    ICP_SCRATCH_UCMD + urc->rc_sdlen);
		rc->rc_nsgent = htole32(1);
		rc->rc_sg[0].sg_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr + ICP_SCRATCH_UCMD);
		rc->rc_sg[0].sg_len = htole32(cnt - urc->rc_sense_len);
	}

	ic->ic_service = ucmd->service;
	ic->ic_cmdlen = sizeof(ic->ic_cmd);
	ic->ic_context = &iu;

	/*
	 * XXX What units are ucmd->timeout in?  Until we know, we
	 * XXX just pull a number out of thin air.
	 */
	if (__predict_false((error = icp_ccb_wait_user(icp, ic, 30000)) != 0))
		aprint_error_dev(icp->icp_dv,
		    "error %d waiting for ucmd to complete\n", error);

	/* icp_ucmd_intr() has updated ucmd. */
	icp_ccb_free(icp, ic);

	return (error);
}

struct icp_ccb *
icp_ccb_alloc(struct icp_softc *icp)
{
	struct icp_ccb *ic;
	int s;

	s = splbio();
	if (__predict_false((ic =
	    SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL)) {
		splx(s);
		return (NULL);
	}
	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
	splx(s);

	ic->ic_flags = IC_ALLOCED;
	return (ic);
}

struct icp_ccb *
icp_ccb_alloc_wait(struct icp_softc *icp)
{
	struct icp_ccb *ic;
	int s;

	s = splbio();
	while ((ic = SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL) {
		icp->icp_flags |= ICP_F_WAIT_CCB;
		(void) tsleep(&icp->icp_ccb_freelist, PRIBIO, "icpccb", 0);
	}
	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
	splx(s);

	ic->ic_flags = IC_ALLOCED;
	return (ic);
}

void
icp_ccb_free(struct icp_softc *icp, struct icp_ccb *ic)
{
	int s;

	s = splbio();
	ic->ic_flags = 0;
	ic->ic_intr = NULL;
	SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_freelist, ic, ic_chain);
	if (__predict_false((icp->icp_flags & ICP_F_WAIT_CCB) != 0)) {
		icp->icp_flags &= ~ICP_F_WAIT_CCB;
		wakeup(&icp->icp_ccb_freelist);
	}
	splx(s);
}

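/*
 * Queue a CCB for submission, then start as many queued CCBs as the
 * controller will accept.  Calling this with ic == NULL simply kicks
 * the queue.  User commands all share the ICP_SCRATCH_UCMD area, so
 * the queue stalls while one of them is in flight.
 */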
void
icp_ccb_enqueue(struct icp_softc *icp, struct icp_ccb *ic)
{
	int s;

	s = splbio();

	if (ic != NULL) {
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_INSERT_TAIL(&icp->icp_ucmd_queue, ic, ic_chain);
		else
			SIMPLEQ_INSERT_TAIL(&icp->icp_ccb_queue, ic, ic_chain);
	}

	for (; icp->icp_qfreeze == 0;) {
		if (__predict_false((ic =
		    SIMPLEQ_FIRST(&icp->icp_ucmd_queue)) != NULL)) {
			struct icp_ucmd_ctx *iu = ic->ic_context;
			gdt_ucmd_t *ucmd = iu->iu_ucmd;

			/*
			 * All user-generated commands share the same
			 * scratch space, so if one is already running,
			 * we have to stall the command queue.
			 */
			if (icp->icp_ucmd_ccb != NULL)
				break;
			if ((*icp->icp_test_busy)(icp))
				break;
			icp->icp_ucmd_ccb = ic;

			if (iu->iu_cnt != 0) {
				memcpy((char *)icp->icp_scr + ICP_SCRATCH_UCMD,
				    ucmd->data, iu->iu_cnt);
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap,
				    ICP_SCRATCH_UCMD, iu->iu_cnt,
				    BUS_DMASYNC_PREREAD |
				    BUS_DMASYNC_PREWRITE);
			}
		} else if (__predict_true((ic =
		    SIMPLEQ_FIRST(&icp->icp_ccb_queue)) != NULL)) {
			if ((*icp->icp_test_busy)(icp))
				break;
		} else {
			/* no command found */
			break;
		}
		icp_ccb_submit(icp, ic);
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ucmd_queue, ic_chain);
		else
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_queue, ic_chain);
	}

	splx(s);
}

int
icp_ccb_map(struct icp_softc *icp, struct icp_ccb *ic, void *data, int size,
    int dir)
{
	struct icp_sg *sg;
	int nsegs, i, rv;
	bus_dmamap_t xfer;

	xfer = ic->ic_xfer_map;

	rv = bus_dmamap_load(icp->icp_dmat, xfer, data, size, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((dir & IC_XFER_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0)
		return (rv);

	nsegs = xfer->dm_nsegs;
	ic->ic_xfer_size = size;
	ic->ic_nsgent = nsegs;
	ic->ic_flags |= dir;
	sg = ic->ic_sg;

	if (sg != NULL) {
		for (i = 0; i < nsegs; i++, sg++) {
			sg->sg_addr = htole32(xfer->dm_segs[i].ds_addr);
			sg->sg_len = htole32(xfer->dm_segs[i].ds_len);
		}
	} else if (nsegs > 1)
		panic("icp_ccb_map: no SG list specified, but nsegs > 1");

	if ((dir & IC_XFER_OUT) != 0)
		i = BUS_DMASYNC_PREWRITE;
	else /* if ((dir & IC_XFER_IN) != 0) */
		i = BUS_DMASYNC_PREREAD;

	bus_dmamap_sync(icp->icp_dmat, xfer, 0, ic->ic_xfer_size, i);
	return (0);
}

void
icp_ccb_unmap(struct icp_softc *icp, struct icp_ccb *ic)
{
	int i;

	if ((ic->ic_flags & IC_XFER_OUT) != 0)
		i = BUS_DMASYNC_POSTWRITE;
	else /* if ((ic->ic_flags & IC_XFER_IN) != 0) */
		i = BUS_DMASYNC_POSTREAD;

	bus_dmamap_sync(icp->icp_dmat, ic->ic_xfer_map, 0, ic->ic_xfer_size, i);
	bus_dmamap_unload(icp->icp_dmat, ic->ic_xfer_map);
}

int
icp_ccb_poll(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	s = splbio();

	/*
	 * XXX The caller's timo argument is clobbered here; the busy-wait
	 * XXX counter is reused as the timeout for the rest of the function.
	 */
	for (timo = ICP_BUSY_WAIT_MS * 100; timo != 0; timo--) {
		if (!(*icp->icp_test_busy)(icp))
			break;
		DELAY(10);
	}
	if (timo == 0) {
		printf("%s: submit: busy\n", device_xname(icp->icp_dv));
		splx(s);
		return (EAGAIN);
	}

	icp_ccb_submit(icp, ic);

	if (cold) {
		for (timo *= 10; timo != 0; timo--) {
			DELAY(100);
			icp_intr(icp);
			if ((ic->ic_flags & IC_COMPLETE) != 0)
				break;
		}
	} else {
		ic->ic_flags |= IC_WAITING;
		while ((ic->ic_flags & IC_COMPLETE) == 0) {
			if ((rv = tsleep(ic, PRIBIO, "icpwccb",
			    mstohz(timo))) != 0) {
				timo = 0;
				break;
			}
		}
	}

	if (timo != 0) {
		if (ic->ic_status != ICP_S_OK) {
#ifdef ICP_DEBUG
			printf("%s: request failed; status=0x%04x\n",
			    device_xname(icp->icp_dv), ic->ic_status);
#endif
			rv = EIO;
		} else
			rv = 0;
	} else {
		aprint_error_dev(icp->icp_dv, "command timed out\n");
		rv = EIO;
	}

	while ((*icp->icp_test_busy)(icp) != 0)
		DELAY(10);

	splx(s);

	return (rv);
}

int
icp_ccb_wait(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	ic->ic_flags |= IC_WAITING;

	s = splbio();
	icp_ccb_enqueue(icp, ic);
	while ((ic->ic_flags & IC_COMPLETE) == 0) {
		if ((rv = tsleep(ic, PRIBIO, "icpwccb", mstohz(timo))) != 0) {
			splx(s);
			return (rv);
		}
	}
	splx(s);

	if (ic->ic_status != ICP_S_OK) {
		aprint_error_dev(icp->icp_dv, "command failed; status=%x\n",
		    ic->ic_status);
		return (EIO);
	}

	return (0);
}

int
icp_ccb_wait_user(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	ic->ic_dv = icp->icp_dv;
	ic->ic_intr = icp_ucmd_intr;
	ic->ic_flags |= IC_UCMD;

	s = splbio();
	icp_ccb_enqueue(icp, ic);
	while ((ic->ic_flags & IC_COMPLETE) == 0) {
		if ((rv = tsleep(ic, PRIBIO, "icpwuccb", mstohz(timo))) != 0) {
			splx(s);
			return (rv);
		}
	}
	splx(s);

	return (0);
}

void
icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic)
{

	ic->ic_cmdlen = (ic->ic_cmdlen + 3) & ~3;	/* pad to 32-bit boundary */

	(*icp->icp_set_sema0)(icp);
	DELAY(10);

	ic->ic_cmd.cmd_boardnode = htole32(ICP_LOCALBOARD);
	ic->ic_cmd.cmd_cmdindex = htole32(ic->ic_ident);

	icp->icp_running++;

	(*icp->icp_copy_cmd)(icp, ic);
	(*icp->icp_release_event)(icp, ic);
}

int
icp_freeze(struct icp_softc *icp)
{
	int s, error = 0;

	s = splbio();
	if (icp->icp_qfreeze++ == 0) {
		while (icp->icp_running != 0) {
			icp->icp_flags |= ICP_F_WAIT_FREEZE;
			error = tsleep(&icp->icp_qfreeze, PRIBIO|PCATCH,
			    "icpqfrz", 0);
			if (error != 0 && --icp->icp_qfreeze == 0 &&
			    ICP_HAS_WORK(icp)) {
				icp_ccb_enqueue(icp, NULL);
				break;
			}
		}
	}
	splx(s);

	return (error);
}

void
icp_unfreeze(struct icp_softc *icp)
{
	int s;

	s = splbio();
	KDASSERT(icp->icp_qfreeze != 0);
	if (--icp->icp_qfreeze == 0 && ICP_HAS_WORK(icp))
		icp_ccb_enqueue(icp, NULL);
	splx(s);
}

/* XXX Global - should be per-controller? XXX */
static gdt_evt_str icp_event_buffer[ICP_MAX_EVENTS];
static int icp_event_oldidx;
static int icp_event_lastidx;
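
/*
 * The event buffer is used as a ring: icp_event_lastidx points at the
 * most recently stored entry and icp_event_oldidx at the oldest.  When
 * the ring wraps, the oldest entry is overwritten.  An event identical
 * to the last one stored is coalesced by bumping its same_count.
 */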

gdt_evt_str *
icp_store_event(struct icp_softc *icp, u_int16_t source, u_int16_t idx,
    gdt_evt_data *evt)
{
	gdt_evt_str *e;

	/* no source == no event */
	if (source == 0)
		return (NULL);

	e = &icp_event_buffer[icp_event_lastidx];
	if (e->event_source == source && e->event_idx == idx &&
	    ((evt->size != 0 && e->event_data.size != 0 &&
	      memcmp(&e->event_data.eu, &evt->eu, evt->size) == 0) ||
	    (evt->size == 0 && e->event_data.size == 0 &&
	      strcmp((char *) e->event_data.event_string,
	      (char *) evt->event_string) == 0))) {
		e->last_stamp = time_second;
		e->same_count++;
	} else {
		if (icp_event_buffer[icp_event_lastidx].event_source != 0) {
			icp_event_lastidx++;
			if (icp_event_lastidx == ICP_MAX_EVENTS)
				icp_event_lastidx = 0;
			if (icp_event_lastidx == icp_event_oldidx) {
				icp_event_oldidx++;
				if (icp_event_oldidx == ICP_MAX_EVENTS)
					icp_event_oldidx = 0;
			}
		}
		e = &icp_event_buffer[icp_event_lastidx];
		e->event_source = source;
		e->event_idx = idx;
		e->first_stamp = e->last_stamp = time_second;
		e->same_count = 1;
		e->event_data = *evt;
		e->application = 0;
	}
	return (e);
}

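/*
 * Read one stored event.  A handle of -1 starts with the oldest entry;
 * the return value is the handle to pass on the next call, or -1 once
 * the newest entry has been returned.
 */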
int
icp_read_event(struct icp_softc *icp, int handle, gdt_evt_str *estr)
{
	gdt_evt_str *e;
	int eindex, s;

	s = splbio();

	if (handle == -1)
		eindex = icp_event_oldidx;
	else
		eindex = handle;

	estr->event_source = 0;

	if (eindex < 0 || eindex >= ICP_MAX_EVENTS) {
		splx(s);
		return (eindex);
	}

	e = &icp_event_buffer[eindex];
	if (e->event_source != 0) {
		if (eindex != icp_event_lastidx) {
			eindex++;
			if (eindex == ICP_MAX_EVENTS)
				eindex = 0;
		} else
			eindex = -1;
		memcpy(estr, e, sizeof(gdt_evt_str));
	}

	splx(s);

	return (eindex);
}

void
icp_readapp_event(struct icp_softc *icp, u_int8_t application,
    gdt_evt_str *estr)
{
	gdt_evt_str *e;
	int found = 0, eindex, s;

	s = splbio();

	eindex = icp_event_oldidx;
	for (;;) {
		e = &icp_event_buffer[eindex];
		if (e->event_source == 0)
			break;
		if ((e->application & application) == 0) {
			e->application |= application;
			found = 1;
			break;
		}
		if (eindex == icp_event_lastidx)
			break;
		eindex++;
		if (eindex == ICP_MAX_EVENTS)
			eindex = 0;
	}
	if (found)
		memcpy(estr, e, sizeof(gdt_evt_str));
	else
		estr->event_source = 0;

	splx(s);
}

void
icp_clear_events(struct icp_softc *icp)
{
	int s;

	s = splbio();
	icp_event_oldidx = icp_event_lastidx = 0;
	memset(icp_event_buffer, 0, sizeof(icp_event_buffer));
	splx(s);
}