/*	$NetBSD: icp.c,v 1.8 2003/05/13 15:42:33 thorpej Exp $	*/

/*-
 * Copyright (c) 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, and by Jason R. Thorpe of Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1999, 2000 Niklas Hallqvist.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Niklas Hallqvist.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from OpenBSD: gdt_common.c,v 1.12 2001/07/04 06:43:18 niklas Exp
 */

/*
 * This driver would not have been written were it not for the hardware
 * donations from both ICP-Vortex and ko.neT.  I want to thank them for
 * their support.
 *
 * Re-worked for NetBSD by Andrew Doran.  Test hardware kindly supplied by
 * Intel.
 *
 * Support for the ICP-Vortex management tools added by
 * Jason R. Thorpe of Wasabi Systems, Inc., based on code
 * provided by Achim Leubner <achim.leubner (at) intel.com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: icp.c,v 1.8 2003/05/13 15:42:33 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/disk.h>

#include <uvm/uvm_extern.h>

#include <machine/bswap.h>
#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/icpreg.h>
#include <dev/ic/icpvar.h>

#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

int	icp_async_event(struct icp_softc *, int);
void	icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic);
void	icp_chain(struct icp_softc *);
int	icp_print(void *, const char *);
int	icp_submatch(struct device *, struct cfdata *, void *);
void	icp_watchdog(void *);
void	icp_ucmd_intr(struct icp_ccb *);

int icp_count;	/* total # of controllers, for ioctl interface */

/*
 * Statistics for the ioctl interface to query.
 *
 * XXX Global.  They should probably be made per-controller
 * XXX at some point.
 */
gdt_statist_t icp_stats;

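/*
 * icp_init():
 *	Initialize the controller: set up the DMA scratch area and the
 *	CCB pool, initialize the screen, cache and raw SCSI services,
 *	and attach a child device for each SCSI channel and cache (host)
 *	drive found.  Returns non-zero on failure.
 */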
int
icp_init(struct icp_softc *icp, const char *intrstr)
{
	struct icp_attach_args icpa;
	struct icp_binfo binfo;
	struct icp_ccb *ic;
	u_int16_t cdev_cnt;
	int i, j, state, feat, nsegs, rv, noscsi, nocache;

	state = 0;
	noscsi = 0;
	nocache = 0;

	if (intrstr != NULL)
		aprint_normal("%s: interrupting at %s\n", icp->icp_dv.dv_xname,
		    intrstr);

	SIMPLEQ_INIT(&icp->icp_ccb_queue);
	SIMPLEQ_INIT(&icp->icp_ccb_freelist);
	SIMPLEQ_INIT(&icp->icp_ucmd_queue);
	callout_init(&icp->icp_wdog_callout);

	/*
	 * Allocate a scratch area.
	 */
	if (bus_dmamap_create(icp->icp_dmat, ICP_SCRATCH_SIZE, 1,
	    ICP_SCRATCH_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &icp->icp_scr_dmamap) != 0) {
		aprint_error("%s: cannot create scratch dmamap\n",
		    icp->icp_dv.dv_xname);
		return (1);
	}
	state++;

	if (bus_dmamem_alloc(icp->icp_dmat, ICP_SCRATCH_SIZE, PAGE_SIZE, 0,
	    icp->icp_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error("%s: cannot alloc scratch dmamem\n",
		    icp->icp_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(icp->icp_dmat, icp->icp_scr_seg, nsegs,
	    ICP_SCRATCH_SIZE, &icp->icp_scr, 0)) {
		aprint_error("%s: cannot map scratch dmamem\n",
		    icp->icp_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(icp->icp_dmat, icp->icp_scr_dmamap, icp->icp_scr,
	    ICP_SCRATCH_SIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error("%s: cannot load scratch dmamap\n",
		    icp->icp_dv.dv_xname);
		goto bail_out;
	}
	state++;

	/*
	 * Allocate and initialize the command control blocks.
	 */
	ic = malloc(sizeof(*ic) * ICP_NCCBS, M_DEVBUF, M_NOWAIT | M_ZERO);
	if ((icp->icp_ccbs = ic) == NULL) {
		aprint_error("%s: malloc() failed\n", icp->icp_dv.dv_xname);
		goto bail_out;
	}
	state++;

	for (i = 0; i < ICP_NCCBS; i++, ic++) {
		/*
		 * The first two command indexes have special meanings, so
		 * we can't use them.
		 */
		ic->ic_ident = i + 2;
		rv = bus_dmamap_create(icp->icp_dmat, ICP_MAX_XFER,
		    ICP_MAXSG, ICP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ic->ic_xfer_map);
		if (rv != 0)
			break;
		icp->icp_nccbs++;
		icp_ccb_free(icp, ic);
	}
#ifdef DIAGNOSTIC
	if (icp->icp_nccbs != ICP_NCCBS)
		aprint_error("%s: %d/%d CCBs usable\n", icp->icp_dv.dv_xname,
		    icp->icp_nccbs, ICP_NCCBS);
#endif

	/*
	 * Initialize the controller.
	 */
	if (!icp_cmd(icp, ICP_SCREENSERVICE, ICP_INIT, 0, 0, 0)) {
		aprint_error("%s: screen service init error %d\n",
		    icp->icp_dv.dv_xname, icp->icp_status);
		goto bail_out;
	}

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		aprint_error("%s: cache service init error %d\n",
		    icp->icp_dv.dv_xname, icp->icp_status);
		goto bail_out;
	}

	icp_cmd(icp, ICP_CACHESERVICE, ICP_UNFREEZE_IO, 0, 0, 0);

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_MOUNT, 0xffff, 1, 0)) {
		aprint_error("%s: cache service mount error %d\n",
		    icp->icp_dv.dv_xname, icp->icp_status);
		goto bail_out;
	}

	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
		aprint_error("%s: cache service post-mount init error %d\n",
		    icp->icp_dv.dv_xname, icp->icp_status);
		goto bail_out;
	}
	cdev_cnt = (u_int16_t)icp->icp_info;
	icp->icp_fw_vers = icp->icp_service;

	if (!icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_INIT, 0, 0, 0)) {
		aprint_error("%s: raw service init error %d\n",
		    icp->icp_dv.dv_xname, icp->icp_status);
		goto bail_out;
	}

	/*
	 * Set/get raw service features (scatter/gather).
	 */
	feat = 0;
	if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_SET_FEAT, ICP_SCATTER_GATHER,
	    0, 0))
		if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_GET_FEAT, 0, 0, 0))
			feat = icp->icp_info;

	if ((feat & ICP_SCATTER_GATHER) == 0) {
#ifdef DIAGNOSTIC
		aprint_normal(
		    "%s: scatter/gather not supported (raw service)\n",
		    icp->icp_dv.dv_xname);
#endif
		noscsi = 1;
	}

	/*
	 * Set/get cache service features (scatter/gather).
	 */
	feat = 0;
	if (icp_cmd(icp, ICP_CACHESERVICE, ICP_SET_FEAT, 0,
	    ICP_SCATTER_GATHER, 0))
		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_GET_FEAT, 0, 0, 0))
			feat = icp->icp_info;

	if ((feat & ICP_SCATTER_GATHER) == 0) {
#ifdef DIAGNOSTIC
		aprint_normal(
		    "%s: scatter/gather not supported (cache service)\n",
		    icp->icp_dv.dv_xname);
#endif
		nocache = 1;
	}

	/*
	 * Pull some information from the board and dump.
	 */
	if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, ICP_BOARD_INFO,
	    ICP_INVALID_CHANNEL, sizeof(struct icp_binfo))) {
		aprint_error("%s: unable to retrieve board info\n",
		    icp->icp_dv.dv_xname);
		goto bail_out;
	}
	memcpy(&binfo, icp->icp_scr, sizeof(binfo));

	aprint_normal(
	    "%s: model <%s>, firmware <%s>, %d channel(s), %dMB memory\n",
	    icp->icp_dv.dv_xname, binfo.bi_type_string, binfo.bi_raid_string,
	    binfo.bi_chan_count, le32toh(binfo.bi_memsize) >> 20);

	/*
	 * Determine the number of devices, and number of openings per
	 * device.
	 */
	if (!nocache) {
		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
			if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, j, 0,
			    0))
				continue;

			icp->icp_cdr[j].cd_size = icp->icp_info;
			icp->icp_ndevs++;

			if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, j, 0,
			    0))
				icp->icp_cdr[j].cd_type = icp->icp_info;
		}
	}

	if (!noscsi)
		icp->icp_ndevs += binfo.bi_chan_count;

	if (icp->icp_ndevs != 0)
		icp->icp_openings =
		    (icp->icp_nccbs - ICP_NCCB_RESERVE) / icp->icp_ndevs;
#ifdef ICP_DEBUG
	aprint_debug("%s: %d openings per device\n", icp->icp_dv.dv_xname,
	    icp->icp_openings);
#endif

	/*
	 * Attach SCSI channels.
	 */
	if (!noscsi) {
		struct icp_ioc_version *iv;
		struct icp_rawioc *ri;
		struct icp_getch *gc;

		iv = (struct icp_ioc_version *)icp->icp_scr;
		iv->iv_version = htole32(ICP_IOC_NEWEST);
		iv->iv_listents = ICP_MAXBUS;
		iv->iv_firstchan = 0;
		iv->iv_lastchan = ICP_MAXBUS - 1;
		iv->iv_listoffset = htole32(sizeof(*iv));

		if (icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
		    ICP_IOCHAN_RAW_DESC, ICP_INVALID_CHANNEL,
		    sizeof(*iv) + ICP_MAXBUS * sizeof(*ri))) {
			ri = (struct icp_rawioc *)(iv + 1);
			for (j = 0; j < binfo.bi_chan_count; j++, ri++)
				icp->icp_bus_id[j] = ri->ri_procid;
		} else {
			/*
			 * Fall back to the old method.
			 */
			gc = (struct icp_getch *)icp->icp_scr;

			for (j = 0; j < binfo.bi_chan_count; j++) {
				if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
				    ICP_SCSI_CHAN_CNT | ICP_L_CTRL_PATTERN,
				    ICP_IO_CHANNEL | ICP_INVALID_CHANNEL,
				    sizeof(*gc))) {
					aprint_error(
					    "%s: unable to get chan info\n",
					    icp->icp_dv.dv_xname);
					goto bail_out;
				}
				icp->icp_bus_id[j] = gc->gc_scsiid;
			}
		}

		for (j = 0; j < binfo.bi_chan_count; j++) {
			if (icp->icp_bus_id[j] > ICP_MAXID_FC)
				icp->icp_bus_id[j] = ICP_MAXID_FC;

			icpa.icpa_unit = j + ICPA_UNIT_SCSI;
			config_found_sm(&icp->icp_dv, &icpa, icp_print,
			    icp_submatch);
		}
	}

	/*
	 * Attach cache devices.
	 */
	if (!nocache) {
		for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
			if (icp->icp_cdr[j].cd_size == 0)
				continue;

			icpa.icpa_unit = j;
			config_found_sm(&icp->icp_dv, &icpa, icp_print,
			    icp_submatch);
		}
	}

	/*
	 * Start the watchdog.
	 */
	icp_watchdog(icp);

	/*
	 * Count the controller, and we're done!
	 */
	icp_count++;

	return (0);

 bail_out:
	/* "state" counts the setup steps completed; unwind in reverse. */
	if (state > 4) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(icp->icp_dmat,
			    icp->icp_ccbs[j].ic_xfer_map);
		free(icp->icp_ccbs, M_DEVBUF);
	}
	if (state > 3)
		bus_dmamap_unload(icp->icp_dmat, icp->icp_scr_dmamap);
	if (state > 2)
		bus_dmamem_unmap(icp->icp_dmat, icp->icp_scr,
		    ICP_SCRATCH_SIZE);
	if (state > 1)
		bus_dmamem_free(icp->icp_dmat, icp->icp_scr_seg, nsegs);
	bus_dmamap_destroy(icp->icp_dmat, icp->icp_scr_dmamap);

	return (1);
}

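/*
 * icp_watchdog():
 *	Periodic timer: poll the controller for completed commands in
 *	case an interrupt was lost, restart the CCB queues if work is
 *	pending, and re-arm the callout.
 */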
void
icp_watchdog(void *cookie)
{
	struct icp_softc *icp;
	int s;

	icp = cookie;

	s = splbio();
	icp_intr(icp);
	if (ICP_HAS_WORK(icp))
		icp_ccb_enqueue(icp, NULL);
	splx(s);

	callout_reset(&icp->icp_wdog_callout, hz * ICP_WATCHDOG_FREQ,
	    icp_watchdog, icp);
}

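/*
 * icp_print(), icp_submatch():
 *	Autoconfiguration glue for attaching child devices to our
 *	logical units.
 */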
int
icp_print(void *aux, const char *pnp)
{
	struct icp_attach_args *icpa;
	const char *str;

	icpa = (struct icp_attach_args *)aux;

	if (pnp != NULL) {
		if (icpa->icpa_unit < ICPA_UNIT_SCSI)
			str = "block device";
		else
			str = "SCSI channel";
		aprint_normal("%s at %s", str, pnp);
	}
	aprint_normal(" unit %d", icpa->icpa_unit);

	return (UNCONF);
}

int
icp_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct icp_attach_args *icpa;

	icpa = (struct icp_attach_args *)aux;

	if (cf->icpacf_unit != ICPCF_UNIT_DEFAULT &&
	    cf->icpacf_unit != icpa->icpa_unit)
		return (0);

	return (config_match(parent, cf, aux));
}

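/*
 * icp_async_event():
 *	Record an asynchronous event reported by the named service in
 *	the event buffer.  The event layout differs with firmware
 *	revision.
 */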
int
icp_async_event(struct icp_softc *icp, int service)
{

	if (service == ICP_SCREENSERVICE) {
		if (icp->icp_status == ICP_S_MSG_REQUEST) {
			/* XXX */
		}
	} else {
		if ((icp->icp_fw_vers & 0xff) >= 0x1a) {
			icp->icp_evt.size = 0;
			icp->icp_evt.eu.async.ionode = icp->icp_dv.dv_unit;
			icp->icp_evt.eu.async.status = icp->icp_status;
			/*
			 * Severity and event string are filled in by the
			 * hardware interface interrupt handler.
			 */
			printf("%s: %s\n", icp->icp_dv.dv_xname,
			    icp->icp_evt.event_string);
		} else {
			icp->icp_evt.size = sizeof(icp->icp_evt.eu.async);
			icp->icp_evt.eu.async.ionode = icp->icp_dv.dv_unit;
			icp->icp_evt.eu.async.service = service;
			icp->icp_evt.eu.async.status = icp->icp_status;
			icp->icp_evt.eu.async.info = icp->icp_info;
			/* XXXJRT FIX THIS */
			*(u_int32_t *) icp->icp_evt.eu.async.scsi_coord =
			    icp->icp_info2;
		}
		icp_store_event(icp, GDT_ES_ASYNC, service, &icp->icp_evt);
	}

	return (0);
}

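/*
 * icp_intr():
 *	Interrupt handler.  A zero status from the controller means the
 *	interrupt was not for us.  The index values ICP_ASYNCINDEX and
 *	ICP_SPEZINDEX flag asynchronous events and driver errors; any
 *	other value identifies a CCB (offset by 2, matching the
 *	identifiers assigned in icp_init()).  Completed CCBs are marked
 *	and their waiters notified; a busy response re-queues the CCB.
 */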
int
icp_intr(void *cookie)
{
	struct icp_softc *icp;
	struct icp_intr_ctx ctx;
	struct icp_ccb *ic;

	icp = cookie;

	ctx.istatus = (*icp->icp_get_status)(icp);
	if (!ctx.istatus) {
		icp->icp_status = ICP_S_NO_STATUS;
		return (0);
	}

	(*icp->icp_intr)(icp, &ctx);

	icp->icp_status = ctx.cmd_status;
	icp->icp_service = ctx.service;
	icp->icp_info = ctx.info;
	icp->icp_info2 = ctx.info2;

	switch (ctx.istatus) {
	case ICP_ASYNCINDEX:
		icp_async_event(icp, ctx.service);
		return (1);

	case ICP_SPEZINDEX:
		printf("%s: uninitialized or unknown service (%d/%d)\n",
		    icp->icp_dv.dv_xname, ctx.info, ctx.info2);
		icp->icp_evt.size = sizeof(icp->icp_evt.eu.driver);
		icp->icp_evt.eu.driver.ionode = icp->icp_dv.dv_unit;
		icp_store_event(icp, GDT_ES_DRIVER, 4, &icp->icp_evt);
		return (1);
	}

	if ((ctx.istatus - 2) >= icp->icp_nccbs)
		panic("icp_intr: bad command index returned");

	ic = &icp->icp_ccbs[ctx.istatus - 2];
	ic->ic_status = icp->icp_status;

	if ((ic->ic_flags & IC_ALLOCED) == 0) {
		/* XXX ICP's "iir" driver just sends an event here. */
		panic("icp_intr: inactive CCB identified");
	}

	switch (icp->icp_status) {
	case ICP_S_BSY:
#ifdef ICP_DEBUG
		printf("%s: ICP_S_BSY received\n", icp->icp_dv.dv_xname);
#endif
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_INSERT_HEAD(&icp->icp_ucmd_queue, ic, ic_chain);
		else
			SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_queue, ic, ic_chain);
		break;

	default:
		ic->ic_flags |= IC_COMPLETE;

		if ((ic->ic_flags & IC_WAITING) != 0)
			wakeup(ic);
		else if (ic->ic_intr != NULL)
			(*ic->ic_intr)(ic);

		if (ICP_HAS_WORK(icp))
			icp_ccb_enqueue(icp, NULL);

		break;
	}

	return (1);
}

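/*
 * Per-command context for user (ioctl) commands: the gdt_ucmd_t being
 * executed and the number of bytes bounced through the scratch area.
 */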
struct icp_ucmd_ctx {
	gdt_ucmd_t *iu_ucmd;
	u_int32_t iu_cnt;
};

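/*
 * icp_ucmd_intr():
 *	Completion handler for user commands: copy status and any
 *	returned data back to the gdt_ucmd_t, release our claim on the
 *	shared scratch area, and wake the caller sleeping in
 *	icp_ccb_wait_user().
 */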
void
icp_ucmd_intr(struct icp_ccb *ic)
{
	struct icp_softc *icp = (void *) ic->ic_dv;
	struct icp_ucmd_ctx *iu = ic->ic_context;
	gdt_ucmd_t *ucmd = iu->iu_ucmd;

	ucmd->status = icp->icp_status;
	ucmd->info = icp->icp_info;

	if (iu->iu_cnt != 0) {
		bus_dmamap_sync(icp->icp_dmat,
		    icp->icp_scr_dmamap,
		    ICP_SCRATCH_UCMD, iu->iu_cnt,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		memcpy(ucmd->data,
		    icp->icp_scr + ICP_SCRATCH_UCMD, iu->iu_cnt);
	}

	icp->icp_ucmd_ccb = NULL;

	ic->ic_flags |= IC_COMPLETE;
	wakeup(ic);
}

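/*
 * icp_cmd():
 *	Run a single internal command (service/opcode/arguments)
 *	synchronously, polling for completion and retrying up to
 *	ICP_RETRIES times.  Returns non-zero if the controller
 *	reported ICP_S_OK.
 */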
/*
 * NOTE: We assume that it is safe to sleep here!
 */
int
icp_cmd(struct icp_softc *icp, u_int8_t service, u_int16_t opcode,
    u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
	struct icp_ioctlcmd *icmd;
	struct icp_cachecmd *cc;
	struct icp_rawcmd *rc;
	int retries, rv;
	struct icp_ccb *ic;

	retries = ICP_RETRIES;

	do {
		ic = icp_ccb_alloc_wait(icp);
		memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
		ic->ic_cmd.cmd_opcode = htole16(opcode);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				icmd = &ic->ic_cmd.cmd_packet.ic;
				icmd->ic_subfunc = htole16(arg1);
				icmd->ic_channel = htole32(arg2);
				icmd->ic_bufsize = htole32(arg3);
				icmd->ic_addr =
				    htole32(icp->icp_scr_seg[0].ds_addr);

				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_PREWRITE |
				    BUS_DMASYNC_PREREAD);
			} else {
				cc = &ic->ic_cmd.cmd_packet.cc;
				cc->cc_deviceno = htole16(arg1);
				cc->cc_blockno = htole32(arg2);
			}
			break;

		case ICP_SCSIRAWSERVICE:
			rc = &ic->ic_cmd.cmd_packet.rc;
			rc->rc_direction = htole32(arg1);
			rc->rc_bus = arg2;
			rc->rc_target = arg3;
			rc->rc_lun = arg3 >> 8;
			break;
		}

		ic->ic_service = service;
		ic->ic_cmdlen = sizeof(ic->ic_cmd);
		rv = icp_ccb_poll(icp, ic, 10000);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_POSTWRITE |
				    BUS_DMASYNC_POSTREAD);
			}
			break;
		}

		icp_ccb_free(icp, ic);
	} while (rv != 0 && --retries > 0);

	return (icp->icp_status == ICP_S_OK);
}

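/*
 * icp_ucmd():
 *	Execute a user command passed in from the ioctl interface.  The
 *	data transfer is bounced through the ICP_SCRATCH_UCMD region of
 *	the scratch area, so it must fit in GDT_SCRATCH_SZ.
 */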
int
icp_ucmd(struct icp_softc *icp, gdt_ucmd_t *ucmd)
{
	struct icp_ccb *ic;
	struct icp_ucmd_ctx iu;
	u_int32_t cnt;
	int error;

	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			cnt = ucmd->command.cmd_packet.ic.ic_bufsize;
			if (cnt > GDT_SCRATCH_SZ) {
				printf("%s: scratch buffer too small (%d/%d)\n",
				    icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		} else {
			cnt = ucmd->command.cmd_packet.cc.cc_blockcnt *
			    ICP_SECTOR_SIZE;
			if (cnt > GDT_SCRATCH_SZ) {
				printf("%s: scratch buffer too small (%d/%d)\n",
				    icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt);
				return (EINVAL);
			}
		}
	} else {
		cnt = ucmd->command.cmd_packet.rc.rc_sdlen +
		    ucmd->command.cmd_packet.rc.rc_sense_len;
		if (cnt > GDT_SCRATCH_SZ) {
			printf("%s: scratch buffer too small (%d/%d)\n",
			    icp->icp_dv.dv_xname, GDT_SCRATCH_SZ, cnt);
			return (EINVAL);
		}
	}

	iu.iu_ucmd = ucmd;
	iu.iu_cnt = cnt;

	ic = icp_ccb_alloc_wait(icp);
	memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
	ic->ic_cmd.cmd_opcode = htole16(ucmd->command.cmd_opcode);

	if (ucmd->service == ICP_CACHESERVICE) {
		if (ucmd->command.cmd_opcode == ICP_IOCTL) {
			struct icp_ioctlcmd *icmd, *uicmd;

			icmd = &ic->ic_cmd.cmd_packet.ic;
			uicmd = &ucmd->command.cmd_packet.ic;

			icmd->ic_subfunc = htole16(uicmd->ic_subfunc);
			icmd->ic_channel = htole32(uicmd->ic_channel);
			icmd->ic_bufsize = htole32(uicmd->ic_bufsize);
			icmd->ic_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
			    ICP_SCRATCH_UCMD);
		} else {
			struct icp_cachecmd *cc, *ucc;

			cc = &ic->ic_cmd.cmd_packet.cc;
			ucc = &ucmd->command.cmd_packet.cc;

			cc->cc_deviceno = htole16(ucc->cc_deviceno);
			cc->cc_blockno = htole32(ucc->cc_blockno);
			cc->cc_blockcnt = htole32(ucc->cc_blockcnt);
			cc->cc_addr = htole32(0xffffffffU);
			cc->cc_nsgent = htole32(1);
			cc->cc_sg[0].sg_addr =
			    htole32(icp->icp_scr_seg[0].ds_addr +
			    ICP_SCRATCH_UCMD);
			cc->cc_sg[0].sg_len = htole32(cnt);
		}
	} else {
		struct icp_rawcmd *rc, *urc;

		rc = &ic->ic_cmd.cmd_packet.rc;
		urc = &ucmd->command.cmd_packet.rc;

		rc->rc_direction = htole32(urc->rc_direction);
		rc->rc_sdata = htole32(0xffffffffU);
		rc->rc_sdlen = htole32(urc->rc_sdlen);
		rc->rc_clen = htole32(urc->rc_clen);
		memcpy(rc->rc_cdb, urc->rc_cdb, sizeof(rc->rc_cdb));
		rc->rc_target = urc->rc_target;
		rc->rc_lun = urc->rc_lun;
		rc->rc_bus = urc->rc_bus;
		rc->rc_sense_len = htole32(urc->rc_sense_len);
		rc->rc_sense_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr +
		    ICP_SCRATCH_UCMD + urc->rc_sdlen);
		rc->rc_nsgent = htole32(1);
		rc->rc_sg[0].sg_addr =
		    htole32(icp->icp_scr_seg[0].ds_addr + ICP_SCRATCH_UCMD);
		rc->rc_sg[0].sg_len = htole32(cnt - urc->rc_sense_len);
	}

	ic->ic_service = ucmd->service;
	ic->ic_cmdlen = sizeof(ic->ic_cmd);
	ic->ic_context = &iu;

	/*
	 * XXX What units are ucmd->timeout in?  Until we know, we
	 * XXX just pull a number out of thin air.
	 */
	if (__predict_false((error = icp_ccb_wait_user(icp, ic, 30000)) != 0))
		printf("%s: error %d waiting for ucmd to complete\n",
		    icp->icp_dv.dv_xname, error);

	/* icp_ucmd_intr() has updated ucmd. */
	icp_ccb_free(icp, ic);

	return (error);
}

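/*
 * icp_ccb_alloc():
 *	Allocate a CCB from the free list, or return NULL if none are
 *	available.  icp_ccb_alloc_wait() instead sleeps until one is
 *	freed; icp_ccb_free() returns a CCB and wakes any waiter.
 */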
struct icp_ccb *
icp_ccb_alloc(struct icp_softc *icp)
{
	struct icp_ccb *ic;
	int s;

	s = splbio();
	if (__predict_false((ic =
	    SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL)) {
		splx(s);
		return (NULL);
	}
	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
	splx(s);

	ic->ic_flags = IC_ALLOCED;
	return (ic);
}

struct icp_ccb *
icp_ccb_alloc_wait(struct icp_softc *icp)
{
	struct icp_ccb *ic;
	int s;

	s = splbio();
	while ((ic = SIMPLEQ_FIRST(&icp->icp_ccb_freelist)) == NULL) {
		icp->icp_flags |= ICP_F_WAIT_CCB;
		(void) tsleep(&icp->icp_ccb_freelist, PRIBIO, "icpccb", 0);
	}
	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
	splx(s);

	ic->ic_flags = IC_ALLOCED;
	return (ic);
}

void
icp_ccb_free(struct icp_softc *icp, struct icp_ccb *ic)
{
	int s;

	s = splbio();
	ic->ic_flags = 0;
	ic->ic_intr = NULL;
	SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_freelist, ic, ic_chain);
	if (__predict_false((icp->icp_flags & ICP_F_WAIT_CCB) != 0)) {
		icp->icp_flags &= ~ICP_F_WAIT_CCB;
		wakeup(&icp->icp_ccb_freelist);
	}
	splx(s);
}

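/*
 * icp_ccb_enqueue():
 *	Queue the given CCB (if any) and start as many queued commands
 *	as the controller will accept.  User commands take priority,
 *	but only one may be in flight at a time since they share the
 *	scratch area.
 */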
void
icp_ccb_enqueue(struct icp_softc *icp, struct icp_ccb *ic)
{
	int s;

	s = splbio();

	if (ic != NULL) {
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_INSERT_TAIL(&icp->icp_ucmd_queue, ic, ic_chain);
		else
			SIMPLEQ_INSERT_TAIL(&icp->icp_ccb_queue, ic, ic_chain);
	}

	for (;;) {
		if (__predict_false((ic =
		    SIMPLEQ_FIRST(&icp->icp_ucmd_queue)) != NULL)) {
			struct icp_ucmd_ctx *iu = ic->ic_context;
			gdt_ucmd_t *ucmd = iu->iu_ucmd;

			/*
			 * All user-generated commands share the same
			 * scratch space, so if one is already running,
			 * we have to stall the command queue.
			 */
			if (icp->icp_ucmd_ccb != NULL)
				break;
			icp->icp_ucmd_ccb = ic;

			if (iu->iu_cnt != 0) {
				memcpy(icp->icp_scr + ICP_SCRATCH_UCMD,
				    ucmd->data, iu->iu_cnt);
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap,
				    ICP_SCRATCH_UCMD, iu->iu_cnt,
				    BUS_DMASYNC_PREREAD |
				    BUS_DMASYNC_PREWRITE);
			}
		} else if ((ic = SIMPLEQ_FIRST(&icp->icp_ccb_queue)) == NULL)
			break;
		if ((*icp->icp_test_busy)(icp))
			break;
		icp_ccb_submit(icp, ic);
		if (__predict_false((ic->ic_flags & IC_UCMD) != 0))
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ucmd_queue, ic_chain);
		else
			SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_queue, ic_chain);
	}

	splx(s);
}

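/*
 * icp_ccb_map():
 *	Map a data transfer for DMA, filling in the CCB's
 *	scatter/gather list if one was provided; icp_ccb_unmap()
 *	reverses the mapping after the transfer completes.
 */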
int
icp_ccb_map(struct icp_softc *icp, struct icp_ccb *ic, void *data, int size,
    int dir)
{
	struct icp_sg *sg;
	int nsegs, i, rv;
	bus_dmamap_t xfer;

	xfer = ic->ic_xfer_map;

	rv = bus_dmamap_load(icp->icp_dmat, xfer, data, size, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((dir & IC_XFER_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0)
		return (rv);

	nsegs = xfer->dm_nsegs;
	ic->ic_xfer_size = size;
	ic->ic_nsgent = nsegs;
	ic->ic_flags |= dir;
	sg = ic->ic_sg;

	if (sg != NULL) {
		for (i = 0; i < nsegs; i++, sg++) {
			sg->sg_addr = htole32(xfer->dm_segs[i].ds_addr);
			sg->sg_len = htole32(xfer->dm_segs[i].ds_len);
		}
	} else if (nsegs > 1)
		panic("icp_ccb_map: no SG list specified, but nsegs > 1");

	if ((dir & IC_XFER_OUT) != 0)
		i = BUS_DMASYNC_PREWRITE;
	else /* if ((dir & IC_XFER_IN) != 0) */
		i = BUS_DMASYNC_PREREAD;

	bus_dmamap_sync(icp->icp_dmat, xfer, 0, ic->ic_xfer_size, i);
	return (0);
}

void
icp_ccb_unmap(struct icp_softc *icp, struct icp_ccb *ic)
{
	int i;

	if ((ic->ic_flags & IC_XFER_OUT) != 0)
		i = BUS_DMASYNC_POSTWRITE;
	else /* if ((ic->ic_flags & IC_XFER_IN) != 0) */
		i = BUS_DMASYNC_POSTREAD;

	bus_dmamap_sync(icp->icp_dmat, ic->ic_xfer_map, 0, ic->ic_xfer_size, i);
	bus_dmamap_unload(icp->icp_dmat, ic->ic_xfer_map);
}

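/*
 * icp_ccb_poll():
 *	Submit a CCB and busy-wait for it to complete.  Note that the
 *	timo argument is currently ignored; the wait bounds are derived
 *	from ICP_BUSY_WAIT_MS instead.
 */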
int
icp_ccb_poll(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int rv;

	for (timo = ICP_BUSY_WAIT_MS * 100; timo != 0; timo--) {
		if (!(*icp->icp_test_busy)(icp))
			break;
		DELAY(10);
	}
	if (timo == 0) {
		printf("%s: submit: busy\n", icp->icp_dv.dv_xname);
		return (EAGAIN);
	}

	icp_ccb_submit(icp, ic);

	for (timo *= 10; timo != 0; timo--) {
		DELAY(100);
		icp_intr(icp);
		if ((ic->ic_flags & IC_COMPLETE) != 0)
			break;
	}

	if (timo != 0) {
		if (ic->ic_status != ICP_S_OK) {
#ifdef ICP_DEBUG
			printf("%s: request failed; status=0x%04x\n",
			    icp->icp_dv.dv_xname, ic->ic_status);
#endif
			rv = EIO;
		} else
			rv = 0;
	} else {
		printf("%s: command timed out\n", icp->icp_dv.dv_xname);
		rv = EIO;
	}

	while ((*icp->icp_test_busy)(icp) != 0)
		DELAY(10);

	return (rv);
}

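/*
 * icp_ccb_wait():
 *	Queue a CCB and sleep until it completes or the timeout (in
 *	milliseconds) expires.
 */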
int
icp_ccb_wait(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	ic->ic_flags |= IC_WAITING;

	s = splbio();
	icp_ccb_enqueue(icp, ic);
	while ((ic->ic_flags & IC_COMPLETE) == 0) {
		if ((rv = tsleep(ic, PRIBIO, "icpwccb", mstohz(timo))) != 0) {
			splx(s);
			return (rv);
		}
	}
	splx(s);

	if (ic->ic_status != ICP_S_OK) {
		printf("%s: command failed; status=%x\n", icp->icp_dv.dv_xname,
		    ic->ic_status);
		return (EIO);
	}

	return (0);
}

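/*
 * icp_ccb_wait_user():
 *	Like icp_ccb_wait(), but for user commands: marks the CCB with
 *	IC_UCMD and arranges for icp_ucmd_intr() to run on completion.
 */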
int
icp_ccb_wait_user(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	ic->ic_dv = &icp->icp_dv;
	ic->ic_intr = icp_ucmd_intr;
	ic->ic_flags |= IC_UCMD;

	s = splbio();
	icp_ccb_enqueue(icp, ic);
	while ((ic->ic_flags & IC_COMPLETE) == 0) {
		if ((rv = tsleep(ic, PRIBIO, "icpwuccb", mstohz(timo))) != 0) {
			splx(s);
			return (rv);
		}
	}
	splx(s);

	return (0);
}

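/*
 * icp_ccb_submit():
 *	Hand a CCB to the controller: round the command length up to a
 *	32-bit boundary, fill in the board node and command index, and
 *	use the interface-specific hooks to copy and fire the command.
 */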
void
icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic)
{

	ic->ic_cmdlen = (ic->ic_cmdlen + 3) & ~3;

	(*icp->icp_set_sema0)(icp);
	DELAY(10);

	ic->ic_cmd.cmd_boardnode = htole32(ICP_LOCALBOARD);
	ic->ic_cmd.cmd_cmdindex = htole32(ic->ic_ident);

	(*icp->icp_copy_cmd)(icp, ic);
	(*icp->icp_release_event)(icp, ic);
}

/* XXX Global - should be per-controller? XXX */
static gdt_evt_str icp_event_buffer[ICP_MAX_EVENTS];
static int icp_event_oldidx;
static int icp_event_lastidx;

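/*
 * icp_store_event():
 *	Record an event in the ring buffer.  A repeat of the most
 *	recent event only bumps its count and timestamp; otherwise the
 *	buffer advances, overwriting the oldest entry when full.
 */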
gdt_evt_str *
icp_store_event(struct icp_softc *icp, u_int16_t source, u_int16_t idx,
    gdt_evt_data *evt)
{
	gdt_evt_str *e;

	/* no source == no event */
	if (source == 0)
		return (NULL);

	e = &icp_event_buffer[icp_event_lastidx];
	if (e->event_source == source && e->event_idx == idx &&
	    ((evt->size != 0 && e->event_data.size != 0 &&
	      memcmp(&e->event_data.eu, &evt->eu, evt->size) == 0) ||
	     (evt->size == 0 && e->event_data.size == 0 &&
	      strcmp((char *) e->event_data.event_string,
	      (char *) evt->event_string) == 0))) {
		e->last_stamp = time.tv_sec;
		e->same_count++;
	} else {
		if (icp_event_buffer[icp_event_lastidx].event_source != 0) {
			icp_event_lastidx++;
			if (icp_event_lastidx == ICP_MAX_EVENTS)
				icp_event_lastidx = 0;
			if (icp_event_lastidx == icp_event_oldidx) {
				icp_event_oldidx++;
				if (icp_event_oldidx == ICP_MAX_EVENTS)
					icp_event_oldidx = 0;
			}
		}
		e = &icp_event_buffer[icp_event_lastidx];
		e->event_source = source;
		e->event_idx = idx;
		e->first_stamp = e->last_stamp = time.tv_sec;
		e->same_count = 1;
		e->event_data = *evt;
		e->application = 0;
	}
	return (e);
}

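/*
 * icp_read_event():
 *	Copy out the event with the given handle (-1 means the oldest)
 *	and return the handle of the next event, or -1 once the newest
 *	event has been read.
 */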
int
icp_read_event(struct icp_softc *icp, int handle, gdt_evt_str *estr)
{
	gdt_evt_str *e;
	int eindex, s;

	s = splbio();

	if (handle == -1)
		eindex = icp_event_oldidx;
	else
		eindex = handle;

	estr->event_source = 0;

	if (eindex < 0 || eindex >= ICP_MAX_EVENTS) {
		splx(s);
		return (eindex);
	}

	e = &icp_event_buffer[eindex];
	if (e->event_source != 0) {
		if (eindex != icp_event_lastidx) {
			eindex++;
			if (eindex == ICP_MAX_EVENTS)
				eindex = 0;
		} else
			eindex = -1;
		memcpy(estr, e, sizeof(gdt_evt_str));
	}

	splx(s);

	return (eindex);
}

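/*
 * icp_readapp_event():
 *	Return the oldest event not yet seen by the given application
 *	(tracked by a per-event bitmask), marking it as read.
 */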
void
icp_readapp_event(struct icp_softc *icp, u_int8_t application,
    gdt_evt_str *estr)
{
	gdt_evt_str *e;
	int found = 0, eindex, s;

	s = splbio();

	eindex = icp_event_oldidx;
	for (;;) {
		e = &icp_event_buffer[eindex];
		if (e->event_source == 0)
			break;
		if ((e->application & application) == 0) {
			e->application |= application;
			found = 1;
			break;
		}
		if (eindex == icp_event_lastidx)
			break;
		eindex++;
		if (eindex == ICP_MAX_EVENTS)
			eindex = 0;
	}
	if (found)
		memcpy(estr, e, sizeof(gdt_evt_str));
	else
		estr->event_source = 0;

	splx(s);
}

void
icp_clear_events(struct icp_softc *icp)
{
	int s;

	s = splbio();
	icp_event_oldidx = icp_event_lastidx = 0;
	memset(icp_event_buffer, 0, sizeof(icp_event_buffer));
	splx(s);
}