icp.c revision 1.7 1 /* $NetBSD: icp.c,v 1.7 2003/01/31 00:26:30 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Copyright (c) 1999, 2000 Niklas Hallqvist. All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. All advertising materials mentioning features or use of this software
51 * must display the following acknowledgement:
52 * This product includes software developed by Niklas Hallqvist.
53 * 4. The name of the author may not be used to endorse or promote products
54 * derived from this software without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
58 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
59 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
60 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
61 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
62 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
63 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
64 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
65 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
66 *
67 * from OpenBSD: gdt_common.c,v 1.12 2001/07/04 06:43:18 niklas Exp
68 */
69
70 /*
 * This driver would not have been written if it was not for the hardware
 * donations from both ICP-Vortex and ko.neT.  I want to thank them for their support.
73 *
74 * Re-worked for NetBSD by Andrew Doran. Test hardware kindly supplied by
75 * Intel.
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: icp.c,v 1.7 2003/01/31 00:26:30 thorpej Exp $");
80
81 #include <sys/param.h>
82 #include <sys/systm.h>
83 #include <sys/kernel.h>
84 #include <sys/device.h>
85 #include <sys/queue.h>
86 #include <sys/proc.h>
87 #include <sys/buf.h>
88 #include <sys/endian.h>
89 #include <sys/malloc.h>
90 #include <sys/disk.h>
91
92 #include <uvm/uvm_extern.h>
93
94 #include <machine/bswap.h>
95 #include <machine/bus.h>
96
97 #include <dev/pci/pcireg.h>
98 #include <dev/pci/pcivar.h>
99 #include <dev/pci/pcidevs.h>
100
101 #include <dev/ic/icpreg.h>
102 #include <dev/ic/icpvar.h>
103
/* Forward declarations for functions private to this file. */
int	icp_async_event(struct icp_softc *, int);
void	icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic);
void	icp_chain(struct icp_softc *);
int	icp_print(void *, const char *);
int	icp_submatch(struct device *, struct cfdata *, void *);
void	icp_watchdog(void *);
110
111 int
112 icp_init(struct icp_softc *icp, const char *intrstr)
113 {
114 struct icp_attach_args icpa;
115 struct icp_binfo binfo;
116 struct icp_ccb *ic;
117 u_int16_t cdev_cnt;
118 int i, j, state, feat, nsegs, rv, noscsi, nocache;
119
120 state = 0;
121 noscsi = 0;
122 nocache = 0;
123
124 if (intrstr != NULL)
125 aprint_normal("%s: interrupting at %s\n", icp->icp_dv.dv_xname,
126 intrstr);
127
128 SIMPLEQ_INIT(&icp->icp_ccb_queue);
129 SIMPLEQ_INIT(&icp->icp_ccb_freelist);
130 callout_init(&icp->icp_wdog_callout);
131
132 /*
133 * Allocate a scratch area.
134 */
135 if (bus_dmamap_create(icp->icp_dmat, ICP_SCRATCH_SIZE, 1,
136 ICP_SCRATCH_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
137 &icp->icp_scr_dmamap) != 0) {
138 aprint_error("%s: cannot create scratch dmamap\n",
139 icp->icp_dv.dv_xname);
140 return (1);
141 }
142 state++;
143
144 if (bus_dmamem_alloc(icp->icp_dmat, ICP_SCRATCH_SIZE, PAGE_SIZE, 0,
145 icp->icp_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
146 aprint_error("%s: cannot alloc scratch dmamem\n",
147 icp->icp_dv.dv_xname);
148 goto bail_out;
149 }
150 state++;
151
152 if (bus_dmamem_map(icp->icp_dmat, icp->icp_scr_seg, nsegs,
153 ICP_SCRATCH_SIZE, &icp->icp_scr, 0)) {
154 aprint_error("%s: cannot map scratch dmamem\n",
155 icp->icp_dv.dv_xname);
156 goto bail_out;
157 }
158 state++;
159
160 if (bus_dmamap_load(icp->icp_dmat, icp->icp_scr_dmamap, icp->icp_scr,
161 ICP_SCRATCH_SIZE, NULL, BUS_DMA_NOWAIT)) {
162 aprint_error("%s: cannot load scratch dmamap\n",
163 icp->icp_dv.dv_xname);
164 goto bail_out;
165 }
166 state++;
167
168 /*
169 * Allocate and initialize the command control blocks.
170 */
171 ic = malloc(sizeof(*ic) * ICP_NCCBS, M_DEVBUF, M_NOWAIT | M_ZERO);
172 if ((icp->icp_ccbs = ic) == NULL) {
173 aprint_error("%s: malloc() failed\n", icp->icp_dv.dv_xname);
174 goto bail_out;
175 }
176 state++;
177
178 for (i = 0; i < ICP_NCCBS; i++, ic++) {
179 /*
180 * The first two command indexes have special meanings, so
181 * we can't use them.
182 */
183 ic->ic_ident = i + 2;
184 rv = bus_dmamap_create(icp->icp_dmat, ICP_MAX_XFER,
185 ICP_MAXSG, ICP_MAX_XFER, 0,
186 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
187 &ic->ic_xfer_map);
188 if (rv != 0)
189 break;
190 icp->icp_nccbs++;
191 icp_ccb_free(icp, ic);
192 }
193 #ifdef DIAGNOSTIC
194 if (icp->icp_nccbs != ICP_NCCBS)
195 aprint_error("%s: %d/%d CCBs usable\n", icp->icp_dv.dv_xname,
196 icp->icp_nccbs, ICP_NCCBS);
197 #endif
198
199 /*
200 * Initalize the controller.
201 */
202 if (!icp_cmd(icp, ICP_SCREENSERVICE, ICP_INIT, 0, 0, 0)) {
203 aprint_error("%s: screen service init error %d\n",
204 icp->icp_dv.dv_xname, icp->icp_status);
205 goto bail_out;
206 }
207
208 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
209 aprint_error("%s: cache service init error %d\n",
210 icp->icp_dv.dv_xname, icp->icp_status);
211 goto bail_out;
212 }
213
214 icp_cmd(icp, ICP_CACHESERVICE, ICP_UNFREEZE_IO, 0, 0, 0);
215
216 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_MOUNT, 0xffff, 1, 0)) {
217 aprint_error("%s: cache service mount error %d\n",
218 icp->icp_dv.dv_xname, icp->icp_status);
219 goto bail_out;
220 }
221
222 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INIT, ICP_LINUX_OS, 0, 0)) {
223 aprint_error("%s: cache service post-mount init error %d\n",
224 icp->icp_dv.dv_xname, icp->icp_status);
225 goto bail_out;
226 }
227 cdev_cnt = (u_int16_t)icp->icp_info;
228
229 if (!icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_INIT, 0, 0, 0)) {
230 aprint_error("%s: raw service init error %d\n",
231 icp->icp_dv.dv_xname, icp->icp_status);
232 goto bail_out;
233 }
234
235 /*
236 * Set/get raw service features (scatter/gather).
237 */
238 feat = 0;
239 if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_SET_FEAT, ICP_SCATTER_GATHER,
240 0, 0))
241 if (icp_cmd(icp, ICP_SCSIRAWSERVICE, ICP_GET_FEAT, 0, 0, 0))
242 feat = icp->icp_info;
243
244 if ((feat & ICP_SCATTER_GATHER) == 0) {
245 #ifdef DIAGNOSTIC
246 aprint_normal(
247 "%s: scatter/gather not supported (raw service)\n",
248 icp->icp_dv.dv_xname);
249 #endif
250 noscsi = 1;
251 }
252
253 /*
254 * Set/get cache service features (scatter/gather).
255 */
256 feat = 0;
257 if (icp_cmd(icp, ICP_CACHESERVICE, ICP_SET_FEAT, 0,
258 ICP_SCATTER_GATHER, 0))
259 if (icp_cmd(icp, ICP_CACHESERVICE, ICP_GET_FEAT, 0, 0, 0))
260 feat = icp->icp_info;
261
262 if ((feat & ICP_SCATTER_GATHER) == 0) {
263 #ifdef DIAGNOSTIC
264 aprint_normal(
265 "%s: scatter/gather not supported (cache service)\n",
266 icp->icp_dv.dv_xname);
267 #endif
268 nocache = 1;
269 }
270
271 /*
272 * Pull some information from the board and dump.
273 */
274 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL, ICP_BOARD_INFO,
275 ICP_INVALID_CHANNEL, sizeof(struct icp_binfo))) {
276 aprint_error("%s: unable to retrive board info\n",
277 icp->icp_dv.dv_xname);
278 goto bail_out;
279 }
280 memcpy(&binfo, icp->icp_scr, sizeof(binfo));
281
282 aprint_normal(
283 "%s: model <%s>, firmware <%s>, %d channel(s), %dMB memory\n",
284 icp->icp_dv.dv_xname, binfo.bi_type_string, binfo.bi_raid_string,
285 binfo.bi_chan_count, le32toh(binfo.bi_memsize) >> 20);
286
287 /*
288 * Determine the number of devices, and number of openings per
289 * device.
290 */
291 if (!nocache) {
292 for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
293 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_INFO, j, 0,
294 0))
295 continue;
296
297 icp->icp_cdr[j].cd_size = icp->icp_info;
298 icp->icp_ndevs++;
299
300 if (icp_cmd(icp, ICP_CACHESERVICE, ICP_DEVTYPE, j, 0,
301 0))
302 icp->icp_cdr[j].cd_type = icp->icp_info;
303 }
304 }
305
306 if (!noscsi)
307 icp->icp_ndevs += binfo.bi_chan_count;
308
309 if (icp->icp_ndevs != 0)
310 icp->icp_openings =
311 (icp->icp_nccbs - ICP_NCCB_RESERVE) / icp->icp_ndevs;
312 #ifdef ICP_DEBUG
313 aprint_debug("%s: %d openings per device\n", icp->icp_dv.dv_xname,
314 icp->icp_openings);
315 #endif
316
317 /*
318 * Attach SCSI channels.
319 */
320 if (!noscsi) {
321 struct icp_ioc_version *iv;
322 struct icp_rawioc *ri;
323 struct icp_getch *gc;
324
325 iv = (struct icp_ioc_version *)icp->icp_scr;
326 iv->iv_version = htole32(ICP_IOC_NEWEST);
327 iv->iv_listents = ICP_MAXBUS;
328 iv->iv_firstchan = 0;
329 iv->iv_lastchan = ICP_MAXBUS - 1;
330 iv->iv_listoffset = htole32(sizeof(*iv));
331
332 if (icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
333 ICP_IOCHAN_RAW_DESC, ICP_INVALID_CHANNEL,
334 sizeof(*iv) + ICP_MAXBUS * sizeof(*ri))) {
335 ri = (struct icp_rawioc *)(iv + 1);
336 for (j = 0; j < binfo.bi_chan_count; j++, ri++)
337 icp->icp_bus_id[j] = ri->ri_procid;
338 } else {
339 /*
340 * Fall back to the old method.
341 */
342 gc = (struct icp_getch *)icp->icp_scr;
343
344 for (i = 0; j < binfo.bi_chan_count; j++) {
345 if (!icp_cmd(icp, ICP_CACHESERVICE, ICP_IOCTL,
346 ICP_SCSI_CHAN_CNT | ICP_L_CTRL_PATTERN,
347 ICP_IO_CHANNEL | ICP_INVALID_CHANNEL,
348 sizeof(*gc))) {
349 aprint_error(
350 "%s: unable to get chan info",
351 icp->icp_dv.dv_xname);
352 goto bail_out;
353 }
354 icp->icp_bus_id[j] = gc->gc_scsiid;
355 }
356 }
357
358 for (j = 0; j < binfo.bi_chan_count; j++) {
359 if (icp->icp_bus_id[j] > ICP_MAXID_FC)
360 icp->icp_bus_id[j] = ICP_MAXID_FC;
361
362 icpa.icpa_unit = j + ICPA_UNIT_SCSI;
363 config_found_sm(&icp->icp_dv, &icpa, icp_print,
364 icp_submatch);
365 }
366 }
367
368 /*
369 * Attach cache devices.
370 */
371 if (!nocache) {
372 for (j = 0; j < cdev_cnt && j < ICP_MAX_HDRIVES; j++) {
373 if (icp->icp_cdr[j].cd_size == 0)
374 continue;
375
376 icpa.icpa_unit = j;
377 config_found_sm(&icp->icp_dv, &icpa, icp_print,
378 icp_submatch);
379 }
380 }
381
382 /*
383 * Start the watchdog.
384 */
385 icp_watchdog(icp);
386 return (0);
387
388 bail_out:
389 if (state > 4)
390 for (j = 0; j < i; j++)
391 bus_dmamap_destroy(icp->icp_dmat,
392 icp->icp_ccbs[j].ic_xfer_map);
393 if (state > 3)
394 free(icp->icp_ccbs, M_DEVBUF);
395 if (state > 2)
396 bus_dmamap_unload(icp->icp_dmat, icp->icp_scr_dmamap);
397 if (state > 1)
398 bus_dmamem_unmap(icp->icp_dmat, icp->icp_scr,
399 ICP_SCRATCH_SIZE);
400 if (state > 0)
401 bus_dmamem_free(icp->icp_dmat, icp->icp_scr_seg, nsegs);
402 bus_dmamap_destroy(icp->icp_dmat, icp->icp_scr_dmamap);
403
404 return (1);
405 }
406
407 void
408 icp_watchdog(void *cookie)
409 {
410 struct icp_softc *icp;
411 int s;
412
413 icp = cookie;
414
415 s = splbio();
416 icp_intr(icp);
417 if (! SIMPLEQ_EMPTY(&icp->icp_ccb_queue))
418 icp_ccb_enqueue(icp, NULL);
419 splx(s);
420
421 callout_reset(&icp->icp_wdog_callout, hz * ICP_WATCHDOG_FREQ,
422 icp_watchdog, icp);
423 }
424
425 int
426 icp_print(void *aux, const char *pnp)
427 {
428 struct icp_attach_args *icpa;
429 const char *str;
430
431 icpa = (struct icp_attach_args *)aux;
432
433 if (pnp != NULL) {
434 if (icpa->icpa_unit < ICPA_UNIT_SCSI)
435 str = "block device";
436 else
437 str = "SCSI channel";
438 aprint_normal("%s at %s", str, pnp);
439 }
440 aprint_normal(" unit %d", icpa->icpa_unit);
441
442 return (UNCONF);
443 }
444
445 int
446 icp_submatch(struct device *parent, struct cfdata *cf, void *aux)
447 {
448 struct icp_attach_args *icpa;
449
450 icpa = (struct icp_attach_args *)aux;
451
452 if (cf->icpacf_unit != ICPCF_UNIT_DEFAULT &&
453 cf->icpacf_unit != icpa->icpa_unit)
454 return (0);
455
456 return (config_match(parent, cf, aux));
457 }
458
/*
 * Handle an asynchronous event notification from the controller.
 * Currently a stub: the event is acknowledged but not acted upon.
 */
int
icp_async_event(struct icp_softc *icp, int val)
{

	/* XXX */
	return (1);
}
466
467 int
468 icp_intr(void *cookie)
469 {
470 struct icp_softc *icp;
471 struct icp_intr_ctx ctx;
472 struct icp_ccb *ic;
473
474 icp = cookie;
475
476 ctx.istatus = (*icp->icp_get_status)(icp);
477 if (!ctx.istatus) {
478 icp->icp_status = ICP_S_NO_STATUS;
479 return (0);
480 }
481
482 (*icp->icp_intr)(icp, &ctx);
483
484 icp->icp_status = ctx.cmd_status;
485 icp->icp_info = ctx.info;
486 icp->icp_info2 = ctx.info2;
487
488 switch (ctx.istatus) {
489 case ICP_ASYNCINDEX:
490 icp_async_event(icp, ctx.service);
491 return (1);
492
493 case ICP_SPEZINDEX:
494 printf("%s: uninitialized or unknown service (%d/%d)\n",
495 icp->icp_dv.dv_xname, ctx.info, ctx.info2);
496 return (1);
497 }
498
499 if ((ctx.istatus - 2) > icp->icp_nccbs)
500 panic("icp_intr: bad command index returned");
501
502 ic = &icp->icp_ccbs[ctx.istatus - 2];
503 ic->ic_status = icp->icp_status;
504
505 if ((ic->ic_flags & IC_ALLOCED) == 0)
506 panic("icp_intr: inactive CCB identified");
507
508 switch (icp->icp_status) {
509 case ICP_S_BSY:
510 #ifdef ICP_DEBUG
511 printf("%s: ICP_S_BSY received\n", icp->icp_dv.dv_xname);
512 #endif
513 SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_queue, ic, ic_chain);
514 break;
515
516 default:
517 ic->ic_flags |= IC_COMPLETE;
518
519 if ((ic->ic_flags & IC_WAITING) != 0)
520 wakeup(ic);
521 else if (ic->ic_intr != NULL)
522 (*ic->ic_intr)(ic);
523
524 if (! SIMPLEQ_EMPTY(&icp->icp_ccb_queue))
525 icp_ccb_enqueue(icp, NULL);
526
527 break;
528 }
529
530 return (1);
531 }
532
/*
 * Issue a single internal (non-I/O) command to the given firmware service
 * and poll for completion, retrying up to ICP_RETRIES times on failure.
 * arg1..arg3 are interpreted per service/opcode; IOCTL-style cache
 * commands transfer data through the pre-allocated scratch area.
 *
 * Returns non-zero iff the controller reported ICP_S_OK.
 */
int
icp_cmd(struct icp_softc *icp, u_int8_t service, u_int16_t opcode,
	u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
{
	struct icp_ioctlcmd *icmd;
	struct icp_cachecmd *cc;
	struct icp_rawcmd *rc;
	int retries, rv;
	struct icp_ccb *ic;

	retries = ICP_RETRIES;

	do {
		/* NOTE(review): assumes a CCB is always available here;
		 * icp_ccb_alloc() does not check for an empty freelist. */
		ic = icp_ccb_alloc(icp);
		memset(&ic->ic_cmd, 0, sizeof(ic->ic_cmd));
		ic->ic_cmd.cmd_opcode = htole16(opcode);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				/* arg1 = subfunction, arg2 = channel,
				 * arg3 = buffer size; data goes through
				 * the scratch area. */
				icmd = &ic->ic_cmd.cmd_packet.ic;
				icmd->ic_subfunc = htole16(arg1);
				icmd->ic_channel = htole32(arg2);
				icmd->ic_bufsize = htole32(arg3);
				icmd->ic_addr =
				    htole32(icp->icp_scr_seg[0].ds_addr);

				/* Make the scratch area visible to the
				 * controller before submission. */
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_PREWRITE |
				    BUS_DMASYNC_PREREAD);
			} else {
				/* arg1 = device number, arg2 = block. */
				cc = &ic->ic_cmd.cmd_packet.cc;
				cc->cc_deviceno = htole16(arg1);
				cc->cc_blockno = htole32(arg2);
			}
			break;

		case ICP_SCSIRAWSERVICE:
			/* arg1 = direction, arg2 = bus; arg3 packs the
			 * target in the low byte and the LUN above it. */
			rc = &ic->ic_cmd.cmd_packet.rc;
			rc->rc_direction = htole32(arg1);
			rc->rc_bus = arg2;
			rc->rc_target = arg3;
			rc->rc_lun = arg3 >> 8;
			break;
		}

		ic->ic_service = service;
		ic->ic_cmdlen = sizeof(ic->ic_cmd);
		rv = icp_ccb_poll(icp, ic, 10000);

		switch (service) {
		case ICP_CACHESERVICE:
			if (opcode == ICP_IOCTL) {
				/* Pull the controller's results back in. */
				bus_dmamap_sync(icp->icp_dmat,
				    icp->icp_scr_dmamap, 0, arg3,
				    BUS_DMASYNC_POSTWRITE |
				    BUS_DMASYNC_POSTREAD);
			}
			break;
		}

		icp_ccb_free(icp, ic);
	} while (rv != 0 && --retries > 0);

	return (icp->icp_status == ICP_S_OK);
}
600
/*
 * Allocate a CCB from the free list and mark it in use.
 *
 * NOTE(review): no check for an empty freelist; if icp_ccb_freelist is
 * exhausted this dereferences NULL.  Callers apparently rely on the
 * openings accounting in icp_init() to prevent that -- confirm.
 */
struct icp_ccb *
icp_ccb_alloc(struct icp_softc *icp)
{
	struct icp_ccb *ic;
	int s;

	s = splbio();
	ic = SIMPLEQ_FIRST(&icp->icp_ccb_freelist);
	SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_freelist, ic_chain);
	splx(s);

	ic->ic_flags = IC_ALLOCED;
	return (ic);
}
615
616 void
617 icp_ccb_free(struct icp_softc *icp, struct icp_ccb *ic)
618 {
619 int s;
620
621 s = splbio();
622 ic->ic_flags = 0;
623 ic->ic_intr = NULL;
624 SIMPLEQ_INSERT_HEAD(&icp->icp_ccb_freelist, ic, ic_chain);
625 splx(s);
626 }
627
/*
 * Queue a CCB for submission (ic may be NULL to just drain), then feed
 * queued CCBs to the controller until it reports busy.
 *
 * Note the ordering: the head CCB is submitted *before* being removed
 * from the queue, so it stays findable until the hand-off completes.
 */
void
icp_ccb_enqueue(struct icp_softc *icp, struct icp_ccb *ic)
{
	int s;

	s = splbio();

	if (ic != NULL)
		SIMPLEQ_INSERT_TAIL(&icp->icp_ccb_queue, ic, ic_chain);

	while ((ic = SIMPLEQ_FIRST(&icp->icp_ccb_queue)) != NULL) {
		/* Stop feeding as soon as the controller is busy. */
		if ((*icp->icp_test_busy)(icp))
			break;
		icp_ccb_submit(icp, ic);
		SIMPLEQ_REMOVE_HEAD(&icp->icp_ccb_queue, ic_chain);
	}

	splx(s);
}
647
/*
 * Map a data transfer for the given CCB: load the DMA map, fill in the
 * CCB's scatter/gather list (if it has one), and pre-sync the buffer.
 * "dir" is IC_XFER_IN and/or IC_XFER_OUT.  Returns 0 on success or the
 * bus_dmamap_load() error code.
 */
int
icp_ccb_map(struct icp_softc *icp, struct icp_ccb *ic, void *data, int size,
	    int dir)
{
	struct icp_sg *sg;
	int nsegs, i, rv;
	bus_dmamap_t xfer;

	xfer = ic->ic_xfer_map;

	rv = bus_dmamap_load(icp->icp_dmat, xfer, data, size, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((dir & IC_XFER_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0)
		return (rv);

	nsegs = xfer->dm_nsegs;
	ic->ic_xfer_size = size;
	ic->ic_nsgent = nsegs;
	ic->ic_flags |= dir;
	sg = ic->ic_sg;

	if (sg != NULL) {
		/* Copy each DMA segment into the controller's S/G list,
		 * converting to little-endian as the firmware expects. */
		for (i = 0; i < nsegs; i++, sg++) {
			sg->sg_addr = htole32(xfer->dm_segs[i].ds_addr);
			sg->sg_len = htole32(xfer->dm_segs[i].ds_len);
		}
	} else if (nsegs > 1)
		panic("icp_ccb_map: no SG list specified, but nsegs > 1");

	/* Sync direction follows the transfer direction; when both are
	 * set, OUT (PREWRITE) wins. */
	if ((dir & IC_XFER_OUT) != 0)
		i = BUS_DMASYNC_PREWRITE;
	else /* if ((dir & IC_XFER_IN) != 0) */
		i = BUS_DMASYNC_PREREAD;

	bus_dmamap_sync(icp->icp_dmat, xfer, 0, ic->ic_xfer_size, i);
	return (0);
}
686
687 void
688 icp_ccb_unmap(struct icp_softc *icp, struct icp_ccb *ic)
689 {
690 int i;
691
692 if ((ic->ic_flags & IC_XFER_OUT) != 0)
693 i = BUS_DMASYNC_POSTWRITE;
694 else /* if ((ic->ic_flags & IC_XFER_IN) != 0) */
695 i = BUS_DMASYNC_POSTREAD;
696
697 bus_dmamap_sync(icp->icp_dmat, ic->ic_xfer_map, 0, ic->ic_xfer_size, i);
698 bus_dmamap_unload(icp->icp_dmat, ic->ic_xfer_map);
699 }
700
/*
 * Submit a CCB and busy-wait (polling) for its completion.  Returns 0 on
 * success, EAGAIN if the controller never went idle for submission, or
 * EIO on command failure or timeout.
 *
 * NOTE(review): the "timo" parameter is never used -- it is immediately
 * overwritten by ICP_BUSY_WAIT_MS * 100 below; confirm whether callers'
 * timeout values (e.g. icp_cmd's 10000) were meant to be honored.
 */
int
icp_ccb_poll(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int rv;

	/* Wait for the controller to accept a new command. */
	for (timo = ICP_BUSY_WAIT_MS * 100; timo != 0; timo--) {
		if (!(*icp->icp_test_busy)(icp))
			break;
		DELAY(10);
	}
	if (timo == 0) {
		printf("%s: submit: busy\n", icp->icp_dv.dv_xname);
		return (EAGAIN);
	}

	icp_ccb_submit(icp, ic);

	/* Poll for completion, reaping status via icp_intr(). */
	for (timo *= 10; timo != 0; timo--) {
		DELAY(100);
		icp_intr(icp);
		if ((ic->ic_flags & IC_COMPLETE) != 0)
			break;
	}

	if (timo != 0) {
		if (ic->ic_status != ICP_S_OK) {
#ifdef ICP_DEBUG
			printf("%s: request failed; status=0x%04x\n",
			    icp->icp_dv.dv_xname, ic->ic_status);
#endif
			rv = EIO;
		} else
			rv = 0;
	} else {
		printf("%s: command timed out\n", icp->icp_dv.dv_xname);
		rv = EIO;
	}

	/* Drain: wait for the controller to go idle before returning. */
	while ((*icp->icp_test_busy)(icp) != 0)
		DELAY(10);

	return (rv);
}
744
/*
 * Queue a CCB and sleep until it completes or "timo" milliseconds pass.
 * The interrupt handler wakes us via wakeup(ic) because IC_WAITING is
 * set.  Returns 0 on success, the tsleep() error (e.g. EWOULDBLOCK) on
 * timeout, or EIO if the controller reported a bad status.
 */
int
icp_ccb_wait(struct icp_softc *icp, struct icp_ccb *ic, int timo)
{
	int s, rv;

	ic->ic_flags |= IC_WAITING;

	s = splbio();
	icp_ccb_enqueue(icp, ic);
	if ((rv = tsleep(ic, PRIBIO, "icpwccb", mstohz(timo))) != 0) {
		splx(s);
		return (rv);
	}
	splx(s);

	if (ic->ic_status != ICP_S_OK) {
		printf("%s: command failed; status=%x\n", icp->icp_dv.dv_xname,
		    ic->ic_status);
		return (EIO);
	}

	return (0);
}
768
/*
 * Hand a CCB to the controller: pad the command length to a 32-bit
 * boundary, fill in the routing fields, copy the command into the
 * controller's memory via the bus front-end, and fire the doorbell.
 * Caller must ensure the controller is not busy.
 */
void
icp_ccb_submit(struct icp_softc *icp, struct icp_ccb *ic)
{

	/* Round the command length up to a multiple of 4 bytes. */
	ic->ic_cmdlen = (ic->ic_cmdlen + 3) & ~3;

	(*icp->icp_set_sema0)(icp);
	DELAY(10);

	ic->ic_cmd.cmd_boardnode = htole32(ICP_LOCALBOARD);
	ic->ic_cmd.cmd_cmdindex = htole32(ic->ic_ident);

	(*icp->icp_copy_cmd)(icp, ic);
	(*icp->icp_release_event)(icp, ic);
}
784