/*	$NetBSD: iop.c,v 1.4 2000/11/14 18:48:14 thorpej Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include "opt_i2o.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/pool.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#define IOP_INL(x, o)	\
	bus_space_read_4((x)->sc_iot, (x)->sc_ioh, (o))
#define IOP_OUTL(x, o, d)	\
	bus_space_write_4((x)->sc_iot, (x)->sc_ioh, (o), (d))

/*
 * Busy-wait up to `ms' milliseconds, in 100us steps, for `cond' to become
 * true.  No trailing semicolon, so POLL() behaves as a single statement.
 */
#define POLL(ms, cond)	\
do {	\
	int i;	\
	for (i = (ms) * 10; i; i--) {	\
		if (cond)	\
			break;	\
		DELAY(100);	\
	}	\
} while (/* CONSTCOND */0)
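
/*
 * Example (illustrative only): wait up to one second for the outbound
 * FIFO interrupt bit to assert.
 *
 *	POLL(1000, (IOP_INL(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0);
 */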

#ifdef I2ODEBUG
#define DPRINTF(x)	printf x
#else
#define DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define IFVERBOSE(x)	x
#else
#define IFVERBOSE(x)
#endif

#define IOP_MSGHASH_NBUCKETS	64
#define IOP_MSGHASH(tctx)	(&iop_msghashtbl[(tctx) & iop_msghash])
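
/*
 * For example, with the default 64 buckets the hash mask is 63, so a
 * transaction context of 0x2a6 lands in bucket 0x2a6 & 63 == 38;
 * iop_poll() walks that bucket to find the originating wrapper.
 */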

static TAILQ_HEAD(iop_msghashhead, iop_msg) *iop_msghashtbl;
static u_long	iop_msghash;
static void	*iop_sdh;
static struct pool *iop_msgpool;

extern struct cfdriver iop_cd;

#define IC_CONFIGURE	0x01	/* Try to configure devices of this class */
#define IC_HIGHLEVEL	0x02	/* This is a `high level' device class */

struct iop_class {
	int	ic_class;
	int	ic_flags;
#ifdef I2OVERBOSE
	const char	*ic_caption;
#endif
};

static const struct iop_class iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		IFVERBOSE("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		IFVERBOSE("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_HIGHLEVEL,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_HIGHLEVEL,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		IFVERBOSE("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		IFVERBOSE("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		IFVERBOSE("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		IFVERBOSE("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char *iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static void	iop_config_interrupts(struct device *);
static void	iop_config_interrupts0(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);
static int	iop_vendor_print(void *, const char *);

static int	iop_alloc_dmamem(struct iop_softc *, int, bus_dmamap_t *,
			caddr_t *, bus_addr_t *);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_poll(struct iop_softc *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_status_get(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct iop_msg *,
			struct i2o_reply *);
#endif

/*
 * Initialise the adapter.
 */
int
iop_init(struct iop_softc *sc, const char *intrstr)
{
	int rv;
	u_int32_t mask;
	static int again;
	char ident[64];

	if (again == 0) {
		/* Create the shared message wrapper pool and hash. */
		iop_msgpool = pool_create(sizeof(struct iop_msg), 0, 0, 0,
		    "ioppl", 0, NULL, NULL, M_DEVBUF);
		iop_msghashtbl = hashinit(IOP_MSGHASH_NBUCKETS, HASH_TAILQ,
		    M_DEVBUF, M_NOWAIT, &iop_msghash);
		again = 1;
	}

	/*
	 * Reset the IOP and request status.
	 */
	printf("I2O adapter");
	if ((rv = iop_reset(sc)) != 0)
		return (rv);
	if ((rv = iop_status_get(sc)) != 0) {
		printf("%s: not responding\n", sc->sc_dv.dv_xname);
		return (rv);
	}

	iop_strvis(sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>", ident);

	/* Allocate reply frames and initialise the IOP's outbound FIFO. */
	sc->sc_maxreplycnt = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxreplycnt > IOP_MAX_HW_REPLYCNT)
		sc->sc_maxreplycnt = IOP_MAX_HW_REPLYCNT;
	if ((rv = iop_ofifo_init(sc)) != 0)
		return (rv);

	/* Bring the IOP online. */
	if ((rv = iop_hrt_get(sc)) != 0)
		return (rv);
	if ((rv = iop_systab_set(sc)) != 0)
		return (rv);
	if ((rv = iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE,
	    IOP_ICTX)) != 0)
		return (rv);

	/* Defer configuration of children until interrupts are working. */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdownhook. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = IOP_INL(sc, IOP_REG_INTR_MASK);
	IOP_OUTL(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	printf("\n");
	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

	sc->sc_maxqueuecnt = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxqueuecnt > IOP_MAX_HW_QUEUECNT)
		sc->sc_maxqueuecnt = IOP_MAX_HW_QUEUECNT;

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname,
	    sc->sc_maxqueuecnt, le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxreplycnt, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	SIMPLEQ_INIT(&sc->sc_queue);
	return (0);
}
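
/*
 * A bus front-end is expected to map the device, fill in sc_iot/sc_ioh,
 * sc_dmat and the address/size fields, hook up iop_intr() at IPL_BIO and
 * then call iop_init().  A minimal sketch for a hypothetical PCI
 * front-end follows; names and error handling are illustrative only.
 */
#if 0
static void
iop_pci_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct iop_softc *sc = (struct iop_softc *)self;
	pci_intr_handle_t ih;
	const char *intrstr;
	void *ih_cookie;

	/* ... map BAR 0; fill in sc->sc_iot, sc->sc_ioh, sc->sc_dmat,
	   sc->sc_memaddr and sc->sc_memsize ... */

	if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
	    pa->pa_intrline, &ih) != 0) {
		printf("%s: couldn't map interrupt\n", self->dv_xname);
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	ih_cookie = pci_intr_establish(pa->pa_pc, ih, IPL_BIO, iop_intr, sc);
	if (ih_cookie == NULL) {
		printf("%s: couldn't establish interrupt\n", self->dv_xname);
		return;
	}

	iop_init(sc, intrstr);
}
#endif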

/*
 * Attempt to match and attach child devices.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc;
	int rv;

	sc = (struct iop_softc *)self;

	/* Read the LCT. */
	if ((rv = iop_lct_get(sc)) != 0)
		printf("%s: failed to read LCT (%d)\n", sc->sc_dv.dv_xname, rv);

	/* Attempt to match and attach a product-specific extension. */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);

	/*
	 * Match and attach child devices.  We do two runs: the first to
	 * match "high level" devices, and the second to match "low level"
	 * devices (low level devices may be parents of high level devices).
	 *
	 * XXX sc_lctmap shouldn't be allocated here.
	 */
	sc->sc_lctmap = malloc(sc->sc_nlctent * sizeof(int8_t), M_DEVBUF,
	    M_NOWAIT);
	if (sc->sc_lctmap == NULL) {
		printf("%s: unable to allocate LCT map\n", sc->sc_dv.dv_xname);
		return;
	}
	memset(sc->sc_lctmap, 0, sc->sc_nlctent * sizeof(int8_t));
	iop_config_interrupts0(sc, IC_CONFIGURE | IC_HIGHLEVEL,
	    IC_CONFIGURE | IC_HIGHLEVEL);
	iop_config_interrupts0(sc, IC_CONFIGURE, IC_CONFIGURE | IC_HIGHLEVEL);
}

/*
 * Attempt to match and attach device classes with the specified flag
 * pattern.
 */
static void
iop_config_interrupts0(struct iop_softc *sc, int pat, int mask)
{
	struct iop_attach_args ia;
	const struct i2o_lct_entry *le;
	int i, j, nent, doit;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		if ((sc->sc_lctmap[i] & IOP_LCTMAP_INUSE) != 0)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		doit = 0;

		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (ia.ia_class == iop_class[j].ic_class &&
			    (iop_class[j].ic_flags & mask) == pat) {
				doit = 1;
				break;
			}

		/*
		 * Try to configure the device if the pattern matches.  If
		 * the device is matched, mark it as being in use.
		 */
		if (doit) {
			ia.ia_tid = le32toh(le->localtid) & 4095;
			if (config_found_sm(&sc->sc_dv, &ia, iop_print,
			    iop_submatch))
				sc->sc_lctmap[i] |= IOP_LCTMAP_INUSE;
		}
	}
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else
	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

static int
iop_vendor_print(void *aux, const char *pnp)
{

	if (pnp != NULL)
		printf("vendor specific extension at %s", pnp);
	return (UNCONF);
}

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices... ");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX);
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX);
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve adapter status.
 */
static int
iop_status_get(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_exec_status_get *mb;
	int rv, s;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
		return (rv);

	mb = (struct i2o_exec_status_get *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mb->reserved[0] = 0;
	mb->reserved[1] = 0;
	mb->reserved[2] = 0;
	mb->reserved[3] = 0;
	mb->addrlow = kvtop((caddr_t)&sc->sc_status);	/* XXX */
	mb->addrhigh = 0;
	mb->length = sizeof(sc->sc_status);

	s = splbio();

	if ((rv = iop_msg_send(sc, im, 0)) != 0) {
		splx(s);
		iop_msg_free(sc, NULL, im);
		return (rv);
	}

	/* XXX Wait for the IOP to DMA the status block to us. */
	POLL(2500, *((volatile u_char *)&sc->sc_status.syncbyte) == 0xff);

	splx(s);
	iop_msg_free(sc, NULL, im);

	if (*((volatile u_char *)&sc->sc_status.syncbyte) != 0xff)
		return (EIO);
	return (0);
}

/*
 * Allocate DMA safe memory.
 */
static int
iop_alloc_dmamem(struct iop_softc *sc, int size, bus_dmamap_t *dmamap,
    caddr_t *kva, bus_addr_t *paddr)
{
	int rseg, rv;
	bus_dma_segment_t seg;

	if ((rv = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: dmamem_alloc = %d\n", sc->sc_dv.dv_xname, rv);
		return (rv);
	}

	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size, kva,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: dmamem_map = %d\n", sc->sc_dv.dv_xname, rv);
		return (rv);
	}

	/* One segment of up to `size' bytes (nsegments precedes maxsegsz). */
	if ((rv = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, dmamap)) != 0) {
		printf("%s: dmamap_create = %d\n", sc->sc_dv.dv_xname, rv);
		return (rv);
	}

	if ((rv = bus_dmamap_load(sc->sc_dmat, *dmamap, *kva, size,
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: dmamap_load = %d\n", sc->sc_dv.dv_xname, rv);
		return (rv);
	}

	/*
	 * Return the bus address of the (single) loaded segment.  Use the
	 * caller's map, not sc_rep_dmamap: this routine is generic.
	 */
	*paddr = (*dmamap)->dm_segs[0].ds_addr;
	return (0);
}
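
/*
 * No matching release path is provided above; a minimal sketch of one
 * (hypothetical, assuming the map still holds the single loaded segment)
 * might look like this:
 */
#if 0
static void
iop_free_dmamem(struct iop_softc *sc, bus_dmamap_t dmamap, caddr_t kva,
    int size)
{
	bus_dma_segment_t seg;

	/* Recover the physical segment from the loaded map. */
	seg = dmamap->dm_segs[0];

	bus_dmamap_unload(sc->sc_dmat, dmamap);
	bus_dmamap_destroy(sc->sc_dmat, dmamap);
	bus_dmamem_unmap(sc->sc_dmat, kva, size);
	bus_dmamem_free(sc->sc_dmat, &seg, 1);
}
#endif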

/*
 * Initialise and populate the adapter's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	struct iop_msg *im;
	volatile u_int32_t status;
	bus_addr_t addr;
	struct i2o_exec_outbound_init *mb;
	int i, rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
		return (rv);

	mb = (struct i2o_exec_outbound_init *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->pagesize = PAGE_SIZE;
	mb->flags = 0x80 | ((IOP_MAX_MSG_SIZE >> 2) << 16);

	status = 0;
	iop_msg_map(sc, im, (void *)&status, sizeof(status), 0);

	if ((rv = iop_msg_send(sc, im, 0)) != 0) {
		iop_msg_free(sc, NULL, im);
		return (rv);
	}

	DELAY(500000);	/* XXX */

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);

	if (status != I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS &&
	    status != I2O_EXEC_OUTBOUND_INIT_COMPLETE) {
		printf("%s: outbound queue failed to initialize (%08x)\n",
		    sc->sc_dv.dv_xname, status);
		return (ENXIO);
	}

#ifdef I2ODEBUG
	if (status != I2O_EXEC_OUTBOUND_INIT_COMPLETE)
		printf("%s: outbound FIFO init not complete yet\n",
		    sc->sc_dv.dv_xname);
#endif

	/* If we need to allocate DMA safe memory, do it now. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxreplycnt * IOP_MAX_MSG_SIZE;
		iop_alloc_dmamem(sc, sc->sc_rep_size, &sc->sc_rep_dmamap,
		    &sc->sc_rep, &sc->sc_rep_phys);
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxreplycnt, addr = sc->sc_rep_phys; i; i--) {
		IOP_OUTL(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		DELAY(10);
		addr += IOP_MAX_MSG_SIZE;
	}

	return (0);
}
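
/*
 * The reply area set up above is simply sc_maxreplycnt contiguous frames
 * of IOP_MAX_MSG_SIZE bytes: frame n starts at sc_rep_phys +
 * n * IOP_MAX_MSG_SIZE.  iop_poll() recovers a frame's offset from the
 * MFA it pops off the outbound FIFO by subtracting sc_rep_phys.
 */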

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mb;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_exec_hrt_get *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, hrt, size, 0);
	rv = iop_msg_send(sc, im, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.  Once read, not much is done with
 * the HRT; it's stored for later retrieval by a user-space program.  Reading
 * the HRT is a required part of the IOP initialisation sequence.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	if ((rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr))) != 0)
		return (rv);

	/* The entry count arrives little-endian; convert to host order. */
	size = (le32toh(hrthdr.nentries) - 1) * sizeof(struct i2o_hrt_entry) +
	    sizeof(struct i2o_hrt);
	if ((hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT)) == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  Must be called with interrupts enabled.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	memset(lct, 0, size);

	mb = (struct i2o_exec_lct_notify *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->classid = I2O_CLASS_ANY;
	mb->changeindicator = 0;

	iop_msg_map(sc, im, lct, size, 0);
	if ((rv = iop_msg_enqueue(sc, im)) == 0)
		rv = iop_msg_wait(sc, im, 1000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.  Must be called with
 * interrupts enabled.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int size, rv;
	struct i2o_lct lcthdr, *lct;

	/* Determine LCT size. */
	if ((rv = iop_lct_get0(sc, &lcthdr, sizeof(lcthdr))) != 0)
		return (rv);

	size = le16toh(lcthdr.tablesize) << 2;
	if ((lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK)) == NULL)
		return (ENOMEM);

	/* Request the entire LCT. */
	if ((rv = iop_lct_get0(sc, lct, size)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	/* Swap in the new LCT. */
	if ((rv = iop_lct_lock(sc)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	iop_lct_unlock(sc);
	return (0);
}

/*
 * Request the specified parameter group from the target.  Must be called
 * with interrupts enabled.
 */
int
iop_params_get(struct iop_softc *sc, int tid, int group, void *buf, int size)
{
	struct iop_msg *im;
	struct i2o_util_params_get *mb;
	int rv;
	struct {
		struct i2o_param_op_list_header olh;
		struct i2o_param_op_all_template oat;
	} req;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_util_params_get *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_params_get);
	mb->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->flags = 0;

	req.olh.count = htole16(1);
	req.olh.reserved = htole16(0);
	req.oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	req.oat.fieldcount = htole16(0xffff);
	req.oat.group = htole16(group);

	iop_msg_map(sc, im, &req, sizeof(req), 1);
	iop_msg_map(sc, im, buf, size, 0);

	if ((rv = iop_msg_enqueue(sc, im)) == 0)
		rv = iop_msg_wait(sc, im, 1000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}
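
/*
 * Example (a minimal sketch; the group number and reply layout are the
 * caller's affair): an OSM might fetch the I2O "device identity"
 * parameter group, 0xf100 in the I2O specification, for its target and
 * parse the result out of pbuf.
 */
#if 0
char pbuf[256];

if (iop_params_get(sc, tid, 0xf100, pbuf, sizeof(pbuf)) != 0)
	printf("%s: unable to read device identity\n", sc->sc_dv.dv_xname);
#endif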

/*
 * Execute a simple command (no parameters) and poll on completion.
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx)
{
	struct iop_msg *im;
	struct i2o_msg *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_msg *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_msg);
	mb->msgfunc = I2O_MSGFUNC(tid, function);
	mb->msgictx = ictx;
	mb->msgtctx = im->im_tctx;

	rv = iop_msg_send(sc, im, 5000);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Post the system table to the IOP.  We don't maintain a full system table
 * as we should - it describes only "this" IOP and is built on the stack
 * here for the one time that we will use it: posting to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_iop_entry systab;
	struct iop_msg *im;
	u_int32_t mema[2], ioa[2];
	struct i2o_exec_sys_tab_set *mb;
	int rv;

	memset(&systab, 0, sizeof(systab));
	systab.orgid = sc->sc_status.orgid;
	systab.iopid = htole32(le32toh(sc->sc_status.iopid) & 4095);
	systab.segnumber = sc->sc_status.segnumber;
	systab.iopcaps = sc->sc_status.iopcaps;
	systab.inboundmsgframesize = sc->sc_status.inboundmframesize;
	systab.inboundmsgportaddresslow =
	    htole32(sc->sc_memaddr + IOP_REG_IFIFO);

	/* Record private memory and I/O spaces. */
	mema[0] = htole32(sc->sc_memaddr);
	mema[1] = htole32(sc->sc_memsize);
	ioa[0] = htole32(sc->sc_ioaddr);
	ioa[1] = htole32(sc->sc_iosize);

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_exec_sys_tab_set *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->iopid = (2 + sc->sc_dv.dv_unit) & 4095;
	mb->segnumber = le32toh(sc->sc_status.segnumber) & 4095;

	iop_msg_map(sc, im, &systab, sizeof(systab), 1);
	iop_msg_map(sc, im, mema, sizeof(mema), 1);
	iop_msg_map(sc, im, ioa, sizeof(ioa), 1);

	rv = iop_msg_send(sc, im, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Reset the adapter.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	struct iop_msg *im;
	volatile u_int32_t sw;
	u_int32_t mfa;
	struct i2o_exec_iop_reset *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
		return (rv);

	sw = 0;

	mb = (struct i2o_exec_iop_reset *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mb->reserved[0] = 0;
	mb->reserved[1] = 0;
	mb->reserved[2] = 0;
	mb->reserved[3] = 0;
	mb->statuslow = kvtop((caddr_t)&sw);	/* XXX */
	mb->statushigh = 0;

	/* Free the wrapper on both paths; don't leak it on error. */
	rv = iop_msg_send(sc, im, 0);
	iop_msg_free(sc, NULL, im);
	if (rv != 0)
		return (rv);

	POLL(2500, sw != 0);	/* XXX */
	if (sw != I2O_RESET_IN_PROGRESS) {
		printf("%s: reset rejected\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 5 seconds for
	 * the inbound queue to become responsive.
	 */
	DELAY(1000);
	POLL(5000, (mfa = IOP_INL(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.
 */
int
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	int i;

	/* Find a free slot.  If no slots are free, puke. */
	for (i = 0; i < IOP_MAX_INITIATORS; i++)
		if (sc->sc_itab[i] == NULL)
			break;
	if (i == IOP_MAX_INITIATORS)
		return (ENOMEM);

#ifdef notyet
	ii->ii_maxqueuecnt = IOP_MAX_PI_QUEUECNT;
	ii->ii_queuecnt = 0;
#endif
	ii->ii_ictx = i + 1;
	sc->sc_itab[i] = ii;
	return (0);
}
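
/*
 * A minimal sketch of OSM attachment (a hypothetical "xosm" driver;
 * names are illustrative only): fill in the callback fields that
 * iop_poll() dereferences, then register with the parent IOP.
 */
#if 0
static void
xosm_attach(struct device *parent, struct device *self, void *aux)
{
	struct xosm_softc *sc = (struct xosm_softc *)self;
	struct iop_softc *iop = (struct iop_softc *)parent;

	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = xosm_intr;	/* reply handler */
	sc->sc_ii.ii_flags = 0;		/* track replies by wrapper */

	if (iop_initiator_register(iop, &sc->sc_ii) != 0)
		printf("%s: unable to register initiator\n", self->dv_xname);
}
#endif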

/*
 * Unregister an initiator.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{

#ifdef notyet
#ifdef I2ODEBUG
	if (ii->ii_queuecnt != 0)
		panic("iop_initiator_unregister: busy");
#endif
#endif
	sc->sc_itab[ii->ii_ictx - 1] = NULL;
}

/*
 * Attempt to read a reply frame from the adapter.  If we get one, deal with
 * it.
 */
static int
iop_poll(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct iop_initiator *ii;
	u_int32_t rmfa;
	u_int off, ictx, tctx, status;

	/* Double read to account for IOP bug. */
	if ((rmfa = IOP_INL(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY &&
	    (rmfa = IOP_INL(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY)
		return (-1);

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/*
	 * Perform reply queue DMA synchronisation.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off, IOP_MAX_MSG_SIZE,
	    BUS_DMASYNC_POSTREAD);
	if (--sc->sc_stat.is_cur_hwqueue != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_poll: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx > IOP_MAX_INITIATORS)
		panic("%s: bad ictx returned", sc->sc_dv.dv_xname);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
#ifdef I2ODEBUG
		if (sc->sc_itab == NULL)
			panic("iop_poll: itab == NULL; ictx %d", ictx);
#endif
		if ((ii = sc->sc_itab[ictx - 1]) == NULL)
			panic("%s: bad ictx returned", sc->sc_dv.dv_xname);
	}

	status = rb->reqstatus;

	if (ii == NULL || (ii->ii_flags & II_DISCARD) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		tctx = le32toh(rb->msgtctx);
		im = TAILQ_FIRST(IOP_MSGHASH(tctx));
		for (; im != NULL; im = TAILQ_NEXT(im, im_hash))
			if (im->im_tctx == tctx)
				break;
		if (im == NULL || (im->im_flags & IM_ALLOCED) == 0)
			panic("%s: bad tctx returned (%x, %p)",
			    sc->sc_dv.dv_xname, tctx, im);
#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: duplicate reply", sc->sc_dv.dv_xname);
#endif

		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (rb->reqstatus != 0)
			iop_reply_print(sc, im, rb);
#endif
		/* Notify the initiator. */
		if ((im->im_flags & IM_WAITING) != 0)
			wakeup(im);
		if ((im->im_flags & IM_NOINTR) == 0)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	/* Return the reply frame to the IOP's outbound FIFO. */
	IOP_OUTL(sc, IOP_REG_OFIFO, rmfa);

	/* Run the queue. */
	if ((im = SIMPLEQ_FIRST(&sc->sc_queue)) != NULL)
		iop_msg_enqueue(sc, im);

	return (status);
}

/*
 * Handle an interrupt from the adapter.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	int forus;

	sc = arg;
	forus = 0;

	/* Handle replies and dispatch enqueued messages. */
	while ((IOP_INL(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
		iop_poll(sc);
		forus = 1;
	}

#ifdef I2ODEBUG
	if (!forus)
		printf("%s: spurious intr\n", sc->sc_dv.dv_xname);
#endif
	return (forus);
}

/*
 * Allocate a message wrapper.
 */
int
iop_msg_alloc(struct iop_softc *sc, struct iop_initiator *ii,
    struct iop_msg **imp, int flags)
{
	struct iop_msg *im;
	static int tctx = 666;
	int s, rv, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();	/* XXX */

	if (ii != NULL) {
#ifdef notyet
		/*
		 * If this initiator has exceeded its maximum allowed queue
		 * depth, sleep until one of its currently queued commands
		 * has completed.
		 */
		if (ii->ii_queuecnt >= ii->ii_maxqueuecnt) {
			if ((flags & IM_NOWAIT) != 0) {
				splx(s);
				return (EAGAIN);
			}
			ii->ii_waitcnt++;
			tsleep(ii, PRIBIO, "iopmsg", 0);
		}
		ii->ii_queuecnt++;
#endif
		if ((ii->ii_flags & II_DISCARD) != 0)
			flags |= IM_DISCARD;
	}

	im = (struct iop_msg *)pool_get(iop_msgpool,
	    (flags & IM_NOWAIT) == 0 ? PR_WAITOK : 0);
	if (im == NULL) {
		splx(s);
		return (ENOMEM);
	}

	/* XXX */
	rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER, IOP_MAX_SGL_ENTRIES,
	    IOP_MAX_XFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &im->im_xfer[0].ix_map);
	if (rv != 0) {
		pool_put(iop_msgpool, im);
		splx(s);
		return (rv);
	}

	/*
	 * Initialise the wrapper before it goes onto the hash: iop_poll()
	 * may scan the bucket as soon as we lower the SPL.
	 */
	im->im_tctx = tctx++;
	im->im_flags = flags | IM_ALLOCED;
	for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
		im->im_xfer[i].ix_size = 0;

	if ((flags & (IM_DISCARD | IM_NOICTX)) == 0)
		TAILQ_INSERT_TAIL(IOP_MSGHASH(im->im_tctx), im, im_hash);

	splx(s);

	*imp = im;
	return (0);
}
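
/*
 * The usual wrapper lifecycle, as used throughout this file:
 *
 *	iop_msg_alloc(sc, ii, &im, flags);
 *	...fill in im->im_msg...
 *	iop_msg_map(sc, im, buf, size, write);
 *	iop_msg_enqueue(sc, im);	(or iop_msg_send() to poll)
 *	iop_msg_wait(sc, im, timo);
 *	iop_msg_unmap(sc, im);
 *	iop_msg_free(sc, ii, im);
 */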

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_initiator *ii, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	/* XXX */
	bus_dmamap_destroy(sc->sc_dmat, im->im_xfer[0].ix_map);

	s = splbio();	/* XXX */

	if ((im->im_flags & (IM_DISCARD | IM_NOICTX)) == 0)
		TAILQ_REMOVE(IOP_MSGHASH(im->im_tctx), im, im_hash);

	im->im_flags = 0;
	pool_put(iop_msgpool, im);

#ifdef notyet
	if (ii != NULL) {
		ii->ii_queuecnt--;
		if (ii->ii_waitcnt != 0) {
			wakeup_one(ii);
			ii->ii_waitcnt--;
		}
	}
#endif

	splx(s);
}

/*
 * Map a data transfer.  Write a scatter gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, void *xferaddr,
    int xfersize, int out)
{
	struct iop_xfer *ix;
	u_int32_t *mb;
	int rv, seg, i;

	for (i = 0, ix = im->im_xfer; i < IOP_MAX_MSG_XFERS; i++, ix++)
		if (ix->ix_size == 0)
			break;
#ifdef I2ODEBUG
	if (i == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/* Only the first DMA map is static. */
	if (i != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SGL_ENTRIES, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	/* Record the direction; iop_msg_unmap() needs it for its sync. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;

	rv = bus_dmamap_load(sc->sc_dmat, ix->ix_map, xferaddr, xfersize,
	    NULL, 0);
	if (rv != 0)
		return (rv);

	/* Sync before the transfer, so PREWRITE/PREREAD, not POST*. */
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	mb = im->im_msg + (im->im_msg[0] >> 16);
	if (out)
		out = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		out = I2O_SGL_SIMPLE;

	for (seg = 0; seg < ix->ix_map->dm_nsegs; seg++) {
#ifdef I2ODEBUG
		if ((seg << 1) + (im->im_msg[0] >> 16) >=
		    (IOP_MAX_MSG_SIZE >> 2))
			panic("iop_msg_map: message frame too large");
#endif
		if (seg == ix->ix_map->dm_nsegs - 1)
			out |= I2O_SGL_END_BUFFER;
		*mb++ = (u_int32_t)ix->ix_map->dm_segs[seg].ds_len | out;
		*mb++ = (u_int32_t)ix->ix_map->dm_segs[seg].ds_addr;
	}

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		im->im_msg[0] += ((im->im_msg[0] >> 16) + seg * 2) << 4;
		im->im_flags |= IM_SGLOFFADJ;
	}
	im->im_msg[0] += (seg << 17);
	return (0);
}
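
/*
 * For illustration: each DMA segment contributes two words to the frame,
 * a flags-and-length word followed by a 32-bit physical address.  A
 * two-segment output buffer thus appends
 *
 *	I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT | len0, addr0,
 *	I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT | I2O_SGL_END_BUFFER | len1, addr1
 *
 * and the message size field (the upper 16 bits of word 0) grows by four.
 */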

/*
 * Unmap all data transfers associated with a message wrapper.
 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

	for (i = 0, ix = im->im_xfer; i < IOP_MAX_MSG_XFERS; i++, ix++) {
		if (ix->ix_size == 0)
			break;
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);

		ix->ix_size = 0;
	}
}

/*
 * Send a message to the IOP.  Optionally, poll on completion.  Return
 * non-zero if failure status is returned and IM_NOINTR is set.
 */
int
iop_msg_send(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int32_t mfa;
	int rv, status, i, s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_NOICTX) == 0)
		if (im->im_msg[3] == IOP_ICTX &&
		    (im->im_flags & IM_NOINTR) == 0)
			panic("iop_msg_send: IOP_ICTX and !IM_NOINTR");
	if ((im->im_flags & IM_DISCARD) != 0)
		panic("iop_msg_send: IM_DISCARD");
#endif

	im->im_tid = im->im_msg[1] & 4095;	/* XXX */

	s = splbio();	/* XXX */

	/* Wait up to 250ms for an MFA. */
	POLL(250, (mfa = IOP_INL(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		DPRINTF(("%s: mfa not forthcoming\n", sc->sc_dv.dv_xname));
		splx(s);
		return (EBUSY);
	}

	/* Perform reply queue DMA synchronisation and update counters. */
	if ((im->im_flags & IM_NOICTX) == 0) {
		if (sc->sc_stat.is_cur_hwqueue == 0)
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
			    sc->sc_rep_size, BUS_DMASYNC_PREREAD);
		for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
			sc->sc_stat.is_bytes += im->im_xfer[i].ix_size;
		sc->sc_stat.is_requests++;
		if (++sc->sc_stat.is_cur_hwqueue > sc->sc_stat.is_peak_hwqueue)
			sc->sc_stat.is_peak_hwqueue =
			    sc->sc_stat.is_cur_hwqueue;
	}

	/* Terminate scatter/gather lists. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		im->im_msg[(im->im_msg[0] >> 16) - 2] |= I2O_SGL_END;

	/* Post the message frame. */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa,
	    im->im_msg, im->im_msg[0] >> 16);

	/* Post the MFA back to the IOP, thus starting the command. */
	IOP_OUTL(sc, IOP_REG_IFIFO, mfa);

	if (timo == 0) {
		splx(s);
		return (0);
	}

	/*
	 * Wait for completion.  Initialise rv and status first: the loop
	 * below may exit without ever calling iop_poll().
	 */
	rv = 0;
	status = I2O_STATUS_SUCCESS;
	for (timo *= 10; timo != 0; timo--) {
		if ((IOP_INL(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0)
			status = iop_poll(sc);
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		DELAY(100);
	}

	splx(s);

	if (timo == 0) {
		DPRINTF(("%s: poll - no reply\n", sc->sc_dv.dv_xname));
		rv = EBUSY;
	} else if ((im->im_flags & IM_NOINTR) != 0)
		rv = (status != I2O_STATUS_SUCCESS ? EIO : 0);

	return (rv);
}

/*
 * Try to post a message to the adapter; if that's not possible, enqueue it
 * with us.
 */
int
iop_msg_enqueue(struct iop_softc *sc, struct iop_msg *im)
{
	u_int mfa;
	int s, fromqueue, i;

#ifdef I2ODEBUG
	if (im == NULL)
		panic("iop_msg_enqueue: im == NULL");
	if (sc == NULL)
		panic("iop_msg_enqueue: sc == NULL");
	if ((im->im_flags & IM_NOICTX) != 0)
		panic("iop_msg_enqueue: IM_NOICTX");
	if (im->im_msg[3] == IOP_ICTX && (im->im_flags & IM_NOINTR) == 0)
		panic("iop_msg_enqueue: IOP_ICTX and !IM_NOINTR");
#endif

	im->im_tid = im->im_msg[1] & 4095;	/* XXX */

	s = splbio();	/* XXX */
	fromqueue = (im == SIMPLEQ_FIRST(&sc->sc_queue));

	if (sc->sc_stat.is_cur_hwqueue >= sc->sc_maxqueuecnt) {
		/*
		 * While the IOP may be able to accept more inbound message
		 * frames than it advertises, don't push harder than it
		 * wants to go lest we starve it.
		 *
		 * XXX We should be handling IOP resource shortages.
		 */
		mfa = IOP_MFA_EMPTY;
	} else {
		/* Double read to account for IOP bug. */
		if ((mfa = IOP_INL(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
			mfa = IOP_INL(sc, IOP_REG_IFIFO);
	}

	if (mfa == IOP_MFA_EMPTY) {
		/* Can't transfer to h/w queue - queue with us. */
		if (!fromqueue) {
			SIMPLEQ_INSERT_TAIL(&sc->sc_queue, im, im_queue);
			if (++sc->sc_stat.is_cur_swqueue >
			    sc->sc_stat.is_peak_swqueue)
				sc->sc_stat.is_peak_swqueue =
				    sc->sc_stat.is_cur_swqueue;
		}
		splx(s);
		return (0);
	} else if (fromqueue) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, im, im_queue);
		sc->sc_stat.is_cur_swqueue--;
	}

	/* Perform reply queue DMA synchronisation and update counters. */
	if (sc->sc_stat.is_cur_hwqueue == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);

	for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
		sc->sc_stat.is_bytes += im->im_xfer[i].ix_size;
	sc->sc_stat.is_requests++;
	if (++sc->sc_stat.is_cur_hwqueue > sc->sc_stat.is_peak_hwqueue)
		sc->sc_stat.is_peak_hwqueue = sc->sc_stat.is_cur_hwqueue;

	/* Terminate the scatter/gather list. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		im->im_msg[(im->im_msg[0] >> 16) - 2] |= I2O_SGL_END;

	/* Post the message frame. */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa,
	    im->im_msg, im->im_msg[0] >> 16);

	/* Post the MFA back to the IOP, thus starting the command. */
	IOP_OUTL(sc, IOP_REG_IFIFO, mfa);

	/* If this is a discardable message wrapper, free it. */
	if ((im->im_flags & IM_DISCARD) != 0)
		iop_msg_free(sc, NULL, im);
	splx(s);
	return (0);
}

/*
 * Wait for the specified message to complete.  Must be called with
 * interrupts enabled.
 */
int
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	int rv, s;

	/*
	 * Check and mark the wrapper at splbio(): otherwise the reply (and
	 * its wakeup) could arrive between the check and the tsleep().
	 */
	s = splbio();
	if ((im->im_flags & IM_REPLIED) != 0) {
		splx(s);
		return (0);
	}
	im->im_flags |= IM_WAITING;
	rv = tsleep(im, PRIBIO, "iopmsg", timo);
	splx(s);
	if ((im->im_flags & IM_REPLIED) != 0)
		return (0);
	return (rv);
}

/*
 * Release an unused message frame back to the IOP's inbound fifo.
 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op. */
	IOP_OUTL(sc, mfa, I2O_VERSION_11 | (4 << 16));
	IOP_OUTL(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	IOP_OUTL(sc, mfa + 8, 0);
	IOP_OUTL(sc, mfa + 12, 0);

	IOP_OUTL(sc, IOP_REG_IFIFO, mfa);
}

#ifdef I2ODEBUG
/*
 * Print status information from a failure reply frame.
 */
static void
iop_reply_print(struct iop_softc *sc, struct iop_msg *im,
    struct i2o_reply *rb)
{
	u_int cmd, detail;
#ifdef I2OVERBOSE
	const char *statusstr;
#endif

	if ((im->im_flags & IM_REPLIED) == 0)
		panic("iop_reply_print: %p not replied to", im);

	cmd = le32toh(rb->msgflags) >> 24;
	detail = le16toh(rb->detail);

#ifdef I2OVERBOSE
	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: tid=%d cmd=0x%02x: status=0x%02x (%s) detail=0x%04x\n",
	    sc->sc_dv.dv_xname, im->im_tid, cmd, rb->reqstatus, statusstr,
	    detail);
#else
	printf("%s: tid=%d cmd=0x%02x: status=0x%02x detail=0x%04x\n",
	    sc->sc_dv.dv_xname, im->im_tid, cmd, rb->reqstatus, detail);
#endif
}
#endif

/*
 * Wait for an exclusive lock on the LCT.
 */
int
iop_lct_lock(struct iop_softc *sc)
{
	int rv;

	while ((sc->sc_flags & IOP_LCTLKHELD) != 0)
		if ((rv = tsleep(sc, PRIBIO | PCATCH, "ioplct", 0)) != 0)
			return (rv);
	sc->sc_flags |= IOP_LCTLKHELD;
	return (0);
}

/*
 * Unlock and wake up any waiters.
 */
void
iop_lct_unlock(struct iop_softc *sc)
{

	sc->sc_flags &= ~IOP_LCTLKHELD;
	wakeup_one(sc);
}

/*
 * Translate an I2O ASCII string into a C string.  Runs of unprintable
 * characters are collapsed to a single space, and leading and trailing
 * runs are dropped entirely.
 *
 * XXX Doesn't belong here.
 */
void
iop_strvis(const char *src, int slen, char *dst, int dlen)
{
	int hc, lc, i;

	dlen--;
	lc = 0;
	hc = 0;
	i = 0;

	while (slen-- && dlen--) {
		if (*src <= 0x20 || *src >= 0x7f) {
			if (hc) {
				dst[i++] = ' ';
				hc = 0;
			}
		} else {
			hc = 1;
			dst[i++] = *src;
			lc = i;
		}
		src++;
	}

	dst[lc] = '\0';
}
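
/*
 * For example, a 16-byte product ID of "RAID\0\0\0\0\0\0\0\0\0\0\0\0"
 * (NUL padded, as IOPs commonly return) becomes the C string "RAID".
 */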

/*
 * Return the index of the LCT entry matching the specified TID.
 */
int
iop_tid_lct_index(struct iop_softc *sc, int tid)
{
	const struct i2o_lct_entry *le;
	int i;

	for (i = 0, le = sc->sc_lct->entry; i < sc->sc_nlctent; i++, le++)
		if ((le32toh(le->localtid) & 4095) == tid)
			return (i);

	return (-1);
}

/*
 * Determine whether the specified target is in use by an OSM (or in turn,
 * by a DDM).  Return a positive non-zero value on error, zero if the TID is
 * in use and a negative non-zero value if the TID is not in use.
 */
int
iop_tid_inuse(struct iop_softc *sc, int tid)
{
	int i;

	if ((i = iop_tid_lct_index(sc, tid)) < 0)
		return (ENXIO);
	return (-((sc->sc_lctmap[i] & IOP_LCTMAP_INUSE) == 0));
}

/*
 * Mark all targets used by the specified target as in use.
 */
void
iop_tid_markallused(struct iop_softc *sc, int tid)
{
	const struct i2o_lct_entry *le;
	int i;

	for (i = 0, le = sc->sc_lct->entry; i < sc->sc_nlctent; i++, le++)
		if ((le32toh(le->usertid) & 4095) == tid) {
#ifdef I2ODEBUG
			if ((sc->sc_lctmap[i] & IOP_LCTMAP_INUSE) != 0)
				panic("iop_tid_markallused: multiple use");
#endif
			sc->sc_lctmap[i] |= IOP_LCTMAP_INUSE;
		}
}

/*
 * Claim the specified TID.  Must be called with interrupts enabled.
 */
int
iop_tid_claim(struct iop_softc *sc, int tid, int ictx, int flags)
{
	struct iop_msg *im;
	struct i2o_util_claim *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_util_claim *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_claim);
	mb->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_CLAIM);
	mb->msgictx = ictx;
	mb->msgtctx = im->im_tctx;
	mb->flags = flags;

	if ((rv = iop_msg_enqueue(sc, im)) == 0)
		rv = iop_msg_wait(sc, im, 1000);
	iop_msg_free(sc, NULL, im);
	return (rv);
}