/*	$NetBSD: iop.c,v 1.2 2000/11/09 12:51:36 ad Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include "opt_i2o.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/pool.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#define	IOP_INL(x, o)		\
    bus_space_read_4((x)->sc_iot, (x)->sc_ioh, (o))
#define	IOP_OUTL(x, o, d)	\
    bus_space_write_4((x)->sc_iot, (x)->sc_ioh, (o), (d))
#define	POLL(ms, cond)						\
do {								\
	int i;							\
	for (i = (ms) * 10; i; i--) {				\
		if (cond)					\
			break;					\
		DELAY(100);					\
	}							\
} while (/* CONSTCOND */0)
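
/*
 * Note: POLL() busy-waits for up to `ms' milliseconds, re-testing `cond'
 * every 100us with DELAY().  It never sleeps, and so is safe to use before
 * interrupts have been configured.
 */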

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#else
#define	IFVERBOSE(x)
#endif

#define	IOP_MSGHASH_NBUCKETS	64
#define	IOP_MSGHASH(tctx)	(&iop_msghashtbl[(tctx) & iop_msghash])
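
/*
 * Replies are matched back to their originating message wrappers by
 * transaction context (tctx): iop_msg_alloc() enters each wrapper into the
 * hash table above, and iop_poll() looks it up again using the tctx that
 * the IOP echoes in the reply frame.
 */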

static TAILQ_HEAD(iop_msghashhead, iop_msg) *iop_msghashtbl;
static u_long	iop_msghash;
static void	*iop_sdh;
static struct	pool *iop_msgpool;

extern struct cfdriver iop_cd;

#define	IC_CONFIGURE	0x01	/* Try to configure devices of this class */
#define	IC_HIGHLEVEL	0x02	/* This is a `high level' device class */

struct iop_class {
	int	ic_class;
	int	ic_flags;
#ifdef I2OVERBOSE
	const char	*ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		IFVERBOSE("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		IFVERBOSE("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_HIGHLEVEL,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_HIGHLEVEL,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		IFVERBOSE("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		IFVERBOSE("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		IFVERBOSE("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		IFVERBOSE("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char *iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static void	iop_config_interrupts(struct device *);
static void	iop_config_interrupts0(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);
static int	iop_vendor_print(void *, const char *);

static int	iop_alloc_dmamem(struct iop_softc *, int, bus_dmamap_t *,
			caddr_t *, bus_addr_t *);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_poll(struct iop_softc *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_status_get(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct iop_msg *,
			struct i2o_reply *);
#endif

/*
 * Initialise the adapter.
 */
int
iop_init(struct iop_softc *sc, const char *intrstr)
{
	int rv;
	u_int32_t mask;
	static int again;
	char ident[64];

	if (again == 0) {
		/* Create the shared message wrapper pool and hash. */
		iop_msgpool = pool_create(sizeof(struct iop_msg), 0, 0, 0,
		    "ioppl", 0, NULL, NULL, M_DEVBUF);
		iop_msghashtbl = hashinit(IOP_MSGHASH_NBUCKETS, HASH_TAILQ,
		    M_DEVBUF, M_NOWAIT, &iop_msghash);
		again = 1;
	}

	/*
	 * Reset the IOP and request status.
	 */
	printf("I2O adapter");
	if ((rv = iop_reset(sc)) != 0)
		return (rv);
	if ((rv = iop_status_get(sc)) != 0) {
		printf("%s: not responding\n", sc->sc_dv.dv_xname);
		return (rv);
	}

	iop_strvis(sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>", ident);

	/* Allocate reply frames and initialise the IOP's outbound FIFO. */
	sc->sc_maxreplycnt = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxreplycnt > IOP_MAX_HW_REPLYCNT)
		sc->sc_maxreplycnt = IOP_MAX_HW_REPLYCNT;
	if ((rv = iop_ofifo_init(sc)) != 0)
		return (rv);

	/* Bring the IOP online. */
	if ((rv = iop_hrt_get(sc)) != 0)
		return (rv);
	if ((rv = iop_systab_set(sc)) != 0)
		return (rv);
	if ((rv = iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE,
	    IOP_ICTX)) != 0)
		return (rv);

	/* Defer configuration of children until interrupts are working. */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdownhook. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = IOP_INL(sc, IOP_REG_INTR_MASK);
	IOP_OUTL(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	printf("\n");
	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

	sc->sc_maxqueuecnt = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxqueuecnt > IOP_MAX_HW_QUEUECNT)
		sc->sc_maxqueuecnt = IOP_MAX_HW_QUEUECNT;

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname,
	    sc->sc_maxqueuecnt, le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxreplycnt, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	SIMPLEQ_INIT(&sc->sc_queue);
	return (0);
}

/*
 * Attempt to match and attach child devices.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc;
	int rv;

	sc = (struct iop_softc *)self;

	/* Read the LCT. */
	if ((rv = iop_lct_get(sc)) != 0)
		printf("%s: failed to read LCT (%d)\n", sc->sc_dv.dv_xname, rv);

	/* Attempt to match and attach a product-specific extension. */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);

	/*
	 * Match and attach child devices.  We do two runs: the first to
	 * match "high level" devices, and the second to match "low level"
	 * devices (low level devices may be parents of high level devices).
	 *
	 * XXX sc_lctmap shouldn't be allocated here.
	 */
	sc->sc_lctmap = malloc(sc->sc_nlctent * sizeof(int8_t), M_DEVBUF,
	    M_NOWAIT);
	memset(sc->sc_lctmap, 0, sc->sc_nlctent * sizeof(int8_t));
	iop_config_interrupts0(sc, IC_CONFIGURE | IC_HIGHLEVEL,
	    IC_CONFIGURE | IC_HIGHLEVEL);
	iop_config_interrupts0(sc, IC_CONFIGURE, IC_CONFIGURE | IC_HIGHLEVEL);
}

/*
 * Attempt to match and attach device classes with the specified flag
 * pattern.
 */
static void
iop_config_interrupts0(struct iop_softc *sc, int pat, int mask)
{
	struct iop_attach_args ia;
	const struct i2o_lct_entry *le;
	int i, j, nent, doit;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		if ((sc->sc_lctmap[i] & IOP_LCTMAP_INUSE) != 0)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		doit = 0;

		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (ia.ia_class == iop_class[j].ic_class)
				if ((iop_class[j].ic_flags & mask) == pat) {
					doit = 1;
					break;
				}

		/*
		 * Try to configure the device if the pattern matches.  If
		 * the device is matched, mark it as being in use.
		 */
		if (doit) {
			ia.ia_tid = le32toh(le->localtid) & 4095;
			if (config_found_sm(&sc->sc_dv, &ia, iop_print,
			    iop_submatch))
				sc->sc_lctmap[i] |= IOP_LCTMAP_INUSE;
		}
	}
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

static int
iop_vendor_print(void *aux, const char *pnp)
{

	if (pnp != NULL)
		printf("vendor specific extension at %s", pnp);
	return (UNCONF);
}

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices... ");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX);
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX);
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve adapter status.
 */
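/*
 * Note that ExecStatusGet is unusual: per the I2O spec, no reply is posted
 * to the outbound FIFO.  Instead the IOP DMAs the status block to the
 * address given in the request and sets its sync byte to 0xff on
 * completion, which is what we poll for below.
 */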
static int
iop_status_get(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_exec_status_get *mb;
	int rv, s;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
		return (rv);

	mb = (struct i2o_exec_status_get *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mb->reserved[0] = 0;
	mb->reserved[1] = 0;
	mb->reserved[2] = 0;
	mb->reserved[3] = 0;
	mb->addrlow = kvtop((caddr_t)&sc->sc_status);	/* XXX */
	mb->addrhigh = 0;
	mb->length = sizeof(sc->sc_status);

	s = splbio();

	if ((rv = iop_msg_send(sc, im, 0)) != 0) {
		splx(s);
		iop_msg_free(sc, NULL, im);
		return (rv);
	}

	/* XXX */
	POLL(2500, *((volatile u_char *)&sc->sc_status.syncbyte) == 0xff);

	splx(s);
	iop_msg_free(sc, NULL, im);
	return (*((volatile u_char *)&sc->sc_status.syncbyte) != 0xff);
}

/*
 * Allocate DMA safe memory.
 */
static int
iop_alloc_dmamem(struct iop_softc *sc, int size, bus_dmamap_t *dmamap,
		 caddr_t *kva, bus_addr_t *paddr)
{
	int rseg, rv;
	bus_dma_segment_t seg;

	if ((rv = bus_dmamem_alloc(sc->sc_dmat, size, NBPG, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: dmamem_alloc = %d\n", sc->sc_dv.dv_xname, rv);
		return (rv);
	}

	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size, kva,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: dmamem_map = %d\n", sc->sc_dv.dv_xname, rv);
		return (rv);
	}

	if ((rv = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, dmamap)) != 0) {
		printf("%s: dmamap_create = %d\n", sc->sc_dv.dv_xname, rv);
		return (rv);
	}

	if ((rv = bus_dmamap_load(sc->sc_dmat, *dmamap, *kva, size,
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: dmamap_load = %d\n", sc->sc_dv.dv_xname, rv);
		return (rv);
	}

	*paddr = (*dmamap)->dm_segs[0].ds_addr;
	return (0);
}

/*
 * Initialize and populate the adapter's outbound FIFO.
 */
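/*
 * The FIFO is primed with the physical addresses of sc_maxreplycnt reply
 * frames, each IOP_MAX_MSG_SIZE bytes long.  The IOP pops one of these
 * message frame addresses (MFAs) whenever it has a reply to deliver;
 * iop_poll() pushes the frame back onto the FIFO once the reply has been
 * handled.
 */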
static int
iop_ofifo_init(struct iop_softc *sc)
{
	struct iop_msg *im;
	volatile u_int32_t status;
	bus_addr_t addr;
	struct i2o_exec_outbound_init *mb;
	int i, rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
		return (rv);

	mb = (struct i2o_exec_outbound_init *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->pagesize = NBPG;
	mb->flags = 0x80 | ((IOP_MAX_MSG_SIZE >> 2) << 16);

	status = 0;
	iop_msg_map(sc, im, (void *)&status, sizeof(status), 0);

	if ((rv = iop_msg_send(sc, im, 0)) != 0) {
		iop_msg_free(sc, NULL, im);
		return (rv);
	}

	DELAY(500000);	/* XXX */

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);

	if (status != I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS &&
	    status != I2O_EXEC_OUTBOUND_INIT_COMPLETE) {
		printf("%s: outbound queue failed to initialize (%08x)\n",
		    sc->sc_dv.dv_xname, status);
		return (ENXIO);
	}

#ifdef I2ODEBUG
	if (status != I2O_EXEC_OUTBOUND_INIT_COMPLETE)
		printf("%s: outbound FIFO init not complete yet\n",
		    sc->sc_dv.dv_xname);
#endif

	/* If we need to allocate DMA safe memory, do it now. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxreplycnt * IOP_MAX_MSG_SIZE;
		iop_alloc_dmamem(sc, sc->sc_rep_size, &sc->sc_rep_dmamap,
		    &sc->sc_rep, &sc->sc_rep_phys);
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxreplycnt, addr = sc->sc_rep_phys; i; i--) {
		IOP_OUTL(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		DELAY(10);
		addr += IOP_MAX_MSG_SIZE;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mb;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_exec_hrt_get *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, hrt, size, 0);
	rv = iop_msg_send(sc, im, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.  Once read, not much is done with
 * the HRT; it's stored for later retrieval by a user-space program.  Reading
 * the HRT is a required part of the IOP initialization sequence.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	if ((rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr))) != 0)
		return (rv);

	size = (le32toh(hrthdr.nentries) - 1) * sizeof(struct i2o_hrt_entry) +
	    sizeof(struct i2o_hrt);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  Must be called with interrupts enabled.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	memset(lct, 0, size);

	mb = (struct i2o_exec_lct_notify *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->classid = I2O_CLASS_ANY;
	mb->changeindicator = 0;

	iop_msg_map(sc, im, lct, size, 0);
	if ((rv = iop_msg_enqueue(sc, im)) == 0)
		rv = iop_msg_wait(sc, im, 1000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.  Must be called with
 * interrupts enabled.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int size, rv;
	struct i2o_lct lcthdr, *lct;

	/* Determine LCT size. */
	if ((rv = iop_lct_get0(sc, &lcthdr, sizeof(lcthdr))) != 0)
		return (rv);

	size = le16toh(lcthdr.tablesize) << 2;
	if ((lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK)) == NULL)
		return (ENOMEM);

	/* Request the entire LCT. */
	if ((rv = iop_lct_get0(sc, lct, size)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	/* Swap in the new LCT. */
	if ((rv = iop_lct_lock(sc)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	iop_lct_unlock(sc);
	return (0);
}

/*
 * Request the specified parameter group from the target.  Must be called
 * with interrupts enabled.
 */
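/*
 * The request payload is a parameter operation block: an operation list
 * header specifying a single operation, followed by a FIELD_GET template
 * with a field count of 0xffff (i.e. all fields) for the requested group.
 * The results list is returned in the caller's buffer.
 */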
int
iop_params_get(struct iop_softc *sc, int tid, int group, void *buf, int size)
{
	struct iop_msg *im;
	struct i2o_util_params_get *mb;
	int rv;
	struct {
		struct	i2o_param_op_list_header olh;
		struct	i2o_param_op_all_template oat;
	} req;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_util_params_get *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_params_get);
	mb->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->flags = 0;

	req.olh.count = htole16(1);
	req.olh.reserved = htole16(0);
	req.oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	req.oat.fieldcount = htole16(0xffff);
	req.oat.group = htole16(group);

	iop_msg_map(sc, im, &req, sizeof(req), 1);
	iop_msg_map(sc, im, buf, size, 0);

	if ((rv = iop_msg_enqueue(sc, im)) == 0)
		rv = iop_msg_wait(sc, im, 1000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Execute a simple command (no parameters) and poll on completion.
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx)
{
	struct iop_msg *im;
	struct i2o_msg *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_msg *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_msg);
	mb->msgfunc = I2O_MSGFUNC(tid, function);
	mb->msgictx = ictx;
	mb->msgtctx = im->im_tctx;

	rv = iop_msg_send(sc, im, 5000);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Post the system table to the IOP.  We don't maintain a full system table
 * as we should - it describes only "this" IOP and is built on the stack
 * here for the one time that we will use it: posting to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_iop_entry systab;
	struct iop_msg *im;
	u_int32_t mema[2], ioa[2];
	struct i2o_exec_sys_tab_set *mb;
	int rv;

	memset(&systab, 0, sizeof(systab));
	systab.orgid = sc->sc_status.orgid;
	systab.iopid = htole32(le32toh(sc->sc_status.iopid) & 4095);
	systab.segnumber = sc->sc_status.segnumber;
	systab.iopcaps = sc->sc_status.iopcaps;
	systab.inboundmsgframesize = sc->sc_status.inboundmframesize;
	systab.inboundmsgportaddresslow =
	    htole32(sc->sc_memaddr + IOP_REG_IFIFO);

	/* Record private memory and I/O spaces. */
	mema[0] = htole32(sc->sc_memaddr);
	mema[1] = htole32(sc->sc_memsize);
	ioa[0] = htole32(sc->sc_ioaddr);
	ioa[1] = htole32(sc->sc_iosize);

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_exec_sys_tab_set *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->iopid = (2 + sc->sc_dv.dv_unit) & 4095;
	mb->segnumber = le32toh(sc->sc_status.segnumber) & 4095;

	iop_msg_map(sc, im, &systab, sizeof(systab), 1);
	iop_msg_map(sc, im, mema, sizeof(mema), 1);
	iop_msg_map(sc, im, ioa, sizeof(ioa), 1);

	rv = iop_msg_send(sc, im, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Reset the adapter.  Must be called with interrupts disabled.
 */
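/*
 * The reset request carries only the physical address of a status word;
 * the IOP sets that word to I2O_RESET_IN_PROGRESS once it has accepted the
 * reset and then re-enters the INIT state.  The first successful read of
 * an MFA from the inbound FIFO tells us that it is responding again.
 */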
static int
iop_reset(struct iop_softc *sc)
{
	struct iop_msg *im;
	volatile u_int32_t sw;
	u_int32_t mfa;
	struct i2o_exec_iop_reset *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
		return (rv);

	sw = 0;

	mb = (struct i2o_exec_iop_reset *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mb->reserved[0] = 0;
	mb->reserved[1] = 0;
	mb->reserved[2] = 0;
	mb->reserved[3] = 0;
	mb->statuslow = kvtop((caddr_t)&sw);		/* XXX */
	mb->statushigh = 0;

	rv = iop_msg_send(sc, im, 0);
	iop_msg_free(sc, NULL, im);
	if (rv != 0)
		return (rv);

	POLL(2500, sw != 0);				/* XXX */
	if (sw != I2O_RESET_IN_PROGRESS) {
		printf("%s: reset rejected\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 5 seconds for
	 * the inbound queue to become responsive.
	 */
	DELAY(1000);
	POLL(5000, (mfa = IOP_INL(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.
 */
int
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	int i;

	/* Find a free slot.  If no slots are free, puke. */
	for (i = 0; i < IOP_MAX_INITIATORS; i++)
		if (sc->sc_itab[i] == NULL)
			break;
	if (i == IOP_MAX_INITIATORS)
		return (ENOMEM);

#ifdef notyet
	ii->ii_maxqueuecnt = IOP_MAX_PI_QUEUECNT;
	ii->ii_queuecnt = 0;
#endif
	ii->ii_ictx = i + 1;
	sc->sc_itab[i] = ii;
	return (0);
}

/*
 * Unregister an initiator.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{

#ifdef notyet
#ifdef I2ODEBUG
	if (ii->ii_queuecnt != 0)
		panic("iop_initiator_unregister: busy");
#endif
#endif
	sc->sc_itab[ii->ii_ictx - 1] = NULL;
}

/*
 * Attempt to read a reply frame from the adapter.  If we get one, deal with
 * it.
 */
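/*
 * Reply MFAs are physical addresses within the reply frame buffer
 * allocated in iop_ofifo_init(), so the virtual address of a reply frame
 * is recovered from the MFA's offset into that buffer.
 */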
static int
iop_poll(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct iop_initiator *ii;
	u_int32_t rmfa;
	u_int off, ictx, tctx, status;

	/* Double read to account for IOP bug. */
	if ((rmfa = IOP_INL(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY &&
	    (rmfa = IOP_INL(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY)
		return (-1);

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/*
	 * Perform reply queue DMA synchronisation.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off, IOP_MAX_MSG_SIZE,
	    BUS_DMASYNC_POSTREAD);
	if (--sc->sc_stat.is_cur_hwqueue != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_poll: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx > IOP_MAX_INITIATORS)
		panic("%s: bad ictx returned", sc->sc_dv.dv_xname);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
#ifdef I2ODEBUG
		if (sc->sc_itab == NULL)
			panic("iop_poll: itab == NULL; ictx %d", ictx);
#endif
		if ((ii = sc->sc_itab[ictx - 1]) == NULL)
			panic("%s: bad ictx returned", sc->sc_dv.dv_xname);
	}

	status = rb->reqstatus;

	if (ii == NULL || (ii->ii_flags & II_DISCARD) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		tctx = le32toh(rb->msgtctx);
		im = TAILQ_FIRST(IOP_MSGHASH(tctx));
		for (; im != NULL; im = TAILQ_NEXT(im, im_hash))
			if (im->im_tctx == tctx)
				break;
		if (im == NULL || (im->im_flags & IM_ALLOCED) == 0)
			panic("%s: bad tctx returned (%x, %p)",
			    sc->sc_dv.dv_xname, tctx, im);
#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: duplicate reply", sc->sc_dv.dv_xname);
#endif

		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (rb->reqstatus != 0)
			iop_reply_print(sc, im, rb);
#endif
		/* Notify the initiator. */
		if ((im->im_flags & IM_WAITING) != 0)
			wakeup(im);
		if ((im->im_flags & IM_NOINTR) == 0)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	/* Return the reply frame to the IOP's outbound FIFO. */
	IOP_OUTL(sc, IOP_REG_OFIFO, rmfa);

	/* Run the queue. */
	if ((im = SIMPLEQ_FIRST(&sc->sc_queue)) != NULL)
		iop_msg_enqueue(sc, im);

	return (status);
}

/*
 * Handle an interrupt from the adapter.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	int forus;

	sc = arg;
	forus = 0;

	/* Handle replies and dispatch enqueued messages. */
	while ((IOP_INL(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
		iop_poll(sc);
		forus = 1;
	}

#ifdef I2ODEBUG
	if (!forus)
		printf("%s: spurious intr\n", sc->sc_dv.dv_xname);
#endif
	return (forus);
}

/*
 * Allocate a message wrapper.
 */
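/*
 * Each wrapper is stamped with a (sufficiently) unique transaction context
 * which the IOP echoes in its reply.  Wrappers allocated with IM_DISCARD
 * or IM_NOICTX are not entered into the hash table, since no reply will
 * ever need to be matched back to them.
 */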
int
iop_msg_alloc(struct iop_softc *sc, struct iop_initiator *ii,
	      struct iop_msg **imp, int flags)
{
	struct iop_msg *im;
	static int tctx = 666;
	int s, rv, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();	/* XXX */

	if (ii != NULL) {
#ifdef notyet
		/*
		 * If this initiator has exceeded its maximum allowed queue
		 * depth, sleep until one of its currently queued commands
		 * has completed.
		 */
		if (ii->ii_queuecnt >= ii->ii_maxqueuecnt) {
			if ((flags & IM_NOWAIT) != 0) {
				splx(s);
				return (EAGAIN);
			}
			ii->ii_waitcnt++;
			tsleep(ii, PRIBIO, "iopmsg", 0);
		}
		ii->ii_queuecnt++;
#endif
		if ((ii->ii_flags & II_DISCARD) != 0)
			flags |= IM_DISCARD;
	}

	im = (struct iop_msg *)pool_get(iop_msgpool,
	    (flags & IM_NOWAIT) == 0 ? PR_WAITOK : 0);
	if (im == NULL) {
		splx(s);
		return (ENOMEM);
	}

	/* XXX */
	rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER, IOP_MAX_SGL_ENTRIES,
	    IOP_MAX_XFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &im->im_xfer[0].ix_map);
	if (rv != 0) {
		pool_put(iop_msgpool, im);
		splx(s);
		return (rv);
	}

	if ((flags & (IM_DISCARD | IM_NOICTX)) == 0)
		TAILQ_INSERT_TAIL(IOP_MSGHASH(tctx), im, im_hash);

	splx(s);

	im->im_tctx = tctx++;
	im->im_flags = flags | IM_ALLOCED;
	for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
		im->im_xfer[i].ix_size = 0;
	*imp = im;

	return (0);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_initiator *ii, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	/* XXX */
	bus_dmamap_destroy(sc->sc_dmat, im->im_xfer[0].ix_map);

	s = splbio();	/* XXX */

	if ((im->im_flags & (IM_DISCARD | IM_NOICTX)) == 0)
		TAILQ_REMOVE(IOP_MSGHASH(im->im_tctx), im, im_hash);

	im->im_flags = 0;
	pool_put(iop_msgpool, im);

#ifdef notyet
	if (ii != NULL) {
		ii->ii_queuecnt--;
		if (ii->ii_waitcnt != 0) {
			wakeup_one(ii);
			ii->ii_waitcnt--;
		}
	}
#endif

	splx(s);
}

/*
 * Map a data transfer.  Write a scatter gather list into the message frame.
 */
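/*
 * Each SGL entry written here is a "simple" I2O element: a 32-bit word
 * holding the segment byte count OR'd with flag bits, followed by the
 * 32-bit physical segment address.  For example, a two-segment outbound
 * transfer is encoded as:
 *
 *	len0 | I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT
 *	paddr0
 *	len1 | I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT | I2O_SGL_END_BUFFER
 *	paddr1
 *
 * The final element of the last SGL in a message is additionally tagged
 * with I2O_SGL_END by iop_msg_send() or iop_msg_enqueue().
 */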
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, void *xferaddr,
	    int xfersize, int out)
{
	struct iop_xfer *ix;
	u_int32_t *mb;
	int rv, seg, i;

	for (i = 0, ix = im->im_xfer; i < IOP_MAX_MSG_XFERS; i++, ix++)
		if (ix->ix_size == 0)
			break;
#ifdef I2ODEBUG
	if (i == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/* Only the first DMA map is static. */
	if (i != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SGL_ENTRIES, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;

	rv = bus_dmamap_load(sc->sc_dmat, ix->ix_map, xferaddr, xfersize,
	    NULL, 0);
	if (rv != 0)
		return (rv);
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	mb = im->im_msg + (im->im_msg[0] >> 16);
	if (out)
		out = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		out = I2O_SGL_SIMPLE;

	for (seg = 0; seg < ix->ix_map->dm_nsegs; seg++) {
#ifdef I2ODEBUG
		if ((seg << 1) + (im->im_msg[0] >> 16) >=
		    (IOP_MAX_MSG_SIZE >> 2))
			panic("iop_msg_map: message frame too large");
#endif
		if (seg == ix->ix_map->dm_nsegs - 1)
			out |= I2O_SGL_END_BUFFER;
		*mb++ = (u_int32_t)ix->ix_map->dm_segs[seg].ds_len | out;
		*mb++ = (u_int32_t)ix->ix_map->dm_segs[seg].ds_addr;
	}

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		im->im_msg[0] += ((im->im_msg[0] >> 16) + seg * 2) << 4;
		im->im_flags |= IM_SGLOFFADJ;
	}
	im->im_msg[0] += (seg << 17);
	return (0);
}

/*
 * Unmap all data transfers associated with a message wrapper.
 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

	for (i = 0, ix = im->im_xfer; i < IOP_MAX_MSG_XFERS; i++, ix++) {
		if (ix->ix_size == 0)
			break;
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);

		ix->ix_size = 0;
	}
}

/*
 * Send a message to the IOP.  Optionally, poll on completion.  Return
 * non-zero if failure status is returned and IM_NOINTR is set.
 */
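/*
 * The inbound handshake is: read a free MFA from the inbound FIFO, copy
 * the request into the frame with bus_space_write_region_4(), then write
 * the MFA back to the FIFO, at which point the IOP owns the frame and
 * begins processing it.
 */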
int
iop_msg_send(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int32_t mfa;
	int rv, status, i, s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_NOICTX) == 0)
		if (im->im_msg[3] == IOP_ICTX &&
		    (im->im_flags & IM_NOINTR) == 0)
			panic("iop_msg_send: IOP_ICTX and !IM_NOINTR");
	if ((im->im_flags & IM_DISCARD) != 0)
		panic("iop_msg_send: IM_DISCARD");
#endif

	im->im_tid = im->im_msg[1] & 4095;	/* XXX */

	s = splbio();	/* XXX */

	/* Wait up to 250ms for an MFA. */
	POLL(250, (mfa = IOP_INL(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		DPRINTF(("%s: mfa not forthcoming\n", sc->sc_dv.dv_xname));
		splx(s);
		return (EBUSY);
	}

	/* Perform reply queue DMA synchronisation and update counters. */
	if ((im->im_flags & IM_NOICTX) == 0) {
		if (sc->sc_stat.is_cur_hwqueue == 0)
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
			    sc->sc_rep_size, BUS_DMASYNC_PREREAD);
		for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
			sc->sc_stat.is_bytes += im->im_xfer[i].ix_size;
		sc->sc_stat.is_requests++;
		if (++sc->sc_stat.is_cur_hwqueue > sc->sc_stat.is_peak_hwqueue)
			sc->sc_stat.is_peak_hwqueue =
			    sc->sc_stat.is_cur_hwqueue;
	}

	/* Terminate scatter/gather lists. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		im->im_msg[(im->im_msg[0] >> 16) - 2] |= I2O_SGL_END;

	/* Post the message frame. */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa,
	    im->im_msg, im->im_msg[0] >> 16);

	/* Post the MFA back to the IOP, thus starting the command. */
	IOP_OUTL(sc, IOP_REG_IFIFO, mfa);

	if (timo == 0) {
		splx(s);
		return (0);
	}

	/* Wait for completion. */
	status = 0;
	for (timo *= 10; timo != 0; timo--) {
		if ((IOP_INL(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0)
			status = iop_poll(sc);
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		DELAY(100);
	}

	splx(s);

	if (timo == 0) {
		DPRINTF(("%s: poll - no reply\n", sc->sc_dv.dv_xname));
		rv = EBUSY;
	} else if ((im->im_flags & IM_NOINTR) != 0)
		rv = (status != I2O_STATUS_SUCCESS ? EIO : 0);
	else
		rv = 0;

	return (rv);
}

/*
 * Try to post a message to the adapter; if that's not possible, enqueue it
 * with us.
 */
int
iop_msg_enqueue(struct iop_softc *sc, struct iop_msg *im)
{
	u_int mfa;
	int s, fromqueue, i;

#ifdef I2ODEBUG
	if (im == NULL)
		panic("iop_msg_enqueue: im == NULL");
	if (sc == NULL)
		panic("iop_msg_enqueue: sc == NULL");
	if ((im->im_flags & IM_NOICTX) != 0)
		panic("iop_msg_enqueue: IM_NOICTX");
	if (im->im_msg[3] == IOP_ICTX && (im->im_flags & IM_NOINTR) == 0)
		panic("iop_msg_enqueue: IOP_ICTX and no IM_NOINTR");
#endif

	im->im_tid = im->im_msg[1] & 4095;	/* XXX */

	s = splbio();	/* XXX */
	fromqueue = (im == SIMPLEQ_FIRST(&sc->sc_queue));

	if (sc->sc_stat.is_cur_hwqueue >= sc->sc_maxqueuecnt) {
		/*
		 * While the IOP may be able to accept more inbound message
		 * frames than it advertises, don't push harder than it
		 * wants to go lest we starve it.
		 *
		 * XXX We should be handling IOP resource shortages.
		 */
		mfa = IOP_MFA_EMPTY;
	} else {
		/* Double read to account for IOP bug. */
		if ((mfa = IOP_INL(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
			mfa = IOP_INL(sc, IOP_REG_IFIFO);
	}

	if (mfa == IOP_MFA_EMPTY) {
		/* Can't transfer to h/w queue - queue with us. */
		if (!fromqueue) {
			SIMPLEQ_INSERT_TAIL(&sc->sc_queue, im, im_queue);
			if (++sc->sc_stat.is_cur_swqueue >
			    sc->sc_stat.is_peak_swqueue)
				sc->sc_stat.is_peak_swqueue =
				    sc->sc_stat.is_cur_swqueue;
		}
		splx(s);
		return (0);
	} else if (fromqueue) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, im, im_queue);
		sc->sc_stat.is_cur_swqueue--;
	}

	/* Perform reply queue DMA synchronisation and update counters. */
	if (sc->sc_stat.is_cur_hwqueue == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);

	for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
		sc->sc_stat.is_bytes += im->im_xfer[i].ix_size;
	sc->sc_stat.is_requests++;
	if (++sc->sc_stat.is_cur_hwqueue > sc->sc_stat.is_peak_hwqueue)
		sc->sc_stat.is_peak_hwqueue = sc->sc_stat.is_cur_hwqueue;

	/* Terminate the scatter/gather list. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		im->im_msg[(im->im_msg[0] >> 16) - 2] |= I2O_SGL_END;

	/* Post the message frame. */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa,
	    im->im_msg, im->im_msg[0] >> 16);

	/* Post the MFA back to the IOP, thus starting the command. */
	IOP_OUTL(sc, IOP_REG_IFIFO, mfa);

	/* If this is a discardable message wrapper, free it. */
	if ((im->im_flags & IM_DISCARD) != 0)
		iop_msg_free(sc, NULL, im);
	splx(s);
	return (0);
}

/*
 * Wait for the specified message to complete.  Must be called with
 * interrupts enabled.
 */
int
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	int rv;

	im->im_flags |= IM_WAITING;
	if ((im->im_flags & IM_REPLIED) != 0)
		return (0);
	rv = tsleep(im, PRIBIO, "iopmsg", timo);
	if ((im->im_flags & IM_REPLIED) != 0)
		return (0);
	return (rv);
}

/*
 * Release an unused message frame back to the IOP's inbound FIFO.
 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op. */
	IOP_OUTL(sc, mfa, I2O_VERSION_11 | (4 << 16));
	IOP_OUTL(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	IOP_OUTL(sc, mfa + 8, 0);
	IOP_OUTL(sc, mfa + 12, 0);

	IOP_OUTL(sc, IOP_REG_IFIFO, mfa);
}

#ifdef I2ODEBUG
/*
 * Print status information from a failure reply frame.
 */
static void
iop_reply_print(struct iop_softc *sc, struct iop_msg *im,
		struct i2o_reply *rb)
{
	u_int cmd, detail;
#ifdef I2OVERBOSE
	const char *statusstr;
#endif

	if ((im->im_flags & IM_REPLIED) == 0)
		panic("iop_reply_print: %p not replied to", im);

	cmd = le32toh(rb->msgflags) >> 24;
	detail = le16toh(rb->detail);

#ifdef I2OVERBOSE
	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: tid=%d cmd=0x%02x: status=0x%02x (%s) detail=0x%04x\n",
	    sc->sc_dv.dv_xname, im->im_tid, cmd, rb->reqstatus, statusstr,
	    detail);
#else
	printf("%s: tid=%d cmd=0x%02x: status=0x%02x detail=0x%04x\n",
	    sc->sc_dv.dv_xname, im->im_tid, cmd, rb->reqstatus, detail);
#endif
}
#endif

/*
 * Wait for an exclusive lock on the LCT.
 */
int
iop_lct_lock(struct iop_softc *sc)
{
	int rv;

	while ((sc->sc_flags & IOP_LCTLKHELD) != 0)
		if ((rv = tsleep(sc, PRIBIO | PCATCH, "ioplct", 0)) != 0)
			return (rv);
	sc->sc_flags |= IOP_LCTLKHELD;
	return (0);
}

/*
 * Unlock and wake up any waiters.
 */
void
iop_lct_unlock(struct iop_softc *sc)
{

	sc->sc_flags &= ~IOP_LCTLKHELD;
	wakeup_one(sc);
}

/*
 * Translate an I2O ASCII string into a C string.
 *
 * XXX Doesn't belong here.
 */
void
iop_strvis(const char *src, int slen, char *dst, int dlen)
{
	int hc, lc, i;

	dlen--;
	lc = 0;
	hc = 0;
	i = 0;

	while (slen-- && dlen--) {
		if (*src <= 0x20 || *src >= 0x7f) {
			if (hc)
				dst[i++] = ' ';
		} else {
			hc = 1;
			dst[i++] = *src;
			lc = i;
		}
		src++;
	}

	dst[lc] = '\0';
}

/*
 * Return the index of the LCT entry matching the specified TID.
 */
int
iop_tid_lct_index(struct iop_softc *sc, int tid)
{
	const struct i2o_lct_entry *le;
	int i;

	for (i = 0, le = sc->sc_lct->entry; i < sc->sc_nlctent; i++, le++)
		if ((le32toh(le->localtid) & 4095) == tid)
			return (i);

	return (-1);
}

/*
 * Determine whether the specified target is in use by an OSM (or in turn,
 * by a DDM).  Return a positive non-zero value on error, zero if the TID is
 * in use and a negative non-zero value if the TID is not in use.
 */
int
iop_tid_inuse(struct iop_softc *sc, int tid)
{
	int i;

	if ((i = iop_tid_lct_index(sc, tid)) < 0)
		return (ENXIO);
	return (-((sc->sc_lctmap[i] & IOP_LCTMAP_INUSE) == 0));
}

/*
 * Mark all targets used by the specified target as in use.
 */
void
iop_tid_markallused(struct iop_softc *sc, int tid)
{
	const struct i2o_lct_entry *le;
	int i;

	for (i = 0, le = sc->sc_lct->entry; i < sc->sc_nlctent; i++, le++)
		if ((le32toh(le->usertid) & 4095) == tid) {
#ifdef I2ODEBUG
			if ((sc->sc_lctmap[i] & IOP_LCTMAP_INUSE) != 0)
				panic("iop_tid_markallused: multiple use");
#endif
			sc->sc_lctmap[i] |= IOP_LCTMAP_INUSE;
		}
}

/*
 * Claim the specified TID.  Must be called with interrupts enabled.
 */
int
iop_tid_claim(struct iop_softc *sc, int tid, int ictx, int flags)
{
	struct iop_msg *im;
	struct i2o_util_claim *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_util_claim *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_claim);
	mb->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_CLAIM);
	mb->msgictx = ictx;
	mb->msgtctx = im->im_tctx;
	mb->flags = flags;

	if ((rv = iop_msg_enqueue(sc, im)) == 0)
		rv = iop_msg_wait(sc, im, 1000);
	iop_msg_free(sc, NULL, im);
	return (rv);
}