/*	$NetBSD: iop.c,v 1.8 2001/01/01 19:03:30 ad Exp $	*/
2
3 /*-
4 * Copyright (c) 2000 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Support for I2O IOPs (intelligent I/O processors).
41 */
42
43 #include "opt_i2o.h"
44 #include "iop.h"
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/device.h>
50 #include <sys/queue.h>
51 #include <sys/proc.h>
52 #include <sys/malloc.h>
53 #include <sys/ioctl.h>
54 #include <sys/endian.h>
55 #include <sys/pool.h>
56 #include <sys/conf.h>
57 #include <sys/kthread.h>
58
59 #include <uvm/uvm_extern.h>
60
61 #include <machine/bus.h>
62
63 #include <dev/i2o/i2o.h>
64 #include <dev/i2o/iopreg.h>
65 #include <dev/i2o/iopvar.h>
66
/*
 * Spin for up to `ms' milliseconds, in 100us steps, waiting for `cond'
 * to become true.  `cond' is re-evaluated each iteration, so it is
 * typically a volatile expression.
 *
 * Note: no trailing semicolon after while(0) -- the caller supplies it,
 * which keeps `if (x) POLL(...); else ...' well-formed.
 */
#define POLL(ms, cond)						\
do {								\
	int i;							\
	for (i = (ms) * 10; i; i--) {				\
		if (cond)					\
			break;					\
		DELAY(100);					\
	}							\
} while (/* CONSTCOND */0)
76
77 #ifdef I2ODEBUG
78 #define DPRINTF(x) printf x
79 #else
80 #define DPRINTF(x)
81 #endif
82
83 #ifdef I2OVERBOSE
84 #define IFVERBOSE(x) x
85 #else
86 #define IFVERBOSE(x)
87 #endif
88
89 #define COMMENT(x) ""
90
91 #define IOP_ICTXHASH_NBUCKETS 16
92 #define IOP_ICTXHASH(ictx) (&iop_ictxhashtbl[(ictx) & iop_ictxhash])
93 #define IOP_TCTXHASH_NBUCKETS 64
94 #define IOP_TCTXHASH(tctx) (&iop_tctxhashtbl[(tctx) & iop_tctxhash])
95
96 static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
97 static u_long iop_ictxhash;
98 static TAILQ_HEAD(, iop_msg) *iop_tctxhashtbl;
99 static u_long iop_tctxhash;
100 static void *iop_sdh;
101 static struct pool *iop_msgpool;
102 static struct i2o_systab *iop_systab;
103 static int iop_systab_size;
104
105 extern struct cfdriver iop_cd;
106
107 #define IC_CONFIGURE 0x01
108
/*
 * Map of I2O device classes to autoconfiguration behaviour.  Entries
 * flagged IC_CONFIGURE are offered to child drivers during device
 * configuration; caption text is retained only when I2OVERBOSE is
 * defined (COMMENT() entries compile to "").
 */
struct iop_class {
	u_short ic_class;		/* I2O class code */
	u_short ic_flags;		/* IC_CONFIGURE or zero */
	const char *ic_caption;		/* description (may be "") */
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port" )
	},
};
185
#if defined(I2ODEBUG) && defined(I2OVERBOSE)
/*
 * Textual renderings of I2O reply status codes, indexed by the status
 * value from the reply frame (debugging aid only).
 */
static const char *iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif
202
203 static inline u_int32_t iop_inl(struct iop_softc *, int);
204 static inline void iop_outl(struct iop_softc *, int, u_int32_t);
205
206 static void iop_config_interrupts(struct device *);
207 static void iop_configure_devices(struct iop_softc *);
208 static void iop_devinfo(int, char *);
209 static int iop_print(void *, const char *);
210 static int iop_reconfigure(struct iop_softc *, u_int32_t);
211 static void iop_shutdown(void *);
212 static int iop_submatch(struct device *, struct cfdata *, void *);
213 #ifdef notyet
214 static int iop_vendor_print(void *, const char *);
215 #endif
216
217 static void iop_intr_event(struct device *, struct iop_msg *, void *);
218 static int iop_hrt_get(struct iop_softc *);
219 static int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
220 static int iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
221 u_int32_t);
222 static int iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
223 static int iop_ofifo_init(struct iop_softc *);
224 static int iop_handle_reply(struct iop_softc *, u_int32_t);
225 static void iop_reconfigure_proc(void *);
226 static void iop_release_mfa(struct iop_softc *, u_int32_t);
227 static int iop_reset(struct iop_softc *);
228 static int iop_status_get(struct iop_softc *);
229 static int iop_systab_set(struct iop_softc *);
230
231 #ifdef I2ODEBUG
232 static void iop_reply_print(struct iop_softc *, struct iop_msg *,
233 struct i2o_reply *);
234 #endif
235
236 cdev_decl(iop);
237
/*
 * Read a 32-bit IOP register at byte offset `off'.  A full read/write
 * barrier is issued first so the access is ordered with respect to
 * earlier register accesses.
 */
static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}
246
/*
 * Write a 32-bit IOP register at byte offset `off', followed by a
 * write barrier so the store is pushed out before later accesses.
 */
static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
255
256 /*
257 * Initialise the adapter.
258 */
259 void
260 iop_init(struct iop_softc *sc, const char *intrstr)
261 {
262 int rv;
263 u_int32_t mask;
264 static int again;
265 char ident[64];
266
267 if (again == 0) {
268 /* Create the shared message wrapper pool and hashes. */
269 iop_msgpool = pool_create(sizeof(struct iop_msg), 0, 0, 0,
270 "ioppl", 0, NULL, NULL, M_DEVBUF);
271 iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
272 M_DEVBUF, M_NOWAIT, &iop_ictxhash);
273 iop_tctxhashtbl = hashinit(IOP_TCTXHASH_NBUCKETS, HASH_TAILQ,
274 M_DEVBUF, M_NOWAIT, &iop_tctxhash);
275 again = 1;
276 }
277
278 /* Reset the IOP and request status. */
279 printf("I2O adapter");
280
281 if ((rv = iop_reset(sc)) != 0) {
282 printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
283 return;
284 }
285 if ((rv = iop_status_get(sc)) != 0) {
286 printf("%s: not responding (get status)\n", sc->sc_dv.dv_xname);
287 return;
288 }
289 DPRINTF((" (state=%d)",
290 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff));
291 sc->sc_flags |= IOP_HAVESTATUS;
292
293 iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
294 ident, sizeof(ident));
295 printf(" <%s>\n", ident);
296
297 #ifdef I2ODEBUG
298 printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
299 le16toh(sc->sc_status.orgid),
300 (le32toh(sc->sc_status.segnumber) >> 12) & 15);
301 printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
302 printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
303 le32toh(sc->sc_status.desiredprivmemsize),
304 le32toh(sc->sc_status.currentprivmemsize),
305 le32toh(sc->sc_status.currentprivmembase));
306 printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
307 le32toh(sc->sc_status.desiredpriviosize),
308 le32toh(sc->sc_status.currentpriviosize),
309 le32toh(sc->sc_status.currentpriviobase));
310 #endif
311
312 sc->sc_maxreplycnt = le32toh(sc->sc_status.maxoutboundmframes);
313 if (sc->sc_maxreplycnt > IOP_MAX_HW_REPLYCNT)
314 sc->sc_maxreplycnt = IOP_MAX_HW_REPLYCNT;
315 sc->sc_maxqueuecnt = le32toh(sc->sc_status.maxinboundmframes);
316 if (sc->sc_maxqueuecnt > IOP_MAX_HW_QUEUECNT)
317 sc->sc_maxqueuecnt = IOP_MAX_HW_QUEUECNT;
318
319 if (iop_ofifo_init(sc) != 0) {
320 printf("%s: unable to init oubound FIFO\n", sc->sc_dv.dv_xname);
321 return;
322 }
323
324 /*
325 * Defer further configuration until (a) interrupts are working and
326 * (b) we have enough information to build the system table.
327 */
328 config_interrupts((struct device *)sc, iop_config_interrupts);
329
330 /* Configure shutdown hook before we start any device activity. */
331 if (iop_sdh == NULL)
332 iop_sdh = shutdownhook_establish(iop_shutdown, NULL);
333
334 /* Ensure interrupts are enabled at the IOP. */
335 mask = iop_inl(sc, IOP_REG_INTR_MASK);
336 iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);
337
338 if (intrstr != NULL)
339 printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
340 intrstr);
341
342 #ifdef I2ODEBUG
343 printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
344 sc->sc_dv.dv_xname,
345 sc->sc_maxqueuecnt, le32toh(sc->sc_status.maxinboundmframes),
346 sc->sc_maxreplycnt, le32toh(sc->sc_status.maxoutboundmframes));
347 #endif
348
349 lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
350 SIMPLEQ_INIT(&sc->sc_queue);
351 }
352
353 /*
354 * Perform autoconfiguration tasks.
355 */
356 static void
357 iop_config_interrupts(struct device *self)
358 {
359 struct iop_softc *sc, *iop;
360 struct i2o_systab_entry *ste;
361 int rv, i, niop;
362
363 sc = (struct iop_softc *)self;
364 LIST_INIT(&sc->sc_iilist);
365
366 printf("%s: configuring...\n", sc->sc_dv.dv_xname);
367
368 if (iop_hrt_get(sc) != 0) {
369 printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
370 return;
371 }
372
373 /*
374 * Build the system table.
375 */
376 if (iop_systab == NULL) {
377 for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
378 if ((iop = device_lookup(&iop_cd, i)) == NULL)
379 continue;
380 if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
381 continue;
382 if (iop_status_get(iop) != 0) {
383 printf("%s: unable to retrieve status\n",
384 sc->sc_dv.dv_xname);
385 iop->sc_flags &= ~IOP_HAVESTATUS;
386 continue;
387 }
388 niop++;
389 }
390 if (niop == 0)
391 return;
392
393 i = sizeof(struct i2o_systab_entry) * (niop - 1) +
394 sizeof(struct i2o_systab);
395 iop_systab_size = i;
396 iop_systab = malloc(i, M_DEVBUF, M_NOWAIT);
397
398 memset(iop_systab, 0, i);
399 iop_systab->numentries = niop;
400 iop_systab->version = I2O_VERSION_11;
401
402 for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
403 if ((iop = device_lookup(&iop_cd, i)) == NULL)
404 continue;
405 if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
406 continue;
407
408 ste->orgid = iop->sc_status.orgid;
409 ste->iopid = iop->sc_dv.dv_unit + 2;
410 ste->segnumber =
411 htole32(le32toh(iop->sc_status.segnumber) & ~4095);
412 ste->iopcaps = iop->sc_status.iopcaps;
413 ste->inboundmsgframesize =
414 iop->sc_status.inboundmframesize;
415 ste->inboundmsgportaddresslow =
416 htole32(iop->sc_memaddr + IOP_REG_IFIFO);
417 ste++;
418 }
419 }
420
421 if (iop_systab_set(sc) != 0) {
422 printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
423 return;
424 }
425 if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
426 5000) != 0) {
427 printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
428 return;
429 }
430
431 /*
432 * Set up an event handler for this IOP.
433 */
434 sc->sc_eventii.ii_dv = self;
435 sc->sc_eventii.ii_intr = iop_intr_event;
436 sc->sc_eventii.ii_flags = II_DISCARD | II_UTILITY;
437 sc->sc_eventii.ii_tid = I2O_TID_IOP;
438 if (iop_initiator_register(sc, &sc->sc_eventii) != 0) {
439 printf("%s: unable to register initiator", sc->sc_dv.dv_xname);
440 return;
441 }
442 if (iop_util_eventreg(sc, &sc->sc_eventii, 0xffffffff)) {
443 printf("%s: unable to register for events", sc->sc_dv.dv_xname);
444 return;
445 }
446
447 #ifdef notyet
448 /* Attempt to match and attach a product-specific extension. */
449 ia.ia_class = I2O_CLASS_ANY;
450 ia.ia_tid = I2O_TID_IOP;
451 config_found_sm(self, &ia, iop_vendor_print, iop_submatch);
452 #endif
453
454 if ((rv = iop_reconfigure(sc, 0)) != 0) {
455 printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
456 return;
457 }
458
459 sc->sc_flags |= IOP_ONLINE;
460
461 rv = kthread_create1(iop_reconfigure_proc, sc, &sc->sc_reconf_proc,
462 "%s", sc->sc_dv.dv_xname);
463 if (rv != 0) {
464 printf("%s: unable to create thread (%d)",
465 sc->sc_dv.dv_xname, rv);
466 return;
467 }
468 }
469
470 /*
471 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
473 */
static void
iop_reconfigure_proc(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;

	sc = cookie;

	for (;;) {
		/*
		 * Ask for notification of the next LCT change.  We pass
		 * one past the last change indicator we saw; the request
		 * completes once the IOP's indicator reaches that value.
		 */
		chgind = le32toh(sc->sc_chgindicator) + 1;

		if (iop_lct_get0(sc, &lct, sizeof(lct), chgind) == 0) {
			DPRINTF(("%s: async reconfiguration (0x%08x)\n",
			    sc->sc_dv.dv_xname, le32toh(lct.changeindicator)));
			iop_reconfigure(sc, lct.changeindicator);
		}

		/* Rate-limit: at most one notification request per 5s. */
		tsleep(iop_reconfigure_proc, PWAIT, "iopzzz", hz * 5);
	}
}
495
496 /*
497 * Reconfigure: find new and removed devices.
498 */
499 static int
500 iop_reconfigure(struct iop_softc *sc, u_int32_t chgind)
501 {
502 struct iop_msg *im;
503 struct i2o_hba_bus_scan *mb;
504 struct i2o_lct_entry *le;
505 struct iop_initiator *ii, *nextii;
506 int rv, tid, i;
507
508 rv = lockmgr(&sc->sc_conflock, LK_EXCLUSIVE | LK_RECURSEFAIL, NULL);
509 if (rv != 0) {
510 DPRINTF(("iop_reconfigure: unable to acquire lock\n"));
511 return (rv);
512 }
513
514 /*
515 * If the reconfiguration request isn't the result of LCT change
516 * notification, then be more thorough: ask all bus ports to scan
517 * their busses. Wait up to 5 minutes for each bus port to complete
518 * the request.
519 */
520 if (chgind == 0) {
521 if ((rv = iop_lct_get(sc)) != 0) {
522 DPRINTF(("iop_reconfigure: unable to read LCT\n"));
523 goto done;
524 }
525
526 le = sc->sc_lct->entry;
527 for (i = 0; i < sc->sc_nlctent; i++, le++) {
528 if ((le16toh(le->classid) & 4095) !=
529 I2O_CLASS_BUS_ADAPTER_PORT)
530 continue;
531 tid = le32toh(le->localtid) & 4095;
532
533 rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR);
534 if (rv != 0) {
535 DPRINTF(("iop_reconfigure: alloc msg\n"));
536 goto done;
537 }
538
539 mb = (struct i2o_hba_bus_scan *)im->im_msg;
540 mb->msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
541 mb->msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
542 mb->msgictx = IOP_ICTX;
543 mb->msgtctx = im->im_tctx;
544
545 DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
546 tid));
547
548 rv = iop_msg_enqueue(sc, im, 5*60*1000);
549 iop_msg_free(sc, NULL, im);
550 if (rv != 0) {
551 DPRINTF(("iop_reconfigure: scan failed\n"));
552 goto done;
553 }
554 }
555 } else if (chgind == sc->sc_chgindicator) {
556 DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
557 goto done;
558 }
559
560 /* Re-read the LCT and determine if it has changed. */
561 if ((rv = iop_lct_get(sc)) != 0) {
562 DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
563 goto done;
564 }
565 DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));
566
567 if (sc->sc_lct->changeindicator == sc->sc_chgindicator) {
568 DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
569 /* Nothing to do. */
570 rv = 0;
571 goto done;
572 }
573 DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
574 sc->sc_chgindicator = sc->sc_lct->changeindicator;
575
576 if (sc->sc_tidmap != NULL)
577 free(sc->sc_tidmap, M_DEVBUF);
578 sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
579 M_DEVBUF, M_NOWAIT);
580 memset(sc->sc_tidmap, 0, sc->sc_nlctent * sizeof(struct iop_tidmap));
581
582 /* Match and attach child devices. */
583 iop_configure_devices(sc);
584
585 for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
586 nextii = LIST_NEXT(ii, ii_list);
587 if ((ii->ii_flags & II_UTILITY) != 0)
588 continue;
589 if ((ii->ii_flags & II_CONFIGURED) == 0) {
590 ii->ii_flags |= II_CONFIGURED;
591 continue;
592 }
593
594 /* Detach devices that were configured, but are now gone. */
595 for (i = 0; i < sc->sc_nlctent; i++)
596 if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
597 break;
598 if (i == sc->sc_nlctent ||
599 (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
600 config_detach(ii->ii_dv, DETACH_FORCE);
601
602 /*
603 * Tell initiators that existed before the re-configuration
604 * to re-configure.
605 */
606 if (ii->ii_reconfig == NULL)
607 continue;
608 if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
609 printf("%s: %s failed reconfigure (%d)\n",
610 sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
611 }
612 rv = 0;
613
614 done:
615 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
616 return (rv);
617 }
618
/*
 * Configure I2O devices into the system: walk the current LCT and,
 * for each unclaimed target of an interesting class that is not
 * already owned by an initiator, attempt to attach a child driver.
 */
static void
iop_configure_devices(struct iop_softc *sc)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	int i, j, nent;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		/*
		 * Ignore the device if it's in use.
		 */
		if ((le32toh(le->usertid) & 4095) != 4095)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = le32toh(le->localtid) & 4095;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & IC_CONFIGURE) == 0)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if ((ii->ii_flags & II_UTILITY) != 0)
				continue;
			if (ia.ia_tid == ii->ii_tid)
				break;
		}
		/* Non-NULL means an existing initiator owns this TID. */
		if (ii != NULL)
			continue;

		if (config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch))
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
	}
}
666
/*
 * Render a human-readable description of the given I2O class code
 * into the caller-supplied buffer.
 */
static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;
	const int nclass = sizeof(iop_class) / sizeof(iop_class[0]);

	/* Look the class code up in the class table. */
	i = 0;
	while (i < nclass && iop_class[i].ic_class != class)
		i++;

	if (i == nclass)
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}
686
687 static int
688 iop_print(void *aux, const char *pnp)
689 {
690 struct iop_attach_args *ia;
691 char devinfo[256];
692
693 ia = aux;
694
695 if (pnp != NULL) {
696 iop_devinfo(ia->ia_class, devinfo);
697 printf("%s at %s", devinfo, pnp);
698 }
699 printf(" tid %d", ia->ia_tid);
700 return (UNCONF);
701 }
702
#ifdef notyet
/*
 * autoconf print routine for vendor-specific extensions.
 */
static int
iop_vendor_print(void *aux, const char *pnp)
{

	if (pnp == NULL)
		return (UNCONF);
	printf("vendor specific extension at %s", pnp);
	return (UNCONF);
}
#endif
713
/*
 * autoconf submatch routine: reject a candidate child driver if its
 * config-file entry locks it to a different TID; otherwise defer to
 * the driver's own match function.
 */
static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}
726
/*
 * Shut down all configured IOPs.  Installed as a shutdown hook; each
 * online IOP is quiesced and then cleared of outstanding work.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices... ");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;
		/* Quiesce the system, then clear outstanding commands. */
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX,
		    0, 5000);
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}
753
/*
 * Retrieve adapter status into sc->sc_status.  The IOP writes the
 * reply by DMA; since no interrupt context is used (IM_NOICTX) we
 * poll the sync byte at the end of the buffer for completion.
 * Returns 0 on success, non-zero on failure or timeout.
 */
static int
iop_status_get(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_exec_status_get *mb;
	int rv, s;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
		return (rv);

	mb = (struct i2o_exec_status_get *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mb->reserved[0] = 0;
	mb->reserved[1] = 0;
	mb->reserved[2] = 0;
	mb->reserved[3] = 0;
	/* Physical address of the reply buffer the IOP DMAs into. */
	mb->addrlow = kvtop((caddr_t)&sc->sc_status); /* XXX */
	mb->addrhigh = 0;
	mb->length = sizeof(sc->sc_status);

	s = splbio();
	memset(&sc->sc_status, 0, sizeof(sc->sc_status));

	if ((rv = iop_msg_send(sc, im, 0)) != 0) {
		splx(s);
		iop_msg_free(sc, NULL, im);
		return (rv);
	}

	/* Wait up to 2.5 seconds for the IOP to fill in the buffer. XXX */
	POLL(2500, *((volatile u_char *)&sc->sc_status.syncbyte) == 0xff);

	splx(s);
	iop_msg_free(sc, NULL, im);
	/* Non-zero (failure) if the sync byte never appeared. */
	return (*((volatile u_char *)&sc->sc_status.syncbyte) != 0xff);
}
794
795 /*
 * Initialize and populate the adapter's outbound FIFO.
797 */
798 static int
799 iop_ofifo_init(struct iop_softc *sc)
800 {
801 struct iop_msg *im;
802 volatile u_int32_t status;
803 bus_addr_t addr;
804 bus_dma_segment_t seg;
805 struct i2o_exec_outbound_init *mb;
806 int i, rseg, rv;
807
808 if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
809 return (rv);
810
811 mb = (struct i2o_exec_outbound_init *)im->im_msg;
812 mb->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
813 mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
814 mb->msgictx = IOP_ICTX;
815 mb->msgtctx = im->im_tctx;
816 mb->pagesize = PAGE_SIZE;
817 mb->flags = 0x80 | ((IOP_MAX_REPLY_SIZE >> 2) << 16); /* XXX */
818
819 status = 0;
820
821 /*
822 * The I2O spec says that there are two SGLs: one for the status
823 * word, and one for a list of discarded MFAs. It continues to say
824 * that if you don't want to get the list of MFAs, an IGNORE SGL is
825 * necessary; this isn't the case (and in fact appears to be a bad
826 * thing).
827 */
828 iop_msg_map(sc, im, (void *)&status, sizeof(status), 0);
829 if ((rv = iop_msg_send(sc, im, 0)) != 0) {
830 iop_msg_free(sc, NULL, im);
831 return (rv);
832 }
833 iop_msg_unmap(sc, im);
834 iop_msg_free(sc, NULL, im);
835
836 /* XXX */
837 POLL(5000, status == I2O_EXEC_OUTBOUND_INIT_COMPLETE);
838 if (status != I2O_EXEC_OUTBOUND_INIT_COMPLETE) {
839 printf("%s: outbound FIFO init failed\n", sc->sc_dv.dv_xname);
840 return (EIO);
841 }
842
843 /* If we need to allocate DMA safe memory, do it now. */
844 if (sc->sc_rep_phys == 0) {
845 sc->sc_rep_size = sc->sc_maxreplycnt * IOP_MAX_REPLY_SIZE;
846
847 rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
848 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
849 if (rv != 0) {
850 printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
851 rv);
852 return (rv);
853 }
854
855 rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
856 &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
857 if (rv != 0) {
858 printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
859 return (rv);
860 }
861
862 rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
863 sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
864 if (rv != 0) {
865 printf("%s: dma create = %d\n", sc->sc_dv.dv_xname, rv);
866 return (rv);
867 }
868
869 rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap, sc->sc_rep,
870 sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
871 if (rv != 0) {
872 printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
873 return (rv);
874 }
875
876 sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
877 }
878
879 /* Populate the outbound FIFO. */
880 for (i = sc->sc_maxreplycnt, addr = sc->sc_rep_phys; i != 0; i--) {
881 iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
882 addr += IOP_MAX_REPLY_SIZE;
883 }
884
885 return (0);
886 }
887
/*
 * Read the specified number of bytes from the IOP's hardware resource
 * table into the caller-supplied buffer `hrt'.  Runs synchronously
 * with a 5 second timeout.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mb;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_exec_hrt_get *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;

	/* Map the reply buffer, run the command, then tear down. */
	iop_msg_map(sc, im, hrt, size, 0);
	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}
913
914 /*
915 * Read the IOP's hardware resource table.
916 */
917 static int
918 iop_hrt_get(struct iop_softc *sc)
919 {
920 struct i2o_hrt hrthdr, *hrt;
921 int size, rv;
922
923 if ((rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr))) != 0)
924 return (rv);
925
926 DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
927 le16toh(hrthdr.numentries)));
928
929 size = sizeof(struct i2o_hrt) +
930 (htole32(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
931 hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
932
933 if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
934 free(hrt, M_DEVBUF);
935 return (rv);
936 }
937
938 if (sc->sc_hrt != NULL)
939 free(sc->sc_hrt, M_DEVBUF);
940 sc->sc_hrt = hrt;
941 return (0);
942 }
943
944 /*
945 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
949 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
    u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	memset(lct, 0, size);
	memset(im->im_msg, 0, sizeof(im->im_msg));

	mb = (struct i2o_exec_lct_notify *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->classid = I2O_CLASS_ANY;
	/*
	 * NOTE(review): chgind is stored into the frame without an
	 * htole32() swap, unlike most other frame fields here --
	 * verify this is correct on big-endian hosts.
	 */
	mb->changeindicator = chgind;

	DPRINTF(("iop_lct_get0: reading LCT\n"));

	/*
	 * A plain read (chgind == 0) gets a 2 minute timeout; a change
	 * notification request gets none, since it blocks until the
	 * LCT actually changes.
	 */
	iop_msg_map(sc, im, lct, size, 0);
	rv = iop_msg_enqueue(sc, im, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}
980
/*
 * Read the IOP's logical configuration table into freshly allocated
 * storage, replacing any previously cached copy.  The expected size
 * from the status block is used as a first guess; if the table
 * reports a different size the read is retried at that size.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	/* The table size is reported in 32-bit words. */
	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		/* Size guess was wrong; re-read at the reported size. */
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}
1022
1023 /*
1024 * Request the specified parameter group from the target.
1025 */
1026 int
1027 iop_param_op(struct iop_softc *sc, int tid, int write, int group, void *buf,
1028 int size)
1029 {
1030 struct iop_msg *im;
1031 struct i2o_util_params_op *mb;
1032 int rv, func, op;
1033 struct {
1034 struct i2o_param_op_list_header olh;
1035 struct i2o_param_op_all_template oat;
1036 } req;
1037
1038 if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
1039 return (rv);
1040
1041 if (write) {
1042 func = I2O_UTIL_PARAMS_SET;
1043 op = I2O_PARAMS_OP_FIELD_SET;
1044 } else {
1045 func = I2O_UTIL_PARAMS_GET;
1046 op = I2O_PARAMS_OP_FIELD_GET;
1047 }
1048
1049 mb = (struct i2o_util_params_op *)im->im_msg;
1050 mb->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1051 mb->msgfunc = I2O_MSGFUNC(tid, func);
1052 mb->msgictx = IOP_ICTX;
1053 mb->msgtctx = im->im_tctx;
1054 mb->flags = 0;
1055
1056 req.olh.count = htole16(1);
1057 req.olh.reserved = htole16(0);
1058 req.oat.operation = htole16(op);
1059 req.oat.fieldcount = htole16(0xffff);
1060 req.oat.group = htole16(group);
1061
1062 memset(buf, 0, size);
1063 iop_msg_map(sc, im, &req, sizeof(req), 1);
1064 iop_msg_map(sc, im, buf, size, write);
1065
1066 rv = iop_msg_enqueue(sc, im, 5000);
1067 iop_msg_unmap(sc, im);
1068 iop_msg_free(sc, NULL, im);
1069 return (rv);
1070 }
1071
1072 /*
1073 * Execute a simple command (no parameters).
1074 */
1075 int
1076 iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
1077 int async, int timo)
1078 {
1079 struct iop_msg *im;
1080 struct i2o_msg *mb;
1081 int rv, fl;
1082
1083 fl = (async != 0 ? IM_NOWAIT : 0);
1084 if ((rv = iop_msg_alloc(sc, NULL, &im, fl | IM_NOINTR)) != 0)
1085 return (rv);
1086
1087 mb = (struct i2o_msg *)im->im_msg;
1088 mb->msgflags = I2O_MSGFLAGS(i2o_msg);
1089 mb->msgfunc = I2O_MSGFUNC(tid, function);
1090 mb->msgictx = ictx;
1091 mb->msgtctx = im->im_tctx;
1092
1093 if (async)
1094 rv = iop_msg_enqueue(sc, im, timo);
1095 else
1096 rv = iop_msg_send(sc, im, timo);
1097 iop_msg_free(sc, NULL, im);
1098 return (rv);
1099 }
1100
/*
 * Post the system table to the IOP, along with the ranges of private
 * memory and I/O space recorded in its status block.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mb;
	struct iop_msg *im;
	u_int32_t mema[2], ioa[2];
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_exec_sys_tab_set *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	/* Unit numbers offset by 2, matching the system table entries. */
	mb->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mb->segnumber = 0;

	/* XXX This is questionable, but better than nothing... */
	mema[0] = le32toh(sc->sc_status.currentprivmembase);
	mema[1] = le32toh(sc->sc_status.currentprivmemsize);
	ioa[0] = le32toh(sc->sc_status.currentpriviobase);
	ioa[1] = le32toh(sc->sc_status.currentpriviosize);

	/* SGLs: the table itself, then the memory and I/O ranges. */
	iop_msg_map(sc, im, iop_systab, iop_systab_size, 1);
	iop_msg_map(sc, im, mema, sizeof(mema), 1);
	iop_msg_map(sc, im, ioa, sizeof(ioa), 1);

	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}
1138
1139 /*
1140 * Reset the adapter. Must be called with interrupts disabled.
1141 */
1142 static int
1143 iop_reset(struct iop_softc *sc)
1144 {
1145 struct iop_msg *im;
1146 volatile u_int32_t sw;
1147 u_int32_t mfa;
1148 struct i2o_exec_iop_reset *mb;
1149 int rv;
1150
1151 if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
1152 return (rv);
1153
1154 sw = 0;
1155
1156 mb = (struct i2o_exec_iop_reset *)im->im_msg;
1157 mb->msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
1158 mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
1159 mb->reserved[0] = 0;
1160 mb->reserved[1] = 0;
1161 mb->reserved[2] = 0;
1162 mb->reserved[3] = 0;
1163 mb->statuslow = kvtop((caddr_t)&sw); /* XXX */
1164 mb->statushigh = 0;
1165
1166 if ((rv = iop_msg_send(sc, im, 0)))
1167 return (rv);
1168 iop_msg_free(sc, NULL, im);
1169
1170 POLL(2500, sw != 0); /* XXX */
1171 if (sw != I2O_RESET_IN_PROGRESS) {
1172 printf("%s: reset rejected\n", sc->sc_dv.dv_xname);
1173 return (EIO);
1174 }
1175
1176 /*
1177 * IOP is now in the INIT state. Wait no more than 10 seconds for
1178 * the inbound queue to become responsive.
1179 */
1180 POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
1181 if (mfa == IOP_MFA_EMPTY) {
1182 printf("%s: reset failed\n", sc->sc_dv.dv_xname);
1183 return (EIO);
1184 }
1185
1186 if (sw == I2O_RESET_REJECTED)
1187 printf("%s: reset rejected?\n", sc->sc_dv.dv_xname);
1188
1189 iop_release_mfa(sc, mfa);
1190 return (0);
1191 }
1192
1193 /*
1194 * Register a new initiator.
1195 */
1196 int
1197 iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
1198 {
1199 static int ictx;
1200 static int stctx;
1201
1202 /* 0 is reserved for system messages. */
1203 ii->ii_ictx = ++ictx;
1204 ii->ii_stctx = ++stctx | 0x80000000;
1205
1206 LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
1207 LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
1208
1209 return (0);
1210 }
1211
1212 /*
1213 * Unregister an initiator.
1214 */
1215 void
1216 iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
1217 {
1218
1219 LIST_REMOVE(ii, ii_list);
1220 LIST_REMOVE(ii, ii_hash);
1221 }
1222
/*
 * Handle a reply frame from the adapter.
 *
 * Locates the initiator named by the reply's initiator context and, for
 * initiators that track state, the originating message wrapper by
 * transaction context.  The initiator is notified (wakeup or interrupt
 * callback), the reply MFA is handed back to the IOP's outbound FIFO,
 * and the software queue is kicked.  Returns the raw request status
 * byte, or -1 if the reply carried an unknown ictx/tctx.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	/* The reply MFA is an offset into our reply buffer. */
	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation... */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off, IOP_MAX_MSG_SIZE,
	    BUS_DMASYNC_POSTREAD);
	/* If further replies are outstanding, re-arm the whole area. */
	if (--sc->sc_stat.is_cur_hwqueue != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;	/* system message; no registered initiator */
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, NULL, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)",
			    sc->sc_dv.dv_xname, ictx);

			/* Return the reply frame to the IOP's outbound FIFO. */
			iop_outl(sc, IOP_REG_OFIFO, rmfa);
			return (-1);
		}
	}

	status = rb->reqstatus;

	if (ii == NULL || (ii->ii_flags & II_DISCARD) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		tctx = le32toh(rb->msgtctx);
		im = TAILQ_FIRST(IOP_TCTXHASH(tctx));
		for (; im != NULL; im = TAILQ_NEXT(im, im_hash))
			if (im->im_tctx == tctx)
				break;
		if (im == NULL || (im->im_flags & IM_ALLOCED) == 0) {
#ifdef I2ODEBUG
			iop_reply_print(sc, NULL, rb);
#endif
			printf("%s: WARNING: bad tctx returned (%x, %p)",
			    sc->sc_dv.dv_xname, tctx, im);

			/* Return the reply frame to the IOP's outbound FIFO. */
			iop_outl(sc, IOP_REG_OFIFO, rmfa);
			return (-1);
		}
#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif

		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (rb->reqstatus != 0)
			iop_reply_print(sc, im, rb);
#endif
		/* Notify the initiator. */
		if ((im->im_flags & IM_WAITING) != 0) {
			/*
			 * Sleeper: copy the reply into the wrapper's frame
			 * (bounded by IOP_MAX_REPLY_SIZE) and wake it.
			 * Message size lives in bits 16..31 of msgflags,
			 * in 32-bit words; >>14 & ~3 converts to bytes.
			 */
			size = (le32toh(rb->msgflags) >> 14) & ~3;
			if (size > IOP_MAX_REPLY_SIZE)
				size = IOP_MAX_REPLY_SIZE;
			memcpy(im->im_msg, rb, size);
			wakeup(im);
		} else if ((im->im_flags & IM_NOINTR) == 0)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	/* Return the reply frame to the IOP's outbound FIFO. */
	iop_outl(sc, IOP_REG_OFIFO, rmfa);

	/* Run the queue: a hardware slot just freed up. */
	if ((im = SIMPLEQ_FIRST(&sc->sc_queue)) != NULL)
		iop_msg_enqueue(sc, im, 0);

	return (status);
}
1335
1336 /*
1337 * Handle an interrupt from the adapter.
1338 */
1339 int
1340 iop_intr(void *arg)
1341 {
1342 struct iop_softc *sc;
1343 u_int32_t rmfa;
1344
1345 sc = arg;
1346
1347 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
1348 return (0);
1349
1350 for (;;) {
1351 /* Double read to account for IOP bug. */
1352 if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY &&
1353 (rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY)
1354 break;
1355 iop_handle_reply(sc, rmfa);
1356 }
1357
1358 return (1);
1359 }
1360
1361 /*
1362 * Handle an event signalled by the executive.
1363 */
1364 static void
1365 iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
1366 {
1367 struct i2o_util_event_register_reply *rb;
1368 struct iop_softc *sc;
1369 u_int event;
1370
1371 sc = (struct iop_softc *)dv;
1372 rb = reply;
1373 event = le32toh(rb->event);
1374
1375 #ifndef I2ODEBUG
1376 if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED)
1377 return;
1378 #endif
1379
1380 printf("%s: event 0x%08x received\n", dv->dv_xname, event);
1381 }
1382
/*
 * Allocate a message wrapper.
 *
 * `ii' may be NULL for driver-originated messages.  Wrappers for
 * discarding initiators (II_DISCARD) reuse the initiator's static
 * transaction context; all others get a fresh context and are entered
 * into the transaction-context hash so replies can be matched later.
 * On success *imp points at the new wrapper and 0 is returned;
 * otherwise ENOMEM or a bus_dma error code.
 */
int
iop_msg_alloc(struct iop_softc *sc, struct iop_initiator *ii,
	      struct iop_msg **imp, int flags)
{
	struct iop_msg *im;
	static int tctxgen = 666;	/* arbitrary seed for dynamic tctxs */
	int s, rv, i, tctx;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();	/* XXX */

	if (ii != NULL && (ii->ii_flags & II_DISCARD) != 0) {
		flags |= IM_DISCARD;
		tctx = ii->ii_stctx;
	} else
		/* Top bit clear: distinguishes dynamic from static ctxs. */
		tctx = tctxgen++ & 0x7fffffff;

	im = (struct iop_msg *)pool_get(iop_msgpool,
	    (flags & IM_NOWAIT) == 0 ? PR_WAITOK : 0);
	if (im == NULL) {
		splx(s);
		return (ENOMEM);
	}

	/* XXX */
	rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER, IOP_MAX_SGL_ENTRIES,
	    IOP_MAX_XFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &im->im_xfer[0].ix_map);
	if (rv != 0) {
		pool_put(iop_msgpool, im);
		splx(s);
		return (rv);
	}

	/* Only trackable wrappers are entered into the tctx hash. */
	if ((flags & (IM_DISCARD | IM_NOICTX)) == 0)
		TAILQ_INSERT_TAIL(IOP_TCTXHASH(tctx), im, im_hash);

	splx(s);

	im->im_tctx = tctx;
	im->im_flags = flags | IM_ALLOCED;
	/* Mark all transfer slots unused; iop_msg_map() fills them. */
	for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
		im->im_xfer[i].ix_size = 0;
	*imp = im;

	return (0);
}
1437
/*
 * Free a message wrapper.
 *
 * Destroys the wrapper's static DMA map, removes it from the
 * transaction-context hash (if it was entered there) and returns it
 * to the wrapper pool.  `ii' is unused here but kept for interface
 * symmetry with iop_msg_alloc().
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_initiator *ii, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	/* XXX */
	bus_dmamap_destroy(sc->sc_dmat, im->im_xfer[0].ix_map);

	s = splbio();	/* XXX */

	/* Discard/no-ictx wrappers were never hashed; see iop_msg_alloc. */
	if ((im->im_flags & (IM_DISCARD | IM_NOICTX)) == 0)
		TAILQ_REMOVE(IOP_TCTXHASH(im->im_tctx), im, im_hash);

	im->im_flags = 0;	/* clears IM_ALLOCED: catches double frees */
	pool_put(iop_msgpool, im);
	splx(s);
}
1463
/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 *
 * Each call consumes the next free slot in im->im_xfer (up to
 * IOP_MAX_MSG_XFERS), loads a DMA map for [xferaddr, xferaddr+xfersize)
 * and appends simple SGL elements to the message.  `out' non-zero means
 * data flows host -> IOP.  Returns 0 or a bus_dma error code.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, void *xferaddr,
	    int xfersize, int out)
{
	struct iop_xfer *ix;
	u_int32_t *mb;
	int rv, seg, i;

	/* Find the first unused transfer slot. */
	for (i = 0, ix = im->im_xfer; i < IOP_MAX_MSG_XFERS; i++, ix++)
		if (ix->ix_size == 0)
			break;
#ifdef I2ODEBUG
	if (i == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/* Only the first DMA map is static. */
	if (i != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SGL_ENTRIES, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;

	rv = bus_dmamap_load(sc->sc_dmat, ix->ix_map, xferaddr, xfersize,
	    NULL, 0);
	if (rv != 0)
		return (rv);
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * Append to the frame: the current message length (in 32-bit
	 * words) is kept in the top half of im_msg[0].
	 */
	mb = im->im_msg + (im->im_msg[0] >> 16);
	if (out)
		out = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		out = I2O_SGL_SIMPLE;

	/* Two words per segment: flags|length, then physical address. */
	for (seg = 0; seg < ix->ix_map->dm_nsegs; seg++) {
#ifdef I2ODEBUG
		if ((seg << 1) + (im->im_msg[0] >> 16) >=
		    (IOP_MAX_MSG_SIZE >> 2))
			panic("iop_map_xfer: message frame too large");
#endif
		if (seg == ix->ix_map->dm_nsegs - 1)
			out |= I2O_SGL_END_BUFFER;
		*mb++ = (u_int32_t)ix->ix_map->dm_segs[seg].ds_len | out;
		*mb++ = (u_int32_t)ix->ix_map->dm_segs[seg].ds_addr;
	}

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		im->im_msg[0] += ((im->im_msg[0] >> 16) + seg * 2) << 4;
		im->im_flags |= IM_SGLOFFADJ;
	}
	/* Account for the new SGL words in the frame length field. */
	im->im_msg[0] += (seg << 17);
	return (0);
}
1531
1532 /*
1533 * Unmap all data transfers associated with a message wrapper.
1534 */
1535 void
1536 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
1537 {
1538 struct iop_xfer *ix;
1539 int i;
1540
1541 for (i = 0, ix = im->im_xfer; i < IOP_MAX_MSG_XFERS; i++, ix++) {
1542 if (ix->ix_size == 0)
1543 break;
1544 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
1545 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
1546 BUS_DMASYNC_POSTREAD);
1547 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
1548
1549 /* Only the first DMA map is static. */
1550 if (i != 0)
1551 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1552
1553 ix->ix_size = 0;
1554 }
1555 }
1556
1557 /*
1558 * Send a message to the IOP. Optionally, poll on completion. Return
1559 * non-zero if failure status is returned and IM_NOINTR is set.
1560 */
1561 int
1562 iop_msg_send(struct iop_softc *sc, struct iop_msg *im, int timo)
1563 {
1564 u_int32_t mfa, rmfa;
1565 int rv, status, i, s;
1566
1567 #ifdef I2ODEBUG
1568 if ((im->im_flags & IM_NOICTX) == 0)
1569 if (im->im_msg[3] == IOP_ICTX &&
1570 (im->im_flags & IM_NOINTR) == 0)
1571 panic("iop_msg_send: IOP_ICTX and !IM_NOINTR");
1572 if ((im->im_flags & IM_DISCARD) != 0)
1573 panic("iop_msg_send: IM_DISCARD");
1574 #endif
1575
1576 s = splbio(); /* XXX */
1577
1578 /* Wait up to 250ms for an MFA. */
1579 POLL(250, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
1580 if (mfa == IOP_MFA_EMPTY) {
1581 DPRINTF(("%s: mfa not forthcoming\n", sc->sc_dv.dv_xname));
1582 splx(s);
1583 return (EBUSY);
1584 }
1585
1586 /* Perform reply queue DMA synchronisation and update counters. */
1587 if ((im->im_flags & IM_NOICTX) == 0) {
1588 if (sc->sc_stat.is_cur_hwqueue == 0)
1589 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
1590 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
1591 for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
1592 sc->sc_stat.is_bytes += im->im_xfer[i].ix_size;
1593 sc->sc_stat.is_requests++;
1594 if (++sc->sc_stat.is_cur_hwqueue > sc->sc_stat.is_peak_hwqueue)
1595 sc->sc_stat.is_peak_hwqueue =
1596 sc->sc_stat.is_cur_hwqueue;
1597 }
1598
1599 /* Terminate scatter/gather lists. */
1600 if ((im->im_flags & IM_SGLOFFADJ) != 0)
1601 im->im_msg[(im->im_msg[0] >> 16) - 2] |= I2O_SGL_END;
1602
1603 /* Post the message frame. */
1604 bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa,
1605 im->im_msg, im->im_msg[0] >> 16);
1606 bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa,
1607 (im->im_msg[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
1608
1609 /* Post the MFA back to the IOP, thus starting the command. */
1610 iop_outl(sc, IOP_REG_IFIFO, mfa);
1611
1612 if (timo == 0) {
1613 splx(s);
1614 return (0);
1615 }
1616
1617 /* Wait for completion. */
1618 for (timo *= 10; timo != 0; timo--) {
1619 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
1620 /* Double read to account for IOP bug. */
1621 rmfa = iop_inl(sc, IOP_REG_OFIFO);
1622 if (rmfa == IOP_MFA_EMPTY)
1623 rmfa = iop_inl(sc, IOP_REG_OFIFO);
1624 if (rmfa != IOP_MFA_EMPTY)
1625 status = iop_handle_reply(sc, rmfa);
1626 }
1627 if ((im->im_flags & IM_REPLIED) != 0)
1628 break;
1629 DELAY(100);
1630 }
1631
1632 splx(s);
1633
1634 if (timo == 0) {
1635 #ifdef I2ODEBUG
1636 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
1637 if (iop_status_get(sc) != 0)
1638 printf("iop_msg_send: unable to retrieve status\n");
1639 else
1640 printf("iop_msg_send: IOP state = %d\n",
1641 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
1642 #endif
1643 rv = EBUSY;
1644 } else if ((im->im_flags & IM_NOINTR) != 0)
1645 rv = (status != I2O_STATUS_SUCCESS ? EIO : 0);
1646
1647 return (rv);
1648 }
1649
1650 /*
1651 * Try to post a message to the adapter; if that's not possible, enqueue it
1652 * with us. If a timeout is specified, wait for the message to complete.
1653 */
1654 int
1655 iop_msg_enqueue(struct iop_softc *sc, struct iop_msg *im, int timo)
1656 {
1657 u_int mfa;
1658 int s, fromqueue, i, rv;
1659
1660 #ifdef I2ODEBUG
1661 if (im == NULL)
1662 panic("iop_msg_enqueue: im == NULL");
1663 if (sc == NULL)
1664 panic("iop_msg_enqueue: sc == NULL");
1665 if ((im->im_flags & IM_NOICTX) != 0)
1666 panic("iop_msg_enqueue: IM_NOICTX");
1667 if (im->im_msg[3] == IOP_ICTX && (im->im_flags & IM_NOINTR) == 0)
1668 panic("iop_msg_enqueue: IOP_ICTX and no IM_NOINTR");
1669 if ((im->im_flags & IM_DISCARD) != 0 && timo != 0)
1670 panic("iop_msg_enqueue: IM_DISCARD && timo != 0");
1671 if ((im->im_flags & IM_NOINTR) == 0 && timo != 0)
1672 panic("iop_msg_enqueue: !IM_NOINTR && timo != 0");
1673 #endif
1674
1675 s = splbio(); /* XXX */
1676 fromqueue = (im == SIMPLEQ_FIRST(&sc->sc_queue));
1677
1678 if (sc->sc_stat.is_cur_hwqueue >= sc->sc_maxqueuecnt) {
1679 /*
1680 * While the IOP may be able to accept more inbound message
1681 * frames than it advertises, don't push harder than it
1682 * wants to go lest we starve it.
1683 *
1684 * XXX We should be handling IOP resource shortages.
1685 */
1686 mfa = IOP_MFA_EMPTY;
1687 DPRINTF(("iop_msg_enqueue: exceeded max queue count\n"));
1688 } else {
1689 /* Double read to account for IOP bug. */
1690 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
1691 mfa = iop_inl(sc, IOP_REG_IFIFO);
1692 }
1693
1694 if (mfa == IOP_MFA_EMPTY) {
1695 DPRINTF(("iop_msg_enqueue: no mfa\n"));
1696 /* Can't transfer to h/w queue - queue with us. */
1697 if (!fromqueue) {
1698 SIMPLEQ_INSERT_TAIL(&sc->sc_queue, im, im_queue);
1699 if (++sc->sc_stat.is_cur_swqueue >
1700 sc->sc_stat.is_peak_swqueue)
1701 sc->sc_stat.is_peak_swqueue =
1702 sc->sc_stat.is_cur_swqueue;
1703 }
1704 splx(s);
1705 if ((im->im_flags & IM_NOINTR) != 0)
1706 rv = iop_msg_wait(sc, im, timo);
1707 else
1708 rv = 0;
1709 return (rv);
1710 } else if (fromqueue) {
1711 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, im, im_queue);
1712 sc->sc_stat.is_cur_swqueue--;
1713 }
1714
1715 if ((im->im_flags & IM_NOINTR) != 0)
1716 im->im_flags |= IM_WAITING;
1717
1718 /* Perform reply queue DMA synchronisation and update counters. */
1719 if (sc->sc_stat.is_cur_hwqueue == 0)
1720 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
1721 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
1722
1723 for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
1724 sc->sc_stat.is_bytes += im->im_xfer[i].ix_size;
1725 sc->sc_stat.is_requests++;
1726 if (++sc->sc_stat.is_cur_hwqueue > sc->sc_stat.is_peak_hwqueue)
1727 sc->sc_stat.is_peak_hwqueue = sc->sc_stat.is_cur_hwqueue;
1728
1729 /* Terminate the scatter/gather list. */
1730 if ((im->im_flags & IM_SGLOFFADJ) != 0)
1731 im->im_msg[(im->im_msg[0] >> 16) - 2] |= I2O_SGL_END;
1732
1733 /* Post the message frame. */
1734 bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa,
1735 im->im_msg, im->im_msg[0] >> 16);
1736 bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa,
1737 (im->im_msg[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
1738
1739 /* Post the MFA back to the IOP, thus starting the command. */
1740 iop_outl(sc, IOP_REG_IFIFO, mfa);
1741
1742 /* If this is a discardable message wrapper, free it. */
1743 if ((im->im_flags & IM_DISCARD) != 0)
1744 iop_msg_free(sc, NULL, im);
1745 splx(s);
1746
1747 if ((im->im_flags & IM_NOINTR) != 0)
1748 rv = iop_msg_wait(sc, im, timo);
1749 else
1750 rv = 0;
1751 return (rv);
1752 }
1753
/*
 * Wait for the specified message to complete.
 *
 * Sleeps on the wrapper until iop_handle_reply() wakes it or `timo'
 * milliseconds elapse.  If the message was replied to and carries
 * status (no IM_NOSTATUS), the reply's request status is translated
 * to 0/EIO; otherwise the tsleep() result is returned.
 */
static int
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	struct i2o_reply *rb;
	int rv, s;

	s = splbio();
	/* Reply may already have arrived before we got here. */
	if ((im->im_flags & IM_REPLIED) != 0) {
		splx(s);
		return (0);
	}
	rv = tsleep(im, PRIBIO, "iopmsg", timo * hz / 1000);
	splx(s);
#ifdef I2ODEBUG
	if (rv != 0) {
		printf("iop_msg_wait: tsleep() == %d\n", rv);
		if (iop_status_get(sc) != 0)
			printf("iop_msg_wait: unable to retrieve status\n");
		else
			printf("iop_msg_wait: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
	}
#endif
	/* The reply was copied into im_msg by iop_handle_reply(). */
	if ((im->im_flags & (IM_REPLIED | IM_NOSTATUS)) == IM_REPLIED) {
		rb = (struct i2o_reply *)im->im_msg;
		rv = (rb->reqstatus != I2O_STATUS_SUCCESS ? EIO : 0);
	}
	return (rv);
}
1786
/*
 * Release an unused message frame back to the IOP's inbound fifo.
 *
 * An MFA pulled from the inbound FIFO cannot simply be discarded, so
 * a four-word UtilNOP frame is written into it and posted.
 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op. */
	iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));	/* 4-word frame */
	iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	iop_outl(sc, mfa + 8, 0);
	iop_outl(sc, mfa + 12, 0);

	/* Hand the frame back, which consumes it. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);
}
1802
#ifdef I2ODEBUG
/*
 * Print status information from a failure reply frame.
 *
 * `im' may be NULL when the reply could not be matched to a wrapper.
 */
static void
iop_reply_print(struct iop_softc *sc, struct iop_msg *im,
		struct i2o_reply *rb)
{
	u_int function, detail;
#ifdef I2OVERBOSE
	const char *statusstr;
#endif

	/* Fixed: the panic previously named a non-existent function. */
	if (im != NULL && (im->im_flags & IM_REPLIED) == 0)
		panic("iop_reply_print: %p not replied to", im);

	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
	detail = le16toh(rb->detail);

	printf("%s: reply:\n", sc->sc_dv.dv_xname);

#ifdef I2OVERBOSE
	/* Translate the status code if it is within the known table. */
	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: function=0x%02x status=0x%02x (%s)\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
#else
	printf("%s: function=0x%02x status=0x%02x\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus);
#endif
	printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
	    sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
	    le32toh(rb->msgtctx));
	printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
	    (le32toh(rb->msgflags) >> 8) & 0xff);
}
#endif
1844
1845 /*
1846 * Translate an I2O ASCII field into a C string.
1847 */
1848 void
1849 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
1850 {
1851 int hc, lc, i, nit;
1852
1853 dlen--;
1854 lc = 0;
1855 hc = 0;
1856 i = 0;
1857
1858 /*
1859 * DPT use NUL as a space, whereas AMI use it as a terminator. The
1860 * spec has nothing to say about it. Since AMI fields are usually
1861 * filled with junk after the terminator, ...
1862 */
1863 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
1864
1865 while (slen-- != 0 && dlen-- != 0) {
1866 if (nit && *src == '\0')
1867 break;
1868 else if (*src <= 0x20 || *src >= 0x7f) {
1869 if (hc)
1870 dst[i++] = ' ';
1871 } else {
1872 hc = 1;
1873 dst[i++] = *src;
1874 lc = i;
1875 }
1876 src++;
1877 }
1878
1879 dst[lc] = '\0';
1880 }
1881
1882 /*
1883 * Claim or unclaim the specified TID.
1884 */
1885 int
1886 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
1887 int flags)
1888 {
1889 struct iop_msg *im;
1890 struct i2o_util_claim *mb;
1891 int rv, func;
1892
1893 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
1894
1895 if ((rv = iop_msg_alloc(sc, ii, &im, IM_NOINTR)) != 0)
1896 return (rv);
1897
1898 /* We can use the same structure, as both are identical. */
1899 mb = (struct i2o_util_claim *)im->im_msg;
1900 mb->msgflags = I2O_MSGFLAGS(i2o_util_claim);
1901 mb->msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
1902 mb->msgictx = ii->ii_ictx;
1903 mb->msgtctx = im->im_tctx;
1904 mb->flags = flags;
1905
1906 rv = iop_msg_enqueue(sc, im, 5000);
1907 iop_msg_free(sc, ii, im);
1908 return (rv);
1909 }
1910
1911 /*
1912 * Perform an abort.
1913 */
1914 int iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
1915 int tctxabort, int flags)
1916 {
1917 struct iop_msg *im;
1918 struct i2o_util_abort *mb;
1919 int rv;
1920
1921 if ((rv = iop_msg_alloc(sc, ii, &im, IM_NOINTR)) != 0)
1922 return (rv);
1923
1924 mb = (struct i2o_util_abort *)im->im_msg;
1925 mb->msgflags = I2O_MSGFLAGS(i2o_util_abort);
1926 mb->msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
1927 mb->msgictx = ii->ii_ictx;
1928 mb->msgtctx = im->im_tctx;
1929 mb->flags = (func << 24) | flags;
1930 mb->tctxabort = tctxabort;
1931
1932 rv = iop_msg_enqueue(sc, im, 5000);
1933 iop_msg_free(sc, ii, im);
1934 return (rv);
1935 }
1936
1937 /*
1938 * Enable or disable event types for the specified device.
1939 */
1940 int iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
1941 {
1942 struct iop_msg *im;
1943 struct i2o_util_event_register *mb;
1944 int rv;
1945
1946 if ((rv = iop_msg_alloc(sc, ii, &im, 0)) != 0)
1947 return (rv);
1948
1949 mb = (struct i2o_util_event_register *)im->im_msg;
1950 mb->msgflags = I2O_MSGFLAGS(i2o_util_event_register);
1951 mb->msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
1952 mb->msgictx = ii->ii_ictx;
1953 mb->msgtctx = im->im_tctx;
1954 mb->eventmask = mask;
1955
1956 return (iop_msg_enqueue(sc, im, 0));
1957 }
1958
1959 int
1960 iopopen(dev_t dev, int flag, int mode, struct proc *p)
1961 {
1962 struct iop_softc *sc;
1963 int unit, error;
1964
1965 unit = minor(dev);
1966
1967 sc = device_lookup(&iop_cd, minor(dev));
1968 if ((sc = iop_cd.cd_devs[unit]) == NULL)
1969 return (ENXIO);
1970 if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
1971 return (error);
1972
1973 if ((sc->sc_flags & IOP_OPEN) != 0)
1974 return (EBUSY);
1975 if ((sc->sc_flags & IOP_ONLINE) == 0)
1976 return (EIO);
1977 sc->sc_flags |= IOP_OPEN;
1978
1979 return (0);
1980 }
1981
1982 int
1983 iopclose(dev_t dev, int flag, int mode, struct proc *p)
1984 {
1985 struct iop_softc *sc;
1986
1987 sc = device_lookup(&iop_cd, minor(dev));
1988 sc->sc_flags &= ~IOP_OPEN;
1989 return (0);
1990 }
1991
1992 int
1993 iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
1994 {
1995 struct iop_softc *sc;
1996 struct iovec *iov;
1997 struct ioppt *pt;
1998 struct iop_msg *im;
1999 struct i2o_msg *mb;
2000 struct i2o_reply *rb;
2001 int rv, i;
2002
2003 if (securelevel >= 2)
2004 return (EPERM);
2005
2006 sc = device_lookup(&iop_cd, minor(dev));
2007
2008 switch (cmd) {
2009 case IOPIOCPT:
2010 pt = (struct ioppt *)data;
2011
2012 if (pt->pt_msglen > IOP_MAX_MSG_SIZE ||
2013 pt->pt_msglen < sizeof(struct i2o_msg) ||
2014 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2015 pt->pt_nbufs < 0 ||
2016 pt->pt_replylen < 0 ||
2017 pt->pt_timo < 1000 ||
2018 pt->pt_timo > 5*60*1000) {
2019 rv = EINVAL;
2020 break;
2021 }
2022
2023 rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR | IM_NOSTATUS);
2024 if (rv != 0)
2025 break;
2026
2027 if ((rv = copyin(pt->pt_msg, im->im_msg, pt->pt_msglen)) != 0) {
2028 iop_msg_free(sc, NULL, im);
2029 break;
2030 }
2031
2032 mb = (struct i2o_msg *)im->im_msg;
2033 mb->msgictx = IOP_ICTX;
2034 mb->msgtctx = im->im_tctx;
2035
2036 for (i = 0; i < pt->pt_nbufs; i++) {
2037 rv = iop_msg_map(sc, im, pt->pt_bufs[i].ptb_data,
2038 pt->pt_bufs[i].ptb_datalen,
2039 pt->pt_bufs[i].ptb_out != 0);
2040 if (rv != 0) {
2041 iop_msg_free(sc, NULL, im);
2042 return (rv);
2043 }
2044 }
2045
2046 if ((rv = iop_msg_enqueue(sc, im, pt->pt_timo)) == 0) {
2047 rb = (struct i2o_reply *)im->im_msg;
2048 i = (le32toh(rb->msgflags) >> 14) & ~3; /* XXX */
2049 if (i > IOP_MAX_REPLY_SIZE)
2050 i = IOP_MAX_REPLY_SIZE;
2051 if (i > pt->pt_replylen)
2052 i = pt->pt_replylen;
2053 rv = copyout(rb, pt->pt_reply, i);
2054 }
2055
2056 iop_msg_free(sc, NULL, im);
2057 break;
2058
2059 case IOPIOCGLCT:
2060 iov = (struct iovec *)data;
2061 rv = lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
2062 if (rv == 0) {
2063 i = le16toh(sc->sc_lct->tablesize) << 2;
2064 if (i > iov->iov_len)
2065 i = iov->iov_len;
2066 else
2067 iov->iov_len = i;
2068 rv = copyout(sc->sc_lct, iov->iov_base, i);
2069 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2070 }
2071 break;
2072
2073 case IOPIOCGSTATUS:
2074 iov = (struct iovec *)data;
2075 i = sizeof(struct i2o_status);
2076 if (i > iov->iov_len)
2077 i = iov->iov_len;
2078 else
2079 iov->iov_len = i;
2080 if ((rv = iop_status_get(sc)) == 0)
2081 rv = copyout(&sc->sc_status, iov->iov_base, i);
2082 break;
2083
2084 case IOPIOCRECONFIG:
2085 rv = iop_reconfigure(sc, 0);
2086 break;
2087
2088 default:
2089 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2090 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2091 #endif
2092 rv = ENOTTY;
2093 break;
2094 }
2095
2096 return (rv);
2097 }
2098