/*	$NetBSD: iop.c,v 1.9 2001/01/03 21:04:01 ad Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/pool.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>
#define	POLL(ms, cond)					\
do {							\
	int i;						\
	for (i = (ms) * 10; i; i--) {			\
		if (cond)				\
			break;				\
		DELAY(100);				\
	}						\
} while (/* CONSTCOND */ 0)
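
/*
 * Illustrative note on POLL(): it busy-waits in 100us steps for up to
 * roughly `ms' milliseconds, re-evaluating `cond' on every pass.  For
 * example, iop_status_get() below waits for the IOP to fill in a
 * status buffer with:
 *
 *	POLL(2500, *((volatile u_char *)&sc->sc_status.syncbyte) == 0xff);
 *
 * Since the condition is re-evaluated each iteration, it must name
 * volatile storage if the value is updated behind the CPU's back.
 */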

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#else
#define	IFVERBOSE(x)
#endif

#define	COMMENT(x)	""

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])
#define	IOP_TCTXHASH_NBUCKETS	64
#define	IOP_TCTXHASH(tctx)	(&iop_tctxhashtbl[(tctx) & iop_tctxhash])
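
/*
 * A sketch of the context hashing above: initiator and transaction
 * contexts index power-of-two bucket arrays, so a lookup is a simple
 * mask.  For example, with IOP_TCTXHASH_NBUCKETS == 64, iop_tctxhash
 * is 63, and a wrapper with im_tctx == 0x2a9 lands in bucket
 * 0x2a9 & 63 == 41.
 */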

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static TAILQ_HEAD(, iop_msg) *iop_tctxhashtbl;
static u_long iop_tctxhash;
static void *iop_sdh;
static struct pool *iop_msgpool;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

extern struct cfdriver iop_cd;

#define	IC_CONFIGURE	0x01

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
	const char	*ic_caption;
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char *iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t iop_inl(struct iop_softc *, int);
static inline void iop_outl(struct iop_softc *, int, u_int32_t);

static void iop_config_interrupts(struct device *);
static void iop_configure_devices(struct iop_softc *);
static void iop_devinfo(int, char *);
static int iop_print(void *, const char *);
static int iop_reconfigure(struct iop_softc *, u_int32_t, int);
static void iop_shutdown(void *);
static int iop_submatch(struct device *, struct cfdata *, void *);
#ifdef notyet
static int iop_vendor_print(void *, const char *);
#endif

static void iop_create_reconf_thread(void *);
static void iop_intr_event(struct device *, struct iop_msg *, void *);
static int iop_hrt_get(struct iop_softc *);
static int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static int iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			u_int32_t);
static int iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int iop_ofifo_init(struct iop_softc *);
static int iop_handle_reply(struct iop_softc *, u_int32_t);
static void iop_reconf_thread(void *);
static void iop_release_mfa(struct iop_softc *, u_int32_t);
static int iop_reset(struct iop_softc *);
static int iop_status_get(struct iop_softc *);
static int iop_systab_set(struct iop_softc *);

#ifdef I2ODEBUG
static void iop_reply_print(struct iop_softc *, struct iop_msg *,
			    struct i2o_reply *);
#endif

cdev_decl(iop);

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
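
/*
 * Note on the accessors above (a tentative reading, not original
 * commentary): iop_inl() issues a read/write barrier before reading,
 * and iop_outl() a write barrier after writing.  The likely intent is
 * to keep FIFO register accesses strictly ordered: a reordered access
 * could otherwise return IOP_MFA_EMPTY spuriously, or post an MFA
 * before the message payload is visible to the IOP.
 */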

/*
 * Initialise the adapter.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	int rv;
	u_int32_t mask;
	static int again;
	char ident[64];

	if (again == 0) {
		/* Create the shared message wrapper pool and hashes. */
		iop_msgpool = pool_create(sizeof(struct iop_msg), 0, 0, 0,
		    "ioppl", 0, NULL, NULL, M_DEVBUF);
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);
		iop_tctxhashtbl = hashinit(IOP_TCTXHASH_NBUCKETS, HASH_TAILQ,
		    M_DEVBUF, M_NOWAIT, &iop_tctxhash);
		again = 1;
	}

	/* Reset the IOP and request status. */
	printf("I2O adapter");

	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		return;
	}
	if ((rv = iop_status_get(sc)) != 0) {
		printf("%s: not responding (get status)\n", sc->sc_dv.dv_xname);
		return;
	}
	DPRINTF((" (state=%d)",
	    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff));
	sc->sc_flags |= IOP_HAVESTATUS;

	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem  %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o  %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxreplycnt = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxreplycnt > IOP_MAX_HW_REPLYCNT)
		sc->sc_maxreplycnt = IOP_MAX_HW_REPLYCNT;
	sc->sc_maxqueuecnt = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxqueuecnt > IOP_MAX_HW_QUEUECNT)
		sc->sc_maxqueuecnt = IOP_MAX_HW_QUEUECNT;

	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname,
	    sc->sc_maxqueuecnt, le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxreplycnt, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	SIMPLEQ_INIT(&sc->sc_queue);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT);
		if (iop_systab == NULL)
			return;

		memset(iop_systab, 0, i);
		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    5000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_DISCARD | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	if (iop_initiator_register(sc, &sc->sc_eventii) != 0) {
		printf("%s: unable to register initiator\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	if (iop_util_eventreg(sc, &sc->sc_eventii, 0xffffffff)) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

#ifdef notyet
	/* Attempt to match and attach a product-specific extension. */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);
#endif

	if ((rv = iop_reconfigure(sc, 0, 0)) != 0) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		return;
	}

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;

	sc->sc_flags |= IOP_ONLINE;
	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if one is received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;

	sc = cookie;

	for (;;) {
		chgind = le32toh(sc->sc_chgindicator) + 1;

		if (iop_lct_get0(sc, &lct, sizeof(lct), chgind) == 0) {
			DPRINTF(("%s: async reconfiguration (0x%08x)\n",
			    sc->sc_dv.dv_xname, le32toh(lct.changeindicator)));
			iop_reconfigure(sc, lct.changeindicator, LK_NOWAIT);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}
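
/*
 * A note on the request above: asking for the LCT with a change
 * indicator one greater than the last one seen lets the IOP hold the
 * reply until the table actually changes, and the zero timeout passed
 * down through iop_lct_get0() means the thread sleeps on the request
 * indefinitely rather than polling the whole table.
 */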

/*
 * Reconfigure: find new and removed devices.
 */
static int
iop_reconfigure(struct iop_softc *sc, u_int32_t chgind, int lkflags)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan *mb;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	lkflags |= LK_EXCLUSIVE | LK_RECURSEFAIL;
	if ((rv = lockmgr(&sc->sc_conflock, lkflags, NULL)) != 0) {
		DPRINTF(("iop_reconfigure: unable to acquire lock\n"));
		return (rv);
	}

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			goto done;
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le32toh(le->localtid) & 4095;

			rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR);
			if (rv != 0) {
				DPRINTF(("iop_reconfigure: alloc msg\n"));
				goto done;
			}

			mb = (struct i2o_hba_bus_scan *)im->im_msg;
			mb->msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mb->msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mb->msgictx = IOP_ICTX;
			mb->msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_enqueue(sc, im, 5*60*1000);
			iop_msg_free(sc, NULL, im);
			if (rv != 0) {
				DPRINTF(("iop_reconfigure: scan failed\n"));
				goto done;
			}
		}
	} else if (chgind == sc->sc_chgindicator) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		goto done;
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		goto done;
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	if (sc->sc_lct->changeindicator == sc->sc_chgindicator) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		/* Nothing to do. */
		rv = 0;
		goto done;
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgindicator = sc->sc_lct->changeindicator;
	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_tidmap == NULL) {
		rv = ENOMEM;
		goto done;
	}

	/* Match and attach child devices. */
	iop_configure_devices(sc);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = ii;
		do {
			if ((nextii = LIST_NEXT(nextii, ii_list)) == NULL)
				break;
		} while ((nextii->ii_flags & II_UTILITY) != 0);
		if ((ii->ii_flags & II_UTILITY) != 0)
			continue;

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}
	rv = 0;

 done:
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
	return (rv);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le32toh(le->localtid) & 4095;
		sc->sc_tidmap[i].it_flags = 0;
		sc->sc_tidmap[i].it_dvname[0] = '\0';

		/*
		 * Ignore the device if it's in use.
		 */
		if ((le32toh(le->usertid) & 4095) != 4095)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & IC_CONFIGURE) == 0)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if ((ii->ii_flags & II_UTILITY) != 0)
				continue;
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

#ifdef notyet
static int
iop_vendor_print(void *aux, const char *pnp)
{

	if (pnp != NULL)
		printf("vendor specific extension at %s", pnp);
	return (UNCONF);
}
#endif

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices... ");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX,
		    0, 5000);
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve adapter status.
 */
static int
iop_status_get(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_exec_status_get *mb;
	int rv, s;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
		return (rv);

	mb = (struct i2o_exec_status_get *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mb->reserved[0] = 0;
	mb->reserved[1] = 0;
	mb->reserved[2] = 0;
	mb->reserved[3] = 0;
	mb->addrlow = kvtop((caddr_t)&sc->sc_status);	/* XXX */
	mb->addrhigh = 0;
	mb->length = sizeof(sc->sc_status);

	s = splbio();
	memset(&sc->sc_status, 0, sizeof(sc->sc_status));

	if ((rv = iop_msg_send(sc, im, 0)) != 0) {
		splx(s);
		iop_msg_free(sc, NULL, im);
		return (rv);
	}

	/* XXX */
	POLL(2500, *((volatile u_char *)&sc->sc_status.syncbyte) == 0xff);

	splx(s);
	iop_msg_free(sc, NULL, im);
	return (*((volatile u_char *)&sc->sc_status.syncbyte) != 0xff);
}

/*
 * Initialize and populate the adapter's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	struct iop_msg *im;
	volatile u_int32_t status;
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mb;
	int i, rseg, rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
		return (rv);

	mb = (struct i2o_exec_outbound_init *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->pagesize = PAGE_SIZE;
	mb->flags = 0x80 | ((IOP_MAX_REPLY_SIZE >> 2) << 16);	/* XXX */

	status = 0;

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and in fact appears to be a bad
	 * thing).
	 */
	iop_msg_map(sc, im, (void *)&status, sizeof(status), 0);
	if ((rv = iop_msg_send(sc, im, 0)) != 0) {
		iop_msg_free(sc, NULL, im);
		return (rv);
	}
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);

	/* XXX */
	POLL(5000, status == I2O_EXEC_OUTBOUND_INIT_COMPLETE);
	if (status != I2O_EXEC_OUTBOUND_INIT_COMPLETE) {
		printf("%s: outbound FIFO init failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	/* If we need to allocate DMA safe memory, do it now. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxreplycnt * IOP_MAX_REPLY_SIZE;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap, sc->sc_rep,
		    sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxreplycnt, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += IOP_MAX_REPLY_SIZE;
	}

	return (0);
}
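
/*
 * A worked example of the reply-area layout above (the numbers are
 * illustrative): with sc_maxreplycnt == 32 and IOP_MAX_REPLY_SIZE ==
 * 128, one 4KB DMA region is carved into 32 frames, and the bus
 * address of each frame is pushed onto the outbound FIFO:
 *
 *	sc_rep_phys + 0*128, sc_rep_phys + 1*128, ..., sc_rep_phys + 31*128
 *
 * The IOP later hands a frame back by returning its address through
 * IOP_REG_OFIFO, and iop_handle_reply() recovers the offset as
 * rmfa - sc_rep_phys.
 */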

/*
 * Read the specified number of bytes from the IOP's hardware resource
 * table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mb;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_exec_hrt_get *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, hrt, size, 0);
	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	if ((rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr))) != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this is
 * a verbatim notification request, so the caller must be prepared to
 * wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	memset(lct, 0, size);
	memset(im->im_msg, 0, sizeof(im->im_msg));

	mb = (struct i2o_exec_lct_notify *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->classid = I2O_CLASS_ANY;
	mb->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, lct, size, 0);
	rv = iop_msg_enqueue(sc, im, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}
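
/*
 * A worked example of the entry count above (struct sizes here are
 * illustrative): tablesize is in 32-bit words, so a table of 102
 * words is 408 bytes.  If sizeof(struct i2o_lct) is 48 (the header
 * plus its one built-in entry) and sizeof(struct i2o_lct_entry) is
 * 36, then
 *
 *	nlctent = (408 - 48 + 36) / 36 = 11
 *
 * i.e. the header's built-in entry is added back in before dividing.
 */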

/*
 * Read or write the specified parameter group on the target device.
 */
int
iop_param_op(struct iop_softc *sc, int tid, int write, int group, void *buf,
	     int size)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mb;
	int rv, func, op;
	struct {
		struct	i2o_param_op_list_header olh;
		struct	i2o_param_op_all_template oat;
	} req;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	if (write) {
		func = I2O_UTIL_PARAMS_SET;
		op = I2O_PARAMS_OP_FIELD_SET;
	} else {
		func = I2O_UTIL_PARAMS_GET;
		op = I2O_PARAMS_OP_FIELD_GET;
	}

	mb = (struct i2o_util_params_op *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mb->msgfunc = I2O_MSGFUNC(tid, func);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->flags = 0;

	req.olh.count = htole16(1);
	req.olh.reserved = htole16(0);
	req.oat.operation = htole16(op);
	req.oat.fieldcount = htole16(0xffff);
	req.oat.group = htole16(group);

	/* Zero the reply buffer only on a read; a write supplies data. */
	if (!write)
		memset(buf, 0, size);
	iop_msg_map(sc, im, &req, sizeof(req), 1);
	iop_msg_map(sc, im, buf, size, write);

	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}
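
/*
 * Usage sketch (illustrative; the reply layout shown is an assumption
 * based on the I2O parameter-get convention, and error handling is
 * omitted): reading a device's identity group into a caller-supplied
 * buffer.
 *
 *	struct {
 *		struct	i2o_param_op_results pr;
 *		struct	i2o_param_read_results prr;
 *		struct	i2o_param_device_identity di;
 *	} __attribute__ ((__packed__)) p;
 *
 *	if (iop_param_op(sc, tid, 0, I2O_PARAM_DEVICE_IDENTITY,
 *	    &p, sizeof(p)) != 0)
 *		return;
 */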

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg *mb;
	int rv, fl;

	fl = (async != 0 ? IM_NOWAIT : 0);
	if ((rv = iop_msg_alloc(sc, NULL, &im, fl | IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_msg *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_msg);
	mb->msgfunc = I2O_MSGFUNC(tid, function);
	mb->msgictx = ictx;
	mb->msgtctx = im->im_tctx;

	if (async)
		rv = iop_msg_enqueue(sc, im, timo);
	else
		rv = iop_msg_send(sc, im, timo);
	iop_msg_free(sc, NULL, im);
	return (rv);
}
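
/*
 * For example, iop_shutdown() above quiesces and clears each IOP with
 * two simple commands, waiting up to five seconds for each:
 *
 *	iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
 *	    0, 5000);
 *	iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX,
 *	    0, 5000);
 */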

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mb;
	struct iop_msg *im;
	u_int32_t mema[2], ioa[2];
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_exec_sys_tab_set *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mb->segnumber = 0;

	/* XXX This is questionable, but better than nothing... */
	mema[0] = le32toh(sc->sc_status.currentprivmembase);
	mema[1] = le32toh(sc->sc_status.currentprivmemsize);
	ioa[0] = le32toh(sc->sc_status.currentpriviobase);
	ioa[1] = le32toh(sc->sc_status.currentpriviosize);

	iop_msg_map(sc, im, iop_systab, iop_systab_size, 1);
	iop_msg_map(sc, im, mema, sizeof(mema), 1);
	iop_msg_map(sc, im, ioa, sizeof(ioa), 1);

	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Reset the adapter.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	struct iop_msg *im;
	volatile u_int32_t sw;
	u_int32_t mfa;
	struct i2o_exec_iop_reset *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
		return (rv);

	sw = 0;

	mb = (struct i2o_exec_iop_reset *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mb->reserved[0] = 0;
	mb->reserved[1] = 0;
	mb->reserved[2] = 0;
	mb->reserved[3] = 0;
	mb->statuslow = kvtop((caddr_t)&sw);		/* XXX */
	mb->statushigh = 0;
	if ((rv = iop_msg_send(sc, im, 0)) != 0) {
		iop_msg_free(sc, NULL, im);
		return (rv);
	}
	iop_msg_free(sc, NULL, im);

	POLL(2500, sw != 0);				/* XXX */
	if (sw != I2O_RESET_IN_PROGRESS) {
		printf("%s: reset rejected\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	if (sw == I2O_RESET_REJECTED)
		printf("%s: reset rejected?\n", sc->sc_dv.dv_xname);

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.
 */
int
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictx;
	static int stctx;

	/* 0 is reserved for system messages. */
	ii->ii_ictx = ++ictx;
	ii->ii_stctx = ++stctx | 0x80000000;

	LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);

	return (0);
}

/*
 * Unregister an initiator.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{

	LIST_REMOVE(ii, ii_list);
	LIST_REMOVE(ii, ii_hash);
}
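
/*
 * Note on the context values above: initiator contexts (ii_ictx)
 * start at 1, since 0 (IOP_ICTX) is reserved for the driver's own
 * messages.  Static transaction contexts (ii_stctx) have the high
 * bit set, keeping them distinct from the generated per-wrapper tctx
 * values, which iop_msg_alloc() masks to 31 bits.
 */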

/*
 * Handle a reply frame from the adapter.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation... */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off, IOP_MAX_MSG_SIZE,
	    BUS_DMASYNC_POSTREAD);
	if (--sc->sc_stat.is_cur_hwqueue != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, NULL, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);

			/* Return the reply frame to the IOP's outbound FIFO. */
			iop_outl(sc, IOP_REG_OFIFO, rmfa);
			return (-1);
		}
	}

	status = rb->reqstatus;

	if (ii == NULL || (ii->ii_flags & II_DISCARD) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		tctx = le32toh(rb->msgtctx);
		im = TAILQ_FIRST(IOP_TCTXHASH(tctx));
		for (; im != NULL; im = TAILQ_NEXT(im, im_hash))
			if (im->im_tctx == tctx)
				break;
		if (im == NULL || (im->im_flags & IM_ALLOCED) == 0) {
#ifdef I2ODEBUG
			iop_reply_print(sc, NULL, rb);
#endif
			printf("%s: WARNING: bad tctx returned (%x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);

			/* Return the reply frame to the IOP's outbound FIFO. */
			iop_outl(sc, IOP_REG_OFIFO, rmfa);
			return (-1);
		}
#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif

		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (rb->reqstatus != 0)
			iop_reply_print(sc, im, rb);
#endif
		/* Notify the initiator. */
		if ((im->im_flags & IM_WAITING) != 0) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
			if (size > IOP_MAX_REPLY_SIZE)
				size = IOP_MAX_REPLY_SIZE;
			memcpy(im->im_msg, rb, size);
			wakeup(im);
		} else if ((im->im_flags & IM_NOINTR) == 0)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	/* Return the reply frame to the IOP's outbound FIFO. */
	iop_outl(sc, IOP_REG_OFIFO, rmfa);

	/* Run the queue. */
	if ((im = SIMPLEQ_FIRST(&sc->sc_queue)) != NULL)
		iop_msg_enqueue(sc, im, 0);

	return (status);
}

/*
 * Handle an interrupt from the adapter.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY &&
		    (rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY)
			break;
		iop_handle_reply(sc, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;
	event = le32toh(rb->event);

#ifndef I2ODEBUG
	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED)
		return;
#endif

	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
int
iop_msg_alloc(struct iop_softc *sc, struct iop_initiator *ii,
	      struct iop_msg **imp, int flags)
{
	struct iop_msg *im;
	static int tctxgen = 666;
	int s, rv, i, tctx;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();	/* XXX */

	if (ii != NULL && (ii->ii_flags & II_DISCARD) != 0) {
		flags |= IM_DISCARD;
		tctx = ii->ii_stctx;
	} else
		tctx = tctxgen++ & 0x7fffffff;

	im = (struct iop_msg *)pool_get(iop_msgpool,
	    (flags & IM_NOWAIT) == 0 ? PR_WAITOK : 0);
	if (im == NULL) {
		splx(s);
		return (ENOMEM);
	}

	/* XXX */
	rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER, IOP_MAX_SGL_ENTRIES,
	    IOP_MAX_XFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &im->im_xfer[0].ix_map);
	if (rv != 0) {
		pool_put(iop_msgpool, im);
		splx(s);
		return (rv);
	}

	if ((flags & (IM_DISCARD | IM_NOICTX)) == 0)
		TAILQ_INSERT_TAIL(IOP_TCTXHASH(tctx), im, im_hash);

	splx(s);

	im->im_tctx = tctx;
	im->im_flags = flags | IM_ALLOCED;
	for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
		im->im_xfer[i].ix_size = 0;
	*imp = im;

	return (0);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_initiator *ii, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	/* XXX */
	bus_dmamap_destroy(sc->sc_dmat, im->im_xfer[0].ix_map);

	s = splbio();	/* XXX */

	if ((im->im_flags & (IM_DISCARD | IM_NOICTX)) == 0)
		TAILQ_REMOVE(IOP_TCTXHASH(im->im_tctx), im, im_hash);

	im->im_flags = 0;
	pool_put(iop_msgpool, im);
	splx(s);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, void *xferaddr,
	    int xfersize, int out)
{
	struct iop_xfer *ix;
	u_int32_t *mb;
	int rv, seg, i;

	for (i = 0, ix = im->im_xfer; i < IOP_MAX_MSG_XFERS; i++, ix++)
		if (ix->ix_size == 0)
			break;
#ifdef I2ODEBUG
	if (i == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/* Only the first DMA map is static. */
	if (i != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SGL_ENTRIES, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;

	rv = bus_dmamap_load(sc->sc_dmat, ix->ix_map, xferaddr, xfersize,
	    NULL, 0);
	if (rv != 0)
		return (rv);
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	mb = im->im_msg + (im->im_msg[0] >> 16);
	if (out)
		out = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		out = I2O_SGL_SIMPLE;

	for (seg = 0; seg < ix->ix_map->dm_nsegs; seg++) {
#ifdef I2ODEBUG
		if ((seg << 1) + (im->im_msg[0] >> 16) >=
		    (IOP_MAX_MSG_SIZE >> 2))
			panic("iop_map_xfer: message frame too large");
#endif
		if (seg == ix->ix_map->dm_nsegs - 1)
			out |= I2O_SGL_END_BUFFER;
		*mb++ = (u_int32_t)ix->ix_map->dm_segs[seg].ds_len | out;
		*mb++ = (u_int32_t)ix->ix_map->dm_segs[seg].ds_addr;
	}

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		im->im_msg[0] += ((im->im_msg[0] >> 16) + seg * 2) << 4;
		im->im_flags |= IM_SGLOFFADJ;
	}
	im->im_msg[0] += (seg << 17);
	return (0);
}
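
/*
 * A note on the header arithmetic above (a tentative reading of the
 * I2O frame layout, not original commentary): word 0 of a message
 * frame carries the frame size in 32-bit words in its upper 16 bits,
 * and the SGL offset in the nibble at bits 4-7.  Each simple SGL
 * element written is two words, so the size field grows by seg * 2
 * (the `seg << 17'), while the offset nibble is adjusted only once
 * per message, tracked via IM_SGLOFFADJ.
 */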

/*
 * Unmap all data transfers associated with a message wrapper.
 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

	for (i = 0, ix = im->im_xfer; i < IOP_MAX_MSG_XFERS; i++, ix++) {
		if (ix->ix_size == 0)
			break;
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);

		ix->ix_size = 0;
	}
}

/*
 * Send a message to the IOP.  Optionally, poll on completion.  Return
 * non-zero if failure status is returned and IM_NOINTR is set.
 */
int
iop_msg_send(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int32_t mfa, rmfa;
	int rv, status, i, s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_NOICTX) == 0)
		if (im->im_msg[3] == IOP_ICTX &&
		    (im->im_flags & IM_NOINTR) == 0)
			panic("iop_msg_send: IOP_ICTX and !IM_NOINTR");
	if ((im->im_flags & IM_DISCARD) != 0)
		panic("iop_msg_send: IM_DISCARD");
#endif

	s = splbio();	/* XXX */

	/* Wait up to 250ms for an MFA. */
	POLL(250, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		DPRINTF(("%s: mfa not forthcoming\n", sc->sc_dv.dv_xname));
		splx(s);
		return (EBUSY);
	}

	/* Perform reply queue DMA synchronisation and update counters. */
	if ((im->im_flags & IM_NOICTX) == 0) {
		if (sc->sc_stat.is_cur_hwqueue == 0)
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
			    sc->sc_rep_size, BUS_DMASYNC_PREREAD);
		for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
			sc->sc_stat.is_bytes += im->im_xfer[i].ix_size;
		sc->sc_stat.is_requests++;
		if (++sc->sc_stat.is_cur_hwqueue > sc->sc_stat.is_peak_hwqueue)
			sc->sc_stat.is_peak_hwqueue =
			    sc->sc_stat.is_cur_hwqueue;
	}

	/* Terminate scatter/gather lists. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		im->im_msg[(im->im_msg[0] >> 16) - 2] |= I2O_SGL_END;

	/* Post the message frame. */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa,
	    im->im_msg, im->im_msg[0] >> 16);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa,
	    (im->im_msg[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP, thus starting the command. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	if (timo == 0) {
		splx(s);
		return (0);
	}

	/* Wait for completion. */
	for (timo *= 10; timo != 0; timo--) {
		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
			/* Double read to account for IOP bug. */
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa != IOP_MFA_EMPTY)
				status = iop_handle_reply(sc, rmfa);
		}
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		DELAY(100);
	}

	splx(s);

	if (timo == 0) {
#ifdef I2ODEBUG
		printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
		if (iop_status_get(sc) != 0)
			printf("iop_msg_send: unable to retrieve status\n");
		else
			printf("iop_msg_send: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
#endif
		rv = EBUSY;
	} else if ((im->im_flags & IM_NOINTR) != 0)
		rv = (status != I2O_STATUS_SUCCESS ? EIO : 0);
	else
		rv = 0;

	return (rv);
}

/*
 * Try to post a message to the adapter; if that's not possible, enqueue it
 * with us.  If a timeout is specified, wait for the message to complete.
 */
int
iop_msg_enqueue(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int mfa;
	int s, fromqueue, i, rv;

#ifdef I2ODEBUG
	if (im == NULL)
		panic("iop_msg_enqueue: im == NULL");
	if (sc == NULL)
		panic("iop_msg_enqueue: sc == NULL");
	if ((im->im_flags & IM_NOICTX) != 0)
		panic("iop_msg_enqueue: IM_NOICTX");
	if (im->im_msg[3] == IOP_ICTX && (im->im_flags & IM_NOINTR) == 0)
		panic("iop_msg_enqueue: IOP_ICTX and no IM_NOINTR");
	if ((im->im_flags & IM_DISCARD) != 0 && timo != 0)
		panic("iop_msg_enqueue: IM_DISCARD && timo != 0");
	if ((im->im_flags & IM_NOINTR) == 0 && timo != 0)
		panic("iop_msg_enqueue: !IM_NOINTR && timo != 0");
#endif

	s = splbio();	/* XXX */
	fromqueue = (im == SIMPLEQ_FIRST(&sc->sc_queue));

	if (sc->sc_stat.is_cur_hwqueue >= sc->sc_maxqueuecnt) {
		/*
		 * While the IOP may be able to accept more inbound message
		 * frames than it advertises, don't push harder than it
		 * wants to go lest we starve it.
		 *
		 * XXX We should be handling IOP resource shortages.
		 */
		mfa = IOP_MFA_EMPTY;
		DPRINTF(("iop_msg_enqueue: exceeded max queue count\n"));
	} else {
		/* Double read to account for IOP bug. */
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
			mfa = iop_inl(sc, IOP_REG_IFIFO);
	}

	if (mfa == IOP_MFA_EMPTY) {
		DPRINTF(("iop_msg_enqueue: no mfa\n"));
		/* Can't transfer to h/w queue - queue with us. */
		if (!fromqueue) {
			SIMPLEQ_INSERT_TAIL(&sc->sc_queue, im, im_queue);
			if (++sc->sc_stat.is_cur_swqueue >
			    sc->sc_stat.is_peak_swqueue)
				sc->sc_stat.is_peak_swqueue =
				    sc->sc_stat.is_cur_swqueue;
		}
		splx(s);
		if ((im->im_flags & IM_NOINTR) != 0)
			rv = iop_msg_wait(sc, im, timo);
		else
			rv = 0;
		return (rv);
	} else if (fromqueue) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, im, im_queue);
		sc->sc_stat.is_cur_swqueue--;
	}

	if ((im->im_flags & IM_NOINTR) != 0)
		im->im_flags |= IM_WAITING;

	/* Perform reply queue DMA synchronisation and update counters. */
	if (sc->sc_stat.is_cur_hwqueue == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);

	for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
		sc->sc_stat.is_bytes += im->im_xfer[i].ix_size;
	sc->sc_stat.is_requests++;
	if (++sc->sc_stat.is_cur_hwqueue > sc->sc_stat.is_peak_hwqueue)
		sc->sc_stat.is_peak_hwqueue = sc->sc_stat.is_cur_hwqueue;

	/* Terminate the scatter/gather list. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		im->im_msg[(im->im_msg[0] >> 16) - 2] |= I2O_SGL_END;

	/* Post the message frame. */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa,
	    im->im_msg, im->im_msg[0] >> 16);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa,
	    (im->im_msg[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP, thus starting the command. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	/*
	 * If this is a discardable message wrapper, free it now; its flags
	 * must not be touched once it has been returned to the pool.
	 */
	if ((im->im_flags & IM_DISCARD) != 0) {
		iop_msg_free(sc, NULL, im);
		splx(s);
		return (0);
	}
	splx(s);

	if ((im->im_flags & IM_NOINTR) != 0)
		rv = iop_msg_wait(sc, im, timo);
	else
		rv = 0;
	return (rv);
}

/*
 * Wait for the specified message to complete.
 */
static int
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	struct i2o_reply *rb;
	int rv, s;

	s = splbio();
	if ((im->im_flags & IM_REPLIED) != 0) {
		splx(s);
		return (0);
	}
	rv = tsleep(im, PRIBIO, "iopmsg", timo * hz / 1000);
	splx(s);
#ifdef I2ODEBUG
	if (rv != 0) {
		printf("iop_msg_wait: tsleep() == %d\n", rv);
		if (iop_status_get(sc) != 0)
			printf("iop_msg_wait: unable to retrieve status\n");
		else
			printf("iop_msg_wait: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
	}
#endif

	if ((im->im_flags & (IM_REPLIED | IM_NOSTATUS)) == IM_REPLIED) {
		rb = (struct i2o_reply *)im->im_msg;
		rv = (rb->reqstatus != I2O_STATUS_SUCCESS ? EIO : 0);
	}
	return (rv);
}

/*
 * Release an unused message frame back to the IOP's inbound fifo.
 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op. */
	iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
	iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	iop_outl(sc, mfa + 8, 0);
	iop_outl(sc, mfa + 12, 0);

	iop_outl(sc, IOP_REG_IFIFO, mfa);
}
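
/*
 * The hand-rolled frame above is a four-word UtilNOP: word 0 packs
 * the I2O version with the frame size (4 words) in its upper 16 bits,
 * and word 1 carries the function and TID, matching the layout that
 * I2O_MSGFLAGS()/I2O_MSGFUNC() produce for the struct-based messages
 * used elsewhere in this file.
 */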

#ifdef I2ODEBUG
/*
 * Print status information from a failure reply frame.
 */
static void
iop_reply_print(struct iop_softc *sc, struct iop_msg *im,
		struct i2o_reply *rb)
{
	u_int function, detail;
#ifdef I2OVERBOSE
	const char *statusstr;
#endif

	if (im != NULL && (im->im_flags & IM_REPLIED) == 0)
		panic("iop_reply_print: %p not replied to", im);

	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
	detail = le16toh(rb->detail);

	printf("%s: reply:\n", sc->sc_dv.dv_xname);

#ifdef I2OVERBOSE
	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: function=0x%02x status=0x%02x (%s)\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
#else
	printf("%s: function=0x%02x status=0x%02x\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus);
#endif
	printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
	    sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
	    le32toh(rb->msgtctx));
	printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
	    (le32toh(rb->msgflags) >> 8) & 0xff);
}
#endif

/*
 * Translate an I2O ASCII field into a C string.
 */
void
iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
{
	int hc, lc, i, nit;

	dlen--;
	lc = 0;
	hc = 0;
	i = 0;

	/*
	 * DPT use NUL as a space, whereas AMI use it as a terminator.  The
	 * spec has nothing to say about it.  Since AMI fields are usually
	 * filled with junk after the terminator, ...
	 */
	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);

	while (slen-- != 0 && dlen-- != 0) {
		if (nit && *src == '\0')
			break;
		else if (*src <= 0x20 || *src >= 0x7f) {
			if (hc)
				dst[i++] = ' ';
		} else {
			hc = 1;
			dst[i++] = *src;
			lc = i;
		}
		src++;
	}

	dst[lc] = '\0';
}

/*
 * Claim or unclaim the specified TID.
 */
int
iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
	       int flags)
{
	struct iop_msg *im;
	struct i2o_util_claim *mb;
	int rv, func;

	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;

	if ((rv = iop_msg_alloc(sc, ii, &im, IM_NOINTR)) != 0)
		return (rv);

	/* We can use the same structure, as both are identical. */
	mb = (struct i2o_util_claim *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_claim);
	mb->msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
	mb->msgictx = ii->ii_ictx;
	mb->msgtctx = im->im_tctx;
	mb->flags = flags;

	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_free(sc, ii, im);
	return (rv);
}

/*
 * Perform an abort.
 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
	       int tctxabort, int flags)
{
	struct iop_msg *im;
	struct i2o_util_abort *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, ii, &im, IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_util_abort *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_abort);
	mb->msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
	mb->msgictx = ii->ii_ictx;
	mb->msgtctx = im->im_tctx;
	mb->flags = (func << 24) | flags;
	mb->tctxabort = tctxabort;

	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_free(sc, ii, im);
	return (rv);
}

/*
 * Enable or disable event types for the specified device.
 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
{
	struct iop_msg *im;
	struct i2o_util_event_register *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, ii, &im, 0)) != 0)
		return (rv);

	mb = (struct i2o_util_event_register *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_event_register);
	mb->msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
	mb->msgictx = ii->ii_ictx;
	mb->msgtctx = im->im_tctx;
	mb->eventmask = mask;

	return (iop_msg_enqueue(sc, im, 0));
}

int
iopopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;
	int error;

	if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((sc->sc_flags & IOP_OPEN) != 0)
		return (EBUSY);
	if ((sc->sc_flags & IOP_ONLINE) == 0)
		return (EIO);
	sc->sc_flags |= IOP_OPEN;

	/* XXX */
	sc->sc_ptb = malloc(((MAXPHYS + 3) & ~3) * IOP_MAX_MSG_XFERS, M_DEVBUF,
	    M_WAITOK);
	if (sc->sc_ptb == NULL) {
		sc->sc_flags &= ~IOP_OPEN;
		return (ENOMEM);
	}

	return (0);
}

int
iopclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;

	sc = device_lookup(&iop_cd, minor(dev));
	free(sc->sc_ptb, M_DEVBUF);
	sc->sc_ptb = NULL;
	sc->sc_flags &= ~IOP_OPEN;
	return (0);
}

int
iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct iop_softc *sc;
	struct iovec *iov;
	struct ioppt *pt;
	struct iop_msg *im;
	struct i2o_msg *mb;
	struct i2o_reply *rb;
	int rv, i;
	struct ioppt_buf *ptb;
	void *buf;

	if (securelevel >= 2)
		return (EPERM);

	sc = device_lookup(&iop_cd, minor(dev));

	PHOLD(p);

	switch (cmd) {
	case IOPIOCPT:
		pt = (struct ioppt *)data;

		if (pt->pt_msglen > IOP_MAX_MSG_SIZE ||
		    pt->pt_msglen < sizeof(struct i2o_msg) ||
		    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
		    pt->pt_nbufs < 0 ||
		    pt->pt_replylen < 0 ||
		    pt->pt_timo < 1000 ||
		    pt->pt_timo > 5*60*1000) {
			rv = EINVAL;
			break;
		}
		rv = 0;
		for (i = 0; i < pt->pt_nbufs; i++)
			if (pt->pt_bufs[i].ptb_datalen >
			    ((MAXPHYS + 3) & ~3)) {
				rv = ENOMEM;
				break;
			}
		if (rv != 0)
			break;

		rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR | IM_NOSTATUS);
		if (rv != 0)
			break;

		if ((rv = copyin(pt->pt_msg, im->im_msg, pt->pt_msglen)) != 0) {
			iop_msg_free(sc, NULL, im);
			break;
		}

		mb = (struct i2o_msg *)im->im_msg;
		mb->msgictx = IOP_ICTX;
		mb->msgtctx = im->im_tctx;

		for (i = 0; i < pt->pt_nbufs; i++) {
			ptb = &pt->pt_bufs[i];
			buf = sc->sc_ptb + i * ((MAXPHYS + 3) & ~3);

			if (ptb->ptb_out != 0) {
				rv = copyin(ptb->ptb_data, buf,
				    ptb->ptb_datalen);
				if (rv != 0)
					break;
			}

			rv = iop_msg_map(sc, im, buf, ptb->ptb_datalen,
			    ptb->ptb_out != 0);
			if (rv != 0)
				break;
		}
		if (rv != 0) {
			iop_msg_free(sc, NULL, im);
			break;
		}

		if ((rv = iop_msg_enqueue(sc, im, pt->pt_timo)) == 0) {
			rb = (struct i2o_reply *)im->im_msg;
			i = (le32toh(rb->msgflags) >> 14) & ~3;	/* XXX */
			if (i > IOP_MAX_REPLY_SIZE)
				i = IOP_MAX_REPLY_SIZE;
			if (i > pt->pt_replylen)
				i = pt->pt_replylen;
			rv = copyout(rb, pt->pt_reply, i);
		}

		for (i = 0; i < pt->pt_nbufs; i++) {
			ptb = &pt->pt_bufs[i];
			if (ptb->ptb_out != 0 || rv != 0)
				continue;
			rv = copyout(sc->sc_ptb + i * ((MAXPHYS + 3) & ~3),
			    ptb->ptb_data, ptb->ptb_datalen);
		}

		iop_msg_free(sc, NULL, im);
		break;

	case IOPIOCGLCT:
		iov = (struct iovec *)data;
		rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL);
		if (rv == 0) {
			i = le16toh(sc->sc_lct->tablesize) << 2;
			if (i > iov->iov_len)
				i = iov->iov_len;
			else
				iov->iov_len = i;
			rv = copyout(sc->sc_lct, iov->iov_base, i);
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}
		break;

	case IOPIOCGSTATUS:
		iov = (struct iovec *)data;
		i = sizeof(struct i2o_status);
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		if ((rv = iop_status_get(sc)) == 0)
			rv = copyout(&sc->sc_status, iov->iov_base, i);
		break;

	case IOPIOCRECONFIG:
		rv = iop_reconfigure(sc, 0, 0);
		break;

	case IOPIOCGTIDMAP:
		iov = (struct iovec *)data;
		rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL);
		if (rv == 0) {
			i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
			if (i > iov->iov_len)
				i = iov->iov_len;
			else
				iov->iov_len = i;
			rv = copyout(sc->sc_tidmap, iov->iov_base, i);
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}
		break;

	default:
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
#endif
		rv = ENOTTY;
		break;
	}

	PRELE(p);

	return (rv);
}