/*	$NetBSD: iop.c,v 1.6 2000/12/03 13:34:37 ad Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/pool.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>
#define	POLL(ms, cond)						\
do {								\
	int i;							\
	for (i = (ms) * 10; i; i--) {				\
		if (cond)					\
			break;					\
		DELAY(100);					\
	}							\
} while (/* CONSTCOND */0)
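/*
 * Example usage (a minimal sketch; `sc' and the status structure are
 * assumed to be set up as in iop_status_get() below): poll for up to
 * 2.5 seconds, re-testing the condition every 100us.
 *
 *	POLL(2500, *((volatile u_char *)&sc->sc_status.syncbyte) == 0xff);
 */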

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif
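
/*
 * DPRINTF takes a parenthesised argument list so that a single-argument
 * macro can forward a variable-length printf() call.  A sketch:
 *
 *	DPRINTF(("%s: state=%d\n", sc->sc_dv.dv_xname, state));
 */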

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#else
#define	IFVERBOSE(x)
#endif

#define	COMMENT(x)	""

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])
#define	IOP_TCTXHASH_NBUCKETS	64
#define	IOP_TCTXHASH(tctx)	(&iop_tctxhashtbl[(tctx) & iop_tctxhash])
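
/*
 * The hash masks are produced by hashinit(); bucket selection is a
 * simple AND with the mask.  A lookup sketch, mirroring what
 * iop_handle_reply() does below:
 *
 *	struct iop_initiator *ii;
 *
 *	LIST_FOREACH(ii, IOP_ICTXHASH(ictx), ii_hash)
 *		if (ii->ii_ictx == ictx)
 *			break;
 */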

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long	iop_ictxhash;
static TAILQ_HEAD(, iop_msg) *iop_tctxhashtbl;
static u_long	iop_tctxhash;
static void	*iop_sdh;
static struct	pool *iop_msgpool;
static struct	i2o_systab *iop_systab;
static int	iop_systab_size;

extern struct cfdriver iop_cd;

#define	IC_CONFIGURE	0x01

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
	const char	*ic_caption;
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char *iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static int	iop_reconfigure(struct iop_softc *, u_int32_t);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);
#ifdef notyet
static int	iop_vendor_print(void *, const char *);
#endif

static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static int	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static void	iop_reconfigure_proc(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_status_get(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct iop_msg *,
				struct i2o_reply *);
#endif

cdev_decl(iop);

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the adapter.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	int rv;
	u_int32_t mask;
	static int again;
	char ident[64];

	if (again == 0) {
		/* Create the shared message wrapper pool and hashes. */
		iop_msgpool = pool_create(sizeof(struct iop_msg), 0, 0, 0,
		    "ioppl", 0, NULL, NULL, M_DEVBUF);
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);
		iop_tctxhashtbl = hashinit(IOP_TCTXHASH_NBUCKETS, HASH_TAILQ,
		    M_DEVBUF, M_NOWAIT, &iop_tctxhash);
		again = 1;
	}

	/* Reset the IOP and request status. */
	printf("I2O adapter");

	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		return;
	}
	if ((rv = iop_status_get(sc)) != 0) {
		printf("%s: not responding (get status)\n", sc->sc_dv.dv_xname);
		return;
	}
	DPRINTF((" (state=%d)",
	    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff));
	sc->sc_flags |= IOP_HAVESTATUS;

	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxreplycnt = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxreplycnt > IOP_MAX_HW_REPLYCNT)
		sc->sc_maxreplycnt = IOP_MAX_HW_REPLYCNT;
	sc->sc_maxqueuecnt = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxqueuecnt > IOP_MAX_HW_QUEUECNT)
		sc->sc_maxqueuecnt = IOP_MAX_HW_QUEUECNT;

	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname,
	    sc->sc_maxqueuecnt, le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxreplycnt, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	SIMPLEQ_INIT(&sc->sc_queue);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT);
		if (iop_systab == NULL)
			return;

		memset(iop_systab, 0, i);
		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    5000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_DISCARD | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	if (iop_initiator_register(sc, &sc->sc_eventii) != 0) {
		printf("%s: unable to register initiator\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	if (iop_util_eventreg(sc, &sc->sc_eventii, 0xffffffff)) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

#ifdef notyet
	/* Attempt to match and attach a product-specific extension. */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);
#endif

	if ((rv = iop_reconfigure(sc, 0)) != 0) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		return;
	}

	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconfigure_proc, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconfigure_proc(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;

	sc = cookie;

	for (;;) {
		chgind = le32toh(sc->sc_chgindicator) + 1;

		if (iop_lct_get0(sc, &lct, sizeof(lct), chgind) == 0) {
			DPRINTF(("%s: async reconfiguration (0x%08x)\n",
			    sc->sc_dv.dv_xname, le32toh(lct.changeindicator)));
			iop_reconfigure(sc, lct.changeindicator);
		}

		tsleep(iop_reconfigure_proc, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
static int
iop_reconfigure(struct iop_softc *sc, u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan *mb;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	rv = lockmgr(&sc->sc_conflock, LK_EXCLUSIVE | LK_RECURSEFAIL, NULL);
	if (rv != 0) {
		DPRINTF(("iop_reconfigure: unable to acquire lock\n"));
		return (rv);
	}

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			goto done;
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le32toh(le->localtid) & 4095;

			rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR);
			if (rv != 0) {
				DPRINTF(("iop_reconfigure: alloc msg\n"));
				goto done;
			}

			mb = (struct i2o_hba_bus_scan *)im->im_msg;
			mb->msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mb->msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mb->msgictx = IOP_ICTX;
			mb->msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_enqueue(sc, im, 5*60*1000);
			iop_msg_free(sc, NULL, im);
			if (rv != 0) {
				DPRINTF(("iop_reconfigure: scan failed\n"));
				goto done;
			}
		}
	} else if (chgind == sc->sc_chgindicator) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		goto done;
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		goto done;
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	if (sc->sc_lct->changeindicator == sc->sc_chgindicator) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		/* Nothing to do. */
		rv = 0;
		goto done;
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgindicator = sc->sc_lct->changeindicator;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_tidmap == NULL) {
		rv = ENOMEM;
		goto done;
	}
	memset(sc->sc_tidmap, 0, sc->sc_nlctent * sizeof(struct iop_tidmap));

	/* Match and attach child devices. */
	iop_configure_devices(sc);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);
		if ((ii->ii_flags & II_UTILITY) != 0)
			continue;
		if ((ii->ii_flags & II_CONFIGURED) == 0) {
			ii->ii_flags |= II_CONFIGURED;
			continue;
		}

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}
	rv = 0;

 done:
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
	return (rv);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	int i, nent;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		/*
		 * Ignore the device if it's in use.
		 */
		if ((le32toh(le->usertid) & 4095) != 4095)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = le32toh(le->localtid) & 4095;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
			if (ia.ia_tid == ii->ii_tid)
				break;
		if (ii != NULL)
			continue;

		if (config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch))
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
	}
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

#ifdef notyet
static int
iop_vendor_print(void *aux, const char *pnp)
{

	if (pnp != NULL)
		printf("vendor specific extension at %s", pnp);
	return (UNCONF);
}
#endif

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices... ");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX,
		    0, 5000);
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve adapter status.
 */
static int
iop_status_get(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_exec_status_get *mb;
	int rv, s;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
		return (rv);

	mb = (struct i2o_exec_status_get *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mb->reserved[0] = 0;
	mb->reserved[1] = 0;
	mb->reserved[2] = 0;
	mb->reserved[3] = 0;
	mb->addrlow = kvtop((caddr_t)&sc->sc_status);	/* XXX */
	mb->addrhigh = 0;
	mb->length = sizeof(sc->sc_status);

	s = splbio();
	memset(&sc->sc_status, 0, sizeof(sc->sc_status));

	if ((rv = iop_msg_send(sc, im, 0)) != 0) {
		splx(s);
		iop_msg_free(sc, NULL, im);
		return (rv);
	}

	/* XXX */
	POLL(2500, *((volatile u_char *)&sc->sc_status.syncbyte) == 0xff);

	splx(s);
	iop_msg_free(sc, NULL, im);
	return (*((volatile u_char *)&sc->sc_status.syncbyte) != 0xff);
}

/*
 * Initialize and populate the adapter's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	struct iop_msg *im;
	volatile u_int32_t status;
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mb;
	int i, rseg, rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
		return (rv);

	mb = (struct i2o_exec_outbound_init *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->pagesize = PAGE_SIZE;
	mb->flags = 0x80 | ((IOP_MAX_REPLY_SIZE >> 2) << 16);	/* XXX */

	status = 0;

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and in fact appears to be a bad
	 * thing).
	 */
	iop_msg_map(sc, im, (void *)&status, sizeof(status), 0);
	if ((rv = iop_msg_send(sc, im, 0)) != 0) {
		iop_msg_free(sc, NULL, im);
		return (rv);
	}
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);

	/* XXX */
	POLL(5000, status == I2O_EXEC_OUTBOUND_INIT_COMPLETE);
	if (status != I2O_EXEC_OUTBOUND_INIT_COMPLETE) {
		printf("%s: outbound FIFO init failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	/* If we need to allocate DMA safe memory, do it now. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxreplycnt * IOP_MAX_REPLY_SIZE;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap, sc->sc_rep,
		    sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxreplycnt, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += IOP_MAX_REPLY_SIZE;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mb;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_exec_hrt_get *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, hrt, size, 0);
	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	if ((rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr))) != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	memset(lct, 0, size);
	memset(im->im_msg, 0, sizeof(im->im_msg));

	mb = (struct i2o_exec_lct_notify *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->classid = I2O_CLASS_ANY;
	mb->changeindicator = chgind;

	DPRINTF(("iop_lct_get0: reading LCT\n"));

	iop_msg_map(sc, im, lct, size, 0);
	rv = iop_msg_enqueue(sc, im, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Request the specified parameter group from the target.
 */
int
iop_param_op(struct iop_softc *sc, int tid, int write, int group, void *buf,
	     int size)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mb;
	int rv, func, op;
	struct {
		struct i2o_param_op_list_header olh;
		struct i2o_param_op_all_template oat;
	} req;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	if (write) {
		func = I2O_UTIL_PARAMS_SET;
		op = I2O_PARAMS_OP_FIELD_SET;
	} else {
		func = I2O_UTIL_PARAMS_GET;
		op = I2O_PARAMS_OP_FIELD_GET;
	}

	mb = (struct i2o_util_params_op *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mb->msgfunc = I2O_MSGFUNC(tid, func);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->flags = 0;

	req.olh.count = htole16(1);
	req.olh.reserved = htole16(0);
	req.oat.operation = htole16(op);
	req.oat.fieldcount = htole16(0xffff);
	req.oat.group = htole16(group);

	memset(buf, 0, size);
	iop_msg_map(sc, im, &req, sizeof(req), 1);
	iop_msg_map(sc, im, buf, size, write);

	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}
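
/*
 * Example (a hedged sketch; the parameter group number and the result
 * structure are illustrative, not taken from this file): a child driver
 * could read one of its target's parameter groups into a local buffer
 * like so.
 *
 *	struct hypothetical_devinfo di;
 *
 *	if (iop_param_op(sc, tid, 0, 0x0100, &di, sizeof(di)) != 0)
 *		printf("parameter fetch failed\n");
 */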

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg *mb;
	int rv, fl;

	fl = (async != 0 ? IM_NOWAIT : 0);
	if ((rv = iop_msg_alloc(sc, NULL, &im, fl | IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_msg *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_msg);
	mb->msgfunc = I2O_MSGFUNC(tid, function);
	mb->msgictx = ictx;
	mb->msgtctx = im->im_tctx;

	if (async)
		rv = iop_msg_enqueue(sc, im, timo);
	else
		rv = iop_msg_send(sc, im, timo);
	iop_msg_free(sc, NULL, im);
	return (rv);
}
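
/*
 * Example usage (mirroring iop_shutdown() above): synchronously ask the
 * IOP to quiesce, waiting up to five seconds for the reply.
 *
 *	iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
 *	    0, 5000);
 */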

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mb;
	struct iop_msg *im;
	u_int32_t mema[2], ioa[2];
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_exec_sys_tab_set *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mb->segnumber = 0;

	/* XXX This is questionable, but better than nothing... */
	mema[0] = le32toh(sc->sc_status.currentprivmembase);
	mema[1] = le32toh(sc->sc_status.currentprivmemsize);
	ioa[0] = le32toh(sc->sc_status.currentpriviobase);
	ioa[1] = le32toh(sc->sc_status.currentpriviosize);

	iop_msg_map(sc, im, iop_systab, iop_systab_size, 1);
	iop_msg_map(sc, im, mema, sizeof(mema), 1);
	iop_msg_map(sc, im, ioa, sizeof(ioa), 1);

	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Reset the adapter.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	struct iop_msg *im;
	volatile u_int32_t sw;
	u_int32_t mfa;
	struct i2o_exec_iop_reset *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
		return (rv);

	sw = 0;

	mb = (struct i2o_exec_iop_reset *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mb->reserved[0] = 0;
	mb->reserved[1] = 0;
	mb->reserved[2] = 0;
	mb->reserved[3] = 0;
	mb->statuslow = kvtop((caddr_t)&sw);	/* XXX */
	mb->statushigh = 0;

	if ((rv = iop_msg_send(sc, im, 0)) != 0) {
		iop_msg_free(sc, NULL, im);
		return (rv);
	}
	iop_msg_free(sc, NULL, im);

	POLL(2500, sw != 0);	/* XXX */
	if (sw != I2O_RESET_IN_PROGRESS) {
		printf("%s: reset rejected\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	if (sw == I2O_RESET_REJECTED)
		printf("%s: reset rejected?\n", sc->sc_dv.dv_xname);

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.
 */
int
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictx;
	static int stctx;

	/* 0 is reserved for system messages. */
	ii->ii_ictx = ++ictx;
	ii->ii_stctx = ++stctx | 0x80000000;

	LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);

	return (0);
}

/*
 * Unregister an initiator.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{

	LIST_REMOVE(ii, ii_list);
	LIST_REMOVE(ii, ii_hash);
}
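
/*
 * Registration sketch (hedged; this mirrors the event initiator set up
 * in iop_config_interrupts() above, with a hypothetical callback name):
 *
 *	ii->ii_dv = self;
 *	ii->ii_intr = mydriver_intr;
 *	ii->ii_flags = 0;
 *	ii->ii_tid = ia->ia_tid;
 *	iop_initiator_register(sc, ii);
 */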

/*
 * Handle a reply frame from the adapter.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation... */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off, IOP_MAX_MSG_SIZE,
	    BUS_DMASYNC_POSTREAD);
	if (--sc->sc_stat.is_cur_hwqueue != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, NULL, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);

			/* Return the reply frame to the IOP's outbound FIFO. */
			iop_outl(sc, IOP_REG_OFIFO, rmfa);
			return (-1);
		}
	}

	status = rb->reqstatus;

	if (ii == NULL || (ii->ii_flags & II_DISCARD) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		tctx = le32toh(rb->msgtctx);
		im = TAILQ_FIRST(IOP_TCTXHASH(tctx));
		for (; im != NULL; im = TAILQ_NEXT(im, im_hash))
			if (im->im_tctx == tctx)
				break;
		if (im == NULL || (im->im_flags & IM_ALLOCED) == 0) {
#ifdef I2ODEBUG
			iop_reply_print(sc, NULL, rb);
#endif
			printf("%s: WARNING: bad tctx returned (%x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);

			/* Return the reply frame to the IOP's outbound FIFO. */
			iop_outl(sc, IOP_REG_OFIFO, rmfa);
			return (-1);
		}
#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif

		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (rb->reqstatus != 0)
			iop_reply_print(sc, im, rb);
#endif
		/* Notify the initiator. */
		if ((im->im_flags & IM_WAITING) != 0) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
			if (size > IOP_MAX_REPLY_SIZE)
				size = IOP_MAX_REPLY_SIZE;
			memcpy(im->im_msg, rb, size);
			wakeup(im);
		} else if ((im->im_flags & IM_NOINTR) == 0)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	/* Return the reply frame to the IOP's outbound FIFO. */
	iop_outl(sc, IOP_REG_OFIFO, rmfa);

	/* Run the queue. */
	if ((im = SIMPLEQ_FIRST(&sc->sc_queue)) != NULL)
		iop_msg_enqueue(sc, im, 0);

	return (status);
}

/*
 * Handle an interrupt from the adapter.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY &&
		    (rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY)
			break;
		iop_handle_reply(sc, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;
	event = le32toh(rb->event);

#ifndef I2ODEBUG
	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED)
		return;
#endif

	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
int
iop_msg_alloc(struct iop_softc *sc, struct iop_initiator *ii,
	      struct iop_msg **imp, int flags)
{
	struct iop_msg *im;
	static int tctxgen = 666;
	int s, rv, i, tctx;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();	/* XXX */

	if (ii != NULL && (ii->ii_flags & II_DISCARD) != 0) {
		flags |= IM_DISCARD;
		tctx = ii->ii_stctx;
	} else
		tctx = tctxgen++ & 0x7fffffff;

	im = (struct iop_msg *)pool_get(iop_msgpool,
	    (flags & IM_NOWAIT) == 0 ? PR_WAITOK : 0);
	if (im == NULL) {
		splx(s);
		return (ENOMEM);
	}

	/* XXX */
	rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER, IOP_MAX_SGL_ENTRIES,
	    IOP_MAX_XFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &im->im_xfer[0].ix_map);
	if (rv != 0) {
		pool_put(iop_msgpool, im);
		splx(s);
		return (rv);
	}

	if ((flags & (IM_DISCARD | IM_NOICTX)) == 0)
		TAILQ_INSERT_TAIL(IOP_TCTXHASH(tctx), im, im_hash);

	splx(s);

	im->im_tctx = tctx;
	im->im_flags = flags | IM_ALLOCED;
	for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
		im->im_xfer[i].ix_size = 0;
	*imp = im;

	return (0);
}
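
/*
 * Typical wrapper lifecycle (a sketch; see iop_hrt_get0() above for a
 * real instance):
 *
 *	iop_msg_alloc(sc, NULL, &im, IM_NOINTR);
 *	... build the message frame in im->im_msg ...
 *	iop_msg_map(sc, im, buf, size, 0);
 *	rv = iop_msg_enqueue(sc, im, 5000);
 *	iop_msg_unmap(sc, im);
 *	iop_msg_free(sc, NULL, im);
 */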

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_initiator *ii, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	/* XXX */
	bus_dmamap_destroy(sc->sc_dmat, im->im_xfer[0].ix_map);

	s = splbio();	/* XXX */

	if ((im->im_flags & (IM_DISCARD | IM_NOICTX)) == 0)
		TAILQ_REMOVE(IOP_TCTXHASH(im->im_tctx), im, im_hash);

	im->im_flags = 0;
	pool_put(iop_msgpool, im);
	splx(s);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, void *xferaddr,
	    int xfersize, int out)
{
	struct iop_xfer *ix;
	u_int32_t *mb;
	int rv, seg, i;

	for (i = 0, ix = im->im_xfer; i < IOP_MAX_MSG_XFERS; i++, ix++)
		if (ix->ix_size == 0)
			break;
#ifdef I2ODEBUG
	if (i == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/* Only the first DMA map is static. */
	if (i != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SGL_ENTRIES, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;

	rv = bus_dmamap_load(sc->sc_dmat, ix->ix_map, xferaddr, xfersize,
	    NULL, 0);
	if (rv != 0)
		return (rv);
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	mb = im->im_msg + (im->im_msg[0] >> 16);
	if (out)
		out = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		out = I2O_SGL_SIMPLE;

	for (seg = 0; seg < ix->ix_map->dm_nsegs; seg++) {
#ifdef I2ODEBUG
		if ((seg << 1) + (im->im_msg[0] >> 16) >=
		    (IOP_MAX_MSG_SIZE >> 2))
			panic("iop_msg_map: message frame too large");
#endif
		if (seg == ix->ix_map->dm_nsegs - 1)
			out |= I2O_SGL_END_BUFFER;
		*mb++ = (u_int32_t)ix->ix_map->dm_segs[seg].ds_len | out;
		*mb++ = (u_int32_t)ix->ix_map->dm_segs[seg].ds_addr;
	}

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		im->im_msg[0] += ((im->im_msg[0] >> 16) + seg * 2) << 4;
		im->im_flags |= IM_SGLOFFADJ;
	}
	im->im_msg[0] += (seg << 17);
	return (0);
}
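
/*
 * For illustration: a buffer that the DMA map splits into two segments,
 * mapped for output, yields four SGL words in the frame (symbolic; the
 * I2O_SGL_END bit is OR'ed into the last entry at send time):
 *
 *	dm_segs[0].ds_len  | I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT
 *	dm_segs[0].ds_addr
 *	dm_segs[1].ds_len  | I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT |
 *	    I2O_SGL_END_BUFFER
 *	dm_segs[1].ds_addr
 */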

/*
 * Unmap all data transfers associated with a message wrapper.
 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

	for (i = 0, ix = im->im_xfer; i < IOP_MAX_MSG_XFERS; i++, ix++) {
		if (ix->ix_size == 0)
			break;
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);

		ix->ix_size = 0;
	}
}

/*
 * Send a message to the IOP.  Optionally, poll on completion.  Return
 * non-zero if failure status is returned and IM_NOINTR is set.
 */
int
iop_msg_send(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int32_t mfa, rmfa;
	int rv, status, i, s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_NOICTX) == 0)
		if (im->im_msg[3] == IOP_ICTX &&
		    (im->im_flags & IM_NOINTR) == 0)
			panic("iop_msg_send: IOP_ICTX and !IM_NOINTR");
	if ((im->im_flags & IM_DISCARD) != 0)
		panic("iop_msg_send: IM_DISCARD");
#endif

	s = splbio();	/* XXX */

	/* Wait up to 250ms for an MFA. */
	POLL(250, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		DPRINTF(("%s: mfa not forthcoming\n", sc->sc_dv.dv_xname));
		splx(s);
		return (EBUSY);
	}

	/* Perform reply queue DMA synchronisation and update counters. */
	if ((im->im_flags & IM_NOICTX) == 0) {
		if (sc->sc_stat.is_cur_hwqueue == 0)
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
			    sc->sc_rep_size, BUS_DMASYNC_PREREAD);
		for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
			sc->sc_stat.is_bytes += im->im_xfer[i].ix_size;
		sc->sc_stat.is_requests++;
		if (++sc->sc_stat.is_cur_hwqueue > sc->sc_stat.is_peak_hwqueue)
			sc->sc_stat.is_peak_hwqueue =
			    sc->sc_stat.is_cur_hwqueue;
	}

	/* Terminate scatter/gather lists. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		im->im_msg[(im->im_msg[0] >> 16) - 2] |= I2O_SGL_END;

	/* Post the message frame. */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa,
	    im->im_msg, im->im_msg[0] >> 16);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa,
	    (im->im_msg[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP, thus starting the command. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	if (timo == 0) {
		splx(s);
		return (0);
	}

	/* Wait for completion. */
	for (timo *= 10; timo != 0; timo--) {
		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
			/* Double read to account for IOP bug. */
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa != IOP_MFA_EMPTY)
				status = iop_handle_reply(sc, rmfa);
		}
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		DELAY(100);
	}

	splx(s);

	if (timo == 0) {
#ifdef I2ODEBUG
		printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
		if (iop_status_get(sc) != 0)
			printf("iop_msg_send: unable to retrieve status\n");
		else
			printf("iop_msg_send: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
#endif
		rv = EBUSY;
	} else if ((im->im_flags & IM_NOINTR) != 0)
		rv = (status != I2O_STATUS_SUCCESS ? EIO : 0);
	else
		rv = 0;

	return (rv);
}

/*
 * Try to post a message to the adapter; if that's not possible, enqueue it
 * with us.  If a timeout is specified, wait for the message to complete.
 */
int
iop_msg_enqueue(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int mfa;
	int s, fromqueue, i, rv;

#ifdef I2ODEBUG
	if (im == NULL)
		panic("iop_msg_enqueue: im == NULL");
	if (sc == NULL)
		panic("iop_msg_enqueue: sc == NULL");
	if ((im->im_flags & IM_NOICTX) != 0)
		panic("iop_msg_enqueue: IM_NOICTX");
	if (im->im_msg[3] == IOP_ICTX && (im->im_flags & IM_NOINTR) == 0)
		panic("iop_msg_enqueue: IOP_ICTX and no IM_NOINTR");
	if ((im->im_flags & IM_DISCARD) != 0 && timo != 0)
		panic("iop_msg_enqueue: IM_DISCARD && timo != 0");
	if ((im->im_flags & IM_NOINTR) == 0 && timo != 0)
		panic("iop_msg_enqueue: !IM_NOINTR && timo != 0");
#endif

	s = splbio();	/* XXX */
	fromqueue = (im == SIMPLEQ_FIRST(&sc->sc_queue));

	if (sc->sc_stat.is_cur_hwqueue >= sc->sc_maxqueuecnt) {
		/*
		 * While the IOP may be able to accept more inbound message
		 * frames than it advertises, don't push harder than it
		 * wants to go lest we starve it.
		 *
		 * XXX We should be handling IOP resource shortages.
		 */
		mfa = IOP_MFA_EMPTY;
		DPRINTF(("iop_msg_enqueue: exceeded max queue count\n"));
	} else {
		/* Double read to account for IOP bug. */
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
			mfa = iop_inl(sc, IOP_REG_IFIFO);
	}

	if (mfa == IOP_MFA_EMPTY) {
		DPRINTF(("iop_msg_enqueue: no mfa\n"));
		/* Can't transfer to h/w queue - queue with us. */
		if (!fromqueue) {
			SIMPLEQ_INSERT_TAIL(&sc->sc_queue, im, im_queue);
			if (++sc->sc_stat.is_cur_swqueue >
			    sc->sc_stat.is_peak_swqueue)
				sc->sc_stat.is_peak_swqueue =
				    sc->sc_stat.is_cur_swqueue;
		}
		splx(s);
		if ((im->im_flags & IM_NOINTR) != 0)
			rv = iop_msg_wait(sc, im, timo);
		else
			rv = 0;
		return (rv);
	} else if (fromqueue) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, im, im_queue);
		sc->sc_stat.is_cur_swqueue--;
	}

	if ((im->im_flags & IM_NOINTR) != 0)
		im->im_flags |= IM_WAITING;

	/* Perform reply queue DMA synchronisation and update counters. */
	if (sc->sc_stat.is_cur_hwqueue == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);

	for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
		sc->sc_stat.is_bytes += im->im_xfer[i].ix_size;
	sc->sc_stat.is_requests++;
	if (++sc->sc_stat.is_cur_hwqueue > sc->sc_stat.is_peak_hwqueue)
		sc->sc_stat.is_peak_hwqueue = sc->sc_stat.is_cur_hwqueue;

	/* Terminate the scatter/gather list. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		im->im_msg[(im->im_msg[0] >> 16) - 2] |= I2O_SGL_END;

	/* Post the message frame. */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa,
	    im->im_msg, im->im_msg[0] >> 16);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa,
	    (im->im_msg[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP, thus starting the command. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	/* If this is a discardable message wrapper, free it. */
	if ((im->im_flags & IM_DISCARD) != 0)
		iop_msg_free(sc, NULL, im);
	splx(s);

	if ((im->im_flags & IM_NOINTR) != 0)
		rv = iop_msg_wait(sc, im, timo);
	else
		rv = 0;
	return (rv);
}

/*
 * Wait for the specified message to complete.
 */
static int
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	struct i2o_reply *rb;
	int rv, s;

	s = splbio();
	if ((im->im_flags & IM_REPLIED) != 0) {
		splx(s);
		return (0);
	}
	rv = tsleep(im, PRIBIO, "iopmsg", timo * hz / 1000);
	splx(s);
#ifdef I2ODEBUG
	if (rv != 0) {
		printf("iop_msg_wait: tsleep() == %d\n", rv);
		if (iop_status_get(sc) != 0)
			printf("iop_msg_wait: unable to retrieve status\n");
		else
			printf("iop_msg_wait: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
	}
#endif
	if ((im->im_flags & (IM_REPLIED | IM_NOSTATUS)) == IM_REPLIED) {
		rb = (struct i2o_reply *)im->im_msg;
		rv = (rb->reqstatus != I2O_STATUS_SUCCESS ? EIO : 0);
	}
	return (rv);
}

/*
 * Release an unused message frame back to the IOP's inbound fifo.
 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op. */
	iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
	iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	iop_outl(sc, mfa + 8, 0);
	iop_outl(sc, mfa + 12, 0);

	iop_outl(sc, IOP_REG_IFIFO, mfa);
}

#ifdef I2ODEBUG
/*
 * Print status information from a failure reply frame.
 */
static void
iop_reply_print(struct iop_softc *sc, struct iop_msg *im,
		struct i2o_reply *rb)
{
	u_int function, detail;
#ifdef I2OVERBOSE
	const char *statusstr;
#endif

	if (im != NULL && (im->im_flags & IM_REPLIED) == 0)
		panic("iop_reply_print: %p not replied to", im);

	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
	detail = le16toh(rb->detail);

	printf("%s: reply:\n", sc->sc_dv.dv_xname);

#ifdef I2OVERBOSE
	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: function=0x%02x status=0x%02x (%s)\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
#else
	printf("%s: function=0x%02x status=0x%02x\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus);
#endif
	printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
	    sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
	    le32toh(rb->msgtctx));
	printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
	    (le32toh(rb->msgflags) >> 8) & 0xff);
}
#endif

/*
 * Translate an I2O ASCII field into a C string.
 */
void
iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
{
	int hc, lc, i, nit;

	dlen--;
	lc = 0;
	hc = 0;
	i = 0;

	/*
	 * DPT use NUL as a space, whereas AMI use it as a terminator.  The
	 * spec has nothing to say about it.  Since AMI fields are usually
	 * filled with junk after the terminator, ...
	 */
	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);

	while (slen-- != 0 && dlen-- != 0) {
		if (nit && *src == '\0')
			break;
		else if (*src <= 0x20 || *src >= 0x7f) {
			if (hc)
				dst[i++] = ' ';
		} else {
			hc = 1;
			dst[i++] = *src;
			lc = i;
		}
		src++;
	}

	dst[lc] = '\0';
}
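
/*
 * Example: given an AMI-style source field of { 'R','A','I','D',' ',
 * '5','\0','x','x' }, the result is the C string "RAID 5"; runs of
 * non-printing characters collapse to one space, and trailing blanks
 * and post-terminator junk are dropped.
 */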

/*
 * Claim or unclaim the specified TID.
 */
int
iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
	       int flags)
{
	struct iop_msg *im;
	struct i2o_util_claim *mb;
	int rv, func;

	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;

	if ((rv = iop_msg_alloc(sc, ii, &im, IM_NOINTR)) != 0)
		return (rv);

	/* We can use the same structure, as both are identical. */
	mb = (struct i2o_util_claim *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_claim);
	mb->msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
	mb->msgictx = ii->ii_ictx;
	mb->msgtctx = im->im_tctx;
	mb->flags = flags;

	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_free(sc, ii, im);
	return (rv);
}

/*
 * Perform an abort.
 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
	       int tctxabort, int flags)
{
	struct iop_msg *im;
	struct i2o_util_abort *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, ii, &im, IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_util_abort *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_abort);
	mb->msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
	mb->msgictx = ii->ii_ictx;
	mb->msgtctx = im->im_tctx;
	mb->flags = (func << 24) | flags;
	mb->tctxabort = tctxabort;

	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_free(sc, ii, im);
	return (rv);
}

/*
 * Enable or disable event types for the specified device.
 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
{
	struct iop_msg *im;
	struct i2o_util_event_register *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, ii, &im, 0)) != 0)
		return (rv);

	mb = (struct i2o_util_event_register *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_event_register);
	mb->msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
	mb->msgictx = ii->ii_ictx;
	mb->msgtctx = im->im_tctx;
	mb->eventmask = mask;

	return (iop_msg_enqueue(sc, im, 0));
}

int
iopopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;
	int error;

	if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((sc->sc_flags & IOP_OPEN) != 0)
		return (EBUSY);
	if ((sc->sc_flags & IOP_ONLINE) == 0)
		return (EIO);
	sc->sc_flags |= IOP_OPEN;

	return (0);
}

int
iopclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;

	sc = device_lookup(&iop_cd, minor(dev));
	sc->sc_flags &= ~IOP_OPEN;
	return (0);
}

int
iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct iop_softc *sc;
	struct iovec *iov;
	struct ioppt *pt;
	struct iop_msg *im;
	struct i2o_msg *mb;
	struct i2o_reply *rb;
	int rv, i;

	if (securelevel >= 2)
		return (EPERM);

	sc = device_lookup(&iop_cd, minor(dev));

	switch (cmd) {
	case IOPIOCPT:
		pt = (struct ioppt *)data;

		if (pt->pt_msglen > IOP_MAX_MSG_SIZE ||
		    pt->pt_msglen < sizeof(struct i2o_msg) ||
		    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
		    pt->pt_nbufs < 0 ||
		    pt->pt_replylen < 0 ||
		    pt->pt_timo < 1000 ||
		    pt->pt_timo > 5*60*1000) {
			rv = EINVAL;
			break;
		}

		rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR | IM_NOSTATUS);
		if (rv != 0)
			break;

		if ((rv = copyin(pt->pt_msg, im->im_msg, pt->pt_msglen)) != 0) {
			iop_msg_free(sc, NULL, im);
			break;
		}

		mb = (struct i2o_msg *)im->im_msg;
		mb->msgictx = IOP_ICTX;
		mb->msgtctx = im->im_tctx;

		for (i = 0; i < pt->pt_nbufs; i++) {
			rv = iop_msg_map(sc, im, pt->pt_bufs[i].ptb_data,
			    pt->pt_bufs[i].ptb_datalen,
			    pt->pt_bufs[i].ptb_out != 0);
			if (rv != 0) {
				iop_msg_free(sc, NULL, im);
				return (rv);
			}
		}

		if ((rv = iop_msg_enqueue(sc, im, pt->pt_timo)) == 0) {
			rb = (struct i2o_reply *)im->im_msg;
			i = (le32toh(rb->msgflags) >> 14) & ~3;	/* XXX */
			if (i > IOP_MAX_REPLY_SIZE)
				i = IOP_MAX_REPLY_SIZE;
			if (i > pt->pt_replylen)
				i = pt->pt_replylen;
			rv = copyout(rb, pt->pt_reply, i);
		}

		iop_msg_free(sc, NULL, im);
		break;

	case IOPIOCGLCT:
		iov = (struct iovec *)data;
		rv = lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
		if (rv == 0) {
			i = le16toh(sc->sc_lct->tablesize) << 2;
			if (i > iov->iov_len)
				i = iov->iov_len;
			else
				iov->iov_len = i;
			rv = copyout(sc->sc_lct, iov->iov_base, i);
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}
		break;

	case IOPIOCGSTATUS:
		iov = (struct iovec *)data;
		i = sizeof(struct i2o_status);
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		if ((rv = iop_status_get(sc)) == 0)
			rv = copyout(&sc->sc_status, iov->iov_base, i);
		break;

	case IOPIOCRECONFIG:
		rv = iop_reconfigure(sc, 0);
		break;

	default:
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
#endif
		rv = ENOTTY;
		break;
	}

	return (rv);
}
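
/*
 * Userland sketch (hedged: assumes the ioctl interface above is reached
 * through a /dev/iop0 device node and that <dev/i2o/iopio.h> supplies
 * the ioctl numbers):
 *
 *	struct i2o_status st;
 *	struct iovec iov = { &st, sizeof(st) };
 *	int fd;
 *
 *	if ((fd = open("/dev/iop0", O_RDWR)) >= 0 &&
 *	    ioctl(fd, IOPIOCGSTATUS, &iov) == 0)
 *		printf("orgid 0x%04x\n", le16toh(st.orgid));
 */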