/*	$NetBSD: iop.c,v 1.7 2000/12/03 15:51:36 ad Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/pool.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

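/*
 * POLL() busy-waits for up to `ms' milliseconds, re-evaluating `cond'
 * every 100us and terminating early once it becomes true.  The condition
 * is evaluated repeatedly, so it must be cheap and safe to re-test; the
 * callers below use volatile memory reads and register reads.
 */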
#define	POLL(ms, cond)				\
do {						\
	int i;					\
	for (i = (ms) * 10; i; i--) {		\
		if (cond)			\
			break;			\
		DELAY(100);			\
	}					\
} while (/* CONSTCOND */0)

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#else
#define	IFVERBOSE(x)
#endif

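/*
 * IFVERBOSE() text is compiled in only for I2OVERBOSE kernels; COMMENT()
 * text is always discarded, and serves purely as in-source documentation
 * of the class table below.
 */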
#define	COMMENT(x)	""

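/*
 * Initiator contexts (ictx) identify the driver-level entity that
 * originated a message; transaction contexts (tctx) identify individual
 * message wrappers.  Both are echoed back in reply frames, and the two
 * hash tables below are used to match replies to their owners.
 */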
#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])
#define	IOP_TCTXHASH_NBUCKETS	64
#define	IOP_TCTXHASH(tctx)	(&iop_tctxhashtbl[(tctx) & iop_tctxhash])

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static TAILQ_HEAD(, iop_msg) *iop_tctxhashtbl;
static u_long iop_tctxhash;
static void *iop_sdh;
static struct pool *iop_msgpool;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

extern struct cfdriver iop_cd;

#define	IC_CONFIGURE	0x01

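/*
 * Table of the I2O device classes that this driver knows about.  Classes
 * flagged IC_CONFIGURE are offered to sub-drivers during autoconfiguration;
 * the captions are used only when printing device information.
 */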
struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
	const char *ic_caption;
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char *iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t iop_inl(struct iop_softc *, int);
static inline void iop_outl(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static int	iop_reconfigure(struct iop_softc *, u_int32_t);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);
#ifdef notyet
static int	iop_vendor_print(void *, const char *);
#endif

static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static int	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static void	iop_reconfigure_proc(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_status_get(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct iop_msg *,
				struct i2o_reply *);
#endif

cdev_decl(iop);

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
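
/*
 * All register access goes through iop_inl() and iop_outl() so that reads
 * and writes are ordered by explicit bus_space barriers; this matters on
 * hosts with weakly-ordered memory systems.
 */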

/*
 * Initialise the adapter.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	int rv;
	u_int32_t mask;
	static int again;
	char ident[64];

	if (again == 0) {
		/* Create the shared message wrapper pool and hashes. */
		iop_msgpool = pool_create(sizeof(struct iop_msg), 0, 0, 0,
		    "ioppl", 0, NULL, NULL, M_DEVBUF);
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);
		iop_tctxhashtbl = hashinit(IOP_TCTXHASH_NBUCKETS, HASH_TAILQ,
		    M_DEVBUF, M_NOWAIT, &iop_tctxhash);
		again = 1;
	}

	/* Reset the IOP and request status. */
	printf("I2O adapter");

	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		return;
	}
	if ((rv = iop_status_get(sc)) != 0) {
		printf("%s: not responding (get status)\n", sc->sc_dv.dv_xname);
		return;
	}
	DPRINTF((" (state=%d)",
	    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff));
	sc->sc_flags |= IOP_HAVESTATUS;

	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem  %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o  %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxreplycnt = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxreplycnt > IOP_MAX_HW_REPLYCNT)
		sc->sc_maxreplycnt = IOP_MAX_HW_REPLYCNT;
	sc->sc_maxqueuecnt = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxqueuecnt > IOP_MAX_HW_QUEUECNT)
		sc->sc_maxqueuecnt = IOP_MAX_HW_QUEUECNT;

	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname,
	    sc->sc_maxqueuecnt, le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxreplycnt, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	SIMPLEQ_INIT(&sc->sc_queue);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT);
		if (iop_systab == NULL)
			return;

		memset(iop_systab, 0, i);
		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    5000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_DISCARD | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	if (iop_initiator_register(sc, &sc->sc_eventii) != 0) {
		printf("%s: unable to register initiator\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	if (iop_util_eventreg(sc, &sc->sc_eventii, 0xffffffff)) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

#ifdef notyet
	/* Attempt to match and attach a product-specific extension. */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);
#endif

	if ((rv = iop_reconfigure(sc, 0)) != 0) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		return;
	}

	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconfigure_proc, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread: listens for LCT change notification, and
 * initiates reconfiguration when one is received.
 */
static void
iop_reconfigure_proc(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;

	sc = cookie;

	for (;;) {
		chgind = le32toh(sc->sc_chgindicator) + 1;

		if (iop_lct_get0(sc, &lct, sizeof(lct), chgind) == 0) {
			DPRINTF(("%s: async reconfiguration (0x%08x)\n",
			    sc->sc_dv.dv_xname, le32toh(lct.changeindicator)));
			iop_reconfigure(sc, lct.changeindicator);
		}

		tsleep(iop_reconfigure_proc, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
static int
iop_reconfigure(struct iop_softc *sc, u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan *mb;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	rv = lockmgr(&sc->sc_conflock, LK_EXCLUSIVE | LK_RECURSEFAIL, NULL);
	if (rv != 0) {
		DPRINTF(("iop_reconfigure: unable to acquire lock\n"));
		return (rv);
	}

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			goto done;
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le32toh(le->localtid) & 4095;

			rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR);
			if (rv != 0) {
				DPRINTF(("iop_reconfigure: alloc msg\n"));
				goto done;
			}

			mb = (struct i2o_hba_bus_scan *)im->im_msg;
			mb->msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mb->msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mb->msgictx = IOP_ICTX;
			mb->msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_enqueue(sc, im, 5*60*1000);
			iop_msg_free(sc, NULL, im);
			if (rv != 0) {
				DPRINTF(("iop_reconfigure: scan failed\n"));
				goto done;
			}
		}
	} else if (chgind == sc->sc_chgindicator) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		goto done;
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		goto done;
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	if (sc->sc_lct->changeindicator == sc->sc_chgindicator) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		/* Nothing to do. */
		rv = 0;
		goto done;
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgindicator = sc->sc_lct->changeindicator;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_tidmap == NULL) {
		rv = ENOMEM;
		goto done;
	}
	memset(sc->sc_tidmap, 0, sc->sc_nlctent * sizeof(struct iop_tidmap));

	/* Match and attach child devices. */
	iop_configure_devices(sc);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);
		if ((ii->ii_flags & II_UTILITY) != 0)
			continue;
		if ((ii->ii_flags & II_CONFIGURED) == 0) {
			ii->ii_flags |= II_CONFIGURED;
			continue;
		}

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}
	rv = 0;

 done:
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
	return (rv);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	int i, nent;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		/*
		 * Ignore the device if it's in use.
		 */
		if ((le32toh(le->usertid) & 4095) != 4095)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = le32toh(le->localtid) & 4095;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if ((ii->ii_flags & II_UTILITY) != 0)
				continue;
			if (ia.ia_tid == ii->ii_tid)
				break;
		}
		if (ii != NULL)
			continue;

		if (config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch))
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
	}
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

#ifdef notyet
static int
iop_vendor_print(void *aux, const char *pnp)
{

	if (pnp != NULL)
		printf("vendor specific extension at %s", pnp);
	return (UNCONF);
}
#endif

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices... ");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX,
		    0, 5000);
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve adapter status.
 */
static int
iop_status_get(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_exec_status_get *mb;
	int rv, s;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
		return (rv);

	mb = (struct i2o_exec_status_get *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mb->reserved[0] = 0;
	mb->reserved[1] = 0;
	mb->reserved[2] = 0;
	mb->reserved[3] = 0;
	mb->addrlow = kvtop((caddr_t)&sc->sc_status);	/* XXX */
	mb->addrhigh = 0;
	mb->length = sizeof(sc->sc_status);

	s = splbio();
	memset(&sc->sc_status, 0, sizeof(sc->sc_status));

	if ((rv = iop_msg_send(sc, im, 0)) != 0) {
		splx(s);
		iop_msg_free(sc, NULL, im);
		return (rv);
	}

	/* XXX */
	POLL(2500, *((volatile u_char *)&sc->sc_status.syncbyte) == 0xff);

	splx(s);
	iop_msg_free(sc, NULL, im);
	return (*((volatile u_char *)&sc->sc_status.syncbyte) != 0xff);
}

/*
 * Initialise and populate the adapter's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	struct iop_msg *im;
	volatile u_int32_t status;
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mb;
	int i, rseg, rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
		return (rv);

	mb = (struct i2o_exec_outbound_init *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->pagesize = PAGE_SIZE;
	mb->flags = 0x80 | ((IOP_MAX_REPLY_SIZE >> 2) << 16);	/* XXX */

	status = 0;

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and in fact appears to be a bad
	 * thing).
	 */
	iop_msg_map(sc, im, (void *)&status, sizeof(status), 0);
	if ((rv = iop_msg_send(sc, im, 0)) != 0) {
		iop_msg_free(sc, NULL, im);
		return (rv);
	}
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);

	/* XXX */
	POLL(5000, status == I2O_EXEC_OUTBOUND_INIT_COMPLETE);
	if (status != I2O_EXEC_OUTBOUND_INIT_COMPLETE) {
		printf("%s: outbound FIFO init failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	/* If we need to allocate DMA safe memory, do it now. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxreplycnt * IOP_MAX_REPLY_SIZE;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap, sc->sc_rep,
		    sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxreplycnt, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += IOP_MAX_REPLY_SIZE;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mb;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_exec_hrt_get *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, hrt, size, 0);
	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	if ((rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr))) != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be
 * prepared to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	memset(lct, 0, size);
	memset(im->im_msg, 0, sizeof(im->im_msg));

	mb = (struct i2o_exec_lct_notify *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->classid = I2O_CLASS_ANY;
	mb->changeindicator = chgind;

	DPRINTF(("iop_lct_get0: reading LCT\n"));

	iop_msg_map(sc, im, lct, size, 0);
	rv = iop_msg_enqueue(sc, im, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Read or write the specified parameter group on the target device.
 */
int
iop_param_op(struct iop_softc *sc, int tid, int write, int group, void *buf,
	     int size)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mb;
	int rv, func, op;
	struct {
		struct	i2o_param_op_list_header olh;
		struct	i2o_param_op_all_template oat;
	} req;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	if (write) {
		func = I2O_UTIL_PARAMS_SET;
		op = I2O_PARAMS_OP_FIELD_SET;
	} else {
		func = I2O_UTIL_PARAMS_GET;
		op = I2O_PARAMS_OP_FIELD_GET;
	}

	mb = (struct i2o_util_params_op *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mb->msgfunc = I2O_MSGFUNC(tid, func);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->flags = 0;

	req.olh.count = htole16(1);
	req.olh.reserved = htole16(0);
	req.oat.operation = htole16(op);
	req.oat.fieldcount = htole16(0xffff);
	req.oat.group = htole16(group);

	memset(buf, 0, size);
	iop_msg_map(sc, im, &req, sizeof(req), 1);
	iop_msg_map(sc, im, buf, size, write);

	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg *mb;
	int rv, fl;

	fl = (async != 0 ? IM_NOWAIT : 0);
	if ((rv = iop_msg_alloc(sc, NULL, &im, fl | IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_msg *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_msg);
	mb->msgfunc = I2O_MSGFUNC(tid, function);
	mb->msgictx = ictx;
	mb->msgtctx = im->im_tctx;

	if (async)
		rv = iop_msg_enqueue(sc, im, timo);
	else
		rv = iop_msg_send(sc, im, timo);
	iop_msg_free(sc, NULL, im);
	return (rv);
}
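
/*
 * Example use of iop_simple_cmd(), taken from iop_shutdown() below:
 * quiesce an IOP, waiting up to five seconds for the reply:
 *
 *	iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
 *	    0, 5000);
 */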

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mb;
	struct iop_msg *im;
	u_int32_t mema[2], ioa[2];
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_exec_sys_tab_set *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mb->segnumber = 0;

	/* XXX This is questionable, but better than nothing... */
	mema[0] = le32toh(sc->sc_status.currentprivmembase);
	mema[1] = le32toh(sc->sc_status.currentprivmemsize);
	ioa[0] = le32toh(sc->sc_status.currentpriviobase);
	ioa[1] = le32toh(sc->sc_status.currentpriviosize);

	iop_msg_map(sc, im, iop_systab, iop_systab_size, 1);
	iop_msg_map(sc, im, mema, sizeof(mema), 1);
	iop_msg_map(sc, im, ioa, sizeof(ioa), 1);

	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Reset the adapter.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	struct iop_msg *im;
	volatile u_int32_t sw;
	u_int32_t mfa;
	struct i2o_exec_iop_reset *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
		return (rv);

	sw = 0;

	mb = (struct i2o_exec_iop_reset *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mb->reserved[0] = 0;
	mb->reserved[1] = 0;
	mb->reserved[2] = 0;
	mb->reserved[3] = 0;
	mb->statuslow = kvtop((caddr_t)&sw);		/* XXX */
	mb->statushigh = 0;

	if ((rv = iop_msg_send(sc, im, 0)) != 0) {
		iop_msg_free(sc, NULL, im);
		return (rv);
	}
	iop_msg_free(sc, NULL, im);

	POLL(2500, sw != 0);				/* XXX */
	if (sw != I2O_RESET_IN_PROGRESS) {
		printf("%s: reset rejected\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	if (sw == I2O_RESET_REJECTED)
		printf("%s: reset rejected?\n", sc->sc_dv.dv_xname);

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.
 */
int
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictx;
	static int stctx;

	/* 0 is reserved for system messages. */
	ii->ii_ictx = ++ictx;
	ii->ii_stctx = ++stctx | 0x80000000;
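
	/*
	 * The high bit set in ii_stctx above distinguishes static
	 * per-initiator transaction contexts (used by II_DISCARD
	 * initiators) from the per-message contexts generated by
	 * iop_msg_alloc(), which always have the high bit clear.
	 */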

	LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);

	return (0);
}

/*
 * Unregister an initiator.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{

	LIST_REMOVE(ii, ii_list);
	LIST_REMOVE(ii, ii_hash);
}

/*
 * Handle a reply frame from the adapter.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation... */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off, IOP_MAX_MSG_SIZE,
	    BUS_DMASYNC_POSTREAD);
	if (--sc->sc_stat.is_cur_hwqueue != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, NULL, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);

			/* Return the reply frame to the IOP's outbound FIFO. */
			iop_outl(sc, IOP_REG_OFIFO, rmfa);
			return (-1);
		}
	}

	status = rb->reqstatus;

	if (ii == NULL || (ii->ii_flags & II_DISCARD) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		tctx = le32toh(rb->msgtctx);
		im = TAILQ_FIRST(IOP_TCTXHASH(tctx));
		for (; im != NULL; im = TAILQ_NEXT(im, im_hash))
			if (im->im_tctx == tctx)
				break;
		if (im == NULL || (im->im_flags & IM_ALLOCED) == 0) {
#ifdef I2ODEBUG
			iop_reply_print(sc, NULL, rb);
#endif
			printf("%s: WARNING: bad tctx returned (%x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);

			/* Return the reply frame to the IOP's outbound FIFO. */
			iop_outl(sc, IOP_REG_OFIFO, rmfa);
			return (-1);
		}
#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif

		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (rb->reqstatus != 0)
			iop_reply_print(sc, im, rb);
#endif
		/* Notify the initiator. */
		if ((im->im_flags & IM_WAITING) != 0) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
			if (size > IOP_MAX_REPLY_SIZE)
				size = IOP_MAX_REPLY_SIZE;
			memcpy(im->im_msg, rb, size);
			wakeup(im);
		} else if ((im->im_flags & IM_NOINTR) == 0)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	/* Return the reply frame to the IOP's outbound FIFO. */
	iop_outl(sc, IOP_REG_OFIFO, rmfa);

	/* Run the queue. */
	if ((im = SIMPLEQ_FIRST(&sc->sc_queue)) != NULL)
		iop_msg_enqueue(sc, im, 0);

	return (status);
}

/*
 * Handle an interrupt from the adapter.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY &&
		    (rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY)
			break;
		iop_handle_reply(sc, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;
	event = le32toh(rb->event);

#ifndef I2ODEBUG
	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED)
		return;
#endif

	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

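/*
 * A minimal sketch of the message interface as the initiators in this
 * file use it (compare iop_hrt_get0() above): allocate a wrapper, fill
 * in the frame, map any data buffers, post the message, then tear it
 * all down again.  Here `tid', `function', `buf' and `size' stand in
 * for caller-supplied values:
 *
 *	struct iop_msg *im;
 *	struct i2o_msg *mb;
 *	int rv;
 *
 *	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
 *		return (rv);
 *	mb = (struct i2o_msg *)im->im_msg;
 *	mb->msgflags = I2O_MSGFLAGS(i2o_msg);
 *	mb->msgfunc = I2O_MSGFUNC(tid, function);
 *	mb->msgictx = IOP_ICTX;
 *	mb->msgtctx = im->im_tctx;
 *	iop_msg_map(sc, im, buf, size, 0);
 *	rv = iop_msg_enqueue(sc, im, 5000);
 *	iop_msg_unmap(sc, im);
 *	iop_msg_free(sc, NULL, im);
 */
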
/*
 * Allocate a message wrapper.
 */
int
iop_msg_alloc(struct iop_softc *sc, struct iop_initiator *ii,
	      struct iop_msg **imp, int flags)
{
	struct iop_msg *im;
	static int tctxgen = 666;
	int s, rv, i, tctx;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();	/* XXX */

	if (ii != NULL && (ii->ii_flags & II_DISCARD) != 0) {
		flags |= IM_DISCARD;
		tctx = ii->ii_stctx;
	} else
		tctx = tctxgen++ & 0x7fffffff;

	im = (struct iop_msg *)pool_get(iop_msgpool,
	    (flags & IM_NOWAIT) == 0 ? PR_WAITOK : 0);
	if (im == NULL) {
		splx(s);
		return (ENOMEM);
	}

	/* XXX */
	rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER, IOP_MAX_SGL_ENTRIES,
	    IOP_MAX_XFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &im->im_xfer[0].ix_map);
	if (rv != 0) {
		pool_put(iop_msgpool, im);
		splx(s);
		return (rv);
	}

	if ((flags & (IM_DISCARD | IM_NOICTX)) == 0)
		TAILQ_INSERT_TAIL(IOP_TCTXHASH(tctx), im, im_hash);

	splx(s);

	im->im_tctx = tctx;
	im->im_flags = flags | IM_ALLOCED;
	for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
		im->im_xfer[i].ix_size = 0;
	*imp = im;

	return (0);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_initiator *ii, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	/* XXX */
	bus_dmamap_destroy(sc->sc_dmat, im->im_xfer[0].ix_map);

	s = splbio();	/* XXX */

	if ((im->im_flags & (IM_DISCARD | IM_NOICTX)) == 0)
		TAILQ_REMOVE(IOP_TCTXHASH(im->im_tctx), im, im_hash);

	im->im_flags = 0;
	pool_put(iop_msgpool, im);
	splx(s);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, void *xferaddr,
	    int xfersize, int out)
{
	struct iop_xfer *ix;
	u_int32_t *mb;
	int rv, seg, i;

	for (i = 0, ix = im->im_xfer; i < IOP_MAX_MSG_XFERS; i++, ix++)
		if (ix->ix_size == 0)
			break;
#ifdef I2ODEBUG
	if (i == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/* Only the first DMA map is static. */
	if (i != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SGL_ENTRIES, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	rv = bus_dmamap_load(sc->sc_dmat, ix->ix_map, xferaddr, xfersize,
	    NULL, 0);
	if (rv != 0) {
		/* Don't leak a dynamically created DMA map. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
		return (rv);
	}

	/* Mark the xfer slot used only once the load has succeeded. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;

	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	mb = im->im_msg + (im->im_msg[0] >> 16);
	if (out)
		out = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		out = I2O_SGL_SIMPLE;

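	/*
	 * Each simple SGL element occupies two 32-bit words: the segment
	 * length OR'd with the flag bits, followed by the segment's
	 * physical address.
	 */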
	for (seg = 0; seg < ix->ix_map->dm_nsegs; seg++) {
#ifdef I2ODEBUG
		if ((seg << 1) + (im->im_msg[0] >> 16) >=
		    (IOP_MAX_MSG_SIZE >> 2))
			panic("iop_msg_map: message frame too large");
#endif
		if (seg == ix->ix_map->dm_nsegs - 1)
			out |= I2O_SGL_END_BUFFER;
		*mb++ = (u_int32_t)ix->ix_map->dm_segs[seg].ds_len | out;
		*mb++ = (u_int32_t)ix->ix_map->dm_segs[seg].ds_addr;
	}

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		im->im_msg[0] += ((im->im_msg[0] >> 16) + seg * 2) << 4;
		im->im_flags |= IM_SGLOFFADJ;
	}
	im->im_msg[0] += (seg << 17);
	return (0);
}

/*
 * Unmap all data transfers associated with a message wrapper.
 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

	for (i = 0, ix = im->im_xfer; i < IOP_MAX_MSG_XFERS; i++, ix++) {
		if (ix->ix_size == 0)
			break;
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);

		ix->ix_size = 0;
	}
}

/*
 * Send a message to the IOP.  Optionally, poll on completion.  Return
 * non-zero if failure status is returned and IM_NOINTR is set.
 */
int
iop_msg_send(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int32_t mfa, rmfa;
	int rv = 0, status = 0, i, s;	/* XXX initialised to quiet gcc */

#ifdef I2ODEBUG
	if ((im->im_flags & IM_NOICTX) == 0)
		if (im->im_msg[3] == IOP_ICTX &&
		    (im->im_flags & IM_NOINTR) == 0)
			panic("iop_msg_send: IOP_ICTX and !IM_NOINTR");
	if ((im->im_flags & IM_DISCARD) != 0)
		panic("iop_msg_send: IM_DISCARD");
#endif

	s = splbio();	/* XXX */

	/* Wait up to 250ms for an MFA. */
	POLL(250, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		DPRINTF(("%s: mfa not forthcoming\n", sc->sc_dv.dv_xname));
		splx(s);
		return (EBUSY);
	}

	/* Perform reply queue DMA synchronisation and update counters. */
	if ((im->im_flags & IM_NOICTX) == 0) {
		if (sc->sc_stat.is_cur_hwqueue == 0)
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
			    sc->sc_rep_size, BUS_DMASYNC_PREREAD);
		for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
			sc->sc_stat.is_bytes += im->im_xfer[i].ix_size;
		sc->sc_stat.is_requests++;
		if (++sc->sc_stat.is_cur_hwqueue > sc->sc_stat.is_peak_hwqueue)
			sc->sc_stat.is_peak_hwqueue =
			    sc->sc_stat.is_cur_hwqueue;
	}

	/* Terminate scatter/gather lists. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		im->im_msg[(im->im_msg[0] >> 16) - 2] |= I2O_SGL_END;

	/* Post the message frame. */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa,
	    im->im_msg, im->im_msg[0] >> 16);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa,
	    (im->im_msg[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP, thus starting the command. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	if (timo == 0) {
		splx(s);
		return (0);
	}

	/* Wait for completion. */
	for (timo *= 10; timo != 0; timo--) {
		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
			/* Double read to account for IOP bug. */
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa != IOP_MFA_EMPTY)
				status = iop_handle_reply(sc, rmfa);
		}
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		DELAY(100);
	}

	splx(s);

	if (timo == 0) {
#ifdef I2ODEBUG
		printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
		if (iop_status_get(sc) != 0)
			printf("iop_msg_send: unable to retrieve status\n");
		else
			printf("iop_msg_send: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
#endif
		rv = EBUSY;
	} else if ((im->im_flags & IM_NOINTR) != 0)
		rv = (status != I2O_STATUS_SUCCESS ? EIO : 0);

	return (rv);
}

/*
 * Try to post a message to the adapter; if that's not possible, enqueue it
 * with us.  If a timeout is specified, wait for the message to complete.
 */
int
iop_msg_enqueue(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int mfa;
	int s, fromqueue, i, rv;

#ifdef I2ODEBUG
	if (im == NULL)
		panic("iop_msg_enqueue: im == NULL");
	if (sc == NULL)
		panic("iop_msg_enqueue: sc == NULL");
	if ((im->im_flags & IM_NOICTX) != 0)
		panic("iop_msg_enqueue: IM_NOICTX");
	if (im->im_msg[3] == IOP_ICTX && (im->im_flags & IM_NOINTR) == 0)
		panic("iop_msg_enqueue: IOP_ICTX and no IM_NOINTR");
	if ((im->im_flags & IM_DISCARD) != 0 && timo != 0)
		panic("iop_msg_enqueue: IM_DISCARD && timo != 0");
	if ((im->im_flags & IM_NOINTR) == 0 && timo != 0)
		panic("iop_msg_enqueue: !IM_NOINTR && timo != 0");
#endif

	s = splbio();	/* XXX */
	fromqueue = (im == SIMPLEQ_FIRST(&sc->sc_queue));

	if (sc->sc_stat.is_cur_hwqueue >= sc->sc_maxqueuecnt) {
		/*
		 * While the IOP may be able to accept more inbound message
		 * frames than it advertises, don't push harder than it
		 * wants to go lest we starve it.
		 *
		 * XXX We should be handling IOP resource shortages.
		 */
		mfa = IOP_MFA_EMPTY;
		DPRINTF(("iop_msg_enqueue: exceeded max queue count\n"));
	} else {
		/* Double read to account for IOP bug. */
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
			mfa = iop_inl(sc, IOP_REG_IFIFO);
	}

	if (mfa == IOP_MFA_EMPTY) {
		DPRINTF(("iop_msg_enqueue: no mfa\n"));
		/* Can't transfer to h/w queue - queue with us. */
		if (!fromqueue) {
			SIMPLEQ_INSERT_TAIL(&sc->sc_queue, im, im_queue);
			if (++sc->sc_stat.is_cur_swqueue >
			    sc->sc_stat.is_peak_swqueue)
				sc->sc_stat.is_peak_swqueue =
				    sc->sc_stat.is_cur_swqueue;
		}
		splx(s);
		if ((im->im_flags & IM_NOINTR) != 0)
			rv = iop_msg_wait(sc, im, timo);
		else
			rv = 0;
		return (rv);
	} else if (fromqueue) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, im, im_queue);
		sc->sc_stat.is_cur_swqueue--;
	}

	if ((im->im_flags & IM_NOINTR) != 0)
		im->im_flags |= IM_WAITING;

	/* Perform reply queue DMA synchronisation and update counters. */
	if (sc->sc_stat.is_cur_hwqueue == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);

	for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
		sc->sc_stat.is_bytes += im->im_xfer[i].ix_size;
	sc->sc_stat.is_requests++;
	if (++sc->sc_stat.is_cur_hwqueue > sc->sc_stat.is_peak_hwqueue)
		sc->sc_stat.is_peak_hwqueue = sc->sc_stat.is_cur_hwqueue;

	/* Terminate the scatter/gather list. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		im->im_msg[(im->im_msg[0] >> 16) - 2] |= I2O_SGL_END;

	/* Post the message frame. */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa,
	    im->im_msg, im->im_msg[0] >> 16);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa,
	    (im->im_msg[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP, thus starting the command. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	/*
	 * If this is a discardable message wrapper, free it; the wrapper
	 * must not be touched again once it has been freed.
	 */
	if ((im->im_flags & IM_DISCARD) != 0) {
		iop_msg_free(sc, NULL, im);
		splx(s);
		return (0);
	}
	splx(s);

	if ((im->im_flags & IM_NOINTR) != 0)
		rv = iop_msg_wait(sc, im, timo);
	else
		rv = 0;
	return (rv);
}

/*
 * Wait for the specified message to complete.
 */
static int
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	struct i2o_reply *rb;
	int rv, s;

	s = splbio();
	if ((im->im_flags & IM_REPLIED) != 0) {
		splx(s);
		return (0);
	}
	rv = tsleep(im, PRIBIO, "iopmsg", timo * hz / 1000);
	splx(s);
#ifdef I2ODEBUG
	if (rv != 0) {
		printf("iop_msg_wait: tsleep() == %d\n", rv);
		if (iop_status_get(sc) != 0)
			printf("iop_msg_wait: unable to retrieve status\n");
		else
			printf("iop_msg_wait: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
	}
#endif
	if ((im->im_flags & (IM_REPLIED | IM_NOSTATUS)) == IM_REPLIED) {
		rb = (struct i2o_reply *)im->im_msg;
		rv = (rb->reqstatus != I2O_STATUS_SUCCESS ? EIO : 0);
	}
	return (rv);
}

/*
 * Release an unused message frame back to the IOP's inbound fifo.
 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op. */
	iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
	iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	iop_outl(sc, mfa + 8, 0);
	iop_outl(sc, mfa + 12, 0);

	iop_outl(sc, IOP_REG_IFIFO, mfa);
}

#ifdef I2ODEBUG
/*
 * Print status information from a failure reply frame.
 */
static void
iop_reply_print(struct iop_softc *sc, struct iop_msg *im,
		struct i2o_reply *rb)
{
	u_int function, detail;
#ifdef I2OVERBOSE
	const char *statusstr;
#endif

	if (im != NULL && (im->im_flags & IM_REPLIED) == 0)
		panic("iop_reply_print: %p not replied to", im);

	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
	detail = le16toh(rb->detail);

	printf("%s: reply:\n", sc->sc_dv.dv_xname);

#ifdef I2OVERBOSE
	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: function=0x%02x status=0x%02x (%s)\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
#else
	printf("%s: function=0x%02x status=0x%02x\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus);
#endif
	printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
	    sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
	    le32toh(rb->msgtctx));
	printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
	    (le32toh(rb->msgflags) >> 8) & 0xff);
}
#endif

/*
 * Translate an I2O ASCII field into a C string.
 */
void
iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
{
	int hc, lc, i, nit;

	dlen--;
	lc = 0;
	hc = 0;
	i = 0;

	/*
	 * DPT uses NUL as a space, whereas AMI uses it as a terminator.  The
	 * spec has nothing to say about it.  Since AMI fields are usually
	 * filled with junk after the terminator, we treat NUL as a
	 * terminator on everything but DPT boards.
	 */
	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);

	while (slen-- != 0 && dlen-- != 0) {
		if (nit && *src == '\0')
			break;
		else if (*src <= 0x20 || *src >= 0x7f) {
			if (hc)
				dst[i++] = ' ';
		} else {
			hc = 1;
			dst[i++] = *src;
			lc = i;
		}
		src++;
	}

	dst[lc] = '\0';
}

/*
 * Claim or unclaim the specified TID.
 */
int
iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
	       int flags)
{
	struct iop_msg *im;
	struct i2o_util_claim *mb;
	int rv, func;

	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;

	if ((rv = iop_msg_alloc(sc, ii, &im, IM_NOINTR)) != 0)
		return (rv);

	/* We can use the same structure, as both are identical. */
	mb = (struct i2o_util_claim *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_claim);
	mb->msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
	mb->msgictx = ii->ii_ictx;
	mb->msgtctx = im->im_tctx;
	mb->flags = flags;

	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_free(sc, ii, im);
	return (rv);
}

/*
 * Perform an abort.
 */
int iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
		   int tctxabort, int flags)
{
	struct iop_msg *im;
	struct i2o_util_abort *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, ii, &im, IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_util_abort *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_abort);
	mb->msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
	mb->msgictx = ii->ii_ictx;
	mb->msgtctx = im->im_tctx;
	mb->flags = (func << 24) | flags;
	mb->tctxabort = tctxabort;

	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_free(sc, ii, im);
	return (rv);
}

/*
 * Enable or disable event types for the specified device.
 */
int iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
{
	struct iop_msg *im;
	struct i2o_util_event_register *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, ii, &im, 0)) != 0)
		return (rv);

	mb = (struct i2o_util_event_register *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_event_register);
	mb->msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
	mb->msgictx = ii->ii_ictx;
	mb->msgtctx = im->im_tctx;
	mb->eventmask = mask;

	return (iop_msg_enqueue(sc, im, 0));
}

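/*
 * Open the IOP's control device.  Only one process may have it open at a
 * time, the caller must be superuser, and the IOP must be on-line.
 */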
int
iopopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;
	int error;

	if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((sc->sc_flags & IOP_OPEN) != 0)
		return (EBUSY);
	if ((sc->sc_flags & IOP_ONLINE) == 0)
		return (EIO);
	sc->sc_flags |= IOP_OPEN;

	return (0);
}

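/*
 * Close the control device, clearing the exclusive-open flag.
 */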
int
iopclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;

	sc = device_lookup(&iop_cd, minor(dev));
	sc->sc_flags &= ~IOP_OPEN;
	return (0);
}

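/*
 * Handle control operations: pass-through message submission (IOPIOCPT),
 * retrieval of the LCT and IOP status, and reconfiguration requests.
 */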
int
iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct iop_softc *sc;
	struct iovec *iov;
	struct ioppt *pt;
	struct iop_msg *im;
	struct i2o_msg *mb;
	struct i2o_reply *rb;
	int rv, i;

	if (securelevel >= 2)
		return (EPERM);

	sc = device_lookup(&iop_cd, minor(dev));

	switch (cmd) {
	case IOPIOCPT:
		pt = (struct ioppt *)data;

		if (pt->pt_msglen > IOP_MAX_MSG_SIZE ||
		    pt->pt_msglen < sizeof(struct i2o_msg) ||
		    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
		    pt->pt_nbufs < 0 ||
		    pt->pt_replylen < 0 ||
		    pt->pt_timo < 1000 ||
		    pt->pt_timo > 5*60*1000) {
			rv = EINVAL;
			break;
		}

		rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR | IM_NOSTATUS);
		if (rv != 0)
			break;

		if ((rv = copyin(pt->pt_msg, im->im_msg, pt->pt_msglen)) != 0) {
			iop_msg_free(sc, NULL, im);
			break;
		}

		mb = (struct i2o_msg *)im->im_msg;
		mb->msgictx = IOP_ICTX;
		mb->msgtctx = im->im_tctx;

		for (i = 0; i < pt->pt_nbufs; i++) {
			rv = iop_msg_map(sc, im, pt->pt_bufs[i].ptb_data,
			    pt->pt_bufs[i].ptb_datalen,
			    pt->pt_bufs[i].ptb_out != 0);
			if (rv != 0) {
				/* Unmap any buffers already mapped. */
				iop_msg_unmap(sc, im);
				iop_msg_free(sc, NULL, im);
				return (rv);
			}
		}

		if ((rv = iop_msg_enqueue(sc, im, pt->pt_timo)) == 0) {
			rb = (struct i2o_reply *)im->im_msg;
			i = (le32toh(rb->msgflags) >> 14) & ~3;	/* XXX */
			if (i > IOP_MAX_REPLY_SIZE)
				i = IOP_MAX_REPLY_SIZE;
			if (i > pt->pt_replylen)
				i = pt->pt_replylen;
			rv = copyout(rb, pt->pt_reply, i);
		}

		iop_msg_free(sc, NULL, im);
		break;

	case IOPIOCGLCT:
		iov = (struct iovec *)data;
		rv = lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
		if (rv == 0) {
			i = le16toh(sc->sc_lct->tablesize) << 2;
			if (i > iov->iov_len)
				i = iov->iov_len;
			else
				iov->iov_len = i;
			rv = copyout(sc->sc_lct, iov->iov_base, i);
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}
		break;

	case IOPIOCGSTATUS:
		iov = (struct iovec *)data;
		i = sizeof(struct i2o_status);
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		if ((rv = iop_status_get(sc)) == 0)
			rv = copyout(&sc->sc_status, iov->iov_base, i);
		break;

	case IOPIOCRECONFIG:
		rv = iop_reconfigure(sc, 0);
		break;

	default:
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
#endif
		rv = ENOTTY;
		break;
	}

	return (rv);
}