/*	$NetBSD: iop.c,v 1.5 2000/12/03 13:17:03 ad Exp $	*/

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/pool.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

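/*
 * POLL() busy-waits for up to `ms' milliseconds, re-evaluating `cond'
 * roughly every 100us and bailing out as soon as it becomes true, e.g.:
 *
 *	POLL(2500, sw != 0);
 *
 * spins for at most 2.5 seconds.  Note that `cond' may have side effects
 * (several callers latch a register read in it).
 */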
#define	POLL(ms, cond)				\
do {						\
	int i;					\
	for (i = (ms) * 10; i; i--) {		\
		if (cond)			\
			break;			\
		DELAY(100);			\
	}					\
} while (/* CONSTCOND */0)

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#else
#define	IFVERBOSE(x)
#endif

#define	COMMENT(x)	""
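
/*
 * Device class captions: IFVERBOSE() keeps its caption only when the
 * kernel is built with I2OVERBOSE, while COMMENT() always compiles its
 * argument out, leaving the string purely as source documentation.
 */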

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])
#define	IOP_TCTXHASH_NBUCKETS	64
#define	IOP_TCTXHASH(tctx)	(&iop_tctxhashtbl[(tctx) & iop_tctxhash])
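
/*
 * Initiator contexts (ictx) and transaction contexts (tctx) returned by
 * the IOP in reply frames are mapped back to their owners through these
 * two hash tables; the bucket masks (iop_ictxhash, iop_tctxhash) are
 * filled in by hashinit() at first attach.
 */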

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long	iop_ictxhash;
static TAILQ_HEAD(, iop_msg) *iop_tctxhashtbl;
static u_long	iop_tctxhash;
static void	*iop_sdh;
static struct	pool *iop_msgpool;
static struct	i2o_systab *iop_systab;
static int	iop_systab_size;

extern struct cfdriver iop_cd;

#define	IC_CONFIGURE	0x01

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
	const char	*ic_caption;
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char *iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static int	iop_reconfigure(struct iop_softc *, u_int32_t);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);
#ifdef notyet
static int	iop_vendor_print(void *, const char *);
#endif

static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
		    u_int32_t);
static int	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static void	iop_reconfigure_proc(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_status_get(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct iop_msg *,
		    struct i2o_reply *);
#endif

cdev_decl(iop);

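/*
 * Register accessors.  The explicit bus_space barriers keep programmed
 * I/O to the IOP's FIFO registers strongly ordered on bus
 * implementations that would otherwise reorder reads and writes.
 */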
static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the adapter.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	int rv;
	u_int32_t mask;
	static int again;
	char ident[64];

	if (again == 0) {
		/* Create the shared message wrapper pool and hashes. */
		iop_msgpool = pool_create(sizeof(struct iop_msg), 0, 0, 0,
		    "ioppl", 0, NULL, NULL, M_DEVBUF);
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);
		iop_tctxhashtbl = hashinit(IOP_TCTXHASH_NBUCKETS, HASH_TAILQ,
		    M_DEVBUF, M_NOWAIT, &iop_tctxhash);
		again = 1;
	}

	/* Reset the IOP and request status. */
	printf("I2O adapter");

	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		return;
	}
	if ((rv = iop_status_get(sc)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	DPRINTF((" (state=%d)",
	    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff));
	sc->sc_flags |= IOP_HAVESTATUS;

	iop_strvis(sc, sc->sc_status.productid,
	    sizeof(sc->sc_status.productid), ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem  %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o  %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxreplycnt = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxreplycnt > IOP_MAX_HW_REPLYCNT)
		sc->sc_maxreplycnt = IOP_MAX_HW_REPLYCNT;
	sc->sc_maxqueuecnt = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxqueuecnt > IOP_MAX_HW_QUEUECNT)
		sc->sc_maxqueuecnt = IOP_MAX_HW_QUEUECNT;

	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname,
	    sc->sc_maxqueuecnt, le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxreplycnt, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	SIMPLEQ_INIT(&sc->sc_queue);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT);
		if (iop_systab == NULL) {
			printf("%s: unable to allocate system table\n",
			    sc->sc_dv.dv_xname);
			return;
		}

		memset(iop_systab, 0, i);
		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    5000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_DISCARD | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	if (iop_initiator_register(sc, &sc->sc_eventii) != 0) {
		printf("%s: unable to register initiator\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	if (iop_util_eventreg(sc, &sc->sc_eventii, 0xffffffff)) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

#ifdef notyet
	/* Attempt to match and attach a product-specific extension. */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);
#endif

	if ((rv = iop_reconfigure(sc, 0)) != 0) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		return;
	}

	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconfigure_proc, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration when one is received.
 */
static void
iop_reconfigure_proc(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;

	sc = cookie;

	for (;;) {
		chgind = le32toh(sc->sc_chgindicator) + 1;

		if (iop_lct_get0(sc, &lct, sizeof(lct), chgind) == 0) {
			DPRINTF(("%s: async reconfiguration (0x%08x)\n",
			    sc->sc_dv.dv_xname,
			    le32toh(lct.changeindicator)));
			iop_reconfigure(sc, lct.changeindicator);
		}

		tsleep(iop_reconfigure_proc, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
static int
iop_reconfigure(struct iop_softc *sc, u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan *mb;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	rv = lockmgr(&sc->sc_conflock, LK_EXCLUSIVE | LK_RECURSEFAIL, NULL);
	if (rv != 0) {
		DPRINTF(("iop_reconfigure: unable to acquire lock\n"));
		return (rv);
	}

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			goto done;
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le32toh(le->localtid) & 4095;

			rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR);
			if (rv != 0) {
				DPRINTF(("iop_reconfigure: alloc msg\n"));
				goto done;
			}

			mb = (struct i2o_hba_bus_scan *)im->im_msg;
			mb->msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mb->msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mb->msgictx = IOP_ICTX;
			mb->msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_enqueue(sc, im, 5*60*1000);
			iop_msg_free(sc, NULL, im);
			if (rv != 0) {
				DPRINTF(("iop_reconfigure: scan failed\n"));
				goto done;
			}
		}
	} else if (chgind == sc->sc_chgindicator) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		goto done;
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		goto done;
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	if (sc->sc_lct->changeindicator == sc->sc_chgindicator) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		/* Nothing to do. */
		rv = 0;
		goto done;
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgindicator = sc->sc_lct->changeindicator;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_tidmap == NULL) {
		rv = ENOMEM;
		goto done;
	}
	memset(sc->sc_tidmap, 0, sc->sc_nlctent * sizeof(struct iop_tidmap));

	/* Match and attach child devices. */
	iop_configure_devices(sc);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);
		if ((ii->ii_flags & II_UTILITY) != 0)
			continue;
		if ((ii->ii_flags & II_CONFIGURED) == 0) {
			ii->ii_flags |= II_CONFIGURED;
			continue;
		}

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}
	rv = 0;

 done:
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
	return (rv);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	int i, nent;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		/*
		 * Ignore the device if it's in use.
		 */
		if ((le32toh(le->usertid) & 4095) != 4095)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = le32toh(le->localtid) & 4095;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
			if (ia.ia_tid == ii->ii_tid)
				break;
		if (ii != NULL)
			continue;

		if (config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch))
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
	}
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

#ifdef notyet
static int
iop_vendor_print(void *aux, const char *pnp)
{

	if (pnp != NULL)
		printf("vendor specific extension at %s", pnp);
	return (UNCONF);
}
#endif

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices... ");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX,
		    0, 5000);
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve adapter status.
 */
static int
iop_status_get(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_exec_status_get *mb;
	int rv, s;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
		return (rv);

	mb = (struct i2o_exec_status_get *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mb->reserved[0] = 0;
	mb->reserved[1] = 0;
	mb->reserved[2] = 0;
	mb->reserved[3] = 0;
	mb->addrlow = kvtop((caddr_t)&sc->sc_status);	/* XXX */
	mb->addrhigh = 0;
	mb->length = sizeof(sc->sc_status);

	s = splbio();
	memset(&sc->sc_status, 0, sizeof(sc->sc_status));

	if ((rv = iop_msg_send(sc, im, 0)) != 0) {
		splx(s);
		iop_msg_free(sc, NULL, im);
		return (rv);
	}

	/* XXX */
	POLL(2500, *((volatile u_char *)&sc->sc_status.syncbyte) == 0xff);

	splx(s);
	iop_msg_free(sc, NULL, im);
	return (*((volatile u_char *)&sc->sc_status.syncbyte) != 0xff);
}

/*
 * Initialise and populate the adapter's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	struct iop_msg *im;
	volatile u_int32_t status;
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mb;
	int i, rseg, rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
		return (rv);

	mb = (struct i2o_exec_outbound_init *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->pagesize = PAGE_SIZE;
	mb->flags = 0x80 | ((IOP_MAX_REPLY_SIZE >> 2) << 16);	/* XXX */

	status = 0;

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and in fact appears to be a bad
	 * thing).
	 */
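	/*
	 * The single map below therefore yields just one simple SGL
	 * element in the frame: a flags/length word (roughly
	 * I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | sizeof(status)) followed
	 * by the physical address of `status'.
	 */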
	iop_msg_map(sc, im, (void *)&status, sizeof(status), 0);
	if ((rv = iop_msg_send(sc, im, 0)) != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, NULL, im);
		return (rv);
	}
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);

	/* XXX */
	POLL(5000, status == I2O_EXEC_OUTBOUND_INIT_COMPLETE);
	if (status != I2O_EXEC_OUTBOUND_INIT_COMPLETE) {
		printf("%s: outbound FIFO init failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	/* If we need to allocate DMA safe memory, do it now. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxreplycnt * IOP_MAX_REPLY_SIZE;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxreplycnt, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += IOP_MAX_REPLY_SIZE;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource
 * table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mb;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_exec_hrt_get *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, hrt, size, 0);
	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	if ((rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr))) != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this is a
 * verbatim notification request, so the caller must be prepared to wait
 * indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
    u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	memset(lct, 0, size);
	memset(im->im_msg, 0, sizeof(im->im_msg));

	mb = (struct i2o_exec_lct_notify *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->classid = I2O_CLASS_ANY;
	mb->changeindicator = chgind;

	DPRINTF(("iop_lct_get0: reading LCT\n"));

	iop_msg_map(sc, im, lct, size, 0);
	rv = iop_msg_enqueue(sc, im, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.  Access to our private copy
 * of the LCT must be serialized through sc_conflock; this copy should match
 * the current Un*x device configuration.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
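	/*
	 * Compute the entry count: tablesize is in 32-bit words, and the
	 * LCT header already contains one inline entry, hence the
	 * "+ sizeof(struct i2o_lct_entry)" adjustment below.
	 */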
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Read or write the specified parameter group on the target device.
 */
int
iop_param_op(struct iop_softc *sc, int tid, int write, int group, void *buf,
    int size)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mb;
	int rv, func, op;
	struct {
		struct	i2o_param_op_list_header olh;
		struct	i2o_param_op_all_template oat;
	} req;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	if (write) {
		func = I2O_UTIL_PARAMS_SET;
		op = I2O_PARAMS_OP_FIELD_SET;
	} else {
		func = I2O_UTIL_PARAMS_GET;
		op = I2O_PARAMS_OP_FIELD_GET;
	}

	mb = (struct i2o_util_params_op *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mb->msgfunc = I2O_MSGFUNC(tid, func);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->flags = 0;

	req.olh.count = htole16(1);
	req.olh.reserved = htole16(0);
	req.oat.operation = htole16(op);
	req.oat.fieldcount = htole16(0xffff);
	req.oat.group = htole16(group);

	/* Don't zero the caller's data when this is a write. */
	if (!write)
		memset(buf, 0, size);
	iop_msg_map(sc, im, &req, sizeof(req), 1);
	iop_msg_map(sc, im, buf, size, write);

	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}
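
/*
 * A read of a whole parameter group then looks like this (a sketch only;
 * the group constant and result structure are assumed to come from
 * <dev/i2o/i2o.h>):
 *
 *	struct i2o_param_device_identity di;
 *
 *	rv = iop_param_op(sc, tid, 0, I2O_PARAM_DEVICE_IDENTITY,
 *	    &di, sizeof(di));
 */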

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
    int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg *mb;
	int rv, fl;

	fl = (async != 0 ? IM_NOWAIT : 0);
	if ((rv = iop_msg_alloc(sc, NULL, &im, fl | IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_msg *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_msg);
	mb->msgfunc = I2O_MSGFUNC(tid, function);
	mb->msgictx = ictx;
	mb->msgtctx = im->im_tctx;

	if (async)
		rv = iop_msg_enqueue(sc, im, timo);
	else
		rv = iop_msg_send(sc, im, timo);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mb;
	struct iop_msg *im;
	u_int32_t mema[2], ioa[2];
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_exec_sys_tab_set *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mb->msgictx = IOP_ICTX;
	mb->msgtctx = im->im_tctx;
	mb->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mb->segnumber = 0;

	/* XXX This is questionable, but better than nothing... */
	mema[0] = le32toh(sc->sc_status.currentprivmembase);
	mema[1] = le32toh(sc->sc_status.currentprivmemsize);
	ioa[0] = le32toh(sc->sc_status.currentpriviobase);
	ioa[1] = le32toh(sc->sc_status.currentpriviosize);

	iop_msg_map(sc, im, iop_systab, iop_systab_size, 1);
	iop_msg_map(sc, im, mema, sizeof(mema), 1);
	iop_msg_map(sc, im, ioa, sizeof(ioa), 1);

	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, NULL, im);
	return (rv);
}

/*
 * Reset the adapter.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	struct iop_msg *im;
	volatile u_int32_t sw;
	u_int32_t mfa;
	struct i2o_exec_iop_reset *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, NULL, &im, IM_NOWAIT | IM_NOICTX)) != 0)
		return (rv);

	sw = 0;

	mb = (struct i2o_exec_iop_reset *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mb->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mb->reserved[0] = 0;
	mb->reserved[1] = 0;
	mb->reserved[2] = 0;
	mb->reserved[3] = 0;
	mb->statuslow = kvtop((caddr_t)&sw);		/* XXX */
	mb->statushigh = 0;

	rv = iop_msg_send(sc, im, 0);
	iop_msg_free(sc, NULL, im);
	if (rv != 0)
		return (rv);

	POLL(2500, sw != 0);				/* XXX */
	if (sw != I2O_RESET_IN_PROGRESS) {
		printf("%s: reset rejected\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	if (sw == I2O_RESET_REJECTED)
		printf("%s: reset rejected?\n", sc->sc_dv.dv_xname);

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.
 */
int
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictx;
	static int stctx;

	/* 0 is reserved for system messages. */
	ii->ii_ictx = ++ictx;
	ii->ii_stctx = ++stctx | 0x80000000;

	LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);

	return (0);
}

/*
 * Unregister an initiator.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{

	LIST_REMOVE(ii, ii_list);
	LIST_REMOVE(ii, ii_hash);
}

/*
 * Handle a reply frame from the adapter.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation... */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off, IOP_MAX_MSG_SIZE,
	    BUS_DMASYNC_POSTREAD);
	if (--sc->sc_stat.is_cur_hwqueue != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, NULL, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);

			/* Return the reply frame to the outbound FIFO. */
			iop_outl(sc, IOP_REG_OFIFO, rmfa);
			return (-1);
		}
	}

	status = rb->reqstatus;

	if (ii == NULL || (ii->ii_flags & II_DISCARD) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		tctx = le32toh(rb->msgtctx);
		im = TAILQ_FIRST(IOP_TCTXHASH(tctx));
		for (; im != NULL; im = TAILQ_NEXT(im, im_hash))
			if (im->im_tctx == tctx)
				break;
		if (im == NULL || (im->im_flags & IM_ALLOCED) == 0) {
#ifdef I2ODEBUG
			iop_reply_print(sc, NULL, rb);
#endif
			printf("%s: WARNING: bad tctx returned (%x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);

			/* Return the reply frame to the outbound FIFO. */
			iop_outl(sc, IOP_REG_OFIFO, rmfa);
			return (-1);
		}
#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif

		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (rb->reqstatus != 0)
			iop_reply_print(sc, im, rb);
#endif
		/* Notify the initiator. */
		if ((im->im_flags & IM_WAITING) != 0) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
			if (size > IOP_MAX_REPLY_SIZE)
				size = IOP_MAX_REPLY_SIZE;
			memcpy(im->im_msg, rb, size);
			wakeup(im);
		} else if ((im->im_flags & IM_NOINTR) == 0)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	/* Return the reply frame to the IOP's outbound FIFO. */
	iop_outl(sc, IOP_REG_OFIFO, rmfa);

	/* Run the queue. */
	if ((im = SIMPLEQ_FIRST(&sc->sc_queue)) != NULL)
		iop_msg_enqueue(sc, im, 0);

	return (status);
}

/*
 * Handle an interrupt from the adapter.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY &&
		    (rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY)
			break;
		iop_handle_reply(sc, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;
	event = le32toh(rb->event);

#ifndef I2ODEBUG
	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED)
		return;
#endif

	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
int
iop_msg_alloc(struct iop_softc *sc, struct iop_initiator *ii,
    struct iop_msg **imp, int flags)
{
	struct iop_msg *im;
	static int tctxgen = 666;
	int s, rv, i, tctx;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();	/* XXX */

	if (ii != NULL && (ii->ii_flags & II_DISCARD) != 0) {
		flags |= IM_DISCARD;
		tctx = ii->ii_stctx;
	} else
		tctx = tctxgen++ & 0x7fffffff;

	im = (struct iop_msg *)pool_get(iop_msgpool,
	    (flags & IM_NOWAIT) == 0 ? PR_WAITOK : 0);
	if (im == NULL) {
		splx(s);
		return (ENOMEM);
	}

	/* XXX */
	rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER, IOP_MAX_SGL_ENTRIES,
	    IOP_MAX_XFER, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
	    &im->im_xfer[0].ix_map);
	if (rv != 0) {
		pool_put(iop_msgpool, im);
		splx(s);
		return (rv);
	}

	if ((flags & (IM_DISCARD | IM_NOICTX)) == 0)
		TAILQ_INSERT_TAIL(IOP_TCTXHASH(tctx), im, im_hash);

	splx(s);

	im->im_tctx = tctx;
	im->im_flags = flags | IM_ALLOCED;
	for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
		im->im_xfer[i].ix_size = 0;
	*imp = im;

	return (0);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_initiator *ii, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	/* XXX */
	bus_dmamap_destroy(sc->sc_dmat, im->im_xfer[0].ix_map);

	s = splbio();	/* XXX */

	if ((im->im_flags & (IM_DISCARD | IM_NOICTX)) == 0)
		TAILQ_REMOVE(IOP_TCTXHASH(im->im_tctx), im, im_hash);

	im->im_flags = 0;
	pool_put(iop_msgpool, im);
	splx(s);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, void *xferaddr,
    int xfersize, int out)
{
	struct iop_xfer *ix;
	u_int32_t *mb;
	int rv, seg, i;

	for (i = 0, ix = im->im_xfer; i < IOP_MAX_MSG_XFERS; i++, ix++)
		if (ix->ix_size == 0)
			break;
#ifdef I2ODEBUG
	if (i == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/* Only the first DMA map is static. */
	if (i != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SGL_ENTRIES, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;

	rv = bus_dmamap_load(sc->sc_dmat, ix->ix_map, xferaddr, xfersize,
	    NULL, 0);
	if (rv != 0)
		return (rv);
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	mb = im->im_msg + (im->im_msg[0] >> 16);
	if (out)
		out = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		out = I2O_SGL_SIMPLE;

	for (seg = 0; seg < ix->ix_map->dm_nsegs; seg++) {
#ifdef I2ODEBUG
		if ((seg << 1) + (im->im_msg[0] >> 16) >=
		    (IOP_MAX_MSG_SIZE >> 2))
			panic("iop_map_xfer: message frame too large");
#endif
		if (seg == ix->ix_map->dm_nsegs - 1)
			out |= I2O_SGL_END_BUFFER;
		*mb++ = (u_int32_t)ix->ix_map->dm_segs[seg].ds_len | out;
		*mb++ = (u_int32_t)ix->ix_map->dm_segs[seg].ds_addr;
	}

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		im->im_msg[0] += ((im->im_msg[0] >> 16) + seg * 2) << 4;
		im->im_flags |= IM_SGLOFFADJ;
	}
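	/* Word 0 keeps the frame size in its upper 16 bits; each segment
	 * appended two 32-bit words, so (seg << 17) == (seg * 2) << 16. */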
	im->im_msg[0] += (seg << 17);
	return (0);
}

/*
 * Unmap all data transfers associated with a message wrapper.
 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

	for (i = 0, ix = im->im_xfer; i < IOP_MAX_MSG_XFERS; i++, ix++) {
		if (ix->ix_size == 0)
			break;
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);

		ix->ix_size = 0;
	}
}

/*
 * Send a message to the IOP.  Optionally, poll on completion.  Return
 * non-zero if failure status is returned and IM_NOINTR is set.
 */
int
iop_msg_send(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int32_t mfa, rmfa;
	int rv, status, i, s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_NOICTX) == 0)
		if (im->im_msg[3] == IOP_ICTX &&
		    (im->im_flags & IM_NOINTR) == 0)
			panic("iop_msg_send: IOP_ICTX and !IM_NOINTR");
	if ((im->im_flags & IM_DISCARD) != 0)
		panic("iop_msg_send: IM_DISCARD");
#endif

	rv = 0;
	status = I2O_STATUS_SUCCESS;

	s = splbio();	/* XXX */

	/* Wait up to 250ms for an MFA. */
	POLL(250, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		DPRINTF(("%s: mfa not forthcoming\n", sc->sc_dv.dv_xname));
		splx(s);
		return (EBUSY);
	}

	/* Perform reply queue DMA synchronisation and update counters. */
	if ((im->im_flags & IM_NOICTX) == 0) {
		if (sc->sc_stat.is_cur_hwqueue == 0)
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
			    sc->sc_rep_size, BUS_DMASYNC_PREREAD);
		for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
			sc->sc_stat.is_bytes += im->im_xfer[i].ix_size;
		sc->sc_stat.is_requests++;
		if (++sc->sc_stat.is_cur_hwqueue > sc->sc_stat.is_peak_hwqueue)
			sc->sc_stat.is_peak_hwqueue =
			    sc->sc_stat.is_cur_hwqueue;
	}

	/* Terminate scatter/gather lists. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		im->im_msg[(im->im_msg[0] >> 16) - 2] |= I2O_SGL_END;

	/* Post the message frame. */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa,
	    im->im_msg, im->im_msg[0] >> 16);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa,
	    (im->im_msg[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP, thus starting the command. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	if (timo == 0) {
		splx(s);
		return (0);
	}

	/* Wait for completion. */
	for (timo *= 10; timo != 0; timo--) {
		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
			/* Double read to account for IOP bug. */
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa != IOP_MFA_EMPTY)
				status = iop_handle_reply(sc, rmfa);
		}
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		DELAY(100);
	}

	splx(s);

	if (timo == 0) {
#ifdef I2ODEBUG
		printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
		if (iop_status_get(sc) != 0)
			printf("iop_msg_send: unable to retrieve status\n");
		else
			printf("iop_msg_send: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
#endif
		rv = EBUSY;
	} else if ((im->im_flags & IM_NOINTR) != 0)
		rv = (status != I2O_STATUS_SUCCESS ? EIO : 0);

	return (rv);
}

/*
 * Try to post a message to the adapter; if that's not possible, enqueue it
 * with us.  If a timeout is specified, wait for the message to complete.
 */
int
iop_msg_enqueue(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int mfa;
	int s, fromqueue, i, rv;

#ifdef I2ODEBUG
	if (im == NULL)
		panic("iop_msg_enqueue: im == NULL");
	if (sc == NULL)
		panic("iop_msg_enqueue: sc == NULL");
	if ((im->im_flags & IM_NOICTX) != 0)
		panic("iop_msg_enqueue: IM_NOICTX");
	if (im->im_msg[3] == IOP_ICTX && (im->im_flags & IM_NOINTR) == 0)
		panic("iop_msg_enqueue: IOP_ICTX and no IM_NOINTR");
	if ((im->im_flags & IM_DISCARD) != 0 && timo != 0)
		panic("iop_msg_enqueue: IM_DISCARD && timo != 0");
	if ((im->im_flags & IM_NOINTR) == 0 && timo != 0)
		panic("iop_msg_enqueue: !IM_NOINTR && timo != 0");
#endif

	s = splbio();	/* XXX */
	fromqueue = (im == SIMPLEQ_FIRST(&sc->sc_queue));

	if (sc->sc_stat.is_cur_hwqueue >= sc->sc_maxqueuecnt) {
		/*
		 * While the IOP may be able to accept more inbound message
		 * frames than it advertises, don't push harder than it
		 * wants to go lest we starve it.
		 *
		 * XXX We should be handling IOP resource shortages.
		 */
		mfa = IOP_MFA_EMPTY;
		DPRINTF(("iop_msg_enqueue: exceeded max queue count\n"));
	} else {
		/* Double read to account for IOP bug. */
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
			mfa = iop_inl(sc, IOP_REG_IFIFO);
	}

	if (mfa == IOP_MFA_EMPTY) {
		DPRINTF(("iop_msg_enqueue: no mfa\n"));
		/* Can't transfer to h/w queue - queue with us. */
		if (!fromqueue) {
			SIMPLEQ_INSERT_TAIL(&sc->sc_queue, im, im_queue);
			if (++sc->sc_stat.is_cur_swqueue >
			    sc->sc_stat.is_peak_swqueue)
				sc->sc_stat.is_peak_swqueue =
				    sc->sc_stat.is_cur_swqueue;
		}
		splx(s);
		if ((im->im_flags & IM_NOINTR) != 0)
			rv = iop_msg_wait(sc, im, timo);
		else
			rv = 0;
		return (rv);
	} else if (fromqueue) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, im, im_queue);
		sc->sc_stat.is_cur_swqueue--;
	}

	if ((im->im_flags & IM_NOINTR) != 0)
		im->im_flags |= IM_WAITING;

	/* Perform reply queue DMA synchronisation and update counters. */
	if (sc->sc_stat.is_cur_hwqueue == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);

	for (i = 0; i < IOP_MAX_MSG_XFERS; i++)
		sc->sc_stat.is_bytes += im->im_xfer[i].ix_size;
	sc->sc_stat.is_requests++;
	if (++sc->sc_stat.is_cur_hwqueue > sc->sc_stat.is_peak_hwqueue)
		sc->sc_stat.is_peak_hwqueue = sc->sc_stat.is_cur_hwqueue;

	/* Terminate the scatter/gather list. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		im->im_msg[(im->im_msg[0] >> 16) - 2] |= I2O_SGL_END;

	/* Post the message frame. */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa,
	    im->im_msg, im->im_msg[0] >> 16);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa,
	    (im->im_msg[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP, thus starting the command. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	/*
	 * If this is a discardable message wrapper, free it and be done;
	 * don't touch the wrapper again once it has been freed.
	 */
	if ((im->im_flags & IM_DISCARD) != 0) {
		iop_msg_free(sc, NULL, im);
		splx(s);
		return (0);
	}
	splx(s);

	if ((im->im_flags & IM_NOINTR) != 0)
		rv = iop_msg_wait(sc, im, timo);
	else
		rv = 0;
	return (rv);
}

/*
 * Wait for the specified message to complete.
 */
static int
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	struct i2o_reply *rb;
	int rv, s;

	s = splbio();
	if ((im->im_flags & IM_REPLIED) != 0) {
		splx(s);
		return (0);
	}
	rv = tsleep(im, PRIBIO, "iopmsg", timo * hz / 1000);
	splx(s);
#ifdef I2ODEBUG
	if (rv != 0) {
		printf("iop_msg_wait: tsleep() == %d\n", rv);
		if (iop_status_get(sc) != 0)
			printf("iop_msg_wait: unable to retrieve status\n");
		else
			printf("iop_msg_wait: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
	}
#endif
	if ((im->im_flags & (IM_REPLIED | IM_NOSTATUS)) == IM_REPLIED) {
		rb = (struct i2o_reply *)im->im_msg;
		rv = (rb->reqstatus != I2O_STATUS_SUCCESS ? EIO : 0);
	}
	return (rv);
}

/*
 * Release an unused message frame back to the IOP's inbound fifo.
 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op. */
	iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
	iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	iop_outl(sc, mfa + 8, 0);
	iop_outl(sc, mfa + 12, 0);

	iop_outl(sc, IOP_REG_IFIFO, mfa);
}

#ifdef I2ODEBUG
/*
 * Print status information from a failure reply frame.
 */
static void
iop_reply_print(struct iop_softc *sc, struct iop_msg *im,
    struct i2o_reply *rb)
{
	u_int function, detail;
#ifdef I2OVERBOSE
	const char *statusstr;
#endif

	if (im != NULL && (im->im_flags & IM_REPLIED) == 0)
		panic("iop_reply_print: %p not replied to", im);

	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
	detail = le16toh(rb->detail);

	printf("%s: reply:\n", sc->sc_dv.dv_xname);

#ifdef I2OVERBOSE
	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: function=0x%02x status=0x%02x (%s)\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
#else
	printf("%s: function=0x%02x status=0x%02x\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus);
#endif
	printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
	    sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
	    le32toh(rb->msgtctx));
	printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
	    (le32toh(rb->msgflags) >> 8) & 0xff);
}
#endif

/*
 * Translate an I2O ASCII field into a C string.
 */
void
iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
{
	int hc, lc, i, nit;

	dlen--;
	lc = 0;
	hc = 0;
	i = 0;

	/*
	 * DPT use NUL as a space, whereas AMI use it as a terminator.  The
	 * spec has nothing to say about it.  Since AMI fields are usually
	 * filled with junk after the terminator, ...
	 */
	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);

	while (slen-- != 0 && dlen-- != 0) {
		if (nit && *src == '\0')
			break;
		else if (*src <= 0x20 || *src >= 0x7f) {
			if (hc)
				dst[i++] = ' ';
		} else {
			hc = 1;
			dst[i++] = *src;
			lc = i;
		}
		src++;
	}

	dst[lc] = '\0';
}
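
/*
 * Example: iop_strvis() turns an AMI-style field "RAID5\0<junk>" into
 * "RAID5" (the NUL terminates), and a DPT-style "RAID5   " into "RAID5"
 * as well, since `lc' tracks the last printing character and trailing
 * blanks are trimmed.
 */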

/*
 * Claim or unclaim the specified TID.
 */
int
iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
    int flags)
{
	struct iop_msg *im;
	struct i2o_util_claim *mb;
	int rv, func;

	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;

	if ((rv = iop_msg_alloc(sc, ii, &im, IM_NOINTR)) != 0)
		return (rv);

	/* We can use the same structure, as both are identical. */
	mb = (struct i2o_util_claim *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_claim);
	mb->msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
	mb->msgictx = ii->ii_ictx;
	mb->msgtctx = im->im_tctx;
	mb->flags = flags;

	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_free(sc, ii, im);
	return (rv);
}

/*
 * Perform an abort.
 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
{
	struct iop_msg *im;
	struct i2o_util_abort *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, ii, &im, IM_NOINTR)) != 0)
		return (rv);

	mb = (struct i2o_util_abort *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_abort);
	mb->msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
	mb->msgictx = ii->ii_ictx;
	mb->msgtctx = im->im_tctx;
	mb->flags = (func << 24) | flags;
	mb->tctxabort = tctxabort;

	rv = iop_msg_enqueue(sc, im, 5000);
	iop_msg_free(sc, ii, im);
	return (rv);
}

/*
 * Enable or disable event types for the specified device.
 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
{
	struct iop_msg *im;
	struct i2o_util_event_register *mb;
	int rv;

	if ((rv = iop_msg_alloc(sc, ii, &im, 0)) != 0)
		return (rv);

	mb = (struct i2o_util_event_register *)im->im_msg;
	mb->msgflags = I2O_MSGFLAGS(i2o_util_event_register);
	mb->msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
	mb->msgictx = ii->ii_ictx;
	mb->msgtctx = im->im_tctx;
	mb->eventmask = mask;

	return (iop_msg_enqueue(sc, im, 0));
}

int
iopopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;
	int error;

	if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((sc->sc_flags & IOP_OPEN) != 0)
		return (EBUSY);
	if ((sc->sc_flags & IOP_ONLINE) == 0)
		return (EIO);
	sc->sc_flags |= IOP_OPEN;

	return (0);
}

int
iopclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;

	sc = device_lookup(&iop_cd, minor(dev));
	sc->sc_flags &= ~IOP_OPEN;
	return (0);
}

int
iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct iop_softc *sc;
	struct iovec *iov;
	struct ioppt *pt;
	struct iop_msg *im;
	struct i2o_msg *mb;
	struct i2o_reply *rb;
	int rv, i;

	if (securelevel >= 2)
		return (EPERM);

	sc = device_lookup(&iop_cd, minor(dev));

	switch (cmd) {
	case IOPIOCPT:
		pt = (struct ioppt *)data;

		if (pt->pt_msglen > IOP_MAX_MSG_SIZE ||
		    pt->pt_msglen < sizeof(struct i2o_msg) ||
		    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
		    pt->pt_nbufs < 0 ||
		    pt->pt_replylen < 0 ||
		    pt->pt_timo < 1000 ||
		    pt->pt_timo > 5*60*1000) {
			rv = EINVAL;
			break;
		}

		rv = iop_msg_alloc(sc, NULL, &im, IM_NOINTR | IM_NOSTATUS);
		if (rv != 0)
			break;

		if ((rv = copyin(pt->pt_msg, im->im_msg, pt->pt_msglen)) != 0) {
			iop_msg_free(sc, NULL, im);
			break;
		}

		mb = (struct i2o_msg *)im->im_msg;
		mb->msgictx = IOP_ICTX;
		mb->msgtctx = im->im_tctx;

		for (i = 0; i < pt->pt_nbufs; i++) {
			rv = iop_msg_map(sc, im, pt->pt_bufs[i].ptb_data,
			    pt->pt_bufs[i].ptb_datalen,
			    pt->pt_bufs[i].ptb_out != 0);
			if (rv != 0) {
				/* Unmap any buffers mapped so far. */
				iop_msg_unmap(sc, im);
				iop_msg_free(sc, NULL, im);
				return (rv);
			}
		}

		if ((rv = iop_msg_enqueue(sc, im, pt->pt_timo)) == 0) {
			rb = (struct i2o_reply *)im->im_msg;
			i = (le32toh(rb->msgflags) >> 14) & ~3;	/* XXX */
			if (i > IOP_MAX_REPLY_SIZE)
				i = IOP_MAX_REPLY_SIZE;
			if (i > pt->pt_replylen)
				i = pt->pt_replylen;
			rv = copyout(rb, pt->pt_reply, i);
		}

		iop_msg_unmap(sc, im);
		iop_msg_free(sc, NULL, im);
		break;

	case IOPIOCGLCT:
		iov = (struct iovec *)data;
		rv = lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
		if (rv == 0) {
			i = le16toh(sc->sc_lct->tablesize) << 2;
			if (i > iov->iov_len)
				i = iov->iov_len;
			else
				iov->iov_len = i;
			rv = copyout(sc->sc_lct, iov->iov_base, i);
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}
		break;

	case IOPIOCGSTATUS:
		iov = (struct iovec *)data;
		i = sizeof(struct i2o_status);
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		if ((rv = iop_status_get(sc)) == 0)
			rv = copyout(&sc->sc_status, iov->iov_base, i);
		break;

	case IOPIOCRECONFIG:
		rv = iop_reconfigure(sc, 0);
		break;

	default:
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
#endif
		rv = ENOTTY;
		break;
	}

	return (rv);
}
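
/*
 * For reference, a userland consumer would drive the IOPIOCPT
 * pass-through roughly as follows (a sketch only; the device node name
 * and the <dev/i2o/iopio.h> definitions are assumptions):
 *
 *	struct ioppt pt;
 *	u_int32_t msg[IOP_MAX_MSG_SIZE / 4], reply[IOP_MAX_REPLY_SIZE / 4];
 *	int fd;
 *
 *	fd = open("/dev/iop0", O_RDWR);
 *	... fill msg[] in with an I2O request; word 0 carries the frame
 *	... size in 32-bit words in its upper 16 bits
 *	pt.pt_msg = msg;
 *	pt.pt_msglen = (msg[0] >> 16) << 2;
 *	pt.pt_reply = reply;
 *	pt.pt_replylen = sizeof(reply);
 *	pt.pt_timo = 5000;
 *	pt.pt_nbufs = 0;
 *	if (ioctl(fd, IOPIOCPT, &pt) != 0)
 *		err(1, "IOPIOCPT");
 */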