/*	$NetBSD: iop.c,v 1.14 2001/06/12 15:17:27 wiz Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#define	POLL(ms, cond)						\
do {								\
	int i;							\
	for (i = (ms) * 10; i; i--) {				\
		if (cond)					\
			break;					\
		DELAY(100);					\
	}							\
} while (/* CONSTCOND */0)

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#define	COMMENT(x)	NULL
#else
#define	IFVERBOSE(x)
#define	COMMENT(x)
#endif

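/*
 * Initiator context (ictx) hash; used to map the ictx in a reply frame
 * back to the initiator that posted the original message.
 */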
#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

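/*
 * Transaction contexts: the low IOP_TCTX_SHIFT bits index the message
 * wrapper; the remaining bits carry a generation number, advanced on each
 * allocation, so that stale or duplicated replies can be detected.
 */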
#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long	iop_ictxhash;
static void	*iop_sdh;
static struct	i2o_systab *iop_systab;
static int	iop_systab_size;

extern struct cfdriver iop_cd;

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char	*ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static int	iop_reconfigure(struct iop_softc *, u_int);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);
#ifdef notyet
static int	iop_vendor_print(void *, const char *);
#endif

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *);
static int	iop_post(struct iop_softc *, u_int32_t *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_status_get(struct iop_softc *, int);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

cdev_decl(iop);

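/*
 * Register access helpers. A full barrier before each read ensures that
 * posted writes have been flushed; each write is followed by a write
 * barrier.
 */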
static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i;
	u_int32_t mask;
	char ident[64];

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Reset the IOP and request status. */
	printf("I2O adapter");

	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		return;
	}
	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n", sc->sc_dv.dv_xname);
		return;
	}
	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT);
	if (im == NULL) {
		printf("%s: couldn't allocate message wrappers\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	memset(im, 0, sizeof(*im) * sc->sc_maxib);
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)",
			    sc->sc_dv.dv_xname, rv);
			return;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT);
		if (iop_systab == NULL)
			return;

		memset(iop_systab, 0, i);
		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    30000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_DISCARD | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events", sc->sc_dv.dv_xname);
		return;
	}

#ifdef notyet
	/* Attempt to match and attach a product-specific extension. */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);
#endif

	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	rv = iop_reconfigure(sc, 0);
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
	if (rv != 0) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		return;
	}

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread. Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
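	/* Ask to be notified of any change past the current indicator. */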
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(sc->sc_reconf_proc);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(sc->sc_reconf_proc);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
static int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses. Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le32toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, NULL, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT);
	memset(sc->sc_tidmap, 0, sc->sc_nlctent * sizeof(struct iop_tidmap));

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices. We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0)
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le32toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

#ifdef notyet
static int
iop_vendor_print(void *aux, const char *pnp)
{

	if (pnp != NULL)
		printf("vendor specific extension at %s", pnp);
	return (UNCONF);
}
#endif

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX,
		    0, 1000);
	}

	/* Wait. Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done.\n");
}

/*
 * Retrieve IOP status.
 */
static int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	int rv, i;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = kvtop((caddr_t)&sc->sc_status);	/* XXX */
	mf.addrhigh = 0;
	mf.length = sizeof(sc->sc_status);

	memset(&sc->sc_status, 0, sizeof(sc->sc_status));

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	/* XXX */
	for (i = 25; i != 0; i--) {
		if (*((volatile u_char *)&sc->sc_status.syncbyte) == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (*((volatile u_char *)&sc->sc_status.syncbyte) != 0xff)
		rv = EIO;
	else
		rv = 0;
	return (rv);
}

/*
 * Initialise and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	struct iop_msg *im;
	volatile u_int32_t status;
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, NULL, IM_POLL);

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->pagesize = PAGE_SIZE;
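	/*
	 * Init code in the low bits; the upper 16 bits carry the outbound
	 * message frame size in 32-bit words (hence the >> 2).
	 */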
	mf->flags = IOP_INIT_CODE | ((IOP_MAX_MSG_SIZE >> 2) << 16);

	status = 0;

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs. It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	iop_msg_map(sc, im, mb, (void *)&status, sizeof(status), 0);
	if ((rv = iop_msg_post(sc, im, mb, 0)) != 0) {
		iop_msg_free(sc, im);
		return (rv);
	}
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);

	/* XXX */
	POLL(5000, status == I2O_EXEC_OUTBOUND_INIT_COMPLETE);
	if (status != I2O_EXEC_OUTBOUND_INIT_COMPLETE) {
		printf("%s: outbound FIFO init failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * IOP_MAX_MSG_SIZE;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap, sc->sc_rep,
		    sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += IOP_MAX_MSG_SIZE;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, NULL, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curproc);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curproc);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table. If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, NULL, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Request the specified parameter group from the target. If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_param_op(struct iop_softc *sc, int tid, struct iop_initiator *ii,
	     int write, int group, void *buf, int size)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct i2o_reply *rf;
	int rv, func, op;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, ii, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;
	im->im_rb = rf;

	if (write) {
		func = I2O_UTIL_PARAMS_SET;
		op = I2O_PARAMS_OP_FIELD_SET;
	} else {
		func = I2O_UTIL_PARAMS_GET;
		op = I2O_PARAMS_OP_FIELD_GET;
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, func);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(op);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curproc);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1);
	iop_msg_map(sc, im, mb, buf, size, write);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curproc);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (rf->reqstatus != 0 ? EIO : 0);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		free(rf, M_DEVBUF);
	}

	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, NULL, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, NULL, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curproc);
	return (rv);
}

/*
 * Reset the IOP. Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	volatile u_int32_t sw;
	u_int32_t mfa;
	struct i2o_exec_iop_reset mf;
	int rv;

	sw = 0;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = kvtop((caddr_t)&sw);		/* XXX */
	mf.statushigh = 0;

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500, sw != 0);				/* XXX */
	if (sw != I2O_RESET_IN_PROGRESS) {
		printf("%s: reset rejected\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state. Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator. Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}

/*
 * Unregister an initiator. Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. XXX This is rubbish. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    IOP_MAX_MSG_SIZE, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl(sc, fn->lowmfa + 12);	/* XXX */
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_DISCARD) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) >= sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > IOP_MAX_MSG_SIZE)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, struct iop_initiator *ii, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();	/* XXX */
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

	if (ii != NULL && (ii->ii_flags & II_DISCARD) != 0)
		flags |= IM_DISCARD;

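	/* Advance the generation number in the upper bits of the tctx. */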
	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}

/*
 * Map a data transfer. Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
	    void *xferaddr, int xfersize, int out)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL, 0);
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((IOP_MAX_MSG_SIZE / 4) - off) >> 1;
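	/* (Each SIMPLE element takes two words: flags/length and address.) */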

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
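	/* Each SIMPLE element added two words to the message size. */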
	mb[0] += (nsegs << 17);
	return (0);

 bad:
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}

/*
 * Map a block I/O data transfer (different in that there's only one per
 * message maximum, and PAGE addressing may be used). Write a scatter
 * gather list into the message frame.
 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
		void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL, 0);
	if (rv != 0)
		return (rv);

	off = mb[0] >> 16;
	nsegs = ((IOP_MAX_MSG_SIZE / 4) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead. SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
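		/*
		 * Emit one page address for every page spanned by each
		 * DMA segment.
		 */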
		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
			slen = ds->ds_len;
			saddr = ds->ds_addr;

			while (slen > 0) {
				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
				tlen = min(eaddr - saddr, slen);
				slen -= tlen;
				*p++ = le32toh(saddr);
				saddr = eaddr;
				nsegs++;
			}
		}

		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		if (out)
			mb[off] |= I2O_SGL_DATA_OUT;
	} else {
		p = mb + off;
		nsegs = dm->dm_nsegs;

		if (out)
			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
		else
			flg = I2O_SGL_SIMPLE;

		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
			p[0] = (u_int32_t)ds->ds_len | flg;
			p[1] = (u_int32_t)ds->ds_addr;
		}

		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		p[1] = (u_int32_t)ds->ds_addr;
		nsegs <<= 1;
	}

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * Adjust the SGL offset and total message size fields. We don't
	 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
	 */
	mb[0] += ((off << 4) + (nsegs << 16));
	return (0);
}

/*
 * Unmap all data transfers associated with a message wrapper.
 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

#ifdef I2ODEBUG
	if (im->im_xfer[0].ix_size == 0)
		panic("iop_msg_unmap: no transfers mapped");
#endif

	for (ix = im->im_xfer, i = 0;;) {
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
		if ((++ix)->ix_size == 0)
			break;
		if (++i >= IOP_MAX_MSG_XFERS)
			break;
	}
}

/*
 * Post a message frame to the IOP's inbound queue.
 */
int
iop_post(struct iop_softc *sc, u_int32_t *mb)
{
	u_int32_t mfa;
	int s;

	/* ZZZ */
	if ((mb[0] >> 16) > IOP_MAX_MSG_SIZE / 4)
		panic("iop_post: frame too large");

	s = splbio();	/* XXX */

	/* Allocate a slot with the IOP. */
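	/* (An empty FIFO can be reported spuriously; read twice, as in
	 * iop_intr().) */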
	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
			splx(s);
			printf("%s: mfa not forthcoming\n",
			    sc->sc_dv.dv_xname);
			return (EAGAIN);
		}

	/* Perform reply buffer DMA synchronisation. XXX This is rubbish. */
	if (sc->sc_curib++ == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);

	/* Copy out the message frame. */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa, mb, mb[0] >> 16);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa, (mb[0] >> 14) & ~3,
	    BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	splx(s);
	return (0);
}
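
/*
 * A typical synchronous exchange, as used throughout this file (a sketch
 * only; the frame pointer `mf' and buffer `buf' are assumed to have been
 * set up by the caller):
 *
 *	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
 *
 *	im = iop_msg_alloc(sc, NULL, IM_WAIT);
 *	mf = (struct i2o_exec_hrt_get *)mb;	// build the frame in mb
 *	mf->msgtctx = im->im_tctx;
 *	iop_msg_map(sc, im, mb, buf, size, 0);
 *	rv = iop_msg_post(sc, im, mb, 30000);
 *	iop_msg_unmap(sc, im);
 *	iop_msg_free(sc, im);
 */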

/*
 * Post a message to the IOP and deal with completion.
 */
int
iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
{
	u_int32_t *mb;
	int rv, s;

	mb = xmb;

	/* Terminate the scatter/gather list chain. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	if ((im->im_flags & IM_DISCARD) != 0)
		iop_msg_free(sc, im);
	else if ((im->im_flags & IM_POLL) != 0 && timo == 0) {
		/* XXX For ofifo_init(). */
		rv = 0;
	} else if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
		if ((im->im_flags & IM_POLL) != 0)
			iop_msg_poll(sc, im, timo);
		else
			iop_msg_wait(sc, im, timo);

		s = splbio();
		if ((im->im_flags & IM_REPLIED) != 0) {
			if ((im->im_flags & IM_NOSTATUS) != 0)
				rv = 0;
			else if ((im->im_flags & IM_FAIL) != 0)
				rv = ENXIO;
			else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
				rv = EIO;
			else
				rv = 0;
		} else
			rv = EBUSY;
		splx(s);
	} else
		rv = 0;

	return (rv);
}

/*
 * Spin until the specified message is replied to.
 */
static void
iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int32_t rmfa;
	int s, status;

	s = splbio();	/* XXX */

	/* Wait for completion. */
	for (timo *= 10; timo != 0; timo--) {
		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
			/* Double read to account for IOP bug. */
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa != IOP_MFA_EMPTY) {
				status = iop_handle_reply(sc, rmfa);

				/*
				 * Return the reply frame to the IOP's
				 * outbound FIFO.
				 */
				iop_outl(sc, IOP_REG_OFIFO, rmfa);
			}
		}
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		DELAY(100);
	}

	if (timo == 0) {
#ifdef I2ODEBUG
		printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
		if (iop_status_get(sc, 1) != 0)
			printf("iop_msg_poll: unable to retrieve status\n");
		else
			printf("iop_msg_poll: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
#endif
	}

	splx(s);
}

/*
 * Sleep until the specified message is replied to.
 */
static void
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	int s, rv;

	s = splbio();
	if ((im->im_flags & IM_REPLIED) != 0) {
		splx(s);
		return;
	}
	rv = tsleep(im, PRIBIO, "iopmsg", timo * hz / 1000);
	splx(s);

#ifdef I2ODEBUG
	if (rv != 0) {
		printf("iop_msg_wait: tsleep() == %d\n", rv);
		if (iop_status_get(sc, 0) != 0)
			printf("iop_msg_wait: unable to retrieve status\n");
		else
			printf("iop_msg_wait: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
	}
#endif
}

/*
 * Release an unused message frame back to the IOP's inbound FIFO.
 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op. */
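	/*
	 * Header word: version 1.1 and a four-word frame size, followed by
	 * the function word, ictx and tctx.
	 */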
2004 iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
2005 iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2006 iop_outl(sc, mfa + 8, 0);
2007 iop_outl(sc, mfa + 12, 0);
2008
2009 iop_outl(sc, IOP_REG_IFIFO, mfa);
2010 }
2011
2012 #ifdef I2ODEBUG
2013 /*
2014 * Dump a reply frame header.
2015 */
2016 static void
2017 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2018 {
2019 u_int function, detail;
2020 #ifdef I2OVERBOSE
2021 const char *statusstr;
2022 #endif
2023
	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
	detail = le16toh(rb->detail);

	printf("%s: reply:\n", sc->sc_dv.dv_xname);

#ifdef I2OVERBOSE
	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: function=0x%02x status=0x%02x (%s)\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
#else
	printf("%s: function=0x%02x status=0x%02x\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus);
#endif
	printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
	    sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
	    le32toh(rb->msgtctx));
	printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
	    (le32toh(rb->msgfunc) >> 12) & 0xfff, le32toh(rb->msgfunc) & 0xfff,
	    (le32toh(rb->msgflags) >> 8) & 0xff);
}
#endif

/*
 * Dump a transport failure reply.
 */
static void
iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
{

	printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);

	printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
	printf("%s: failurecode=0x%02x severity=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
	printf("%s: highestver=0x%02x lowestver=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
}

/*
 * Translate an I2O ASCII field into a C string.
 */
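/*
 * For example, the field "FOO\0BAR" becomes "FOO BAR" on a DPT IOP
 * (NUL is treated as a space) and "FOO" on others (NUL terminates).
 */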
void
iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
{
	int hc, lc, i, nit;

	dlen--;
	lc = 0;
	hc = 0;
	i = 0;

	/*
	 * DPT uses NUL as a space, whereas AMI uses it as a terminator.
	 * The spec has nothing to say about it.  Since AMI fields are
	 * usually filled with junk after the terminator, we treat NUL as
	 * a terminator unless the IOP is from DPT.
	 */
	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);

	while (slen-- != 0 && dlen-- != 0) {
		if (nit && *src == '\0')
			break;
		else if (*src <= 0x20 || *src >= 0x7f) {
			if (hc)
				dst[i++] = ' ';
		} else {
			hc = 1;
			dst[i++] = *src;
			lc = i;
		}
		src++;
	}

	dst[lc] = '\0';
}

/*
 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
 */
int
iop_print_ident(struct iop_softc *sc, int tid)
{
	struct {
		struct i2o_param_op_results pr;
		struct i2o_param_read_results prr;
		struct i2o_param_device_identity di;
	} __attribute__ ((__packed__)) p;
	char buf[32];
	int rv;

	rv = iop_param_op(sc, tid, NULL, 0, I2O_PARAM_DEVICE_IDENTITY, &p,
	    sizeof(p));
	if (rv != 0)
		return (rv);

	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
	    sizeof(buf));
	printf(" <%s, ", buf);
	iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
	    sizeof(buf));
	printf("%s, ", buf);
	iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
	printf("%s>", buf);

	return (0);
}

/*
 * Claim or unclaim the specified TID.
 */
int
iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
    int flags)
{
	struct iop_msg *im;
	struct i2o_util_claim mf;
	int rv, func;

	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
	im = iop_msg_alloc(sc, ii, IM_WAIT);

	/* We can use the same structure, as they're identical. */
	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = flags;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}
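
/*
 * Usage sketch (hedged; mirrors how a peripheral driver might claim
 * its target as primary user during attach, assuming the
 * I2O_UTIL_CLAIM_PRIMARY_USER flag from <dev/i2o/i2o.h>):
 *
 *	if (iop_util_claim(sc, &sc->sc_ii, 0,
 *	    I2O_UTIL_CLAIM_PRIMARY_USER) != 0)
 *		return;
 */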

/*
 * Perform an abort.
 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
{
	struct iop_msg *im;
	struct i2o_util_abort mf;
	int rv;

	im = iop_msg_alloc(sc, ii, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = (func << 24) | flags;
	mf.tctxabort = tctxabort;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Enable or disable reception of events for the specified device.
 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
{
	struct iop_msg *im;
	struct i2o_util_event_register mf;

	im = iop_msg_alloc(sc, ii, 0);

	mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.eventmask = mask;

	/*
	 * This message is replied to only when events are signalled, so
	 * the wrapper is deliberately not freed here.
	 */
	return (iop_msg_post(sc, im, &mf, 0));
}

int
iopopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;

	if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & IOP_ONLINE) == 0)
		return (ENXIO);
	if ((sc->sc_flags & IOP_OPEN) != 0)
		return (EBUSY);
	sc->sc_flags |= IOP_OPEN;

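	/*
	 * Pre-allocate the pass-through transfer space: one
	 * IOP_MAX_XFER-sized slot for each of the IOP_MAX_MSG_XFERS
	 * possible transfers (see iop_passthrough()).
	 */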
	sc->sc_ptb = malloc(IOP_MAX_XFER * IOP_MAX_MSG_XFERS, M_DEVBUF,
	    M_WAITOK);
	if (sc->sc_ptb == NULL) {
		sc->sc_flags &= ~IOP_OPEN;
		return (ENOMEM);
	}

	return (0);
}

int
iopclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;

	sc = device_lookup(&iop_cd, minor(dev));
	free(sc->sc_ptb, M_DEVBUF);
	sc->sc_flags &= ~IOP_OPEN;
	return (0);
}

int
iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct iop_softc *sc;
	struct iovec *iov;
	int rv, i;

	if (securelevel >= 2)
		return (EPERM);

	sc = device_lookup(&iop_cd, minor(dev));

	switch (cmd) {
	case IOPIOCPT:
		return (iop_passthrough(sc, (struct ioppt *)data));

	case IOPIOCGSTATUS:
		iov = (struct iovec *)data;
		i = sizeof(struct i2o_status);
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		if ((rv = iop_status_get(sc, 0)) == 0)
			rv = copyout(&sc->sc_status, iov->iov_base, i);
		return (rv);

	case IOPIOCGLCT:
	case IOPIOCGTIDMAP:
	case IOPIOCRECONFIG:
		break;

	default:
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
#endif
		return (ENOTTY);
	}

	if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
		return (rv);

	switch (cmd) {
	case IOPIOCGLCT:
		iov = (struct iovec *)data;
		i = le16toh(sc->sc_lct->tablesize) << 2;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_lct, iov->iov_base, i);
		break;

	case IOPIOCRECONFIG:
		rv = iop_reconfigure(sc, 0);
		break;

	case IOPIOCGTIDMAP:
		iov = (struct iovec *)data;
		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
		break;
	}

	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
	return (rv);
}
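
/*
 * Userland sketch for IOPIOCGSTATUS (hypothetical; assumes /dev/iop0
 * and the definitions from <dev/i2o/iopio.h>).  The iovec supplies
 * the destination buffer and, on return, holds the size copied out:
 *
 *	struct i2o_status st;
 *	struct iovec iov = { &st, sizeof(st) };
 *	int fd;
 *
 *	if ((fd = open("/dev/iop0", O_RDWR)) < 0 ||
 *	    ioctl(fd, IOPIOCGSTATUS, &iov) != 0)
 *		err(1, "IOPIOCGSTATUS");
 */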

static int
iop_passthrough(struct iop_softc *sc, struct ioppt *pt)
{
	struct iop_msg *im;
	struct i2o_msg *mf;
	struct ioppt_buf *ptb;
	int rv, i, mapped;
	void *buf;

	mf = NULL;
	im = NULL;
	mapped = 0;	/* set only once iop_msg_map() has succeeded */

	if (pt->pt_msglen > IOP_MAX_MSG_SIZE ||
	    pt->pt_msglen > (le16toh(sc->sc_status.inboundmframesize) << 2) ||
	    pt->pt_msglen < sizeof(struct i2o_msg) ||
	    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
	    pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
	    pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
		return (EINVAL);

	for (i = 0; i < pt->pt_nbufs; i++)
		if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
			rv = ENOMEM;
			goto bad;
		}

	mf = malloc(IOP_MAX_MSG_SIZE, M_DEVBUF, M_WAITOK);
	if (mf == NULL)
		return (ENOMEM);

	if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
		goto bad;

	im = iop_msg_alloc(sc, NULL, IM_WAIT | IM_NOSTATUS);
	im->im_rb = (struct i2o_reply *)mf;
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	for (i = 0; i < pt->pt_nbufs; i++) {
		ptb = &pt->pt_bufs[i];
		buf = sc->sc_ptb + i * IOP_MAX_XFER;

		if ((u_int)ptb->ptb_datalen > IOP_MAX_XFER) {
			rv = EINVAL;
			goto bad;
		}

		if (ptb->ptb_out != 0) {
			rv = copyin(ptb->ptb_data, buf, ptb->ptb_datalen);
			if (rv != 0)
				goto bad;
		}

		rv = iop_msg_map(sc, im, (u_int32_t *)mf, buf,
		    ptb->ptb_datalen, ptb->ptb_out != 0);
		if (rv != 0)
			goto bad;
		mapped = 1;
	}

	if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
		goto bad;

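	/*
	 * Bits 16-31 of msgflags hold the reply size in 32-bit words;
	 * (x >> 14) & ~3 converts that to a byte count directly.
	 */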
	i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
	if (i > IOP_MAX_MSG_SIZE)
		i = IOP_MAX_MSG_SIZE;
	if (i > pt->pt_replylen)
		i = pt->pt_replylen;
	if ((rv = copyout(im->im_rb, pt->pt_reply, i)) != 0)
		goto bad;

	iop_msg_unmap(sc, im);
	mapped = 0;

	for (i = 0; i < pt->pt_nbufs; i++) {
		ptb = &pt->pt_bufs[i];
		if (ptb->ptb_out != 0)
			continue;
		buf = sc->sc_ptb + i * IOP_MAX_XFER;
		rv = copyout(buf, ptb->ptb_data, ptb->ptb_datalen);
		if (rv != 0)
			break;
	}

 bad:
	if (mapped != 0)
		iop_msg_unmap(sc, im);
	if (im != NULL)
		iop_msg_free(sc, im);
	if (mf != NULL)
		free(mf, M_DEVBUF);
	return (rv);
}

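/*
 * Userland sketch for IOPIOCPT (hypothetical): the caller builds a raw
 * I2O message frame and receives the raw reply.  Field names are those
 * used above; the frame contents are left schematic.
 *
 *	struct ioppt pt;
 *	u_int32_t mb[IOP_MAX_MSG_SIZE / 4], rb[IOP_MAX_MSG_SIZE / 4];
 *
 *	... build a message frame in mb[] ...
 *
 *	pt.pt_msg = mb;
 *	pt.pt_msglen = <frame size in bytes>;
 *	pt.pt_reply = rb;
 *	pt.pt_replylen = sizeof(rb);
 *	pt.pt_timo = 5000;	(milliseconds; must be 1000..300000)
 *	pt.pt_nbufs = 0;	(no data transfers)
 *	if (ioctl(fd, IOPIOCPT, &pt) != 0)
 *		err(1, "IOPIOCPT");
 */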