iop.c revision 1.12 1 /* $NetBSD: iop.c,v 1.12 2001/03/21 14:27:05 ad Exp $ */
2
3 /*-
4 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Support for I2O IOPs (intelligent I/O processors).
41 */
42
43 #include "opt_i2o.h"
44 #include "iop.h"
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/device.h>
50 #include <sys/queue.h>
51 #include <sys/proc.h>
52 #include <sys/malloc.h>
53 #include <sys/ioctl.h>
54 #include <sys/endian.h>
55 #include <sys/conf.h>
56 #include <sys/kthread.h>
57
58 #include <uvm/uvm_extern.h>
59
60 #include <machine/bus.h>
61
62 #include <dev/i2o/i2o.h>
63 #include <dev/i2o/iopio.h>
64 #include <dev/i2o/iopreg.h>
65 #include <dev/i2o/iopvar.h>
66
/*
 * Busy-wait up to `ms' milliseconds for `cond' to become true,
 * checking every 100us.  `cond' is re-evaluated on each iteration and
 * must be safe to evaluate repeatedly.  No trailing semicolon here:
 * the do/while(0) idiom only works if the caller supplies the `;',
 * otherwise `POLL(...);' breaks inside if/else.
 */
#define POLL(ms, cond)							\
do {									\
	int i;								\
	for (i = (ms) * 10; i; i--) {					\
		if (cond)						\
			break;						\
		DELAY(100);						\
	}								\
} while (/* CONSTCOND */0)
76
77 #ifdef I2ODEBUG
78 #define DPRINTF(x) printf x
79 #else
80 #define DPRINTF(x)
81 #endif
82
83 #ifdef I2OVERBOSE
84 #define IFVERBOSE(x) x
85 #define COMMENT(x) NULL
86 #else
87 #define IFVERBOSE(x)
88 #define COMMENT(x)
89 #endif
90
91 #define IOP_ICTXHASH_NBUCKETS 16
92 #define IOP_ICTXHASH(ictx) (&iop_ictxhashtbl[(ictx) & iop_ictxhash])
93
94 #define IOP_MAX_SEGS (((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)
95
96 #define IOP_TCTX_SHIFT 12
97 #define IOP_TCTX_MASK ((1 << IOP_TCTX_SHIFT) - 1)
98
99 static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
100 static u_long iop_ictxhash;
101 static void *iop_sdh;
102 static struct i2o_systab *iop_systab;
103 static int iop_systab_size;
104
105 extern struct cfdriver iop_cd;
106
107 #define IC_CONFIGURE 0x01
108 #define IC_PRIORITY 0x02
109
110 struct iop_class {
111 u_short ic_class;
112 u_short ic_flags;
113 #ifdef I2OVERBOSE
114 const char *ic_caption;
115 #endif
116 } static const iop_class[] = {
117 {
118 I2O_CLASS_EXECUTIVE,
119 0,
120 COMMENT("executive")
121 },
122 {
123 I2O_CLASS_DDM,
124 0,
125 COMMENT("device driver module")
126 },
127 {
128 I2O_CLASS_RANDOM_BLOCK_STORAGE,
129 IC_CONFIGURE | IC_PRIORITY,
130 IFVERBOSE("random block storage")
131 },
132 {
133 I2O_CLASS_SEQUENTIAL_STORAGE,
134 IC_CONFIGURE | IC_PRIORITY,
135 IFVERBOSE("sequential storage")
136 },
137 {
138 I2O_CLASS_LAN,
139 IC_CONFIGURE | IC_PRIORITY,
140 IFVERBOSE("LAN port")
141 },
142 {
143 I2O_CLASS_WAN,
144 IC_CONFIGURE | IC_PRIORITY,
145 IFVERBOSE("WAN port")
146 },
147 {
148 I2O_CLASS_FIBRE_CHANNEL_PORT,
149 IC_CONFIGURE,
150 IFVERBOSE("fibrechannel port")
151 },
152 {
153 I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
154 0,
155 COMMENT("fibrechannel peripheral")
156 },
157 {
158 I2O_CLASS_SCSI_PERIPHERAL,
159 0,
160 COMMENT("SCSI peripheral")
161 },
162 {
163 I2O_CLASS_ATE_PORT,
164 IC_CONFIGURE,
165 IFVERBOSE("ATE port")
166 },
167 {
168 I2O_CLASS_ATE_PERIPHERAL,
169 0,
170 COMMENT("ATE peripheral")
171 },
172 {
173 I2O_CLASS_FLOPPY_CONTROLLER,
174 IC_CONFIGURE,
175 IFVERBOSE("floppy controller")
176 },
177 {
178 I2O_CLASS_FLOPPY_DEVICE,
179 0,
180 COMMENT("floppy device")
181 },
182 {
183 I2O_CLASS_BUS_ADAPTER_PORT,
184 IC_CONFIGURE,
185 IFVERBOSE("bus adapter port" )
186 },
187 };
188
189 #if defined(I2ODEBUG) && defined(I2OVERBOSE)
190 static const char * const iop_status[] = {
191 "success",
192 "abort (dirty)",
193 "abort (no data transfer)",
194 "abort (partial transfer)",
195 "error (dirty)",
196 "error (no data transfer)",
197 "error (partial transfer)",
198 "undefined error code",
199 "process abort (dirty)",
200 "process abort (no data transfer)",
201 "process abort (partial transfer)",
202 "transaction error",
203 };
204 #endif
205
206 static inline u_int32_t iop_inl(struct iop_softc *, int);
207 static inline void iop_outl(struct iop_softc *, int, u_int32_t);
208
209 static void iop_config_interrupts(struct device *);
210 static void iop_configure_devices(struct iop_softc *, int, int);
211 static void iop_devinfo(int, char *);
212 static int iop_print(void *, const char *);
213 static int iop_reconfigure(struct iop_softc *, u_int);
214 static void iop_shutdown(void *);
215 static int iop_submatch(struct device *, struct cfdata *, void *);
216 #ifdef notyet
217 static int iop_vendor_print(void *, const char *);
218 #endif
219
220 static void iop_adjqparam(struct iop_softc *, int);
221 static void iop_create_reconf_thread(void *);
222 static int iop_handle_reply(struct iop_softc *, u_int32_t);
223 static int iop_hrt_get(struct iop_softc *);
224 static int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
225 static void iop_intr_event(struct device *, struct iop_msg *, void *);
226 static int iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
227 u_int32_t);
228 static void iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
229 static void iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
230 static int iop_ofifo_init(struct iop_softc *);
231 static int iop_passthrough(struct iop_softc *, struct ioppt *);
232 static int iop_post(struct iop_softc *, u_int32_t *);
233 static void iop_reconf_thread(void *);
234 static void iop_release_mfa(struct iop_softc *, u_int32_t);
235 static int iop_reset(struct iop_softc *);
236 static int iop_status_get(struct iop_softc *, int);
237 static int iop_systab_set(struct iop_softc *);
238 static void iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);
239
240 #ifdef I2ODEBUG
241 static void iop_reply_print(struct iop_softc *, struct i2o_reply *);
242 #endif
243
244 cdev_decl(iop);
245
/*
 * Read a 32-bit register from the IOP.  A full read/write barrier is
 * issued first so the read is not reordered relative to earlier
 * register accesses.
 */
static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}
254
/*
 * Write a 32-bit register on the IOP.  A write barrier follows the
 * store so that it is pushed out before any subsequent access.
 */
static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
263
264 /*
265 * Initialise the IOP and our interface.
266 */
267 void
268 iop_init(struct iop_softc *sc, const char *intrstr)
269 {
270 struct iop_msg *im;
271 int rv, i;
272 u_int32_t mask;
273 char ident[64];
274
275 if (iop_ictxhashtbl == NULL)
276 iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
277 M_DEVBUF, M_NOWAIT, &iop_ictxhash);
278
279 /* Reset the IOP and request status. */
280 printf("I2O adapter");
281
282 if ((rv = iop_reset(sc)) != 0) {
283 printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
284 return;
285 }
286 if ((rv = iop_status_get(sc, 1)) != 0) {
287 printf("%s: not responding (get status)\n", sc->sc_dv.dv_xname);
288 return;
289 }
290 sc->sc_flags |= IOP_HAVESTATUS;
291 iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
292 ident, sizeof(ident));
293 printf(" <%s>\n", ident);
294
295 #ifdef I2ODEBUG
296 printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
297 le16toh(sc->sc_status.orgid),
298 (le32toh(sc->sc_status.segnumber) >> 12) & 15);
299 printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
300 printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
301 le32toh(sc->sc_status.desiredprivmemsize),
302 le32toh(sc->sc_status.currentprivmemsize),
303 le32toh(sc->sc_status.currentprivmembase));
304 printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
305 le32toh(sc->sc_status.desiredpriviosize),
306 le32toh(sc->sc_status.currentpriviosize),
307 le32toh(sc->sc_status.currentpriviobase));
308 #endif
309
310 sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
311 if (sc->sc_maxob > IOP_MAX_OUTBOUND)
312 sc->sc_maxob = IOP_MAX_OUTBOUND;
313 sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
314 if (sc->sc_maxib > IOP_MAX_INBOUND)
315 sc->sc_maxib = IOP_MAX_INBOUND;
316
317 /* Allocate message wrappers. */
318 im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT);
319 memset(im, 0, sizeof(*im) * sc->sc_maxib);
320 sc->sc_ims = im;
321 SLIST_INIT(&sc->sc_im_freelist);
322
323 for (i = 0; i < sc->sc_maxib; i++, im++) {
324 rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
325 IOP_MAX_SEGS, IOP_MAX_XFER, 0,
326 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
327 &im->im_xfer[0].ix_map);
328 if (rv != 0) {
329 printf("%s: couldn't create dmamap (%d)",
330 sc->sc_dv.dv_xname, rv);
331 return;
332 }
333
334 im->im_tctx = i;
335 SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
336 }
337
338 /* Initalise the IOP's outbound FIFO. */
339 if (iop_ofifo_init(sc) != 0) {
340 printf("%s: unable to init oubound FIFO\n", sc->sc_dv.dv_xname);
341 return;
342 }
343
344 /*
345 * Defer further configuration until (a) interrupts are working and
346 * (b) we have enough information to build the system table.
347 */
348 config_interrupts((struct device *)sc, iop_config_interrupts);
349
350 /* Configure shutdown hook before we start any device activity. */
351 if (iop_sdh == NULL)
352 iop_sdh = shutdownhook_establish(iop_shutdown, NULL);
353
354 /* Ensure interrupts are enabled at the IOP. */
355 mask = iop_inl(sc, IOP_REG_INTR_MASK);
356 iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);
357
358 if (intrstr != NULL)
359 printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
360 intrstr);
361
362 #ifdef I2ODEBUG
363 printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
364 sc->sc_dv.dv_xname, sc->sc_maxib,
365 le32toh(sc->sc_status.maxinboundmframes),
366 sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
367 #endif
368
369 lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
370 }
371
372 /*
373 * Perform autoconfiguration tasks.
374 */
375 static void
376 iop_config_interrupts(struct device *self)
377 {
378 struct iop_softc *sc, *iop;
379 struct i2o_systab_entry *ste;
380 int rv, i, niop;
381
382 sc = (struct iop_softc *)self;
383 LIST_INIT(&sc->sc_iilist);
384
385 printf("%s: configuring...\n", sc->sc_dv.dv_xname);
386
387 if (iop_hrt_get(sc) != 0) {
388 printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
389 return;
390 }
391
392 /*
393 * Build the system table.
394 */
395 if (iop_systab == NULL) {
396 for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
397 if ((iop = device_lookup(&iop_cd, i)) == NULL)
398 continue;
399 if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
400 continue;
401 if (iop_status_get(iop, 1) != 0) {
402 printf("%s: unable to retrieve status\n",
403 sc->sc_dv.dv_xname);
404 iop->sc_flags &= ~IOP_HAVESTATUS;
405 continue;
406 }
407 niop++;
408 }
409 if (niop == 0)
410 return;
411
412 i = sizeof(struct i2o_systab_entry) * (niop - 1) +
413 sizeof(struct i2o_systab);
414 iop_systab_size = i;
415 iop_systab = malloc(i, M_DEVBUF, M_NOWAIT);
416
417 memset(iop_systab, 0, i);
418 iop_systab->numentries = niop;
419 iop_systab->version = I2O_VERSION_11;
420
421 for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
422 if ((iop = device_lookup(&iop_cd, i)) == NULL)
423 continue;
424 if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
425 continue;
426
427 ste->orgid = iop->sc_status.orgid;
428 ste->iopid = iop->sc_dv.dv_unit + 2;
429 ste->segnumber =
430 htole32(le32toh(iop->sc_status.segnumber) & ~4095);
431 ste->iopcaps = iop->sc_status.iopcaps;
432 ste->inboundmsgframesize =
433 iop->sc_status.inboundmframesize;
434 ste->inboundmsgportaddresslow =
435 htole32(iop->sc_memaddr + IOP_REG_IFIFO);
436 ste++;
437 }
438 }
439
440 /*
441 * Post the system table to the IOP and bring it to the OPERATIONAL
442 * state.
443 */
444 if (iop_systab_set(sc) != 0) {
445 printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
446 return;
447 }
448 if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
449 30000) != 0) {
450 printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
451 return;
452 }
453
454 /*
455 * Set up an event handler for this IOP.
456 */
457 sc->sc_eventii.ii_dv = self;
458 sc->sc_eventii.ii_intr = iop_intr_event;
459 sc->sc_eventii.ii_flags = II_DISCARD | II_UTILITY;
460 sc->sc_eventii.ii_tid = I2O_TID_IOP;
461 iop_initiator_register(sc, &sc->sc_eventii);
462
463 rv = iop_util_eventreg(sc, &sc->sc_eventii,
464 I2O_EVENT_EXEC_RESOURCE_LIMITS |
465 I2O_EVENT_EXEC_CONNECTION_FAIL |
466 I2O_EVENT_EXEC_ADAPTER_FAULT |
467 I2O_EVENT_EXEC_POWER_FAIL |
468 I2O_EVENT_EXEC_RESET_PENDING |
469 I2O_EVENT_EXEC_RESET_IMMINENT |
470 I2O_EVENT_EXEC_HARDWARE_FAIL |
471 I2O_EVENT_EXEC_XCT_CHANGE |
472 I2O_EVENT_EXEC_DDM_AVAILIBILITY |
473 I2O_EVENT_GEN_DEVICE_RESET |
474 I2O_EVENT_GEN_STATE_CHANGE |
475 I2O_EVENT_GEN_GENERAL_WARNING);
476 if (rv != 0) {
477 printf("%s: unable to register for events", sc->sc_dv.dv_xname);
478 return;
479 }
480
481 #ifdef notyet
482 /* Attempt to match and attach a product-specific extension. */
483 ia.ia_class = I2O_CLASS_ANY;
484 ia.ia_tid = I2O_TID_IOP;
485 config_found_sm(self, &ia, iop_vendor_print, iop_submatch);
486 #endif
487
488 lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
489 if ((rv = iop_reconfigure(sc, 0)) == -1) {
490 printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
491 return;
492 }
493 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
494
495 kthread_create(iop_create_reconf_thread, sc);
496 }
497
498 /*
499 * Create the reconfiguration thread. Called after the standard kernel
500 * threads have been created.
501 */
502 static void
503 iop_create_reconf_thread(void *cookie)
504 {
505 struct iop_softc *sc;
506 int rv;
507
508 sc = cookie;
509 sc->sc_flags |= IOP_ONLINE;
510
511 rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
512 "%s", sc->sc_dv.dv_xname);
513 if (rv != 0) {
514 printf("%s: unable to create reconfiguration thread (%d)",
515 sc->sc_dv.dv_xname, rv);
516 return;
517 }
518 }
519
/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	/* Ask to be notified of the next change after the current one. */
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		/*
		 * iop_lct_get0() with a non-zero change indicator is a
		 * notification request and may block indefinitely; hold
		 * the thread's process across the wait.
		 */
		PHOLD(sc->sc_reconf_proc);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(sc->sc_reconf_proc);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		/* Reconfigure only if we can take the config lock. */
		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		/* Rate-limit: wait 5 seconds before asking again. */
		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}
556
557 /*
558 * Reconfigure: find new and removed devices.
559 */
560 static int
561 iop_reconfigure(struct iop_softc *sc, u_int chgind)
562 {
563 struct iop_msg *im;
564 struct i2o_hba_bus_scan mf;
565 struct i2o_lct_entry *le;
566 struct iop_initiator *ii, *nextii;
567 int rv, tid, i;
568
569 /*
570 * If the reconfiguration request isn't the result of LCT change
571 * notification, then be more thorough: ask all bus ports to scan
572 * their busses. Wait up to 5 minutes for each bus port to complete
573 * the request.
574 */
575 if (chgind == 0) {
576 if ((rv = iop_lct_get(sc)) != 0) {
577 DPRINTF(("iop_reconfigure: unable to read LCT\n"));
578 return (rv);
579 }
580
581 le = sc->sc_lct->entry;
582 for (i = 0; i < sc->sc_nlctent; i++, le++) {
583 if ((le16toh(le->classid) & 4095) !=
584 I2O_CLASS_BUS_ADAPTER_PORT)
585 continue;
586 tid = le32toh(le->localtid) & 4095;
587
588 im = iop_msg_alloc(sc, NULL, IM_WAIT);
589
590 mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
591 mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
592 mf.msgictx = IOP_ICTX;
593 mf.msgtctx = im->im_tctx;
594
595 DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
596 tid));
597
598 rv = iop_msg_post(sc, im, &mf, 5*60*1000);
599 iop_msg_free(sc, im);
600 #ifdef I2ODEBUG
601 if (rv != 0)
602 printf("%s: bus scan failed\n",
603 sc->sc_dv.dv_xname);
604 #endif
605 }
606 } else if (chgind <= sc->sc_chgind) {
607 DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
608 return (0);
609 }
610
611 /* Re-read the LCT and determine if it has changed. */
612 if ((rv = iop_lct_get(sc)) != 0) {
613 DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
614 return (rv);
615 }
616 DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));
617
618 chgind = le32toh(sc->sc_lct->changeindicator);
619 if (chgind == sc->sc_chgind) {
620 DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
621 return (0);
622 }
623 DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
624 sc->sc_chgind = chgind;
625
626 if (sc->sc_tidmap != NULL)
627 free(sc->sc_tidmap, M_DEVBUF);
628 sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
629 M_DEVBUF, M_NOWAIT);
630 memset(sc->sc_tidmap, 0, sizeof(sc->sc_tidmap));
631
632 /* Allow 1 queued command per device while we're configuring. */
633 iop_adjqparam(sc, 1);
634
635 /*
636 * Match and attach child devices. We configure high-level devices
637 * first so that any claims will propagate throughout the LCT,
638 * hopefully masking off aliased devices as a result.
639 *
640 * Re-reading the LCT at this point is a little dangerous, but we'll
641 * trust the IOP (and the operator) to behave itself...
642 */
643 iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
644 IC_CONFIGURE | IC_PRIORITY);
645 if ((rv = iop_lct_get(sc)) != 0)
646 DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
647 iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
648 IC_CONFIGURE);
649
650 for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
651 nextii = LIST_NEXT(ii, ii_list);
652
653 /* Detach devices that were configured, but are now gone. */
654 for (i = 0; i < sc->sc_nlctent; i++)
655 if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
656 break;
657 if (i == sc->sc_nlctent ||
658 (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
659 config_detach(ii->ii_dv, DETACH_FORCE);
660
661 /*
662 * Tell initiators that existed before the re-configuration
663 * to re-configure.
664 */
665 if (ii->ii_reconfig == NULL)
666 continue;
667 if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
668 printf("%s: %s failed reconfigure (%d)\n",
669 sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
670 }
671
672 /* Re-adjust queue parameters and return. */
673 if (sc->sc_nii != 0)
674 iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
675 / sc->sc_nii);
676
677 return (0);
678 }
679
/*
 * Configure I2O devices into the system.
 *
 * Walk the current LCT; for each entry that is unclaimed (or claimed
 * by the host), of a class whose flags match `maskval' under `mask',
 * and not already attached, attempt to attach a child device.  The
 * TID map is updated to record which entries were configured.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		/* TIDs occupy the low 12 bits of the LCT fields. */
		sc->sc_tidmap[i].it_tid = le32toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				/* Already attached; just record it. */
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		/* Attempt to attach a matching child driver. */
		dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}
735
736 /*
737 * Adjust queue parameters for all child devices.
738 */
739 static void
740 iop_adjqparam(struct iop_softc *sc, int mpi)
741 {
742 struct iop_initiator *ii;
743
744 LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
745 if (ii->ii_adjqparam != NULL)
746 (*ii->ii_adjqparam)(ii->ii_dv, mpi);
747 }
748
/*
 * Format a human-readable description of the given I2O class into
 * `devinfo'.  With I2OVERBOSE, known classes get their caption from
 * the class table; unknown classes (and all classes without
 * I2OVERBOSE) are rendered as a hex class code.
 */
static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;
	const int nclass = sizeof(iop_class) / sizeof(iop_class[0]);

	i = 0;
	while (i < nclass && iop_class[i].ic_class != class)
		i++;

	if (i != nclass)
		strcpy(devinfo, iop_class[i].ic_caption);
	else
		sprintf(devinfo, "device (class 0x%x)", class);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}
768
769 static int
770 iop_print(void *aux, const char *pnp)
771 {
772 struct iop_attach_args *ia;
773 char devinfo[256];
774
775 ia = aux;
776
777 if (pnp != NULL) {
778 iop_devinfo(ia->ia_class, devinfo);
779 printf("%s at %s", devinfo, pnp);
780 }
781 printf(" tid %d", ia->ia_tid);
782 return (UNCONF);
783 }
784
785 #ifdef notyet
786 static int
787 iop_vendor_print(void *aux, const char *pnp)
788 {
789
790 if (pnp != NULL)
791 printf("vendor specific extension at %s", pnp);
792 return (UNCONF);
793 }
794 #endif
795
796 static int
797 iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
798 {
799 struct iop_attach_args *ia;
800
801 ia = aux;
802
803 if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
804 return (0);
805
806 return ((*cf->cf_attach->ca_match)(parent, cf, aux));
807 }
808
809 /*
810 * Shut down all configured IOPs.
811 */
812 static void
813 iop_shutdown(void *junk)
814 {
815 struct iop_softc *sc;
816 int i;
817
818 printf("shutting down iop devices...");
819
820 for (i = 0; i < iop_cd.cd_ndevs; i++) {
821 if ((sc = device_lookup(&iop_cd, i)) == NULL)
822 continue;
823 if ((sc->sc_flags & IOP_ONLINE) == 0)
824 continue;
825 iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
826 0, 5000);
827 iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX,
828 0, 1000);
829 }
830
831 /* Wait. Some boards could still be flushing, stupidly enough. */
832 delay(5000*1000);
833 printf(" done.\n");
834 }
835
/*
 * Retrieve IOP status.
 *
 * The status block is transferred directly into sc->sc_status by the
 * IOP; completion is detected by the sync byte becoming 0xff.  If
 * `nosleep' is set we busy-wait (usable before the scheduler runs),
 * otherwise we tsleep() between polls.
 */
static int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	int rv, i;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	/*
	 * XXX physical address obtained with kvtop() rather than via
	 * bus_dma; presumably the status block never crosses a page
	 * boundary — verify.
	 */
	mf.addrlow = kvtop((caddr_t)&sc->sc_status); /* XXX */
	mf.addrhigh = 0;
	mf.length = sizeof(sc->sc_status);

	/* Zero first so the sync byte check can't see stale data. */
	memset(&sc->sc_status, 0, sizeof(sc->sc_status));

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	/* XXX Poll up to 25 times, ~100ms apart, for the sync byte. */
	for (i = 25; i != 0; i--) {
		if (*((volatile u_char *)&sc->sc_status.syncbyte) == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (*((volatile u_char *)&sc->sc_status.syncbyte) != 0xff)
		rv = EIO;
	else
		rv = 0;
	return (rv);
}
876
/*
 * Initialise and populate the IOP's outbound FIFO.
 */
880 static int
881 iop_ofifo_init(struct iop_softc *sc)
882 {
883 struct iop_msg *im;
884 volatile u_int32_t status;
885 bus_addr_t addr;
886 bus_dma_segment_t seg;
887 struct i2o_exec_outbound_init *mf;
888 int i, rseg, rv;
889 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
890
891 im = iop_msg_alloc(sc, NULL, IM_POLL);
892
893 mf = (struct i2o_exec_outbound_init *)mb;
894 mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
895 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
896 mf->msgictx = IOP_ICTX;
897 mf->msgtctx = im->im_tctx;
898 mf->pagesize = PAGE_SIZE;
899 mf->flags = IOP_INIT_CODE | ((IOP_MAX_MSG_SIZE >> 2) << 16);
900
901 status = 0;
902
903 /*
904 * The I2O spec says that there are two SGLs: one for the status
905 * word, and one for a list of discarded MFAs. It continues to say
906 * that if you don't want to get the list of MFAs, an IGNORE SGL is
907 * necessary; this isn't the case (and is in fact a bad thing).
908 */
909 iop_msg_map(sc, im, mb, (void *)&status, sizeof(status), 0);
910 if ((rv = iop_msg_post(sc, im, mb, 0)) != 0) {
911 iop_msg_free(sc, im);
912 return (rv);
913 }
914 iop_msg_unmap(sc, im);
915 iop_msg_free(sc, im);
916
917 /* XXX */
918 POLL(5000, status == I2O_EXEC_OUTBOUND_INIT_COMPLETE);
919 if (status != I2O_EXEC_OUTBOUND_INIT_COMPLETE) {
920 printf("%s: outbound FIFO init failed\n", sc->sc_dv.dv_xname);
921 return (EIO);
922 }
923
924 /* Allocate DMA safe memory for the reply frames. */
925 if (sc->sc_rep_phys == 0) {
926 sc->sc_rep_size = sc->sc_maxob * IOP_MAX_MSG_SIZE;
927
928 rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
929 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
930 if (rv != 0) {
931 printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
932 rv);
933 return (rv);
934 }
935
936 rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
937 &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
938 if (rv != 0) {
939 printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
940 return (rv);
941 }
942
943 rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
944 sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
945 if (rv != 0) {
946 printf("%s: dma create = %d\n", sc->sc_dv.dv_xname, rv);
947 return (rv);
948 }
949
950 rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap, sc->sc_rep,
951 sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
952 if (rv != 0) {
953 printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
954 return (rv);
955 }
956
957 sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
958 }
959
960 /* Populate the outbound FIFO. */
961 for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
962 iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
963 addr += IOP_MAX_MSG_SIZE;
964 }
965
966 return (0);
967 }
968
969 /*
970 * Read the specified number of bytes from the IOP's hardware resource table.
971 */
972 static int
973 iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
974 {
975 struct iop_msg *im;
976 int rv;
977 struct i2o_exec_hrt_get *mf;
978 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
979
980 im = iop_msg_alloc(sc, NULL, IM_WAIT);
981 mf = (struct i2o_exec_hrt_get *)mb;
982 mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
983 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
984 mf->msgictx = IOP_ICTX;
985 mf->msgtctx = im->im_tctx;
986
987 iop_msg_map(sc, im, mb, hrt, size, 0);
988 rv = iop_msg_post(sc, im, mb, 30000);
989 iop_msg_unmap(sc, im);
990 iop_msg_free(sc, im);
991 return (rv);
992 }
993
994 /*
995 * Read the IOP's hardware resource table.
996 */
997 static int
998 iop_hrt_get(struct iop_softc *sc)
999 {
1000 struct i2o_hrt hrthdr, *hrt;
1001 int size, rv;
1002
1003 PHOLD(curproc);
1004 rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
1005 PRELE(curproc);
1006 if (rv != 0)
1007 return (rv);
1008
1009 DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
1010 le16toh(hrthdr.numentries)));
1011
1012 size = sizeof(struct i2o_hrt) +
1013 (htole32(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
1014 hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
1015
1016 if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
1017 free(hrt, M_DEVBUF);
1018 return (rv);
1019 }
1020
1021 if (sc->sc_hrt != NULL)
1022 free(sc->sc_hrt, M_DEVBUF);
1023 sc->sc_hrt = hrt;
1024 return (0);
1025 }
1026
/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller is prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
    u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, NULL, IM_WAIT);
	/* Zero the destination so a short reply leaves no stale data. */
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	/*
	 * NOTE(review): chgind is stored without htole32(); verify the
	 * byte order expected by the IOP on big-endian machines.
	 */
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	/*
	 * A plain read (chgind == 0) times out after 120 seconds; a
	 * notification request (chgind != 0) is posted with no timeout.
	 */
	iop_msg_map(sc, im, mb, lct, size, 0);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}
1066
1067 /*
1068 * Read the IOP's logical configuration table.
1069 */
1070 int
1071 iop_lct_get(struct iop_softc *sc)
1072 {
1073 int esize, size, rv;
1074 struct i2o_lct *lct;
1075
1076 esize = le32toh(sc->sc_status.expectedlctsize);
1077 lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
1078 if (lct == NULL)
1079 return (ENOMEM);
1080
1081 if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
1082 free(lct, M_DEVBUF);
1083 return (rv);
1084 }
1085
1086 size = le16toh(lct->tablesize) << 2;
1087 if (esize != size) {
1088 free(lct, M_DEVBUF);
1089 lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
1090 if (lct == NULL)
1091 return (ENOMEM);
1092
1093 if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
1094 free(lct, M_DEVBUF);
1095 return (rv);
1096 }
1097 }
1098
1099 /* Swap in the new LCT. */
1100 if (sc->sc_lct != NULL)
1101 free(sc->sc_lct, M_DEVBUF);
1102 sc->sc_lct = lct;
1103 sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
1104 sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
1105 sizeof(struct i2o_lct_entry);
1106 return (0);
1107 }
1108
1109 /*
1110 * Request the specified parameter group from the target. If an initiator
1111 * is specified (a) don't wait for the operation to complete, but instead
1112 * let the initiator's interrupt handler deal with the reply and (b) place a
1113 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
1114 */
int
iop_param_op(struct iop_softc *sc, int tid, struct iop_initiator *ii,
	     int write, int group, void *buf, int size)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct i2o_reply *rf;
	int rv, func, op;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	/* Synchronous (ii == NULL) callers sleep on the reply. */
	im = iop_msg_alloc(sc, ii, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		return (ENOMEM);
	}
	/* Async callers find the op via im_dvcontext; reply lands in rf. */
	im->im_dvcontext = pgop;
	im->im_rb = rf;

	if (write) {
		func = I2O_UTIL_PARAMS_SET;
		op = I2O_PARAMS_OP_FIELD_SET;
	} else {
		func = I2O_UTIL_PARAMS_GET;
		op = I2O_PARAMS_OP_FIELD_GET;
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, func);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	/* Build the parameter-group operation list: one op, all fields. */
	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(op);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curproc);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1);
	iop_msg_map(sc, im, mb, buf, size, write);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curproc);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (rf->reqstatus != 0 ? EIO : 0);
	}

	/*
	 * For async requests that were successfully posted, ownership of
	 * im/pgop/rf passes to the initiator's interrupt handler.
	 */
	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		free(rf, M_DEVBUF);
	}

	return (rv);
}
1189
1190 /*
1191 * Execute a simple command (no parameters).
1192 */
1193 int
1194 iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
1195 int async, int timo)
1196 {
1197 struct iop_msg *im;
1198 struct i2o_msg mf;
1199 int rv, fl;
1200
1201 fl = (async != 0 ? IM_WAIT : IM_POLL);
1202 im = iop_msg_alloc(sc, NULL, fl);
1203
1204 mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1205 mf.msgfunc = I2O_MSGFUNC(tid, function);
1206 mf.msgictx = ictx;
1207 mf.msgtctx = im->im_tctx;
1208
1209 rv = iop_msg_post(sc, im, &mf, timo);
1210 iop_msg_free(sc, im);
1211 return (rv);
1212 }
1213
1214 /*
1215 * Post the system table to the IOP.
1216 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, NULL, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	/* IOP ID is the unit number biased by 2, in bits 12 and up. */
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	/* XXX This is questionable, but better than nothing... */
	/*
	 * NOTE(review): le32toh() converts the status-block fields to
	 * host order, yet these arrays are handed back to the IOP via
	 * DMA -- on a big-endian host the IOP would see swapped values.
	 * Confirm the intended byte order.
	 */
	mema[0] = le32toh(sc->sc_status.currentprivmembase);
	mema[1] = le32toh(sc->sc_status.currentprivmemsize);
	ioa[0] = le32toh(sc->sc_status.currentpriviobase);
	ioa[1] = le32toh(sc->sc_status.currentpriviosize);

	/* Pin the process while DMA referencing its stack is pending. */
	PHOLD(curproc);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curproc);
	return (rv);
}
1252
1253 /*
1254 * Reset the IOP. Must be called with interrupts disabled.
1255 */
static int
iop_reset(struct iop_softc *sc)
{
	volatile u_int32_t sw;	/* status word the IOP writes via DMA */
	u_int32_t mfa;
	struct i2o_exec_iop_reset mf;
	int rv;

	sw = 0;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	/* Physical address for the IOP to deposit its reset status. */
	mf.statuslow = kvtop((caddr_t)&sw);		/* XXX */
	mf.statushigh = 0;

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	/* Spin up to 2.5s for the IOP to acknowledge the reset. */
	POLL(2500, sw != 0);				/* XXX */
	if (sw != I2O_RESET_IN_PROGRESS) {
		printf("%s: reset rejected\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	/* We only needed to probe the FIFO; give the frame back. */
	iop_release_mfa(sc, mfa);
	return (0);
}
1297
1298 /*
1299 * Register a new initiator. Must be called with the configuration lock
1300 * held.
1301 */
1302 void
1303 iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
1304 {
1305 static int ictxgen;
1306 int s;
1307
1308 /* 0 is reserved (by us) for system messages. */
1309 ii->ii_ictx = ++ictxgen;
1310
1311 /*
1312 * `Utility initiators' don't make it onto the per-IOP initiator list
1313 * (which is used only for configuration), but do get one slot on
1314 * the inbound queue.
1315 */
1316 if ((ii->ii_flags & II_UTILITY) == 0) {
1317 LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
1318 sc->sc_nii++;
1319 } else
1320 sc->sc_nuii++;
1321
1322 s = splbio();
1323 LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
1324 splx(s);
1325 }
1326
1327 /*
1328 * Unregister an initiator. Must be called with the configuration lock
1329 * held.
1330 */
1331 void
1332 iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
1333 {
1334 int s;
1335
1336 if ((ii->ii_flags & II_UTILITY) == 0) {
1337 LIST_REMOVE(ii, ii_list);
1338 sc->sc_nii--;
1339 } else
1340 sc->sc_nuii--;
1341
1342 s = splbio();
1343 LIST_REMOVE(ii, ii_hash);
1344 splx(s);
1345 }
1346
1347 /*
1348 * Handle a reply frame from the IOP.
1349 */
1350 static int
1351 iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
1352 {
1353 struct iop_msg *im;
1354 struct i2o_reply *rb;
1355 struct i2o_fault_notify *fn;
1356 struct iop_initiator *ii;
1357 u_int off, ictx, tctx, status, size;
1358
1359 off = (int)(rmfa - sc->sc_rep_phys);
1360 rb = (struct i2o_reply *)(sc->sc_rep + off);
1361
1362 /* Perform reply queue DMA synchronisation. XXX This is rubbish. */
1363 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
1364 IOP_MAX_MSG_SIZE, BUS_DMASYNC_POSTREAD);
1365 if (--sc->sc_curib != 0)
1366 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
1367 0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);
1368
1369 #ifdef I2ODEBUG
1370 if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
1371 panic("iop_handle_reply: 64-bit reply");
1372 #endif
1373 /*
1374 * Find the initiator.
1375 */
1376 ictx = le32toh(rb->msgictx);
1377 if (ictx == IOP_ICTX)
1378 ii = NULL;
1379 else {
1380 ii = LIST_FIRST(IOP_ICTXHASH(ictx));
1381 for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
1382 if (ii->ii_ictx == ictx)
1383 break;
1384 if (ii == NULL) {
1385 #ifdef I2ODEBUG
1386 iop_reply_print(sc, rb);
1387 #endif
1388 printf("%s: WARNING: bad ictx returned (%x)\n",
1389 sc->sc_dv.dv_xname, ictx);
1390 return (-1);
1391 }
1392 }
1393
1394 /*
1395 * If we recieved a transport failure notice, we've got to dig the
1396 * transaction context (if any) out of the original message frame,
1397 * and then release the original MFA back to the inbound FIFO.
1398 */
1399 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
1400 status = I2O_STATUS_SUCCESS;
1401
1402 fn = (struct i2o_fault_notify *)rb;
1403 tctx = iop_inl(sc, fn->lowmfa + 12); /* XXX */
1404 iop_release_mfa(sc, fn->lowmfa);
1405 iop_tfn_print(sc, fn);
1406 } else {
1407 status = rb->reqstatus;
1408 tctx = le32toh(rb->msgtctx);
1409 }
1410
1411 if (ii == NULL || (ii->ii_flags & II_DISCARD) == 0) {
1412 /*
1413 * This initiator tracks state using message wrappers.
1414 *
1415 * Find the originating message wrapper, and if requested
1416 * notify the initiator.
1417 */
1418 im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
1419 if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
1420 (im->im_flags & IM_ALLOCED) == 0 ||
1421 tctx != im->im_tctx) {
1422 printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
1423 sc->sc_dv.dv_xname, tctx, im);
1424 if (im != NULL)
1425 printf("%s: flags=0x%08x tctx=0x%08x\n",
1426 sc->sc_dv.dv_xname, im->im_flags,
1427 im->im_tctx);
1428 #ifdef I2ODEBUG
1429 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
1430 iop_reply_print(sc, rb);
1431 #endif
1432 return (-1);
1433 }
1434
1435 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1436 im->im_flags |= IM_FAIL;
1437
1438 #ifdef I2ODEBUG
1439 if ((im->im_flags & IM_REPLIED) != 0)
1440 panic("%s: dup reply", sc->sc_dv.dv_xname);
1441 #endif
1442 im->im_flags |= IM_REPLIED;
1443
1444 #ifdef I2ODEBUG
1445 if (status != I2O_STATUS_SUCCESS)
1446 iop_reply_print(sc, rb);
1447 #endif
1448 im->im_reqstatus = status;
1449
1450 /* Copy the reply frame, if requested. */
1451 if (im->im_rb != NULL) {
1452 size = (le32toh(rb->msgflags) >> 14) & ~3;
1453 #ifdef I2ODEBUG
1454 if (size > IOP_MAX_MSG_SIZE)
1455 panic("iop_handle_reply: reply too large");
1456 #endif
1457 memcpy(im->im_rb, rb, size);
1458 }
1459
1460 /* Notify the initiator. */
1461 if ((im->im_flags & IM_WAIT) != 0)
1462 wakeup(im);
1463 else if ((im->im_flags & IM_POLL) == 0)
1464 (*ii->ii_intr)(ii->ii_dv, im, rb);
1465 } else {
1466 /*
1467 * This initiator discards message wrappers.
1468 *
1469 * Simply pass the reply frame to the initiator.
1470 */
1471 (*ii->ii_intr)(ii->ii_dv, NULL, rb);
1472 }
1473
1474 return (status);
1475 }
1476
1477 /*
1478 * Handle an interrupt from the IOP.
1479 */
1480 int
1481 iop_intr(void *arg)
1482 {
1483 struct iop_softc *sc;
1484 u_int32_t rmfa;
1485
1486 sc = arg;
1487
1488 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
1489 return (0);
1490
1491 for (;;) {
1492 /* Double read to account for IOP bug. */
1493 if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
1494 rmfa = iop_inl(sc, IOP_REG_OFIFO);
1495 if (rmfa == IOP_MFA_EMPTY)
1496 break;
1497 }
1498 iop_handle_reply(sc, rmfa);
1499 iop_outl(sc, IOP_REG_OFIFO, rmfa);
1500 }
1501
1502 return (1);
1503 }
1504
1505 /*
1506 * Handle an event signalled by the executive.
1507 */
1508 static void
1509 iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
1510 {
1511 struct i2o_util_event_register_reply *rb;
1512 struct iop_softc *sc;
1513 u_int event;
1514
1515 sc = (struct iop_softc *)dv;
1516 rb = reply;
1517
1518 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1519 return;
1520
1521 event = le32toh(rb->event);
1522 printf("%s: event 0x%08x received\n", dv->dv_xname, event);
1523 }
1524
1525 /*
1526 * Allocate a message wrapper.
1527 */
1528 struct iop_msg *
1529 iop_msg_alloc(struct iop_softc *sc, struct iop_initiator *ii, int flags)
1530 {
1531 struct iop_msg *im;
1532 static u_int tctxgen;
1533 int s, i;
1534
1535 #ifdef I2ODEBUG
1536 if ((flags & IM_SYSMASK) != 0)
1537 panic("iop_msg_alloc: system flags specified");
1538 #endif
1539
1540 s = splbio(); /* XXX */
1541 im = SLIST_FIRST(&sc->sc_im_freelist);
1542 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
1543 if (im == NULL)
1544 panic("iop_msg_alloc: no free wrappers");
1545 #endif
1546 SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
1547 splx(s);
1548
1549 if (ii != NULL && (ii->ii_flags & II_DISCARD) != 0)
1550 flags |= IM_DISCARD;
1551
1552 im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
1553 tctxgen += (1 << IOP_TCTX_SHIFT);
1554 im->im_flags = flags | IM_ALLOCED;
1555 im->im_rb = NULL;
1556 i = 0;
1557 do {
1558 im->im_xfer[i++].ix_size = 0;
1559 } while (i < IOP_MAX_MSG_XFERS);
1560
1561 return (im);
1562 }
1563
1564 /*
1565 * Free a message wrapper.
1566 */
1567 void
1568 iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
1569 {
1570 int s;
1571
1572 #ifdef I2ODEBUG
1573 if ((im->im_flags & IM_ALLOCED) == 0)
1574 panic("iop_msg_free: wrapper not allocated");
1575 if ((im->im_flags & IM_REPLIED) == 0)
1576 printf("iop_msg_free: message was not replied to");
1577 #endif
1578
1579 im->im_flags = 0;
1580 s = splbio();
1581 SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
1582 splx(s);
1583 }
1584
1585 /*
1586 * Map a data transfer. Write a scatter-gather list into the message frame.
1587 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
	    void *xferaddr, int xfersize, int out)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	/* Find the first unused transfer slot in the wrapper. */
	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL, 0);
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 * The current message length (in words) lives in the top 16
	 * bits of the header; each SIMPLE element takes two words.
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((IOP_MAX_MSG_SIZE / 4) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	/* The final element is additionally tagged end-of-buffer. */
	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	/*
	 * NOTE(review): POSTWRITE/POSTREAD ahead of the transfer looks
	 * inverted -- PREWRITE/PREREAD would be the usual pre-DMA
	 * operations.  Confirm against bus_dma(9).
	 */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
	/* Grow the message length by two words per SG element. */
	mb[0] += (nsegs << 17);
	return (0);

 bad:
	/* Tear down any map created for this (non-first) slot. */
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}
1684
1685 /*
1686 * Map a block I/O data transfer (different in that there's only one per
1687 * message maximum, and PAGE addressing may be used). Write a scatter
1688 * gather list into the message frame.
1689 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
		void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	/* Block I/O always uses the first (static) transfer slot. */
	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL, 0);
	if (rv != 0)
		return (rv);

	/* Room left in the frame, in SIMPLE (two-word) elements. */
	off = mb[0] >> 16;
	nsegs = ((IOP_MAX_MSG_SIZE / 4) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;	/* count the PAGE_LIST header word */
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
			slen = ds->ds_len;
			saddr = ds->ds_addr;

			/* Emit one page address per page spanned. */
			while (slen > 0) {
				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
				tlen = min(eaddr - saddr, slen);
				slen -= tlen;
				/*
				 * NOTE(review): le32toh() should logically
				 * be htole32() (host -> wire); the two are
				 * the same byte-swap, so no behavior
				 * difference -- worth tidying.
				 */
				*p++ = le32toh(saddr);
				saddr = eaddr;
				nsegs++;
			}
		}

		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		if (out)
			mb[off] |= I2O_SGL_DATA_OUT;
	} else {
		p = mb + off;

		if (dm->dm_nsegs < nsegs)
			nsegs = dm->dm_nsegs;
		if (out)
			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
		else
			flg = I2O_SGL_SIMPLE;

		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
			p[0] = (u_int32_t)ds->ds_len | flg;
			p[1] = (u_int32_t)ds->ds_addr;
		}

		/* Last element terminates both the buffer and the list. */
		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		p[1] = (u_int32_t)ds->ds_addr;
		nsegs <<= 1;	/* convert elements to 32-bit words */
	}

	/* Fix up the transfer record, and sync the map. */
	/*
	 * NOTE(review): as in iop_msg_map(), POSTWRITE/POSTREAD before
	 * the transfer looks inverted -- confirm against bus_dma(9).
	 */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * Adjust the SGL offset and total message size fields.  We don't
	 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
	 */
	mb[0] += ((off << 4) + (nsegs << 16));
	return (0);
}
1781
1782 /*
1783 * Unmap all data transfers associated with a message wrapper.
1784 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

#ifdef I2ODEBUG
	if (im->im_xfer[0].ix_size == 0)
		panic("iop_msg_unmap: no transfers mapped");
#endif

	/* Walk the transfer slots until an unused one, or the end. */
	for (ix = im->im_xfer, i = 0;;) {
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
		if ((++ix)->ix_size == 0)
			break;
		if (++i >= IOP_MAX_MSG_XFERS)
			break;
	}
}
1811
1812 /*
1813 * Post a message frame to the IOP's inbound queue.
1814 */
int
iop_post(struct iop_softc *sc, u_int32_t *mb)
{
	u_int32_t mfa;
	int s;

	s = splbio();	/* XXX */

	/*
	 * Allocate a slot with the IOP.  The inbound FIFO is read twice;
	 * only a second consecutive EMPTY is treated as a real failure.
	 */
	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
			splx(s);
			printf("%s: mfa not forthcoming\n",
			    sc->sc_dv.dv_xname);
			return (EAGAIN);
		}

	/* Perform reply buffer DMA synchronisation.  XXX This is rubbish. */
	if (sc->sc_curib++ == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);

	/*
	 * Copy out the message frame.  The frame length, in 32-bit
	 * words, lives in the top 16 bits of the header word.
	 */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa, mb, mb[0] >> 16);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa, mb[0] >> 16,
	    BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	splx(s);
	return (0);
}
1848
1849 /*
1850 * Post a message to the IOP and deal with completion.
1851 */
int
iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
{
	u_int32_t *mb;
	int rv, s;

	mb = xmb;

	/* Terminate the scatter/gather list chain. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	/* Discarding initiators don't track completion; free now. */
	if ((im->im_flags & IM_DISCARD) != 0)
		iop_msg_free(sc, im);
	else if ((im->im_flags & IM_POLL) != 0 && timo == 0) {
		/* XXX For ofifo_init(). */
		rv = 0;
	} else if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
		/* Busy-wait or sleep until the reply arrives. */
		if ((im->im_flags & IM_POLL) != 0)
			iop_msg_poll(sc, im, timo);
		else
			iop_msg_wait(sc, im, timo);

		/* Map the reply state onto an errno. */
		s = splbio();
		if ((im->im_flags & IM_REPLIED) != 0) {
			if ((im->im_flags & IM_NOSTATUS) != 0)
				rv = 0;
			else if ((im->im_flags & IM_FAIL) != 0)
				rv = ENXIO;
			else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
				rv = EIO;
			else
				rv = 0;
		} else
			rv = EBUSY;	/* no reply within the timeout */
		splx(s);
	} else
		rv = 0;

	return (rv);
}
1896
1897 /*
1898 * Spin until the specified message is replied to.
1899 */
static void
iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int32_t rmfa;
	int s, status;

	s = splbio();	/* XXX */

	/* Wait for completion.  timo is in ms; we poll every 100us. */
	for (timo *= 10; timo != 0; timo--) {
		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
			/* Double read to account for IOP bug. */
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa != IOP_MFA_EMPTY) {
				/* May complete our message or another. */
				status = iop_handle_reply(sc, rmfa);

				/*
				 * Return the reply frame to the IOP's
				 * outbound FIFO.
				 */
				iop_outl(sc, IOP_REG_OFIFO, rmfa);
			}
		}
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		DELAY(100);
	}

	if (timo == 0) {
#ifdef I2ODEBUG
		printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
		if (iop_status_get(sc, 1) != 0)
			printf("iop_msg_poll: unable to retrieve status\n");
		else
			printf("iop_msg_poll: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
#endif
	}

	splx(s);
}
1943
1944 /*
1945 * Sleep until the specified message is replied to.
1946 */
static void
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	int s, rv;

	s = splbio();
	/* The reply may already have arrived; don't sleep if so. */
	if ((im->im_flags & IM_REPLIED) != 0) {
		splx(s);
		return;
	}
	/* timo is in milliseconds; tsleep() takes ticks. */
	rv = tsleep(im, PRIBIO, "iopmsg", timo * hz / 1000);
	splx(s);

#ifdef I2ODEBUG
	if (rv != 0) {
		printf("iop_msg_wait: tsleep() == %d\n", rv);
		if (iop_status_get(sc, 0) != 0)
			printf("iop_msg_wait: unable to retrieve status\n");
		else
			printf("iop_msg_wait: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
	}
#endif
}
1971
1972 /*
1973 * Release an unused message frame back to the IOP's inbound fifo.
1974 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op: version 1.1, 4-word length. */
	iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
	iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	iop_outl(sc, mfa + 8, 0);
	iop_outl(sc, mfa + 12, 0);

	/* Hand the frame back via the inbound FIFO. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);
}
1987
1988 #ifdef I2ODEBUG
1989 /*
1990 * Dump a reply frame header.
1991 */
static void
iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
{
	u_int function, detail;
#ifdef I2OVERBOSE
	const char *statusstr;
#endif

	/* Function code lives in the top byte of msgfunc. */
	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
	detail = le16toh(rb->detail);

	printf("%s: reply:\n", sc->sc_dv.dv_xname);

#ifdef I2OVERBOSE
	/* Translate the status code, guarding against table overrun. */
	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: function=0x%02x status=0x%02x (%s)\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
#else
	printf("%s: function=0x%02x status=0x%02x\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus);
#endif
	printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
	    sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
	    le32toh(rb->msgtctx));
	/* Initiator/target TIDs occupy 12-bit fields within msgfunc. */
	printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
	    (le32toh(rb->msgflags) >> 8) & 0xff);
}
2024 #endif
2025
2026 /*
2027 * Dump a transport failure reply.
2028 */
static void
iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
{

	/* Log the contents of a transport failure notification frame. */
	printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);

	printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
	printf("%s: failurecode=0x%02x severity=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
	printf("%s: highestver=0x%02x lowestver=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
}
2042
2043 /*
2044 * Translate an I2O ASCII field into a C string.
2045 */
void
iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
{
	int hc, lc, i, nit;

	dlen--;		/* reserve room for the terminating NUL */
	lc = 0;		/* index just past the last printable copied */
	hc = 0;		/* set once a printable character has been seen */
	i = 0;

	/*
	 * DPT use NUL as a space, whereas AMI use it as a terminator.  The
	 * spec has nothing to say about it.  Since AMI fields are usually
	 * filled with junk after the terminator, ...
	 */
	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);

	while (slen-- != 0 && dlen-- != 0) {
		if (nit && *src == '\0')
			break;
		else if (*src <= 0x20 || *src >= 0x7f) {
			/*
			 * Collapse non-printables to a single space, and
			 * only after at least one printable was emitted.
			 */
			if (hc)
				dst[i++] = ' ';
		} else {
			hc = 1;
			dst[i++] = *src;
			lc = i;
		}
		src++;
	}

	/* Terminate after the last printable, trimming trailing spaces. */
	dst[lc] = '\0';
}
2079
2080 /*
2081 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2082 */
int
iop_print_ident(struct iop_softc *sc, int tid)
{
	struct {
		struct i2o_param_op_results pr;
		struct i2o_param_read_results prr;
		struct i2o_param_device_identity di;
	} __attribute__ ((__packed__)) p;
	char buf[32];
	int rv;

	/* Fetch the DEVICE_IDENTITY parameter group synchronously. */
	rv = iop_param_op(sc, tid, NULL, 0, I2O_PARAM_DEVICE_IDENTITY, &p,
	    sizeof(p));
	if (rv != 0)
		return (rv);

	/* Print "<vendor, product, revision>", cleaned for display. */
	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
	    sizeof(buf));
	printf(" <%s, ", buf);
	iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
	    sizeof(buf));
	printf("%s, ", buf);
	iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
	printf("%s>", buf);

	return (0);
}
2110
2111 /*
2112 * Claim or unclaim the specified TID.
2113 */
2114 int
2115 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2116 int flags)
2117 {
2118 struct iop_msg *im;
2119 struct i2o_util_claim mf;
2120 int rv, func;
2121
2122 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2123 im = iop_msg_alloc(sc, ii, IM_WAIT);
2124
2125 /* We can use the same structure, as they're identical. */
2126 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2127 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2128 mf.msgictx = ii->ii_ictx;
2129 mf.msgtctx = im->im_tctx;
2130 mf.flags = flags;
2131
2132 rv = iop_msg_post(sc, im, &mf, 5000);
2133 iop_msg_free(sc, im);
2134 return (rv);
2135 }
2136
2137 /*
2138 * Perform an abort.
2139 */
2140 int iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2141 int tctxabort, int flags)
2142 {
2143 struct iop_msg *im;
2144 struct i2o_util_abort mf;
2145 int rv;
2146
2147 im = iop_msg_alloc(sc, ii, IM_WAIT);
2148
2149 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2150 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2151 mf.msgictx = ii->ii_ictx;
2152 mf.msgtctx = im->im_tctx;
2153 mf.flags = (func << 24) | flags;
2154 mf.tctxabort = tctxabort;
2155
2156 rv = iop_msg_post(sc, im, &mf, 5000);
2157 iop_msg_free(sc, im);
2158 return (rv);
2159 }
2160
2161 /*
2162 * Enable or disable reception of events for the specified device.
2163 */
2164 int iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2165 {
2166 struct iop_msg *im;
2167 struct i2o_util_event_register mf;
2168
2169 im = iop_msg_alloc(sc, ii, 0);
2170
2171 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2172 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2173 mf.msgictx = ii->ii_ictx;
2174 mf.msgtctx = im->im_tctx;
2175 mf.eventmask = mask;
2176
2177 /* This message is replied to only when events are signalled. */
2178 return (iop_msg_post(sc, im, &mf, 0));
2179 }
2180
2181 int
2182 iopopen(dev_t dev, int flag, int mode, struct proc *p)
2183 {
2184 struct iop_softc *sc;
2185
2186 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2187 return (ENXIO);
2188 if ((sc->sc_flags & IOP_ONLINE) == 0)
2189 return (ENXIO);
2190 if ((sc->sc_flags & IOP_OPEN) != 0)
2191 return (EBUSY);
2192 sc->sc_flags |= IOP_OPEN;
2193
2194 sc->sc_ptb = malloc(IOP_MAX_XFER * IOP_MAX_MSG_XFERS, M_DEVBUF,
2195 M_WAITOK);
2196 if (sc->sc_ptb == NULL) {
2197 sc->sc_flags ^= IOP_OPEN;
2198 return (ENOMEM);
2199 }
2200
2201 return (0);
2202 }
2203
2204 int
2205 iopclose(dev_t dev, int flag, int mode, struct proc *p)
2206 {
2207 struct iop_softc *sc;
2208
2209 sc = device_lookup(&iop_cd, minor(dev));
2210 free(sc->sc_ptb, M_DEVBUF);
2211 sc->sc_flags &= ~IOP_OPEN;
2212 return (0);
2213 }
2214
int
iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct iop_softc *sc;
	struct iovec *iov;
	int rv, i;

	/* Disallow everything at high securelevel. */
	if (securelevel >= 2)
		return (EPERM);

	/*
	 * NOTE(review): unlike iopopen(), the device_lookup() result is
	 * not checked for NULL here -- presumably unreachable since the
	 * device must be open, but confirm.
	 */
	sc = device_lookup(&iop_cd, minor(dev));

	switch (cmd) {
	case IOPIOCPT:
		/* Pass-through runs without the configuration lock. */
		return (iop_passthrough(sc, (struct ioppt *)data));

	case IOPIOCGSTATUS:
		/* Copy out at most the smaller of our size and theirs. */
		iov = (struct iovec *)data;
		i = sizeof(struct i2o_status);
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		if ((rv = iop_status_get(sc, 0)) == 0)
			rv = copyout(&sc->sc_status, iov->iov_base, i);
		return (rv);

	case IOPIOCGLCT:
	case IOPIOCGTIDMAP:
	case IOPIOCRECONFIG:
		/* These need the configuration lock; handled below. */
		break;

	default:
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
#endif
		return (ENOTTY);
	}

	if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
		return (rv);

	switch (cmd) {
	case IOPIOCGLCT:
		/* Copy out the logical configuration table. */
		iov = (struct iovec *)data;
		i = le16toh(sc->sc_lct->tablesize) << 2;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_lct, iov->iov_base, i);
		break;

	case IOPIOCRECONFIG:
		rv = iop_reconfigure(sc, 0);
		break;

	case IOPIOCGTIDMAP:
		/* Copy out the TID-to-device map. */
		iov = (struct iovec *)data;
		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
		break;
	}

	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
	return (rv);
}
2286
2287 static int
2288 iop_passthrough(struct iop_softc *sc, struct ioppt *pt)
2289 {
2290 struct iop_msg *im;
2291 struct i2o_msg *mf;
2292 struct ioppt_buf *ptb;
2293 int rv, i, mapped;
2294 void *buf;
2295
2296 mf = NULL;
2297 im = NULL;
2298 mapped = 1;
2299
2300 if (pt->pt_msglen > IOP_MAX_MSG_SIZE ||
2301 pt->pt_msglen > (le16toh(sc->sc_status.inboundmframesize) << 2) ||
2302 pt->pt_msglen < sizeof(struct i2o_msg) ||
2303 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2304 pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2305 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2306 return (EINVAL);
2307
2308 for (i = 0; i < pt->pt_nbufs; i++)
2309 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2310 rv = ENOMEM;
2311 goto bad;
2312 }
2313
2314 mf = malloc(IOP_MAX_MSG_SIZE, M_DEVBUF, M_WAITOK);
2315 if (mf == NULL)
2316 return (ENOMEM);
2317
2318 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2319 goto bad;
2320
2321 im = iop_msg_alloc(sc, NULL, IM_WAIT | IM_NOSTATUS);
2322 im->im_rb = (struct i2o_reply *)mf;
2323 mf->msgictx = IOP_ICTX;
2324 mf->msgtctx = im->im_tctx;
2325
2326 for (i = 0; i < pt->pt_nbufs; i++) {
2327 ptb = &pt->pt_bufs[i];
2328 buf = sc->sc_ptb + i * IOP_MAX_XFER;
2329
2330 if ((u_int)ptb->ptb_datalen > IOP_MAX_XFER) {
2331 rv = EINVAL;
2332 goto bad;
2333 }
2334
2335 if (ptb->ptb_out != 0) {
2336 rv = copyin(ptb->ptb_data, buf, ptb->ptb_datalen);
2337 if (rv != 0)
2338 goto bad;
2339 }
2340
2341 rv = iop_msg_map(sc, im, (u_int32_t *)mf, buf,
2342 ptb->ptb_datalen, ptb->ptb_out != 0);
2343 if (rv != 0)
2344 goto bad;
2345 mapped = 1;
2346 }
2347
2348 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2349 goto bad;
2350
2351 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2352 if (i > IOP_MAX_MSG_SIZE)
2353 i = IOP_MAX_MSG_SIZE;
2354 if (i > pt->pt_replylen)
2355 i = pt->pt_replylen;
2356 if ((rv = copyout(im->im_rb, pt->pt_reply, i)) != 0)
2357 goto bad;
2358
2359 iop_msg_unmap(sc, im);
2360 mapped = 0;
2361
2362 for (i = 0; i < pt->pt_nbufs; i++) {
2363 ptb = &pt->pt_bufs[i];
2364 if (ptb->ptb_out != 0)
2365 continue;
2366 buf = sc->sc_ptb + i * IOP_MAX_XFER;
2367 rv = copyout(buf, ptb->ptb_data, ptb->ptb_datalen);
2368 if (rv != 0)
2369 break;
2370 }
2371
2372 bad:
2373 if (mapped != 0)
2374 iop_msg_unmap(sc, im);
2375 if (im != NULL)
2376 iop_msg_free(sc, im);
2377 if (mf != NULL)
2378 free(mf, M_DEVBUF);
2379 return (rv);
2380 }
2381