/*	$NetBSD: iop.c,v 1.40 2003/10/25 20:26:25 mycroft Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.40 2003/10/25 20:26:25 mycroft Exp $");

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

/*
 * Busy-wait for up to `ms' milliseconds, re-evaluating `cond' every
 * 100us and stopping early once it becomes true.  The trailing
 * semicolon is left to the caller, as with any statement macro.
 */
#define	POLL(ms, cond)						\
do {								\
	int i;							\
	for (i = (ms) * 10; i; i--) {				\
		if (cond)					\
			break;					\
		DELAY(100);					\
	}							\
} while (/* CONSTCOND */0)
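
/*
 * Illustrative use of POLL (cf. iop_reset() below): spin for up to ten
 * seconds waiting for the inbound FIFO to yield a free MFA:
 *
 *	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
 *	if (mfa == IOP_MFA_EMPTY)
 *		... timed out ...
 */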

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#define	COMMENT(x)	NULL
#else
#define	IFVERBOSE(x)
#define	COMMENT(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)
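
/*
 * A transaction context (tctx) encodes both a wrapper index and a
 * generation number: the low IOP_TCTX_SHIFT bits index sc_ims[], and
 * the upper bits are bumped on every allocation (see iop_msg_alloc())
 * so that stale or duplicated replies can be detected in
 * iop_handle_reply().  For example, the wrapper for a reply is
 * recovered with:
 *
 *	im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
 */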

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static void *iop_sdh;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char	*ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		IFVERBOSE("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t	iop_inl_msg(struct iop_softc *, int);
static inline void	iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
		    u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
		    struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
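
/*
 * Note on the barriers above: iop_outl() orders the write so that it is
 * pushed to the IOP before any subsequent access, and iop_inl() orders
 * all prior reads and writes ahead of the read, so a register read
 * always observes the effect of every earlier register write.
 */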

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		printf("%s: frame size too small (%d)\n",
		    sc->sc_dv.dv_xname, sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	return;

 bail_out3:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
 bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    30000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_print, iop_submatch);

	/*
	 * Start device configuration.
	 */
	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	if ((rv = iop_reconfigure(sc, 0)) == -1) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		return;
	}
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct lwp *l;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;
	l = curlwp;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(l);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(l);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0)
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return (config_match(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff) {
		printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	/*
	 * The high 16 bits of mb[0] hold the frame size in 32-bit words;
	 * account for the two SGL words just appended.
	 */
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: DMA map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: DMA create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}
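
/*
 * A note on the reply-frame pool built above: it is sc_maxob frames of
 * sc_framesize bytes each.  Pushing each frame's bus address onto the
 * outbound FIFO hands it to the IOP as a free reply MFA; iop_intr()
 * later pops filled frames off the same FIFO and, once each reply has
 * been handled, pushes the MFA back to keep the pool circulating.
 */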

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curlwp);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curlwp);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this is a
 * verbatim notification request, so the caller must be prepared to
 * wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
    u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
    int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct i2o_reply *rf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;
	im->im_rb = rf;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curlwp);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curlwp);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (rf->reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		free(rf, M_DEVBUF);
	}

	return (rv);
}
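
/*
 * Illustrative synchronous use of iop_field_get_all() (a sketch only;
 * the group constant and result structure stand in for whatever the
 * caller actually wants from <dev/i2o/i2o.h>).  Note that the reply
 * buffer begins with the parameter operation/read result headers,
 * followed by the group data itself:
 *
 *	struct {
 *		struct	i2o_param_op_results pr;
 *		struct	i2o_param_read_results prr;
 *		struct	i2o_param_device_identity di;
 *	} __attribute__ ((__packed__)) p;
 *
 *	if (iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY,
 *	    &p, sizeof(p), NULL) == 0)
 *		... p.di holds the scalar group, in I2O
 *		    (little-endian) byte order ...
 */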

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	PHOLD(curlwp);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curlwp);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
    int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}
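
/*
 * For example, iop_config_interrupts() above brings the IOP to the
 * OPERATIONAL state with a sleeping wait:
 *
 *	iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX,
 *	    1, 30000);
 *
 * while iop_shutdown() passes async == 0 so that the command is polled
 * for, which also works once interrupts are off.
 */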

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curlwp);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curlwp);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}
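
/*
 * A typical registration, modelled on the event initiator set up in
 * iop_config_interrupts() above (sketch only; the interrupt handler
 * and tid are whatever the child device provides):
 *
 *	ii->ii_dv = self;
 *	ii->ii_intr = mydev_intr;
 *	ii->ii_flags = 0;
 *	ii->ii_tid = ia->ia_tid;
 *	iop_initiator_register(sc, ii);
 */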

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
	mb[0] += (nsegs << 17);
	return (0);

 bad:
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}
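
/*
 * For reference, each SIMPLE SG element written above is two 32-bit
 * words appended at the current end of the frame (mb[0] >> 16 is the
 * running message size in words, which is why nsegs << 17 grows it by
 * two words per element):
 *
 *	word 0: flags | byte count	(I2O_SGL_SIMPLE, optionally
 *					 I2O_SGL_DATA_OUT, and
 *					 I2O_SGL_END_BUFFER on the last)
 *	word 1: segment bus address
 */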
1969
1970 /*
1971 * Map a block I/O data transfer (different in that there's only one per
1972 * message maximum, and PAGE addressing may be used). Write a scatter
1973 * gather list into the message frame.
1974 */
1975 int
1976 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1977 void *xferaddr, int xfersize, int out)
1978 {
1979 bus_dma_segment_t *ds;
1980 bus_dmamap_t dm;
1981 struct iop_xfer *ix;
1982 u_int rv, i, nsegs, off, slen, tlen, flg;
1983 paddr_t saddr, eaddr;
1984 u_int32_t *p;
1985
1986 #ifdef I2ODEBUG
1987 if (xfersize == 0)
1988 panic("iop_msg_map_bio: null transfer");
1989 if (xfersize > IOP_MAX_XFER)
1990 panic("iop_msg_map_bio: transfer too large");
1991 if ((im->im_flags & IM_SGLOFFADJ) != 0)
1992 panic("iop_msg_map_bio: SGLOFFADJ");
1993 #endif
1994
1995 ix = im->im_xfer;
1996 dm = ix->ix_map;
1997 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
1998 BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
1999 if (rv != 0)
2000 return (rv);
2001
2002 off = mb[0] >> 16;
2003 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
2004
2005 /*
2006 * If the transfer is highly fragmented and won't fit using SIMPLE
2007 * elements, use PAGE_LIST elements instead. SIMPLE elements are
2008 * potentially more efficient, both for us and the IOP.
2009 */
2010 if (dm->dm_nsegs > nsegs) {
2011 nsegs = 1;
2012 p = mb + off + 1;
2013
2014 /* XXX This should be done with a bus_space flag. */
2015 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2016 slen = ds->ds_len;
2017 saddr = ds->ds_addr;
2018
2019 while (slen > 0) {
2020 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2021 tlen = min(eaddr - saddr, slen);
2022 slen -= tlen;
2023 *p++ = le32toh(saddr);
2024 saddr = eaddr;
2025 nsegs++;
2026 }
2027 }
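		/*
		 * Example with hypothetical values (PAGE_SIZE of 4096): a
		 * 0x2000-byte segment starting at 0x01234800 produces the
		 * entries 0x01234800, 0x01235000 and 0x01236000.  Only the
		 * total byte count is recorded, in the header word written
		 * below.
		 */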
2028
2029 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2030 I2O_SGL_END;
2031 if (out)
2032 mb[off] |= I2O_SGL_DATA_OUT;
2033 } else {
2034 p = mb + off;
2035 nsegs = dm->dm_nsegs;
2036
2037 if (out)
2038 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2039 else
2040 flg = I2O_SGL_SIMPLE;
2041
2042 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2043 p[0] = (u_int32_t)ds->ds_len | flg;
2044 p[1] = (u_int32_t)ds->ds_addr;
2045 }
2046
2047 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2048 I2O_SGL_END;
2049 p[1] = (u_int32_t)ds->ds_addr;
2050 nsegs <<= 1;
2051 }
2052
2053 /* Fix up the transfer record, and sync the map. */
2054 ix->ix_flags = (out ? IX_OUT : IX_IN);
2055 ix->ix_size = xfersize;
2056 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2057 out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2058
2059 /*
2060 * Adjust the SGL offset and total message size fields. We don't
2061 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2062 */
2063 mb[0] += ((off << 4) + (nsegs << 16));
2064 return (0);
2065 }
2066
2067 /*
2068 * Unmap all data transfers associated with a message wrapper.
2069 */
2070 void
2071 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2072 {
2073 struct iop_xfer *ix;
2074 int i;
2075
2076 #ifdef I2ODEBUG
2077 if (im->im_xfer[0].ix_size == 0)
2078 panic("iop_msg_unmap: no transfers mapped");
2079 #endif
2080
2081 for (ix = im->im_xfer, i = 0;;) {
2082 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2083 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2084 BUS_DMASYNC_POSTREAD);
2085 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2086
2087 /* Only the first DMA map is static. */
2088 if (i != 0)
2089 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2090 if ((++ix)->ix_size == 0)
2091 break;
2092 if (++i >= IOP_MAX_MSG_XFERS)
2093 break;
2094 }
2095 }
2096
2097 /*
2098 * Post a message frame to the IOP's inbound queue.
2099 */
2100 int
2101 iop_post(struct iop_softc *sc, u_int32_t *mb)
2102 {
2103 u_int32_t mfa;
2104 int s;
2105
2106 #ifdef I2ODEBUG
2107 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2108 panic("iop_post: frame too large");
2109 #endif
2110
2111 s = splbio();
2112
2113 /* Allocate a slot with the IOP (read twice; see the IOP bug note in iop_msg_poll()). */
2114 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2115 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2116 splx(s);
2117 printf("%s: mfa not forthcoming\n",
2118 sc->sc_dv.dv_xname);
2119 return (EAGAIN);
2120 }
2121
2122 /* Perform reply buffer DMA synchronisation. */
2123 if (sc->sc_curib++ == 0)
2124 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2125 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2126
2127 /* Copy out the message frame. */
2128 bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2129 mb[0] >> 16);
2130 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
2131 (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2132
2133 /* Post the MFA back to the IOP. */
2134 iop_outl(sc, IOP_REG_IFIFO, mfa);
2135
2136 splx(s);
2137 return (0);
2138 }
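/*
 * Illustrative sketch only (not part of the driver): posting a minimal
 * four-word UTIL NOP frame with iop_post().  The frame layout mirrors
 * iop_release_mfa() below; `sc' is assumed to be an attached softc.
 */
#if 0
static int
iop_post_nop_example(struct iop_softc *sc)
{
	u_int32_t mb[4];

	mb[0] = I2O_VERSION_11 | (4 << 16);		/* version; 4 words */
	mb[1] = I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP);	/* target, function */
	mb[2] = 0;					/* initiator context */
	mb[3] = 0;					/* transaction context */

	return (iop_post(sc, mb));
}
#endif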
2139
2140 /*
2141 * Post a message to the IOP and deal with completion.
2142 */
2143 int
2144 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2145 {
2146 u_int32_t *mb;
2147 int rv, s;
2148
2149 mb = xmb;
2150
2151 /* Terminate the SGL chain: the last SIMPLE element begins two words from the frame's end. */
2152 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2153 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2154
2155 if ((rv = iop_post(sc, mb)) != 0)
2156 return (rv);
2157
2158 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2159 if ((im->im_flags & IM_POLL) != 0)
2160 iop_msg_poll(sc, im, timo);
2161 else
2162 iop_msg_wait(sc, im, timo);
2163
2164 s = splbio();
2165 if ((im->im_flags & IM_REPLIED) != 0) {
2166 if ((im->im_flags & IM_NOSTATUS) != 0)
2167 rv = 0;
2168 else if ((im->im_flags & IM_FAIL) != 0)
2169 rv = ENXIO;
2170 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2171 rv = EIO;
2172 else
2173 rv = 0;
2174 } else
2175 rv = EBUSY;
2176 splx(s);
2177 } else
2178 rv = 0;
2179
2180 return (rv);
2181 }
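/*
 * Illustrative sketch only: the canonical initiator request sequence
 * built on iop_msg_post(), modelled on iop_util_claim() below.  The
 * target `tid' and the claim `flags' are assumed to come from the
 * caller.
 */
#if 0
	struct iop_msg *im;
	struct i2o_util_claim mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
	mf.msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_CLAIM);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;
	mf.flags = flags;

	rv = iop_msg_post(sc, im, &mf, 5000);	/* sleep for up to 5000ms */
	iop_msg_free(sc, im);
#endif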
2182
2183 /*
2184 * Spin until the specified message is replied to.
2185 */
2186 static void
2187 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2188 {
2189 u_int32_t rmfa;
2190 int s;
2191
2192 s = splbio();
2193
2194 /* Wait for completion: timo is in ms; we poll in 100us steps. */
2195 for (timo *= 10; timo != 0; timo--) {
2196 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2197 /* Double read to account for IOP bug. */
2198 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2199 if (rmfa == IOP_MFA_EMPTY)
2200 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2201 if (rmfa != IOP_MFA_EMPTY) {
2202 iop_handle_reply(sc, rmfa);
2203
2204 /*
2205 * Return the reply frame to the IOP's
2206 * outbound FIFO.
2207 */
2208 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2209 }
2210 }
2211 if ((im->im_flags & IM_REPLIED) != 0)
2212 break;
2213 DELAY(100);
2214 }
2215
2216 if (timo == 0) {
2217 #ifdef I2ODEBUG
2218 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2219 if (iop_status_get(sc, 1) != 0)
2220 printf("iop_msg_poll: unable to retrieve status\n");
2221 else
2222 printf("iop_msg_poll: IOP state = %d\n",
2223 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2224 #endif
2225 }
2226
2227 splx(s);
2228 }
2229
2230 /*
2231 * Sleep until the specified message is replied to.
2232 */
2233 static void
2234 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2235 {
2236 int s, rv;
2237
2238 s = splbio();
2239 if ((im->im_flags & IM_REPLIED) != 0) {
2240 splx(s);
2241 return;
2242 }
2243 rv = tsleep(im, PRIBIO, "iopmsg", mstohz(timo));
2244 splx(s);
2245
2246 #ifdef I2ODEBUG
2247 if (rv != 0) {
2248 printf("iop_msg_wait: tsleep() == %d\n", rv);
2249 if (iop_status_get(sc, 0) != 0)
2250 printf("iop_msg_wait: unable to retrieve status\n");
2251 else
2252 printf("iop_msg_wait: IOP state = %d\n",
2253 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2254 }
2255 #endif
2256 }
2257
2258 /*
2259 * Release an unused message frame back to the IOP's inbound fifo.
2260 */
2261 static void
2262 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2263 {
2264
2265 /* Use the frame to issue a no-op. */
2266 iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2267 iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2268 iop_outl_msg(sc, mfa + 8, 0);
2269 iop_outl_msg(sc, mfa + 12, 0);
2270
2271 iop_outl(sc, IOP_REG_IFIFO, mfa);
2272 }
2273
2274 #ifdef I2ODEBUG
2275 /*
2276 * Dump a reply frame header.
2277 */
2278 static void
2279 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2280 {
2281 u_int function, detail;
2282 #ifdef I2OVERBOSE
2283 const char *statusstr;
2284 #endif
2285
2286 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2287 detail = le16toh(rb->detail);
2288
2289 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2290
2291 #ifdef I2OVERBOSE
2292 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2293 statusstr = iop_status[rb->reqstatus];
2294 else
2295 statusstr = "undefined error code";
2296
2297 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2298 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2299 #else
2300 printf("%s: function=0x%02x status=0x%02x\n",
2301 sc->sc_dv.dv_xname, function, rb->reqstatus);
2302 #endif
2303 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2304 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2305 le32toh(rb->msgtctx));
2306 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2307 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2308 (le32toh(rb->msgflags) >> 8) & 0xff);
2309 }
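/*
 * Sample of the output produced above (all values hypothetical; the
 * status string comes from the iop_status[] table):
 *
 *	iop0: reply:
 *	iop0: function=0x8a status=0x00 (success)
 *	iop0: detail=0x0000 ictx=0x00000000 tctx=0x00d90000
 *	iop0: tidi=1 tidt=8 flags=0x00
 */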
2310 #endif
2311
2312 /*
2313 * Dump a transport failure reply.
2314 */
2315 static void
2316 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2317 {
2318
2319 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2320
2321 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2322 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2323 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2324 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2325 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2326 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2327 }
2328
2329 /*
2330 * Translate an I2O ASCII field into a C string.
2331 */
2332 void
2333 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2334 {
2335 int hc, lc, i, nit;
2336
2337 dlen--;
2338 lc = 0;
2339 hc = 0;
2340 i = 0;
2341
2342 /*
2343 * DPT uses NUL as a space, whereas AMI uses it as a terminator. The
2344 * spec has nothing to say about it. Since AMI fields are usually
2345 * filled with junk after the terminator, stop at NUL unless from DPT.
2346 */
2347 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2348
2349 while (slen-- != 0 && dlen-- != 0) {
2350 if (nit && *src == '\0')
2351 break;
2352 else if (*src <= 0x20 || *src >= 0x7f) {
2353 if (hc)
2354 dst[i++] = ' ';
2355 } else {
2356 hc = 1;
2357 dst[i++] = *src;
2358 lc = i;
2359 }
2360 src++;
2361 }
2362
2363 dst[lc] = '\0';
2364 }
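/*
 * Example (hypothetical field contents): the bytes 'D','P','T',NUL,'2','0'
 * from a DPT IOP become "DPT 20" (NUL rendered as a space), whereas
 * 'A','M','I',NUL,<junk> from any other vendor becomes "AMI" (NUL
 * treated as the terminator, and the junk never copied).
 */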
2365
2366 /*
2367 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2368 */
2369 int
2370 iop_print_ident(struct iop_softc *sc, int tid)
2371 {
2372 struct {
2373 struct i2o_param_op_results pr;
2374 struct i2o_param_read_results prr;
2375 struct i2o_param_device_identity di;
2376 } __attribute__ ((__packed__)) p;
2377 char buf[32];
2378 int rv;
2379
2380 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2381 sizeof(p), NULL);
2382 if (rv != 0)
2383 return (rv);
2384
2385 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2386 sizeof(buf));
2387 printf(" <%s, ", buf);
2388 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2389 sizeof(buf));
2390 printf("%s, ", buf);
2391 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2392 printf("%s>", buf);
2393
2394 return (0);
2395 }
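/* The line printed above takes the form " <vendor, product, revision>". */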
2396
2397 /*
2398 * Claim or unclaim the specified TID.
2399 */
2400 int
2401 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2402 int flags)
2403 {
2404 struct iop_msg *im;
2405 struct i2o_util_claim mf;
2406 int rv, func;
2407
2408 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2409 im = iop_msg_alloc(sc, IM_WAIT);
2410
2411 /* We can use the same structure, as they're identical. */
2412 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2413 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2414 mf.msgictx = ii->ii_ictx;
2415 mf.msgtctx = im->im_tctx;
2416 mf.flags = flags;
2417
2418 rv = iop_msg_post(sc, im, &mf, 5000);
2419 iop_msg_free(sc, im);
2420 return (rv);
2421 }
2422
2423 /*
2424 * Perform an abort.
2425 */
2426 int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2427 int tctxabort, int flags)
2428 {
2429 struct iop_msg *im;
2430 struct i2o_util_abort mf;
2431 int rv;
2432
2433 im = iop_msg_alloc(sc, IM_WAIT);
2434
2435 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2436 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2437 mf.msgictx = ii->ii_ictx;
2438 mf.msgtctx = im->im_tctx;
2439 mf.flags = (func << 24) | flags;
2440 mf.tctxabort = tctxabort;
2441
2442 rv = iop_msg_post(sc, im, &mf, 5000);
2443 iop_msg_free(sc, im);
2444 return (rv);
2445 }
2446
2447 /*
2448 * Enable or disable reception of events for the specified device.
2449 */
2450 int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2451 {
2452 struct i2o_util_event_register mf;
2453
2454 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2455 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2456 mf.msgictx = ii->ii_ictx;
2457 mf.msgtctx = 0;
2458 mf.eventmask = mask;
2459
2460 /* This message is replied to only when events are signalled. */
2461 return (iop_post(sc, (u_int32_t *)&mf));
2462 }
2463
2464 int
2465 iopopen(dev_t dev, int flag, int mode, struct proc *p)
2466 {
2467 struct iop_softc *sc;
2468
2469 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2470 return (ENXIO);
2471 if ((sc->sc_flags & IOP_ONLINE) == 0)
2472 return (ENXIO);
2473 if ((sc->sc_flags & IOP_OPEN) != 0)
2474 return (EBUSY);
2475 sc->sc_flags |= IOP_OPEN;
2476
2477 return (0);
2478 }
2479
2480 int
2481 iopclose(dev_t dev, int flag, int mode, struct proc *p)
2482 {
2483 struct iop_softc *sc;
2484
2485 sc = device_lookup(&iop_cd, minor(dev));
2486 sc->sc_flags &= ~IOP_OPEN;
2487
2488 return (0);
2489 }
2490
2491 int
2492 iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
2493 {
2494 struct iop_softc *sc;
2495 struct iovec *iov;
2496 int rv, i;
2497
2498 if (securelevel >= 2)
2499 return (EPERM);
2500
2501 sc = device_lookup(&iop_cd, minor(dev));
2502
2503 switch (cmd) {
2504 case IOPIOCPT:
2505 return (iop_passthrough(sc, (struct ioppt *)data, p));
2506
2507 case IOPIOCGSTATUS:
2508 iov = (struct iovec *)data;
2509 i = sizeof(struct i2o_status);
2510 if (i > iov->iov_len)
2511 i = iov->iov_len;
2512 else
2513 iov->iov_len = i;
2514 if ((rv = iop_status_get(sc, 0)) == 0)
2515 rv = copyout(&sc->sc_status, iov->iov_base, i);
2516 return (rv);
2517
2518 case IOPIOCGLCT:
2519 case IOPIOCGTIDMAP:
2520 case IOPIOCRECONFIG:
2521 break;
2522
2523 default:
2524 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2525 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2526 #endif
2527 return (ENOTTY);
2528 }
2529
2530 if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2531 return (rv);
2532
2533 switch (cmd) {
2534 case IOPIOCGLCT:
2535 iov = (struct iovec *)data;
2536 i = le16toh(sc->sc_lct->tablesize) << 2;
2537 if (i > iov->iov_len)
2538 i = iov->iov_len;
2539 else
2540 iov->iov_len = i;
2541 rv = copyout(sc->sc_lct, iov->iov_base, i);
2542 break;
2543
2544 case IOPIOCRECONFIG:
2545 rv = iop_reconfigure(sc, 0);
2546 break;
2547
2548 case IOPIOCGTIDMAP:
2549 iov = (struct iovec *)data;
2550 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2551 if (i > iov->iov_len)
2552 i = iov->iov_len;
2553 else
2554 iov->iov_len = i;
2555 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2556 break;
2557 }
2558
2559 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2560 return (rv);
2561 }
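/*
 * Illustrative userland fragment only (the device node name /dev/iop0
 * and the userland headers are assumptions): fetching the IOP status
 * block with IOPIOCGSTATUS.  The kernel shrinks iov_len if the caller's
 * buffer is larger than struct i2o_status.
 */
#if 0
	struct iovec iov;
	struct i2o_status st;
	int fd;

	fd = open("/dev/iop0", O_RDWR);
	iov.iov_base = &st;
	iov.iov_len = sizeof(st);
	if (fd >= 0 && ioctl(fd, IOPIOCGSTATUS, &iov) == 0)
		printf("organisation ID 0x%04x\n", le16toh(st.orgid));
#endif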
2562
2563 static int
2564 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2565 {
2566 struct iop_msg *im;
2567 struct i2o_msg *mf;
2568 struct ioppt_buf *ptb;
2569 int rv, i, mapped;
2570
2571 mf = NULL;
2572 im = NULL;
2573 mapped = 0;
2574
2575 if (pt->pt_msglen > sc->sc_framesize ||
2576 pt->pt_msglen < sizeof(struct i2o_msg) ||
2577 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2578 pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2579 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2580 return (EINVAL);
2581
2582 for (i = 0; i < pt->pt_nbufs; i++)
2583 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2584 rv = ENOMEM;
2585 goto bad;
2586 }
2587
2588 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2589 if (mf == NULL)
2590 return (ENOMEM);
2591
2592 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2593 goto bad;
2594
2595 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2596 im->im_rb = (struct i2o_reply *)mf;
2597 mf->msgictx = IOP_ICTX;
2598 mf->msgtctx = im->im_tctx;
2599
2600 for (i = 0; i < pt->pt_nbufs; i++) {
2601 ptb = &pt->pt_bufs[i];
2602 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2603 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2604 if (rv != 0)
2605 goto bad;
2606 mapped = 1;
2607 }
2608
2609 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2610 goto bad;
2611
2612 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2613 if (i > sc->sc_framesize)
2614 i = sc->sc_framesize;
2615 if (i > pt->pt_replylen)
2616 i = pt->pt_replylen;
2617 rv = copyout(im->im_rb, pt->pt_reply, i);
2618
2619 bad:
2620 if (mapped != 0)
2621 iop_msg_unmap(sc, im);
2622 if (im != NULL)
2623 iop_msg_free(sc, im);
2624 if (mf != NULL)
2625 free(mf, M_DEVBUF);
2626 return (rv);
2627 }
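/*
 * Illustrative userland fragment only: issuing a pass-through frame with
 * IOPIOCPT.  `fd', the prebuilt frame `frame' and its word count
 * `framewords' are assumed, as is the exact layout of struct ioppt
 * (pt_bufs is treated here as an embedded array; see <dev/i2o/iopio.h>).
 */
#if 0
	struct ioppt pt;
	char reply[128], data[512];

	pt.pt_msg = frame;
	pt.pt_msglen = framewords << 2;		/* bytes */
	pt.pt_reply = reply;
	pt.pt_replylen = sizeof(reply);
	pt.pt_timo = 5000;			/* 1000..300000ms permitted */
	pt.pt_nbufs = 1;
	pt.pt_bufs[0].ptb_data = data;
	pt.pt_bufs[0].ptb_datalen = sizeof(data);
	pt.pt_bufs[0].ptb_out = 0;		/* data flows in from the IOP */

	if (ioctl(fd, IOPIOCPT, &pt) != 0)
		err(EXIT_FAILURE, "IOPIOCPT");
#endif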
2628