/*	$NetBSD: iop.c,v 1.30 2002/11/15 13:51:29 ad Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.30 2002/11/15 13:51:29 ad Exp $");

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

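/*
 * Busy-wait for up to `ms' milliseconds, re-evaluating `cond' roughly
 * every 100us.  `cond' may have side effects (e.g. a DMA map sync) and
 * is therefore re-evaluated on every iteration.
 */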
#define	POLL(ms, cond)					\
do {							\
	int i;						\
	for (i = (ms) * 10; i; i--) {			\
		if (cond)				\
			break;				\
		DELAY(100);				\
	}						\
} while (/* CONSTCOND */0);

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#define	COMMENT(x)	NULL
#else
#define	IFVERBOSE(x)
#define	COMMENT(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)
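/*
 * A transaction context is a wrapper index in the low IOP_TCTX_SHIFT bits
 * plus a generation number in the bits above; iop_msg_alloc() builds it
 * and iop_handle_reply() uses it to detect stale or corrupt replies.
 */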

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static void *iop_sdh;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char	*ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t	iop_inl_msg(struct iop_softc *, int);
static inline void	iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);
static int	iop_vendor_print(void *, const char *);

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
				struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

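/*
 * Register accessors.  The bus_space barriers keep device register reads
 * and writes ordered on buses that might otherwise buffer or reorder them.
 */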
static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem  %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o  %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
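	/* inboundmframesize is in 32-bit words; convert to bytes. */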
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		printf("%s: frame size too small (%d)\n",
		    sc->sc_dv.dv_xname, sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

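	/*
	 * Only the first transfer's DMA map is created up front;
	 * iop_msg_map() creates maps for any further transfers on demand.
	 */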
	for (i = 0, state++; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
415 printf("%s: couldn't create dmamap (%d)",
416 sc->sc_dv.dv_xname, rv);
			goto bail_out;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
426 printf("%s: unable to init oubound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	return;

 bail_out:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    30000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
580 printf("%s: unable to register for events", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);

	/*
	 * Start device configuration.
	 */
	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	if ((rv = iop_reconfigure(sc, 0)) == -1) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		return;
	}
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
620 printf("%s: unable to create reconfiguration thread (%d)",
621 sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(sc->sc_reconf_proc);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(sc->sc_reconf_proc);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0)
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

static int
iop_vendor_print(void *aux, const char *pnp)
{

	return (QUIET);
}

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return (config_match(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff) {
		printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	mb[0] += 2 << 16;
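	/*
	 * The high 16 bits of word 0 hold the message size in 32-bit words;
	 * the `2 << 16' above grows it by the two SGL words just appended.
	 */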

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
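	/* Each free MFA pushed here becomes a reply frame the IOP may use. */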
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curproc);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curproc);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller is prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
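	/*
	 * tablesize is in 32-bit words and covers one entry embedded in
	 * struct i2o_lct, hence the header-size adjustment below.
	 */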
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct i2o_reply *rf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;
	im->im_rb = rf;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curproc);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curproc);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (rf->reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		free(rf, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
	      int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curproc);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curproc);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) >= sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
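		/* Dispatch the reply, then hand the MFA back for reuse. */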
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
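	/* An ix_size of zero marks a transfer slot free; see iop_msg_map(). */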
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
	    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
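	/* Each SIMPLE element is two words: (nsegs << 17) == (2 * nsegs) << 16. */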
1966 mb[0] += (nsegs << 17);
1967 return (0);
1968
1969 bad:
1970 if (xn != 0)
1971 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1972 return (rv);
1973 }
1974
1975 /*
1976 * Map a block I/O data transfer (different in that there's only one per
1977 * message maximum, and PAGE addressing may be used). Write a scatter
1978 * gather list into the message frame.
1979 */
1980 int
1981 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1982 void *xferaddr, int xfersize, int out)
1983 {
1984 bus_dma_segment_t *ds;
1985 bus_dmamap_t dm;
1986 struct iop_xfer *ix;
1987 u_int rv, i, nsegs, off, slen, tlen, flg;
1988 paddr_t saddr, eaddr;
1989 u_int32_t *p;
1990
1991 #ifdef I2ODEBUG
1992 if (xfersize == 0)
1993 panic("iop_msg_map_bio: null transfer");
1994 if (xfersize > IOP_MAX_XFER)
1995 panic("iop_msg_map_bio: transfer too large");
1996 if ((im->im_flags & IM_SGLOFFADJ) != 0)
1997 panic("iop_msg_map_bio: SGLOFFADJ");
1998 #endif
1999
2000 ix = im->im_xfer;
2001 dm = ix->ix_map;
2002 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
2003 BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
2004 if (rv != 0)
2005 return (rv);
2006
2007 off = mb[0] >> 16;
2008 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
2009
2010 /*
2011 * If the transfer is highly fragmented and won't fit using SIMPLE
2012 * elements, use PAGE_LIST elements instead. SIMPLE elements are
2013 * potentially more efficient, both for us and the IOP.
2014 */
2015 if (dm->dm_nsegs > nsegs) {
2016 nsegs = 1;
2017 p = mb + off + 1;
2018
2019 /* XXX This should be done with a bus_space flag. */
2020 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2021 slen = ds->ds_len;
2022 saddr = ds->ds_addr;
2023
2024 while (slen > 0) {
2025 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2026 tlen = min(eaddr - saddr, slen);
2027 slen -= tlen;
2028 *p++ = le32toh(saddr);
2029 saddr = eaddr;
2030 nsegs++;
2031 }
2032 }
2033
2034 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2035 I2O_SGL_END;
2036 if (out)
2037 mb[off] |= I2O_SGL_DATA_OUT;
2038 } else {
2039 p = mb + off;
2040 nsegs = dm->dm_nsegs;
2041
2042 if (out)
2043 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2044 else
2045 flg = I2O_SGL_SIMPLE;
2046
2047 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2048 p[0] = (u_int32_t)ds->ds_len | flg;
2049 p[1] = (u_int32_t)ds->ds_addr;
2050 }
2051
2052 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2053 I2O_SGL_END;
2054 p[1] = (u_int32_t)ds->ds_addr;
nsegs <<= 1;	/* two words per SIMPLE element */
2056 }
2057
2058 /* Fix up the transfer record, and sync the map. */
2059 ix->ix_flags = (out ? IX_OUT : IX_IN);
2060 ix->ix_size = xfersize;
bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2063
2064 /*
2065 * Adjust the SGL offset and total message size fields. We don't
2066 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2067 */
2068 mb[0] += ((off << 4) + (nsegs << 16));
2069 return (0);
2070 }
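/*
 * A worked comparison of the two SGL forms used above, following
 * directly from the code (a SIMPLE element is two words; a PAGE_LIST
 * is one header word plus one word per page): a 64kB transfer split
 * across 16 discontiguous 4kB pages costs 16 * 2 = 32 words as SIMPLE
 * elements, but only 1 + 16 = 17 words as a PAGE_LIST.  That is why
 * the PAGE_LIST form is used only once SIMPLE elements won't fit.
 */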
2071
2072 /*
2073 * Unmap all data transfers associated with a message wrapper.
2074 */
2075 void
2076 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2077 {
2078 struct iop_xfer *ix;
2079 int i;
2080
2081 #ifdef I2ODEBUG
2082 if (im->im_xfer[0].ix_size == 0)
2083 panic("iop_msg_unmap: no transfers mapped");
2084 #endif
2085
2086 for (ix = im->im_xfer, i = 0;;) {
2087 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2088 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2089 BUS_DMASYNC_POSTREAD);
2090 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2091
2092 /* Only the first DMA map is static. */
2093 if (i != 0)
2094 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
/* Check the count first; don't read past the end of im_xfer[]. */
if (++i >= IOP_MAX_MSG_XFERS)
break;
if ((++ix)->ix_size == 0)
break;
2099 }
2100 }
2101
2102 /*
2103 * Post a message frame to the IOP's inbound queue.
2104 */
2105 int
2106 iop_post(struct iop_softc *sc, u_int32_t *mb)
2107 {
2108 u_int32_t mfa;
2109 int s;
2110
2111 #ifdef I2ODEBUG
2112 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2113 panic("iop_post: frame too large");
2114 #endif
2115
2116 s = splbio();
2117
/*
 * Allocate a slot with the IOP.  Read the FIFO twice: as with the
 * outbound FIFO (see iop_msg_poll()), the first read may falsely
 * return IOP_MFA_EMPTY.
 */
2119 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2120 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2121 splx(s);
2122 printf("%s: mfa not forthcoming\n",
2123 sc->sc_dv.dv_xname);
2124 return (EAGAIN);
2125 }
2126
2127 /* Perform reply buffer DMA synchronisation. */
2128 if (sc->sc_curib++ == 0)
2129 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2130 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2131
2132 /* Copy out the message frame. */
2133 bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2134 mb[0] >> 16);
2135 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
2136 (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2137
2138 /* Post the MFA back to the IOP. */
2139 iop_outl(sc, IOP_REG_IFIFO, mfa);
2140
2141 splx(s);
2142 return (0);
2143 }
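/*
 * For reference, the layout of the leading message frame word (mb[0])
 * as used throughout this file: bits 16-31 hold the frame size in
 * 32-bit words, bits 8-15 the message flags (cf. iop_reply_print()),
 * bits 4-7 the SGL offset in words, and the low bits the I2O version
 * (e.g. I2O_VERSION_11 in iop_release_mfa()).  Hence the byte count
 * above is (mb[0] >> 14) & ~3.
 */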
2144
2145 /*
2146 * Post a message to the IOP and deal with completion.
2147 */
2148 int
2149 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2150 {
2151 u_int32_t *mb;
2152 int rv, s;
2153
2154 mb = xmb;
2155
/*
 * Terminate the scatter/gather list chain: the flag word of the last
 * SIMPLE element sits two words from the current end of the frame.
 */
2157 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2158 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2159
2160 if ((rv = iop_post(sc, mb)) != 0)
2161 return (rv);
2162
2163 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2164 if ((im->im_flags & IM_POLL) != 0)
2165 iop_msg_poll(sc, im, timo);
2166 else
2167 iop_msg_wait(sc, im, timo);
2168
2169 s = splbio();
2170 if ((im->im_flags & IM_REPLIED) != 0) {
2171 if ((im->im_flags & IM_NOSTATUS) != 0)
2172 rv = 0;
2173 else if ((im->im_flags & IM_FAIL) != 0)
2174 rv = ENXIO;
2175 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2176 rv = EIO;
2177 else
2178 rv = 0;
2179 } else
2180 rv = EBUSY;
2181 splx(s);
2182 } else
2183 rv = 0;
2184
2185 return (rv);
2186 }
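/*
 * Illustrative only: the typical request lifecycle as seen by an
 * initiator.  The bare i2o_msg header, the `tid', `function', `buf'
 * and `buflen' placeholders and the 10 second timeout are invented
 * for the example; real callers build a function-specific frame, as
 * in iop_util_claim() below.
 *
 *	struct iop_msg *im;
 *	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
 *	struct i2o_msg *mf = (struct i2o_msg *)mb;
 *	int rv;
 *
 *	im = iop_msg_alloc(sc, IM_WAIT);
 *	mf->msgflags = I2O_MSGFLAGS(i2o_msg);
 *	mf->msgfunc = I2O_MSGFUNC(tid, function);
 *	mf->msgictx = IOP_ICTX;
 *	mf->msgtctx = im->im_tctx;
 *	if ((rv = iop_msg_map(sc, im, mb, buf, buflen, 1, NULL)) == 0) {
 *		rv = iop_msg_post(sc, im, mb, 10000);
 *		iop_msg_unmap(sc, im);
 *	}
 *	iop_msg_free(sc, im);
 */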
2187
2188 /*
2189 * Spin until the specified message is replied to.
2190 */
2191 static void
2192 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2193 {
2194 u_int32_t rmfa;
2195 int s, status;
2196
2197 s = splbio();
2198
2199 /* Wait for completion. */
2200 for (timo *= 10; timo != 0; timo--) {
2201 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2202 /* Double read to account for IOP bug. */
2203 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2204 if (rmfa == IOP_MFA_EMPTY)
2205 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2206 if (rmfa != IOP_MFA_EMPTY) {
2207 status = iop_handle_reply(sc, rmfa);
2208
2209 /*
2210 * Return the reply frame to the IOP's
2211 * outbound FIFO.
2212 */
2213 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2214 }
2215 }
2216 if ((im->im_flags & IM_REPLIED) != 0)
2217 break;
2218 DELAY(100);
2219 }
2220
2221 if (timo == 0) {
2222 #ifdef I2ODEBUG
2223 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2224 if (iop_status_get(sc, 1) != 0)
2225 printf("iop_msg_poll: unable to retrieve status\n");
2226 else
2227 printf("iop_msg_poll: IOP state = %d\n",
2228 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2229 #endif
2230 }
2231
2232 splx(s);
2233 }
2234
2235 /*
2236 * Sleep until the specified message is replied to.
2237 */
2238 static void
2239 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2240 {
2241 int s, rv;
2242
2243 s = splbio();
2244 if ((im->im_flags & IM_REPLIED) != 0) {
2245 splx(s);
2246 return;
2247 }
2248 rv = tsleep(im, PRIBIO, "iopmsg", mstohz(timo));
2249 splx(s);
2250
2251 #ifdef I2ODEBUG
2252 if (rv != 0) {
2253 printf("iop_msg_wait: tsleep() == %d\n", rv);
2254 if (iop_status_get(sc, 0) != 0)
2255 printf("iop_msg_wait: unable to retrieve status\n");
2256 else
2257 printf("iop_msg_wait: IOP state = %d\n",
2258 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2259 }
2260 #endif
2261 }
2262
2263 /*
2264 * Release an unused message frame back to the IOP's inbound fifo.
2265 */
2266 static void
2267 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2268 {
2269
2270 /* Use the frame to issue a no-op. */
2271 iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2272 iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2273 iop_outl_msg(sc, mfa + 8, 0);
2274 iop_outl_msg(sc, mfa + 12, 0);
2275
2276 iop_outl(sc, IOP_REG_IFIFO, mfa);
2277 }
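/*
 * The four words written above form a complete UtilNOP frame: word 0
 * carries the I2O version and a length of four words, word 1 the
 * function code and target (the IOP itself), and the two context
 * words are don't-cares, since a NOP elicits no reply.
 */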
2278
2279 #ifdef I2ODEBUG
2280 /*
2281 * Dump a reply frame header.
2282 */
2283 static void
2284 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2285 {
2286 u_int function, detail;
2287 #ifdef I2OVERBOSE
2288 const char *statusstr;
2289 #endif
2290
2291 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2292 detail = le16toh(rb->detail);
2293
2294 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2295
2296 #ifdef I2OVERBOSE
2297 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2298 statusstr = iop_status[rb->reqstatus];
2299 else
2300 statusstr = "undefined error code";
2301
2302 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2303 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2304 #else
2305 printf("%s: function=0x%02x status=0x%02x\n",
2306 sc->sc_dv.dv_xname, function, rb->reqstatus);
2307 #endif
2308 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2309 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2310 le32toh(rb->msgtctx));
2311 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2312 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2313 (le32toh(rb->msgflags) >> 8) & 0xff);
2314 }
2315 #endif
2316
2317 /*
2318 * Dump a transport failure reply.
2319 */
2320 static void
2321 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2322 {
2323
2324 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2325
2326 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2327 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2328 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2329 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2330 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2331 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2332 }
2333
2334 /*
2335 * Translate an I2O ASCII field into a C string.
2336 */
2337 void
2338 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2339 {
2340 int hc, lc, i, nit;
2341
2342 dlen--;
2343 lc = 0;
2344 hc = 0;
2345 i = 0;
2346
/*
 * DPT use NUL as a space, whereas AMI use it as a terminator.  The
 * spec has nothing to say about it.  Since AMI fields are usually
 * filled with junk after the terminator, treat NUL as a terminator
 * unless the IOP is from DPT.
 */
2352 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2353
2354 while (slen-- != 0 && dlen-- != 0) {
2355 if (nit && *src == '\0')
2356 break;
2357 else if (*src <= 0x20 || *src >= 0x7f) {
2358 if (hc)
2359 dst[i++] = ' ';
2360 } else {
2361 hc = 1;
2362 dst[i++] = *src;
2363 lc = i;
2364 }
2365 src++;
2366 }
2367
2368 dst[lc] = '\0';
2369 }
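/*
 * For example, with a non-DPT IOP an 8 byte field containing
 * " RAID-5\0" converts to the C string "RAID-5": leading and trailing
 * non-printables are trimmed, and embedded ones are rendered as
 * spaces.
 */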
2370
2371 /*
2372 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2373 */
2374 int
2375 iop_print_ident(struct iop_softc *sc, int tid)
2376 {
2377 struct {
2378 struct i2o_param_op_results pr;
2379 struct i2o_param_read_results prr;
2380 struct i2o_param_device_identity di;
2381 } __attribute__ ((__packed__)) p;
2382 char buf[32];
2383 int rv;
2384
2385 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2386 sizeof(p), NULL);
2387 if (rv != 0)
2388 return (rv);
2389
2390 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2391 sizeof(buf));
2392 printf(" <%s, ", buf);
2393 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2394 sizeof(buf));
2395 printf("%s, ", buf);
2396 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2397 printf("%s>", buf);
2398
2399 return (0);
2400 }
2401
2402 /*
2403 * Claim or unclaim the specified TID.
2404 */
2405 int
2406 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2407 int flags)
2408 {
2409 struct iop_msg *im;
2410 struct i2o_util_claim mf;
2411 int rv, func;
2412
2413 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2414 im = iop_msg_alloc(sc, IM_WAIT);
2415
2416 /* We can use the same structure, as they're identical. */
2417 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2418 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2419 mf.msgictx = ii->ii_ictx;
2420 mf.msgtctx = im->im_tctx;
2421 mf.flags = flags;
2422
2423 rv = iop_msg_post(sc, im, &mf, 5000);
2424 iop_msg_free(sc, im);
2425 return (rv);
2426 }
2427
2428 /*
2429 * Perform an abort.
2430 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
2433 {
2434 struct iop_msg *im;
2435 struct i2o_util_abort mf;
2436 int rv;
2437
2438 im = iop_msg_alloc(sc, IM_WAIT);
2439
2440 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2441 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2442 mf.msgictx = ii->ii_ictx;
2443 mf.msgtctx = im->im_tctx;
2444 mf.flags = (func << 24) | flags;
2445 mf.tctxabort = tctxabort;
2446
2447 rv = iop_msg_post(sc, im, &mf, 5000);
2448 iop_msg_free(sc, im);
2449 return (rv);
2450 }
2451
2452 /*
2453 * Enable or disable reception of events for the specified device.
2454 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2456 {
2457 struct i2o_util_event_register mf;
2458
2459 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2460 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2461 mf.msgictx = ii->ii_ictx;
2462 mf.msgtctx = 0;
2463 mf.eventmask = mask;
2464
2465 /* This message is replied to only when events are signalled. */
2466 return (iop_post(sc, (u_int32_t *)&mf));
2467 }
2468
2469 int
2470 iopopen(dev_t dev, int flag, int mode, struct proc *p)
2471 {
2472 struct iop_softc *sc;
2473
2474 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2475 return (ENXIO);
2476 if ((sc->sc_flags & IOP_ONLINE) == 0)
2477 return (ENXIO);
2478 if ((sc->sc_flags & IOP_OPEN) != 0)
2479 return (EBUSY);
2480 sc->sc_flags |= IOP_OPEN;
2481
2482 return (0);
2483 }
2484
2485 int
2486 iopclose(dev_t dev, int flag, int mode, struct proc *p)
2487 {
2488 struct iop_softc *sc;
2489
2490 sc = device_lookup(&iop_cd, minor(dev));
2491 sc->sc_flags &= ~IOP_OPEN;
2492
2493 return (0);
2494 }
2495
2496 int
2497 iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
2498 {
2499 struct iop_softc *sc;
2500 struct iovec *iov;
2501 int rv, i;
2502
2503 if (securelevel >= 2)
2504 return (EPERM);
2505
2506 sc = device_lookup(&iop_cd, minor(dev));
2507
2508 switch (cmd) {
2509 case IOPIOCPT:
2510 return (iop_passthrough(sc, (struct ioppt *)data, p));
2511
2512 case IOPIOCGSTATUS:
2513 iov = (struct iovec *)data;
2514 i = sizeof(struct i2o_status);
2515 if (i > iov->iov_len)
2516 i = iov->iov_len;
2517 else
2518 iov->iov_len = i;
2519 if ((rv = iop_status_get(sc, 0)) == 0)
2520 rv = copyout(&sc->sc_status, iov->iov_base, i);
2521 return (rv);
2522
2523 case IOPIOCGLCT:
2524 case IOPIOCGTIDMAP:
2525 case IOPIOCRECONFIG:
2526 break;
2527
2528 default:
2529 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2530 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2531 #endif
2532 return (ENOTTY);
2533 }
2534
2535 if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2536 return (rv);
2537
2538 switch (cmd) {
2539 case IOPIOCGLCT:
2540 iov = (struct iovec *)data;
2541 i = le16toh(sc->sc_lct->tablesize) << 2;
2542 if (i > iov->iov_len)
2543 i = iov->iov_len;
2544 else
2545 iov->iov_len = i;
2546 rv = copyout(sc->sc_lct, iov->iov_base, i);
2547 break;
2548
2549 case IOPIOCRECONFIG:
2550 rv = iop_reconfigure(sc, 0);
2551 break;
2552
2553 case IOPIOCGTIDMAP:
2554 iov = (struct iovec *)data;
2555 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2556 if (i > iov->iov_len)
2557 i = iov->iov_len;
2558 else
2559 iov->iov_len = i;
2560 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2561 break;
2562 }
2563
2564 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2565 return (rv);
2566 }
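/*
 * A minimal userland sketch of IOPIOCGSTATUS; the /dev/iop0 node name
 * is an assumption.  As in the handler above, the iovec describes the
 * caller's buffer, and iov_len is clipped to the amount copied out.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/uio.h>
 *	#include <sys/endian.h>
 *	#include <dev/i2o/i2o.h>
 *	#include <dev/i2o/iopio.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	struct i2o_status st;
 *	struct iovec iov = { &st, sizeof(st) };
 *	int fd = open("/dev/iop0", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, IOPIOCGSTATUS, &iov) == 0)
 *		printf("IOP state %d\n",
 *		    (le32toh(st.segnumber) >> 16) & 0xff);
 */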
2567
2568 static int
2569 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2570 {
2571 struct iop_msg *im;
2572 struct i2o_msg *mf;
2573 struct ioppt_buf *ptb;
2574 int rv, i, mapped;
2575
2576 mf = NULL;
2577 im = NULL;
mapped = 0;
2579
2580 if (pt->pt_msglen > sc->sc_framesize ||
2581 pt->pt_msglen < sizeof(struct i2o_msg) ||
2582 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2583 pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2584 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2585 return (EINVAL);
2586
2587 for (i = 0; i < pt->pt_nbufs; i++)
2588 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2589 rv = ENOMEM;
2590 goto bad;
2591 }
2592
2593 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2594 if (mf == NULL)
2595 return (ENOMEM);
2596
2597 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2598 goto bad;
2599
2600 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2601 im->im_rb = (struct i2o_reply *)mf;
2602 mf->msgictx = IOP_ICTX;
2603 mf->msgtctx = im->im_tctx;
2604
2605 for (i = 0; i < pt->pt_nbufs; i++) {
2606 ptb = &pt->pt_bufs[i];
2607 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2608 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2609 if (rv != 0)
2610 goto bad;
2611 mapped = 1;
2612 }
2613
2614 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2615 goto bad;
2616
2617 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2618 if (i > sc->sc_framesize)
2619 i = sc->sc_framesize;
2620 if (i > pt->pt_replylen)
2621 i = pt->pt_replylen;
2622 rv = copyout(im->im_rb, pt->pt_reply, i);
2623
2624 bad:
2625 if (mapped != 0)
2626 iop_msg_unmap(sc, im);
2627 if (im != NULL)
2628 iop_msg_free(sc, im);
2629 if (mf != NULL)
2630 free(mf, M_DEVBUF);
2631 return (rv);
2632 }
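/*
 * Likewise, a hedged sketch of an IOPIOCPT call (frame contents
 * elided; `fd' as above).  The handler enforces: the frame fits the
 * IOP's frame size, 0 <= pt_nbufs <= IOP_MAX_MSG_XFERS, each buffer
 * is at most IOP_MAX_XFER bytes, the timeout lies between 1 second
 * and 5 minutes, and securelevel must be below 2.
 *
 *	struct ioppt pt;
 *	u_int32_t msg[32], reply[32];
 *
 *	... build the message frame in msg[] ...
 *	pt.pt_msg = msg;
 *	pt.pt_msglen = sizeof(msg);
 *	pt.pt_reply = reply;
 *	pt.pt_replylen = sizeof(reply);
 *	pt.pt_timo = 10000;
 *	pt.pt_nbufs = 0;
 *	if (ioctl(fd, IOPIOCPT, &pt) != 0)
 *		err(1, "IOPIOCPT");
 */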
2633