/*	$NetBSD: iop.c,v 1.10.2.17 2002/11/27 21:59:23 christos Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.10.2.17 2002/11/27 21:59:23 christos Exp $");

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

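/*
 * Busy-wait for `cond' to become true, checking roughly every 100us for
 * at most `ms' milliseconds.  The condition may carry side effects (the
 * callers below fold a bus_dmamap_sync() into it).
 */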
#define	POLL(ms, cond)				\
do {						\
	int i;					\
	for (i = (ms) * 10; i; i--) {		\
		if (cond)			\
			break;			\
		DELAY(100);			\
	}					\
} while (/* CONSTCOND */0);

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#define	COMMENT(x)	NULL
#else
#define	IFVERBOSE(x)
#define	COMMENT(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

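/*
 * Worst-case segment count: one segment per page of the largest
 * transfer, plus one to cover a transfer that doesn't begin on a page
 * boundary.
 */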
#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

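/*
 * A transaction context (tctx) carries the wrapper's index into sc_ims
 * in the low IOP_TCTX_SHIFT bits and a per-allocation generation number
 * above them (see iop_msg_alloc()); the generation lets
 * iop_handle_reply() reject replies that name a recycled wrapper.
 */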
#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static void *iop_sdh;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char	*ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t	iop_inl_msg(struct iop_softc *, int);
static inline void	iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);
static int	iop_vendor_print(void *, const char *);

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
		    u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
		    struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
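	/* The IOP reports the frame size in 32-bit words; we keep bytes. */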
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		printf("%s: frame size too small (%d)\n",
		    sc->sc_dv.dv_xname, sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0, state++; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	return;

 bail_out:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    iop->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);
		if (iop_systab == NULL) {
			printf("%s: memory allocation failure\n",
			    sc->sc_dv.dv_xname);
			return;
		}

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    30000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);

	/*
	 * Start device configuration.
	 */
	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	if ((rv = iop_reconfigure(sc, 0)) == -1) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		return;
	}
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct lwp *l;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
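	/*
	 * Ask only to be notified when the LCT's change indicator moves
	 * past the copy we already hold.
	 */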
	chgind = sc->sc_chgind + 1;
	l = curlwp;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(l);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(l);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (sc->sc_tidmap == NULL)
		return (ENOMEM);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0)
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

static int
iop_vendor_print(void *aux, const char *pnp)
{

	return (QUIET);
}

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return (config_match(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff) {
		printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
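	/*
	 * Append a single SIMPLE SG element describing the status word,
	 * and grow the message size field (the upper 16 bits of mb[0])
	 * by the element's two words.
	 */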
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curlwp);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curlwp);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
    u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
    int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct i2o_reply *rf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;
	im->im_rb = rf;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curlwp);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curlwp);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (rf->reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		free(rf, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	PHOLD(curlwp);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curlwp);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
    int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curlwp);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curlwp);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

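		/*
		 * msgtctx lives at byte offset 12 in the original frame,
		 * after msgflags, msgfunc and msgictx.
		 */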
		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
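			/*
			 * The reply size sits in the upper 16 bits of
			 * msgflags, in words; shifting by 14 converts it
			 * straight to bytes, and the mask keeps it
			 * word-aligned.
			 */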
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?  The
	 * SGL starts at word `off'; each SIMPLE element consumes two
	 * words (flags/length and physical address).
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
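	/* Each SIMPLE element is two words; grow the size field to match. */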
1968 mb[0] += (nsegs << 17);
1969 return (0);
1970
1971 bad:
1972 if (xn != 0)
1973 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1974 return (rv);
1975 }
1976
1977 /*
1978 * Map a block I/O data transfer (different in that there's only one per
1979 * message maximum, and PAGE addressing may be used). Write a scatter
1980 * gather list into the message frame.
1981 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
	if (rv != 0)
		return (rv);

	off = mb[0] >> 16;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
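
	/*
	 * A worked example of the capacity computation above (figures are
	 * illustrative): with a 128-byte frame, sc_framesize >> 2 gives 32
	 * words; if the header so far occupies off = 10 words, then
	 * (32 - 10) >> 1 leaves room for 11 two-word SIMPLE elements.
	 */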

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
			slen = ds->ds_len;
			saddr = ds->ds_addr;

			while (slen > 0) {
				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
				tlen = min(eaddr - saddr, slen);
				slen -= tlen;
				*p++ = htole32(saddr);
				saddr = eaddr;
				nsegs++;
			}
		}

		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		if (out)
			mb[off] |= I2O_SGL_DATA_OUT;
	} else {
		p = mb + off;
		nsegs = dm->dm_nsegs;

		if (out)
			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
		else
			flg = I2O_SGL_SIMPLE;

		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
			p[0] = (u_int32_t)ds->ds_len | flg;
			p[1] = (u_int32_t)ds->ds_addr;
		}

		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		p[1] = (u_int32_t)ds->ds_addr;
		nsegs <<= 1;
	}

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * Adjust the SGL offset and total message size fields.  We don't
	 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
	 */
	mb[0] += ((off << 4) + (nsegs << 16));
	return (0);
}
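
/*
 * For reference, the two SGL layouts produced above (sizes in 32-bit
 * words; derived from the code rather than quoted from the I2O spec):
 * a SIMPLE list costs two words per DMA segment (length plus flags,
 * then the physical address), whereas a PAGE_LIST costs one header word
 * (total length plus flags) and then one word per physical page, which
 * is why PAGE_LIST wins for highly fragmented transfers.
 */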

/*
 * Unmap all data transfers associated with a message wrapper.
 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

#ifdef I2ODEBUG
	if (im->im_xfer[0].ix_size == 0)
		panic("iop_msg_unmap: no transfers mapped");
#endif

	for (ix = im->im_xfer, i = 0;;) {
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
		if ((++ix)->ix_size == 0)
			break;
		if (++i >= IOP_MAX_MSG_XFERS)
			break;
	}
}

/*
 * Post a message frame to the IOP's inbound queue.
 */
int
iop_post(struct iop_softc *sc, u_int32_t *mb)
{
	u_int32_t mfa;
	int s;

#ifdef I2ODEBUG
	if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
		panic("iop_post: frame too large");
#endif

	s = splbio();

	/* Allocate a slot with the IOP. */
	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
			splx(s);
			printf("%s: mfa not forthcoming\n",
			    sc->sc_dv.dv_xname);
			return (EAGAIN);
		}

	/* Perform reply buffer DMA synchronisation. */
	if (sc->sc_curib++ == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);

	/* Copy out the message frame. */
	bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
	    mb[0] >> 16);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
	    (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	splx(s);
	return (0);
}
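
/*
 * As a concrete illustration of the frame-header conventions iop_post()
 * relies on, this is roughly how a caller could post a four-word UTIL
 * NOP by hand (a sketch mirroring the frame that iop_release_mfa()
 * builds below; a NOP solicits no reply, so both contexts are zero):
 *
 *	u_int32_t mb[4];
 *
 *	mb[0] = I2O_VERSION_11 | (4 << 16);	(version + size in words)
 *	mb[1] = I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP);
 *	mb[2] = 0;				(initiator context)
 *	mb[3] = 0;				(transaction context)
 *	(void)iop_post(sc, mb);
 */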

/*
 * Post a message to the IOP and deal with completion.
 */
int
iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
{
	u_int32_t *mb;
	int rv, s;

	mb = xmb;

	/* Terminate the scatter/gather list chain. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
		if ((im->im_flags & IM_POLL) != 0)
			iop_msg_poll(sc, im, timo);
		else
			iop_msg_wait(sc, im, timo);

		s = splbio();
		if ((im->im_flags & IM_REPLIED) != 0) {
			if ((im->im_flags & IM_NOSTATUS) != 0)
				rv = 0;
			else if ((im->im_flags & IM_FAIL) != 0)
				rv = ENXIO;
			else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
				rv = EIO;
			else
				rv = 0;
		} else
			rv = EBUSY;
		splx(s);
	} else
		rv = 0;

	return (rv);
}

/*
 * Spin until the specified message is replied to.
 */
static void
iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int32_t rmfa;
	int s, status;

	s = splbio();

	/* Wait for completion; timo is in ms, polled in 100us steps. */
	for (timo *= 10; timo != 0; timo--) {
		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
			/* Double read to account for IOP bug. */
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa != IOP_MFA_EMPTY) {
				status = iop_handle_reply(sc, rmfa);

				/*
				 * Return the reply frame to the IOP's
				 * outbound FIFO.
				 */
				iop_outl(sc, IOP_REG_OFIFO, rmfa);
			}
		}
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		DELAY(100);
	}

	if (timo == 0) {
#ifdef I2ODEBUG
		printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
		if (iop_status_get(sc, 1) != 0)
			printf("iop_msg_poll: unable to retrieve status\n");
		else
			printf("iop_msg_poll: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
#endif
	}

	splx(s);
}

/*
 * Sleep until the specified message is replied to.
 */
static void
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	int s, rv;

	s = splbio();
	if ((im->im_flags & IM_REPLIED) != 0) {
		splx(s);
		return;
	}
	rv = tsleep(im, PRIBIO, "iopmsg", mstohz(timo));
	splx(s);

#ifdef I2ODEBUG
	if (rv != 0) {
		printf("iop_msg_wait: tsleep() == %d\n", rv);
		if (iop_status_get(sc, 0) != 0)
			printf("iop_msg_wait: unable to retrieve status\n");
		else
			printf("iop_msg_wait: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
	}
#endif
}

/*
 * Release an unused message frame back to the IOP's inbound FIFO.
 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op. */
	iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
	iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	iop_outl_msg(sc, mfa + 8, 0);
	iop_outl_msg(sc, mfa + 12, 0);

	iop_outl(sc, IOP_REG_IFIFO, mfa);
}

#ifdef I2ODEBUG
/*
 * Dump a reply frame header.
 */
static void
iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
{
	u_int function, detail;
#ifdef I2OVERBOSE
	const char *statusstr;
#endif

	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
	detail = le16toh(rb->detail);

	printf("%s: reply:\n", sc->sc_dv.dv_xname);

#ifdef I2OVERBOSE
	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: function=0x%02x status=0x%02x (%s)\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
#else
	printf("%s: function=0x%02x status=0x%02x\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus);
#endif
	printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
	    sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
	    le32toh(rb->msgtctx));
	printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
	    (le32toh(rb->msgflags) >> 8) & 0xff);
}
#endif

/*
 * Dump a transport failure reply.
 */
static void
iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
{

	printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);

	printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
	printf("%s: failurecode=0x%02x severity=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
	printf("%s: highestver=0x%02x lowestver=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
}

/*
 * Translate an I2O ASCII field into a C string.
 */
void
iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
{
	int hc, lc, i, nit;

	dlen--;
	lc = 0;
	hc = 0;
	i = 0;

	/*
	 * DPT uses NUL as a space, whereas AMI uses it as a terminator.
	 * The spec has nothing to say about it.  Since AMI fields are
	 * usually filled with junk after the terminator, we stop at the
	 * first NUL unless the IOP is from DPT.
	 */
	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);

	while (slen-- != 0 && dlen-- != 0) {
		if (nit && *src == '\0')
			break;
		else if (*src <= 0x20 || *src >= 0x7f) {
			if (hc)
				dst[i++] = ' ';
		} else {
			hc = 1;
			dst[i++] = *src;
			lc = i;
		}
		src++;
	}

	dst[lc] = '\0';
}
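
/*
 * For example (illustrative input): given the field "FOO\0junk", an
 * AMI-style IOP yields the C string "FOO" (the NUL terminates), while a
 * DPT IOP yields "FOO junk" (the NUL is treated as a space).  Other
 * unprintable characters map to spaces, and trailing spaces are trimmed
 * via the "lc" index.
 */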

/*
 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
 */
int
iop_print_ident(struct iop_softc *sc, int tid)
{
	struct {
		struct i2o_param_op_results pr;
		struct i2o_param_read_results prr;
		struct i2o_param_device_identity di;
	} __attribute__ ((__packed__)) p;
	char buf[32];
	int rv;

	rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
	    sizeof(p), NULL);
	if (rv != 0)
		return (rv);

	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
	    sizeof(buf));
	printf(" <%s, ", buf);
	iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
	    sizeof(buf));
	printf("%s, ", buf);
	iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
	printf("%s>", buf);

	return (0);
}

/*
 * Claim or unclaim the specified TID.
 */
int
iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
    int flags)
{
	struct iop_msg *im;
	struct i2o_util_claim mf;
	int rv, func;

	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
	im = iop_msg_alloc(sc, IM_WAIT);

	/* We can use the same structure, as they're identical. */
	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = flags;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}
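
/*
 * Typical use (a sketch, after the fashion of the ld(4) attachment glue;
 * the PRIMARY_USER claim flag is what a block storage OSM would pass):
 *
 *	rv = iop_util_claim(sc, ii, 0, I2O_UTIL_CLAIM_PRIMARY_USER);
 *	...
 *	rv = iop_util_claim(sc, ii, 1, I2O_UTIL_CLAIM_PRIMARY_USER);
 */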

/*
 * Perform an abort.
 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
{
	struct iop_msg *im;
	struct i2o_util_abort mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = (func << 24) | flags;
	mf.tctxabort = tctxabort;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}
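
/*
 * For instance, to abort one specific outstanding transaction (a minimal
 * sketch; "tctx" is assumed to be the im_tctx of the message to kill,
 * and an abort type of 0 is assumed to request an exact-match abort):
 *
 *	rv = iop_util_abort(sc, ii, 0, tctx, 0);
 */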

/*
 * Enable or disable reception of events for the specified device.
 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
{
	struct i2o_util_event_register mf;

	mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = 0;
	mf.eventmask = mask;

	/* This message is replied to only when events are signalled. */
	return (iop_post(sc, (u_int32_t *)&mf));
}
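
/*
 * Passing a zero mask turns event reception back off; for example
 * (sketch), a detaching initiator might do:
 *
 *	(void)iop_util_eventreg(sc, ii, 0);
 */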

int
iopopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;

	if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & IOP_ONLINE) == 0)
		return (ENXIO);
	if ((sc->sc_flags & IOP_OPEN) != 0)
		return (EBUSY);
	sc->sc_flags |= IOP_OPEN;

	return (0);
}

int
iopclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;

	sc = device_lookup(&iop_cd, minor(dev));
	sc->sc_flags &= ~IOP_OPEN;

	return (0);
}

int
iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct iop_softc *sc;
	struct iovec *iov;
	int rv, i;

	if (securelevel >= 2)
		return (EPERM);

	sc = device_lookup(&iop_cd, minor(dev));

	switch (cmd) {
	case IOPIOCPT:
		return (iop_passthrough(sc, (struct ioppt *)data, p));

	case IOPIOCGSTATUS:
		iov = (struct iovec *)data;
		i = sizeof(struct i2o_status);
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		if ((rv = iop_status_get(sc, 0)) == 0)
			rv = copyout(&sc->sc_status, iov->iov_base, i);
		return (rv);

	case IOPIOCGLCT:
	case IOPIOCGTIDMAP:
	case IOPIOCRECONFIG:
		break;

	default:
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
#endif
		return (ENOTTY);
	}

	if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
		return (rv);

	switch (cmd) {
	case IOPIOCGLCT:
		iov = (struct iovec *)data;
		i = le16toh(sc->sc_lct->tablesize) << 2;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_lct, iov->iov_base, i);
		break;

	case IOPIOCRECONFIG:
		rv = iop_reconfigure(sc, 0);
		break;

	case IOPIOCGTIDMAP:
		iov = (struct iovec *)data;
		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
		break;
	}

	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
	return (rv);
}
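
/*
 * From userland, the iovec-style ioctls look roughly like this (a
 * sketch of what iopctl(8) might do; error handling elided, and "fd" is
 * assumed to be an open /dev/iopN):
 *
 *	struct i2o_status st;
 *	struct iovec iov;
 *
 *	iov.iov_base = &st;
 *	iov.iov_len = sizeof(st);
 *	if (ioctl(fd, IOPIOCGSTATUS, &iov) == 0)
 *		printf("IOP state %d\n",
 *		    (le32toh(st.segnumber) >> 16) & 0xff);
 */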

static int
iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
{
	struct iop_msg *im;
	struct i2o_msg *mf;
	struct ioppt_buf *ptb;
	int rv, i, mapped;

	mf = NULL;
	im = NULL;
	mapped = 0;

	if (pt->pt_msglen > sc->sc_framesize ||
	    pt->pt_msglen < sizeof(struct i2o_msg) ||
	    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
	    pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
	    pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
		return (EINVAL);

	for (i = 0; i < pt->pt_nbufs; i++)
		if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
			rv = ENOMEM;
			goto bad;
		}

	mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
	if (mf == NULL)
		return (ENOMEM);

	if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
		goto bad;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
	im->im_rb = (struct i2o_reply *)mf;
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	for (i = 0; i < pt->pt_nbufs; i++) {
		ptb = &pt->pt_bufs[i];
		rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
		    ptb->ptb_datalen, ptb->ptb_out != 0, p);
		if (rv != 0)
			goto bad;
		mapped = 1;
	}

	if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
		goto bad;

	i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
	if (i > sc->sc_framesize)
		i = sc->sc_framesize;
	if (i > pt->pt_replylen)
		i = pt->pt_replylen;
	rv = copyout(im->im_rb, pt->pt_reply, i);

 bad:
	if (mapped != 0)
		iop_msg_unmap(sc, im);
	if (im != NULL)
		iop_msg_free(sc, im);
	if (mf != NULL)
		free(mf, M_DEVBUF);
	return (rv);
}
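
/*
 * A corresponding userland sketch for IOPIOCPT (illustrative only; the
 * message buffer, data buffer and their lengths are assumptions):
 *
 *	struct ioppt pt;
 *
 *	pt.pt_msg = msgbuf;			(pre-built I2O frame)
 *	pt.pt_msglen = msglen;
 *	pt.pt_reply = replybuf;
 *	pt.pt_replylen = sizeof(replybuf);
 *	pt.pt_timo = 10000;			(ms; checked against 1000..5*60*1000)
 *	pt.pt_nbufs = 1;
 *	pt.pt_bufs[0].ptb_data = databuf;
 *	pt.pt_bufs[0].ptb_datalen = databuflen;
 *	pt.pt_bufs[0].ptb_out = 0;		(transfer is IOP to host)
 *	rv = ioctl(fd, IOPIOCPT, &pt);
 */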