/*	$NetBSD: iop.c,v 1.32 2002/12/11 13:02:31 ad Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.32 2002/12/11 13:02:31 ad Exp $");

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#define	POLL(ms, cond)					\
do {							\
	int i;						\
	for (i = (ms) * 10; i; i--) {			\
		if (cond)				\
			break;				\
		DELAY(100);				\
	}						\
} while (/* CONSTCOND */0)
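
/*
 * POLL() busy-waits in 100us steps until `cond' becomes true, for at
 * most `ms' milliseconds.  An illustrative use (see iop_reset() for a
 * real caller):
 *
 *	POLL(1000, (iop_inl(sc, IOP_REG_IFIFO) != IOP_MFA_EMPTY));
 *
 * spins for up to one second waiting for the inbound FIFO to yield a
 * message frame address.
 */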

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#define	COMMENT(x)	NULL
#else
#define	IFVERBOSE(x)
#define	COMMENT(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)
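
/*
 * A transaction context (im_tctx) is split into two fields: the low
 * IOP_TCTX_SHIFT bits index the message wrapper within sc_ims, and the
 * high bits carry a generation number bumped on each allocation by
 * iop_msg_alloc().  An illustrative sketch of the lookup performed by
 * iop_handle_reply():
 *
 *	im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
 *	if (im->im_tctx != tctx)
 *		(stale or corrupt context; reject the reply)
 */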

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long	iop_ictxhash;
static void	*iop_sdh;
static struct	i2o_systab *iop_systab;
static int	iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char	*ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		IFVERBOSE("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t iop_inl_msg(struct iop_softc *, int);
static inline void	iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
		    u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
		    struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		printf("%s: frame size too small (%d)\n",
		    sc->sc_dv.dv_xname, sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0, state++; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	return;

 bail_out:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    30000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_print, iop_submatch);

	/*
	 * Start device configuration.
	 */
	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	if ((rv = iop_reconfigure(sc, 0)) == -1) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		return;
	}
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(sc->sc_reconf_proc);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(sc->sc_reconf_proc);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0)
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return (config_match(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff) {
		printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curproc);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curproc);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, and the caller must be prepared
 * to wait indefinitely for completion.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
    u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}
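
/*
 * An illustrative walk of the cached LCT, in the style of
 * iop_reconfigure() and iop_configure_devices():
 *
 *	const struct i2o_lct_entry *le;
 *	int i, tid;
 *
 *	for (i = 0, le = sc->sc_lct->entry; i < sc->sc_nlctent; i++, le++) {
 *		tid = le16toh(le->localtid) & 4095;
 *		(inspect le->classid, le->usertid, ...)
 *	}
 */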

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
    int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct i2o_reply *rf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;
	im->im_rb = rf;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curproc);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curproc);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (rf->reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		free(rf, M_DEVBUF);
	}

	return (rv);
}
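
/*
 * An illustrative synchronous use (the group number and the layout of
 * `buf' are hypothetical; both are dictated by the parameter group
 * being read):
 *
 *	if (iop_field_get_all(sc, tid, group, &buf, sizeof(buf), NULL) != 0)
 *		(I/O error; buffer contents are undefined)
 *
 * With a NULL initiator the call sleeps for completion and releases the
 * reply resources itself; with an initiator it returns immediately and
 * the reply is handed to the initiator's interrupt handler, with the
 * parameter group op reachable through `im_dvcontext'.
 */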

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curproc);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
    int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}
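
/*
 * For example, iop_shutdown() quiesces each active IOP with:
 *
 *	iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
 *	    0, 5000);
 *
 * i.e. a polled (async == 0) command addressed to the executive TID,
 * with a five second timeout.
 */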

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curproc);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}
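
/*
 * An illustrative sketch of a child-device initiator, modelled on the
 * event initiator set up in iop_config_interrupts().  The handler name
 * `mydev_intr' is hypothetical:
 *
 *	ii->ii_dv = dv;			(the initiator's device)
 *	ii->ii_intr = mydev_intr;	(reply handler)
 *	ii->ii_flags = 0;
 *	ii->ii_tid = tid;		(target's TID from the LCT)
 *	iop_initiator_register(sc, ii);
 *
 * Replies carrying this initiator's ii_ictx are then dispatched to
 * mydev_intr() by iop_handle_reply().
 */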

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) >= sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}
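
/*
 * The typical wrapper life cycle, as used throughout this file (see
 * e.g. iop_hrt_get0()):
 *
 *	im = iop_msg_alloc(sc, IM_WAIT);
 *	(build the message frame, quoting im->im_tctx)
 *	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
 *	rv = iop_msg_post(sc, im, mb, 30000);
 *	iop_msg_unmap(sc, im);
 *	iop_msg_free(sc, im);
 */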

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
	mb[0] += (nsegs << 17);
	return (0);

 bad:
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}
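
/*
 * Each SIMPLE element written above is two 32-bit words: a flag word
 * holding the segment's byte count OR'ed with I2O_SGL_* flags, then the
 * segment's physical address.  Sketch of an n-segment list:
 *
 *	len0 | I2O_SGL_SIMPLE [| I2O_SGL_DATA_OUT]
 *	addr0
 *	...
 *	lenN-1 | flags | I2O_SGL_END_BUFFER
 *	addrN-1
 */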
1966
1967 /*
1968 * Map a block I/O data transfer (different in that there's only one per
1969 * message maximum, and PAGE addressing may be used). Write a scatter
1970 * gather list into the message frame.
1971 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
	if (rv != 0)
		return (rv);

	off = mb[0] >> 16;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
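	/*
	 * (A PAGE_LIST element is a single header word giving flags and
	 * the byte count, followed by one 32-bit address per page, so it
	 * packs more segments into the frame than SIMPLE elements do.)
	 */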
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
			slen = ds->ds_len;
			saddr = ds->ds_addr;

			while (slen > 0) {
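				/* Split the segment at the next page
				 * boundary. */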
				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
				tlen = min(eaddr - saddr, slen);
				slen -= tlen;
				*p++ = htole32(saddr);
				saddr = eaddr;
				nsegs++;
			}
		}

		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		if (out)
			mb[off] |= I2O_SGL_DATA_OUT;
	} else {
		p = mb + off;
		nsegs = dm->dm_nsegs;

		if (out)
			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
		else
			flg = I2O_SGL_SIMPLE;

		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
			p[0] = (u_int32_t)ds->ds_len | flg;
			p[1] = (u_int32_t)ds->ds_addr;
		}

		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		p[1] = (u_int32_t)ds->ds_addr;
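		/* Two frame words per SIMPLE element. */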
		nsegs <<= 1;
	}

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * Adjust the SGL offset and total message size fields.  We don't
	 * set IM_SGLOFFADJ, since the SGL has already been terminated
	 * with I2O_SGL_END here; iop_msg_post() need not do it.
	 */
	mb[0] += ((off << 4) + (nsegs << 16));
	return (0);
}

/*
 * Unmap all data transfers associated with a message wrapper.
 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

#ifdef I2ODEBUG
	if (im->im_xfer[0].ix_size == 0)
		panic("iop_msg_unmap: no transfers mapped");
#endif

	for (ix = im->im_xfer, i = 0;;) {
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);

		/*
		 * Check the bound before advancing, so that we never read
		 * one element past the end of im_xfer[].
		 */
		if (++i >= IOP_MAX_MSG_XFERS)
			break;
		if ((++ix)->ix_size == 0)
			break;
	}
}

/*
 * Post a message frame to the IOP's inbound queue.
 */
int
iop_post(struct iop_softc *sc, u_int32_t *mb)
{
	u_int32_t mfa;
	int s;

#ifdef I2ODEBUG
	if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
		panic("iop_post: frame too large");
#endif

	s = splbio();

	/* Allocate a slot with the IOP. */
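	/* (Try twice: the first read may transiently return
	 * IOP_MFA_EMPTY; cf. the double read in iop_msg_poll().) */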
	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
			splx(s);
			printf("%s: mfa not forthcoming\n",
			    sc->sc_dv.dv_xname);
			return (EAGAIN);
		}

	/* Perform reply buffer DMA synchronisation. */
	if (sc->sc_curib++ == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);

	/* Copy out the message frame. */
	bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
	    mb[0] >> 16);
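	/* (mb[0] >> 16 is the frame size in words; the barrier length
	 * below, (mb[0] >> 14) & ~3, is the same size in bytes.) */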
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
	    (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	splx(s);
	return (0);
}

/*
 * Post a message to the IOP and deal with completion.
 */
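/*
 * (Typical caller sequence, as in iop_util_claim() below:
 *
 *	im = iop_msg_alloc(sc, IM_WAIT);
 *	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
 *	mf.msgfunc = I2O_MSGFUNC(tid, func);
 *	mf.msgictx = ictx;
 *	mf.msgtctx = im->im_tctx;
 *	rv = iop_msg_post(sc, im, &mf, 5000);
 *	iop_msg_free(sc, im);
 * )
 */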
int
iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
{
	u_int32_t *mb;
	int rv, s;

	mb = xmb;

	/*
	 * Terminate the scatter/gather list chain: the flag word of the
	 * final SIMPLE element sits two words from the end of the frame.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
		if ((im->im_flags & IM_POLL) != 0)
			iop_msg_poll(sc, im, timo);
		else
			iop_msg_wait(sc, im, timo);

		s = splbio();
		if ((im->im_flags & IM_REPLIED) != 0) {
			if ((im->im_flags & IM_NOSTATUS) != 0)
				rv = 0;
			else if ((im->im_flags & IM_FAIL) != 0)
				rv = ENXIO;
			else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
				rv = EIO;
			else
				rv = 0;
		} else
			rv = EBUSY;
		splx(s);
	} else
		rv = 0;

	return (rv);
}

/*
 * Spin until the specified message is replied to.
 */
static void
iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int32_t rmfa;
	int s, status;

	s = splbio();

	/* Wait for completion. */
	for (timo *= 10; timo != 0; timo--) {
		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
			/* Double read to account for IOP bug. */
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa != IOP_MFA_EMPTY) {
				status = iop_handle_reply(sc, rmfa);

				/*
				 * Return the reply frame to the IOP's
				 * outbound FIFO.
				 */
				iop_outl(sc, IOP_REG_OFIFO, rmfa);
			}
		}
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		DELAY(100);
	}

	if (timo == 0) {
#ifdef I2ODEBUG
		printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
		if (iop_status_get(sc, 1) != 0)
			printf("iop_msg_poll: unable to retrieve status\n");
		else
			printf("iop_msg_poll: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
#endif
	}

	splx(s);
}

/*
 * Sleep until the specified message is replied to.
 */
static void
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	int s, rv;

	s = splbio();
	if ((im->im_flags & IM_REPLIED) != 0) {
		splx(s);
		return;
	}
	rv = tsleep(im, PRIBIO, "iopmsg", mstohz(timo));
	splx(s);

#ifdef I2ODEBUG
	if (rv != 0) {
		printf("iop_msg_wait: tsleep() == %d\n", rv);
		if (iop_status_get(sc, 0) != 0)
			printf("iop_msg_wait: unable to retrieve status\n");
		else
			printf("iop_msg_wait: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
	}
#endif
}

/*
 * Release an unused message frame back to the IOP's inbound FIFO.
 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op. */
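	/* (The header word encodes I2O_VERSION_11 and a length of four
	 * 32-bit words, matching the four words written here.) */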
	iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
	iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	iop_outl_msg(sc, mfa + 8, 0);
	iop_outl_msg(sc, mfa + 12, 0);

	iop_outl(sc, IOP_REG_IFIFO, mfa);
}

#ifdef I2ODEBUG
/*
 * Dump a reply frame header.
 */
static void
iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
{
	u_int function, detail;
#ifdef I2OVERBOSE
	const char *statusstr;
#endif

	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
	detail = le16toh(rb->detail);

	printf("%s: reply:\n", sc->sc_dv.dv_xname);

#ifdef I2OVERBOSE
	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: function=0x%02x status=0x%02x (%s)\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
#else
	printf("%s: function=0x%02x status=0x%02x\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus);
#endif
	printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
	    sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
	    le32toh(rb->msgtctx));
	printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
	    (le32toh(rb->msgflags) >> 8) & 0xff);
}
#endif

/*
 * Dump a transport failure reply.
 */
static void
iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
{

	printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);

	printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
	printf("%s: failurecode=0x%02x severity=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
	printf("%s: highestver=0x%02x lowestver=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
}

/*
 * Translate an I2O ASCII field into a C string.
 */
void
iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
{
	int hc, lc, i, nit;

	dlen--;
	lc = 0;
	hc = 0;
	i = 0;

	/*
	 * DPT use NUL as a space, whereas AMI use it as a terminator.  The
	 * spec has nothing to say about it.  Since AMI fields are usually
	 * filled with junk after the terminator, stop at the first NUL
	 * unless the IOP was made by DPT.
	 */
	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);

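	/* (E.g. a field holding "RAID\0<junk>" from a non-DPT IOP yields
	 * "RAID"; trailing blanks and control characters are trimmed in
	 * all cases.) */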
	while (slen-- != 0 && dlen-- != 0) {
		if (nit && *src == '\0')
			break;
		else if (*src <= 0x20 || *src >= 0x7f) {
			if (hc)
				dst[i++] = ' ';
		} else {
			hc = 1;
			dst[i++] = *src;
			lc = i;
		}
		src++;
	}

	dst[lc] = '\0';
}

/*
 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
 */
int
iop_print_ident(struct iop_softc *sc, int tid)
{
	struct {
		struct i2o_param_op_results pr;
		struct i2o_param_read_results prr;
		struct i2o_param_device_identity di;
	} __attribute__ ((__packed__)) p;
	char buf[32];
	int rv;

	rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
	    sizeof(p), NULL);
	if (rv != 0)
		return (rv);

	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
	    sizeof(buf));
	printf(" <%s, ", buf);
	iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
	    sizeof(buf));
	printf("%s, ", buf);
	iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
	printf("%s>", buf);

	return (0);
}

/*
 * Claim or unclaim the specified TID.
 */
int
iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
    int flags)
{
	struct iop_msg *im;
	struct i2o_util_claim mf;
	int rv, func;

	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
	im = iop_msg_alloc(sc, IM_WAIT);

	/* We can use the same structure, as they're identical. */
	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = flags;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Perform an abort.
 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
{
	struct iop_msg *im;
	struct i2o_util_abort mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = (func << 24) | flags;
	mf.tctxabort = tctxabort;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Enable or disable reception of events for the specified device.
 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
{
	struct i2o_util_event_register mf;

	mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = 0;
	mf.eventmask = mask;

	/* This message is replied to only when events are signalled. */
	return (iop_post(sc, (u_int32_t *)&mf));
}

int
iopopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;

	if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & IOP_ONLINE) == 0)
		return (ENXIO);
	if ((sc->sc_flags & IOP_OPEN) != 0)
		return (EBUSY);
	sc->sc_flags |= IOP_OPEN;

	return (0);
}

int
iopclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;

	sc = device_lookup(&iop_cd, minor(dev));
	sc->sc_flags &= ~IOP_OPEN;

	return (0);
}

int
iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct iop_softc *sc;
	struct iovec *iov;
	int rv, i;

	if (securelevel >= 2)
		return (EPERM);

	sc = device_lookup(&iop_cd, minor(dev));

	switch (cmd) {
	case IOPIOCPT:
		return (iop_passthrough(sc, (struct ioppt *)data, p));

	case IOPIOCGSTATUS:
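		/* Copy out at most as much as the caller's buffer holds,
		 * and report the amount actually copied. */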
		iov = (struct iovec *)data;
		i = sizeof(struct i2o_status);
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		if ((rv = iop_status_get(sc, 0)) == 0)
			rv = copyout(&sc->sc_status, iov->iov_base, i);
		return (rv);

	case IOPIOCGLCT:
	case IOPIOCGTIDMAP:
	case IOPIOCRECONFIG:
		break;

	default:
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
#endif
		return (ENOTTY);
	}

	if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
		return (rv);

	switch (cmd) {
	case IOPIOCGLCT:
		iov = (struct iovec *)data;
		i = le16toh(sc->sc_lct->tablesize) << 2;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_lct, iov->iov_base, i);
		break;

	case IOPIOCRECONFIG:
		rv = iop_reconfigure(sc, 0);
		break;

	case IOPIOCGTIDMAP:
		iov = (struct iovec *)data;
		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
		break;
	}

	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
	return (rv);
}

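/*
 * Execute a message pass-through from user space (IOPIOCPT).
 */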
static int
iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
{
	struct iop_msg *im;
	struct i2o_msg *mf;
	struct ioppt_buf *ptb;
	int rv, i, mapped;

	mf = NULL;
	im = NULL;
	mapped = 0;

	if (pt->pt_msglen > sc->sc_framesize ||
	    pt->pt_msglen < sizeof(struct i2o_msg) ||
	    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
	    pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
	    pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
		return (EINVAL);

	for (i = 0; i < pt->pt_nbufs; i++)
		if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
			rv = ENOMEM;
			goto bad;
		}

	mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
	if (mf == NULL)
		return (ENOMEM);

	if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
		goto bad;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
	im->im_rb = (struct i2o_reply *)mf;
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	for (i = 0; i < pt->pt_nbufs; i++) {
		ptb = &pt->pt_bufs[i];
		rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
		    ptb->ptb_datalen, ptb->ptb_out != 0, p);
		if (rv != 0)
			goto bad;
		mapped = 1;
	}

	if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
		goto bad;

	i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
	if (i > sc->sc_framesize)
		i = sc->sc_framesize;
	if (i > pt->pt_replylen)
		i = pt->pt_replylen;
	rv = copyout(im->im_rb, pt->pt_reply, i);

 bad:
	if (mapped != 0)
		iop_msg_unmap(sc, im);
	if (im != NULL)
		iop_msg_free(sc, im);
	if (mf != NULL)
		free(mf, M_DEVBUF);
	return (rv);
}