/*	$NetBSD: iop.c,v 1.55 2006/04/14 20:16:02 christos Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.55 2006/04/14 20:16:02 christos Exp $");

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#include "locators.h"

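/*
 * Busy-wait for a condition to become true, polling every 100us for at
 * most `ms' milliseconds.
 */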
#define POLL(ms, cond)					\
do {							\
	int xi;						\
	for (xi = (ms) * 10; xi; xi--) {		\
		if (cond)				\
			break;				\
		DELAY(100);				\
	}						\
} while (/* CONSTCOND */0);

#ifdef I2ODEBUG
#define DPRINTF(x)	printf x
#else
#define DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define IFVERBOSE(x)	x
#define COMMENT(x)	NULL
#else
#define IFVERBOSE(x)
#define COMMENT(x)
#endif

#define IOP_ICTXHASH_NBUCKETS	16
#define IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

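/*
 * Transaction context (tctx) layout: the low IOP_TCTX_SHIFT bits index the
 * message wrapper in sc_ims; the remaining bits carry a generation number
 * (see iop_msg_alloc()) that lets iop_handle_reply() detect stale replies.
 */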
#define IOP_TCTX_SHIFT	12
#define IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static void *iop_sdh;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};

#define IC_CONFIGURE	0x01
#define IC_PRIORITY	0x02

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char *ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		IFVERBOSE("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t iop_inl(struct iop_softc *, int);
static inline void iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t iop_inl_msg(struct iop_softc *, int);
static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *, size_t);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
		    u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
		    struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_sys_enable(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

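/*
 * Register access helpers.  All IOP register reads and writes go through
 * these so that the appropriate bus-space barriers are applied.
 */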
static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		printf("%s: frame size too small (%d)\n",
		    sc->sc_dv.dv_xname, sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	return;

 bail_out3:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
 bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;
	int locs[IOPCF_NLOCS];

	sc = device_private(self);
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = device_unit(&iop->sc_dv) + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_sys_enable(sc) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	locs[IOPCF_TID] = I2O_TID_IOP;
	config_found_sm_loc(self, "iop", locs, &ia, iop_print,
	    config_stdsubmatch);

	/*
	 * Start device configuration.
	 */
	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	if ((rv = iop_reconfigure(sc, 0)) == -1) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		return;
	}
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct lwp *l;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;
	l = curlwp;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(l);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(l);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0)
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
			config_detach(ii->ii_dv, DETACH_FORCE);
			continue;
		}

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(&sc->sc_dv, "iop", locs, &ia,
		    iop_print, config_stdsubmatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo, size_t l)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		snprintf(devinfo, l, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, l);
#else

	snprintf(devinfo, l, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

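	/* Poll for the sync byte; wait up to 25 * 100ms = ~2.5 seconds. */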
	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff) {
		printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
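	/* Account for the two SGL words just appended in the message size. */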
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: DMA map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: DMA create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curlwp);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curlwp);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
1150 * to wait indefinitely.
1151 */
1152 static int
1153 iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
1154 u_int32_t chgind)
1155 {
1156 struct iop_msg *im;
1157 struct i2o_exec_lct_notify *mf;
1158 int rv;
1159 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1160
1161 im = iop_msg_alloc(sc, IM_WAIT);
1162 memset(lct, 0, size);
1163
1164 mf = (struct i2o_exec_lct_notify *)mb;
1165 mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
1166 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
1167 mf->msgictx = IOP_ICTX;
1168 mf->msgtctx = im->im_tctx;
1169 mf->classid = I2O_CLASS_ANY;
1170 mf->changeindicator = chgind;
1171
1172 #ifdef I2ODEBUG
1173 printf("iop_lct_get0: reading LCT");
1174 if (chgind != 0)
1175 printf(" (async)");
1176 printf("\n");
1177 #endif
1178
1179 iop_msg_map(sc, im, mb, lct, size, 0, NULL);
1180 rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
1181 iop_msg_unmap(sc, im);
1182 iop_msg_free(sc, im);
1183 return (rv);
1184 }
1185
1186 /*
1187 * Read the IOP's logical configuration table.
1188 */
1189 int
1190 iop_lct_get(struct iop_softc *sc)
1191 {
1192 int esize, size, rv;
1193 struct i2o_lct *lct;
1194
1195 esize = le32toh(sc->sc_status.expectedlctsize);
1196 lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
1197 if (lct == NULL)
1198 return (ENOMEM);
1199
1200 if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
1201 free(lct, M_DEVBUF);
1202 return (rv);
1203 }
1204
1205 size = le16toh(lct->tablesize) << 2;
1206 if (esize != size) {
1207 free(lct, M_DEVBUF);
1208 lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
1209 if (lct == NULL)
1210 return (ENOMEM);
1211
1212 if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
1213 free(lct, M_DEVBUF);
1214 return (rv);
1215 }
1216 }
1217
1218 /* Swap in the new LCT. */
1219 if (sc->sc_lct != NULL)
1220 free(sc->sc_lct, M_DEVBUF);
1221 sc->sc_lct = lct;
1222 sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
1223 sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
1224 sizeof(struct i2o_lct_entry);
1225 return (0);
1226 }
1227
1228 /*
1229 * Post a SYS_ENABLE message to the adapter.
1230 */
1231 int
1232 iop_sys_enable(struct iop_softc *sc)
1233 {
1234 struct iop_msg *im;
1235 struct i2o_msg mf;
1236 int rv;
1237
1238 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
1239
1240 mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1241 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
1242 mf.msgictx = IOP_ICTX;
1243 mf.msgtctx = im->im_tctx;
1244
1245 rv = iop_msg_post(sc, im, &mf, 30000);
1246 if (rv == 0) {
1247 if ((im->im_flags & IM_FAIL) != 0)
1248 rv = ENXIO;
1249 else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
1250 (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
1251 im->im_detstatus == I2O_DSC_INVALID_REQUEST))
1252 rv = 0;
1253 else
1254 rv = EIO;
1255 }
1256
1257 iop_msg_free(sc, im);
1258 return (rv);
1259 }
1260
1261 /*
1262 * Request the specified parameter group from the target. If an initiator
1263 * is specified (a) don't wait for the operation to complete, but instead
1264 * let the initiator's interrupt handler deal with the reply and (b) place a
1265 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
1266 */
1267 int
1268 iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
1269 int size, struct iop_initiator *ii)
1270 {
1271 struct iop_msg *im;
1272 struct i2o_util_params_op *mf;
1273 int rv;
1274 struct iop_pgop *pgop;
1275 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1276
1277 im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
1278 if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
1279 iop_msg_free(sc, im);
1280 return (ENOMEM);
1281 }
1282 im->im_dvcontext = pgop;
1283
1284 mf = (struct i2o_util_params_op *)mb;
1285 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1286 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
1287 mf->msgictx = IOP_ICTX;
1288 mf->msgtctx = im->im_tctx;
1289 mf->flags = 0;
1290
1291 pgop->olh.count = htole16(1);
1292 pgop->olh.reserved = htole16(0);
1293 pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
1294 pgop->oat.fieldcount = htole16(0xffff);
1295 pgop->oat.group = htole16(group);
1296
1297 if (ii == NULL)
1298 PHOLD(curlwp);
1299
1300 memset(buf, 0, size);
1301 iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
1302 iop_msg_map(sc, im, mb, buf, size, 0, NULL);
1303 rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));
1304
1305 if (ii == NULL)
1306 PRELE(curlwp);
1307
	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	PHOLD(curlwp);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curlwp);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
    int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (device_unit(&sc->sc_dv) + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curlwp);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curlwp);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

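	/* The reply's bus address translates to an offset into sc_rep. */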
	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;
		im->im_detstatus = le16toh(rb->detail);

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
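			/*
			 * The message size field in msgflags is in 32-bit
			 * words (bits 16-31); convert it to a byte count.
			 */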
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
			if (ii)
				(*ii->ii_intr)(ii->ii_dv, im, rb);
		}
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		if (ii)
			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

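	/* Stamp a fresh generation number into the high tctx bits. */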
	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
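		/*
		 * Fold the current message size in words (bits 16-19 of
		 * the header) into the SGL offset nibble (bits 4-7): the
		 * SGL begins where the message currently ends.
		 */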
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
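	/* Each SIMPLE element is two words; grow the message size field. */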
1985 mb[0] += (nsegs << 17);
1986 return (0);
1987
1988 bad:
1989 if (xn != 0)
1990 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1991 return (rv);
1992 }
1993
1994 /*
1995 * Map a block I/O data transfer (different in that there's only one per
1996 * message maximum, and PAGE addressing may be used). Write a scatter
1997 * gather list into the message frame.
1998 */
1999 int
2000 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
2001 void *xferaddr, int xfersize, int out)
2002 {
2003 bus_dma_segment_t *ds;
2004 bus_dmamap_t dm;
2005 struct iop_xfer *ix;
2006 u_int rv, i, nsegs, off, slen, tlen, flg;
2007 paddr_t saddr, eaddr;
2008 u_int32_t *p;
2009
2010 #ifdef I2ODEBUG
2011 if (xfersize == 0)
2012 panic("iop_msg_map_bio: null transfer");
2013 if (xfersize > IOP_MAX_XFER)
2014 panic("iop_msg_map_bio: transfer too large");
2015 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2016 panic("iop_msg_map_bio: SGLOFFADJ");
2017 #endif
2018
2019 ix = im->im_xfer;
2020 dm = ix->ix_map;
2021 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
2022 BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
2023 if (rv != 0)
2024 return (rv);
2025
2026 off = mb[0] >> 16;
2027 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
2028
2029 /*
2030 * If the transfer is highly fragmented and won't fit using SIMPLE
2031 * elements, use PAGE_LIST elements instead. SIMPLE elements are
2032 * potentially more efficient, both for us and the IOP.
2033 */
2034 if (dm->dm_nsegs > nsegs) {
2035 nsegs = 1;
2036 p = mb + off + 1;
2037
		/* XXX This should be done with a bus_dmamap flag. */
2039 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2040 slen = ds->ds_len;
2041 saddr = ds->ds_addr;
2042
2043 while (slen > 0) {
2044 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2045 tlen = min(eaddr - saddr, slen);
2046 slen -= tlen;
				*p++ = htole32(saddr);	/* host to LE */
2048 saddr = eaddr;
2049 nsegs++;
2050 }
2051 }
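
		/*
		 * The PAGE_LIST element built above is one header word
		 * (byte count and flags, stored just below) followed by
		 * one word per physical page: a fully fragmented 16KB
		 * transfer with 4KB pages costs five words here, versus
		 * eight as four SIMPLE elements.
		 */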
2052
2053 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2054 I2O_SGL_END;
2055 if (out)
2056 mb[off] |= I2O_SGL_DATA_OUT;
2057 } else {
2058 p = mb + off;
2059 nsegs = dm->dm_nsegs;
2060
2061 if (out)
2062 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2063 else
2064 flg = I2O_SGL_SIMPLE;
2065
2066 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2067 p[0] = (u_int32_t)ds->ds_len | flg;
2068 p[1] = (u_int32_t)ds->ds_addr;
2069 }
2070
2071 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2072 I2O_SGL_END;
2073 p[1] = (u_int32_t)ds->ds_addr;
2074 nsegs <<= 1;
2075 }
2076
2077 /* Fix up the transfer record, and sync the map. */
2078 ix->ix_flags = (out ? IX_OUT : IX_IN);
2079 ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2082
2083 /*
2084 * Adjust the SGL offset and total message size fields. We don't
2085 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2086 */
2087 mb[0] += ((off << 4) + (nsegs << 16));
2088 return (0);
2089 }
2090
2091 /*
2092 * Unmap all data transfers associated with a message wrapper.
2093 */
2094 void
2095 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2096 {
2097 struct iop_xfer *ix;
2098 int i;
2099
2100 #ifdef I2ODEBUG
2101 if (im->im_xfer[0].ix_size == 0)
2102 panic("iop_msg_unmap: no transfers mapped");
2103 #endif
2104
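	/*
	 * Tear down each mapped transfer in order, stopping at the first
	 * unused record (ix_size == 0) or after IOP_MAX_MSG_XFERS entries;
	 * only the maps created dynamically by iop_msg_map() are
	 * destroyed.
	 */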
2105 for (ix = im->im_xfer, i = 0;;) {
2106 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2107 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2108 BUS_DMASYNC_POSTREAD);
2109 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2110
2111 /* Only the first DMA map is static. */
2112 if (i != 0)
2113 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2114 if ((++ix)->ix_size == 0)
2115 break;
2116 if (++i >= IOP_MAX_MSG_XFERS)
2117 break;
2118 }
2119 }
2120
2121 /*
2122 * Post a message frame to the IOP's inbound queue.
2123 */
2124 int
2125 iop_post(struct iop_softc *sc, u_int32_t *mb)
2126 {
2127 u_int32_t mfa;
2128 int s;
2129
2130 #ifdef I2ODEBUG
2131 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2132 panic("iop_post: frame too large");
2133 #endif
2134
2135 s = splbio();
2136
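	/*
	 * Inbound queue protocol: reading IOP_REG_IFIFO yields the offset
	 * (an MFA) of a free frame within the IOP's message window, or
	 * IOP_MFA_EMPTY if none is available; the frame is copied to that
	 * offset and the MFA is written back to the same register to post
	 * it.  The FIFO is read twice because some IOPs spuriously report
	 * empty on the first read.
	 */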
2137 /* Allocate a slot with the IOP. */
2138 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2139 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2140 splx(s);
2141 printf("%s: mfa not forthcoming\n",
2142 sc->sc_dv.dv_xname);
2143 return (EAGAIN);
2144 }
2145
2146 /* Perform reply buffer DMA synchronisation. */
2147 if (sc->sc_curib++ == 0)
2148 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2149 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2150
2151 /* Copy out the message frame. */
2152 bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2153 mb[0] >> 16);
2154 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
2155 (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2156
2157 /* Post the MFA back to the IOP. */
2158 iop_outl(sc, IOP_REG_IFIFO, mfa);
2159
2160 splx(s);
2161 return (0);
2162 }
2163
2164 /*
2165 * Post a message to the IOP and deal with completion.
2166 */
2167 int
2168 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2169 {
2170 u_int32_t *mb;
2171 int rv, s;
2172
2173 mb = xmb;
2174
	/*
	 * Terminate the scatter/gather list chain.  The final SIMPLE
	 * element's flag word sits two words before the end of the frame,
	 * hence the (size - 2) index.
	 */
2176 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2177 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2178
2179 if ((rv = iop_post(sc, mb)) != 0)
2180 return (rv);
2181
2182 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2183 if ((im->im_flags & IM_POLL) != 0)
2184 iop_msg_poll(sc, im, timo);
2185 else
2186 iop_msg_wait(sc, im, timo);
2187
2188 s = splbio();
2189 if ((im->im_flags & IM_REPLIED) != 0) {
2190 if ((im->im_flags & IM_NOSTATUS) != 0)
2191 rv = 0;
2192 else if ((im->im_flags & IM_FAIL) != 0)
2193 rv = ENXIO;
2194 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2195 rv = EIO;
2196 else
2197 rv = 0;
2198 } else
2199 rv = EBUSY;
2200 splx(s);
2201 } else
2202 rv = 0;
2203
2204 return (rv);
2205 }
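
/*
 * Illustrative only: the typical life cycle of a message wrapper, as seen
 * from a hypothetical caller of the routines above (error handling and
 * message-specific setup omitted).
 *
 *	im = iop_msg_alloc(sc, IM_WAIT);
 *	mf.msgtctx = im->im_tctx;
 *	if (iop_msg_map(sc, im, (u_int32_t *)&mf, buf, buflen, 1, NULL) == 0) {
 *		rv = iop_msg_post(sc, im, &mf, 5000);
 *		iop_msg_unmap(sc, im);
 *	}
 *	iop_msg_free(sc, im);
 */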
2206
2207 /*
2208 * Spin until the specified message is replied to.
2209 */
2210 static void
2211 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2212 {
2213 u_int32_t rmfa;
2214 int s;
2215
2216 s = splbio();
2217
2218 /* Wait for completion. */
2219 for (timo *= 10; timo != 0; timo--) {
2220 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2221 /* Double read to account for IOP bug. */
2222 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2223 if (rmfa == IOP_MFA_EMPTY)
2224 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2225 if (rmfa != IOP_MFA_EMPTY) {
2226 iop_handle_reply(sc, rmfa);
2227
2228 /*
2229 * Return the reply frame to the IOP's
2230 * outbound FIFO.
2231 */
2232 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2233 }
2234 }
2235 if ((im->im_flags & IM_REPLIED) != 0)
2236 break;
2237 DELAY(100);
2238 }
2239
2240 if (timo == 0) {
2241 #ifdef I2ODEBUG
2242 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2243 if (iop_status_get(sc, 1) != 0)
2244 printf("iop_msg_poll: unable to retrieve status\n");
2245 else
2246 printf("iop_msg_poll: IOP state = %d\n",
2247 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2248 #endif
2249 }
2250
2251 splx(s);
2252 }
2253
2254 /*
2255 * Sleep until the specified message is replied to.
2256 */
2257 static void
2258 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2259 {
2260 int s, rv;
2261
2262 s = splbio();
2263 if ((im->im_flags & IM_REPLIED) != 0) {
2264 splx(s);
2265 return;
2266 }
2267 rv = tsleep(im, PRIBIO, "iopmsg", mstohz(timo));
2268 splx(s);
2269
2270 #ifdef I2ODEBUG
2271 if (rv != 0) {
2272 printf("iop_msg_wait: tsleep() == %d\n", rv);
2273 if (iop_status_get(sc, 0) != 0)
2274 printf("iop_msg_wait: unable to retrieve status\n");
2275 else
2276 printf("iop_msg_wait: IOP state = %d\n",
2277 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2278 }
2279 #endif
2280 }
2281
2282 /*
2283 * Release an unused message frame back to the IOP's inbound fifo.
2284 */
2285 static void
2286 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2287 {
2288
2289 /* Use the frame to issue a no-op. */
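	/*
	 * Word 0 carries the I2O version in its low bits and the message
	 * size -- here four words, header only -- in bits 16-31; a
	 * UTIL_NOP is the cheapest message that hands the MFA back.
	 */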
2290 iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2291 iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2292 iop_outl_msg(sc, mfa + 8, 0);
2293 iop_outl_msg(sc, mfa + 12, 0);
2294
2295 iop_outl(sc, IOP_REG_IFIFO, mfa);
2296 }
2297
2298 #ifdef I2ODEBUG
2299 /*
2300 * Dump a reply frame header.
2301 */
2302 static void
2303 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2304 {
2305 u_int function, detail;
2306 #ifdef I2OVERBOSE
2307 const char *statusstr;
2308 #endif
2309
2310 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2311 detail = le16toh(rb->detail);
2312
2313 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2314
2315 #ifdef I2OVERBOSE
2316 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2317 statusstr = iop_status[rb->reqstatus];
2318 else
2319 statusstr = "undefined error code";
2320
2321 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2322 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2323 #else
2324 printf("%s: function=0x%02x status=0x%02x\n",
2325 sc->sc_dv.dv_xname, function, rb->reqstatus);
2326 #endif
2327 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2328 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2329 le32toh(rb->msgtctx));
2330 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2331 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2332 (le32toh(rb->msgflags) >> 8) & 0xff);
2333 }
2334 #endif
2335
2336 /*
2337 * Dump a transport failure reply.
2338 */
2339 static void
2340 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2341 {
2342
2343 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2344
2345 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2346 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2347 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2348 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2349 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2350 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2351 }
2352
2353 /*
2354 * Translate an I2O ASCII field into a C string.
2355 */
2356 void
2357 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2358 {
2359 int hc, lc, i, nit;
2360
2361 dlen--;
2362 lc = 0;
2363 hc = 0;
2364 i = 0;
2365
2366 /*
2367 * DPT use NUL as a space, whereas AMI use it as a terminator. The
2368 * spec has nothing to say about it. Since AMI fields are usually
2369 * filled with junk after the terminator, ...
2370 */
2371 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2372
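	/*
	 * Copy printable characters, rendering control and non-ASCII
	 * bytes as spaces, and truncate trailing blanks: e.g. the space
	 * padded field "RAID-5   " comes out as "RAID-5".
	 */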
2373 while (slen-- != 0 && dlen-- != 0) {
2374 if (nit && *src == '\0')
2375 break;
2376 else if (*src <= 0x20 || *src >= 0x7f) {
2377 if (hc)
2378 dst[i++] = ' ';
2379 } else {
2380 hc = 1;
2381 dst[i++] = *src;
2382 lc = i;
2383 }
2384 src++;
2385 }
2386
2387 dst[lc] = '\0';
2388 }
2389
2390 /*
2391 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2392 */
2393 int
2394 iop_print_ident(struct iop_softc *sc, int tid)
2395 {
2396 struct {
2397 struct i2o_param_op_results pr;
2398 struct i2o_param_read_results prr;
2399 struct i2o_param_device_identity di;
2400 } __attribute__ ((__packed__)) p;
2401 char buf[32];
2402 int rv;
2403
2404 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2405 sizeof(p), NULL);
2406 if (rv != 0)
2407 return (rv);
2408
2409 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2410 sizeof(buf));
2411 printf(" <%s, ", buf);
2412 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2413 sizeof(buf));
2414 printf("%s, ", buf);
2415 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2416 printf("%s>", buf);
2417
2418 return (0);
2419 }
2420
2421 /*
2422 * Claim or unclaim the specified TID.
2423 */
2424 int
2425 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2426 int flags)
2427 {
2428 struct iop_msg *im;
2429 struct i2o_util_claim mf;
2430 int rv, func;
2431
2432 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2433 im = iop_msg_alloc(sc, IM_WAIT);
2434
	/* CLAIM and CLAIM_RELEASE share an identical message layout. */
2436 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2437 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2438 mf.msgictx = ii->ii_ictx;
2439 mf.msgtctx = im->im_tctx;
2440 mf.flags = flags;
2441
2442 rv = iop_msg_post(sc, im, &mf, 5000);
2443 iop_msg_free(sc, im);
2444 return (rv);
2445 }
2446
2447 /*
2448 * Perform an abort.
2449 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
2452 {
2453 struct iop_msg *im;
2454 struct i2o_util_abort mf;
2455 int rv;
2456
2457 im = iop_msg_alloc(sc, IM_WAIT);
2458
2459 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2460 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2461 mf.msgictx = ii->ii_ictx;
2462 mf.msgtctx = im->im_tctx;
2463 mf.flags = (func << 24) | flags;
2464 mf.tctxabort = tctxabort;
2465
2466 rv = iop_msg_post(sc, im, &mf, 5000);
2467 iop_msg_free(sc, im);
2468 return (rv);
2469 }
2470
2471 /*
2472 * Enable or disable reception of events for the specified device.
2473 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2475 {
2476 struct i2o_util_event_register mf;
2477
2478 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2479 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2480 mf.msgictx = ii->ii_ictx;
2481 mf.msgtctx = 0;
2482 mf.eventmask = mask;
2483
2484 /* This message is replied to only when events are signalled. */
2485 return (iop_post(sc, (u_int32_t *)&mf));
2486 }
2487
2488 int
2489 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
2490 {
2491 struct iop_softc *sc;
2492
2493 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2494 return (ENXIO);
2495 if ((sc->sc_flags & IOP_ONLINE) == 0)
2496 return (ENXIO);
2497 if ((sc->sc_flags & IOP_OPEN) != 0)
2498 return (EBUSY);
2499 sc->sc_flags |= IOP_OPEN;
2500
2501 return (0);
2502 }
2503
2504 int
2505 iopclose(dev_t dev, int flag, int mode, struct lwp *l)
2506 {
2507 struct iop_softc *sc;
2508
2509 sc = device_lookup(&iop_cd, minor(dev));
2510 sc->sc_flags &= ~IOP_OPEN;
2511
2512 return (0);
2513 }
2514
2515 int
2516 iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
2517 {
2518 struct iop_softc *sc;
2519 struct iovec *iov;
2520 int rv, i;
2521
2522 if (securelevel >= 2)
2523 return (EPERM);
2524
2525 sc = device_lookup(&iop_cd, minor(dev));
2526
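	/*
	 * For the ioctls below that copy data out, the argument is an
	 * iovec: the transfer is clamped to the smaller of the kernel
	 * object and iov_len, and the number of bytes actually copied is
	 * passed back in iov_len.
	 */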
2527 switch (cmd) {
2528 case IOPIOCPT:
2529 return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));
2530
2531 case IOPIOCGSTATUS:
2532 iov = (struct iovec *)data;
2533 i = sizeof(struct i2o_status);
2534 if (i > iov->iov_len)
2535 i = iov->iov_len;
2536 else
2537 iov->iov_len = i;
2538 if ((rv = iop_status_get(sc, 0)) == 0)
2539 rv = copyout(&sc->sc_status, iov->iov_base, i);
2540 return (rv);
2541
2542 case IOPIOCGLCT:
2543 case IOPIOCGTIDMAP:
2544 case IOPIOCRECONFIG:
2545 break;
2546
2547 default:
2548 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2549 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2550 #endif
2551 return (ENOTTY);
2552 }
2553
2554 if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2555 return (rv);
2556
2557 switch (cmd) {
2558 case IOPIOCGLCT:
2559 iov = (struct iovec *)data;
2560 i = le16toh(sc->sc_lct->tablesize) << 2;
2561 if (i > iov->iov_len)
2562 i = iov->iov_len;
2563 else
2564 iov->iov_len = i;
2565 rv = copyout(sc->sc_lct, iov->iov_base, i);
2566 break;
2567
2568 case IOPIOCRECONFIG:
2569 if ((rv = lockmgr(&sc->sc_conflock, LK_UPGRADE, NULL)) == 0)
2570 rv = iop_reconfigure(sc, 0);
2571 break;
2572
2573 case IOPIOCGTIDMAP:
2574 iov = (struct iovec *)data;
2575 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2576 if (i > iov->iov_len)
2577 i = iov->iov_len;
2578 else
2579 iov->iov_len = i;
2580 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2581 break;
2582 }
2583
2584 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2585 return (rv);
2586 }
2587
2588 static int
2589 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2590 {
2591 struct iop_msg *im;
2592 struct i2o_msg *mf;
2593 struct ioppt_buf *ptb;
2594 int rv, i, mapped;
2595
2596 mf = NULL;
2597 im = NULL;
	mapped = 0;		/* no transfers mapped yet */
2599
2600 if (pt->pt_msglen > sc->sc_framesize ||
2601 pt->pt_msglen < sizeof(struct i2o_msg) ||
2602 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2603 pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2604 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2605 return (EINVAL);
2606
2607 for (i = 0; i < pt->pt_nbufs; i++)
2608 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2609 rv = ENOMEM;
2610 goto bad;
2611 }
2612
2613 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2614 if (mf == NULL)
2615 return (ENOMEM);
2616
2617 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2618 goto bad;
2619
2620 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2621 im->im_rb = (struct i2o_reply *)mf;
2622 mf->msgictx = IOP_ICTX;
2623 mf->msgtctx = im->im_tctx;
2624
2625 for (i = 0; i < pt->pt_nbufs; i++) {
2626 ptb = &pt->pt_bufs[i];
2627 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2628 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2629 if (rv != 0)
2630 goto bad;
2631 mapped = 1;
2632 }
2633
2634 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2635 goto bad;
2636
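	/*
	 * The reply's size lives in the upper 16 bits of msgflags, in
	 * 32-bit words; (x >> 14) & ~3 converts it straight to bytes.
	 * Clamp to both the frame size and the caller's reply buffer.
	 */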
2637 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2638 if (i > sc->sc_framesize)
2639 i = sc->sc_framesize;
2640 if (i > pt->pt_replylen)
2641 i = pt->pt_replylen;
2642 rv = copyout(im->im_rb, pt->pt_reply, i);
2643
2644 bad:
2645 if (mapped != 0)
2646 iop_msg_unmap(sc, im);
2647 if (im != NULL)
2648 iop_msg_free(sc, im);
2649 if (mf != NULL)
2650 free(mf, M_DEVBUF);
2651 return (rv);
2652 }
2653