/*	$NetBSD: iop.c,v 1.58 2006/10/04 15:52:35 christos Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.58 2006/10/04 15:52:35 christos Exp $");

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#include "locators.h"

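/*
 * Busy-wait for up to `ms' milliseconds for `cond' to become true,
 * re-evaluating the condition every 100 microseconds.
 */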
#define	POLL(ms, cond)						\
do {								\
	int xi;							\
	for (xi = (ms) * 10; xi; xi--) {			\
		if (cond)					\
			break;					\
		DELAY(100);					\
	}							\
} while (/* CONSTCOND */0);

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#define	COMMENT(x)	NULL
#else
#define	IFVERBOSE(x)
#define	COMMENT(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

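/*
 * Worst-case DMA segment count for one transfer: one segment per page
 * of the largest allowed transfer, plus one in case the buffer does not
 * begin on a page boundary.
 */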
#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

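/*
 * Transaction context layout: the low IOP_TCTX_SHIFT bits index the
 * message wrapper array (sc_ims); the remaining bits carry a generation
 * number, bumped on each allocation in iop_msg_alloc(), which lets
 * iop_handle_reply() detect replies for stale wrappers.
 */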
#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long	iop_ictxhash;
static void	*iop_sdh;
static struct	i2o_systab *iop_systab;
static int	iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

static struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char	*ic_caption;
#endif
} const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		IFVERBOSE("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t	iop_inl_msg(struct iop_softc *, int);
static inline void	iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *, size_t);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
				struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_sys_enable(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

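/*
 * Register accessors.  Reads are preceded, and writes followed, by a
 * bus_space_barrier() call so that register accesses are not reordered
 * by the bus.  The _msg variants go through the message frame window,
 * which may be a separate mapping (sc_msg_iot/sc_msg_ioh).
 */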
static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

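	/*
	 * `state' counts how far resource allocation has progressed, so
	 * the bail_out paths below can unwind exactly what was set up.
	 */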
	state = 0;

	printf("I2O adapter");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		printf("%s: frame size too small (%d)\n",
		    sc->sc_dv.dv_xname, sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	return;

 bail_out3:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
 bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;
	int locs[IOPCF_NLOCS];

	sc = device_private(self);
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = device_unit(&iop->sc_dv) + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_sys_enable(sc) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	locs[IOPCF_TID] = I2O_TID_IOP;
	config_found_sm_loc(self, "iop", locs, &ia, iop_print,
	    config_stdsubmatch);

	/*
	 * Start device configuration.
	 */
	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	if ((rv = iop_reconfigure(sc, 0)) == -1) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		return;
	}
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct lwp *l;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;
	l = curlwp;

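	/*
	 * Each pass asks for notification against a change indicator one
	 * beyond the last LCT we saw; the IOP replies only once the table
	 * changes, so iop_lct_get0() may block here for a long time (see
	 * the comment above iop_lct_get0()).
	 */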
	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(l);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(l);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	}
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
			config_detach(ii->ii_dv, DETACH_FORCE);
			continue;
		}

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(&sc->sc_dv, "iop", locs, &ia,
		    iop_print, config_stdsubmatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo, size_t l)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		snprintf(devinfo, l, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, l);
#else

	snprintf(devinfo, l, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

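	/*
	 * The IOP DMAs the status block into the scratch area and then
	 * sets the sync byte to 0xff; poll for that for up to 25 * 100ms
	 * = 2.5 seconds.
	 */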
	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff) {
		printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
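	/*
	 * Hand-roll a single SIMPLE SG element pointing at the scratch
	 * status word, and grow the message size field (the upper 16 bits
	 * of mb[0]) by the two words just appended.
	 */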
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: DMA map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: DMA create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curlwp);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curlwp);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Post a SYS_ENABLE message to the adapter.
 */
int
iop_sys_enable(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, 30000);
	if (rv == 0) {
		if ((im->im_flags & IM_FAIL) != 0)
			rv = ENXIO;
		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
			rv = 0;
		else
			rv = EIO;
	}

	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curlwp);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curlwp);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
	      int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	PHOLD(curlwp);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curlwp);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (device_unit(&sc->sc_dv) + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curlwp);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curlwp);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
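		/*
		 * The low bits of the transaction context index sc_ims;
		 * the remaining generation bits must match the wrapper's
		 * exactly, which catches replies to stale or recycled
		 * wrappers.
		 */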
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;
		im->im_detstatus = le16toh(rb->detail);

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
			if (ii)
				(*ii->ii_intr)(ii->ii_dv, im, rb);
		}
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		if (ii)
			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
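		/* Return the reply frame to the IOP's outbound free-list. */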
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

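	/*
	 * Keep the wrapper index in the low bits of im_tctx and stamp the
	 * allocation with a fresh generation number in the high bits; see
	 * the IOP_TCTX_* definitions above.
	 */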
	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
	    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
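	/*
	 * Each SIMPLE element occupies two words: flags|length followed
	 * by the 32-bit physical segment address.
	 */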

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
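	/*
	 * Each element consumed two words, so grow the message size field
	 * (the upper 16 bits of mb[0]) by nsegs * 2 words.
	 */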
1986 mb[0] += (nsegs << 17);
1987 return (0);
1988
1989 bad:
1990 if (xn != 0)
1991 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1992 return (rv);
1993 }
1994
1995 /*
1996 * Map a block I/O data transfer (different in that there's only one per
1997 * message maximum, and PAGE addressing may be used). Write a scatter
1998 * gather list into the message frame.
1999 */
2000 int
2001 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
2002 void *xferaddr, int xfersize, int out)
2003 {
2004 bus_dma_segment_t *ds;
2005 bus_dmamap_t dm;
2006 struct iop_xfer *ix;
2007 u_int rv, i, nsegs, off, slen, tlen, flg;
2008 paddr_t saddr, eaddr;
2009 u_int32_t *p;
2010
2011 #ifdef I2ODEBUG
2012 if (xfersize == 0)
2013 panic("iop_msg_map_bio: null transfer");
2014 if (xfersize > IOP_MAX_XFER)
2015 panic("iop_msg_map_bio: transfer too large");
2016 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2017 panic("iop_msg_map_bio: SGLOFFADJ");
2018 #endif
2019
2020 ix = im->im_xfer;
2021 dm = ix->ix_map;
2022 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
2023 BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
2024 if (rv != 0)
2025 return (rv);
2026
2027 off = mb[0] >> 16;
2028 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
2029
2030 /*
2031 * If the transfer is highly fragmented and won't fit using SIMPLE
2032 * elements, use PAGE_LIST elements instead. SIMPLE elements are
2033 * potentially more efficient, both for us and the IOP.
2034 */
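	/*
	 * A PAGE_LIST element, as built below, is one header word (byte
	 * count plus flags) followed by one 32-bit physical address per
	 * page of the transfer:
	 *
	 *	mb[off]:     xfersize | I2O_SGL_PAGE_LIST | ...
	 *	mb[off + 1]: physical address of page 0
	 *	mb[off + 2]: physical address of page 1
	 *	...
	 */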
2035 if (dm->dm_nsegs > nsegs) {
2036 nsegs = 1;
2037 p = mb + off + 1;
2038
2039 /* XXX This should be done with a bus_space flag. */
2040 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2041 slen = ds->ds_len;
2042 saddr = ds->ds_addr;
2043
2044 while (slen > 0) {
2045 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2046 tlen = min(eaddr - saddr, slen);
2047 slen -= tlen;
2048 *p++ = le32toh(saddr);
2049 saddr = eaddr;
2050 nsegs++;
2051 }
2052 }
2053
2054 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2055 I2O_SGL_END;
2056 if (out)
2057 mb[off] |= I2O_SGL_DATA_OUT;
2058 } else {
2059 p = mb + off;
2060 nsegs = dm->dm_nsegs;
2061
2062 if (out)
2063 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2064 else
2065 flg = I2O_SGL_SIMPLE;
2066
2067 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2068 p[0] = (u_int32_t)ds->ds_len | flg;
2069 p[1] = (u_int32_t)ds->ds_addr;
2070 }
2071
2072 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2073 I2O_SGL_END;
2074 p[1] = (u_int32_t)ds->ds_addr;
		nsegs <<= 1;		/* two words per SIMPLE element */
2076 }
2077
2078 /* Fix up the transfer record, and sync the map. */
2079 ix->ix_flags = (out ? IX_OUT : IX_IN);
2080 ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2083
	/*
	 * Adjust the SGL offset and total message size fields.  We don't
	 * set IM_SGLOFFADJ: the SGL built here is complete and already
	 * terminated with I2O_SGL_END, so iop_msg_post() need not patch it.
	 */
2088 mb[0] += ((off << 4) + (nsegs << 16));
2089 return (0);
2090 }
2091
2092 /*
2093 * Unmap all data transfers associated with a message wrapper.
2094 */
2095 void
2096 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2097 {
2098 struct iop_xfer *ix;
2099 int i;
2100
2101 #ifdef I2ODEBUG
2102 if (im->im_xfer[0].ix_size == 0)
2103 panic("iop_msg_unmap: no transfers mapped");
2104 #endif
2105
2106 for (ix = im->im_xfer, i = 0;;) {
2107 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2108 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2109 BUS_DMASYNC_POSTREAD);
2110 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2111
2112 /* Only the first DMA map is static. */
2113 if (i != 0)
2114 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
		/* Check the array bound before dereferencing the next entry. */
		if (++i >= IOP_MAX_MSG_XFERS)
			break;
		if ((++ix)->ix_size == 0)
			break;
2119 }
2120 }
2121
2122 /*
2123 * Post a message frame to the IOP's inbound queue.
2124 */
2125 int
2126 iop_post(struct iop_softc *sc, u_int32_t *mb)
2127 {
2128 u_int32_t mfa;
2129 int s;
2130
2131 #ifdef I2ODEBUG
2132 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2133 panic("iop_post: frame too large");
2134 #endif
2135
2136 s = splbio();
2137
	/* Allocate a slot with the IOP; double read to account for IOP bug. */
2139 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2140 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2141 splx(s);
2142 printf("%s: mfa not forthcoming\n",
2143 sc->sc_dv.dv_xname);
2144 return (EAGAIN);
2145 }
2146
2147 /* Perform reply buffer DMA synchronisation. */
2148 if (sc->sc_curib++ == 0)
2149 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2150 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2151
2152 /* Copy out the message frame. */
2153 bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2154 mb[0] >> 16);
	/* Barrier length: the frame size in bytes, i.e. (mb[0] >> 16) << 2. */
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
	    (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2157
2158 /* Post the MFA back to the IOP. */
2159 iop_outl(sc, IOP_REG_IFIFO, mfa);
2160
2161 splx(s);
2162 return (0);
2163 }
2164
2165 /*
2166 * Post a message to the IOP and deal with completion.
2167 */
2168 int
2169 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2170 {
2171 u_int32_t *mb;
2172 int rv, s;
2173
2174 mb = xmb;
2175
	/*
	 * Terminate the scatter/gather list chain: the flags word of the
	 * final two-word SIMPLE element sits two words back from the end
	 * of the frame.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2179
2180 if ((rv = iop_post(sc, mb)) != 0)
2181 return (rv);
2182
2183 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2184 if ((im->im_flags & IM_POLL) != 0)
2185 iop_msg_poll(sc, im, timo);
2186 else
2187 iop_msg_wait(sc, im, timo);
2188
2189 s = splbio();
2190 if ((im->im_flags & IM_REPLIED) != 0) {
2191 if ((im->im_flags & IM_NOSTATUS) != 0)
2192 rv = 0;
2193 else if ((im->im_flags & IM_FAIL) != 0)
2194 rv = ENXIO;
2195 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2196 rv = EIO;
2197 else
2198 rv = 0;
2199 } else
2200 rv = EBUSY;
2201 splx(s);
2202 } else
2203 rv = 0;
2204
2205 return (rv);
2206 }
2207
2208 /*
2209 * Spin until the specified message is replied to.
2210 */
2211 static void
2212 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2213 {
2214 u_int32_t rmfa;
2215 int s;
2216
2217 s = splbio();
2218
	/* Wait for completion; timo is in milliseconds. */
	for (timo *= 10; timo != 0; timo--) {
2221 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2222 /* Double read to account for IOP bug. */
2223 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2224 if (rmfa == IOP_MFA_EMPTY)
2225 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2226 if (rmfa != IOP_MFA_EMPTY) {
2227 iop_handle_reply(sc, rmfa);
2228
2229 /*
2230 * Return the reply frame to the IOP's
2231 * outbound FIFO.
2232 */
2233 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2234 }
2235 }
2236 if ((im->im_flags & IM_REPLIED) != 0)
2237 break;
2238 DELAY(100);
2239 }
2240
2241 if (timo == 0) {
2242 #ifdef I2ODEBUG
2243 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2244 if (iop_status_get(sc, 1) != 0)
2245 printf("iop_msg_poll: unable to retrieve status\n");
2246 else
2247 printf("iop_msg_poll: IOP state = %d\n",
2248 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2249 #endif
2250 }
2251
2252 splx(s);
2253 }
2254
2255 /*
2256 * Sleep until the specified message is replied to.
2257 */
2258 static void
2259 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2260 {
2261 int s, rv;
2262
2263 s = splbio();
2264 if ((im->im_flags & IM_REPLIED) != 0) {
2265 splx(s);
2266 return;
2267 }
2268 rv = tsleep(im, PRIBIO, "iopmsg", mstohz(timo));
2269 splx(s);
2270
2271 #ifdef I2ODEBUG
2272 if (rv != 0) {
2273 printf("iop_msg_wait: tsleep() == %d\n", rv);
2274 if (iop_status_get(sc, 0) != 0)
2275 printf("iop_msg_wait: unable to retrieve status\n");
2276 else
2277 printf("iop_msg_wait: IOP state = %d\n",
2278 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2279 }
2280 #endif
2281 }
2282
2283 /*
 * Release an unused message frame back to the IOP's inbound FIFO.
2285 */
2286 static void
2287 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2288 {
2289
2290 /* Use the frame to issue a no-op. */
	iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));	/* 4-word frame */
	iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	iop_outl_msg(sc, mfa + 8, 0);		/* initiator context */
	iop_outl_msg(sc, mfa + 12, 0);		/* transaction context */
2295
2296 iop_outl(sc, IOP_REG_IFIFO, mfa);
2297 }
2298
2299 #ifdef I2ODEBUG
2300 /*
2301 * Dump a reply frame header.
2302 */
2303 static void
2304 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2305 {
2306 u_int function, detail;
2307 #ifdef I2OVERBOSE
2308 const char *statusstr;
2309 #endif
2310
2311 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2312 detail = le16toh(rb->detail);
2313
2314 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2315
2316 #ifdef I2OVERBOSE
2317 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2318 statusstr = iop_status[rb->reqstatus];
2319 else
2320 statusstr = "undefined error code";
2321
2322 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2323 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2324 #else
2325 printf("%s: function=0x%02x status=0x%02x\n",
2326 sc->sc_dv.dv_xname, function, rb->reqstatus);
2327 #endif
2328 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2329 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2330 le32toh(rb->msgtctx));
2331 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2332 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2333 (le32toh(rb->msgflags) >> 8) & 0xff);
2334 }
2335 #endif
2336
2337 /*
2338 * Dump a transport failure reply.
2339 */
2340 static void
2341 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2342 {
2343
2344 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2345
2346 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2347 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2348 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2349 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2350 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2351 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2352 }
2353
2354 /*
2355 * Translate an I2O ASCII field into a C string.
2356 */
2357 void
2358 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2359 {
2360 int hc, lc, i, nit;
2361
2362 dlen--;
2363 lc = 0;
2364 hc = 0;
2365 i = 0;
2366
	/*
	 * DPT uses NUL as a space, whereas AMI uses it as a terminator.
	 * The spec has nothing to say about it.  Since AMI fields are
	 * usually filled with junk after the terminator, treat NUL as a
	 * terminator unless the IOP is from DPT: e.g. "RAID-5\0<junk>"
	 * yields "RAID-5", while DPT's embedded NULs are rendered as
	 * spaces and trimmed from the end.
	 */
2372 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2373
2374 while (slen-- != 0 && dlen-- != 0) {
2375 if (nit && *src == '\0')
2376 break;
2377 else if (*src <= 0x20 || *src >= 0x7f) {
2378 if (hc)
2379 dst[i++] = ' ';
2380 } else {
2381 hc = 1;
2382 dst[i++] = *src;
2383 lc = i;
2384 }
2385 src++;
2386 }
2387
2388 dst[lc] = '\0';
2389 }
2390
2391 /*
2392 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2393 */
2394 int
2395 iop_print_ident(struct iop_softc *sc, int tid)
2396 {
2397 struct {
2398 struct i2o_param_op_results pr;
2399 struct i2o_param_read_results prr;
2400 struct i2o_param_device_identity di;
2401 } __attribute__ ((__packed__)) p;
2402 char buf[32];
2403 int rv;
2404
2405 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2406 sizeof(p), NULL);
2407 if (rv != 0)
2408 return (rv);
2409
2410 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2411 sizeof(buf));
2412 printf(" <%s, ", buf);
2413 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2414 sizeof(buf));
2415 printf("%s, ", buf);
2416 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2417 printf("%s>", buf);
2418
2419 return (0);
2420 }
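
/*
 * Editor's note: for a hypothetical board the above prints something
 * like " <Adaptec, RAID-5 Volume, 370F>": vendor, product and revision
 * level, each filtered through iop_strvis().
 */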
2421
2422 /*
2423 * Claim or unclaim the specified TID.
2424 */
2425 int
2426 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2427 int flags)
2428 {
2429 struct iop_msg *im;
2430 struct i2o_util_claim mf;
2431 int rv, func;
2432
2433 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2434 im = iop_msg_alloc(sc, IM_WAIT);
2435
2436 /* We can use the same structure, as they're identical. */
2437 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2438 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2439 mf.msgictx = ii->ii_ictx;
2440 mf.msgtctx = im->im_tctx;
2441 mf.flags = flags;
2442
2443 rv = iop_msg_post(sc, im, &mf, 5000);
2444 iop_msg_free(sc, im);
2445 return (rv);
2446 }
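
/*
 * Editor's note: a hypothetical caller, modelled on NetBSD's ld(4) I2O
 * attachment: claim the device as primary user before use, and release
 * the claim again on detach.
 */
#if 0
	if (iop_util_claim(sc, ii, 0, I2O_UTIL_CLAIM_PRIMARY_USER) != 0)
		return (ENXIO);
	/* ... use the device ... */
	(void)iop_util_claim(sc, ii, 1, I2O_UTIL_CLAIM_PRIMARY_USER);
#endif	/* example */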
2447
2448 /*
2449 * Perform an abort.
2450 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
2453 {
2454 struct iop_msg *im;
2455 struct i2o_util_abort mf;
2456 int rv;
2457
2458 im = iop_msg_alloc(sc, IM_WAIT);
2459
2460 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2461 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2462 mf.msgictx = ii->ii_ictx;
2463 mf.msgtctx = im->im_tctx;
2464 mf.flags = (func << 24) | flags;
2465 mf.tctxabort = tctxabort;
2466
2467 rv = iop_msg_post(sc, im, &mf, 5000);
2468 iop_msg_free(sc, im);
2469 return (rv);
2470 }
2471
2472 /*
2473 * Enable or disable reception of events for the specified device.
2474 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2476 {
2477 struct i2o_util_event_register mf;
2478
2479 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2480 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2481 mf.msgictx = ii->ii_ictx;
2482 mf.msgtctx = 0;
2483 mf.eventmask = mask;
2484
2485 /* This message is replied to only when events are signalled. */
2486 return (iop_post(sc, (u_int32_t *)&mf));
2487 }
2488
2489 int
2490 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
2491 {
2492 struct iop_softc *sc;
2493
2494 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2495 return (ENXIO);
2496 if ((sc->sc_flags & IOP_ONLINE) == 0)
2497 return (ENXIO);
2498 if ((sc->sc_flags & IOP_OPEN) != 0)
2499 return (EBUSY);
2500 sc->sc_flags |= IOP_OPEN;
2501
2502 return (0);
2503 }
2504
2505 int
2506 iopclose(dev_t dev, int flag, int mode, struct lwp *l)
2507 {
2508 struct iop_softc *sc;
2509
2510 sc = device_lookup(&iop_cd, minor(dev));
2511 sc->sc_flags &= ~IOP_OPEN;
2512
2513 return (0);
2514 }
2515
2516 int
2517 iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
2518 {
2519 struct iop_softc *sc;
2520 struct iovec *iov;
2521 int rv, i;
2522
2523 sc = device_lookup(&iop_cd, minor(dev));
2524
2525 switch (cmd) {
2526 case IOPIOCPT:
2527 if (securelevel >= 2)
2528 return (EPERM);
2529
2530 return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));
2531
2532 case IOPIOCGSTATUS:
2533 iov = (struct iovec *)data;
2534 i = sizeof(struct i2o_status);
2535 if (i > iov->iov_len)
2536 i = iov->iov_len;
2537 else
2538 iov->iov_len = i;
2539 if ((rv = iop_status_get(sc, 0)) == 0)
2540 rv = copyout(&sc->sc_status, iov->iov_base, i);
2541 return (rv);
2542
2543 case IOPIOCGLCT:
2544 case IOPIOCGTIDMAP:
2545 case IOPIOCRECONFIG:
2546 break;
2547
2548 default:
2549 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2550 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2551 #endif
2552 return (ENOTTY);
2553 }
2554
2555 if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2556 return (rv);
2557
2558 switch (cmd) {
2559 case IOPIOCGLCT:
2560 iov = (struct iovec *)data;
2561 i = le16toh(sc->sc_lct->tablesize) << 2;
2562 if (i > iov->iov_len)
2563 i = iov->iov_len;
2564 else
2565 iov->iov_len = i;
2566 rv = copyout(sc->sc_lct, iov->iov_base, i);
2567 break;
2568
2569 case IOPIOCRECONFIG:
2570 if ((rv = lockmgr(&sc->sc_conflock, LK_UPGRADE, NULL)) == 0)
2571 rv = iop_reconfigure(sc, 0);
2572 break;
2573
2574 case IOPIOCGTIDMAP:
2575 iov = (struct iovec *)data;
2576 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2577 if (i > iov->iov_len)
2578 i = iov->iov_len;
2579 else
2580 iov->iov_len = i;
2581 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2582 break;
2583 }
2584
2585 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2586 return (rv);
2587 }
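
/*
 * Editor's note: a hypothetical userland consumer of IOPIOCGSTATUS.
 * The device path is illustrative only; the ioctl takes an iovec
 * describing the caller's buffer, as handled above.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <fcntl.h>
#include <unistd.h>
#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>

int
get_status(struct i2o_status *st)
{
	struct iovec iov;
	int fd, rv;

	iov.iov_base = st;
	iov.iov_len = sizeof(*st);

	if ((fd = open("/dev/iop0", O_RDONLY)) < 0)
		return (-1);
	rv = ioctl(fd, IOPIOCGSTATUS, &iov);
	close(fd);
	return (rv);
}
#endif	/* example */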
2588
2589 static int
2590 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2591 {
2592 struct iop_msg *im;
2593 struct i2o_msg *mf;
2594 struct ioppt_buf *ptb;
2595 int rv, i, mapped;
2596
2597 mf = NULL;
2598 im = NULL;
	mapped = 0;		/* set once a buffer has been mapped */
2600
2601 if (pt->pt_msglen > sc->sc_framesize ||
2602 pt->pt_msglen < sizeof(struct i2o_msg) ||
2603 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2604 pt->pt_nbufs < 0 ||
2605 #if 0
2606 pt->pt_replylen < 0 ||
2607 #endif
2608 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2609 return (EINVAL);
2610
2611 for (i = 0; i < pt->pt_nbufs; i++)
2612 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2613 rv = ENOMEM;
2614 goto bad;
2615 }
2616
2617 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2618 if (mf == NULL)
2619 return (ENOMEM);
2620
2621 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2622 goto bad;
2623
2624 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2625 im->im_rb = (struct i2o_reply *)mf;
2626 mf->msgictx = IOP_ICTX;
2627 mf->msgtctx = im->im_tctx;
2628
2629 for (i = 0; i < pt->pt_nbufs; i++) {
2630 ptb = &pt->pt_bufs[i];
2631 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2632 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2633 if (rv != 0)
2634 goto bad;
2635 mapped = 1;
2636 }
2637
2638 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2639 goto bad;
2640
2641 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2642 if (i > sc->sc_framesize)
2643 i = sc->sc_framesize;
2644 if (i > pt->pt_replylen)
2645 i = pt->pt_replylen;
2646 rv = copyout(im->im_rb, pt->pt_reply, i);
2647
2648 bad:
2649 if (mapped != 0)
2650 iop_msg_unmap(sc, im);
2651 if (im != NULL)
2652 iop_msg_free(sc, im);
2653 if (mf != NULL)
2654 free(mf, M_DEVBUF);
2655 return (rv);
2656 }
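
/*
 * Editor's note: a hypothetical userland fragment (fd as in the previous
 * example) posting a UtilNOP through IOPIOCPT.  The frame mirrors the
 * one built in iop_release_mfa(); msgictx and msgtctx are overwritten by
 * the kernel above.  The reply buffer size is illustrative.
 */
#if 0
	struct ioppt pt;
	u_int32_t mb[4], rb[32];

	mb[0] = I2O_VERSION_11 | (4 << 16);	/* 4-word frame */
	mb[1] = I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP);
	mb[2] = 0;				/* msgictx (kernel fills) */
	mb[3] = 0;				/* msgtctx (kernel fills) */

	pt.pt_msg = mb;
	pt.pt_msglen = sizeof(mb);
	pt.pt_reply = rb;
	pt.pt_replylen = sizeof(rb);
	pt.pt_timo = 5000;			/* must be 1000..300000ms */
	pt.pt_nbufs = 0;

	if (ioctl(fd, IOPIOCPT, &pt) != 0)
		err(1, "IOPIOCPT");
#endif	/* example */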
2657