iop.c revision 1.52 1 /* $NetBSD: iop.c,v 1.52 2006/03/27 21:50:45 bouyer Exp $ */
2
3 /*-
4 * Copyright (c) 2000, 2001, 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Support for I2O IOPs (intelligent I/O processors).
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.52 2006/03/27 21:50:45 bouyer Exp $");
45
46 #include "opt_i2o.h"
47 #include "iop.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/kernel.h>
52 #include <sys/device.h>
53 #include <sys/queue.h>
54 #include <sys/proc.h>
55 #include <sys/malloc.h>
56 #include <sys/ioctl.h>
57 #include <sys/endian.h>
58 #include <sys/conf.h>
59 #include <sys/kthread.h>
60
61 #include <uvm/uvm_extern.h>
62
63 #include <machine/bus.h>
64
65 #include <dev/i2o/i2o.h>
66 #include <dev/i2o/iopio.h>
67 #include <dev/i2o/iopreg.h>
68 #include <dev/i2o/iopvar.h>
69
70 #include "locators.h"
71
72 #define POLL(ms, cond) \
73 do { \
74 int xi; \
75 for (xi = (ms) * 10; xi; xi--) { \
76 if (cond) \
77 break; \
78 DELAY(100); \
79 } \
80 } while (/* CONSTCOND */0);
81
82 #ifdef I2ODEBUG
83 #define DPRINTF(x) printf x
84 #else
85 #define DPRINTF(x)
86 #endif
87
88 #ifdef I2OVERBOSE
89 #define IFVERBOSE(x) x
90 #define COMMENT(x) NULL
91 #else
92 #define IFVERBOSE(x)
93 #define COMMENT(x)
94 #endif
95
96 #define IOP_ICTXHASH_NBUCKETS 16
97 #define IOP_ICTXHASH(ictx) (&iop_ictxhashtbl[(ictx) & iop_ictxhash])
98
99 #define IOP_MAX_SEGS (((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)
100
101 #define IOP_TCTX_SHIFT 12
102 #define IOP_TCTX_MASK ((1 << IOP_TCTX_SHIFT) - 1)
103
104 static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
105 static u_long iop_ictxhash;
106 static void *iop_sdh;
107 static struct i2o_systab *iop_systab;
108 static int iop_systab_size;
109
110 extern struct cfdriver iop_cd;
111
112 dev_type_open(iopopen);
113 dev_type_close(iopclose);
114 dev_type_ioctl(iopioctl);
115
/*
 * Character device switch for /dev/iop control nodes: only open, close
 * and ioctl are implemented; all other operations are stubbed out.
 */
const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};
120
121 #define IC_CONFIGURE 0x01
122 #define IC_PRIORITY 0x02
123
/*
 * Table of known I2O device classes.  IC_CONFIGURE marks classes we are
 * willing to attach children for; IC_PRIORITY marks classes configured in
 * the first (high-priority) pass of iop_configure_devices().  Captions are
 * compiled in only under I2OVERBOSE.
 */
struct iop_class {
	u_short	ic_class;		/* I2O_CLASS_* code */
	u_short	ic_flags;		/* IC_CONFIGURE / IC_PRIORITY */
#ifdef I2OVERBOSE
	const char	*ic_caption;	/* human-readable class name */
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		IFVERBOSE("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port" )
	},
};
202
#if defined(I2ODEBUG) && defined(I2OVERBOSE)
/*
 * Textual forms of the I2O reply status codes, indexed by status value.
 * Used only by the debug reply printer.
 */
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif
219
220 static inline u_int32_t iop_inl(struct iop_softc *, int);
221 static inline void iop_outl(struct iop_softc *, int, u_int32_t);
222
223 static inline u_int32_t iop_inl_msg(struct iop_softc *, int);
224 static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t);
225
226 static void iop_config_interrupts(struct device *);
227 static void iop_configure_devices(struct iop_softc *, int, int);
228 static void iop_devinfo(int, char *, size_t);
229 static int iop_print(void *, const char *);
230 static void iop_shutdown(void *);
231
232 static void iop_adjqparam(struct iop_softc *, int);
233 static void iop_create_reconf_thread(void *);
234 static int iop_handle_reply(struct iop_softc *, u_int32_t);
235 static int iop_hrt_get(struct iop_softc *);
236 static int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
237 static void iop_intr_event(struct device *, struct iop_msg *, void *);
238 static int iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
239 u_int32_t);
240 static void iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
241 static void iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
242 static int iop_ofifo_init(struct iop_softc *);
243 static int iop_passthrough(struct iop_softc *, struct ioppt *,
244 struct proc *);
245 static void iop_reconf_thread(void *);
246 static void iop_release_mfa(struct iop_softc *, u_int32_t);
247 static int iop_reset(struct iop_softc *);
248 static int iop_sys_enable(struct iop_softc *);
249 static int iop_systab_set(struct iop_softc *);
250 static void iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);
251
252 #ifdef I2ODEBUG
253 static void iop_reply_print(struct iop_softc *, struct i2o_reply *);
254 #endif
255
/*
 * Read a 32-bit IOP register.  A full read/write barrier is issued first
 * so the read is ordered with respect to any preceding register accesses.
 */
static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}
264
/*
 * Write a 32-bit IOP register, then issue a write barrier so the store
 * is pushed out before any subsequent register access.
 */
static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
273
/*
 * As iop_inl(), but for the separate message-frame window
 * (sc_msg_iot/sc_msg_ioh), which may be a distinct mapping.
 */
static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}
282
/*
 * As iop_outl(), but for the separate message-frame window.
 */
static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
291
292 /*
293 * Initialise the IOP and our interface.
294 */
295 void
296 iop_init(struct iop_softc *sc, const char *intrstr)
297 {
298 struct iop_msg *im;
299 int rv, i, j, state, nsegs;
300 u_int32_t mask;
301 char ident[64];
302
303 state = 0;
304
305 printf("I2O adapter");
306
307 if (iop_ictxhashtbl == NULL)
308 iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
309 M_DEVBUF, M_NOWAIT, &iop_ictxhash);
310
311 /* Disable interrupts at the IOP. */
312 mask = iop_inl(sc, IOP_REG_INTR_MASK);
313 iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);
314
315 /* Allocate a scratch DMA map for small miscellaneous shared data. */
316 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
317 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
318 printf("%s: cannot create scratch dmamap\n",
319 sc->sc_dv.dv_xname);
320 return;
321 }
322
323 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
324 sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
325 printf("%s: cannot alloc scratch dmamem\n",
326 sc->sc_dv.dv_xname);
327 goto bail_out;
328 }
329 state++;
330
331 if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
332 &sc->sc_scr, 0)) {
333 printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
334 goto bail_out;
335 }
336 state++;
337
338 if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
339 PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
340 printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
341 goto bail_out;
342 }
343 state++;
344
345 #ifdef I2ODEBUG
346 /* So that our debug checks don't choke. */
347 sc->sc_framesize = 128;
348 #endif
349
350 /* Reset the adapter and request status. */
351 if ((rv = iop_reset(sc)) != 0) {
352 printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
353 goto bail_out;
354 }
355
356 if ((rv = iop_status_get(sc, 1)) != 0) {
357 printf("%s: not responding (get status)\n",
358 sc->sc_dv.dv_xname);
359 goto bail_out;
360 }
361
362 sc->sc_flags |= IOP_HAVESTATUS;
363 iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
364 ident, sizeof(ident));
365 printf(" <%s>\n", ident);
366
367 #ifdef I2ODEBUG
368 printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
369 le16toh(sc->sc_status.orgid),
370 (le32toh(sc->sc_status.segnumber) >> 12) & 15);
371 printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
372 printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
373 le32toh(sc->sc_status.desiredprivmemsize),
374 le32toh(sc->sc_status.currentprivmemsize),
375 le32toh(sc->sc_status.currentprivmembase));
376 printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
377 le32toh(sc->sc_status.desiredpriviosize),
378 le32toh(sc->sc_status.currentpriviosize),
379 le32toh(sc->sc_status.currentpriviobase));
380 #endif
381
382 sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
383 if (sc->sc_maxob > IOP_MAX_OUTBOUND)
384 sc->sc_maxob = IOP_MAX_OUTBOUND;
385 sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
386 if (sc->sc_maxib > IOP_MAX_INBOUND)
387 sc->sc_maxib = IOP_MAX_INBOUND;
388 sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
389 if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
390 sc->sc_framesize = IOP_MAX_MSG_SIZE;
391
392 #if defined(I2ODEBUG) || defined(DIAGNOSTIC)
393 if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
394 printf("%s: frame size too small (%d)\n",
395 sc->sc_dv.dv_xname, sc->sc_framesize);
396 goto bail_out;
397 }
398 #endif
399
400 /* Allocate message wrappers. */
401 im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
402 if (im == NULL) {
403 printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
404 goto bail_out;
405 }
406 state++;
407 sc->sc_ims = im;
408 SLIST_INIT(&sc->sc_im_freelist);
409
410 for (i = 0; i < sc->sc_maxib; i++, im++) {
411 rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
412 IOP_MAX_SEGS, IOP_MAX_XFER, 0,
413 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
414 &im->im_xfer[0].ix_map);
415 if (rv != 0) {
416 printf("%s: couldn't create dmamap (%d)",
417 sc->sc_dv.dv_xname, rv);
418 goto bail_out3;
419 }
420
421 im->im_tctx = i;
422 SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
423 }
424
425 /* Initialise the IOP's outbound FIFO. */
426 if (iop_ofifo_init(sc) != 0) {
427 printf("%s: unable to init oubound FIFO\n",
428 sc->sc_dv.dv_xname);
429 goto bail_out3;
430 }
431
432 /*
433 * Defer further configuration until (a) interrupts are working and
434 * (b) we have enough information to build the system table.
435 */
436 config_interrupts((struct device *)sc, iop_config_interrupts);
437
438 /* Configure shutdown hook before we start any device activity. */
439 if (iop_sdh == NULL)
440 iop_sdh = shutdownhook_establish(iop_shutdown, NULL);
441
442 /* Ensure interrupts are enabled at the IOP. */
443 mask = iop_inl(sc, IOP_REG_INTR_MASK);
444 iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);
445
446 if (intrstr != NULL)
447 printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
448 intrstr);
449
450 #ifdef I2ODEBUG
451 printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
452 sc->sc_dv.dv_xname, sc->sc_maxib,
453 le32toh(sc->sc_status.maxinboundmframes),
454 sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
455 #endif
456
457 lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
458 return;
459
460 bail_out3:
461 if (state > 3) {
462 for (j = 0; j < i; j++)
463 bus_dmamap_destroy(sc->sc_dmat,
464 sc->sc_ims[j].im_xfer[0].ix_map);
465 free(sc->sc_ims, M_DEVBUF);
466 }
467 bail_out:
468 if (state > 2)
469 bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
470 if (state > 1)
471 bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
472 if (state > 0)
473 bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
474 bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
475 }
476
477 /*
478 * Perform autoconfiguration tasks.
479 */
480 static void
481 iop_config_interrupts(struct device *self)
482 {
483 struct iop_attach_args ia;
484 struct iop_softc *sc, *iop;
485 struct i2o_systab_entry *ste;
486 int rv, i, niop;
487 int locs[IOPCF_NLOCS];
488
489 sc = (struct iop_softc *)self;
490 LIST_INIT(&sc->sc_iilist);
491
492 printf("%s: configuring...\n", sc->sc_dv.dv_xname);
493
494 if (iop_hrt_get(sc) != 0) {
495 printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
496 return;
497 }
498
499 /*
500 * Build the system table.
501 */
502 if (iop_systab == NULL) {
503 for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
504 if ((iop = device_lookup(&iop_cd, i)) == NULL)
505 continue;
506 if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
507 continue;
508 if (iop_status_get(iop, 1) != 0) {
509 printf("%s: unable to retrieve status\n",
510 sc->sc_dv.dv_xname);
511 iop->sc_flags &= ~IOP_HAVESTATUS;
512 continue;
513 }
514 niop++;
515 }
516 if (niop == 0)
517 return;
518
519 i = sizeof(struct i2o_systab_entry) * (niop - 1) +
520 sizeof(struct i2o_systab);
521 iop_systab_size = i;
522 iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);
523
524 iop_systab->numentries = niop;
525 iop_systab->version = I2O_VERSION_11;
526
527 for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
528 if ((iop = device_lookup(&iop_cd, i)) == NULL)
529 continue;
530 if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
531 continue;
532
533 ste->orgid = iop->sc_status.orgid;
534 ste->iopid = iop->sc_dv.dv_unit + 2;
535 ste->segnumber =
536 htole32(le32toh(iop->sc_status.segnumber) & ~4095);
537 ste->iopcaps = iop->sc_status.iopcaps;
538 ste->inboundmsgframesize =
539 iop->sc_status.inboundmframesize;
540 ste->inboundmsgportaddresslow =
541 htole32(iop->sc_memaddr + IOP_REG_IFIFO);
542 ste++;
543 }
544 }
545
546 /*
547 * Post the system table to the IOP and bring it to the OPERATIONAL
548 * state.
549 */
550 if (iop_systab_set(sc) != 0) {
551 printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
552 return;
553 }
554 if (iop_sys_enable(sc) != 0) {
555 printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
556 return;
557 }
558
559 /*
560 * Set up an event handler for this IOP.
561 */
562 sc->sc_eventii.ii_dv = self;
563 sc->sc_eventii.ii_intr = iop_intr_event;
564 sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
565 sc->sc_eventii.ii_tid = I2O_TID_IOP;
566 iop_initiator_register(sc, &sc->sc_eventii);
567
568 rv = iop_util_eventreg(sc, &sc->sc_eventii,
569 I2O_EVENT_EXEC_RESOURCE_LIMITS |
570 I2O_EVENT_EXEC_CONNECTION_FAIL |
571 I2O_EVENT_EXEC_ADAPTER_FAULT |
572 I2O_EVENT_EXEC_POWER_FAIL |
573 I2O_EVENT_EXEC_RESET_PENDING |
574 I2O_EVENT_EXEC_RESET_IMMINENT |
575 I2O_EVENT_EXEC_HARDWARE_FAIL |
576 I2O_EVENT_EXEC_XCT_CHANGE |
577 I2O_EVENT_EXEC_DDM_AVAILIBILITY |
578 I2O_EVENT_GEN_DEVICE_RESET |
579 I2O_EVENT_GEN_STATE_CHANGE |
580 I2O_EVENT_GEN_GENERAL_WARNING);
581 if (rv != 0) {
582 printf("%s: unable to register for events", sc->sc_dv.dv_xname);
583 return;
584 }
585
586 /*
587 * Attempt to match and attach a product-specific extension.
588 */
589 ia.ia_class = I2O_CLASS_ANY;
590 ia.ia_tid = I2O_TID_IOP;
591 locs[IOPCF_TID] = I2O_TID_IOP;
592 config_found_sm_loc(self, "iop", locs, &ia, iop_print,
593 config_stdsubmatch);
594
595 /*
596 * Start device configuration.
597 */
598 lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
599 if ((rv = iop_reconfigure(sc, 0)) == -1) {
600 printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
601 return;
602 }
603 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
604
605 kthread_create(iop_create_reconf_thread, sc);
606 }
607
608 /*
609 * Create the reconfiguration thread. Called after the standard kernel
610 * threads have been created.
611 */
612 static void
613 iop_create_reconf_thread(void *cookie)
614 {
615 struct iop_softc *sc;
616 int rv;
617
618 sc = cookie;
619 sc->sc_flags |= IOP_ONLINE;
620
621 rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
622 "%s", sc->sc_dv.dv_xname);
623 if (rv != 0) {
624 printf("%s: unable to create reconfiguration thread (%d)",
625 sc->sc_dv.dv_xname, rv);
626 return;
627 }
628 }
629
630 /*
631 * Reconfiguration thread; listens for LCT change notification, and
632 * initiates re-configuration if received.
633 */
/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 *
 * Never returns: loops forever, blocking in iop_lct_get0() on a
 * verbatim LCT_NOTIFY whose change indicator is one past the last one
 * we have seen, then re-reading the LCT under sc_conflock.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct lwp *l;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	/* Ask to be notified of the *next* change, not the current state. */
	chgind = sc->sc_chgind + 1;
	l = curlwp;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		/* Hold the LWP in memory while DMA to &lct is outstanding. */
		PHOLD(l);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(l);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		/* Reconfigure only if the notify succeeded and we can
		 * take the configuration lock. */
		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		/* Rate-limit reconfiguration passes. */
		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}
668
669 /*
670 * Reconfigure: find new and removed devices.
671 */
672 int
673 iop_reconfigure(struct iop_softc *sc, u_int chgind)
674 {
675 struct iop_msg *im;
676 struct i2o_hba_bus_scan mf;
677 struct i2o_lct_entry *le;
678 struct iop_initiator *ii, *nextii;
679 int rv, tid, i;
680
681 /*
682 * If the reconfiguration request isn't the result of LCT change
683 * notification, then be more thorough: ask all bus ports to scan
684 * their busses. Wait up to 5 minutes for each bus port to complete
685 * the request.
686 */
687 if (chgind == 0) {
688 if ((rv = iop_lct_get(sc)) != 0) {
689 DPRINTF(("iop_reconfigure: unable to read LCT\n"));
690 return (rv);
691 }
692
693 le = sc->sc_lct->entry;
694 for (i = 0; i < sc->sc_nlctent; i++, le++) {
695 if ((le16toh(le->classid) & 4095) !=
696 I2O_CLASS_BUS_ADAPTER_PORT)
697 continue;
698 tid = le16toh(le->localtid) & 4095;
699
700 im = iop_msg_alloc(sc, IM_WAIT);
701
702 mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
703 mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
704 mf.msgictx = IOP_ICTX;
705 mf.msgtctx = im->im_tctx;
706
707 DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
708 tid));
709
710 rv = iop_msg_post(sc, im, &mf, 5*60*1000);
711 iop_msg_free(sc, im);
712 #ifdef I2ODEBUG
713 if (rv != 0)
714 printf("%s: bus scan failed\n",
715 sc->sc_dv.dv_xname);
716 #endif
717 }
718 } else if (chgind <= sc->sc_chgind) {
719 DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
720 return (0);
721 }
722
723 /* Re-read the LCT and determine if it has changed. */
724 if ((rv = iop_lct_get(sc)) != 0) {
725 DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
726 return (rv);
727 }
728 DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));
729
730 chgind = le32toh(sc->sc_lct->changeindicator);
731 if (chgind == sc->sc_chgind) {
732 DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
733 return (0);
734 }
735 DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
736 sc->sc_chgind = chgind;
737
738 if (sc->sc_tidmap != NULL)
739 free(sc->sc_tidmap, M_DEVBUF);
740 sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
741 M_DEVBUF, M_NOWAIT|M_ZERO);
742
743 /* Allow 1 queued command per device while we're configuring. */
744 iop_adjqparam(sc, 1);
745
746 /*
747 * Match and attach child devices. We configure high-level devices
748 * first so that any claims will propagate throughout the LCT,
749 * hopefully masking off aliased devices as a result.
750 *
751 * Re-reading the LCT at this point is a little dangerous, but we'll
752 * trust the IOP (and the operator) to behave itself...
753 */
754 iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
755 IC_CONFIGURE | IC_PRIORITY);
756 if ((rv = iop_lct_get(sc)) != 0)
757 DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
758 iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
759 IC_CONFIGURE);
760
761 for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
762 nextii = LIST_NEXT(ii, ii_list);
763
764 /* Detach devices that were configured, but are now gone. */
765 for (i = 0; i < sc->sc_nlctent; i++)
766 if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
767 break;
768 if (i == sc->sc_nlctent ||
769 (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
770 config_detach(ii->ii_dv, DETACH_FORCE);
771 continue;
772 }
773
774 /*
775 * Tell initiators that existed before the re-configuration
776 * to re-configure.
777 */
778 if (ii->ii_reconfig == NULL)
779 continue;
780 if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
781 printf("%s: %s failed reconfigure (%d)\n",
782 sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
783 }
784
785 /* Re-adjust queue parameters and return. */
786 if (sc->sc_nii != 0)
787 iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
788 / sc->sc_nii);
789
790 return (0);
791 }
792
793 /*
794 * Configure I2O devices into the system.
795 */
/*
 * Configure I2O devices into the system.
 *
 * Walk the LCT; for each entry whose class flags satisfy
 * (ic_flags & mask) == maskval and which is not claimed by another TID,
 * attach a child device (unless one is already attached for that TID).
 * Records each entry's TID and configuration state in sc_tidmap.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		/* Low 12 bits of localtid are the TID proper. */
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				/*
				 * NOTE(review): strcpy assumes it_dvname is
				 * large enough for any dv_xname — confirm
				 * against the iop_tidmap declaration.
				 */
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(&sc->sc_dv, "iop", locs, &ia,
		    iop_print, config_stdsubmatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}
852
853 /*
854 * Adjust queue parameters for all child devices.
855 */
856 static void
857 iop_adjqparam(struct iop_softc *sc, int mpi)
858 {
859 struct iop_initiator *ii;
860
861 LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
862 if (ii->ii_adjqparam != NULL)
863 (*ii->ii_adjqparam)(ii->ii_dv, mpi);
864 }
865
866 static void
867 iop_devinfo(int class, char *devinfo, size_t l)
868 {
869 #ifdef I2OVERBOSE
870 int i;
871
872 for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
873 if (class == iop_class[i].ic_class)
874 break;
875
876 if (i == sizeof(iop_class) / sizeof(iop_class[0]))
877 snprintf(devinfo, l, "device (class 0x%x)", class);
878 else
879 strlcpy(devinfo, iop_class[i].ic_caption, l);
880 #else
881
882 snprintf(devinfo, l, "device (class 0x%x)", class);
883 #endif
884 }
885
886 static int
887 iop_print(void *aux, const char *pnp)
888 {
889 struct iop_attach_args *ia;
890 char devinfo[256];
891
892 ia = aux;
893
894 if (pnp != NULL) {
895 iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
896 aprint_normal("%s at %s", devinfo, pnp);
897 }
898 aprint_normal(" tid %d", ia->ia_tid);
899 return (UNCONF);
900 }
901
902 /*
903 * Shut down all configured IOPs.
904 */
/*
 * Shut down all configured IOPs.
 *
 * Shutdown hook: quiesce every online IOP, and additionally issue
 * IOP_CLEAR on non-AMI boards (some AMI firmware hangs after a clear).
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		/* Quiesce first; allow up to 5 seconds. */
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}
936
937 /*
938 * Retrieve IOP status.
939 */
/*
 * Retrieve IOP status.
 *
 * Posts an EXEC_STATUS_GET whose reply is DMAed into the scratch page;
 * completion is detected by the IOP writing 0xff to the sync byte.
 * With "nosleep" set the wait is a busy-poll (usable before interrupts
 * or during autoconf); otherwise it tsleep()s.  On success the status
 * block is copied into sc->sc_status.  Returns 0 or an error number.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	/* The scratch page doubles as the reply buffer. */
	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	/* 64-bit physical address split across two 32-bit fields. */
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	/* Clear the sync byte before posting so we can detect completion. */
	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	/* Wait up to 25 intervals of ~100ms for the IOP to respond. */
	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff) {
		printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}
989
990 /*
991 * Initialize and populate the IOP's outbound FIFO.
992 */
/*
 * Initialize and populate the IOP's outbound FIFO.
 *
 * Sends EXEC_OUTBOUND_INIT (status word DMAed into the scratch page),
 * allocates DMA-safe reply-frame memory on first use, then feeds the
 * physical address of every reply frame to the outbound FIFO register.
 * Returns 0 on success or an error number.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	/* Status word lives at the start of the scratch page. */
	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	/* Frame size is passed in units of 32-bit words, in the high half. */
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	/* Account for the two extra SGL words in the message size field. */
	mb[0] += 2 << 16;

	/* Clear the status word before posting, then poll for completion. */
	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA alloc = %d\n", sc->sc_dv.dv_xname,
			   rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: DMA map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: DMA create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}
1087
1088 /*
1089 * Read the specified number of bytes from the IOP's hardware resource table.
1090 */
1091 static int
1092 iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
1093 {
1094 struct iop_msg *im;
1095 int rv;
1096 struct i2o_exec_hrt_get *mf;
1097 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1098
1099 im = iop_msg_alloc(sc, IM_WAIT);
1100 mf = (struct i2o_exec_hrt_get *)mb;
1101 mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
1102 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
1103 mf->msgictx = IOP_ICTX;
1104 mf->msgtctx = im->im_tctx;
1105
1106 iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
1107 rv = iop_msg_post(sc, im, mb, 30000);
1108 iop_msg_unmap(sc, im);
1109 iop_msg_free(sc, im);
1110 return (rv);
1111 }
1112
1113 /*
1114 * Read the IOP's hardware resource table.
1115 */
1116 static int
1117 iop_hrt_get(struct iop_softc *sc)
1118 {
1119 struct i2o_hrt hrthdr, *hrt;
1120 int size, rv;
1121
1122 PHOLD(curlwp);
1123 rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
1124 PRELE(curlwp);
1125 if (rv != 0)
1126 return (rv);
1127
1128 DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
1129 le16toh(hrthdr.numentries)));
1130
1131 size = sizeof(struct i2o_hrt) +
1132 (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
1133 hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
1134
1135 if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
1136 free(hrt, M_DEVBUF);
1137 return (rv);
1138 }
1139
1140 if (sc->sc_hrt != NULL)
1141 free(sc->sc_hrt, M_DEVBUF);
1142 sc->sc_hrt = hrt;
1143 return (0);
1144 }
1145
1146 /*
1147 * Request the specified number of bytes from the IOP's logical
1148 * configuration table. If a change indicator is specified, this
1149 * is a notification request, and the caller must be prepared
1150 * to wait indefinitely for the reply.
1151 */
1152 static int
1153 iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
1154 u_int32_t chgind)
1155 {
1156 struct iop_msg *im;
1157 struct i2o_exec_lct_notify *mf;
1158 int rv;
1159 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1160
1161 im = iop_msg_alloc(sc, IM_WAIT);
1162 memset(lct, 0, size);
1163
1164 mf = (struct i2o_exec_lct_notify *)mb;
1165 mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
1166 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
1167 mf->msgictx = IOP_ICTX;
1168 mf->msgtctx = im->im_tctx;
1169 mf->classid = I2O_CLASS_ANY;
1170 mf->changeindicator = chgind;
1171
1172 #ifdef I2ODEBUG
1173 printf("iop_lct_get0: reading LCT");
1174 if (chgind != 0)
1175 printf(" (async)");
1176 printf("\n");
1177 #endif
1178
1179 iop_msg_map(sc, im, mb, lct, size, 0, NULL);
1180 rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
1181 iop_msg_unmap(sc, im);
1182 iop_msg_free(sc, im);
1183 return (rv);
1184 }
1185
1186 /*
1187 * Read the IOP's logical configuration table.
1188 */
1189 int
1190 iop_lct_get(struct iop_softc *sc)
1191 {
1192 int esize, size, rv;
1193 struct i2o_lct *lct;
1194
1195 esize = le32toh(sc->sc_status.expectedlctsize);
1196 lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
1197 if (lct == NULL)
1198 return (ENOMEM);
1199
1200 if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
1201 free(lct, M_DEVBUF);
1202 return (rv);
1203 }
1204
1205 size = le16toh(lct->tablesize) << 2;
1206 if (esize != size) {
1207 free(lct, M_DEVBUF);
1208 lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
1209 if (lct == NULL)
1210 return (ENOMEM);
1211
1212 if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
1213 free(lct, M_DEVBUF);
1214 return (rv);
1215 }
1216 }
1217
1218 /* Swap in the new LCT. */
1219 if (sc->sc_lct != NULL)
1220 free(sc->sc_lct, M_DEVBUF);
1221 sc->sc_lct = lct;
1222 sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
1223 sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
1224 sizeof(struct i2o_lct_entry);
1225 return (0);
1226 }
1227
1228 /*
1229 * Post a SYS_ENABLE message to the adapter.
1230 */
1231 int
1232 iop_sys_enable(struct iop_softc *sc)
1233 {
1234 struct iop_msg *im;
1235 struct i2o_msg mf;
1236 int rv;
1237
1238 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
1239
1240 mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1241 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
1242 mf.msgictx = IOP_ICTX;
1243 mf.msgtctx = im->im_tctx;
1244
1245 rv = iop_msg_post(sc, im, &mf, 30000);
1246 if (rv == 0) {
1247 if ((im->im_flags & IM_FAIL) != 0)
1248 rv = ENXIO;
1249 else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
1250 (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
1251 im->im_detstatus == I2O_DSC_INVALID_REQUEST))
1252 rv = 0;
1253 else
1254 rv = EIO;
1255 }
1256
1257 iop_msg_free(sc, im);
1258 return (rv);
1259 }
1260
1261 /*
1262 * Request the specified parameter group from the target. If an initiator
1263 * is specified (a) don't wait for the operation to complete, but instead
1264 * let the initiator's interrupt handler deal with the reply and (b) place a
1265 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
1266 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
    int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	/* Synchronous (wait for the reply) unless an initiator was given. */
	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	/* The async completion path finds (and frees) the op through here. */
	im->im_dvcontext = pgop;

	/* Build a UTIL_PARAMS_GET frame addressed at the target TID. */
	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	/* One operation: FIELD_GET of all fields (0xffff) in `group'. */
	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curlwp);

	/* Map the op (out) and the receive buffer (in), then post. */
	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curlwp);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	/*
	 * Synchronous case, or a failed asynchronous post: clean up here.
	 * Otherwise the initiator's interrupt handler owns the cleanup.
	 */
	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}
1329
1330 /*
1331 * Set a single field in a scalar parameter group.
1332 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	/* Operation header followed immediately by the new field value. */
	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	/* Build a UTIL_PARAMS_SET frame addressed at the target TID. */
	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	/* One operation: FIELD_SET of a single field; value follows op. */
	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}
1377
1378 /*
1379 * Delete all rows in a tabular parameter group.
1380 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	/* Build a UTIL_PARAMS_SET frame addressed at the target TID. */
	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	/* One operation: TABLE_CLEAR of `group'; no field list. */
	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	/*
	 * pgop lives on the kernel stack and is DMA-mapped below;
	 * PHOLD presumably keeps the LWP (and hence its stack) resident
	 * until PRELE -- NOTE(review): confirm against PHOLD semantics.
	 */
	PHOLD(curlwp);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curlwp);
	iop_msg_free(sc, im);
	return (rv);
}
1418
1419 /*
1420 * Add a single row to a tabular parameter group. The row can have only one
1421 * field.
1422 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	/*
	 * Operation header, plus two extra u_int16_t slots (RowCount and
	 * KeyValue -- fields[1] and fields[2] below), plus the row data
	 * itself, which is copied in at fields[3].
	 */
	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	/* Build a UTIL_PARAMS_SET frame addressed at the target TID. */
	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	/* One operation: ROW_ADD of a single-field row keyed by `row'. */
	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0); /* FieldIdx */
	pgop->oat.fields[1] = htole16(1); /* RowCount */
	pgop->oat.fields[2] = htole16(row); /* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}
1469
1470 /*
1471 * Execute a simple command (no parameters).
1472 */
1473 int
1474 iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
1475 int async, int timo)
1476 {
1477 struct iop_msg *im;
1478 struct i2o_msg mf;
1479 int rv, fl;
1480
1481 fl = (async != 0 ? IM_WAIT : IM_POLL);
1482 im = iop_msg_alloc(sc, fl);
1483
1484 mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1485 mf.msgfunc = I2O_MSGFUNC(tid, function);
1486 mf.msgictx = ictx;
1487 mf.msgtctx = im->im_tctx;
1488
1489 rv = iop_msg_post(sc, im, &mf, timo);
1490 iop_msg_free(sc, im);
1491 return (rv);
1492 }
1493
1494 /*
1495 * Post the system table to the IOP.
1496 */
1497 static int
1498 iop_systab_set(struct iop_softc *sc)
1499 {
1500 struct i2o_exec_sys_tab_set *mf;
1501 struct iop_msg *im;
1502 bus_space_handle_t bsh;
1503 bus_addr_t boo;
1504 u_int32_t mema[2], ioa[2];
1505 int rv;
1506 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1507
1508 im = iop_msg_alloc(sc, IM_WAIT);
1509
1510 mf = (struct i2o_exec_sys_tab_set *)mb;
1511 mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
1512 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
1513 mf->msgictx = IOP_ICTX;
1514 mf->msgtctx = im->im_tctx;
1515 mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
1516 mf->segnumber = 0;
1517
1518 mema[1] = sc->sc_status.desiredprivmemsize;
1519 ioa[1] = sc->sc_status.desiredpriviosize;
1520
1521 if (mema[1] != 0) {
1522 rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
1523 le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
1524 mema[0] = htole32(boo);
1525 if (rv != 0) {
1526 printf("%s: can't alloc priv mem space, err = %d\n",
1527 sc->sc_dv.dv_xname, rv);
1528 mema[0] = 0;
1529 mema[1] = 0;
1530 }
1531 }
1532
1533 if (ioa[1] != 0) {
1534 rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
1535 le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
1536 ioa[0] = htole32(boo);
1537 if (rv != 0) {
1538 printf("%s: can't alloc priv i/o space, err = %d\n",
1539 sc->sc_dv.dv_xname, rv);
1540 ioa[0] = 0;
1541 ioa[1] = 0;
1542 }
1543 }
1544
1545 PHOLD(curlwp);
1546 iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
1547 iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
1548 iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
1549 rv = iop_msg_post(sc, im, mb, 5000);
1550 iop_msg_unmap(sc, im);
1551 iop_msg_free(sc, im);
1552 PRELE(curlwp);
1553 return (rv);
1554 }
1555
1556 /*
1557 * Reset the IOP. Must be called with interrupts disabled.
1558 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	/* The IOP reports reset progress via this DMA-safe scratch word. */
	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	/* Build the reset request; the IOP writes its status to `pa'. */
	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	/* Split the 64-bit physical address of the status word. */
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	/* Clear the status word before handing it to the IOP. */
	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	/* Spin up to 2.5s for the IOP to acknowledge the reset. */
	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state. Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	/* Return the frame we just drained; we don't need it. */
	iop_release_mfa(sc, mfa);
	return (0);
}
1608
1609 /*
1610 * Register a new initiator. Must be called with the configuration lock
1611 * held.
1612 */
1613 void
1614 iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
1615 {
1616 static int ictxgen;
1617 int s;
1618
1619 /* 0 is reserved (by us) for system messages. */
1620 ii->ii_ictx = ++ictxgen;
1621
1622 /*
1623 * `Utility initiators' don't make it onto the per-IOP initiator list
1624 * (which is used only for configuration), but do get one slot on
1625 * the inbound queue.
1626 */
1627 if ((ii->ii_flags & II_UTILITY) == 0) {
1628 LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
1629 sc->sc_nii++;
1630 } else
1631 sc->sc_nuii++;
1632
1633 s = splbio();
1634 LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
1635 splx(s);
1636 }
1637
1638 /*
1639 * Unregister an initiator. Must be called with the configuration lock
1640 * held.
1641 */
1642 void
1643 iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
1644 {
1645 int s;
1646
1647 if ((ii->ii_flags & II_UTILITY) == 0) {
1648 LIST_REMOVE(ii, ii_list);
1649 sc->sc_nii--;
1650 } else
1651 sc->sc_nuii--;
1652
1653 s = splbio();
1654 LIST_REMOVE(ii, ii_hash);
1655 splx(s);
1656 }
1657
1658 /*
1659 * Handle a reply frame from the IOP.
1660 */
1661 static int
1662 iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
1663 {
1664 struct iop_msg *im;
1665 struct i2o_reply *rb;
1666 struct i2o_fault_notify *fn;
1667 struct iop_initiator *ii;
1668 u_int off, ictx, tctx, status, size;
1669
1670 off = (int)(rmfa - sc->sc_rep_phys);
1671 rb = (struct i2o_reply *)(sc->sc_rep + off);
1672
1673 /* Perform reply queue DMA synchronisation. */
1674 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
1675 sc->sc_framesize, BUS_DMASYNC_POSTREAD);
1676 if (--sc->sc_curib != 0)
1677 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
1678 0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);
1679
1680 #ifdef I2ODEBUG
1681 if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
1682 panic("iop_handle_reply: 64-bit reply");
1683 #endif
1684 /*
1685 * Find the initiator.
1686 */
1687 ictx = le32toh(rb->msgictx);
1688 if (ictx == IOP_ICTX)
1689 ii = NULL;
1690 else {
1691 ii = LIST_FIRST(IOP_ICTXHASH(ictx));
1692 for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
1693 if (ii->ii_ictx == ictx)
1694 break;
1695 if (ii == NULL) {
1696 #ifdef I2ODEBUG
1697 iop_reply_print(sc, rb);
1698 #endif
1699 printf("%s: WARNING: bad ictx returned (%x)\n",
1700 sc->sc_dv.dv_xname, ictx);
1701 return (-1);
1702 }
1703 }
1704
1705 /*
1706 * If we received a transport failure notice, we've got to dig the
1707 * transaction context (if any) out of the original message frame,
1708 * and then release the original MFA back to the inbound FIFO.
1709 */
1710 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
1711 status = I2O_STATUS_SUCCESS;
1712
1713 fn = (struct i2o_fault_notify *)rb;
1714 tctx = iop_inl_msg(sc, fn->lowmfa + 12);
1715 iop_release_mfa(sc, fn->lowmfa);
1716 iop_tfn_print(sc, fn);
1717 } else {
1718 status = rb->reqstatus;
1719 tctx = le32toh(rb->msgtctx);
1720 }
1721
1722 if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
1723 /*
1724 * This initiator tracks state using message wrappers.
1725 *
1726 * Find the originating message wrapper, and if requested
1727 * notify the initiator.
1728 */
1729 im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
1730 if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
1731 (im->im_flags & IM_ALLOCED) == 0 ||
1732 tctx != im->im_tctx) {
1733 printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
1734 sc->sc_dv.dv_xname, tctx, im);
1735 if (im != NULL)
1736 printf("%s: flags=0x%08x tctx=0x%08x\n",
1737 sc->sc_dv.dv_xname, im->im_flags,
1738 im->im_tctx);
1739 #ifdef I2ODEBUG
1740 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
1741 iop_reply_print(sc, rb);
1742 #endif
1743 return (-1);
1744 }
1745
1746 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1747 im->im_flags |= IM_FAIL;
1748
1749 #ifdef I2ODEBUG
1750 if ((im->im_flags & IM_REPLIED) != 0)
1751 panic("%s: dup reply", sc->sc_dv.dv_xname);
1752 #endif
1753 im->im_flags |= IM_REPLIED;
1754
1755 #ifdef I2ODEBUG
1756 if (status != I2O_STATUS_SUCCESS)
1757 iop_reply_print(sc, rb);
1758 #endif
1759 im->im_reqstatus = status;
1760 im->im_detstatus = le16toh(rb->detail);
1761
1762 /* Copy the reply frame, if requested. */
1763 if (im->im_rb != NULL) {
1764 size = (le32toh(rb->msgflags) >> 14) & ~3;
1765 #ifdef I2ODEBUG
1766 if (size > sc->sc_framesize)
1767 panic("iop_handle_reply: reply too large");
1768 #endif
1769 memcpy(im->im_rb, rb, size);
1770 }
1771
1772 /* Notify the initiator. */
1773 if ((im->im_flags & IM_WAIT) != 0)
1774 wakeup(im);
1775 else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
1776 (*ii->ii_intr)(ii->ii_dv, im, rb);
1777 } else {
1778 /*
1779 * This initiator discards message wrappers.
1780 *
1781 * Simply pass the reply frame to the initiator.
1782 */
1783 (*ii->ii_intr)(ii->ii_dv, NULL, rb);
1784 }
1785
1786 return (status);
1787 }
1788
1789 /*
1790 * Handle an interrupt from the IOP.
1791 */
1792 int
1793 iop_intr(void *arg)
1794 {
1795 struct iop_softc *sc;
1796 u_int32_t rmfa;
1797
1798 sc = arg;
1799
1800 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
1801 return (0);
1802
1803 for (;;) {
1804 /* Double read to account for IOP bug. */
1805 if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
1806 rmfa = iop_inl(sc, IOP_REG_OFIFO);
1807 if (rmfa == IOP_MFA_EMPTY)
1808 break;
1809 }
1810 iop_handle_reply(sc, rmfa);
1811 iop_outl(sc, IOP_REG_OFIFO, rmfa);
1812 }
1813
1814 return (1);
1815 }
1816
1817 /*
1818 * Handle an event signalled by the executive.
1819 */
1820 static void
1821 iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
1822 {
1823 struct i2o_util_event_register_reply *rb;
1824 u_int event;
1825
1826 rb = reply;
1827
1828 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1829 return;
1830
1831 event = le32toh(rb->event);
1832 printf("%s: event 0x%08x received\n", dv->dv_xname, event);
1833 }
1834
1835 /*
1836 * Allocate a message wrapper.
1837 */
1838 struct iop_msg *
1839 iop_msg_alloc(struct iop_softc *sc, int flags)
1840 {
1841 struct iop_msg *im;
1842 static u_int tctxgen;
1843 int s, i;
1844
1845 #ifdef I2ODEBUG
1846 if ((flags & IM_SYSMASK) != 0)
1847 panic("iop_msg_alloc: system flags specified");
1848 #endif
1849
1850 s = splbio();
1851 im = SLIST_FIRST(&sc->sc_im_freelist);
1852 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
1853 if (im == NULL)
1854 panic("iop_msg_alloc: no free wrappers");
1855 #endif
1856 SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
1857 splx(s);
1858
1859 im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
1860 tctxgen += (1 << IOP_TCTX_SHIFT);
1861 im->im_flags = flags | IM_ALLOCED;
1862 im->im_rb = NULL;
1863 i = 0;
1864 do {
1865 im->im_xfer[i++].ix_size = 0;
1866 } while (i < IOP_MAX_MSG_XFERS);
1867
1868 return (im);
1869 }
1870
1871 /*
1872 * Free a message wrapper.
1873 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	/* Clear all flags and return the wrapper to the free list. */
	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}
1889
1890 /*
1891 * Map a data transfer. Write a scatter-gather list into the message frame.
1892 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	/* Find the first unused transfer slot in the wrapper. */
	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	/* With a process supplied, the buffer is in that process's VM. */
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 * mb[0] >> 16 is the current frame length in 32-bit words;
	 * each SIMPLE element occupies two words.
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.  The final element is written outside
	 * the loop so it can carry the END_BUFFER flag.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	/*
	 * NOTE(review): POST* flags are used here even though the device
	 * has not yet accessed the buffer; PRE* would be the expected
	 * pre-transfer sync -- confirm against the bus_dma(9) contract.
	 */
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
	/* Grow the frame length by the SGL words appended (2/segment). */
	mb[0] += (nsegs << 17);
	return (0);

bad:
	/* Only destroy maps we created above; slot 0's map is static. */
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}
1990
1991 /*
1992 * Map a block I/O data transfer (different in that there's only one per
1993 * message maximum, and PAGE addressing may be used). Write a scatter
1994 * gather list into the message frame.
1995 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	/* Block I/O always uses the static map in slot 0. */
	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
	if (rv != 0)
		return (rv);

	/* Room left in the frame, in two-word SIMPLE elements. */
	off = mb[0] >> 16;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
			slen = ds->ds_len;
			saddr = ds->ds_addr;

			/* Emit one page address per page spanned. */
			while (slen > 0) {
				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
				tlen = min(eaddr - saddr, slen);
				slen -= tlen;
				/*
				 * NOTE(review): le32toh() looks like it
				 * should be htole32() (host -> LE); the
				 * two are numerically identical on any
				 * one machine, but the intent should be
				 * confirmed and the name corrected.
				 */
				*p++ = le32toh(saddr);
				saddr = eaddr;
				nsegs++;
			}
		}

		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		if (out)
			mb[off] |= I2O_SGL_DATA_OUT;
	} else {
		p = mb + off;
		nsegs = dm->dm_nsegs;

		if (out)
			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
		else
			flg = I2O_SGL_SIMPLE;

		/* Last element is written below with the end flags. */
		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
			p[0] = (u_int32_t)ds->ds_len | flg;
			p[1] = (u_int32_t)ds->ds_addr;
		}

		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		p[1] = (u_int32_t)ds->ds_addr;
		/* SIMPLE elements occupy two words each. */
		nsegs <<= 1;
	}

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	/* NOTE(review): POST* vs PRE* -- see the same note in iop_msg_map. */
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * Adjust the SGL offset and total message size fields.  We don't
	 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
	 */
	mb[0] += ((off << 4) + (nsegs << 16));
	return (0);
}
2087
2088 /*
2089 * Unmap all data transfers associated with a message wrapper.
2090 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

#ifdef I2ODEBUG
	if (im->im_xfer[0].ix_size == 0)
		panic("iop_msg_unmap: no transfers mapped");
#endif

	/* Walk transfers until an unused slot (ix_size == 0) or the end. */
	for (ix = im->im_xfer, i = 0;;) {
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
		/*
		 * NOTE(review): when every slot is in use, the ++ix read
		 * below inspects one element past im_xfer[] before the
		 * i-based bound check fires -- confirm and reorder.
		 */
		if ((++ix)->ix_size == 0)
			break;
		if (++i >= IOP_MAX_MSG_XFERS)
			break;
	}
}
2117
2118 /*
2119 * Post a message frame to the IOP's inbound queue.
2120 */
int
iop_post(struct iop_softc *sc, u_int32_t *mb)
{
	u_int32_t mfa;
	int s;

#ifdef I2ODEBUG
	/* mb[0] >> 16 is the frame length in 32-bit words. */
	if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
		panic("iop_post: frame too large");
#endif

	s = splbio();

	/* Allocate a slot with the IOP.  Double read for the IOP bug. */
	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
			splx(s);
			printf("%s: mfa not forthcoming\n",
			    sc->sc_dv.dv_xname);
			return (EAGAIN);
		}

	/* Perform reply buffer DMA synchronisation. */
	if (sc->sc_curib++ == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);

	/* Copy out the message frame. */
	bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
	    mb[0] >> 16);
	/* Ensure the frame reaches the device before the FIFO post. */
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
	    (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP; this hands the frame over. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	splx(s);
	return (0);
}
2160
2161 /*
2162 * Post a message to the IOP and deal with completion.
2163 */
2164 int
2165 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2166 {
2167 u_int32_t *mb;
2168 int rv, s;
2169
2170 mb = xmb;
2171
2172 /* Terminate the scatter/gather list chain. */
2173 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2174 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2175
2176 if ((rv = iop_post(sc, mb)) != 0)
2177 return (rv);
2178
2179 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2180 if ((im->im_flags & IM_POLL) != 0)
2181 iop_msg_poll(sc, im, timo);
2182 else
2183 iop_msg_wait(sc, im, timo);
2184
2185 s = splbio();
2186 if ((im->im_flags & IM_REPLIED) != 0) {
2187 if ((im->im_flags & IM_NOSTATUS) != 0)
2188 rv = 0;
2189 else if ((im->im_flags & IM_FAIL) != 0)
2190 rv = ENXIO;
2191 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2192 rv = EIO;
2193 else
2194 rv = 0;
2195 } else
2196 rv = EBUSY;
2197 splx(s);
2198 } else
2199 rv = 0;
2200
2201 return (rv);
2202 }
2203
2204 /*
2205 * Spin until the specified message is replied to.
2206 */
static void
iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int32_t rmfa;
	int s;

	s = splbio();

	/* Wait for completion.  `timo' is in ms; we poll every 100us. */
	for (timo *= 10; timo != 0; timo--) {
		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
			/* Double read to account for IOP bug. */
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa != IOP_MFA_EMPTY) {
				/* May complete a different message too. */
				iop_handle_reply(sc, rmfa);

				/*
				 * Return the reply frame to the IOP's
				 * outbound FIFO.
				 */
				iop_outl(sc, IOP_REG_OFIFO, rmfa);
			}
		}
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		DELAY(100);
	}

	/* On timeout, IM_REPLIED stays clear; the caller maps it to EBUSY. */
	if (timo == 0) {
#ifdef I2ODEBUG
		printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
		if (iop_status_get(sc, 1) != 0)
			printf("iop_msg_poll: unable to retrieve status\n");
		else
			printf("iop_msg_poll: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
#endif
	}

	splx(s);
}
2250
2251 /*
2252 * Sleep until the specified message is replied to.
2253 */
static void
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	int s, rv;

	/* Raise to splbio so a reply can't slip in between check and sleep. */
	s = splbio();
	if ((im->im_flags & IM_REPLIED) != 0) {
		splx(s);
		return;
	}
	/* The interrupt handler wakes us via wakeup(im); rv is debug-only. */
	rv = tsleep(im, PRIBIO, "iopmsg", mstohz(timo));
	splx(s);

#ifdef I2ODEBUG
	if (rv != 0) {
		printf("iop_msg_wait: tsleep() == %d\n", rv);
		if (iop_status_get(sc, 0) != 0)
			printf("iop_msg_wait: unable to retrieve status\n");
		else
			printf("iop_msg_wait: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
	}
#endif
}
2278
/*
 * Release an unused message frame back to the IOP's inbound fifo.
 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op. */
	/* First word: I2O version plus (presumably) frame size — verify. */
	iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
	iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	iop_outl_msg(sc, mfa + 8, 0);
	iop_outl_msg(sc, mfa + 12, 0);

	/* Posting the MFA on the inbound FIFO hands it back to the IOP. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);
}
2294
#ifdef I2ODEBUG
/*
 * Dump a reply frame header to the console (debug builds only).
 */
static void
iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
{
	u_int function, detail;
#ifdef I2OVERBOSE
	const char *statusstr;
#endif

	/* The function code occupies the top byte of msgfunc. */
	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
	detail = le16toh(rb->detail);

	printf("%s: reply:\n", sc->sc_dv.dv_xname);

#ifdef I2OVERBOSE
	/* Translate the request status into text for known codes. */
	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: function=0x%02x status=0x%02x (%s)\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
#else
	printf("%s: function=0x%02x status=0x%02x\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus);
#endif
	printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
	    sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
	    le32toh(rb->msgtctx));
	/* Initiator/target TIDs are 12-bit fields packed into msgfunc. */
	printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
	    (le32toh(rb->msgflags) >> 8) & 0xff);
}
#endif
2332
/*
 * Dump a transport failure reply (i2o_fault_notify) to the console.
 */
static void
iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
{

	printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);

	printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
	printf("%s: failurecode=0x%02x severity=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
	printf("%s: highestver=0x%02x lowestver=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
}
2349
2350 /*
2351 * Translate an I2O ASCII field into a C string.
2352 */
2353 void
2354 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2355 {
2356 int hc, lc, i, nit;
2357
2358 dlen--;
2359 lc = 0;
2360 hc = 0;
2361 i = 0;
2362
2363 /*
2364 * DPT use NUL as a space, whereas AMI use it as a terminator. The
2365 * spec has nothing to say about it. Since AMI fields are usually
2366 * filled with junk after the terminator, ...
2367 */
2368 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2369
2370 while (slen-- != 0 && dlen-- != 0) {
2371 if (nit && *src == '\0')
2372 break;
2373 else if (*src <= 0x20 || *src >= 0x7f) {
2374 if (hc)
2375 dst[i++] = ' ';
2376 } else {
2377 hc = 1;
2378 dst[i++] = *src;
2379 lc = i;
2380 }
2381 src++;
2382 }
2383
2384 dst[lc] = '\0';
2385 }
2386
2387 /*
2388 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2389 */
2390 int
2391 iop_print_ident(struct iop_softc *sc, int tid)
2392 {
2393 struct {
2394 struct i2o_param_op_results pr;
2395 struct i2o_param_read_results prr;
2396 struct i2o_param_device_identity di;
2397 } __attribute__ ((__packed__)) p;
2398 char buf[32];
2399 int rv;
2400
2401 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2402 sizeof(p), NULL);
2403 if (rv != 0)
2404 return (rv);
2405
2406 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2407 sizeof(buf));
2408 printf(" <%s, ", buf);
2409 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2410 sizeof(buf));
2411 printf("%s, ", buf);
2412 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2413 printf("%s>", buf);
2414
2415 return (0);
2416 }
2417
2418 /*
2419 * Claim or unclaim the specified TID.
2420 */
2421 int
2422 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2423 int flags)
2424 {
2425 struct iop_msg *im;
2426 struct i2o_util_claim mf;
2427 int rv, func;
2428
2429 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2430 im = iop_msg_alloc(sc, IM_WAIT);
2431
2432 /* We can use the same structure, as they're identical. */
2433 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2434 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2435 mf.msgictx = ii->ii_ictx;
2436 mf.msgtctx = im->im_tctx;
2437 mf.flags = flags;
2438
2439 rv = iop_msg_post(sc, im, &mf, 5000);
2440 iop_msg_free(sc, im);
2441 return (rv);
2442 }
2443
2444 /*
2445 * Perform an abort.
2446 */
2447 int iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2448 int tctxabort, int flags)
2449 {
2450 struct iop_msg *im;
2451 struct i2o_util_abort mf;
2452 int rv;
2453
2454 im = iop_msg_alloc(sc, IM_WAIT);
2455
2456 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2457 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2458 mf.msgictx = ii->ii_ictx;
2459 mf.msgtctx = im->im_tctx;
2460 mf.flags = (func << 24) | flags;
2461 mf.tctxabort = tctxabort;
2462
2463 rv = iop_msg_post(sc, im, &mf, 5000);
2464 iop_msg_free(sc, im);
2465 return (rv);
2466 }
2467
2468 /*
2469 * Enable or disable reception of events for the specified device.
2470 */
2471 int iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2472 {
2473 struct i2o_util_event_register mf;
2474
2475 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2476 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2477 mf.msgictx = ii->ii_ictx;
2478 mf.msgtctx = 0;
2479 mf.eventmask = mask;
2480
2481 /* This message is replied to only when events are signalled. */
2482 return (iop_post(sc, (u_int32_t *)&mf));
2483 }
2484
2485 int
2486 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
2487 {
2488 struct iop_softc *sc;
2489
2490 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2491 return (ENXIO);
2492 if ((sc->sc_flags & IOP_ONLINE) == 0)
2493 return (ENXIO);
2494 if ((sc->sc_flags & IOP_OPEN) != 0)
2495 return (EBUSY);
2496 sc->sc_flags |= IOP_OPEN;
2497
2498 return (0);
2499 }
2500
2501 int
2502 iopclose(dev_t dev, int flag, int mode, struct lwp *l)
2503 {
2504 struct iop_softc *sc;
2505
2506 sc = device_lookup(&iop_cd, minor(dev));
2507 sc->sc_flags &= ~IOP_OPEN;
2508
2509 return (0);
2510 }
2511
/*
 * Character device ioctl handler: services pass-through, status, LCT,
 * TID map and reconfiguration requests from user space.
 */
int
iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
{
	struct iop_softc *sc;
	struct iovec *iov;
	int rv, i;

	/* Refuse all management access at securelevel >= 2. */
	if (securelevel >= 2)
		return (EPERM);

	sc = device_lookup(&iop_cd, minor(dev));

	switch (cmd) {
	case IOPIOCPT:
		/* Pass a caller-built message frame through to the IOP. */
		return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));

	case IOPIOCGSTATUS:
		/*
		 * Copy out the status record, truncating to the smaller
		 * of the record and the caller's buffer.
		 */
		iov = (struct iovec *)data;
		i = sizeof(struct i2o_status);
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		if ((rv = iop_status_get(sc, 0)) == 0)
			rv = copyout(&sc->sc_status, iov->iov_base, i);
		return (rv);

	case IOPIOCGLCT:
	case IOPIOCGTIDMAP:
	case IOPIOCRECONFIG:
		/* These require the configuration lock; handled below. */
		break;

	default:
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
#endif
		return (ENOTTY);
	}

	/* Take the configuration lock shared while copying data out. */
	if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
		return (rv);

	switch (cmd) {
	case IOPIOCGLCT:
		/*
		 * Copy out the logical configuration table.  The shift
		 * suggests tablesize is in 32-bit words — verify.
		 */
		iov = (struct iovec *)data;
		i = le16toh(sc->sc_lct->tablesize) << 2;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_lct, iov->iov_base, i);
		break;

	case IOPIOCRECONFIG:
		/* Reconfiguration modifies state: upgrade to exclusive. */
		if ((rv = lockmgr(&sc->sc_conflock, LK_UPGRADE, NULL)) == 0)
			rv = iop_reconfigure(sc, 0);
		break;

	case IOPIOCGTIDMAP:
		/* Copy out the per-LCT-entry TID map. */
		iov = (struct iovec *)data;
		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
		break;
	}

	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
	return (rv);
}
2584
2585 static int
2586 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2587 {
2588 struct iop_msg *im;
2589 struct i2o_msg *mf;
2590 struct ioppt_buf *ptb;
2591 int rv, i, mapped;
2592
2593 mf = NULL;
2594 im = NULL;
2595 mapped = 1;
2596
2597 if (pt->pt_msglen > sc->sc_framesize ||
2598 pt->pt_msglen < sizeof(struct i2o_msg) ||
2599 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2600 pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2601 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2602 return (EINVAL);
2603
2604 for (i = 0; i < pt->pt_nbufs; i++)
2605 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2606 rv = ENOMEM;
2607 goto bad;
2608 }
2609
2610 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2611 if (mf == NULL)
2612 return (ENOMEM);
2613
2614 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2615 goto bad;
2616
2617 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2618 im->im_rb = (struct i2o_reply *)mf;
2619 mf->msgictx = IOP_ICTX;
2620 mf->msgtctx = im->im_tctx;
2621
2622 for (i = 0; i < pt->pt_nbufs; i++) {
2623 ptb = &pt->pt_bufs[i];
2624 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2625 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2626 if (rv != 0)
2627 goto bad;
2628 mapped = 1;
2629 }
2630
2631 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2632 goto bad;
2633
2634 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2635 if (i > sc->sc_framesize)
2636 i = sc->sc_framesize;
2637 if (i > pt->pt_replylen)
2638 i = pt->pt_replylen;
2639 rv = copyout(im->im_rb, pt->pt_reply, i);
2640
2641 bad:
2642 if (mapped != 0)
2643 iop_msg_unmap(sc, im);
2644 if (im != NULL)
2645 iop_msg_free(sc, im);
2646 if (mf != NULL)
2647 free(mf, M_DEVBUF);
2648 return (rv);
2649 }
2650