/*	$NetBSD: iop.c,v 1.27 2002/10/22 13:42:33 ad Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.27 2002/10/22 13:42:33 ad Exp $");

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#define	POLL(ms, cond)				\
do {						\
	int i;					\
	for (i = (ms) * 10; i; i--) {		\
		if (cond)			\
			break;			\
		DELAY(100);			\
	}					\
} while (/* CONSTCOND */0)
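
/*
 * POLL() busy-waits for up to `ms' milliseconds, re-evaluating `cond'
 * every 100us.  A typical use, taken from iop_reset() below:
 *
 *	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
 */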

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#define	COMMENT(x)	NULL
#else
#define	IFVERBOSE(x)
#define	COMMENT(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

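/*
 * Worst-case segment count for a data transfer: IOP_MAX_XFER bytes
 * rounded up to whole pages, plus one for a buffer that does not begin
 * on a page boundary.
 */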
#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)
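
/*
 * Transaction contexts encode the index of the originating message
 * wrapper in the low IOP_TCTX_SHIFT bits; the bits above them hold a
 * generation number (see iop_msg_alloc()), which guards against a
 * stale reply matching a recycled wrapper.
 */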

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long	iop_ictxhash;
static void	*iop_sdh;
static struct	i2o_systab *iop_systab;
static int	iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char	*ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);
static int	iop_vendor_print(void *, const char *);

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
		    u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
		    struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

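/*
 * Register access helpers.  Reads are preceded by a full read/write
 * barrier and writes are followed by a write barrier, keeping FIFO
 * accesses ordered with respect to the message frames they refer to.
 */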
static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		printf("%s: frame size too small (%d)\n",
		    sc->sc_dv.dv_xname, sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0, state++; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	return;

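	/*
	 * Unwind in the reverse order of allocation.  `state' records how
	 * far initialisation got: 1 = scratch memory allocated, 2 = mapped,
	 * 3 = loaded, >3 = message wrappers allocated.
	 */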
 bail_out:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    30000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);

	/*
	 * Start device configuration.
	 */
	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	if ((rv = iop_reconfigure(sc, 0)) == -1) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		return;
	}
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(sc->sc_reconf_proc);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(sc->sc_reconf_proc);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0)
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

static int
iop_vendor_print(void *aux, const char *pnp)
{

	return (QUIET);
}

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return (config_match(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff) {
		printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
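	/*
	 * Append a single SIMPLE element describing the status word, and
	 * grow the message size field (the high 16 bits of word 0) by the
	 * two SGL words just added.
	 */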
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curproc);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curproc);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
    u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
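	/*
	 * Note that struct i2o_lct includes storage for the first table
	 * entry, so one entry's size is added back before dividing by the
	 * entry size.
	 */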
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
    int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct i2o_reply *rf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;
	im->im_rb = rf;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curproc);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curproc);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (rf->reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		free(rf, M_DEVBUF);
	}

	return (rv);
}
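
/*
 * A typical synchronous caller passes a NULL initiator and a buffer
 * sized for the parameter group; sketched here with hypothetical group
 * and structure names:
 *
 *	struct i2o_param_xx_yy p;
 *
 *	if (iop_field_get_all(sc, tid, I2O_PARAM_XX_YY, &p,
 *	    sizeof(p), NULL) != 0)
 *		return (EIO);
 */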

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curproc);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
    int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}
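
/*
 * For example, iop_shutdown() above quiesces each IOP by polling a
 * simple command to completion:
 *
 *	iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
 *	    0, 5000);
 */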

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curproc);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}
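
/*
 * iop_config_interrupts() above shows the usual registration pattern:
 * fill in the initiator before registering it, e.g. for the per-IOP
 * event initiator:
 *
 *	sc->sc_eventii.ii_dv = self;
 *	sc->sc_eventii.ii_intr = iop_intr_event;
 *	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
 *	sc->sc_eventii.ii_tid = I2O_TID_IOP;
 *	iop_initiator_register(sc, &sc->sc_eventii);
 */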

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/*
	 * Perform reply queue DMA synchronisation.  This pairs with the
	 * PREREAD sync issued in iop_post() when the first command of a
	 * batch goes out; the buffer is kept synced for reading while
	 * commands remain outstanding.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
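			/*
			 * The high 16 bits of msgflags hold the frame size
			 * in 32-bit words; shifting by only 14 converts it
			 * directly to bytes.
			 */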
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?  Each
	 * element consumes two 32-bit words: flags/length and address.
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
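		/*
		 * Set the SGL offset field (the high nibble of the
		 * version/offset byte) to the current message size in
		 * 32-bit words.
		 */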
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
	mb[0] += (nsegs << 17);
	return (0);

 bad:
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}
1953
1954 /*
1955 * Map a block I/O data transfer (different in that there's only one per
1956 * message maximum, and PAGE addressing may be used). Write a scatter
1957 * gather list into the message frame.
1958 */
1959 int
1960 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1961 void *xferaddr, int xfersize, int out)
1962 {
1963 bus_dma_segment_t *ds;
1964 bus_dmamap_t dm;
1965 struct iop_xfer *ix;
1966 u_int rv, i, nsegs, off, slen, tlen, flg;
1967 paddr_t saddr, eaddr;
1968 u_int32_t *p;
1969
1970 #ifdef I2ODEBUG
1971 if (xfersize == 0)
1972 panic("iop_msg_map_bio: null transfer");
1973 if (xfersize > IOP_MAX_XFER)
1974 panic("iop_msg_map_bio: transfer too large");
1975 if ((im->im_flags & IM_SGLOFFADJ) != 0)
1976 panic("iop_msg_map_bio: SGLOFFADJ");
1977 #endif
1978
1979 ix = im->im_xfer;
1980 dm = ix->ix_map;
1981 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
1982 BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
1983 if (rv != 0)
1984 return (rv);
1985
1986 off = mb[0] >> 16;
1987 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
1988
1989 /*
1990 * If the transfer is highly fragmented and won't fit using SIMPLE
1991 * elements, use PAGE_LIST elements instead. SIMPLE elements are
1992 * potentially more efficient, both for us and the IOP.
1993 */
1994 if (dm->dm_nsegs > nsegs) {
1995 nsegs = 1;
1996 p = mb + off + 1;
1997
1998 /* XXX This should be done with a bus_space flag. */
1999 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2000 slen = ds->ds_len;
2001 saddr = ds->ds_addr;
2002
2003 while (slen > 0) {
2004 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2005 tlen = min(eaddr - saddr, slen);
2006 slen -= tlen;
2007 *p++ = le32toh(saddr);
2008 saddr = eaddr;
2009 nsegs++;
2010 }
2011 }
2012
2013 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2014 I2O_SGL_END;
2015 if (out)
2016 mb[off] |= I2O_SGL_DATA_OUT;
2017 } else {
2018 p = mb + off;
2019 nsegs = dm->dm_nsegs;
2020
2021 if (out)
2022 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2023 else
2024 flg = I2O_SGL_SIMPLE;
2025
2026 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2027 p[0] = (u_int32_t)ds->ds_len | flg;
2028 p[1] = (u_int32_t)ds->ds_addr;
2029 }
2030
2031 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2032 I2O_SGL_END;
2033 p[1] = (u_int32_t)ds->ds_addr;
2034 		nsegs <<= 1;		/* SG elements -> message words */
2035 }
2036
2037 /* Fix up the transfer record, and sync the map. */
2038 ix->ix_flags = (out ? IX_OUT : IX_IN);
2039 ix->ix_size = xfersize;
2040 	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2041 	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2042
2043 /*
2044 * Adjust the SGL offset and total message size fields. We don't
2045 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2046 */
2047 mb[0] += ((off << 4) + (nsegs << 16));
2048 return (0);
2049 }
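
/*
 * Illustrative sketch, not driver code: the PAGE_LIST element built in
 * the fragmented case above.  A single header word carries the total
 * byte count and flags, followed by one word per page of the transfer.
 * The values are invented and assume PAGE_SIZE == 4096; for a write,
 * I2O_SGL_DATA_OUT would be OR'd into the header word.
 */
#ifdef notdef
static void
iop_sgl_pagelist_example(u_int32_t *p)
{

	p[0] = 0x2000 | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
	    I2O_SGL_END;		/* 8192 byte read */
	p[1] = 0x00300000;		/* bus address of page 0 */
	p[2] = 0x00304000;		/* bus address of page 1 */
}
#endif	/* notdef */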
2050
2051 /*
2052 * Unmap all data transfers associated with a message wrapper.
2053 */
2054 void
2055 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2056 {
2057 struct iop_xfer *ix;
2058 int i;
2059
2060 #ifdef I2ODEBUG
2061 if (im->im_xfer[0].ix_size == 0)
2062 panic("iop_msg_unmap: no transfers mapped");
2063 #endif
2064
2065 for (ix = im->im_xfer, i = 0;;) {
2066 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2067 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2068 BUS_DMASYNC_POSTREAD);
2069 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2070
2071 /* Only the first DMA map is static. */
2072 if (i != 0)
2073 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2074 		if (++i >= IOP_MAX_MSG_XFERS)	/* don't index past im_xfer[] */
2075 			break;
2076 		if ((++ix)->ix_size == 0)
2077 			break;
2078 }
2079 }
2080
2081 /*
2082 * Post a message frame to the IOP's inbound queue.
2083 */
2084 int
2085 iop_post(struct iop_softc *sc, u_int32_t *mb)
2086 {
2087 u_int32_t mfa;
2088 int s;
2089
2090 #ifdef I2ODEBUG
2091 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2092 panic("iop_post: frame too large");
2093 #endif
2094
2095 s = splbio();
2096
2097 /* Allocate a slot with the IOP. */
2098 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2099 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2100 splx(s);
2101 printf("%s: mfa not forthcoming\n",
2102 sc->sc_dv.dv_xname);
2103 return (EAGAIN);
2104 }
2105
2106 /* Perform reply buffer DMA synchronisation. */
2107 if (sc->sc_curib++ == 0)
2108 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2109 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2110
2111 /* Copy out the message frame. */
2112 bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa, mb, mb[0] >> 16);
2113 bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa, (mb[0] >> 14) & ~3,
2114 BUS_SPACE_BARRIER_WRITE);
2115
2116 /* Post the MFA back to the IOP. */
2117 iop_outl(sc, IOP_REG_IFIFO, mfa);
2118
2119 splx(s);
2120 return (0);
2121 }
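
/*
 * Illustrative sketch, not driver code: the smallest frame iop_post()
 * will take, a four-word UtilNOP like the one iop_release_mfa() builds
 * below.  The frame size in words lives in the top 16 bits of word 0,
 * which is why iop_post() copies out "mb[0] >> 16" words.
 */
#ifdef notdef
static void
iop_post_nop_example(struct iop_softc *sc)
{
	u_int32_t mb[4];

	mb[0] = I2O_VERSION_11 | (4 << 16);	/* 4 words, I2O 1.1 */
	mb[1] = I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP);
	mb[2] = 0;				/* msgictx */
	mb[3] = 0;				/* msgtctx */
	(void)iop_post(sc, mb);
}
#endif	/* notdef */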
2122
2123 /*
2124 * Post a message to the IOP and deal with completion.
2125 */
2126 int
2127 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2128 {
2129 u_int32_t *mb;
2130 int rv, s;
2131
2132 mb = xmb;
2133
2134 	/* Terminate the s/g list: the last element starts 2 words from the end. */
2135 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2136 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2137
2138 if ((rv = iop_post(sc, mb)) != 0)
2139 return (rv);
2140
2141 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2142 if ((im->im_flags & IM_POLL) != 0)
2143 iop_msg_poll(sc, im, timo);
2144 else
2145 iop_msg_wait(sc, im, timo);
2146
2147 s = splbio();
2148 if ((im->im_flags & IM_REPLIED) != 0) {
2149 if ((im->im_flags & IM_NOSTATUS) != 0)
2150 rv = 0;
2151 else if ((im->im_flags & IM_FAIL) != 0)
2152 rv = ENXIO;
2153 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2154 rv = EIO;
2155 else
2156 rv = 0;
2157 } else
2158 rv = EBUSY;
2159 splx(s);
2160 } else
2161 rv = 0;
2162
2163 return (rv);
2164 }
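
/*
 * Illustrative sketch, not driver code: the usual shape of a
 * synchronous transaction built on the routines above.  "mb" is
 * assumed to hold a frame already filled in by the caller, and
 * "buf"/"len" a single outbound kernel buffer; the 30 second timeout
 * is likewise invented for the example.
 */
#ifdef notdef
static int
iop_example_xfer(struct iop_softc *sc, u_int32_t *mb, void *buf, int len)
{
	struct i2o_msg *mf;
	struct iop_msg *im;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);	/* sleep for the reply */

	mf = (struct i2o_msg *)mb;
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	/* Append SIMPLE SG elements describing the outbound buffer. */
	rv = iop_msg_map(sc, im, mb, buf, len, 1, NULL);
	if (rv == 0) {
		/* iop_msg_post() terminates the SGL before posting. */
		rv = iop_msg_post(sc, im, mb, 30000);
		iop_msg_unmap(sc, im);
	}
	iop_msg_free(sc, im);
	return (rv);
}
#endif	/* notdef */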
2165
2166 /*
2167 * Spin until the specified message is replied to.
2168 */
2169 static void
2170 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2171 {
2172 u_int32_t rmfa;
2173 int s, status;
2174
2175 s = splbio();
2176
2177 /* Wait for completion. */
2178 for (timo *= 10; timo != 0; timo--) {
2179 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2180 /* Double read to account for IOP bug. */
2181 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2182 if (rmfa == IOP_MFA_EMPTY)
2183 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2184 if (rmfa != IOP_MFA_EMPTY) {
2185 status = iop_handle_reply(sc, rmfa);
2186
2187 /*
2188 * Return the reply frame to the IOP's
2189 * outbound FIFO.
2190 */
2191 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2192 }
2193 }
2194 if ((im->im_flags & IM_REPLIED) != 0)
2195 break;
2196 DELAY(100);
2197 }
2198
2199 if (timo == 0) {
2200 #ifdef I2ODEBUG
2201 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2202 if (iop_status_get(sc, 1) != 0)
2203 printf("iop_msg_poll: unable to retrieve status\n");
2204 else
2205 printf("iop_msg_poll: IOP state = %d\n",
2206 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2207 #endif
2208 }
2209
2210 splx(s);
2211 }
2212
2213 /*
2214 * Sleep until the specified message is replied to.
2215 */
2216 static void
2217 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2218 {
2219 int s, rv;
2220
2221 s = splbio();
2222 if ((im->im_flags & IM_REPLIED) != 0) {
2223 splx(s);
2224 return;
2225 }
2226 rv = tsleep(im, PRIBIO, "iopmsg", mstohz(timo));
2227 splx(s);
2228
2229 #ifdef I2ODEBUG
2230 if (rv != 0) {
2231 printf("iop_msg_wait: tsleep() == %d\n", rv);
2232 if (iop_status_get(sc, 0) != 0)
2233 printf("iop_msg_wait: unable to retrieve status\n");
2234 else
2235 printf("iop_msg_wait: IOP state = %d\n",
2236 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2237 }
2238 #endif
2239 }
2240
2241 /*
2242 * Release an unused message frame back to the IOP's inbound fifo.
2243 */
2244 static void
2245 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2246 {
2247
2248 /* Use the frame to issue a no-op. */
2249 iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
2250 iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2251 iop_outl(sc, mfa + 8, 0);
2252 iop_outl(sc, mfa + 12, 0);
2253
2254 iop_outl(sc, IOP_REG_IFIFO, mfa);
2255 }
2256
2257 #ifdef I2ODEBUG
2258 /*
2259 * Dump a reply frame header.
2260 */
2261 static void
2262 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2263 {
2264 u_int function, detail;
2265 #ifdef I2OVERBOSE
2266 const char *statusstr;
2267 #endif
2268
2269 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2270 detail = le16toh(rb->detail);
2271
2272 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2273
2274 #ifdef I2OVERBOSE
2275 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2276 statusstr = iop_status[rb->reqstatus];
2277 else
2278 statusstr = "undefined error code";
2279
2280 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2281 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2282 #else
2283 printf("%s: function=0x%02x status=0x%02x\n",
2284 sc->sc_dv.dv_xname, function, rb->reqstatus);
2285 #endif
2286 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2287 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2288 le32toh(rb->msgtctx));
2289 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2290 	    (le32toh(rb->msgfunc) >> 12) & 0xfff, le32toh(rb->msgfunc) & 0xfff,
2291 (le32toh(rb->msgflags) >> 8) & 0xff);
2292 }
2293 #endif
2294
2295 /*
2296 * Dump a transport failure reply.
2297 */
2298 static void
2299 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2300 {
2301
2302 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2303
2304 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2305 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2306 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2307 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2308 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2309 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2310 }
2311
2312 /*
2313 * Translate an I2O ASCII field into a C string.
2314 */
2315 void
2316 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2317 {
2318 int hc, lc, i, nit;
2319
2320 dlen--;
2321 lc = 0;
2322 hc = 0;
2323 i = 0;
2324
2325 /*
2326 	 * DPT uses NUL as a space, whereas AMI uses it as a terminator; the
2327 	 * spec has nothing to say about it.  AMI fields are usually filled
2328 	 * with junk after the terminator, so stop at the first NUL for non-DPT.
2329 */
2330 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2331
2332 while (slen-- != 0 && dlen-- != 0) {
2333 if (nit && *src == '\0')
2334 break;
2335 else if (*src <= 0x20 || *src >= 0x7f) {
2336 if (hc)
2337 dst[i++] = ' ';
2338 } else {
2339 hc = 1;
2340 dst[i++] = *src;
2341 lc = i;
2342 }
2343 src++;
2344 }
2345
2346 dst[lc] = '\0';
2347 }
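
/*
 * For example, given the 5-byte field "AB\0CD": an AMI IOP's NUL
 * terminates the string, while a DPT IOP's NUL reads as a space.
 */
#ifdef notdef
static void
iop_strvis_example(struct iop_softc *sc)
{
	char buf[32];

	iop_strvis(sc, "AB\0CD", 5, buf, sizeof(buf));
	/* buf = "AB" (AMI et al) or "AB CD" (DPT) */
}
#endif	/* notdef */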
2348
2349 /*
2350 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2351 */
2352 int
2353 iop_print_ident(struct iop_softc *sc, int tid)
2354 {
2355 struct {
2356 struct i2o_param_op_results pr;
2357 struct i2o_param_read_results prr;
2358 struct i2o_param_device_identity di;
2359 } __attribute__ ((__packed__)) p;
2360 char buf[32];
2361 int rv;
2362
2363 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2364 sizeof(p), NULL);
2365 if (rv != 0)
2366 return (rv);
2367
2368 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2369 sizeof(buf));
2370 printf(" <%s, ", buf);
2371 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2372 sizeof(buf));
2373 printf("%s, ", buf);
2374 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2375 printf("%s>", buf);
2376
2377 return (0);
2378 }
2379
2380 /*
2381 * Claim or unclaim the specified TID.
2382 */
2383 int
2384 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2385 int flags)
2386 {
2387 struct iop_msg *im;
2388 struct i2o_util_claim mf;
2389 int rv, func;
2390
2391 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2392 im = iop_msg_alloc(sc, IM_WAIT);
2393
2394 	/* The claim and claim-release message frames are identical. */
2395 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2396 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2397 mf.msgictx = ii->ii_ictx;
2398 mf.msgtctx = im->im_tctx;
2399 mf.flags = flags;
2400
2401 rv = iop_msg_post(sc, im, &mf, 5000);
2402 iop_msg_free(sc, im);
2403 return (rv);
2404 }
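
/*
 * Illustrative sketch, not driver code: a child device driver claiming
 * its target at attach time.  The I2O_UTIL_CLAIM_PRIMARY_USER flag is
 * assumed to come from the i2o headers; "ii" is the initiator the
 * child registered.
 */
#ifdef notdef
static void
iop_claim_example(struct iop_softc *sc, struct iop_initiator *ii)
{

	if (iop_util_claim(sc, ii, 0, I2O_UTIL_CLAIM_PRIMARY_USER) != 0)
		printf("%s: unable to claim device\n", sc->sc_dv.dv_xname);
}
#endif	/* notdef */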
2405
2406 /*
2407 * Perform an abort.
2408 */
2409 int
2410 iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
     int tctxabort, int flags)
2411 {
2412 struct iop_msg *im;
2413 struct i2o_util_abort mf;
2414 int rv;
2415
2416 im = iop_msg_alloc(sc, IM_WAIT);
2417
2418 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2419 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2420 mf.msgictx = ii->ii_ictx;
2421 mf.msgtctx = im->im_tctx;
2422 mf.flags = (func << 24) | flags;
2423 mf.tctxabort = tctxabort;
2424
2425 rv = iop_msg_post(sc, im, &mf, 5000);
2426 iop_msg_free(sc, im);
2427 return (rv);
2428 }
2429
2430 /*
2431 * Enable or disable reception of events for the specified device.
2432 */
2433 int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2434 {
2435 struct i2o_util_event_register mf;
2436
2437 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2438 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2439 mf.msgictx = ii->ii_ictx;
2440 mf.msgtctx = 0;
2441 mf.eventmask = mask;
2442
2443 /* This message is replied to only when events are signalled. */
2444 return (iop_post(sc, (u_int32_t *)&mf));
2445 }
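
/*
 * Illustrative sketch, not driver code: enabling reception of a couple
 * of generic events and later disabling it again with an empty mask.
 * The I2O_EVENT_GEN_* bits are assumed to come from the i2o headers.
 */
#ifdef notdef
static void
iop_eventreg_example(struct iop_softc *sc, struct iop_initiator *ii)
{

	iop_util_eventreg(sc, ii, I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE);

	/* ... */

	iop_util_eventreg(sc, ii, 0);		/* disable */
}
#endif	/* notdef */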
2446
2447 int
2448 iopopen(dev_t dev, int flag, int mode, struct proc *p)
2449 {
2450 struct iop_softc *sc;
2451
2452 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2453 return (ENXIO);
2454 if ((sc->sc_flags & IOP_ONLINE) == 0)
2455 return (ENXIO);
2456 if ((sc->sc_flags & IOP_OPEN) != 0)
2457 return (EBUSY);
2458 sc->sc_flags |= IOP_OPEN;
2459
2460 return (0);
2461 }
2462
2463 int
2464 iopclose(dev_t dev, int flag, int mode, struct proc *p)
2465 {
2466 struct iop_softc *sc;
2467
2468 sc = device_lookup(&iop_cd, minor(dev));
2469 sc->sc_flags &= ~IOP_OPEN;
2470
2471 return (0);
2472 }
2473
2474 int
2475 iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
2476 {
2477 struct iop_softc *sc;
2478 struct iovec *iov;
2479 int rv, i;
2480
2481 if (securelevel >= 2)
2482 return (EPERM);
2483
2484 sc = device_lookup(&iop_cd, minor(dev));
2485
2486 switch (cmd) {
2487 case IOPIOCPT:
2488 return (iop_passthrough(sc, (struct ioppt *)data, p));
2489
2490 case IOPIOCGSTATUS:
2491 iov = (struct iovec *)data;
2492 i = sizeof(struct i2o_status);
2493 if (i > iov->iov_len)
2494 i = iov->iov_len;
2495 else
2496 iov->iov_len = i;
2497 if ((rv = iop_status_get(sc, 0)) == 0)
2498 rv = copyout(&sc->sc_status, iov->iov_base, i);
2499 return (rv);
2500
2501 case IOPIOCGLCT:
2502 case IOPIOCGTIDMAP:
2503 case IOPIOCRECONFIG:
2504 break;
2505
2506 default:
2507 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2508 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2509 #endif
2510 return (ENOTTY);
2511 }
2512
2513 if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2514 return (rv);
2515
2516 switch (cmd) {
2517 case IOPIOCGLCT:
2518 iov = (struct iovec *)data;
2519 i = le16toh(sc->sc_lct->tablesize) << 2;
2520 if (i > iov->iov_len)
2521 i = iov->iov_len;
2522 else
2523 iov->iov_len = i;
2524 rv = copyout(sc->sc_lct, iov->iov_base, i);
2525 break;
2526
2527 case IOPIOCRECONFIG:
2528 rv = iop_reconfigure(sc, 0);
2529 break;
2530
2531 case IOPIOCGTIDMAP:
2532 iov = (struct iovec *)data;
2533 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2534 if (i > iov->iov_len)
2535 i = iov->iov_len;
2536 else
2537 iov->iov_len = i;
2538 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2539 break;
2540 }
2541
2542 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2543 return (rv);
2544 }
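
/*
 * Illustrative sketch, not driver code: how a userland utility (e.g.
 * iopctl(8)) might read the IOP status block.  IOPIOCGSTATUS takes an
 * iovec describing the caller's buffer; on return iov_len holds the
 * size actually copied out.  The device node name is an assumption,
 * and error handling is omitted.
 */
#ifdef notdef
static void
iop_status_example(void)
{
	struct i2o_status st;
	struct iovec iov;
	int fd;

	fd = open("/dev/iop0", O_RDWR);
	iov.iov_base = &st;
	iov.iov_len = sizeof(st);
	if (ioctl(fd, IOPIOCGSTATUS, &iov) == 0)
		printf("IOP state %d\n",
		    (le32toh(st.segnumber) >> 16) & 0xff);
	close(fd);
}
#endif	/* notdef */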
2545
2546 static int
2547 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2548 {
2549 struct iop_msg *im;
2550 struct i2o_msg *mf;
2551 struct ioppt_buf *ptb;
2552 int rv, i, mapped;
2553
2554 mf = NULL;
2555 im = NULL;
2556 	mapped = 0;		/* no buffers mapped yet */
2557
2558 if (pt->pt_msglen > sc->sc_framesize ||
2559 pt->pt_msglen < sizeof(struct i2o_msg) ||
2560 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2561 pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2562 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2563 return (EINVAL);
2564
2565 for (i = 0; i < pt->pt_nbufs; i++)
2566 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2567 rv = ENOMEM;
2568 goto bad;
2569 }
2570
2571 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2572 if (mf == NULL)
2573 return (ENOMEM);
2574
2575 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2576 goto bad;
2577
2578 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2579 im->im_rb = (struct i2o_reply *)mf;
2580 mf->msgictx = IOP_ICTX;
2581 mf->msgtctx = im->im_tctx;
2582
2583 for (i = 0; i < pt->pt_nbufs; i++) {
2584 ptb = &pt->pt_bufs[i];
2585 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2586 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2587 if (rv != 0)
2588 goto bad;
2589 mapped = 1;
2590 }
2591
2592 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2593 goto bad;
2594
2595 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2596 if (i > sc->sc_framesize)
2597 i = sc->sc_framesize;
2598 if (i > pt->pt_replylen)
2599 i = pt->pt_replylen;
2600 rv = copyout(im->im_rb, pt->pt_reply, i);
2601
2602 bad:
2603 if (mapped != 0)
2604 iop_msg_unmap(sc, im);
2605 if (im != NULL)
2606 iop_msg_free(sc, im);
2607 if (mf != NULL)
2608 free(mf, M_DEVBUF);
2609 return (rv);
2610 }
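
/*
 * Illustrative sketch, not driver code: the shape of an IOPIOCPT call
 * from userland.  The frame contents are elided; the driver overwrites
 * the initiator and transaction contexts itself, and pt_timo must lie
 * between one second and five minutes, as checked above.  "fd" is an
 * open descriptor for the iop device node.
 */
#ifdef notdef
static void
iop_pt_example(int fd)
{
	struct ioppt pt;
	u_int32_t mf[32], rb[32];

	/* ... build a message frame in mf[] ... */

	pt.pt_msg = mf;
	pt.pt_msglen = 16;		/* >= sizeof(struct i2o_msg) */
	pt.pt_reply = rb;
	pt.pt_replylen = sizeof(rb);
	pt.pt_timo = 2000;		/* milliseconds */
	pt.pt_nbufs = 0;		/* no data buffers */
	(void)ioctl(fd, IOPIOCPT, &pt);
}
#endif	/* notdef */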
2611