/*	$NetBSD: iop.c,v 1.26 2002/09/27 03:18:10 thorpej Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.26 2002/09/27 03:18:10 thorpej Exp $");

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

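/*
 * Busy-wait for `cond' to become true, testing it roughly every 100us
 * and giving up after `ms' milliseconds.
 */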
#define	POLL(ms, cond)						\
do {								\
	int i;							\
	for (i = (ms) * 10; i; i--) {				\
		if (cond)					\
			break;					\
		DELAY(100);					\
	}							\
} while (/* CONSTCOND */0)

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#define	COMMENT(x)	NULL
#else
#define	IFVERBOSE(x)
#define	COMMENT(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

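/*
 * Worst-case scatter/gather segment count for a transfer: one segment
 * per page, plus one in case the buffer is not page aligned.
 */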
#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

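/*
 * Transaction context layout: the low IOP_TCTX_SHIFT bits index the
 * message wrapper in sc_ims; the remaining bits carry a generation
 * number (see iop_msg_alloc()) used to catch stale or bogus replies.
 */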
#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long	iop_ictxhash;
static void	*iop_sdh;
static struct	i2o_systab *iop_systab;
static int	iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char	*ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);
static int	iop_vendor_print(void *, const char *);

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
		    u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
		    struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

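	/*
	 * `state' counts the allocation steps completed so far; it
	 * determines how much is unwound at `bail_out' on failure.
	 */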
	state = 0;

	printf("I2O adapter");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		printf("%s: frame size too small (%d)\n",
		    sc->sc_dv.dv_xname, sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0, state++; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	return;

 bail_out:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    30000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);

	/*
	 * Start device configuration.
	 */
	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	if ((rv = iop_reconfigure(sc, 0)) == -1) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		return;
	}
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(sc->sc_reconf_proc);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(sc->sc_reconf_proc);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

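		/*
		 * Nothing in this driver issues a wakeup() on this
		 * channel, so this is simply a five second pause between
		 * notification requests.
		 */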
		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0)
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

static int
iop_vendor_print(void *aux, const char *pnp)
{

	return (QUIET);
}

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return (config_match(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX,
		    0, 1000);
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

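	/*
	 * The IOP indicates completion by setting the sync byte in the
	 * status block to 0xff; poll for up to ~2.5 seconds.
	 */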
	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff) {
		printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
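	/*
	 * Build a single SIMPLE element pointing at the scratch status
	 * word, and grow the message-size field (the upper 16 bits of
	 * word 0) by the two SGL words appended.
	 */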
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curproc);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curproc);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be
 * prepared to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
    u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

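	/* The LCT reports its size in 32-bit words; convert to bytes. */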
	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
    int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct i2o_reply *rf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;
	im->im_rb = rf;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curproc);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curproc);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (rf->reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		free(rf, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curproc);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
    int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curproc);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

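	/* The reply MFA is a bus address within the reply buffer. */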
	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) >= sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
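			/*
			 * The reply size lives in the upper 16 bits of
			 * msgflags, in 32-bit words; >> 14 scales it to
			 * bytes, and & ~3 discards the flag bits that
			 * shift in.
			 */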
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

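	/* Keep the wrapper's index; stamp in a fresh generation number. */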
	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
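	/* Each SIMPLE element is two words; grow the size field to match. */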
	mb[0] += (nsegs << 17);
	return (0);

 bad:
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}

/*
 * Map a block I/O data transfer (different in that there's only one per
 * message maximum, and PAGE addressing may be used).  Write a scatter
 * gather list into the message frame.
 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
	if (rv != 0)
		return (rv);

	off = mb[0] >> 16;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
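		/*
		 * Emit one 32-bit page address for each PAGE_SIZE-aligned
		 * chunk of every DMA segment.
		 */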
1991 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
1992 slen = ds->ds_len;
1993 saddr = ds->ds_addr;
1994
1995 while (slen > 0) {
1996 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
1997 tlen = min(eaddr - saddr, slen);
1998 slen -= tlen;
1999 *p++ = le32toh(saddr);
2000 saddr = eaddr;
2001 nsegs++;
2002 }
2003 }
2004
2005 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2006 I2O_SGL_END;
2007 if (out)
2008 mb[off] |= I2O_SGL_DATA_OUT;
2009 } else {
2010 p = mb + off;
2011 nsegs = dm->dm_nsegs;
2012
2013 if (out)
2014 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2015 else
2016 flg = I2O_SGL_SIMPLE;
2017
2018 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2019 p[0] = (u_int32_t)ds->ds_len | flg;
2020 p[1] = (u_int32_t)ds->ds_addr;
2021 }
2022
2023 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2024 I2O_SGL_END;
2025 p[1] = (u_int32_t)ds->ds_addr;
2026 nsegs <<= 1;
2027 }
2028
	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * Adjust the SGL offset and total message size fields.  We don't
	 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
	 */
	mb[0] += ((off << 4) + (nsegs << 16));
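	/*
	 * Worked example (illustrative): with the SGL starting at word 10
	 * and three SIMPLE elements written, nsegs was doubled to 6
	 * above, so the statement above writes 10 into the SGL-offset
	 * nibble (bits 4-7) and grows the message-size field (bits 16-31)
	 * by six words.
	 */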
	return (0);
}

/*
 * Unmap all data transfers associated with a message wrapper.
 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

#ifdef I2ODEBUG
	if (im->im_xfer[0].ix_size == 0)
		panic("iop_msg_unmap: no transfers mapped");
#endif

	for (ix = im->im_xfer, i = 0;;) {
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
		if ((++ix)->ix_size == 0)
			break;
		if (++i >= IOP_MAX_MSG_XFERS)
			break;
	}
}

/*
 * Post a message frame to the IOP's inbound queue.
 */
int
iop_post(struct iop_softc *sc, u_int32_t *mb)
{
	u_int32_t mfa;
	int s;

#ifdef I2ODEBUG
	if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
		panic("iop_post: frame too large");
#endif

	s = splbio();

	/* Allocate a slot with the IOP. */
	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
			splx(s);
			printf("%s: mfa not forthcoming\n",
			    sc->sc_dv.dv_xname);
			return (EAGAIN);
		}

	/* Perform reply buffer DMA synchronisation. */
	if (sc->sc_curib++ == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);

	/* Copy out the message frame. */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa, mb, mb[0] >> 16);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa, (mb[0] >> 14) & ~3,
	    BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	splx(s);
	return (0);
}
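
#if 0
/*
 * A minimal usage sketch for iop_post(), not compiled into the driver
 * (the function name is invented): post a four-word UTIL NOP frame built
 * on the stack.  This is the same frame that iop_release_mfa() below
 * writes directly into a message slot.
 */
static int
iop_post_nop_example(struct iop_softc *sc)
{
	u_int32_t mb[4];

	mb[0] = I2O_VERSION_11 | (4 << 16);	/* version, size in words */
	mb[1] = I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP);
	mb[2] = 0;				/* initiator context */
	mb[3] = 0;				/* transaction context */

	return (iop_post(sc, mb));
}
#endif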

/*
 * Post a message to the IOP and deal with completion.
 */
int
iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
{
	u_int32_t *mb;
	int rv, s;

	mb = xmb;

	/* Terminate the scatter/gather list chain. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
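	/*
	 * (Each SIMPLE element is two words, with the flag bits in its
	 * first word; since the size field in mb[0] points just past the
	 * end of the SGL, the final element's flags sit at word
	 * (mb[0] >> 16) - 2.)
	 */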

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
		if ((im->im_flags & IM_POLL) != 0)
			iop_msg_poll(sc, im, timo);
		else
			iop_msg_wait(sc, im, timo);

		s = splbio();
		if ((im->im_flags & IM_REPLIED) != 0) {
			if ((im->im_flags & IM_NOSTATUS) != 0)
				rv = 0;
			else if ((im->im_flags & IM_FAIL) != 0)
				rv = ENXIO;
			else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
				rv = EIO;
			else
				rv = 0;
		} else
			rv = EBUSY;
		splx(s);
	} else
		rv = 0;

	return (rv);
}
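
/*
 * The canonical calling sequence for a message with one mapped data
 * buffer looks like this (sketch only; "mb", "buf" and "len" are
 * invented, and building the fixed part of the frame is elided -- see
 * iop_util_claim() below for a concrete frame):
 *
 *	im = iop_msg_alloc(sc, IM_WAIT);
 *	... build the fixed frame in mb[], setting the transaction
 *	    context to im->im_tctx ...
 *	rv = iop_msg_map(sc, im, mb, buf, len, 1, NULL);
 *	if (rv == 0) {
 *		rv = iop_msg_post(sc, im, mb, 5000);
 *		iop_msg_unmap(sc, im);
 *	}
 *	iop_msg_free(sc, im);
 */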

/*
 * Spin until the specified message is replied to.
 */
static void
iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int32_t rmfa;
	int s, status;

	s = splbio();

	/* Wait for completion. */
	for (timo *= 10; timo != 0; timo--) {
		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
			/* Double read to account for IOP bug. */
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa != IOP_MFA_EMPTY) {
				status = iop_handle_reply(sc, rmfa);

				/*
				 * Return the reply frame to the IOP's
				 * outbound FIFO.
				 */
				iop_outl(sc, IOP_REG_OFIFO, rmfa);
			}
		}
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		DELAY(100);
	}

	if (timo == 0) {
#ifdef I2ODEBUG
		printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
		if (iop_status_get(sc, 1) != 0)
			printf("iop_msg_poll: unable to retrieve status\n");
		else
			printf("iop_msg_poll: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
#endif
	}

	splx(s);
}

/*
 * Sleep until the specified message is replied to.
 */
static void
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	int s, rv;

	s = splbio();
	if ((im->im_flags & IM_REPLIED) != 0) {
		splx(s);
		return;
	}
	rv = tsleep(im, PRIBIO, "iopmsg", mstohz(timo));
	splx(s);

#ifdef I2ODEBUG
	if (rv != 0) {
		printf("iop_msg_wait: tsleep() == %d\n", rv);
		if (iop_status_get(sc, 0) != 0)
			printf("iop_msg_wait: unable to retrieve status\n");
		else
			printf("iop_msg_wait: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
	}
#endif
}

/*
 * Release an unused message frame back to the IOP's inbound fifo.
 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op. */
	iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
	iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	iop_outl(sc, mfa + 8, 0);
	iop_outl(sc, mfa + 12, 0);

	iop_outl(sc, IOP_REG_IFIFO, mfa);
}

#ifdef I2ODEBUG
/*
 * Dump a reply frame header.
 */
static void
iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
{
	u_int function, detail;
#ifdef I2OVERBOSE
	const char *statusstr;
#endif

	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
	detail = le16toh(rb->detail);

	printf("%s: reply:\n", sc->sc_dv.dv_xname);

#ifdef I2OVERBOSE
	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: function=0x%02x status=0x%02x (%s)\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
#else
	printf("%s: function=0x%02x status=0x%02x\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus);
#endif
	printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
	    sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
	    le32toh(rb->msgtctx));
	printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
	    (le32toh(rb->msgflags) >> 8) & 0xff);
}
#endif

/*
 * Dump a transport failure reply.
 */
static void
iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
{

	printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);

	printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
	printf("%s: failurecode=0x%02x severity=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
	printf("%s: highestver=0x%02x lowestver=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
}

/*
 * Translate an I2O ASCII field into a C string.
 */
void
iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
{
	int hc, lc, i, nit;

	dlen--;
	lc = 0;
	hc = 0;
	i = 0;

	/*
	 * DPT uses NUL as a space, whereas AMI uses it as a terminator.
	 * The spec has nothing to say about it.  Since AMI fields are
	 * usually filled with junk after the terminator, stop at the
	 * first NUL unless the IOP is from DPT.
	 */
	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);

	while (slen-- != 0 && dlen-- != 0) {
		if (nit && *src == '\0')
			break;
		else if (*src <= 0x20 || *src >= 0x7f) {
			if (hc)
				dst[i++] = ' ';
		} else {
			hc = 1;
			dst[i++] = *src;
			lc = i;
		}
		src++;
	}

	dst[lc] = '\0';
}
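
/*
 * Behaviour example for iop_strvis() (illustrative): a field containing
 * "ADAPTEC \0xyz" from a non-DPT IOP is returned as "ADAPTEC" -- the NUL
 * stops the scan and the trailing blank is trimmed -- while unprintable
 * bytes in the middle of a field each become a space.
 */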

/*
 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
 */
int
iop_print_ident(struct iop_softc *sc, int tid)
{
	struct {
		struct i2o_param_op_results pr;
		struct i2o_param_read_results prr;
		struct i2o_param_device_identity di;
	} __attribute__ ((__packed__)) p;
	char buf[32];
	int rv;

	rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
	    sizeof(p), NULL);
	if (rv != 0)
		return (rv);

	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
	    sizeof(buf));
	printf(" <%s, ", buf);
	iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
	    sizeof(buf));
	printf("%s, ", buf);
	iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
	printf("%s>", buf);

	return (0);
}

/*
 * Claim or unclaim the specified TID.
 */
int
iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
    int flags)
{
	struct iop_msg *im;
	struct i2o_util_claim mf;
	int rv, func;

	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
	im = iop_msg_alloc(sc, IM_WAIT);

	/* We can use the same structure, as they're identical. */
	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = flags;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Perform an abort.
 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
{
	struct iop_msg *im;
	struct i2o_util_abort mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = (func << 24) | flags;
	mf.tctxabort = tctxabort;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Enable or disable reception of events for the specified device.
 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
{
	struct i2o_util_event_register mf;

	mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = 0;
	mf.eventmask = mask;

	/* This message is replied to only when events are signalled. */
	return (iop_post(sc, (u_int32_t *)&mf));
}
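
/*
 * Usage note (illustrative, not taken from the I2O spec): an initiator
 * typically registers its event mask once, e.g.
 * iop_util_eventreg(sc, ii, mask), and can pass a zero mask to stop
 * notification again.  Because the IOP replies only when an event
 * fires, completions arrive asynchronously through the initiator's
 * reply callback rather than through iop_msg_wait().
 */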

int
iopopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;

	if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & IOP_ONLINE) == 0)
		return (ENXIO);
	if ((sc->sc_flags & IOP_OPEN) != 0)
		return (EBUSY);
	sc->sc_flags |= IOP_OPEN;

	return (0);
}

int
iopclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;

	sc = device_lookup(&iop_cd, minor(dev));
	sc->sc_flags &= ~IOP_OPEN;

	return (0);
}

int
iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct iop_softc *sc;
	struct iovec *iov;
	int rv, i;

	if (securelevel >= 2)
		return (EPERM);

	sc = device_lookup(&iop_cd, minor(dev));

	switch (cmd) {
	case IOPIOCPT:
		return (iop_passthrough(sc, (struct ioppt *)data, p));

	case IOPIOCGSTATUS:
		iov = (struct iovec *)data;
		i = sizeof(struct i2o_status);
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		if ((rv = iop_status_get(sc, 0)) == 0)
			rv = copyout(&sc->sc_status, iov->iov_base, i);
		return (rv);

	case IOPIOCGLCT:
	case IOPIOCGTIDMAP:
	case IOPIOCRECONFIG:
		break;

	default:
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
#endif
		return (ENOTTY);
	}

	if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
		return (rv);

	switch (cmd) {
	case IOPIOCGLCT:
		iov = (struct iovec *)data;
		i = le16toh(sc->sc_lct->tablesize) << 2;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_lct, iov->iov_base, i);
		break;

	case IOPIOCRECONFIG:
		rv = iop_reconfigure(sc, 0);
		break;

	case IOPIOCGTIDMAP:
		iov = (struct iovec *)data;
		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
		break;
	}

	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
	return (rv);
}

static int
iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
{
	struct iop_msg *im;
	struct i2o_msg *mf;
	struct ioppt_buf *ptb;
	int rv, i, mapped;

	mf = NULL;
	im = NULL;
	mapped = 0;

	if (pt->pt_msglen > sc->sc_framesize ||
	    pt->pt_msglen < sizeof(struct i2o_msg) ||
	    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
	    pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
	    pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
		return (EINVAL);

	for (i = 0; i < pt->pt_nbufs; i++)
		if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
			rv = ENOMEM;
			goto bad;
		}

	mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
	if (mf == NULL)
		return (ENOMEM);

	if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
		goto bad;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
	im->im_rb = (struct i2o_reply *)mf;
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	for (i = 0; i < pt->pt_nbufs; i++) {
		ptb = &pt->pt_bufs[i];
		rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
		    ptb->ptb_datalen, ptb->ptb_out != 0, p);
		if (rv != 0)
			goto bad;
		mapped = 1;
	}

	if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
		goto bad;

	i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
	if (i > sc->sc_framesize)
		i = sc->sc_framesize;
	if (i > pt->pt_replylen)
		i = pt->pt_replylen;
	rv = copyout(im->im_rb, pt->pt_reply, i);

 bad:
	if (mapped != 0)
		iop_msg_unmap(sc, im);
	if (im != NULL)
		iop_msg_free(sc, im);
	if (mf != NULL)
		free(mf, M_DEVBUF);
	return (rv);
}
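
/*
 * Userland usage sketch for IOPIOCPT (illustrative only; "fd" is an open
 * descriptor for an iop device node, and "msg", "msglen", "reply" and
 * "replylen" are caller-supplied placeholders).  The field names are
 * those of struct ioppt from <dev/i2o/iopio.h>:
 *
 *	struct ioppt pt;
 *
 *	memset(&pt, 0, sizeof(pt));
 *	pt.pt_msg = msg;
 *	pt.pt_msglen = msglen;
 *	pt.pt_reply = reply;
 *	pt.pt_replylen = replylen;
 *	pt.pt_timo = 5000;
 *	pt.pt_nbufs = 0;
 *	if (ioctl(fd, IOPIOCPT, &pt) < 0)
 *		warn("IOPIOCPT");
 *
 * pt_timo is in milliseconds and must lie between 1000 and 5*60*1000,
 * and up to IOP_MAX_MSG_XFERS data buffers may be described in
 * pt_bufs[].
 */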