iop.c revision 1.16 1 /* $NetBSD: iop.c,v 1.16 2001/08/22 09:42:05 ad Exp $ */
2
3 /*-
4 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Support for I2O IOPs (intelligent I/O processors).
41 */
42
43 #include "opt_i2o.h"
44 #include "iop.h"
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/device.h>
50 #include <sys/queue.h>
51 #include <sys/proc.h>
52 #include <sys/malloc.h>
53 #include <sys/ioctl.h>
54 #include <sys/endian.h>
55 #include <sys/conf.h>
56 #include <sys/kthread.h>
57
58 #include <uvm/uvm_extern.h>
59
60 #include <machine/bus.h>
61
62 #include <dev/i2o/i2o.h>
63 #include <dev/i2o/iopio.h>
64 #include <dev/i2o/iopreg.h>
65 #include <dev/i2o/iopvar.h>
66
/*
 * Busy-wait up to `ms' milliseconds for `cond' to become true, testing
 * every 100us.  Note: no trailing semicolon here -- the do/while(0)
 * wrapper exists precisely so callers can write `POLL(...);' safely
 * inside if/else; the old trailing `;' defeated that.
 */
#define POLL(ms, cond)							\
do {									\
	int i;								\
	for (i = (ms) * 10; i; i--) {					\
		if (cond)						\
			break;						\
		DELAY(100);						\
	}								\
} while (/* CONSTCOND */0)
76
77 #ifdef I2ODEBUG
78 #define DPRINTF(x) printf x
79 #else
80 #define DPRINTF(x)
81 #endif
82
83 #ifdef I2OVERBOSE
84 #define IFVERBOSE(x) x
85 #define COMMENT(x) NULL
86 #else
87 #define IFVERBOSE(x)
88 #define COMMENT(x)
89 #endif
90
91 #define IOP_ICTXHASH_NBUCKETS 16
92 #define IOP_ICTXHASH(ictx) (&iop_ictxhashtbl[(ictx) & iop_ictxhash])
93
94 #define IOP_MAX_SEGS (((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)
95
96 #define IOP_TCTX_SHIFT 12
97 #define IOP_TCTX_MASK ((1 << IOP_TCTX_SHIFT) - 1)
98
99 static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
100 static u_long iop_ictxhash;
101 static void *iop_sdh;
102 static struct i2o_systab *iop_systab;
103 static int iop_systab_size;
104
105 extern struct cfdriver iop_cd;
106
107 #define IC_CONFIGURE 0x01
108 #define IC_PRIORITY 0x02
109
/*
 * Table of known I2O device classes.  Entries flagged IC_CONFIGURE are
 * eligible for child-device attachment during (re-)configuration;
 * IC_PRIORITY entries are configured in a first pass so that their
 * claims propagate through the LCT before the second pass.
 */
struct iop_class {
	u_short ic_class;	/* I2O_CLASS_* identifier */
	u_short ic_flags;	/* IC_CONFIGURE | IC_PRIORITY */
#ifdef I2OVERBOSE
	const char *ic_caption;	/* human-readable class description */
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
 	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port" )
	},
};
188
#if defined(I2ODEBUG) && defined(I2OVERBOSE)
/*
 * Human-readable reply status codes, indexed by the I2O status value;
 * used only for debug output (iop_reply_print()).
 */
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif
205
206 static inline u_int32_t iop_inl(struct iop_softc *, int);
207 static inline void iop_outl(struct iop_softc *, int, u_int32_t);
208
209 static void iop_config_interrupts(struct device *);
210 static void iop_configure_devices(struct iop_softc *, int, int);
211 static void iop_devinfo(int, char *);
212 static int iop_print(void *, const char *);
213 static int iop_reconfigure(struct iop_softc *, u_int);
214 static void iop_shutdown(void *);
215 static int iop_submatch(struct device *, struct cfdata *, void *);
216 #ifdef notyet
217 static int iop_vendor_print(void *, const char *);
218 #endif
219
220 static void iop_adjqparam(struct iop_softc *, int);
221 static void iop_create_reconf_thread(void *);
222 static int iop_handle_reply(struct iop_softc *, u_int32_t);
223 static int iop_hrt_get(struct iop_softc *);
224 static int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
225 static void iop_intr_event(struct device *, struct iop_msg *, void *);
226 static int iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
227 u_int32_t);
228 static void iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
229 static void iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
230 static int iop_ofifo_init(struct iop_softc *);
231 static int iop_passthrough(struct iop_softc *, struct ioppt *,
232 struct proc *);
233 static void iop_reconf_thread(void *);
234 static void iop_release_mfa(struct iop_softc *, u_int32_t);
235 static int iop_reset(struct iop_softc *);
236 static int iop_status_get(struct iop_softc *, int);
237 static int iop_systab_set(struct iop_softc *);
238 static void iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);
239
240 #ifdef I2ODEBUG
241 static void iop_reply_print(struct iop_softc *, struct i2o_reply *);
242 #endif
243
244 cdev_decl(iop);
245
/*
 * Read a 32-bit IOP register at `off', issuing a full read/write
 * barrier first so the access is ordered against preceding register
 * accesses.
 */
static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}
254
/*
 * Write the 32-bit value `val' to the IOP register at `off', followed
 * by a write barrier so the store is pushed out before any subsequent
 * register access.
 */
static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
263
264 /*
265 * Initialise the IOP and our interface.
266 */
267 void
268 iop_init(struct iop_softc *sc, const char *intrstr)
269 {
270 struct iop_msg *im;
271 int rv, i, j, state, nsegs;
272 u_int32_t mask;
273 char ident[64];
274
275 state = 0;
276
277 printf("I2O adapter");
278
279 if (iop_ictxhashtbl == NULL)
280 iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
281 M_DEVBUF, M_NOWAIT, &iop_ictxhash);
282
283 /* Disable interrupts at the IOP. */
284 mask = iop_inl(sc, IOP_REG_INTR_MASK);
285 iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);
286
287 /* Allocate a scratch DMA map for small miscellaneous shared data. */
288 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
289 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
290 printf("%s: cannot create scratch dmamap\n",
291 sc->sc_dv.dv_xname);
292 return;
293 }
294 state++;
295
296 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
297 sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
298 printf("%s: cannot alloc scratch dmamem\n",
299 sc->sc_dv.dv_xname);
300 goto bail_out;
301 }
302 state++;
303
304 if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
305 &sc->sc_scr, 0)) {
306 printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
307 goto bail_out;
308 }
309 state++;
310
311 if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
312 PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
313 printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
314 goto bail_out;
315 }
316 state++;
317
318 /* Reset the adapter and request status. */
319 if ((rv = iop_reset(sc)) != 0) {
320 printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
321 goto bail_out;
322 }
323
324 if ((rv = iop_status_get(sc, 1)) != 0) {
325 printf("%s: not responding (get status)\n",
326 sc->sc_dv.dv_xname);
327 goto bail_out;
328 }
329
330 sc->sc_flags |= IOP_HAVESTATUS;
331 iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
332 ident, sizeof(ident));
333 printf(" <%s>\n", ident);
334
335 #ifdef I2ODEBUG
336 printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
337 le16toh(sc->sc_status.orgid),
338 (le32toh(sc->sc_status.segnumber) >> 12) & 15);
339 printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
340 printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
341 le32toh(sc->sc_status.desiredprivmemsize),
342 le32toh(sc->sc_status.currentprivmemsize),
343 le32toh(sc->sc_status.currentprivmembase));
344 printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
345 le32toh(sc->sc_status.desiredpriviosize),
346 le32toh(sc->sc_status.currentpriviosize),
347 le32toh(sc->sc_status.currentpriviobase));
348 #endif
349
350 sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
351 if (sc->sc_maxob > IOP_MAX_OUTBOUND)
352 sc->sc_maxob = IOP_MAX_OUTBOUND;
353 sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
354 if (sc->sc_maxib > IOP_MAX_INBOUND)
355 sc->sc_maxib = IOP_MAX_INBOUND;
356
357 /* Allocate message wrappers. */
358 im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT);
359 memset(im, 0, sizeof(*im) * sc->sc_maxib);
360 sc->sc_ims = im;
361 SLIST_INIT(&sc->sc_im_freelist);
362
363 for (i = 0, state++; i < sc->sc_maxib; i++, im++) {
364 rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
365 IOP_MAX_SEGS, IOP_MAX_XFER, 0,
366 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
367 &im->im_xfer[0].ix_map);
368 if (rv != 0) {
369 printf("%s: couldn't create dmamap (%d)",
370 sc->sc_dv.dv_xname, rv);
371 goto bail_out;
372 }
373
374 im->im_tctx = i;
375 SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
376 }
377
378 /* Initalise the IOP's outbound FIFO. */
379 if (iop_ofifo_init(sc) != 0) {
380 printf("%s: unable to init oubound FIFO\n",
381 sc->sc_dv.dv_xname);
382 goto bail_out;
383 }
384
385 /*
386 * Defer further configuration until (a) interrupts are working and
387 * (b) we have enough information to build the system table.
388 */
389 config_interrupts((struct device *)sc, iop_config_interrupts);
390
391 /* Configure shutdown hook before we start any device activity. */
392 if (iop_sdh == NULL)
393 iop_sdh = shutdownhook_establish(iop_shutdown, NULL);
394
395 /* Ensure interrupts are enabled at the IOP. */
396 mask = iop_inl(sc, IOP_REG_INTR_MASK);
397 iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);
398
399 if (intrstr != NULL)
400 printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
401 intrstr);
402
403 #ifdef I2ODEBUG
404 printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
405 sc->sc_dv.dv_xname, sc->sc_maxib,
406 le32toh(sc->sc_status.maxinboundmframes),
407 sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
408 #endif
409
410 lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
411 return;
412
413 bail_out:
414 if (state > 3) {
415 for (j = 0; j < i; j++)
416 bus_dmamap_destroy(sc->sc_dmat,
417 sc->sc_ims[j].im_xfer[0].ix_map);
418 free(sc->sc_ims, M_DEVBUF);
419 }
420 if (state > 2)
421 bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
422 if (state > 1)
423 bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
424 if (state > 0)
425 bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
426 bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
427
428 }
429
430 /*
431 * Perform autoconfiguration tasks.
432 */
433 static void
434 iop_config_interrupts(struct device *self)
435 {
436 struct iop_softc *sc, *iop;
437 struct i2o_systab_entry *ste;
438 int rv, i, niop;
439
440 sc = (struct iop_softc *)self;
441 LIST_INIT(&sc->sc_iilist);
442
443 printf("%s: configuring...\n", sc->sc_dv.dv_xname);
444
445 if (iop_hrt_get(sc) != 0) {
446 printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
447 return;
448 }
449
450 /*
451 * Build the system table.
452 */
453 if (iop_systab == NULL) {
454 for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
455 if ((iop = device_lookup(&iop_cd, i)) == NULL)
456 continue;
457 if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
458 continue;
459 if (iop_status_get(iop, 1) != 0) {
460 printf("%s: unable to retrieve status\n",
461 sc->sc_dv.dv_xname);
462 iop->sc_flags &= ~IOP_HAVESTATUS;
463 continue;
464 }
465 niop++;
466 }
467 if (niop == 0)
468 return;
469
470 i = sizeof(struct i2o_systab_entry) * (niop - 1) +
471 sizeof(struct i2o_systab);
472 iop_systab_size = i;
473 iop_systab = malloc(i, M_DEVBUF, M_NOWAIT);
474
475 memset(iop_systab, 0, i);
476 iop_systab->numentries = niop;
477 iop_systab->version = I2O_VERSION_11;
478
479 for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
480 if ((iop = device_lookup(&iop_cd, i)) == NULL)
481 continue;
482 if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
483 continue;
484
485 ste->orgid = iop->sc_status.orgid;
486 ste->iopid = iop->sc_dv.dv_unit + 2;
487 ste->segnumber =
488 htole32(le32toh(iop->sc_status.segnumber) & ~4095);
489 ste->iopcaps = iop->sc_status.iopcaps;
490 ste->inboundmsgframesize =
491 iop->sc_status.inboundmframesize;
492 ste->inboundmsgportaddresslow =
493 htole32(iop->sc_memaddr + IOP_REG_IFIFO);
494 ste++;
495 }
496 }
497
498 /*
499 * Post the system table to the IOP and bring it to the OPERATIONAL
500 * state.
501 */
502 if (iop_systab_set(sc) != 0) {
503 printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
504 return;
505 }
506 if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
507 30000) != 0) {
508 printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
509 return;
510 }
511
512 /*
513 * Set up an event handler for this IOP.
514 */
515 sc->sc_eventii.ii_dv = self;
516 sc->sc_eventii.ii_intr = iop_intr_event;
517 sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
518 sc->sc_eventii.ii_tid = I2O_TID_IOP;
519 iop_initiator_register(sc, &sc->sc_eventii);
520
521 rv = iop_util_eventreg(sc, &sc->sc_eventii,
522 I2O_EVENT_EXEC_RESOURCE_LIMITS |
523 I2O_EVENT_EXEC_CONNECTION_FAIL |
524 I2O_EVENT_EXEC_ADAPTER_FAULT |
525 I2O_EVENT_EXEC_POWER_FAIL |
526 I2O_EVENT_EXEC_RESET_PENDING |
527 I2O_EVENT_EXEC_RESET_IMMINENT |
528 I2O_EVENT_EXEC_HARDWARE_FAIL |
529 I2O_EVENT_EXEC_XCT_CHANGE |
530 I2O_EVENT_EXEC_DDM_AVAILIBILITY |
531 I2O_EVENT_GEN_DEVICE_RESET |
532 I2O_EVENT_GEN_STATE_CHANGE |
533 I2O_EVENT_GEN_GENERAL_WARNING);
534 if (rv != 0) {
535 printf("%s: unable to register for events", sc->sc_dv.dv_xname);
536 return;
537 }
538
539 #ifdef notyet
540 /* Attempt to match and attach a product-specific extension. */
541 ia.ia_class = I2O_CLASS_ANY;
542 ia.ia_tid = I2O_TID_IOP;
543 config_found_sm(self, &ia, iop_vendor_print, iop_submatch);
544 #endif
545
546 lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
547 if ((rv = iop_reconfigure(sc, 0)) == -1) {
548 printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
549 return;
550 }
551 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
552
553 kthread_create(iop_create_reconf_thread, sc);
554 }
555
556 /*
557 * Create the reconfiguration thread. Called after the standard kernel
558 * threads have been created.
559 */
560 static void
561 iop_create_reconf_thread(void *cookie)
562 {
563 struct iop_softc *sc;
564 int rv;
565
566 sc = cookie;
567 sc->sc_flags |= IOP_ONLINE;
568
569 rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
570 "%s", sc->sc_dv.dv_xname);
571 if (rv != 0) {
572 printf("%s: unable to create reconfiguration thread (%d)",
573 sc->sc_dv.dv_xname, rv);
574 return;
575 }
576 }
577
/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	/*
	 * Request a change indicator one past the current one, so the
	 * IOP only replies once the LCT has actually changed.
	 */
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		/* Keep our process resident while the request is in flight. */
		PHOLD(sc->sc_reconf_proc);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(sc->sc_reconf_proc);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		/* Reconfigure only with the config lock held. */
		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		/* Rate-limit: at most one notification request per 5s. */
		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}
614
615 /*
616 * Reconfigure: find new and removed devices.
617 */
618 static int
619 iop_reconfigure(struct iop_softc *sc, u_int chgind)
620 {
621 struct iop_msg *im;
622 struct i2o_hba_bus_scan mf;
623 struct i2o_lct_entry *le;
624 struct iop_initiator *ii, *nextii;
625 int rv, tid, i;
626
627 /*
628 * If the reconfiguration request isn't the result of LCT change
629 * notification, then be more thorough: ask all bus ports to scan
630 * their busses. Wait up to 5 minutes for each bus port to complete
631 * the request.
632 */
633 if (chgind == 0) {
634 if ((rv = iop_lct_get(sc)) != 0) {
635 DPRINTF(("iop_reconfigure: unable to read LCT\n"));
636 return (rv);
637 }
638
639 le = sc->sc_lct->entry;
640 for (i = 0; i < sc->sc_nlctent; i++, le++) {
641 if ((le16toh(le->classid) & 4095) !=
642 I2O_CLASS_BUS_ADAPTER_PORT)
643 continue;
644 tid = le16toh(le->localtid) & 4095;
645
646 im = iop_msg_alloc(sc, IM_WAIT);
647
648 mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
649 mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
650 mf.msgictx = IOP_ICTX;
651 mf.msgtctx = im->im_tctx;
652
653 DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
654 tid));
655
656 rv = iop_msg_post(sc, im, &mf, 5*60*1000);
657 iop_msg_free(sc, im);
658 #ifdef I2ODEBUG
659 if (rv != 0)
660 printf("%s: bus scan failed\n",
661 sc->sc_dv.dv_xname);
662 #endif
663 }
664 } else if (chgind <= sc->sc_chgind) {
665 DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
666 return (0);
667 }
668
669 /* Re-read the LCT and determine if it has changed. */
670 if ((rv = iop_lct_get(sc)) != 0) {
671 DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
672 return (rv);
673 }
674 DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));
675
676 chgind = le32toh(sc->sc_lct->changeindicator);
677 if (chgind == sc->sc_chgind) {
678 DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
679 return (0);
680 }
681 DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
682 sc->sc_chgind = chgind;
683
684 if (sc->sc_tidmap != NULL)
685 free(sc->sc_tidmap, M_DEVBUF);
686 sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
687 M_DEVBUF, M_NOWAIT);
688 memset(sc->sc_tidmap, 0, sizeof(sc->sc_tidmap));
689
690 /* Allow 1 queued command per device while we're configuring. */
691 iop_adjqparam(sc, 1);
692
693 /*
694 * Match and attach child devices. We configure high-level devices
695 * first so that any claims will propagate throughout the LCT,
696 * hopefully masking off aliased devices as a result.
697 *
698 * Re-reading the LCT at this point is a little dangerous, but we'll
699 * trust the IOP (and the operator) to behave itself...
700 */
701 iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
702 IC_CONFIGURE | IC_PRIORITY);
703 if ((rv = iop_lct_get(sc)) != 0)
704 DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
705 iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
706 IC_CONFIGURE);
707
708 for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
709 nextii = LIST_NEXT(ii, ii_list);
710
711 /* Detach devices that were configured, but are now gone. */
712 for (i = 0; i < sc->sc_nlctent; i++)
713 if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
714 break;
715 if (i == sc->sc_nlctent ||
716 (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
717 config_detach(ii->ii_dv, DETACH_FORCE);
718
719 /*
720 * Tell initiators that existed before the re-configuration
721 * to re-configure.
722 */
723 if (ii->ii_reconfig == NULL)
724 continue;
725 if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
726 printf("%s: %s failed reconfigure (%d)\n",
727 sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
728 }
729
730 /* Re-adjust queue parameters and return. */
731 if (sc->sc_nii != 0)
732 iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
733 / sc->sc_nii);
734
735 return (0);
736 }
737
738 /*
739 * Configure I2O devices into the system.
740 */
741 static void
742 iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
743 {
744 struct iop_attach_args ia;
745 struct iop_initiator *ii;
746 const struct i2o_lct_entry *le;
747 struct device *dv;
748 int i, j, nent;
749 u_int usertid;
750
751 nent = sc->sc_nlctent;
752 for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
753 sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;
754
755 /* Ignore the device if it's in use. */
756 usertid = le32toh(le->usertid) & 4095;
757 if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
758 continue;
759
760 ia.ia_class = le16toh(le->classid) & 4095;
761 ia.ia_tid = sc->sc_tidmap[i].it_tid;
762
763 /* Ignore uninteresting devices. */
764 for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
765 if (iop_class[j].ic_class == ia.ia_class)
766 break;
767 if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
768 (iop_class[j].ic_flags & mask) != maskval)
769 continue;
770
771 /*
772 * Try to configure the device only if it's not already
773 * configured.
774 */
775 LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
776 if (ia.ia_tid == ii->ii_tid) {
777 sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
778 strcpy(sc->sc_tidmap[i].it_dvname,
779 ii->ii_dv->dv_xname);
780 break;
781 }
782 }
783 if (ii != NULL)
784 continue;
785
786 dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
787 if (dv != NULL) {
788 sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
789 strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
790 }
791 }
792 }
793
794 /*
795 * Adjust queue parameters for all child devices.
796 */
797 static void
798 iop_adjqparam(struct iop_softc *sc, int mpi)
799 {
800 struct iop_initiator *ii;
801
802 LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
803 if (ii->ii_adjqparam != NULL)
804 (*ii->ii_adjqparam)(ii->ii_dv, mpi);
805 }
806
/*
 * Produce a human-readable description of the given device class into
 * `devinfo'.  Unknown classes are rendered numerically.
 */
static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	const int nclass = sizeof(iop_class) / sizeof(iop_class[0]);
	int i;

	for (i = 0; i < nclass; i++)
		if (iop_class[i].ic_class == class)
			break;

	if (i == nclass)
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}
826
827 static int
828 iop_print(void *aux, const char *pnp)
829 {
830 struct iop_attach_args *ia;
831 char devinfo[256];
832
833 ia = aux;
834
835 if (pnp != NULL) {
836 iop_devinfo(ia->ia_class, devinfo);
837 printf("%s at %s", devinfo, pnp);
838 }
839 printf(" tid %d", ia->ia_tid);
840 return (UNCONF);
841 }
842
#ifdef notyet
/*
 * Autoconfiguration print routine for vendor-specific extensions
 * (currently compiled out; see the matching #ifdef in
 * iop_config_interrupts()).
 */
static int
iop_vendor_print(void *aux, const char *pnp)
{

	if (pnp != NULL)
		printf("vendor specific extension at %s", pnp);
	return (UNCONF);
}
#endif
853
854 static int
855 iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
856 {
857 struct iop_attach_args *ia;
858
859 ia = aux;
860
861 if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
862 return (0);
863
864 return ((*cf->cf_attach->ca_match)(parent, cf, aux));
865 }
866
867 /*
868 * Shut down all configured IOPs.
869 */
870 static void
871 iop_shutdown(void *junk)
872 {
873 struct iop_softc *sc;
874 int i;
875
876 printf("shutting down iop devices...");
877
878 for (i = 0; i < iop_cd.cd_ndevs; i++) {
879 if ((sc = device_lookup(&iop_cd, i)) == NULL)
880 continue;
881 if ((sc->sc_flags & IOP_ONLINE) == 0)
882 continue;
883 iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
884 0, 5000);
885 iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX,
886 0, 1000);
887 }
888
889 /* Wait. Some boards could still be flushing, stupidly enough. */
890 delay(5000*1000);
891 printf(" done.\n");
892 }
893
/*
 * Retrieve IOP status.
 *
 * Posts an EXEC_STATUS_GET message; the IOP DMAs its reply into the
 * scratch area and completion is detected by polling the sync byte
 * (0xff when done).  With `nosleep' set, busy-waits instead of
 * sleeping (for use before the scheduler is available).  Returns 0 and
 * updates sc->sc_status on success, or EIO/post error on failure.
 */
static int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	/* Split the physical address: the frame carries it as two 32s. */
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	/* Poll for up to ~2.5s (25 iterations of ~100ms each). */
	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff)
		rv = EIO;
	else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}
945
946 /*
947 * Initalize and populate the IOP's outbound FIFO.
948 */
949 static int
950 iop_ofifo_init(struct iop_softc *sc)
951 {
952 bus_addr_t addr;
953 bus_dma_segment_t seg;
954 struct i2o_exec_outbound_init *mf;
955 int i, rseg, rv;
956 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;
957
958 sw = (u_int32_t *)sc->sc_scr;
959
960 mf = (struct i2o_exec_outbound_init *)mb;
961 mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
962 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
963 mf->msgictx = IOP_ICTX;
964 mf->msgtctx = 0;
965 mf->pagesize = PAGE_SIZE;
966 mf->flags = IOP_INIT_CODE | ((IOP_MAX_MSG_SIZE >> 2) << 16);
967
968 /*
969 * The I2O spec says that there are two SGLs: one for the status
970 * word, and one for a list of discarded MFAs. It continues to say
971 * that if you don't want to get the list of MFAs, an IGNORE SGL is
972 * necessary; this isn't the case (and is in fact a bad thing).
973 */
974 mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
975 I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
976 mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
977 (u_int32_t)sc->sc_scr_seg->ds_addr;
978 mb[0] += 2 << 16;
979
980 *sw = 0;
981 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
982 BUS_DMASYNC_PREREAD);
983
984 if ((rv = iop_post(sc, mb)) != 0)
985 return (rv);
986
987 POLL(5000,
988 (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
989 BUS_DMASYNC_POSTREAD),
990 *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));
991
992 if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
993 printf("%s: outbound FIFO init failed (%d)\n",
994 sc->sc_dv.dv_xname, le32toh(*sw));
995 return (EIO);
996 }
997
998 /* Allocate DMA safe memory for the reply frames. */
999 if (sc->sc_rep_phys == 0) {
1000 sc->sc_rep_size = sc->sc_maxob * IOP_MAX_MSG_SIZE;
1001
1002 rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
1003 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
1004 if (rv != 0) {
1005 printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
1006 rv);
1007 return (rv);
1008 }
1009
1010 rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
1011 &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1012 if (rv != 0) {
1013 printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
1014 return (rv);
1015 }
1016
1017 rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
1018 sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
1019 if (rv != 0) {
1020 printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
1021 rv);
1022 return (rv);
1023 }
1024
1025 rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
1026 sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
1027 if (rv != 0) {
1028 printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
1029 return (rv);
1030 }
1031
1032 sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
1033 }
1034
1035 /* Populate the outbound FIFO. */
1036 for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
1037 iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
1038 addr += IOP_MAX_MSG_SIZE;
1039 }
1040
1041 return (0);
1042 }
1043
1044 /*
1045 * Read the specified number of bytes from the IOP's hardware resource table.
1046 */
1047 static int
1048 iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
1049 {
1050 struct iop_msg *im;
1051 int rv;
1052 struct i2o_exec_hrt_get *mf;
1053 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1054
1055 im = iop_msg_alloc(sc, IM_WAIT);
1056 mf = (struct i2o_exec_hrt_get *)mb;
1057 mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
1058 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
1059 mf->msgictx = IOP_ICTX;
1060 mf->msgtctx = im->im_tctx;
1061
1062 iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
1063 rv = iop_msg_post(sc, im, mb, 30000);
1064 iop_msg_unmap(sc, im);
1065 iop_msg_free(sc, im);
1066 return (rv);
1067 }
1068
1069 /*
1070 * Read the IOP's hardware resource table.
1071 */
1072 static int
1073 iop_hrt_get(struct iop_softc *sc)
1074 {
1075 struct i2o_hrt hrthdr, *hrt;
1076 int size, rv;
1077
1078 PHOLD(curproc);
1079 rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
1080 PRELE(curproc);
1081 if (rv != 0)
1082 return (rv);
1083
1084 DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
1085 le16toh(hrthdr.numentries)));
1086
1087 size = sizeof(struct i2o_hrt) +
1088 (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
1089 hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
1090
1091 if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
1092 free(hrt, M_DEVBUF);
1093 return (rv);
1094 }
1095
1096 if (sc->sc_hrt != NULL)
1097 free(sc->sc_hrt, M_DEVBUF);
1098 sc->sc_hrt = hrt;
1099 return (0);
1100 }
1101
1102 /*
1103 * Request the specified number of bytes from the IOP's logical
1104 * configuration table. If a change indicator is specified, this
1105 * is a verbatim notification request, so the caller is prepared
1106 * to wait indefinitely.
1107 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	/* Zero the destination so a short reply leaves no stale data. */
	memset(lct, 0, size);

	/* Build the EXEC_LCT_NOTIFY request in a stack frame buffer. */
	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	/* A non-zero change indicator makes this an async notification. */
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	/*
	 * Map the caller's buffer for the reply.  Wait up to two minutes
	 * for a plain read; wait indefinitely (timo == 0) for a notify.
	 */
	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}
1141
1142 /*
1143 * Read the IOP's logical configuration table.
1144 */
1145 int
1146 iop_lct_get(struct iop_softc *sc)
1147 {
1148 int esize, size, rv;
1149 struct i2o_lct *lct;
1150
1151 esize = le32toh(sc->sc_status.expectedlctsize);
1152 lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
1153 if (lct == NULL)
1154 return (ENOMEM);
1155
1156 if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
1157 free(lct, M_DEVBUF);
1158 return (rv);
1159 }
1160
1161 size = le16toh(lct->tablesize) << 2;
1162 if (esize != size) {
1163 free(lct, M_DEVBUF);
1164 lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
1165 if (lct == NULL)
1166 return (ENOMEM);
1167
1168 if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
1169 free(lct, M_DEVBUF);
1170 return (rv);
1171 }
1172 }
1173
1174 /* Swap in the new LCT. */
1175 if (sc->sc_lct != NULL)
1176 free(sc->sc_lct, M_DEVBUF);
1177 sc->sc_lct = lct;
1178 sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
1179 sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
1180 sizeof(struct i2o_lct_entry);
1181 return (0);
1182 }
1183
1184 /*
1185 * Request the specified parameter group from the target. If an initiator
1186 * is specified (a) don't wait for the operation to complete, but instead
1187 * let the initiator's interrupt handler deal with the reply and (b) place a
1188 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
1189 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
    int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct i2o_reply *rf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	/*
	 * With no initiator we sleep for the reply ourselves; otherwise
	 * the initiator's interrupt handler deals with it.
	 */
	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		return (ENOMEM);
	}
	/* Stash the op for async callers; request a copy of the reply. */
	im->im_dvcontext = pgop;
	im->im_rb = rf;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	/* One FIELD_GET operation retrieving every field in the group. */
	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curproc);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curproc);

	/* Detect errors; treat partial transfers as success. */
	if (ii == NULL && rv == 0) {
		if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (rf->reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	/*
	 * Synchronous (or failed) requests are cleaned up here; otherwise
	 * the initiator's completion path owns im, pgop and rf.
	 */
	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		free(rf, M_DEVBUF);
	}

	return (rv);
}
1260
1261 /*
1262 * Set a single field in a scalar parameter group.
1263 */
1264 int
1265 iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
1266 int size, int field)
1267 {
1268 struct iop_msg *im;
1269 struct i2o_util_params_op *mf;
1270 struct iop_pgop *pgop;
1271 int rv, totsize;
1272 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1273
1274 totsize = sizeof(*pgop) + size;
1275
1276 im = iop_msg_alloc(sc, IM_WAIT);
1277 if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
1278 iop_msg_free(sc, im);
1279 return (ENOMEM);
1280 }
1281
1282 mf = (struct i2o_util_params_op *)mb;
1283 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1284 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1285 mf->msgictx = IOP_ICTX;
1286 mf->msgtctx = im->im_tctx;
1287 mf->flags = 0;
1288
1289 pgop->olh.count = htole16(1);
1290 pgop->olh.reserved = htole16(0);
1291 pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
1292 pgop->oat.fieldcount = htole16(1);
1293 pgop->oat.group = htole16(group);
1294 pgop->oat.fields[0] = htole16(field);
1295 memcpy(pgop + 1, buf, size);
1296
1297 iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
1298 rv = iop_msg_post(sc, im, mb, 30000);
1299 if (rv != 0)
1300 printf("%s: FIELD_SET failed for tid %d group %d\n",
1301 sc->sc_dv.dv_xname, tid, group);
1302
1303 iop_msg_unmap(sc, im);
1304 iop_msg_free(sc, im);
1305 free(pgop, M_DEVBUF);
1306 return (rv);
1307 }
1308
1309 /*
1310 * Delete all rows in a tablular parameter group.
1311 */
1312 int
1313 iop_table_clear(struct iop_softc *sc, int tid, int group)
1314 {
1315 struct iop_msg *im;
1316 struct i2o_util_params_op *mf;
1317 struct iop_pgop pgop;
1318 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1319 int rv;
1320
1321 im = iop_msg_alloc(sc, IM_WAIT);
1322
1323 mf = (struct i2o_util_params_op *)mb;
1324 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1325 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1326 mf->msgictx = IOP_ICTX;
1327 mf->msgtctx = im->im_tctx;
1328 mf->flags = 0;
1329
1330 pgop.olh.count = htole16(1);
1331 pgop.olh.reserved = htole16(0);
1332 pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
1333 pgop.oat.fieldcount = htole16(0);
1334 pgop.oat.group = htole16(group);
1335 pgop.oat.fields[0] = htole16(0);
1336
1337 PHOLD(curproc);
1338 iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
1339 rv = iop_msg_post(sc, im, mb, 30000);
1340 if (rv != 0)
1341 printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
1342 sc->sc_dv.dv_xname, tid, group);
1343
1344 iop_msg_unmap(sc, im);
1345 PRELE(curproc);
1346 iop_msg_free(sc, im);
1347 return (rv);
1348 }
1349
1350 /*
1351 * Add a single row to a tabular parameter group. The row can have only one
1352 * field.
1353 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	/*
	 * Beyond struct iop_pgop (which holds fields[0]) we need two more
	 * 16-bit words for RowCount and KeyValue, then the row data.
	 */
	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	/* Build the UTIL_PARAMS_SET request. */
	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	/* One ROW_ADD operation adding a single one-field row. */
	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);		/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);		/* RowCount */
	pgop->oat.fields[2] = htole16(row);		/* KeyValue */
	/* Row data immediately follows the three header words. */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}
1400
1401 /*
1402 * Execute a simple command (no parameters).
1403 */
1404 int
1405 iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
1406 int async, int timo)
1407 {
1408 struct iop_msg *im;
1409 struct i2o_msg mf;
1410 int rv, fl;
1411
1412 fl = (async != 0 ? IM_WAIT : IM_POLL);
1413 im = iop_msg_alloc(sc, fl);
1414
1415 mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1416 mf.msgfunc = I2O_MSGFUNC(tid, function);
1417 mf.msgictx = ictx;
1418 mf.msgtctx = im->im_tctx;
1419
1420 rv = iop_msg_post(sc, im, &mf, timo);
1421 iop_msg_free(sc, im);
1422 return (rv);
1423 }
1424
1425 /*
1426 * Post the system table to the IOP.
1427 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	/* IOP ID: unit number biased by 2 (low IDs are reserved). */
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	/*
	 * {base, size} pairs; sizes come straight from the (little-endian)
	 * IOP status block, so only a non-zero test is done on them here.
	 */
	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	/* Carve out private memory space if the IOP requested any. */
	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			/* Non-fatal: report a zero-sized region instead. */
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	/* Likewise for private I/O space. */
	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	/* The address pairs live on our stack: hold the process. */
	PHOLD(curproc);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curproc);
	return (rv);
}
1486
1487 /*
1488 * Reset the IOP. Must be called with interrupts disabled.
1489 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	/* The scratch area receives the 32-bit reset status word. */
	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	/* Physical address of the status word, split into two halves. */
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	/* Clear the status word before the IOP can write it. */
	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	/* Spin up to 2.5s for the IOP to post its reset status. */
	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	/* Don't leak the frame we just pulled off the inbound FIFO. */
	iop_release_mfa(sc, mfa);
	return (0);
}
1539
1540 /*
1541 * Register a new initiator. Must be called with the configuration lock
1542 * held.
1543 */
1544 void
1545 iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
1546 {
1547 static int ictxgen;
1548 int s;
1549
1550 /* 0 is reserved (by us) for system messages. */
1551 ii->ii_ictx = ++ictxgen;
1552
1553 /*
1554 * `Utility initiators' don't make it onto the per-IOP initiator list
1555 * (which is used only for configuration), but do get one slot on
1556 * the inbound queue.
1557 */
1558 if ((ii->ii_flags & II_UTILITY) == 0) {
1559 LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
1560 sc->sc_nii++;
1561 } else
1562 sc->sc_nuii++;
1563
1564 s = splbio();
1565 LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
1566 splx(s);
1567 }
1568
1569 /*
1570 * Unregister an initiator. Must be called with the configuration lock
1571 * held.
1572 */
1573 void
1574 iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
1575 {
1576 int s;
1577
1578 if ((ii->ii_flags & II_UTILITY) == 0) {
1579 LIST_REMOVE(ii, ii_list);
1580 sc->sc_nii--;
1581 } else
1582 sc->sc_nuii--;
1583
1584 s = splbio();
1585 LIST_REMOVE(ii, ii_hash);
1586 splx(s);
1587 }
1588
1589 /*
1590 * Handle a reply frame from the IOP.
1591 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	/* Locate the reply frame within our reply buffer. */
	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    IOP_MAX_MSG_SIZE, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		/* Look the context up in the initiator hash. */
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		/*
		 * NOTE(review): `im' cannot be NULL here (it is sc->sc_ims
		 * plus an offset), so the `im != NULL' test below always
		 * succeeds.  Also confirm whether the bounds test should
		 * be `>=' rather than `>'.
		 */
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			/* Frame size in bytes from the message header. */
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > IOP_MAX_MSG_SIZE)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}
1718
1719 /*
1720 * Handle an interrupt from the IOP.
1721 */
1722 int
1723 iop_intr(void *arg)
1724 {
1725 struct iop_softc *sc;
1726 u_int32_t rmfa;
1727
1728 sc = arg;
1729
1730 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
1731 return (0);
1732
1733 for (;;) {
1734 /* Double read to account for IOP bug. */
1735 if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
1736 rmfa = iop_inl(sc, IOP_REG_OFIFO);
1737 if (rmfa == IOP_MFA_EMPTY)
1738 break;
1739 }
1740 iop_handle_reply(sc, rmfa);
1741 iop_outl(sc, IOP_REG_OFIFO, rmfa);
1742 }
1743
1744 return (1);
1745 }
1746
1747 /*
1748 * Handle an event signalled by the executive.
1749 */
1750 static void
1751 iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
1752 {
1753 struct i2o_util_event_register_reply *rb;
1754 struct iop_softc *sc;
1755 u_int event;
1756
1757 sc = (struct iop_softc *)dv;
1758 rb = reply;
1759
1760 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1761 return;
1762
1763 event = le32toh(rb->event);
1764 printf("%s: event 0x%08x received\n", dv->dv_xname, event);
1765 }
1766
1767 /*
1768 * Allocate a message wrapper.
1769 */
1770 struct iop_msg *
1771 iop_msg_alloc(struct iop_softc *sc, int flags)
1772 {
1773 struct iop_msg *im;
1774 static u_int tctxgen;
1775 int s, i;
1776
1777 #ifdef I2ODEBUG
1778 if ((flags & IM_SYSMASK) != 0)
1779 panic("iop_msg_alloc: system flags specified");
1780 #endif
1781
1782 s = splbio();
1783 im = SLIST_FIRST(&sc->sc_im_freelist);
1784 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
1785 if (im == NULL)
1786 panic("iop_msg_alloc: no free wrappers");
1787 #endif
1788 SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
1789 splx(s);
1790
1791 im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
1792 tctxgen += (1 << IOP_TCTX_SHIFT);
1793 im->im_flags = flags | IM_ALLOCED;
1794 im->im_rb = NULL;
1795 i = 0;
1796 do {
1797 im->im_xfer[i++].ix_size = 0;
1798 } while (i < IOP_MAX_MSG_XFERS);
1799
1800 return (im);
1801 }
1802
1803 /*
1804 * Free a message wrapper.
1805 */
1806 void
1807 iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
1808 {
1809 int s;
1810
1811 #ifdef I2ODEBUG
1812 if ((im->im_flags & IM_ALLOCED) == 0)
1813 panic("iop_msg_free: wrapper not allocated");
1814 #endif
1815
1816 im->im_flags = 0;
1817 s = splbio();
1818 SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
1819 splx(s);
1820 }
1821
1822 /*
1823 * Map a data transfer. Write a scatter-gather list into the message frame.
1824 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	/* Find the first unused transfer slot on the wrapper. */
	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	/* Load the buffer; `up' selects the owning process's space. */
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 * The current frame length (in words) sits in bits 16-31 of mb[0].
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((IOP_MAX_MSG_SIZE / 4) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	/* The final element is tagged as the end of this buffer. */
	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/*
	 * Fix up the transfer record, and sync the map.
	 * NOTE(review): this pre-transfer sync uses POSTWRITE/POSTREAD;
	 * PREWRITE/PREREAD would normally be expected here -- confirm
	 * against bus_dma(9) whether this is intentional.
	 */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
	/* Grow the frame length by the SG elements just written. */
	mb[0] += (nsegs << 17);
	return (0);

bad:
	/* Tear down a dynamically created map; the static one is kept. */
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}
1922
1923 /*
1924 * Map a block I/O data transfer (different in that there's only one per
1925 * message maximum, and PAGE addressing may be used). Write a scatter
1926 * gather list into the message frame.
1927 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	/* Block I/O always uses the first (static) transfer slot. */
	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
	if (rv != 0)
		return (rv);

	/* Room left in the frame, in SIMPLE (two-word) elements. */
	off = mb[0] >> 16;
	nsegs = ((IOP_MAX_MSG_SIZE / 4) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
			slen = ds->ds_len;
			saddr = ds->ds_addr;

			/* Emit one page address per page spanned. */
			while (slen > 0) {
				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
				tlen = min(eaddr - saddr, slen);
				slen -= tlen;
				/*
				 * NOTE(review): le32toh() is used to store
				 * into the (little-endian) frame; htole32()
				 * is the semantically correct direction,
				 * though both perform the same byte swap.
				 */
				*p++ = le32toh(saddr);
				saddr = eaddr;
				nsegs++;
			}
		}

		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		if (out)
			mb[off] |= I2O_SGL_DATA_OUT;
	} else {
		p = mb + off;
		nsegs = dm->dm_nsegs;

		if (out)
			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
		else
			flg = I2O_SGL_SIMPLE;

		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
			p[0] = (u_int32_t)ds->ds_len | flg;
			p[1] = (u_int32_t)ds->ds_addr;
		}

		/* Final element terminates both the buffer and the list. */
		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		p[1] = (u_int32_t)ds->ds_addr;
		/* Two words per SIMPLE element. */
		nsegs <<= 1;
	}

	/*
	 * Fix up the transfer record, and sync the map.
	 * NOTE(review): as in iop_msg_map(), POSTWRITE/POSTREAD is used
	 * for a pre-transfer sync -- confirm against bus_dma(9).
	 */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * Adjust the SGL offset and total message size fields.  We don't
	 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
	 */
	mb[0] += ((off << 4) + (nsegs << 16));
	return (0);
}
2019
2020 /*
2021 * Unmap all data transfers associated with a message wrapper.
2022 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

#ifdef I2ODEBUG
	if (im->im_xfer[0].ix_size == 0)
		panic("iop_msg_unmap: no transfers mapped");
#endif

	/* Walk the transfer records until an unused slot is reached. */
	for (ix = im->im_xfer, i = 0;;) {
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
		/* Stop at the first unused slot or the end of the array. */
		if ((++ix)->ix_size == 0)
			break;
		if (++i >= IOP_MAX_MSG_XFERS)
			break;
	}
}
2049
2050 /*
2051 * Post a message frame to the IOP's inbound queue.
2052 */
int
iop_post(struct iop_softc *sc, u_int32_t *mb)
{
	u_int32_t mfa;
	int s;

#ifdef I2ODEBUG
	if ((mb[0] >> 16) > IOP_MAX_MSG_SIZE / 4)
		panic("iop_post: frame too large");
#endif

	s = splbio();

	/* Allocate a slot with the IOP.  Double read: see iop_intr(). */
	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
			splx(s);
			printf("%s: mfa not forthcoming\n",
			    sc->sc_dv.dv_xname);
			return (EAGAIN);
		}

	/* Perform reply buffer DMA synchronisation. */
	if (sc->sc_curib++ == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);

	/* Copy out the message frame; mb[0] >> 16 is its size in words. */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa, mb, mb[0] >> 16);
	/* Barrier over the frame's byte length ((words << 2) & ~3). */
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa, (mb[0] >> 14) & ~3,
	    BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	splx(s);
	return (0);
}
2091
2092 /*
2093 * Post a message to the IOP and deal with completion.
2094 */
2095 int
2096 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2097 {
2098 u_int32_t *mb;
2099 int rv, s;
2100
2101 mb = xmb;
2102
2103 /* Terminate the scatter/gather list chain. */
2104 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2105 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2106
2107 if ((rv = iop_post(sc, mb)) != 0)
2108 return (rv);
2109
2110 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2111 if ((im->im_flags & IM_POLL) != 0)
2112 iop_msg_poll(sc, im, timo);
2113 else
2114 iop_msg_wait(sc, im, timo);
2115
2116 s = splbio();
2117 if ((im->im_flags & IM_REPLIED) != 0) {
2118 if ((im->im_flags & IM_NOSTATUS) != 0)
2119 rv = 0;
2120 else if ((im->im_flags & IM_FAIL) != 0)
2121 rv = ENXIO;
2122 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2123 rv = EIO;
2124 else
2125 rv = 0;
2126 } else
2127 rv = EBUSY;
2128 splx(s);
2129 } else
2130 rv = 0;
2131
2132 return (rv);
2133 }
2134
2135 /*
2136 * Spin until the specified message is replied to.
2137 */
2138 static void
2139 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2140 {
2141 u_int32_t rmfa;
2142 int s, status;
2143
2144 s = splbio();
2145
2146 /* Wait for completion. */
2147 for (timo *= 10; timo != 0; timo--) {
2148 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2149 /* Double read to account for IOP bug. */
2150 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2151 if (rmfa == IOP_MFA_EMPTY)
2152 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2153 if (rmfa != IOP_MFA_EMPTY) {
2154 status = iop_handle_reply(sc, rmfa);
2155
2156 /*
2157 * Return the reply frame to the IOP's
2158 * outbound FIFO.
2159 */
2160 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2161 }
2162 }
2163 if ((im->im_flags & IM_REPLIED) != 0)
2164 break;
2165 DELAY(100);
2166 }
2167
2168 if (timo == 0) {
2169 #ifdef I2ODEBUG
2170 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2171 if (iop_status_get(sc, 1) != 0)
2172 printf("iop_msg_poll: unable to retrieve status\n");
2173 else
2174 printf("iop_msg_poll: IOP state = %d\n",
2175 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2176 #endif
2177 }
2178
2179 splx(s);
2180 }
2181
2182 /*
2183 * Sleep until the specified message is replied to.
2184 */
static void
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	int s, rv;

	/* The reply may already have arrived; check before sleeping. */
	s = splbio();
	if ((im->im_flags & IM_REPLIED) != 0) {
		splx(s);
		return;
	}
	/* timo is in milliseconds; convert to ticks for tsleep().
	 * rv is consumed only by the I2ODEBUG diagnostics below. */
	rv = tsleep(im, PRIBIO, "iopmsg", timo * hz / 1000);
	splx(s);

#ifdef I2ODEBUG
	if (rv != 0) {
		printf("iop_msg_wait: tsleep() == %d\n", rv);
		if (iop_status_get(sc, 0) != 0)
			printf("iop_msg_wait: unable to retrieve status\n");
		else
			printf("iop_msg_wait: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
	}
#endif
}
2209
2210 /*
2211 * Release an unused message frame back to the IOP's inbound fifo.
2212 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op: a 4-word UTIL_NOP message. */
	iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
	iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	iop_outl(sc, mfa + 8, 0);
	iop_outl(sc, mfa + 12, 0);

	/* Hand the frame back to the IOP via the inbound FIFO. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);
}
2225
2226 #ifdef I2ODEBUG
2227 /*
2228 * Dump a reply frame header.
2229 */
static void
iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
{
	u_int function, detail;
#ifdef I2OVERBOSE
	const char *statusstr;
#endif

	/* Function code lives in the top byte of msgfunc. */
	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
	detail = le16toh(rb->detail);

	printf("%s: reply:\n", sc->sc_dv.dv_xname);

#ifdef I2OVERBOSE
	/* Translate the status code via the iop_status string table. */
	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: function=0x%02x status=0x%02x (%s)\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
#else
	printf("%s: function=0x%02x status=0x%02x\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus);
#endif
	printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
	    sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
	    le32toh(rb->msgtctx));
	/* Initiator/target TIDs are the two 12-bit fields of msgfunc. */
	printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
	    (le32toh(rb->msgflags) >> 8) & 0xff);
}
2262 #endif
2263
2264 /*
2265 * Dump a transport failure reply.
2266 */
static void
iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
{

	/* Dump the salient fields of a transport failure notification. */
	printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);

	printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
	printf("%s: failurecode=0x%02x severity=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
	printf("%s: highestver=0x%02x lowestver=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
}
2280
2281 /*
2282 * Translate an I2O ASCII field into a C string.
2283 */
2284 void
2285 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2286 {
2287 int hc, lc, i, nit;
2288
2289 dlen--;
2290 lc = 0;
2291 hc = 0;
2292 i = 0;
2293
2294 /*
2295 * DPT use NUL as a space, whereas AMI use it as a terminator. The
2296 * spec has nothing to say about it. Since AMI fields are usually
2297 * filled with junk after the terminator, ...
2298 */
2299 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2300
2301 while (slen-- != 0 && dlen-- != 0) {
2302 if (nit && *src == '\0')
2303 break;
2304 else if (*src <= 0x20 || *src >= 0x7f) {
2305 if (hc)
2306 dst[i++] = ' ';
2307 } else {
2308 hc = 1;
2309 dst[i++] = *src;
2310 lc = i;
2311 }
2312 src++;
2313 }
2314
2315 dst[lc] = '\0';
2316 }
2317
2318 /*
2319 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2320 */
2321 int
2322 iop_print_ident(struct iop_softc *sc, int tid)
2323 {
2324 struct {
2325 struct i2o_param_op_results pr;
2326 struct i2o_param_read_results prr;
2327 struct i2o_param_device_identity di;
2328 } __attribute__ ((__packed__)) p;
2329 char buf[32];
2330 int rv;
2331
2332 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2333 sizeof(p), NULL);
2334 if (rv != 0)
2335 return (rv);
2336
2337 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2338 sizeof(buf));
2339 printf(" <%s, ", buf);
2340 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2341 sizeof(buf));
2342 printf("%s, ", buf);
2343 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2344 printf("%s>", buf);
2345
2346 return (0);
2347 }
2348
2349 /*
2350 * Claim or unclaim the specified TID.
2351 */
2352 int
2353 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2354 int flags)
2355 {
2356 struct iop_msg *im;
2357 struct i2o_util_claim mf;
2358 int rv, func;
2359
2360 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2361 im = iop_msg_alloc(sc, IM_WAIT);
2362
2363 /* We can use the same structure, as they're identical. */
2364 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2365 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2366 mf.msgictx = ii->ii_ictx;
2367 mf.msgtctx = im->im_tctx;
2368 mf.flags = flags;
2369
2370 rv = iop_msg_post(sc, im, &mf, 5000);
2371 iop_msg_free(sc, im);
2372 return (rv);
2373 }
2374
2375 /*
2376 * Perform an abort.
2377 */
2378 int iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2379 int tctxabort, int flags)
2380 {
2381 struct iop_msg *im;
2382 struct i2o_util_abort mf;
2383 int rv;
2384
2385 im = iop_msg_alloc(sc, IM_WAIT);
2386
2387 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2388 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2389 mf.msgictx = ii->ii_ictx;
2390 mf.msgtctx = im->im_tctx;
2391 mf.flags = (func << 24) | flags;
2392 mf.tctxabort = tctxabort;
2393
2394 rv = iop_msg_post(sc, im, &mf, 5000);
2395 iop_msg_free(sc, im);
2396 return (rv);
2397 }
2398
2399 /*
2400 * Enable or disable reception of events for the specified device.
2401 */
2402 int iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2403 {
2404 struct i2o_util_event_register mf;
2405
2406 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2407 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2408 mf.msgictx = ii->ii_ictx;
2409 mf.msgtctx = 0;
2410 mf.eventmask = mask;
2411
2412 /* This message is replied to only when events are signalled. */
2413 return (iop_post(sc, (u_int32_t *)&mf));
2414 }
2415
2416 int
2417 iopopen(dev_t dev, int flag, int mode, struct proc *p)
2418 {
2419 struct iop_softc *sc;
2420
2421 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2422 return (ENXIO);
2423 if ((sc->sc_flags & IOP_ONLINE) == 0)
2424 return (ENXIO);
2425 if ((sc->sc_flags & IOP_OPEN) != 0)
2426 return (EBUSY);
2427 sc->sc_flags |= IOP_OPEN;
2428
2429 return (0);
2430 }
2431
2432 int
2433 iopclose(dev_t dev, int flag, int mode, struct proc *p)
2434 {
2435 struct iop_softc *sc;
2436
2437 sc = device_lookup(&iop_cd, minor(dev));
2438 sc->sc_flags &= ~IOP_OPEN;
2439
2440 return (0);
2441 }
2442
/*
 * Control device ioctl handler: dispatch pass-through, status, LCT,
 * TID map and reconfiguration requests from user space.
 */
int
iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct iop_softc *sc;
	struct iovec *iov;
	int rv, i;

	/* Refuse everything at securelevel 2 or above. */
	if (securelevel >= 2)
		return (EPERM);

	sc = device_lookup(&iop_cd, minor(dev));

	/* First, the requests that don't need the configuration lock. */
	switch (cmd) {
	case IOPIOCPT:
		return (iop_passthrough(sc, (struct ioppt *)data, p));

	case IOPIOCGSTATUS:
		/*
		 * Copy out as much of a freshly-read status block as
		 * fits in the caller's buffer, shrinking iov_len to the
		 * actual size when the buffer is larger.
		 */
		iov = (struct iovec *)data;
		i = sizeof(struct i2o_status);
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		if ((rv = iop_status_get(sc, 0)) == 0)
			rv = copyout(&sc->sc_status, iov->iov_base, i);
		return (rv);

	case IOPIOCGLCT:
	case IOPIOCGTIDMAP:
	case IOPIOCRECONFIG:
		/* Handled below, under the configuration lock. */
		break;

	default:
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
#endif
		return (ENOTTY);
	}

	/*
	 * The remaining requests read or change the configuration; take
	 * the configuration lock shared for the duration.
	 */
	if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
		return (rv);

	switch (cmd) {
	case IOPIOCGLCT:
		/* Copy out as much of the LCT as fits (size in 32-bit words). */
		iov = (struct iovec *)data;
		i = le16toh(sc->sc_lct->tablesize) << 2;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_lct, iov->iov_base, i);
		break;

	case IOPIOCRECONFIG:
		rv = iop_reconfigure(sc, 0);
		break;

	case IOPIOCGTIDMAP:
		/* Copy out as much of the TID map as fits. */
		iov = (struct iovec *)data;
		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
		break;
	}

	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
	return (rv);
}
2514
2515 static int
2516 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2517 {
2518 struct iop_msg *im;
2519 struct i2o_msg *mf;
2520 struct ioppt_buf *ptb;
2521 int rv, i, mapped;
2522
2523 mf = NULL;
2524 im = NULL;
2525 mapped = 1;
2526
2527 if (pt->pt_msglen > IOP_MAX_MSG_SIZE ||
2528 pt->pt_msglen > (le16toh(sc->sc_status.inboundmframesize) << 2) ||
2529 pt->pt_msglen < sizeof(struct i2o_msg) ||
2530 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2531 pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2532 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2533 return (EINVAL);
2534
2535 for (i = 0; i < pt->pt_nbufs; i++)
2536 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2537 rv = ENOMEM;
2538 goto bad;
2539 }
2540
2541 mf = malloc(IOP_MAX_MSG_SIZE, M_DEVBUF, M_WAITOK);
2542 if (mf == NULL)
2543 return (ENOMEM);
2544
2545 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2546 goto bad;
2547
2548 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2549 im->im_rb = (struct i2o_reply *)mf;
2550 mf->msgictx = IOP_ICTX;
2551 mf->msgtctx = im->im_tctx;
2552
2553 for (i = 0; i < pt->pt_nbufs; i++) {
2554 ptb = &pt->pt_bufs[i];
2555 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2556 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2557 if (rv != 0)
2558 goto bad;
2559 mapped = 1;
2560 }
2561
2562 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2563 goto bad;
2564
2565 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2566 if (i > IOP_MAX_MSG_SIZE)
2567 i = IOP_MAX_MSG_SIZE;
2568 if (i > pt->pt_replylen)
2569 i = pt->pt_replylen;
2570 rv = copyout(im->im_rb, pt->pt_reply, i);
2571
2572 bad:
2573 if (mapped != 0)
2574 iop_msg_unmap(sc, im);
2575 if (im != NULL)
2576 iop_msg_free(sc, im);
2577 if (mf != NULL)
2578 free(mf, M_DEVBUF);
2579 return (rv);
2580 }
2581