/*	$NetBSD: iop.c,v 1.19 2001/09/27 18:43:37 ad Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

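/*
 * Busy-wait up to `ms' milliseconds for `cond' to become true, checking
 * roughly once every 100us.
 */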
#define POLL(ms, cond)						\
do {								\
	int i;							\
	for (i = (ms) * 10; i; i--) {				\
		if (cond)					\
			break;					\
		DELAY(100);					\
	}							\
} while (/* CONSTCOND */0)

#ifdef I2ODEBUG
#define DPRINTF(x)	printf x
#else
#define DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define IFVERBOSE(x)	x
#define COMMENT(x)	NULL
#else
#define IFVERBOSE(x)
#define COMMENT(x)
#endif

#define IOP_ICTXHASH_NBUCKETS	16
#define IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

#define IOP_TCTX_SHIFT	12
#define IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)
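
/*
 * A transaction context carries a message wrapper's index in its low
 * IOP_TCTX_SHIFT bits; the bits above hold a generation number, which
 * lets stale or duplicated replies be detected.
 */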

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static void *iop_sdh;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

extern struct cfdriver iop_cd;

#define IC_CONFIGURE	0x01
#define IC_PRIORITY	0x02

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char *ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t iop_inl(struct iop_softc *, int);
static inline void iop_outl(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);
static int	iop_vendor_print(void *, const char *);

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
				struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

cdev_decl(iop);

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	state++;

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
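	/* The IOP reports its frame size in 32-bit words; convert to bytes. */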
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		printf("%s: frame size too small (%d)\n",
		    sc->sc_dv.dv_xname, sc->sc_framesize);
		return;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT);
	if (im == NULL) {
		printf("%s: couldn't allocate message wrappers\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	memset(im, 0, sizeof(*im) * sc->sc_maxib);
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0, state++; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	return;

 bail_out:
	if (state > 4) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
	if (state > 3)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 2)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 1)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	if (state > 0)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT);
		if (iop_systab == NULL) {
			printf("%s: unable to allocate system table\n",
			    sc->sc_dv.dv_xname);
			return;
		}

		memset(iop_systab, 0, i);
		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
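			/* IOP IDs 0 and 1 are reserved by I2O; bias the unit number past them. */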
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    30000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);

	/*
	 * Start device configuration.
	 */
	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	if ((rv = iop_reconfigure(sc, 0)) == -1) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		return;
	}
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread. Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(sc->sc_reconf_proc);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(sc->sc_reconf_proc);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses. Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT);
	memset(sc->sc_tidmap, 0, sc->sc_nlctent * sizeof(struct iop_tidmap));

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices. We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0)
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

static int
iop_vendor_print(void *aux, const char *pnp)
{

	return (QUIET);
}

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX,
		    0, 1000);
	}

	/* Wait. Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

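	/*
	 * Poll for up to 2.5 seconds (25 x 100ms); a sync byte of 0xff
	 * indicates that the IOP has filled in the status block.
	 */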
	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff)
		rv = EIO;
	else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs. It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curproc);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curproc);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table. If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
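	/*
	 * tablesize is in 32-bit words, and struct i2o_lct itself includes
	 * the first entry; adjust for that when computing the entry count.
	 */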
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Request the specified parameter group from the target. If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct i2o_reply *rf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;
	im->im_rb = rf;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curproc);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curproc);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (rf->reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		free(rf, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
	      int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curproc);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group. The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

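	/* If the caller permits sleeping, wait for the reply; otherwise poll. */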
	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curproc);
	return (rv);
}

/*
 * Reset the IOP. Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state. Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator. Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}

/*
 * Unregister an initiator. Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

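	/*
	 * The reply MFA is a bus address within the reply buffer; convert
	 * it to an offset, and thence to a kernel virtual address.
	 */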
	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) >= sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
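			/*
			 * msgflags carries the frame size in its high 16
			 * bits, counted in 32-bit words; convert to bytes.
			 */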
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}

/*
 * Map a data transfer. Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
	    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
	mb[0] += (nsegs << 17);
	return (0);

 bad:
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}
1929
1930 /*
1931 * Map a block I/O data transfer (different in that there's only one per
1932 * message maximum, and PAGE addressing may be used). Write a scatter
1933 * gather list into the message frame.
1934 */
1935 int
1936 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1937 void *xferaddr, int xfersize, int out)
1938 {
1939 bus_dma_segment_t *ds;
1940 bus_dmamap_t dm;
1941 struct iop_xfer *ix;
1942 u_int rv, i, nsegs, off, slen, tlen, flg;
1943 paddr_t saddr, eaddr;
1944 u_int32_t *p;
1945
1946 #ifdef I2ODEBUG
1947 if (xfersize == 0)
1948 panic("iop_msg_map_bio: null transfer");
1949 if (xfersize > IOP_MAX_XFER)
1950 panic("iop_msg_map_bio: transfer too large");
1951 if ((im->im_flags & IM_SGLOFFADJ) != 0)
1952 panic("iop_msg_map_bio: SGLOFFADJ");
1953 #endif
1954
1955 ix = im->im_xfer;
1956 dm = ix->ix_map;
1957 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
1958 BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
1959 if (rv != 0)
1960 return (rv);
1961
1962 off = mb[0] >> 16;
1963 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
1964
1965 /*
1966 * If the transfer is highly fragmented and won't fit using SIMPLE
1967 * elements, use PAGE_LIST elements instead. SIMPLE elements are
1968 * potentially more efficient, both for us and the IOP.
1969 */
1970 if (dm->dm_nsegs > nsegs) {
1971 nsegs = 1;
1972 p = mb + off + 1;
1973
1974 /* XXX This should be done with a bus_space flag. */
1975 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
1976 slen = ds->ds_len;
1977 saddr = ds->ds_addr;
1978
1979 while (slen > 0) {
1980 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
1981 tlen = min(eaddr - saddr, slen);
1982 slen -= tlen;
1983 *p++ = le32toh(saddr);
1984 saddr = eaddr;
1985 nsegs++;
1986 }
1987 }
1988
1989 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
1990 I2O_SGL_END;
1991 if (out)
1992 mb[off] |= I2O_SGL_DATA_OUT;
1993 } else {
1994 p = mb + off;
1995 nsegs = dm->dm_nsegs;
1996
1997 if (out)
1998 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
1999 else
2000 flg = I2O_SGL_SIMPLE;
2001
2002 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2003 p[0] = (u_int32_t)ds->ds_len | flg;
2004 p[1] = (u_int32_t)ds->ds_addr;
2005 }
2006
2007 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2008 I2O_SGL_END;
2009 p[1] = (u_int32_t)ds->ds_addr;
2010 nsegs <<= 1;
2011 }
2012
2013 /* Fix up the transfer record, and sync the map. */
2014 ix->ix_flags = (out ? IX_OUT : IX_IN);
2015 ix->ix_size = xfersize;
2016 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2017 out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2018
2019 /*
2020 * Adjust the SGL offset and total message size fields. We don't
2021 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2022 */
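/* SGL offset in words in bits 4:7; message size in words in bits 16:31. */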
2023 mb[0] += ((off << 4) + (nsegs << 16));
2024 return (0);
2025 }
2026
2027 /*
2028 * Unmap all data transfers associated with a message wrapper.
2029 */
2030 void
2031 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2032 {
2033 struct iop_xfer *ix;
2034 int i;
2035
2036 #ifdef I2ODEBUG
2037 if (im->im_xfer[0].ix_size == 0)
2038 panic("iop_msg_unmap: no transfers mapped");
2039 #endif
2040
2041 for (ix = im->im_xfer, i = 0;;) {
2042 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2043 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2044 BUS_DMASYNC_POSTREAD);
2045 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2046
2047 /* Only the first DMA map is static. */
2048 if (i != 0)
2049 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2050 if ((++ix)->ix_size == 0)
2051 break;
2052 if (++i >= IOP_MAX_MSG_XFERS)
2053 break;
2054 }
2055 }
2056
2057 /*
2058 * Post a message frame to the IOP's inbound queue.
2059 */
2060 int
2061 iop_post(struct iop_softc *sc, u_int32_t *mb)
2062 {
2063 u_int32_t mfa;
2064 int s;
2065
2066 #ifdef I2ODEBUG
2067 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2068 panic("iop_post: frame too large");
2069 #endif
2070
2071 s = splbio();
2072
2073 /* Allocate a slot with the IOP. */
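/*
 * As with the outbound FIFO (see iop_msg_poll()), the first read may
 * return IOP_MFA_EMPTY even when a frame is available, so try twice.
 */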
2074 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2075 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2076 splx(s);
2077 printf("%s: mfa not forthcoming\n",
2078 sc->sc_dv.dv_xname);
2079 return (EAGAIN);
2080 }
2081
2082 /* Perform reply buffer DMA synchronisation. */
2083 if (sc->sc_curib++ == 0)
2084 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2085 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2086
2087 /* Copy out the message frame. */
2088 bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa, mb, mb[0] >> 16);
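/* Barrier over the whole frame: size in words (bits 16:31) times 4. */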
2089 bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa, (mb[0] >> 14) & ~3,
2090 BUS_SPACE_BARRIER_WRITE);
2091
2092 /* Post the MFA back to the IOP. */
2093 iop_outl(sc, IOP_REG_IFIFO, mfa);
2094
2095 splx(s);
2096 return (0);
2097 }
2098
2099 /*
2100 * Post a message to the IOP and deal with completion.
2101 */
2102 int
2103 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2104 {
2105 u_int32_t *mb;
2106 int rv, s;
2107
2108 mb = xmb;
2109
2110 /* Terminate the scatter/gather list chain. */
2111 if ((im->im_flags & IM_SGLOFFADJ) != 0)
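/* The last element's flag word sits two words before the end. */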
2112 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2113
2114 if ((rv = iop_post(sc, mb)) != 0)
2115 return (rv);
2116
2117 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2118 if ((im->im_flags & IM_POLL) != 0)
2119 iop_msg_poll(sc, im, timo);
2120 else
2121 iop_msg_wait(sc, im, timo);
2122
2123 s = splbio();
2124 if ((im->im_flags & IM_REPLIED) != 0) {
2125 if ((im->im_flags & IM_NOSTATUS) != 0)
2126 rv = 0;
2127 else if ((im->im_flags & IM_FAIL) != 0)
2128 rv = ENXIO;
2129 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2130 rv = EIO;
2131 else
2132 rv = 0;
2133 } else
2134 rv = EBUSY;
2135 splx(s);
2136 } else
2137 rv = 0;
2138
2139 return (rv);
2140 }
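/*
 * A typical initiator-side sequence (cf. iop_util_claim() below) is:
 *
 *	im = iop_msg_alloc(sc, IM_WAIT);
 *	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
 *	mf.msgfunc = I2O_MSGFUNC(tid, func);
 *	mf.msgictx = ii->ii_ictx;
 *	mf.msgtctx = im->im_tctx;
 *	rv = iop_msg_post(sc, im, &mf, 5000);
 *	iop_msg_free(sc, im);
 */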
2141
2142 /*
2143 * Spin until the specified message is replied to.
2144 */
2145 static void
2146 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2147 {
2148 u_int32_t rmfa;
2149 int s, status;
2150
2151 s = splbio();
2152
2153 /* Wait for completion. */
2154 for (timo *= 10; timo != 0; timo--) {
2155 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2156 /* Double read to account for IOP bug. */
2157 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2158 if (rmfa == IOP_MFA_EMPTY)
2159 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2160 if (rmfa != IOP_MFA_EMPTY) {
2161 status = iop_handle_reply(sc, rmfa);
2162
2163 /*
2164 * Return the reply frame to the IOP's
2165 * outbound FIFO.
2166 */
2167 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2168 }
2169 }
2170 if ((im->im_flags & IM_REPLIED) != 0)
2171 break;
2172 DELAY(100);
2173 }
2174
2175 if (timo == 0) {
2176 #ifdef I2ODEBUG
2177 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2178 if (iop_status_get(sc, 1) != 0)
2179 printf("iop_msg_poll: unable to retrieve status\n");
2180 else
2181 printf("iop_msg_poll: IOP state = %d\n",
2182 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2183 #endif
2184 }
2185
2186 splx(s);
2187 }
2188
2189 /*
2190 * Sleep until the specified message is replied to.
2191 */
2192 static void
2193 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2194 {
2195 int s, rv;
2196
2197 s = splbio();
2198 if ((im->im_flags & IM_REPLIED) != 0) {
2199 splx(s);
2200 return;
2201 }
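/* timo is in milliseconds; tsleep() takes ticks. */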
2202 rv = tsleep(im, PRIBIO, "iopmsg", timo * hz / 1000);
2203 splx(s);
2204
2205 #ifdef I2ODEBUG
2206 if (rv != 0) {
2207 printf("iop_msg_wait: tsleep() == %d\n", rv);
2208 if (iop_status_get(sc, 0) != 0)
2209 printf("iop_msg_wait: unable to retrieve status\n");
2210 else
2211 printf("iop_msg_wait: IOP state = %d\n",
2212 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2213 }
2214 #endif
2215 }
2216
2217 /*
2218 * Release an unused message frame back to the IOP's inbound fifo.
2219 */
2220 static void
2221 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2222 {
2223
2224 /* Use the frame to issue a no-op. */
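/* The frame is 4 words: header, function code, and zeroed contexts. */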
2225 iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
2226 iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2227 iop_outl(sc, mfa + 8, 0);
2228 iop_outl(sc, mfa + 12, 0);
2229
2230 iop_outl(sc, IOP_REG_IFIFO, mfa);
2231 }
2232
2233 #ifdef I2ODEBUG
2234 /*
2235 * Dump a reply frame header.
2236 */
2237 static void
2238 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2239 {
2240 u_int function, detail;
2241 #ifdef I2OVERBOSE
2242 const char *statusstr;
2243 #endif
2244
2245 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2246 detail = le16toh(rb->detail);
2247
2248 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2249
2250 #ifdef I2OVERBOSE
2251 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2252 statusstr = iop_status[rb->reqstatus];
2253 else
2254 statusstr = "undefined error code";
2255
2256 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2257 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2258 #else
2259 printf("%s: function=0x%02x status=0x%02x\n",
2260 sc->sc_dv.dv_xname, function, rb->reqstatus);
2261 #endif
2262 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2263 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2264 le32toh(rb->msgtctx));
2265 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2266 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2267 (le32toh(rb->msgflags) >> 8) & 0xff);
2268 }
2269 #endif
2270
2271 /*
2272 * Dump a transport failure reply.
2273 */
2274 static void
2275 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2276 {
2277
2278 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2279
2280 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2281 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2282 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2283 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2284 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2285 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2286 }
2287
2288 /*
2289 * Translate an I2O ASCII field into a C string.
2290 */
2291 void
2292 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2293 {
2294 int hc, lc, i, nit;
2295
2296 dlen--;
2297 lc = 0;
2298 hc = 0;
2299 i = 0;
2300
2301 /*
2302 * DPT uses NUL as a space, whereas AMI uses it as a terminator.  The
2303 * spec has nothing to say about it.  Since AMI fields are usually
2304 * filled with junk after the terminator, treat NUL as a terminator
2305 * unless the IOP comes from DPT.
 */
2306 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2307
2308 while (slen-- != 0 && dlen-- != 0) {
2309 if (nit && *src == '\0')
2310 break;
2311 else if (*src <= 0x20 || *src >= 0x7f) {
2312 if (hc)
2313 dst[i++] = ' ';
2314 } else {
2315 hc = 1;
2316 dst[i++] = *src;
2317 lc = i;
2318 }
2319 src++;
2320 }
2321
2322 dst[lc] = '\0';
2323 }
2324
2325 /*
2326 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2327 */
2328 int
2329 iop_print_ident(struct iop_softc *sc, int tid)
2330 {
2331 struct {
2332 struct i2o_param_op_results pr;
2333 struct i2o_param_read_results prr;
2334 struct i2o_param_device_identity di;
2335 } __attribute__ ((__packed__)) p;
2336 char buf[32];
2337 int rv;
2338
2339 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2340 sizeof(p), NULL);
2341 if (rv != 0)
2342 return (rv);
2343
2344 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2345 sizeof(buf));
2346 printf(" <%s, ", buf);
2347 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2348 sizeof(buf));
2349 printf("%s, ", buf);
2350 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2351 printf("%s>", buf);
2352
2353 return (0);
2354 }
2355
2356 /*
2357 * Claim or unclaim the specified TID.
2358 */
2359 int
2360 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2361 int flags)
2362 {
2363 struct iop_msg *im;
2364 struct i2o_util_claim mf;
2365 int rv, func;
2366
2367 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2368 im = iop_msg_alloc(sc, IM_WAIT);
2369
2370 /* We can use the same structure, as they're identical. */
2371 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2372 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2373 mf.msgictx = ii->ii_ictx;
2374 mf.msgtctx = im->im_tctx;
2375 mf.flags = flags;
2376
2377 rv = iop_msg_post(sc, im, &mf, 5000);
2378 iop_msg_free(sc, im);
2379 return (rv);
2380 }
2381
2382 /*
2383 * Perform an abort.
2384 */
2385 int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2386 int tctxabort, int flags)
2387 {
2388 struct iop_msg *im;
2389 struct i2o_util_abort mf;
2390 int rv;
2391
2392 im = iop_msg_alloc(sc, IM_WAIT);
2393
2394 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2395 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2396 mf.msgictx = ii->ii_ictx;
2397 mf.msgtctx = im->im_tctx;
2398 mf.flags = (func << 24) | flags;
2399 mf.tctxabort = tctxabort;
2400
2401 rv = iop_msg_post(sc, im, &mf, 5000);
2402 iop_msg_free(sc, im);
2403 return (rv);
2404 }
2405
2406 /*
2407 * Enable or disable reception of events for the specified device.
2408 */
2409 int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2410 {
2411 struct i2o_util_event_register mf;
2412
2413 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2414 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2415 mf.msgictx = ii->ii_ictx;
2416 mf.msgtctx = 0;
2417 mf.eventmask = mask;
2418
2419 /* This message is replied to only when events are signalled. */
2420 return (iop_post(sc, (u_int32_t *)&mf));
2421 }
2422
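/*
 * Open the IOP control device.  Only one open at a time is allowed;
 * this is enforced via the IOP_OPEN flag.
 */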
2423 int
2424 iopopen(dev_t dev, int flag, int mode, struct proc *p)
2425 {
2426 struct iop_softc *sc;
2427
2428 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2429 return (ENXIO);
2430 if ((sc->sc_flags & IOP_ONLINE) == 0)
2431 return (ENXIO);
2432 if ((sc->sc_flags & IOP_OPEN) != 0)
2433 return (EBUSY);
2434 sc->sc_flags |= IOP_OPEN;
2435
2436 return (0);
2437 }
2438
2439 int
2440 iopclose(dev_t dev, int flag, int mode, struct proc *p)
2441 {
2442 struct iop_softc *sc;
2443
2444 sc = device_lookup(&iop_cd, minor(dev));
2445 sc->sc_flags &= ~IOP_OPEN;
2446
2447 return (0);
2448 }
2449
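/*
 * Handle control device ioctls: pass-through, IOP status, LCT and TID
 * map retrieval, and reconfiguration.
 */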
2450 int
2451 iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
2452 {
2453 struct iop_softc *sc;
2454 struct iovec *iov;
2455 int rv, i;
2456
2457 if (securelevel >= 2)
2458 return (EPERM);
2459
2460 sc = device_lookup(&iop_cd, minor(dev));
2461
2462 switch (cmd) {
2463 case IOPIOCPT:
2464 return (iop_passthrough(sc, (struct ioppt *)data, p));
2465
2466 case IOPIOCGSTATUS:
2467 iov = (struct iovec *)data;
2468 i = sizeof(struct i2o_status);
2469 if (i > iov->iov_len)
2470 i = iov->iov_len;
2471 else
2472 iov->iov_len = i;
2473 if ((rv = iop_status_get(sc, 0)) == 0)
2474 rv = copyout(&sc->sc_status, iov->iov_base, i);
2475 return (rv);
2476
2477 case IOPIOCGLCT:
2478 case IOPIOCGTIDMAP:
2479 case IOPIOCRECONFIG:
2480 break;
2481
2482 default:
2483 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2484 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2485 #endif
2486 return (ENOTTY);
2487 }
2488
2489 if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2490 return (rv);
2491
2492 switch (cmd) {
2493 case IOPIOCGLCT:
2494 iov = (struct iovec *)data;
2495 i = le16toh(sc->sc_lct->tablesize) << 2;
2496 if (i > iov->iov_len)
2497 i = iov->iov_len;
2498 else
2499 iov->iov_len = i;
2500 rv = copyout(sc->sc_lct, iov->iov_base, i);
2501 break;
2502
2503 case IOPIOCRECONFIG:
2504 rv = iop_reconfigure(sc, 0);
2505 break;
2506
2507 case IOPIOCGTIDMAP:
2508 iov = (struct iovec *)data;
2509 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2510 if (i > iov->iov_len)
2511 i = iov->iov_len;
2512 else
2513 iov->iov_len = i;
2514 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2515 break;
2516 }
2517
2518 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2519 return (rv);
2520 }
2521
2522 static int
2523 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2524 {
2525 struct iop_msg *im;
2526 struct i2o_msg *mf;
2527 struct ioppt_buf *ptb;
2528 int rv, i, mapped;
2529
2530 mf = NULL;
2531 im = NULL;
2532 mapped = 0;			/* set once a buffer has been mapped */
2533
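/*
 * Sanity check the message size, buffer count, reply length and
 * timeout supplied by userland.
 */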
2534 if (pt->pt_msglen > sc->sc_framesize ||
2535 pt->pt_msglen < sizeof(struct i2o_msg) ||
2536 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2537 pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2538 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2539 return (EINVAL);
2540
2541 for (i = 0; i < pt->pt_nbufs; i++)
2542 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2543 rv = ENOMEM;
2544 goto bad;
2545 }
2546
2547 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2548 if (mf == NULL)
2549 return (ENOMEM);
2550
2551 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2552 goto bad;
2553
2554 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2555 im->im_rb = (struct i2o_reply *)mf;
2556 mf->msgictx = IOP_ICTX;
2557 mf->msgtctx = im->im_tctx;
2558
2559 for (i = 0; i < pt->pt_nbufs; i++) {
2560 ptb = &pt->pt_bufs[i];
2561 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2562 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2563 if (rv != 0)
2564 goto bad;
2565 mapped = 1;
2566 }
2567
2568 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2569 goto bad;
2570
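/* The reply size is in words in bits 16:31; convert to bytes. */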
2571 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2572 if (i > sc->sc_framesize)
2573 i = sc->sc_framesize;
2574 if (i > pt->pt_replylen)
2575 i = pt->pt_replylen;
2576 rv = copyout(im->im_rb, pt->pt_reply, i);
2577
2578 bad:
2579 if (mapped != 0)
2580 iop_msg_unmap(sc, im);
2581 if (im != NULL)
2582 iop_msg_free(sc, im);
2583 if (mf != NULL)
2584 free(mf, M_DEVBUF);
2585 return (rv);
2586 }
2587