/*	$NetBSD: iop.c,v 1.22 2002/01/12 16:49:44 tsutsui Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.22 2002/01/12 16:49:44 tsutsui Exp $");

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

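/*
 * Busy-wait for `cond' to become true, polling every 100us and giving
 * up after `ms' milliseconds.
 */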
#define	POLL(ms, cond)						\
do {								\
	int i;							\
	for (i = (ms) * 10; i; i--) {				\
		if (cond)					\
			break;					\
		DELAY(100);					\
	}							\
} while (/* CONSTCOND */0)

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#define	COMMENT(x)	NULL
#else
#define	IFVERBOSE(x)
#define	COMMENT(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

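/*
 * Worst-case scatter/gather segment count: one segment per page, plus
 * one more since the transfer buffer need not be page aligned.
 */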
#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

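/*
 * Transaction context layout: the low IOP_TCTX_SHIFT bits index the
 * message wrapper, and the remaining bits carry a generation number
 * assigned at allocation time so that stale or duplicated replies can
 * be detected (see iop_msg_alloc() and iop_handle_reply()).
 */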
#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static void *iop_sdh;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

extern struct cfdriver iop_cd;

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char	*ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);
static int	iop_vendor_print(void *, const char *);

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
				struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

cdev_decl(iop);

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	state++;

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		printf("%s: frame size too small (%d)\n",
		    sc->sc_dv.dv_xname, sc->sc_framesize);
		return;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0, state++; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	return;

 bail_out:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);

}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    30000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);

	/*
	 * Start device configuration.
	 */
	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	if ((rv = iop_reconfigure(sc, 0)) == -1) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		return;
	}
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(sc->sc_reconf_proc);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(sc->sc_reconf_proc);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0)
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

static int
iop_vendor_print(void *aux, const char *pnp)
{

	return (QUIET);
}

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX,
		    0, 1000);
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

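	/*
	 * The IOP sets the sync byte to 0xff once the status block is
	 * valid.  Poll for up to 2.5 seconds (25 polls, 100ms apart).
	 */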
	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff) {
		printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
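	/*
	 * Account for the two SG list words just appended: the upper 16
	 * bits of message word 0 hold the frame size in 32-bit words.
	 */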
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curproc);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curproc);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

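	/*
	 * struct i2o_hrt embeds the first table entry, so only
	 * numentries - 1 additional entries need to be allocated.
	 */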
	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
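	/*
	 * tablesize is expressed in 32-bit words and, as with the HRT,
	 * the LCT header includes one embedded entry; hence the
	 * adjustment below.
	 */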
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct i2o_reply *rf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;
	im->im_rb = rf;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curproc);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curproc);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (rf->reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		free(rf, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
	      int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curproc);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curproc);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
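	/*
	 * sc_curib counts message frames outstanding at the IOP; while any
	 * remain, hand the reply area back to the device (PREREAD) so that
	 * further replies can be DMAed in.
	 */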
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) >= sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
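	/*
	 * A zero ix_size marks a transfer slot as free; iop_msg_map()
	 * and iop_msg_unmap() both rely on this sentinel.
	 */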
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
	    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
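	/* Each SIMPLE element occupies two words: flags/count and address. */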
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
	mb[0] += (nsegs << 17);
	return (0);

 bad:
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}

/*
 * Map a block I/O data transfer (different in that there's only one per
 * message maximum, and PAGE addressing may be used).  Write a scatter
 * gather list into the message frame.
 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
		void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
	if (rv != 0)
		return (rv);

	off = mb[0] >> 16;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
			slen = ds->ds_len;
			saddr = ds->ds_addr;

			while (slen > 0) {
				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
				tlen = min(eaddr - saddr, slen);
				slen -= tlen;
				*p++ = (u_int32_t)saddr;
1990 saddr = eaddr;
1991 nsegs++;
1992 }
1993 }
1994
1995 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
1996 I2O_SGL_END;
1997 if (out)
1998 mb[off] |= I2O_SGL_DATA_OUT;
1999 } else {
2000 p = mb + off;
2001 nsegs = dm->dm_nsegs;
2002
2003 if (out)
2004 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2005 else
2006 flg = I2O_SGL_SIMPLE;
2007
2008 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2009 p[0] = (u_int32_t)ds->ds_len | flg;
2010 p[1] = (u_int32_t)ds->ds_addr;
2011 }
2012
2013 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2014 I2O_SGL_END;
2015 p[1] = (u_int32_t)ds->ds_addr;
2016 nsegs <<= 1;
2017 }
2018
2019 /* Fix up the transfer record, and sync the map. */
2020 ix->ix_flags = (out ? IX_OUT : IX_IN);
2021 ix->ix_size = xfersize;
2022 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2023 out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2024
2025 /*
2026 * Adjust the SGL offset and total message size fields. We don't
2027 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2028 */
2029 mb[0] += ((off << 4) + (nsegs << 16));
2030 return (0);
2031 }
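
/*
 * A minimal sketch of the PAGE_LIST split above (illustrative only; the
 * I2OEXAMPLE guard and function name are not part of the driver).  With
 * PAGE_SIZE == 4096, a segment at 0x1f00 of length 0x300 produces the
 * entries 0x1f00 (0x100 bytes) and 0x2000 (0x200 bytes).
 */
#ifdef I2OEXAMPLE
static u_int32_t *
iop_example_page_split(paddr_t saddr, u_int slen, u_int32_t *p)
{
	paddr_t eaddr;
	u_int tlen;

	while (slen > 0) {
		/* Clip at the next page boundary or the segment end. */
		eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
		tlen = min(eaddr - saddr, slen);
		slen -= tlen;
		*p++ = (u_int32_t)saddr;
		saddr = eaddr;
	}

	return (p);
}
#endif	/* I2OEXAMPLE */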
2032
2033 /*
2034 * Unmap all data transfers associated with a message wrapper.
2035 */
2036 void
2037 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2038 {
2039 struct iop_xfer *ix;
2040 int i;
2041
2042 #ifdef I2ODEBUG
2043 if (im->im_xfer[0].ix_size == 0)
2044 panic("iop_msg_unmap: no transfers mapped");
2045 #endif
2046
2047 for (ix = im->im_xfer, i = 0;;) {
2048 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2049 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2050 BUS_DMASYNC_POSTREAD);
2051 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2052
2053 /* Only the first DMA map is static. */
2054 if (i != 0)
2055 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2056 if (++i >= IOP_MAX_MSG_XFERS)
2057 break;
2058 if ((++ix)->ix_size == 0)
2059 break;
2060 }
2061 }
2062
2063 /*
2064 * Post a message frame to the IOP's inbound queue.
2065 */
2066 int
2067 iop_post(struct iop_softc *sc, u_int32_t *mb)
2068 {
2069 u_int32_t mfa;
2070 int s;
2071
2072 #ifdef I2ODEBUG
2073 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2074 panic("iop_post: frame too large");
2075 #endif
2076
2077 s = splbio();
2078
2079 /* Allocate a slot with the IOP; retry once if the FIFO reads empty. */
2080 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2081 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2082 splx(s);
2083 printf("%s: mfa not forthcoming\n",
2084 sc->sc_dv.dv_xname);
2085 return (EAGAIN);
2086 }
2087
2088 /* Perform reply buffer DMA synchronisation. */
2089 if (sc->sc_curib++ == 0)
2090 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2091 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2092
2093 /* Copy out the message frame. */
2094 bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa, mb, mb[0] >> 16);
2095 bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa, (mb[0] >> 14) & ~3,
2096 BUS_SPACE_BARRIER_WRITE);
2097
2098 /* Post the MFA back to the IOP. */
2099 iop_outl(sc, IOP_REG_IFIFO, mfa);
2100
2101 splx(s);
2102 return (0);
2103 }
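
/*
 * A minimal sketch of the barrier length above (illustrative only; the
 * I2OEXAMPLE guard and function name are not part of the driver).  The
 * header keeps the frame length in 32-bit words in bits 16..31 of the
 * first word, so the byte count is (mb[0] >> 16) << 2, which the code
 * writes as (mb[0] >> 14) & ~3.
 */
#ifdef I2OEXAMPLE
static __inline u_int
iop_example_frame_bytes(u_int32_t mb0)
{

	return ((mb0 >> 14) & ~3);
}
#endif	/* I2OEXAMPLE */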
2104
2105 /*
2106 * Post a message to the IOP and deal with completion.
2107 */
2108 int
2109 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2110 {
2111 u_int32_t *mb;
2112 int rv, s;
2113
2114 mb = xmb;
2115
2116 /* Terminate the scatter/gather list chain. */
2117 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2118 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2119
2120 if ((rv = iop_post(sc, mb)) != 0)
2121 return (rv);
2122
2123 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2124 if ((im->im_flags & IM_POLL) != 0)
2125 iop_msg_poll(sc, im, timo);
2126 else
2127 iop_msg_wait(sc, im, timo);
2128
2129 s = splbio();
2130 if ((im->im_flags & IM_REPLIED) != 0) {
2131 if ((im->im_flags & IM_NOSTATUS) != 0)
2132 rv = 0;
2133 else if ((im->im_flags & IM_FAIL) != 0)
2134 rv = ENXIO;
2135 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2136 rv = EIO;
2137 else
2138 rv = 0;
2139 } else
2140 rv = EBUSY;
2141 splx(s);
2142 } else
2143 rv = 0;
2144
2145 return (rv);
2146 }
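
/*
 * A minimal sketch of the terminator index above (illustrative only; the
 * I2OEXAMPLE guard and function name are not part of the driver).  With
 * SIMPLE elements, the final element's flags word sits two words from
 * the end of the frame, which is where I2O_SGL_END is ORed in.
 */
#ifdef I2OEXAMPLE
static __inline u_int
iop_example_last_sgl_flags(u_int32_t mb0)
{

	/* mb0 >> 16 is the frame length in 32-bit words. */
	return ((mb0 >> 16) - 2);
}
#endif	/* I2OEXAMPLE */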
2147
2148 /*
2149 * Spin until the specified message is replied to.
2150 */
2151 static void
2152 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2153 {
2154 u_int32_t rmfa;
2155 int s, status;
2156
2157 s = splbio();
2158
2159 /* Wait for completion. */
2160 for (timo *= 10; timo != 0; timo--) {
2161 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2162 /* Double read to account for IOP bug. */
2163 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2164 if (rmfa == IOP_MFA_EMPTY)
2165 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2166 if (rmfa != IOP_MFA_EMPTY) {
2167 status = iop_handle_reply(sc, rmfa);
2168
2169 /*
2170 * Return the reply frame to the IOP's
2171 * outbound FIFO.
2172 */
2173 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2174 }
2175 }
2176 if ((im->im_flags & IM_REPLIED) != 0)
2177 break;
2178 DELAY(100);
2179 }
2180
2181 if (timo == 0) {
2182 #ifdef I2ODEBUG
2183 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2184 if (iop_status_get(sc, 1) != 0)
2185 printf("iop_msg_poll: unable to retrieve status\n");
2186 else
2187 printf("iop_msg_poll: IOP state = %d\n",
2188 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2189 #endif
2190 }
2191
2192 splx(s);
2193 }
2194
2195 /*
2196 * Sleep until the specified message is replied to.
2197 */
2198 static void
2199 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2200 {
2201 int s, rv;
2202
2203 s = splbio();
2204 if ((im->im_flags & IM_REPLIED) != 0) {
2205 splx(s);
2206 return;
2207 }
2208 rv = tsleep(im, PRIBIO, "iopmsg", timo * hz / 1000);
2209 splx(s);
2210
2211 #ifdef I2ODEBUG
2212 if (rv != 0) {
2213 printf("iop_msg_wait: tsleep() == %d\n", rv);
2214 if (iop_status_get(sc, 0) != 0)
2215 printf("iop_msg_wait: unable to retrieve status\n");
2216 else
2217 printf("iop_msg_wait: IOP state = %d\n",
2218 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2219 }
2220 #endif
2221 }
2222
2223 /*
2224 * Release an unused message frame back to the IOP's inbound fifo.
2225 */
2226 static void
2227 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2228 {
2229
2230 /* Use the frame to issue a no-op. */
2231 iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
2232 iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2233 iop_outl(sc, mfa + 8, 0);
2234 iop_outl(sc, mfa + 12, 0);
2235
2236 iop_outl(sc, IOP_REG_IFIFO, mfa);
2237 }
2238
2239 #ifdef I2ODEBUG
2240 /*
2241 * Dump a reply frame header.
2242 */
2243 static void
2244 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2245 {
2246 u_int function, detail;
2247 #ifdef I2OVERBOSE
2248 const char *statusstr;
2249 #endif
2250
2251 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2252 detail = le16toh(rb->detail);
2253
2254 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2255
2256 #ifdef I2OVERBOSE
2257 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2258 statusstr = iop_status[rb->reqstatus];
2259 else
2260 statusstr = "undefined error code";
2261
2262 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2263 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2264 #else
2265 printf("%s: function=0x%02x status=0x%02x\n",
2266 sc->sc_dv.dv_xname, function, rb->reqstatus);
2267 #endif
2268 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2269 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2270 le32toh(rb->msgtctx));
2271 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2272 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2273 (le32toh(rb->msgflags) >> 8) & 0xff);
2274 }
2275 #endif
2276
2277 /*
2278 * Dump a transport failure reply.
2279 */
2280 static void
2281 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2282 {
2283
2284 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2285
2286 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2287 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2288 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2289 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2290 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2291 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2292 }
2293
2294 /*
2295 * Translate an I2O ASCII field into a C string.
2296 */
2297 void
2298 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2299 {
2300 int hc, lc, i, nit;
2301
2302 dlen--;
2303 lc = 0;
2304 hc = 0;
2305 i = 0;
2306
2307 /*
2308 * DPT uses NUL as a space, whereas AMI uses it as a terminator. The
2309 * spec has nothing to say about it. Since AMI fields are usually
2310 * junk-filled after the terminator, stop at NUL for non-DPT IOPs.
2311 */
2312 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2313
2314 while (slen-- != 0 && dlen-- != 0) {
2315 if (nit && *src == '\0')
2316 break;
2317 else if (*src <= 0x20 || *src >= 0x7f) {
2318 if (hc)
2319 dst[i++] = ' ';
2320 } else {
2321 hc = 1;
2322 dst[i++] = *src;
2323 lc = i;
2324 }
2325 src++;
2326 }
2327
2328 dst[lc] = '\0';
2329 }
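
/*
 * A minimal sketch of using iop_strvis() (illustrative only; the
 * I2OEXAMPLE guard and function name are not part of the driver).  A
 * space-padded, unterminated field such as "ADAPTEC " prints as the C
 * string "ADAPTEC"; trailing blanks are trimmed by the dst[lc] store.
 */
#ifdef I2OEXAMPLE
static void
iop_example_strvis(struct iop_softc *sc,
    const struct i2o_param_device_identity *di)
{
	char buf[32];

	iop_strvis(sc, di->vendorinfo, sizeof(di->vendorinfo), buf,
	    sizeof(buf));
	printf("%s: vendor <%s>\n", sc->sc_dv.dv_xname, buf);
}
#endif	/* I2OEXAMPLE */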
2330
2331 /*
2332 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2333 */
2334 int
2335 iop_print_ident(struct iop_softc *sc, int tid)
2336 {
2337 struct {
2338 struct i2o_param_op_results pr;
2339 struct i2o_param_read_results prr;
2340 struct i2o_param_device_identity di;
2341 } __attribute__ ((__packed__)) p;
2342 char buf[32];
2343 int rv;
2344
2345 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2346 sizeof(p), NULL);
2347 if (rv != 0)
2348 return (rv);
2349
2350 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2351 sizeof(buf));
2352 printf(" <%s, ", buf);
2353 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2354 sizeof(buf));
2355 printf("%s, ", buf);
2356 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2357 printf("%s>", buf);
2358
2359 return (0);
2360 }
2361
2362 /*
2363 * Claim or unclaim the specified TID.
2364 */
2365 int
2366 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2367 int flags)
2368 {
2369 struct iop_msg *im;
2370 struct i2o_util_claim mf;
2371 int rv, func;
2372
2373 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2374 im = iop_msg_alloc(sc, IM_WAIT);
2375
2376 /* We can use the same structure, as they're identical. */
2377 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2378 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2379 mf.msgictx = ii->ii_ictx;
2380 mf.msgtctx = im->im_tctx;
2381 mf.flags = flags;
2382
2383 rv = iop_msg_post(sc, im, &mf, 5000);
2384 iop_msg_free(sc, im);
2385 return (rv);
2386 }
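
/*
 * A minimal sketch of a typical caller (illustrative only; the
 * I2OEXAMPLE guard and function name are not part of the driver, and
 * I2O_UTIL_CLAIM_PRIMARY_USER is assumed to come from i2o.h).  A
 * peripheral driver claims its device as primary user at attach time,
 * and releases the claim again at detach time.
 */
#ifdef I2OEXAMPLE
static int
iop_example_claim(struct iop_softc *sc, struct iop_initiator *ii)
{

	return (iop_util_claim(sc, ii, 0, I2O_UTIL_CLAIM_PRIMARY_USER));
}
#endif	/* I2OEXAMPLE */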
2387
2388 /*
2389 * Perform an abort.
2390 */
2391 int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2392 int tctxabort, int flags)
2393 {
2394 struct iop_msg *im;
2395 struct i2o_util_abort mf;
2396 int rv;
2397
2398 im = iop_msg_alloc(sc, IM_WAIT);
2399
2400 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2401 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2402 mf.msgictx = ii->ii_ictx;
2403 mf.msgtctx = im->im_tctx;
2404 mf.flags = (func << 24) | flags;
2405 mf.tctxabort = tctxabort;
2406
2407 rv = iop_msg_post(sc, im, &mf, 5000);
2408 iop_msg_free(sc, im);
2409 return (rv);
2410 }
2411
2412 /*
2413 * Enable or disable reception of events for the specified device.
2414 */
2415 int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2416 {
2417 struct i2o_util_event_register mf;
2418
2419 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2420 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2421 mf.msgictx = ii->ii_ictx;
2422 mf.msgtctx = 0;
2423 mf.eventmask = mask;
2424
2425 /* This message is replied to only when events are signalled. */
2426 return (iop_post(sc, (u_int32_t *)&mf));
2427 }
2428
2429 int
2430 iopopen(dev_t dev, int flag, int mode, struct proc *p)
2431 {
2432 struct iop_softc *sc;
2433
2434 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2435 return (ENXIO);
2436 if ((sc->sc_flags & IOP_ONLINE) == 0)
2437 return (ENXIO);
2438 if ((sc->sc_flags & IOP_OPEN) != 0)
2439 return (EBUSY);
2440 sc->sc_flags |= IOP_OPEN;
2441
2442 return (0);
2443 }
2444
2445 int
2446 iopclose(dev_t dev, int flag, int mode, struct proc *p)
2447 {
2448 struct iop_softc *sc;
2449
2450 sc = device_lookup(&iop_cd, minor(dev));
2451 sc->sc_flags &= ~IOP_OPEN;
2452
2453 return (0);
2454 }
2455
2456 int
2457 iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
2458 {
2459 struct iop_softc *sc;
2460 struct iovec *iov;
2461 int rv, i;
2462
2463 if (securelevel >= 2)
2464 return (EPERM);
2465
2466 sc = device_lookup(&iop_cd, minor(dev));
2467
2468 switch (cmd) {
2469 case IOPIOCPT:
2470 return (iop_passthrough(sc, (struct ioppt *)data, p));
2471
2472 case IOPIOCGSTATUS:
2473 iov = (struct iovec *)data;
2474 i = sizeof(struct i2o_status);
2475 if (i > iov->iov_len)
2476 i = iov->iov_len;
2477 else
2478 iov->iov_len = i;
2479 if ((rv = iop_status_get(sc, 0)) == 0)
2480 rv = copyout(&sc->sc_status, iov->iov_base, i);
2481 return (rv);
2482
2483 case IOPIOCGLCT:
2484 case IOPIOCGTIDMAP:
2485 case IOPIOCRECONFIG:
2486 break;
2487
2488 default:
2489 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2490 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2491 #endif
2492 return (ENOTTY);
2493 }
2494
2495 if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2496 return (rv);
2497
2498 switch (cmd) {
2499 case IOPIOCGLCT:
2500 iov = (struct iovec *)data;
2501 i = le16toh(sc->sc_lct->tablesize) << 2;
2502 if (i > iov->iov_len)
2503 i = iov->iov_len;
2504 else
2505 iov->iov_len = i;
2506 rv = copyout(sc->sc_lct, iov->iov_base, i);
2507 break;
2508
2509 case IOPIOCRECONFIG:
2510 rv = iop_reconfigure(sc, 0);
2511 break;
2512
2513 case IOPIOCGTIDMAP:
2514 iov = (struct iovec *)data;
2515 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2516 if (i > iov->iov_len)
2517 i = iov->iov_len;
2518 else
2519 iov->iov_len = i;
2520 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2521 break;
2522 }
2523
2524 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2525 return (rv);
2526 }
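
/*
 * A minimal userland sketch of IOPIOCGSTATUS (illustrative only; the
 * device path is an assumption).  The caller passes an iovec, and the
 * driver clips iov_len to the number of bytes actually copied out:
 *
 *	struct i2o_status st;
 *	struct iovec iov;
 *	int fd;
 *
 *	fd = open("/dev/iop0", O_RDWR);
 *	iov.iov_base = &st;
 *	iov.iov_len = sizeof(st);
 *	if (fd != -1 && ioctl(fd, IOPIOCGSTATUS, &iov) == 0)
 *		printf("got %lu status bytes\n", (u_long)iov.iov_len);
 */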
2527
2528 static int
2529 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2530 {
2531 struct iop_msg *im;
2532 struct i2o_msg *mf;
2533 struct ioppt_buf *ptb;
2534 int rv, i, mapped;
2535
2536 mf = NULL;
2537 im = NULL;
2538 mapped = 0;
2539
2540 if (pt->pt_msglen > sc->sc_framesize ||
2541 pt->pt_msglen < sizeof(struct i2o_msg) ||
2542 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2543 pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2544 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2545 return (EINVAL);
2546
2547 for (i = 0; i < pt->pt_nbufs; i++)
2548 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2549 rv = ENOMEM;
2550 goto bad;
2551 }
2552
2553 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2554 if (mf == NULL)
2555 return (ENOMEM);
2556
2557 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2558 goto bad;
2559
2560 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2561 im->im_rb = (struct i2o_reply *)mf;
2562 mf->msgictx = IOP_ICTX;
2563 mf->msgtctx = im->im_tctx;
2564
2565 for (i = 0; i < pt->pt_nbufs; i++) {
2566 ptb = &pt->pt_bufs[i];
2567 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2568 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2569 if (rv != 0)
2570 goto bad;
2571 mapped = 1;
2572 }
2573
2574 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2575 goto bad;
2576
2577 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2578 if (i > sc->sc_framesize)
2579 i = sc->sc_framesize;
2580 if (i > pt->pt_replylen)
2581 i = pt->pt_replylen;
2582 rv = copyout(im->im_rb, pt->pt_reply, i);
2583
2584 bad:
2585 if (mapped != 0)
2586 iop_msg_unmap(sc, im);
2587 if (im != NULL)
2588 iop_msg_free(sc, im);
2589 if (mf != NULL)
2590 free(mf, M_DEVBUF);
2591 return (rv);
2592 }
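
/*
 * A minimal userland sketch of IOPIOCPT (illustrative only; "fd",
 * "frame", "framelen" and "replybuf" are assumptions).  The message must
 * be a complete I2O frame; the driver supplies msgictx and msgtctx
 * itself, and pt_timo must fall between 1000 and 300000 milliseconds:
 *
 *	struct ioppt pt;
 *
 *	memset(&pt, 0, sizeof(pt));
 *	pt.pt_msg = frame;
 *	pt.pt_msglen = framelen;
 *	pt.pt_reply = replybuf;
 *	pt.pt_replylen = sizeof(replybuf);
 *	pt.pt_timo = 5000;
 *	if (ioctl(fd, IOPIOCPT, &pt) == 0)
 *		...examine the reply frame in replybuf...
 */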
2593