/*	$NetBSD: iop.c,v 1.19.4.2 2001/10/25 18:00:39 he Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <machine/vmparam.h>
#include <machine/bus.h>

#include <vm/vm.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#define	POLL(ms, cond)						\
do {								\
	int i;							\
	for (i = (ms) * 10; i; i--) {				\
		if (cond)					\
			break;					\
		DELAY(100);					\
	}							\
} while (/* CONSTCOND */0)

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#define	COMMENT(x)	NULL
#else
#define	IFVERBOSE(x)
#define	COMMENT(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + NBPG - 1) / NBPG) + 1)

#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long	iop_ictxhash;
static void	*iop_sdh;
static struct	i2o_systab *iop_systab;
static int	iop_systab_size;

extern struct cfdriver iop_cd;

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char	*ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static int	iop_reconfigure(struct iop_softc *, u_int);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);
#ifdef notyet
static int	iop_vendor_print(void *, const char *);
#endif

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
		    u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
		    struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_status_get(struct iop_softc *, int);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

cdev_decl(iop);

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, NBPG, 1, NBPG, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	state++;

	if (bus_dmamem_alloc(sc->sc_dmat, NBPG, NBPG, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, NBPG,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    NBPG, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT);
	if (im == NULL) {
		printf("%s: cannot allocate message wrappers\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	memset(im, 0, sizeof(*im) * sc->sc_maxib);
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0, state++; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out;
		}
374
375 im->im_tctx = i;
376 SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
377 }
378
	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	return;

 bail_out:
	if (state > 4) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
	if (state > 3)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 2)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, NBPG);
	if (state > 1)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    iop->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT);
		if (iop_systab == NULL) {
			printf("%s: unable to allocate system table\n",
			    sc->sc_dv.dv_xname);
			return;
		}

		memset(iop_systab, 0, i);
		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    30000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

#ifdef notyet
	/* Attempt to match and attach a product-specific extension. */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);
#endif

	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	rv = iop_reconfigure(sc, 0);
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
	if (rv != 0) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		return;
	}

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
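	/*
	 * Ask to be notified of any LCT change newer than the last one
	 * we have seen; iop_lct_get0() blocks until such a change occurs.
	 */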
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(sc->sc_reconf_proc);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(sc->sc_reconf_proc);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
static int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_tidmap == NULL)
		return (ENOMEM);
	memset(sc->sc_tidmap, 0, sc->sc_nlctent * sizeof(struct iop_tidmap));

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0)
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

#ifdef notyet
static int
iop_vendor_print(void *aux, const char *pnp)
{

	if (pnp != NULL)
		printf("vendor specific extension at %s", pnp);
	return (UNCONF);
}
#endif

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX,
		    0, 1000);
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done.\n");
}

/*
 * Retrieve IOP status.
 */
static int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

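	/* Poll for the status block: up to 25 tries, roughly 100ms apart. */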
	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff)
		rv = EIO;
	else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialise and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = NBPG;
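	/* Init code, with the outbound frame size (in 32-bit words) in the
	 * high 16 bits. */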
	mf->flags = IOP_INIT_CODE | ((IOP_MAX_MSG_SIZE >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
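	/* Grow the message size field to cover the two SGL words above. */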
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * IOP_MAX_MSG_SIZE;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, NBPG,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += IOP_MAX_MSG_SIZE;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curproc);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curproc);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
    u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
    int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct i2o_reply *rf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;
	im->im_rb = rf;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curproc);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curproc);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (rf->reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		free(rf, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curproc);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

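	/* Two extra u_int16_ts carry the RowCount and KeyValue fields. */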
	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
    int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
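	/* IOP IDs are biased by 2, matching the system table entries built
	 * in iop_config_interrupts(). */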
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), NBPG, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curproc);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

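	/* The reply MFA is a bus address within the reply buffer. */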
	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    IOP_MAX_MSG_SIZE, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) >= sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
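			/*
			 * The reply size lives in the top 16 bits of
			 * msgflags, in 32-bit words; >> 14 yields bytes.
			 */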
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > IOP_MAX_MSG_SIZE)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

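	/*
	 * The transaction context is the wrapper's index in the low
	 * IOP_TCTX_SHIFT bits plus a generation count above them, so
	 * stale replies can be caught in iop_handle_reply().
	 */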
	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((IOP_MAX_MSG_SIZE / 4) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
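	/* Each SIMPLE element added is two words; grow the size field. */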
	mb[0] += (nsegs << 17);
	return (0);

 bad:
 	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}

/*
 * Map a block I/O data transfer (different in that there's only one per
 * message maximum, and PAGE addressing may be used).  Write a scatter
 * gather list into the message frame.
 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
	    BUS_DMA_NOWAIT);
	if (rv != 0)
		return (rv);

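	/* off: current message size in words, i.e. where the SGL starts;
	 * nsegs: how many two-word SIMPLE elements fit in the frame. */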
	off = mb[0] >> 16;
	nsegs = ((IOP_MAX_MSG_SIZE / 4) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
			slen = ds->ds_len;
			saddr = ds->ds_addr;

			while (slen > 0) {
				eaddr = (saddr + NBPG) & ~(NBPG - 1);
				tlen = min(eaddr - saddr, slen);
				slen -= tlen;
				*p++ = le32toh(saddr);
				saddr = eaddr;
				nsegs++;
			}
		}

		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		if (out)
			mb[off] |= I2O_SGL_DATA_OUT;
	} else {
		p = mb + off;
		nsegs = dm->dm_nsegs;

		if (out)
			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
		else
			flg = I2O_SGL_SIMPLE;

		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
			p[0] = (u_int32_t)ds->ds_len | flg;
			p[1] = (u_int32_t)ds->ds_addr;
		}

		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		p[1] = (u_int32_t)ds->ds_addr;
		nsegs <<= 1;
	}

2007 /* Fix up the transfer record, and sync the map. */
2008 ix->ix_flags = (out ? IX_OUT : IX_IN);
2009 ix->ix_size = xfersize;
2010 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2011     out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);	/* PRE ops: DMA has not started yet */
2012
2013 /*
2014 * Adjust the SGL offset and total message size fields. We don't
2015 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2016 */
2017 mb[0] += ((off << 4) + (nsegs << 16));
2018 return (0);
2019 }
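
/*
 * Worked example for the PAGE_LIST case above (NBPG = 4096): a
 * 0x2800-byte buffer occupying one DMA segment at physical address
 * 0x12345800 is split on page boundaries into the three entries
 * 0x12345800, 0x12346000 and 0x12347000, preceded by the header word
 * 0x2800 | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER | I2O_SGL_END;
 * nsegs is then 4, the number of words appended to the frame.
 */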
2020
2021 /*
2022 * Unmap all data transfers associated with a message wrapper.
2023 */
2024 void
2025 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2026 {
2027 struct iop_xfer *ix;
2028 int i;
2029
2030 #ifdef I2ODEBUG
2031 if (im->im_xfer[0].ix_size == 0)
2032 panic("iop_msg_unmap: no transfers mapped");
2033 #endif
2034
2035 for (ix = im->im_xfer, i = 0;;) {
2036 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2037 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2038 BUS_DMASYNC_POSTREAD);
2039 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2040
2041 /* Only the first DMA map is static. */
2042 if (i != 0)
2043 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
/* Check the count first; a fully-populated im_xfer[] has no sentinel. */
2044 if (++i >= IOP_MAX_MSG_XFERS)
2045 break;
2046 if ((++ix)->ix_size == 0)
2047 break;
2048 }
2049 }
2050
2051 /*
2052 * Post a message frame to the IOP's inbound queue.
2053 */
2054 int
2055 iop_post(struct iop_softc *sc, u_int32_t *mb)
2056 {
2057 u_int32_t mfa;
2058 int s;
2059
2060 #ifdef I2ODEBUG
2061 if ((mb[0] >> 16) > IOP_MAX_MSG_SIZE / 4)
2062 panic("iop_post: frame too large");
2063 #endif
2064
2065 s = splbio();
2066
2067 /* Allocate a slot with the IOP.  The inbound FIFO is read twice,
* as a single read may transiently yield IOP_MFA_EMPTY (compare the
* double read in iop_msg_poll()). */
2068 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2069 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2070 splx(s);
2071 printf("%s: mfa not forthcoming\n",
2072 sc->sc_dv.dv_xname);
2073 return (EAGAIN);
2074 }
2075
2076 /* Perform reply buffer DMA synchronisation. */
2077 if (sc->sc_curib++ == 0)
2078 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2079 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2080
2081 /* Copy out the message frame. */
2082 bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa, mb, mb[0] >> 16);
2083 bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa, (mb[0] >> 14) & ~3,
2084     BUS_SPACE_BARRIER_WRITE);	/* (mb[0] >> 14) & ~3: words << 2 = bytes */
2085
2086 /* Post the MFA back to the IOP. */
2087 iop_outl(sc, IOP_REG_IFIFO, mfa);
2088
2089 splx(s);
2090 return (0);
2091 }
2092
2093 /*
2094 * Post a message to the IOP and deal with completion.
2095 */
2096 int
2097 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2098 {
2099 u_int32_t *mb;
2100 int rv, s;
2101
2102 mb = xmb;
2103
2104 /* Terminate the scatter/gather list chain. */
2105 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2106 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2107
2108 if ((rv = iop_post(sc, mb)) != 0)
2109 return (rv);
2110
2111 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2112 if ((im->im_flags & IM_POLL) != 0)
2113 iop_msg_poll(sc, im, timo);
2114 else
2115 iop_msg_wait(sc, im, timo);
2116
2117 s = splbio();
2118 if ((im->im_flags & IM_REPLIED) != 0) {
2119 if ((im->im_flags & IM_NOSTATUS) != 0)
2120 rv = 0;
2121 else if ((im->im_flags & IM_FAIL) != 0)
2122 rv = ENXIO;
2123 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2124 rv = EIO;
2125 else
2126 rv = 0;
2127 } else
2128 rv = EBUSY;
2129 splx(s);
2130 } else
2131 rv = 0;
2132
2133 return (rv);
2134 }
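
/*
 * Note on the completion flags handled above: IM_POLL spins and calls
 * the reply handler directly, so it is usable before interrupts are
 * running (e.g. during autoconfiguration); IM_WAIT tsleep()s and thus
 * requires process context.  With neither flag set, iop_msg_post()
 * returns once the frame is posted and the reply is processed
 * asynchronously from the interrupt path.
 */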
2135
2136 /*
2137 * Spin until the specified message is replied to.
2138 */
2139 static void
2140 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2141 {
2142 u_int32_t rmfa;
2143 int s, status;
2144
2145 s = splbio();
2146
2147 /* Wait for completion. */
2148 for (timo *= 10; timo != 0; timo--) {
2149 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2150 /* Double read to account for IOP bug. */
2151 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2152 if (rmfa == IOP_MFA_EMPTY)
2153 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2154 if (rmfa != IOP_MFA_EMPTY) {
2155 status = iop_handle_reply(sc, rmfa);
2156
2157 /*
2158 * Return the reply frame to the IOP's
2159 * outbound FIFO.
2160 */
2161 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2162 }
2163 }
2164 if ((im->im_flags & IM_REPLIED) != 0)
2165 break;
2166 DELAY(100);
2167 }
2168
2169 if (timo == 0) {
2170 #ifdef I2ODEBUG
2171 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2172 if (iop_status_get(sc, 1) != 0)
2173 printf("iop_msg_poll: unable to retrieve status\n");
2174 else
2175 printf("iop_msg_poll: IOP state = %d\n",
2176 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2177 #endif
2178 }
2179
2180 splx(s);
2181 }
2182
2183 /*
2184 * Sleep until the specified message is replied to.
2185 */
2186 static void
2187 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2188 {
2189 int s, rv;
2190
2191 s = splbio();
2192 if ((im->im_flags & IM_REPLIED) != 0) {
2193 splx(s);
2194 return;
2195 }
2196 rv = tsleep(im, PRIBIO, "iopmsg", timo * hz / 1000);
2197 splx(s);
2198
2199 #ifdef I2ODEBUG
2200 if (rv != 0) {
2201 printf("iop_msg_wait: tsleep() == %d\n", rv);
2202 if (iop_status_get(sc, 0) != 0)
2203 printf("iop_msg_wait: unable to retrieve status\n");
2204 else
2205 printf("iop_msg_wait: IOP state = %d\n",
2206 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2207 }
2208 #endif
2209 }
2210
2211 /*
2212 * Release an unused message frame back to the IOP's inbound fifo.
2213 */
2214 static void
2215 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2216 {
2217
2218 /* Use the frame to issue a no-op. */
2219 iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));	/* 4-word frame */
2220 iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2221 iop_outl(sc, mfa + 8, 0);	/* initiator context */
2222 iop_outl(sc, mfa + 12, 0);	/* transaction context */
2223
2224 iop_outl(sc, IOP_REG_IFIFO, mfa);
2225 }
2226
2227 #ifdef I2ODEBUG
2228 /*
2229 * Dump a reply frame header.
2230 */
2231 static void
2232 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2233 {
2234 u_int function, detail;
2235 #ifdef I2OVERBOSE
2236 const char *statusstr;
2237 #endif
2238
2239 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2240 detail = le16toh(rb->detail);
2241
2242 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2243
2244 #ifdef I2OVERBOSE
2245 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2246 statusstr = iop_status[rb->reqstatus];
2247 else
2248 statusstr = "undefined error code";
2249
2250 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2251 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2252 #else
2253 printf("%s: function=0x%02x status=0x%02x\n",
2254 sc->sc_dv.dv_xname, function, rb->reqstatus);
2255 #endif
2256 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2257 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2258 le32toh(rb->msgtctx));
2259 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2260 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2261 (le32toh(rb->msgflags) >> 8) & 0xff);
2262 }
2263 #endif
2264
2265 /*
2266 * Dump a transport failure reply.
2267 */
2268 static void
2269 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2270 {
2271
2272 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2273
2274 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2275 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2276 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2277 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2278 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2279 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2280 }
2281
2282 /*
2283 * Translate an I2O ASCII field into a C string.
2284 */
2285 void
2286 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2287 {
2288 int hc, lc, i, nit;
2289
2290 dlen--;
2291 lc = 0;
2292 hc = 0;
2293 i = 0;
2294
2295 /*
2296 * DPT use NUL as a space, whereas AMI use it as a terminator. The
2297 * spec has nothing to say about it. Since AMI fields are usually
2298 * filled with junk after the terminator, we treat NUL as a
* terminator ("nit") unless the IOP's vendor is DPT.
2299 */
2300 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2301
2302 while (slen-- != 0 && dlen-- != 0) {
2303 if (nit && *src == '\0')
2304 break;
2305 else if (*src <= 0x20 || *src >= 0x7f) {
2306 if (hc)
2307 dst[i++] = ' ';
2308 } else {
2309 hc = 1;
2310 dst[i++] = *src;
2311 lc = i;
2312 }
2313 src++;
2314 }
2315
2316 dst[lc] = '\0';
2317 }
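
/*
 * Example of the above: given the field "FOO\0junk" from a non-DPT
 * IOP, the NUL terminates the scan and dst becomes "FOO".  From a DPT
 * IOP the NUL is rendered as a space instead, printable trailing junk
 * is copied, and non-printable junk collapses to blanks that lc trims
 * from the end of the result.
 */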
2318
2319 /*
2320 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2321 */
2322 int
2323 iop_print_ident(struct iop_softc *sc, int tid)
2324 {
2325 struct {
2326 struct i2o_param_op_results pr;
2327 struct i2o_param_read_results prr;
2328 struct i2o_param_device_identity di;
2329 } __attribute__ ((__packed__)) p;
2330 char buf[32];
2331 int rv;
2332
2333 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2334 sizeof(p), NULL);
2335 if (rv != 0)
2336 return (rv);
2337
2338 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2339 sizeof(buf));
2340 printf(" <%s, ", buf);
2341 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2342 sizeof(buf));
2343 printf("%s, ", buf);
2344 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2345 printf("%s>", buf);
2346
2347 return (0);
2348 }
2349
2350 /*
2351 * Claim or unclaim the specified TID.
2352 */
2353 int
2354 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2355 int flags)
2356 {
2357 struct iop_msg *im;
2358 struct i2o_util_claim mf;
2359 int rv, func;
2360
2361 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2362 im = iop_msg_alloc(sc, IM_WAIT);
2363
2364 /* We can use the same structure, as they're identical. */
2365 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2366 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2367 mf.msgictx = ii->ii_ictx;
2368 mf.msgtctx = im->im_tctx;
2369 mf.flags = flags;
2370
2371 rv = iop_msg_post(sc, im, &mf, 5000);
2372 iop_msg_free(sc, im);
2373 return (rv);
2374 }
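
/*
 * Usage sketch (assumes the I2O_UTIL_CLAIM_PRIMARY_USER flag from
 * i2o.h): a driver stacked on iop(4) typically claims its TID as
 * primary user at attach time and releases it again on detach:
 *
 *	iop_util_claim(sc, ii, 0, I2O_UTIL_CLAIM_PRIMARY_USER);
 *	...
 *	iop_util_claim(sc, ii, 1, I2O_UTIL_CLAIM_PRIMARY_USER);
 */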
2375
2376 /*
2377 * Perform an abort.
2378 */
2379 int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2380     int tctxabort, int flags)
2381 {
2382 struct iop_msg *im;
2383 struct i2o_util_abort mf;
2384 int rv;
2385
2386 im = iop_msg_alloc(sc, IM_WAIT);
2387
2388 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2389 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2390 mf.msgictx = ii->ii_ictx;
2391 mf.msgtctx = im->im_tctx;
2392 mf.flags = (func << 24) | flags;
2393 mf.tctxabort = tctxabort;
2394
2395 rv = iop_msg_post(sc, im, &mf, 5000);
2396 iop_msg_free(sc, im);
2397 return (rv);
2398 }
2399
2400 /*
2401 * Enable or disable reception of events for the specified device.
2402 */
2403 int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2404 {
2405 struct i2o_util_event_register mf;
2406
2407 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2408 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2409 mf.msgictx = ii->ii_ictx;
2410 mf.msgtctx = 0;
2411 mf.eventmask = mask;
2412
2413 /* This message is replied to only when events are signalled. */
2414 return (iop_post(sc, (u_int32_t *)&mf));
2415 }
2416
2417 int
2418 iopopen(dev_t dev, int flag, int mode, struct proc *p)
2419 {
2420 struct iop_softc *sc;
2421
2422 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2423 return (ENXIO);
2424 if ((sc->sc_flags & IOP_ONLINE) == 0)
2425 return (ENXIO);
2426 if ((sc->sc_flags & IOP_OPEN) != 0)
2427 return (EBUSY);
2428 sc->sc_flags |= IOP_OPEN;
2429
2430 return (0);
2431 }
2432
2433 int
2434 iopclose(dev_t dev, int flag, int mode, struct proc *p)
2435 {
2436 struct iop_softc *sc;
2437
2438 sc = device_lookup(&iop_cd, minor(dev));
2439 sc->sc_flags &= ~IOP_OPEN;
2440
2441 return (0);
2442 }
2443
2444 int
2445 iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
2446 {
2447 struct iop_softc *sc;
2448 struct iovec *iov;
2449 int rv, i;
2450
2451 if (securelevel >= 2)
2452 return (EPERM);
2453
2454 sc = device_lookup(&iop_cd, minor(dev));
2455
2456 switch (cmd) {
2457 case IOPIOCPT:
2458 return (iop_passthrough(sc, (struct ioppt *)data, p));
2459
2460 case IOPIOCGSTATUS:
2461 iov = (struct iovec *)data;
2462 i = sizeof(struct i2o_status);
2463 if (i > iov->iov_len)
2464 i = iov->iov_len;
2465 else
2466 iov->iov_len = i;
2467 if ((rv = iop_status_get(sc, 0)) == 0)
2468 rv = copyout(&sc->sc_status, iov->iov_base, i);
2469 return (rv);
2470
2471 case IOPIOCGLCT:
2472 case IOPIOCGTIDMAP:
2473 case IOPIOCRECONFIG:
2474 break;
2475
2476 default:
2477 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2478 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2479 #endif
2480 return (ENOTTY);
2481 }
2482
2483 if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2484 return (rv);
2485
2486 switch (cmd) {
2487 case IOPIOCGLCT:
2488 iov = (struct iovec *)data;
2489 i = le16toh(sc->sc_lct->tablesize) << 2;
2490 if (i > iov->iov_len)
2491 i = iov->iov_len;
2492 else
2493 iov->iov_len = i;
2494 rv = copyout(sc->sc_lct, iov->iov_base, i);
2495 break;
2496
2497 case IOPIOCRECONFIG:
2498 rv = iop_reconfigure(sc, 0);
2499 break;
2500
2501 case IOPIOCGTIDMAP:
2502 iov = (struct iovec *)data;
2503 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2504 if (i > iov->iov_len)
2505 i = iov->iov_len;
2506 else
2507 iov->iov_len = i;
2508 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2509 break;
2510 }
2511
2512 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2513 return (rv);
2514 }
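
/*
 * Illustrative userland sketch, kept under #if 0: fetching the status
 * record via IOPIOCGSTATUS in the style of iopctl(8).  The iovec is a
 * plain base/length pair here; on return iov_len holds the number of
 * bytes copied out.  The device path is an assumption.
 */
#if 0
#include <sys/types.h>
#include <sys/endian.h>
#include <sys/ioctl.h>
#include <sys/uio.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>

#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	struct i2o_status st;
	struct iovec iov;
	int fd;

	if ((fd = open("/dev/iop0", O_RDWR)) == -1)
		return (1);
	iov.iov_base = &st;
	iov.iov_len = sizeof(st);
	if (ioctl(fd, IOPIOCGSTATUS, &iov) == -1)
		return (1);
	printf("organization ID 0x%04x\n", le16toh(st.orgid));
	return (0);
}
#endif	/* 0 */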
2515
2516 static int
2517 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2518 {
2519 struct iop_msg *im;
2520 struct i2o_msg *mf;
2521 struct ioppt_buf *ptb;
2522 int rv, i, mapped;
2523
2524 mf = NULL;
2525 im = NULL;
2526 mapped = 0;	/* set once iop_msg_map() succeeds; see "bad" below */
2527
2528 if (pt->pt_msglen > IOP_MAX_MSG_SIZE ||
2529 pt->pt_msglen > (le16toh(sc->sc_status.inboundmframesize) << 2) ||
2530 pt->pt_msglen < sizeof(struct i2o_msg) ||
2531 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2532 pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2533 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2534 return (EINVAL);
2535
2536 for (i = 0; i < pt->pt_nbufs; i++)
2537 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2538 rv = ENOMEM;
2539 goto bad;
2540 }
2541
2542 mf = malloc(IOP_MAX_MSG_SIZE, M_DEVBUF, M_WAITOK);
2543 if (mf == NULL)
2544 return (ENOMEM);
2545
2546 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2547 goto bad;
2548
2549 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2550 im->im_rb = (struct i2o_reply *)mf;
2551 mf->msgictx = IOP_ICTX;
2552 mf->msgtctx = im->im_tctx;
2553
2554 for (i = 0; i < pt->pt_nbufs; i++) {
2555 ptb = &pt->pt_bufs[i];
2556 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2557 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2558 if (rv != 0)
2559 goto bad;
2560 mapped = 1;
2561 }
2562
2563 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2564 goto bad;
2565
2566 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2567 if (i > IOP_MAX_MSG_SIZE)
2568 i = IOP_MAX_MSG_SIZE;
2569 if (i > pt->pt_replylen)
2570 i = pt->pt_replylen;
2571 rv = copyout(im->im_rb, pt->pt_reply, i);
2572
2573 bad:
2574 if (mapped != 0)
2575 iop_msg_unmap(sc, im);
2576 if (im != NULL)
2577 iop_msg_free(sc, im);
2578 if (mf != NULL)
2579 free(mf, M_DEVBUF);
2580 return (rv);
2581 }
2582