/*	$NetBSD: iop.c,v 1.19.4.3 2001/12/09 19:11:16 he Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <machine/vmparam.h>
#include <machine/bus.h>

#include <vm/vm.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>
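/*
 * Busy-wait for up to `ms' milliseconds, re-testing `cond' every 100us.
 * Illustrative use (hypothetical register/bit names):
 *	POLL(5000, (iop_inl(sc, REG) & BIT) != 0);
 */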
#define	POLL(ms, cond)					\
do {							\
	int i;						\
	for (i = (ms) * 10; i; i--) {			\
		if (cond)				\
			break;				\
		DELAY(100);				\
	}						\
} while (/* CONSTCOND */0)

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#define	COMMENT(x)	NULL
#else
#define	IFVERBOSE(x)
#define	COMMENT(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

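/*
 * Worst-case scatter/gather segment count for one transfer: one segment
 * per page of IOP_MAX_XFER, plus one in case the buffer is not
 * page-aligned.
 */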
#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + NBPG - 1) / NBPG) + 1)

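/*
 * Transaction context layout: the low IOP_TCTX_SHIFT bits index the
 * message wrapper array; the remaining bits hold a per-allocation
 * generation number so that stale replies can be detected.
 */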
#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long	iop_ictxhash;
static void	*iop_sdh;
static struct	i2o_systab *iop_systab;
static int	iop_systab_size;

extern struct cfdriver iop_cd;

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char	*ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);
static int	iop_vendor_print(void *, const char *);

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
				struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

cdev_decl(iop);

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

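	/*
	 * `state' counts the set-up steps that have completed, so that
	 * bail_out below knows how much to unwind on failure.
	 */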
	state = 0;

	printf("I2O adapter");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, NBPG, 1, NBPG, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	state++;

	if (bus_dmamem_alloc(sc->sc_dmat, NBPG, NBPG, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, NBPG,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    NBPG, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT);
	if (im == NULL) {
		printf("%s: cannot allocate message wrappers\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	memset(im, 0, sizeof(*im) * sc->sc_maxib);
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0, state++; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	return;

 bail_out:
	if (state > 3 && sc->sc_ims != NULL) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, NBPG);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT);
		if (iop_systab == NULL) {
			iop_systab_size = 0;
			return;
		}

		memset(iop_systab, 0, i);
		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
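			/*
			 * IOP IDs 0 and 1 appear to be reserved by the
			 * I2O spec, hence the +2 below.
			 */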
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    30000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);

	/*
	 * Start device configuration.
	 */
	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	if ((rv = iop_reconfigure(sc, 0)) == -1) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		return;
	}
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(sc->sc_reconf_proc);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(sc->sc_reconf_proc);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_tidmap == NULL)
		return (ENOMEM);
	memset(sc->sc_tidmap, 0, sc->sc_nlctent * sizeof(struct iop_tidmap));

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0)
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

static int
iop_vendor_print(void *aux, const char *pnp)
{

	return (QUIET);
}

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX,
		    0, 1000);
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

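	/*
	 * The IOP sets the sync byte to 0xff once it has filled in the
	 * status block; poll for up to ~2.5 seconds.
	 */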
	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff)
		rv = EIO;
	else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = NBPG;
	mf->flags = IOP_INIT_CODE | ((IOP_MAX_MSG_SIZE >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * IOP_MAX_MSG_SIZE;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, NBPG,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += IOP_MAX_MSG_SIZE;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curproc);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curproc);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct i2o_reply *rf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;
	im->im_rb = rf;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curproc);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curproc);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (rf->reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		free(rf, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
	      int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curproc);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), NBPG, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curproc);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

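	/*
	 * The outbound FIFO hands back the bus address of the reply frame;
	 * subtracting the base of the reply buffer yields its offset.
	 */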
	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    IOP_MAX_MSG_SIZE, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) >= sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > IOP_MAX_MSG_SIZE)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

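	/*
	 * Stamp the wrapper's fixed slot index with a fresh generation
	 * number (the bits above IOP_TCTX_SHIFT), so that a late reply
	 * quoting a stale transaction context will not match.
	 */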
	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
	    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((IOP_MAX_MSG_SIZE / 4) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
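	/* Each SIMPLE element is two words; grow the frame size to suit. */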
	mb[0] += (nsegs << 17);
	return (0);

 bad:
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}

/*
 * Map a block I/O data transfer (different in that there's only one per
 * message maximum, and PAGE addressing may be used).  Write a scatter
 * gather list into the message frame.
 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
		void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
	    BUS_DMA_NOWAIT);
	if (rv != 0)
		return (rv);

	off = mb[0] >> 16;
	nsegs = ((IOP_MAX_MSG_SIZE / 4) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

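		/*
		 * Split each DMA segment on page boundaries: a PAGE_LIST
		 * element carries one address per page, with a single
		 * byte count up front.
		 */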
1964 /* XXX This should be done with a bus_space flag. */
1965 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
1966 slen = ds->ds_len;
1967 saddr = ds->ds_addr;
1968
1969 while (slen > 0) {
1970 eaddr = (saddr + NBPG) & ~(NBPG - 1);
1971 tlen = min(eaddr - saddr, slen);
1972 slen -= tlen;
1973 *p++ = le32toh(saddr);
1974 saddr = eaddr;
1975 nsegs++;
1976 }
1977 }
1978
1979 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
1980 I2O_SGL_END;
1981 if (out)
1982 mb[off] |= I2O_SGL_DATA_OUT;
1983 } else {
1984 p = mb + off;
1985 nsegs = dm->dm_nsegs;
1986
1987 if (out)
1988 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
1989 else
1990 flg = I2O_SGL_SIMPLE;
1991
1992 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
1993 p[0] = (u_int32_t)ds->ds_len | flg;
1994 p[1] = (u_int32_t)ds->ds_addr;
1995 }
1996
1997 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
1998 I2O_SGL_END;
1999 p[1] = (u_int32_t)ds->ds_addr;
2000 nsegs <<= 1;
2001 }
2002
2003 /* Fix up the transfer record, and sync the map. */
2004 ix->ix_flags = (out ? IX_OUT : IX_IN);
2005 ix->ix_size = xfersize;
2006 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2007     out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2008
2009 /*
2010 * Adjust the SGL offset and total message size fields. We don't
2011 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2012 */
2013 mb[0] += ((off << 4) + (nsegs << 16));
2014 return (0);
2015 }
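
/*
 * A sketch of the SIMPLE vs. PAGE_LIST decision above (illustrative
 * only; the helper is hypothetical).  Each SIMPLE element costs two
 * words, so assuming a 128-byte (32-word) frame with the SGL starting
 * at word 12, only 10 SIMPLE elements fit; a transfer fragmented
 * across 16 pages would not, but fits as a PAGE_LIST of 1 + 16 words.
 */
#if 0
static int
iop_sketch_fits_simple(bus_dmamap_t dm, u_int off)
{

	/* Two words per SIMPLE element: flags|length, then address. */
	return (dm->dm_nsegs <= (((IOP_MAX_MSG_SIZE / 4) - off) >> 1));
}
#endif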
2016
2017 /*
2018 * Unmap all data transfers associated with a message wrapper.
2019 */
2020 void
2021 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2022 {
2023 struct iop_xfer *ix;
2024 int i;
2025
2026 #ifdef I2ODEBUG
2027 if (im->im_xfer[0].ix_size == 0)
2028 panic("iop_msg_unmap: no transfers mapped");
2029 #endif
2030
2031 for (ix = im->im_xfer, i = 0;;) {
2032 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2033 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2034 BUS_DMASYNC_POSTREAD);
2035 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2036
2037 /* Only the first DMA map is static. */
2038 if (i != 0)
2039 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2040 if ((++ix)->ix_size == 0)
2041 break;
2042 if (++i >= IOP_MAX_MSG_XFERS)
2043 break;
2044 }
2045 }
2046
2047 /*
2048 * Post a message frame to the IOP's inbound queue.
2049 */
2050 int
2051 iop_post(struct iop_softc *sc, u_int32_t *mb)
2052 {
2053 u_int32_t mfa;
2054 int s;
2055
2056 #ifdef I2ODEBUG
2057 if ((mb[0] >> 16) > IOP_MAX_MSG_SIZE / 4)
2058 panic("iop_post: frame too large");
2059 #endif
2060
2061 s = splbio();
2062
2063 /* Allocate a slot with the IOP. */
2064 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2065 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2066 splx(s);
2067 printf("%s: mfa not forthcoming\n",
2068 sc->sc_dv.dv_xname);
2069 return (EAGAIN);
2070 }
2071
2072 /* Perform reply buffer DMA synchronisation. */
2073 if (sc->sc_curib++ == 0)
2074 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2075 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2076
2077 /* Copy out the message frame. */
2078 bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa, mb, mb[0] >> 16);
2079 bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa, (mb[0] >> 14) & ~3,
2080 BUS_SPACE_BARRIER_WRITE);
2081
2082 /* Post the MFA back to the IOP. */
2083 iop_outl(sc, IOP_REG_IFIFO, mfa);
2084
2085 splx(s);
2086 return (0);
2087 }
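
/*
 * A minimal sketch of an iop_post() caller: build a four-word UTIL NOP
 * frame, as iop_release_mfa() below writes directly into a frame, and
 * post it.  Illustrative only; the helper name is an assumption.
 */
#if 0
static int
iop_sketch_post_nop(struct iop_softc *sc)
{
	u_int32_t mb[4];

	mb[0] = I2O_VERSION_11 | (4 << 16);	/* version, size in words */
	mb[1] = I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP);
	mb[2] = 0;				/* initiator context */
	mb[3] = 0;				/* transaction context */
	return (iop_post(sc, mb));
}
#endif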
2088
2089 /*
2090 * Post a message to the IOP and deal with completion.
2091 */
2092 int
2093 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2094 {
2095 u_int32_t *mb;
2096 int rv, s;
2097
2098 mb = xmb;
2099
2100 /* Terminate the SGL; its last element's flag word sits two words from the frame's end. */
2101 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2102 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2103
2104 if ((rv = iop_post(sc, mb)) != 0)
2105 return (rv);
2106
2107 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2108 if ((im->im_flags & IM_POLL) != 0)
2109 iop_msg_poll(sc, im, timo);
2110 else
2111 iop_msg_wait(sc, im, timo);
2112
2113 s = splbio();
2114 if ((im->im_flags & IM_REPLIED) != 0) {
2115 if ((im->im_flags & IM_NOSTATUS) != 0)
2116 rv = 0;
2117 else if ((im->im_flags & IM_FAIL) != 0)
2118 rv = ENXIO;
2119 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2120 rv = EIO;
2121 else
2122 rv = 0;
2123 } else
2124 rv = EBUSY;
2125 splx(s);
2126 } else
2127 rv = 0;
2128
2129 return (rv);
2130 }
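
/*
 * For waited or polled messages the result above maps as follows: 0 on
 * success (or whenever IM_NOSTATUS is set), ENXIO on a transport
 * failure (IM_FAIL), EIO when the reply status is anything other than
 * I2O_STATUS_SUCCESS, and EBUSY when no reply arrived before the
 * timeout.  When neither IM_POLL nor IM_WAIT is set, 0 is returned
 * immediately after posting.
 */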
2131
2132 /*
2133 * Spin until the specified message is replied to.
2134 */
2135 static void
2136 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2137 {
2138 u_int32_t rmfa;
2139 int s, status;
2140
2141 s = splbio();
2142
2143 /* Wait for completion; timo is in ms, polled in 100us steps. */
2144 for (timo *= 10; timo != 0; timo--) {
2145 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2146 /* Double read to account for IOP bug. */
2147 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2148 if (rmfa == IOP_MFA_EMPTY)
2149 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2150 if (rmfa != IOP_MFA_EMPTY) {
2151 status = iop_handle_reply(sc, rmfa);
2152
2153 /*
2154 * Return the reply frame to the IOP's
2155 * outbound FIFO.
2156 */
2157 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2158 }
2159 }
2160 if ((im->im_flags & IM_REPLIED) != 0)
2161 break;
2162 DELAY(100);
2163 }
2164
2165 if (timo == 0) {
2166 #ifdef I2ODEBUG
2167 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2168 if (iop_status_get(sc, 1) != 0)
2169 printf("iop_msg_poll: unable to retrieve status\n");
2170 else
2171 printf("iop_msg_poll: IOP state = %d\n",
2172 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2173 #endif
2174 }
2175
2176 splx(s);
2177 }
2178
2179 /*
2180 * Sleep until the specified message is replied to.
2181 */
2182 static void
2183 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2184 {
2185 int s, rv;
2186
2187 s = splbio();
2188 if ((im->im_flags & IM_REPLIED) != 0) {
2189 splx(s);
2190 return;
2191 }
2192 rv = tsleep(im, PRIBIO, "iopmsg", timo * hz / 1000);
2193 splx(s);
2194
2195 #ifdef I2ODEBUG
2196 if (rv != 0) {
2197 printf("iop_msg_wait: tsleep() == %d\n", rv);
2198 if (iop_status_get(sc, 0) != 0)
2199 printf("iop_msg_wait: unable to retrieve status\n");
2200 else
2201 printf("iop_msg_wait: IOP state = %d\n",
2202 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2203 }
2204 #endif
2205 }
2206
2207 /*
2208 * Release an unused message frame back to the IOP's inbound fifo.
2209 */
2210 static void
2211 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2212 {
2213
2214 /* Use the frame to issue a no-op. */
2215 iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
2216 iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2217 iop_outl(sc, mfa + 8, 0);
2218 iop_outl(sc, mfa + 12, 0);
2219
2220 iop_outl(sc, IOP_REG_IFIFO, mfa);
2221 }
2222
2223 #ifdef I2ODEBUG
2224 /*
2225 * Dump a reply frame header.
2226 */
2227 static void
2228 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2229 {
2230 u_int function, detail;
2231 #ifdef I2OVERBOSE
2232 const char *statusstr;
2233 #endif
2234
2235 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2236 detail = le16toh(rb->detail);
2237
2238 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2239
2240 #ifdef I2OVERBOSE
2241 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2242 statusstr = iop_status[rb->reqstatus];
2243 else
2244 statusstr = "undefined error code";
2245
2246 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2247 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2248 #else
2249 printf("%s: function=0x%02x status=0x%02x\n",
2250 sc->sc_dv.dv_xname, function, rb->reqstatus);
2251 #endif
2252 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2253 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2254 le32toh(rb->msgtctx));
2255 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2256 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2257 (le32toh(rb->msgflags) >> 8) & 0xff);
2258 }
2259 #endif
2260
2261 /*
2262 * Dump a transport failure reply.
2263 */
2264 static void
2265 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2266 {
2267
2268 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2269
2270 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2271 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2272 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2273 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2274 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2275 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2276 }
2277
2278 /*
2279 * Translate an I2O ASCII field into a C string.
2280 */
2281 void
2282 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2283 {
2284 int hc, lc, i, nit;
2285
2286 dlen--;
2287 lc = 0;
2288 hc = 0;
2289 i = 0;
2290
2291 /*
2292 * DPT uses NUL as a space, whereas AMI uses it as a terminator; the
2293 * spec has nothing to say about it. Since AMI fields are usually
2294 * filled with junk after the terminator, stop at NUL on non-DPT IOPs.
2295 */
2296 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2297
2298 while (slen-- != 0 && dlen-- != 0) {
2299 if (nit && *src == '\0')
2300 break;
2301 else if (*src <= 0x20 || *src >= 0x7f) {
2302 if (hc)
2303 dst[i++] = ' ';
2304 } else {
2305 hc = 1;
2306 dst[i++] = *src;
2307 lc = i;
2308 }
2309 src++;
2310 }
2311
2312 dst[lc] = '\0';
2313 }
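
/*
 * Illustrative examples of iop_strvis() (buffer contents hypothetical):
 * on a non-DPT IOP, "RAID-5\0junk" yields "RAID-5", since NUL
 * terminates; on a DPT IOP, "RAID\0005" yields "RAID 5", NUL acting as
 * a space; leading blanks are skipped, and trailing blanks are trimmed
 * by the final NUL store at index lc.
 */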
2314
2315 /*
2316 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2317 */
2318 int
2319 iop_print_ident(struct iop_softc *sc, int tid)
2320 {
2321 struct {
2322 struct i2o_param_op_results pr;
2323 struct i2o_param_read_results prr;
2324 struct i2o_param_device_identity di;
2325 } __attribute__ ((__packed__)) p;
2326 char buf[32];
2327 int rv;
2328
2329 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2330 sizeof(p), NULL);
2331 if (rv != 0)
2332 return (rv);
2333
2334 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2335 sizeof(buf));
2336 printf(" <%s, ", buf);
2337 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2338 sizeof(buf));
2339 printf("%s, ", buf);
2340 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2341 printf("%s>", buf);
2342
2343 return (0);
2344 }
2345
2346 /*
2347 * Claim or unclaim the specified TID.
2348 */
2349 int
2350 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2351 int flags)
2352 {
2353 struct iop_msg *im;
2354 struct i2o_util_claim mf;
2355 int rv, func;
2356
2357 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2358 im = iop_msg_alloc(sc, IM_WAIT);
2359
2360 /* The claim and claim-release message frames are identical. */
2361 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2362 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2363 mf.msgictx = ii->ii_ictx;
2364 mf.msgtctx = im->im_tctx;
2365 mf.flags = flags;
2366
2367 rv = iop_msg_post(sc, im, &mf, 5000);
2368 iop_msg_free(sc, im);
2369 return (rv);
2370 }
2371
2372 /*
2373 * Perform an abort.
2374 */
2375 int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2376     int tctxabort, int flags)
2377 {
2378 struct iop_msg *im;
2379 struct i2o_util_abort mf;
2380 int rv;
2381
2382 im = iop_msg_alloc(sc, IM_WAIT);
2383
2384 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2385 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2386 mf.msgictx = ii->ii_ictx;
2387 mf.msgtctx = im->im_tctx;
2388 mf.flags = (func << 24) | flags;
2389 mf.tctxabort = tctxabort;
2390
2391 rv = iop_msg_post(sc, im, &mf, 5000);
2392 iop_msg_free(sc, im);
2393 return (rv);
2394 }
2395
2396 /*
2397 * Enable or disable reception of events for the specified device.
2398 */
2399 int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2400 {
2401 struct i2o_util_event_register mf;
2402
2403 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2404 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2405 mf.msgictx = ii->ii_ictx;
2406 mf.msgtctx = 0;
2407 mf.eventmask = mask;
2408
2409 /* This message is replied to only when events are signalled. */
2410 return (iop_post(sc, (u_int32_t *)&mf));
2411 }
2412
2413 int
2414 iopopen(dev_t dev, int flag, int mode, struct proc *p)
2415 {
2416 struct iop_softc *sc;
2417
2418 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2419 return (ENXIO);
2420 if ((sc->sc_flags & IOP_ONLINE) == 0)
2421 return (ENXIO);
2422 if ((sc->sc_flags & IOP_OPEN) != 0)
2423 return (EBUSY);
2424 sc->sc_flags |= IOP_OPEN;
2425
2426 return (0);
2427 }
2428
2429 int
2430 iopclose(dev_t dev, int flag, int mode, struct proc *p)
2431 {
2432 struct iop_softc *sc;
2433
2434 sc = device_lookup(&iop_cd, minor(dev));
2435 sc->sc_flags &= ~IOP_OPEN;
2436
2437 return (0);
2438 }
2439
2440 int
2441 iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
2442 {
2443 struct iop_softc *sc;
2444 struct iovec *iov;
2445 int rv, i;
2446
2447 if (securelevel >= 2)
2448 return (EPERM);
2449
2450 sc = device_lookup(&iop_cd, minor(dev));
2451
2452 switch (cmd) {
2453 case IOPIOCPT:
2454 return (iop_passthrough(sc, (struct ioppt *)data, p));
2455
2456 case IOPIOCGSTATUS:
2457 iov = (struct iovec *)data;
2458 i = sizeof(struct i2o_status);
2459 if (i > iov->iov_len)
2460 i = iov->iov_len;
2461 else
2462 iov->iov_len = i;
2463 if ((rv = iop_status_get(sc, 0)) == 0)
2464 rv = copyout(&sc->sc_status, iov->iov_base, i);
2465 return (rv);
2466
2467 case IOPIOCGLCT:
2468 case IOPIOCGTIDMAP:
2469 case IOPIOCRECONFIG:
2470 break;
2471
2472 default:
2473 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2474 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2475 #endif
2476 return (ENOTTY);
2477 }
2478
2479 if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2480 return (rv);
2481
2482 switch (cmd) {
2483 case IOPIOCGLCT:
2484 iov = (struct iovec *)data;
2485 i = le16toh(sc->sc_lct->tablesize) << 2;
2486 if (i > iov->iov_len)
2487 i = iov->iov_len;
2488 else
2489 iov->iov_len = i;
2490 rv = copyout(sc->sc_lct, iov->iov_base, i);
2491 break;
2492
2493 case IOPIOCRECONFIG:
2494 rv = iop_reconfigure(sc, 0);
2495 break;
2496
2497 case IOPIOCGTIDMAP:
2498 iov = (struct iovec *)data;
2499 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2500 if (i > iov->iov_len)
2501 i = iov->iov_len;
2502 else
2503 iov->iov_len = i;
2504 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2505 break;
2506 }
2507
2508 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2509 return (rv);
2510 }
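
/*
 * A userland sketch of the IOPIOCGSTATUS handshake above: the caller
 * describes its buffer with a struct iovec, and the driver clamps the
 * length and writes back the number of bytes actually copied out.
 * Illustrative only; the device path is an assumption and error
 * handling is minimal.
 */
#if 0
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/ioctl.h>
#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	struct i2o_status st;
	struct iovec iov;
	int fd;

	if ((fd = open("/dev/iop0", O_RDWR)) < 0)
		return (1);
	iov.iov_base = &st;
	iov.iov_len = sizeof(st);
	if (ioctl(fd, IOPIOCGSTATUS, &iov) == 0)
		printf("copied %lu bytes of IOP status\n",
		    (u_long)iov.iov_len);
	return (0);
}
#endif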
2511
2512 static int
2513 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2514 {
2515 struct iop_msg *im;
2516 struct i2o_msg *mf;
2517 struct ioppt_buf *ptb;
2518 int rv, i, mapped;
2519
2520 mf = NULL;
2521 im = NULL;
2522 mapped = 0;	/* nothing mapped yet; `bad' must not unmap */
2523
2524 if (pt->pt_msglen > IOP_MAX_MSG_SIZE ||
2525 pt->pt_msglen > (le16toh(sc->sc_status.inboundmframesize) << 2) ||
2526 pt->pt_msglen < sizeof(struct i2o_msg) ||
2527 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2528 pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2529 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2530 return (EINVAL);
2531
2532 for (i = 0; i < pt->pt_nbufs; i++)
2533 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2534 rv = ENOMEM;
2535 goto bad;
2536 }
2537
2538 mf = malloc(IOP_MAX_MSG_SIZE, M_DEVBUF, M_WAITOK);
2539 if (mf == NULL)
2540 return (ENOMEM);
2541
2542 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2543 goto bad;
2544
2545 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2546 im->im_rb = (struct i2o_reply *)mf;
2547 mf->msgictx = IOP_ICTX;
2548 mf->msgtctx = im->im_tctx;
2549
2550 for (i = 0; i < pt->pt_nbufs; i++) {
2551 ptb = &pt->pt_bufs[i];
2552 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2553 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2554 if (rv != 0)
2555 goto bad;
2556 mapped = 1;
2557 }
2558
2559 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2560 goto bad;
2561
2562 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2563 if (i > IOP_MAX_MSG_SIZE)
2564 i = IOP_MAX_MSG_SIZE;
2565 if (i > pt->pt_replylen)
2566 i = pt->pt_replylen;
2567 rv = copyout(im->im_rb, pt->pt_reply, i);
2568
2569 bad:
2570 if (mapped != 0)
2571 iop_msg_unmap(sc, im);
2572 if (im != NULL)
2573 iop_msg_free(sc, im);
2574 if (mf != NULL)
2575 free(mf, M_DEVBUF);
2576 return (rv);
2577 }
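
/*
 * A userland sketch of the IOPIOCPT passthrough validated above.  The
 * frame is a UTIL NOP purely to show the plumbing; a NOP generates no
 * reply, so a real caller would pick a function that does, or the wait
 * in iop_msg_post() would simply time out (EBUSY).  Illustrative only,
 * and IOP_MAX_MSG_SIZE is assumed visible to the caller.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <fcntl.h>

static int
iop_pt_example(int fd)
{
	struct ioppt pt;
	u_int32_t mb[4], rb[IOP_MAX_MSG_SIZE / 4];

	mb[0] = I2O_VERSION_11 | (4 << 16);	/* 4-word frame */
	mb[1] = I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP);
	mb[2] = 0;		/* replaced with IOP_ICTX by the driver */
	mb[3] = 0;		/* replaced with the wrapper's im_tctx */

	pt.pt_msg = mb;
	pt.pt_msglen = sizeof(mb);	/* >= sizeof(struct i2o_msg) */
	pt.pt_reply = rb;
	pt.pt_replylen = sizeof(rb);
	pt.pt_timo = 5000;	/* ms; must lie in [1000, 300000] */
	pt.pt_nbufs = 0;	/* no data buffers for this frame */

	return (ioctl(fd, IOPIOCPT, &pt));
}
#endif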
2578