/*	$NetBSD: iop.c,v 1.16.2.1 2001/09/07 04:45:24 thorpej Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>
#include <sys/vnode.h>

#include <miscfs/specfs/specdev.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

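/*
 * Busy-wait for `cond' to become true, polling every 100us for at most
 * `ms' milliseconds ((ms) * 10 iterations of DELAY(100)).
 */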
#define	POLL(ms, cond)					\
do {							\
	int i;						\
	for (i = (ms) * 10; i; i--) {			\
		if (cond)				\
			break;				\
		DELAY(100);				\
	}						\
} while (/* CONSTCOND */0)

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#define	COMMENT(x)	NULL
#else
#define	IFVERBOSE(x)
#define	COMMENT(x)
#endif

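/*
 * Initiator contexts are looked up by hashing the 32-bit ictx value
 * carried in each reply into one of IOP_ICTXHASH_NBUCKETS chains.
 */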
#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

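/*
 * Worst-case scatter/gather segment count for a single transfer: one
 * segment per page, plus one in case the buffer is not page aligned.
 */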
#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

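/*
 * Transaction context layout: the low IOP_TCTX_SHIFT bits index the
 * message wrapper array, and the remaining bits carry a generation
 * number that iop_msg_alloc() advances on each allocation, so that
 * stale or duplicated replies can be detected.
 */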
#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static void *iop_sdh;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

extern struct cfdriver iop_cd;

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char	*ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static int	iop_reconfigure(struct iop_softc *, u_int);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);
#ifdef notyet
static int	iop_vendor_print(void *, const char *);
#endif

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
				struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_status_get(struct iop_softc *, int);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

cdev_decl(iop);

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	state++;

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT);
	if (im == NULL) {
		printf("%s: cannot allocate message wrappers\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	memset(im, 0, sizeof(*im) * sc->sc_maxib);
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0, state++; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	return;

 bail_out:
	if (state > 4) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT);
		if (iop_systab == NULL)
			return;

		memset(iop_systab, 0, i);
		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
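			/*
			 * IOP numbers 0 and 1 appear to be reserved,
			 * hence the unit number offset of 2.
			 */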
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    30000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

#ifdef notyet
	/* Attempt to match and attach a product-specific extension. */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);
#endif

	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	if ((rv = iop_reconfigure(sc, 0)) != 0) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		return;
	}
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(sc->sc_reconf_proc);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(sc->sc_reconf_proc);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
static int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_tidmap == NULL)
		return (ENOMEM);
	memset(sc->sc_tidmap, 0, sc->sc_nlctent * sizeof(struct iop_tidmap));

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0)
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
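	/*
	 * Each registered initiator gets an equal share of the inbound
	 * frames left over once the utility initiators and the driver's
	 * own reserve (IOP_MF_RESERVE) have been taken out.
	 */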
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

#ifdef notyet
static int
iop_vendor_print(void *aux, const char *pnp)
{

	if (pnp != NULL)
		printf("vendor specific extension at %s", pnp);
	return (UNCONF);
}
#endif

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX,
		    0, 1000);
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done.\n");
}

/*
 * Retrieve IOP status.
 */
static int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

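	/*
	 * The IOP returns the status block by DMA.  The sync byte reads
	 * 0xff once the block has arrived (it is presumably written
	 * last), so poll for it, waiting ~100ms between attempts.
	 */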
	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff)
		rv = EIO;
	else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialise and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((IOP_MAX_MSG_SIZE >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
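	/* The SGL added two words to the frame; grow the size field. */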
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * IOP_MAX_MSG_SIZE;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += IOP_MAX_MSG_SIZE;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curproc);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curproc);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this is a
 * verbatim notification request, so the caller must be prepared to
 * wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct i2o_reply *rf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;
	im->im_rb = rf;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curproc);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curproc);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (rf->reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		free(rf, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
	      int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curproc);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curproc);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

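	/*
	 * We only wanted to see that the inbound queue responds; hand the
	 * borrowed MFA straight back to the IOP.
	 */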
	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    IOP_MAX_MSG_SIZE, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
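		/*
		 * The low bits of the transaction context index the
		 * wrapper array; the full value (index plus generation)
		 * must match what was handed out, or the reply is stale
		 * or corrupt.
		 */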
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) >= sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
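			/*
			 * Bits 16-31 of msgflags hold the reply size in
			 * 32-bit words; ">> 14 ... & ~3" converts that
			 * directly to a byte count.
			 */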
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > IOP_MAX_MSG_SIZE)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
	    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((IOP_MAX_MSG_SIZE / 4) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
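		/*
		 * Copy the current frame length (in words, from bits
		 * 16-19) into the SGL offset nibble at bits 4-7; this
		 * assumes the offset nibble is still zero and the frame
		 * is under 16 words long at this point.
		 */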
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
	mb[0] += (nsegs << 17);
	return (0);

 bad:
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}

/*
 * Map a block I/O data transfer (different in that there's only one per
 * message maximum, and PAGE addressing may be used).  Write a scatter
 * gather list into the message frame.
 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
		void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
	if (rv != 0)
		return (rv);

	off = mb[0] >> 16;
	nsegs = ((IOP_MAX_MSG_SIZE / 4) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
			slen = ds->ds_len;
			saddr = ds->ds_addr;

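			/*
			 * Split the segment at page boundaries: eaddr is
			 * saddr rounded up to the next boundary, so each
			 * PAGE_LIST entry covers at most one page.
			 */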
1975 while (slen > 0) {
1976 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
1977 tlen = min(eaddr - saddr, slen);
1978 slen -= tlen;
1979 *p++ = le32toh(saddr);
1980 saddr = eaddr;
1981 nsegs++;
1982 }
1983 }
1984
1985 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
1986 I2O_SGL_END;
1987 if (out)
1988 mb[off] |= I2O_SGL_DATA_OUT;
1989 } else {
1990 p = mb + off;
1991 nsegs = dm->dm_nsegs;
1992
1993 if (out)
1994 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
1995 else
1996 flg = I2O_SGL_SIMPLE;
1997
1998 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
1999 p[0] = (u_int32_t)ds->ds_len | flg;
2000 p[1] = (u_int32_t)ds->ds_addr;
2001 }
2002
2003 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2004 I2O_SGL_END;
2005 p[1] = (u_int32_t)ds->ds_addr;
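/*
 * nsegs currently counts SIMPLE elements; convert it to a word count
 * (two words per element) for the size adjustment below.
 */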
2006 nsegs <<= 1;
2007 }
2008
2009 /* Fix up the transfer record, and sync the map. */
2010 ix->ix_flags = (out ? IX_OUT : IX_IN);
2011 ix->ix_size = xfersize;
bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2014
2015 /*
2016 * Adjust the SGL offset and total message size fields. We don't
2017 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2018 */
2019 mb[0] += ((off << 4) + (nsegs << 16));
2020 return (0);
2021 }
2022
2023 /*
2024 * Unmap all data transfers associated with a message wrapper.
2025 */
2026 void
2027 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2028 {
2029 struct iop_xfer *ix;
2030 int i;
2031
2032 #ifdef I2ODEBUG
2033 if (im->im_xfer[0].ix_size == 0)
2034 panic("iop_msg_unmap: no transfers mapped");
2035 #endif
2036
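/*
 * Mapped transfers are packed at the front of im_xfer[]; an entry with
 * a zero ix_size terminates the list.
 */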
2037 for (ix = im->im_xfer, i = 0;;) {
2038 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2039 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2040 BUS_DMASYNC_POSTREAD);
2041 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2042
2043 /* Only the first DMA map is static. */
2044 if (i != 0)
2045 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
/* Don't walk past the end of the transfer array. */
if (++i >= IOP_MAX_MSG_XFERS)
break;
if ((++ix)->ix_size == 0)
break;
2050 }
2051 }
2052
2053 /*
2054 * Post a message frame to the IOP's inbound queue.
2055 */
2056 int
2057 iop_post(struct iop_softc *sc, u_int32_t *mb)
2058 {
2059 u_int32_t mfa;
2060 int s;
2061
2062 #ifdef I2ODEBUG
2063 if ((mb[0] >> 16) > IOP_MAX_MSG_SIZE / 4)
2064 panic("iop_post: frame too large");
2065 #endif
2066
2067 s = splbio();
2068
2069 /* Allocate a slot with the IOP. */
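/*
 * The inbound FIFO can spuriously return IOP_MFA_EMPTY on the first
 * read, so retry once before giving up (cf. the double read in
 * iop_msg_poll()).
 */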
2070 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2071 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2072 splx(s);
2073 printf("%s: mfa not forthcoming\n",
2074 sc->sc_dv.dv_xname);
2075 return (EAGAIN);
2076 }
2077
2078 /* Perform reply buffer DMA synchronisation. */
2079 if (sc->sc_curib++ == 0)
2080 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2081 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2082
2083 /* Copy out the message frame. */
2084 bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa, mb, mb[0] >> 16);
2085 bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa, (mb[0] >> 14) & ~3,
2086 BUS_SPACE_BARRIER_WRITE);
2087
2088 /* Post the MFA back to the IOP. */
2089 iop_outl(sc, IOP_REG_IFIFO, mfa);
2090
2091 splx(s);
2092 return (0);
2093 }
2094
2095 /*
2096 * Post a message to the IOP and deal with completion.
2097 */
2098 int
2099 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2100 {
2101 u_int32_t *mb;
2102 int rv, s;
2103
2104 mb = xmb;
2105
/*
 * Terminate the scatter/gather list chain; the final SIMPLE element
 * starts two words before the end of the frame.
 */
if ((im->im_flags & IM_SGLOFFADJ) != 0)
mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2109
2110 if ((rv = iop_post(sc, mb)) != 0)
2111 return (rv);
2112
2113 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2114 if ((im->im_flags & IM_POLL) != 0)
2115 iop_msg_poll(sc, im, timo);
2116 else
2117 iop_msg_wait(sc, im, timo);
2118
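/*
 * Map the completion state onto an errno: a transport failure gives
 * ENXIO, an unsuccessful request status gives EIO, and a missing
 * reply gives EBUSY.
 */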
2119 s = splbio();
2120 if ((im->im_flags & IM_REPLIED) != 0) {
2121 if ((im->im_flags & IM_NOSTATUS) != 0)
2122 rv = 0;
2123 else if ((im->im_flags & IM_FAIL) != 0)
2124 rv = ENXIO;
2125 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2126 rv = EIO;
2127 else
2128 rv = 0;
2129 } else
2130 rv = EBUSY;
2131 splx(s);
2132 } else
2133 rv = 0;
2134
2135 return (rv);
2136 }
2137
2138 /*
2139 * Spin until the specified message is replied to.
2140 */
2141 static void
2142 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2143 {
2144 u_int32_t rmfa;
2145 int s, status;
2146
2147 s = splbio();
2148
2149 /* Wait for completion. */
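/* timo is in milliseconds; we poll in 100us steps, hence the scaling. */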
2150 for (timo *= 10; timo != 0; timo--) {
2151 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2152 /* Double read to account for IOP bug. */
2153 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2154 if (rmfa == IOP_MFA_EMPTY)
2155 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2156 if (rmfa != IOP_MFA_EMPTY) {
2157 status = iop_handle_reply(sc, rmfa);
2158
2159 /*
2160 * Return the reply frame to the IOP's
2161 * outbound FIFO.
2162 */
2163 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2164 }
2165 }
2166 if ((im->im_flags & IM_REPLIED) != 0)
2167 break;
2168 DELAY(100);
2169 }
2170
2171 if (timo == 0) {
2172 #ifdef I2ODEBUG
2173 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2174 if (iop_status_get(sc, 1) != 0)
2175 printf("iop_msg_poll: unable to retrieve status\n");
2176 else
2177 printf("iop_msg_poll: IOP state = %d\n",
2178 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2179 #endif
2180 }
2181
2182 splx(s);
2183 }
2184
2185 /*
2186 * Sleep until the specified message is replied to.
2187 */
2188 static void
2189 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2190 {
2191 int s, rv;
2192
2193 s = splbio();
2194 if ((im->im_flags & IM_REPLIED) != 0) {
2195 splx(s);
2196 return;
2197 }
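/* timo is in milliseconds; tsleep() takes ticks. */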
2198 rv = tsleep(im, PRIBIO, "iopmsg", timo * hz / 1000);
2199 splx(s);
2200
2201 #ifdef I2ODEBUG
2202 if (rv != 0) {
2203 printf("iop_msg_wait: tsleep() == %d\n", rv);
2204 if (iop_status_get(sc, 0) != 0)
2205 printf("iop_msg_wait: unable to retrieve status\n");
2206 else
2207 printf("iop_msg_wait: IOP state = %d\n",
2208 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2209 }
2210 #endif
2211 }
2212
2213 /*
2214 * Release an unused message frame back to the IOP's inbound fifo.
2215 */
2216 static void
2217 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2218 {
2219
2220 /* Use the frame to issue a no-op. */
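/*
 * Word 0 carries the I2O version and the frame size in words (4 << 16);
 * word 1 addresses UTIL_NOP to the IOP itself.
 */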
2221 iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
2222 iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2223 iop_outl(sc, mfa + 8, 0);
2224 iop_outl(sc, mfa + 12, 0);
2225
2226 iop_outl(sc, IOP_REG_IFIFO, mfa);
2227 }
2228
2229 #ifdef I2ODEBUG
2230 /*
2231 * Dump a reply frame header.
2232 */
2233 static void
2234 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2235 {
2236 u_int function, detail;
2237 #ifdef I2OVERBOSE
2238 const char *statusstr;
2239 #endif
2240
2241 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2242 detail = le16toh(rb->detail);
2243
2244 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2245
2246 #ifdef I2OVERBOSE
2247 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2248 statusstr = iop_status[rb->reqstatus];
2249 else
2250 statusstr = "undefined error code";
2251
2252 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2253 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2254 #else
2255 printf("%s: function=0x%02x status=0x%02x\n",
2256 sc->sc_dv.dv_xname, function, rb->reqstatus);
2257 #endif
2258 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2259 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2260 le32toh(rb->msgtctx));
2261 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2262 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2263 (le32toh(rb->msgflags) >> 8) & 0xff);
2264 }
2265 #endif
2266
2267 /*
2268 * Dump a transport failure reply.
2269 */
2270 static void
2271 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2272 {
2273
2274 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2275
2276 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2277 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2278 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2279 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2280 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2281 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2282 }
2283
2284 /*
2285 * Translate an I2O ASCII field into a C string.
2286 */
2287 void
2288 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2289 {
2290 int hc, lc, i, nit;
2291
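/*
 * The result is always NUL terminated, so reserve a byte for the NUL
 * up front. hc is set once a printable character has been seen (to
 * discard leading blanks); lc tracks the length up to the last
 * printable character (to discard trailing blanks).
 */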
2292 dlen--;
2293 lc = 0;
2294 hc = 0;
2295 i = 0;
2296
/*
 * DPT use NUL as a space, whereas AMI use it as a terminator. The
 * spec has nothing to say about it. Since AMI fields are usually
 * filled with junk after the terminator, we treat NUL as a terminator
 * unless the IOP is from DPT.
 */
2302 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2303
2304 while (slen-- != 0 && dlen-- != 0) {
2305 if (nit && *src == '\0')
2306 break;
2307 else if (*src <= 0x20 || *src >= 0x7f) {
2308 if (hc)
2309 dst[i++] = ' ';
2310 } else {
2311 hc = 1;
2312 dst[i++] = *src;
2313 lc = i;
2314 }
2315 src++;
2316 }
2317
2318 dst[lc] = '\0';
2319 }
2320
2321 /*
2322 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2323 */
2324 int
2325 iop_print_ident(struct iop_softc *sc, int tid)
2326 {
2327 struct {
2328 struct i2o_param_op_results pr;
2329 struct i2o_param_read_results prr;
2330 struct i2o_param_device_identity di;
2331 } __attribute__ ((__packed__)) p;
2332 char buf[32];
2333 int rv;
2334
2335 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2336 sizeof(p), NULL);
2337 if (rv != 0)
2338 return (rv);
2339
2340 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2341 sizeof(buf));
2342 printf(" <%s, ", buf);
2343 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2344 sizeof(buf));
2345 printf("%s, ", buf);
2346 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2347 printf("%s>", buf);
2348
2349 return (0);
2350 }
2351
2352 /*
2353 * Claim or unclaim the specified TID.
2354 */
2355 int
2356 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2357 int flags)
2358 {
2359 struct iop_msg *im;
2360 struct i2o_util_claim mf;
2361 int rv, func;
2362
2363 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2364 im = iop_msg_alloc(sc, IM_WAIT);
2365
2366 /* We can use the same structure, as they're identical. */
2367 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2368 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2369 mf.msgictx = ii->ii_ictx;
2370 mf.msgtctx = im->im_tctx;
2371 mf.flags = flags;
2372
2373 rv = iop_msg_post(sc, im, &mf, 5000);
2374 iop_msg_free(sc, im);
2375 return (rv);
2376 }
2377
2378 /*
2379 * Perform an abort.
2380 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
int tctxabort, int flags)
2383 {
2384 struct iop_msg *im;
2385 struct i2o_util_abort mf;
2386 int rv;
2387
2388 im = iop_msg_alloc(sc, IM_WAIT);
2389
2390 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2391 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2392 mf.msgictx = ii->ii_ictx;
2393 mf.msgtctx = im->im_tctx;
2394 mf.flags = (func << 24) | flags;
2395 mf.tctxabort = tctxabort;
2396
2397 rv = iop_msg_post(sc, im, &mf, 5000);
2398 iop_msg_free(sc, im);
2399 return (rv);
2400 }
2401
2402 /*
2403 * Enable or disable reception of events for the specified device.
2404 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2406 {
2407 struct i2o_util_event_register mf;
2408
2409 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2410 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2411 mf.msgictx = ii->ii_ictx;
2412 mf.msgtctx = 0;
2413 mf.eventmask = mask;
2414
2415 /* This message is replied to only when events are signalled. */
2416 return (iop_post(sc, (u_int32_t *)&mf));
2417 }
2418
2419 int
2420 iopopen(struct vnode *devvp, int flag, int mode, struct proc *p)
2421 {
2422 struct iop_softc *sc;
2423
2424 if ((sc = device_lookup(&iop_cd, minor(devvp->v_rdev))) == NULL)
2425 return (ENXIO);
2426 devvp->v_devcookie = sc;
2427 if ((sc->sc_flags & IOP_ONLINE) == 0)
2428 return (ENXIO);
2429 if ((sc->sc_flags & IOP_OPEN) != 0)
2430 return (EBUSY);
2431 sc->sc_flags |= IOP_OPEN;
2432
2433 return (0);
2434 }
2435
2436 int
2437 iopclose(struct vnode *devvp, int flag, int mode, struct proc *p)
2438 {
2439 struct iop_softc *sc;
2440
2441 sc = devvp->v_devcookie;
2442 sc->sc_flags &= ~IOP_OPEN;
2443
2444 return (0);
2445 }
2446
2447 int
2448 iopioctl(struct vnode *devvp, u_long cmd, caddr_t data, int flag,
2449 struct proc *p)
2450 {
2451 struct iop_softc *sc;
2452 struct iovec *iov;
2453 int rv, i;
2454
2455 if (securelevel >= 2)
2456 return (EPERM);
2457
2458 sc = devvp->v_devcookie;
2459
2460 switch (cmd) {
2461 case IOPIOCPT:
2462 return (iop_passthrough(sc, (struct ioppt *)data, p));
2463
2464 case IOPIOCGSTATUS:
2465 iov = (struct iovec *)data;
2466 i = sizeof(struct i2o_status);
2467 if (i > iov->iov_len)
2468 i = iov->iov_len;
2469 else
2470 iov->iov_len = i;
2471 if ((rv = iop_status_get(sc, 0)) == 0)
2472 rv = copyout(&sc->sc_status, iov->iov_base, i);
2473 return (rv);
2474
2475 case IOPIOCGLCT:
2476 case IOPIOCGTIDMAP:
2477 case IOPIOCRECONFIG:
2478 break;
2479
2480 default:
2481 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2482 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2483 #endif
2484 return (ENOTTY);
2485 }
2486
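/*
 * The remaining requests touch configuration state (the LCT and the
 * TID map); take the configuration lock while servicing them.
 */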
2487 if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2488 return (rv);
2489
2490 switch (cmd) {
2491 case IOPIOCGLCT:
2492 iov = (struct iovec *)data;
2493 i = le16toh(sc->sc_lct->tablesize) << 2;
2494 if (i > iov->iov_len)
2495 i = iov->iov_len;
2496 else
2497 iov->iov_len = i;
2498 rv = copyout(sc->sc_lct, iov->iov_base, i);
2499 break;
2500
2501 case IOPIOCRECONFIG:
2502 rv = iop_reconfigure(sc, 0);
2503 break;
2504
2505 case IOPIOCGTIDMAP:
2506 iov = (struct iovec *)data;
2507 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2508 if (i > iov->iov_len)
2509 i = iov->iov_len;
2510 else
2511 iov->iov_len = i;
2512 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2513 break;
2514 }
2515
2516 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2517 return (rv);
2518 }
2519
2520 static int
2521 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2522 {
2523 struct iop_msg *im;
2524 struct i2o_msg *mf;
2525 struct ioppt_buf *ptb;
2526 int rv, i, mapped;
2527
2528 mf = NULL;
2529 im = NULL;
mapped = 0;
2531
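/*
 * Sanity check the request: the frame must fit both our limit and the
 * IOP's inbound frame size (inboundmframesize is in words, hence the
 * shift), and the timeout must lie between one second and five
 * minutes.
 */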
2532 if (pt->pt_msglen > IOP_MAX_MSG_SIZE ||
2533 pt->pt_msglen > (le16toh(sc->sc_status.inboundmframesize) << 2) ||
2534 pt->pt_msglen < sizeof(struct i2o_msg) ||
2535 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2536 pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2537 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2538 return (EINVAL);
2539
2540 for (i = 0; i < pt->pt_nbufs; i++)
2541 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2542 rv = ENOMEM;
2543 goto bad;
2544 }
2545
2546 mf = malloc(IOP_MAX_MSG_SIZE, M_DEVBUF, M_WAITOK);
2547 if (mf == NULL)
2548 return (ENOMEM);
2549
2550 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2551 goto bad;
2552
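/*
 * The frame buffer does double duty: im_rb points back at it, and on
 * completion the reply is copied there so that it can be handed back
 * to the caller. IM_NOSTATUS means the request status is not
 * interpreted on our behalf.
 */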
2553 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2554 im->im_rb = (struct i2o_reply *)mf;
2555 mf->msgictx = IOP_ICTX;
2556 mf->msgtctx = im->im_tctx;
2557
2558 for (i = 0; i < pt->pt_nbufs; i++) {
2559 ptb = &pt->pt_bufs[i];
2560 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2561 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2562 if (rv != 0)
2563 goto bad;
2564 mapped = 1;
2565 }
2566
2567 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2568 goto bad;
2569
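/*
 * msgflags bits 16..31 hold the reply size in words; shifting by 14
 * yields bytes. Clamp to both our buffer and the caller's.
 */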
2570 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2571 if (i > IOP_MAX_MSG_SIZE)
2572 i = IOP_MAX_MSG_SIZE;
2573 if (i > pt->pt_replylen)
2574 i = pt->pt_replylen;
2575 rv = copyout(im->im_rb, pt->pt_reply, i);
2576
2577 bad:
2578 if (mapped != 0)
2579 iop_msg_unmap(sc, im);
2580 if (im != NULL)
2581 iop_msg_free(sc, im);
2582 if (mf != NULL)
2583 free(mf, M_DEVBUF);
2584 return (rv);
2585 }
2586