1 /* $NetBSD: iop.c,v 1.10.2.9 2002/01/11 23:38:57 nathanw Exp $ */
2
3 /*-
4 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Support for I2O IOPs (intelligent I/O processors).
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.10.2.9 2002/01/11 23:38:57 nathanw Exp $");
45
46 #include "opt_i2o.h"
47 #include "iop.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/kernel.h>
52 #include <sys/device.h>
53 #include <sys/queue.h>
54 #include <sys/proc.h>
55 #include <sys/malloc.h>
56 #include <sys/ioctl.h>
57 #include <sys/endian.h>
58 #include <sys/conf.h>
59 #include <sys/kthread.h>
60
61 #include <uvm/uvm_extern.h>
62
63 #include <machine/bus.h>
64
65 #include <dev/i2o/i2o.h>
66 #include <dev/i2o/iopio.h>
67 #include <dev/i2o/iopreg.h>
68 #include <dev/i2o/iopvar.h>
69
70 #define POLL(ms, cond) \
71 do { \
72 int i; \
73 for (i = (ms) * 10; i; i--) { \
74 if (cond) \
75 break; \
76 DELAY(100); \
77 } \
78 } while (/* CONSTCOND */0)
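/*
 * Illustrative usage (a sketch; see iop_reset() for the real thing):
 * busy-wait up to 2.5 seconds for the IOP to update a status word in
 * DMA-safe scratch memory, re-checking the condition every 100us:
 *
 *	POLL(2500, (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
 *	    sizeof(*sw), BUS_DMASYNC_POSTREAD), *sw != 0));
 *
 * The comma expression lets the DMA sync run before each evaluation
 * of the condition.
 */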
79
80 #ifdef I2ODEBUG
81 #define DPRINTF(x) printf x
82 #else
83 #define DPRINTF(x)
84 #endif
85
86 #ifdef I2OVERBOSE
87 #define IFVERBOSE(x) x
88 #define COMMENT(x) NULL
89 #else
90 #define IFVERBOSE(x)
91 #define COMMENT(x)
92 #endif
93
94 #define IOP_ICTXHASH_NBUCKETS 16
95 #define IOP_ICTXHASH(ictx) (&iop_ictxhashtbl[(ictx) & iop_ictxhash])
96
97 #define IOP_MAX_SEGS (((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)
98
99 #define IOP_TCTX_SHIFT 12
100 #define IOP_TCTX_MASK ((1 << IOP_TCTX_SHIFT) - 1)
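/*
 * A transaction context (tctx) packs two fields: the low IOP_TCTX_SHIFT
 * bits index a message wrapper in sc->sc_ims[], and the remaining high
 * bits hold a generation number that iop_msg_alloc() advances on each
 * allocation.  A sketch of the validation done in iop_handle_reply():
 *
 *	im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
 *	if (tctx != im->im_tctx)
 *		... reply is stale or corrupt ...
 *
 * The generation bits catch replies that arrive after their wrapper
 * has been freed and reused.
 */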
101
102 static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
103 static u_long iop_ictxhash;
104 static void *iop_sdh;
105 static struct i2o_systab *iop_systab;
106 static int iop_systab_size;
107
108 extern struct cfdriver iop_cd;
109
110 #define IC_CONFIGURE 0x01
111 #define IC_PRIORITY 0x02
112
113 struct iop_class {
114 u_short ic_class;
115 u_short ic_flags;
116 #ifdef I2OVERBOSE
117 const char *ic_caption;
118 #endif
119 } static const iop_class[] = {
120 {
121 I2O_CLASS_EXECUTIVE,
122 0,
123 COMMENT("executive")
124 },
125 {
126 I2O_CLASS_DDM,
127 0,
128 COMMENT("device driver module")
129 },
130 {
131 I2O_CLASS_RANDOM_BLOCK_STORAGE,
132 IC_CONFIGURE | IC_PRIORITY,
133 IFVERBOSE("random block storage")
134 },
135 {
136 I2O_CLASS_SEQUENTIAL_STORAGE,
137 IC_CONFIGURE | IC_PRIORITY,
138 IFVERBOSE("sequential storage")
139 },
140 {
141 I2O_CLASS_LAN,
142 IC_CONFIGURE | IC_PRIORITY,
143 IFVERBOSE("LAN port")
144 },
145 {
146 I2O_CLASS_WAN,
147 IC_CONFIGURE | IC_PRIORITY,
148 IFVERBOSE("WAN port")
149 },
150 {
151 I2O_CLASS_FIBRE_CHANNEL_PORT,
152 IC_CONFIGURE,
153 IFVERBOSE("fibrechannel port")
154 },
155 {
156 I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
157 0,
158 COMMENT("fibrechannel peripheral")
159 },
160 {
161 I2O_CLASS_SCSI_PERIPHERAL,
162 0,
163 COMMENT("SCSI peripheral")
164 },
165 {
166 I2O_CLASS_ATE_PORT,
167 IC_CONFIGURE,
168 IFVERBOSE("ATE port")
169 },
170 {
171 I2O_CLASS_ATE_PERIPHERAL,
172 0,
173 COMMENT("ATE peripheral")
174 },
175 {
176 I2O_CLASS_FLOPPY_CONTROLLER,
177 IC_CONFIGURE,
178 IFVERBOSE("floppy controller")
179 },
180 {
181 I2O_CLASS_FLOPPY_DEVICE,
182 0,
183 COMMENT("floppy device")
184 },
185 {
186 I2O_CLASS_BUS_ADAPTER_PORT,
187 IC_CONFIGURE,
188 IFVERBOSE("bus adapter port")
189 },
190 };
191
192 #if defined(I2ODEBUG) && defined(I2OVERBOSE)
193 static const char * const iop_status[] = {
194 "success",
195 "abort (dirty)",
196 "abort (no data transfer)",
197 "abort (partial transfer)",
198 "error (dirty)",
199 "error (no data transfer)",
200 "error (partial transfer)",
201 "undefined error code",
202 "process abort (dirty)",
203 "process abort (no data transfer)",
204 "process abort (partial transfer)",
205 "transaction error",
206 };
207 #endif
208
209 static inline u_int32_t iop_inl(struct iop_softc *, int);
210 static inline void iop_outl(struct iop_softc *, int, u_int32_t);
211
212 static void iop_config_interrupts(struct device *);
213 static void iop_configure_devices(struct iop_softc *, int, int);
214 static void iop_devinfo(int, char *);
215 static int iop_print(void *, const char *);
216 static void iop_shutdown(void *);
217 static int iop_submatch(struct device *, struct cfdata *, void *);
218 static int iop_vendor_print(void *, const char *);
219
220 static void iop_adjqparam(struct iop_softc *, int);
221 static void iop_create_reconf_thread(void *);
222 static int iop_handle_reply(struct iop_softc *, u_int32_t);
223 static int iop_hrt_get(struct iop_softc *);
224 static int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
225 static void iop_intr_event(struct device *, struct iop_msg *, void *);
226 static int iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
227 u_int32_t);
228 static void iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
229 static void iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
230 static int iop_ofifo_init(struct iop_softc *);
231 static int iop_passthrough(struct iop_softc *, struct ioppt *,
232 struct proc *);
233 static void iop_reconf_thread(void *);
234 static void iop_release_mfa(struct iop_softc *, u_int32_t);
235 static int iop_reset(struct iop_softc *);
236 static int iop_systab_set(struct iop_softc *);
237 static void iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);
238
239 #ifdef I2ODEBUG
240 static void iop_reply_print(struct iop_softc *, struct i2o_reply *);
241 #endif
242
243 cdev_decl(iop);
244
245 static inline u_int32_t
246 iop_inl(struct iop_softc *sc, int off)
247 {
248
249 bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
250 BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
251 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
252 }
253
254 static inline void
255 iop_outl(struct iop_softc *sc, int off, u_int32_t val)
256 {
257
258 bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
259 bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
260 BUS_SPACE_BARRIER_WRITE);
261 }
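/*
 * Note the barrier discipline above: a read is preceded by a full
 * read/write barrier so that the register is sampled only after all
 * earlier accesses have completed, and a write is followed by a write
 * barrier so that it reaches the IOP before any subsequent access.
 */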
262
263 /*
264 * Initialise the IOP and our interface.
265 */
266 void
267 iop_init(struct iop_softc *sc, const char *intrstr)
268 {
269 struct iop_msg *im;
270 int rv, i, j, state, nsegs;
271 u_int32_t mask;
272 char ident[64];
273
274 state = 0;
275
276 printf("I2O adapter");
277
278 if (iop_ictxhashtbl == NULL)
279 iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
280 M_DEVBUF, M_NOWAIT, &iop_ictxhash);
281
282 /* Disable interrupts at the IOP. */
283 mask = iop_inl(sc, IOP_REG_INTR_MASK);
284 iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);
285
286 /* Allocate a scratch DMA map for small miscellaneous shared data. */
287 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
288 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
289 printf("%s: cannot create scratch dmamap\n",
290 sc->sc_dv.dv_xname);
291 return;
292 }
293 state++;
294
295 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
296 sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
297 printf("%s: cannot alloc scratch dmamem\n",
298 sc->sc_dv.dv_xname);
299 goto bail_out;
300 }
301 state++;
302
303 if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
304 &sc->sc_scr, 0)) {
305 printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
306 goto bail_out;
307 }
308 state++;
309
310 if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
311 PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
312 printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
313 goto bail_out;
314 }
315 state++;
316
317 #ifdef I2ODEBUG
318 /* So that our debug checks don't choke. */
319 sc->sc_framesize = 128;
320 #endif
321
322 /* Reset the adapter and request status. */
323 if ((rv = iop_reset(sc)) != 0) {
324 printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
325 goto bail_out;
326 }
327
328 if ((rv = iop_status_get(sc, 1)) != 0) {
329 printf("%s: not responding (get status)\n",
330 sc->sc_dv.dv_xname);
331 goto bail_out;
332 }
333
334 sc->sc_flags |= IOP_HAVESTATUS;
335 iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
336 ident, sizeof(ident));
337 printf(" <%s>\n", ident);
338
339 #ifdef I2ODEBUG
340 printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
341 le16toh(sc->sc_status.orgid),
342 (le32toh(sc->sc_status.segnumber) >> 12) & 15);
343 printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
344 printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
345 le32toh(sc->sc_status.desiredprivmemsize),
346 le32toh(sc->sc_status.currentprivmemsize),
347 le32toh(sc->sc_status.currentprivmembase));
348 printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
349 le32toh(sc->sc_status.desiredpriviosize),
350 le32toh(sc->sc_status.currentpriviosize),
351 le32toh(sc->sc_status.currentpriviobase));
352 #endif
353
354 sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
355 if (sc->sc_maxob > IOP_MAX_OUTBOUND)
356 sc->sc_maxob = IOP_MAX_OUTBOUND;
357 sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
358 if (sc->sc_maxib > IOP_MAX_INBOUND)
359 sc->sc_maxib = IOP_MAX_INBOUND;
360 sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
361 if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
362 sc->sc_framesize = IOP_MAX_MSG_SIZE;
363
364 #if defined(I2ODEBUG) || defined(DIAGNOSTIC)
365 if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
366 printf("%s: frame size too small (%d)\n",
367 sc->sc_dv.dv_xname, sc->sc_framesize);
368 goto bail_out;
369 }
370 #endif
371
372 /* Allocate message wrappers. */
373 im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT);
374 if (im == NULL)
goto bail_out;
memset(im, 0, sizeof(*im) * sc->sc_maxib);
375 sc->sc_ims = im;
376 SLIST_INIT(&sc->sc_im_freelist);
377
378 for (i = 0, state++; i < sc->sc_maxib; i++, im++) {
379 rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
380 IOP_MAX_SEGS, IOP_MAX_XFER, 0,
381 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
382 &im->im_xfer[0].ix_map);
383 if (rv != 0) {
384 printf("%s: couldn't create dmamap (%d)\n",
385 sc->sc_dv.dv_xname, rv);
386 goto bail_out;
387 }
388
389 im->im_tctx = i;
390 SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
391 }
392
393 /* Initialise the IOP's outbound FIFO. */
394 if (iop_ofifo_init(sc) != 0) {
395 printf("%s: unable to init outbound FIFO\n",
396 sc->sc_dv.dv_xname);
397 goto bail_out;
398 }
399
400 /*
401 * Defer further configuration until (a) interrupts are working and
402 * (b) we have enough information to build the system table.
403 */
404 config_interrupts((struct device *)sc, iop_config_interrupts);
405
406 /* Configure shutdown hook before we start any device activity. */
407 if (iop_sdh == NULL)
408 iop_sdh = shutdownhook_establish(iop_shutdown, NULL);
409
410 /* Ensure interrupts are enabled at the IOP. */
411 mask = iop_inl(sc, IOP_REG_INTR_MASK);
412 iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);
413
414 if (intrstr != NULL)
415 printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
416 intrstr);
417
418 #ifdef I2ODEBUG
419 printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
420 sc->sc_dv.dv_xname, sc->sc_maxib,
421 le32toh(sc->sc_status.maxinboundmframes),
422 sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
423 #endif
424
425 lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
426 return;
427
428 bail_out:
429 if (state > 4) {
430 for (j = 0; j < i; j++)
431 bus_dmamap_destroy(sc->sc_dmat,
432 sc->sc_ims[j].im_xfer[0].ix_map);
433 free(sc->sc_ims, M_DEVBUF);
434 }
435 if (state > 3)
436 bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
437 if (state > 2)
438 bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
439 if (state > 1)
440 bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
441 bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
442
443 }
444
445 /*
446 * Perform autoconfiguration tasks.
447 */
448 static void
449 iop_config_interrupts(struct device *self)
450 {
451 struct iop_attach_args ia;
452 struct iop_softc *sc, *iop;
453 struct i2o_systab_entry *ste;
454 int rv, i, niop;
455
456 sc = (struct iop_softc *)self;
457 LIST_INIT(&sc->sc_iilist);
458
459 printf("%s: configuring...\n", sc->sc_dv.dv_xname);
460
461 if (iop_hrt_get(sc) != 0) {
462 printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
463 return;
464 }
465
466 /*
467 * Build the system table.
468 */
469 if (iop_systab == NULL) {
470 for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
471 if ((iop = device_lookup(&iop_cd, i)) == NULL)
472 continue;
473 if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
474 continue;
475 if (iop_status_get(iop, 1) != 0) {
476 printf("%s: unable to retrieve status\n",
477 sc->sc_dv.dv_xname);
478 iop->sc_flags &= ~IOP_HAVESTATUS;
479 continue;
480 }
481 niop++;
482 }
483 if (niop == 0)
484 return;
485
486 i = sizeof(struct i2o_systab_entry) * (niop - 1) +
487 sizeof(struct i2o_systab);
488 iop_systab_size = i;
489 iop_systab = malloc(i, M_DEVBUF, M_NOWAIT);
490 if (iop_systab == NULL)
return;
491 memset(iop_systab, 0, i);
492 iop_systab->numentries = niop;
493 iop_systab->version = I2O_VERSION_11;
494
495 for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
496 if ((iop = device_lookup(&iop_cd, i)) == NULL)
497 continue;
498 if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
499 continue;
500
501 ste->orgid = iop->sc_status.orgid;
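/*
 * Unit numbers are biased by two, presumably to stay clear
 * of the reserved low IOP IDs; iop_systab_set() applies the
 * same bias.
 */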
502 ste->iopid = iop->sc_dv.dv_unit + 2;
503 ste->segnumber =
504 htole32(le32toh(iop->sc_status.segnumber) & ~4095);
505 ste->iopcaps = iop->sc_status.iopcaps;
506 ste->inboundmsgframesize =
507 iop->sc_status.inboundmframesize;
508 ste->inboundmsgportaddresslow =
509 htole32(iop->sc_memaddr + IOP_REG_IFIFO);
510 ste++;
511 }
512 }
513
514 /*
515 * Post the system table to the IOP and bring it to the OPERATIONAL
516 * state.
517 */
518 if (iop_systab_set(sc) != 0) {
519 printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
520 return;
521 }
522 if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
523 30000) != 0) {
524 printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
525 return;
526 }
527
528 /*
529 * Set up an event handler for this IOP.
530 */
531 sc->sc_eventii.ii_dv = self;
532 sc->sc_eventii.ii_intr = iop_intr_event;
533 sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
534 sc->sc_eventii.ii_tid = I2O_TID_IOP;
535 iop_initiator_register(sc, &sc->sc_eventii);
536
537 rv = iop_util_eventreg(sc, &sc->sc_eventii,
538 I2O_EVENT_EXEC_RESOURCE_LIMITS |
539 I2O_EVENT_EXEC_CONNECTION_FAIL |
540 I2O_EVENT_EXEC_ADAPTER_FAULT |
541 I2O_EVENT_EXEC_POWER_FAIL |
542 I2O_EVENT_EXEC_RESET_PENDING |
543 I2O_EVENT_EXEC_RESET_IMMINENT |
544 I2O_EVENT_EXEC_HARDWARE_FAIL |
545 I2O_EVENT_EXEC_XCT_CHANGE |
546 I2O_EVENT_EXEC_DDM_AVAILIBILITY |
547 I2O_EVENT_GEN_DEVICE_RESET |
548 I2O_EVENT_GEN_STATE_CHANGE |
549 I2O_EVENT_GEN_GENERAL_WARNING);
550 if (rv != 0) {
551 printf("%s: unable to register for events\n", sc->sc_dv.dv_xname);
552 return;
553 }
554
555 /*
556 * Attempt to match and attach a product-specific extension.
557 */
558 ia.ia_class = I2O_CLASS_ANY;
559 ia.ia_tid = I2O_TID_IOP;
560 config_found_sm(self, &ia, iop_vendor_print, iop_submatch);
561
562 /*
563 * Start device configuration.
564 */
565 lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
566 if ((rv = iop_reconfigure(sc, 0)) == -1) {
567 printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
568 return;
569 }
570 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
571
572 kthread_create(iop_create_reconf_thread, sc);
573 }
574
575 /*
576 * Create the reconfiguration thread. Called after the standard kernel
577 * threads have been created.
578 */
579 static void
580 iop_create_reconf_thread(void *cookie)
581 {
582 struct iop_softc *sc;
583 int rv;
584
585 sc = cookie;
586 sc->sc_flags |= IOP_ONLINE;
587
588 rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
589 "%s", sc->sc_dv.dv_xname);
590 if (rv != 0) {
591 printf("%s: unable to create reconfiguration thread (%d)\n",
592 sc->sc_dv.dv_xname, rv);
593 return;
594 }
595 }
596
597 /*
598 * Reconfiguration thread; listens for LCT change notification, and
599 * initiates re-configuration when one is received.
600 */
601 static void
602 iop_reconf_thread(void *cookie)
603 {
604 struct iop_softc *sc;
605 struct lwp *l;
606 struct i2o_lct lct;
607 u_int32_t chgind;
608 int rv;
609
610 sc = cookie;
611 chgind = sc->sc_chgind + 1;
612 l = curproc;
613
614 for (;;) {
615 DPRINTF(("%s: async reconfig: requested 0x%08x\n",
616 sc->sc_dv.dv_xname, chgind));
617
618 PHOLD(l);
619 rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
620 PRELE(l);
621
622 DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
623 sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));
624
625 if (rv == 0 &&
626 lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
627 iop_reconfigure(sc, le32toh(lct.changeindicator));
628 chgind = sc->sc_chgind + 1;
629 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
630 }
631
632 tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
633 }
634 }
635
636 /*
637 * Reconfigure: find new and removed devices.
638 */
639 int
640 iop_reconfigure(struct iop_softc *sc, u_int chgind)
641 {
642 struct iop_msg *im;
643 struct i2o_hba_bus_scan mf;
644 struct i2o_lct_entry *le;
645 struct iop_initiator *ii, *nextii;
646 int rv, tid, i;
647
648 /*
649 * If the reconfiguration request isn't the result of LCT change
650 * notification, then be more thorough: ask all bus ports to scan
651 * their busses. Wait up to 5 minutes for each bus port to complete
652 * the request.
653 */
654 if (chgind == 0) {
655 if ((rv = iop_lct_get(sc)) != 0) {
656 DPRINTF(("iop_reconfigure: unable to read LCT\n"));
657 return (rv);
658 }
659
660 le = sc->sc_lct->entry;
661 for (i = 0; i < sc->sc_nlctent; i++, le++) {
662 if ((le16toh(le->classid) & 4095) !=
663 I2O_CLASS_BUS_ADAPTER_PORT)
664 continue;
665 tid = le16toh(le->localtid) & 4095;
666
667 im = iop_msg_alloc(sc, IM_WAIT);
668
669 mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
670 mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
671 mf.msgictx = IOP_ICTX;
672 mf.msgtctx = im->im_tctx;
673
674 DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
675 tid));
676
677 rv = iop_msg_post(sc, im, &mf, 5*60*1000);
678 iop_msg_free(sc, im);
679 #ifdef I2ODEBUG
680 if (rv != 0)
681 printf("%s: bus scan failed\n",
682 sc->sc_dv.dv_xname);
683 #endif
684 }
685 } else if (chgind <= sc->sc_chgind) {
686 DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
687 return (0);
688 }
689
690 /* Re-read the LCT and determine if it has changed. */
691 if ((rv = iop_lct_get(sc)) != 0) {
692 DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
693 return (rv);
694 }
695 DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));
696
697 chgind = le32toh(sc->sc_lct->changeindicator);
698 if (chgind == sc->sc_chgind) {
699 DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
700 return (0);
701 }
702 DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
703 sc->sc_chgind = chgind;
704
705 if (sc->sc_tidmap != NULL)
706 free(sc->sc_tidmap, M_DEVBUF);
707 sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
708 M_DEVBUF, M_NOWAIT);
if (sc->sc_tidmap == NULL)
return (ENOMEM);
709 memset(sc->sc_tidmap, 0, sc->sc_nlctent * sizeof(struct iop_tidmap));
710
711 /* Allow 1 queued command per device while we're configuring. */
712 iop_adjqparam(sc, 1);
713
714 /*
715 * Match and attach child devices. We configure high-level devices
716 * first so that any claims will propagate throughout the LCT,
717 * hopefully masking off aliased devices as a result.
718 *
719 * Re-reading the LCT at this point is a little dangerous, but we'll
720 * trust the IOP (and the operator) to behave itself...
721 */
722 iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
723 IC_CONFIGURE | IC_PRIORITY);
724 if ((rv = iop_lct_get(sc)) != 0)
725 DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
726 iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
727 IC_CONFIGURE);
728
729 for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
730 nextii = LIST_NEXT(ii, ii_list);
731
732 /* Detach devices that were configured, but are now gone. */
733 for (i = 0; i < sc->sc_nlctent; i++)
734 if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
735 break;
736 if (i == sc->sc_nlctent ||
737 (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
738 config_detach(ii->ii_dv, DETACH_FORCE);
739
740 /*
741 * Tell initiators that existed before the re-configuration
742 * to re-configure.
743 */
744 if (ii->ii_reconfig == NULL)
745 continue;
746 if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
747 printf("%s: %s failed reconfigure (%d)\n",
748 sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
749 }
750
751 /* Re-adjust queue parameters and return. */
752 if (sc->sc_nii != 0)
753 iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
754 / sc->sc_nii);
755
756 return (0);
757 }
758
759 /*
760 * Configure I2O devices into the system.
761 */
762 static void
763 iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
764 {
765 struct iop_attach_args ia;
766 struct iop_initiator *ii;
767 const struct i2o_lct_entry *le;
768 struct device *dv;
769 int i, j, nent;
770 u_int usertid;
771
772 nent = sc->sc_nlctent;
773 for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
774 sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;
775
776 /* Ignore the device if it's in use. */
777 usertid = le32toh(le->usertid) & 4095;
778 if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
779 continue;
780
781 ia.ia_class = le16toh(le->classid) & 4095;
782 ia.ia_tid = sc->sc_tidmap[i].it_tid;
783
784 /* Ignore uninteresting devices. */
785 for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
786 if (iop_class[j].ic_class == ia.ia_class)
787 break;
788 if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
789 (iop_class[j].ic_flags & mask) != maskval)
790 continue;
791
792 /*
793 * Try to configure the device only if it's not already
794 * configured.
795 */
796 LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
797 if (ia.ia_tid == ii->ii_tid) {
798 sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
799 strcpy(sc->sc_tidmap[i].it_dvname,
800 ii->ii_dv->dv_xname);
801 break;
802 }
803 }
804 if (ii != NULL)
805 continue;
806
807 dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
808 if (dv != NULL) {
809 sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
810 strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
811 }
812 }
813 }
814
815 /*
816 * Adjust queue parameters for all child devices.
817 */
818 static void
819 iop_adjqparam(struct iop_softc *sc, int mpi)
820 {
821 struct iop_initiator *ii;
822
823 LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
824 if (ii->ii_adjqparam != NULL)
825 (*ii->ii_adjqparam)(ii->ii_dv, mpi);
826 }
827
828 static void
829 iop_devinfo(int class, char *devinfo)
830 {
831 #ifdef I2OVERBOSE
832 int i;
833
834 for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
835 if (class == iop_class[i].ic_class)
836 break;
837
838 if (i == sizeof(iop_class) / sizeof(iop_class[0]))
839 sprintf(devinfo, "device (class 0x%x)", class);
840 else
841 strcpy(devinfo, iop_class[i].ic_caption);
842 #else
843
844 sprintf(devinfo, "device (class 0x%x)", class);
845 #endif
846 }
847
848 static int
849 iop_print(void *aux, const char *pnp)
850 {
851 struct iop_attach_args *ia;
852 char devinfo[256];
853
854 ia = aux;
855
856 if (pnp != NULL) {
857 iop_devinfo(ia->ia_class, devinfo);
858 printf("%s at %s", devinfo, pnp);
859 }
860 printf(" tid %d", ia->ia_tid);
861 return (UNCONF);
862 }
863
864 static int
865 iop_vendor_print(void *aux, const char *pnp)
866 {
867
868 return (QUIET);
869 }
870
871 static int
872 iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
873 {
874 struct iop_attach_args *ia;
875
876 ia = aux;
877
878 if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
879 return (0);
880
881 return ((*cf->cf_attach->ca_match)(parent, cf, aux));
882 }
883
884 /*
885 * Shut down all configured IOPs.
886 */
887 static void
888 iop_shutdown(void *junk)
889 {
890 struct iop_softc *sc;
891 int i;
892
893 printf("shutting down iop devices...");
894
895 for (i = 0; i < iop_cd.cd_ndevs; i++) {
896 if ((sc = device_lookup(&iop_cd, i)) == NULL)
897 continue;
898 if ((sc->sc_flags & IOP_ONLINE) == 0)
899 continue;
900 iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
901 0, 5000);
902 iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX,
903 0, 1000);
904 }
905
906 /* Wait. Some boards could still be flushing, stupidly enough. */
907 delay(5000*1000);
908 printf(" done\n");
909 }
910
911 /*
912 * Retrieve IOP status.
913 */
914 int
915 iop_status_get(struct iop_softc *sc, int nosleep)
916 {
917 struct i2o_exec_status_get mf;
918 struct i2o_status *st;
919 paddr_t pa;
920 int rv, i;
921
922 pa = sc->sc_scr_seg->ds_addr;
923 st = (struct i2o_status *)sc->sc_scr;
924
925 mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
926 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
927 mf.reserved[0] = 0;
928 mf.reserved[1] = 0;
929 mf.reserved[2] = 0;
930 mf.reserved[3] = 0;
931 mf.addrlow = (u_int32_t)pa;
932 mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
933 mf.length = sizeof(sc->sc_status);
934
935 memset(st, 0, sizeof(*st));
936 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
937 BUS_DMASYNC_PREREAD);
938
939 if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
940 return (rv);
941
942 for (i = 25; i != 0; i--) {
943 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
944 sizeof(*st), BUS_DMASYNC_POSTREAD);
945 if (st->syncbyte == 0xff)
946 break;
947 if (nosleep)
948 DELAY(100*1000);
949 else
950 tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
951 }
952
953 if (st->syncbyte != 0xff) {
954 printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
955 rv = EIO;
956 } else {
957 memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
958 rv = 0;
959 }
960
961 return (rv);
962 }
963
964 /*
965 * Initialize and populate the IOP's outbound FIFO.
966 */
967 static int
968 iop_ofifo_init(struct iop_softc *sc)
969 {
970 bus_addr_t addr;
971 bus_dma_segment_t seg;
972 struct i2o_exec_outbound_init *mf;
973 int i, rseg, rv;
974 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;
975
976 sw = (u_int32_t *)sc->sc_scr;
977
978 mf = (struct i2o_exec_outbound_init *)mb;
979 mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
980 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
981 mf->msgictx = IOP_ICTX;
982 mf->msgtctx = 0;
983 mf->pagesize = PAGE_SIZE;
984 mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);
985
986 /*
987 * The I2O spec says that there are two SGLs: one for the status
988 * word, and one for a list of discarded MFAs. It continues to say
989 * that if you don't want to get the list of MFAs, an IGNORE SGL is
990 * necessary; this isn't the case (and is in fact a bad thing).
991 */
992 mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
993 I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
994 mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
995 (u_int32_t)sc->sc_scr_seg->ds_addr;
996 mb[0] += 2 << 16;
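/*
 * Each SIMPLE SG element occupies two words: flags and byte count in
 * the first, the 32-bit physical buffer address in the second.  The
 * "mb[0] += 2 << 16" above grows the message-size field (the high 16
 * bits of word 0) by the two words just appended.
 */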
997
998 *sw = 0;
999 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1000 BUS_DMASYNC_PREREAD);
1001
1002 if ((rv = iop_post(sc, mb)) != 0)
1003 return (rv);
1004
1005 POLL(5000,
1006 (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1007 BUS_DMASYNC_POSTREAD),
1008 *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));
1009
1010 if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
1011 printf("%s: outbound FIFO init failed (%d)\n",
1012 sc->sc_dv.dv_xname, le32toh(*sw));
1013 return (EIO);
1014 }
1015
1016 /* Allocate DMA safe memory for the reply frames. */
1017 if (sc->sc_rep_phys == 0) {
1018 sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;
1019
1020 rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
1021 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
1022 if (rv != 0) {
1023 printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
1024 rv);
1025 return (rv);
1026 }
1027
1028 rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
1029 &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1030 if (rv != 0) {
1031 printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
1032 return (rv);
1033 }
1034
1035 rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
1036 sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
1037 if (rv != 0) {
1038 printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
1039 rv);
1040 return (rv);
1041 }
1042
1043 rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
1044 sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
1045 if (rv != 0) {
1046 printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
1047 return (rv);
1048 }
1049
1050 sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
1051 }
1052
1053 /* Populate the outbound FIFO. */
1054 for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
1055 iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
1056 addr += sc->sc_framesize;
1057 }
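/*
 * As a worked example (assuming, say, sc_maxob == 256 and a 128 byte
 * frame): the reply buffer is 32KB and the loop above hands the IOP
 * 256 free MFAs at sc_rep_phys, sc_rep_phys + 128, and so on.
 */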
1058
1059 return (0);
1060 }
1061
1062 /*
1063 * Read the specified number of bytes from the IOP's hardware resource table.
1064 */
1065 static int
1066 iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
1067 {
1068 struct iop_msg *im;
1069 int rv;
1070 struct i2o_exec_hrt_get *mf;
1071 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1072
1073 im = iop_msg_alloc(sc, IM_WAIT);
1074 mf = (struct i2o_exec_hrt_get *)mb;
1075 mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
1076 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
1077 mf->msgictx = IOP_ICTX;
1078 mf->msgtctx = im->im_tctx;
1079
1080 iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
1081 rv = iop_msg_post(sc, im, mb, 30000);
1082 iop_msg_unmap(sc, im);
1083 iop_msg_free(sc, im);
1084 return (rv);
1085 }
1086
1087 /*
1088 * Read the IOP's hardware resource table.
1089 */
1090 static int
1091 iop_hrt_get(struct iop_softc *sc)
1092 {
1093 struct i2o_hrt hrthdr, *hrt;
1094 int size, rv;
1095
1096 PHOLD(curproc);
1097 rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
1098 PRELE(curproc);
1099 if (rv != 0)
1100 return (rv);
1101
1102 DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
1103 le16toh(hrthdr.numentries)));
1104
1105 size = sizeof(struct i2o_hrt) +
1106 (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
1107 hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
1108 if (hrt == NULL)
return (ENOMEM);
1109 if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
1110 free(hrt, M_DEVBUF);
1111 return (rv);
1112 }
1113
1114 if (sc->sc_hrt != NULL)
1115 free(sc->sc_hrt, M_DEVBUF);
1116 sc->sc_hrt = hrt;
1117 return (0);
1118 }
1119
1120 /*
1121 * Request the specified number of bytes from the IOP's logical
1122 * configuration table. If a change indicator is specified, this
1123 * is a verbatim notification request, so the caller must be
1124 * prepared to wait indefinitely.
1125 */
1126 static int
1127 iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
1128 u_int32_t chgind)
1129 {
1130 struct iop_msg *im;
1131 struct i2o_exec_lct_notify *mf;
1132 int rv;
1133 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1134
1135 im = iop_msg_alloc(sc, IM_WAIT);
1136 memset(lct, 0, size);
1137
1138 mf = (struct i2o_exec_lct_notify *)mb;
1139 mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
1140 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
1141 mf->msgictx = IOP_ICTX;
1142 mf->msgtctx = im->im_tctx;
1143 mf->classid = I2O_CLASS_ANY;
1144 mf->changeindicator = chgind;
1145
1146 #ifdef I2ODEBUG
1147 printf("iop_lct_get0: reading LCT");
1148 if (chgind != 0)
1149 printf(" (async)");
1150 printf("\n");
1151 #endif
1152
1153 iop_msg_map(sc, im, mb, lct, size, 0, NULL);
1154 rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
1155 iop_msg_unmap(sc, im);
1156 iop_msg_free(sc, im);
1157 return (rv);
1158 }
1159
1160 /*
1161 * Read the IOP's logical configuration table.
1162 */
1163 int
1164 iop_lct_get(struct iop_softc *sc)
1165 {
1166 int esize, size, rv;
1167 struct i2o_lct *lct;
1168
1169 esize = le32toh(sc->sc_status.expectedlctsize);
1170 lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
1171 if (lct == NULL)
1172 return (ENOMEM);
1173
1174 if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
1175 free(lct, M_DEVBUF);
1176 return (rv);
1177 }
1178
1179 size = le16toh(lct->tablesize) << 2;
1180 if (esize != size) {
1181 free(lct, M_DEVBUF);
1182 lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
1183 if (lct == NULL)
1184 return (ENOMEM);
1185
1186 if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
1187 free(lct, M_DEVBUF);
1188 return (rv);
1189 }
1190 }
1191
1192 /* Swap in the new LCT. */
1193 if (sc->sc_lct != NULL)
1194 free(sc->sc_lct, M_DEVBUF);
1195 sc->sc_lct = lct;
1196 sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
1197 sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
1198 sizeof(struct i2o_lct_entry);
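/*
 * struct i2o_lct evidently includes space for one i2o_lct_entry, so
 * subtract the full header size and add one entry back before dividing
 * by the entry size.
 */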
1199 return (0);
1200 }
1201
1202 /*
1203 * Request the specified parameter group from the target. If an initiator
1204 * is specified (a) don't wait for the operation to complete, but instead
1205 * let the initiator's interrupt handler deal with the reply and (b) place a
1206 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
1207 */
1208 int
1209 iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
1210 int size, struct iop_initiator *ii)
1211 {
1212 struct iop_msg *im;
1213 struct i2o_util_params_op *mf;
1214 struct i2o_reply *rf;
1215 int rv;
1216 struct iop_pgop *pgop;
1217 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1218
1219 im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
1220 if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
1221 iop_msg_free(sc, im);
1222 return (ENOMEM);
1223 }
1224 if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
1225 iop_msg_free(sc, im);
1226 free(pgop, M_DEVBUF);
1227 return (ENOMEM);
1228 }
1229 im->im_dvcontext = pgop;
1230 im->im_rb = rf;
1231
1232 mf = (struct i2o_util_params_op *)mb;
1233 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1234 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
1235 mf->msgictx = IOP_ICTX;
1236 mf->msgtctx = im->im_tctx;
1237 mf->flags = 0;
1238
1239 pgop->olh.count = htole16(1);
1240 pgop->olh.reserved = htole16(0);
1241 pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
1242 pgop->oat.fieldcount = htole16(0xffff);
1243 pgop->oat.group = htole16(group);
1244
1245 if (ii == NULL)
1246 PHOLD(curproc);
1247
1248 memset(buf, 0, size);
1249 iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
1250 iop_msg_map(sc, im, mb, buf, size, 0, NULL);
1251 rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));
1252
1253 if (ii == NULL)
1254 PRELE(curproc);
1255
1256 /* Detect errors; let partial transfers count as success. */
1257 if (ii == NULL && rv == 0) {
1258 if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
1259 le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
1260 rv = 0;
1261 else
1262 rv = (rf->reqstatus != 0 ? EIO : 0);
1263
1264 if (rv != 0)
1265 printf("%s: FIELD_GET failed for tid %d group %d\n",
1266 sc->sc_dv.dv_xname, tid, group);
1267 }
1268
1269 if (ii == NULL || rv != 0) {
1270 iop_msg_unmap(sc, im);
1271 iop_msg_free(sc, im);
1272 free(pgop, M_DEVBUF);
1273 free(rf, M_DEVBUF);
1274 }
1275
1276 return (rv);
1277 }
1278
1279 /*
1280 * Set a single field in a scalar parameter group.
1281 */
1282 int
1283 iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
1284 int size, int field)
1285 {
1286 struct iop_msg *im;
1287 struct i2o_util_params_op *mf;
1288 struct iop_pgop *pgop;
1289 int rv, totsize;
1290 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1291
1292 totsize = sizeof(*pgop) + size;
1293
1294 im = iop_msg_alloc(sc, IM_WAIT);
1295 if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
1296 iop_msg_free(sc, im);
1297 return (ENOMEM);
1298 }
1299
1300 mf = (struct i2o_util_params_op *)mb;
1301 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1302 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1303 mf->msgictx = IOP_ICTX;
1304 mf->msgtctx = im->im_tctx;
1305 mf->flags = 0;
1306
1307 pgop->olh.count = htole16(1);
1308 pgop->olh.reserved = htole16(0);
1309 pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
1310 pgop->oat.fieldcount = htole16(1);
1311 pgop->oat.group = htole16(group);
1312 pgop->oat.fields[0] = htole16(field);
1313 memcpy(pgop + 1, buf, size);
1314
1315 iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
1316 rv = iop_msg_post(sc, im, mb, 30000);
1317 if (rv != 0)
1318 printf("%s: FIELD_SET failed for tid %d group %d\n",
1319 sc->sc_dv.dv_xname, tid, group);
1320
1321 iop_msg_unmap(sc, im);
1322 iop_msg_free(sc, im);
1323 free(pgop, M_DEVBUF);
1324 return (rv);
1325 }
1326
1327 /*
1328 * Delete all rows in a tabular parameter group.
1329 */
1330 int
1331 iop_table_clear(struct iop_softc *sc, int tid, int group)
1332 {
1333 struct iop_msg *im;
1334 struct i2o_util_params_op *mf;
1335 struct iop_pgop pgop;
1336 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1337 int rv;
1338
1339 im = iop_msg_alloc(sc, IM_WAIT);
1340
1341 mf = (struct i2o_util_params_op *)mb;
1342 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1343 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1344 mf->msgictx = IOP_ICTX;
1345 mf->msgtctx = im->im_tctx;
1346 mf->flags = 0;
1347
1348 pgop.olh.count = htole16(1);
1349 pgop.olh.reserved = htole16(0);
1350 pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
1351 pgop.oat.fieldcount = htole16(0);
1352 pgop.oat.group = htole16(group);
1353 pgop.oat.fields[0] = htole16(0);
1354
1355 PHOLD(curproc);
1356 iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
1357 rv = iop_msg_post(sc, im, mb, 30000);
1358 if (rv != 0)
1359 printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
1360 sc->sc_dv.dv_xname, tid, group);
1361
1362 iop_msg_unmap(sc, im);
1363 PRELE(curproc);
1364 iop_msg_free(sc, im);
1365 return (rv);
1366 }
1367
1368 /*
1369 * Add a single row to a tabular parameter group. The row can have only one
1370 * field.
1371 */
1372 int
1373 iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
1374 int size, int row)
1375 {
1376 struct iop_msg *im;
1377 struct i2o_util_params_op *mf;
1378 struct iop_pgop *pgop;
1379 int rv, totsize;
1380 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1381
1382 totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;
1383
1384 im = iop_msg_alloc(sc, IM_WAIT);
1385 if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
1386 iop_msg_free(sc, im);
1387 return (ENOMEM);
1388 }
1389
1390 mf = (struct i2o_util_params_op *)mb;
1391 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1392 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1393 mf->msgictx = IOP_ICTX;
1394 mf->msgtctx = im->im_tctx;
1395 mf->flags = 0;
1396
1397 pgop->olh.count = htole16(1);
1398 pgop->olh.reserved = htole16(0);
1399 pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
1400 pgop->oat.fieldcount = htole16(1);
1401 pgop->oat.group = htole16(group);
1402 pgop->oat.fields[0] = htole16(0); /* FieldIdx */
1403 pgop->oat.fields[1] = htole16(1); /* RowCount */
1404 pgop->oat.fields[2] = htole16(row); /* KeyValue */
1405 memcpy(&pgop->oat.fields[3], buf, size);
1406
1407 iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
1408 rv = iop_msg_post(sc, im, mb, 30000);
1409 if (rv != 0)
1410 printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
1411 sc->sc_dv.dv_xname, tid, group, row);
1412
1413 iop_msg_unmap(sc, im);
1414 iop_msg_free(sc, im);
1415 free(pgop, M_DEVBUF);
1416 return (rv);
1417 }
1418
1419 /*
1420 * Execute a simple command (no parameters).
1421 */
1422 int
1423 iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
1424 int async, int timo)
1425 {
1426 struct iop_msg *im;
1427 struct i2o_msg mf;
1428 int rv, fl;
1429
1430 fl = (async != 0 ? IM_WAIT : IM_POLL);
1431 im = iop_msg_alloc(sc, fl);
1432
1433 mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1434 mf.msgfunc = I2O_MSGFUNC(tid, function);
1435 mf.msgictx = ictx;
1436 mf.msgtctx = im->im_tctx;
1437
1438 rv = iop_msg_post(sc, im, &mf, timo);
1439 iop_msg_free(sc, im);
1440 return (rv);
1441 }
1442
1443 /*
1444 * Post the system table to the IOP.
1445 */
1446 static int
1447 iop_systab_set(struct iop_softc *sc)
1448 {
1449 struct i2o_exec_sys_tab_set *mf;
1450 struct iop_msg *im;
1451 bus_space_handle_t bsh;
1452 bus_addr_t boo;
1453 u_int32_t mema[2], ioa[2];
1454 int rv;
1455 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1456
1457 im = iop_msg_alloc(sc, IM_WAIT);
1458
1459 mf = (struct i2o_exec_sys_tab_set *)mb;
1460 mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
1461 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
1462 mf->msgictx = IOP_ICTX;
1463 mf->msgtctx = im->im_tctx;
1464 mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
1465 mf->segnumber = 0;
1466
1467 mema[1] = sc->sc_status.desiredprivmemsize;
1468 ioa[1] = sc->sc_status.desiredpriviosize;
1469
1470 if (mema[1] != 0) {
1471 rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
1472 le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
1473 mema[0] = htole32(boo);
1474 if (rv != 0) {
1475 printf("%s: can't alloc priv mem space, err = %d\n",
1476 sc->sc_dv.dv_xname, rv);
1477 mema[0] = 0;
1478 mema[1] = 0;
1479 }
1480 }
1481
1482 if (ioa[1] != 0) {
1483 rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
1484 le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
1485 ioa[0] = htole32(boo);
1486 if (rv != 0) {
1487 printf("%s: can't alloc priv i/o space, err = %d\n",
1488 sc->sc_dv.dv_xname, rv);
1489 ioa[0] = 0;
1490 ioa[1] = 0;
1491 }
1492 }
1493
1494 PHOLD(curproc);
1495 iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
1496 iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
1497 iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
1498 rv = iop_msg_post(sc, im, mb, 5000);
1499 iop_msg_unmap(sc, im);
1500 iop_msg_free(sc, im);
1501 PRELE(curproc);
1502 return (rv);
1503 }
1504
1505 /*
1506 * Reset the IOP. Must be called with interrupts disabled.
1507 */
1508 static int
1509 iop_reset(struct iop_softc *sc)
1510 {
1511 u_int32_t mfa, *sw;
1512 struct i2o_exec_iop_reset mf;
1513 int rv;
1514 paddr_t pa;
1515
1516 sw = (u_int32_t *)sc->sc_scr;
1517 pa = sc->sc_scr_seg->ds_addr;
1518
1519 mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
1520 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
1521 mf.reserved[0] = 0;
1522 mf.reserved[1] = 0;
1523 mf.reserved[2] = 0;
1524 mf.reserved[3] = 0;
1525 mf.statuslow = (u_int32_t)pa;
1526 mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);
1527
1528 *sw = htole32(0);
1529 bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1530 BUS_DMASYNC_PREREAD);
1531
1532 if ((rv = iop_post(sc, (u_int32_t *)&mf)))
1533 return (rv);
1534
1535 POLL(2500,
1536 (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1537 BUS_DMASYNC_POSTREAD), *sw != 0));
1538 if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
1539 printf("%s: reset rejected, status 0x%x\n",
1540 sc->sc_dv.dv_xname, le32toh(*sw));
1541 return (EIO);
1542 }
1543
1544 /*
1545 * IOP is now in the INIT state. Wait no more than 10 seconds for
1546 * the inbound queue to become responsive.
1547 */
1548 POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
1549 if (mfa == IOP_MFA_EMPTY) {
1550 printf("%s: reset failed\n", sc->sc_dv.dv_xname);
1551 return (EIO);
1552 }
1553
1554 iop_release_mfa(sc, mfa);
1555 return (0);
1556 }
1557
1558 /*
1559 * Register a new initiator. Must be called with the configuration lock
1560 * held.
1561 */
1562 void
1563 iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
1564 {
1565 static int ictxgen;
1566 int s;
1567
1568 /* 0 is reserved (by us) for system messages. */
1569 ii->ii_ictx = ++ictxgen;
1570
1571 /*
1572 * `Utility initiators' don't make it onto the per-IOP initiator list
1573 * (which is used only for configuration), but do get one slot on
1574 * the inbound queue.
1575 */
1576 if ((ii->ii_flags & II_UTILITY) == 0) {
1577 LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
1578 sc->sc_nii++;
1579 } else
1580 sc->sc_nuii++;
1581
1582 s = splbio();
1583 LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
1584 splx(s);
1585 }
1586
1587 /*
1588 * Unregister an initiator. Must be called with the configuration lock
1589 * held.
1590 */
1591 void
1592 iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
1593 {
1594 int s;
1595
1596 if ((ii->ii_flags & II_UTILITY) == 0) {
1597 LIST_REMOVE(ii, ii_list);
1598 sc->sc_nii--;
1599 } else
1600 sc->sc_nuii--;
1601
1602 s = splbio();
1603 LIST_REMOVE(ii, ii_hash);
1604 splx(s);
1605 }
1606
1607 /*
1608 * Handle a reply frame from the IOP.
1609 */
1610 static int
1611 iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
1612 {
1613 struct iop_msg *im;
1614 struct i2o_reply *rb;
1615 struct i2o_fault_notify *fn;
1616 struct iop_initiator *ii;
1617 u_int off, ictx, tctx, status, size;
1618
1619 off = (int)(rmfa - sc->sc_rep_phys);
1620 rb = (struct i2o_reply *)(sc->sc_rep + off);
1621
1622 /* Perform reply queue DMA synchronisation. */
1623 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
1624 sc->sc_framesize, BUS_DMASYNC_POSTREAD);
1625 if (--sc->sc_curib != 0)
1626 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
1627 0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);
1628
1629 #ifdef I2ODEBUG
1630 if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
1631 panic("iop_handle_reply: 64-bit reply");
1632 #endif
1633 /*
1634 * Find the initiator.
1635 */
1636 ictx = le32toh(rb->msgictx);
1637 if (ictx == IOP_ICTX)
1638 ii = NULL;
1639 else {
1640 ii = LIST_FIRST(IOP_ICTXHASH(ictx));
1641 for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
1642 if (ii->ii_ictx == ictx)
1643 break;
1644 if (ii == NULL) {
1645 #ifdef I2ODEBUG
1646 iop_reply_print(sc, rb);
1647 #endif
1648 printf("%s: WARNING: bad ictx returned (%x)\n",
1649 sc->sc_dv.dv_xname, ictx);
1650 return (-1);
1651 }
1652 }
1653
1654 /*
1655 * If we received a transport failure notice, we've got to dig the
1656 * transaction context (if any) out of the original message frame,
1657 * and then release the original MFA back to the inbound FIFO.
1658 */
1659 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
1660 status = I2O_STATUS_SUCCESS;
1661
1662 fn = (struct i2o_fault_notify *)rb;
1663 tctx = iop_inl(sc, fn->lowmfa + 12);
1664 iop_release_mfa(sc, fn->lowmfa);
1665 iop_tfn_print(sc, fn);
1666 } else {
1667 status = rb->reqstatus;
1668 tctx = le32toh(rb->msgtctx);
1669 }
1670
1671 if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
1672 /*
1673 * This initiator tracks state using message wrappers.
1674 *
1675 * Find the originating message wrapper, and if requested
1676 * notify the initiator.
1677 */
1678 im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
1679 if ((tctx & IOP_TCTX_MASK) >= sc->sc_maxib ||
1680 (im->im_flags & IM_ALLOCED) == 0 ||
1681 tctx != im->im_tctx) {
1682 printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
1683 sc->sc_dv.dv_xname, tctx, im);
1684 if (im != NULL)
1685 printf("%s: flags=0x%08x tctx=0x%08x\n",
1686 sc->sc_dv.dv_xname, im->im_flags,
1687 im->im_tctx);
1688 #ifdef I2ODEBUG
1689 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
1690 iop_reply_print(sc, rb);
1691 #endif
1692 return (-1);
1693 }
1694
1695 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1696 im->im_flags |= IM_FAIL;
1697
1698 #ifdef I2ODEBUG
1699 if ((im->im_flags & IM_REPLIED) != 0)
1700 panic("%s: dup reply", sc->sc_dv.dv_xname);
1701 #endif
1702 im->im_flags |= IM_REPLIED;
1703
1704 #ifdef I2ODEBUG
1705 if (status != I2O_STATUS_SUCCESS)
1706 iop_reply_print(sc, rb);
1707 #endif
1708 im->im_reqstatus = status;
1709
1710 /* Copy the reply frame, if requested. */
1711 if (im->im_rb != NULL) {
1712 size = (le32toh(rb->msgflags) >> 14) & ~3;
1713 #ifdef I2ODEBUG
1714 if (size > sc->sc_framesize)
1715 panic("iop_handle_reply: reply too large");
1716 #endif
1717 memcpy(im->im_rb, rb, size);
1718 }
1719
1720 /* Notify the initiator. */
1721 if ((im->im_flags & IM_WAIT) != 0)
1722 wakeup(im);
1723 else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
1724 (*ii->ii_intr)(ii->ii_dv, im, rb);
1725 } else {
1726 /*
1727 * This initiator discards message wrappers.
1728 *
1729 * Simply pass the reply frame to the initiator.
1730 */
1731 (*ii->ii_intr)(ii->ii_dv, NULL, rb);
1732 }
1733
1734 return (status);
1735 }
1736
1737 /*
1738 * Handle an interrupt from the IOP.
1739 */
1740 int
1741 iop_intr(void *arg)
1742 {
1743 struct iop_softc *sc;
1744 u_int32_t rmfa;
1745
1746 sc = arg;
1747
1748 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
1749 return (0);
1750
1751 for (;;) {
1752 /* Double read to account for IOP bug. */
1753 if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
1754 rmfa = iop_inl(sc, IOP_REG_OFIFO);
1755 if (rmfa == IOP_MFA_EMPTY)
1756 break;
1757 }
1758 iop_handle_reply(sc, rmfa);
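/* Return the consumed MFA to the IOP's outbound free list. */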
1759 iop_outl(sc, IOP_REG_OFIFO, rmfa);
1760 }
1761
1762 return (1);
1763 }
1764
1765 /*
1766 * Handle an event signalled by the executive.
1767 */
1768 static void
1769 iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
1770 {
1771 struct i2o_util_event_register_reply *rb;
1772 struct iop_softc *sc;
1773 u_int event;
1774
1775 sc = (struct iop_softc *)dv;
1776 rb = reply;
1777
1778 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1779 return;
1780
1781 event = le32toh(rb->event);
1782 printf("%s: event 0x%08x received\n", dv->dv_xname, event);
1783 }
1784
1785 /*
1786 * Allocate a message wrapper.
1787 */
1788 struct iop_msg *
1789 iop_msg_alloc(struct iop_softc *sc, int flags)
1790 {
1791 struct iop_msg *im;
1792 static u_int tctxgen;
1793 int s, i;
1794
1795 #ifdef I2ODEBUG
1796 if ((flags & IM_SYSMASK) != 0)
1797 panic("iop_msg_alloc: system flags specified");
1798 #endif
1799
1800 s = splbio();
1801 im = SLIST_FIRST(&sc->sc_im_freelist);
1802 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
1803 if (im == NULL)
1804 panic("iop_msg_alloc: no free wrappers");
1805 #endif
1806 SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
1807 splx(s);
1808
1809 im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
1810 tctxgen += (1 << IOP_TCTX_SHIFT);
1811 im->im_flags = flags | IM_ALLOCED;
1812 im->im_rb = NULL;
1813 i = 0;
1814 do {
1815 im->im_xfer[i++].ix_size = 0;
1816 } while (i < IOP_MAX_MSG_XFERS);
1817
1818 return (im);
1819 }
1820
1821 /*
1822 * Free a message wrapper.
1823 */
1824 void
1825 iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
1826 {
1827 int s;
1828
1829 #ifdef I2ODEBUG
1830 if ((im->im_flags & IM_ALLOCED) == 0)
1831 panic("iop_msg_free: wrapper not allocated");
1832 #endif
1833
1834 im->im_flags = 0;
1835 s = splbio();
1836 SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
1837 splx(s);
1838 }
1839
1840 /*
1841 * Map a data transfer. Write a scatter-gather list into the message frame.
1842 */
1843 int
1844 iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1845 void *xferaddr, int xfersize, int out, struct proc *up)
1846 {
1847 bus_dmamap_t dm;
1848 bus_dma_segment_t *ds;
1849 struct iop_xfer *ix;
1850 u_int rv, i, nsegs, flg, off, xn;
1851 u_int32_t *p;
1852
1853 for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
1854 if (ix->ix_size == 0)
1855 break;
1856
1857 #ifdef I2ODEBUG
1858 if (xfersize == 0)
1859 panic("iop_msg_map: null transfer");
1860 if (xfersize > IOP_MAX_XFER)
1861 panic("iop_msg_map: transfer too large");
1862 if (xn == IOP_MAX_MSG_XFERS)
1863 panic("iop_msg_map: too many xfers");
1864 #endif
1865
1866 /*
1867 * Only the first DMA map is static.
1868 */
1869 if (xn != 0) {
1870 rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
1871 IOP_MAX_SEGS, IOP_MAX_XFER, 0,
1872 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
1873 if (rv != 0)
1874 return (rv);
1875 }
1876
1877 dm = ix->ix_map;
1878 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
1879 (up == NULL ? BUS_DMA_NOWAIT : 0));
1880 if (rv != 0)
1881 goto bad;
1882
1883 /*
1884 * How many SIMPLE SG elements can we fit in this message?
1885 */
1886 off = mb[0] >> 16;
1887 p = mb + off;
1888 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
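/*
 * Worked example: with a 128 byte frame (32 words) and, say, a 10 word
 * header, (32 - 10) >> 1 == 11 SIMPLE elements fit.  "off" is the
 * current message size in words, which is where the SG list begins.
 */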
1889
1890 if (dm->dm_nsegs > nsegs) {
1891 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
1892 rv = EFBIG;
1893 DPRINTF(("iop_msg_map: too many segs\n"));
1894 goto bad;
1895 }
1896
1897 nsegs = dm->dm_nsegs;
1898 xfersize = 0;
1899
1900 /*
1901 * Write out the SG list.
1902 */
1903 if (out)
1904 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
1905 else
1906 flg = I2O_SGL_SIMPLE;
1907
1908 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
1909 p[0] = (u_int32_t)ds->ds_len | flg;
1910 p[1] = (u_int32_t)ds->ds_addr;
1911 xfersize += ds->ds_len;
1912 }
1913
1914 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
1915 p[1] = (u_int32_t)ds->ds_addr;
1916 xfersize += ds->ds_len;
1917
1918 /* Fix up the transfer record, and sync the map. */
1919 ix->ix_flags = (out ? IX_OUT : IX_IN);
1920 ix->ix_size = xfersize;
1921 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
1922 out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);
1923
1924 /*
1925 * If this is the first xfer we've mapped for this message, adjust
1926 * the SGL offset field in the message header.
1927 */
1928 if ((im->im_flags & IM_SGLOFFADJ) == 0) {
1929 mb[0] += (mb[0] >> 12) & 0xf0;
1930 im->im_flags |= IM_SGLOFFADJ;
1931 }
1932 mb[0] += (nsegs << 17);
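/*
 * Word 0 arithmetic: "(mb[0] >> 12) & 0xf0" copies the low nibble of
 * the message size (bits 16-19) into the SGL-offset field in bits 4-7,
 * and "nsegs << 17" adds the 2 * nsegs words just written to the size
 * field in the high 16 bits.
 */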
1933 return (0);
1934
1935 bad:
1936 if (xn != 0)
1937 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1938 return (rv);
1939 }
1940
1941 /*
1942 * Map a block I/O data transfer (different in that there's only one per
1943 * message maximum, and PAGE addressing may be used). Write a
1944 * scatter-gather list into the message frame.
1945 */
1946 int
1947 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1948 void *xferaddr, int xfersize, int out)
1949 {
1950 bus_dma_segment_t *ds;
1951 bus_dmamap_t dm;
1952 struct iop_xfer *ix;
1953 u_int rv, i, nsegs, off, slen, tlen, flg;
1954 paddr_t saddr, eaddr;
1955 u_int32_t *p;
1956
1957 #ifdef I2ODEBUG
1958 if (xfersize == 0)
1959 panic("iop_msg_map_bio: null transfer");
1960 if (xfersize > IOP_MAX_XFER)
1961 panic("iop_msg_map_bio: transfer too large");
1962 if ((im->im_flags & IM_SGLOFFADJ) != 0)
1963 panic("iop_msg_map_bio: SGLOFFADJ");
1964 #endif
1965
1966 ix = im->im_xfer;
1967 dm = ix->ix_map;
1968 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
1969 BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
1970 if (rv != 0)
1971 return (rv);
1972
1973 off = mb[0] >> 16;
1974 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
1975
1976 /*
1977 * If the transfer is highly fragmented and won't fit using SIMPLE
1978 * elements, use PAGE_LIST elements instead. SIMPLE elements are
1979 * potentially more efficient, both for us and the IOP.
1980 */
1981 if (dm->dm_nsegs > nsegs) {
1982 nsegs = 1;
1983 p = mb + off + 1;
1984
1985 /* XXX This should be done with a bus_space flag. */
1986 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
1987 slen = ds->ds_len;
1988 saddr = ds->ds_addr;
1989
1990 while (slen > 0) {
1991 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
1992 tlen = min(eaddr - saddr, slen);
1993 slen -= tlen;
1994 *p++ = le32toh(saddr);
1995 saddr = eaddr;
1996 nsegs++;
1997 }
1998 }
1999
2000 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2001 I2O_SGL_END;
2002 if (out)
2003 mb[off] |= I2O_SGL_DATA_OUT;
2004 } else {
2005 p = mb + off;
2006 nsegs = dm->dm_nsegs;
2007
2008 if (out)
2009 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2010 else
2011 flg = I2O_SGL_SIMPLE;
2012
2013 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2014 p[0] = (u_int32_t)ds->ds_len | flg;
2015 p[1] = (u_int32_t)ds->ds_addr;
2016 }
2017
2018 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2019 I2O_SGL_END;
2020 p[1] = (u_int32_t)ds->ds_addr;
2021 nsegs <<= 1;
2022 }
2023
2024 /* Fix up the transfer record, and sync the map. */
2025 ix->ix_flags = (out ? IX_OUT : IX_IN);
2026 ix->ix_size = xfersize;
2027 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2028 out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2029
2030 /*
2031 * Adjust the SGL offset and total message size fields. We don't
2032 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2033 */
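/*
 * off << 4 stores the offset (in words) in the SglOffset field of
 * the header's low byte (cf. the 0xf0 mask used in iop_msg_map);
 * nsegs << 16 grows the word count in the upper 16 bits by the
 * number of SG words written above.
 */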
2034 mb[0] += ((off << 4) + (nsegs << 16));
2035 return (0);
2036 }
2037
2038 /*
2039 * Unmap all data transfers associated with a message wrapper.
2040 */
2041 void
2042 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2043 {
2044 struct iop_xfer *ix;
2045 int i;
2046
2047 #ifdef I2ODEBUG
2048 if (im->im_xfer[0].ix_size == 0)
2049 panic("iop_msg_unmap: no transfers mapped");
2050 #endif
2051
2052 for (ix = im->im_xfer, i = 0;;) {
2053 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2054 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2055 BUS_DMASYNC_POSTREAD);
2056 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2057
2058 /* Only the first DMA map is static. */
2059 if (i != 0)
2060 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
/* Check the bound first; don't read past the end of im_xfer[]. */
2061 if (++i >= IOP_MAX_MSG_XFERS)
2062 break;
2063 if ((++ix)->ix_size == 0)
2064 break;
2065 }
2066 }
2067
2068 /*
2069 * Post a message frame to the IOP's inbound queue.
2070 */
2071 int
2072 iop_post(struct iop_softc *sc, u_int32_t *mb)
2073 {
2074 u_int32_t mfa;
2075 int s;
2076
2077 #ifdef I2ODEBUG
2078 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2079 panic("iop_post: frame too large");
2080 #endif
2081
2082 s = splbio();
2083
2084 /* Allocate a slot with the IOP. */
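/*
 * An empty inbound FIFO reads back as IOP_MFA_EMPTY; retry the read
 * once before giving up.
 */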
2085 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2086 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2087 splx(s);
2088 printf("%s: mfa not forthcoming\n",
2089 sc->sc_dv.dv_xname);
2090 return (EAGAIN);
2091 }
2092
2093 /* Perform reply buffer DMA synchronisation. */
2094 if (sc->sc_curib++ == 0)
2095 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2096 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2097
2098 /* Copy out the message frame. */
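/*
 * Bits 16-31 of mb[0] hold the frame size in 32-bit words: the
 * region write below takes mb[0] >> 16 words, and the barrier covers
 * (mb[0] >> 14) & ~3 bytes, i.e. the same size scaled by four.
 */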
2099 bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa, mb, mb[0] >> 16);
2100 bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa, (mb[0] >> 14) & ~3,
2101 BUS_SPACE_BARRIER_WRITE);
2102
2103 /* Post the MFA back to the IOP. */
2104 iop_outl(sc, IOP_REG_IFIFO, mfa);
2105
2106 splx(s);
2107 return (0);
2108 }
2109
2110 /*
2111 * Post a message to the IOP and deal with completion.
2112 */
2113 int
2114 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2115 {
2116 u_int32_t *mb;
2117 int rv, s;
2118
2119 mb = xmb;
2120
2121 /* Terminate the scatter/gather list chain. */
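/*
 * The last SIMPLE element starts two words from the end of the frame
 * (flag/count word plus address), hence the -2 below.
 */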
2122 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2123 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2124
2125 if ((rv = iop_post(sc, mb)) != 0)
2126 return (rv);
2127
2128 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2129 if ((im->im_flags & IM_POLL) != 0)
2130 iop_msg_poll(sc, im, timo);
2131 else
2132 iop_msg_wait(sc, im, timo);
2133
2134 s = splbio();
2135 if ((im->im_flags & IM_REPLIED) != 0) {
2136 if ((im->im_flags & IM_NOSTATUS) != 0)
2137 rv = 0;
2138 else if ((im->im_flags & IM_FAIL) != 0)
2139 rv = ENXIO;
2140 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2141 rv = EIO;
2142 else
2143 rv = 0;
2144 } else
2145 rv = EBUSY;
2146 splx(s);
2147 } else
2148 rv = 0;
2149
2150 return (rv);
2151 }
2152
2153 /*
2154 * Spin until the specified message is replied to.
2155 */
2156 static void
2157 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2158 {
2159 u_int32_t rmfa;
2160 int s, status;
2161
2162 s = splbio();
2163
2164 /* Wait for completion; timo is in milliseconds, polled in 100us steps. */
2165 for (timo *= 10; timo != 0; timo--) {
2166 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2167 /* Double read to account for IOP bug. */
2168 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2169 if (rmfa == IOP_MFA_EMPTY)
2170 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2171 if (rmfa != IOP_MFA_EMPTY) {
2172 status = iop_handle_reply(sc, rmfa);
2173
2174 /*
2175 * Return the reply frame to the IOP's
2176 * outbound FIFO.
2177 */
2178 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2179 }
2180 }
2181 if ((im->im_flags & IM_REPLIED) != 0)
2182 break;
2183 DELAY(100);
2184 }
2185
2186 if (timo == 0) {
2187 #ifdef I2ODEBUG
2188 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2189 if (iop_status_get(sc, 1) != 0)
2190 printf("iop_msg_poll: unable to retrieve status\n");
2191 else
2192 printf("iop_msg_poll: IOP state = %d\n",
2193 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2194 #endif
2195 }
2196
2197 splx(s);
2198 }
2199
2200 /*
2201 * Sleep until the specified message is replied to.
2202 */
2203 static void
2204 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2205 {
2206 int s, rv;
2207
2208 s = splbio();
2209 if ((im->im_flags & IM_REPLIED) != 0) {
2210 splx(s);
2211 return;
2212 }
2213 rv = tsleep(im, PRIBIO, "iopmsg", timo * hz / 1000);
2214 splx(s);
2215
2216 #ifdef I2ODEBUG
2217 if (rv != 0) {
2218 printf("iop_msg_wait: tsleep() == %d\n", rv);
2219 if (iop_status_get(sc, 0) != 0)
2220 printf("iop_msg_wait: unable to retrieve status\n");
2221 else
2222 printf("iop_msg_wait: IOP state = %d\n",
2223 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2224 }
2225 #endif
2226 }
2227
2228 /*
2229 * Release an unused message frame back to the IOP's inbound fifo.
2230 */
2231 static void
2232 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2233 {
2234
2235 /* Use the frame to issue a no-op. */
2236 iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
2237 iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2238 iop_outl(sc, mfa + 8, 0);
2239 iop_outl(sc, mfa + 12, 0);
2240
2241 iop_outl(sc, IOP_REG_IFIFO, mfa);
2242 }
2243
2244 #ifdef I2ODEBUG
2245 /*
2246 * Dump a reply frame header.
2247 */
2248 static void
2249 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2250 {
2251 u_int function, detail;
2252 #ifdef I2OVERBOSE
2253 const char *statusstr;
2254 #endif
2255
2256 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2257 detail = le16toh(rb->detail);
2258
2259 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2260
2261 #ifdef I2OVERBOSE
2262 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2263 statusstr = iop_status[rb->reqstatus];
2264 else
2265 statusstr = "undefined error code";
2266
2267 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2268 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2269 #else
2270 printf("%s: function=0x%02x status=0x%02x\n",
2271 sc->sc_dv.dv_xname, function, rb->reqstatus);
2272 #endif
2273 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2274 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2275 le32toh(rb->msgtctx));
2276 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2277 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2278 (le32toh(rb->msgflags) >> 8) & 0xff);
2279 }
2280 #endif
2281
2282 /*
2283 * Dump a transport failure reply.
2284 */
2285 static void
2286 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2287 {
2288
2289 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2290
2291 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2292 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2293 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2294 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2295 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2296 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2297 }
2298
2299 /*
2300 * Translate an I2O ASCII field into a C string.
2301 */
2302 void
2303 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2304 {
2305 int hc, lc, i, nit;
2306
2307 dlen--;
2308 lc = 0;
2309 hc = 0;
2310 i = 0;
2311
2312 /*
2313 * DPT uses NUL as a space, whereas AMI uses it as a terminator. The
2314 * spec has nothing to say about it. Since AMI fields are usually
2315 * filled with junk after the terminator, treat NUL as a terminator
2316 * unless the IOP is from DPT.
 */
2317 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2318
2319 while (slen-- != 0 && dlen-- != 0) {
2320 if (nit && *src == '\0')
2321 break;
2322 else if (*src <= 0x20 || *src >= 0x7f) {
2323 if (hc)
2324 dst[i++] = ' ';
2325 } else {
2326 hc = 1;
2327 dst[i++] = *src;
2328 lc = i;
2329 }
2330 src++;
2331 }
2332
2333 dst[lc] = '\0';
2334 }
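/*
 * Illustrative example: with an AMI IOP, a field of "RAID-5\0<junk>"
 * yields "RAID-5", since the NUL terminates the scan; on a DPT IOP
 * the NUL maps to a space instead, and any trailing blanks are
 * trimmed by the final NUL written at index `lc'.
 */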
2335
2336 /*
2337 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2338 */
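/*
 * The strings printed here end up on the autoconf attach line, e.g.
 * (illustrative) " <ADAPTEC, RAID-5 ARRAY, 370F>".
 */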
2339 int
2340 iop_print_ident(struct iop_softc *sc, int tid)
2341 {
2342 struct {
2343 struct i2o_param_op_results pr;
2344 struct i2o_param_read_results prr;
2345 struct i2o_param_device_identity di;
2346 } __attribute__ ((__packed__)) p;
2347 char buf[32];
2348 int rv;
2349
2350 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2351 sizeof(p), NULL);
2352 if (rv != 0)
2353 return (rv);
2354
2355 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2356 sizeof(buf));
2357 printf(" <%s, ", buf);
2358 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2359 sizeof(buf));
2360 printf("%s, ", buf);
2361 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2362 printf("%s>", buf);
2363
2364 return (0);
2365 }
2366
2367 /*
2368 * Claim or unclaim the specified TID.
2369 */
2370 int
2371 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2372 int flags)
2373 {
2374 struct iop_msg *im;
2375 struct i2o_util_claim mf;
2376 int rv, func;
2377
2378 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2379 im = iop_msg_alloc(sc, IM_WAIT);
2380
2381 /* We can use the same structure, as they're identical. */
2382 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2383 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2384 mf.msgictx = ii->ii_ictx;
2385 mf.msgtctx = im->im_tctx;
2386 mf.flags = flags;
2387
2388 rv = iop_msg_post(sc, im, &mf, 5000);
2389 iop_msg_free(sc, im);
2390 return (rv);
2391 }
2392
2393 /*
2394 * Perform an abort.
2395 */
2396 int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2397 int tctxabort, int flags)
2398 {
2399 struct iop_msg *im;
2400 struct i2o_util_abort mf;
2401 int rv;
2402
2403 im = iop_msg_alloc(sc, IM_WAIT);
2404
2405 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2406 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2407 mf.msgictx = ii->ii_ictx;
2408 mf.msgtctx = im->im_tctx;
2409 mf.flags = (func << 24) | flags;
2410 mf.tctxabort = tctxabort;
2411
2412 rv = iop_msg_post(sc, im, &mf, 5000);
2413 iop_msg_free(sc, im);
2414 return (rv);
2415 }
2416
2417 /*
2418 * Enable or disable reception of events for the specified device.
2419 */
2420 int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2421 {
2422 struct i2o_util_event_register mf;
2423
2424 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2425 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2426 mf.msgictx = ii->ii_ictx;
2427 mf.msgtctx = 0;
2428 mf.eventmask = mask;
2429
2430 /* This message is replied to only when events are signalled. */
2431 return (iop_post(sc, (u_int32_t *)&mf));
2432 }
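/*
 * Usage sketch (the mask bits shown are illustrative; real values
 * come from <dev/i2o/i2o.h>): an initiator wanting notification of
 * device state changes might do, at attach time:
 *
 *	iop_util_eventreg(sc, &sc->sc_eventii,
 *	    I2O_EVENT_GEN_DEVICE_RESET | I2O_EVENT_GEN_STATE_CHANGE);
 *
 * Registering again with a zero mask should disable delivery.
 */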
2433
2434 int
2435 iopopen(dev_t dev, int flag, int mode, struct proc *p)
2436 {
2437 struct iop_softc *sc;
2438
2439 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2440 return (ENXIO);
2441 if ((sc->sc_flags & IOP_ONLINE) == 0)
2442 return (ENXIO);
2443 if ((sc->sc_flags & IOP_OPEN) != 0)
2444 return (EBUSY);
2445 sc->sc_flags |= IOP_OPEN;
2446
2447 return (0);
2448 }
2449
2450 int
2451 iopclose(dev_t dev, int flag, int mode, struct proc *p)
2452 {
2453 struct iop_softc *sc;
2454
2455 sc = device_lookup(&iop_cd, minor(dev));
2456 sc->sc_flags &= ~IOP_OPEN;
2457
2458 return (0);
2459 }
2460
2461 int
2462 iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
2463 {
2464 struct iop_softc *sc;
2465 struct iovec *iov;
2466 int rv, i;
2467
2468 if (securelevel >= 2)
2469 return (EPERM);
2470
2471 sc = device_lookup(&iop_cd, minor(dev));
2472
2473 switch (cmd) {
2474 case IOPIOCPT:
2475 return (iop_passthrough(sc, (struct ioppt *)data, p));
2476
2477 case IOPIOCGSTATUS:
2478 iov = (struct iovec *)data;
2479 i = sizeof(struct i2o_status);
2480 if (i > iov->iov_len)
2481 i = iov->iov_len;
2482 else
2483 iov->iov_len = i;
2484 if ((rv = iop_status_get(sc, 0)) == 0)
2485 rv = copyout(&sc->sc_status, iov->iov_base, i);
2486 return (rv);
2487
2488 case IOPIOCGLCT:
2489 case IOPIOCGTIDMAP:
2490 case IOPIOCRECONFIG:
2491 break;
2492
2493 default:
2494 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2495 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2496 #endif
2497 return (ENOTTY);
2498 }
2499
2500 if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2501 return (rv);
2502
2503 switch (cmd) {
2504 case IOPIOCGLCT:
2505 iov = (struct iovec *)data;
2506 i = le16toh(sc->sc_lct->tablesize) << 2;
2507 if (i > iov->iov_len)
2508 i = iov->iov_len;
2509 else
2510 iov->iov_len = i;
2511 rv = copyout(sc->sc_lct, iov->iov_base, i);
2512 break;
2513
2514 case IOPIOCRECONFIG:
2515 rv = iop_reconfigure(sc, 0);
2516 break;
2517
2518 case IOPIOCGTIDMAP:
2519 iov = (struct iovec *)data;
2520 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2521 if (i > iov->iov_len)
2522 i = iov->iov_len;
2523 else
2524 iov->iov_len = i;
2525 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2526 break;
2527 }
2528
2529 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2530 return (rv);
2531 }
2532
2533 static int
2534 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2535 {
2536 struct iop_msg *im;
2537 struct i2o_msg *mf;
2538 struct ioppt_buf *ptb;
2539 int rv, i, mapped;
2540
2541 mf = NULL;
2542 im = NULL;
2543 mapped = 0;
2544
2545 if (pt->pt_msglen > sc->sc_framesize ||
2546 pt->pt_msglen < sizeof(struct i2o_msg) ||
2547 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2548 pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2549 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2550 return (EINVAL);
2551
2552 for (i = 0; i < pt->pt_nbufs; i++)
2553 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2554 rv = ENOMEM;
2555 goto bad;
2556 }
2557
2558 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2559 if (mf == NULL)
2560 return (ENOMEM);
2561
2562 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2563 goto bad;
2564
2565 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2566 im->im_rb = (struct i2o_reply *)mf;
2567 mf->msgictx = IOP_ICTX;
2568 mf->msgtctx = im->im_tctx;
2569
2570 for (i = 0; i < pt->pt_nbufs; i++) {
2571 ptb = &pt->pt_bufs[i];
2572 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2573 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2574 if (rv != 0)
2575 goto bad;
2576 mapped = 1;
2577 }
2578
2579 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2580 goto bad;
2581
2582 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2583 if (i > sc->sc_framesize)
2584 i = sc->sc_framesize;
2585 if (i > pt->pt_replylen)
2586 i = pt->pt_replylen;
2587 rv = copyout(im->im_rb, pt->pt_reply, i);
2588
2589 bad:
2590 if (mapped != 0)
2591 iop_msg_unmap(sc, im);
2592 if (im != NULL)
2593 iop_msg_free(sc, im);
2594 if (mf != NULL)
2595 free(mf, M_DEVBUF);
2596 return (rv);
2597 }
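/*
 * User-space sketch of IOPIOCPT (values illustrative): build a raw
 * message frame, supply a reply buffer, and let the driver map any
 * data buffers:
 *
 *	struct ioppt pt;
 *
 *	memset(&pt, 0, sizeof(pt));
 *	pt.pt_msg = mf;
 *	pt.pt_msglen = mfsize;
 *	pt.pt_reply = rbuf;
 *	pt.pt_replylen = sizeof(rbuf);
 *	pt.pt_timo = 10000;	1s <= timo <= 5min, per the checks above
 *	pt.pt_nbufs = 0;
 *	if (ioctl(fd, IOPIOCPT, &pt) != 0)
 *		err(EXIT_FAILURE, "IOPIOCPT");
 */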
2598