/*	$NetBSD: iop.c,v 1.24.4.1 2002/10/23 12:22:35 lukem Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.24.4.1 2002/10/23 12:22:35 lukem Exp $");

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

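/*
 * Busy-wait for up to `ms' milliseconds for `cond' to become true,
 * re-evaluating it every 100us ((ms) * 10 polls in total).
 */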
#define POLL(ms, cond)						\
do {								\
	int i;							\
	for (i = (ms) * 10; i; i--) {				\
		if (cond)					\
			break;					\
		DELAY(100);					\
	}							\
} while (/* CONSTCOND */0);

#ifdef I2ODEBUG
#define DPRINTF(x)	printf x
#else
#define DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define IFVERBOSE(x)	x
#define COMMENT(x)	NULL
#else
#define IFVERBOSE(x)
#define COMMENT(x)
#endif

#define IOP_ICTXHASH_NBUCKETS	16
#define IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

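/*
 * Transaction context layout (see iop_msg_alloc()): the low IOP_TCTX_SHIFT
 * bits index the message wrapper in sc_ims, and the remaining high bits
 * carry a generation number that is bumped on every allocation so that
 * replies for stale or recycled wrappers can be detected.
 */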
#define IOP_TCTX_SHIFT	12
#define IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static void *iop_sdh;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

extern struct cfdriver iop_cd;

#define IC_CONFIGURE	0x01
#define IC_PRIORITY	0x02

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char *ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t iop_inl(struct iop_softc *, int);
static inline void iop_outl(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);
static int	iop_vendor_print(void *, const char *);

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
		    u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
		    struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

cdev_decl(iop);

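/*
 * Register access helpers.  A read is preceded by a full barrier so that
 * a fresh value is fetched; a write is followed by a write barrier so
 * that it is pushed out to the IOP in order.
 */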
static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

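	/*
	 * `state' counts the resources acquired so far, so that bail_out
	 * can unwind only what has actually been set up.
	 */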
	state = 0;

	printf("I2O adapter");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
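	/* The IOP reports its frame size in 32-bit words; convert to bytes. */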
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		printf("%s: frame size too small (%d)\n",
		    sc->sc_dv.dv_xname, sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0, state++; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
387 printf("%s: couldn't create dmamap (%d)",
388 sc->sc_dv.dv_xname, rv);
			goto bail_out;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	return;

 bail_out:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);
		if (iop_systab == NULL)
			return;

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
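			/* IOP IDs 0 and 1 are reserved; hence the +2. */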
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    30000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);

	/*
	 * Start device configuration.
	 */
	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	if ((rv = iop_reconfigure(sc, 0)) == -1) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		return;
	}
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;

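	/*
	 * Ask for notification with a change indicator one beyond the last
	 * LCT we saw; LCT_NOTIFY then completes only once the table has
	 * actually changed.
	 */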
	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(sc->sc_reconf_proc);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(sc->sc_reconf_proc);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (sc->sc_tidmap == NULL)
		return (ENOMEM);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0)
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

static int
iop_vendor_print(void *aux, const char *pnp)
{

	return (QUIET);
}

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

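	/*
	 * The IOP sets the sync byte (0xff) once the rest of the status
	 * block has been written, so poll for it before trusting the data.
	 */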
	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff) {
		printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curproc);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curproc);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

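	/* struct i2o_hrt already contains one entry, hence the -1 below. */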
	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
    u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

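	/* The table size is reported in 32-bit words. */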
	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
    int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct i2o_reply *rf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;
	im->im_rb = rf;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curproc);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curproc);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (rf->reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		free(rf, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curproc);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
    int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curproc);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) >= sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

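	/*
	 * Keep the wrapper's index in the low bits of the transaction
	 * context and stamp a fresh generation number into the high bits,
	 * allowing iop_handle_reply() to reject replies for recycled
	 * wrappers.
	 */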
	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
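	/*
	 * Each SIMPLE element takes two words; the message size field
	 * lives at bit 16, hence the shift by 17.
	 */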
	mb[0] += (nsegs << 17);
	return (0);

 bad:
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}

/*
 * Map a block I/O data transfer (different in that there's only one per
 * message maximum, and PAGE addressing may be used).  Write a scatter
 * gather list into the message frame.
 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
	if (rv != 0)
		return (rv);

	off = mb[0] >> 16;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
			slen = ds->ds_len;
			saddr = ds->ds_addr;

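			/*
			 * Walk the segment one page at a time, emitting a
			 * PAGE_LIST address for each page crossed.
			 */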
			while (slen > 0) {
				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
				tlen = min(eaddr - saddr, slen);
				slen -= tlen;
2000 *p++ = le32toh(saddr);
2001 saddr = eaddr;
2002 nsegs++;
2003 }
2004 }
2005
2006 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2007 I2O_SGL_END;
2008 if (out)
2009 mb[off] |= I2O_SGL_DATA_OUT;
2010 } else {
2011 p = mb + off;
2012 nsegs = dm->dm_nsegs;
2013
2014 if (out)
2015 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2016 else
2017 flg = I2O_SGL_SIMPLE;
2018
2019 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2020 p[0] = (u_int32_t)ds->ds_len | flg;
2021 p[1] = (u_int32_t)ds->ds_addr;
2022 }
2023
2024 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2025 I2O_SGL_END;
2026 p[1] = (u_int32_t)ds->ds_addr;
2027 nsegs <<= 1;
2028 }
2029
2030 /* Fix up the transfer record, and sync the map. */
2031 ix->ix_flags = (out ? IX_OUT : IX_IN);
2032 ix->ix_size = xfersize;
2033 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2034 out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2035
2036 /*
2037 * Adjust the SGL offset and total message size fields. We don't
2038 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2039 */
2040 mb[0] += ((off << 4) + (nsegs << 16));
2041 return (0);
2042 }
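
/*
 * Editorial note (worked example, 4 KB pages assumed): a PAGE_LIST
 * costs one header word at mb[off] plus one word per page-sized chunk
 * of the map, whereas SIMPLE costs two words per DMA segment.  With
 * the 128 byte frame and off == 8 from the previous example (24 spare
 * words, 12 SIMPLE elements), a 64 KB transfer in 16 discontiguous
 * page-aligned segments cannot be described with SIMPLE elements
 * (16 > 12), but fits as a PAGE_LIST in 1 + 16 == 17 words.
 */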
2043
2044 /*
2045 * Unmap all data transfers associated with a message wrapper.
2046 */
2047 void
2048 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2049 {
2050 struct iop_xfer *ix;
2051 int i;
2052
2053 #ifdef I2ODEBUG
2054 if (im->im_xfer[0].ix_size == 0)
2055 panic("iop_msg_unmap: no transfers mapped");
2056 #endif
2057
2058 for (ix = im->im_xfer, i = 0;;) {
2059 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2060 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2061 BUS_DMASYNC_POSTREAD);
2062 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2063
2064 /* Only the first DMA map is static. */
2065 if (i != 0)
2066 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2067 if ((++ix)->ix_size == 0)
2068 break;
2069 if (++i >= IOP_MAX_MSG_XFERS)
2070 break;
2071 }
2072 }
2073
2074 /*
2075 * Post a message frame to the IOP's inbound queue.
2076 */
2077 int
2078 iop_post(struct iop_softc *sc, u_int32_t *mb)
2079 {
2080 u_int32_t mfa;
2081 int s;
2082
2083 #ifdef I2ODEBUG
2084 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2085 panic("iop_post: frame too large");
2086 #endif
2087
2088 s = splbio();
2089
2090 /* Allocate a slot with the IOP. */
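/*
 * Editorial note: the inbound FIFO is deliberately read twice -- some
 * IOPs apparently return IOP_MFA_EMPTY spuriously on the first read
 * (compare the "double read to account for IOP bug" comment in
 * iop_msg_poll()).
 */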
2091 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2092 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2093 splx(s);
2094 printf("%s: mfa not forthcoming\n",
2095 sc->sc_dv.dv_xname);
2096 return (EAGAIN);
2097 }
2098
2099 /* Perform reply buffer DMA synchronisation. */
2100 if (sc->sc_curib++ == 0)
2101 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2102 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2103
2104 /* Copy out the message frame. */
2105 bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa, mb, mb[0] >> 16);
2106 bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa, (mb[0] >> 14) & ~3,
2107 BUS_SPACE_BARRIER_WRITE);
2108
2109 /* Post the MFA back to the IOP. */
2110 iop_outl(sc, IOP_REG_IFIFO, mfa);
2111
2112 splx(s);
2113 return (0);
2114 }
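
/*
 * Editorial note (derived from the code above): the driver relies on
 * the I2O header word layout in which mb[0] bits 16-31 hold the frame
 * size in 32-bit words and bits 4-7 hold the SGL offset in words.
 * Hence "mb[0] >> 16" is the word count handed to
 * bus_space_write_region_4(), and "(mb[0] >> 14) & ~3" is the same
 * size expressed in bytes for the write barrier.
 */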
2115
2116 /*
2117 * Post a message to the IOP and deal with completion.
2118 */
2119 int
2120 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2121 {
2122 u_int32_t *mb;
2123 int rv, s;
2124
2125 mb = xmb;
2126
2127 /* Terminate the scatter/gather list chain. */
2128 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2129 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2130
2131 if ((rv = iop_post(sc, mb)) != 0)
2132 return (rv);
2133
2134 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2135 if ((im->im_flags & IM_POLL) != 0)
2136 iop_msg_poll(sc, im, timo);
2137 else
2138 iop_msg_wait(sc, im, timo);
2139
2140 s = splbio();
2141 if ((im->im_flags & IM_REPLIED) != 0) {
2142 if ((im->im_flags & IM_NOSTATUS) != 0)
2143 rv = 0;
2144 else if ((im->im_flags & IM_FAIL) != 0)
2145 rv = ENXIO;
2146 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2147 rv = EIO;
2148 else
2149 rv = 0;
2150 } else
2151 rv = EBUSY;
2152 splx(s);
2153 } else
2154 rv = 0;
2155
2156 return (rv);
2157 }
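
/*
 * Editorial sketch of the typical caller pattern (the message type
 * "i2o_some_msg" and function code I2O_SOME_FUNC are illustrative
 * only; iop_util_claim() below is a real instance):
 *
 *	im = iop_msg_alloc(sc, IM_WAIT);
 *	mf.msgflags = I2O_MSGFLAGS(i2o_some_msg);
 *	mf.msgfunc = I2O_MSGFUNC(tid, I2O_SOME_FUNC);
 *	mf.msgictx = ii->ii_ictx;
 *	mf.msgtctx = im->im_tctx;
 *	rv = iop_msg_post(sc, im, &mf, 5000);
 *	iop_msg_free(sc, im);
 */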
2158
2159 /*
2160 * Spin until the specified message is replied to.
2161 */
2162 static void
2163 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2164 {
2165 u_int32_t rmfa;
2166 int s, status;
2167
2168 s = splbio();
2169
2170 /* Wait for completion. */
2171 for (timo *= 10; timo != 0; timo--) {
2172 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2173 /* Double read to account for IOP bug. */
2174 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2175 if (rmfa == IOP_MFA_EMPTY)
2176 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2177 if (rmfa != IOP_MFA_EMPTY) {
2178 status = iop_handle_reply(sc, rmfa);
2179
2180 /*
2181 * Return the reply frame to the IOP's
2182 * outbound FIFO.
2183 */
2184 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2185 }
2186 }
2187 if ((im->im_flags & IM_REPLIED) != 0)
2188 break;
2189 DELAY(100);
2190 }
2191
2192 if (timo == 0) {
2193 #ifdef I2ODEBUG
2194 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2195 if (iop_status_get(sc, 1) != 0)
2196 printf("iop_msg_poll: unable to retrieve status\n");
2197 else
2198 printf("iop_msg_poll: IOP state = %d\n",
2199 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2200 #endif
2201 }
2202
2203 splx(s);
2204 }
2205
2206 /*
2207 * Sleep until the specified message is replied to.
2208 */
2209 static void
2210 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2211 {
2212 int s, rv;
2213
2214 s = splbio();
2215 if ((im->im_flags & IM_REPLIED) != 0) {
2216 splx(s);
2217 return;
2218 }
2219 rv = tsleep(im, PRIBIO, "iopmsg", mstohz(timo));
2220 splx(s);
2221
2222 #ifdef I2ODEBUG
2223 if (rv != 0) {
2224 printf("iop_msg_wait: tsleep() == %d\n", rv);
2225 if (iop_status_get(sc, 0) != 0)
2226 printf("iop_msg_wait: unable to retrieve status\n");
2227 else
2228 printf("iop_msg_wait: IOP state = %d\n",
2229 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2230 }
2231 #endif
2232 }
2233
2234 /*
2235 * Release an unused message frame back to the IOP's inbound fifo.
2236 */
2237 static void
2238 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2239 {
2240
2241 /* Use the frame to issue a no-op. */
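/* Word 0: I2O version 1.1, SGL offset 0, frame size of 4 words. */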
2242 iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
2243 iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2244 iop_outl(sc, mfa + 8, 0);
2245 iop_outl(sc, mfa + 12, 0);
2246
2247 iop_outl(sc, IOP_REG_IFIFO, mfa);
2248 }
2249
2250 #ifdef I2ODEBUG
2251 /*
2252 * Dump a reply frame header.
2253 */
2254 static void
2255 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2256 {
2257 u_int function, detail;
2258 #ifdef I2OVERBOSE
2259 const char *statusstr;
2260 #endif
2261
2262 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2263 detail = le16toh(rb->detail);
2264
2265 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2266
2267 #ifdef I2OVERBOSE
2268 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2269 statusstr = iop_status[rb->reqstatus];
2270 else
2271 statusstr = "undefined error code";
2272
2273 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2274 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2275 #else
2276 printf("%s: function=0x%02x status=0x%02x\n",
2277 sc->sc_dv.dv_xname, function, rb->reqstatus);
2278 #endif
2279 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2280 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2281 le32toh(rb->msgtctx));
2282 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2283 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2284 (le32toh(rb->msgflags) >> 8) & 0xff);
2285 }
2286 #endif
2287
2288 /*
2289 * Dump a transport failure reply.
2290 */
2291 static void
2292 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2293 {
2294
2295 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2296
2297 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2298 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2299 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2300 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2301 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2302 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2303 }
2304
2305 /*
2306 * Translate an I2O ASCII field into a C string.
2307 */
2308 void
2309 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2310 {
2311 int hc, lc, i, nit;
2312
2313 dlen--;
2314 lc = 0;
2315 hc = 0;
2316 i = 0;
2317
2318 /*
2319 * DPT use NUL as a space, whereas AMI use it as a terminator. The
2320 * spec has nothing to say about it. Since AMI fields are usually
2321 * filled with junk after the terminator, treat NUL as a terminator
* unless the IOP is from DPT (the "nit" flag below).
2322 */
2323 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2324
2325 while (slen-- != 0 && dlen-- != 0) {
2326 if (nit && *src == '\0')
2327 break;
2328 else if (*src <= 0x20 || *src >= 0x7f) {
2329 if (hc)
2330 dst[i++] = ' ';
2331 } else {
2332 hc = 1;
2333 dst[i++] = *src;
2334 lc = i;
2335 }
2336 src++;
2337 }
2338
2339 dst[lc] = '\0';
2340 }
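
/*
 * Editorial example (hypothetical bytes): given the 8-byte field
 * { 'R','A','I','D','\0','x','y','z' }, a non-DPT IOP yields "RAID"
 * (the NUL terminates), while a DPT IOP yields "RAID xyz" (the NUL is
 * rendered as a space and subsequent printable bytes are kept).
 */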
2341
2342 /*
2343 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2344 */
2345 int
2346 iop_print_ident(struct iop_softc *sc, int tid)
2347 {
2348 struct {
2349 struct i2o_param_op_results pr;
2350 struct i2o_param_read_results prr;
2351 struct i2o_param_device_identity di;
2352 } __attribute__ ((__packed__)) p;
2353 char buf[32];
2354 int rv;
2355
2356 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2357 sizeof(p), NULL);
2358 if (rv != 0)
2359 return (rv);
2360
2361 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2362 sizeof(buf));
2363 printf(" <%s, ", buf);
2364 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2365 sizeof(buf));
2366 printf("%s, ", buf);
2367 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2368 printf("%s>", buf);
2369
2370 return (0);
2371 }
2372
2373 /*
2374 * Claim or unclaim the specified TID.
2375 */
2376 int
2377 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2378 int flags)
2379 {
2380 struct iop_msg *im;
2381 struct i2o_util_claim mf;
2382 int rv, func;
2383
2384 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2385 im = iop_msg_alloc(sc, IM_WAIT);
2386
2387 /* We can use the same structure, as they're identical. */
2388 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2389 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2390 mf.msgictx = ii->ii_ictx;
2391 mf.msgtctx = im->im_tctx;
2392 mf.flags = flags;
2393
2394 rv = iop_msg_post(sc, im, &mf, 5000);
2395 iop_msg_free(sc, im);
2396 return (rv);
2397 }
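
/*
 * Editorial sketch: a peripheral driver attached to "iop" would
 * typically claim its target at attach time and release it again at
 * detach (assuming the claim-flag constant from <dev/i2o/i2o.h>):
 *
 *	rv = iop_util_claim(iop, &sc->sc_ii, 0, I2O_UTIL_CLAIM_PRIMARY_USER);
 *	...
 *	rv = iop_util_claim(iop, &sc->sc_ii, 1, I2O_UTIL_CLAIM_PRIMARY_USER);
 */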
2398
2399 /*
2400 * Perform an abort.
2401 */
int
2402 iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2403 int tctxabort, int flags)
2404 {
2405 struct iop_msg *im;
2406 struct i2o_util_abort mf;
2407 int rv;
2408
2409 im = iop_msg_alloc(sc, IM_WAIT);
2410
2411 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2412 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2413 mf.msgictx = ii->ii_ictx;
2414 mf.msgtctx = im->im_tctx;
2415 mf.flags = (func << 24) | flags;
2416 mf.tctxabort = tctxabort;
2417
2418 rv = iop_msg_post(sc, im, &mf, 5000);
2419 iop_msg_free(sc, im);
2420 return (rv);
2421 }
2422
2423 /*
2424 * Enable or disable reception of events for the specified device.
2425 */
int
2426 iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2427 {
2428 struct i2o_util_event_register mf;
2429
2430 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2431 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2432 mf.msgictx = ii->ii_ictx;
2433 mf.msgtctx = 0;
2434 mf.eventmask = mask;
2435
2436 /* This message is replied to only when events are signalled. */
2437 return (iop_post(sc, (u_int32_t *)&mf));
2438 }
2439
2440 int
2441 iopopen(dev_t dev, int flag, int mode, struct proc *p)
2442 {
2443 struct iop_softc *sc;
2444
2445 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2446 return (ENXIO);
2447 if ((sc->sc_flags & IOP_ONLINE) == 0)
2448 return (ENXIO);
2449 if ((sc->sc_flags & IOP_OPEN) != 0)
2450 return (EBUSY);
2451 sc->sc_flags |= IOP_OPEN;
2452
2453 return (0);
2454 }
2455
2456 int
2457 iopclose(dev_t dev, int flag, int mode, struct proc *p)
2458 {
2459 struct iop_softc *sc;
2460
2461 sc = device_lookup(&iop_cd, minor(dev));
2462 sc->sc_flags &= ~IOP_OPEN;
2463
2464 return (0);
2465 }
2466
2467 int
2468 iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
2469 {
2470 struct iop_softc *sc;
2471 struct iovec *iov;
2472 int rv, i;
2473
2474 if (securelevel >= 2)
2475 return (EPERM);
2476
2477 sc = device_lookup(&iop_cd, minor(dev));
2478
2479 switch (cmd) {
2480 case IOPIOCPT:
2481 return (iop_passthrough(sc, (struct ioppt *)data, p));
2482
2483 case IOPIOCGSTATUS:
2484 iov = (struct iovec *)data;
2485 i = sizeof(struct i2o_status);
2486 if (i > iov->iov_len)
2487 i = iov->iov_len;
2488 else
2489 iov->iov_len = i;
2490 if ((rv = iop_status_get(sc, 0)) == 0)
2491 rv = copyout(&sc->sc_status, iov->iov_base, i);
2492 return (rv);
2493
2494 case IOPIOCGLCT:
2495 case IOPIOCGTIDMAP:
2496 case IOPIOCRECONFIG:
2497 break;
2498
2499 default:
2500 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2501 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2502 #endif
2503 return (ENOTTY);
2504 }
2505
2506 if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2507 return (rv);
2508
2509 switch (cmd) {
2510 case IOPIOCGLCT:
2511 iov = (struct iovec *)data;
2512 i = le16toh(sc->sc_lct->tablesize) << 2;
2513 if (i > iov->iov_len)
2514 i = iov->iov_len;
2515 else
2516 iov->iov_len = i;
2517 rv = copyout(sc->sc_lct, iov->iov_base, i);
2518 break;
2519
2520 case IOPIOCRECONFIG:
2521 rv = iop_reconfigure(sc, 0);
2522 break;
2523
2524 case IOPIOCGTIDMAP:
2525 iov = (struct iovec *)data;
2526 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2527 if (i > iov->iov_len)
2528 i = iov->iov_len;
2529 else
2530 iov->iov_len = i;
2531 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2532 break;
2533 }
2534
2535 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2536 return (rv);
2537 }
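
/*
 * Editorial sketch (userland, error handling omitted; the device path
 * is illustrative): the iovec-based ioctls are used along these lines:
 *
 *	struct i2o_status st;
 *	struct iovec iov = { &st, sizeof(st) };
 *	int fd = open("/dev/iop0", O_RDWR);
 *	ioctl(fd, IOPIOCGSTATUS, &iov);
 *
 * after which iov.iov_len holds the number of bytes copied out.
 */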
2538
2539 static int
2540 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2541 {
2542 struct iop_msg *im;
2543 struct i2o_msg *mf;
2544 struct ioppt_buf *ptb;
2545 int rv, i, mapped;
2546
2547 mf = NULL;
2548 im = NULL;
2549 mapped = 0;
2550
2551 if (pt->pt_msglen > sc->sc_framesize ||
2552 pt->pt_msglen < sizeof(struct i2o_msg) ||
2553 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2554 pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2555 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2556 return (EINVAL);
2557
2558 for (i = 0; i < pt->pt_nbufs; i++)
2559 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2560 rv = ENOMEM;
2561 goto bad;
2562 }
2563
2564 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2565 if (mf == NULL)
2566 return (ENOMEM);
2567
2568 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2569 goto bad;
2570
2571 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2572 im->im_rb = (struct i2o_reply *)mf;
2573 mf->msgictx = IOP_ICTX;
2574 mf->msgtctx = im->im_tctx;
2575
2576 for (i = 0; i < pt->pt_nbufs; i++) {
2577 ptb = &pt->pt_bufs[i];
2578 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2579 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2580 if (rv != 0)
2581 goto bad;
2582 mapped = 1;
2583 }
2584
2585 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2586 goto bad;
2587
2588 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2589 if (i > sc->sc_framesize)
2590 i = sc->sc_framesize;
2591 if (i > pt->pt_replylen)
2592 i = pt->pt_replylen;
2593 rv = copyout(im->im_rb, pt->pt_reply, i);
2594
2595 bad:
2596 if (mapped != 0)
2597 iop_msg_unmap(sc, im);
2598 if (im != NULL)
2599 iop_msg_free(sc, im);
2600 if (mf != NULL)
2601 free(mf, M_DEVBUF);
2602 return (rv);
2603 }
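
/*
 * Editorial sketch (userland; values are illustrative): a pass-through
 * caller fills in a struct ioppt subject to the checks above:
 *
 *	struct ioppt pt;
 *	pt.pt_msg = mbuf;		frame to send
 *	pt.pt_msglen = msglen;		sizeof(struct i2o_msg)..framesize
 *	pt.pt_timo = 30000;		1000..300000 ms
 *	pt.pt_reply = rbuf;
 *	pt.pt_replylen = rbuflen;
 *	pt.pt_nbufs = 0;		at most IOP_MAX_MSG_XFERS
 *	ioctl(fd, IOPIOCPT, &pt);
 *
 * msgictx and msgtctx in the supplied frame are overwritten by the
 * driver, so the caller need not set them.
 */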
2604