/*	$NetBSD: iop.c,v 1.21 2002/01/02 19:04:17 ad Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.21 2002/01/02 19:04:17 ad Exp $");

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#define	POLL(ms, cond)				\
do {						\
	int i;					\
	for (i = (ms) * 10; i; i--) {		\
		if (cond)			\
			break;			\
		DELAY(100);			\
	}					\
} while (/* CONSTCOND */0)
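
/*
 * POLL() busy-waits for up to `ms' milliseconds, re-evaluating `cond'
 * every 100us.  Since `cond' is re-evaluated on each iteration, it may
 * carry side effects in a comma expression; iop_reset() uses this to
 * sync the scratch DMA map before each test:
 *
 *	POLL(2500, (bus_dmamap_sync(...), *sw != 0));
 */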

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#define	COMMENT(x)	NULL
#else
#define	IFVERBOSE(x)
#define	COMMENT(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)
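
/*
 * The `+ 1' in IOP_MAX_SEGS above allows for a transfer buffer that is
 * not page aligned: IOP_MAX_XFER bytes can then touch one more page
 * than the simple division suggests.
 */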

#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)
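
/*
 * Transaction context layout: the low IOP_TCTX_SHIFT bits index the
 * message wrapper in sc->sc_ims; the remaining high bits hold a
 * generation number (see tctxgen in iop_msg_alloc()), which lets
 * iop_handle_reply() catch stale or duplicated transaction contexts.
 */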

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long	iop_ictxhash;
static void	*iop_sdh;
static struct	i2o_systab *iop_systab;
static int	iop_systab_size;

extern struct cfdriver iop_cd;

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02
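
/*
 * IC_CONFIGURE marks classes for which we attach child devices;
 * IC_PRIORITY marks classes that are configured in a first pass, so
 * that their claims have propagated through the LCT before the second
 * pass runs (see iop_reconfigure()).
 */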

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char	*ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);
static int	iop_vendor_print(void *, const char *);

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
				struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

cdev_decl(iop);

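/*
 * Register accessors.  The bus_space barriers keep the IOP's FIFO
 * registers coherent: iop_inl() orders the read after any earlier reads
 * and writes (important when polling the FIFOs), and iop_outl() pushes
 * each write out before any subsequent access.
 */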
static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	state++;

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		printf("%s: frame size too small (%d)\n",
		    sc->sc_dv.dv_xname, sc->sc_framesize);
		return;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT);
	memset(im, 0, sizeof(*im) * sc->sc_maxib);
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0, state++; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	return;

 bail_out:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT);
		if (iop_systab == NULL)
			return;

		memset(iop_systab, 0, i);
		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    30000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);

	/*
	 * Start device configuration.
	 */
	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	if ((rv = iop_reconfigure(sc, 0)) == -1) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		return;
	}
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(sc->sc_reconf_proc);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(sc->sc_reconf_proc);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_tidmap == NULL)
		return (ENOMEM);
	memset(sc->sc_tidmap, 0, sc->sc_nlctent * sizeof(struct iop_tidmap));

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0)
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

static int
iop_vendor_print(void *aux, const char *pnp)
{

	return (QUIET);
}

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX,
		    0, 1000);
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff) {
		printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
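	/*
	 * The SIMPLE SG element written below is two 32-bit words: the
	 * first holds the flags and the byte count, the second the low
	 * 32 bits of the buffer's physical address.
	 */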
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curproc);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curproc);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct i2o_reply *rf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;
	im->im_rb = rf;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curproc);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curproc);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (rf->reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		free(rf, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
	      int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curproc);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curproc);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct iop_softc *sc;
	u_int event;

	sc = (struct iop_softc *)dv;
	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
	    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
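	/*
	 * Each SIMPLE element is two words, so half of the words left
	 * after the current message payload (whose size, in words, is
	 * held in the top 16 bits of mb[0]) are available for elements.
	 */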
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
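	/*
	 * Each SIMPLE element adds two words to the message size, which
	 * lives in bits 16-31 of mb[0]; nsegs << 17 == (2 * nsegs) << 16.
	 */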
1930 mb[0] += (nsegs << 17);
1931 return (0);
1932
1933 bad:
1934 if (xn != 0)
1935 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1936 return (rv);
1937 }
1938
1939 /*
1940 * Map a block I/O data transfer (different in that there's only one per
1941 * message maximum, and PAGE addressing may be used). Write a scatter
1942 * gather list into the message frame.
1943 */
1944 int
1945 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1946 void *xferaddr, int xfersize, int out)
1947 {
1948 bus_dma_segment_t *ds;
1949 bus_dmamap_t dm;
1950 struct iop_xfer *ix;
1951 u_int rv, i, nsegs, off, slen, tlen, flg;
1952 paddr_t saddr, eaddr;
1953 u_int32_t *p;
1954
1955 #ifdef I2ODEBUG
1956 if (xfersize == 0)
1957 panic("iop_msg_map_bio: null transfer");
1958 if (xfersize > IOP_MAX_XFER)
1959 panic("iop_msg_map_bio: transfer too large");
1960 if ((im->im_flags & IM_SGLOFFADJ) != 0)
1961 panic("iop_msg_map_bio: SGLOFFADJ");
1962 #endif
1963
1964 ix = im->im_xfer;
1965 dm = ix->ix_map;
1966 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
1967 BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
1968 if (rv != 0)
1969 return (rv);
1970
1971 off = mb[0] >> 16;
1972 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
1973
1974 /*
1975 * If the transfer is highly fragmented and won't fit using SIMPLE
1976 * elements, use PAGE_LIST elements instead. SIMPLE elements are
1977 * potentially more efficient, both for us and the IOP.
1978 */
1979 if (dm->dm_nsegs > nsegs) {
1980 nsegs = 1;
1981 p = mb + off + 1;
1982
1983 /* XXX This should be done with a bus_space flag. */
1984 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
1985 slen = ds->ds_len;
1986 saddr = ds->ds_addr;
1987
1988 while (slen > 0) {
1989 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
1990 tlen = min(eaddr - saddr, slen);
1991 slen -= tlen;
1992 *p++ = le32toh(saddr);
1993 saddr = eaddr;
1994 nsegs++;
1995 }
1996 }
1997
1998 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
1999 I2O_SGL_END;
2000 if (out)
2001 mb[off] |= I2O_SGL_DATA_OUT;
2002 } else {
2003 p = mb + off;
2004 nsegs = dm->dm_nsegs;
2005
2006 if (out)
2007 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2008 else
2009 flg = I2O_SGL_SIMPLE;
2010
2011 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2012 p[0] = (u_int32_t)ds->ds_len | flg;
2013 p[1] = (u_int32_t)ds->ds_addr;
2014 }
2015
2016 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2017 I2O_SGL_END;
2018 p[1] = (u_int32_t)ds->ds_addr;
2019 nsegs <<= 1;
2020 }
2021
	/*
	 * Fix up the transfer record, and sync the map.  As above, the
	 * device has not yet accessed the buffer, so PRE sync ops apply.
	 */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2027
	/*
	 * Adjust the SGL offset and total message size fields.  We don't
	 * set IM_SGLOFFADJ, since the SG list has already been terminated:
	 * only one transfer is allowed per block I/O message.
	 */
2032 mb[0] += ((off << 4) + (nsegs << 16));
2033 return (0);
2034 }
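
#ifdef notdef
/*
 * The page-boundary walk above, in isolation (hypothetical helper,
 * shown only to illustrate the arithmetic): each DMA segment is
 * chopped at page boundaries and one 32-bit page address is emitted
 * per page, which is the layout the PAGE_LIST element expects.
 */
static u_int32_t *
iop_sgl_page_walk(u_int32_t *p, paddr_t saddr, u_int slen)
{
	paddr_t eaddr;
	u_int tlen;

	while (slen > 0) {
		/* Round saddr up to the next page boundary. */
		eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
		tlen = min(eaddr - saddr, slen);
		slen -= tlen;
		*p++ = (u_int32_t)saddr;
		saddr = eaddr;
	}
	return (p);
}
#endif	/* notdef */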
2035
2036 /*
2037 * Unmap all data transfers associated with a message wrapper.
2038 */
2039 void
2040 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2041 {
2042 struct iop_xfer *ix;
2043 int i;
2044
2045 #ifdef I2ODEBUG
2046 if (im->im_xfer[0].ix_size == 0)
2047 panic("iop_msg_unmap: no transfers mapped");
2048 #endif
2049
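	/* A transfer record with ix_size == 0 ends the set of mappings. */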
2050 for (ix = im->im_xfer, i = 0;;) {
2051 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2052 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2053 BUS_DMASYNC_POSTREAD);
2054 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2055
2056 /* Only the first DMA map is static. */
2057 if (i != 0)
2058 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2059 if ((++ix)->ix_size == 0)
2060 break;
2061 if (++i >= IOP_MAX_MSG_XFERS)
2062 break;
2063 }
2064 }
2065
2066 /*
2067 * Post a message frame to the IOP's inbound queue.
2068 */
2069 int
2070 iop_post(struct iop_softc *sc, u_int32_t *mb)
2071 {
2072 u_int32_t mfa;
2073 int s;
2074
2075 #ifdef I2ODEBUG
2076 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2077 panic("iop_post: frame too large");
2078 #endif
2079
2080 s = splbio();
2081
	/*
	 * Allocate a slot with the IOP.  A single `empty' indication may
	 * be transient, so retry the read once before giving up.
	 */
2083 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2084 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2085 splx(s);
2086 printf("%s: mfa not forthcoming\n",
2087 sc->sc_dv.dv_xname);
2088 return (EAGAIN);
2089 }
2090
2091 /* Perform reply buffer DMA synchronisation. */
2092 if (sc->sc_curib++ == 0)
2093 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2094 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2095
2096 /* Copy out the message frame. */
2097 bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa, mb, mb[0] >> 16);
2098 bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa, (mb[0] >> 14) & ~3,
2099 BUS_SPACE_BARRIER_WRITE);
2100
2101 /* Post the MFA back to the IOP. */
2102 iop_outl(sc, IOP_REG_IFIFO, mfa);
2103
2104 splx(s);
2105 return (0);
2106 }
2107
2108 /*
2109 * Post a message to the IOP and deal with completion.
2110 */
2111 int
2112 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2113 {
2114 u_int32_t *mb;
2115 int rv, s;
2116
2117 mb = xmb;
2118
	/*
	 * Terminate the SG list chain: each SIMPLE element is a
	 * length/flags word followed by an address word, so the final
	 * element's flag word is two words from the end of the frame
	 * (whose size in words is kept in the high half of mb[0]).
	 */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2122
2123 if ((rv = iop_post(sc, mb)) != 0)
2124 return (rv);
2125
2126 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2127 if ((im->im_flags & IM_POLL) != 0)
2128 iop_msg_poll(sc, im, timo);
2129 else
2130 iop_msg_wait(sc, im, timo);
2131
2132 s = splbio();
2133 if ((im->im_flags & IM_REPLIED) != 0) {
2134 if ((im->im_flags & IM_NOSTATUS) != 0)
2135 rv = 0;
2136 else if ((im->im_flags & IM_FAIL) != 0)
2137 rv = ENXIO;
2138 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2139 rv = EIO;
2140 else
2141 rv = 0;
2142 } else
2143 rv = EBUSY;
2144 splx(s);
2145 } else
2146 rv = 0;
2147
2148 return (rv);
2149 }
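
#ifdef notdef
/*
 * Shape of a cold-path caller (sketch only; the frame "mf" is elided
 * and the 5000ms timeout is arbitrary).  With IM_POLL the reply is
 * reaped by polling the outbound FIFO rather than by sleeping, which
 * suits callers that run before interrupts are enabled:
 */
	im = iop_msg_alloc(sc, IM_POLL);
	/* ... build the frame mf, setting mf.msgtctx = im->im_tctx ... */
	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
#endif	/* notdef */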
2150
2151 /*
2152 * Spin until the specified message is replied to.
2153 */
2154 static void
2155 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2156 {
2157 u_int32_t rmfa;
2158 int s, status;
2159
2160 s = splbio();
2161
2162 /* Wait for completion. */
2163 for (timo *= 10; timo != 0; timo--) {
2164 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2165 /* Double read to account for IOP bug. */
2166 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2167 if (rmfa == IOP_MFA_EMPTY)
2168 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2169 if (rmfa != IOP_MFA_EMPTY) {
2170 status = iop_handle_reply(sc, rmfa);
2171
2172 /*
2173 * Return the reply frame to the IOP's
2174 * outbound FIFO.
2175 */
2176 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2177 }
2178 }
2179 if ((im->im_flags & IM_REPLIED) != 0)
2180 break;
2181 DELAY(100);
2182 }
2183
2184 if (timo == 0) {
2185 #ifdef I2ODEBUG
2186 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2187 if (iop_status_get(sc, 1) != 0)
2188 printf("iop_msg_poll: unable to retrieve status\n");
2189 else
2190 printf("iop_msg_poll: IOP state = %d\n",
2191 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2192 #endif
2193 }
2194
2195 splx(s);
2196 }
2197
2198 /*
2199 * Sleep until the specified message is replied to.
2200 */
2201 static void
2202 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2203 {
2204 int s, rv;
2205
2206 s = splbio();
2207 if ((im->im_flags & IM_REPLIED) != 0) {
2208 splx(s);
2209 return;
2210 }
2211 rv = tsleep(im, PRIBIO, "iopmsg", timo * hz / 1000);
2212 splx(s);
2213
2214 #ifdef I2ODEBUG
2215 if (rv != 0) {
2216 printf("iop_msg_wait: tsleep() == %d\n", rv);
2217 if (iop_status_get(sc, 0) != 0)
2218 printf("iop_msg_wait: unable to retrieve status\n");
2219 else
2220 printf("iop_msg_wait: IOP state = %d\n",
2221 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2222 }
2223 #endif
2224 }
2225
2226 /*
 * Release an unused message frame back to the IOP's inbound FIFO.
2228 */
2229 static void
2230 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2231 {
2232
2233 /* Use the frame to issue a no-op. */
2234 iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
2235 iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2236 iop_outl(sc, mfa + 8, 0);
2237 iop_outl(sc, mfa + 12, 0);
2238
2239 iop_outl(sc, IOP_REG_IFIFO, mfa);
2240 }
2241
2242 #ifdef I2ODEBUG
2243 /*
2244 * Dump a reply frame header.
2245 */
2246 static void
2247 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2248 {
2249 u_int function, detail;
2250 #ifdef I2OVERBOSE
2251 const char *statusstr;
2252 #endif
2253
2254 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2255 detail = le16toh(rb->detail);
2256
2257 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2258
2259 #ifdef I2OVERBOSE
2260 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2261 statusstr = iop_status[rb->reqstatus];
2262 else
2263 statusstr = "undefined error code";
2264
2265 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2266 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2267 #else
2268 printf("%s: function=0x%02x status=0x%02x\n",
2269 sc->sc_dv.dv_xname, function, rb->reqstatus);
2270 #endif
2271 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2272 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2273 le32toh(rb->msgtctx));
2274 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2275 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2276 (le32toh(rb->msgflags) >> 8) & 0xff);
2277 }
2278 #endif
2279
2280 /*
2281 * Dump a transport failure reply.
2282 */
2283 static void
2284 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2285 {
2286
2287 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2288
2289 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2290 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2291 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2292 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2293 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2294 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2295 }
2296
2297 /*
2298 * Translate an I2O ASCII field into a C string.
2299 */
2300 void
2301 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2302 {
2303 int hc, lc, i, nit;
2304
2305 dlen--;
2306 lc = 0;
2307 hc = 0;
2308 i = 0;
2309
	/*
	 * DPT use NUL as a space, whereas AMI use it as a terminator.  The
	 * spec has nothing to say about it.  Since AMI fields are usually
	 * filled with junk after the terminator, treat NUL as a terminator
	 * unless the board is from DPT.
	 */
2315 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2316
2317 while (slen-- != 0 && dlen-- != 0) {
2318 if (nit && *src == '\0')
2319 break;
2320 else if (*src <= 0x20 || *src >= 0x7f) {
2321 if (hc)
2322 dst[i++] = ' ';
2323 } else {
2324 hc = 1;
2325 dst[i++] = *src;
2326 lc = i;
2327 }
2328 src++;
2329 }
2330
2331 dst[lc] = '\0';
2332 }
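
/*
 * Examples of the conversion above (hypothetical field contents):
 * "FOO\0BAR" from a DPT board becomes "FOO BAR", since NUL is treated
 * as a space; "FOO\0junk" from any other vendor becomes "FOO", since
 * NUL terminates.  Trailing blanks are trimmed in either case.
 */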
2333
2334 /*
2335 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2336 */
2337 int
2338 iop_print_ident(struct iop_softc *sc, int tid)
2339 {
2340 struct {
2341 struct i2o_param_op_results pr;
2342 struct i2o_param_read_results prr;
2343 struct i2o_param_device_identity di;
2344 } __attribute__ ((__packed__)) p;
2345 char buf[32];
2346 int rv;
2347
2348 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2349 sizeof(p), NULL);
2350 if (rv != 0)
2351 return (rv);
2352
2353 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2354 sizeof(buf));
2355 printf(" <%s, ", buf);
2356 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2357 sizeof(buf));
2358 printf("%s, ", buf);
2359 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2360 printf("%s>", buf);
2361
2362 return (0);
2363 }
2364
2365 /*
2366 * Claim or unclaim the specified TID.
2367 */
2368 int
2369 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2370 int flags)
2371 {
2372 struct iop_msg *im;
2373 struct i2o_util_claim mf;
2374 int rv, func;
2375
2376 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2377 im = iop_msg_alloc(sc, IM_WAIT);
2378
2379 /* We can use the same structure, as they're identical. */
2380 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2381 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2382 mf.msgictx = ii->ii_ictx;
2383 mf.msgtctx = im->im_tctx;
2384 mf.flags = flags;
2385
2386 rv = iop_msg_post(sc, im, &mf, 5000);
2387 iop_msg_free(sc, im);
2388 return (rv);
2389 }
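
#ifdef notdef
/*
 * Example call, as a child device driver might make at attach time
 * (sketch; "ii" is an already-registered initiator, and the flag name
 * is assumed from i2o.h):
 */
	rv = iop_util_claim(sc, ii, 0, I2O_UTIL_CLAIM_PRIMARY_USER);
#endif	/* notdef */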
2390
2391 /*
2392 * Perform an abort.
2393 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
2396 {
2397 struct iop_msg *im;
2398 struct i2o_util_abort mf;
2399 int rv;
2400
2401 im = iop_msg_alloc(sc, IM_WAIT);
2402
2403 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2404 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2405 mf.msgictx = ii->ii_ictx;
2406 mf.msgtctx = im->im_tctx;
2407 mf.flags = (func << 24) | flags;
2408 mf.tctxabort = tctxabort;
2409
2410 rv = iop_msg_post(sc, im, &mf, 5000);
2411 iop_msg_free(sc, im);
2412 return (rv);
2413 }
2414
2415 /*
2416 * Enable or disable reception of events for the specified device.
2417 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2419 {
2420 struct i2o_util_event_register mf;
2421
2422 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2423 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2424 mf.msgictx = ii->ii_ictx;
2425 mf.msgtctx = 0;
2426 mf.eventmask = mask;
2427
2428 /* This message is replied to only when events are signalled. */
2429 return (iop_post(sc, (u_int32_t *)&mf));
2430 }
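
#ifdef notdef
/*
 * Example registration (sketch; the mask bit names are assumed from
 * i2o.h).  The reply arrives later, through the initiator's interrupt
 * handler, whenever a matching event is signalled:
 */
	iop_util_eventreg(sc, ii,
	    I2O_EVENT_GEN_DEVICE_RESET | I2O_EVENT_GEN_STATE_CHANGE);
#endif	/* notdef */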
2431
2432 int
2433 iopopen(dev_t dev, int flag, int mode, struct proc *p)
2434 {
2435 struct iop_softc *sc;
2436
2437 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2438 return (ENXIO);
2439 if ((sc->sc_flags & IOP_ONLINE) == 0)
2440 return (ENXIO);
2441 if ((sc->sc_flags & IOP_OPEN) != 0)
2442 return (EBUSY);
2443 sc->sc_flags |= IOP_OPEN;
2444
2445 return (0);
2446 }
2447
2448 int
2449 iopclose(dev_t dev, int flag, int mode, struct proc *p)
2450 {
2451 struct iop_softc *sc;
2452
2453 sc = device_lookup(&iop_cd, minor(dev));
2454 sc->sc_flags &= ~IOP_OPEN;
2455
2456 return (0);
2457 }
2458
2459 int
2460 iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
2461 {
2462 struct iop_softc *sc;
2463 struct iovec *iov;
2464 int rv, i;
2465
2466 if (securelevel >= 2)
2467 return (EPERM);
2468
2469 sc = device_lookup(&iop_cd, minor(dev));
2470
2471 switch (cmd) {
2472 case IOPIOCPT:
2473 return (iop_passthrough(sc, (struct ioppt *)data, p));
2474
2475 case IOPIOCGSTATUS:
2476 iov = (struct iovec *)data;
2477 i = sizeof(struct i2o_status);
2478 if (i > iov->iov_len)
2479 i = iov->iov_len;
2480 else
2481 iov->iov_len = i;
2482 if ((rv = iop_status_get(sc, 0)) == 0)
2483 rv = copyout(&sc->sc_status, iov->iov_base, i);
2484 return (rv);
2485
2486 case IOPIOCGLCT:
2487 case IOPIOCGTIDMAP:
2488 case IOPIOCRECONFIG:
2489 break;
2490
2491 default:
2492 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2493 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2494 #endif
2495 return (ENOTTY);
2496 }
2497
2498 if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2499 return (rv);
2500
2501 switch (cmd) {
2502 case IOPIOCGLCT:
2503 iov = (struct iovec *)data;
2504 i = le16toh(sc->sc_lct->tablesize) << 2;
2505 if (i > iov->iov_len)
2506 i = iov->iov_len;
2507 else
2508 iov->iov_len = i;
2509 rv = copyout(sc->sc_lct, iov->iov_base, i);
2510 break;
2511
2512 case IOPIOCRECONFIG:
2513 rv = iop_reconfigure(sc, 0);
2514 break;
2515
2516 case IOPIOCGTIDMAP:
2517 iov = (struct iovec *)data;
2518 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2519 if (i > iov->iov_len)
2520 i = iov->iov_len;
2521 else
2522 iov->iov_len = i;
2523 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2524 break;
2525 }
2526
2527 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2528 return (rv);
2529 }
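
/*
 * Userland view of IOPIOCGSTATUS (illustrative sketch; the device path
 * and error handling are assumptions, not part of this driver):
 *
 *	struct i2o_status st;
 *	struct iovec iov = { &st, sizeof(st) };
 *	int fd = open("/dev/iop0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, IOPIOCGSTATUS, &iov) == 0)
 *		printf("IOP state %d\n",
 *		    (int)((le32toh(st.segnumber) >> 16) & 0xff));
 */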
2530
2531 static int
2532 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2533 {
2534 struct iop_msg *im;
2535 struct i2o_msg *mf;
2536 struct ioppt_buf *ptb;
2537 int rv, i, mapped;
2538
2539 mf = NULL;
2540 im = NULL;
	mapped = 0;		/* set once a buffer has been mapped */
2542
2543 if (pt->pt_msglen > sc->sc_framesize ||
2544 pt->pt_msglen < sizeof(struct i2o_msg) ||
2545 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2546 pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2547 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2548 return (EINVAL);
2549
2550 for (i = 0; i < pt->pt_nbufs; i++)
2551 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2552 rv = ENOMEM;
2553 goto bad;
2554 }
2555
2556 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2557 if (mf == NULL)
2558 return (ENOMEM);
2559
2560 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2561 goto bad;
2562
2563 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2564 im->im_rb = (struct i2o_reply *)mf;
2565 mf->msgictx = IOP_ICTX;
2566 mf->msgtctx = im->im_tctx;
2567
2568 for (i = 0; i < pt->pt_nbufs; i++) {
2569 ptb = &pt->pt_bufs[i];
2570 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2571 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2572 if (rv != 0)
2573 goto bad;
2574 mapped = 1;
2575 }
2576
2577 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2578 goto bad;
2579
2580 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2581 if (i > sc->sc_framesize)
2582 i = sc->sc_framesize;
2583 if (i > pt->pt_replylen)
2584 i = pt->pt_replylen;
2585 rv = copyout(im->im_rb, pt->pt_reply, i);
2586
2587 bad:
2588 if (mapped != 0)
2589 iop_msg_unmap(sc, im);
2590 if (im != NULL)
2591 iop_msg_free(sc, im);
2592 if (mf != NULL)
2593 free(mf, M_DEVBUF);
2594 return (rv);
2595 }
2596
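/*
 * Userland view of IOPIOCPT (illustrative sketch; frame contents are
 * elided and the buffer sizes are hypothetical):
 *
 *	struct ioppt pt;
 *	u_int32_t msg[16], reply[64];
 *
 *	memset(&pt, 0, sizeof(pt));
 *	pt.pt_msg = msg;
 *	pt.pt_msglen = sizeof(msg);
 *	pt.pt_reply = reply;
 *	pt.pt_replylen = sizeof(reply);
 *	pt.pt_timo = 5000;
 *	... build the I2O frame in msg[], then: ioctl(fd, IOPIOCPT, &pt);
 */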