iop.c revision 1.62 1 /* $NetBSD: iop.c,v 1.62 2006/12/02 03:10:42 elad Exp $ */
2
3 /*-
4 * Copyright (c) 2000, 2001, 2002 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 /*
40 * Support for I2O IOPs (intelligent I/O processors).
41 */
42
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.62 2006/12/02 03:10:42 elad Exp $");
45
46 #include "opt_i2o.h"
47 #include "iop.h"
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/kernel.h>
52 #include <sys/device.h>
53 #include <sys/queue.h>
54 #include <sys/proc.h>
55 #include <sys/malloc.h>
56 #include <sys/ioctl.h>
57 #include <sys/endian.h>
58 #include <sys/conf.h>
59 #include <sys/kthread.h>
60 #include <sys/kauth.h>
61
62 #include <uvm/uvm_extern.h>
63
64 #include <machine/bus.h>
65
66 #include <dev/i2o/i2o.h>
67 #include <dev/i2o/iopio.h>
68 #include <dev/i2o/iopreg.h>
69 #include <dev/i2o/iopvar.h>
70
71 #include "locators.h"
72
73 #define POLL(ms, cond) \
74 do { \
75 int xi; \
76 for (xi = (ms) * 10; xi; xi--) { \
77 if (cond) \
78 break; \
79 DELAY(100); \
80 } \
81 } while (/* CONSTCOND */0);
82
83 #ifdef I2ODEBUG
84 #define DPRINTF(x) printf x
85 #else
86 #define DPRINTF(x)
87 #endif
88
89 #ifdef I2OVERBOSE
90 #define IFVERBOSE(x) x
91 #define COMMENT(x) NULL
92 #else
93 #define IFVERBOSE(x)
94 #define COMMENT(x)
95 #endif
96
97 #define IOP_ICTXHASH_NBUCKETS 16
98 #define IOP_ICTXHASH(ictx) (&iop_ictxhashtbl[(ictx) & iop_ictxhash])
99
100 #define IOP_MAX_SEGS (((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)
101
102 #define IOP_TCTX_SHIFT 12
103 #define IOP_TCTX_MASK ((1 << IOP_TCTX_SHIFT) - 1)
104
105 static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
106 static u_long iop_ictxhash;
107 static void *iop_sdh;
108 static struct i2o_systab *iop_systab;
109 static int iop_systab_size;
110
111 extern struct cfdriver iop_cd;
112
113 dev_type_open(iopopen);
114 dev_type_close(iopclose);
115 dev_type_ioctl(iopioctl);
116
117 const struct cdevsw iop_cdevsw = {
118 iopopen, iopclose, noread, nowrite, iopioctl,
119 nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
120 };
121
122 #define IC_CONFIGURE 0x01
123 #define IC_PRIORITY 0x02
124
/*
 * Table of I2O device classes known to this driver.  IC_CONFIGURE marks
 * classes for which child devices are attached; IC_PRIORITY marks classes
 * configured in the first pass of iop_configure_devices().  The caption
 * is attach-time cosmetics and is compiled out unless I2OVERBOSE is set.
 */
static struct iop_class {
	u_short	ic_class;	/* I2O class code */
	u_short	ic_flags;	/* IC_* flags, above */
#ifdef I2OVERBOSE
	const char	*ic_caption;	/* human-readable description */
#endif
} const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		IFVERBOSE("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port" )
	},
};
203
#if defined(I2ODEBUG) && defined(I2OVERBOSE)
/*
 * Reply status strings, indexed by the status code carried in an I2O
 * reply frame.  Used only for debug output (iop_reply_print()).
 */
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif
220
221 static inline u_int32_t iop_inl(struct iop_softc *, int);
222 static inline void iop_outl(struct iop_softc *, int, u_int32_t);
223
224 static inline u_int32_t iop_inl_msg(struct iop_softc *, int);
225 static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t);
226
227 static void iop_config_interrupts(struct device *);
228 static void iop_configure_devices(struct iop_softc *, int, int);
229 static void iop_devinfo(int, char *, size_t);
230 static int iop_print(void *, const char *);
231 static void iop_shutdown(void *);
232
233 static void iop_adjqparam(struct iop_softc *, int);
234 static void iop_create_reconf_thread(void *);
235 static int iop_handle_reply(struct iop_softc *, u_int32_t);
236 static int iop_hrt_get(struct iop_softc *);
237 static int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
238 static void iop_intr_event(struct device *, struct iop_msg *, void *);
239 static int iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
240 u_int32_t);
241 static void iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
242 static void iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
243 static int iop_ofifo_init(struct iop_softc *);
244 static int iop_passthrough(struct iop_softc *, struct ioppt *,
245 struct proc *);
246 static void iop_reconf_thread(void *);
247 static void iop_release_mfa(struct iop_softc *, u_int32_t);
248 static int iop_reset(struct iop_softc *);
249 static int iop_sys_enable(struct iop_softc *);
250 static int iop_systab_set(struct iop_softc *);
251 static void iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);
252
253 #ifdef I2ODEBUG
254 static void iop_reply_print(struct iop_softc *, struct i2o_reply *);
255 #endif
256
/*
 * Read a 32-bit value from the IOP's primary register space.  The
 * preceding barrier orders this read against earlier reads and writes
 * so that we observe the device's current state.
 */
static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}
265
/*
 * Write a 32-bit value to the IOP's primary register space.  The
 * trailing write barrier ensures the store is posted before any
 * subsequent register access.
 */
static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
274
/*
 * Read a 32-bit value from the IOP's message frame window (a separate
 * bus space from the primary registers).  Barrier ordering as in
 * iop_inl().
 */
static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}
283
/*
 * Write a 32-bit value to the IOP's message frame window.  Barrier
 * ordering as in iop_outl().
 */
static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
292
293 /*
294 * Initialise the IOP and our interface.
295 */
296 void
297 iop_init(struct iop_softc *sc, const char *intrstr)
298 {
299 struct iop_msg *im;
300 int rv, i, j, state, nsegs;
301 u_int32_t mask;
302 char ident[64];
303
304 state = 0;
305
306 printf("I2O adapter");
307
308 if (iop_ictxhashtbl == NULL)
309 iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
310 M_DEVBUF, M_NOWAIT, &iop_ictxhash);
311
312 /* Disable interrupts at the IOP. */
313 mask = iop_inl(sc, IOP_REG_INTR_MASK);
314 iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);
315
316 /* Allocate a scratch DMA map for small miscellaneous shared data. */
317 if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
318 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
319 printf("%s: cannot create scratch dmamap\n",
320 sc->sc_dv.dv_xname);
321 return;
322 }
323
324 if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
325 sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
326 printf("%s: cannot alloc scratch dmamem\n",
327 sc->sc_dv.dv_xname);
328 goto bail_out;
329 }
330 state++;
331
332 if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
333 &sc->sc_scr, 0)) {
334 printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
335 goto bail_out;
336 }
337 state++;
338
339 if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
340 PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
341 printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
342 goto bail_out;
343 }
344 state++;
345
346 #ifdef I2ODEBUG
347 /* So that our debug checks don't choke. */
348 sc->sc_framesize = 128;
349 #endif
350
351 /* Reset the adapter and request status. */
352 if ((rv = iop_reset(sc)) != 0) {
353 printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
354 goto bail_out;
355 }
356
357 if ((rv = iop_status_get(sc, 1)) != 0) {
358 printf("%s: not responding (get status)\n",
359 sc->sc_dv.dv_xname);
360 goto bail_out;
361 }
362
363 sc->sc_flags |= IOP_HAVESTATUS;
364 iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
365 ident, sizeof(ident));
366 printf(" <%s>\n", ident);
367
368 #ifdef I2ODEBUG
369 printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
370 le16toh(sc->sc_status.orgid),
371 (le32toh(sc->sc_status.segnumber) >> 12) & 15);
372 printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
373 printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
374 le32toh(sc->sc_status.desiredprivmemsize),
375 le32toh(sc->sc_status.currentprivmemsize),
376 le32toh(sc->sc_status.currentprivmembase));
377 printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
378 le32toh(sc->sc_status.desiredpriviosize),
379 le32toh(sc->sc_status.currentpriviosize),
380 le32toh(sc->sc_status.currentpriviobase));
381 #endif
382
383 sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
384 if (sc->sc_maxob > IOP_MAX_OUTBOUND)
385 sc->sc_maxob = IOP_MAX_OUTBOUND;
386 sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
387 if (sc->sc_maxib > IOP_MAX_INBOUND)
388 sc->sc_maxib = IOP_MAX_INBOUND;
389 sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
390 if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
391 sc->sc_framesize = IOP_MAX_MSG_SIZE;
392
393 #if defined(I2ODEBUG) || defined(DIAGNOSTIC)
394 if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
395 printf("%s: frame size too small (%d)\n",
396 sc->sc_dv.dv_xname, sc->sc_framesize);
397 goto bail_out;
398 }
399 #endif
400
401 /* Allocate message wrappers. */
402 im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
403 if (im == NULL) {
404 printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
405 goto bail_out;
406 }
407 state++;
408 sc->sc_ims = im;
409 SLIST_INIT(&sc->sc_im_freelist);
410
411 for (i = 0; i < sc->sc_maxib; i++, im++) {
412 rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
413 IOP_MAX_SEGS, IOP_MAX_XFER, 0,
414 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
415 &im->im_xfer[0].ix_map);
416 if (rv != 0) {
417 printf("%s: couldn't create dmamap (%d)",
418 sc->sc_dv.dv_xname, rv);
419 goto bail_out3;
420 }
421
422 im->im_tctx = i;
423 SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
424 }
425
426 /* Initialise the IOP's outbound FIFO. */
427 if (iop_ofifo_init(sc) != 0) {
428 printf("%s: unable to init oubound FIFO\n",
429 sc->sc_dv.dv_xname);
430 goto bail_out3;
431 }
432
433 /*
434 * Defer further configuration until (a) interrupts are working and
435 * (b) we have enough information to build the system table.
436 */
437 config_interrupts((struct device *)sc, iop_config_interrupts);
438
439 /* Configure shutdown hook before we start any device activity. */
440 if (iop_sdh == NULL)
441 iop_sdh = shutdownhook_establish(iop_shutdown, NULL);
442
443 /* Ensure interrupts are enabled at the IOP. */
444 mask = iop_inl(sc, IOP_REG_INTR_MASK);
445 iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);
446
447 if (intrstr != NULL)
448 printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
449 intrstr);
450
451 #ifdef I2ODEBUG
452 printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
453 sc->sc_dv.dv_xname, sc->sc_maxib,
454 le32toh(sc->sc_status.maxinboundmframes),
455 sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
456 #endif
457
458 lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
459 return;
460
461 bail_out3:
462 if (state > 3) {
463 for (j = 0; j < i; j++)
464 bus_dmamap_destroy(sc->sc_dmat,
465 sc->sc_ims[j].im_xfer[0].ix_map);
466 free(sc->sc_ims, M_DEVBUF);
467 }
468 bail_out:
469 if (state > 2)
470 bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
471 if (state > 1)
472 bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
473 if (state > 0)
474 bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
475 bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
476 }
477
478 /*
479 * Perform autoconfiguration tasks.
480 */
481 static void
482 iop_config_interrupts(struct device *self)
483 {
484 struct iop_attach_args ia;
485 struct iop_softc *sc, *iop;
486 struct i2o_systab_entry *ste;
487 int rv, i, niop;
488 int locs[IOPCF_NLOCS];
489
490 sc = device_private(self);
491 LIST_INIT(&sc->sc_iilist);
492
493 printf("%s: configuring...\n", sc->sc_dv.dv_xname);
494
495 if (iop_hrt_get(sc) != 0) {
496 printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
497 return;
498 }
499
500 /*
501 * Build the system table.
502 */
503 if (iop_systab == NULL) {
504 for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
505 if ((iop = device_lookup(&iop_cd, i)) == NULL)
506 continue;
507 if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
508 continue;
509 if (iop_status_get(iop, 1) != 0) {
510 printf("%s: unable to retrieve status\n",
511 sc->sc_dv.dv_xname);
512 iop->sc_flags &= ~IOP_HAVESTATUS;
513 continue;
514 }
515 niop++;
516 }
517 if (niop == 0)
518 return;
519
520 i = sizeof(struct i2o_systab_entry) * (niop - 1) +
521 sizeof(struct i2o_systab);
522 iop_systab_size = i;
523 iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);
524
525 iop_systab->numentries = niop;
526 iop_systab->version = I2O_VERSION_11;
527
528 for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
529 if ((iop = device_lookup(&iop_cd, i)) == NULL)
530 continue;
531 if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
532 continue;
533
534 ste->orgid = iop->sc_status.orgid;
535 ste->iopid = device_unit(&iop->sc_dv) + 2;
536 ste->segnumber =
537 htole32(le32toh(iop->sc_status.segnumber) & ~4095);
538 ste->iopcaps = iop->sc_status.iopcaps;
539 ste->inboundmsgframesize =
540 iop->sc_status.inboundmframesize;
541 ste->inboundmsgportaddresslow =
542 htole32(iop->sc_memaddr + IOP_REG_IFIFO);
543 ste++;
544 }
545 }
546
547 /*
548 * Post the system table to the IOP and bring it to the OPERATIONAL
549 * state.
550 */
551 if (iop_systab_set(sc) != 0) {
552 printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
553 return;
554 }
555 if (iop_sys_enable(sc) != 0) {
556 printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
557 return;
558 }
559
560 /*
561 * Set up an event handler for this IOP.
562 */
563 sc->sc_eventii.ii_dv = self;
564 sc->sc_eventii.ii_intr = iop_intr_event;
565 sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
566 sc->sc_eventii.ii_tid = I2O_TID_IOP;
567 iop_initiator_register(sc, &sc->sc_eventii);
568
569 rv = iop_util_eventreg(sc, &sc->sc_eventii,
570 I2O_EVENT_EXEC_RESOURCE_LIMITS |
571 I2O_EVENT_EXEC_CONNECTION_FAIL |
572 I2O_EVENT_EXEC_ADAPTER_FAULT |
573 I2O_EVENT_EXEC_POWER_FAIL |
574 I2O_EVENT_EXEC_RESET_PENDING |
575 I2O_EVENT_EXEC_RESET_IMMINENT |
576 I2O_EVENT_EXEC_HARDWARE_FAIL |
577 I2O_EVENT_EXEC_XCT_CHANGE |
578 I2O_EVENT_EXEC_DDM_AVAILIBILITY |
579 I2O_EVENT_GEN_DEVICE_RESET |
580 I2O_EVENT_GEN_STATE_CHANGE |
581 I2O_EVENT_GEN_GENERAL_WARNING);
582 if (rv != 0) {
583 printf("%s: unable to register for events", sc->sc_dv.dv_xname);
584 return;
585 }
586
587 /*
588 * Attempt to match and attach a product-specific extension.
589 */
590 ia.ia_class = I2O_CLASS_ANY;
591 ia.ia_tid = I2O_TID_IOP;
592 locs[IOPCF_TID] = I2O_TID_IOP;
593 config_found_sm_loc(self, "iop", locs, &ia, iop_print,
594 config_stdsubmatch);
595
596 /*
597 * Start device configuration.
598 */
599 lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
600 if ((rv = iop_reconfigure(sc, 0)) == -1) {
601 printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
602 return;
603 }
604 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
605
606 kthread_create(iop_create_reconf_thread, sc);
607 }
608
609 /*
610 * Create the reconfiguration thread. Called after the standard kernel
611 * threads have been created.
612 */
613 static void
614 iop_create_reconf_thread(void *cookie)
615 {
616 struct iop_softc *sc;
617 int rv;
618
619 sc = cookie;
620 sc->sc_flags |= IOP_ONLINE;
621
622 rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
623 "%s", sc->sc_dv.dv_xname);
624 if (rv != 0) {
625 printf("%s: unable to create reconfiguration thread (%d)",
626 sc->sc_dv.dv_xname, rv);
627 return;
628 }
629 }
630
/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 *
 * Never returns: each iteration posts a blocking LCT-notify request for
 * a change indicator one past the last seen value, runs
 * iop_reconfigure() under sc_conflock when a change arrives, then
 * sleeps five seconds before re-arming.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct lwp *l;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	/* Wait for a change indicator newer than the one we already have. */
	chgind = sc->sc_chgind + 1;
	l = curlwp;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		/* Hold the LWP in memory across the (long) blocking request. */
		PHOLD(l);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(l);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		/* Throttle: re-arm at most once every five seconds. */
		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}
669
/*
 * Reconfigure: find new and removed devices.
 *
 * If "chgind" is zero this is an explicit (non-notification) request:
 * every bus-adapter port in the LCT is asked to rescan its bus first.
 * Otherwise "chgind" is the change indicator from an LCT change
 * notification and the scan is skipped if it is not newer than the
 * last one handled.  Returns 0 on success or no change; otherwise an
 * error from iop_lct_get().  Caller must hold sc_conflock.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			/* TIDs occupy the low 12 bits of the field. */
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			/* Wait up to 5 minutes for the scan to finish. */
			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	/* Rebuild the TID map to match the new LCT. */
	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	}
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		/* Grab the next pointer first: config_detach may free ii. */
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
			config_detach(ii->ii_dv, DETACH_FORCE);
			continue;
		}

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}
794
795 /*
796 * Configure I2O devices into the system.
797 */
798 static void
799 iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
800 {
801 struct iop_attach_args ia;
802 struct iop_initiator *ii;
803 const struct i2o_lct_entry *le;
804 struct device *dv;
805 int i, j, nent;
806 u_int usertid;
807 int locs[IOPCF_NLOCS];
808
809 nent = sc->sc_nlctent;
810 for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
811 sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;
812
813 /* Ignore the device if it's in use. */
814 usertid = le32toh(le->usertid) & 4095;
815 if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
816 continue;
817
818 ia.ia_class = le16toh(le->classid) & 4095;
819 ia.ia_tid = sc->sc_tidmap[i].it_tid;
820
821 /* Ignore uninteresting devices. */
822 for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
823 if (iop_class[j].ic_class == ia.ia_class)
824 break;
825 if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
826 (iop_class[j].ic_flags & mask) != maskval)
827 continue;
828
829 /*
830 * Try to configure the device only if it's not already
831 * configured.
832 */
833 LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
834 if (ia.ia_tid == ii->ii_tid) {
835 sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
836 strcpy(sc->sc_tidmap[i].it_dvname,
837 ii->ii_dv->dv_xname);
838 break;
839 }
840 }
841 if (ii != NULL)
842 continue;
843
844 locs[IOPCF_TID] = ia.ia_tid;
845
846 dv = config_found_sm_loc(&sc->sc_dv, "iop", locs, &ia,
847 iop_print, config_stdsubmatch);
848 if (dv != NULL) {
849 sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
850 strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
851 }
852 }
853 }
854
855 /*
856 * Adjust queue parameters for all child devices.
857 */
858 static void
859 iop_adjqparam(struct iop_softc *sc, int mpi)
860 {
861 struct iop_initiator *ii;
862
863 LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
864 if (ii->ii_adjqparam != NULL)
865 (*ii->ii_adjqparam)(ii->ii_dv, mpi);
866 }
867
/*
 * Format a human-readable description of an I2O device class into
 * "devinfo" (at most "l" bytes).  With I2OVERBOSE, known classes get
 * their caption from the class table; otherwise, and for unknown
 * classes, a generic "device (class 0x..)" string is produced.
 */
static void
iop_devinfo(int class, char *devinfo, size_t l)
{
#ifdef I2OVERBOSE
	size_t n;

	for (n = 0; n < sizeof(iop_class) / sizeof(iop_class[0]); n++) {
		if (iop_class[n].ic_class == class) {
			strlcpy(devinfo, iop_class[n].ic_caption, l);
			return;
		}
	}
	snprintf(devinfo, l, "device (class 0x%x)", class);
#else

	snprintf(devinfo, l, "device (class 0x%x)", class);
#endif
}
887
888 static int
889 iop_print(void *aux, const char *pnp)
890 {
891 struct iop_attach_args *ia;
892 char devinfo[256];
893
894 ia = aux;
895
896 if (pnp != NULL) {
897 iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
898 aprint_normal("%s at %s", devinfo, pnp);
899 }
900 aprint_normal(" tid %d", ia->ia_tid);
901 return (UNCONF);
902 }
903
904 /*
905 * Shut down all configured IOPs.
906 */
907 static void
908 iop_shutdown(void *junk)
909 {
910 struct iop_softc *sc;
911 int i;
912
913 printf("shutting down iop devices...");
914
915 for (i = 0; i < iop_cd.cd_ndevs; i++) {
916 if ((sc = device_lookup(&iop_cd, i)) == NULL)
917 continue;
918 if ((sc->sc_flags & IOP_ONLINE) == 0)
919 continue;
920
921 iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
922 0, 5000);
923
924 if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
925 /*
926 * Some AMI firmware revisions will go to sleep and
927 * never come back after this.
928 */
929 iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
930 IOP_ICTX, 0, 1000);
931 }
932 }
933
934 /* Wait. Some boards could still be flushing, stupidly enough. */
935 delay(5000*1000);
936 printf(" done\n");
937 }
938
/*
 * Retrieve IOP status.
 *
 * Posts an EXEC_STATUS_GET and polls the scratch DMA area for the
 * completion sync byte (0xff), waiting up to 25 intervals of 100ms
 * (busy-wait if "nosleep", tsleep otherwise).  On success the status
 * block is copied into sc->sc_status.  Returns 0, EIO on timeout, or
 * an iop_post() error.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	/* The reply lands in the scratch DMA area, not a reply frame. */
	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	/* Clear the buffer (incl. syncbyte) before handing it to the IOP. */
	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	for (i = 25; i != 0; i--) {
		/* Re-sync before each peek so we see the device's writes. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff) {
		printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}
991
/*
 * Initialize and populate the IOP's outbound FIFO.
 *
 * Posts EXEC_OUTBOUND_INIT with a single SGL pointing at a status word
 * in the scratch DMA area, polls (up to 5s) for INIT_COMPLETE, then
 * allocates DMA-safe reply-frame memory (first call only) and pushes
 * one reply MFA per outbound frame onto the FIFO.  Returns 0 or an
 * errno-style failure code.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	/* Status word lives at the start of the scratch DMA area. */
	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	/* Frame size is expressed in 32-bit words, in the high 16 bits. */
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	/* Account for the two appended SGL words in the message size. */
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	/* Poll (syncing before each peek) for the completion marker. */
	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA alloc = %d\n", sc->sc_dv.dv_xname,
			   rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: DMA map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: DMA create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO: one MFA per reply frame. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}
1089
/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 *
 * Posts EXEC_HRT_GET with "hrt"/"size" mapped as the reply buffer and
 * waits (up to 30s) for completion.  Returns the iop_msg_post() result.
 * NOTE(review): the iop_msg_map() result is not checked here — a
 * mapping failure would post with no buffer; verify against the
 * iop_msg_map() contract before relying on this path.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}
1114
1115 /*
1116 * Read the IOP's hardware resource table.
1117 */
1118 static int
1119 iop_hrt_get(struct iop_softc *sc)
1120 {
1121 struct i2o_hrt hrthdr, *hrt;
1122 int size, rv;
1123
1124 PHOLD(curlwp);
1125 rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
1126 PRELE(curlwp);
1127 if (rv != 0)
1128 return (rv);
1129
1130 DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
1131 le16toh(hrthdr.numentries)));
1132
1133 size = sizeof(struct i2o_hrt) +
1134 (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
1135 hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
1136
1137 if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
1138 free(hrt, M_DEVBUF);
1139 return (rv);
1140 }
1141
1142 if (sc->sc_hrt != NULL)
1143 free(sc->sc_hrt, M_DEVBUF);
1144 sc->sc_hrt = hrt;
1145 return (0);
1146 }
1147
1148 /*
1149 * Request the specified number of bytes from the IOP's logical
1150 * configuration table. If a change indicator is specified, this
1151 * is a verbatim notification request, so the caller is prepared
1152 * to wait indefinitely.
1153 */
1154 static int
1155 iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
1156 u_int32_t chgind)
1157 {
1158 struct iop_msg *im;
1159 struct i2o_exec_lct_notify *mf;
1160 int rv;
1161 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1162
1163 im = iop_msg_alloc(sc, IM_WAIT);
1164 memset(lct, 0, size);
1165
1166 mf = (struct i2o_exec_lct_notify *)mb;
1167 mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
1168 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
1169 mf->msgictx = IOP_ICTX;
1170 mf->msgtctx = im->im_tctx;
1171 mf->classid = I2O_CLASS_ANY;
1172 mf->changeindicator = chgind;
1173
1174 #ifdef I2ODEBUG
1175 printf("iop_lct_get0: reading LCT");
1176 if (chgind != 0)
1177 printf(" (async)");
1178 printf("\n");
1179 #endif
1180
1181 iop_msg_map(sc, im, mb, lct, size, 0, NULL);
1182 rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
1183 iop_msg_unmap(sc, im);
1184 iop_msg_free(sc, im);
1185 return (rv);
1186 }
1187
1188 /*
1189 * Read the IOP's logical configuration table.
1190 */
1191 int
1192 iop_lct_get(struct iop_softc *sc)
1193 {
1194 int esize, size, rv;
1195 struct i2o_lct *lct;
1196
1197 esize = le32toh(sc->sc_status.expectedlctsize);
1198 lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
1199 if (lct == NULL)
1200 return (ENOMEM);
1201
1202 if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
1203 free(lct, M_DEVBUF);
1204 return (rv);
1205 }
1206
1207 size = le16toh(lct->tablesize) << 2;
1208 if (esize != size) {
1209 free(lct, M_DEVBUF);
1210 lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
1211 if (lct == NULL)
1212 return (ENOMEM);
1213
1214 if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
1215 free(lct, M_DEVBUF);
1216 return (rv);
1217 }
1218 }
1219
1220 /* Swap in the new LCT. */
1221 if (sc->sc_lct != NULL)
1222 free(sc->sc_lct, M_DEVBUF);
1223 sc->sc_lct = lct;
1224 sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
1225 sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
1226 sizeof(struct i2o_lct_entry);
1227 return (0);
1228 }
1229
1230 /*
1231 * Post a SYS_ENABLE message to the adapter.
1232 */
1233 int
1234 iop_sys_enable(struct iop_softc *sc)
1235 {
1236 struct iop_msg *im;
1237 struct i2o_msg mf;
1238 int rv;
1239
1240 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
1241
1242 mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1243 mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
1244 mf.msgictx = IOP_ICTX;
1245 mf.msgtctx = im->im_tctx;
1246
1247 rv = iop_msg_post(sc, im, &mf, 30000);
1248 if (rv == 0) {
1249 if ((im->im_flags & IM_FAIL) != 0)
1250 rv = ENXIO;
1251 else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
1252 (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
1253 im->im_detstatus == I2O_DSC_INVALID_REQUEST))
1254 rv = 0;
1255 else
1256 rv = EIO;
1257 }
1258
1259 iop_msg_free(sc, im);
1260 return (rv);
1261 }
1262
1263 /*
1264 * Request the specified parameter group from the target. If an initiator
1265 * is specified (a) don't wait for the operation to complete, but instead
1266 * let the initiator's interrupt handler deal with the reply and (b) place a
1267 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
1268 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
    int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	/*
	 * Synchronous callers (ii == NULL) sleep for the reply; otherwise
	 * the initiator's interrupt handler sees it, and the wrapper is
	 * left live for that handler to clean up.
	 */
	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	/* Let an async initiator find (and free) the op from its handler. */
	im->im_dvcontext = pgop;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	/* One operation: fetch every field (0xffff) of the given group. */
	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	/* Wire the lwp while its stack/buffers are DMA targets. */
	if (ii == NULL)
		PHOLD(curlwp);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curlwp);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	/*
	 * Synchronous calls, and async posts that failed outright, are
	 * cleaned up here; successful async ops stay mapped for the
	 * initiator's handler.
	 */
	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}
1331
1332 /*
1333 * Set a single field in a scalar parameter group.
1334 */
1335 int
1336 iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
1337 int size, int field)
1338 {
1339 struct iop_msg *im;
1340 struct i2o_util_params_op *mf;
1341 struct iop_pgop *pgop;
1342 int rv, totsize;
1343 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1344
1345 totsize = sizeof(*pgop) + size;
1346
1347 im = iop_msg_alloc(sc, IM_WAIT);
1348 if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
1349 iop_msg_free(sc, im);
1350 return (ENOMEM);
1351 }
1352
1353 mf = (struct i2o_util_params_op *)mb;
1354 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1355 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1356 mf->msgictx = IOP_ICTX;
1357 mf->msgtctx = im->im_tctx;
1358 mf->flags = 0;
1359
1360 pgop->olh.count = htole16(1);
1361 pgop->olh.reserved = htole16(0);
1362 pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
1363 pgop->oat.fieldcount = htole16(1);
1364 pgop->oat.group = htole16(group);
1365 pgop->oat.fields[0] = htole16(field);
1366 memcpy(pgop + 1, buf, size);
1367
1368 iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
1369 rv = iop_msg_post(sc, im, mb, 30000);
1370 if (rv != 0)
1371 printf("%s: FIELD_SET failed for tid %d group %d\n",
1372 sc->sc_dv.dv_xname, tid, group);
1373
1374 iop_msg_unmap(sc, im);
1375 iop_msg_free(sc, im);
1376 free(pgop, M_DEVBUF);
1377 return (rv);
1378 }
1379
1380 /*
1381 * Delete all rows in a tablular parameter group.
1382 */
1383 int
1384 iop_table_clear(struct iop_softc *sc, int tid, int group)
1385 {
1386 struct iop_msg *im;
1387 struct i2o_util_params_op *mf;
1388 struct iop_pgop pgop;
1389 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1390 int rv;
1391
1392 im = iop_msg_alloc(sc, IM_WAIT);
1393
1394 mf = (struct i2o_util_params_op *)mb;
1395 mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1396 mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1397 mf->msgictx = IOP_ICTX;
1398 mf->msgtctx = im->im_tctx;
1399 mf->flags = 0;
1400
1401 pgop.olh.count = htole16(1);
1402 pgop.olh.reserved = htole16(0);
1403 pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
1404 pgop.oat.fieldcount = htole16(0);
1405 pgop.oat.group = htole16(group);
1406 pgop.oat.fields[0] = htole16(0);
1407
1408 PHOLD(curlwp);
1409 iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
1410 rv = iop_msg_post(sc, im, mb, 30000);
1411 if (rv != 0)
1412 printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
1413 sc->sc_dv.dv_xname, tid, group);
1414
1415 iop_msg_unmap(sc, im);
1416 PRELE(curlwp);
1417 iop_msg_free(sc, im);
1418 return (rv);
1419 }
1420
1421 /*
1422 * Add a single row to a tabular parameter group. The row can have only one
1423 * field.
1424 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	/*
	 * Two extra 16-bit values (RowCount, KeyValue) are written past
	 * the operation header, followed by the row data.
	 * NOTE(review): this assumes struct iop_pgop declares a single
	 * fields[] slot — confirm against the i2o headers.
	 */
	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	/* Build the PARAMS_SET request header. */
	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	/* One ROW_ADD operation, one field per row. */
	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	/* Row data follows the three 16-bit values above. */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}
1471
1472 /*
1473 * Execute a simple command (no parameters).
1474 */
1475 int
1476 iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
1477 int async, int timo)
1478 {
1479 struct iop_msg *im;
1480 struct i2o_msg mf;
1481 int rv, fl;
1482
1483 fl = (async != 0 ? IM_WAIT : IM_POLL);
1484 im = iop_msg_alloc(sc, fl);
1485
1486 mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1487 mf.msgfunc = I2O_MSGFUNC(tid, function);
1488 mf.msgictx = ictx;
1489 mf.msgtctx = im->im_tctx;
1490
1491 rv = iop_msg_post(sc, im, &mf, timo);
1492 iop_msg_free(sc, im);
1493 return (rv);
1494 }
1495
1496 /*
1497 * Post the system table to the IOP.
1498 */
1499 static int
1500 iop_systab_set(struct iop_softc *sc)
1501 {
1502 struct i2o_exec_sys_tab_set *mf;
1503 struct iop_msg *im;
1504 bus_space_handle_t bsh;
1505 bus_addr_t boo;
1506 u_int32_t mema[2], ioa[2];
1507 int rv;
1508 u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1509
1510 im = iop_msg_alloc(sc, IM_WAIT);
1511
1512 mf = (struct i2o_exec_sys_tab_set *)mb;
1513 mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
1514 mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
1515 mf->msgictx = IOP_ICTX;
1516 mf->msgtctx = im->im_tctx;
1517 mf->iopid = (device_unit(&sc->sc_dv) + 2) << 12;
1518 mf->segnumber = 0;
1519
1520 mema[1] = sc->sc_status.desiredprivmemsize;
1521 ioa[1] = sc->sc_status.desiredpriviosize;
1522
1523 if (mema[1] != 0) {
1524 rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
1525 le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
1526 mema[0] = htole32(boo);
1527 if (rv != 0) {
1528 printf("%s: can't alloc priv mem space, err = %d\n",
1529 sc->sc_dv.dv_xname, rv);
1530 mema[0] = 0;
1531 mema[1] = 0;
1532 }
1533 }
1534
1535 if (ioa[1] != 0) {
1536 rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
1537 le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
1538 ioa[0] = htole32(boo);
1539 if (rv != 0) {
1540 printf("%s: can't alloc priv i/o space, err = %d\n",
1541 sc->sc_dv.dv_xname, rv);
1542 ioa[0] = 0;
1543 ioa[1] = 0;
1544 }
1545 }
1546
1547 PHOLD(curlwp);
1548 iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
1549 iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
1550 iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
1551 rv = iop_msg_post(sc, im, mb, 5000);
1552 iop_msg_unmap(sc, im);
1553 iop_msg_free(sc, im);
1554 PRELE(curlwp);
1555 return (rv);
1556 }
1557
1558 /*
1559 * Reset the IOP. Must be called with interrupts disabled.
1560 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	/* The IOP writes its reset status into this DMA-visible word. */
	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	/* Split the 64-bit physical address of the status word. */
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	/* Clear the status word before handing it to the IOP. */
	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	/* Spin up to 2.5s for the IOP to write a non-zero status. */
	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state. Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	/* Return the frame we just drained without using it. */
	iop_release_mfa(sc, mfa);
	return (0);
}
1610
1611 /*
1612 * Register a new initiator. Must be called with the configuration lock
1613 * held.
1614 */
1615 void
1616 iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
1617 {
1618 static int ictxgen;
1619 int s;
1620
1621 /* 0 is reserved (by us) for system messages. */
1622 ii->ii_ictx = ++ictxgen;
1623
1624 /*
1625 * `Utility initiators' don't make it onto the per-IOP initiator list
1626 * (which is used only for configuration), but do get one slot on
1627 * the inbound queue.
1628 */
1629 if ((ii->ii_flags & II_UTILITY) == 0) {
1630 LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
1631 sc->sc_nii++;
1632 } else
1633 sc->sc_nuii++;
1634
1635 s = splbio();
1636 LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
1637 splx(s);
1638 }
1639
1640 /*
1641 * Unregister an initiator. Must be called with the configuration lock
1642 * held.
1643 */
1644 void
1645 iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
1646 {
1647 int s;
1648
1649 if ((ii->ii_flags & II_UTILITY) == 0) {
1650 LIST_REMOVE(ii, ii_list);
1651 sc->sc_nii--;
1652 } else
1653 sc->sc_nuii--;
1654
1655 s = splbio();
1656 LIST_REMOVE(ii, ii_hash);
1657 splx(s);
1658 }
1659
1660 /*
1661 * Handle a reply frame from the IOP.
1662 */
1663 static int
1664 iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
1665 {
1666 struct iop_msg *im;
1667 struct i2o_reply *rb;
1668 struct i2o_fault_notify *fn;
1669 struct iop_initiator *ii;
1670 u_int off, ictx, tctx, status, size;
1671
1672 off = (int)(rmfa - sc->sc_rep_phys);
1673 rb = (struct i2o_reply *)(sc->sc_rep + off);
1674
1675 /* Perform reply queue DMA synchronisation. */
1676 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
1677 sc->sc_framesize, BUS_DMASYNC_POSTREAD);
1678 if (--sc->sc_curib != 0)
1679 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
1680 0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);
1681
1682 #ifdef I2ODEBUG
1683 if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
1684 panic("iop_handle_reply: 64-bit reply");
1685 #endif
1686 /*
1687 * Find the initiator.
1688 */
1689 ictx = le32toh(rb->msgictx);
1690 if (ictx == IOP_ICTX)
1691 ii = NULL;
1692 else {
1693 ii = LIST_FIRST(IOP_ICTXHASH(ictx));
1694 for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
1695 if (ii->ii_ictx == ictx)
1696 break;
1697 if (ii == NULL) {
1698 #ifdef I2ODEBUG
1699 iop_reply_print(sc, rb);
1700 #endif
1701 printf("%s: WARNING: bad ictx returned (%x)\n",
1702 sc->sc_dv.dv_xname, ictx);
1703 return (-1);
1704 }
1705 }
1706
1707 /*
1708 * If we received a transport failure notice, we've got to dig the
1709 * transaction context (if any) out of the original message frame,
1710 * and then release the original MFA back to the inbound FIFO.
1711 */
1712 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
1713 status = I2O_STATUS_SUCCESS;
1714
1715 fn = (struct i2o_fault_notify *)rb;
1716 tctx = iop_inl_msg(sc, fn->lowmfa + 12);
1717 iop_release_mfa(sc, fn->lowmfa);
1718 iop_tfn_print(sc, fn);
1719 } else {
1720 status = rb->reqstatus;
1721 tctx = le32toh(rb->msgtctx);
1722 }
1723
1724 if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
1725 /*
1726 * This initiator tracks state using message wrappers.
1727 *
1728 * Find the originating message wrapper, and if requested
1729 * notify the initiator.
1730 */
1731 im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
1732 if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
1733 (im->im_flags & IM_ALLOCED) == 0 ||
1734 tctx != im->im_tctx) {
1735 printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
1736 sc->sc_dv.dv_xname, tctx, im);
1737 if (im != NULL)
1738 printf("%s: flags=0x%08x tctx=0x%08x\n",
1739 sc->sc_dv.dv_xname, im->im_flags,
1740 im->im_tctx);
1741 #ifdef I2ODEBUG
1742 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
1743 iop_reply_print(sc, rb);
1744 #endif
1745 return (-1);
1746 }
1747
1748 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1749 im->im_flags |= IM_FAIL;
1750
1751 #ifdef I2ODEBUG
1752 if ((im->im_flags & IM_REPLIED) != 0)
1753 panic("%s: dup reply", sc->sc_dv.dv_xname);
1754 #endif
1755 im->im_flags |= IM_REPLIED;
1756
1757 #ifdef I2ODEBUG
1758 if (status != I2O_STATUS_SUCCESS)
1759 iop_reply_print(sc, rb);
1760 #endif
1761 im->im_reqstatus = status;
1762 im->im_detstatus = le16toh(rb->detail);
1763
1764 /* Copy the reply frame, if requested. */
1765 if (im->im_rb != NULL) {
1766 size = (le32toh(rb->msgflags) >> 14) & ~3;
1767 #ifdef I2ODEBUG
1768 if (size > sc->sc_framesize)
1769 panic("iop_handle_reply: reply too large");
1770 #endif
1771 memcpy(im->im_rb, rb, size);
1772 }
1773
1774 /* Notify the initiator. */
1775 if ((im->im_flags & IM_WAIT) != 0)
1776 wakeup(im);
1777 else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
1778 if (ii)
1779 (*ii->ii_intr)(ii->ii_dv, im, rb);
1780 }
1781 } else {
1782 /*
1783 * This initiator discards message wrappers.
1784 *
1785 * Simply pass the reply frame to the initiator.
1786 */
1787 if (ii)
1788 (*ii->ii_intr)(ii->ii_dv, NULL, rb);
1789 }
1790
1791 return (status);
1792 }
1793
1794 /*
1795 * Handle an interrupt from the IOP.
1796 */
1797 int
1798 iop_intr(void *arg)
1799 {
1800 struct iop_softc *sc;
1801 u_int32_t rmfa;
1802
1803 sc = arg;
1804
1805 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
1806 return (0);
1807
1808 for (;;) {
1809 /* Double read to account for IOP bug. */
1810 if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
1811 rmfa = iop_inl(sc, IOP_REG_OFIFO);
1812 if (rmfa == IOP_MFA_EMPTY)
1813 break;
1814 }
1815 iop_handle_reply(sc, rmfa);
1816 iop_outl(sc, IOP_REG_OFIFO, rmfa);
1817 }
1818
1819 return (1);
1820 }
1821
1822 /*
1823 * Handle an event signalled by the executive.
1824 */
1825 static void
1826 iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
1827 {
1828 struct i2o_util_event_register_reply *rb;
1829 u_int event;
1830
1831 rb = reply;
1832
1833 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1834 return;
1835
1836 event = le32toh(rb->event);
1837 printf("%s: event 0x%08x received\n", dv->dv_xname, event);
1838 }
1839
1840 /*
1841 * Allocate a message wrapper.
1842 */
1843 struct iop_msg *
1844 iop_msg_alloc(struct iop_softc *sc, int flags)
1845 {
1846 struct iop_msg *im;
1847 static u_int tctxgen;
1848 int s, i;
1849
1850 #ifdef I2ODEBUG
1851 if ((flags & IM_SYSMASK) != 0)
1852 panic("iop_msg_alloc: system flags specified");
1853 #endif
1854
1855 s = splbio();
1856 im = SLIST_FIRST(&sc->sc_im_freelist);
1857 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
1858 if (im == NULL)
1859 panic("iop_msg_alloc: no free wrappers");
1860 #endif
1861 SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
1862 splx(s);
1863
1864 im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
1865 tctxgen += (1 << IOP_TCTX_SHIFT);
1866 im->im_flags = flags | IM_ALLOCED;
1867 im->im_rb = NULL;
1868 i = 0;
1869 do {
1870 im->im_xfer[i++].ix_size = 0;
1871 } while (i < IOP_MAX_MSG_XFERS);
1872
1873 return (im);
1874 }
1875
1876 /*
1877 * Free a message wrapper.
1878 */
1879 void
1880 iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
1881 {
1882 int s;
1883
1884 #ifdef I2ODEBUG
1885 if ((im->im_flags & IM_ALLOCED) == 0)
1886 panic("iop_msg_free: wrapper not allocated");
1887 #endif
1888
1889 im->im_flags = 0;
1890 s = splbio();
1891 SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
1892 splx(s);
1893 }
1894
1895 /*
1896 * Map a data transfer. Write a scatter-gather list into the message frame.
1897 */
1898 int
1899 iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1900 void *xferaddr, int xfersize, int out, struct proc *up)
1901 {
1902 bus_dmamap_t dm;
1903 bus_dma_segment_t *ds;
1904 struct iop_xfer *ix;
1905 u_int rv, i, nsegs, flg, off, xn;
1906 u_int32_t *p;
1907
1908 for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
1909 if (ix->ix_size == 0)
1910 break;
1911
1912 #ifdef I2ODEBUG
1913 if (xfersize == 0)
1914 panic("iop_msg_map: null transfer");
1915 if (xfersize > IOP_MAX_XFER)
1916 panic("iop_msg_map: transfer too large");
1917 if (xn == IOP_MAX_MSG_XFERS)
1918 panic("iop_msg_map: too many xfers");
1919 #endif
1920
1921 /*
1922 * Only the first DMA map is static.
1923 */
1924 if (xn != 0) {
1925 rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
1926 IOP_MAX_SEGS, IOP_MAX_XFER, 0,
1927 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
1928 if (rv != 0)
1929 return (rv);
1930 }
1931
1932 dm = ix->ix_map;
1933 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
1934 (up == NULL ? BUS_DMA_NOWAIT : 0));
1935 if (rv != 0)
1936 goto bad;
1937
1938 /*
1939 * How many SIMPLE SG elements can we fit in this message?
1940 */
1941 off = mb[0] >> 16;
1942 p = mb + off;
1943 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
1944
1945 if (dm->dm_nsegs > nsegs) {
1946 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
1947 rv = EFBIG;
1948 DPRINTF(("iop_msg_map: too many segs\n"));
1949 goto bad;
1950 }
1951
1952 nsegs = dm->dm_nsegs;
1953 xfersize = 0;
1954
1955 /*
1956 * Write out the SG list.
1957 */
1958 if (out)
1959 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
1960 else
1961 flg = I2O_SGL_SIMPLE;
1962
1963 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
1964 p[0] = (u_int32_t)ds->ds_len | flg;
1965 p[1] = (u_int32_t)ds->ds_addr;
1966 xfersize += ds->ds_len;
1967 }
1968
1969 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
1970 p[1] = (u_int32_t)ds->ds_addr;
1971 xfersize += ds->ds_len;
1972
1973 /* Fix up the transfer record, and sync the map. */
1974 ix->ix_flags = (out ? IX_OUT : IX_IN);
1975 ix->ix_size = xfersize;
1976 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
1977 out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);
1978
1979 /*
1980 * If this is the first xfer we've mapped for this message, adjust
1981 * the SGL offset field in the message header.
1982 */
1983 if ((im->im_flags & IM_SGLOFFADJ) == 0) {
1984 mb[0] += (mb[0] >> 12) & 0xf0;
1985 im->im_flags |= IM_SGLOFFADJ;
1986 }
1987 mb[0] += (nsegs << 17);
1988 return (0);
1989
1990 bad:
1991 if (xn != 0)
1992 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1993 return (rv);
1994 }
1995
1996 /*
1997 * Map a block I/O data transfer (different in that there's only one per
1998 * message maximum, and PAGE addressing may be used). Write a scatter
1999 * gather list into the message frame.
2000 */
2001 int
2002 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
2003 void *xferaddr, int xfersize, int out)
2004 {
2005 bus_dma_segment_t *ds;
2006 bus_dmamap_t dm;
2007 struct iop_xfer *ix;
2008 u_int rv, i, nsegs, off, slen, tlen, flg;
2009 paddr_t saddr, eaddr;
2010 u_int32_t *p;
2011
2012 #ifdef I2ODEBUG
2013 if (xfersize == 0)
2014 panic("iop_msg_map_bio: null transfer");
2015 if (xfersize > IOP_MAX_XFER)
2016 panic("iop_msg_map_bio: transfer too large");
2017 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2018 panic("iop_msg_map_bio: SGLOFFADJ");
2019 #endif
2020
2021 ix = im->im_xfer;
2022 dm = ix->ix_map;
2023 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
2024 BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
2025 if (rv != 0)
2026 return (rv);
2027
2028 off = mb[0] >> 16;
2029 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
2030
2031 /*
2032 * If the transfer is highly fragmented and won't fit using SIMPLE
2033 * elements, use PAGE_LIST elements instead. SIMPLE elements are
2034 * potentially more efficient, both for us and the IOP.
2035 */
2036 if (dm->dm_nsegs > nsegs) {
2037 nsegs = 1;
2038 p = mb + off + 1;
2039
2040 /* XXX This should be done with a bus_space flag. */
2041 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2042 slen = ds->ds_len;
2043 saddr = ds->ds_addr;
2044
2045 while (slen > 0) {
2046 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2047 tlen = min(eaddr - saddr, slen);
2048 slen -= tlen;
2049 *p++ = le32toh(saddr);
2050 saddr = eaddr;
2051 nsegs++;
2052 }
2053 }
2054
2055 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2056 I2O_SGL_END;
2057 if (out)
2058 mb[off] |= I2O_SGL_DATA_OUT;
2059 } else {
2060 p = mb + off;
2061 nsegs = dm->dm_nsegs;
2062
2063 if (out)
2064 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2065 else
2066 flg = I2O_SGL_SIMPLE;
2067
2068 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2069 p[0] = (u_int32_t)ds->ds_len | flg;
2070 p[1] = (u_int32_t)ds->ds_addr;
2071 }
2072
2073 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2074 I2O_SGL_END;
2075 p[1] = (u_int32_t)ds->ds_addr;
2076 nsegs <<= 1;
2077 }
2078
2079 /* Fix up the transfer record, and sync the map. */
2080 ix->ix_flags = (out ? IX_OUT : IX_IN);
2081 ix->ix_size = xfersize;
2082 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2083 out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);
2084
2085 /*
2086 * Adjust the SGL offset and total message size fields. We don't
2087 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2088 */
2089 mb[0] += ((off << 4) + (nsegs << 16));
2090 return (0);
2091 }
2092
2093 /*
2094 * Unmap all data transfers associated with a message wrapper.
2095 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

#ifdef I2ODEBUG
	if (im->im_xfer[0].ix_size == 0)
		panic("iop_msg_unmap: no transfers mapped");
#endif

	/* Walk the used transfer slots; ix_size == 0 terminates the list. */
	for (ix = im->im_xfer, i = 0;;) {
		/* Post-transfer sync before the CPU touches the data. */
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
		if ((++ix)->ix_size == 0)
			break;
		if (++i >= IOP_MAX_MSG_XFERS)
			break;
	}
}
2122
2123 /*
2124 * Post a message frame to the IOP's inbound queue.
2125 */
int
iop_post(struct iop_softc *sc, u_int32_t *mb)
{
	u_int32_t mfa;
	int s;

#ifdef I2ODEBUG
	/* mb[0] >> 16 is the frame length in 32-bit words. */
	if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
		panic("iop_post: frame too large");
#endif

	s = splbio();

	/* Allocate a slot with the IOP. */
	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
			splx(s);
			printf("%s: mfa not forthcoming\n",
			    sc->sc_dv.dv_xname);
			return (EAGAIN);
		}

	/* Perform reply buffer DMA synchronisation. */
	if (sc->sc_curib++ == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);

	/* Copy out the message frame. */
	bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
	    mb[0] >> 16);
	/* Make the frame writes visible before the MFA is posted below. */
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
	    (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	splx(s);
	return (0);
}
2165
2166 /*
2167 * Post a message to the IOP and deal with completion.
2168 */
2169 int
2170 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2171 {
2172 u_int32_t *mb;
2173 int rv, s;
2174
2175 mb = xmb;
2176
2177 /* Terminate the scatter/gather list chain. */
2178 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2179 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2180
2181 if ((rv = iop_post(sc, mb)) != 0)
2182 return (rv);
2183
2184 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2185 if ((im->im_flags & IM_POLL) != 0)
2186 iop_msg_poll(sc, im, timo);
2187 else
2188 iop_msg_wait(sc, im, timo);
2189
2190 s = splbio();
2191 if ((im->im_flags & IM_REPLIED) != 0) {
2192 if ((im->im_flags & IM_NOSTATUS) != 0)
2193 rv = 0;
2194 else if ((im->im_flags & IM_FAIL) != 0)
2195 rv = ENXIO;
2196 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2197 rv = EIO;
2198 else
2199 rv = 0;
2200 } else
2201 rv = EBUSY;
2202 splx(s);
2203 } else
2204 rv = 0;
2205
2206 return (rv);
2207 }
2208
2209 /*
2210 * Spin until the specified message is replied to.
2211 */
static void
iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int32_t rmfa;
	int s;

	s = splbio();

	/* Wait for completion: timo is in ms, each iteration delays 100us. */
	for (timo *= 10; timo != 0; timo--) {
		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
			/* Double read to account for IOP bug. */
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa != IOP_MFA_EMPTY) {
				/* May complete a different wrapper, too. */
				iop_handle_reply(sc, rmfa);

				/*
				 * Return the reply frame to the IOP's
				 * outbound FIFO.
				 */
				iop_outl(sc, IOP_REG_OFIFO, rmfa);
			}
		}
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		DELAY(100);
	}

	/* timo == 0 means the reply never arrived. */
	if (timo == 0) {
#ifdef I2ODEBUG
		printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
		if (iop_status_get(sc, 1) != 0)
			printf("iop_msg_poll: unable to retrieve status\n");
		else
			printf("iop_msg_poll: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
#endif
	}

	splx(s);
}
2255
2256 /*
2257 * Sleep until the specified message is replied to.
2258 */
2259 static void
2260 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2261 {
2262 int s, rv;
2263
2264 s = splbio();
2265 if ((im->im_flags & IM_REPLIED) != 0) {
2266 splx(s);
2267 return;
2268 }
2269 rv = tsleep(im, PRIBIO, "iopmsg", mstohz(timo));
2270 splx(s);
2271
2272 #ifdef I2ODEBUG
2273 if (rv != 0) {
2274 printf("iop_msg_wait: tsleep() == %d\n", rv);
2275 if (iop_status_get(sc, 0) != 0)
2276 printf("iop_msg_wait: unable to retrieve status\n");
2277 else
2278 printf("iop_msg_wait: IOP state = %d\n",
2279 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2280 }
2281 #endif
2282 }
2283
2284 /*
2285 * Release an unused message frame back to the IOP's inbound fifo.
2286 */
2287 static void
2288 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2289 {
2290
2291 /* Use the frame to issue a no-op. */
2292 iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2293 iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2294 iop_outl_msg(sc, mfa + 8, 0);
2295 iop_outl_msg(sc, mfa + 12, 0);
2296
2297 iop_outl(sc, IOP_REG_IFIFO, mfa);
2298 }
2299
2300 #ifdef I2ODEBUG
2301 /*
2302 * Dump a reply frame header.
2303 */
2304 static void
2305 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2306 {
2307 u_int function, detail;
2308 #ifdef I2OVERBOSE
2309 const char *statusstr;
2310 #endif
2311
2312 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2313 detail = le16toh(rb->detail);
2314
2315 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2316
2317 #ifdef I2OVERBOSE
2318 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2319 statusstr = iop_status[rb->reqstatus];
2320 else
2321 statusstr = "undefined error code";
2322
2323 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2324 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2325 #else
2326 printf("%s: function=0x%02x status=0x%02x\n",
2327 sc->sc_dv.dv_xname, function, rb->reqstatus);
2328 #endif
2329 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2330 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2331 le32toh(rb->msgtctx));
2332 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2333 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2334 (le32toh(rb->msgflags) >> 8) & 0xff);
2335 }
2336 #endif
2337
2338 /*
2339 * Dump a transport failure reply.
2340 */
2341 static void
2342 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2343 {
2344
2345 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2346
2347 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2348 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2349 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2350 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2351 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2352 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2353 }
2354
2355 /*
2356 * Translate an I2O ASCII field into a C string.
2357 */
2358 void
2359 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2360 {
2361 int hc, lc, i, nit;
2362
2363 dlen--;
2364 lc = 0;
2365 hc = 0;
2366 i = 0;
2367
2368 /*
2369 * DPT use NUL as a space, whereas AMI use it as a terminator. The
2370 * spec has nothing to say about it. Since AMI fields are usually
2371 * filled with junk after the terminator, ...
2372 */
2373 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2374
2375 while (slen-- != 0 && dlen-- != 0) {
2376 if (nit && *src == '\0')
2377 break;
2378 else if (*src <= 0x20 || *src >= 0x7f) {
2379 if (hc)
2380 dst[i++] = ' ';
2381 } else {
2382 hc = 1;
2383 dst[i++] = *src;
2384 lc = i;
2385 }
2386 src++;
2387 }
2388
2389 dst[lc] = '\0';
2390 }
2391
2392 /*
2393 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2394 */
2395 int
2396 iop_print_ident(struct iop_softc *sc, int tid)
2397 {
2398 struct {
2399 struct i2o_param_op_results pr;
2400 struct i2o_param_read_results prr;
2401 struct i2o_param_device_identity di;
2402 } __attribute__ ((__packed__)) p;
2403 char buf[32];
2404 int rv;
2405
2406 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2407 sizeof(p), NULL);
2408 if (rv != 0)
2409 return (rv);
2410
2411 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2412 sizeof(buf));
2413 printf(" <%s, ", buf);
2414 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2415 sizeof(buf));
2416 printf("%s, ", buf);
2417 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2418 printf("%s>", buf);
2419
2420 return (0);
2421 }
2422
2423 /*
2424 * Claim or unclaim the specified TID.
2425 */
2426 int
2427 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2428 int flags)
2429 {
2430 struct iop_msg *im;
2431 struct i2o_util_claim mf;
2432 int rv, func;
2433
2434 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2435 im = iop_msg_alloc(sc, IM_WAIT);
2436
2437 /* We can use the same structure, as they're identical. */
2438 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2439 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2440 mf.msgictx = ii->ii_ictx;
2441 mf.msgtctx = im->im_tctx;
2442 mf.flags = flags;
2443
2444 rv = iop_msg_post(sc, im, &mf, 5000);
2445 iop_msg_free(sc, im);
2446 return (rv);
2447 }
2448
2449 /*
2450 * Perform an abort.
2451 */
2452 int iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2453 int tctxabort, int flags)
2454 {
2455 struct iop_msg *im;
2456 struct i2o_util_abort mf;
2457 int rv;
2458
2459 im = iop_msg_alloc(sc, IM_WAIT);
2460
2461 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2462 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2463 mf.msgictx = ii->ii_ictx;
2464 mf.msgtctx = im->im_tctx;
2465 mf.flags = (func << 24) | flags;
2466 mf.tctxabort = tctxabort;
2467
2468 rv = iop_msg_post(sc, im, &mf, 5000);
2469 iop_msg_free(sc, im);
2470 return (rv);
2471 }
2472
2473 /*
2474 * Enable or disable reception of events for the specified device.
2475 */
2476 int iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2477 {
2478 struct i2o_util_event_register mf;
2479
2480 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2481 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2482 mf.msgictx = ii->ii_ictx;
2483 mf.msgtctx = 0;
2484 mf.eventmask = mask;
2485
2486 /* This message is replied to only when events are signalled. */
2487 return (iop_post(sc, (u_int32_t *)&mf));
2488 }
2489
2490 int
2491 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
2492 {
2493 struct iop_softc *sc;
2494
2495 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2496 return (ENXIO);
2497 if ((sc->sc_flags & IOP_ONLINE) == 0)
2498 return (ENXIO);
2499 if ((sc->sc_flags & IOP_OPEN) != 0)
2500 return (EBUSY);
2501 sc->sc_flags |= IOP_OPEN;
2502
2503 return (0);
2504 }
2505
2506 int
2507 iopclose(dev_t dev, int flag, int mode,
2508 struct lwp *l)
2509 {
2510 struct iop_softc *sc;
2511
2512 sc = device_lookup(&iop_cd, minor(dev));
2513 sc->sc_flags &= ~IOP_OPEN;
2514
2515 return (0);
2516 }
2517
/*
 * Handle control requests on the IOP device.  IOPIOCPT and IOPIOCGSTATUS
 * are serviced directly; IOPIOCGLCT, IOPIOCGTIDMAP and IOPIOCRECONFIG
 * are serviced below under the configuration lock.
 */
int
iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct lwp *l)
{
	struct iop_softc *sc;
	struct iovec *iov;
	int rv, i;

	/* No NULL check: the device must have been opened successfully. */
	sc = device_lookup(&iop_cd, minor(dev));

	switch (cmd) {
	case IOPIOCPT:
		/* Pass-through requires raw device I/O authorization. */
		rv = kauth_authorize_device_passthru(l->l_cred, dev,
		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
		if (rv)
			return (rv);

		return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));

	case IOPIOCGSTATUS:
		/* Copy out at most the caller's buffer length. */
		iov = (struct iovec *)data;
		i = sizeof(struct i2o_status);
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		if ((rv = iop_status_get(sc, 0)) == 0)
			rv = copyout(&sc->sc_status, iov->iov_base, i);
		return (rv);

	case IOPIOCGLCT:
	case IOPIOCGTIDMAP:
	case IOPIOCRECONFIG:
		/* These need the configuration lock; handled below. */
		break;

	default:
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
#endif
		return (ENOTTY);
	}

	/* Take the configuration lock shared for the read-only requests. */
	if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
		return (rv);

	switch (cmd) {
	case IOPIOCGLCT:
		/* tablesize is in 32-bit words; << 2 converts to bytes. */
		iov = (struct iovec *)data;
		i = le16toh(sc->sc_lct->tablesize) << 2;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_lct, iov->iov_base, i);
		break;

	case IOPIOCRECONFIG:
		/*
		 * NOTE(review): if LK_UPGRADE fails, lockmgr() may have
		 * dropped the shared lock, yet LK_RELEASE below is issued
		 * unconditionally -- confirm against lockmgr(9) semantics.
		 */
		if ((rv = lockmgr(&sc->sc_conflock, LK_UPGRADE, NULL)) == 0)
			rv = iop_reconfigure(sc, 0);
		break;

	case IOPIOCGTIDMAP:
		iov = (struct iovec *)data;
		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
		break;
	}

	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
	return (rv);
}
2592
2593 static int
2594 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2595 {
2596 struct iop_msg *im;
2597 struct i2o_msg *mf;
2598 struct ioppt_buf *ptb;
2599 int rv, i, mapped;
2600
2601 mf = NULL;
2602 im = NULL;
2603 mapped = 1;
2604
2605 if (pt->pt_msglen > sc->sc_framesize ||
2606 pt->pt_msglen < sizeof(struct i2o_msg) ||
2607 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2608 pt->pt_nbufs < 0 ||
2609 #if 0
2610 pt->pt_replylen < 0 ||
2611 #endif
2612 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2613 return (EINVAL);
2614
2615 for (i = 0; i < pt->pt_nbufs; i++)
2616 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2617 rv = ENOMEM;
2618 goto bad;
2619 }
2620
2621 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2622 if (mf == NULL)
2623 return (ENOMEM);
2624
2625 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2626 goto bad;
2627
2628 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2629 im->im_rb = (struct i2o_reply *)mf;
2630 mf->msgictx = IOP_ICTX;
2631 mf->msgtctx = im->im_tctx;
2632
2633 for (i = 0; i < pt->pt_nbufs; i++) {
2634 ptb = &pt->pt_bufs[i];
2635 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2636 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2637 if (rv != 0)
2638 goto bad;
2639 mapped = 1;
2640 }
2641
2642 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2643 goto bad;
2644
2645 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2646 if (i > sc->sc_framesize)
2647 i = sc->sc_framesize;
2648 if (i > pt->pt_replylen)
2649 i = pt->pt_replylen;
2650 rv = copyout(im->im_rb, pt->pt_reply, i);
2651
2652 bad:
2653 if (mapped != 0)
2654 iop_msg_unmap(sc, im);
2655 if (im != NULL)
2656 iop_msg_free(sc, im);
2657 if (mf != NULL)
2658 free(mf, M_DEVBUF);
2659 return (rv);
2660 }
2661