/*	$NetBSD: iop.c,v 1.84 2013/10/17 21:16:12 christos Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.84 2013/10/17 21:16:12 christos Exp $");

#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>
#include <sys/kauth.h>
#include <sys/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#include "locators.h"
#define	POLL(ms, cond)					\
do {							\
	int xi;						\
	for (xi = (ms) * 10; xi; xi--) {		\
		if (cond)				\
			break;				\
		DELAY(100);				\
	}						\
} while (/* CONSTCOND */0)

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)
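
/*
 * Transaction context layout: the low IOP_TCTX_SHIFT bits index the message
 * wrapper; the upper bits hold a generation count (see iop_msg_alloc()) so
 * that stale or corrupt replies can be detected in iop_handle_reply().
 */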

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long	iop_ictxhash;
static void	*iop_sdh;
static struct	i2o_systab *iop_systab;
static int	iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

static struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
	const char *ic_caption;
} const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		"executive"
	},
	{
		I2O_CLASS_DDM,
		0,
		"device driver module"
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"random block storage"
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"sequential storage"
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		"LAN port"
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		"WAN port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		"fibrechannel port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		"fibrechannel peripheral"
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		"SCSI peripheral"
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		"ATE port"
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		"ATE peripheral"
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		"floppy controller"
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		"floppy device"
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		"bus adapter port"
	},
};

#ifdef I2ODEBUG
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t iop_inl(struct iop_softc *, int);
static inline void iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t iop_inl_msg(struct iop_softc *, int);
static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(device_t);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *, size_t);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);

static void	iop_adjqparam(struct iop_softc *, int);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(device_t, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
				struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_sys_enable(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

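	/* `state' counts completed setup steps, for unwinding at bail_out. */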
	state = 0;

	printf("I2O adapter");

	mutex_init(&sc->sc_intrlock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&sc->sc_conflock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_confcv, "iopconf");

	if (iop_ictxhashtbl == NULL) {
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    true, &iop_ictxhash);
	}

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		aprint_error_dev(sc->sc_dev, "cannot create scratch dmamap\n");
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(sc->sc_dev, "cannot alloc scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		aprint_error_dev(sc->sc_dev, "cannot map scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "cannot load scratch dmamap\n");
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Avoid syncing the reply map until it's set up. */
	sc->sc_curib = 0x123;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "not responding (reset)\n");
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		aprint_error_dev(sc->sc_dev, "not responding (get status)\n");
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n",
	    device_xname(sc->sc_dev),
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", device_xname(sc->sc_dev));
	printf("%s: mem %04x %04x %08x\n", device_xname(sc->sc_dev),
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", device_xname(sc->sc_dev),
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		aprint_error_dev(sc->sc_dev, "frame size too small (%d)\n",
		    sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		aprint_error_dev(sc->sc_dev, "memory allocation failure\n");
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "couldn't create dmamap (%d)\n", rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
		cv_init(&im->im_cv, "iopmsg");
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to init outbound FIFO\n");
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts(sc->sc_dev, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", device_xname(sc->sc_dev),
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    device_xname(sc->sc_dev), sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	return;

 bail_out3:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
 bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(device_t self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;
	int locs[IOPCF_NLOCS];

	sc = device_private(self);
	mutex_enter(&sc->sc_conflock);

	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", device_xname(sc->sc_dev));

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", device_xname(sc->sc_dev));
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				aprint_error_dev(sc->sc_dev, "unable to retrieve status\n");
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0) {
			mutex_exit(&sc->sc_conflock);
			return;
		}

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = device_unit(iop->sc_dev) + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to set system table\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}
	if (iop_sys_enable(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to enable system\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "unable to register for events\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	locs[IOPCF_TID] = I2O_TID_IOP;
	config_found_sm_loc(self, "iop", locs, &ia, iop_print,
	    config_stdsubmatch);

	/*
	 * Start device configuration.
	 */
	if ((rv = iop_reconfigure(sc, 0)) == -1)
		aprint_error_dev(sc->sc_dev, "configure failed (%d)\n", rv);

	sc->sc_flags |= IOP_ONLINE;
	rv = kthread_create(PRI_NONE, 0, NULL, iop_reconf_thread, sc,
	    &sc->sc_reconf_thread, "%s", device_xname(sc->sc_dev));
	mutex_exit(&sc->sc_conflock);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "unable to create reconfiguration thread (%d)\n", rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    device_xname(sc->sc_dev), chgind));

		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    device_xname(sc->sc_dev), le32toh(lct.changeindicator), rv));

		mutex_enter(&sc->sc_conflock);
		if (rv == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
		}
		(void)cv_timedwait(&sc->sc_confcv, &sc->sc_conflock, hz * 5);
		mutex_exit(&sc->sc_conflock);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	KASSERT(mutex_owned(&sc->sc_conflock));

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", device_xname(sc->sc_dev),
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				aprint_error_dev(sc->sc_dev, "bus scan failed\n");
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", device_xname(sc->sc_dev)));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", device_xname(sc->sc_dev), sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", device_xname(sc->sc_dev)));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", device_xname(sc->sc_dev)));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	}
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
			config_detach(ii->ii_dv, DETACH_FORCE);
			continue;
		}

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			aprint_error_dev(sc->sc_dev, "%s failed reconfigure (%d)\n",
			    device_xname(ii->ii_dv), rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	device_t dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    device_xname(ii->ii_dv));
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(sc->sc_dev, "iop", locs, &ia,
		    iop_print, config_stdsubmatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, device_xname(dv));
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo, size_t l)
{
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		snprintf(devinfo, l, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, l);
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup_private(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

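	/*
	 * Zero the status buffer; the IOP sets the sync byte to 0xff once
	 * the whole reply has landed, and we poll for that below.
	 */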
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREWRITE);
	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_POSTWRITE);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	for (i = 100; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			kpause("iopstat", false, hz / 10, NULL);
	}

	if (st->syncbyte != 0xff) {
		aprint_error_dev(sc->sc_dev, "STATUS_GET timed out\n");
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_dmamap->dm_segs[0].ds_addr;
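	/* The SGL added two words; grow the size field in the header. */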
	mb[0] += 2 << 16;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE);
	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		aprint_error_dev(sc->sc_dev, "outbound FIFO init failed (%d)\n",
		    le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA alloc = %d\n",
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA map = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA create = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA load = %d\n", rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;

		/* Now safe to sync the reply map. */
		sc->sc_curib = 0;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", device_xname(sc->sc_dev),
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this is a
 * verbatim notification request, and the caller must be prepared to
 * wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Post a SYS_ENABLE message to the adapter.
 */
int
iop_sys_enable(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, 30000);
	if (rv == 0) {
		if ((im->im_flags & IM_FAIL) != 0)
			rv = ENXIO;
		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
			rv = 0;
		else
			rv = EIO;
	}

	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    device_xname(sc->sc_dev), tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
	      int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(sc->sc_dev, "FIELD_SET failed for tid %d group %d\n",
		    tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(sc->sc_dev, "TABLE_CLEAR failed for tid %d group %d\n",
		    tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(sc->sc_dev, "ADD_ROW failed for tid %d group %d row %d\n",
		    tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (device_unit(sc->sc_dev) + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "can't alloc priv mem space, err = %d\n", rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "can't alloc priv i/o space, err = %d\n", rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE);
	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

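	/*
	 * The IOP writes I2O_RESET_IN_PROGRESS to the status word once it
	 * has accepted the reset request.
	 */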
	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		aprint_error_dev(sc->sc_dev, "reset rejected, status 0x%x\n",
		    le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		aprint_error_dev(sc->sc_dev, "reset failed\n");
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	cv_init(&ii->ii_cv, "iopevt");

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_REMOVE(ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);

	cv_destroy(&ii->ii_cv);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	KASSERT(mutex_owned(&sc->sc_intrlock));

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)((char *)sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			aprint_error_dev(sc->sc_dev, "WARNING: bad ictx returned (%x)\n",
			    ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			aprint_error_dev(sc->sc_dev, "WARNING: bad tctx returned (0x%08x, %p)\n", tctx, im);
			if (im != NULL)
				aprint_error_dev(sc->sc_dev, "flags=0x%08x tctx=0x%08x\n",
				    im->im_flags, im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", device_xname(sc->sc_dev));
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;
		im->im_detstatus = le16toh(rb->detail);

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			cv_broadcast(&im->im_cv);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
			if (ii != NULL) {
				mutex_spin_exit(&sc->sc_intrlock);
				(*ii->ii_intr)(ii->ii_dv, im, rb);
				mutex_spin_enter(&sc->sc_intrlock);
			}
		}
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		if (ii != NULL) {
			mutex_spin_exit(&sc->sc_intrlock);
			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
			mutex_spin_enter(&sc->sc_intrlock);
		}
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	mutex_spin_enter(&sc->sc_intrlock);

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) {
		mutex_spin_exit(&sc->sc_intrlock);
		return (0);
	}

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	mutex_spin_exit(&sc->sc_intrlock);
	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(device_t dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", device_xname(dv), event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	mutex_spin_enter(&sc->sc_intrlock);
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);

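	/*
	 * Keep the wrapper index in the low bits and advance the generation
	 * count in the upper bits, so a stale reply won't match im_tctx.
	 */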
	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	mutex_spin_enter(&sc->sc_intrlock);
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
	    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
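	/* Each SIMPLE element is two words; grow the header's size field. */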
1952 mb[0] += (nsegs << 17);
1953 return (0);
1954
1955 bad:
1956 if (xn != 0)
1957 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1958 return (rv);
1959 }
1960
1961 /*
1962 * Map a block I/O data transfer (different in that there's only one per
1963 * message maximum, and PAGE addressing may be used). Write a scatter
1964 * gather list into the message frame.
1965 */
1966 int
1967 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1968 void *xferaddr, int xfersize, int out)
1969 {
1970 bus_dma_segment_t *ds;
1971 bus_dmamap_t dm;
1972 struct iop_xfer *ix;
1973 u_int rv, i, nsegs, off, slen, tlen, flg;
1974 paddr_t saddr, eaddr;
1975 u_int32_t *p;
1976
1977 #ifdef I2ODEBUG
1978 if (xfersize == 0)
1979 panic("iop_msg_map_bio: null transfer");
1980 if (xfersize > IOP_MAX_XFER)
1981 panic("iop_msg_map_bio: transfer too large");
1982 if ((im->im_flags & IM_SGLOFFADJ) != 0)
1983 panic("iop_msg_map_bio: SGLOFFADJ");
1984 #endif
1985
1986 ix = im->im_xfer;
1987 dm = ix->ix_map;
1988 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
1989 BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
1990 if (rv != 0)
1991 return (rv);
1992
1993 off = mb[0] >> 16;
1994 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
1995
1996 /*
1997 * If the transfer is highly fragmented and won't fit using SIMPLE
1998 * elements, use PAGE_LIST elements instead. SIMPLE elements are
1999 * potentially more efficient, both for us and the IOP.
2000 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;	/* skip the PAGE_LIST header word */
2004
2005 /* XXX This should be done with a bus_space flag. */
2006 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2007 slen = ds->ds_len;
2008 saddr = ds->ds_addr;
2009
2010 while (slen > 0) {
2011 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2012 tlen = min(eaddr - saddr, slen);
2013 slen -= tlen;
2014 *p++ = le32toh(saddr);
2015 saddr = eaddr;
2016 nsegs++;
2017 }
2018 }
2019
2020 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2021 I2O_SGL_END;
2022 if (out)
2023 mb[off] |= I2O_SGL_DATA_OUT;
2024 } else {
2025 p = mb + off;
2026 nsegs = dm->dm_nsegs;
2027
2028 if (out)
2029 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2030 else
2031 flg = I2O_SGL_SIMPLE;
2032
2033 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2034 p[0] = (u_int32_t)ds->ds_len | flg;
2035 p[1] = (u_int32_t)ds->ds_addr;
2036 }
2037
2038 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2039 I2O_SGL_END;
2040 p[1] = (u_int32_t)ds->ds_addr;
		nsegs <<= 1;	/* two words per SIMPLE element */
2042 }
2043
2044 /* Fix up the transfer record, and sync the map. */
2045 ix->ix_flags = (out ? IX_OUT : IX_IN);
2046 ix->ix_size = xfersize;
2047 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2048 out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2049
2050 /*
2051 * Adjust the SGL offset and total message size fields. We don't
2052 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2053 */
2054 mb[0] += ((off << 4) + (nsegs << 16));
2055 return (0);
2056 }
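
/*
 * For reference, a sketch of the PAGE_LIST chain built above for a
 * fragmented transfer: a single header word followed by one word per
 * physical page (layout follows the code):
 *
 *	mb[off]		bytes | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
 *			I2O_SGL_END [| I2O_SGL_DATA_OUT]
 *	mb[off+1]	physical address of page 0
 *	mb[off+2]	physical address of page 1
 *	...
 */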
2057
2058 /*
2059 * Unmap all data transfers associated with a message wrapper.
2060 */
2061 void
2062 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2063 {
2064 struct iop_xfer *ix;
2065 int i;
2066
2067 #ifdef I2ODEBUG
2068 if (im->im_xfer[0].ix_size == 0)
2069 panic("iop_msg_unmap: no transfers mapped");
2070 #endif
2071
2072 for (ix = im->im_xfer, i = 0;;) {
2073 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2074 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2075 BUS_DMASYNC_POSTREAD);
2076 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2077
2078 /* Only the first DMA map is static. */
2079 if (i != 0)
2080 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2081 if ((++ix)->ix_size == 0)
2082 break;
2083 if (++i >= IOP_MAX_MSG_XFERS)
2084 break;
2085 }
2086 }
2087
2088 /*
2089 * Post a message frame to the IOP's inbound queue.
2090 */
2091 int
2092 iop_post(struct iop_softc *sc, u_int32_t *mb)
2093 {
2094 u_int32_t mfa;
2095
2096 #ifdef I2ODEBUG
2097 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2098 panic("iop_post: frame too large");
2099 #endif
2100
2101 mutex_spin_enter(&sc->sc_intrlock);
2102
	/*
	 * Allocate a slot with the IOP.  Double read to account for the
	 * same IOP bug handled in iop_msg_poll().
	 */
	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2106 mutex_spin_exit(&sc->sc_intrlock);
2107 aprint_error_dev(sc->sc_dev, "mfa not forthcoming\n");
2108 return (EAGAIN);
2109 }
2110
2111 /* Perform reply buffer DMA synchronisation. */
2112 if (sc->sc_rep_size != 0) {
2113 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2114 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2115 }
2116
	/* Copy out the message frame; the size field is in 32-bit words. */
	bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
	    mb[0] >> 16);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
	    (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);	/* bytes */
2122
2123 /* Post the MFA back to the IOP. */
2124 iop_outl(sc, IOP_REG_IFIFO, mfa);
2125
2126 mutex_spin_exit(&sc->sc_intrlock);
2127 return (0);
2128 }
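
/*
 * For reference, the inbound post handshake performed above, in
 * outline:
 *
 *	mfa = read(IFIFO)		get an empty frame address
 *	write frame at mfa		bus_space_write_region_4()
 *	write(IFIFO, mfa)		hand the frame back to the IOP
 *
 * Replies return through the outbound FIFO; see iop_msg_poll() below.
 */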
2129
2130 /*
2131 * Post a message to the IOP and deal with completion.
2132 */
2133 int
2134 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2135 {
2136 u_int32_t *mb;
2137 int rv;
2138
2139 mb = xmb;
2140
	/* Terminate the SGL chain: the last element starts two words back. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2144
2145 if ((rv = iop_post(sc, mb)) != 0)
2146 return (rv);
2147
2148 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2149 if ((im->im_flags & IM_POLL) != 0)
2150 iop_msg_poll(sc, im, timo);
2151 else
2152 iop_msg_wait(sc, im, timo);
2153
2154 mutex_spin_enter(&sc->sc_intrlock);
2155 if ((im->im_flags & IM_REPLIED) != 0) {
2156 if ((im->im_flags & IM_NOSTATUS) != 0)
2157 rv = 0;
2158 else if ((im->im_flags & IM_FAIL) != 0)
2159 rv = ENXIO;
2160 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2161 rv = EIO;
2162 else
2163 rv = 0;
2164 } else
2165 rv = EBUSY;
2166 mutex_spin_exit(&sc->sc_intrlock);
2167 } else
2168 rv = 0;
2169
2170 return (rv);
2171 }
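
/*
 * For reference, a hedged sketch of the allocate/map/post/unmap/free
 * round trip implemented above.  Illustrative only (hence the #if 0):
 * the frame layout is schematic, I2O_UTIL_NOP stands in for a
 * data-bearing function, and the 256-byte frame assumption may not
 * match sc_framesize on all IOPs.
 */
#if 0
static int
iop_example_roundtrip(struct iop_softc *sc, int tid, void *buf, int len,
    int out)
{
	struct iop_msg *im;
	u_int32_t mb[256 / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	/* Four header words; iop_msg_map() grows the size field. */
	mb[0] = I2O_VERSION_11 | (4 << 16);
	mb[1] = I2O_MSGFUNC(tid, I2O_UTIL_NOP);	/* stand-in function */
	mb[2] = IOP_ICTX;
	mb[3] = im->im_tctx;

	if ((rv = iop_msg_map(sc, im, mb, buf, len, out, NULL)) == 0) {
		/* Waits up to 5000ms for the reply (IM_WAIT). */
		rv = iop_msg_post(sc, im, mb, 5000);
		iop_msg_unmap(sc, im);
	}
	iop_msg_free(sc, im);
	return (rv);
}
#endif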
2172
2173 /*
2174 * Spin until the specified message is replied to.
2175 */
2176 static void
2177 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2178 {
2179 u_int32_t rmfa;
2180
2181 mutex_spin_enter(&sc->sc_intrlock);
2182
2183 for (timo *= 10; timo != 0; timo--) {
2184 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2185 /* Double read to account for IOP bug. */
2186 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2187 if (rmfa == IOP_MFA_EMPTY)
2188 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2189 if (rmfa != IOP_MFA_EMPTY) {
2190 iop_handle_reply(sc, rmfa);
2191
2192 /*
2193 * Return the reply frame to the IOP's
2194 * outbound FIFO.
2195 */
2196 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2197 }
2198 }
2199 if ((im->im_flags & IM_REPLIED) != 0)
2200 break;
2201 mutex_spin_exit(&sc->sc_intrlock);
2202 DELAY(100);
2203 mutex_spin_enter(&sc->sc_intrlock);
2204 }
2205
2206 if (timo == 0) {
2207 #ifdef I2ODEBUG
2208 printf("%s: poll - no reply\n", device_xname(sc->sc_dev));
2209 if (iop_status_get(sc, 1) != 0)
2210 printf("iop_msg_poll: unable to retrieve status\n");
2211 else
2212 printf("iop_msg_poll: IOP state = %d\n",
2213 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2214 #endif
2215 }
2216
2217 mutex_spin_exit(&sc->sc_intrlock);
2218 }
2219
2220 /*
2221 * Sleep until the specified message is replied to.
2222 */
2223 static void
2224 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2225 {
2226 int rv;
2227
2228 mutex_spin_enter(&sc->sc_intrlock);
2229 if ((im->im_flags & IM_REPLIED) != 0) {
2230 mutex_spin_exit(&sc->sc_intrlock);
2231 return;
2232 }
2233 rv = cv_timedwait(&im->im_cv, &sc->sc_intrlock, mstohz(timo));
2234 mutex_spin_exit(&sc->sc_intrlock);
2235
2236 #ifdef I2ODEBUG
2237 if (rv != 0) {
		printf("iop_msg_wait: cv_timedwait() == %d\n", rv);
2239 if (iop_status_get(sc, 0) != 0)
2240 printf("%s: unable to retrieve status\n", __func__);
2241 else
2242 printf("%s: IOP state = %d\n", __func__,
2243 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2244 }
2245 #else
2246 __USE(rv);
2247 #endif
2248 }
2249
2250 /*
2251 * Release an unused message frame back to the IOP's inbound fifo.
2252 */
2253 static void
2254 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2255 {
2256
	/* Use the frame to issue a no-op: a minimal 4-word frame. */
	iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2259 iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2260 iop_outl_msg(sc, mfa + 8, 0);
2261 iop_outl_msg(sc, mfa + 12, 0);
2262
2263 iop_outl(sc, IOP_REG_IFIFO, mfa);
2264 }
2265
2266 #ifdef I2ODEBUG
2267 /*
2268 * Dump a reply frame header.
2269 */
2270 static void
2271 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2272 {
2273 u_int function, detail;
2274 const char *statusstr;
2275
2276 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2277 detail = le16toh(rb->detail);
2278
2279 printf("%s: reply:\n", device_xname(sc->sc_dev));
2280
2281 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2282 statusstr = iop_status[rb->reqstatus];
2283 else
2284 statusstr = "undefined error code";
2285
2286 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2287 device_xname(sc->sc_dev), function, rb->reqstatus, statusstr);
2288 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2289 device_xname(sc->sc_dev), detail, le32toh(rb->msgictx),
2290 le32toh(rb->msgtctx));
2291 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", device_xname(sc->sc_dev),
2292 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2293 (le32toh(rb->msgflags) >> 8) & 0xff);
2294 }
2295 #endif
2296
2297 /*
2298 * Dump a transport failure reply.
2299 */
2300 static void
2301 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2302 {
2303
2304 printf("%s: WARNING: transport failure:\n", device_xname(sc->sc_dev));
2305
2306 printf("%s: ictx=0x%08x tctx=0x%08x\n", device_xname(sc->sc_dev),
2307 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2308 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2309 device_xname(sc->sc_dev), fn->failurecode, fn->severity);
2310 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2311 device_xname(sc->sc_dev), fn->highestver, fn->lowestver);
2312 }
2313
2314 /*
2315 * Translate an I2O ASCII field into a C string.
2316 */
2317 void
2318 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2319 {
2320 int hc, lc, i, nit;
2321
2322 dlen--;
2323 lc = 0;
2324 hc = 0;
2325 i = 0;
2326
	/*
	 * DPT use NUL as a space, whereas AMI use it as a terminator.  The
	 * spec has nothing to say about it.  Since AMI fields are usually
	 * filled with junk after the terminator, honour NUL as a terminator
	 * only on non-DPT IOPs.
	 */
2332 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2333
2334 while (slen-- != 0 && dlen-- != 0) {
2335 if (nit && *src == '\0')
2336 break;
2337 else if (*src <= 0x20 || *src >= 0x7f) {
2338 if (hc)
2339 dst[i++] = ' ';
2340 } else {
2341 hc = 1;
2342 dst[i++] = *src;
2343 lc = i;
2344 }
2345 src++;
2346 }
2347
2348 dst[lc] = '\0';
2349 }
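
/*
 * Example (illustrative): the 8-byte field { 'R','A','I','D',' ',' ',
 * 0x7f, '\0' } yields "RAID": non-printing characters are rewritten
 * as spaces, trailing ones are trimmed via the last-printable index,
 * and on non-DPT IOPs the NUL terminates the scan.
 */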
2350
2351 /*
2352 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2353 */
2354 int
2355 iop_print_ident(struct iop_softc *sc, int tid)
2356 {
2357 struct {
2358 struct i2o_param_op_results pr;
2359 struct i2o_param_read_results prr;
2360 struct i2o_param_device_identity di;
2361 } __packed p;
2362 char buf[32];
2363 int rv;
2364
2365 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2366 sizeof(p), NULL);
2367 if (rv != 0)
2368 return (rv);
2369
2370 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2371 sizeof(buf));
2372 printf(" <%s, ", buf);
2373 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2374 sizeof(buf));
2375 printf("%s, ", buf);
2376 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2377 printf("%s>", buf);
2378
2379 return (0);
2380 }
2381
2382 /*
2383 * Claim or unclaim the specified TID.
2384 */
2385 int
2386 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2387 int flags)
2388 {
2389 struct iop_msg *im;
2390 struct i2o_util_claim mf;
2391 int rv, func;
2392
2393 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2394 im = iop_msg_alloc(sc, IM_WAIT);
2395
	/* The claim and claim-release frames are identical in layout. */
2397 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2398 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2399 mf.msgictx = ii->ii_ictx;
2400 mf.msgtctx = im->im_tctx;
2401 mf.flags = flags;
2402
2403 rv = iop_msg_post(sc, im, &mf, 5000);
2404 iop_msg_free(sc, im);
2405 return (rv);
2406 }
2407
2408 /*
2409 * Perform an abort.
2410 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
2413 {
2414 struct iop_msg *im;
2415 struct i2o_util_abort mf;
2416 int rv;
2417
2418 im = iop_msg_alloc(sc, IM_WAIT);
2419
2420 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2421 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2422 mf.msgictx = ii->ii_ictx;
2423 mf.msgtctx = im->im_tctx;
2424 mf.flags = (func << 24) | flags;
2425 mf.tctxabort = tctxabort;
2426
2427 rv = iop_msg_post(sc, im, &mf, 5000);
2428 iop_msg_free(sc, im);
2429 return (rv);
2430 }
2431
2432 /*
2433 * Enable or disable reception of events for the specified device.
2434 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2436 {
2437 struct i2o_util_event_register mf;
2438
2439 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2440 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2441 mf.msgictx = ii->ii_ictx;
2442 mf.msgtctx = 0;
2443 mf.eventmask = mask;
2444
2445 /* This message is replied to only when events are signalled. */
2446 return (iop_post(sc, (u_int32_t *)&mf));
2447 }
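
/*
 * Example (illustrative): an initiator would typically register for
 * events once at attach time, passing the desired I2O_EVENT_* mask
 * bits from <dev/i2o/i2o.h>.  Because the message above carries a zero
 * transaction context, replies are delivered asynchronously to the
 * initiator's interrupt handler as events are signalled.
 */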
2448
2449 int
2450 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
2451 {
2452 struct iop_softc *sc;
2453
2454 if ((sc = device_lookup_private(&iop_cd, minor(dev))) == NULL)
2455 return (ENXIO);
2456 if ((sc->sc_flags & IOP_ONLINE) == 0)
2457 return (ENXIO);
2458 if ((sc->sc_flags & IOP_OPEN) != 0)
2459 return (EBUSY);
2460 sc->sc_flags |= IOP_OPEN;
2461
2462 return (0);
2463 }
2464
2465 int
iopclose(dev_t dev, int flag, int mode, struct lwp *l)
2468 {
2469 struct iop_softc *sc;
2470
2471 sc = device_lookup_private(&iop_cd, minor(dev));
2472 sc->sc_flags &= ~IOP_OPEN;
2473
2474 return (0);
2475 }
2476
2477 int
2478 iopioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
2479 {
2480 struct iop_softc *sc;
2481 struct iovec *iov;
2482 int rv, i;
2483
2484 sc = device_lookup_private(&iop_cd, minor(dev));
2485 rv = 0;
2486
2487 switch (cmd) {
2488 case IOPIOCPT:
2489 rv = kauth_authorize_device_passthru(l->l_cred, dev,
2490 KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
2491 if (rv)
2492 return (rv);
2493
2494 return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));
2495
2496 case IOPIOCGSTATUS:
2497 iov = (struct iovec *)data;
2498 i = sizeof(struct i2o_status);
2499 if (i > iov->iov_len)
2500 i = iov->iov_len;
2501 else
2502 iov->iov_len = i;
2503 if ((rv = iop_status_get(sc, 0)) == 0)
2504 rv = copyout(&sc->sc_status, iov->iov_base, i);
2505 return (rv);
2506
2507 case IOPIOCGLCT:
2508 case IOPIOCGTIDMAP:
2509 case IOPIOCRECONFIG:
2510 break;
2511
2512 default:
2513 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		printf("%s: unknown ioctl %lx\n",
		    device_xname(sc->sc_dev), cmd);
2515 #endif
2516 return (ENOTTY);
2517 }
2518
2519 mutex_enter(&sc->sc_conflock);
2520
2521 switch (cmd) {
2522 case IOPIOCGLCT:
2523 iov = (struct iovec *)data;
2524 i = le16toh(sc->sc_lct->tablesize) << 2;
2525 if (i > iov->iov_len)
2526 i = iov->iov_len;
2527 else
2528 iov->iov_len = i;
2529 rv = copyout(sc->sc_lct, iov->iov_base, i);
2530 break;
2531
2532 case IOPIOCRECONFIG:
2533 rv = iop_reconfigure(sc, 0);
2534 break;
2535
2536 case IOPIOCGTIDMAP:
2537 iov = (struct iovec *)data;
2538 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2539 if (i > iov->iov_len)
2540 i = iov->iov_len;
2541 else
2542 iov->iov_len = i;
2543 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2544 break;
2545 }
2546
2547 mutex_exit(&sc->sc_conflock);
2548 return (rv);
2549 }
2550
2551 static int
2552 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2553 {
2554 struct iop_msg *im;
2555 struct i2o_msg *mf;
2556 struct ioppt_buf *ptb;
2557 int rv, i, mapped;
2558
	mf = NULL;
	im = NULL;
	mapped = 0;	/* set once iop_msg_map() has mapped a buffer */
2562
2563 if (pt->pt_msglen > sc->sc_framesize ||
2564 pt->pt_msglen < sizeof(struct i2o_msg) ||
2565 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2566 pt->pt_nbufs < 0 ||
2567 #if 0
2568 pt->pt_replylen < 0 ||
2569 #endif
2570 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2571 return (EINVAL);
2572
2573 for (i = 0; i < pt->pt_nbufs; i++)
2574 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2575 rv = ENOMEM;
2576 goto bad;
2577 }
2578
2579 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2580 if (mf == NULL)
2581 return (ENOMEM);
2582
2583 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2584 goto bad;
2585
2586 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2587 im->im_rb = (struct i2o_reply *)mf;
2588 mf->msgictx = IOP_ICTX;
2589 mf->msgtctx = im->im_tctx;
2590
2591 for (i = 0; i < pt->pt_nbufs; i++) {
2592 ptb = &pt->pt_bufs[i];
2593 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2594 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2595 if (rv != 0)
2596 goto bad;
2597 mapped = 1;
2598 }
2599
2600 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2601 goto bad;
2602
	i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;	/* reply size, bytes */
2604 if (i > sc->sc_framesize)
2605 i = sc->sc_framesize;
2606 if (i > pt->pt_replylen)
2607 i = pt->pt_replylen;
2608 rv = copyout(im->im_rb, pt->pt_reply, i);
2609
2610 bad:
2611 if (mapped != 0)
2612 iop_msg_unmap(sc, im);
2613 if (im != NULL)
2614 iop_msg_free(sc, im);
2615 if (mf != NULL)
2616 free(mf, M_DEVBUF);
2617 return (rv);
2618 }
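
/*
 * For reference, a hedged userland sketch of driving IOPIOCPT (field
 * names from <dev/i2o/iopio.h>; the frame itself is device-specific):
 *
 *	struct ioppt pt;
 *
 *	pt.pt_msg = frame;		// prebuilt I2O message frame
 *	pt.pt_msglen = framelen;	// bytes, <= IOP frame size
 *	pt.pt_reply = rbuf;		// reply buffer
 *	pt.pt_replylen = sizeof(rbuf);
 *	pt.pt_timo = 2000;		// ms; must be in [1000, 300000]
 *	pt.pt_nbufs = 0;		// no data buffers in this example
 *	if (ioctl(fd, IOPIOCPT, &pt) != 0)
 *		err(EXIT_FAILURE, "IOPIOCPT");
 */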
2619