/*	$NetBSD: iop.c,v 1.83 2013/09/14 13:08:31 joerg Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.83 2013/09/14 13:08:31 joerg Exp $");

#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>
#include <sys/kauth.h>
#include <sys/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#include "locators.h"

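/*
 * Busy-wait for up to `ms' milliseconds for `cond' to become true,
 * re-testing roughly every 100us.
 */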
#define POLL(ms, cond)				\
do {						\
	int xi;					\
	for (xi = (ms) * 10; xi; xi--) {	\
		if (cond)			\
			break;			\
		DELAY(100);			\
	}					\
} while (/* CONSTCOND */0);

#ifdef I2ODEBUG
#define DPRINTF(x)	printf x
#else
#define DPRINTF(x)
#endif

#define IOP_ICTXHASH_NBUCKETS	16
#define IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

#define IOP_TCTX_SHIFT	12
#define IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)
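
/*
 * Transaction context layout: the low IOP_TCTX_SHIFT bits index the
 * message wrapper array, and the remaining bits carry a generation
 * number, bumped on each allocation so that stale or duplicated replies
 * can be detected (see iop_msg_alloc() and iop_handle_reply()).
 */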

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static void *iop_sdh;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

#define IC_CONFIGURE	0x01
#define IC_PRIORITY	0x02

static struct iop_class {
	u_short ic_class;
	u_short ic_flags;
	const char *ic_caption;
} const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		"executive"
	},
	{
		I2O_CLASS_DDM,
		0,
		"device driver module"
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"random block storage"
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"sequential storage"
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		"LAN port"
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		"WAN port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		"fibrechannel port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		"fibrechannel peripheral"
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		"SCSI peripheral"
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		"ATE port"
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		"ATE peripheral"
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		"floppy controller"
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		"floppy device"
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		"bus adapter port"
	},
};

#ifdef I2ODEBUG
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t iop_inl(struct iop_softc *, int);
static inline void iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t iop_inl_msg(struct iop_softc *, int);
static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void iop_config_interrupts(device_t);
static void iop_configure_devices(struct iop_softc *, int, int);
static void iop_devinfo(int, char *, size_t);
static int iop_print(void *, const char *);
static void iop_shutdown(void *);

static void iop_adjqparam(struct iop_softc *, int);
static int iop_handle_reply(struct iop_softc *, u_int32_t);
static int iop_hrt_get(struct iop_softc *);
static int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void iop_intr_event(device_t, struct iop_msg *, void *);
static int iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
    u_int32_t);
static void iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int iop_ofifo_init(struct iop_softc *);
static int iop_passthrough(struct iop_softc *, struct ioppt *,
    struct proc *);
static void iop_reconf_thread(void *);
static void iop_release_mfa(struct iop_softc *, u_int32_t);
static int iop_reset(struct iop_softc *);
static int iop_sys_enable(struct iop_softc *);
static int iop_systab_set(struct iop_softc *);
static void iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

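/*
 * Register access helpers.  The bus_space barriers keep the MMIO reads
 * and writes ordered on platforms with relaxed memory ordering.
 */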
static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	mutex_init(&sc->sc_intrlock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&sc->sc_conflock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_confcv, "iopconf");

	if (iop_ictxhashtbl == NULL) {
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    true, &iop_ictxhash);
	}

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		aprint_error_dev(sc->sc_dev, "cannot create scratch dmamap\n");
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(sc->sc_dev, "cannot alloc scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		aprint_error_dev(sc->sc_dev, "cannot map scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "cannot load scratch dmamap\n");
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Avoid syncing the reply map until it's set up. */
	sc->sc_curib = 0x123;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "not responding (reset)\n");
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		aprint_error_dev(sc->sc_dev, "not responding (get status)\n");
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n",
	    device_xname(sc->sc_dev),
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", device_xname(sc->sc_dev));
	printf("%s: mem %04x %04x %08x\n", device_xname(sc->sc_dev),
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", device_xname(sc->sc_dev),
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		aprint_error_dev(sc->sc_dev, "frame size too small (%d)\n",
		    sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		aprint_error_dev(sc->sc_dev, "memory allocation failure\n");
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "couldn't create dmamap (%d)", rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
		cv_init(&im->im_cv, "iopmsg");
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to init outbound FIFO\n");
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts(sc->sc_dev, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", device_xname(sc->sc_dev),
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    device_xname(sc->sc_dev), sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	return;

 bail_out3:
 	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
 bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(device_t self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;
	int locs[IOPCF_NLOCS];

	sc = device_private(self);
	mutex_enter(&sc->sc_conflock);

	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", device_xname(sc->sc_dev));

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", device_xname(sc->sc_dev));
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				aprint_error_dev(sc->sc_dev, "unable to retrieve status\n");
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0) {
			mutex_exit(&sc->sc_conflock);
			return;
		}

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = device_unit(iop->sc_dev) + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to set system table\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}
	if (iop_sys_enable(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to enable system\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "unable to register for events");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	locs[IOPCF_TID] = I2O_TID_IOP;
	config_found_sm_loc(self, "iop", locs, &ia, iop_print,
	    config_stdsubmatch);

	/*
	 * Start device configuration.
	 */
	if ((rv = iop_reconfigure(sc, 0)) == -1)
		aprint_error_dev(sc->sc_dev, "configure failed (%d)\n", rv);


	sc->sc_flags |= IOP_ONLINE;
	rv = kthread_create(PRI_NONE, 0, NULL, iop_reconf_thread, sc,
	    &sc->sc_reconf_thread, "%s", device_xname(sc->sc_dev));
	mutex_exit(&sc->sc_conflock);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "unable to create reconfiguration thread (%d)", rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct lwp *l;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;
	l = curlwp;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    device_xname(sc->sc_dev), chgind));

		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    device_xname(sc->sc_dev), le32toh(lct.changeindicator), rv));

		mutex_enter(&sc->sc_conflock);
		if (rv == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
		}
		(void)cv_timedwait(&sc->sc_confcv, &sc->sc_conflock, hz * 5);
		mutex_exit(&sc->sc_conflock);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	KASSERT(mutex_owned(&sc->sc_conflock));

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", device_xname(sc->sc_dev),
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				aprint_error_dev(sc->sc_dev, "bus scan failed\n");
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", device_xname(sc->sc_dev)));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", device_xname(sc->sc_dev), sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", device_xname(sc->sc_dev)));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", device_xname(sc->sc_dev)));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	}
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
			config_detach(ii->ii_dv, DETACH_FORCE);
			continue;
		}

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			aprint_error_dev(sc->sc_dev, "%s failed reconfigure (%d)\n",
			    device_xname(ii->ii_dv), rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	device_t dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    device_xname(ii->ii_dv));
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(sc->sc_dev, "iop", locs, &ia,
		    iop_print, config_stdsubmatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, device_xname(dv));
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo, size_t l)
{
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		snprintf(devinfo, l, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, l);
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup_private(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREWRITE);
	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_POSTWRITE);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

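	/*
	 * Poll for completion: the IOP sets the sync byte in our scratch
	 * buffer to 0xff once it has finished writing the status block.
	 */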
	for (i = 100; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			kpause("iopstat", false, hz / 10, NULL);
	}

	if (st->syncbyte != 0xff) {
		aprint_error_dev(sc->sc_dev, "STATUS_GET timed out\n");
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
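	/*
	 * Append one SIMPLE SG element describing the status word in
	 * scratch memory, and grow the message-size field (the upper 16
	 * bits of the header word, counted in 32-bit words) by the two
	 * words just added.
	 */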
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_dmamap->dm_segs[0].ds_addr;
	mb[0] += 2 << 16;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE);
	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		aprint_error_dev(sc->sc_dev, "outbound FIFO init failed (%d)\n",
		    le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA alloc = %d\n",
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA map = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA create = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA load = %d\n", rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;

		/* Now safe to sync the reply map. */
		sc->sc_curib = 0;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", device_xname(sc->sc_dev),
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
    u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Post a SYS_ENABLE message to the adapter.
 */
int
iop_sys_enable(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, 30000);
	if (rv == 0) {
		if ((im->im_flags & IM_FAIL) != 0)
			rv = ENXIO;
		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
			rv = 0;
		else
			rv = EIO;
	}

	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
    int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    device_xname(sc->sc_dev), tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(sc->sc_dev, "FIELD_SET failed for tid %d group %d\n",
		    tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(sc->sc_dev, "TABLE_CLEAR failed for tid %d group %d\n",
		    tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(sc->sc_dev, "ADD_ROW failed for tid %d group %d row %d\n",
		    tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
    int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (device_unit(sc->sc_dev) + 2) << 12;
	mf->segnumber = 0;

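	/*
	 * The desired private memory/IO sizes are stored little-endian in
	 * the status block; post them to the IOP unmodified and byte-swap
	 * only for our own bus_space_alloc() calls below.
	 */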
	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "can't alloc priv mem space, err = %d\n", rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "can't alloc priv i/o space, err = %d\n", rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE);
	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		aprint_error_dev(sc->sc_dev, "reset rejected, status 0x%x\n",
		    le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		aprint_error_dev(sc->sc_dev, "reset failed\n");
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	cv_init(&ii->ii_cv, "iopevt");

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_REMOVE(ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);

	cv_destroy(&ii->ii_cv);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	KASSERT(mutex_owned(&sc->sc_intrlock));

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)((char *)sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			aprint_error_dev(sc->sc_dev, "WARNING: bad ictx returned (%x)\n",
			    ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			aprint_error_dev(sc->sc_dev, "WARNING: bad tctx returned (0x%08x, %p)\n", tctx, im);
			if (im != NULL)
				aprint_error_dev(sc->sc_dev, "flags=0x%08x tctx=0x%08x\n",
				    im->im_flags, im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", device_xname(sc->sc_dev));
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;
		im->im_detstatus = le16toh(rb->detail);

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			cv_broadcast(&im->im_cv);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
			if (ii != NULL) {
				mutex_spin_exit(&sc->sc_intrlock);
				(*ii->ii_intr)(ii->ii_dv, im, rb);
				mutex_spin_enter(&sc->sc_intrlock);
			}
		}
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		if (ii != NULL) {
			mutex_spin_exit(&sc->sc_intrlock);
			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
			mutex_spin_enter(&sc->sc_intrlock);
		}
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	mutex_spin_enter(&sc->sc_intrlock);

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) {
		mutex_spin_exit(&sc->sc_intrlock);
		return (0);
	}

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	mutex_spin_exit(&sc->sc_intrlock);
	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(device_t dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", device_xname(dv), event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	mutex_spin_enter(&sc->sc_intrlock);
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);

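	/*
	 * Keep the wrapper's index in the low bits of the tctx and stamp
	 * a fresh generation number into the upper bits, so a late reply
	 * to a recycled wrapper won't be mistaken for the current one.
	 */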
	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	mutex_spin_enter(&sc->sc_intrlock);
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
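	/* Each SIMPLE element is two words; grow the size field to match. */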
1954 mb[0] += (nsegs << 17);
1955 return (0);
1956
1957 bad:
1958 if (xn != 0)
1959 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1960 return (rv);
1961 }
1962
1963 /*
1964 * Map a block I/O data transfer (different in that there's only one per
1965 * message maximum, and PAGE addressing may be used). Write a scatter
1966 * gather list into the message frame.
1967 */
1968 int
1969 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1970 void *xferaddr, int xfersize, int out)
1971 {
1972 bus_dma_segment_t *ds;
1973 bus_dmamap_t dm;
1974 struct iop_xfer *ix;
1975 u_int rv, i, nsegs, off, slen, tlen, flg;
1976 paddr_t saddr, eaddr;
1977 u_int32_t *p;
1978
1979 #ifdef I2ODEBUG
1980 if (xfersize == 0)
1981 panic("iop_msg_map_bio: null transfer");
1982 if (xfersize > IOP_MAX_XFER)
1983 panic("iop_msg_map_bio: transfer too large");
1984 if ((im->im_flags & IM_SGLOFFADJ) != 0)
1985 panic("iop_msg_map_bio: SGLOFFADJ");
1986 #endif
1987
1988 ix = im->im_xfer;
1989 dm = ix->ix_map;
1990 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
1991 BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
1992 if (rv != 0)
1993 return (rv);
1994
1995 off = mb[0] >> 16;
1996 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
1997
1998 /*
1999 * If the transfer is highly fragmented and won't fit using SIMPLE
2000 * elements, use PAGE_LIST elements instead. SIMPLE elements are
2001 * potentially more efficient, both for us and the IOP.
2002 */
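	/*
	 * Worked example (hypothetical numbers, 4KB pages): a 64KB
	 * transfer split across 24 segments would need 24 * 2 = 48 words
	 * as SIMPLE elements, but only 1 + 16 = 17 words as a PAGE_LIST:
	 * one header word plus one address word per page.
	 */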
2003 if (dm->dm_nsegs > nsegs) {
2004 nsegs = 1;
2005 p = mb + off + 1;
2006
2007 /* XXX This should be done with a bus_space flag. */
2008 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2009 slen = ds->ds_len;
2010 saddr = ds->ds_addr;
2011
2012 while (slen > 0) {
2013 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2014 tlen = min(eaddr - saddr, slen);
2015 slen -= tlen;
2016 *p++ = le32toh(saddr);
2017 saddr = eaddr;
2018 nsegs++;
2019 }
2020 }
2021
2022 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2023 I2O_SGL_END;
2024 if (out)
2025 mb[off] |= I2O_SGL_DATA_OUT;
2026 } else {
2027 p = mb + off;
2028 nsegs = dm->dm_nsegs;
2029
2030 if (out)
2031 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2032 else
2033 flg = I2O_SGL_SIMPLE;
2034
2035 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2036 p[0] = (u_int32_t)ds->ds_len | flg;
2037 p[1] = (u_int32_t)ds->ds_addr;
2038 }
2039
2040 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2041 I2O_SGL_END;
2042 p[1] = (u_int32_t)ds->ds_addr;
2043 nsegs <<= 1;
2044 }
2045
2046 /* Fix up the transfer record, and sync the map. */
2047 ix->ix_flags = (out ? IX_OUT : IX_IN);
2048 ix->ix_size = xfersize;
2049 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2050 out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2051
2052 /*
2053 * Adjust the SGL offset and total message size fields. We don't
2054 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2055 */
2056 mb[0] += ((off << 4) + (nsegs << 16));
2057 return (0);
2058 }
2059
2060 /*
2061 * Unmap all data transfers associated with a message wrapper.
2062 */
2063 void
2064 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2065 {
2066 struct iop_xfer *ix;
2067 int i;
2068
2069 #ifdef I2ODEBUG
2070 if (im->im_xfer[0].ix_size == 0)
2071 panic("iop_msg_unmap: no transfers mapped");
2072 #endif
2073
2074 for (ix = im->im_xfer, i = 0;;) {
2075 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2076 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2077 BUS_DMASYNC_POSTREAD);
2078 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2079
2080 /* Only the first DMA map is static. */
2081 if (i != 0)
2082 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
		/* Bounds check first, so we never read past im_xfer[]. */
2083 		if (++i >= IOP_MAX_MSG_XFERS)
2084 			break;
2085 		if ((++ix)->ix_size == 0)
2086 			break;
2087 }
2088 }
2089
2090 /*
2091 * Post a message frame to the IOP's inbound queue.
2092 */
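/*
 * The handshake, in outline: read IOP_REG_IFIFO to obtain a free MFA
 * (message frame address), copy the frame into IOP shared memory at
 * that offset, and write the MFA back to IOP_REG_IFIFO to hand the
 * frame over.
 */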
2093 int
2094 iop_post(struct iop_softc *sc, u_int32_t *mb)
2095 {
2096 u_int32_t mfa;
2097
2098 #ifdef I2ODEBUG
2099 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2100 panic("iop_post: frame too large");
2101 #endif
2102
2103 mutex_spin_enter(&sc->sc_intrlock);
2104
2105 	/* Allocate a message frame (MFA) from the IOP's inbound FIFO. */
2106 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
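		/* Double read to account for IOP bug. */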
2107 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2108 mutex_spin_exit(&sc->sc_intrlock);
2109 aprint_error_dev(sc->sc_dev, "mfa not forthcoming\n");
2110 return (EAGAIN);
2111 }
2112
2113 /* Perform reply buffer DMA synchronisation. */
2114 if (sc->sc_rep_size != 0) {
2115 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2116 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2117 }
2118
2119 /* Copy out the message frame. */
2120 bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2121 mb[0] >> 16);
2122 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
2123 (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2124
2125 /* Post the MFA back to the IOP. */
2126 iop_outl(sc, IOP_REG_IFIFO, mfa);
2127
2128 mutex_spin_exit(&sc->sc_intrlock);
2129 return (0);
2130 }
2131
2132 /*
2133 * Post a message to the IOP and deal with completion.
2134 */
2135 int
2136 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2137 {
2138 u_int32_t *mb;
2139 int rv;
2140
2141 mb = xmb;
2142
2143 	/*
 	 * Terminate the scatter/gather list chain.  The flags word of the
 	 * final SIMPLE element lies two words from the end of the frame.
 	 */
2144 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2145 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2146
2147 if ((rv = iop_post(sc, mb)) != 0)
2148 return (rv);
2149
2150 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2151 if ((im->im_flags & IM_POLL) != 0)
2152 iop_msg_poll(sc, im, timo);
2153 else
2154 iop_msg_wait(sc, im, timo);
2155
2156 mutex_spin_enter(&sc->sc_intrlock);
2157 if ((im->im_flags & IM_REPLIED) != 0) {
2158 if ((im->im_flags & IM_NOSTATUS) != 0)
2159 rv = 0;
2160 else if ((im->im_flags & IM_FAIL) != 0)
2161 rv = ENXIO;
2162 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2163 rv = EIO;
2164 else
2165 rv = 0;
2166 } else
2167 rv = EBUSY;
2168 mutex_spin_exit(&sc->sc_intrlock);
2169 } else
2170 rv = 0;
2171
2172 return (rv);
2173 }
2174
2175 /*
2176 * Spin until the specified message is replied to.
2177 */
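/*
 * The timeout is in milliseconds; the outbound FIFO is polled once per
 * 100 microseconds, hence the "timo *= 10" below.
 */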
2178 static void
2179 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2180 {
2181 u_int32_t rmfa;
2182
2183 mutex_spin_enter(&sc->sc_intrlock);
2184
2185 for (timo *= 10; timo != 0; timo--) {
2186 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2187 /* Double read to account for IOP bug. */
2188 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2189 if (rmfa == IOP_MFA_EMPTY)
2190 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2191 if (rmfa != IOP_MFA_EMPTY) {
2192 iop_handle_reply(sc, rmfa);
2193
2194 /*
2195 * Return the reply frame to the IOP's
2196 * outbound FIFO.
2197 */
2198 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2199 }
2200 }
2201 if ((im->im_flags & IM_REPLIED) != 0)
2202 break;
2203 mutex_spin_exit(&sc->sc_intrlock);
2204 DELAY(100);
2205 mutex_spin_enter(&sc->sc_intrlock);
2206 }
2207
2208 if (timo == 0) {
2209 #ifdef I2ODEBUG
2210 printf("%s: poll - no reply\n", device_xname(sc->sc_dev));
2211 if (iop_status_get(sc, 1) != 0)
2212 printf("iop_msg_poll: unable to retrieve status\n");
2213 else
2214 printf("iop_msg_poll: IOP state = %d\n",
2215 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2216 #endif
2217 }
2218
2219 mutex_spin_exit(&sc->sc_intrlock);
2220 }
2221
2222 /*
2223 * Sleep until the specified message is replied to.
2224 */
2225 static void
2226 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2227 {
2228 int rv;
2229
2230 mutex_spin_enter(&sc->sc_intrlock);
2231 if ((im->im_flags & IM_REPLIED) != 0) {
2232 mutex_spin_exit(&sc->sc_intrlock);
2233 return;
2234 }
2235 rv = cv_timedwait(&im->im_cv, &sc->sc_intrlock, mstohz(timo));
2236 mutex_spin_exit(&sc->sc_intrlock);
2237
2238 #ifdef I2ODEBUG
2239 if (rv != 0) {
2240 		printf("iop_msg_wait: cv_timedwait() == %d\n", rv);
2241 if (iop_status_get(sc, 0) != 0)
2242 printf("iop_msg_wait: unable to retrieve status\n");
2243 else
2244 printf("iop_msg_wait: IOP state = %d\n",
2245 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2246 }
2247 #endif
2248 }
2249
2250 /*
2251 * Release an unused message frame back to the IOP's inbound fifo.
2252 */
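/*
 * The resulting UtilNOP request is four words long: version/size (the
 * "4 << 16" encodes that length), function, and the initiator and
 * transaction context words, which are zero here since no reply is
 * wanted.
 */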
2253 static void
2254 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2255 {
2256
2257 /* Use the frame to issue a no-op. */
2258 iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2259 iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2260 iop_outl_msg(sc, mfa + 8, 0);
2261 iop_outl_msg(sc, mfa + 12, 0);
2262
2263 iop_outl(sc, IOP_REG_IFIFO, mfa);
2264 }
2265
2266 #ifdef I2ODEBUG
2267 /*
2268 * Dump a reply frame header.
2269 */
2270 static void
2271 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2272 {
2273 u_int function, detail;
2274 const char *statusstr;
2275
2276 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2277 detail = le16toh(rb->detail);
2278
2279 printf("%s: reply:\n", device_xname(sc->sc_dev));
2280
2281 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2282 statusstr = iop_status[rb->reqstatus];
2283 else
2284 statusstr = "undefined error code";
2285
2286 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2287 device_xname(sc->sc_dev), function, rb->reqstatus, statusstr);
2288 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2289 device_xname(sc->sc_dev), detail, le32toh(rb->msgictx),
2290 le32toh(rb->msgtctx));
2291 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", device_xname(sc->sc_dev),
2292 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2293 (le32toh(rb->msgflags) >> 8) & 0xff);
2294 }
2295 #endif
2296
2297 /*
2298 * Dump a transport failure reply.
2299 */
2300 static void
2301 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2302 {
2303
2304 printf("%s: WARNING: transport failure:\n", device_xname(sc->sc_dev));
2305
2306 printf("%s: ictx=0x%08x tctx=0x%08x\n", device_xname(sc->sc_dev),
2307 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2308 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2309 device_xname(sc->sc_dev), fn->failurecode, fn->severity);
2310 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2311 device_xname(sc->sc_dev), fn->highestver, fn->lowestver);
2312 }
2313
2314 /*
2315 * Translate an I2O ASCII field into a C string.
2316 */
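/*
 * For example (hypothetical input): on a non-DPT IOP the 8-byte field
 * { 'A', 'c', 'm', 'e', 0x00, 0x13, 0x37, 0x00 } yields "Acme", since
 * the first NUL terminates the scan and any junk beyond it is ignored;
 * on DPT hardware the NULs would instead render as blanks.
 */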
2317 void
2318 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2319 {
2320 int hc, lc, i, nit;
2321
2322 dlen--;
2323 lc = 0;
2324 hc = 0;
2325 i = 0;
2326
2327 /*
2328 * DPT use NUL as a space, whereas AMI use it as a terminator. The
2329 * spec has nothing to say about it. Since AMI fields are usually
2330 	 * filled with junk after the terminator, honour NUL as a
 	 * terminator on everything except DPT hardware.
2331 */
2332 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2333
2334 while (slen-- != 0 && dlen-- != 0) {
2335 if (nit && *src == '\0')
2336 break;
2337 else if (*src <= 0x20 || *src >= 0x7f) {
2338 if (hc)
2339 dst[i++] = ' ';
2340 } else {
2341 hc = 1;
2342 dst[i++] = *src;
2343 lc = i;
2344 }
2345 src++;
2346 }
2347
2348 dst[lc] = '\0';
2349 }
2350
2351 /*
2352 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2353 */
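/*
 * The result is printed in the autoconfiguration style, e.g. (with
 * hypothetical values) " <ACME, Widget Array, 1.00>".
 */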
2354 int
2355 iop_print_ident(struct iop_softc *sc, int tid)
2356 {
2357 struct {
2358 struct i2o_param_op_results pr;
2359 struct i2o_param_read_results prr;
2360 struct i2o_param_device_identity di;
2361 } __packed p;
2362 char buf[32];
2363 int rv;
2364
2365 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2366 sizeof(p), NULL);
2367 if (rv != 0)
2368 return (rv);
2369
2370 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2371 sizeof(buf));
2372 printf(" <%s, ", buf);
2373 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2374 sizeof(buf));
2375 printf("%s, ", buf);
2376 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2377 printf("%s>", buf);
2378
2379 return (0);
2380 }
2381
2382 /*
2383 * Claim or unclaim the specified TID.
2384 */
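/*
 * Illustrative sketch: a child driver would typically claim its target
 * as primary user at attach time and release it again at detach (flag
 * name assumed from dev/i2o/i2o.h):
 *
 *	iop_util_claim(sc, ii, 0, I2O_UTIL_CLAIM_PRIMARY_USER);
 *	...
 *	iop_util_claim(sc, ii, 1, I2O_UTIL_CLAIM_PRIMARY_USER);
 */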
2385 int
2386 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2387 int flags)
2388 {
2389 struct iop_msg *im;
2390 struct i2o_util_claim mf;
2391 int rv, func;
2392
2393 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2394 im = iop_msg_alloc(sc, IM_WAIT);
2395
2396 /* We can use the same structure, as they're identical. */
2397 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2398 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2399 mf.msgictx = ii->ii_ictx;
2400 mf.msgtctx = im->im_tctx;
2401 mf.flags = flags;
2402
2403 rv = iop_msg_post(sc, im, &mf, 5000);
2404 iop_msg_free(sc, im);
2405 return (rv);
2406 }
2407
2408 /*
2409 * Perform an abort.
2410 */
2411 int
2412 iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
2413 {
2414 struct iop_msg *im;
2415 struct i2o_util_abort mf;
2416 int rv;
2417
2418 im = iop_msg_alloc(sc, IM_WAIT);
2419
2420 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2421 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2422 mf.msgictx = ii->ii_ictx;
2423 mf.msgtctx = im->im_tctx;
2424 mf.flags = (func << 24) | flags;
2425 mf.tctxabort = tctxabort;
2426
2427 rv = iop_msg_post(sc, im, &mf, 5000);
2428 iop_msg_free(sc, im);
2429 return (rv);
2430 }
2431
2432 /*
2433 * Enable or disable reception of events for the specified device.
2434 */
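/*
 * Sketch (hypothetical mask value): an initiator enables the event
 * types it cares about with a non-zero mask, and disables delivery
 * again by registering a mask of zero:
 *
 *	iop_util_eventreg(sc, ii, mask);
 *	iop_util_eventreg(sc, ii, 0);
 */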
2435 int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2436 {
2437 struct i2o_util_event_register mf;
2438
2439 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2440 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2441 mf.msgictx = ii->ii_ictx;
2442 mf.msgtctx = 0;
2443 mf.eventmask = mask;
2444
2445 /* This message is replied to only when events are signalled. */
2446 return (iop_post(sc, (u_int32_t *)&mf));
2447 }
2448
2449 int
2450 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
2451 {
2452 struct iop_softc *sc;
2453
2454 if ((sc = device_lookup_private(&iop_cd, minor(dev))) == NULL)
2455 return (ENXIO);
2456 if ((sc->sc_flags & IOP_ONLINE) == 0)
2457 return (ENXIO);
2458 if ((sc->sc_flags & IOP_OPEN) != 0)
2459 return (EBUSY);
2460 sc->sc_flags |= IOP_OPEN;
2461
2462 return (0);
2463 }
2464
2465 int
2466 iopclose(dev_t dev, int flag, int mode, struct lwp *l)
2468 {
2469 struct iop_softc *sc;
2470
2471 sc = device_lookup_private(&iop_cd, minor(dev));
2472 sc->sc_flags &= ~IOP_OPEN;
2473
2474 return (0);
2475 }
2476
2477 int
2478 iopioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
2479 {
2480 struct iop_softc *sc;
2481 struct iovec *iov;
2482 int rv, i;
2483
2484 sc = device_lookup_private(&iop_cd, minor(dev));
2485 rv = 0;
2486
2487 switch (cmd) {
2488 case IOPIOCPT:
2489 rv = kauth_authorize_device_passthru(l->l_cred, dev,
2490 KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
2491 if (rv)
2492 return (rv);
2493
2494 return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));
2495
2496 case IOPIOCGSTATUS:
2497 iov = (struct iovec *)data;
2498 i = sizeof(struct i2o_status);
2499 if (i > iov->iov_len)
2500 i = iov->iov_len;
2501 else
2502 iov->iov_len = i;
2503 if ((rv = iop_status_get(sc, 0)) == 0)
2504 rv = copyout(&sc->sc_status, iov->iov_base, i);
2505 return (rv);
2506
2507 case IOPIOCGLCT:
2508 case IOPIOCGTIDMAP:
2509 case IOPIOCRECONFIG:
2510 break;
2511
2512 default:
2513 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2514 printf("%s: unknown ioctl %lx\n", device_xname(sc->sc_dev), cmd);
2515 #endif
2516 return (ENOTTY);
2517 }
2518
2519 mutex_enter(&sc->sc_conflock);
2520
2521 switch (cmd) {
2522 case IOPIOCGLCT:
2523 iov = (struct iovec *)data;
2524 i = le16toh(sc->sc_lct->tablesize) << 2;
2525 if (i > iov->iov_len)
2526 i = iov->iov_len;
2527 else
2528 iov->iov_len = i;
2529 rv = copyout(sc->sc_lct, iov->iov_base, i);
2530 break;
2531
2532 case IOPIOCRECONFIG:
2533 rv = iop_reconfigure(sc, 0);
2534 break;
2535
2536 case IOPIOCGTIDMAP:
2537 iov = (struct iovec *)data;
2538 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2539 if (i > iov->iov_len)
2540 i = iov->iov_len;
2541 else
2542 iov->iov_len = i;
2543 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2544 break;
2545 }
2546
2547 mutex_exit(&sc->sc_conflock);
2548 return (rv);
2549 }
2550
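/*
 * Handle an IOPIOCPT pass-through request from user space: copy in the
 * caller's message frame, map any data buffers, post the frame and
 * copy the reply back out.
 */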
2551 static int
2552 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2553 {
2554 struct iop_msg *im;
2555 struct i2o_msg *mf;
2556 struct ioppt_buf *ptb;
2557 int rv, i, mapped;
2558
2559 mf = NULL;
2560 im = NULL;
2561 	mapped = 0;
2562
2563 if (pt->pt_msglen > sc->sc_framesize ||
2564 pt->pt_msglen < sizeof(struct i2o_msg) ||
2565 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2566 pt->pt_nbufs < 0 ||
2567 #if 0
2568 pt->pt_replylen < 0 ||
2569 #endif
2570 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2571 return (EINVAL);
2572
2573 for (i = 0; i < pt->pt_nbufs; i++)
2574 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2575 rv = ENOMEM;
2576 goto bad;
2577 }
2578
2579 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2580 if (mf == NULL)
2581 return (ENOMEM);
2582
2583 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2584 goto bad;
2585
2586 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2587 im->im_rb = (struct i2o_reply *)mf;
2588 mf->msgictx = IOP_ICTX;
2589 mf->msgtctx = im->im_tctx;
2590
2591 for (i = 0; i < pt->pt_nbufs; i++) {
2592 ptb = &pt->pt_bufs[i];
2593 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2594 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2595 if (rv != 0)
2596 goto bad;
2597 mapped = 1;
2598 }
2599
2600 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2601 goto bad;
2602
2603 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2604 if (i > sc->sc_framesize)
2605 i = sc->sc_framesize;
2606 if (i > pt->pt_replylen)
2607 i = pt->pt_replylen;
2608 rv = copyout(im->im_rb, pt->pt_reply, i);
2609
2610 bad:
2611 if (mapped != 0)
2612 iop_msg_unmap(sc, im);
2613 if (im != NULL)
2614 iop_msg_free(sc, im);
2615 if (mf != NULL)
2616 free(mf, M_DEVBUF);
2617 return (rv);
2618 }
2619