/*	$NetBSD: iop.c,v 1.90 2019/11/10 21:16:35 chs Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.90 2019/11/10 21:16:35 chs Exp $");

#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>
#include <sys/kauth.h>
#include <sys/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#include "ioconf.h"
#include "locators.h"

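/*
 * Busy-wait for up to "ms" milliseconds, in 100us steps, for "cond" to
 * become true.  "cond" is re-evaluated on every iteration and may have
 * side effects; the callers below exploit this to interleave DMA syncs
 * with the test.
 */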
#define	POLL(ms, cond)				\
do {						\
	int xi;					\
	for (xi = (ms) * 10; xi; xi--) {	\
		if (cond)			\
			break;			\
		DELAY(100);			\
	}					\
} while (/* CONSTCOND */0);

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

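/* Worst case: one segment per page, plus one if the buffer is unaligned. */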
#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

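/*
 * Transaction context layout: the low IOP_TCTX_SHIFT bits index the
 * message wrapper, and the remaining bits carry a generation number
 * used to detect stale replies (see iop_msg_alloc()).
 */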
#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static void *iop_sdh;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	.d_open = iopopen,
	.d_close = iopclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = iopioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

static struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
	const char *ic_caption;
} const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		"executive"
	},
	{
		I2O_CLASS_DDM,
		0,
		"device driver module"
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"random block storage"
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"sequential storage"
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		"LAN port"
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		"WAN port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		"fibrechannel port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		"fibrechannel peripheral"
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		"SCSI peripheral"
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		"ATE port"
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		"ATE peripheral"
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		"floppy controller"
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		"floppy device"
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		"bus adapter port"
	},
};

#ifdef I2ODEBUG
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t iop_inl(struct iop_softc *, int);
static inline void iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t iop_inl_msg(struct iop_softc *, int);
static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void iop_config_interrupts(device_t);
static void iop_configure_devices(struct iop_softc *, int, int);
static void iop_devinfo(int, char *, size_t);
static int iop_print(void *, const char *);
static void iop_shutdown(void *);

static void iop_adjqparam(struct iop_softc *, int);
static int iop_handle_reply(struct iop_softc *, u_int32_t);
static int iop_hrt_get(struct iop_softc *);
static int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void iop_intr_event(device_t, struct iop_msg *, void *);
static int iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
		u_int32_t);
static void iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int iop_ofifo_init(struct iop_softc *);
static int iop_passthrough(struct iop_softc *, struct ioppt *,
		struct proc *);
static void iop_reconf_thread(void *);
static void iop_release_mfa(struct iop_softc *, u_int32_t);
static int iop_reset(struct iop_softc *);
static int iop_sys_enable(struct iop_softc *);
static int iop_systab_set(struct iop_softc *);
static void iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	mutex_init(&sc->sc_intrlock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&sc->sc_conflock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_confcv, "iopconf");

	if (iop_ictxhashtbl == NULL) {
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    true, &iop_ictxhash);
	}

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		aprint_error_dev(sc->sc_dev, "cannot create scratch dmamap\n");
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(sc->sc_dev, "cannot alloc scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		aprint_error_dev(sc->sc_dev, "cannot map scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "cannot load scratch dmamap\n");
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Avoid syncing the reply map until it's set up. */
	sc->sc_curib = 0x123;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "not responding (reset)\n");
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		aprint_error_dev(sc->sc_dev, "not responding (get status)\n");
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n",
	    device_xname(sc->sc_dev),
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", device_xname(sc->sc_dev));
	printf("%s: mem %04x %04x %08x\n", device_xname(sc->sc_dev),
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", device_xname(sc->sc_dev),
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		aprint_error_dev(sc->sc_dev, "frame size too small (%d)\n",
		    sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_WAITOK|M_ZERO);
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't create dmamap (%d)\n", rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
		cv_init(&im->im_cv, "iopmsg");
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to init outbound FIFO\n");
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts(sc->sc_dev, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", device_xname(sc->sc_dev),
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    device_xname(sc->sc_dev), sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	return;

 bail_out3:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
 bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(device_t self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;
	int locs[IOPCF_NLOCS];

	sc = device_private(self);
	mutex_enter(&sc->sc_conflock);

	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", device_xname(sc->sc_dev));

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", device_xname(sc->sc_dev));
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				aprint_error_dev(sc->sc_dev,
				    "unable to retrieve status\n");
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0) {
			mutex_exit(&sc->sc_conflock);
			return;
		}

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_WAITOK|M_ZERO);
		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = device_unit(iop->sc_dev) + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to set system table\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}
	if (iop_sys_enable(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to enable system\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to register for events\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	locs[IOPCF_TID] = I2O_TID_IOP;
	config_found_sm_loc(self, "iop", locs, &ia, iop_print,
	    config_stdsubmatch);

	/*
	 * Start device configuration.
	 */
	if ((rv = iop_reconfigure(sc, 0)) == -1)
		aprint_error_dev(sc->sc_dev, "configure failed (%d)\n", rv);

	sc->sc_flags |= IOP_ONLINE;
	rv = kthread_create(PRI_NONE, 0, NULL, iop_reconf_thread, sc,
	    &sc->sc_reconf_thread, "%s", device_xname(sc->sc_dev));
	mutex_exit(&sc->sc_conflock);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create reconfiguration thread (%d)\n", rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
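	/*
	 * Ask to be notified only when the LCT's change indicator moves
	 * past the last value we saw; the LCT_NOTIFY issued below then
	 * completes only when the table actually changes.
	 */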
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    device_xname(sc->sc_dev), chgind));

		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    device_xname(sc->sc_dev), le32toh(lct.changeindicator), rv));

		mutex_enter(&sc->sc_conflock);
		if (rv == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
		}
		(void)cv_timedwait(&sc->sc_confcv, &sc->sc_conflock, hz * 5);
		mutex_exit(&sc->sc_conflock);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	KASSERT(mutex_owned(&sc->sc_conflock));

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n",
			    device_xname(sc->sc_dev), tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				aprint_error_dev(sc->sc_dev, "bus scan failed\n");
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", device_xname(sc->sc_dev)));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", device_xname(sc->sc_dev), sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", device_xname(sc->sc_dev)));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", device_xname(sc->sc_dev)));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_WAITOK|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	}
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
			config_detach(ii->ii_dv, DETACH_FORCE);
			continue;
		}

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			aprint_error_dev(sc->sc_dev,
			    "%s failed reconfigure (%d)\n",
			    device_xname(ii->ii_dv), rv);
	}

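	/*
	 * Each utility initiator holds one inbound frame and IOP_MF_RESERVE
	 * frames are kept back for system messages; what remains is shared
	 * evenly among the regular initiators.
	 */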
	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	device_t dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    device_xname(ii->ii_dv));
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(sc->sc_dev, "iop", locs, &ia,
		    iop_print, config_stdsubmatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, device_xname(dv));
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo, size_t l)
{
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		snprintf(devinfo, l, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, l);
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup_private(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREWRITE);
	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_POSTWRITE);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

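	/*
	 * The IOP DMAs the status block into the scratch page and sets the
	 * sync byte to 0xff once the transfer completes; poll for that for
	 * up to 10 seconds.
	 */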
	for (i = 100; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			kpause("iopstat", false, hz / 10, NULL);
	}

	if (st->syncbyte != 0xff) {
		aprint_error_dev(sc->sc_dev, "STATUS_GET timed out\n");
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_dmamap->dm_segs[0].ds_addr;
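	/* Account for the two SGL words appended above in the size field. */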
	mb[0] += 2 << 16;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE);
	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		aprint_error_dev(sc->sc_dev, "outbound FIFO init failed (%d)\n",
		    le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA alloc = %d\n",
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA map = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA create = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA load = %d\n", rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;

		/* Now safe to sync the reply map. */
		sc->sc_curib = 0;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", device_xname(sc->sc_dev),
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = malloc(size, M_DEVBUF, M_WAITOK);
	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

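	/*
	 * Fetch the table using the IOP's size estimate first; if the
	 * actual size (from the table header) differs, fetch it again.
	 */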
	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = malloc(esize, M_DEVBUF, M_WAITOK);
	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = malloc(size, M_DEVBUF, M_WAITOK);
		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
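	/* struct i2o_lct already contains one entry, hence the adjustment. */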
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Post a SYS_ENABLE message to the adapter.
 */
int
iop_sys_enable(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;

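	/*
	 * NO_DATA_XFER/INVALID_REQUEST is accepted as success below,
	 * presumably what an IOP that is already enabled returns.
	 */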
	rv = iop_msg_post(sc, im, &mf, 30000);
	if (rv == 0) {
		if ((im->im_flags & IM_FAIL) != 0)
			rv = ENXIO;
		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
			rv = 0;
		else
			rv = EIO;
	}

	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK);
	im->im_dvcontext = pgop;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    device_xname(sc->sc_dev), tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
	      int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	pgop = malloc(totsize, M_DEVBUF, M_WAITOK);
	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(sc->sc_dev,
		    "FIELD_SET failed for tid %d group %d\n", tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(sc->sc_dev,
		    "TABLE_CLEAR failed for tid %d group %d\n", tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	pgop = malloc(totsize, M_DEVBUF, M_WAITOK);
	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(sc->sc_dev,
		    "ADD_ROW failed for tid %d group %d row %d\n",
		    tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (device_unit(sc->sc_dev) + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't alloc priv mem space, err = %d\n", rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't alloc priv i/o space, err = %d\n", rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE);
	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		aprint_error_dev(sc->sc_dev, "reset rejected, status 0x%x\n",
		    le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		aprint_error_dev(sc->sc_dev, "reset failed\n");
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	cv_init(&ii->ii_cv, "iopevt");

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_REMOVE(ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);

	cv_destroy(&ii->ii_cv);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	KASSERT(mutex_owned(&sc->sc_intrlock));

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)((char *)sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			aprint_error_dev(sc->sc_dev,
			    "WARNING: bad ictx returned (%x)\n", ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			aprint_error_dev(sc->sc_dev,
			    "WARNING: bad tctx returned (0x%08x, %p)\n",
			    tctx, im);
			if (im != NULL)
				aprint_error_dev(sc->sc_dev,
				    "flags=0x%08x tctx=0x%08x\n",
				    im->im_flags, im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", device_xname(sc->sc_dev));
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;
		im->im_detstatus = le16toh(rb->detail);

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
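			/*
			 * The size field counts 32-bit words in the top
			 * half of msgflags; ">> 14 & ~3" converts it
			 * straight to bytes.
			 */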
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			cv_broadcast(&im->im_cv);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
			if (ii != NULL) {
				mutex_spin_exit(&sc->sc_intrlock);
				(*ii->ii_intr)(ii->ii_dv, im, rb);
				mutex_spin_enter(&sc->sc_intrlock);
			}
		}
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		if (ii != NULL) {
			mutex_spin_exit(&sc->sc_intrlock);
			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
			mutex_spin_enter(&sc->sc_intrlock);
		}
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	mutex_spin_enter(&sc->sc_intrlock);

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) {
		mutex_spin_exit(&sc->sc_intrlock);
		return (0);
	}

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	mutex_spin_exit(&sc->sc_intrlock);
	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(device_t dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", device_xname(dv), event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	mutex_spin_enter(&sc->sc_intrlock);
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);

	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	mutex_spin_enter(&sc->sc_intrlock);
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
	    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
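	/*
	 * The current message size (upper 16 bits of the header word, in
	 * 32-bit words) marks where the SGL starts; each SIMPLE element
	 * occupies two words (flags/length and address).
	 */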
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
	mb[0] += (nsegs << 17);
	return (0);

 bad:
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}

/*
 * Map a block I/O data transfer (different in that there's only one per
 * message maximum, and PAGE addressing may be used).  Write a scatter
 * gather list into the message frame.
 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
		void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
	if (rv != 0)
		return (rv);

	off = mb[0] >> 16;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
			slen = ds->ds_len;
			saddr = ds->ds_addr;

			while (slen > 0) {
				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
				tlen = uimin(eaddr - saddr, slen);
				slen -= tlen;
				*p++ = le32toh(saddr);
				saddr = eaddr;
				nsegs++;
			}
		}

		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		if (out)
			mb[off] |= I2O_SGL_DATA_OUT;
	} else {
		p = mb + off;
		nsegs = dm->dm_nsegs;

		if (out)
			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
		else
			flg = I2O_SGL_SIMPLE;

		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
			p[0] = (u_int32_t)ds->ds_len | flg;
			p[1] = (u_int32_t)ds->ds_addr;
		}

		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		p[1] = (u_int32_t)ds->ds_addr;
		nsegs <<= 1;
	}

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * Adjust the SGL offset and total message size fields.  We don't
	 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
	 */
	mb[0] += ((off << 4) + (nsegs << 16));
	return (0);
}
2043
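/*
 * Worked example (illustrative figures): with a 128 byte frame and the
 * SGL starting at word 10, only ((128 >> 2) - 10) >> 1 == 11 SIMPLE
 * elements fit.  A 64KB transfer scattered over 16 discontiguous pages
 * overflows that, but expressed as a PAGE_LIST it needs one header word
 * plus 16 page addresses: 10 + 17 == 27 of the 32 available words, so
 * it still fits in the frame.
 */
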
/*
 * Unmap all data transfers associated with a message wrapper.
 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

#ifdef I2ODEBUG
	if (im->im_xfer[0].ix_size == 0)
		panic("iop_msg_unmap: no transfers mapped");
#endif

	for (ix = im->im_xfer, i = 0;;) {
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
		if (++i >= IOP_MAX_MSG_XFERS)
			break;
		if ((++ix)->ix_size == 0)
			break;
	}
}

/*
 * Post a message frame to the IOP's inbound queue.
 */
int
iop_post(struct iop_softc *sc, u_int32_t *mb)
{
	u_int32_t mfa;

#ifdef I2ODEBUG
	if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
		panic("iop_post: frame too large");
#endif

	mutex_spin_enter(&sc->sc_intrlock);

	/*
	 * Allocate a slot with the IOP.  The inbound FIFO is read twice,
	 * as the first read may spuriously return IOP_MFA_EMPTY (a quirk
	 * similar to the one worked around in iop_msg_poll()).
	 */
	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
			mutex_spin_exit(&sc->sc_intrlock);
			aprint_error_dev(sc->sc_dev, "mfa not forthcoming\n");
			return (EAGAIN);
		}

	/* Perform reply buffer DMA synchronisation. */
	if (sc->sc_rep_size != 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);
	}

	/* Copy out the message frame. */
	bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
	    mb[0] >> 16);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
	    (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	mutex_spin_exit(&sc->sc_intrlock);
	return (0);
}

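/*
 * Note on the arithmetic above: bits 16-31 of mb[0] hold the frame
 * length in 32-bit words, so "mb[0] >> 16" is the word count handed to
 * bus_space_write_region_4(), and "(mb[0] >> 14) & ~3" is the same
 * length scaled to bytes for the barrier.  For example, a 12 word frame
 * gives a 48 byte barrier region.
 */
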
/*
 * Post a message to the IOP and deal with completion.
 */
int
iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
{
	u_int32_t *mb;
	int rv;

	mb = xmb;

	/* Terminate the scatter/gather list chain. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
		if ((im->im_flags & IM_POLL) != 0)
			iop_msg_poll(sc, im, timo);
		else
			iop_msg_wait(sc, im, timo);

		mutex_spin_enter(&sc->sc_intrlock);
		if ((im->im_flags & IM_REPLIED) != 0) {
			if ((im->im_flags & IM_NOSTATUS) != 0)
				rv = 0;
			else if ((im->im_flags & IM_FAIL) != 0)
				rv = ENXIO;
			else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
				rv = EIO;
			else
				rv = 0;
		} else
			rv = EBUSY;
		mutex_spin_exit(&sc->sc_intrlock);
	} else
		rv = 0;

	return (rv);
}

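#if 0
/*
 * Illustrative sketch only (kept under #if 0, never compiled): the
 * typical life cycle of a message that carries a data transfer, using
 * the interfaces above.  The frame-building step and the sizing of mb[]
 * are assumptions made for the example; see iop_util_claim() below for
 * a real caller that posts a complete frame.
 */
static int
iop_example_xfer(struct iop_softc *sc, void *buf, int size)
{
	struct iop_msg *im;
	u_int32_t mb[IOP_MAX_MSG_XFERS * 2 + 8];	/* assumed large enough */
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);
	/*
	 * Build the function-specific header in mb[] here: the frame size
	 * in bits 16-31 of mb[0], the target TID, and im->im_tctx as the
	 * transaction context, per the I2O spec.
	 */
	rv = iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	if (rv == 0) {
		rv = iop_msg_post(sc, im, mb, 5000);
		iop_msg_unmap(sc, im);
	}
	iop_msg_free(sc, im);
	return (rv);
}
#endif
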
/*
 * Spin until the specified message is replied to.
 */
static void
iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int32_t rmfa;

	mutex_spin_enter(&sc->sc_intrlock);

	for (timo *= 10; timo != 0; timo--) {
		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
			/* Double read to account for IOP bug. */
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa != IOP_MFA_EMPTY) {
				iop_handle_reply(sc, rmfa);

				/*
				 * Return the reply frame to the IOP's
				 * outbound FIFO.
				 */
				iop_outl(sc, IOP_REG_OFIFO, rmfa);
			}
		}
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		mutex_spin_exit(&sc->sc_intrlock);
		DELAY(100);
		mutex_spin_enter(&sc->sc_intrlock);
	}

	if (timo == 0) {
#ifdef I2ODEBUG
		printf("%s: poll - no reply\n", device_xname(sc->sc_dev));
		if (iop_status_get(sc, 1) != 0)
			printf("iop_msg_poll: unable to retrieve status\n");
		else
			printf("iop_msg_poll: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
#endif
	}

	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Sleep until the specified message is replied to.
 */
static void
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	int rv;

	mutex_spin_enter(&sc->sc_intrlock);
	if ((im->im_flags & IM_REPLIED) != 0) {
		mutex_spin_exit(&sc->sc_intrlock);
		return;
	}
	rv = cv_timedwait(&im->im_cv, &sc->sc_intrlock, mstohz(timo));
	mutex_spin_exit(&sc->sc_intrlock);

#ifdef I2ODEBUG
	if (rv != 0) {
		printf("iop_msg_wait: cv_timedwait() == %d\n", rv);
		if (iop_status_get(sc, 0) != 0)
			printf("%s: unable to retrieve status\n", __func__);
		else
			printf("%s: IOP state = %d\n", __func__,
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
	}
#else
	__USE(rv);
#endif
}

/*
 * Release an unused message frame back to the IOP's inbound fifo.
 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op. */
	iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
	iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	iop_outl_msg(sc, mfa + 8, 0);
	iop_outl_msg(sc, mfa + 12, 0);

	iop_outl(sc, IOP_REG_IFIFO, mfa);
}

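/*
 * Frame layout note: the first word written above carries the I2O
 * version in its low bits and the frame length, four 32-bit words, in
 * bits 16-31; the second selects the UtilNOP function addressed to the
 * IOP's own TID.  The two zero words fill the initiator and transaction
 * context slots, which a NOP does not need.
 */
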
#ifdef I2ODEBUG
/*
 * Dump a reply frame header.
 */
static void
iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
{
	u_int function, detail;
	const char *statusstr;

	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
	detail = le16toh(rb->detail);

	printf("%s: reply:\n", device_xname(sc->sc_dev));

	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: function=0x%02x status=0x%02x (%s)\n",
	    device_xname(sc->sc_dev), function, rb->reqstatus, statusstr);
	printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
	    device_xname(sc->sc_dev), detail, le32toh(rb->msgictx),
	    le32toh(rb->msgtctx));
	printf("%s: tidi=%d tidt=%d flags=0x%02x\n", device_xname(sc->sc_dev),
	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
	    (le32toh(rb->msgflags) >> 8) & 0xff);
}
#endif

/*
 * Dump a transport failure reply.
 */
static void
iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
{

	printf("%s: WARNING: transport failure:\n", device_xname(sc->sc_dev));

	printf("%s: ictx=0x%08x tctx=0x%08x\n", device_xname(sc->sc_dev),
	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
	printf("%s: failurecode=0x%02x severity=0x%02x\n",
	    device_xname(sc->sc_dev), fn->failurecode, fn->severity);
	printf("%s: highestver=0x%02x lowestver=0x%02x\n",
	    device_xname(sc->sc_dev), fn->highestver, fn->lowestver);
}

/*
 * Translate an I2O ASCII field into a C string.
 */
void
iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
{
	int hc, lc, i, nit;

	dlen--;
	lc = 0;
	hc = 0;
	i = 0;

	/*
	 * DPT uses NUL as a space, whereas AMI uses it as a terminator.
	 * The spec has nothing to say about it.  Since AMI fields are
	 * usually filled with junk after the terminator, we treat NUL as
	 * a terminator on everything but DPT hardware.
	 */
	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);

	while (slen-- != 0 && dlen-- != 0) {
		if (nit && *src == '\0')
			break;
		else if (*src <= 0x20 || *src >= 0x7f) {
			if (hc)
				dst[i++] = ' ';
		} else {
			hc = 1;
			dst[i++] = *src;
			lc = i;
		}
		src++;
	}

	dst[lc] = '\0';
}

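/*
 * Example (illustrative): given a 12 byte field "RAID-5\0\0\0\0\0\0"
 * from an AMI IOP, the loop stops at the first NUL and the result is
 * the C string "RAID-5".  On DPT hardware the NULs are treated as
 * spaces instead, and the final dst[lc] = '\0' trims them off the tail.
 */
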
/*
 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
 */
int
iop_print_ident(struct iop_softc *sc, int tid)
{
	struct {
		struct i2o_param_op_results pr;
		struct i2o_param_read_results prr;
		struct i2o_param_device_identity di;
	} __packed p;
	char buf[32];
	int rv;

	rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
	    sizeof(p), NULL);
	if (rv != 0)
		return (rv);

	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
	    sizeof(buf));
	printf(" <%s, ", buf);
	iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
	    sizeof(buf));
	printf("%s, ", buf);
	iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
	printf("%s>", buf);

	return (0);
}

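/*
 * The resulting autoconfiguration output looks something like (values
 * purely illustrative): " <ADAPTEC, RAID-5 VOLUME, 370F>".
 */
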
/*
 * Claim or unclaim the specified TID.
 */
int
iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
    int flags)
{
	struct iop_msg *im;
	struct i2o_util_claim mf;
	int rv, func;

	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
	im = iop_msg_alloc(sc, IM_WAIT);

	/* We can use the same structure, as they're identical. */
	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = flags;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Perform an abort.
 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
{
	struct iop_msg *im;
	struct i2o_util_abort mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = (func << 24) | flags;
	mf.tctxabort = tctxabort;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Enable or disable reception of events for the specified device.
 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
{
	struct i2o_util_event_register mf;

	mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = 0;
	mf.eventmask = mask;

	/* This message is replied to only when events are signalled. */
	return (iop_post(sc, (u_int32_t *)&mf));
}

int
iopopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct iop_softc *sc;

	if ((sc = device_lookup_private(&iop_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & IOP_ONLINE) == 0)
		return (ENXIO);
	if ((sc->sc_flags & IOP_OPEN) != 0)
		return (EBUSY);
	sc->sc_flags |= IOP_OPEN;

	return (0);
}

int
iopclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct iop_softc *sc;

	sc = device_lookup_private(&iop_cd, minor(dev));
	sc->sc_flags &= ~IOP_OPEN;

	return (0);
}

int
iopioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct iop_softc *sc;
	struct iovec *iov;
	int rv, i;

	sc = device_lookup_private(&iop_cd, minor(dev));
	rv = 0;

	switch (cmd) {
	case IOPIOCPT:
		rv = kauth_authorize_device_passthru(l->l_cred, dev,
		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
		if (rv)
			return (rv);

		return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));

	case IOPIOCGSTATUS:
		iov = (struct iovec *)data;
		i = sizeof(struct i2o_status);
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		if ((rv = iop_status_get(sc, 0)) == 0)
			rv = copyout(&sc->sc_status, iov->iov_base, i);
		return (rv);

	case IOPIOCGLCT:
	case IOPIOCGTIDMAP:
	case IOPIOCRECONFIG:
		break;

	default:
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		printf("%s: unknown ioctl %lx\n",
		    device_xname(sc->sc_dev), cmd);
#endif
		return (ENOTTY);
	}

	mutex_enter(&sc->sc_conflock);

	switch (cmd) {
	case IOPIOCGLCT:
		iov = (struct iovec *)data;
		i = le16toh(sc->sc_lct->tablesize) << 2;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_lct, iov->iov_base, i);
		break;

	case IOPIOCRECONFIG:
		rv = iop_reconfigure(sc, 0);
		break;

	case IOPIOCGTIDMAP:
		iov = (struct iovec *)data;
		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
		break;
	}

	mutex_exit(&sc->sc_conflock);
	return (rv);
}

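/*
 * Illustrative userland sketch (not part of this file; the device path
 * and error handling are assumptions): fetching the IOP status block
 * through IOPIOCGSTATUS.  The iovec describes the caller's buffer, and
 * iov_len is clipped to the smaller of the two sizes, as above.
 *
 *	struct i2o_status st;
 *	struct iovec iov = { .iov_base = &st, .iov_len = sizeof(st) };
 *	int fd = open("/dev/iop0", O_RDWR);
 *	if (fd != -1 && ioctl(fd, IOPIOCGSTATUS, &iov) == 0)
 *		printf("got %zu status bytes\n", iov.iov_len);
 */
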
static int
iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
{
	struct iop_msg *im;
	struct i2o_msg *mf;
	struct ioppt_buf *ptb;
	int rv, i, mapped;

	mf = NULL;
	im = NULL;
	mapped = 0;	/* nothing mapped yet; set once iop_msg_map() succeeds */

	if (pt->pt_msglen > sc->sc_framesize ||
	    pt->pt_msglen < sizeof(struct i2o_msg) ||
	    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
	    pt->pt_nbufs < 0 ||
#if 0
	    pt->pt_replylen < 0 ||
#endif
	    pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
		return (EINVAL);

	for (i = 0; i < pt->pt_nbufs; i++)
		if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
			rv = ENOMEM;
			goto bad;
		}

	mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
	if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
		goto bad;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
	im->im_rb = (struct i2o_reply *)mf;
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	for (i = 0; i < pt->pt_nbufs; i++) {
		ptb = &pt->pt_bufs[i];
		rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
		    ptb->ptb_datalen, ptb->ptb_out != 0, p);
		if (rv != 0)
			goto bad;
		mapped = 1;
	}

	if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
		goto bad;

	i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
	if (i > sc->sc_framesize)
		i = sc->sc_framesize;
	if (i > pt->pt_replylen)
		i = pt->pt_replylen;
	rv = copyout(im->im_rb, pt->pt_reply, i);

 bad:
	if (mapped != 0)
		iop_msg_unmap(sc, im);
	if (im != NULL)
		iop_msg_free(sc, im);
	if (mf != NULL)
		free(mf, M_DEVBUF);
	return (rv);
}

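/*
 * Illustrative userland sketch (not part of this file; the frame body,
 * buffer sizes and timeout are assumptions, and the structure layout
 * follows <dev/i2o/iopio.h>): issuing a passthrough request with one
 * IOP-to-host data buffer via IOPIOCPT.
 *
 *	struct ioppt pt;
 *	u_int32_t msg[16], reply[32];
 *	char data[512];
 *
 *	(build a valid I2O frame in msg[] first)
 *	pt.pt_msg = msg;
 *	pt.pt_msglen = sizeof(msg);
 *	pt.pt_reply = reply;
 *	pt.pt_replylen = sizeof(reply);
 *	pt.pt_timo = 5000;		(accepted range: 1000..300000 ms)
 *	pt.pt_nbufs = 1;
 *	pt.pt_bufs[0].ptb_data = data;
 *	pt.pt_bufs[0].ptb_datalen = sizeof(data);
 *	pt.pt_bufs[0].ptb_out = 0;
 *	ioctl(fd, IOPIOCPT, &pt);
 */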