/*	$NetBSD: iop.c,v 1.69.2.1 2008/05/18 12:33:41 yamt Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.69.2.1 2008/05/18 12:33:41 yamt Exp $");

#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>
#include <sys/kauth.h>
#include <sys/bus.h>

#include <uvm/uvm_extern.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#include "locators.h"

#define	POLL(ms, cond)						\
do {								\
	int xi;							\
	for (xi = (ms) * 10; xi; xi--) {			\
		if (cond)					\
			break;					\
		DELAY(100);					\
	}							\
} while (/* CONSTCOND */0);
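
/*
 * For example, POLL(5000, *sw != 0) busy-waits for up to 5000ms (ms * 10
 * iterations of DELAY(100)), re-testing the condition on each iteration;
 * see iop_reset() below for a real caller.
 */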

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)
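
/*
 * A transaction context (tctx) encodes the index of a message wrapper in
 * sc_ims in its low IOP_TCTX_SHIFT bits; the remaining bits carry a
 * generation number, bumped in iop_msg_alloc(), which lets
 * iop_handle_reply() detect stale or corrupt contexts.
 */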

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long	iop_ictxhash;
static void	*iop_sdh;
static struct	i2o_systab *iop_systab;
static int	iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

static struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
	const char	*ic_caption;
} const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		"executive"
	},
	{
		I2O_CLASS_DDM,
		0,
		"device driver module"
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"random block storage"
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"sequential storage"
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		"LAN port"
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		"WAN port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		"fibrechannel port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		"fibrechannel peripheral"
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		"SCSI peripheral"
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		"ATE port"
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		"ATE peripheral"
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		"floppy controller"
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		"floppy device"
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		"bus adapter port"
	},
};

static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};

static inline u_int32_t iop_inl(struct iop_softc *, int);
static inline void iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t iop_inl_msg(struct iop_softc *, int);
static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *, size_t);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);

static void	iop_adjqparam(struct iop_softc *, int);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
				struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_sys_enable(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

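	/*
	 * `state' counts the allocation steps completed so far, and picks
	 * the unwind point at the bail_out labels should a later step fail.
	 */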
	state = 0;

	printf("I2O adapter");

	mutex_init(&sc->sc_intrlock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&sc->sc_conflock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_confcv, "iopconf");

	if (iop_ictxhashtbl == NULL) {
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    true, &iop_ictxhash);
	}

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		aprint_error_dev(&sc->sc_dv, "cannot create scratch dmamap\n");
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(&sc->sc_dv, "cannot alloc scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		aprint_error_dev(&sc->sc_dv, "cannot map scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(&sc->sc_dv, "cannot load scratch dmamap\n");
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Avoid syncing the reply map until it's set up. */
	sc->sc_curib = 0x123;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		aprint_error_dev(&sc->sc_dv, "not responding (reset)\n");
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		aprint_error_dev(&sc->sc_dv, "not responding (get status)\n");
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n",
	    device_xname(&sc->sc_dv),
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", device_xname(&sc->sc_dv));
	printf("%s: mem %04x %04x %08x\n", device_xname(&sc->sc_dv),
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", device_xname(&sc->sc_dv),
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		aprint_error_dev(&sc->sc_dv, "frame size too small (%d)\n",
		    sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		aprint_error_dev(&sc->sc_dv, "memory allocation failure\n");
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "couldn't create dmamap (%d)", rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
		cv_init(&im->im_cv, "iopmsg");
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to init outbound FIFO\n");
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", device_xname(&sc->sc_dv),
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    device_xname(&sc->sc_dv), sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	return;

 bail_out3:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
 bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;
	int locs[IOPCF_NLOCS];

	sc = device_private(self);
	mutex_enter(&sc->sc_conflock);

	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", device_xname(&sc->sc_dv));

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", device_xname(&sc->sc_dv));
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				aprint_error_dev(&sc->sc_dv, "unable to retrieve status\n");
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0) {
			mutex_exit(&sc->sc_conflock);
			return;
		}

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = device_unit(&iop->sc_dv) + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to set system table\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}
	if (iop_sys_enable(sc) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to enable system\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to register for events");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	locs[IOPCF_TID] = I2O_TID_IOP;
	config_found_sm_loc(self, "iop", locs, &ia, iop_print,
	    config_stdsubmatch);

	/*
	 * Start device configuration.
	 */
	if ((rv = iop_reconfigure(sc, 0)) == -1)
		aprint_error_dev(&sc->sc_dv, "configure failed (%d)\n", rv);

	sc->sc_flags |= IOP_ONLINE;
	rv = kthread_create(PRI_NONE, 0, NULL, iop_reconf_thread, sc,
	    &sc->sc_reconf_thread, "%s", device_xname(&sc->sc_dv));
	mutex_exit(&sc->sc_conflock);
	if (rv != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to create reconfiguration thread (%d)", rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct lwp *l;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;
	l = curlwp;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    device_xname(&sc->sc_dv), chgind));

		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    device_xname(&sc->sc_dv), le32toh(lct.changeindicator), rv));

		mutex_enter(&sc->sc_conflock);
		if (rv == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
		}
		(void)cv_timedwait(&sc->sc_confcv, &sc->sc_conflock, hz * 5);
		mutex_exit(&sc->sc_conflock);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	KASSERT(mutex_owned(&sc->sc_conflock));

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", device_xname(&sc->sc_dv),
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				aprint_error_dev(&sc->sc_dv, "bus scan failed\n");
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", device_xname(&sc->sc_dv)));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", device_xname(&sc->sc_dv), sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", device_xname(&sc->sc_dv)));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", device_xname(&sc->sc_dv)));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	}
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
			config_detach(ii->ii_dv, DETACH_FORCE);
			continue;
		}

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			aprint_error_dev(&sc->sc_dv, "%s failed reconfigure (%d)\n",
			    device_xname(ii->ii_dv), rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    device_xname(ii->ii_dv));
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(&sc->sc_dv, "iop", locs, &ia,
		    iop_print, config_stdsubmatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, device_xname(dv));
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo, size_t l)
{
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		snprintf(devinfo, l, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, l);
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			kpause("iopstat", false, hz / 10, NULL);
	}

	if (st->syncbyte != 0xff) {
		aprint_error_dev(&sc->sc_dv, "STATUS_GET timed out\n");
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
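	/*
	 * A SIMPLE SG element occupies two words: the byte count and
	 * flags, followed by the 32-bit physical address of the buffer.
	 */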
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		aprint_error_dev(&sc->sc_dv, "outbound FIFO init failed (%d)\n",
		    le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA alloc = %d\n",
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA map = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA create = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA load = %d\n", rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;

		/* Now safe to sync the reply map. */
		sc->sc_curib = 0;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	uvm_lwp_hold(curlwp);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	uvm_lwp_rele(curlwp);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", device_xname(&sc->sc_dv),
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Post a SYS_ENABLE message to the adapter.
 */
int
iop_sys_enable(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, 30000);
	if (rv == 0) {
		if ((im->im_flags & IM_FAIL) != 0)
			rv = ENXIO;
		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
			rv = 0;
		else
			rv = EIO;
	}

	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		uvm_lwp_hold(curlwp);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		uvm_lwp_rele(curlwp);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    device_xname(&sc->sc_dv), tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
	      int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(&sc->sc_dv, "FIELD_SET failed for tid %d group %d\n",
		    tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}
/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	uvm_lwp_hold(curlwp);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(&sc->sc_dv, "TABLE_CLEAR failed for tid %d group %d\n",
		    tid, group);

	iop_msg_unmap(sc, im);
	uvm_lwp_rele(curlwp);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(&sc->sc_dv, "ADD_ROW failed for tid %d group %d row %d\n",
		    tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}
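
/*
 * For example, iop_shutdown() above quiesces each IOP with a polled
 * simple command and a five second timeout:
 *
 *	iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
 *	    0, 5000);
 */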

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (device_unit(&sc->sc_dv) + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "can't alloc priv mem space, err = %d\n", rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "can't alloc priv i/o space, err = %d\n", rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	uvm_lwp_hold(curlwp);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	uvm_lwp_rele(curlwp);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		aprint_error_dev(&sc->sc_dv, "reset rejected, status 0x%x\n",
		    le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		aprint_error_dev(&sc->sc_dv, "reset failed\n");
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	cv_init(&ii->ii_cv, "iopevt");

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_REMOVE(ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);

	cv_destroy(&ii->ii_cv);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	KASSERT(mutex_owned(&sc->sc_intrlock));

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)((char *)sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			aprint_error_dev(&sc->sc_dv, "WARNING: bad ictx returned (%x)\n",
			    ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			aprint_error_dev(&sc->sc_dv, "WARNING: bad tctx returned (0x%08x, %p)\n", tctx, im);
			if (im != NULL)
				aprint_error_dev(&sc->sc_dv, "flags=0x%08x tctx=0x%08x\n",
				    im->im_flags, im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", device_xname(&sc->sc_dv));
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;
		im->im_detstatus = le16toh(rb->detail);

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			cv_broadcast(&im->im_cv);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
			if (ii != NULL) {
				mutex_spin_exit(&sc->sc_intrlock);
				(*ii->ii_intr)(ii->ii_dv, im, rb);
				mutex_spin_enter(&sc->sc_intrlock);
			}
		}
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		if (ii != NULL) {
			mutex_spin_exit(&sc->sc_intrlock);
			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
			mutex_spin_enter(&sc->sc_intrlock);
		}
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	mutex_spin_enter(&sc->sc_intrlock);

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) {
		mutex_spin_exit(&sc->sc_intrlock);
		return (0);
	}

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	mutex_spin_exit(&sc->sc_intrlock);
	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", device_xname(dv), event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	mutex_spin_enter(&sc->sc_intrlock);
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);
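
	/*
	 * Keep the wrapper's index in the low bits of im_tctx and stamp
	 * a fresh generation number into the high bits; replies carrying
	 * a stale tctx are then caught in iop_handle_reply().
	 */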
	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	mutex_spin_enter(&sc->sc_intrlock);
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
	    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
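	/*
	 * The frame holds (sc_framesize >> 2) 32-bit words in total;
	 * mb[0] >> 16 is the current message length in words, and each
	 * SIMPLE element consumes two words (length/flags, then address).
	 */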
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
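	/* Each SIMPLE element just written adds two words to the size field. */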
1963 mb[0] += (nsegs << 17);
1964 return (0);
1965
1966 bad:
1967 if (xn != 0)
1968 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1969 return (rv);
1970 }
1971
1972 /*
1973 * Map a block I/O data transfer (different in that there's only one per
1974 * message maximum, and PAGE addressing may be used). Write a scatter
1975 * gather list into the message frame.
1976 */
1977 int
1978 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1979 void *xferaddr, int xfersize, int out)
1980 {
1981 bus_dma_segment_t *ds;
1982 bus_dmamap_t dm;
1983 struct iop_xfer *ix;
1984 u_int rv, i, nsegs, off, slen, tlen, flg;
1985 paddr_t saddr, eaddr;
1986 u_int32_t *p;
1987
1988 #ifdef I2ODEBUG
1989 if (xfersize == 0)
1990 panic("iop_msg_map_bio: null transfer");
1991 if (xfersize > IOP_MAX_XFER)
1992 panic("iop_msg_map_bio: transfer too large");
1993 if ((im->im_flags & IM_SGLOFFADJ) != 0)
1994 panic("iop_msg_map_bio: SGLOFFADJ");
1995 #endif
1996
1997 ix = im->im_xfer;
1998 dm = ix->ix_map;
1999 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
2000 BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
2001 if (rv != 0)
2002 return (rv);
2003
2004 off = mb[0] >> 16;
2005 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
2006
2007 /*
2008 * If the transfer is highly fragmented and won't fit using SIMPLE
2009 * elements, use PAGE_LIST elements instead. SIMPLE elements are
2010 * potentially more efficient, both for us and the IOP.
2011 */
2012 if (dm->dm_nsegs > nsegs) {
2013 nsegs = 1;
2014 p = mb + off + 1;
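		/*
		 * A PAGE_LIST element is one header word (byte count and
		 * flags) followed by a 32-bit physical address for each
		 * page-sized chunk; nsegs counts the words written,
		 * starting at 1 to account for the header.
		 */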
2015
2016 /* XXX This should be done with a bus_space flag. */
2017 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2018 slen = ds->ds_len;
2019 saddr = ds->ds_addr;
2020
2021 while (slen > 0) {
2022 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2023 tlen = min(eaddr - saddr, slen);
2024 slen -= tlen;
2025 *p++ = le32toh(saddr);
2026 saddr = eaddr;
2027 nsegs++;
2028 }
2029 }
2030
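		/* Write the PAGE_LIST header word: byte count and flags. */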
2031 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2032 I2O_SGL_END;
2033 if (out)
2034 mb[off] |= I2O_SGL_DATA_OUT;
2035 } else {
2036 p = mb + off;
2037 nsegs = dm->dm_nsegs;
2038
2039 if (out)
2040 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2041 else
2042 flg = I2O_SGL_SIMPLE;
2043
2044 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2045 p[0] = (u_int32_t)ds->ds_len | flg;
2046 p[1] = (u_int32_t)ds->ds_addr;
2047 }
2048
2049 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2050 I2O_SGL_END;
2051 p[1] = (u_int32_t)ds->ds_addr;
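		/*
		 * Each SIMPLE element is two words; count words, not
		 * elements, for the size adjustment below.
		 */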
2052 nsegs <<= 1;
2053 }
2054
2055 /* Fix up the transfer record, and sync the map. */
2056 ix->ix_flags = (out ? IX_OUT : IX_IN);
2057 ix->ix_size = xfersize;
2058 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2060
2061 /*
2062 * Adjust the SGL offset and total message size fields. We don't
2063 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2064 */
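	/*
	 * "off" fills the SGL offset nibble (bits 4-7), and nsegs words
	 * are added to the message size field (bits 16-31).
	 */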
2065 mb[0] += ((off << 4) + (nsegs << 16));
2066 return (0);
2067 }
2068
2069 /*
2070 * Unmap all data transfers associated with a message wrapper.
2071 */
2072 void
2073 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2074 {
2075 struct iop_xfer *ix;
2076 int i;
2077
2078 #ifdef I2ODEBUG
2079 if (im->im_xfer[0].ix_size == 0)
2080 panic("iop_msg_unmap: no transfers mapped");
2081 #endif
2082
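	/* A zero ix_size marks the end of the mapped transfer list. */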
2083 for (ix = im->im_xfer, i = 0;;) {
2084 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2085 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2086 BUS_DMASYNC_POSTREAD);
2087 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2088
2089 /* Only the first DMA map is static. */
2090 if (i != 0)
2091 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2092 if ((++ix)->ix_size == 0)
2093 break;
2094 if (++i >= IOP_MAX_MSG_XFERS)
2095 break;
2096 }
2097 }
2098
2099 /*
2100 * Post a message frame to the IOP's inbound queue.
2101 */
2102 int
2103 iop_post(struct iop_softc *sc, u_int32_t *mb)
2104 {
2105 u_int32_t mfa;
2106
2107 #ifdef I2ODEBUG
2108 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2109 panic("iop_post: frame too large");
2110 #endif
2111
2112 mutex_spin_enter(&sc->sc_intrlock);
2113
2114 /* Allocate a slot with the IOP. */
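	/*
	 * Double read, as with the outbound FIFO in iop_msg_poll(): some
	 * IOPs spuriously return IOP_MFA_EMPTY on the first read.
	 */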
2115 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2116 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2117 mutex_spin_exit(&sc->sc_intrlock);
2118 aprint_error_dev(&sc->sc_dv, "mfa not forthcoming\n");
2119 return (EAGAIN);
2120 }
2121
2122 /* Perform reply buffer DMA synchronisation. */
2123 if (sc->sc_curib++ == 0)
2124 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2125 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2126
2127 /* Copy out the message frame. */
2128 bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2129 mb[0] >> 16);
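	/*
	 * mb[0] >> 16 is the frame size in 32-bit words; shifting by 14
	 * instead gives the size in bytes, with & ~3 keeping it aligned
	 * to a word boundary.
	 */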
2130 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
2131 (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2132
2133 /* Post the MFA back to the IOP. */
2134 iop_outl(sc, IOP_REG_IFIFO, mfa);
2135
2136 mutex_spin_exit(&sc->sc_intrlock);
2137 return (0);
2138 }
2139
2140 /*
2141 * Post a message to the IOP and deal with completion.
2142 */
2143 int
2144 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2145 {
2146 u_int32_t *mb;
2147 int rv;
2148
2149 mb = xmb;
2150
2151 /* Terminate the scatter/gather list chain. */
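	/* The final element's flags word sits two words from the end. */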
2152 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2153 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2154
2155 if ((rv = iop_post(sc, mb)) != 0)
2156 return (rv);
2157
2158 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2159 if ((im->im_flags & IM_POLL) != 0)
2160 iop_msg_poll(sc, im, timo);
2161 else
2162 iop_msg_wait(sc, im, timo);
2163
2164 mutex_spin_enter(&sc->sc_intrlock);
2165 if ((im->im_flags & IM_REPLIED) != 0) {
2166 if ((im->im_flags & IM_NOSTATUS) != 0)
2167 rv = 0;
2168 else if ((im->im_flags & IM_FAIL) != 0)
2169 rv = ENXIO;
2170 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2171 rv = EIO;
2172 else
2173 rv = 0;
2174 } else
2175 rv = EBUSY;
2176 mutex_spin_exit(&sc->sc_intrlock);
2177 } else
2178 rv = 0;
2179
2180 return (rv);
2181 }
2182
2183 /*
2184 * Spin until the specified message is replied to.
2185 */
2186 static void
2187 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2188 {
2189 u_int32_t rmfa;
2190
2191 mutex_spin_enter(&sc->sc_intrlock);
2192
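	/* timo is in milliseconds; poll the outbound FIFO every 100us. */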
2193 for (timo *= 10; timo != 0; timo--) {
2194 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2195 /* Double read to account for IOP bug. */
2196 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2197 if (rmfa == IOP_MFA_EMPTY)
2198 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2199 if (rmfa != IOP_MFA_EMPTY) {
2200 iop_handle_reply(sc, rmfa);
2201
2202 /*
2203 * Return the reply frame to the IOP's
2204 * outbound FIFO.
2205 */
2206 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2207 }
2208 }
2209 if ((im->im_flags & IM_REPLIED) != 0)
2210 break;
2211 mutex_spin_exit(&sc->sc_intrlock);
2212 DELAY(100);
2213 mutex_spin_enter(&sc->sc_intrlock);
2214 }
2215
2216 if (timo == 0) {
2217 #ifdef I2ODEBUG
2218 printf("%s: poll - no reply\n", device_xname(&sc->sc_dv));
2219 if (iop_status_get(sc, 1) != 0)
2220 printf("iop_msg_poll: unable to retrieve status\n");
2221 else
2222 printf("iop_msg_poll: IOP state = %d\n",
2223 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2224 #endif
2225 }
2226
2227 mutex_spin_exit(&sc->sc_intrlock);
2228 }
2229
2230 /*
2231 * Sleep until the specified message is replied to.
2232 */
2233 static void
2234 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2235 {
2236 int rv;
2237
2238 mutex_spin_enter(&sc->sc_intrlock);
2239 if ((im->im_flags & IM_REPLIED) != 0) {
2240 mutex_spin_exit(&sc->sc_intrlock);
2241 return;
2242 }
2243 rv = cv_timedwait(&im->im_cv, &sc->sc_intrlock, mstohz(timo));
2244 mutex_spin_exit(&sc->sc_intrlock);
2245
2246 #ifdef I2ODEBUG
2247 if (rv != 0) {
2248 printf("iop_msg_wait: tsleep() == %d\n", rv);
2249 if (iop_status_get(sc, 0) != 0)
2250 printf("iop_msg_wait: unable to retrieve status\n");
2251 else
2252 printf("iop_msg_wait: IOP state = %d\n",
2253 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2254 }
2255 #endif
2256 }
2257
2258 /*
2259 * Release an unused message frame back to the IOP's inbound fifo.
2260 */
2261 static void
2262 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2263 {
2264
2265 /* Use the frame to issue a no-op. */
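	/* (4 << 16) sets the message size field: four 32-bit words. */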
2266 iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2267 iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2268 iop_outl_msg(sc, mfa + 8, 0);
2269 iop_outl_msg(sc, mfa + 12, 0);
2270
2271 iop_outl(sc, IOP_REG_IFIFO, mfa);
2272 }
2273
2274 #ifdef I2ODEBUG
2275 /*
2276 * Dump a reply frame header.
2277 */
2278 static void
2279 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2280 {
2281 u_int function, detail;
2282 const char *statusstr;
2283
2284 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2285 detail = le16toh(rb->detail);
2286
2287 printf("%s: reply:\n", device_xname(&sc->sc_dv));
2288
2289 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2290 statusstr = iop_status[rb->reqstatus];
2291 else
2292 statusstr = "undefined error code";
2293
2294 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2295 device_xname(&sc->sc_dv), function, rb->reqstatus, statusstr);
2296 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2297 device_xname(&sc->sc_dv), detail, le32toh(rb->msgictx),
2298 le32toh(rb->msgtctx));
2299 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", device_xname(&sc->sc_dv),
2300 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2301 (le32toh(rb->msgflags) >> 8) & 0xff);
2302 }
2303 #endif
2304
2305 /*
2306 * Dump a transport failure reply.
2307 */
2308 static void
2309 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2310 {
2311
2312 printf("%s: WARNING: transport failure:\n", device_xname(&sc->sc_dv));
2313
2314 printf("%s: ictx=0x%08x tctx=0x%08x\n", device_xname(&sc->sc_dv),
2315 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2316 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2317 device_xname(&sc->sc_dv), fn->failurecode, fn->severity);
2318 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2319 device_xname(&sc->sc_dv), fn->highestver, fn->lowestver);
2320 }
2321
2322 /*
2323 * Translate an I2O ASCII field into a C string.
2324 */
2325 void
2326 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2327 {
2328 int hc, lc, i, nit;
2329
2330 dlen--;
2331 lc = 0;
2332 hc = 0;
2333 i = 0;
2334
2335 /*
2336 * DPT use NUL as a space, whereas AMI use it as a terminator. The
2337 * spec has nothing to say about it. Since AMI fields are usually
	 * filled with junk after the terminator, we stop at the first NUL
	 * unless the IOP is from DPT.
2339 */
2340 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2341
2342 while (slen-- != 0 && dlen-- != 0) {
2343 if (nit && *src == '\0')
2344 break;
2345 else if (*src <= 0x20 || *src >= 0x7f) {
2346 if (hc)
2347 dst[i++] = ' ';
2348 } else {
2349 hc = 1;
2350 dst[i++] = *src;
2351 lc = i;
2352 }
2353 src++;
2354 }
2355
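	/*
	 * lc indexes just past the last printable character copied, so
	 * terminating here also trims any trailing blanks.
	 */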
2356 dst[lc] = '\0';
2357 }
2358
2359 /*
2360 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2361 */
2362 int
2363 iop_print_ident(struct iop_softc *sc, int tid)
2364 {
2365 struct {
2366 struct i2o_param_op_results pr;
2367 struct i2o_param_read_results prr;
2368 struct i2o_param_device_identity di;
2369 } __attribute__ ((__packed__)) p;
2370 char buf[32];
2371 int rv;
2372
2373 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2374 sizeof(p), NULL);
2375 if (rv != 0)
2376 return (rv);
2377
2378 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2379 sizeof(buf));
2380 printf(" <%s, ", buf);
2381 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2382 sizeof(buf));
2383 printf("%s, ", buf);
2384 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2385 printf("%s>", buf);
2386
2387 return (0);
2388 }
2389
2390 /*
2391 * Claim or unclaim the specified TID.
2392 */
2393 int
2394 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2395 int flags)
2396 {
2397 struct iop_msg *im;
2398 struct i2o_util_claim mf;
2399 int rv, func;
2400
2401 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2402 im = iop_msg_alloc(sc, IM_WAIT);
2403
	/* One message structure serves both; the formats are identical. */
2405 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2406 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2407 mf.msgictx = ii->ii_ictx;
2408 mf.msgtctx = im->im_tctx;
2409 mf.flags = flags;
2410
2411 rv = iop_msg_post(sc, im, &mf, 5000);
2412 iop_msg_free(sc, im);
2413 return (rv);
2414 }
2415
2416 /*
2417 * Perform an abort.
2418 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
2421 {
2422 struct iop_msg *im;
2423 struct i2o_util_abort mf;
2424 int rv;
2425
2426 im = iop_msg_alloc(sc, IM_WAIT);
2427
2428 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2429 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2430 mf.msgictx = ii->ii_ictx;
2431 mf.msgtctx = im->im_tctx;
2432 mf.flags = (func << 24) | flags;
2433 mf.tctxabort = tctxabort;
2434
2435 rv = iop_msg_post(sc, im, &mf, 5000);
2436 iop_msg_free(sc, im);
2437 return (rv);
2438 }
2439
2440 /*
2441 * Enable or disable reception of events for the specified device.
2442 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2444 {
2445 struct i2o_util_event_register mf;
2446
2447 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2448 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2449 mf.msgictx = ii->ii_ictx;
2450 mf.msgtctx = 0;
2451 mf.eventmask = mask;
2452
2453 /* This message is replied to only when events are signalled. */
2454 return (iop_post(sc, (u_int32_t *)&mf));
2455 }
2456
2457 int
2458 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
2459 {
2460 struct iop_softc *sc;
2461
2462 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2463 return (ENXIO);
2464 if ((sc->sc_flags & IOP_ONLINE) == 0)
2465 return (ENXIO);
2466 if ((sc->sc_flags & IOP_OPEN) != 0)
2467 return (EBUSY);
2468 sc->sc_flags |= IOP_OPEN;
2469
2470 return (0);
2471 }
2472
2473 int
iopclose(dev_t dev, int flag, int mode, struct lwp *l)
2476 {
2477 struct iop_softc *sc;
2478
2479 sc = device_lookup(&iop_cd, minor(dev));
2480 sc->sc_flags &= ~IOP_OPEN;
2481
2482 return (0);
2483 }
2484
2485 int
2486 iopioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
2487 {
2488 struct iop_softc *sc;
2489 struct iovec *iov;
2490 int rv, i;
2491
2492 sc = device_lookup(&iop_cd, minor(dev));
2493 rv = 0;
2494
2495 switch (cmd) {
2496 case IOPIOCPT:
2497 rv = kauth_authorize_device_passthru(l->l_cred, dev,
2498 KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
2499 if (rv)
2500 return (rv);
2501
2502 return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));
2503
2504 case IOPIOCGSTATUS:
2505 iov = (struct iovec *)data;
2506 i = sizeof(struct i2o_status);
2507 if (i > iov->iov_len)
2508 i = iov->iov_len;
2509 else
2510 iov->iov_len = i;
2511 if ((rv = iop_status_get(sc, 0)) == 0)
2512 rv = copyout(&sc->sc_status, iov->iov_base, i);
2513 return (rv);
2514
2515 case IOPIOCGLCT:
2516 case IOPIOCGTIDMAP:
2517 case IOPIOCRECONFIG:
2518 break;
2519
2520 default:
2521 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2522 printf("%s: unknown ioctl %lx\n", device_xname(&sc->sc_dv), cmd);
2523 #endif
2524 return (ENOTTY);
2525 }
2526
2527 mutex_enter(&sc->sc_conflock);
2528
2529 switch (cmd) {
2530 case IOPIOCGLCT:
2531 iov = (struct iovec *)data;
2532 i = le16toh(sc->sc_lct->tablesize) << 2;
2533 if (i > iov->iov_len)
2534 i = iov->iov_len;
2535 else
2536 iov->iov_len = i;
2537 rv = copyout(sc->sc_lct, iov->iov_base, i);
2538 break;
2539
2540 case IOPIOCRECONFIG:
2541 rv = iop_reconfigure(sc, 0);
2542 break;
2543
2544 case IOPIOCGTIDMAP:
2545 iov = (struct iovec *)data;
2546 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2547 if (i > iov->iov_len)
2548 i = iov->iov_len;
2549 else
2550 iov->iov_len = i;
2551 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2552 break;
2553 }
2554
2555 mutex_exit(&sc->sc_conflock);
2556 return (rv);
2557 }
2558
2559 static int
2560 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2561 {
2562 struct iop_msg *im;
2563 struct i2o_msg *mf;
2564 struct ioppt_buf *ptb;
2565 int rv, i, mapped;
2566
2567 mf = NULL;
2568 im = NULL;
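	/* No buffers have been mapped yet; "mapped" gates cleanup at "bad". */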
	mapped = 0;
2570
2571 if (pt->pt_msglen > sc->sc_framesize ||
2572 pt->pt_msglen < sizeof(struct i2o_msg) ||
2573 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2574 pt->pt_nbufs < 0 ||
2575 #if 0
2576 pt->pt_replylen < 0 ||
2577 #endif
2578 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2579 return (EINVAL);
2580
2581 for (i = 0; i < pt->pt_nbufs; i++)
2582 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2583 rv = ENOMEM;
2584 goto bad;
2585 }
2586
2587 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2588 if (mf == NULL)
2589 return (ENOMEM);
2590
2591 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2592 goto bad;
2593
2594 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2595 im->im_rb = (struct i2o_reply *)mf;
2596 mf->msgictx = IOP_ICTX;
2597 mf->msgtctx = im->im_tctx;
2598
2599 for (i = 0; i < pt->pt_nbufs; i++) {
2600 ptb = &pt->pt_bufs[i];
2601 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2602 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2603 if (rv != 0)
2604 goto bad;
2605 mapped = 1;
2606 }
2607
2608 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2609 goto bad;
2610
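	/*
	 * The reply size lives in bits 16-31 of msgflags, in 32-bit
	 * words; shifting by 14 converts it to bytes.
	 */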
2611 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2612 if (i > sc->sc_framesize)
2613 i = sc->sc_framesize;
2614 if (i > pt->pt_replylen)
2615 i = pt->pt_replylen;
2616 rv = copyout(im->im_rb, pt->pt_reply, i);
2617
2618 bad:
2619 if (mapped != 0)
2620 iop_msg_unmap(sc, im);
2621 if (im != NULL)
2622 iop_msg_free(sc, im);
2623 if (mf != NULL)
2624 free(mf, M_DEVBUF);
2625 return (rv);
2626 }
2627