/*	$NetBSD: iop.c,v 1.68.12.1 2008/04/05 23:33:21 mjf Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.68.12.1 2008/04/05 23:33:21 mjf Exp $");

#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>
#include <sys/kauth.h>
#include <sys/bus.h>

#include <uvm/uvm_extern.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#include "locators.h"

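/*
 * Busy-wait for up to `ms' milliseconds for `cond' to become true,
 * re-checking the condition every 100us.
 */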
#define	POLL(ms, cond)							\
do {									\
	int xi;								\
	for (xi = (ms) * 10; xi; xi--) {				\
		if (cond)						\
			break;						\
		DELAY(100);						\
	}								\
} while (/* CONSTCOND */0);

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

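/*
 * A message wrapper's transaction context carries the wrapper's index in
 * sc_ims in the low IOP_TCTX_SHIFT bits; the bits above form a generation
 * number, advanced on each allocation, so stale replies can be detected.
 */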
#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static void *iop_sdh;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

static struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
	const char *ic_caption;
} const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		"executive"
	},
	{
		I2O_CLASS_DDM,
		0,
		"device driver module"
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"random block storage"
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"sequential storage"
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		"LAN port"
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		"WAN port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		"fibrechannel port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		"fibrechannel peripheral"
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		"SCSI peripheral"
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		"ATE port"
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		"ATE peripheral"
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		"floppy controller"
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		"floppy device"
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		"bus adapter port"
	},
};

static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};

static inline u_int32_t iop_inl(struct iop_softc *, int);
static inline void iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t iop_inl_msg(struct iop_softc *, int);
static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void iop_config_interrupts(struct device *);
static void iop_configure_devices(struct iop_softc *, int, int);
static void iop_devinfo(int, char *, size_t);
static int iop_print(void *, const char *);
static void iop_shutdown(void *);

static void iop_adjqparam(struct iop_softc *, int);
static int iop_handle_reply(struct iop_softc *, u_int32_t);
static int iop_hrt_get(struct iop_softc *);
static int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void iop_intr_event(struct device *, struct iop_msg *, void *);
static int iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
    u_int32_t);
static void iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int iop_ofifo_init(struct iop_softc *);
static int iop_passthrough(struct iop_softc *, struct ioppt *,
    struct proc *);
static void iop_reconf_thread(void *);
static void iop_release_mfa(struct iop_softc *, u_int32_t);
static int iop_reset(struct iop_softc *);
static int iop_sys_enable(struct iop_softc *);
static int iop_systab_set(struct iop_softc *);
static void iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs, maj;
	u_int32_t mask;
	char ident[64];
	device_t dev = &sc->sc_dv;

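	/*
	 * `state' counts the setup steps completed so far; the bail_out
	 * paths below use it to decide how much to unwind on failure.
	 */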
	state = 0;

	printf("I2O adapter");

	mutex_init(&sc->sc_intrlock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&sc->sc_conflock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_confcv, "iopconf");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Avoid syncing the reply map until it's set up. */
	sc->sc_curib = 0x123;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		printf("%s: frame size too small (%d)\n",
		    sc->sc_dv.dv_xname, sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
		cv_init(&im->im_cv, "iopmsg");
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	maj = cdevsw_lookup_major(&iop_cdevsw);
	device_register_name(makedev(maj, device_unit(dev)), dev, true,
	    DEV_OTHER, device_xname(dev));

	return;

 bail_out3:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
 bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;
	int locs[IOPCF_NLOCS];

	sc = device_private(self);
	mutex_enter(&sc->sc_conflock);

	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0) {
			mutex_exit(&sc->sc_conflock);
			return;
		}

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = device_unit(&iop->sc_dv) + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		mutex_exit(&sc->sc_conflock);
		return;
	}
	if (iop_sys_enable(sc) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	locs[IOPCF_TID] = I2O_TID_IOP;
	config_found_sm_loc(self, "iop", locs, &ia, iop_print,
	    config_stdsubmatch);

	/*
	 * Start device configuration.
	 */
	if ((rv = iop_reconfigure(sc, 0)) == -1)
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);

	sc->sc_flags |= IOP_ONLINE;
	rv = kthread_create(PRI_NONE, 0, NULL, iop_reconf_thread, sc,
	    &sc->sc_reconf_thread, "%s", sc->sc_dv.dv_xname);
	mutex_exit(&sc->sc_conflock);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct lwp *l;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;
	l = curlwp;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		mutex_enter(&sc->sc_conflock);
		if (rv == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
		}
		(void)cv_timedwait(&sc->sc_confcv, &sc->sc_conflock, hz * 5);
		mutex_exit(&sc->sc_conflock);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	KASSERT(mutex_owned(&sc->sc_conflock));

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	}
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
			config_detach(ii->ii_dv, DETACH_FORCE);
			continue;
		}

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(&sc->sc_dv, "iop", locs, &ia,
		    iop_print, config_stdsubmatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo, size_t l)
{
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		snprintf(devinfo, l, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, l);
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

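	/*
	 * The IOP DMAs the status block into the scratch area and sets
	 * syncbyte to 0xff once the entire block is valid, so clear the
	 * block first and then poll for that marker.
	 */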
	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			kpause("iopstat", false, hz / 10, NULL);
	}

	if (st->syncbyte != 0xff) {
		printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
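	/*
	 * Append a single SIMPLE element describing the status word, and
	 * grow the message size field (the high 16 bits of mb[0], counted
	 * in 32-bit words) by the two words just added.
	 */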
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: DMA map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: DMA create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;

		/* Now safe to sync the reply map. */
		sc->sc_curib = 0;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	uvm_lwp_hold(curlwp);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	uvm_lwp_rele(curlwp);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this is a
 * change notification request: it completes only when the LCT changes,
 * so the caller must be prepared to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
    u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Post a SYS_ENABLE message to the adapter.
 */
int
iop_sys_enable(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, 30000);
	if (rv == 0) {
		if ((im->im_flags & IM_FAIL) != 0)
			rv = ENXIO;
		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
			rv = 0;
		else
			rv = EIO;
	}

	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
    int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		uvm_lwp_hold(curlwp);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		uvm_lwp_rele(curlwp);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	uvm_lwp_hold(curlwp);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	uvm_lwp_rele(curlwp);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
    int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (device_unit(&sc->sc_dv) + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	uvm_lwp_hold(curlwp);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	uvm_lwp_rele(curlwp);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

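	/*
	 * The IOP acknowledges the reset by DMAing a status code into the
	 * scratch word; clear it first, then poll until it goes non-zero.
	 */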
	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	cv_init(&ii->ii_cv, "iopevt");

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_REMOVE(ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);

	cv_destroy(&ii->ii_cv);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	KASSERT(mutex_owned(&sc->sc_intrlock));

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)((char *)sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;
		im->im_detstatus = le16toh(rb->detail);

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
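			/*
			 * The high 16 bits of msgflags give the frame size
			 * in 32-bit words; shifting right by only 14 (and
			 * clearing the low two bits) converts that to bytes.
			 */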
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			cv_broadcast(&im->im_cv);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
			if (ii != NULL) {
				mutex_spin_exit(&sc->sc_intrlock);
				(*ii->ii_intr)(ii->ii_dv, im, rb);
				mutex_spin_enter(&sc->sc_intrlock);
			}
		}
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		if (ii != NULL) {
			mutex_spin_exit(&sc->sc_intrlock);
			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
			mutex_spin_enter(&sc->sc_intrlock);
		}
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	mutex_spin_enter(&sc->sc_intrlock);

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) {
		mutex_spin_exit(&sc->sc_intrlock);
		return (0);
	}

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	mutex_spin_exit(&sc->sc_intrlock);
	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

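/*
 * A typical initiator transaction, as seen elsewhere in this file (e.g.
 * iop_hrt_get0()), looks roughly like this -- a sketch for orientation
 * only, with the message frame `mf'/`mb', buffer and timeout arbitrary:
 *
 *	im = iop_msg_alloc(sc, IM_WAIT);
 *	mf->msgtctx = im->im_tctx;
 *	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
 *	rv = iop_msg_post(sc, im, mb, 30000);
 *	iop_msg_unmap(sc, im);
 *	iop_msg_free(sc, im);
 */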
/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	mutex_spin_enter(&sc->sc_intrlock);
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);

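	/*
	 * Keep the wrapper's index in the low bits of the transaction
	 * context and stamp a fresh generation into the bits above, so a
	 * late reply carrying an old tctx won't match this allocation.
	 */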
	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	mutex_spin_enter(&sc->sc_intrlock);
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
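	/*
	 * Each SIMPLE element occupies two words, so grow the message
	 * size field (the high 16 bits of mb[0]) by 2 * nsegs words.
	 */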
1986 mb[0] += (nsegs << 17);
1987 return (0);
1988
1989 bad:
1990 if (xn != 0)
1991 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1992 return (rv);
1993 }
1994
/*
 * Map a block I/O data transfer (different in that there's a maximum of
 * one transfer per message, and PAGE_LIST addressing may be used).  Write
 * a scatter-gather list into the message frame.
 */
2000 int
2001 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
2002 void *xferaddr, int xfersize, int out)
2003 {
2004 bus_dma_segment_t *ds;
2005 bus_dmamap_t dm;
2006 struct iop_xfer *ix;
2007 u_int rv, i, nsegs, off, slen, tlen, flg;
2008 paddr_t saddr, eaddr;
2009 u_int32_t *p;
2010
2011 #ifdef I2ODEBUG
2012 if (xfersize == 0)
2013 panic("iop_msg_map_bio: null transfer");
2014 if (xfersize > IOP_MAX_XFER)
2015 panic("iop_msg_map_bio: transfer too large");
2016 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2017 panic("iop_msg_map_bio: SGLOFFADJ");
2018 #endif
2019
2020 ix = im->im_xfer;
2021 dm = ix->ix_map;
2022 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
2023 BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
2024 if (rv != 0)
2025 return (rv);
2026
2027 off = mb[0] >> 16;
2028 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
2029
2030 /*
2031 * If the transfer is highly fragmented and won't fit using SIMPLE
2032 * elements, use PAGE_LIST elements instead. SIMPLE elements are
2033 * potentially more efficient, both for us and the IOP.
2034 */
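	/*
	 * (Cost comparison: a SIMPLE element is two words per segment,
	 * while a PAGE_LIST is one header word plus one word per page,
	 * so n single-page fragments take 2n words as SIMPLE elements
	 * but only n + 1 words as a PAGE_LIST.)
	 */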
2035 if (dm->dm_nsegs > nsegs) {
2036 nsegs = 1;
2037 p = mb + off + 1;
2038
2039 /* XXX This should be done with a bus_space flag. */
2040 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2041 slen = ds->ds_len;
2042 saddr = ds->ds_addr;
2043
2044 while (slen > 0) {
2045 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2046 tlen = min(eaddr - saddr, slen);
2047 slen -= tlen;
				*p++ = htole32(saddr);
2049 saddr = eaddr;
2050 nsegs++;
2051 }
2052 }
2053
2054 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2055 I2O_SGL_END;
2056 if (out)
2057 mb[off] |= I2O_SGL_DATA_OUT;
2058 } else {
2059 p = mb + off;
2060 nsegs = dm->dm_nsegs;
2061
2062 if (out)
2063 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2064 else
2065 flg = I2O_SGL_SIMPLE;
2066
2067 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2068 p[0] = (u_int32_t)ds->ds_len | flg;
2069 p[1] = (u_int32_t)ds->ds_addr;
2070 }
2071
2072 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2073 I2O_SGL_END;
2074 p[1] = (u_int32_t)ds->ds_addr;
2075 nsegs <<= 1;
2076 }
2077
2078 /* Fix up the transfer record, and sync the map. */
2079 ix->ix_flags = (out ? IX_OUT : IX_IN);
2080 ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2083
	/*
	 * Adjust the SGL offset and total message size fields.  We don't
	 * set IM_SGLOFFADJ: both paths above terminate the SGL with
	 * I2O_SGL_END themselves, so iop_msg_post() has nothing to patch.
	 */
2088 mb[0] += ((off << 4) + (nsegs << 16));
2089 return (0);
2090 }
2091
2092 /*
2093 * Unmap all data transfers associated with a message wrapper.
2094 */
2095 void
2096 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2097 {
2098 struct iop_xfer *ix;
2099 int i;
2100
2101 #ifdef I2ODEBUG
2102 if (im->im_xfer[0].ix_size == 0)
2103 panic("iop_msg_unmap: no transfers mapped");
2104 #endif
2105
2106 for (ix = im->im_xfer, i = 0;;) {
2107 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2108 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2109 BUS_DMASYNC_POSTREAD);
2110 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2111
2112 /* Only the first DMA map is static. */
2113 if (i != 0)
2114 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
		/* Don't peek past the end of the im_xfer array. */
		if (++i >= IOP_MAX_MSG_XFERS)
			break;
		if ((++ix)->ix_size == 0)
			break;
2119 }
2120 }
2121
2122 /*
2123 * Post a message frame to the IOP's inbound queue.
2124 */
2125 int
2126 iop_post(struct iop_softc *sc, u_int32_t *mb)
2127 {
2128 u_int32_t mfa;
2129
2130 #ifdef I2ODEBUG
2131 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2132 panic("iop_post: frame too large");
2133 #endif
2134
2135 mutex_spin_enter(&sc->sc_intrlock);
2136
2137 /* Allocate a slot with the IOP. */
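	/* The FIFO may transiently read empty, hence the double read. */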
2138 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2139 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2140 mutex_spin_exit(&sc->sc_intrlock);
2141 printf("%s: mfa not forthcoming\n",
2142 sc->sc_dv.dv_xname);
2143 return (EAGAIN);
2144 }
2145
2146 /* Perform reply buffer DMA synchronisation. */
2147 if (sc->sc_curib++ == 0)
2148 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2149 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2150
2151 /* Copy out the message frame. */
2152 bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2153 mb[0] >> 16);
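	/*
	 * ((mb[0] >> 14) & ~3 is the frame size in bytes: the word
	 * count from bits 16-31, scaled by four.)
	 */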
2154 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
2155 (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2156
2157 /* Post the MFA back to the IOP. */
2158 iop_outl(sc, IOP_REG_IFIFO, mfa);
2159
2160 mutex_spin_exit(&sc->sc_intrlock);
2161 return (0);
2162 }
2163
2164 /*
2165 * Post a message to the IOP and deal with completion.
2166 */
2167 int
2168 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2169 {
2170 u_int32_t *mb;
2171 int rv;
2172
2173 mb = xmb;
2174
2175 /* Terminate the scatter/gather list chain. */
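	/* (The final element's flag word sits two words from the end.) */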
2176 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2177 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2178
2179 if ((rv = iop_post(sc, mb)) != 0)
2180 return (rv);
2181
2182 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2183 if ((im->im_flags & IM_POLL) != 0)
2184 iop_msg_poll(sc, im, timo);
2185 else
2186 iop_msg_wait(sc, im, timo);
2187
2188 mutex_spin_enter(&sc->sc_intrlock);
2189 if ((im->im_flags & IM_REPLIED) != 0) {
2190 if ((im->im_flags & IM_NOSTATUS) != 0)
2191 rv = 0;
2192 else if ((im->im_flags & IM_FAIL) != 0)
2193 rv = ENXIO;
2194 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2195 rv = EIO;
2196 else
2197 rv = 0;
2198 } else
2199 rv = EBUSY;
2200 mutex_spin_exit(&sc->sc_intrlock);
2201 } else
2202 rv = 0;
2203
2204 return (rv);
2205 }
2206
2207 /*
2208 * Spin until the specified message is replied to.
2209 */
2210 static void
2211 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2212 {
2213 u_int32_t rmfa;
2214
2215 mutex_spin_enter(&sc->sc_intrlock);
2216
2217 for (timo *= 10; timo != 0; timo--) {
2218 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2219 /* Double read to account for IOP bug. */
2220 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2221 if (rmfa == IOP_MFA_EMPTY)
2222 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2223 if (rmfa != IOP_MFA_EMPTY) {
2224 iop_handle_reply(sc, rmfa);
2225
2226 /*
2227 * Return the reply frame to the IOP's
2228 * outbound FIFO.
2229 */
2230 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2231 }
2232 }
2233 if ((im->im_flags & IM_REPLIED) != 0)
2234 break;
2235 mutex_spin_exit(&sc->sc_intrlock);
2236 DELAY(100);
2237 mutex_spin_enter(&sc->sc_intrlock);
2238 }
2239
2240 if (timo == 0) {
2241 #ifdef I2ODEBUG
2242 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2243 if (iop_status_get(sc, 1) != 0)
2244 printf("iop_msg_poll: unable to retrieve status\n");
2245 else
2246 printf("iop_msg_poll: IOP state = %d\n",
2247 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2248 #endif
2249 }
2250
2251 mutex_spin_exit(&sc->sc_intrlock);
2252 }
2253
2254 /*
2255 * Sleep until the specified message is replied to.
2256 */
2257 static void
2258 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2259 {
2260 int rv;
2261
2262 mutex_spin_enter(&sc->sc_intrlock);
2263 if ((im->im_flags & IM_REPLIED) != 0) {
2264 mutex_spin_exit(&sc->sc_intrlock);
2265 return;
2266 }
2267 rv = cv_timedwait(&im->im_cv, &sc->sc_intrlock, mstohz(timo));
2268 mutex_spin_exit(&sc->sc_intrlock);
2269
2270 #ifdef I2ODEBUG
2271 if (rv != 0) {
		printf("iop_msg_wait: cv_timedwait() == %d\n", rv);
2273 if (iop_status_get(sc, 0) != 0)
2274 printf("iop_msg_wait: unable to retrieve status\n");
2275 else
2276 printf("iop_msg_wait: IOP state = %d\n",
2277 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2278 }
2279 #endif
2280 }
2281
2282 /*
 * Release an unused message frame back to the IOP's inbound FIFO.
2284 */
2285 static void
2286 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2287 {
2288
2289 /* Use the frame to issue a no-op. */
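	/* (Four-word frame: version/size word, function, then zero
	 * initiator and transaction contexts.) */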
2290 iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2291 iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2292 iop_outl_msg(sc, mfa + 8, 0);
2293 iop_outl_msg(sc, mfa + 12, 0);
2294
2295 iop_outl(sc, IOP_REG_IFIFO, mfa);
2296 }
2297
2298 #ifdef I2ODEBUG
2299 /*
2300 * Dump a reply frame header.
2301 */
2302 static void
2303 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2304 {
2305 u_int function, detail;
2306 const char *statusstr;
2307
2308 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2309 detail = le16toh(rb->detail);
2310
2311 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2312
2313 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2314 statusstr = iop_status[rb->reqstatus];
2315 else
2316 statusstr = "undefined error code";
2317
2318 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2319 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2320 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2321 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2322 le32toh(rb->msgtctx));
2323 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2324 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2325 (le32toh(rb->msgflags) >> 8) & 0xff);
2326 }
2327 #endif
2328
2329 /*
2330 * Dump a transport failure reply.
2331 */
2332 static void
2333 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2334 {
2335
2336 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2337
2338 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2339 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2340 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2341 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2342 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2343 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2344 }
2345
2346 /*
2347 * Translate an I2O ASCII field into a C string.
2348 */
2349 void
2350 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2351 {
2352 int hc, lc, i, nit;
2353
2354 dlen--;
2355 lc = 0;
2356 hc = 0;
2357 i = 0;
2358
	/*
	 * DPT uses NUL as a space, whereas AMI uses it as a terminator.  The
	 * spec has nothing to say about it.  Since AMI fields are usually
	 * filled with junk after the terminator, treat NUL as a terminator
	 * unless the IOP is from DPT.
	 */
2364 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2365
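	/*
	 * E.g. an AMI-style field "RAID\0<junk>" stops at the NUL and
	 * yields "RAID", while for DPT "FAST\0SCSI" yields "FAST SCSI",
	 * the NUL being rendered as a space.
	 */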
2366 while (slen-- != 0 && dlen-- != 0) {
2367 if (nit && *src == '\0')
2368 break;
2369 else if (*src <= 0x20 || *src >= 0x7f) {
2370 if (hc)
2371 dst[i++] = ' ';
2372 } else {
2373 hc = 1;
2374 dst[i++] = *src;
2375 lc = i;
2376 }
2377 src++;
2378 }
2379
2380 dst[lc] = '\0';
2381 }
2382
2383 /*
2384 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2385 */
2386 int
2387 iop_print_ident(struct iop_softc *sc, int tid)
2388 {
2389 struct {
2390 struct i2o_param_op_results pr;
2391 struct i2o_param_read_results prr;
2392 struct i2o_param_device_identity di;
2393 } __attribute__ ((__packed__)) p;
2394 char buf[32];
2395 int rv;
2396
2397 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2398 sizeof(p), NULL);
2399 if (rv != 0)
2400 return (rv);
2401
2402 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2403 sizeof(buf));
2404 printf(" <%s, ", buf);
2405 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2406 sizeof(buf));
2407 printf("%s, ", buf);
2408 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2409 printf("%s>", buf);
2410
2411 return (0);
2412 }
2413
2414 /*
2415 * Claim or unclaim the specified TID.
2416 */
2417 int
2418 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2419 int flags)
2420 {
2421 struct iop_msg *im;
2422 struct i2o_util_claim mf;
2423 int rv, func;
2424
2425 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2426 im = iop_msg_alloc(sc, IM_WAIT);
2427
2428 /* We can use the same structure, as they're identical. */
2429 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2430 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2431 mf.msgictx = ii->ii_ictx;
2432 mf.msgtctx = im->im_tctx;
2433 mf.flags = flags;
2434
2435 rv = iop_msg_post(sc, im, &mf, 5000);
2436 iop_msg_free(sc, im);
2437 return (rv);
2438 }
2439
2440 /*
2441 * Perform an abort.
2442 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
2445 {
2446 struct iop_msg *im;
2447 struct i2o_util_abort mf;
2448 int rv;
2449
2450 im = iop_msg_alloc(sc, IM_WAIT);
2451
2452 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2453 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2454 mf.msgictx = ii->ii_ictx;
2455 mf.msgtctx = im->im_tctx;
2456 mf.flags = (func << 24) | flags;
2457 mf.tctxabort = tctxabort;
2458
2459 rv = iop_msg_post(sc, im, &mf, 5000);
2460 iop_msg_free(sc, im);
2461 return (rv);
2462 }
2463
2464 /*
2465 * Enable or disable reception of events for the specified device.
2466 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2468 {
2469 struct i2o_util_event_register mf;
2470
2471 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2472 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2473 mf.msgictx = ii->ii_ictx;
2474 mf.msgtctx = 0;
2475 mf.eventmask = mask;
2476
2477 /* This message is replied to only when events are signalled. */
2478 return (iop_post(sc, (u_int32_t *)&mf));
2479 }
2480
2481 int
2482 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
2483 {
2484 struct iop_softc *sc;
2485
2486 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2487 return (ENXIO);
2488 if ((sc->sc_flags & IOP_ONLINE) == 0)
2489 return (ENXIO);
2490 if ((sc->sc_flags & IOP_OPEN) != 0)
2491 return (EBUSY);
2492 sc->sc_flags |= IOP_OPEN;
2493
2494 return (0);
2495 }
2496
2497 int
iopclose(dev_t dev, int flag, int mode, struct lwp *l)
2500 {
2501 struct iop_softc *sc;
2502
2503 sc = device_lookup(&iop_cd, minor(dev));
2504 sc->sc_flags &= ~IOP_OPEN;
2505
2506 return (0);
2507 }
2508
2509 int
2510 iopioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
2511 {
2512 struct iop_softc *sc;
2513 struct iovec *iov;
2514 int rv, i;
2515
2516 sc = device_lookup(&iop_cd, minor(dev));
2517 rv = 0;
2518
2519 switch (cmd) {
2520 case IOPIOCPT:
2521 rv = kauth_authorize_device_passthru(l->l_cred, dev,
2522 KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
2523 if (rv)
2524 return (rv);
2525
2526 return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));
2527
2528 case IOPIOCGSTATUS:
2529 iov = (struct iovec *)data;
2530 i = sizeof(struct i2o_status);
2531 if (i > iov->iov_len)
2532 i = iov->iov_len;
2533 else
2534 iov->iov_len = i;
2535 if ((rv = iop_status_get(sc, 0)) == 0)
2536 rv = copyout(&sc->sc_status, iov->iov_base, i);
2537 return (rv);
2538
2539 case IOPIOCGLCT:
2540 case IOPIOCGTIDMAP:
2541 case IOPIOCRECONFIG:
2542 break;
2543
2544 default:
2545 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2546 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2547 #endif
2548 return (ENOTTY);
2549 }
2550
2551 mutex_enter(&sc->sc_conflock);
2552
2553 switch (cmd) {
2554 case IOPIOCGLCT:
2555 iov = (struct iovec *)data;
2556 i = le16toh(sc->sc_lct->tablesize) << 2;
2557 if (i > iov->iov_len)
2558 i = iov->iov_len;
2559 else
2560 iov->iov_len = i;
2561 rv = copyout(sc->sc_lct, iov->iov_base, i);
2562 break;
2563
2564 case IOPIOCRECONFIG:
2565 rv = iop_reconfigure(sc, 0);
2566 break;
2567
2568 case IOPIOCGTIDMAP:
2569 iov = (struct iovec *)data;
2570 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2571 if (i > iov->iov_len)
2572 i = iov->iov_len;
2573 else
2574 iov->iov_len = i;
2575 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2576 break;
2577 }
2578
2579 mutex_exit(&sc->sc_conflock);
2580 return (rv);
2581 }
2582
2583 static int
2584 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2585 {
2586 struct iop_msg *im;
2587 struct i2o_msg *mf;
2588 struct ioppt_buf *ptb;
2589 int rv, i, mapped;
2590
2591 mf = NULL;
2592 im = NULL;
	mapped = 0;
2594
2595 if (pt->pt_msglen > sc->sc_framesize ||
2596 pt->pt_msglen < sizeof(struct i2o_msg) ||
2597 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2598 pt->pt_nbufs < 0 ||
2599 #if 0
2600 pt->pt_replylen < 0 ||
2601 #endif
2602 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2603 return (EINVAL);
2604
2605 for (i = 0; i < pt->pt_nbufs; i++)
2606 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2607 rv = ENOMEM;
2608 goto bad;
2609 }
2610
2611 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2612 if (mf == NULL)
2613 return (ENOMEM);
2614
2615 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2616 goto bad;
2617
2618 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2619 im->im_rb = (struct i2o_reply *)mf;
2620 mf->msgictx = IOP_ICTX;
2621 mf->msgtctx = im->im_tctx;
2622
2623 for (i = 0; i < pt->pt_nbufs; i++) {
2624 ptb = &pt->pt_bufs[i];
2625 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2626 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2627 if (rv != 0)
2628 goto bad;
2629 mapped = 1;
2630 }
2631
2632 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2633 goto bad;
2634
2635 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2636 if (i > sc->sc_framesize)
2637 i = sc->sc_framesize;
2638 if (i > pt->pt_replylen)
2639 i = pt->pt_replylen;
2640 rv = copyout(im->im_rb, pt->pt_reply, i);
2641
2642 bad:
2643 if (mapped != 0)
2644 iop_msg_unmap(sc, im);
2645 if (im != NULL)
2646 iop_msg_free(sc, im);
2647 if (mf != NULL)
2648 free(mf, M_DEVBUF);
2649 return (rv);
2650 }
2651