/*	$NetBSD: iop.c,v 1.66 2007/07/09 21:00:33 ad Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.66 2007/07/09 21:00:33 ad Exp $");

#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#include "locators.h"

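/*
 * Spin for up to `ms' milliseconds, re-evaluating `cond' roughly every
 * 100us.  Used, e.g., by iop_reset() to wait on the IOP's status word.
 */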
#define	POLL(ms, cond)					\
do {							\
	int xi;						\
	for (xi = (ms) * 10; xi; xi--) {		\
		if (cond)				\
			break;				\
		DELAY(100);				\
	}						\
} while (/* CONSTCOND */0);

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

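/*
 * Maximum scatter/gather segments per transfer: one per page of the
 * largest transfer, plus one in case the buffer is not page aligned.
 */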
#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

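/*
 * A transaction context holds a message wrapper's index in the low
 * IOP_TCTX_SHIFT bits and a generation number in the remaining high
 * bits; see iop_msg_alloc() and iop_handle_reply().
 */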
#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static void *iop_sdh;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

static struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
	const char *ic_caption;
} const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		"executive"
	},
	{
		I2O_CLASS_DDM,
		0,
		"device driver module"
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"random block storage"
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"sequential storage"
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		"LAN port"
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		"WAN port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		"fibrechannel port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		"fibrechannel peripheral"
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		"SCSI peripheral"
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		"ATE port"
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		"ATE peripheral"
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		"floppy controller"
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		"floppy device"
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		"bus adapter port"
	},
};

static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};

static inline u_int32_t iop_inl(struct iop_softc *, int);
static inline void iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t iop_inl_msg(struct iop_softc *, int);
static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void iop_config_interrupts(struct device *);
static void iop_configure_devices(struct iop_softc *, int, int);
static void iop_devinfo(int, char *, size_t);
static int iop_print(void *, const char *);
static void iop_shutdown(void *);

static void iop_adjqparam(struct iop_softc *, int);
static int iop_handle_reply(struct iop_softc *, u_int32_t);
static int iop_hrt_get(struct iop_softc *);
static int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void iop_intr_event(struct device *, struct iop_msg *, void *);
static int iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
    u_int32_t);
static void iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int iop_ofifo_init(struct iop_softc *);
static int iop_passthrough(struct iop_softc *, struct ioppt *,
    struct proc *);
static void iop_reconf_thread(void *);
static void iop_release_mfa(struct iop_softc *, u_int32_t);
static int iop_reset(struct iop_softc *);
static int iop_sys_enable(struct iop_softc *);
static int iop_systab_set(struct iop_softc *);
static void iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	mutex_init(&sc->sc_intrlock, MUTEX_DRIVER, IPL_VM);
	mutex_init(&sc->sc_conflock, MUTEX_DRIVER, IPL_NONE);
	cv_init(&sc->sc_confcv, "iopconf");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Avoid syncing the reply map until it's set up. */
	sc->sc_curib = 0x123;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		printf("%s: frame size too small (%d)\n",
		    sc->sc_dv.dv_xname, sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
		cv_init(&im->im_cv, "iopmsg");
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	return;

 bail_out3:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
 bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;
	int locs[IOPCF_NLOCS];

	sc = device_private(self);
	mutex_enter(&sc->sc_conflock);

	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0) {
			mutex_exit(&sc->sc_conflock);
			return;
		}

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = device_unit(&iop->sc_dv) + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		mutex_exit(&sc->sc_conflock);
		return;
	}
	if (iop_sys_enable(sc) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	locs[IOPCF_TID] = I2O_TID_IOP;
	config_found_sm_loc(self, "iop", locs, &ia, iop_print,
	    config_stdsubmatch);

	/*
	 * Start device configuration.
	 */
	if ((rv = iop_reconfigure(sc, 0)) == -1)
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);

	sc->sc_flags |= IOP_ONLINE;
	rv = kthread_create(PRI_NONE, 0, NULL, iop_reconf_thread, sc,
	    &sc->sc_reconf_thread, "%s", sc->sc_dv.dv_xname);
	mutex_exit(&sc->sc_conflock);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct lwp *l;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;
	l = curlwp;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		mutex_enter(&sc->sc_conflock);
		if (rv == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
		}
		(void)cv_timedwait(&sc->sc_confcv, &sc->sc_conflock, hz * 5);
		mutex_exit(&sc->sc_conflock);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	KASSERT(mutex_owned(&sc->sc_conflock));

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	}
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
			config_detach(ii->ii_dv, DETACH_FORCE);
			continue;
		}

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(&sc->sc_dv, "iop", locs, &ia,
		    iop_print, config_stdsubmatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo, size_t l)
{
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		snprintf(devinfo, l, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, l);
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

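	/* Wait up to ~2.5 seconds for the IOP to fill in the status block. */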
	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			kpause("iopstat", false, hz / 10, NULL);
	}

	if (st->syncbyte != 0xff) {
		printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
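	/*
	 * Append a single SIMPLE element describing the status word, then
	 * add the two words just written to the message size held in the
	 * upper 16 bits of mb[0].
	 */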
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: DMA map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: DMA create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;

		/* Now safe to sync the reply map. */
		sc->sc_curib = 0;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	uvm_lwp_hold(curlwp);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	uvm_lwp_rele(curlwp);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
    u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
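	/*
	 * Compute the entry count: the table size is in 32-bit words, and
	 * struct i2o_lct already includes the first entry, so one entry's
	 * size is added back before dividing.
	 */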
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Post a SYS_ENABLE message to the adapter.
 */
int
iop_sys_enable(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, 30000);
	if (rv == 0) {
		if ((im->im_flags & IM_FAIL) != 0)
			rv = ENXIO;
		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
			rv = 0;
		else
			rv = EIO;
	}

	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
    int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		uvm_lwp_hold(curlwp);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		uvm_lwp_rele(curlwp);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	uvm_lwp_hold(curlwp);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	uvm_lwp_rele(curlwp);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
    int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (device_unit(&sc->sc_dv) + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	uvm_lwp_hold(curlwp);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	uvm_lwp_rele(curlwp);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	cv_init(&ii->ii_cv, "iopevt");

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_REMOVE(ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);

	cv_destroy(&ii->ii_cv);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	KASSERT(mutex_owned(&sc->sc_intrlock));

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)((char *)sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;
		im->im_detstatus = le16toh(rb->detail);

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			cv_broadcast(&im->im_cv);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
			if (ii != NULL) {
				mutex_spin_exit(&sc->sc_intrlock);
				(*ii->ii_intr)(ii->ii_dv, im, rb);
				mutex_spin_enter(&sc->sc_intrlock);
			}
		}
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		if (ii != NULL) {
			mutex_spin_exit(&sc->sc_intrlock);
			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
			mutex_spin_enter(&sc->sc_intrlock);
		}
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	mutex_spin_enter(&sc->sc_intrlock);

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) {
		mutex_spin_exit(&sc->sc_intrlock);
		return (0);
	}

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	mutex_spin_exit(&sc->sc_intrlock);
	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	mutex_spin_enter(&sc->sc_intrlock);
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);

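	/*
	 * Build the transaction context: the wrapper index lives in the
	 * low IOP_TCTX_SHIFT bits, and a rolling generation number in the
	 * upper bits guards against stale replies.
	 */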
	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	mutex_spin_enter(&sc->sc_intrlock);
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
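	/* Each SIMPLE element takes two words: flags/length and address. */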
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
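	/* Each SIMPLE element adds two words to the size field in mb[0]. */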
1982 mb[0] += (nsegs << 17);
1983 return (0);
1984
1985 bad:
1986 if (xn != 0)
1987 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1988 return (rv);
1989 }
1990
1991 /*
1992 * Map a block I/O data transfer (different in that there's only one per
1993  * message maximum, and PAGE addressing may be used).  Write a
1994  * scatter-gather list into the message frame.
1995 */
1996 int
1997 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1998 void *xferaddr, int xfersize, int out)
1999 {
2000 bus_dma_segment_t *ds;
2001 bus_dmamap_t dm;
2002 struct iop_xfer *ix;
2003 u_int rv, i, nsegs, off, slen, tlen, flg;
2004 paddr_t saddr, eaddr;
2005 u_int32_t *p;
2006
2007 #ifdef I2ODEBUG
2008 if (xfersize == 0)
2009 panic("iop_msg_map_bio: null transfer");
2010 if (xfersize > IOP_MAX_XFER)
2011 panic("iop_msg_map_bio: transfer too large");
2012 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2013 panic("iop_msg_map_bio: SGLOFFADJ");
2014 #endif
2015
2016 ix = im->im_xfer;
2017 dm = ix->ix_map;
2018 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
2019 BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
2020 if (rv != 0)
2021 return (rv);
2022
2023 off = mb[0] >> 16;
2024 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
2025
2026 /*
2027 * If the transfer is highly fragmented and won't fit using SIMPLE
2028 * elements, use PAGE_LIST elements instead. SIMPLE elements are
2029 * potentially more efficient, both for us and the IOP.
2030 */
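	/*
	 * Illustration (hypothetical numbers): 16 discontiguous pages
	 * would need 16 SIMPLE elements (32 words), but only 17 words as
	 * a PAGE_LIST: one header word plus 16 page addresses.
	 */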
2031 if (dm->dm_nsegs > nsegs) {
2032 nsegs = 1;
2033 p = mb + off + 1;
2034
2035 /* XXX This should be done with a bus_space flag. */
2036 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2037 slen = ds->ds_len;
2038 saddr = ds->ds_addr;
2039
2040 while (slen > 0) {
2041 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2042 tlen = min(eaddr - saddr, slen);
2043 slen -= tlen;
2044 *p++ = le32toh(saddr);
2045 saddr = eaddr;
2046 nsegs++;
2047 }
2048 }
2049
2050 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2051 I2O_SGL_END;
2052 if (out)
2053 mb[off] |= I2O_SGL_DATA_OUT;
2054 } else {
2055 p = mb + off;
2056 nsegs = dm->dm_nsegs;
2057
2058 if (out)
2059 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2060 else
2061 flg = I2O_SGL_SIMPLE;
2062
2063 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2064 p[0] = (u_int32_t)ds->ds_len | flg;
2065 p[1] = (u_int32_t)ds->ds_addr;
2066 }
2067
2068 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2069 I2O_SGL_END;
2070 p[1] = (u_int32_t)ds->ds_addr;
2071 nsegs <<= 1;
2072 }
2073
2074 /* Fix up the transfer record, and sync the map. */
2075 ix->ix_flags = (out ? IX_OUT : IX_IN);
2076 ix->ix_size = xfersize;
2077 	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2078 	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2079
2080 /*
2081 * Adjust the SGL offset and total message size fields. We don't
2082 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2083 */
2084 mb[0] += ((off << 4) + (nsegs << 16));
2085 return (0);
2086 }
2087
2088 /*
2089 * Unmap all data transfers associated with a message wrapper.
2090 */
2091 void
2092 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2093 {
2094 struct iop_xfer *ix;
2095 int i;
2096
2097 #ifdef I2ODEBUG
2098 if (im->im_xfer[0].ix_size == 0)
2099 panic("iop_msg_unmap: no transfers mapped");
2100 #endif
2101
2102 for (ix = im->im_xfer, i = 0;;) {
2103 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2104 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2105 BUS_DMASYNC_POSTREAD);
2106 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2107
2108 /* Only the first DMA map is static. */
2109 if (i != 0)
2110 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2111 if ((++ix)->ix_size == 0)
2112 break;
2113 if (++i >= IOP_MAX_MSG_XFERS)
2114 break;
2115 }
2116 }
2117
2118 /*
2119 * Post a message frame to the IOP's inbound queue.
2120 */
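/*
 * In brief: read IOP_REG_IFIFO to obtain a free MFA (reading twice works
 * around IOPs that spuriously return IOP_MFA_EMPTY on the first read),
 * copy the frame into the message window at that offset, then write the
 * MFA back to IOP_REG_IFIFO to hand the frame to the IOP.
 */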
2121 int
2122 iop_post(struct iop_softc *sc, u_int32_t *mb)
2123 {
2124 u_int32_t mfa;
2125
2126 #ifdef I2ODEBUG
2127 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2128 panic("iop_post: frame too large");
2129 #endif
2130
2131 mutex_spin_enter(&sc->sc_intrlock);
2132
2133 /* Allocate a slot with the IOP. */
2134 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2135 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2136 mutex_spin_exit(&sc->sc_intrlock);
2137 printf("%s: mfa not forthcoming\n",
2138 sc->sc_dv.dv_xname);
2139 return (EAGAIN);
2140 }
2141
2142 /* Perform reply buffer DMA synchronisation. */
2143 if (sc->sc_curib++ == 0)
2144 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2145 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2146
2147 /* Copy out the message frame. */
2148 bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2149 mb[0] >> 16);
2150 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
2151 (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2152
2153 /* Post the MFA back to the IOP. */
2154 iop_outl(sc, IOP_REG_IFIFO, mfa);
2155
2156 mutex_spin_exit(&sc->sc_intrlock);
2157 return (0);
2158 }
2159
2160 /*
2161 * Post a message to the IOP and deal with completion.
2162 */
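/*
 * With IM_POLL the caller spins in iop_msg_poll(); with IM_WAIT it
 * sleeps in iop_msg_wait(); with neither flag the post returns
 * immediately and the reply is handled when the reply interrupt fires.
 */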
2163 int
2164 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2165 {
2166 u_int32_t *mb;
2167 int rv;
2168
2169 mb = xmb;
2170
2171 /* Terminate the scatter/gather list chain. */
2172 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2173 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2174
2175 if ((rv = iop_post(sc, mb)) != 0)
2176 return (rv);
2177
2178 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2179 if ((im->im_flags & IM_POLL) != 0)
2180 iop_msg_poll(sc, im, timo);
2181 else
2182 iop_msg_wait(sc, im, timo);
2183
2184 mutex_spin_enter(&sc->sc_intrlock);
2185 if ((im->im_flags & IM_REPLIED) != 0) {
2186 if ((im->im_flags & IM_NOSTATUS) != 0)
2187 rv = 0;
2188 else if ((im->im_flags & IM_FAIL) != 0)
2189 rv = ENXIO;
2190 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2191 rv = EIO;
2192 else
2193 rv = 0;
2194 } else
2195 rv = EBUSY;
2196 mutex_spin_exit(&sc->sc_intrlock);
2197 } else
2198 rv = 0;
2199
2200 return (rv);
2201 }
2202
2203 /*
2204 * Spin until the specified message is replied to.
2205 */
2206 static void
2207 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2208 {
2209 u_int32_t rmfa;
2210
2211 mutex_spin_enter(&sc->sc_intrlock);
2212
2213 for (timo *= 10; timo != 0; timo--) {
2214 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2215 /* Double read to account for IOP bug. */
2216 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2217 if (rmfa == IOP_MFA_EMPTY)
2218 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2219 if (rmfa != IOP_MFA_EMPTY) {
2220 iop_handle_reply(sc, rmfa);
2221
2222 /*
2223 * Return the reply frame to the IOP's
2224 * outbound FIFO.
2225 */
2226 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2227 }
2228 }
2229 if ((im->im_flags & IM_REPLIED) != 0)
2230 break;
2231 mutex_spin_exit(&sc->sc_intrlock);
2232 DELAY(100);
2233 mutex_spin_enter(&sc->sc_intrlock);
2234 }
2235
2236 if (timo == 0) {
2237 #ifdef I2ODEBUG
2238 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2239 if (iop_status_get(sc, 1) != 0)
2240 printf("iop_msg_poll: unable to retrieve status\n");
2241 else
2242 printf("iop_msg_poll: IOP state = %d\n",
2243 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2244 #endif
2245 }
2246
2247 mutex_spin_exit(&sc->sc_intrlock);
2248 }
2249
2250 /*
2251 * Sleep until the specified message is replied to.
2252 */
2253 static void
2254 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2255 {
2256 int rv;
2257
2258 mutex_spin_enter(&sc->sc_intrlock);
2259 if ((im->im_flags & IM_REPLIED) != 0) {
2260 mutex_spin_exit(&sc->sc_intrlock);
2261 return;
2262 }
2263 rv = cv_timedwait(&im->im_cv, &sc->sc_intrlock, mstohz(timo));
2264 mutex_spin_exit(&sc->sc_intrlock);
2265
2266 #ifdef I2ODEBUG
2267 if (rv != 0) {
2268 		printf("iop_msg_wait: cv_timedwait() == %d\n", rv);
2269 if (iop_status_get(sc, 0) != 0)
2270 printf("iop_msg_wait: unable to retrieve status\n");
2271 else
2272 printf("iop_msg_wait: IOP state = %d\n",
2273 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2274 }
2275 #endif
2276 }
2277
2278 /*
2279 * Release an unused message frame back to the IOP's inbound fifo.
2280 */
2281 static void
2282 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2283 {
2284
2285 /* Use the frame to issue a no-op. */
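	/*
	 * Word 0: version 1.1 and a message size of 4 words; word 1:
	 * UTIL_NOP addressed to the IOP TID; words 2-3: the initiator
	 * and transaction contexts, both unused here.
	 */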
2286 iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2287 iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2288 iop_outl_msg(sc, mfa + 8, 0);
2289 iop_outl_msg(sc, mfa + 12, 0);
2290
2291 iop_outl(sc, IOP_REG_IFIFO, mfa);
2292 }
2293
2294 #ifdef I2ODEBUG
2295 /*
2296 * Dump a reply frame header.
2297 */
2298 static void
2299 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2300 {
2301 u_int function, detail;
2302 const char *statusstr;
2303
2304 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2305 detail = le16toh(rb->detail);
2306
2307 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2308
2309 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2310 statusstr = iop_status[rb->reqstatus];
2311 else
2312 statusstr = "undefined error code";
2313
2314 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2315 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2316 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2317 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2318 le32toh(rb->msgtctx));
2319 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2320 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2321 (le32toh(rb->msgflags) >> 8) & 0xff);
2322 }
2323 #endif
2324
2325 /*
2326 * Dump a transport failure reply.
2327 */
2328 static void
2329 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2330 {
2331
2332 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2333
2334 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2335 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2336 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2337 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2338 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2339 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2340 }
2341
2342 /*
2343 * Translate an I2O ASCII field into a C string.
2344 */
2345 void
2346 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2347 {
2348 int hc, lc, i, nit;
2349
2350 dlen--;
2351 lc = 0;
2352 hc = 0;
2353 i = 0;
2354
2355 /*
2356 	 * DPT uses NUL as a space, whereas AMI uses it as a terminator.  The
2357 	 * spec has nothing to say about it.  Since AMI fields are usually
2358 	 * filled with junk after the terminator, treat NUL as a terminator
	 * unless the board is from DPT.
2359 */
2360 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2361
2362 while (slen-- != 0 && dlen-- != 0) {
2363 if (nit && *src == '\0')
2364 break;
2365 		else if (*src <= 0x20 || *src >= 0x7f) {
2366 			if (hc)
2367 				dst[i++] = ' ';
			hc = 0;	/* collapse a run of junk to one space */
2368 		} else {
2369 hc = 1;
2370 dst[i++] = *src;
2371 lc = i;
2372 }
2373 src++;
2374 }
2375
2376 dst[lc] = '\0';
2377 }
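
/*
 * For example (illustrative bytes): an AMI-style field "RAID\0\xff\xff"
 * yields "RAID", the NUL acting as a terminator; a DPT-style field
 * "RAID    " also yields "RAID", `lc' trimming the trailing blanks.
 */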
2378
2379 /*
2380 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2381 */
2382 int
2383 iop_print_ident(struct iop_softc *sc, int tid)
2384 {
2385 struct {
2386 struct i2o_param_op_results pr;
2387 struct i2o_param_read_results prr;
2388 struct i2o_param_device_identity di;
2389 } __attribute__ ((__packed__)) p;
2390 char buf[32];
2391 int rv;
2392
2393 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2394 sizeof(p), NULL);
2395 if (rv != 0)
2396 return (rv);
2397
2398 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2399 sizeof(buf));
2400 printf(" <%s, ", buf);
2401 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2402 sizeof(buf));
2403 printf("%s, ", buf);
2404 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2405 printf("%s>", buf);
2406
2407 return (0);
2408 }
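
/*
 * The resulting line during autoconfiguration looks something like
 * (hypothetical values): " <ACME, RAID 5 Volume, 1.00>".
 */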
2409
2410 /*
2411 * Claim or unclaim the specified TID.
2412 */
2413 int
2414 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2415 int flags)
2416 {
2417 struct iop_msg *im;
2418 struct i2o_util_claim mf;
2419 int rv, func;
2420
2421 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2422 im = iop_msg_alloc(sc, IM_WAIT);
2423
2424 /* We can use the same structure, as they're identical. */
2425 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2426 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2427 mf.msgictx = ii->ii_ictx;
2428 mf.msgtctx = im->im_tctx;
2429 mf.flags = flags;
2430
2431 rv = iop_msg_post(sc, im, &mf, 5000);
2432 iop_msg_free(sc, im);
2433 return (rv);
2434 }
2435
2436 /*
2437 * Perform an abort.
2438 */
2439 int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2440 int tctxabort, int flags)
2441 {
2442 struct iop_msg *im;
2443 struct i2o_util_abort mf;
2444 int rv;
2445
2446 im = iop_msg_alloc(sc, IM_WAIT);
2447
2448 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2449 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2450 mf.msgictx = ii->ii_ictx;
2451 mf.msgtctx = im->im_tctx;
2452 mf.flags = (func << 24) | flags;
2453 mf.tctxabort = tctxabort;
2454
2455 rv = iop_msg_post(sc, im, &mf, 5000);
2456 iop_msg_free(sc, im);
2457 return (rv);
2458 }
2459
2460 /*
2461 * Enable or disable reception of events for the specified device.
2462 */
2463 int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2464 {
2465 struct i2o_util_event_register mf;
2466
2467 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2468 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2469 mf.msgictx = ii->ii_ictx;
2470 mf.msgtctx = 0;
2471 mf.eventmask = mask;
2472
2473 /* This message is replied to only when events are signalled. */
2474 return (iop_post(sc, (u_int32_t *)&mf));
2475 }
2476
2477 int
2478 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
2479 {
2480 struct iop_softc *sc;
2481
2482 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2483 return (ENXIO);
2484 if ((sc->sc_flags & IOP_ONLINE) == 0)
2485 return (ENXIO);
2486 if ((sc->sc_flags & IOP_OPEN) != 0)
2487 return (EBUSY);
2488 sc->sc_flags |= IOP_OPEN;
2489
2490 return (0);
2491 }
2492
2493 int
2494 iopclose(dev_t dev, int flag, int mode, struct lwp *l)
2496 {
2497 struct iop_softc *sc;
2498
2499 sc = device_lookup(&iop_cd, minor(dev));
2500 sc->sc_flags &= ~IOP_OPEN;
2501
2502 return (0);
2503 }
2504
2505 int
2506 iopioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
2507 {
2508 struct iop_softc *sc;
2509 struct iovec *iov;
2510 int rv, i;
2511
2512 sc = device_lookup(&iop_cd, minor(dev));
2513 rv = 0;
2514
2515 switch (cmd) {
2516 case IOPIOCPT:
2517 rv = kauth_authorize_device_passthru(l->l_cred, dev,
2518 KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
2519 if (rv)
2520 return (rv);
2521
2522 return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));
2523
2524 case IOPIOCGSTATUS:
2525 iov = (struct iovec *)data;
2526 i = sizeof(struct i2o_status);
2527 if (i > iov->iov_len)
2528 i = iov->iov_len;
2529 else
2530 iov->iov_len = i;
2531 if ((rv = iop_status_get(sc, 0)) == 0)
2532 rv = copyout(&sc->sc_status, iov->iov_base, i);
2533 return (rv);
2534
2535 case IOPIOCGLCT:
2536 case IOPIOCGTIDMAP:
2537 case IOPIOCRECONFIG:
2538 break;
2539
2540 default:
2541 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2542 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2543 #endif
2544 return (ENOTTY);
2545 }
2546
2547 mutex_enter(&sc->sc_conflock);
2548
2549 switch (cmd) {
2550 case IOPIOCGLCT:
2551 iov = (struct iovec *)data;
2552 i = le16toh(sc->sc_lct->tablesize) << 2;
2553 if (i > iov->iov_len)
2554 i = iov->iov_len;
2555 else
2556 iov->iov_len = i;
2557 rv = copyout(sc->sc_lct, iov->iov_base, i);
2558 break;
2559
2560 case IOPIOCRECONFIG:
2561 rv = iop_reconfigure(sc, 0);
2562 break;
2563
2564 case IOPIOCGTIDMAP:
2565 iov = (struct iovec *)data;
2566 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2567 if (i > iov->iov_len)
2568 i = iov->iov_len;
2569 else
2570 iov->iov_len = i;
2571 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2572 break;
2573 }
2574
2575 mutex_exit(&sc->sc_conflock);
2576 return (rv);
2577 }
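
/*
 * Illustrative userland usage of IOPIOCGSTATUS (a sketch; the device
 * path and error handling are hypothetical):
 *
 *	struct i2o_status st;
 *	struct iovec iov = { &st, sizeof(st) };
 *
 *	fd = open("/dev/iop0", O_RDWR);
 *	if (ioctl(fd, IOPIOCGSTATUS, &iov) == 0)
 *		printf("IOP state %d\n",
 *		    (int)((le32toh(st.segnumber) >> 16) & 0xff));
 */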
2578
2579 static int
2580 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2581 {
2582 struct iop_msg *im;
2583 struct i2o_msg *mf;
2584 struct ioppt_buf *ptb;
2585 int rv, i, mapped;
2586
2587 mf = NULL;
2588 im = NULL;
2589 	mapped = 0;
2590
2591 if (pt->pt_msglen > sc->sc_framesize ||
2592 pt->pt_msglen < sizeof(struct i2o_msg) ||
2593 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2594 pt->pt_nbufs < 0 ||
2595 #if 0
2596 pt->pt_replylen < 0 ||
2597 #endif
2598 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2599 return (EINVAL);
2600
2601 for (i = 0; i < pt->pt_nbufs; i++)
2602 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2603 rv = ENOMEM;
2604 goto bad;
2605 }
2606
2607 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2608 if (mf == NULL)
2609 return (ENOMEM);
2610
2611 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2612 goto bad;
2613
2614 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2615 im->im_rb = (struct i2o_reply *)mf;
2616 mf->msgictx = IOP_ICTX;
2617 mf->msgtctx = im->im_tctx;
2618
2619 for (i = 0; i < pt->pt_nbufs; i++) {
2620 ptb = &pt->pt_bufs[i];
2621 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2622 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2623 if (rv != 0)
2624 goto bad;
2625 mapped = 1;
2626 }
2627
2628 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2629 goto bad;
2630
2631 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2632 if (i > sc->sc_framesize)
2633 i = sc->sc_framesize;
2634 if (i > pt->pt_replylen)
2635 i = pt->pt_replylen;
2636 rv = copyout(im->im_rb, pt->pt_reply, i);
2637
2638 bad:
2639 if (mapped != 0)
2640 iop_msg_unmap(sc, im);
2641 if (im != NULL)
2642 iop_msg_free(sc, im);
2643 if (mf != NULL)
2644 free(mf, M_DEVBUF);
2645 return (rv);
2646 }
2647