/*	$NetBSD: iop.c,v 1.67 2007/10/19 11:59:43 ad Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.67 2007/10/19 11:59:43 ad Exp $");

#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>
#include <sys/kauth.h>
#include <sys/bus.h>

#include <uvm/uvm_extern.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#include "locators.h"

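/*
 * Busy-wait for `cond' to become true, polling roughly every 100us for at
 * most `ms' milliseconds.  Note that `cond' may be a comma expression with
 * side effects, e.g. a DMA sync followed by the actual test.
 */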
#define POLL(ms, cond) \
do { \
	int xi; \
	for (xi = (ms) * 10; xi; xi--) { \
		if (cond) \
			break; \
		DELAY(100); \
	} \
} while (/* CONSTCOND */0);

#ifdef I2ODEBUG
#define DPRINTF(x) printf x
#else
#define DPRINTF(x)
#endif

#define IOP_ICTXHASH_NBUCKETS 16
#define IOP_ICTXHASH(ictx) (&iop_ictxhashtbl[(ictx) & iop_ictxhash])

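/* Worst case: one segment per page of the largest transfer, plus one more
   in case the transfer doesn't start on a page boundary. */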
#define IOP_MAX_SEGS (((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

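/*
 * Transaction context layout: the low IOP_TCTX_SHIFT bits index the message
 * wrapper in sc_ims; the remaining bits hold a generation number that is
 * advanced on each allocation (see iop_msg_alloc()) to catch stale replies.
 */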
#define IOP_TCTX_SHIFT	12
#define IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static void *iop_sdh;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

#define IC_CONFIGURE	0x01
#define IC_PRIORITY	0x02

static struct iop_class {
	u_short ic_class;
	u_short ic_flags;
	const char *ic_caption;
} const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		"executive"
	},
	{
		I2O_CLASS_DDM,
		0,
		"device driver module"
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"random block storage"
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"sequential storage"
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		"LAN port"
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		"WAN port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		"fibrechannel port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		"fibrechannel peripheral"
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		"SCSI peripheral"
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		"ATE port"
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		"ATE peripheral"
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		"floppy controller"
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		"floppy device"
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		"bus adapter port"
	},
};

static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};

static inline u_int32_t iop_inl(struct iop_softc *, int);
static inline void iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t iop_inl_msg(struct iop_softc *, int);
static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void iop_config_interrupts(struct device *);
static void iop_configure_devices(struct iop_softc *, int, int);
static void iop_devinfo(int, char *, size_t);
static int iop_print(void *, const char *);
static void iop_shutdown(void *);

static void iop_adjqparam(struct iop_softc *, int);
static int iop_handle_reply(struct iop_softc *, u_int32_t);
static int iop_hrt_get(struct iop_softc *);
static int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void iop_intr_event(struct device *, struct iop_msg *, void *);
static int iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
    u_int32_t);
static void iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int iop_ofifo_init(struct iop_softc *);
static int iop_passthrough(struct iop_softc *, struct ioppt *,
    struct proc *);
static void iop_reconf_thread(void *);
static void iop_release_mfa(struct iop_softc *, u_int32_t);
static int iop_reset(struct iop_softc *);
static int iop_sys_enable(struct iop_softc *);
static int iop_systab_set(struct iop_softc *);
static void iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	mutex_init(&sc->sc_intrlock, MUTEX_DRIVER, IPL_VM);
	mutex_init(&sc->sc_conflock, MUTEX_DRIVER, IPL_NONE);
	cv_init(&sc->sc_confcv, "iopconf");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Avoid syncing the reply map until it's set up. */
	sc->sc_curib = 0x123;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		printf("%s: frame size too small (%d)\n",
		    sc->sc_dv.dv_xname, sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
		cv_init(&im->im_cv, "iopmsg");
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	return;

 bail_out3:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
 bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;
	int locs[IOPCF_NLOCS];

	sc = device_private(self);
	mutex_enter(&sc->sc_conflock);

	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0) {
			mutex_exit(&sc->sc_conflock);
			return;
		}

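		/*
		 * The system table structure is declared to include one
		 * entry; allow space for niop - 1 more.
		 */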
		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = device_unit(&iop->sc_dv) + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		mutex_exit(&sc->sc_conflock);
		return;
	}
	if (iop_sys_enable(sc) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	locs[IOPCF_TID] = I2O_TID_IOP;
	config_found_sm_loc(self, "iop", locs, &ia, iop_print,
	    config_stdsubmatch);

	/*
	 * Start device configuration.
	 */
	if ((rv = iop_reconfigure(sc, 0)) == -1)
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);

	sc->sc_flags |= IOP_ONLINE;
	rv = kthread_create(PRI_NONE, 0, NULL, iop_reconf_thread, sc,
	    &sc->sc_reconf_thread, "%s", sc->sc_dv.dv_xname);
	mutex_exit(&sc->sc_conflock);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct lwp *l;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;
	l = curlwp;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		mutex_enter(&sc->sc_conflock);
		if (rv == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
		}
		(void)cv_timedwait(&sc->sc_confcv, &sc->sc_conflock, hz * 5);
		mutex_exit(&sc->sc_conflock);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	KASSERT(mutex_owned(&sc->sc_conflock));

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	}
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
			config_detach(ii->ii_dv, DETACH_FORCE);
			continue;
		}

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(&sc->sc_dv, "iop", locs, &ia,
		    iop_print, config_stdsubmatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo, size_t l)
{
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		snprintf(devinfo, l, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, l);
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
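	/* Split the 64-bit physical address of the status buffer. */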
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			kpause("iopstat", false, hz / 10, NULL);
	}

	if (st->syncbyte != 0xff) {
		printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
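	/* Account for the two SGL words just appended; the message size
	   field lives in the upper 16 bits of the header, in words. */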
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: DMA map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: DMA create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;

		/* Now safe to sync the reply map. */
		sc->sc_curib = 0;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	uvm_lwp_hold(curlwp);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	uvm_lwp_rele(curlwp);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be
 * prepared to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
    u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
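	/*
	 * struct i2o_lct is declared to include the first table entry,
	 * hence the adjustment when computing the entry count from the
	 * table size (which the IOP reports in 32-bit words).
	 */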
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Post a SYS_ENABLE message to the adapter.
 */
int
iop_sys_enable(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, 30000);
	if (rv == 0) {
		if ((im->im_flags & IM_FAIL) != 0)
			rv = ENXIO;
		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
			rv = 0;
		else
			rv = EIO;
	}

	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
    int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		uvm_lwp_hold(curlwp);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		uvm_lwp_rele(curlwp);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	uvm_lwp_hold(curlwp);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	uvm_lwp_rele(curlwp);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
    int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (device_unit(&sc->sc_dv) + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	uvm_lwp_hold(curlwp);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	uvm_lwp_rele(curlwp);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	cv_init(&ii->ii_cv, "iopevt");

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_REMOVE(ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);

	cv_destroy(&ii->ii_cv);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	KASSERT(mutex_owned(&sc->sc_intrlock));

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)((char *)sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;
		im->im_detstatus = le16toh(rb->detail);

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
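			/*
			 * The reply size lives in the upper 16 bits of
			 * msgflags, in 32-bit words; shifting by 14 and
			 * masking the low two bits converts it to bytes.
			 */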
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			cv_broadcast(&im->im_cv);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
			if (ii != NULL) {
				mutex_spin_exit(&sc->sc_intrlock);
				(*ii->ii_intr)(ii->ii_dv, im, rb);
				mutex_spin_enter(&sc->sc_intrlock);
			}
		}
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		if (ii != NULL) {
			mutex_spin_exit(&sc->sc_intrlock);
			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
			mutex_spin_enter(&sc->sc_intrlock);
		}
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	mutex_spin_enter(&sc->sc_intrlock);

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) {
		mutex_spin_exit(&sc->sc_intrlock);
		return (0);
	}

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	mutex_spin_exit(&sc->sc_intrlock);
	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	mutex_spin_enter(&sc->sc_intrlock);
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);

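	/* Keep the wrapper's index; stamp in a fresh generation number. */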
	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	mutex_spin_enter(&sc->sc_intrlock);
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
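		/*
		 * The SGL offset field occupies bits 4-7 of the message
		 * header and is expressed in 32-bit words; copy the
		 * current message size (bits 16-31, also in words) into it.
		 */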
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
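	/* Each SIMPLE element is two words; grow the size field to match. */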
1981 mb[0] += (nsegs << 17);
1982 return (0);
1983
1984 bad:
1985 if (xn != 0)
1986 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1987 return (rv);
1988 }

/*
 * Map a block I/O data transfer (different in that there's only one per
 * message maximum, and PAGE addressing may be used).  Write a
 * scatter-gather list into the message frame.
 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
	if (rv != 0)
		return (rv);

	off = mb[0] >> 16;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
			slen = ds->ds_len;
			saddr = ds->ds_addr;

			while (slen > 0) {
				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
				tlen = min(eaddr - saddr, slen);
				slen -= tlen;
				*p++ = le32toh(saddr);
				saddr = eaddr;
				nsegs++;
			}
		}

		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		if (out)
			mb[off] |= I2O_SGL_DATA_OUT;
	} else {
		p = mb + off;
		nsegs = dm->dm_nsegs;

		if (out)
			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
		else
			flg = I2O_SGL_SIMPLE;

		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
			p[0] = (u_int32_t)ds->ds_len | flg;
			p[1] = (u_int32_t)ds->ds_addr;
		}

		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		p[1] = (u_int32_t)ds->ds_addr;
		nsegs <<= 1;
	}

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * Adjust the SGL offset and total message size fields.  We don't
	 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
	 */
	mb[0] += ((off << 4) + (nsegs << 16));
	return (0);
}
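
/*
 * A worked example of the SGL capacity arithmetic above, assuming a
 * hypothetical 128-byte frame (sc_framesize == 128, i.e. 32 words) and
 * a message header ending at word 8 (off == 8):
 *
 *	SIMPLE:    (32 - 8) / 2 = 12 two-word elements, so at most 12
 *	           DMA segments can be described directly.
 *	PAGE_LIST: one header word plus 32 - 8 - 1 = 23 page address
 *	           words, i.e. 23 pages however fragmented the buffer.
 */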

/*
 * Unmap all data transfers associated with a message wrapper.
 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

#ifdef I2ODEBUG
	if (im->im_xfer[0].ix_size == 0)
		panic("iop_msg_unmap: no transfers mapped");
#endif

	for (ix = im->im_xfer, i = 0;;) {
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
		if ((++ix)->ix_size == 0)
			break;
		if (++i >= IOP_MAX_MSG_XFERS)
			break;
	}
}
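
/*
 * Note the expected ordering: a mapped wrapper is unmapped before it
 * is freed, as iop_passthrough() below does.  A sketch:
 *
 *	iop_msg_unmap(sc, im);
 *	iop_msg_free(sc, im);
 */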

/*
 * Post a message frame to the IOP's inbound queue.
 */
int
iop_post(struct iop_softc *sc, u_int32_t *mb)
{
	u_int32_t mfa;

#ifdef I2ODEBUG
	if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
		panic("iop_post: frame too large");
#endif

	mutex_spin_enter(&sc->sc_intrlock);

	/* Allocate a slot with the IOP. */
	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
			mutex_spin_exit(&sc->sc_intrlock);
			printf("%s: mfa not forthcoming\n",
			    sc->sc_dv.dv_xname);
			return (EAGAIN);
		}

	/* Perform reply buffer DMA synchronisation. */
	if (sc->sc_curib++ == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);

	/* Copy out the message frame. */
	bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
	    mb[0] >> 16);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
	    (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);

	/* Post the MFA back to the IOP. */
	iop_outl(sc, IOP_REG_IFIFO, mfa);

	mutex_spin_exit(&sc->sc_intrlock);
	return (0);
}
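
/*
 * Example (a sketch): the smallest useful frame is the four-word UTIL
 * NOP that iop_release_mfa() below also builds.
 *
 *	u_int32_t mb[4];
 *
 *	mb[0] = I2O_VERSION_11 | (4 << 16);
 *	mb[1] = I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP);
 *	mb[2] = 0;
 *	mb[3] = 0;
 *	(void)iop_post(sc, mb);
 */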

/*
 * Post a message to the IOP and deal with completion.
 */
int
iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
{
	u_int32_t *mb;
	int rv;

	mb = xmb;

	/* Terminate the scatter/gather list chain. */
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
		if ((im->im_flags & IM_POLL) != 0)
			iop_msg_poll(sc, im, timo);
		else
			iop_msg_wait(sc, im, timo);

		mutex_spin_enter(&sc->sc_intrlock);
		if ((im->im_flags & IM_REPLIED) != 0) {
			if ((im->im_flags & IM_NOSTATUS) != 0)
				rv = 0;
			else if ((im->im_flags & IM_FAIL) != 0)
				rv = ENXIO;
			else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
				rv = EIO;
			else
				rv = 0;
		} else
			rv = EBUSY;
		mutex_spin_exit(&sc->sc_intrlock);
	} else
		rv = 0;

	return (rv);
}
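
/*
 * Example (a sketch, mirroring iop_util_claim() below; error handling
 * is omitted): a synchronous round trip with a five second timeout.
 *
 *	im = iop_msg_alloc(sc, IM_WAIT);
 *	mf.msgtctx = im->im_tctx;
 *	... fill in the rest of the frame ...
 *	rv = iop_msg_post(sc, im, &mf, 5000);
 *	iop_msg_free(sc, im);
 */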

/*
 * Spin until the specified message is replied to.
 */
static void
iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	u_int32_t rmfa;

	mutex_spin_enter(&sc->sc_intrlock);

	for (timo *= 10; timo != 0; timo--) {
		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
			/* Double read to account for IOP bug. */
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa != IOP_MFA_EMPTY) {
				iop_handle_reply(sc, rmfa);

				/*
				 * Return the reply frame to the IOP's
				 * outbound FIFO.
				 */
				iop_outl(sc, IOP_REG_OFIFO, rmfa);
			}
		}
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		mutex_spin_exit(&sc->sc_intrlock);
		DELAY(100);
		mutex_spin_enter(&sc->sc_intrlock);
	}

	if (timo == 0) {
#ifdef I2ODEBUG
		printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
		if (iop_status_get(sc, 1) != 0)
			printf("iop_msg_poll: unable to retrieve status\n");
		else
			printf("iop_msg_poll: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
#endif
	}

	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Sleep until the specified message is replied to.
 */
static void
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	int rv;

	mutex_spin_enter(&sc->sc_intrlock);
	if ((im->im_flags & IM_REPLIED) != 0) {
		mutex_spin_exit(&sc->sc_intrlock);
		return;
	}
	rv = cv_timedwait(&im->im_cv, &sc->sc_intrlock, mstohz(timo));
	mutex_spin_exit(&sc->sc_intrlock);

#ifdef I2ODEBUG
	if (rv != 0) {
		printf("iop_msg_wait: cv_timedwait() == %d\n", rv);
		if (iop_status_get(sc, 0) != 0)
			printf("iop_msg_wait: unable to retrieve status\n");
		else
			printf("iop_msg_wait: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
	}
#endif
}
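
/*
 * Whether iop_msg_post() ends up in iop_msg_poll() or iop_msg_wait()
 * is decided when the wrapper is allocated: IM_POLL spins (usable
 * before interrupts are running), IM_WAIT sleeps on the wrapper's
 * condition variable.  A sketch:
 *
 *	im = iop_msg_alloc(sc, cold ? IM_POLL : IM_WAIT);
 */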

/*
 * Release an unused message frame back to the IOP's inbound fifo.
 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op. */
	iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
	iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	iop_outl_msg(sc, mfa + 8, 0);
	iop_outl_msg(sc, mfa + 12, 0);

	iop_outl(sc, IOP_REG_IFIFO, mfa);
}

#ifdef I2ODEBUG
/*
 * Dump a reply frame header.
 */
static void
iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
{
	u_int function, detail;
	const char *statusstr;

	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
	detail = le16toh(rb->detail);

	printf("%s: reply:\n", sc->sc_dv.dv_xname);

	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: function=0x%02x status=0x%02x (%s)\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
	printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
	    sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
	    le32toh(rb->msgtctx));
	printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
	    (le32toh(rb->msgflags) >> 8) & 0xff);
}
#endif

/*
 * Dump a transport failure reply.
 */
static void
iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
{

	printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);

	printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
	printf("%s: failurecode=0x%02x severity=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
	printf("%s: highestver=0x%02x lowestver=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
}

/*
 * Translate an I2O ASCII field into a C string.
 */
void
iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
{
	int hc, lc, i, nit;

	dlen--;
	lc = 0;
	hc = 0;
	i = 0;

	/*
	 * DPT uses NUL as a space, whereas AMI uses it as a terminator.
	 * The spec has nothing to say about it.  Since AMI fields are
	 * usually filled with junk after the terminator, stop at the
	 * first NUL unless the IOP is from DPT.
	 */
	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);

	while (slen-- != 0 && dlen-- != 0) {
		if (nit && *src == '\0')
			break;
		else if (*src <= 0x20 || *src >= 0x7f) {
			if (hc)
				dst[i++] = ' ';
		} else {
			hc = 1;
			dst[i++] = *src;
			lc = i;
		}
		src++;
	}

	dst[lc] = '\0';
}

/*
 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
 */
int
iop_print_ident(struct iop_softc *sc, int tid)
{
	struct {
		struct i2o_param_op_results pr;
		struct i2o_param_read_results prr;
		struct i2o_param_device_identity di;
	} __attribute__ ((__packed__)) p;
	char buf[32];
	int rv;

	rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
	    sizeof(p), NULL);
	if (rv != 0)
		return (rv);

	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
	    sizeof(buf));
	printf(" <%s, ", buf);
	iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
	    sizeof(buf));
	printf("%s, ", buf);
	iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
	printf("%s>", buf);

	return (0);
}

/*
 * Claim or unclaim the specified TID.
 */
int
iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
    int flags)
{
	struct iop_msg *im;
	struct i2o_util_claim mf;
	int rv, func;

	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
	im = iop_msg_alloc(sc, IM_WAIT);

	/* We can use the same structure, as they're identical. */
	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = flags;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}
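
/*
 * Example (a sketch; the claim-type flag comes from i2o.h, e.g. a
 * "primary user" claim): claim a target at attach time and release it
 * again at detach time.
 *
 *	rv = iop_util_claim(sc, ii, 0, I2O_UTIL_CLAIM_PRIMARY_USER);
 *	...
 *	rv = iop_util_claim(sc, ii, 1, I2O_UTIL_CLAIM_PRIMARY_USER);
 */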

/*
 * Perform an abort.
 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
{
	struct iop_msg *im;
	struct i2o_util_abort mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = (func << 24) | flags;
	mf.tctxabort = tctxabort;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Enable or disable reception of events for the specified device.
 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
{
	struct i2o_util_event_register mf;

	mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = 0;
	mf.eventmask = mask;

	/* This message is replied to only when events are signalled. */
	return (iop_post(sc, (u_int32_t *)&mf));
}
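
/*
 * Example (a sketch; the event mask bits are defined in i2o.h): enable
 * a set of events, and later disable them again by registering a zero
 * mask.
 *
 *	rv = iop_util_eventreg(sc, ii, mask);
 *	...
 *	rv = iop_util_eventreg(sc, ii, 0);
 */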

int
iopopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct iop_softc *sc;

	if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & IOP_ONLINE) == 0)
		return (ENXIO);
	if ((sc->sc_flags & IOP_OPEN) != 0)
		return (EBUSY);
	sc->sc_flags |= IOP_OPEN;

	return (0);
}

int
iopclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct iop_softc *sc;

	sc = device_lookup(&iop_cd, minor(dev));
	sc->sc_flags &= ~IOP_OPEN;

	return (0);
}

int
iopioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct iop_softc *sc;
	struct iovec *iov;
	int rv, i;

	sc = device_lookup(&iop_cd, minor(dev));
	rv = 0;

	switch (cmd) {
	case IOPIOCPT:
		rv = kauth_authorize_device_passthru(l->l_cred, dev,
		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
		if (rv)
			return (rv);

		return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));

	case IOPIOCGSTATUS:
		iov = (struct iovec *)data;
		i = sizeof(struct i2o_status);
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		if ((rv = iop_status_get(sc, 0)) == 0)
			rv = copyout(&sc->sc_status, iov->iov_base, i);
		return (rv);

	case IOPIOCGLCT:
	case IOPIOCGTIDMAP:
	case IOPIOCRECONFIG:
		break;

	default:
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
#endif
		return (ENOTTY);
	}

	mutex_enter(&sc->sc_conflock);

	switch (cmd) {
	case IOPIOCGLCT:
		iov = (struct iovec *)data;
		i = le16toh(sc->sc_lct->tablesize) << 2;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_lct, iov->iov_base, i);
		break;

	case IOPIOCRECONFIG:
		rv = iop_reconfigure(sc, 0);
		break;

	case IOPIOCGTIDMAP:
		iov = (struct iovec *)data;
		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
		break;
	}

	mutex_exit(&sc->sc_conflock);
	return (rv);
}
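
/*
 * Example (a userland sketch, assuming a /dev/iop0 node; error handling
 * is omitted): fetch the IOP status block through IOPIOCGSTATUS, which
 * takes an iovec describing the caller's buffer.
 *
 *	struct i2o_status st;
 *	struct iovec iov;
 *	int fd;
 *
 *	fd = open("/dev/iop0", O_RDWR);
 *	iov.iov_base = &st;
 *	iov.iov_len = sizeof(st);
 *	ioctl(fd, IOPIOCGSTATUS, &iov);
 */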

static int
iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
{
	struct iop_msg *im;
	struct i2o_msg *mf;
	struct ioppt_buf *ptb;
	int rv, i, mapped;

	mf = NULL;
	im = NULL;
	mapped = 0;	/* nothing mapped yet; "bad" below relies on this */

	if (pt->pt_msglen > sc->sc_framesize ||
	    pt->pt_msglen < sizeof(struct i2o_msg) ||
	    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
	    pt->pt_nbufs < 0 ||
#if 0
	    pt->pt_replylen < 0 ||
#endif
	    pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
		return (EINVAL);

	for (i = 0; i < pt->pt_nbufs; i++)
		if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
			rv = ENOMEM;
			goto bad;
		}

	mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
	if (mf == NULL)
		return (ENOMEM);

	if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
		goto bad;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
	im->im_rb = (struct i2o_reply *)mf;
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	for (i = 0; i < pt->pt_nbufs; i++) {
		ptb = &pt->pt_bufs[i];
		rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
		    ptb->ptb_datalen, ptb->ptb_out != 0, p);
		if (rv != 0)
			goto bad;
		mapped = 1;
	}

	if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
		goto bad;

	i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
	if (i > sc->sc_framesize)
		i = sc->sc_framesize;
	if (i > pt->pt_replylen)
		i = pt->pt_replylen;
	rv = copyout(im->im_rb, pt->pt_reply, i);

 bad:
	if (mapped != 0)
		iop_msg_unmap(sc, im);
	if (im != NULL)
		iop_msg_free(sc, im);
	if (mf != NULL)
		free(mf, M_DEVBUF);
	return (rv);
}
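
/*
 * Example (a userland sketch of the passthrough interface; the frame
 * and buffer variables are hypothetical and error handling is omitted):
 *
 *	struct ioppt pt;
 *	struct ioppt_buf ptb;
 *
 *	pt.pt_msg = mf;			message frame to send
 *	pt.pt_msglen = mflen;		frame size in bytes
 *	pt.pt_reply = rbuf;		where to copy the reply
 *	pt.pt_replylen = rbuflen;
 *	pt.pt_timo = 5000;		timeout in ms (1000..300000)
 *	pt.pt_nbufs = 1;
 *	pt.pt_bufs = &ptb;
 *	ptb.ptb_data = buf;
 *	ptb.ptb_datalen = buflen;
 *	ptb.ptb_out = 0;		0 = read from device, 1 = write
 *	ioctl(fd, IOPIOCPT, &pt);
 */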