/*	$NetBSD: iop.c,v 1.15 2001/08/04 16:54:18 ad Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

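/*
 * POLL() busy-waits for `cond' to become true, re-testing every 100us;
 * the `ms' argument is therefore scaled to (ms * 10) iterations.  It
 * never sleeps, so it is usable before interrupts are configured.
 */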
#define	POLL(ms, cond)						\
do {								\
	int i;							\
	for (i = (ms) * 10; i; i--) {				\
		if (cond)					\
			break;					\
		DELAY(100);					\
	}							\
} while (/* CONSTCOND */0)

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#define	COMMENT(x)	NULL
#else
#define	IFVERBOSE(x)
#define	COMMENT(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)
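
/*
 * Transaction context layout: the low IOP_TCTX_SHIFT bits index the
 * message wrapper array, while the bits above hold a generation number,
 * advanced on each allocation, so that stale or duplicate replies can
 * be detected (see iop_msg_alloc() and iop_handle_reply()).
 */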

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static void *iop_sdh;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

extern struct cfdriver iop_cd;

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char *ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		COMMENT("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *);
static int	iop_print(void *, const char *);
static int	iop_reconfigure(struct iop_softc *, u_int);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *, void *);
#ifdef notyet
static int	iop_vendor_print(void *, const char *);
#endif

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
				struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_status_get(struct iop_softc *, int);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

cdev_decl(iop);

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
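
/*
 * Ordering note: iop_inl() issues both read and write barriers first,
 * so a register read cannot be satisfied ahead of previously posted
 * writes; iop_outl() needs only a write barrier after the store to
 * push it out to the device in order.
 */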

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}
	state++;

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT);
	if (im == NULL)
		goto bail_out;
	memset(im, 0, sizeof(*im) * sc->sc_maxib);
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0, state++; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}
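
	/*
	 * Each wrapper's initial im_tctx is just its array index; the
	 * generation bits above IOP_TCTX_SHIFT are filled in later by
	 * iop_msg_alloc().
	 */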

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	return;

 bail_out:
	if (state > 4) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);

}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT);
		if (iop_systab == NULL)
			return;

		memset(iop_systab, 0, i);
		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
	    30000) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
535 printf("%s: unable to register for events", sc->sc_dv.dv_xname);
		return;
	}

#ifdef notyet
	/* Attempt to match and attach a product-specific extension. */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	config_found_sm(self, &ia, iop_vendor_print, iop_submatch);
#endif

	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	if ((rv = iop_reconfigure(sc, 0)) != 0) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		return;
	}
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
572 printf("%s: unable to create reconfiguration thread (%d)",
573 sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(sc->sc_reconf_proc);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(sc->sc_reconf_proc);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
static int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_tidmap == NULL)
		return (ENOMEM);
	memset(sc->sc_tidmap, 0, sc->sc_nlctent * sizeof(struct iop_tidmap));

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0)
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		sprintf(devinfo, "device (class 0x%x)", class);
	else
		strcpy(devinfo, iop_class[i].ic_caption);
#else

	sprintf(devinfo, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo);
		printf("%s at %s", devinfo, pnp);
	}
	printf(" tid %d", ia->ia_tid);
	return (UNCONF);
}

#ifdef notyet
static int
iop_vendor_print(void *aux, const char *pnp)
{

	if (pnp != NULL)
		printf("vendor specific extension at %s", pnp);
	return (UNCONF);
}
#endif

static int
iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
		return (0);

	return ((*cf->cf_attach->ca_match)(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);
		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, IOP_ICTX,
		    0, 1000);
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done.\n");
}

/*
 * Retrieve IOP status.
 */
static int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff)
		rv = EIO;
	else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialise and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((IOP_MAX_MSG_SIZE >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	mb[0] += 2 << 16;
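
	/*
	 * The `2 << 16' above extends the message size field (held in
	 * bits 16..31 of the first word, in units of 32-bit words) to
	 * cover the two SG list words just appended.
	 */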

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * IOP_MAX_MSG_SIZE;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += IOP_MAX_MSG_SIZE;
	}
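
	/*
	 * Each address posted to the outbound FIFO above hands the IOP
	 * an empty reply frame; the IOP passes a frame back through the
	 * same FIFO once it has filled it in, and iop_intr() then
	 * returns the frame for re-use.
	 */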

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curproc);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curproc);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this is a
 * verbatim notification request, so the caller must be prepared to
 * wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
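
	/*
	 * `tablesize' is expressed in 32-bit words, and struct i2o_lct
	 * already includes the first table entry, hence the add-back of
	 * one entry size in the computation above.
	 */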
	return (0);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_param_op(struct iop_softc *sc, int tid, struct iop_initiator *ii,
	     int write, int group, void *buf, int size)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct i2o_reply *rf;
	int rv, func, op;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;
	im->im_rb = rf;

	if (write) {
		func = I2O_UTIL_PARAMS_SET;
		op = I2O_PARAMS_OP_FIELD_SET;
	} else {
		func = I2O_UTIL_PARAMS_GET;
		op = I2O_PARAMS_OP_FIELD_GET;
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, func);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(op);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curproc);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, write, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curproc);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (rf->reqstatus != 0 ? EIO : 0);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
		free(rf, M_DEVBUF);
	}

	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curproc);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curproc);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    IOP_MAX_MSG_SIZE, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);
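
	/*
	 * sc_curib counts reply frames thought to be in flight: the map
	 * is re-armed with PREREAD here only while further replies
	 * remain outstanding; iop_post() syncs it again when the count
	 * rises from zero.
	 */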

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) >= sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
1528 printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
1529 sc->sc_dv.dv_xname, tctx, im);
1530 if (im != NULL)
1531 printf("%s: flags=0x%08x tctx=0x%08x\n",
1532 sc->sc_dv.dv_xname, im->im_flags,
1533 im->im_tctx);
1534 #ifdef I2ODEBUG
1535 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
1536 iop_reply_print(sc, rb);
1537 #endif
1538 return (-1);
1539 }
1540
1541 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1542 im->im_flags |= IM_FAIL;
1543
1544 #ifdef I2ODEBUG
1545 if ((im->im_flags & IM_REPLIED) != 0)
1546 panic("%s: dup reply", sc->sc_dv.dv_xname);
1547 #endif
1548 im->im_flags |= IM_REPLIED;
1549
1550 #ifdef I2ODEBUG
1551 if (status != I2O_STATUS_SUCCESS)
1552 iop_reply_print(sc, rb);
1553 #endif
1554 im->im_reqstatus = status;
1555
1556 /* Copy the reply frame, if requested. */
1557 if (im->im_rb != NULL) {
1558 size = (le32toh(rb->msgflags) >> 14) & ~3;
1559 #ifdef I2ODEBUG
1560 if (size > IOP_MAX_MSG_SIZE)
1561 panic("iop_handle_reply: reply too large");
1562 #endif
1563 memcpy(im->im_rb, rb, size);
1564 }
1565
1566 /* Notify the initiator. */
1567 if ((im->im_flags & IM_WAIT) != 0)
1568 wakeup(im);
1569 else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
1570 (*ii->ii_intr)(ii->ii_dv, im, rb);
1571 } else {
1572 /*
1573 * This initiator discards message wrappers.
1574 *
1575 * Simply pass the reply frame to the initiator.
1576 */
1577 (*ii->ii_intr)(ii->ii_dv, NULL, rb);
1578 }
1579
1580 return (status);
1581 }
1582
1583 /*
1584 * Handle an interrupt from the IOP.
1585 */
1586 int
1587 iop_intr(void *arg)
1588 {
1589 struct iop_softc *sc;
1590 u_int32_t rmfa;
1591
1592 sc = arg;
1593
1594 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
1595 return (0);
1596
1597 for (;;) {
1598 /* Double read to account for IOP bug. */
1599 if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
1600 rmfa = iop_inl(sc, IOP_REG_OFIFO);
1601 if (rmfa == IOP_MFA_EMPTY)
1602 break;
1603 }
1604 iop_handle_reply(sc, rmfa);
1605 iop_outl(sc, IOP_REG_OFIFO, rmfa);
1606 }
1607
1608 return (1);
1609 }
1610
1611 /*
1612 * Handle an event signalled by the executive.
1613 */
1614 static void
1615 iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
1616 {
1617 struct i2o_util_event_register_reply *rb;
1618 struct iop_softc *sc;
1619 u_int event;
1620
1621 sc = (struct iop_softc *)dv;
1622 rb = reply;
1623
1624 if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1625 return;
1626
1627 event = le32toh(rb->event);
1628 printf("%s: event 0x%08x received\n", dv->dv_xname, event);
1629 }
1630
1631 /*
1632 * Allocate a message wrapper.
1633 */
1634 struct iop_msg *
1635 iop_msg_alloc(struct iop_softc *sc, int flags)
1636 {
1637 struct iop_msg *im;
1638 static u_int tctxgen;
1639 int s, i;
1640
1641 #ifdef I2ODEBUG
1642 if ((flags & IM_SYSMASK) != 0)
1643 panic("iop_msg_alloc: system flags specified");
1644 #endif
1645
1646 s = splbio();
1647 im = SLIST_FIRST(&sc->sc_im_freelist);
1648 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
1649 if (im == NULL)
1650 panic("iop_msg_alloc: no free wrappers");
1651 #endif
1652 SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
1653 splx(s);
1654
1655 im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
1656 tctxgen += (1 << IOP_TCTX_SHIFT);
1657 im->im_flags = flags | IM_ALLOCED;
1658 im->im_rb = NULL;
1659 i = 0;
1660 do {
1661 im->im_xfer[i++].ix_size = 0;
1662 } while (i < IOP_MAX_MSG_XFERS);
1663
1664 return (im);
1665 }
1666
1667 /*
1668 * Free a message wrapper.
1669 */
1670 void
1671 iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
1672 {
1673 int s;
1674
1675 #ifdef I2ODEBUG
1676 if ((im->im_flags & IM_ALLOCED) == 0)
1677 panic("iop_msg_free: wrapper not allocated");
1678 #endif
1679
1680 im->im_flags = 0;
1681 s = splbio();
1682 SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
1683 splx(s);
1684 }
1685
1686 /*
1687 * Map a data transfer. Write a scatter-gather list into the message frame.
1688 */
1689 int
1690 iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1691 void *xferaddr, int xfersize, int out, struct proc *up)
1692 {
1693 bus_dmamap_t dm;
1694 bus_dma_segment_t *ds;
1695 struct iop_xfer *ix;
1696 u_int rv, i, nsegs, flg, off, xn;
1697 u_int32_t *p;
1698
1699 for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
1700 if (ix->ix_size == 0)
1701 break;
1702
1703 #ifdef I2ODEBUG
1704 if (xfersize == 0)
1705 panic("iop_msg_map: null transfer");
1706 if (xfersize > IOP_MAX_XFER)
1707 panic("iop_msg_map: transfer too large");
1708 if (xn == IOP_MAX_MSG_XFERS)
1709 panic("iop_msg_map: too many xfers");
1710 #endif
1711
1712 /*
1713 * Only the first DMA map is static.
1714 */
1715 if (xn != 0) {
1716 rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
1717 IOP_MAX_SEGS, IOP_MAX_XFER, 0,
1718 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
1719 if (rv != 0)
1720 return (rv);
1721 }
1722
1723 dm = ix->ix_map;
1724 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
1725 (up == NULL ? BUS_DMA_NOWAIT : 0));
1726 if (rv != 0)
1727 goto bad;
1728
1729 /*
1730 * How many SIMPLE SG elements can we fit in this message?
1731 */
1732 off = mb[0] >> 16;
1733 p = mb + off;
1734 nsegs = ((IOP_MAX_MSG_SIZE / 4) - off) >> 1;
1735
1736 if (dm->dm_nsegs > nsegs) {
1737 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
1738 rv = EFBIG;
1739 DPRINTF(("iop_msg_map: too many segs\n"));
1740 goto bad;
1741 }
1742
1743 nsegs = dm->dm_nsegs;
1744 xfersize = 0;
1745
1746 /*
1747 * Write out the SG list.
1748 */
1749 if (out)
1750 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
1751 else
1752 flg = I2O_SGL_SIMPLE;
1753
1754 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
1755 p[0] = (u_int32_t)ds->ds_len | flg;
1756 p[1] = (u_int32_t)ds->ds_addr;
1757 xfersize += ds->ds_len;
1758 }
1759
1760 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
1761 p[1] = (u_int32_t)ds->ds_addr;
1762 xfersize += ds->ds_len;
1763
1764 /* Fix up the transfer record, and sync the map. */
1765 ix->ix_flags = (out ? IX_OUT : IX_IN);
1766 ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
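
	/*
	 * Each SIMPLE SG element occupies two 32-bit words, so the
	 * message size field (words, bits 16..31) must grow by
	 * 2 * nsegs; hence the shift by 17 below.
	 */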
	mb[0] += (nsegs << 17);
	return (0);

 bad:
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}

/*
 * Map a block I/O data transfer (different in that there's only one per
 * message maximum, and PAGE addressing may be used).  Write a scatter
 * gather list into the message frame.
 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
		void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
	if (rv != 0)
		return (rv);

	off = mb[0] >> 16;
	nsegs = ((IOP_MAX_MSG_SIZE / 4) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
			slen = ds->ds_len;
			saddr = ds->ds_addr;

			while (slen > 0) {
				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
				tlen = min(eaddr - saddr, slen);
				slen -= tlen;
				*p++ = le32toh(saddr);
				saddr = eaddr;
				nsegs++;
			}
		}

		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		if (out)
			mb[off] |= I2O_SGL_DATA_OUT;
	} else {
		p = mb + off;
		nsegs = dm->dm_nsegs;

		if (out)
			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
		else
			flg = I2O_SGL_SIMPLE;

		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
			p[0] = (u_int32_t)ds->ds_len | flg;
			p[1] = (u_int32_t)ds->ds_addr;
		}

		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		p[1] = (u_int32_t)ds->ds_addr;
		nsegs <<= 1;
	}

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * Adjust the SGL offset and total message size fields.  We don't
	 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
	 */
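	/*
	 * `off << 4' deposits the SG list offset into the header's SGL
	 * offset field, and `nsegs << 16' (nsegs counts 32-bit words by
	 * this point, not elements) extends the message size to match.
	 */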
	mb[0] += ((off << 4) + (nsegs << 16));
	return (0);
}

/*
 * Unmap all data transfers associated with a message wrapper.
 */
void
iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
{
	struct iop_xfer *ix;
	int i;

#ifdef I2ODEBUG
	if (im->im_xfer[0].ix_size == 0)
		panic("iop_msg_unmap: no transfers mapped");
#endif

	for (ix = im->im_xfer, i = 0;;) {
		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);

		/* Only the first DMA map is static. */
		if (i != 0)
			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
		if ((++ix)->ix_size == 0)
			break;
		if (++i >= IOP_MAX_MSG_XFERS)
			break;
	}
}

/*
 * Post a message frame to the IOP's inbound queue.
 */
int
iop_post(struct iop_softc *sc, u_int32_t *mb)
{
	u_int32_t mfa;
	int s;

#ifdef I2ODEBUG
	if ((mb[0] >> 16) > IOP_MAX_MSG_SIZE / 4)
		panic("iop_post: frame too large");
#endif

	s = splbio();

	/* Allocate a slot with the IOP. */
	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
			splx(s);
			printf("%s: mfa not forthcoming\n",
			    sc->sc_dv.dv_xname);
			return (EAGAIN);
		}

	/* Perform reply buffer DMA synchronisation. */
	if (sc->sc_curib++ == 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);

	/* Copy out the message frame. */
	bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa, mb, mb[0] >> 16);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa, (mb[0] >> 14) & ~3,
	    BUS_SPACE_BARRIER_WRITE);
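
	/*
	 * Note the unit change above: the copy length is `mb[0] >> 16'
	 * 32-bit words, while the barrier length is the same quantity
	 * expressed in bytes, i.e. (mb[0] >> 14) & ~3.
	 */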
1948
1949 /* Post the MFA back to the IOP. */
1950 iop_outl(sc, IOP_REG_IFIFO, mfa);
1951
1952 splx(s);
1953 return (0);
1954 }
1955
1956 /*
1957 * Post a message to the IOP and deal with completion.
1958 */
1959 int
1960 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
1961 {
1962 u_int32_t *mb;
1963 int rv, s;
1964
1965 mb = xmb;
1966
1967 /* Terminate the scatter/gather list chain. */
1968 if ((im->im_flags & IM_SGLOFFADJ) != 0)
1969 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
1970
1971 if ((rv = iop_post(sc, mb)) != 0)
1972 return (rv);
1973
1974 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
1975 if ((im->im_flags & IM_POLL) != 0)
1976 iop_msg_poll(sc, im, timo);
1977 else
1978 iop_msg_wait(sc, im, timo);
1979
1980 s = splbio();
1981 if ((im->im_flags & IM_REPLIED) != 0) {
1982 if ((im->im_flags & IM_NOSTATUS) != 0)
1983 rv = 0;
1984 else if ((im->im_flags & IM_FAIL) != 0)
1985 rv = ENXIO;
1986 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
1987 rv = EIO;
1988 else
1989 rv = 0;
1990 } else
1991 rv = EBUSY;
1992 splx(s);
1993 } else
1994 rv = 0;
1995
1996 return (rv);
1997 }
1998
1999 /*
2000 * Spin until the specified message is replied to.
2001 */
2002 static void
2003 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2004 {
2005 u_int32_t rmfa;
2006 int s, status;
2007
2008 s = splbio();
2009
2010 /* Wait for completion. */
	for (timo *= 10; timo != 0; timo--) {
		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
			/* Double read to account for IOP bug. */
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa != IOP_MFA_EMPTY) {
				status = iop_handle_reply(sc, rmfa);

				/*
				 * Return the reply frame to the IOP's
				 * outbound FIFO.
				 */
				iop_outl(sc, IOP_REG_OFIFO, rmfa);
			}
		}
		if ((im->im_flags & IM_REPLIED) != 0)
			break;
		DELAY(100);
	}

	if (timo == 0) {
#ifdef I2ODEBUG
		printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
		if (iop_status_get(sc, 1) != 0)
			printf("iop_msg_poll: unable to retrieve status\n");
		else
			printf("iop_msg_poll: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
#endif
	}

	splx(s);
}

/*
 * Sleep until the specified message is replied to.
 */
static void
iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
{
	int s, rv;

	s = splbio();
	if ((im->im_flags & IM_REPLIED) != 0) {
		splx(s);
		return;
	}
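	/* 'timo' is in milliseconds; convert to clock ticks for tsleep(). */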
	rv = tsleep(im, PRIBIO, "iopmsg", timo * hz / 1000);
	splx(s);

#ifdef I2ODEBUG
	if (rv != 0) {
		printf("iop_msg_wait: tsleep() == %d\n", rv);
		if (iop_status_get(sc, 0) != 0)
			printf("iop_msg_wait: unable to retrieve status\n");
		else
			printf("iop_msg_wait: IOP state = %d\n",
			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
	}
#endif
}

/*
 * Release an unused message frame back to the IOP's inbound FIFO.
 */
static void
iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
{

	/* Use the frame to issue a no-op. */
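	/* Word 0: version and size (4 words); word 1: function and TIDs. */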
	iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
	iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
	iop_outl(sc, mfa + 8, 0);
	iop_outl(sc, mfa + 12, 0);

	iop_outl(sc, IOP_REG_IFIFO, mfa);
}

#ifdef I2ODEBUG
/*
 * Dump a reply frame header.
 */
static void
iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
{
	u_int function, detail;
#ifdef I2OVERBOSE
	const char *statusstr;
#endif

	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
	detail = le16toh(rb->detail);

	printf("%s: reply:\n", sc->sc_dv.dv_xname);

#ifdef I2OVERBOSE
	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
		statusstr = iop_status[rb->reqstatus];
	else
		statusstr = "undefined error code";

	printf("%s: function=0x%02x status=0x%02x (%s)\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
#else
	printf("%s: function=0x%02x status=0x%02x\n",
	    sc->sc_dv.dv_xname, function, rb->reqstatus);
#endif
	printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
	    sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
	    le32toh(rb->msgtctx));
	printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
	    (le32toh(rb->msgflags) >> 8) & 0xff);
}
#endif

/*
 * Dump a transport failure reply.
 */
static void
iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
{

	printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);

	printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
	printf("%s: failurecode=0x%02x severity=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
	printf("%s: highestver=0x%02x lowestver=0x%02x\n",
	    sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
}

/*
 * Translate an I2O ASCII field into a C string.
 */
void
iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
{
	int hc, lc, i, nit;

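	/* Reserve a byte for the terminating NUL. */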
	dlen--;
	lc = 0;
	hc = 0;
	i = 0;

	/*
	 * DPT uses NUL as a space, whereas AMI uses it as a terminator.
	 * The spec has nothing to say about it.  Since AMI fields are
	 * usually filled with junk after the terminator, treat NUL as
	 * a terminator ("nit") on everything but DPT hardware.
	 */
	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);

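	/*
	 * Replace non-printable characters with blanks once something
	 * printable has been seen; 'lc' remembers the position just
	 * past the last printable character, so trailing blanks are
	 * trimmed when the string is terminated below.
	 */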
	while (slen-- != 0 && dlen-- != 0) {
		if (nit && *src == '\0')
			break;
		else if (*src <= 0x20 || *src >= 0x7f) {
			if (hc)
				dst[i++] = ' ';
		} else {
			hc = 1;
			dst[i++] = *src;
			lc = i;
		}
		src++;
	}

	dst[lc] = '\0';
}

/*
 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
 */
int
iop_print_ident(struct iop_softc *sc, int tid)
{
	struct {
		struct i2o_param_op_results pr;
		struct i2o_param_read_results prr;
		struct i2o_param_device_identity di;
	} __attribute__ ((__packed__)) p;
	char buf[32];
	int rv;

	rv = iop_param_op(sc, tid, NULL, 0, I2O_PARAM_DEVICE_IDENTITY, &p,
	    sizeof(p));
	if (rv != 0)
		return (rv);

	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
	    sizeof(buf));
	printf(" <%s, ", buf);
	iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
	    sizeof(buf));
	printf("%s, ", buf);
	iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
	printf("%s>", buf);

	return (0);
}

/*
 * Claim or unclaim the specified TID.
 */
int
iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
    int flags)
{
	struct iop_msg *im;
	struct i2o_util_claim mf;
	int rv, func;

	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
	im = iop_msg_alloc(sc, IM_WAIT);

	/* We can use the same structure, as they're identical. */
	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = flags;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}
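
/*
 * Example: a target driver would typically claim its device as the
 * primary user at attach time and release the claim at detach (a
 * sketch; 'ii' is the driver's registered initiator):
 *
 *	(void)iop_util_claim(sc, ii, 0, I2O_UTIL_CLAIM_PRIMARY_USER);
 *	...
 *	(void)iop_util_claim(sc, ii, 1, I2O_UTIL_CLAIM_PRIMARY_USER);
 */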

/*
 * Perform an abort.
 */
int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
{
	struct iop_msg *im;
	struct i2o_util_abort mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = (func << 24) | flags;
	mf.tctxabort = tctxabort;

	rv = iop_msg_post(sc, im, &mf, 5000);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Enable or disable reception of events for the specified device.
 */
int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
{
	struct i2o_util_event_register mf;

	mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
	mf.msgictx = ii->ii_ictx;
	mf.msgtctx = 0;
	mf.eventmask = mask;

	/* This message is replied to only when events are signalled. */
	return (iop_post(sc, (u_int32_t *)&mf));
}
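
/*
 * Example: an initiator interested in generic device events might
 * register as below (a sketch; the I2O_EVENT_GEN_* mask bits are
 * defined in <dev/i2o/i2o.h>, and the event replies are delivered
 * through the initiator's interrupt handler):
 *
 *	(void)iop_util_eventreg(sc, ii, I2O_EVENT_GEN_DEVICE_RESET |
 *	    I2O_EVENT_GEN_STATE_CHANGE);
 */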

int
iopopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;

	if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
		return (ENXIO);
	if ((sc->sc_flags & IOP_ONLINE) == 0)
		return (ENXIO);
	if ((sc->sc_flags & IOP_OPEN) != 0)
		return (EBUSY);
	sc->sc_flags |= IOP_OPEN;

	return (0);
}

int
iopclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct iop_softc *sc;

	sc = device_lookup(&iop_cd, minor(dev));
	sc->sc_flags &= ~IOP_OPEN;

	return (0);
}

int
iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct iop_softc *sc;
	struct iovec *iov;
	int rv, i;

	if (securelevel >= 2)
		return (EPERM);

	sc = device_lookup(&iop_cd, minor(dev));

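	/*
	 * For commands that return a variable-length object, the
	 * argument is an iovec: at most iov_len bytes are copied out,
	 * and iov_len is updated to the size actually transferred.
	 */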
	switch (cmd) {
	case IOPIOCPT:
		return (iop_passthrough(sc, (struct ioppt *)data, p));

	case IOPIOCGSTATUS:
		iov = (struct iovec *)data;
		i = sizeof(struct i2o_status);
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		if ((rv = iop_status_get(sc, 0)) == 0)
			rv = copyout(&sc->sc_status, iov->iov_base, i);
		return (rv);

	case IOPIOCGLCT:
	case IOPIOCGTIDMAP:
	case IOPIOCRECONFIG:
		break;

	default:
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
#endif
		return (ENOTTY);
	}

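	/*
	 * The remaining commands examine configuration data, so hold
	 * the configuration lock shared while using it.
	 */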
	if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
		return (rv);

	switch (cmd) {
	case IOPIOCGLCT:
		iov = (struct iovec *)data;
		i = le16toh(sc->sc_lct->tablesize) << 2;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_lct, iov->iov_base, i);
		break;

	case IOPIOCRECONFIG:
		rv = iop_reconfigure(sc, 0);
		break;

	case IOPIOCGTIDMAP:
		iov = (struct iovec *)data;
		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
		if (i > iov->iov_len)
			i = iov->iov_len;
		else
			iov->iov_len = i;
		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
		break;
	}

	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
	return (rv);
}

static int
iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
{
	struct iop_msg *im;
	struct i2o_msg *mf;
	struct ioppt_buf *ptb;
	int rv, i, mapped;

	mf = NULL;
	im = NULL;
	mapped = 0;

	if (pt->pt_msglen > IOP_MAX_MSG_SIZE ||
	    pt->pt_msglen > (le16toh(sc->sc_status.inboundmframesize) << 2) ||
	    pt->pt_msglen < sizeof(struct i2o_msg) ||
	    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
	    pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
	    pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
		return (EINVAL);

	for (i = 0; i < pt->pt_nbufs; i++)
		if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
			rv = ENOMEM;
			goto bad;
		}

	mf = malloc(IOP_MAX_MSG_SIZE, M_DEVBUF, M_WAITOK);
	if (mf == NULL)
		return (ENOMEM);

	if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
		goto bad;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
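	/* The message buffer doubles as the reply buffer. */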
	im->im_rb = (struct i2o_reply *)mf;
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	for (i = 0; i < pt->pt_nbufs; i++) {
		ptb = &pt->pt_bufs[i];
		rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
		    ptb->ptb_datalen, ptb->ptb_out != 0, p);
		if (rv != 0)
			goto bad;
		mapped = 1;
	}

	if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
		goto bad;

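	/*
	 * The reply frame size, in 32-bit words, lives in the high 16
	 * bits of msgflags; clamp the byte count to both the frame and
	 * the caller's reply buffer before copying out.
	 */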
	i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
	if (i > IOP_MAX_MSG_SIZE)
		i = IOP_MAX_MSG_SIZE;
	if (i > pt->pt_replylen)
		i = pt->pt_replylen;
	rv = copyout(im->im_rb, pt->pt_reply, i);

 bad:
	if (mapped != 0)
		iop_msg_unmap(sc, im);
	if (im != NULL)
		iop_msg_free(sc, im);
	if (mf != NULL)
		free(mf, M_DEVBUF);
	return (rv);
}