/*	$NetBSD: iop.c,v 1.49 2005/08/25 18:35:39 drochner Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.49 2005/08/25 18:35:39 drochner Exp $");

#include "opt_i2o.h"
#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#include "locators.h"

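/*
 * Busy-wait for up to `ms' milliseconds for `cond' to become true,
 * re-testing the condition every 100us.
 */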
#define	POLL(ms, cond)						\
do {								\
	int xi;							\
	for (xi = (ms) * 10; xi; xi--) {			\
		if (cond)					\
			break;					\
		DELAY(100);					\
	}							\
} while (/* CONSTCOND */0);

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#ifdef I2OVERBOSE
#define	IFVERBOSE(x)	x
#define	COMMENT(x)	NULL
#else
#define	IFVERBOSE(x)
#define	COMMENT(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)
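/*
 * A transaction context (im_tctx) carries the wrapper's fixed index into
 * sc_ims in its low IOP_TCTX_SHIFT bits; the upper bits hold a generation
 * number advanced by iop_msg_alloc(), which lets iop_handle_reply()
 * reject stale or corrupted contexts.
 */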

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static void *iop_sdh;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
#ifdef I2OVERBOSE
	const char	*ic_caption;
#endif
} static const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		IFVERBOSE("executive")
	},
	{
		I2O_CLASS_DDM,
		0,
		COMMENT("device driver module")
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("random block storage")
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("sequential storage")
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("LAN port")
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		IFVERBOSE("WAN port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		IFVERBOSE("fibrechannel port")
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		COMMENT("fibrechannel peripheral")
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		COMMENT("SCSI peripheral")
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		IFVERBOSE("ATE port")
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		COMMENT("ATE peripheral")
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		IFVERBOSE("floppy controller")
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		COMMENT("floppy device")
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		IFVERBOSE("bus adapter port")
	},
};

#if defined(I2ODEBUG) && defined(I2OVERBOSE)
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t	iop_inl_msg(struct iop_softc *, int);
static inline void	iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *, size_t);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);
static int	iop_submatch(struct device *, struct cfdata *,
			     const locdesc_t *, void *);

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
				struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_sys_enable(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem  %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o  %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
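	/* inboundmframesize is in 32-bit words; convert to bytes. */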
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		printf("%s: frame size too small (%d)\n",
		    sc->sc_dv.dv_xname, sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
	return;

 bail_out3:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
 bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;
	int locs[IOPCF_NLOCS];

	sc = (struct iop_softc *)self;
	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0)
			return;

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);
		if (iop_systab == NULL)
			return;

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
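			/*
			 * The +2 presumably steps over reserved low IOP
			 * numbers; iop_systab_set() uses the same offset.
			 */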
			ste->iopid = iop->sc_dv.dv_unit + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		return;
	}
	if (iop_sys_enable(sc) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	locs[IOPCF_TID] = I2O_TID_IOP;
	config_found_sm_loc(self, "iop", locs, &ia, iop_print, iop_submatch);

	/*
	 * Start device configuration.
	 */
	lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
	if ((rv = iop_reconfigure(sc, 0)) != 0) {
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
		lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		return;
	}
	lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);

	kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	sc->sc_flags |= IOP_ONLINE;

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct lwp *l;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;
	l = curlwp;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		PHOLD(l);
		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
		PRELE(l);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		if (rv == 0 &&
		    lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
			lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
		}

		tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (sc->sc_tidmap == NULL)
		return (ENOMEM);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0)
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
			config_detach(ii->ii_dv, DETACH_FORCE);

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(&sc->sc_dv, "iop", locs, &ia,
		    iop_print, iop_submatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo, size_t l)
{
#ifdef I2OVERBOSE
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		snprintf(devinfo, l, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, l);
#else

	snprintf(devinfo, l, "device (class 0x%x)", class);
#endif
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

static int
iop_submatch(struct device *parent, struct cfdata *cf,
	     const locdesc_t *locs, void *aux)
{

	if (cf->cf_loc[IOPCF_TID] != IOPCF_TID_DEFAULT &&
	    cf->cf_loc[IOPCF_TID] != locs[IOPCF_TID])
		return (0);

	return (config_match(parent, cf, aux));
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

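	/* Poll for up to ~2.5 seconds for the IOP to fill in the status. */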
	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
	}

	if (st->syncbyte != 0xff) {
		printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
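	/*
	 * Append a single SIMPLE element pointing at the status word, and
	 * grow the message size field (the upper 16 bits of word 0) by the
	 * two words just added.
	 */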
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: DMA map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: DMA create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curlwp);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curlwp);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
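	/* struct i2o_lct already includes one entry, hence the adjustment. */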
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Post a SYS_ENABLE message to the adapter.
 */
int
iop_sys_enable(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, 30000);
	if (rv == 0) {
		if ((im->im_flags & IM_FAIL) != 0)
			rv = ENXIO;
		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
			rv = 0;
		else
			rv = EIO;
	}

	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curlwp);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curlwp);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
	      int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	PHOLD(curlwp);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curlwp);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

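	/*
	 * sizeof(*pgop) covers the first field word; allow for the two
	 * extra u_int16_t fields (RowCount, KeyValue) written below, plus
	 * the row data itself.
	 */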
	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curlwp);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curlwp);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;
	int s;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	s = splbio();
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	splx(s);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{
	int s;

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	s = splbio();
	LIST_REMOVE(ii, ii_hash);
	splx(s);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)(sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) >= sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;
		im->im_detstatus = le16toh(rb->detail);

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			wakeup(im);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
			(*ii->ii_intr)(ii->ii_dv, im, rb);
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		(*ii->ii_intr)(ii->ii_dv, NULL, rb);
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
		return (0);

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int s, i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	s = splbio();
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	splx(s);

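	/*
	 * Preserve the wrapper's fixed index (low IOP_TCTX_SHIFT bits) and
	 * advance the generation number in the upper bits, so that stale
	 * transaction contexts can be caught by iop_handle_reply().
	 */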
	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{
	int s;

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	splx(s);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
	    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
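	/* Each SIMPLE element is two words; grow the size field to match. */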
1993 mb[0] += (nsegs << 17);
1994 return (0);
1995
1996 bad:
1997 if (xn != 0)
1998 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1999 return (rv);
2000 }
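
/*
 * Minimal usage sketch of the map/post/unmap cycle (illustrative only;
 * error handling trimmed, and the header words depend on the function
 * being issued):
 *
 *	struct iop_msg *im;
 *	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
 *
 *	im = iop_msg_alloc(sc, IM_WAIT);
 *	... build the message header in mb[], setting its msgtctx
 *	field to im->im_tctx ...
 *	if (iop_msg_map(sc, im, mb, buf, buflen, 1, NULL) == 0) {
 *		(void)iop_msg_post(sc, im, mb, 5000);
 *		iop_msg_unmap(sc, im);
 *	}
 *	iop_msg_free(sc, im);
 */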
2001
2002 /*
2003 * Map a block I/O data transfer (different in that there's only one per
2004 * message maximum, and PAGE_LIST addressing may be used). Write a
2005 * scatter-gather list into the message frame.
2006 */
2007 int
2008 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
2009 void *xferaddr, int xfersize, int out)
2010 {
2011 bus_dma_segment_t *ds;
2012 bus_dmamap_t dm;
2013 struct iop_xfer *ix;
2014 u_int rv, i, nsegs, off, slen, tlen, flg;
2015 paddr_t saddr, eaddr;
2016 u_int32_t *p;
2017
2018 #ifdef I2ODEBUG
2019 if (xfersize == 0)
2020 panic("iop_msg_map_bio: null transfer");
2021 if (xfersize > IOP_MAX_XFER)
2022 panic("iop_msg_map_bio: transfer too large");
2023 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2024 panic("iop_msg_map_bio: SGLOFFADJ");
2025 #endif
2026
2027 ix = im->im_xfer;
2028 dm = ix->ix_map;
2029 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
2030 BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
2031 if (rv != 0)
2032 return (rv);
2033
2034 off = mb[0] >> 16;
2035 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
2036
2037 /*
2038 * If the transfer is highly fragmented and won't fit using SIMPLE
2039 * elements, use PAGE_LIST elements instead. SIMPLE elements are
2040 * potentially more efficient, both for us and the IOP.
2041 */
2042 if (dm->dm_nsegs > nsegs) {
2043 nsegs = 1;
2044 p = mb + off + 1;
2045
2046 /* XXX This should be done with a bus_space flag. */
2047 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2048 slen = ds->ds_len;
2049 saddr = ds->ds_addr;
2050
2051 while (slen > 0) {
2052 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2053 tlen = min(eaddr - saddr, slen);
2054 slen -= tlen;
2055 *p++ = le32toh(saddr);
2056 saddr = eaddr;
2057 nsegs++;
2058 }
2059 }
2060
2061 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2062 I2O_SGL_END;
2063 if (out)
2064 mb[off] |= I2O_SGL_DATA_OUT;
2065 } else {
2066 p = mb + off;
2067 nsegs = dm->dm_nsegs;
2068
2069 if (out)
2070 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2071 else
2072 flg = I2O_SGL_SIMPLE;
2073
2074 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2075 p[0] = (u_int32_t)ds->ds_len | flg;
2076 p[1] = (u_int32_t)ds->ds_addr;
2077 }
2078
2079 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2080 I2O_SGL_END;
2081 p[1] = (u_int32_t)ds->ds_addr;
2082 nsegs <<= 1;
2083 }
2084
2085 /* Fix up the transfer record, and sync the map. */
2086 ix->ix_flags = (out ? IX_OUT : IX_IN);
2087 ix->ix_size = xfersize;
2088 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2089 out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2090
2091 /*
2092 * Adjust the SGL offset and total message size fields. We don't
2093 * set IM_SGLOFFADJ: both paths above already terminate the SGL.
2094 */
2095 mb[0] += ((off << 4) + (nsegs << 16));
2096 return (0);
2097 }
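
/*
 * Illustrative layout of the PAGE_LIST element built above, for a
 * hypothetical 12KB transfer split across three discontiguous 4KB
 * pages:
 *
 *	mb[off+0]: 0x3000 | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
 *	           I2O_SGL_END (| I2O_SGL_DATA_OUT when writing)
 *	mb[off+1]: physical address of page 0
 *	mb[off+2]: physical address of page 1
 *	mb[off+3]: physical address of page 2
 */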
2098
2099 /*
2100 * Unmap all data transfers associated with a message wrapper.
2101 */
2102 void
2103 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2104 {
2105 struct iop_xfer *ix;
2106 int i;
2107
2108 #ifdef I2ODEBUG
2109 if (im->im_xfer[0].ix_size == 0)
2110 panic("iop_msg_unmap: no transfers mapped");
2111 #endif
2112
2113 for (ix = im->im_xfer, i = 0;;) {
2114 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2115 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2116 BUS_DMASYNC_POSTREAD);
2117 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2118
2119 /* Only the first DMA map is static. */
2120 if (i != 0)
2121 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2122 if ((++ix)->ix_size == 0)
2123 break;
2124 if (++i >= IOP_MAX_MSG_XFERS)
2125 break;
2126 }
2127 }
2128
2129 /*
2130 * Post a message frame to the IOP's inbound queue.
2131 */
2132 int
2133 iop_post(struct iop_softc *sc, u_int32_t *mb)
2134 {
2135 u_int32_t mfa;
2136 int s;
2137
2138 #ifdef I2ODEBUG
2139 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2140 panic("iop_post: frame too large");
2141 #endif
2142
2143 s = splbio();
2144
2145 /* Allocate a slot with the IOP. */
2146 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2147 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2148 splx(s);
2149 printf("%s: mfa not forthcoming\n",
2150 sc->sc_dv.dv_xname);
2151 return (EAGAIN);
2152 }
2153
2154 /* Perform reply buffer DMA synchronisation. */
2155 if (sc->sc_curib++ == 0)
2156 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2157 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2158
2159 /* Copy out the message frame. */
2160 bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2161 mb[0] >> 16);
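	/*
	 * (mb[0] >> 14) & ~3 is the frame length in bytes: the word
	 * count held in the top half of the header, times four.
	 */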
2162 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
2163 (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2164
2165 /* Post the MFA back to the IOP. */
2166 iop_outl(sc, IOP_REG_IFIFO, mfa);
2167
2168 splx(s);
2169 return (0);
2170 }
2171
2172 /*
2173 * Post a message to the IOP and deal with completion.
2174 */
2175 int
2176 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2177 {
2178 u_int32_t *mb;
2179 int rv, s;
2180
2181 mb = xmb;
2182
2183 /* Terminate the scatter/gather list chain. */
2184 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2185 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2186
2187 if ((rv = iop_post(sc, mb)) != 0)
2188 return (rv);
2189
2190 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2191 if ((im->im_flags & IM_POLL) != 0)
2192 iop_msg_poll(sc, im, timo);
2193 else
2194 iop_msg_wait(sc, im, timo);
2195
2196 s = splbio();
2197 if ((im->im_flags & IM_REPLIED) != 0) {
2198 if ((im->im_flags & IM_NOSTATUS) != 0)
2199 rv = 0;
2200 else if ((im->im_flags & IM_FAIL) != 0)
2201 rv = ENXIO;
2202 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2203 rv = EIO;
2204 else
2205 rv = 0;
2206 } else
2207 rv = EBUSY;
2208 splx(s);
2209 } else
2210 rv = 0;
2211
2212 return (rv);
2213 }
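
/*
 * Illustrative note: callers running before interrupts are reliable
 * (e.g. during autoconfiguration) allocate the wrapper with IM_POLL,
 * so completion is reaped here by iop_msg_poll(); process-context
 * callers use IM_WAIT and are woken from the reply path, e.g.
 *
 *	im = iop_msg_alloc(sc, IM_WAIT);
 *	rv = iop_msg_post(sc, im, &mf, 5000);
 *	iop_msg_free(sc, im);
 */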
2214
2215 /*
2216 * Spin until the specified message is replied to.
2217 */
2218 static void
2219 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2220 {
2221 u_int32_t rmfa;
2222 int s;
2223
2224 s = splbio();
2225
2226 /* Wait for completion. */
2227 for (timo *= 10; timo != 0; timo--) {
2228 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2229 /* Double read to account for IOP bug. */
2230 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2231 if (rmfa == IOP_MFA_EMPTY)
2232 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2233 if (rmfa != IOP_MFA_EMPTY) {
2234 iop_handle_reply(sc, rmfa);
2235
2236 /*
2237 * Return the reply frame to the IOP's
2238 * outbound FIFO.
2239 */
2240 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2241 }
2242 }
2243 if ((im->im_flags & IM_REPLIED) != 0)
2244 break;
2245 DELAY(100);
2246 }
2247
2248 if (timo == 0) {
2249 #ifdef I2ODEBUG
2250 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2251 if (iop_status_get(sc, 1) != 0)
2252 printf("iop_msg_poll: unable to retrieve status\n");
2253 else
2254 printf("iop_msg_poll: IOP state = %d\n",
2255 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2256 #endif
2257 }
2258
2259 splx(s);
2260 }
2261
2262 /*
2263 * Sleep until the specified message is replied to.
2264 */
2265 static void
2266 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2267 {
2268 int s, rv;
2269
2270 s = splbio();
2271 if ((im->im_flags & IM_REPLIED) != 0) {
2272 splx(s);
2273 return;
2274 }
2275 rv = tsleep(im, PRIBIO, "iopmsg", mstohz(timo));
2276 splx(s);
2277
2278 #ifdef I2ODEBUG
2279 if (rv != 0) {
2280 printf("iop_msg_wait: tsleep() == %d\n", rv);
2281 if (iop_status_get(sc, 0) != 0)
2282 printf("iop_msg_wait: unable to retrieve status\n");
2283 else
2284 printf("iop_msg_wait: IOP state = %d\n",
2285 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2286 }
2287 #endif
2288 }
2289
2290 /*
2291 * Release an unused message frame back to the IOP's inbound FIFO.
2292 */
2293 static void
2294 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2295 {
2296
2297 /* Use the frame to issue a no-op. */
2298 iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2299 iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2300 iop_outl_msg(sc, mfa + 8, 0);
2301 iop_outl_msg(sc, mfa + 12, 0);
2302
2303 iop_outl(sc, IOP_REG_IFIFO, mfa);
2304 }
2305
2306 #ifdef I2ODEBUG
2307 /*
2308 * Dump a reply frame header.
2309 */
2310 static void
2311 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2312 {
2313 u_int function, detail;
2314 #ifdef I2OVERBOSE
2315 const char *statusstr;
2316 #endif
2317
2318 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2319 detail = le16toh(rb->detail);
2320
2321 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2322
2323 #ifdef I2OVERBOSE
2324 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2325 statusstr = iop_status[rb->reqstatus];
2326 else
2327 statusstr = "undefined error code";
2328
2329 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2330 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2331 #else
2332 printf("%s: function=0x%02x status=0x%02x\n",
2333 sc->sc_dv.dv_xname, function, rb->reqstatus);
2334 #endif
2335 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2336 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2337 le32toh(rb->msgtctx));
2338 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2339 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2340 (le32toh(rb->msgflags) >> 8) & 0xff);
2341 }
2342 #endif
2343
2344 /*
2345 * Dump a transport failure reply.
2346 */
2347 static void
2348 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2349 {
2350
2351 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2352
2353 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2354 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2355 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2356 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2357 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2358 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2359 }
2360
2361 /*
2362 * Translate an I2O ASCII field into a C string.
2363 */
2364 void
2365 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2366 {
2367 int hc, lc, i, nit;
2368
2369 dlen--;
2370 lc = 0;
2371 hc = 0;
2372 i = 0;
2373
2374 /*
2375 * DPT use NUL as a space, whereas AMI use it as a terminator. The
2376 * spec has nothing to say about it. Since AMI fields are usually
2377 * filled with junk after the terminator, treat NUL as a terminator
2378 * on everything except DPT hardware.
 */
2379 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2380
2381 while (slen-- != 0 && dlen-- != 0) {
2382 if (nit && *src == '\0')
2383 break;
2384 else if (*src <= 0x20 || *src >= 0x7f) {
2385 if (hc)
2386 dst[i++] = ' ';
2387 } else {
2388 hc = 1;
2389 dst[i++] = *src;
2390 lc = i;
2391 }
2392 src++;
2393 }
2394
2395 dst[lc] = '\0';
2396 }
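
/*
 * For example (hypothetical field contents), the 8-byte field
 * { 'A', 'M', 'I', 0, 0x7f, 'x', 'y', 'z' } from a non-DPT IOP yields
 * "AMI": the NUL stops the scan before the trailing junk.  On a DPT
 * IOP the NUL becomes a blank instead, and trailing blanks are
 * trimmed via lc.
 */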
2397
2398 /*
2399 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2400 */
2401 int
2402 iop_print_ident(struct iop_softc *sc, int tid)
2403 {
2404 struct {
2405 struct i2o_param_op_results pr;
2406 struct i2o_param_read_results prr;
2407 struct i2o_param_device_identity di;
2408 } __attribute__ ((__packed__)) p;
2409 char buf[32];
2410 int rv;
2411
2412 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2413 sizeof(p), NULL);
2414 if (rv != 0)
2415 return (rv);
2416
2417 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2418 sizeof(buf));
2419 printf(" <%s, ", buf);
2420 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2421 sizeof(buf));
2422 printf("%s, ", buf);
2423 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2424 printf("%s>", buf);
2425
2426 return (0);
2427 }
2428
2429 /*
2430 * Claim or unclaim the specified TID.
2431 */
2432 int
2433 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2434 int flags)
2435 {
2436 struct iop_msg *im;
2437 struct i2o_util_claim mf;
2438 int rv, func;
2439
2440 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2441 im = iop_msg_alloc(sc, IM_WAIT);
2442
2443 /* The CLAIM and CLAIM_RELEASE messages share an identical layout. */
2444 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2445 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2446 mf.msgictx = ii->ii_ictx;
2447 mf.msgtctx = im->im_tctx;
2448 mf.flags = flags;
2449
2450 rv = iop_msg_post(sc, im, &mf, 5000);
2451 iop_msg_free(sc, im);
2452 return (rv);
2453 }
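
/*
 * Illustrative sketch: a peripheral driver typically claims its target
 * as primary user once attached,
 *
 *	rv = iop_util_claim(sc, &sc->sc_ii, 0, I2O_UTIL_CLAIM_PRIMARY_USER);
 *
 * and undoes the claim at detach time by passing a non-zero `release'.
 * (The flag name is assumed from <dev/i2o/i2o.h>; sc_ii is a
 * hypothetical initiator member.)
 */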
2454
2455 /*
2456 * Perform an abort.
2457 */
2458 int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2459 int tctxabort, int flags)
2460 {
2461 struct iop_msg *im;
2462 struct i2o_util_abort mf;
2463 int rv;
2464
2465 im = iop_msg_alloc(sc, IM_WAIT);
2466
2467 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2468 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2469 mf.msgictx = ii->ii_ictx;
2470 mf.msgtctx = im->im_tctx;
2471 mf.flags = (func << 24) | flags;
2472 mf.tctxabort = tctxabort;
2473
2474 rv = iop_msg_post(sc, im, &mf, 5000);
2475 iop_msg_free(sc, im);
2476 return (rv);
2477 }
2478
2479 /*
2480 * Enable or disable reception of events for the specified device.
2481 */
2482 int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2483 {
2484 struct i2o_util_event_register mf;
2485
2486 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2487 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2488 mf.msgictx = ii->ii_ictx;
2489 mf.msgtctx = 0;
2490 mf.eventmask = mask;
2491
2492 /* This message is replied to only when events are signalled. */
2493 return (iop_post(sc, (u_int32_t *)&mf));
2494 }
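
/*
 * Illustrative sketch (the event mask bit is assumed from
 * <dev/i2o/i2o.h>): a driver enables state-change notification with
 *
 *	iop_util_eventreg(sc, &sc->sc_ii, I2O_EVENT_GEN_STATE_CHANGE);
 *
 * and disables reception again by registering a mask of zero.
 */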
2495
2496 int
2497 iopopen(dev_t dev, int flag, int mode, struct proc *p)
2498 {
2499 struct iop_softc *sc;
2500
2501 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2502 return (ENXIO);
2503 if ((sc->sc_flags & IOP_ONLINE) == 0)
2504 return (ENXIO);
2505 if ((sc->sc_flags & IOP_OPEN) != 0)
2506 return (EBUSY);
2507 sc->sc_flags |= IOP_OPEN;
2508
2509 return (0);
2510 }
2511
2512 int
2513 iopclose(dev_t dev, int flag, int mode, struct proc *p)
2514 {
2515 struct iop_softc *sc;
2516
2517 sc = device_lookup(&iop_cd, minor(dev));
2518 sc->sc_flags &= ~IOP_OPEN;
2519
2520 return (0);
2521 }
2522
2523 int
2524 iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
2525 {
2526 struct iop_softc *sc;
2527 struct iovec *iov;
2528 int rv, i;
2529
2530 if (securelevel >= 2)
2531 return (EPERM);
2532
2533 sc = device_lookup(&iop_cd, minor(dev));
2534
2535 switch (cmd) {
2536 case IOPIOCPT:
2537 return (iop_passthrough(sc, (struct ioppt *)data, p));
2538
2539 case IOPIOCGSTATUS:
2540 iov = (struct iovec *)data;
2541 i = sizeof(struct i2o_status);
2542 if (i > iov->iov_len)
2543 i = iov->iov_len;
2544 else
2545 iov->iov_len = i;
2546 if ((rv = iop_status_get(sc, 0)) == 0)
2547 rv = copyout(&sc->sc_status, iov->iov_base, i);
2548 return (rv);
2549
2550 case IOPIOCGLCT:
2551 case IOPIOCGTIDMAP:
2552 case IOPIOCRECONFIG:
2553 break;
2554
2555 default:
2556 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2557 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2558 #endif
2559 return (ENOTTY);
2560 }
2561
2562 if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2563 return (rv);
2564
2565 switch (cmd) {
2566 case IOPIOCGLCT:
2567 iov = (struct iovec *)data;
2568 i = le16toh(sc->sc_lct->tablesize) << 2;
2569 if (i > iov->iov_len)
2570 i = iov->iov_len;
2571 else
2572 iov->iov_len = i;
2573 rv = copyout(sc->sc_lct, iov->iov_base, i);
2574 break;
2575
2576 case IOPIOCRECONFIG:
2577 if ((rv = lockmgr(&sc->sc_conflock, LK_UPGRADE, NULL)) == 0)
2578 rv = iop_reconfigure(sc, 0);
2579 break;
2580
2581 case IOPIOCGTIDMAP:
2582 iov = (struct iovec *)data;
2583 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2584 if (i > iov->iov_len)
2585 i = iov->iov_len;
2586 else
2587 iov->iov_len = i;
2588 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2589 break;
2590 }
2591
2592 lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2593 return (rv);
2594 }
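
/*
 * Illustrative userland sketch matching the IOPIOCGSTATUS case above
 * (device node name assumed; error handling omitted):
 *
 *	struct i2o_status st;
 *	struct iovec iov = { &st, sizeof(st) };
 *	int fd = open("/dev/iop0", O_RDWR);
 *
 *	if (ioctl(fd, IOPIOCGSTATUS, &iov) == 0)
 *		printf("IOP state %d\n",
 *		    (int)((le32toh(st.segnumber) >> 16) & 0xff));
 */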
2595
2596 static int
2597 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2598 {
2599 struct iop_msg *im;
2600 struct i2o_msg *mf;
2601 struct ioppt_buf *ptb;
2602 int rv, i, mapped;
2603
2604 mf = NULL;
2605 im = NULL;
2606 mapped = 0;
2607
2608 if (pt->pt_msglen > sc->sc_framesize ||
2609 pt->pt_msglen < sizeof(struct i2o_msg) ||
2610 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2611 pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2612 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2613 return (EINVAL);
2614
2615 for (i = 0; i < pt->pt_nbufs; i++)
2616 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2617 rv = ENOMEM;
2618 goto bad;
2619 }
2620
2621 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2622 if (mf == NULL)
2623 return (ENOMEM);
2624
2625 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2626 goto bad;
2627
2628 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2629 im->im_rb = (struct i2o_reply *)mf;
2630 mf->msgictx = IOP_ICTX;
2631 mf->msgtctx = im->im_tctx;
2632
2633 for (i = 0; i < pt->pt_nbufs; i++) {
2634 ptb = &pt->pt_bufs[i];
2635 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2636 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2637 if (rv != 0)
2638 goto bad;
2639 mapped = 1;
2640 }
2641
2642 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2643 goto bad;
2644
2645 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2646 if (i > sc->sc_framesize)
2647 i = sc->sc_framesize;
2648 if (i > pt->pt_replylen)
2649 i = pt->pt_replylen;
2650 rv = copyout(im->im_rb, pt->pt_reply, i);
2651
2652 bad:
2653 if (mapped != 0)
2654 iop_msg_unmap(sc, im);
2655 if (im != NULL)
2656 iop_msg_free(sc, im);
2657 if (mf != NULL)
2658 free(mf, M_DEVBUF);
2659 return (rv);
2660 }
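
/*
 * Illustrative sketch of a matching IOPIOCPT consumer (all values
 * hypothetical; field names from <dev/i2o/iopio.h>):
 *
 *	struct ioppt pt;
 *
 *	pt.pt_msg = mf;			message frame built by the caller
 *	pt.pt_msglen = msglen;		at least sizeof(struct i2o_msg)
 *	pt.pt_reply = rbuf;
 *	pt.pt_replylen = sizeof(rbuf);
 *	pt.pt_timo = 10000;		1000..300000ms accepted above
 *	pt.pt_nbufs = 1;
 *	pt.pt_bufs[0].ptb_data = buf;
 *	pt.pt_bufs[0].ptb_datalen = buflen;
 *	pt.pt_bufs[0].ptb_out = 1;	data flows out to the IOP
 *
 *	rv = ioctl(fd, IOPIOCPT, &pt);
 */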
2661