/*	$NetBSD: iop.c,v 1.65 2007/06/16 12:32:12 ad Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.65 2007/06/16 12:32:12 ad Exp $");

#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#include "locators.h"

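/*
 * Busy-wait for up to `ms' milliseconds for `cond' to become true, testing
 * it once every 100 microseconds.  Note that `cond' is re-evaluated on
 * every iteration, so callers can (and do) use comma expressions to perform
 * a DMA sync before each test.
 */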
#define	POLL(ms, cond)				\
do {						\
	int xi;					\
	for (xi = (ms) * 10; xi; xi--) {	\
		if (cond)			\
			break;			\
		DELAY(100);			\
	}					\
} while (/* CONSTCOND */0);

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

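/*
 * Transaction context layout: the low IOP_TCTX_SHIFT bits hold the index
 * of the message wrapper in sc_ims, and the bits above hold a generation
 * number (see iop_msg_alloc()) used to catch stale or corrupt replies.
 */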
#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long	iop_ictxhash;
static void	*iop_sdh;
static struct	i2o_systab *iop_systab;
static int	iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

static struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
	const char *ic_caption;
} const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		"executive"
	},
	{
		I2O_CLASS_DDM,
		0,
		"device driver module"
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"random block storage"
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"sequential storage"
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		"LAN port"
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		"WAN port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		"fibrechannel port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		"fibrechannel peripheral"
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		"SCSI peripheral"
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		"ATE port"
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		"ATE peripheral"
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		"floppy controller"
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		"floppy device"
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		"bus adapter port"
	},
};

static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};

static inline u_int32_t iop_inl(struct iop_softc *, int);
static inline void iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t iop_inl_msg(struct iop_softc *, int);
static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *, size_t);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);

static void	iop_adjqparam(struct iop_softc *, int);
static void	iop_create_reconf_thread(void *);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
				struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_sys_enable(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	mutex_init(&sc->sc_intrlock, MUTEX_DRIVER, IPL_VM);
	mutex_init(&sc->sc_conflock, MUTEX_DRIVER, IPL_NONE);
	cv_init(&sc->sc_confcv, "iopconf");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		printf("%s: cannot create scratch dmamap\n",
		    sc->sc_dv.dv_xname);
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		printf("%s: cannot alloc scratch dmamem\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Avoid syncing the reply map until it's set up. */
	sc->sc_curib = 0x123;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		printf("%s: not responding (get status)\n",
		    sc->sc_dv.dv_xname);
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
	printf("%s: mem  %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o  %04x %04x %08x\n", sc->sc_dv.dv_xname,
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
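	/* inboundmframesize is in 32-bit words; convert to bytes. */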
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		printf("%s: frame size too small (%d)\n",
		    sc->sc_dv.dv_xname, sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			printf("%s: couldn't create dmamap (%d)\n",
			    sc->sc_dv.dv_xname, rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
		cv_init(&im->im_cv, "iopmsg");
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		printf("%s: unable to init outbound FIFO\n",
		    sc->sc_dv.dv_xname);
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    sc->sc_dv.dv_xname, sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	return;

 bail_out3:
 	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
 bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;
	int locs[IOPCF_NLOCS];

	sc = device_private(self);
	mutex_enter(&sc->sc_conflock);

	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", sc->sc_dv.dv_xname);

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				printf("%s: unable to retrieve status\n",
				    sc->sc_dv.dv_xname);
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0) {
			mutex_exit(&sc->sc_conflock);
			return;
		}

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
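			/*
			 * IOP IDs are offset by 2; the low IDs are
			 * presumably reserved (iop_systab_set() below uses
			 * the same offset).  This is inferred from the
			 * code, not taken from the I2O spec.
			 */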
			ste->iopid = device_unit(&iop->sc_dv) + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
		mutex_exit(&sc->sc_conflock);
		return;
	}
	if (iop_sys_enable(sc) != 0) {
		printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n",
		    sc->sc_dv.dv_xname);
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	locs[IOPCF_TID] = I2O_TID_IOP;
	config_found_sm_loc(self, "iop", locs, &ia, iop_print,
	    config_stdsubmatch);

	/*
	 * Start device configuration.
	 */
	if ((rv = iop_reconfigure(sc, 0)) == -1)
		printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);

	mutex_exit(&sc->sc_conflock);

	if (rv == 0)
		kthread_create(iop_create_reconf_thread, sc);
}

/*
 * Create the reconfiguration thread.  Called after the standard kernel
 * threads have been created.
 */
static void
iop_create_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	int rv;

	sc = cookie;
	mutex_enter(&sc->sc_conflock);
	sc->sc_flags |= IOP_ONLINE;
	mutex_exit(&sc->sc_conflock);

	rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
	    "%s", sc->sc_dv.dv_xname);
	if (rv != 0) {
		printf("%s: unable to create reconfiguration thread (%d)\n",
		    sc->sc_dv.dv_xname, rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct lwp *l;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;
	l = curlwp;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    sc->sc_dv.dv_xname, chgind));

		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));

		mutex_enter(&sc->sc_conflock);
		if (rv == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
		}
		(void)cv_timedwait(&sc->sc_confcv, &sc->sc_conflock, hz * 5);
		mutex_exit(&sc->sc_conflock);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	KASSERT(mutex_owned(&sc->sc_conflock));

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				printf("%s: bus scan failed\n",
				    sc->sc_dv.dv_xname);
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
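		/*
		 * The change indicator increases monotonically, so a value
		 * at or below the one we last processed means the LCT we
		 * hold is still current.
		 */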
		DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	}
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
			config_detach(ii->ii_dv, DETACH_FORCE);
			continue;
		}

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			printf("%s: %s failed reconfigure (%d)\n",
			    sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    ii->ii_dv->dv_xname);
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(&sc->sc_dv, "iop", locs, &ia,
		    iop_print, config_stdsubmatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo, size_t l)
{
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		snprintf(devinfo, l, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, l);
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

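	/*
	 * The IOP writes 0xff to the sync byte once the status block is
	 * complete.  Poll for up to 2.5 seconds (25 checks, 100ms apart),
	 * re-syncing the DMA map before each check.
	 */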
	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			kpause("iopstat", false, hz / 10, NULL);
	}

	if (st->syncbyte != 0xff) {
		printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
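	/*
	 * Append a single SIMPLE element describing the status word, then
	 * grow the message size field (the upper 16 bits of the first
	 * word, counted in 32-bit words) by the two words just added.
	 */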
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		printf("%s: outbound FIFO init failed (%d)\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA alloc = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			printf("%s: DMA map = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			printf("%s: DMA create = %d\n", sc->sc_dv.dv_xname,
			    rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			printf("%s: DMA load = %d\n", sc->sc_dv.dv_xname, rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;

		/* Now safe to sync the reply map. */
		sc->sc_curib = 0;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	PHOLD(curlwp);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	PRELE(curlwp);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
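	/*
	 * tablesize is in 32-bit words, and struct i2o_lct embeds the
	 * first entry, so add one entry's size back in before dividing.
	 */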
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Post a SYS_ENABLE message to the adapter.
 */
int
iop_sys_enable(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, 30000);
	if (rv == 0) {
		if ((im->im_flags & IM_FAIL) != 0)
			rv = ENXIO;
		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
			rv = 0;
		else
			rv = EIO;
	}

	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		PHOLD(curlwp);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		PRELE(curlwp);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    sc->sc_dv.dv_xname, tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
	      int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: FIELD_SET failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	PHOLD(curlwp);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
		    sc->sc_dv.dv_xname, tid, group);

	iop_msg_unmap(sc, im);
	PRELE(curlwp);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
		    sc->sc_dv.dv_xname, tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
	       int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (device_unit(&sc->sc_dv) + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv mem space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			printf("%s: can't alloc priv i/o space, err = %d\n",
			    sc->sc_dv.dv_xname, rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	PHOLD(curlwp);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	PRELE(curlwp);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		printf("%s: reset rejected, status 0x%x\n",
		    sc->sc_dv.dv_xname, le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		printf("%s: reset failed\n", sc->sc_dv.dv_xname);
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	cv_init(&ii->ii_cv, "iopevt");

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_REMOVE(ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);

	cv_destroy(&ii->ii_cv);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	KASSERT(mutex_owned(&sc->sc_intrlock));

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)((char *)sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			printf("%s: WARNING: bad ictx returned (%x)\n",
			    sc->sc_dv.dv_xname, ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
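		/*
		 * The low IOP_TCTX_SHIFT bits of the tctx index sc_ims;
		 * the generation bits above them must match what was
		 * stamped at allocation time, or the reply is stale.
		 */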
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
			    sc->sc_dv.dv_xname, tctx, im);
			if (im != NULL)
				printf("%s: flags=0x%08x tctx=0x%08x\n",
				    sc->sc_dv.dv_xname, im->im_flags,
				    im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", sc->sc_dv.dv_xname);
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;
		im->im_detstatus = le16toh(rb->detail);

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
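			/*
			 * The reply size is in 32-bit words in the top 16
			 * bits of msgflags; (>> 14) & ~3 converts it
			 * directly to a byte count.
			 */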
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			cv_broadcast(&im->im_cv);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
			if (ii != NULL) {
				mutex_spin_exit(&sc->sc_intrlock);
				(*ii->ii_intr)(ii->ii_dv, im, rb);
				mutex_spin_enter(&sc->sc_intrlock);
			}
		}
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		if (ii != NULL) {
			mutex_spin_exit(&sc->sc_intrlock);
			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
			mutex_spin_enter(&sc->sc_intrlock);
		}
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	mutex_spin_enter(&sc->sc_intrlock);

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) {
		mutex_spin_exit(&sc->sc_intrlock);
		return (0);
	}

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	mutex_spin_exit(&sc->sc_intrlock);
	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	mutex_spin_enter(&sc->sc_intrlock);
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);

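	/*
	 * Stamp the wrapper with a fresh generation number in the bits
	 * above IOP_TCTX_SHIFT; the low bits, which index the wrapper in
	 * sc_ims, never change.
	 */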
	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	mutex_spin_enter(&sc->sc_intrlock);
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
	    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
1998 if ((im->im_flags & IM_SGLOFFADJ) == 0) {
1999 mb[0] += (mb[0] >> 12) & 0xf0;
2000 im->im_flags |= IM_SGLOFFADJ;
2001 }
2002 mb[0] += (nsegs << 17);
2003 return (0);
2004
2005 bad:
2006 if (xn != 0)
2007 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2008 return (rv);
2009 }
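/*
 * A sketch of what iop_msg_map() leaves in the frame: each SIMPLE
 * element is a flag/length word followed by a physical address, e.g.
 * for a hypothetical two-segment transfer (example values only):
 *
 *	I2O_SGL_SIMPLE | 0x2000				length 0x2000
 *	0x01234000					segment address
 *	I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | 0x1000	length 0x1000
 *	0x00567000					segment address
 *
 * I2O_SGL_END is OR'ed into the final flag word by iop_msg_post().
 */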
2010
2011 /*
2012  * Map a block I/O data transfer.  This differs from iop_msg_map(): at
2013  * most one transfer is allowed per message, and PAGE_LIST addressing
2014  * may be used.  Write a scatter-gather list into the message frame.
2015 */
2016 int
2017 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
2018 void *xferaddr, int xfersize, int out)
2019 {
2020 bus_dma_segment_t *ds;
2021 bus_dmamap_t dm;
2022 struct iop_xfer *ix;
2023 u_int rv, i, nsegs, off, slen, tlen, flg;
2024 paddr_t saddr, eaddr;
2025 u_int32_t *p;
2026
2027 #ifdef I2ODEBUG
2028 if (xfersize == 0)
2029 panic("iop_msg_map_bio: null transfer");
2030 if (xfersize > IOP_MAX_XFER)
2031 panic("iop_msg_map_bio: transfer too large");
2032 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2033 panic("iop_msg_map_bio: SGLOFFADJ");
2034 #endif
2035
2036 ix = im->im_xfer;
2037 dm = ix->ix_map;
2038 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
2039 BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
2040 if (rv != 0)
2041 return (rv);
2042
2043 off = mb[0] >> 16;
2044 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
2045
2046 /*
2047 * If the transfer is highly fragmented and won't fit using SIMPLE
2048 * elements, use PAGE_LIST elements instead. SIMPLE elements are
2049 * potentially more efficient, both for us and the IOP.
2050 */
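	/*
	 * A purely illustrative capacity comparison, assuming 4KB
	 * pages: with 24 free words a SIMPLE SGL describes at most 12
	 * segments, while a PAGE_LIST spends one header word plus one
	 * word per page, i.e. up to 23 pages (92KB), no matter how
	 * fragmented the buffer is.
	 */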
2051 if (dm->dm_nsegs > nsegs) {
2052 nsegs = 1;
2053 p = mb + off + 1;
2054
2055 /* XXX This should be done with a bus_space flag. */
2056 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2057 slen = ds->ds_len;
2058 saddr = ds->ds_addr;
2059
2060 while (slen > 0) {
2061 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2062 tlen = min(eaddr - saddr, slen);
2063 slen -= tlen;
2064 *p++ = le32toh(saddr);
2065 saddr = eaddr;
2066 nsegs++;
2067 }
2068 }
2069
2070 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2071 I2O_SGL_END;
2072 if (out)
2073 mb[off] |= I2O_SGL_DATA_OUT;
2074 } else {
2075 p = mb + off;
2076 nsegs = dm->dm_nsegs;
2077
2078 if (out)
2079 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2080 else
2081 flg = I2O_SGL_SIMPLE;
2082
2083 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2084 p[0] = (u_int32_t)ds->ds_len | flg;
2085 p[1] = (u_int32_t)ds->ds_addr;
2086 }
2087
2088 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2089 I2O_SGL_END;
2090 p[1] = (u_int32_t)ds->ds_addr;
2091 nsegs <<= 1;
2092 }
2093
2094 /* Fix up the transfer record, and sync the map. */
2095 ix->ix_flags = (out ? IX_OUT : IX_IN);
2096 ix->ix_size = xfersize;
2097 	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2098 	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2099
2100 /*
2101 * Adjust the SGL offset and total message size fields. We don't
2102 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2103 */
2104 mb[0] += ((off << 4) + (nsegs << 16));
2105 return (0);
2106 }
2107
2108 /*
2109 * Unmap all data transfers associated with a message wrapper.
2110 */
2111 void
2112 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2113 {
2114 struct iop_xfer *ix;
2115 int i;
2116
2117 #ifdef I2ODEBUG
2118 if (im->im_xfer[0].ix_size == 0)
2119 panic("iop_msg_unmap: no transfers mapped");
2120 #endif
2121
2122 for (ix = im->im_xfer, i = 0;;) {
2123 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2124 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2125 BUS_DMASYNC_POSTREAD);
2126 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2127
2128 /* Only the first DMA map is static. */
2129 if (i != 0)
2130 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2131 if ((++ix)->ix_size == 0)
2132 break;
2133 if (++i >= IOP_MAX_MSG_XFERS)
2134 break;
2135 }
2136 }
2137
2138 /*
2139 * Post a message frame to the IOP's inbound queue.
2140 */
2141 int
2142 iop_post(struct iop_softc *sc, u_int32_t *mb)
2143 {
2144 u_int32_t mfa;
2145
2146 #ifdef I2ODEBUG
2147 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2148 panic("iop_post: frame too large");
2149 #endif
2150
2151 mutex_spin_enter(&sc->sc_intrlock);
2152
2153 /* Allocate a slot with the IOP. */
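	/*
	 * The inbound FIFO read yields the offset of a free message
	 * frame (an MFA), or IOP_MFA_EMPTY if none is available; as
	 * with the outbound FIFO in iop_msg_poll(), the read is
	 * retried once to paper over IOPs that spuriously report an
	 * empty FIFO on the first access.
	 */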
2154 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2155 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2156 mutex_spin_exit(&sc->sc_intrlock);
2157 printf("%s: mfa not forthcoming\n",
2158 sc->sc_dv.dv_xname);
2159 return (EAGAIN);
2160 }
2161
2162 /* Perform reply buffer DMA synchronisation. */
2163 if (sc->sc_curib++ == 0)
2164 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2165 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2166
2167 /* Copy out the message frame. */
2168 bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2169 mb[0] >> 16);
2170 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
2171 (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2172
2173 /* Post the MFA back to the IOP. */
2174 iop_outl(sc, IOP_REG_IFIFO, mfa);
2175
2176 mutex_spin_exit(&sc->sc_intrlock);
2177 return (0);
2178 }
2179
2180 /*
2181 * Post a message to the IOP and deal with completion.
2182 */
2183 int
2184 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2185 {
2186 u_int32_t *mb;
2187 int rv;
2188
2189 mb = xmb;
2190
2191 /* Terminate the scatter/gather list chain. */
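	/*
	 * The final SIMPLE element's flag word sits two words from the
	 * end of the frame: mb[0] >> 16 is the frame length in words,
	 * and each element is a flag/length word plus an address word.
	 */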
2192 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2193 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2194
2195 if ((rv = iop_post(sc, mb)) != 0)
2196 return (rv);
2197
2198 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2199 if ((im->im_flags & IM_POLL) != 0)
2200 iop_msg_poll(sc, im, timo);
2201 else
2202 iop_msg_wait(sc, im, timo);
2203
2204 mutex_spin_enter(&sc->sc_intrlock);
2205 if ((im->im_flags & IM_REPLIED) != 0) {
2206 if ((im->im_flags & IM_NOSTATUS) != 0)
2207 rv = 0;
2208 else if ((im->im_flags & IM_FAIL) != 0)
2209 rv = ENXIO;
2210 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2211 rv = EIO;
2212 else
2213 rv = 0;
2214 } else
2215 rv = EBUSY;
2216 mutex_spin_exit(&sc->sc_intrlock);
2217 } else
2218 rv = 0;
2219
2220 return (rv);
2221 }
2222
2223 /*
2224 * Spin until the specified message is replied to.
2225 */
2226 static void
2227 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2228 {
2229 u_int32_t rmfa;
2230
2231 mutex_spin_enter(&sc->sc_intrlock);
2232
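	/* timo is in milliseconds; poll at 100us intervals, 10 per ms. */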
2233 for (timo *= 10; timo != 0; timo--) {
2234 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2235 /* Double read to account for IOP bug. */
2236 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2237 if (rmfa == IOP_MFA_EMPTY)
2238 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2239 if (rmfa != IOP_MFA_EMPTY) {
2240 iop_handle_reply(sc, rmfa);
2241
2242 /*
2243 * Return the reply frame to the IOP's
2244 * outbound FIFO.
2245 */
2246 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2247 }
2248 }
2249 if ((im->im_flags & IM_REPLIED) != 0)
2250 break;
2251 mutex_spin_exit(&sc->sc_intrlock);
2252 DELAY(100);
2253 mutex_spin_enter(&sc->sc_intrlock);
2254 }
2255
2256 if (timo == 0) {
2257 #ifdef I2ODEBUG
2258 printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2259 if (iop_status_get(sc, 1) != 0)
2260 printf("iop_msg_poll: unable to retrieve status\n");
2261 else
2262 printf("iop_msg_poll: IOP state = %d\n",
2263 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2264 #endif
2265 }
2266
2267 mutex_spin_exit(&sc->sc_intrlock);
2268 }
2269
2270 /*
2271 * Sleep until the specified message is replied to.
2272 */
2273 static void
2274 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2275 {
2276 int rv;
2277
2278 mutex_spin_enter(&sc->sc_intrlock);
2279 if ((im->im_flags & IM_REPLIED) != 0) {
2280 mutex_spin_exit(&sc->sc_intrlock);
2281 return;
2282 }
2283 rv = cv_timedwait(&im->im_cv, &sc->sc_intrlock, mstohz(timo));
2284 mutex_spin_exit(&sc->sc_intrlock);
2285
2286 #ifdef I2ODEBUG
2287 if (rv != 0) {
2288 		printf("iop_msg_wait: cv_timedwait() == %d\n", rv);
2289 if (iop_status_get(sc, 0) != 0)
2290 printf("iop_msg_wait: unable to retrieve status\n");
2291 else
2292 printf("iop_msg_wait: IOP state = %d\n",
2293 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2294 }
2295 #endif
2296 }
2297
2298 /*
2299  * Release an unused message frame back to the IOP's inbound FIFO.
2300 */
2301 static void
2302 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2303 {
2304
2305 /* Use the frame to issue a no-op. */
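	/*
	 * The four words written below form a minimal frame: word 0
	 * carries the I2O version and the length (4 words) in its
	 * upper half, word 1 addresses UTIL_NOP to the IOP's own TID,
	 * and words 2-3 are the (unused) initiator and transaction
	 * contexts.
	 */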
2306 iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2307 iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2308 iop_outl_msg(sc, mfa + 8, 0);
2309 iop_outl_msg(sc, mfa + 12, 0);
2310
2311 iop_outl(sc, IOP_REG_IFIFO, mfa);
2312 }
2313
2314 #ifdef I2ODEBUG
2315 /*
2316 * Dump a reply frame header.
2317 */
2318 static void
2319 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2320 {
2321 u_int function, detail;
2322 const char *statusstr;
2323
2324 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2325 detail = le16toh(rb->detail);
2326
2327 printf("%s: reply:\n", sc->sc_dv.dv_xname);
2328
2329 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2330 statusstr = iop_status[rb->reqstatus];
2331 else
2332 statusstr = "undefined error code";
2333
2334 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2335 sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2336 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2337 sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2338 le32toh(rb->msgtctx));
2339 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2340 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2341 (le32toh(rb->msgflags) >> 8) & 0xff);
2342 }
2343 #endif
2344
2345 /*
2346 * Dump a transport failure reply.
2347 */
2348 static void
2349 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2350 {
2351
2352 printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2353
2354 printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2355 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2356 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2357 sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2358 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2359 sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2360 }
2361
2362 /*
2363 * Translate an I2O ASCII field into a C string.
2364 */
2365 void
2366 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2367 {
2368 int hc, lc, i, nit;
2369
2370 dlen--;
2371 lc = 0;
2372 hc = 0;
2373 i = 0;
2374
2375 /*
2376 * DPT use NUL as a space, whereas AMI use it as a terminator. The
2377 * spec has nothing to say about it. Since AMI fields are usually
2378  * filled with junk after the terminator, treat NUL as a terminator
2379  * unless the IOP is a DPT board.
 */
2380 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2381
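	/*
	 * For instance (hypothetical field contents), "RAID-5\0\xff"
	 * from a non-DPT board yields "RAID-5", the NUL acting as a
	 * terminator; on a DPT board the NUL would count as blank
	 * space instead.  lc marks the end of the last printable run,
	 * so trailing blanks are trimmed, and hc suppresses leading
	 * ones.
	 */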
2382 while (slen-- != 0 && dlen-- != 0) {
2383 if (nit && *src == '\0')
2384 break;
2385 else if (*src <= 0x20 || *src >= 0x7f) {
2386 if (hc)
2387 dst[i++] = ' ';
2388 } else {
2389 hc = 1;
2390 dst[i++] = *src;
2391 lc = i;
2392 }
2393 src++;
2394 }
2395
2396 dst[lc] = '\0';
2397 }
2398
2399 /*
2400 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2401 */
2402 int
2403 iop_print_ident(struct iop_softc *sc, int tid)
2404 {
2405 struct {
2406 struct i2o_param_op_results pr;
2407 struct i2o_param_read_results prr;
2408 struct i2o_param_device_identity di;
2409 } __attribute__ ((__packed__)) p;
2410 char buf[32];
2411 int rv;
2412
2413 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2414 sizeof(p), NULL);
2415 if (rv != 0)
2416 return (rv);
2417
2418 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2419 sizeof(buf));
2420 printf(" <%s, ", buf);
2421 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2422 sizeof(buf));
2423 printf("%s, ", buf);
2424 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2425 printf("%s>", buf);
2426
2427 return (0);
2428 }
2429
2430 /*
2431 * Claim or unclaim the specified TID.
2432 */
2433 int
2434 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2435 int flags)
2436 {
2437 struct iop_msg *im;
2438 struct i2o_util_claim mf;
2439 int rv, func;
2440
2441 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2442 im = iop_msg_alloc(sc, IM_WAIT);
2443
2444 /* We can use the same structure, as they're identical. */
2445 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2446 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2447 mf.msgictx = ii->ii_ictx;
2448 mf.msgtctx = im->im_tctx;
2449 mf.flags = flags;
2450
2451 rv = iop_msg_post(sc, im, &mf, 5000);
2452 iop_msg_free(sc, im);
2453 return (rv);
2454 }
2455
2456 /*
2457 * Perform an abort.
2458 */
2459 int
2460 iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
     int tctxabort, int flags)
2461 {
2462 struct iop_msg *im;
2463 struct i2o_util_abort mf;
2464 int rv;
2465
2466 im = iop_msg_alloc(sc, IM_WAIT);
2467
2468 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2469 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2470 mf.msgictx = ii->ii_ictx;
2471 mf.msgtctx = im->im_tctx;
2472 mf.flags = (func << 24) | flags;
2473 mf.tctxabort = tctxabort;
2474
2475 rv = iop_msg_post(sc, im, &mf, 5000);
2476 iop_msg_free(sc, im);
2477 return (rv);
2478 }
2479
2480 /*
2481 * Enable or disable reception of events for the specified device.
2482 */
2483 int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2484 {
2485 struct i2o_util_event_register mf;
2486
2487 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2488 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2489 mf.msgictx = ii->ii_ictx;
2490 mf.msgtctx = 0;
2491 mf.eventmask = mask;
2492
2493 /* This message is replied to only when events are signalled. */
2494 return (iop_post(sc, (u_int32_t *)&mf));
2495 }
2496
2497 int
2498 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
2499 {
2500 struct iop_softc *sc;
2501
2502 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2503 return (ENXIO);
2504 if ((sc->sc_flags & IOP_ONLINE) == 0)
2505 return (ENXIO);
2506 if ((sc->sc_flags & IOP_OPEN) != 0)
2507 return (EBUSY);
2508 sc->sc_flags |= IOP_OPEN;
2509
2510 return (0);
2511 }
2512
2513 int
2514 iopclose(dev_t dev, int flag, int mode, struct lwp *l)
2516 {
2517 struct iop_softc *sc;
2518
2519 sc = device_lookup(&iop_cd, minor(dev));
2520 sc->sc_flags &= ~IOP_OPEN;
2521
2522 return (0);
2523 }
2524
2525 int
2526 iopioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
2527 {
2528 struct iop_softc *sc;
2529 struct iovec *iov;
2530 int rv, i;
2531
2532 sc = device_lookup(&iop_cd, minor(dev));
2533 rv = 0;
2534
2535 switch (cmd) {
2536 case IOPIOCPT:
2537 rv = kauth_authorize_device_passthru(l->l_cred, dev,
2538 KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
2539 if (rv)
2540 return (rv);
2541
2542 return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));
2543
2544 case IOPIOCGSTATUS:
2545 iov = (struct iovec *)data;
2546 i = sizeof(struct i2o_status);
2547 if (i > iov->iov_len)
2548 i = iov->iov_len;
2549 else
2550 iov->iov_len = i;
2551 if ((rv = iop_status_get(sc, 0)) == 0)
2552 rv = copyout(&sc->sc_status, iov->iov_base, i);
2553 return (rv);
2554
2555 case IOPIOCGLCT:
2556 case IOPIOCGTIDMAP:
2557 case IOPIOCRECONFIG:
2558 break;
2559
2560 default:
2561 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2562 printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2563 #endif
2564 return (ENOTTY);
2565 }
2566
2567 mutex_enter(&sc->sc_conflock);
2568
2569 switch (cmd) {
2570 case IOPIOCGLCT:
2571 iov = (struct iovec *)data;
2572 i = le16toh(sc->sc_lct->tablesize) << 2;
2573 if (i > iov->iov_len)
2574 i = iov->iov_len;
2575 else
2576 iov->iov_len = i;
2577 rv = copyout(sc->sc_lct, iov->iov_base, i);
2578 break;
2579
2580 case IOPIOCRECONFIG:
2581 rv = iop_reconfigure(sc, 0);
2582 break;
2583
2584 case IOPIOCGTIDMAP:
2585 iov = (struct iovec *)data;
2586 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2587 if (i > iov->iov_len)
2588 i = iov->iov_len;
2589 else
2590 iov->iov_len = i;
2591 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2592 break;
2593 }
2594
2595 mutex_exit(&sc->sc_conflock);
2596 return (rv);
2597 }
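/*
 * Illustrative userland use of IOPIOCGSTATUS (a hypothetical program;
 * the device node name is an assumption):
 *
 *	struct i2o_status st;
 *	struct iovec iov = { .iov_base = &st, .iov_len = sizeof(st) };
 *	int fd;
 *
 *	if ((fd = open("/dev/iop0", O_RDWR)) != -1 &&
 *	    ioctl(fd, IOPIOCGSTATUS, &iov) == 0)
 *		printf("IOP state %d\n",
 *		    (int)((le32toh(st.segnumber) >> 16) & 0xff));
 */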
2598
2599 static int
2600 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2601 {
2602 struct iop_msg *im;
2603 struct i2o_msg *mf;
2604 struct ioppt_buf *ptb;
2605 int rv, i, mapped;
2606
2607 mf = NULL;
2608 im = NULL;
2609 	mapped = 0;
2610
2611 if (pt->pt_msglen > sc->sc_framesize ||
2612 pt->pt_msglen < sizeof(struct i2o_msg) ||
2613 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2614 pt->pt_nbufs < 0 ||
2615 #if 0
2616 pt->pt_replylen < 0 ||
2617 #endif
2618 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2619 return (EINVAL);
2620
2621 for (i = 0; i < pt->pt_nbufs; i++)
2622 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2623 rv = ENOMEM;
2624 goto bad;
2625 }
2626
2627 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2628 if (mf == NULL)
2629 return (ENOMEM);
2630
2631 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2632 goto bad;
2633
2634 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2635 im->im_rb = (struct i2o_reply *)mf;
2636 mf->msgictx = IOP_ICTX;
2637 mf->msgtctx = im->im_tctx;
2638
2639 for (i = 0; i < pt->pt_nbufs; i++) {
2640 ptb = &pt->pt_bufs[i];
2641 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2642 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2643 if (rv != 0)
2644 goto bad;
2645 mapped = 1;
2646 }
2647
2648 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2649 goto bad;
2650
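	/*
	 * The reply length comes from the size field of the reply's
	 * msgflags word: bits 16-31 hold the length in 32-bit words,
	 * so (msgflags >> 14) & ~3 converts it straight to bytes.
	 */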
2651 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2652 if (i > sc->sc_framesize)
2653 i = sc->sc_framesize;
2654 if (i > pt->pt_replylen)
2655 i = pt->pt_replylen;
2656 rv = copyout(im->im_rb, pt->pt_reply, i);
2657
2658 bad:
2659 if (mapped != 0)
2660 iop_msg_unmap(sc, im);
2661 if (im != NULL)
2662 iop_msg_free(sc, im);
2663 if (mf != NULL)
2664 free(mf, M_DEVBUF);
2665 return (rv);
2666 }
2667