/*	$NetBSD: iop.c,v 1.68.12.2 2008/06/02 13:23:17 mjf Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.68.12.2 2008/06/02 13:23:17 mjf Exp $");

#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>
#include <sys/kauth.h>
#include <sys/bus.h>

#include <uvm/uvm_extern.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#include "locators.h"

#define	POLL(ms, cond)				\
do {						\
	int xi;					\
	for (xi = (ms) * 10; xi; xi--) {	\
		if (cond)			\
			break;			\
		DELAY(100);			\
	}					\
} while (/* CONSTCOND */0);
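
/*
 * Usage sketch (mirrors the reset handshake in iop_reset() below):
 * busy-wait for up to 2500ms, re-testing the condition every 100us,
 * for the IOP to update a status word:
 *
 *	POLL(2500, *sw != 0);
 */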

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)
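/*
 * The "+ 1" above allows for a transfer buffer that is not page
 * aligned and therefore spans one additional page.
 */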

#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)
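
/*
 * Transaction context layout: the low IOP_TCTX_SHIFT bits index the
 * message wrapper in sc_ims, and the remaining bits carry a generation
 * number used to catch stale or duplicate replies; see iop_msg_alloc()
 * and iop_handle_reply().
 */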

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static void *iop_sdh;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

static struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
	const char *ic_caption;
} const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		"executive"
	},
	{
		I2O_CLASS_DDM,
		0,
		"device driver module"
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"random block storage"
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"sequential storage"
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		"LAN port"
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		"WAN port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		"fibrechannel port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		"fibrechannel peripheral"
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		"SCSI peripheral"
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		"ATE port"
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		"ATE peripheral"
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		"floppy controller"
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		"floppy device"
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		"bus adapter port"
	},
};

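/*
 * Human-readable names for the I2O request status codes (I2O_STATUS_*),
 * which index this table in numerical order.
 */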
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};

static inline u_int32_t iop_inl(struct iop_softc *, int);
static inline void iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t iop_inl_msg(struct iop_softc *, int);
static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *, size_t);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);

static void	iop_adjqparam(struct iop_softc *, int);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
				struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_sys_enable(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs, maj;
	u_int32_t mask;
	char ident[64];
	device_t dev = &sc->sc_dv;

	state = 0;

	printf("I2O adapter");

	mutex_init(&sc->sc_intrlock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&sc->sc_conflock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_confcv, "iopconf");

	if (iop_ictxhashtbl == NULL) {
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    true, &iop_ictxhash);
	}

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		aprint_error_dev(&sc->sc_dv, "cannot create scratch dmamap\n");
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(&sc->sc_dv, "cannot alloc scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		aprint_error_dev(&sc->sc_dv, "cannot map scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(&sc->sc_dv, "cannot load scratch dmamap\n");
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Avoid syncing the reply map until it's set up. */
	sc->sc_curib = 0x123;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		aprint_error_dev(&sc->sc_dv, "not responding (reset)\n");
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		aprint_error_dev(&sc->sc_dv, "not responding (get status)\n");
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n",
	    device_xname(&sc->sc_dv),
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", device_xname(&sc->sc_dv));
	printf("%s: mem  %04x %04x %08x\n", device_xname(&sc->sc_dv),
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o  %04x %04x %08x\n", device_xname(&sc->sc_dv),
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		aprint_error_dev(&sc->sc_dv, "frame size too small (%d)\n",
		    sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		aprint_error_dev(&sc->sc_dv, "memory allocation failure\n");
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv,
			    "couldn't create dmamap (%d)\n", rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
		cv_init(&im->im_cv, "iopmsg");
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to init outbound FIFO\n");
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", device_xname(&sc->sc_dv),
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    device_xname(&sc->sc_dv), sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	maj = cdevsw_lookup_major(&iop_cdevsw);
	device_register_name(makedev(maj, device_unit(dev)), dev, true,
	    DEV_OTHER, device_xname(dev));

	return;

 bail_out3:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
 bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;
	int locs[IOPCF_NLOCS];

	sc = device_private(self);
	mutex_enter(&sc->sc_conflock);

	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", device_xname(&sc->sc_dv));

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n",
		    device_xname(&sc->sc_dv));
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				aprint_error_dev(&iop->sc_dv,
				    "unable to retrieve status\n");
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0) {
			mutex_exit(&sc->sc_conflock);
			return;
		}

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);
		if (iop_systab == NULL) {
			mutex_exit(&sc->sc_conflock);
			return;
		}

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = device_unit(&iop->sc_dv) + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to set system table\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}
	if (iop_sys_enable(sc) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to enable system\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		aprint_error_dev(&sc->sc_dv,
		    "unable to register for events\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	locs[IOPCF_TID] = I2O_TID_IOP;
	config_found_sm_loc(self, "iop", locs, &ia, iop_print,
	    config_stdsubmatch);

	/*
	 * Start device configuration.
	 */
	if ((rv = iop_reconfigure(sc, 0)) != 0)
		aprint_error_dev(&sc->sc_dv, "configure failed (%d)\n", rv);

	sc->sc_flags |= IOP_ONLINE;
	rv = kthread_create(PRI_NONE, 0, NULL, iop_reconf_thread, sc,
	    &sc->sc_reconf_thread, "%s", device_xname(&sc->sc_dv));
	mutex_exit(&sc->sc_conflock);
	if (rv != 0) {
		aprint_error_dev(&sc->sc_dv,
		    "unable to create reconfiguration thread (%d)\n", rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct lwp *l;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;
	l = curlwp;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    device_xname(&sc->sc_dv), chgind));

		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    device_xname(&sc->sc_dv), le32toh(lct.changeindicator),
		    rv));

		mutex_enter(&sc->sc_conflock);
		if (rv == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
		}
		(void)cv_timedwait(&sc->sc_confcv, &sc->sc_conflock, hz * 5);
		mutex_exit(&sc->sc_conflock);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	KASSERT(mutex_owned(&sc->sc_conflock));

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n",
			    device_xname(&sc->sc_dv), tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				aprint_error_dev(&sc->sc_dv,
				    "bus scan failed\n");
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n",
		    device_xname(&sc->sc_dv)));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", device_xname(&sc->sc_dv),
	    sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", device_xname(&sc->sc_dv)));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", device_xname(&sc->sc_dv)));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (sc->sc_tidmap == NULL)
		return (ENOMEM);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	}
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
			config_detach(ii->ii_dv, DETACH_FORCE);
			continue;
		}

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			aprint_error_dev(&sc->sc_dv,
			    "%s failed reconfigure (%d)\n",
			    device_xname(ii->ii_dv), rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    device_xname(ii->ii_dv));
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(&sc->sc_dv, "iop", locs, &ia,
		    iop_print, config_stdsubmatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, device_xname(dv));
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo, size_t l)
{
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		snprintf(devinfo, l, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, l);
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

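	/*
	 * Poll for up to ~2.5 seconds (25 iterations of 100ms).  Per the
	 * I2O spec the IOP sets the sync byte to 0xff once the status
	 * buffer has been filled in.
	 */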
	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			kpause("iopstat", false, hz / 10, NULL);
	}

	if (st->syncbyte != 0xff) {
		aprint_error_dev(&sc->sc_dv, "STATUS_GET timed out\n");
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
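	/*
	 * Append one SIMPLE element describing the status word, and grow
	 * the message size field (bits 16-31 of the first word) by the
	 * two words just added.
	 */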
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		aprint_error_dev(&sc->sc_dv,
		    "outbound FIFO init failed (%d)\n", le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA alloc = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA map = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA create = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA load = %d\n", rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;

		/* Now safe to sync the reply map. */
		sc->sc_curib = 0;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	uvm_lwp_hold(curlwp);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	uvm_lwp_rele(curlwp);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", device_xname(&sc->sc_dv),
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
	if (hrt == NULL)
		return (ENOMEM);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller must be prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
    u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Post a SYS_ENABLE message to the adapter.
 */
int
iop_sys_enable(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, 30000);
	if (rv == 0) {
		if ((im->im_flags & IM_FAIL) != 0)
			rv = ENXIO;
		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
			rv = 0;
		else
			rv = EIO;
	}

	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
    int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		uvm_lwp_hold(curlwp);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		uvm_lwp_rele(curlwp);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    device_xname(&sc->sc_dv), tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(&sc->sc_dv,
		    "FIELD_SET failed for tid %d group %d\n", tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	uvm_lwp_hold(curlwp);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(&sc->sc_dv,
		    "TABLE_CLEAR failed for tid %d group %d\n", tid, group);

	iop_msg_unmap(sc, im);
	uvm_lwp_rele(curlwp);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(&sc->sc_dv,
		    "ADD_ROW failed for tid %d group %d row %d\n",
		    tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
    int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (device_unit(&sc->sc_dv) + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv,
			    "can't alloc priv mem space, err = %d\n", rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv,
			    "can't alloc priv i/o space, err = %d\n", rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	uvm_lwp_hold(curlwp);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	uvm_lwp_rele(curlwp);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		aprint_error_dev(&sc->sc_dv, "reset rejected, status 0x%x\n",
		    le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		aprint_error_dev(&sc->sc_dv, "reset failed\n");
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	cv_init(&ii->ii_cv, "iopevt");

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_REMOVE(ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);

	cv_destroy(&ii->ii_cv);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	KASSERT(mutex_owned(&sc->sc_intrlock));

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)((char *)sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			aprint_error_dev(&sc->sc_dv,
			    "WARNING: bad ictx returned (%x)\n", ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			aprint_error_dev(&sc->sc_dv,
			    "WARNING: bad tctx returned (0x%08x, %p)\n",
			    tctx, im);
			if (im != NULL)
				aprint_error_dev(&sc->sc_dv,
				    "flags=0x%08x tctx=0x%08x\n",
				    im->im_flags, im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", device_xname(&sc->sc_dv));
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;
		im->im_detstatus = le16toh(rb->detail);

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			cv_broadcast(&im->im_cv);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
			if (ii != NULL) {
				mutex_spin_exit(&sc->sc_intrlock);
				(*ii->ii_intr)(ii->ii_dv, im, rb);
				mutex_spin_enter(&sc->sc_intrlock);
			}
		}
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		if (ii != NULL) {
			mutex_spin_exit(&sc->sc_intrlock);
			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
			mutex_spin_enter(&sc->sc_intrlock);
		}
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	mutex_spin_enter(&sc->sc_intrlock);

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) {
		mutex_spin_exit(&sc->sc_intrlock);
		return (0);
	}

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
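		/* Writing the MFA back to the outbound FIFO frees it. */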
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	mutex_spin_exit(&sc->sc_intrlock);
	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", device_xname(dv), event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	mutex_spin_enter(&sc->sc_intrlock);
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);

	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	mutex_spin_enter(&sc->sc_intrlock);
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
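	/*
	 * (In the first message word, bits 4-7 hold the SGL offset in
	 * 32-bit words and bits 16-31 the message size in words; each
	 * SIMPLE element written above occupies two words, hence the
	 * "nsegs << 17" below.)
	 */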
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
		mb[0] += (mb[0] >> 12) & 0xf0;
		im->im_flags |= IM_SGLOFFADJ;
	}
	mb[0] += (nsegs << 17);
	return (0);

 bad:
	if (xn != 0)
		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
	return (rv);
}

/*
 * Map a block I/O data transfer (different in that there's only one per
 * message maximum, and PAGE addressing may be used).  Write a scatter
 * gather list into the message frame.
 */
int
iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out)
{
	bus_dma_segment_t *ds;
	bus_dmamap_t dm;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, off, slen, tlen, flg;
	paddr_t saddr, eaddr;
	u_int32_t *p;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map_bio: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map_bio: transfer too large");
	if ((im->im_flags & IM_SGLOFFADJ) != 0)
		panic("iop_msg_map_bio: SGLOFFADJ");
#endif

	ix = im->im_xfer;
	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
	if (rv != 0)
		return (rv);

	off = mb[0] >> 16;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	/*
	 * If the transfer is highly fragmented and won't fit using SIMPLE
	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
	 * potentially more efficient, both for us and the IOP.
	 */
	if (dm->dm_nsegs > nsegs) {
		nsegs = 1;
		p = mb + off + 1;

		/* XXX This should be done with a bus_space flag. */
		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
			slen = ds->ds_len;
			saddr = ds->ds_addr;

			while (slen > 0) {
				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
				tlen = min(eaddr - saddr, slen);
				slen -= tlen;
				*p++ = le32toh(saddr);
				saddr = eaddr;
				nsegs++;
			}
		}

		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		if (out)
			mb[off] |= I2O_SGL_DATA_OUT;
	} else {
		p = mb + off;
		nsegs = dm->dm_nsegs;

		if (out)
			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
		else
			flg = I2O_SGL_SIMPLE;

		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
			p[0] = (u_int32_t)ds->ds_len | flg;
			p[1] = (u_int32_t)ds->ds_addr;
		}

		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
		    I2O_SGL_END;
		p[1] = (u_int32_t)ds->ds_addr;
2057 nsegs <<= 1;
2058 }
2059
2060 /* Fix up the transfer record, and sync the map. */
2061 ix->ix_flags = (out ? IX_OUT : IX_IN);
2062 ix->ix_size = xfersize;
2063 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2064 out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2065
2066 /*
2067 * Adjust the SGL offset and total message size fields. We don't
2068 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2069 */
2070 mb[0] += ((off << 4) + (nsegs << 16));
2071 return (0);
2072 }
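/*
 * Worked example of the capacity arithmetic above (illustrative): with
 * a 128-byte frame (32 words) and an SGL offset of 10 words, there is
 * room for ((32 - 10) >> 1) = 11 two-word SIMPLE elements, whereas the
 * PAGE_LIST fallback spends one word on its header element and can
 * then carry 32 - 10 - 1 = 21 page addresses.
 */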
2073
2074 /*
2075 * Unmap all data transfers associated with a message wrapper.
2076 */
2077 void
2078 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2079 {
2080 struct iop_xfer *ix;
2081 int i;
2082
2083 #ifdef I2ODEBUG
2084 if (im->im_xfer[0].ix_size == 0)
2085 panic("iop_msg_unmap: no transfers mapped");
2086 #endif
2087
2088 for (ix = im->im_xfer, i = 0;;) {
2089 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2090 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2091 BUS_DMASYNC_POSTREAD);
2092 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2093
2094 /* Only the first DMA map is static. */
2095 if (i != 0)
2096 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2097 if ((++ix)->ix_size == 0)
2098 break;
2099 if (++i >= IOP_MAX_MSG_XFERS)
2100 break;
2101 }
2102 }
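/*
 * Usage note (illustrative): once the reply has been processed (or the
 * post has failed), callers unmap before freeing the wrapper, as in
 * the sketch following iop_msg_map() above.
 */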
2103
2104 /*
2105 * Post a message frame to the IOP's inbound queue.
2106 */
2107 int
2108 iop_post(struct iop_softc *sc, u_int32_t *mb)
2109 {
2110 u_int32_t mfa;
2111
2112 #ifdef I2ODEBUG
2113 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2114 panic("iop_post: frame too large");
2115 #endif
2116
2117 mutex_spin_enter(&sc->sc_intrlock);
2118
2119 /* Allocate a slot with the IOP (read twice; the first read may spuriously return empty). */
2120 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2121 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2122 mutex_spin_exit(&sc->sc_intrlock);
2123 aprint_error_dev(&sc->sc_dv, "mfa not forthcoming\n");
2124 return (EAGAIN);
2125 }
2126
2127 /* Perform reply buffer DMA synchronisation. */
2128 if (sc->sc_curib++ == 0)
2129 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2130 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2131
2132 /* Copy out the message frame. */
2133 bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2134 mb[0] >> 16);
2135 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
2136 (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2137
2138 /* Post the MFA back to the IOP. */
2139 iop_outl(sc, IOP_REG_IFIFO, mfa);
2140
2141 mutex_spin_exit(&sc->sc_intrlock);
2142 return (0);
2143 }
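/*
 * Illustrative sketch of the header word validated above: the low byte
 * carries the I2O version and the SGL offset nibble, the second byte
 * the message flags, and the high 16 bits the frame size in 32-bit
 * words - hence the "mb[0] >> 16" extractions used throughout this
 * file.  The helper below is an assumption for illustration only.
 */
#if 0
static u_int32_t
iop_example_msghdr(u_int sizewords, u_int flags, u_int sgloff)
{
	return ((sizewords << 16) | (flags << 8) | (sgloff << 4) |
	    I2O_VERSION_11);
}
#endif	/* 0 */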
2144
2145 /*
2146 * Post a message to the IOP and deal with completion.
2147 */
2148 int
2149 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2150 {
2151 u_int32_t *mb;
2152 int rv;
2153
2154 mb = xmb;
2155
2156 /* Terminate the scatter/gather list chain. */
2157 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2158 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2159
2160 if ((rv = iop_post(sc, mb)) != 0)
2161 return (rv);
2162
2163 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2164 if ((im->im_flags & IM_POLL) != 0)
2165 iop_msg_poll(sc, im, timo);
2166 else
2167 iop_msg_wait(sc, im, timo);
2168
2169 mutex_spin_enter(&sc->sc_intrlock);
2170 if ((im->im_flags & IM_REPLIED) != 0) {
2171 if ((im->im_flags & IM_NOSTATUS) != 0)
2172 rv = 0;
2173 else if ((im->im_flags & IM_FAIL) != 0)
2174 rv = ENXIO;
2175 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2176 rv = EIO;
2177 else
2178 rv = 0;
2179 } else
2180 rv = EBUSY;
2181 mutex_spin_exit(&sc->sc_intrlock);
2182 } else
2183 rv = 0;
2184
2185 return (rv);
2186 }
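/*
 * Usage note (illustrative): callers that cannot sleep - e.g. during
 * autoconfiguration, before interrupts are up - allocate the wrapper
 * with IM_POLL rather than IM_WAIT, and the completion path above then
 * spins in iop_msg_poll() instead of sleeping in iop_msg_wait().
 */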
2187
2188 /*
2189 * Spin until the specified message is replied to.
2190 */
2191 static void
2192 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2193 {
2194 u_int32_t rmfa;
2195
2196 mutex_spin_enter(&sc->sc_intrlock);
2197
2198 for (timo *= 10; timo != 0; timo--) {
2199 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2200 /* Double read to account for IOP bug. */
2201 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2202 if (rmfa == IOP_MFA_EMPTY)
2203 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2204 if (rmfa != IOP_MFA_EMPTY) {
2205 iop_handle_reply(sc, rmfa);
2206
2207 /*
2208 * Return the reply frame to the IOP's
2209 * outbound FIFO.
2210 */
2211 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2212 }
2213 }
2214 if ((im->im_flags & IM_REPLIED) != 0)
2215 break;
2216 mutex_spin_exit(&sc->sc_intrlock);
2217 DELAY(100);
2218 mutex_spin_enter(&sc->sc_intrlock);
2219 }
2220
2221 if (timo == 0) {
2222 #ifdef I2ODEBUG
2223 printf("%s: poll - no reply\n", device_xname(&sc->sc_dv));
2224 if (iop_status_get(sc, 1) != 0)
2225 printf("iop_msg_poll: unable to retrieve status\n");
2226 else
2227 printf("iop_msg_poll: IOP state = %d\n",
2228 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2229 #endif
2230 }
2231
2232 mutex_spin_exit(&sc->sc_intrlock);
2233 }
2234
2235 /*
2236 * Sleep until the specified message is replied to.
2237 */
2238 static void
2239 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2240 {
2241 int rv;
2242
2243 mutex_spin_enter(&sc->sc_intrlock);
2244 if ((im->im_flags & IM_REPLIED) != 0) {
2245 mutex_spin_exit(&sc->sc_intrlock);
2246 return;
2247 }
2248 rv = cv_timedwait(&im->im_cv, &sc->sc_intrlock, mstohz(timo));
2249 mutex_spin_exit(&sc->sc_intrlock);
2250
2251 #ifdef I2ODEBUG
2252 if (rv != 0) {
2253 printf("iop_msg_wait: cv_timedwait() == %d\n", rv);
2254 if (iop_status_get(sc, 0) != 0)
2255 printf("iop_msg_wait: unable to retrieve status\n");
2256 else
2257 printf("iop_msg_wait: IOP state = %d\n",
2258 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2259 }
2260 #endif
2261 }
2262
2263 /*
2264 * Release an unused message frame back to the IOP's inbound fifo.
2265 */
2266 static void
2267 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2268 {
2269
2270 /* Use the frame to issue a no-op. */
2271 iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2272 iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2273 iop_outl_msg(sc, mfa + 8, 0);
2274 iop_outl_msg(sc, mfa + 12, 0);
2275
2276 iop_outl(sc, IOP_REG_IFIFO, mfa);
2277 }
2278
2279 #ifdef I2ODEBUG
2280 /*
2281 * Dump a reply frame header.
2282 */
2283 static void
2284 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2285 {
2286 u_int function, detail;
2287 const char *statusstr;
2288
2289 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2290 detail = le16toh(rb->detail);
2291
2292 printf("%s: reply:\n", device_xname(&sc->sc_dv));
2293
2294 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2295 statusstr = iop_status[rb->reqstatus];
2296 else
2297 statusstr = "undefined error code";
2298
2299 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2300 device_xname(&sc->sc_dv), function, rb->reqstatus, statusstr);
2301 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2302 device_xname(&sc->sc_dv), detail, le32toh(rb->msgictx),
2303 le32toh(rb->msgtctx));
2304 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", device_xname(&sc->sc_dv),
2305 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2306 (le32toh(rb->msgflags) >> 8) & 0xff);
2307 }
2308 #endif
2309
2310 /*
2311 * Dump a transport failure reply.
2312 */
2313 static void
2314 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2315 {
2316
2317 printf("%s: WARNING: transport failure:\n", device_xname(&sc->sc_dv));
2318
2319 printf("%s: ictx=0x%08x tctx=0x%08x\n", device_xname(&sc->sc_dv),
2320 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2321 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2322 device_xname(&sc->sc_dv), fn->failurecode, fn->severity);
2323 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2324 device_xname(&sc->sc_dv), fn->highestver, fn->lowestver);
2325 }
2326
2327 /*
2328 * Translate an I2O ASCII field into a C string.
2329 */
2330 void
2331 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2332 {
2333 int hc, lc, i, nit;
2334
2335 dlen--;
2336 lc = 0;
2337 hc = 0;
2338 i = 0;
2339
2340 /*
2341 * DPT use NUL as a space, whereas AMI use it as a terminator. The
2342 * spec has nothing to say about it. Since AMI fields are usually
2343 * filled with junk after the terminator, ...
2344 */
2345 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2346
2347 while (slen-- != 0 && dlen-- != 0) {
2348 if (nit && *src == '\0')
2349 break;
2350 else if (*src <= 0x20 || *src >= 0x7f) {
2351 if (hc)
2352 dst[i++] = ' ';
2353 } else {
2354 hc = 1;
2355 dst[i++] = *src;
2356 lc = i;
2357 }
2358 src++;
2359 }
2360
2361 dst[lc] = '\0';
2362 }
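/*
 * Example (illustrative): on a non-DPT IOP the field "RAID5\0junk"
 * yields "RAID5", since the NUL terminates the scan; on a DPT IOP,
 * where NUL counts as a space, a NUL-padded "RAID5" also yields
 * "RAID5", because anything after the last printable character is
 * trimmed via lc.
 */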
2363
2364 /*
2365 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2366 */
2367 int
2368 iop_print_ident(struct iop_softc *sc, int tid)
2369 {
2370 struct {
2371 struct i2o_param_op_results pr;
2372 struct i2o_param_read_results prr;
2373 struct i2o_param_device_identity di;
2374 } __attribute__ ((__packed__)) p;
2375 char buf[32];
2376 int rv;
2377
2378 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2379 sizeof(p), NULL);
2380 if (rv != 0)
2381 return (rv);
2382
2383 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2384 sizeof(buf));
2385 printf(" <%s, ", buf);
2386 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2387 sizeof(buf));
2388 printf("%s, ", buf);
2389 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2390 printf("%s>", buf);
2391
2392 return (0);
2393 }
2394
2395 /*
2396 * Claim or unclaim the specified TID.
2397 */
2398 int
2399 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2400 int flags)
2401 {
2402 struct iop_msg *im;
2403 struct i2o_util_claim mf;
2404 int rv, func;
2405
2406 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2407 im = iop_msg_alloc(sc, IM_WAIT);
2408
2409 /* We can use the same structure, as they're identical. */
2410 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2411 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2412 mf.msgictx = ii->ii_ictx;
2413 mf.msgtctx = im->im_tctx;
2414 mf.flags = flags;
2415
2416 rv = iop_msg_post(sc, im, &mf, 5000);
2417 iop_msg_free(sc, im);
2418 return (rv);
2419 }
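/*
 * Usage note (illustrative): a child device driver typically claims
 * its TID as primary user at attach time, e.g. iop_util_claim(sc, ii,
 * 0, I2O_UTIL_CLAIM_PRIMARY_USER) - the flag name is assumed from the
 * i2o headers - and passes release != 0 with the same flags on detach.
 */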
2420
2421 /*
2422 * Perform an abort.
2423 */
2424 int
iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2425 int tctxabort, int flags)
2426 {
2427 struct iop_msg *im;
2428 struct i2o_util_abort mf;
2429 int rv;
2430
2431 im = iop_msg_alloc(sc, IM_WAIT);
2432
2433 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2434 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2435 mf.msgictx = ii->ii_ictx;
2436 mf.msgtctx = im->im_tctx;
2437 mf.flags = (func << 24) | flags;
2438 mf.tctxabort = tctxabort;
2439
2440 rv = iop_msg_post(sc, im, &mf, 5000);
2441 iop_msg_free(sc, im);
2442 return (rv);
2443 }
2444
2445 /*
2446 * Enable or disable reception of events for the specified device.
2447 */
2448 int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2449 {
2450 struct i2o_util_event_register mf;
2451
2452 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2453 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2454 mf.msgictx = ii->ii_ictx;
2455 mf.msgtctx = 0;
2456 mf.eventmask = mask;
2457
2458 /* This message is replied to only when events are signalled. */
2459 return (iop_post(sc, (u_int32_t *)&mf));
2460 }
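/*
 * Usage note (illustrative): the mask is a bitwise OR of I2O_EVENT_*
 * bits from the i2o headers; a mask of zero disables delivery again.
 * Since the reply only arrives when an event fires, the initiator's
 * interrupt handler is what eventually sees the event frames.
 */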
2461
2462 int
2463 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
2464 {
2465 struct iop_softc *sc;
2466
2467 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2468 return (ENXIO);
2469 if ((sc->sc_flags & IOP_ONLINE) == 0)
2470 return (ENXIO);
2471 if ((sc->sc_flags & IOP_OPEN) != 0)
2472 return (EBUSY);
2473 sc->sc_flags |= IOP_OPEN;
2474
2475 return (0);
2476 }
2477
2478 int
2479 iopclose(dev_t dev, int flag, int mode,
2480 struct lwp *l)
2481 {
2482 struct iop_softc *sc;
2483
2484 sc = device_lookup(&iop_cd, minor(dev));
2485 sc->sc_flags &= ~IOP_OPEN;
2486
2487 return (0);
2488 }
2489
2490 int
2491 iopioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
2492 {
2493 struct iop_softc *sc;
2494 struct iovec *iov;
2495 int rv, i;
2496
2497 sc = device_lookup(&iop_cd, minor(dev));
2498 rv = 0;
2499
2500 switch (cmd) {
2501 case IOPIOCPT:
2502 rv = kauth_authorize_device_passthru(l->l_cred, dev,
2503 KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
2504 if (rv)
2505 return (rv);
2506
2507 return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));
2508
2509 case IOPIOCGSTATUS:
2510 iov = (struct iovec *)data;
2511 i = sizeof(struct i2o_status);
2512 if (i > iov->iov_len)
2513 i = iov->iov_len;
2514 else
2515 iov->iov_len = i;
2516 if ((rv = iop_status_get(sc, 0)) == 0)
2517 rv = copyout(&sc->sc_status, iov->iov_base, i);
2518 return (rv);
2519
2520 case IOPIOCGLCT:
2521 case IOPIOCGTIDMAP:
2522 case IOPIOCRECONFIG:
2523 break;
2524
2525 default:
2526 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2527 printf("%s: unknown ioctl %lx\n", device_xname(&sc->sc_dv), cmd);
2528 #endif
2529 return (ENOTTY);
2530 }
2531
2532 mutex_enter(&sc->sc_conflock);
2533
2534 switch (cmd) {
2535 case IOPIOCGLCT:
2536 iov = (struct iovec *)data;
2537 i = le16toh(sc->sc_lct->tablesize) << 2;
2538 if (i > iov->iov_len)
2539 i = iov->iov_len;
2540 else
2541 iov->iov_len = i;
2542 rv = copyout(sc->sc_lct, iov->iov_base, i);
2543 break;
2544
2545 case IOPIOCRECONFIG:
2546 rv = iop_reconfigure(sc, 0);
2547 break;
2548
2549 case IOPIOCGTIDMAP:
2550 iov = (struct iovec *)data;
2551 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2552 if (i > iov->iov_len)
2553 i = iov->iov_len;
2554 else
2555 iov->iov_len = i;
2556 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2557 break;
2558 }
2559
2560 mutex_exit(&sc->sc_conflock);
2561 return (rv);
2562 }
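/*
 * Illustrative userland sketch (assumes only what <dev/i2o/iopio.h>
 * exports): fetching the status block via IOPIOCGSTATUS, which takes
 * an iovec describing the caller's buffer, mirroring the copyout
 * above.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <fcntl.h>
#include <unistd.h>
#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>

static int
example_getstatus(const char *path, struct i2o_status *st)
{
	struct iovec iov;
	int fd, rv;

	if ((fd = open(path, O_RDWR)) < 0)
		return (-1);
	iov.iov_base = st;
	iov.iov_len = sizeof(*st);
	rv = ioctl(fd, IOPIOCGSTATUS, &iov);
	close(fd);
	return (rv);
}
#endif	/* 0 */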
2563
2564 static int
2565 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2566 {
2567 struct iop_msg *im;
2568 struct i2o_msg *mf;
2569 struct ioppt_buf *ptb;
2570 int rv, i, mapped;
2571
2572 mf = NULL;
2573 im = NULL;
2574 mapped = 0;
2575
2576 if (pt->pt_msglen > sc->sc_framesize ||
2577 pt->pt_msglen < sizeof(struct i2o_msg) ||
2578 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2579 pt->pt_nbufs < 0 ||
2580 #if 0
2581 pt->pt_replylen < 0 ||
2582 #endif
2583 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2584 return (EINVAL);
2585
2586 for (i = 0; i < pt->pt_nbufs; i++)
2587 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2588 rv = ENOMEM;
2589 goto bad;
2590 }
2591
2592 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2593 if (mf == NULL)
2594 return (ENOMEM);
2595
2596 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2597 goto bad;
2598
2599 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2600 im->im_rb = (struct i2o_reply *)mf;
2601 mf->msgictx = IOP_ICTX;
2602 mf->msgtctx = im->im_tctx;
2603
2604 for (i = 0; i < pt->pt_nbufs; i++) {
2605 ptb = &pt->pt_bufs[i];
2606 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2607 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2608 if (rv != 0)
2609 goto bad;
2610 mapped = 1;
2611 }
2612
2613 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2614 goto bad;
2615
2616 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2617 if (i > sc->sc_framesize)
2618 i = sc->sc_framesize;
2619 if (i > pt->pt_replylen)
2620 i = pt->pt_replylen;
2621 rv = copyout(im->im_rb, pt->pt_reply, i);
2622
2623 bad:
2624 if (mapped != 0)
2625 iop_msg_unmap(sc, im);
2626 if (im != NULL)
2627 iop_msg_free(sc, im);
2628 if (mf != NULL)
2629 free(mf, M_DEVBUF);
2630 return (rv);
2631 }
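/*
 * Illustrative userland sketch of the passthrough interface handled
 * above; the frame contents are a placeholder, and the field names are
 * the ones this function reads from struct ioppt in <dev/i2o/iopio.h>.
 */
#if 0
static int
example_passthrough(int fd, void *msg, size_t msglen, void *reply,
    size_t replylen)
{
	struct ioppt pt;

	pt.pt_msg = msg;
	pt.pt_msglen = msglen;
	pt.pt_reply = reply;
	pt.pt_replylen = replylen;
	pt.pt_timo = 5000;	/* must lie within [1000, 300000] ms */
	pt.pt_nbufs = 0;	/* no mapped buffers in this sketch */
	return (ioctl(fd, IOPIOCPT, &pt));
}
#endif	/* 0 */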
2632