/*	$NetBSD: iop.c,v 1.69 2008/04/06 20:26:21 cegger Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.69 2008/04/06 20:26:21 cegger Exp $");

#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>
#include <sys/kauth.h>
#include <sys/bus.h>

#include <uvm/uvm_extern.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#include "locators.h"

#define	POLL(ms, cond)					\
do {							\
	int xi;						\
	for (xi = (ms) * 10; xi; xi--) {		\
		if (cond)				\
			break;				\
		DELAY(100);				\
	}						\
} while (/* CONSTCOND */0);
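
/*
 * Example usage (simplified from iop_reset() below): spin for up to
 * 2.5 seconds waiting for the IOP to post a non-zero reset status:
 *
 *	POLL(2500, *sw != 0);
 */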

#ifdef I2ODEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#define	IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

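/*
 * A transfer of IOP_MAX_XFER bytes that does not start on a page
 * boundary can touch one more page than the straight quotient, hence
 * the extra segment.
 */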
#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

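/*
 * Transaction contexts: the low IOP_TCTX_SHIFT bits index the message
 * wrapper in sc_ims; the remaining bits carry a generation number (see
 * iop_msg_alloc()) so that stale replies can be detected in
 * iop_handle_reply().
 */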
#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long iop_ictxhash;
static void *iop_sdh;
static struct i2o_systab *iop_systab;
static int iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	iopopen, iopclose, noread, nowrite, iopioctl,
	nostop, notty, nopoll, nommap, nokqfilter, D_OTHER,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

static struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
	const char *ic_caption;
} const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		"executive"
	},
	{
		I2O_CLASS_DDM,
		0,
		"device driver module"
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"random block storage"
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"sequential storage"
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		"LAN port"
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		"WAN port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		"fibrechannel port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		"fibrechannel peripheral"
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		"SCSI peripheral"
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		"ATE port"
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		"ATE peripheral"
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		"floppy controller"
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		"floppy device"
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		"bus adapter port"
	},
};

static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};

static inline u_int32_t iop_inl(struct iop_softc *, int);
static inline void iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t iop_inl_msg(struct iop_softc *, int);
static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(struct device *);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *, size_t);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);

static void	iop_adjqparam(struct iop_softc *, int);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(struct device *, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
		    u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
		    struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_sys_enable(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	mutex_init(&sc->sc_intrlock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&sc->sc_conflock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_confcv, "iopconf");

	if (iop_ictxhashtbl == NULL)
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    M_DEVBUF, M_NOWAIT, &iop_ictxhash);

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		aprint_error_dev(&sc->sc_dv, "cannot create scratch dmamap\n");
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(&sc->sc_dv, "cannot alloc scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		aprint_error_dev(&sc->sc_dv, "cannot map scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(&sc->sc_dv, "cannot load scratch dmamap\n");
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Avoid syncing the reply map until it's set up. */
	sc->sc_curib = 0x123;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		aprint_error_dev(&sc->sc_dv, "not responding (reset)\n");
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		aprint_error_dev(&sc->sc_dv, "not responding (get status)\n");
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid,
	    sizeof(sc->sc_status.productid), ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n",
	    device_xname(&sc->sc_dv),
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", device_xname(&sc->sc_dv));
	printf("%s: mem %04x %04x %08x\n", device_xname(&sc->sc_dv),
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o %04x %04x %08x\n", device_xname(&sc->sc_dv),
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		aprint_error_dev(&sc->sc_dv, "frame size too small (%d)\n",
		    sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		aprint_error_dev(&sc->sc_dv, "memory allocation failure\n");
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv,
			    "couldn't create dmamap (%d)", rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
		cv_init(&im->im_cv, "iopmsg");
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to init outbound FIFO\n");
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts((struct device *)sc, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", device_xname(&sc->sc_dv),
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    device_xname(&sc->sc_dv), sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	return;

bail_out3:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(struct device *self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;
	int locs[IOPCF_NLOCS];

	sc = device_private(self);
	mutex_enter(&sc->sc_conflock);

	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", device_xname(&sc->sc_dv));

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n",
		    device_xname(&sc->sc_dv));
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				aprint_error_dev(&sc->sc_dv,
				    "unable to retrieve status\n");
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0) {
			mutex_exit(&sc->sc_conflock);
			return;
		}

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs;
		    i++) {
			if ((iop = device_lookup(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = device_unit(&iop->sc_dv) + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to set system table\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}
	if (iop_sys_enable(sc) != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to enable system\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		aprint_error_dev(&sc->sc_dv, "unable to register for events");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	locs[IOPCF_TID] = I2O_TID_IOP;
	config_found_sm_loc(self, "iop", locs, &ia, iop_print,
	    config_stdsubmatch);

	/*
	 * Start device configuration.
	 */
	if ((rv = iop_reconfigure(sc, 0)) == -1)
		aprint_error_dev(&sc->sc_dv, "configure failed (%d)\n", rv);

	sc->sc_flags |= IOP_ONLINE;
	rv = kthread_create(PRI_NONE, 0, NULL, iop_reconf_thread, sc,
	    &sc->sc_reconf_thread, "%s", device_xname(&sc->sc_dv));
	mutex_exit(&sc->sc_conflock);
	if (rv != 0) {
		aprint_error_dev(&sc->sc_dv,
		    "unable to create reconfiguration thread (%d)", rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct lwp *l;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
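	/* Ask for notification of any LCT change newer than we have seen. */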
	chgind = sc->sc_chgind + 1;
	l = curlwp;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    device_xname(&sc->sc_dv), chgind));

		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    device_xname(&sc->sc_dv), le32toh(lct.changeindicator),
		    rv));

		mutex_enter(&sc->sc_conflock);
		if (rv == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
		}
		(void)cv_timedwait(&sc->sc_confcv, &sc->sc_conflock, hz * 5);
		mutex_exit(&sc->sc_conflock);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	KASSERT(mutex_owned(&sc->sc_conflock));

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n",
			    device_xname(&sc->sc_dv), tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				aprint_error_dev(&sc->sc_dv,
				    "bus scan failed\n");
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n",
		    device_xname(&sc->sc_dv)));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", device_xname(&sc->sc_dv),
	    sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", device_xname(&sc->sc_dv)));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", device_xname(&sc->sc_dv)));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave itself...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	}
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
			config_detach(ii->ii_dv, DETACH_FORCE);
			continue;
		}

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			aprint_error_dev(&sc->sc_dv,
			    "%s failed reconfigure (%d)\n",
			    device_xname(ii->ii_dv), rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	struct device *dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    device_xname(ii->ii_dv));
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(&sc->sc_dv, "iop", locs, &ia,
		    iop_print, config_stdsubmatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, device_xname(dv));
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo, size_t l)
{
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		snprintf(devinfo, l, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, l);
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_seg->ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

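	/* Poll for up to 2.5 seconds: 25 intervals of 100ms each. */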
	for (i = 25; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			kpause("iopstat", false, hz / 10, NULL);
	}

	if (st->syncbyte != 0xff) {
		aprint_error_dev(&sc->sc_dv, "STATUS_GET timed out\n");
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_seg->ds_addr;
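	/* Account for the two SGL words in the size field (mb[0] >> 16). */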
	mb[0] += 2 << 16;

	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		aprint_error_dev(&sc->sc_dv,
		    "outbound FIFO init failed (%d)\n", le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA alloc = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA map = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA create = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv, "DMA load = %d\n", rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;

		/* Now safe to sync the reply map. */
		sc->sc_curib = 0;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	uvm_lwp_hold(curlwp);
	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	uvm_lwp_rele(curlwp);
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", device_xname(&sc->sc_dv),
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this
 * is a verbatim notification request, so the caller is prepared
 * to wait indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
    u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
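	/* Note: this assumes struct i2o_lct embeds the first table entry. */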
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Post a SYS_ENABLE message to the adapter.
 */
int
iop_sys_enable(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, 30000);
	if (rv == 0) {
		if ((im->im_flags & IM_FAIL) != 0)
			rv = ENXIO;
		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
			rv = 0;
		else
			rv = EIO;
	}

	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
    int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	if (ii == NULL)
		uvm_lwp_hold(curlwp);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	if (ii == NULL)
		uvm_lwp_rele(curlwp);

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    device_xname(&sc->sc_dv), tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(&sc->sc_dv,
		    "FIELD_SET failed for tid %d group %d\n", tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
 */
int
iop_table_clear(struct iop_softc *sc, int tid, int group)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop.olh.count = htole16(1);
	pgop.olh.reserved = htole16(0);
	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
	pgop.oat.fieldcount = htole16(0);
	pgop.oat.group = htole16(group);
	pgop.oat.fields[0] = htole16(0);

	uvm_lwp_hold(curlwp);
	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(&sc->sc_dv,
		    "TABLE_CLEAR failed for tid %d group %d\n", tid, group);

	iop_msg_unmap(sc, im);
	uvm_lwp_rele(curlwp);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Add a single row to a tabular parameter group.  The row can have only one
 * field.
 */
int
iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
    int size, int row)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
	pgop->oat.fields[1] = htole16(1);	/* RowCount */
	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
	memcpy(&pgop->oat.fields[3], buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(&sc->sc_dv,
		    "ADD_ROW failed for tid %d group %d row %d\n",
		    tid, group, row);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Execute a simple command (no parameters).
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
    int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->iopid = (device_unit(&sc->sc_dv) + 2) << 12;
	mf->segnumber = 0;

	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv,
			    "can't alloc priv mem space, err = %d\n", rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(&sc->sc_dv,
			    "can't alloc priv i/o space, err = %d\n", rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	uvm_lwp_hold(curlwp);
	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	uvm_lwp_rele(curlwp);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_seg->ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		aprint_error_dev(&sc->sc_dv, "reset rejected, status 0x%x\n",
		    le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		aprint_error_dev(&sc->sc_dv, "reset failed\n");
		return (EIO);
	}

	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	cv_init(&ii->ii_cv, "iopevt");

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_REMOVE(ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);

	cv_destroy(&ii->ii_cv);
}

/*
 * Handle a reply frame from the IOP.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	KASSERT(mutex_owned(&sc->sc_intrlock));

	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)((char *)sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
	if (--sc->sc_curib != 0)
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
		    0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			aprint_error_dev(&sc->sc_dv,
			    "WARNING: bad ictx returned (%x)\n", ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			aprint_error_dev(&sc->sc_dv,
			    "WARNING: bad tctx returned (0x%08x, %p)\n",
			    tctx, im);
			if (im != NULL)
				aprint_error_dev(&sc->sc_dv,
				    "flags=0x%08x tctx=0x%08x\n",
				    im->im_flags, im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", device_xname(&sc->sc_dv));
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;
		im->im_detstatus = le16toh(rb->detail);

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
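			/*
			 * The message size field (bits 16..31 of the
			 * header) counts 32-bit words; convert to bytes.
			 */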
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			cv_broadcast(&im->im_cv);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
			if (ii != NULL) {
				mutex_spin_exit(&sc->sc_intrlock);
				(*ii->ii_intr)(ii->ii_dv, im, rb);
				mutex_spin_enter(&sc->sc_intrlock);
			}
		}
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		if (ii != NULL) {
			mutex_spin_exit(&sc->sc_intrlock);
			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
			mutex_spin_enter(&sc->sc_intrlock);
		}
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	mutex_spin_enter(&sc->sc_intrlock);

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) {
		mutex_spin_exit(&sc->sc_intrlock);
		return (0);
	}

	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	mutex_spin_exit(&sc->sc_intrlock);
	return (1);
}

/*
 * Handle an event signalled by the executive.
 */
static void
iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	printf("%s: event 0x%08x received\n", device_xname(dv), event);
}

/*
 * Allocate a message wrapper.
 */
struct iop_msg *
iop_msg_alloc(struct iop_softc *sc, int flags)
{
	struct iop_msg *im;
	static u_int tctxgen;
	int i;

#ifdef I2ODEBUG
	if ((flags & IM_SYSMASK) != 0)
		panic("iop_msg_alloc: system flags specified");
#endif

	mutex_spin_enter(&sc->sc_intrlock);
	im = SLIST_FIRST(&sc->sc_im_freelist);
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
	if (im == NULL)
		panic("iop_msg_alloc: no free wrappers");
#endif
	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);

	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
	tctxgen += (1 << IOP_TCTX_SHIFT);
	im->im_flags = flags | IM_ALLOCED;
	im->im_rb = NULL;
	i = 0;
	do {
		im->im_xfer[i++].ix_size = 0;
	} while (i < IOP_MAX_MSG_XFERS);

	return (im);
}

/*
 * Free a message wrapper.
 */
void
iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
{

#ifdef I2ODEBUG
	if ((im->im_flags & IM_ALLOCED) == 0)
		panic("iop_msg_free: wrapper not allocated");
#endif

	im->im_flags = 0;
	mutex_spin_enter(&sc->sc_intrlock);
	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Map a data transfer.  Write a scatter-gather list into the message frame.
 */
int
iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
    void *xferaddr, int xfersize, int out, struct proc *up)
{
	bus_dmamap_t dm;
	bus_dma_segment_t *ds;
	struct iop_xfer *ix;
	u_int rv, i, nsegs, flg, off, xn;
	u_int32_t *p;

	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
		if (ix->ix_size == 0)
			break;

#ifdef I2ODEBUG
	if (xfersize == 0)
		panic("iop_msg_map: null transfer");
	if (xfersize > IOP_MAX_XFER)
		panic("iop_msg_map: transfer too large");
	if (xn == IOP_MAX_MSG_XFERS)
		panic("iop_msg_map: too many xfers");
#endif

	/*
	 * Only the first DMA map is static.
	 */
	if (xn != 0) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
		if (rv != 0)
			return (rv);
	}

	dm = ix->ix_map;
	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
	    (up == NULL ? BUS_DMA_NOWAIT : 0));
	if (rv != 0)
		goto bad;

	/*
	 * How many SIMPLE SG elements can we fit in this message?
	 */
	off = mb[0] >> 16;
	p = mb + off;
	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;

	if (dm->dm_nsegs > nsegs) {
		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
		rv = EFBIG;
		DPRINTF(("iop_msg_map: too many segs\n"));
		goto bad;
	}

	nsegs = dm->dm_nsegs;
	xfersize = 0;

	/*
	 * Write out the SG list.
	 */
	if (out)
		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
	else
		flg = I2O_SGL_SIMPLE;

	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
		p[0] = (u_int32_t)ds->ds_len | flg;
		p[1] = (u_int32_t)ds->ds_addr;
		xfersize += ds->ds_len;
	}

	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
	p[1] = (u_int32_t)ds->ds_addr;
	xfersize += ds->ds_len;

	/* Fix up the transfer record, and sync the map. */
	ix->ix_flags = (out ? IX_OUT : IX_IN);
	ix->ix_size = xfersize;
	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
	    out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);

	/*
	 * If this is the first xfer we've mapped for this message, adjust
	 * the SGL offset field in the message header.
	 */
	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
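		/*
		 * Record the current message size (in words) in the
		 * header's SGL offset nibble.
		 */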
1966 mb[0] += (mb[0] >> 12) & 0xf0;
1967 im->im_flags |= IM_SGLOFFADJ;
1968 }
1969 mb[0] += (nsegs << 17);
1970 return (0);
1971
1972 bad:
1973 if (xn != 0)
1974 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1975 return (rv);
1976 }
1977
1978 /*
1979 * Map a block I/O data transfer (different in that there's only one per
1980 * message maximum, and PAGE addressing may be used). Write a scatter
1981 * gather list into the message frame.
1982 */
1983 int
1984 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1985 void *xferaddr, int xfersize, int out)
1986 {
1987 bus_dma_segment_t *ds;
1988 bus_dmamap_t dm;
1989 struct iop_xfer *ix;
1990 u_int rv, i, nsegs, off, slen, tlen, flg;
1991 paddr_t saddr, eaddr;
1992 u_int32_t *p;
1993
1994 #ifdef I2ODEBUG
1995 if (xfersize == 0)
1996 panic("iop_msg_map_bio: null transfer");
1997 if (xfersize > IOP_MAX_XFER)
1998 panic("iop_msg_map_bio: transfer too large");
1999 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2000 panic("iop_msg_map_bio: SGLOFFADJ");
2001 #endif
2002
2003 ix = im->im_xfer;
2004 dm = ix->ix_map;
2005 rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
2006 BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
2007 if (rv != 0)
2008 return (rv);
2009
2010 off = mb[0] >> 16;
2011 nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
2012
2013 /*
2014 * If the transfer is highly fragmented and won't fit using SIMPLE
2015 * elements, use PAGE_LIST elements instead. SIMPLE elements are
2016 * potentially more efficient, both for us and the IOP.
2017 */
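	/*
	 * For example (assuming 4kB pages): a 64kB buffer whose 17
	 * physical pages are all discontiguous would need 17 two-word
	 * SIMPLE elements (34 words), but only one length/flags word
	 * plus one address word per page (18 words) as a PAGE_LIST
	 * element.
	 */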
2018 if (dm->dm_nsegs > nsegs) {
2019 nsegs = 1;
2020 p = mb + off + 1;
2021
2022 /* XXX This should be done with a bus_space flag. */
2023 for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2024 slen = ds->ds_len;
2025 saddr = ds->ds_addr;
2026
2027 while (slen > 0) {
2028 eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2029 tlen = min(eaddr - saddr, slen);
2030 slen -= tlen;
2031 				*p++ = htole32(saddr);	/* host -> IOP (little-endian) order */
2032 saddr = eaddr;
2033 nsegs++;
2034 }
2035 }
2036
2037 mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2038 I2O_SGL_END;
2039 if (out)
2040 mb[off] |= I2O_SGL_DATA_OUT;
2041 } else {
2042 p = mb + off;
2043 nsegs = dm->dm_nsegs;
2044
2045 if (out)
2046 flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2047 else
2048 flg = I2O_SGL_SIMPLE;
2049
2050 for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2051 p[0] = (u_int32_t)ds->ds_len | flg;
2052 p[1] = (u_int32_t)ds->ds_addr;
2053 }
2054
2055 p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2056 I2O_SGL_END;
2057 p[1] = (u_int32_t)ds->ds_addr;
2058 nsegs <<= 1;
2059 }
2060
2061 /* Fix up the transfer record, and sync the map. */
2062 ix->ix_flags = (out ? IX_OUT : IX_IN);
2063 ix->ix_size = xfersize;
2064 	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2065 	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2066
2067 /*
2068 	 * Adjust the SGL offset and total message size fields.  We don't
2069 	 * set IM_SGLOFFADJ: the SG list is already terminated here, so
	 * iop_msg_post() must not try to terminate it again.
2070 */
2071 mb[0] += ((off << 4) + (nsegs << 16));
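	/*
	 * E.g. with off = 10 and an 18 word PAGE_LIST: (10 << 4) sets the
	 * SGL offset nibble (header bits 4-7) to 10, and (18 << 16) grows
	 * the word count kept in the upper half of the header.
	 */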
2072 return (0);
2073 }
2074
2075 /*
2076 * Unmap all data transfers associated with a message wrapper.
2077 */
2078 void
2079 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2080 {
2081 struct iop_xfer *ix;
2082 int i;
2083
2084 #ifdef I2ODEBUG
2085 if (im->im_xfer[0].ix_size == 0)
2086 panic("iop_msg_unmap: no transfers mapped");
2087 #endif
2088
2089 for (ix = im->im_xfer, i = 0;;) {
2090 bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2091 ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2092 BUS_DMASYNC_POSTREAD);
2093 bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2094
2095 /* Only the first DMA map is static. */
2096 if (i != 0)
2097 bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
		/*
		 * Check the slot count first: a fully populated wrapper
		 * must not read one element past the end of im_xfer[].
		 */
		if (++i >= IOP_MAX_MSG_XFERS)
			break;
		if ((++ix)->ix_size == 0)
			break;
2102 }
2103 }
2104
2105 /*
2106 * Post a message frame to the IOP's inbound queue.
2107 */
2108 int
2109 iop_post(struct iop_softc *sc, u_int32_t *mb)
2110 {
2111 u_int32_t mfa;
2112
2113 #ifdef I2ODEBUG
2114 if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2115 panic("iop_post: frame too large");
2116 #endif
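	/*
	 * Per the I2O spec, the first frame word packs VersionOffset into
	 * bits 0-7, MsgFlags into bits 8-15 and MessageSize (in 32-bit
	 * words) into bits 16-31; hence "mb[0] >> 16" is the word count
	 * and "(mb[0] >> 14) & ~3" below is the equivalent byte count.
	 */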
2117
2118 mutex_spin_enter(&sc->sc_intrlock);
2119
2120 	/*
	 * Allocate a slot with the IOP.  Read the FIFO twice: as with the
	 * outbound FIFO in iop_msg_poll(), the first read may spuriously
	 * return IOP_MFA_EMPTY.
	 */
2121 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2122 if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2123 mutex_spin_exit(&sc->sc_intrlock);
2124 aprint_error_dev(&sc->sc_dv, "mfa not forthcoming\n");
2125 return (EAGAIN);
2126 }
2127
2128 /* Perform reply buffer DMA synchronisation. */
2129 if (sc->sc_curib++ == 0)
2130 bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2131 sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2132
2133 /* Copy out the message frame. */
2134 bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2135 mb[0] >> 16);
2136 bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
2137 (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2138
2139 /* Post the MFA back to the IOP. */
2140 iop_outl(sc, IOP_REG_IFIFO, mfa);
2141
2142 mutex_spin_exit(&sc->sc_intrlock);
2143 return (0);
2144 }
2145
2146 /*
2147 * Post a message to the IOP and deal with completion.
2148 */
2149 int
2150 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2151 {
2152 u_int32_t *mb;
2153 int rv;
2154
2155 mb = xmb;
2156
2157 	/*
	 * Terminate the scatter/gather list chain: the final SIMPLE
	 * element's header occupies the word two before the end of the
	 * frame.
	 */
2158 if ((im->im_flags & IM_SGLOFFADJ) != 0)
2159 mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2160
2161 if ((rv = iop_post(sc, mb)) != 0)
2162 return (rv);
2163
2164 if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2165 if ((im->im_flags & IM_POLL) != 0)
2166 iop_msg_poll(sc, im, timo);
2167 else
2168 iop_msg_wait(sc, im, timo);
2169
2170 mutex_spin_enter(&sc->sc_intrlock);
2171 if ((im->im_flags & IM_REPLIED) != 0) {
2172 if ((im->im_flags & IM_NOSTATUS) != 0)
2173 rv = 0;
2174 else if ((im->im_flags & IM_FAIL) != 0)
2175 rv = ENXIO;
2176 else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2177 rv = EIO;
2178 else
2179 rv = 0;
2180 } else
2181 rv = EBUSY;
2182 mutex_spin_exit(&sc->sc_intrlock);
2183 } else
2184 rv = 0;
2185
2186 return (rv);
2187 }
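/*
 * Callers typically allocate the wrapper with IM_POLL where sleeping is
 * not possible (e.g. during autoconfiguration) and with IM_WAIT from
 * thread context; the former spins in iop_msg_poll() and the latter
 * sleeps in iop_msg_wait().
 */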
2188
2189 /*
2190 * Spin until the specified message is replied to.
2191 */
2192 static void
2193 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2194 {
2195 u_int32_t rmfa;
2196
2197 mutex_spin_enter(&sc->sc_intrlock);
2198
2199 	for (timo *= 10; timo != 0; timo--) {	/* timo is in ms; 100us per spin */
2200 if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2201 /* Double read to account for IOP bug. */
2202 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2203 if (rmfa == IOP_MFA_EMPTY)
2204 rmfa = iop_inl(sc, IOP_REG_OFIFO);
2205 if (rmfa != IOP_MFA_EMPTY) {
2206 iop_handle_reply(sc, rmfa);
2207
2208 /*
2209 * Return the reply frame to the IOP's
2210 * outbound FIFO.
2211 */
2212 iop_outl(sc, IOP_REG_OFIFO, rmfa);
2213 }
2214 }
2215 if ((im->im_flags & IM_REPLIED) != 0)
2216 break;
2217 mutex_spin_exit(&sc->sc_intrlock);
2218 DELAY(100);
2219 mutex_spin_enter(&sc->sc_intrlock);
2220 }
2221
2222 if (timo == 0) {
2223 #ifdef I2ODEBUG
2224 printf("%s: poll - no reply\n", device_xname(&sc->sc_dv));
2225 if (iop_status_get(sc, 1) != 0)
2226 printf("iop_msg_poll: unable to retrieve status\n");
2227 else
2228 printf("iop_msg_poll: IOP state = %d\n",
2229 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2230 #endif
2231 }
2232
2233 mutex_spin_exit(&sc->sc_intrlock);
2234 }
2235
2236 /*
2237 * Sleep until the specified message is replied to.
2238 */
2239 static void
2240 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2241 {
2242 int rv;
2243
2244 mutex_spin_enter(&sc->sc_intrlock);
2245 if ((im->im_flags & IM_REPLIED) != 0) {
2246 mutex_spin_exit(&sc->sc_intrlock);
2247 return;
2248 }
2249 rv = cv_timedwait(&im->im_cv, &sc->sc_intrlock, mstohz(timo));
2250 mutex_spin_exit(&sc->sc_intrlock);
2251
2252 #ifdef I2ODEBUG
2253 if (rv != 0) {
2254 		printf("iop_msg_wait: cv_timedwait() == %d\n", rv);
2255 if (iop_status_get(sc, 0) != 0)
2256 printf("iop_msg_wait: unable to retrieve status\n");
2257 else
2258 printf("iop_msg_wait: IOP state = %d\n",
2259 (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2260 }
2261 #endif
2262 }
2263
2264 /*
2265 * Release an unused message frame back to the IOP's inbound fifo.
2266 */
2267 static void
2268 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2269 {
2270
2271 /* Use the frame to issue a no-op. */
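	/*
	 * The resulting frame is four words: version and size, the NOP
	 * function code addressed to the IOP's own TID, and null
	 * initiator and transaction contexts.  UTIL_NOP solicits no
	 * reply, so the MFA is consumed without further work.
	 */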
2272 iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2273 iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2274 iop_outl_msg(sc, mfa + 8, 0);
2275 iop_outl_msg(sc, mfa + 12, 0);
2276
2277 iop_outl(sc, IOP_REG_IFIFO, mfa);
2278 }
2279
2280 #ifdef I2ODEBUG
2281 /*
2282 * Dump a reply frame header.
2283 */
2284 static void
2285 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2286 {
2287 u_int function, detail;
2288 const char *statusstr;
2289
2290 function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2291 detail = le16toh(rb->detail);
2292
2293 printf("%s: reply:\n", device_xname(&sc->sc_dv));
2294
2295 if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2296 statusstr = iop_status[rb->reqstatus];
2297 else
2298 statusstr = "undefined error code";
2299
2300 printf("%s: function=0x%02x status=0x%02x (%s)\n",
2301 device_xname(&sc->sc_dv), function, rb->reqstatus, statusstr);
2302 printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2303 device_xname(&sc->sc_dv), detail, le32toh(rb->msgictx),
2304 le32toh(rb->msgtctx));
2305 printf("%s: tidi=%d tidt=%d flags=0x%02x\n", device_xname(&sc->sc_dv),
2306 (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2307 (le32toh(rb->msgflags) >> 8) & 0xff);
2308 }
2309 #endif
2310
2311 /*
2312 * Dump a transport failure reply.
2313 */
2314 static void
2315 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2316 {
2317
2318 printf("%s: WARNING: transport failure:\n", device_xname(&sc->sc_dv));
2319
2320 printf("%s: ictx=0x%08x tctx=0x%08x\n", device_xname(&sc->sc_dv),
2321 le32toh(fn->msgictx), le32toh(fn->msgtctx));
2322 printf("%s: failurecode=0x%02x severity=0x%02x\n",
2323 device_xname(&sc->sc_dv), fn->failurecode, fn->severity);
2324 printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2325 device_xname(&sc->sc_dv), fn->highestver, fn->lowestver);
2326 }
2327
2328 /*
2329 * Translate an I2O ASCII field into a C string.
2330 */
2331 void
2332 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2333 {
2334 int hc, lc, i, nit;
2335
2336 dlen--;
2337 lc = 0;
2338 hc = 0;
2339 i = 0;
2340
2341 /*
2342 	 * DPT use NUL as a space, whereas AMI use it as a terminator.  The
2343 	 * spec has nothing to say about it.  Since AMI fields are usually
2344 	 * filled with junk after the terminator, treat NUL as a terminator
	 * ("nit" below) on everything but DPT hardware.
2345 */
2346 nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
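	/*
	 * For example, with nit set a field of "RAID-5\0<junk>" converts
	 * to "RAID-5"; on DPT hardware each NUL after the first printable
	 * character becomes a space, and the lc watermark trims whatever
	 * trailing blanks result.
	 */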
2347
2348 while (slen-- != 0 && dlen-- != 0) {
2349 if (nit && *src == '\0')
2350 break;
2351 else if (*src <= 0x20 || *src >= 0x7f) {
2352 if (hc)
2353 dst[i++] = ' ';
2354 } else {
2355 hc = 1;
2356 dst[i++] = *src;
2357 lc = i;
2358 }
2359 src++;
2360 }
2361
2362 dst[lc] = '\0';
2363 }
2364
2365 /*
2366 * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2367 */
2368 int
2369 iop_print_ident(struct iop_softc *sc, int tid)
2370 {
2371 struct {
2372 struct i2o_param_op_results pr;
2373 struct i2o_param_read_results prr;
2374 struct i2o_param_device_identity di;
2375 } __attribute__ ((__packed__)) p;
2376 char buf[32];
2377 int rv;
2378
2379 rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2380 sizeof(p), NULL);
2381 if (rv != 0)
2382 return (rv);
2383
2384 iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2385 sizeof(buf));
2386 printf(" <%s, ", buf);
2387 iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2388 sizeof(buf));
2389 printf("%s, ", buf);
2390 iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2391 printf("%s>", buf);
2392
2393 return (0);
2394 }
2395
2396 /*
2397 * Claim or unclaim the specified TID.
2398 */
2399 int
2400 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2401 int flags)
2402 {
2403 struct iop_msg *im;
2404 struct i2o_util_claim mf;
2405 int rv, func;
2406
2407 func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2408 im = iop_msg_alloc(sc, IM_WAIT);
2409
2410 /* We can use the same structure, as they're identical. */
2411 mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2412 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2413 mf.msgictx = ii->ii_ictx;
2414 mf.msgtctx = im->im_tctx;
2415 mf.flags = flags;
2416
2417 rv = iop_msg_post(sc, im, &mf, 5000);
2418 iop_msg_free(sc, im);
2419 return (rv);
2420 }
2421
2422 /*
2423 * Perform an abort.
2424 */
2425 int
2426 iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
    int tctxabort, int flags)
2427 {
2428 struct iop_msg *im;
2429 struct i2o_util_abort mf;
2430 int rv;
2431
2432 im = iop_msg_alloc(sc, IM_WAIT);
2433
2434 mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2435 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2436 mf.msgictx = ii->ii_ictx;
2437 mf.msgtctx = im->im_tctx;
2438 mf.flags = (func << 24) | flags;
2439 mf.tctxabort = tctxabort;
2440
2441 rv = iop_msg_post(sc, im, &mf, 5000);
2442 iop_msg_free(sc, im);
2443 return (rv);
2444 }
2445
2446 /*
2447 * Enable or disable reception of events for the specified device.
2448 */
2449 int
iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2450 {
2451 struct i2o_util_event_register mf;
2452
2453 mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2454 mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2455 mf.msgictx = ii->ii_ictx;
2456 mf.msgtctx = 0;
2457 mf.eventmask = mask;
2458
2459 /* This message is replied to only when events are signalled. */
2460 return (iop_post(sc, (u_int32_t *)&mf));
2461 }
2462
2463 int
2464 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
2465 {
2466 struct iop_softc *sc;
2467
2468 if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2469 return (ENXIO);
2470 if ((sc->sc_flags & IOP_ONLINE) == 0)
2471 return (ENXIO);
2472 if ((sc->sc_flags & IOP_OPEN) != 0)
2473 return (EBUSY);
2474 sc->sc_flags |= IOP_OPEN;
2475
2476 return (0);
2477 }
2478
2479 int
2480 iopclose(dev_t dev, int flag, int mode, struct lwp *l)
2482 {
2483 struct iop_softc *sc;
2484
2485 sc = device_lookup(&iop_cd, minor(dev));
2486 sc->sc_flags &= ~IOP_OPEN;
2487
2488 return (0);
2489 }
2490
2491 int
2492 iopioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
2493 {
2494 struct iop_softc *sc;
2495 struct iovec *iov;
2496 int rv, i;
2497
2498 sc = device_lookup(&iop_cd, minor(dev));
2499 rv = 0;
2500
2501 switch (cmd) {
2502 case IOPIOCPT:
2503 rv = kauth_authorize_device_passthru(l->l_cred, dev,
2504 KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
2505 if (rv)
2506 return (rv);
2507
2508 return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));
2509
2510 case IOPIOCGSTATUS:
2511 iov = (struct iovec *)data;
2512 i = sizeof(struct i2o_status);
2513 if (i > iov->iov_len)
2514 i = iov->iov_len;
2515 else
2516 iov->iov_len = i;
2517 if ((rv = iop_status_get(sc, 0)) == 0)
2518 rv = copyout(&sc->sc_status, iov->iov_base, i);
2519 return (rv);
2520
2521 case IOPIOCGLCT:
2522 case IOPIOCGTIDMAP:
2523 case IOPIOCRECONFIG:
2524 break;
2525
2526 default:
2527 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2528 printf("%s: unknown ioctl %lx\n", device_xname(&sc->sc_dv), cmd);
2529 #endif
2530 return (ENOTTY);
2531 }
2532
2533 mutex_enter(&sc->sc_conflock);
2534
2535 switch (cmd) {
2536 case IOPIOCGLCT:
2537 iov = (struct iovec *)data;
2538 i = le16toh(sc->sc_lct->tablesize) << 2;
2539 if (i > iov->iov_len)
2540 i = iov->iov_len;
2541 else
2542 iov->iov_len = i;
2543 rv = copyout(sc->sc_lct, iov->iov_base, i);
2544 break;
2545
2546 case IOPIOCRECONFIG:
2547 rv = iop_reconfigure(sc, 0);
2548 break;
2549
2550 case IOPIOCGTIDMAP:
2551 iov = (struct iovec *)data;
2552 i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2553 if (i > iov->iov_len)
2554 i = iov->iov_len;
2555 else
2556 iov->iov_len = i;
2557 rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2558 break;
2559 }
2560
2561 mutex_exit(&sc->sc_conflock);
2562 return (rv);
2563 }
2564
2565 static int
2566 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2567 {
2568 struct iop_msg *im;
2569 struct i2o_msg *mf;
2570 struct ioppt_buf *ptb;
2571 int rv, i, mapped;
2572
2573 mf = NULL;
2574 im = NULL;
2575 	mapped = 0;	/* nothing mapped yet; see "bad" below */
2576
2577 if (pt->pt_msglen > sc->sc_framesize ||
2578 pt->pt_msglen < sizeof(struct i2o_msg) ||
2579 pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2580 pt->pt_nbufs < 0 ||
2581 #if 0
2582 pt->pt_replylen < 0 ||
2583 #endif
2584 pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2585 return (EINVAL);
2586
2587 for (i = 0; i < pt->pt_nbufs; i++)
2588 if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2589 rv = ENOMEM;
2590 goto bad;
2591 }
2592
2593 mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2594 if (mf == NULL)
2595 return (ENOMEM);
2596
2597 if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2598 goto bad;
2599
2600 im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2601 im->im_rb = (struct i2o_reply *)mf;
2602 mf->msgictx = IOP_ICTX;
2603 mf->msgtctx = im->im_tctx;
2604
2605 for (i = 0; i < pt->pt_nbufs; i++) {
2606 ptb = &pt->pt_bufs[i];
2607 rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2608 ptb->ptb_datalen, ptb->ptb_out != 0, p);
2609 if (rv != 0)
2610 goto bad;
2611 mapped = 1;
2612 }
2613
2614 if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2615 goto bad;
2616
2617 i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2618 if (i > sc->sc_framesize)
2619 i = sc->sc_framesize;
2620 if (i > pt->pt_replylen)
2621 i = pt->pt_replylen;
2622 rv = copyout(im->im_rb, pt->pt_reply, i);
2623
2624 bad:
2625 if (mapped != 0)
2626 iop_msg_unmap(sc, im);
2627 if (im != NULL)
2628 iop_msg_free(sc, im);
2629 if (mf != NULL)
2630 free(mf, M_DEVBUF);
2631 return (rv);
2632 }
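/*
 * A hedged userland sketch of driving iop_passthrough() through the
 * IOPIOCPT ioctl (the device path and frame contents are assumptions;
 * the message must be a valid I2O frame built per the spec):
 *
 *	struct ioppt pt;
 *	u_int32_t frame[32], reply[32];
 *	int fd = open("/dev/iop0", O_RDWR);
 *
 *	... build an I2O message of framelen bytes in frame[] ...
 *	memset(&pt, 0, sizeof(pt));
 *	pt.pt_msg = frame;
 *	pt.pt_msglen = framelen;
 *	pt.pt_reply = reply;
 *	pt.pt_replylen = sizeof(reply);
 *	pt.pt_timo = 5000;		(must be 1000..300000 ms)
 *	pt.pt_nbufs = 0;
 *	if (ioctl(fd, IOPIOCPT, &pt) == -1)
 *		err(1, "IOPIOCPT");
 */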
2633