1 1.27 ad /* $NetBSD: iop.c,v 1.27 2002/10/22 13:42:33 ad Exp $ */
2 1.1 ad
3 1.1 ad /*-
4 1.11 ad * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
5 1.1 ad * All rights reserved.
6 1.1 ad *
7 1.1 ad * This code is derived from software contributed to The NetBSD Foundation
8 1.1 ad * by Andrew Doran.
9 1.1 ad *
10 1.1 ad * Redistribution and use in source and binary forms, with or without
11 1.1 ad * modification, are permitted provided that the following conditions
12 1.1 ad * are met:
13 1.1 ad * 1. Redistributions of source code must retain the above copyright
14 1.1 ad * notice, this list of conditions and the following disclaimer.
15 1.1 ad * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 ad * notice, this list of conditions and the following disclaimer in the
17 1.1 ad * documentation and/or other materials provided with the distribution.
18 1.1 ad * 3. All advertising materials mentioning features or use of this software
19 1.1 ad * must display the following acknowledgement:
20 1.1 ad * This product includes software developed by the NetBSD
21 1.1 ad * Foundation, Inc. and its contributors.
22 1.1 ad * 4. Neither the name of The NetBSD Foundation nor the names of its
23 1.1 ad * contributors may be used to endorse or promote products derived
24 1.1 ad * from this software without specific prior written permission.
25 1.1 ad *
26 1.1 ad * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 1.1 ad * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 1.1 ad * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 1.1 ad * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 1.1 ad * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 1.1 ad * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 1.1 ad * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 1.1 ad * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 1.1 ad * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 1.1 ad * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 1.1 ad * POSSIBILITY OF SUCH DAMAGE.
37 1.1 ad */
38 1.1 ad
39 1.1 ad /*
40 1.1 ad * Support for I2O IOPs (intelligent I/O processors).
41 1.1 ad */
42 1.20 lukem
43 1.20 lukem #include <sys/cdefs.h>
44 1.27 ad __KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.27 2002/10/22 13:42:33 ad Exp $");
45 1.1 ad
46 1.1 ad #include "opt_i2o.h"
47 1.5 ad #include "iop.h"
48 1.1 ad
49 1.1 ad #include <sys/param.h>
50 1.1 ad #include <sys/systm.h>
51 1.1 ad #include <sys/kernel.h>
52 1.1 ad #include <sys/device.h>
53 1.1 ad #include <sys/queue.h>
54 1.1 ad #include <sys/proc.h>
55 1.1 ad #include <sys/malloc.h>
56 1.1 ad #include <sys/ioctl.h>
57 1.1 ad #include <sys/endian.h>
58 1.5 ad #include <sys/conf.h>
59 1.5 ad #include <sys/kthread.h>
60 1.1 ad
61 1.4 thorpej #include <uvm/uvm_extern.h>
62 1.4 thorpej
63 1.1 ad #include <machine/bus.h>
64 1.1 ad
65 1.1 ad #include <dev/i2o/i2o.h>
66 1.11 ad #include <dev/i2o/iopio.h>
67 1.1 ad #include <dev/i2o/iopreg.h>
68 1.1 ad #include <dev/i2o/iopvar.h>
69 1.1 ad
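/*
 * Poll for up to `ms' milliseconds, in 100 microsecond steps, until the
 * given condition becomes true.
 */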
70 1.1 ad #define POLL(ms, cond) \
71 1.1 ad do { \
72 1.1 ad int i; \
73 1.1 ad for (i = (ms) * 10; i; i--) { \
74 1.1 ad if (cond) \
75 1.1 ad break; \
76 1.1 ad DELAY(100); \
77 1.1 ad } \
78 1.1 ad } while (/* CONSTCOND */0);
79 1.1 ad
80 1.1 ad #ifdef I2ODEBUG
81 1.1 ad #define DPRINTF(x) printf x
82 1.1 ad #else
83 1.1 ad #define DPRINTF(x)
84 1.1 ad #endif
85 1.1 ad
86 1.1 ad #ifdef I2OVERBOSE
87 1.5 ad #define IFVERBOSE(x) x
88 1.11 ad #define COMMENT(x) NULL
89 1.1 ad #else
90 1.1 ad #define IFVERBOSE(x)
91 1.11 ad #define COMMENT(x)
92 1.1 ad #endif
93 1.1 ad
94 1.5 ad #define IOP_ICTXHASH_NBUCKETS 16
95 1.5 ad #define IOP_ICTXHASH(ictx) (&iop_ictxhashtbl[(ictx) & iop_ictxhash])
96 1.11 ad
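/*
 * Worst-case number of DMA segments needed to map an IOP_MAX_XFER byte
 * transfer: one per page touched, plus one in case the buffer is not
 * page aligned.
 */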
97 1.11 ad #define IOP_MAX_SEGS (((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)
98 1.11 ad
99 1.11 ad #define IOP_TCTX_SHIFT 12
100 1.11 ad #define IOP_TCTX_MASK ((1 << IOP_TCTX_SHIFT) - 1)
101 1.5 ad
102 1.5 ad static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
103 1.5 ad static u_long iop_ictxhash;
104 1.1 ad static void *iop_sdh;
105 1.5 ad static struct i2o_systab *iop_systab;
106 1.5 ad static int iop_systab_size;
107 1.1 ad
108 1.1 ad extern struct cfdriver iop_cd;
109 1.1 ad
110 1.25 gehenna dev_type_open(iopopen);
111 1.25 gehenna dev_type_close(iopclose);
112 1.25 gehenna dev_type_ioctl(iopioctl);
113 1.25 gehenna
114 1.25 gehenna const struct cdevsw iop_cdevsw = {
115 1.25 gehenna iopopen, iopclose, noread, nowrite, iopioctl,
116 1.25 gehenna nostop, notty, nopoll, nommap,
117 1.25 gehenna };
118 1.25 gehenna
119 1.5 ad #define IC_CONFIGURE 0x01
120 1.11 ad #define IC_PRIORITY 0x02
121 1.1 ad
122 1.1 ad struct iop_class {
123 1.5 ad u_short ic_class;
124 1.5 ad u_short ic_flags;
125 1.11 ad #ifdef I2OVERBOSE
126 1.1 ad const char *ic_caption;
127 1.11 ad #endif
128 1.1 ad } static const iop_class[] = {
129 1.1 ad {
130 1.1 ad I2O_CLASS_EXECUTIVE,
131 1.1 ad 0,
132 1.5 ad COMMENT("executive")
133 1.1 ad },
134 1.1 ad {
135 1.1 ad I2O_CLASS_DDM,
136 1.1 ad 0,
137 1.5 ad COMMENT("device driver module")
138 1.1 ad },
139 1.1 ad {
140 1.1 ad I2O_CLASS_RANDOM_BLOCK_STORAGE,
141 1.11 ad IC_CONFIGURE | IC_PRIORITY,
142 1.1 ad IFVERBOSE("random block storage")
143 1.1 ad },
144 1.1 ad {
145 1.1 ad I2O_CLASS_SEQUENTIAL_STORAGE,
146 1.11 ad IC_CONFIGURE | IC_PRIORITY,
147 1.1 ad IFVERBOSE("sequential storage")
148 1.1 ad },
149 1.1 ad {
150 1.1 ad I2O_CLASS_LAN,
151 1.11 ad IC_CONFIGURE | IC_PRIORITY,
152 1.1 ad IFVERBOSE("LAN port")
153 1.1 ad },
154 1.1 ad {
155 1.1 ad I2O_CLASS_WAN,
156 1.11 ad IC_CONFIGURE | IC_PRIORITY,
157 1.1 ad IFVERBOSE("WAN port")
158 1.1 ad },
159 1.1 ad {
160 1.1 ad I2O_CLASS_FIBRE_CHANNEL_PORT,
161 1.1 ad IC_CONFIGURE,
162 1.1 ad IFVERBOSE("fibrechannel port")
163 1.1 ad },
164 1.1 ad {
165 1.1 ad I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
166 1.1 ad 0,
167 1.5 ad COMMENT("fibrechannel peripheral")
168 1.1 ad },
169 1.1 ad {
170 1.1 ad I2O_CLASS_SCSI_PERIPHERAL,
171 1.1 ad 0,
172 1.5 ad COMMENT("SCSI peripheral")
173 1.1 ad },
174 1.1 ad {
175 1.1 ad I2O_CLASS_ATE_PORT,
176 1.1 ad IC_CONFIGURE,
177 1.1 ad IFVERBOSE("ATE port")
178 1.1 ad },
179 1.1 ad {
180 1.1 ad I2O_CLASS_ATE_PERIPHERAL,
181 1.1 ad 0,
182 1.5 ad COMMENT("ATE peripheral")
183 1.1 ad },
184 1.1 ad {
185 1.1 ad I2O_CLASS_FLOPPY_CONTROLLER,
186 1.1 ad IC_CONFIGURE,
187 1.1 ad IFVERBOSE("floppy controller")
188 1.1 ad },
189 1.1 ad {
190 1.1 ad I2O_CLASS_FLOPPY_DEVICE,
191 1.1 ad 0,
192 1.5 ad COMMENT("floppy device")
193 1.1 ad },
194 1.1 ad {
195 1.1 ad I2O_CLASS_BUS_ADAPTER_PORT,
196 1.1 ad IC_CONFIGURE,
197 1.1 ad 		IFVERBOSE("bus adapter port")
198 1.1 ad },
199 1.1 ad };
200 1.1 ad
201 1.1 ad #if defined(I2ODEBUG) && defined(I2OVERBOSE)
202 1.11 ad static const char * const iop_status[] = {
203 1.1 ad "success",
204 1.1 ad "abort (dirty)",
205 1.1 ad "abort (no data transfer)",
206 1.1 ad "abort (partial transfer)",
207 1.1 ad "error (dirty)",
208 1.1 ad "error (no data transfer)",
209 1.1 ad "error (partial transfer)",
210 1.1 ad "undefined error code",
211 1.1 ad "process abort (dirty)",
212 1.1 ad "process abort (no data transfer)",
213 1.1 ad "process abort (partial transfer)",
214 1.1 ad "transaction error",
215 1.1 ad };
216 1.1 ad #endif
217 1.1 ad
218 1.5 ad static inline u_int32_t iop_inl(struct iop_softc *, int);
219 1.5 ad static inline void iop_outl(struct iop_softc *, int, u_int32_t);
220 1.5 ad
221 1.1 ad static void iop_config_interrupts(struct device *);
222 1.11 ad static void iop_configure_devices(struct iop_softc *, int, int);
223 1.1 ad static void iop_devinfo(int, char *);
224 1.1 ad static int iop_print(void *, const char *);
225 1.1 ad static void iop_shutdown(void *);
226 1.1 ad static int iop_submatch(struct device *, struct cfdata *, void *);
227 1.1 ad static int iop_vendor_print(void *, const char *);
228 1.1 ad
229 1.11 ad static void iop_adjqparam(struct iop_softc *, int);
230 1.9 ad static void iop_create_reconf_thread(void *);
231 1.11 ad static int iop_handle_reply(struct iop_softc *, u_int32_t);
232 1.1 ad static int iop_hrt_get(struct iop_softc *);
233 1.1 ad static int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
234 1.11 ad static void iop_intr_event(struct device *, struct iop_msg *, void *);
235 1.5 ad static int iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
236 1.5 ad u_int32_t);
237 1.11 ad static void iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
238 1.11 ad static void iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
239 1.1 ad static int iop_ofifo_init(struct iop_softc *);
240 1.15 ad static int iop_passthrough(struct iop_softc *, struct ioppt *,
241 1.15 ad struct proc *);
242 1.9 ad static void iop_reconf_thread(void *);
243 1.1 ad static void iop_release_mfa(struct iop_softc *, u_int32_t);
244 1.1 ad static int iop_reset(struct iop_softc *);
245 1.1 ad static int iop_systab_set(struct iop_softc *);
246 1.11 ad static void iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);
247 1.1 ad
248 1.1 ad #ifdef I2ODEBUG
249 1.11 ad static void iop_reply_print(struct iop_softc *, struct i2o_reply *);
250 1.1 ad #endif
251 1.5 ad
252 1.5 ad static inline u_int32_t
253 1.5 ad iop_inl(struct iop_softc *sc, int off)
254 1.5 ad {
255 1.5 ad
256 1.5 ad bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
257 1.5 ad BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
258 1.5 ad return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
259 1.5 ad }
260 1.5 ad
261 1.5 ad static inline void
262 1.5 ad iop_outl(struct iop_softc *sc, int off, u_int32_t val)
263 1.5 ad {
264 1.5 ad
265 1.5 ad bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
266 1.5 ad bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
267 1.5 ad BUS_SPACE_BARRIER_WRITE);
268 1.5 ad }
269 1.5 ad
270 1.1 ad /*
271 1.11 ad * Initialise the IOP and our interface.
272 1.1 ad */
273 1.5 ad void
274 1.1 ad iop_init(struct iop_softc *sc, const char *intrstr)
275 1.1 ad {
276 1.11 ad struct iop_msg *im;
277 1.15 ad int rv, i, j, state, nsegs;
278 1.1 ad u_int32_t mask;
279 1.1 ad char ident[64];
280 1.1 ad
281 1.15 ad state = 0;
282 1.15 ad
283 1.15 ad printf("I2O adapter");
284 1.15 ad
285 1.11 ad if (iop_ictxhashtbl == NULL)
286 1.5 ad iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
287 1.5 ad M_DEVBUF, M_NOWAIT, &iop_ictxhash);
288 1.1 ad
289 1.15 ad /* Disable interrupts at the IOP. */
290 1.15 ad mask = iop_inl(sc, IOP_REG_INTR_MASK);
291 1.15 ad iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);
292 1.5 ad
293 1.15 ad /* Allocate a scratch DMA map for small miscellaneous shared data. */
294 1.15 ad if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
295 1.15 ad BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
296 1.15 ad printf("%s: cannot create scratch dmamap\n",
297 1.15 ad sc->sc_dv.dv_xname);
298 1.5 ad return;
299 1.1 ad }
300 1.15 ad
301 1.15 ad if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
302 1.15 ad sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
303 1.15 ad printf("%s: cannot alloc scratch dmamem\n",
304 1.15 ad sc->sc_dv.dv_xname);
305 1.15 ad goto bail_out;
306 1.15 ad }
307 1.15 ad state++;
308 1.15 ad
309 1.15 ad if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
310 1.15 ad &sc->sc_scr, 0)) {
311 1.15 ad printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
312 1.15 ad goto bail_out;
313 1.15 ad }
314 1.15 ad state++;
315 1.15 ad
316 1.15 ad if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
317 1.15 ad PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
318 1.15 ad printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
319 1.15 ad goto bail_out;
320 1.15 ad }
321 1.15 ad state++;
322 1.15 ad
323 1.21 ad #ifdef I2ODEBUG
324 1.21 ad /* So that our debug checks don't choke. */
325 1.21 ad sc->sc_framesize = 128;
326 1.21 ad #endif
327 1.21 ad
328 1.15 ad /* Reset the adapter and request status. */
329 1.15 ad if ((rv = iop_reset(sc)) != 0) {
330 1.15 ad printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
331 1.15 ad goto bail_out;
332 1.15 ad }
333 1.15 ad
334 1.15 ad if ((rv = iop_status_get(sc, 1)) != 0) {
335 1.15 ad printf("%s: not responding (get status)\n",
336 1.15 ad sc->sc_dv.dv_xname);
337 1.15 ad goto bail_out;
338 1.15 ad }
339 1.15 ad
340 1.5 ad sc->sc_flags |= IOP_HAVESTATUS;
341 1.5 ad iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
342 1.1 ad ident, sizeof(ident));
343 1.5 ad printf(" <%s>\n", ident);
344 1.5 ad
345 1.5 ad #ifdef I2ODEBUG
346 1.5 ad printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
347 1.5 ad le16toh(sc->sc_status.orgid),
348 1.5 ad (le32toh(sc->sc_status.segnumber) >> 12) & 15);
349 1.5 ad printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
350 1.5 ad printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
351 1.5 ad le32toh(sc->sc_status.desiredprivmemsize),
352 1.5 ad le32toh(sc->sc_status.currentprivmemsize),
353 1.5 ad le32toh(sc->sc_status.currentprivmembase));
354 1.5 ad printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
355 1.5 ad le32toh(sc->sc_status.desiredpriviosize),
356 1.5 ad le32toh(sc->sc_status.currentpriviosize),
357 1.5 ad le32toh(sc->sc_status.currentpriviobase));
358 1.5 ad #endif
359 1.1 ad
360 1.11 ad sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
361 1.11 ad if (sc->sc_maxob > IOP_MAX_OUTBOUND)
362 1.11 ad sc->sc_maxob = IOP_MAX_OUTBOUND;
363 1.11 ad sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
364 1.11 ad if (sc->sc_maxib > IOP_MAX_INBOUND)
365 1.11 ad sc->sc_maxib = IOP_MAX_INBOUND;
366 1.19 ad sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
367 1.19 ad if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
368 1.19 ad sc->sc_framesize = IOP_MAX_MSG_SIZE;
369 1.19 ad
370 1.19 ad #if defined(I2ODEBUG) || defined(DIAGNOSTIC)
371 1.19 ad if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
372 1.19 ad printf("%s: frame size too small (%d)\n",
373 1.19 ad sc->sc_dv.dv_xname, sc->sc_framesize);
374 1.23 ad goto bail_out;
375 1.19 ad }
376 1.19 ad #endif
377 1.11 ad
378 1.11 ad /* Allocate message wrappers. */
379 1.22 tsutsui im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
380 1.23 ad if (im == NULL) {
381 1.23 ad printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
382 1.23 ad goto bail_out;
383 1.23 ad }
384 1.23 ad state++;
385 1.11 ad sc->sc_ims = im;
386 1.11 ad SLIST_INIT(&sc->sc_im_freelist);
387 1.11 ad
388 1.15 ad for (i = 0, state++; i < sc->sc_maxib; i++, im++) {
389 1.11 ad rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
390 1.11 ad IOP_MAX_SEGS, IOP_MAX_XFER, 0,
391 1.11 ad BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
392 1.11 ad &im->im_xfer[0].ix_map);
393 1.11 ad if (rv != 0) {
394 1.11 ad 			printf("%s: couldn't create dmamap (%d)\n",
395 1.11 ad sc->sc_dv.dv_xname, rv);
396 1.15 ad goto bail_out;
397 1.11 ad }
398 1.11 ad
399 1.11 ad im->im_tctx = i;
400 1.11 ad SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
401 1.11 ad }
402 1.1 ad
403 1.17 wiz /* Initialise the IOP's outbound FIFO. */
404 1.5 ad if (iop_ofifo_init(sc) != 0) {
405 1.15 ad 		printf("%s: unable to init outbound FIFO\n",
406 1.15 ad sc->sc_dv.dv_xname);
407 1.15 ad goto bail_out;
408 1.5 ad }
409 1.1 ad
410 1.5 ad /*
411 1.5 ad * Defer further configuration until (a) interrupts are working and
412 1.5 ad * (b) we have enough information to build the system table.
413 1.5 ad */
414 1.1 ad config_interrupts((struct device *)sc, iop_config_interrupts);
415 1.1 ad
416 1.5 ad /* Configure shutdown hook before we start any device activity. */
417 1.1 ad if (iop_sdh == NULL)
418 1.1 ad iop_sdh = shutdownhook_establish(iop_shutdown, NULL);
419 1.1 ad
420 1.1 ad /* Ensure interrupts are enabled at the IOP. */
421 1.5 ad mask = iop_inl(sc, IOP_REG_INTR_MASK);
422 1.5 ad iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);
423 1.1 ad
424 1.1 ad if (intrstr != NULL)
425 1.1 ad printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
426 1.1 ad intrstr);
427 1.1 ad
428 1.1 ad #ifdef I2ODEBUG
429 1.1 ad printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
430 1.11 ad sc->sc_dv.dv_xname, sc->sc_maxib,
431 1.11 ad le32toh(sc->sc_status.maxinboundmframes),
432 1.11 ad sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
433 1.1 ad #endif
434 1.1 ad
435 1.5 ad lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
436 1.15 ad return;
437 1.15 ad
438 1.15 ad bail_out:
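	/*
	 * Unwind in reverse order of allocation; `state' records how far
	 * initialisation got before failing.
	 */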
439 1.15 ad if (state > 3) {
440 1.15 ad for (j = 0; j < i; j++)
441 1.15 ad bus_dmamap_destroy(sc->sc_dmat,
442 1.15 ad sc->sc_ims[j].im_xfer[0].ix_map);
443 1.15 ad free(sc->sc_ims, M_DEVBUF);
444 1.15 ad }
445 1.15 ad if (state > 2)
446 1.15 ad bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
447 1.15 ad if (state > 1)
448 1.15 ad bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
449 1.15 ad if (state > 0)
450 1.15 ad bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
451 1.15 ad bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
452 1.1 ad }
453 1.1 ad
454 1.1 ad /*
455 1.5 ad * Perform autoconfiguration tasks.
456 1.1 ad */
457 1.1 ad static void
458 1.1 ad iop_config_interrupts(struct device *self)
459 1.1 ad {
460 1.18 ad struct iop_attach_args ia;
461 1.5 ad struct iop_softc *sc, *iop;
462 1.5 ad struct i2o_systab_entry *ste;
463 1.5 ad int rv, i, niop;
464 1.1 ad
465 1.1 ad sc = (struct iop_softc *)self;
466 1.5 ad LIST_INIT(&sc->sc_iilist);
467 1.5 ad
468 1.5 ad printf("%s: configuring...\n", sc->sc_dv.dv_xname);
469 1.1 ad
470 1.5 ad if (iop_hrt_get(sc) != 0) {
471 1.5 ad printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
472 1.5 ad return;
473 1.5 ad }
474 1.1 ad
475 1.5 ad /*
476 1.5 ad * Build the system table.
477 1.5 ad */
478 1.5 ad if (iop_systab == NULL) {
479 1.5 ad for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
480 1.5 ad if ((iop = device_lookup(&iop_cd, i)) == NULL)
481 1.5 ad continue;
482 1.5 ad if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
483 1.5 ad continue;
484 1.11 ad if (iop_status_get(iop, 1) != 0) {
485 1.5 ad printf("%s: unable to retrieve status\n",
486 1.5 ad sc->sc_dv.dv_xname);
487 1.5 ad iop->sc_flags &= ~IOP_HAVESTATUS;
488 1.5 ad continue;
489 1.5 ad }
490 1.5 ad niop++;
491 1.5 ad }
492 1.5 ad if (niop == 0)
493 1.5 ad return;
494 1.5 ad
495 1.5 ad i = sizeof(struct i2o_systab_entry) * (niop - 1) +
496 1.5 ad sizeof(struct i2o_systab);
497 1.5 ad iop_systab_size = i;
498 1.22 tsutsui iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);
499 1.5 ad
500 1.5 ad iop_systab->numentries = niop;
501 1.5 ad iop_systab->version = I2O_VERSION_11;
502 1.5 ad
503 1.5 ad for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
504 1.5 ad if ((iop = device_lookup(&iop_cd, i)) == NULL)
505 1.5 ad continue;
506 1.5 ad if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
507 1.5 ad continue;
508 1.5 ad
509 1.5 ad ste->orgid = iop->sc_status.orgid;
510 1.5 ad ste->iopid = iop->sc_dv.dv_unit + 2;
511 1.5 ad ste->segnumber =
512 1.5 ad htole32(le32toh(iop->sc_status.segnumber) & ~4095);
513 1.5 ad ste->iopcaps = iop->sc_status.iopcaps;
514 1.5 ad ste->inboundmsgframesize =
515 1.5 ad iop->sc_status.inboundmframesize;
516 1.5 ad ste->inboundmsgportaddresslow =
517 1.5 ad htole32(iop->sc_memaddr + IOP_REG_IFIFO);
518 1.5 ad ste++;
519 1.5 ad }
520 1.5 ad }
521 1.5 ad
522 1.11 ad /*
523 1.11 ad * Post the system table to the IOP and bring it to the OPERATIONAL
524 1.11 ad * state.
525 1.11 ad */
526 1.5 ad if (iop_systab_set(sc) != 0) {
527 1.5 ad printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
528 1.5 ad return;
529 1.5 ad }
530 1.5 ad if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
531 1.11 ad 30000) != 0) {
532 1.5 ad printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
533 1.5 ad return;
534 1.5 ad }
535 1.5 ad
536 1.5 ad /*
537 1.5 ad * Set up an event handler for this IOP.
538 1.5 ad */
539 1.5 ad sc->sc_eventii.ii_dv = self;
540 1.5 ad sc->sc_eventii.ii_intr = iop_intr_event;
541 1.15 ad sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
542 1.5 ad sc->sc_eventii.ii_tid = I2O_TID_IOP;
543 1.11 ad iop_initiator_register(sc, &sc->sc_eventii);
544 1.11 ad
545 1.11 ad rv = iop_util_eventreg(sc, &sc->sc_eventii,
546 1.11 ad I2O_EVENT_EXEC_RESOURCE_LIMITS |
547 1.11 ad I2O_EVENT_EXEC_CONNECTION_FAIL |
548 1.11 ad I2O_EVENT_EXEC_ADAPTER_FAULT |
549 1.11 ad I2O_EVENT_EXEC_POWER_FAIL |
550 1.11 ad I2O_EVENT_EXEC_RESET_PENDING |
551 1.11 ad I2O_EVENT_EXEC_RESET_IMMINENT |
552 1.11 ad I2O_EVENT_EXEC_HARDWARE_FAIL |
553 1.11 ad I2O_EVENT_EXEC_XCT_CHANGE |
554 1.11 ad I2O_EVENT_EXEC_DDM_AVAILIBILITY |
555 1.11 ad I2O_EVENT_GEN_DEVICE_RESET |
556 1.11 ad I2O_EVENT_GEN_STATE_CHANGE |
557 1.11 ad I2O_EVENT_GEN_GENERAL_WARNING);
558 1.11 ad if (rv != 0) {
559 1.5 ad 		printf("%s: unable to register for events\n", sc->sc_dv.dv_xname);
560 1.5 ad return;
561 1.5 ad }
562 1.5 ad
563 1.18 ad /*
564 1.18 ad * Attempt to match and attach a product-specific extension.
565 1.18 ad */
566 1.1 ad ia.ia_class = I2O_CLASS_ANY;
567 1.1 ad ia.ia_tid = I2O_TID_IOP;
568 1.1 ad config_found_sm(self, &ia, iop_vendor_print, iop_submatch);
569 1.5 ad
570 1.18 ad /*
571 1.18 ad * Start device configuration.
572 1.18 ad */
573 1.11 ad lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
574 1.11 ad if ((rv = iop_reconfigure(sc, 0)) == -1) {
575 1.5 ad printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
576 1.5 ad return;
577 1.5 ad }
578 1.11 ad lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
579 1.5 ad
580 1.9 ad kthread_create(iop_create_reconf_thread, sc);
581 1.9 ad }
582 1.9 ad
583 1.9 ad /*
584 1.9 ad * Create the reconfiguration thread. Called after the standard kernel
585 1.9 ad * threads have been created.
586 1.9 ad */
587 1.9 ad static void
588 1.9 ad iop_create_reconf_thread(void *cookie)
589 1.9 ad {
590 1.9 ad struct iop_softc *sc;
591 1.9 ad int rv;
592 1.9 ad
593 1.9 ad sc = cookie;
594 1.11 ad sc->sc_flags |= IOP_ONLINE;
595 1.9 ad
596 1.9 ad rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
597 1.11 ad "%s", sc->sc_dv.dv_xname);
598 1.11 ad if (rv != 0) {
599 1.9 ad 		printf("%s: unable to create reconfiguration thread (%d)\n",
600 1.11 ad sc->sc_dv.dv_xname, rv);
601 1.11 ad return;
602 1.11 ad }
603 1.5 ad }
604 1.5 ad
605 1.5 ad /*
606 1.5 ad * Reconfiguration thread; listens for LCT change notification, and
607 1.14 wiz * initiates re-configuration if received.
608 1.5 ad */
609 1.5 ad static void
610 1.9 ad iop_reconf_thread(void *cookie)
611 1.5 ad {
612 1.5 ad struct iop_softc *sc;
613 1.5 ad struct i2o_lct lct;
614 1.5 ad u_int32_t chgind;
615 1.11 ad int rv;
616 1.5 ad
617 1.5 ad sc = cookie;
618 1.11 ad chgind = sc->sc_chgind + 1;
619 1.5 ad
620 1.5 ad for (;;) {
621 1.11 ad DPRINTF(("%s: async reconfig: requested 0x%08x\n",
622 1.11 ad sc->sc_dv.dv_xname, chgind));
623 1.5 ad
624 1.11 ad PHOLD(sc->sc_reconf_proc);
625 1.11 ad rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
626 1.11 ad PRELE(sc->sc_reconf_proc);
627 1.11 ad
628 1.11 ad DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
629 1.11 ad sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));
630 1.11 ad
631 1.11 ad if (rv == 0 &&
632 1.11 ad lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
633 1.11 ad iop_reconfigure(sc, le32toh(lct.changeindicator));
634 1.11 ad chgind = sc->sc_chgind + 1;
635 1.11 ad lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
636 1.5 ad }
637 1.5 ad
638 1.9 ad tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
639 1.5 ad }
640 1.5 ad }
641 1.5 ad
642 1.5 ad /*
643 1.5 ad * Reconfigure: find new and removed devices.
644 1.5 ad */
645 1.18 ad int
646 1.11 ad iop_reconfigure(struct iop_softc *sc, u_int chgind)
647 1.5 ad {
648 1.5 ad struct iop_msg *im;
649 1.11 ad struct i2o_hba_bus_scan mf;
650 1.5 ad struct i2o_lct_entry *le;
651 1.5 ad struct iop_initiator *ii, *nextii;
652 1.5 ad int rv, tid, i;
653 1.5 ad
654 1.1 ad /*
655 1.5 ad * If the reconfiguration request isn't the result of LCT change
656 1.5 ad * notification, then be more thorough: ask all bus ports to scan
657 1.5 ad * their busses. Wait up to 5 minutes for each bus port to complete
658 1.5 ad * the request.
659 1.1 ad */
660 1.5 ad if (chgind == 0) {
661 1.5 ad if ((rv = iop_lct_get(sc)) != 0) {
662 1.5 ad DPRINTF(("iop_reconfigure: unable to read LCT\n"));
663 1.11 ad return (rv);
664 1.5 ad }
665 1.5 ad
666 1.5 ad le = sc->sc_lct->entry;
667 1.5 ad for (i = 0; i < sc->sc_nlctent; i++, le++) {
668 1.5 ad if ((le16toh(le->classid) & 4095) !=
669 1.5 ad I2O_CLASS_BUS_ADAPTER_PORT)
670 1.5 ad continue;
671 1.15 ad tid = le16toh(le->localtid) & 4095;
672 1.5 ad
673 1.15 ad im = iop_msg_alloc(sc, IM_WAIT);
674 1.5 ad
675 1.11 ad mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
676 1.11 ad mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
677 1.11 ad mf.msgictx = IOP_ICTX;
678 1.11 ad mf.msgtctx = im->im_tctx;
679 1.5 ad
680 1.5 ad DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
681 1.5 ad tid));
682 1.5 ad
683 1.11 ad rv = iop_msg_post(sc, im, &mf, 5*60*1000);
684 1.11 ad iop_msg_free(sc, im);
685 1.11 ad #ifdef I2ODEBUG
686 1.11 ad if (rv != 0)
687 1.11 ad printf("%s: bus scan failed\n",
688 1.11 ad sc->sc_dv.dv_xname);
689 1.11 ad #endif
690 1.5 ad }
691 1.11 ad } else if (chgind <= sc->sc_chgind) {
692 1.5 ad DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
693 1.11 ad return (0);
694 1.5 ad }
695 1.5 ad
696 1.5 ad /* Re-read the LCT and determine if it has changed. */
697 1.5 ad if ((rv = iop_lct_get(sc)) != 0) {
698 1.5 ad DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
699 1.11 ad return (rv);
700 1.5 ad }
701 1.5 ad DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));
702 1.5 ad
703 1.11 ad chgind = le32toh(sc->sc_lct->changeindicator);
704 1.11 ad if (chgind == sc->sc_chgind) {
705 1.5 ad DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
706 1.11 ad return (0);
707 1.5 ad }
708 1.5 ad DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
709 1.11 ad sc->sc_chgind = chgind;
710 1.5 ad
711 1.5 ad if (sc->sc_tidmap != NULL)
712 1.5 ad free(sc->sc_tidmap, M_DEVBUF);
713 1.5 ad sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
714 1.22 tsutsui M_DEVBUF, M_NOWAIT|M_ZERO);
715 1.5 ad
716 1.11 ad /* Allow 1 queued command per device while we're configuring. */
717 1.11 ad iop_adjqparam(sc, 1);
718 1.11 ad
719 1.11 ad /*
720 1.11 ad * Match and attach child devices. We configure high-level devices
721 1.11 ad * first so that any claims will propagate throughout the LCT,
722 1.11 ad * hopefully masking off aliased devices as a result.
723 1.11 ad *
724 1.11 ad * Re-reading the LCT at this point is a little dangerous, but we'll
725 1.11 ad * trust the IOP (and the operator) to behave itself...
726 1.11 ad */
727 1.11 ad iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
728 1.11 ad IC_CONFIGURE | IC_PRIORITY);
729 1.11 ad if ((rv = iop_lct_get(sc)) != 0)
730 1.11 ad DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
731 1.11 ad iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
732 1.11 ad IC_CONFIGURE);
733 1.5 ad
734 1.5 ad for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
735 1.11 ad nextii = LIST_NEXT(ii, ii_list);
736 1.5 ad
737 1.5 ad /* Detach devices that were configured, but are now gone. */
738 1.5 ad for (i = 0; i < sc->sc_nlctent; i++)
739 1.5 ad if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
740 1.5 ad break;
741 1.5 ad if (i == sc->sc_nlctent ||
742 1.5 ad (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
743 1.5 ad config_detach(ii->ii_dv, DETACH_FORCE);
744 1.5 ad
745 1.5 ad /*
746 1.5 ad * Tell initiators that existed before the re-configuration
747 1.5 ad * to re-configure.
748 1.5 ad */
749 1.5 ad if (ii->ii_reconfig == NULL)
750 1.5 ad continue;
751 1.5 ad if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
752 1.5 ad printf("%s: %s failed reconfigure (%d)\n",
753 1.5 ad sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
754 1.5 ad }
755 1.5 ad
756 1.11 ad /* Re-adjust queue parameters and return. */
757 1.11 ad if (sc->sc_nii != 0)
758 1.11 ad iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
759 1.11 ad / sc->sc_nii);
760 1.11 ad
761 1.11 ad return (0);
762 1.1 ad }
763 1.1 ad
764 1.1 ad /*
765 1.5 ad * Configure I2O devices into the system.
766 1.1 ad */
767 1.1 ad static void
768 1.11 ad iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
769 1.1 ad {
770 1.1 ad struct iop_attach_args ia;
771 1.5 ad struct iop_initiator *ii;
772 1.1 ad const struct i2o_lct_entry *le;
773 1.9 ad struct device *dv;
774 1.8 ad int i, j, nent;
775 1.11 ad u_int usertid;
776 1.1 ad
777 1.1 ad nent = sc->sc_nlctent;
778 1.1 ad for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
779 1.15 ad sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;
780 1.9 ad
781 1.11 ad /* Ignore the device if it's in use. */
782 1.11 ad usertid = le32toh(le->usertid) & 4095;
783 1.11 ad if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
784 1.1 ad continue;
785 1.1 ad
786 1.1 ad ia.ia_class = le16toh(le->classid) & 4095;
787 1.9 ad ia.ia_tid = sc->sc_tidmap[i].it_tid;
788 1.8 ad
789 1.8 ad /* Ignore uninteresting devices. */
790 1.8 ad for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
791 1.8 ad if (iop_class[j].ic_class == ia.ia_class)
792 1.8 ad break;
793 1.8 ad if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
794 1.11 ad (iop_class[j].ic_flags & mask) != maskval)
795 1.8 ad continue;
796 1.1 ad
797 1.1 ad /*
798 1.5 ad * Try to configure the device only if it's not already
799 1.5 ad * configured.
800 1.1 ad */
801 1.7 ad LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
802 1.9 ad if (ia.ia_tid == ii->ii_tid) {
803 1.9 ad sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
804 1.9 ad strcpy(sc->sc_tidmap[i].it_dvname,
805 1.9 ad ii->ii_dv->dv_xname);
806 1.11 ad break;
807 1.9 ad }
808 1.7 ad }
809 1.5 ad if (ii != NULL)
810 1.5 ad continue;
811 1.5 ad
812 1.9 ad dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
813 1.9 ad if (dv != NULL) {
814 1.11 ad sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
815 1.9 ad strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
816 1.9 ad }
817 1.1 ad }
818 1.1 ad }
819 1.1 ad
820 1.11 ad /*
821 1.11 ad * Adjust queue parameters for all child devices.
822 1.11 ad */
823 1.11 ad static void
824 1.11 ad iop_adjqparam(struct iop_softc *sc, int mpi)
825 1.11 ad {
826 1.11 ad struct iop_initiator *ii;
827 1.11 ad
828 1.11 ad LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
829 1.11 ad if (ii->ii_adjqparam != NULL)
830 1.11 ad (*ii->ii_adjqparam)(ii->ii_dv, mpi);
831 1.11 ad }
832 1.11 ad
833 1.1 ad static void
834 1.1 ad iop_devinfo(int class, char *devinfo)
835 1.1 ad {
836 1.1 ad #ifdef I2OVERBOSE
837 1.1 ad int i;
838 1.1 ad
839 1.1 ad for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
840 1.1 ad if (class == iop_class[i].ic_class)
841 1.1 ad break;
842 1.1 ad
843 1.1 ad if (i == sizeof(iop_class) / sizeof(iop_class[0]))
844 1.1 ad sprintf(devinfo, "device (class 0x%x)", class);
845 1.1 ad else
846 1.1 ad strcpy(devinfo, iop_class[i].ic_caption);
847 1.1 ad #else
848 1.1 ad
849 1.1 ad sprintf(devinfo, "device (class 0x%x)", class);
850 1.1 ad #endif
851 1.1 ad }
852 1.1 ad
853 1.1 ad static int
854 1.1 ad iop_print(void *aux, const char *pnp)
855 1.1 ad {
856 1.1 ad struct iop_attach_args *ia;
857 1.1 ad char devinfo[256];
858 1.1 ad
859 1.1 ad ia = aux;
860 1.1 ad
861 1.1 ad if (pnp != NULL) {
862 1.1 ad iop_devinfo(ia->ia_class, devinfo);
863 1.1 ad printf("%s at %s", devinfo, pnp);
864 1.1 ad }
865 1.1 ad printf(" tid %d", ia->ia_tid);
866 1.1 ad return (UNCONF);
867 1.1 ad }
868 1.1 ad
869 1.1 ad static int
870 1.1 ad iop_vendor_print(void *aux, const char *pnp)
871 1.1 ad {
872 1.1 ad
873 1.18 ad return (QUIET);
874 1.1 ad }
875 1.1 ad
876 1.1 ad static int
877 1.1 ad iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
878 1.1 ad {
879 1.1 ad struct iop_attach_args *ia;
880 1.1 ad
881 1.1 ad ia = aux;
882 1.1 ad
883 1.1 ad if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
884 1.1 ad return (0);
885 1.1 ad
886 1.26 thorpej return (config_match(parent, cf, aux));
887 1.1 ad }
888 1.1 ad
889 1.1 ad /*
890 1.1 ad * Shut down all configured IOPs.
891 1.1 ad */
892 1.1 ad static void
893 1.1 ad iop_shutdown(void *junk)
894 1.1 ad {
895 1.1 ad struct iop_softc *sc;
896 1.1 ad int i;
897 1.1 ad
898 1.11 ad printf("shutting down iop devices...");
899 1.1 ad
900 1.1 ad for (i = 0; i < iop_cd.cd_ndevs; i++) {
901 1.1 ad if ((sc = device_lookup(&iop_cd, i)) == NULL)
902 1.1 ad continue;
903 1.5 ad if ((sc->sc_flags & IOP_ONLINE) == 0)
904 1.5 ad continue;
905 1.27 ad
906 1.5 ad iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
907 1.12 ad 0, 5000);
908 1.27 ad
909 1.27 ad if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
910 1.27 ad /*
911 1.27 ad * Some AMI firmware revisions will go to sleep and
912 1.27 ad * never come back after this.
913 1.27 ad */
914 1.27 ad iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
915 1.27 ad IOP_ICTX, 0, 1000);
916 1.27 ad }
917 1.1 ad }
918 1.1 ad
919 1.1 ad /* Wait. Some boards could still be flushing, stupidly enough. */
920 1.1 ad delay(5000*1000);
921 1.18 ad printf(" done\n");
922 1.1 ad }
923 1.1 ad
924 1.1 ad /*
925 1.11 ad * Retrieve IOP status.
926 1.1 ad */
927 1.18 ad int
928 1.11 ad iop_status_get(struct iop_softc *sc, int nosleep)
929 1.1 ad {
930 1.11 ad struct i2o_exec_status_get mf;
931 1.15 ad struct i2o_status *st;
932 1.15 ad paddr_t pa;
933 1.11 ad int rv, i;
934 1.1 ad
935 1.15 ad pa = sc->sc_scr_seg->ds_addr;
936 1.15 ad st = (struct i2o_status *)sc->sc_scr;
937 1.15 ad
938 1.11 ad mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
939 1.11 ad mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
940 1.11 ad mf.reserved[0] = 0;
941 1.11 ad mf.reserved[1] = 0;
942 1.11 ad mf.reserved[2] = 0;
943 1.11 ad mf.reserved[3] = 0;
944 1.15 ad mf.addrlow = (u_int32_t)pa;
945 1.15 ad mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
946 1.11 ad mf.length = sizeof(sc->sc_status);
947 1.1 ad
948 1.15 ad memset(st, 0, sizeof(*st));
949 1.15 ad bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
950 1.15 ad BUS_DMASYNC_PREREAD);
951 1.1 ad
952 1.11 ad if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
953 1.1 ad return (rv);
954 1.1 ad
955 1.11 ad for (i = 25; i != 0; i--) {
956 1.15 ad bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
957 1.15 ad sizeof(*st), BUS_DMASYNC_POSTREAD);
958 1.15 ad if (st->syncbyte == 0xff)
959 1.11 ad break;
960 1.11 ad if (nosleep)
961 1.11 ad DELAY(100*1000);
962 1.11 ad else
963 1.11 ad tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
964 1.11 ad }
965 1.1 ad
966 1.21 ad if (st->syncbyte != 0xff) {
967 1.21 ad printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
968 1.11 ad rv = EIO;
969 1.21 ad } else {
970 1.15 ad memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
971 1.11 ad rv = 0;
972 1.15 ad }
973 1.15 ad
974 1.11 ad return (rv);
975 1.1 ad }
976 1.1 ad
977 1.1 ad /*
978 1.17 wiz * Initialize and populate the IOP's outbound FIFO.
979 1.1 ad */
980 1.1 ad static int
981 1.1 ad iop_ofifo_init(struct iop_softc *sc)
982 1.1 ad {
983 1.1 ad bus_addr_t addr;
984 1.5 ad bus_dma_segment_t seg;
985 1.11 ad struct i2o_exec_outbound_init *mf;
986 1.5 ad int i, rseg, rv;
987 1.15 ad u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;
988 1.1 ad
989 1.15 ad sw = (u_int32_t *)sc->sc_scr;
990 1.1 ad
991 1.11 ad mf = (struct i2o_exec_outbound_init *)mb;
992 1.11 ad mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
993 1.11 ad mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
994 1.11 ad mf->msgictx = IOP_ICTX;
995 1.15 ad mf->msgtctx = 0;
996 1.11 ad mf->pagesize = PAGE_SIZE;
997 1.19 ad mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);
998 1.1 ad
999 1.5 ad /*
1000 1.5 ad * The I2O spec says that there are two SGLs: one for the status
1001 1.5 ad * word, and one for a list of discarded MFAs. It continues to say
1002 1.5 ad * that if you don't want to get the list of MFAs, an IGNORE SGL is
1003 1.11 ad * necessary; this isn't the case (and is in fact a bad thing).
1004 1.5 ad */
1005 1.15 ad mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
1006 1.15 ad I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
1007 1.15 ad mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
1008 1.15 ad (u_int32_t)sc->sc_scr_seg->ds_addr;
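	/*
	 * The two SG words just appended are not included in sizeof(*mf);
	 * bump the message size field (upper 16 bits of the first word)
	 * to account for them.
	 */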
1009 1.15 ad mb[0] += 2 << 16;
1010 1.15 ad
1011 1.15 ad *sw = 0;
1012 1.15 ad bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1013 1.15 ad BUS_DMASYNC_PREREAD);
1014 1.15 ad
1015 1.15 ad if ((rv = iop_post(sc, mb)) != 0)
1016 1.1 ad return (rv);
1017 1.1 ad
1018 1.15 ad POLL(5000,
1019 1.15 ad (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1020 1.15 ad BUS_DMASYNC_POSTREAD),
1021 1.15 ad *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));
1022 1.15 ad
1023 1.15 ad if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
1024 1.15 ad printf("%s: outbound FIFO init failed (%d)\n",
1025 1.15 ad sc->sc_dv.dv_xname, le32toh(*sw));
1026 1.5 ad return (EIO);
1027 1.1 ad }
1028 1.1 ad
1029 1.11 ad /* Allocate DMA safe memory for the reply frames. */
1030 1.1 ad if (sc->sc_rep_phys == 0) {
1031 1.19 ad sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;
1032 1.5 ad
1033 1.5 ad rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
1034 1.5 ad 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
1035 1.5 ad if (rv != 0) {
1036 1.5 ad printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
1037 1.5 ad rv);
1038 1.5 ad return (rv);
1039 1.5 ad }
1040 1.5 ad
1041 1.5 ad rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
1042 1.5 ad &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1043 1.5 ad if (rv != 0) {
1044 1.5 ad printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
1045 1.5 ad return (rv);
1046 1.5 ad }
1047 1.5 ad
1048 1.5 ad rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
1049 1.5 ad sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
1050 1.5 ad if (rv != 0) {
1051 1.15 ad printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
1052 1.15 ad rv);
1053 1.5 ad return (rv);
1054 1.5 ad }
1055 1.5 ad
1056 1.15 ad rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
1057 1.15 ad sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
1058 1.5 ad if (rv != 0) {
1059 1.5 ad printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
1060 1.5 ad return (rv);
1061 1.5 ad }
1062 1.5 ad
1063 1.5 ad sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
1064 1.1 ad }
1065 1.1 ad
1066 1.1 ad /* Populate the outbound FIFO. */
1067 1.11 ad for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
1068 1.5 ad iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
1069 1.19 ad addr += sc->sc_framesize;
1070 1.1 ad }
1071 1.1 ad
1072 1.1 ad return (0);
1073 1.1 ad }
1074 1.1 ad
1075 1.1 ad /*
1076 1.1 ad * Read the specified number of bytes from the IOP's hardware resource table.
1077 1.1 ad */
1078 1.1 ad static int
1079 1.1 ad iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
1080 1.1 ad {
1081 1.1 ad struct iop_msg *im;
1082 1.1 ad int rv;
1083 1.11 ad struct i2o_exec_hrt_get *mf;
1084 1.11 ad u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1085 1.1 ad
1086 1.15 ad im = iop_msg_alloc(sc, IM_WAIT);
1087 1.11 ad mf = (struct i2o_exec_hrt_get *)mb;
1088 1.11 ad mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
1089 1.11 ad mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
1090 1.11 ad mf->msgictx = IOP_ICTX;
1091 1.11 ad mf->msgtctx = im->im_tctx;
1092 1.1 ad
1093 1.15 ad iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
1094 1.11 ad rv = iop_msg_post(sc, im, mb, 30000);
1095 1.1 ad iop_msg_unmap(sc, im);
1096 1.11 ad iop_msg_free(sc, im);
1097 1.1 ad return (rv);
1098 1.1 ad }
1099 1.1 ad
1100 1.1 ad /*
1101 1.5 ad * Read the IOP's hardware resource table.
1102 1.1 ad */
1103 1.1 ad static int
1104 1.1 ad iop_hrt_get(struct iop_softc *sc)
1105 1.1 ad {
1106 1.1 ad struct i2o_hrt hrthdr, *hrt;
1107 1.1 ad int size, rv;
1108 1.1 ad
1109 1.11 ad PHOLD(curproc);
1110 1.11 ad rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
1111 1.11 ad PRELE(curproc);
1112 1.11 ad if (rv != 0)
1113 1.1 ad return (rv);
1114 1.1 ad
1115 1.5 ad DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
1116 1.5 ad le16toh(hrthdr.numentries)));
1117 1.5 ad
1118 1.5 ad size = sizeof(struct i2o_hrt) +
1119 1.15 ad (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
1120 1.1 ad hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
1121 1.1 ad
1122 1.1 ad if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
1123 1.1 ad free(hrt, M_DEVBUF);
1124 1.1 ad return (rv);
1125 1.1 ad }
1126 1.1 ad
1127 1.1 ad if (sc->sc_hrt != NULL)
1128 1.1 ad free(sc->sc_hrt, M_DEVBUF);
1129 1.1 ad sc->sc_hrt = hrt;
1130 1.1 ad return (0);
1131 1.1 ad }
1132 1.1 ad
1133 1.1 ad /*
1134 1.1 ad * Request the specified number of bytes from the IOP's logical
1135 1.5 ad * configuration table. If a change indicator is specified, this
1136 1.11 ad  * is a verbatim notification request, so the caller must be prepared
1137 1.5 ad * to wait indefinitely.
1138 1.1 ad */
1139 1.1 ad static int
1140 1.5 ad iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
1141 1.5 ad u_int32_t chgind)
1142 1.1 ad {
1143 1.1 ad struct iop_msg *im;
1144 1.11 ad struct i2o_exec_lct_notify *mf;
1145 1.1 ad int rv;
1146 1.11 ad u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1147 1.1 ad
1148 1.15 ad im = iop_msg_alloc(sc, IM_WAIT);
1149 1.1 ad memset(lct, 0, size);
1150 1.1 ad
1151 1.11 ad mf = (struct i2o_exec_lct_notify *)mb;
1152 1.11 ad mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
1153 1.11 ad mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
1154 1.11 ad mf->msgictx = IOP_ICTX;
1155 1.11 ad mf->msgtctx = im->im_tctx;
1156 1.11 ad mf->classid = I2O_CLASS_ANY;
1157 1.11 ad mf->changeindicator = chgind;
1158 1.5 ad
1159 1.9 ad #ifdef I2ODEBUG
1160 1.9 ad printf("iop_lct_get0: reading LCT");
1161 1.9 ad if (chgind != 0)
1162 1.9 ad printf(" (async)");
1163 1.9 ad printf("\n");
1164 1.9 ad #endif
1165 1.1 ad
1166 1.15 ad iop_msg_map(sc, im, mb, lct, size, 0, NULL);
1167 1.11 ad rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
1168 1.1 ad iop_msg_unmap(sc, im);
1169 1.11 ad iop_msg_free(sc, im);
1170 1.1 ad return (rv);
1171 1.1 ad }
1172 1.1 ad
1173 1.1 ad /*
1174 1.6 ad * Read the IOP's logical configuration table.
1175 1.1 ad */
1176 1.1 ad int
1177 1.1 ad iop_lct_get(struct iop_softc *sc)
1178 1.1 ad {
1179 1.5 ad int esize, size, rv;
1180 1.5 ad struct i2o_lct *lct;
1181 1.1 ad
1182 1.5 ad esize = le32toh(sc->sc_status.expectedlctsize);
1183 1.5 ad lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
1184 1.5 ad if (lct == NULL)
1185 1.1 ad return (ENOMEM);
1186 1.1 ad
1187 1.5 ad if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
1188 1.1 ad free(lct, M_DEVBUF);
1189 1.1 ad return (rv);
1190 1.1 ad }
1191 1.1 ad
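	/*
	 * If the table is not the size the IOP estimated, re-read it with
	 * the correct size.
	 */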
1192 1.5 ad size = le16toh(lct->tablesize) << 2;
1193 1.5 ad if (esize != size) {
1194 1.1 ad free(lct, M_DEVBUF);
1195 1.5 ad lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
1196 1.5 ad if (lct == NULL)
1197 1.5 ad return (ENOMEM);
1198 1.5 ad
1199 1.5 ad if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
1200 1.5 ad free(lct, M_DEVBUF);
1201 1.5 ad return (rv);
1202 1.5 ad }
1203 1.1 ad }
1204 1.5 ad
1205 1.5 ad /* Swap in the new LCT. */
1206 1.1 ad if (sc->sc_lct != NULL)
1207 1.1 ad free(sc->sc_lct, M_DEVBUF);
1208 1.1 ad sc->sc_lct = lct;
1209 1.1 ad sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
1210 1.1 ad sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
1211 1.1 ad sizeof(struct i2o_lct_entry);
1212 1.1 ad return (0);
1213 1.1 ad }
1214 1.1 ad
1215 1.1 ad /*
1216 1.11 ad * Request the specified parameter group from the target. If an initiator
1217 1.11 ad * is specified (a) don't wait for the operation to complete, but instead
1218 1.11 ad * let the initiator's interrupt handler deal with the reply and (b) place a
1219 1.11 ad * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
1220 1.1 ad */
1221 1.1 ad int
1222 1.16 ad iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
1223 1.16 ad int size, struct iop_initiator *ii)
1224 1.1 ad {
1225 1.1 ad struct iop_msg *im;
1226 1.11 ad struct i2o_util_params_op *mf;
1227 1.11 ad struct i2o_reply *rf;
1228 1.16 ad int rv;
1229 1.11 ad struct iop_pgop *pgop;
1230 1.11 ad u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1231 1.1 ad
1232 1.15 ad im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
1233 1.11 ad if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
1234 1.11 ad iop_msg_free(sc, im);
1235 1.11 ad return (ENOMEM);
1236 1.11 ad }
1237 1.11 ad if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
1238 1.11 ad iop_msg_free(sc, im);
1239 1.11 ad free(pgop, M_DEVBUF);
1240 1.11 ad return (ENOMEM);
1241 1.11 ad }
1242 1.11 ad im->im_dvcontext = pgop;
1243 1.11 ad im->im_rb = rf;
1244 1.1 ad
1245 1.11 ad mf = (struct i2o_util_params_op *)mb;
1246 1.11 ad mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1247 1.16 ad mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
1248 1.11 ad mf->msgictx = IOP_ICTX;
1249 1.11 ad mf->msgtctx = im->im_tctx;
1250 1.11 ad mf->flags = 0;
1251 1.11 ad
1252 1.11 ad pgop->olh.count = htole16(1);
1253 1.11 ad pgop->olh.reserved = htole16(0);
1254 1.16 ad pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
1255 1.11 ad pgop->oat.fieldcount = htole16(0xffff);
1256 1.11 ad pgop->oat.group = htole16(group);
1257 1.11 ad
1258 1.11 ad if (ii == NULL)
1259 1.11 ad PHOLD(curproc);
1260 1.1 ad
1261 1.5 ad memset(buf, 0, size);
1262 1.15 ad iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
1263 1.16 ad iop_msg_map(sc, im, mb, buf, size, 0, NULL);
1264 1.11 ad rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));
1265 1.11 ad
1266 1.11 ad if (ii == NULL)
1267 1.11 ad PRELE(curproc);
1268 1.11 ad
1269 1.11 ad 	/* Detect errors; allow partial transfers to count as success. */
1270 1.11 ad if (ii == NULL && rv == 0) {
1271 1.11 ad if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
1272 1.11 ad le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
1273 1.11 ad rv = 0;
1274 1.11 ad else
1275 1.11 ad rv = (rf->reqstatus != 0 ? EIO : 0);
1276 1.16 ad
1277 1.16 ad if (rv != 0)
1278 1.16 ad printf("%s: FIELD_GET failed for tid %d group %d\n",
1279 1.16 ad sc->sc_dv.dv_xname, tid, group);
1280 1.11 ad }
1281 1.11 ad
1282 1.11 ad if (ii == NULL || rv != 0) {
1283 1.11 ad iop_msg_unmap(sc, im);
1284 1.11 ad iop_msg_free(sc, im);
1285 1.11 ad free(pgop, M_DEVBUF);
1286 1.11 ad free(rf, M_DEVBUF);
1287 1.11 ad }
1288 1.1 ad
1289 1.1 ad return (rv);
1290 1.11 ad }
1291 1.1 ad
1292 1.1 ad /*
1293 1.16 ad * Set a single field in a scalar parameter group.
1294 1.16 ad */
1295 1.16 ad int
1296 1.16 ad iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
1297 1.16 ad int size, int field)
1298 1.16 ad {
1299 1.16 ad struct iop_msg *im;
1300 1.16 ad struct i2o_util_params_op *mf;
1301 1.16 ad struct iop_pgop *pgop;
1302 1.16 ad int rv, totsize;
1303 1.16 ad u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1304 1.16 ad
1305 1.16 ad totsize = sizeof(*pgop) + size;
1306 1.16 ad
1307 1.16 ad im = iop_msg_alloc(sc, IM_WAIT);
1308 1.16 ad if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
1309 1.16 ad iop_msg_free(sc, im);
1310 1.16 ad return (ENOMEM);
1311 1.16 ad }
1312 1.16 ad
1313 1.16 ad mf = (struct i2o_util_params_op *)mb;
1314 1.16 ad mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1315 1.16 ad mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1316 1.16 ad mf->msgictx = IOP_ICTX;
1317 1.16 ad mf->msgtctx = im->im_tctx;
1318 1.16 ad mf->flags = 0;
1319 1.16 ad
1320 1.16 ad pgop->olh.count = htole16(1);
1321 1.16 ad pgop->olh.reserved = htole16(0);
1322 1.16 ad pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
1323 1.16 ad pgop->oat.fieldcount = htole16(1);
1324 1.16 ad pgop->oat.group = htole16(group);
1325 1.16 ad pgop->oat.fields[0] = htole16(field);
1326 1.16 ad memcpy(pgop + 1, buf, size);
1327 1.16 ad
1328 1.16 ad iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
1329 1.16 ad rv = iop_msg_post(sc, im, mb, 30000);
1330 1.16 ad if (rv != 0)
1331 1.16 ad printf("%s: FIELD_SET failed for tid %d group %d\n",
1332 1.16 ad sc->sc_dv.dv_xname, tid, group);
1333 1.16 ad
1334 1.16 ad iop_msg_unmap(sc, im);
1335 1.16 ad iop_msg_free(sc, im);
1336 1.16 ad free(pgop, M_DEVBUF);
1337 1.16 ad return (rv);
1338 1.16 ad }
1339 1.16 ad
1340 1.16 ad /*
1341 1.16 ad  * Delete all rows in a tabular parameter group.
1342 1.16 ad */
1343 1.16 ad int
1344 1.16 ad iop_table_clear(struct iop_softc *sc, int tid, int group)
1345 1.16 ad {
1346 1.16 ad struct iop_msg *im;
1347 1.16 ad struct i2o_util_params_op *mf;
1348 1.16 ad struct iop_pgop pgop;
1349 1.16 ad u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1350 1.16 ad int rv;
1351 1.16 ad
1352 1.16 ad im = iop_msg_alloc(sc, IM_WAIT);
1353 1.16 ad
1354 1.16 ad mf = (struct i2o_util_params_op *)mb;
1355 1.16 ad mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1356 1.16 ad mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1357 1.16 ad mf->msgictx = IOP_ICTX;
1358 1.16 ad mf->msgtctx = im->im_tctx;
1359 1.16 ad mf->flags = 0;
1360 1.16 ad
1361 1.16 ad pgop.olh.count = htole16(1);
1362 1.16 ad pgop.olh.reserved = htole16(0);
1363 1.16 ad pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
1364 1.16 ad pgop.oat.fieldcount = htole16(0);
1365 1.16 ad pgop.oat.group = htole16(group);
1366 1.16 ad pgop.oat.fields[0] = htole16(0);
1367 1.16 ad
1368 1.16 ad PHOLD(curproc);
1369 1.16 ad iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
1370 1.16 ad rv = iop_msg_post(sc, im, mb, 30000);
1371 1.16 ad if (rv != 0)
1372 1.16 ad printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
1373 1.16 ad sc->sc_dv.dv_xname, tid, group);
1374 1.16 ad
1375 1.16 ad iop_msg_unmap(sc, im);
1376 1.16 ad PRELE(curproc);
1377 1.16 ad iop_msg_free(sc, im);
1378 1.16 ad return (rv);
1379 1.16 ad }
1380 1.16 ad
1381 1.16 ad /*
1382 1.16 ad * Add a single row to a tabular parameter group. The row can have only one
1383 1.16 ad * field.
1384 1.16 ad */
1385 1.16 ad int
1386 1.16 ad iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
1387 1.16 ad int size, int row)
1388 1.16 ad {
1389 1.16 ad struct iop_msg *im;
1390 1.16 ad struct i2o_util_params_op *mf;
1391 1.16 ad struct iop_pgop *pgop;
1392 1.16 ad int rv, totsize;
1393 1.16 ad u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1394 1.16 ad
1395 1.16 ad totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;
1396 1.16 ad
1397 1.16 ad im = iop_msg_alloc(sc, IM_WAIT);
1398 1.16 ad if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
1399 1.16 ad iop_msg_free(sc, im);
1400 1.16 ad return (ENOMEM);
1401 1.16 ad }
1402 1.16 ad
1403 1.16 ad mf = (struct i2o_util_params_op *)mb;
1404 1.16 ad mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1405 1.16 ad mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1406 1.16 ad mf->msgictx = IOP_ICTX;
1407 1.16 ad mf->msgtctx = im->im_tctx;
1408 1.16 ad mf->flags = 0;
1409 1.16 ad
1410 1.16 ad pgop->olh.count = htole16(1);
1411 1.16 ad pgop->olh.reserved = htole16(0);
1412 1.16 ad pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
1413 1.16 ad pgop->oat.fieldcount = htole16(1);
1414 1.16 ad pgop->oat.group = htole16(group);
1415 1.16 ad pgop->oat.fields[0] = htole16(0); /* FieldIdx */
1416 1.16 ad pgop->oat.fields[1] = htole16(1); /* RowCount */
1417 1.16 ad pgop->oat.fields[2] = htole16(row); /* KeyValue */
1418 1.16 ad memcpy(&pgop->oat.fields[3], buf, size);
1419 1.16 ad
1420 1.16 ad iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
1421 1.16 ad rv = iop_msg_post(sc, im, mb, 30000);
1422 1.16 ad if (rv != 0)
1423 1.16 ad printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
1424 1.16 ad sc->sc_dv.dv_xname, tid, group, row);
1425 1.16 ad
1426 1.16 ad iop_msg_unmap(sc, im);
1427 1.16 ad iop_msg_free(sc, im);
1428 1.16 ad free(pgop, M_DEVBUF);
1429 1.16 ad return (rv);
1430 1.16 ad }
1431 1.16 ad
1432 1.16 ad /*
1433 1.5 ad * Execute a simple command (no parameters).
1434 1.1 ad */
1435 1.1 ad int
1436 1.5 ad iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
1437 1.5 ad int async, int timo)
1438 1.1 ad {
1439 1.1 ad struct iop_msg *im;
1440 1.11 ad struct i2o_msg mf;
1441 1.5 ad int rv, fl;
1442 1.1 ad
1443 1.11 ad fl = (async != 0 ? IM_WAIT : IM_POLL);
1444 1.15 ad im = iop_msg_alloc(sc, fl);
1445 1.1 ad
1446 1.11 ad mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1447 1.11 ad mf.msgfunc = I2O_MSGFUNC(tid, function);
1448 1.11 ad mf.msgictx = ictx;
1449 1.11 ad mf.msgtctx = im->im_tctx;
1450 1.1 ad
1451 1.11 ad rv = iop_msg_post(sc, im, &mf, timo);
1452 1.11 ad iop_msg_free(sc, im);
1453 1.1 ad return (rv);
1454 1.1 ad }
1455 1.1 ad
1456 1.1 ad /*
1457 1.5 ad * Post the system table to the IOP.
1458 1.1 ad */
1459 1.1 ad static int
1460 1.1 ad iop_systab_set(struct iop_softc *sc)
1461 1.1 ad {
1462 1.11 ad struct i2o_exec_sys_tab_set *mf;
1463 1.1 ad struct iop_msg *im;
1464 1.13 ad bus_space_handle_t bsh;
1465 1.13 ad bus_addr_t boo;
1466 1.1 ad u_int32_t mema[2], ioa[2];
1467 1.1 ad int rv;
1468 1.11 ad u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1469 1.1 ad
1470 1.15 ad im = iop_msg_alloc(sc, IM_WAIT);
1471 1.1 ad
1472 1.11 ad mf = (struct i2o_exec_sys_tab_set *)mb;
1473 1.11 ad mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
1474 1.11 ad mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
1475 1.11 ad mf->msgictx = IOP_ICTX;
1476 1.11 ad mf->msgtctx = im->im_tctx;
1477 1.11 ad mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
1478 1.11 ad mf->segnumber = 0;
1479 1.5 ad
1480 1.13 ad mema[1] = sc->sc_status.desiredprivmemsize;
1481 1.13 ad ioa[1] = sc->sc_status.desiredpriviosize;
1482 1.13 ad
1483 1.13 ad if (mema[1] != 0) {
1484 1.13 ad rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
1485 1.13 ad le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
1486 1.13 ad mema[0] = htole32(boo);
1487 1.13 ad if (rv != 0) {
1488 1.13 ad printf("%s: can't alloc priv mem space, err = %d\n",
1489 1.13 ad sc->sc_dv.dv_xname, rv);
1490 1.13 ad mema[0] = 0;
1491 1.13 ad mema[1] = 0;
1492 1.13 ad }
1493 1.13 ad }
1494 1.13 ad
1495 1.13 ad if (ioa[1] != 0) {
1496 1.13 ad rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
1497 1.13 ad le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
1498 1.13 ad ioa[0] = htole32(boo);
1499 1.13 ad if (rv != 0) {
1500 1.13 ad printf("%s: can't alloc priv i/o space, err = %d\n",
1501 1.13 ad sc->sc_dv.dv_xname, rv);
1502 1.13 ad ioa[0] = 0;
1503 1.13 ad ioa[1] = 0;
1504 1.13 ad }
1505 1.13 ad }
1506 1.1 ad
1507 1.11 ad PHOLD(curproc);
1508 1.15 ad iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
1509 1.15 ad iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
1510 1.15 ad iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
1511 1.11 ad rv = iop_msg_post(sc, im, mb, 5000);
1512 1.1 ad iop_msg_unmap(sc, im);
1513 1.11 ad iop_msg_free(sc, im);
1514 1.11 ad PRELE(curproc);
1515 1.1 ad return (rv);
1516 1.1 ad }
1517 1.1 ad
1518 1.1 ad /*
1519 1.11 ad * Reset the IOP. Must be called with interrupts disabled.
1520 1.1 ad */
1521 1.1 ad static int
1522 1.1 ad iop_reset(struct iop_softc *sc)
1523 1.1 ad {
1524 1.15 ad u_int32_t mfa, *sw;
1525 1.11 ad struct i2o_exec_iop_reset mf;
1526 1.1 ad int rv;
1527 1.15 ad paddr_t pa;
1528 1.1 ad
1529 1.15 ad sw = (u_int32_t *)sc->sc_scr;
1530 1.15 ad pa = sc->sc_scr_seg->ds_addr;
1531 1.1 ad
1532 1.11 ad mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
1533 1.11 ad mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
1534 1.11 ad mf.reserved[0] = 0;
1535 1.11 ad mf.reserved[1] = 0;
1536 1.11 ad mf.reserved[2] = 0;
1537 1.11 ad mf.reserved[3] = 0;
1538 1.15 ad mf.statuslow = (u_int32_t)pa;
1539 1.15 ad mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);
1540 1.15 ad
1541 1.15 ad *sw = htole32(0);
1542 1.15 ad bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1543 1.15 ad BUS_DMASYNC_PREREAD);
1544 1.1 ad
1545 1.11 ad if ((rv = iop_post(sc, (u_int32_t *)&mf)))
1546 1.1 ad return (rv);
1547 1.1 ad
1548 1.15 ad POLL(2500,
1549 1.15 ad (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1550 1.15 ad BUS_DMASYNC_POSTREAD), *sw != 0));
1551 1.15 ad if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
1552 1.15 ad printf("%s: reset rejected, status 0x%x\n",
1553 1.15 ad sc->sc_dv.dv_xname, le32toh(*sw));
1554 1.1 ad return (EIO);
1555 1.1 ad }
1556 1.1 ad
1557 1.1 ad /*
1558 1.5 ad * IOP is now in the INIT state. Wait no more than 10 seconds for
1559 1.1 ad * the inbound queue to become responsive.
1560 1.1 ad */
1561 1.5 ad POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
1562 1.1 ad if (mfa == IOP_MFA_EMPTY) {
1563 1.1 ad printf("%s: reset failed\n", sc->sc_dv.dv_xname);
1564 1.1 ad return (EIO);
1565 1.1 ad }
1566 1.1 ad
1567 1.1 ad iop_release_mfa(sc, mfa);
1568 1.1 ad return (0);
1569 1.1 ad }
1570 1.1 ad
1571 1.1 ad /*
1572 1.11 ad * Register a new initiator. Must be called with the configuration lock
1573 1.11 ad * held.
1574 1.1 ad */
1575 1.11 ad void
1576 1.1 ad iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
1577 1.1 ad {
1578 1.11 ad static int ictxgen;
1579 1.11 ad int s;
1580 1.5 ad
1581 1.11 ad /* 0 is reserved (by us) for system messages. */
1582 1.11 ad ii->ii_ictx = ++ictxgen;
1583 1.1 ad
1584 1.11 ad /*
1585 1.11 ad * `Utility initiators' don't make it onto the per-IOP initiator list
1586 1.11 ad * (which is used only for configuration), but do get one slot on
1587 1.11 ad * the inbound queue.
1588 1.11 ad */
1589 1.11 ad if ((ii->ii_flags & II_UTILITY) == 0) {
1590 1.11 ad LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
1591 1.11 ad sc->sc_nii++;
1592 1.11 ad } else
1593 1.11 ad sc->sc_nuii++;
1594 1.11 ad
1595 1.11 ad s = splbio();
1596 1.5 ad LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
1597 1.11 ad splx(s);
1598 1.1 ad }
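
/*
 * Illustrative sketch (not part of the original source): a child driver
 * would typically fill in its iop_initiator before registering, along
 * these lines:
 *
 *	ii->ii_dv = &child_sc->sc_dv;		(the child's struct device)
 *	ii->ii_intr = child_reply_handler;	(reply callback)
 *	ii->ii_flags = 0;			(or II_UTILITY / II_NOTCTX)
 *	ii->ii_tid = tid;
 *	iop_initiator_register(sc, ii);
 *
 * The field names follow how ii_dv, ii_intr, ii_flags and ii_tid are used
 * elsewhere in this file; `child_sc' and `child_reply_handler' are
 * hypothetical.
 */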
1599 1.1 ad
1600 1.1 ad /*
1601 1.11 ad * Unregister an initiator. Must be called with the configuration lock
1602 1.11 ad * held.
1603 1.1 ad */
1604 1.1 ad void
1605 1.1 ad iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
1606 1.1 ad {
1607 1.11 ad int s;
1608 1.11 ad
1609 1.11 ad if ((ii->ii_flags & II_UTILITY) == 0) {
1610 1.11 ad LIST_REMOVE(ii, ii_list);
1611 1.11 ad sc->sc_nii--;
1612 1.11 ad } else
1613 1.11 ad sc->sc_nuii--;
1614 1.1 ad
1615 1.11 ad s = splbio();
1616 1.5 ad LIST_REMOVE(ii, ii_hash);
1617 1.11 ad splx(s);
1618 1.1 ad }
1619 1.1 ad
1620 1.1 ad /*
1621 1.11 ad * Handle a reply frame from the IOP.
1622 1.1 ad */
1623 1.1 ad static int
1624 1.5 ad iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
1625 1.1 ad {
1626 1.1 ad struct iop_msg *im;
1627 1.1 ad struct i2o_reply *rb;
1628 1.11 ad struct i2o_fault_notify *fn;
1629 1.1 ad struct iop_initiator *ii;
1630 1.5 ad u_int off, ictx, tctx, status, size;
1631 1.1 ad
1632 1.1 ad off = (int)(rmfa - sc->sc_rep_phys);
1633 1.1 ad rb = (struct i2o_reply *)(sc->sc_rep + off);
1634 1.1 ad
1635 1.15 ad /* Perform reply queue DMA synchronisation. */
1636 1.11 ad bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
1637 1.19 ad sc->sc_framesize, BUS_DMASYNC_POSTREAD);
1638 1.11 ad if (--sc->sc_curib != 0)
1639 1.1 ad bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
1640 1.1 ad 0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);
1641 1.1 ad
1642 1.1 ad #ifdef I2ODEBUG
1643 1.1 ad if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
1644 1.5 ad panic("iop_handle_reply: 64-bit reply");
1645 1.1 ad #endif
1646 1.1 ad /*
1647 1.1 ad * Find the initiator.
1648 1.1 ad */
1649 1.1 ad ictx = le32toh(rb->msgictx);
1650 1.1 ad if (ictx == IOP_ICTX)
1651 1.1 ad ii = NULL;
1652 1.1 ad else {
1653 1.5 ad ii = LIST_FIRST(IOP_ICTXHASH(ictx));
1654 1.5 ad for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
1655 1.5 ad if (ii->ii_ictx == ictx)
1656 1.5 ad break;
1657 1.5 ad if (ii == NULL) {
1658 1.1 ad #ifdef I2ODEBUG
1659 1.11 ad iop_reply_print(sc, rb);
1660 1.1 ad #endif
1661 1.11 ad printf("%s: WARNING: bad ictx returned (%x)\n",
1662 1.5 ad sc->sc_dv.dv_xname, ictx);
1663 1.5 ad return (-1);
1664 1.5 ad }
1665 1.1 ad }
1666 1.1 ad
1667 1.11 ad /*
1668 1.14 wiz * If we received a transport failure notice, we've got to dig the
1669 1.11 ad * transaction context (if any) out of the original message frame,
1670 1.11 ad * and then release the original MFA back to the inbound FIFO.
1671 1.11 ad */
1672 1.11 ad if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
1673 1.11 ad status = I2O_STATUS_SUCCESS;
1674 1.11 ad
1675 1.11 ad fn = (struct i2o_fault_notify *)rb;
1676 1.15 ad tctx = iop_inl(sc, fn->lowmfa + 12);
1677 1.11 ad iop_release_mfa(sc, fn->lowmfa);
1678 1.11 ad iop_tfn_print(sc, fn);
1679 1.11 ad } else {
1680 1.11 ad status = rb->reqstatus;
1681 1.11 ad tctx = le32toh(rb->msgtctx);
1682 1.11 ad }
1683 1.1 ad
1684 1.15 ad if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
1685 1.1 ad /*
1686 1.1 ad * This initiator tracks state using message wrappers.
1687 1.1 ad *
1688 1.1 ad * Find the originating message wrapper, and if requested
1689 1.1 ad * notify the initiator.
1690 1.1 ad */
1691 1.11 ad im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
1692 1.11 ad if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
1693 1.11 ad (im->im_flags & IM_ALLOCED) == 0 ||
1694 1.11 ad tctx != im->im_tctx) {
1695 1.11 ad printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
1696 1.11 ad sc->sc_dv.dv_xname, tctx, im);
1697 1.11 ad if (im != NULL)
1698 1.11 ad printf("%s: flags=0x%08x tctx=0x%08x\n",
1699 1.11 ad sc->sc_dv.dv_xname, im->im_flags,
1700 1.11 ad im->im_tctx);
1701 1.5 ad #ifdef I2ODEBUG
1702 1.11 ad if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
1703 1.11 ad iop_reply_print(sc, rb);
1704 1.5 ad #endif
1705 1.5 ad return (-1);
1706 1.5 ad }
1707 1.11 ad
1708 1.11 ad if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1709 1.11 ad im->im_flags |= IM_FAIL;
1710 1.11 ad
1711 1.1 ad #ifdef I2ODEBUG
1712 1.1 ad if ((im->im_flags & IM_REPLIED) != 0)
1713 1.5 ad panic("%s: dup reply", sc->sc_dv.dv_xname);
1714 1.1 ad #endif
1715 1.11 ad im->im_flags |= IM_REPLIED;
1716 1.1 ad
1717 1.11 ad #ifdef I2ODEBUG
1718 1.11 ad if (status != I2O_STATUS_SUCCESS)
1719 1.11 ad iop_reply_print(sc, rb);
1720 1.11 ad #endif
1721 1.11 ad im->im_reqstatus = status;
1722 1.1 ad
1723 1.11 ad /* Copy the reply frame, if requested. */
1724 1.11 ad if (im->im_rb != NULL) {
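			/*
			 * The upper 16 bits of msgflags give the reply size
			 * in 32-bit words; shifting by 14 rather than 16
			 * yields the size in bytes, and & ~3 clears the two
			 * low bits that the shorter shift leaves behind.
			 */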
1725 1.11 ad size = (le32toh(rb->msgflags) >> 14) & ~3;
1726 1.1 ad #ifdef I2ODEBUG
1727 1.19 ad if (size > sc->sc_framesize)
1728 1.11 ad panic("iop_handle_reply: reply too large");
1729 1.1 ad #endif
1730 1.11 ad memcpy(im->im_rb, rb, size);
1731 1.11 ad }
1732 1.11 ad
1733 1.1 ad /* Notify the initiator. */
1734 1.11 ad if ((im->im_flags & IM_WAIT) != 0)
1735 1.1 ad wakeup(im);
1736 1.13 ad else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
1737 1.1 ad (*ii->ii_intr)(ii->ii_dv, im, rb);
1738 1.1 ad } else {
1739 1.1 ad /*
1740 1.1 ad * This initiator discards message wrappers.
1741 1.1 ad *
1742 1.1 ad * Simply pass the reply frame to the initiator.
1743 1.1 ad */
1744 1.1 ad (*ii->ii_intr)(ii->ii_dv, NULL, rb);
1745 1.1 ad }
1746 1.1 ad
1747 1.1 ad return (status);
1748 1.1 ad }
1749 1.1 ad
1750 1.1 ad /*
1751 1.11 ad * Handle an interrupt from the IOP.
1752 1.1 ad */
1753 1.1 ad int
1754 1.1 ad iop_intr(void *arg)
1755 1.1 ad {
1756 1.1 ad struct iop_softc *sc;
1757 1.5 ad u_int32_t rmfa;
1758 1.1 ad
1759 1.1 ad sc = arg;
1760 1.1 ad
1761 1.5 ad if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
1762 1.5 ad return (0);
1763 1.5 ad
1764 1.5 ad for (;;) {
1765 1.5 ad /* Double read to account for IOP bug. */
1766 1.11 ad if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
1767 1.11 ad rmfa = iop_inl(sc, IOP_REG_OFIFO);
1768 1.11 ad if (rmfa == IOP_MFA_EMPTY)
1769 1.11 ad break;
1770 1.11 ad }
1771 1.5 ad iop_handle_reply(sc, rmfa);
1772 1.11 ad iop_outl(sc, IOP_REG_OFIFO, rmfa);
1773 1.1 ad }
1774 1.1 ad
1775 1.5 ad return (1);
1776 1.5 ad }
1777 1.5 ad
1778 1.5 ad /*
1779 1.5 ad * Handle an event signalled by the executive.
1780 1.5 ad */
1781 1.5 ad static void
1782 1.5 ad iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
1783 1.5 ad {
1784 1.5 ad struct i2o_util_event_register_reply *rb;
1785 1.5 ad struct iop_softc *sc;
1786 1.5 ad u_int event;
1787 1.5 ad
1788 1.5 ad sc = (struct iop_softc *)dv;
1789 1.5 ad rb = reply;
1790 1.5 ad
1791 1.11 ad if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1792 1.5 ad return;
1793 1.5 ad
1794 1.11 ad event = le32toh(rb->event);
1795 1.5 ad printf("%s: event 0x%08x received\n", dv->dv_xname, event);
1796 1.1 ad }
1797 1.1 ad
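/*
 * Message wrapper life cycle, as implemented by the routines below: a
 * wrapper is taken from the free list with iop_msg_alloc(), any data
 * buffers are described with iop_msg_map() or iop_msg_map_bio(), the frame
 * is sent with iop_msg_post(), and once the transaction is complete the
 * buffers are released with iop_msg_unmap() and the wrapper with
 * iop_msg_free().  A minimal sketch (illustrative only; `mb' stands for a
 * caller-built message frame):
 *
 *	im = iop_msg_alloc(sc, IM_WAIT);
 *	iop_msg_map(sc, im, mb, buf, buflen, 1, NULL);
 *	rv = iop_msg_post(sc, im, mb, 5000);
 *	iop_msg_unmap(sc, im);
 *	iop_msg_free(sc, im);
 */
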
1798 1.1 ad /*
1799 1.1 ad * Allocate a message wrapper.
1800 1.1 ad */
1801 1.11 ad struct iop_msg *
1802 1.15 ad iop_msg_alloc(struct iop_softc *sc, int flags)
1803 1.1 ad {
1804 1.1 ad struct iop_msg *im;
1805 1.11 ad static u_int tctxgen;
1806 1.11 ad int s, i;
1807 1.1 ad
1808 1.1 ad #ifdef I2ODEBUG
1809 1.1 ad if ((flags & IM_SYSMASK) != 0)
1810 1.1 ad panic("iop_msg_alloc: system flags specified");
1811 1.1 ad #endif
1812 1.1 ad
1813 1.15 ad s = splbio();
1814 1.11 ad im = SLIST_FIRST(&sc->sc_im_freelist);
1815 1.11 ad #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
1816 1.11 ad if (im == NULL)
1817 1.11 ad panic("iop_msg_alloc: no free wrappers");
1818 1.11 ad #endif
1819 1.11 ad SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
1820 1.11 ad splx(s);
1821 1.1 ad
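	/*
	 * A transaction context combines the wrapper's index within sc_ims
	 * (the low bits, IOP_TCTX_MASK) with a generation count in the high
	 * bits; iop_handle_reply() uses the full value to reject stale or
	 * corrupt contexts quoted back by the IOP.
	 */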
1822 1.11 ad im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
1823 1.11 ad tctxgen += (1 << IOP_TCTX_SHIFT);
1824 1.1 ad im->im_flags = flags | IM_ALLOCED;
1825 1.11 ad im->im_rb = NULL;
1826 1.11 ad i = 0;
1827 1.11 ad do {
1828 1.11 ad im->im_xfer[i++].ix_size = 0;
1829 1.11 ad } while (i < IOP_MAX_MSG_XFERS);
1830 1.1 ad
1831 1.11 ad return (im);
1832 1.1 ad }
1833 1.1 ad
1834 1.1 ad /*
1835 1.1 ad * Free a message wrapper.
1836 1.1 ad */
1837 1.1 ad void
1838 1.11 ad iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
1839 1.1 ad {
1840 1.1 ad int s;
1841 1.1 ad
1842 1.1 ad #ifdef I2ODEBUG
1843 1.1 ad if ((im->im_flags & IM_ALLOCED) == 0)
1844 1.1 ad panic("iop_msg_free: wrapper not allocated");
1845 1.1 ad #endif
1846 1.1 ad
1847 1.1 ad im->im_flags = 0;
1848 1.11 ad s = splbio();
1849 1.11 ad SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
1850 1.1 ad splx(s);
1851 1.1 ad }
1852 1.1 ad
1853 1.1 ad /*
1854 1.5 ad * Map a data transfer. Write a scatter-gather list into the message frame.
1855 1.1 ad */
1856 1.1 ad int
1857 1.11 ad iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1858 1.15 ad void *xferaddr, int xfersize, int out, struct proc *up)
1859 1.1 ad {
1860 1.11 ad bus_dmamap_t dm;
1861 1.11 ad bus_dma_segment_t *ds;
1862 1.1 ad struct iop_xfer *ix;
1863 1.11 ad u_int rv, i, nsegs, flg, off, xn;
1864 1.11 ad u_int32_t *p;
1865 1.5 ad
1866 1.11 ad for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
1867 1.1 ad if (ix->ix_size == 0)
1868 1.1 ad break;
1869 1.11 ad
1870 1.1 ad #ifdef I2ODEBUG
1871 1.11 ad if (xfersize == 0)
1872 1.11 ad panic("iop_msg_map: null transfer");
1873 1.11 ad if (xfersize > IOP_MAX_XFER)
1874 1.11 ad panic("iop_msg_map: transfer too large");
1875 1.11 ad if (xn == IOP_MAX_MSG_XFERS)
1876 1.1 ad panic("iop_msg_map: too many xfers");
1877 1.1 ad #endif
1878 1.1 ad
1879 1.11 ad /*
1880 1.11 ad * Only the first DMA map is static.
1881 1.11 ad */
1882 1.11 ad if (xn != 0) {
1883 1.1 ad rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
1884 1.11 ad IOP_MAX_SEGS, IOP_MAX_XFER, 0,
1885 1.1 ad BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
1886 1.1 ad if (rv != 0)
1887 1.1 ad return (rv);
1888 1.1 ad }
1889 1.1 ad
1890 1.11 ad dm = ix->ix_map;
1891 1.15 ad rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
1892 1.15 ad (up == NULL ? BUS_DMA_NOWAIT : 0));
1893 1.11 ad if (rv != 0)
1894 1.11 ad goto bad;
1895 1.11 ad
1896 1.11 ad /*
1897 1.11 ad * How many SIMPLE SG elements can we fit in this message?
1898 1.11 ad */
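	/*
	 * The upper 16 bits of mb[0] hold the current message size in 32-bit
	 * words, so the offset computed below indexes the first free word of
	 * the frame.  Each SIMPLE element takes two words (count/flags and
	 * physical address), hence the remaining space is divided by two.
	 */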
1899 1.11 ad off = mb[0] >> 16;
1900 1.11 ad p = mb + off;
1901 1.19 ad nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
1902 1.11 ad
1903 1.11 ad if (dm->dm_nsegs > nsegs) {
1904 1.11 ad bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
1905 1.11 ad rv = EFBIG;
1906 1.11 ad DPRINTF(("iop_msg_map: too many segs\n"));
1907 1.11 ad goto bad;
1908 1.11 ad }
1909 1.1 ad
1910 1.11 ad nsegs = dm->dm_nsegs;
1911 1.11 ad xfersize = 0;
1912 1.1 ad
1913 1.11 ad /*
1914 1.11 ad * Write out the SG list.
1915 1.11 ad */
1916 1.1 ad if (out)
1917 1.11 ad flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
1918 1.1 ad else
1919 1.11 ad flg = I2O_SGL_SIMPLE;
1920 1.1 ad
1921 1.11 ad for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
1922 1.11 ad p[0] = (u_int32_t)ds->ds_len | flg;
1923 1.11 ad p[1] = (u_int32_t)ds->ds_addr;
1924 1.11 ad xfersize += ds->ds_len;
1925 1.1 ad }
1926 1.1 ad
1927 1.11 ad p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
1928 1.11 ad p[1] = (u_int32_t)ds->ds_addr;
1929 1.11 ad xfersize += ds->ds_len;
1930 1.11 ad
1931 1.11 ad /* Fix up the transfer record, and sync the map. */
1932 1.11 ad ix->ix_flags = (out ? IX_OUT : IX_IN);
1933 1.11 ad ix->ix_size = xfersize;
1934 1.11 ad bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
1935 1.11 ad out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);
1936 1.11 ad
1937 1.1 ad /*
1938 1.1 ad * If this is the first xfer we've mapped for this message, adjust
1939 1.1 ad * the SGL offset field in the message header.
1940 1.1 ad */
1941 1.2 ad if ((im->im_flags & IM_SGLOFFADJ) == 0) {
1942 1.11 ad mb[0] += (mb[0] >> 12) & 0xf0;
1943 1.2 ad im->im_flags |= IM_SGLOFFADJ;
1944 1.2 ad }
1945 1.11 ad mb[0] += (nsegs << 17);
1946 1.11 ad return (0);
1947 1.11 ad
1948 1.11 ad bad:
1949 1.11 ad if (xn != 0)
1950 1.11 ad bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1951 1.11 ad return (rv);
1952 1.11 ad }
1953 1.11 ad
1954 1.11 ad /*
1955 1.11 ad  * Map a block I/O data transfer.  It differs from iop_msg_map() in that
1956 1.11 ad  * at most one transfer is allowed per message and PAGE_LIST addressing
1957 1.11 ad  * may be used.  Write a scatter/gather list into the message frame.
1958 1.11 ad */
1959 1.11 ad int
1960 1.11 ad iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1961 1.11 ad void *xferaddr, int xfersize, int out)
1962 1.11 ad {
1963 1.11 ad bus_dma_segment_t *ds;
1964 1.11 ad bus_dmamap_t dm;
1965 1.11 ad struct iop_xfer *ix;
1966 1.11 ad u_int rv, i, nsegs, off, slen, tlen, flg;
1967 1.11 ad paddr_t saddr, eaddr;
1968 1.11 ad u_int32_t *p;
1969 1.11 ad
1970 1.11 ad #ifdef I2ODEBUG
1971 1.11 ad if (xfersize == 0)
1972 1.11 ad panic("iop_msg_map_bio: null transfer");
1973 1.11 ad if (xfersize > IOP_MAX_XFER)
1974 1.11 ad panic("iop_msg_map_bio: transfer too large");
1975 1.11 ad if ((im->im_flags & IM_SGLOFFADJ) != 0)
1976 1.11 ad panic("iop_msg_map_bio: SGLOFFADJ");
1977 1.11 ad #endif
1978 1.11 ad
1979 1.11 ad ix = im->im_xfer;
1980 1.11 ad dm = ix->ix_map;
1981 1.15 ad rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
1982 1.15 ad BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
1983 1.11 ad if (rv != 0)
1984 1.11 ad return (rv);
1985 1.11 ad
1986 1.11 ad off = mb[0] >> 16;
1987 1.19 ad nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
1988 1.11 ad
1989 1.11 ad /*
1990 1.11 ad * If the transfer is highly fragmented and won't fit using SIMPLE
1991 1.11 ad * elements, use PAGE_LIST elements instead. SIMPLE elements are
1992 1.11 ad * potentially more efficient, both for us and the IOP.
1993 1.11 ad */
1994 1.11 ad if (dm->dm_nsegs > nsegs) {
1995 1.11 ad nsegs = 1;
1996 1.11 ad p = mb + off + 1;
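		/*
		 * `p' points at the first page-address slot; the element
		 * header word at mb[off] is written once the loop below has
		 * counted how many words the element occupies (nsegs starts
		 * at one to account for that header word).
		 */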
1997 1.11 ad
1998 1.11 ad /* XXX This should be done with a bus_space flag. */
1999 1.11 ad for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2000 1.11 ad slen = ds->ds_len;
2001 1.11 ad saddr = ds->ds_addr;
2002 1.11 ad
2003 1.11 ad while (slen > 0) {
2004 1.11 ad eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2005 1.11 ad tlen = min(eaddr - saddr, slen);
2006 1.11 ad slen -= tlen;
2007 1.11 ad *p++ = le32toh(saddr);
2008 1.11 ad saddr = eaddr;
2009 1.11 ad nsegs++;
2010 1.11 ad }
2011 1.11 ad }
2012 1.11 ad
2013 1.11 ad mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2014 1.11 ad I2O_SGL_END;
2015 1.11 ad if (out)
2016 1.11 ad mb[off] |= I2O_SGL_DATA_OUT;
2017 1.11 ad } else {
2018 1.11 ad p = mb + off;
2019 1.13 ad nsegs = dm->dm_nsegs;
2020 1.11 ad
2021 1.11 ad if (out)
2022 1.11 ad flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2023 1.11 ad else
2024 1.11 ad flg = I2O_SGL_SIMPLE;
2025 1.11 ad
2026 1.11 ad for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2027 1.11 ad p[0] = (u_int32_t)ds->ds_len | flg;
2028 1.11 ad p[1] = (u_int32_t)ds->ds_addr;
2029 1.11 ad }
2030 1.11 ad
2031 1.11 ad p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2032 1.11 ad I2O_SGL_END;
2033 1.11 ad p[1] = (u_int32_t)ds->ds_addr;
2034 1.11 ad nsegs <<= 1;
2035 1.11 ad }
2036 1.11 ad
2037 1.11 ad /* Fix up the transfer record, and sync the map. */
2038 1.11 ad ix->ix_flags = (out ? IX_OUT : IX_IN);
2039 1.11 ad ix->ix_size = xfersize;
2040 1.11 ad bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2041 1.11 ad out ? BUS_DMASYNC_POSTWRITE : BUS_DMASYNC_POSTREAD);
2042 1.11 ad
2043 1.11 ad /*
2044 1.11 ad * Adjust the SGL offset and total message size fields. We don't
2045 1.11 ad * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2046 1.11 ad */
2047 1.11 ad mb[0] += ((off << 4) + (nsegs << 16));
2048 1.1 ad return (0);
2049 1.1 ad }
2050 1.1 ad
2051 1.1 ad /*
2052 1.1 ad * Unmap all data transfers associated with a message wrapper.
2053 1.1 ad */
2054 1.1 ad void
2055 1.1 ad iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2056 1.1 ad {
2057 1.1 ad struct iop_xfer *ix;
2058 1.1 ad int i;
2059 1.11 ad
2060 1.11 ad #ifdef I2ODEBUG
2061 1.11 ad if (im->im_xfer[0].ix_size == 0)
2062 1.11 ad panic("iop_msg_unmap: no transfers mapped");
2063 1.11 ad #endif
2064 1.11 ad
2065 1.11 ad for (ix = im->im_xfer, i = 0;;) {
2066 1.1 ad bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2067 1.1 ad ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2068 1.1 ad BUS_DMASYNC_POSTREAD);
2069 1.1 ad bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2070 1.1 ad
2071 1.1 ad /* Only the first DMA map is static. */
2072 1.1 ad if (i != 0)
2073 1.1 ad bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2074 1.11 ad if ((++ix)->ix_size == 0)
2075 1.11 ad break;
2076 1.11 ad if (++i >= IOP_MAX_MSG_XFERS)
2077 1.11 ad break;
2078 1.1 ad }
2079 1.1 ad }
2080 1.1 ad
2081 1.11 ad /*
2082 1.11 ad * Post a message frame to the IOP's inbound queue.
2083 1.1 ad */
2084 1.1 ad int
2085 1.11 ad iop_post(struct iop_softc *sc, u_int32_t *mb)
2086 1.1 ad {
2087 1.11 ad u_int32_t mfa;
2088 1.11 ad int s;
2089 1.11 ad
2090 1.15 ad #ifdef I2ODEBUG
2091 1.19 ad if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2092 1.13 ad panic("iop_post: frame too large");
2093 1.15 ad #endif
2094 1.13 ad
2095 1.15 ad s = splbio();
2096 1.11 ad
2097 1.11 ad /* Allocate a slot with the IOP. */
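	/*
	 * The inbound FIFO read is retried once before giving up; this
	 * mirrors the double read of the outbound FIFO in iop_intr(), which
	 * works around an IOP quirk.
	 */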
2098 1.11 ad if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2099 1.11 ad if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2100 1.11 ad splx(s);
2101 1.11 ad printf("%s: mfa not forthcoming\n",
2102 1.11 ad sc->sc_dv.dv_xname);
2103 1.11 ad return (EAGAIN);
2104 1.11 ad }
2105 1.11 ad
2106 1.15 ad /* Perform reply buffer DMA synchronisation. */
2107 1.11 ad if (sc->sc_curib++ == 0)
2108 1.11 ad bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2109 1.11 ad sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2110 1.1 ad
2111 1.11 ad /* Copy out the message frame. */
2112 1.11 ad bus_space_write_region_4(sc->sc_iot, sc->sc_ioh, mfa, mb, mb[0] >> 16);
2113 1.13 ad bus_space_barrier(sc->sc_iot, sc->sc_ioh, mfa, (mb[0] >> 14) & ~3,
2114 1.11 ad BUS_SPACE_BARRIER_WRITE);
2115 1.11 ad
2116 1.11 ad /* Post the MFA back to the IOP. */
2117 1.11 ad iop_outl(sc, IOP_REG_IFIFO, mfa);
2118 1.1 ad
2119 1.11 ad splx(s);
2120 1.11 ad return (0);
2121 1.11 ad }
2122 1.1 ad
2123 1.11 ad /*
2124 1.11 ad * Post a message to the IOP and deal with completion.
2125 1.11 ad */
2126 1.11 ad int
2127 1.11 ad iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2128 1.11 ad {
2129 1.11 ad u_int32_t *mb;
2130 1.11 ad int rv, s;
2131 1.1 ad
2132 1.11 ad mb = xmb;
2133 1.1 ad
2134 1.11 ad /* Terminate the scatter/gather list chain. */
2135 1.1 ad if ((im->im_flags & IM_SGLOFFADJ) != 0)
2136 1.11 ad mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2137 1.1 ad
2138 1.11 ad if ((rv = iop_post(sc, mb)) != 0)
2139 1.11 ad return (rv);
2140 1.1 ad
2141 1.15 ad if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2142 1.11 ad if ((im->im_flags & IM_POLL) != 0)
2143 1.11 ad iop_msg_poll(sc, im, timo);
2144 1.11 ad else
2145 1.11 ad iop_msg_wait(sc, im, timo);
2146 1.1 ad
2147 1.11 ad s = splbio();
2148 1.11 ad if ((im->im_flags & IM_REPLIED) != 0) {
2149 1.11 ad if ((im->im_flags & IM_NOSTATUS) != 0)
2150 1.11 ad rv = 0;
2151 1.11 ad else if ((im->im_flags & IM_FAIL) != 0)
2152 1.11 ad rv = ENXIO;
2153 1.11 ad else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2154 1.11 ad rv = EIO;
2155 1.11 ad else
2156 1.11 ad rv = 0;
2157 1.11 ad } else
2158 1.11 ad rv = EBUSY;
2159 1.2 ad splx(s);
2160 1.11 ad } else
2161 1.11 ad rv = 0;
2162 1.11 ad
2163 1.11 ad return (rv);
2164 1.11 ad }
2165 1.11 ad
2166 1.11 ad /*
2167 1.11 ad * Spin until the specified message is replied to.
2168 1.11 ad */
2169 1.11 ad static void
2170 1.11 ad iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2171 1.11 ad {
2172 1.11 ad u_int32_t rmfa;
2173 1.11 ad int s, status;
2174 1.11 ad
2175 1.15 ad s = splbio();
2176 1.1 ad
2177 1.1 ad /* Wait for completion. */
2178 1.1 ad for (timo *= 10; timo != 0; timo--) {
2179 1.5 ad if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2180 1.5 ad /* Double read to account for IOP bug. */
2181 1.5 ad rmfa = iop_inl(sc, IOP_REG_OFIFO);
2182 1.5 ad if (rmfa == IOP_MFA_EMPTY)
2183 1.5 ad rmfa = iop_inl(sc, IOP_REG_OFIFO);
2184 1.11 ad if (rmfa != IOP_MFA_EMPTY) {
2185 1.5 ad status = iop_handle_reply(sc, rmfa);
2186 1.11 ad
2187 1.11 ad /*
2188 1.11 ad * Return the reply frame to the IOP's
2189 1.11 ad * outbound FIFO.
2190 1.11 ad */
2191 1.11 ad iop_outl(sc, IOP_REG_OFIFO, rmfa);
2192 1.11 ad }
2193 1.5 ad }
2194 1.1 ad if ((im->im_flags & IM_REPLIED) != 0)
2195 1.1 ad break;
2196 1.1 ad DELAY(100);
2197 1.1 ad }
2198 1.1 ad
2199 1.1 ad if (timo == 0) {
2200 1.5 ad #ifdef I2ODEBUG
2201 1.5 ad printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2202 1.11 ad if (iop_status_get(sc, 1) != 0)
2203 1.11 ad printf("iop_msg_poll: unable to retrieve status\n");
2204 1.5 ad else
2205 1.11 ad printf("iop_msg_poll: IOP state = %d\n",
2206 1.5 ad (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2207 1.5 ad #endif
2208 1.1 ad }
2209 1.1 ad
2210 1.1 ad splx(s);
2211 1.1 ad }
2212 1.1 ad
2213 1.1 ad /*
2214 1.11 ad * Sleep until the specified message is replied to.
2215 1.1 ad */
2216 1.11 ad static void
2217 1.1 ad iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2218 1.1 ad {
2219 1.11 ad int s, rv;
2220 1.1 ad
2221 1.5 ad s = splbio();
2222 1.5 ad if ((im->im_flags & IM_REPLIED) != 0) {
2223 1.5 ad splx(s);
2224 1.11 ad return;
2225 1.5 ad }
2226 1.24 bouyer rv = tsleep(im, PRIBIO, "iopmsg", mstohz(timo));
2227 1.5 ad splx(s);
2228 1.11 ad
2229 1.5 ad #ifdef I2ODEBUG
2230 1.5 ad if (rv != 0) {
2231 1.5 ad printf("iop_msg_wait: tsleep() == %d\n", rv);
2232 1.11 ad if (iop_status_get(sc, 0) != 0)
2233 1.5 ad printf("iop_msg_wait: unable to retrieve status\n");
2234 1.5 ad else
2235 1.5 ad printf("iop_msg_wait: IOP state = %d\n",
2236 1.5 ad (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2237 1.5 ad }
2238 1.5 ad #endif
2239 1.1 ad }
2240 1.1 ad
2241 1.1 ad /*
2242 1.1 ad  * Release an unused message frame back to the IOP's inbound FIFO.
2243 1.1 ad */
2244 1.1 ad static void
2245 1.1 ad iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2246 1.1 ad {
2247 1.1 ad
2248 1.1 ad /* Use the frame to issue a no-op. */
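	/*
	 * The first word encodes the message size, four 32-bit words
	 * (4 << 16), alongside the I2O version; the remaining words carry
	 * the NOP function code and zeroed initiator/transaction contexts.
	 */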
2249 1.5 ad iop_outl(sc, mfa, I2O_VERSION_11 | (4 << 16));
2250 1.5 ad iop_outl(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2251 1.5 ad iop_outl(sc, mfa + 8, 0);
2252 1.5 ad iop_outl(sc, mfa + 12, 0);
2253 1.1 ad
2254 1.5 ad iop_outl(sc, IOP_REG_IFIFO, mfa);
2255 1.1 ad }
2256 1.1 ad
2257 1.1 ad #ifdef I2ODEBUG
2258 1.1 ad /*
2259 1.11 ad * Dump a reply frame header.
2260 1.1 ad */
2261 1.1 ad static void
2262 1.11 ad iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2263 1.1 ad {
2264 1.5 ad u_int function, detail;
2265 1.1 ad #ifdef I2OVERBOSE
2266 1.1 ad const char *statusstr;
2267 1.1 ad #endif
2268 1.1 ad
2269 1.5 ad function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2270 1.1 ad detail = le16toh(rb->detail);
2271 1.1 ad
2272 1.5 ad printf("%s: reply:\n", sc->sc_dv.dv_xname);
2273 1.5 ad
2274 1.1 ad #ifdef I2OVERBOSE
2275 1.1 ad if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2276 1.1 ad statusstr = iop_status[rb->reqstatus];
2277 1.1 ad else
2278 1.1 ad statusstr = "undefined error code";
2279 1.1 ad
2280 1.5 ad printf("%s: function=0x%02x status=0x%02x (%s)\n",
2281 1.5 ad sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2282 1.1 ad #else
2283 1.5 ad printf("%s: function=0x%02x status=0x%02x\n",
2284 1.5 ad sc->sc_dv.dv_xname, function, rb->reqstatus);
2285 1.1 ad #endif
2286 1.5 ad printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2287 1.5 ad sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2288 1.5 ad le32toh(rb->msgtctx));
2289 1.5 ad printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2290 1.5 ad (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2291 1.5 ad (le32toh(rb->msgflags) >> 8) & 0xff);
2292 1.1 ad }
2293 1.1 ad #endif
2294 1.1 ad
2295 1.1 ad /*
2296 1.11 ad * Dump a transport failure reply.
2297 1.11 ad */
2298 1.11 ad static void
2299 1.11 ad iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2300 1.11 ad {
2301 1.11 ad
2302 1.11 ad printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2303 1.11 ad
2304 1.19 ad printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2305 1.11 ad le32toh(fn->msgictx), le32toh(fn->msgtctx));
2306 1.11 ad printf("%s: failurecode=0x%02x severity=0x%02x\n",
2307 1.11 ad sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2308 1.11 ad printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2309 1.11 ad sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2310 1.11 ad }
2311 1.11 ad
2312 1.11 ad /*
2313 1.5 ad * Translate an I2O ASCII field into a C string.
2314 1.1 ad */
2315 1.1 ad void
2316 1.5 ad iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2317 1.1 ad {
2318 1.5 ad int hc, lc, i, nit;
2319 1.1 ad
2320 1.1 ad dlen--;
2321 1.1 ad lc = 0;
2322 1.1 ad hc = 0;
2323 1.1 ad i = 0;
2324 1.5 ad
2325 1.5 ad /*
2326 1.5 ad 	 * DPT uses NUL as a space, whereas AMI uses it as a terminator. The
2327 1.5 ad * spec has nothing to say about it. Since AMI fields are usually
2328 1.5 ad * filled with junk after the terminator, ...
2329 1.5 ad */
2330 1.5 ad nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2331 1.5 ad
2332 1.5 ad while (slen-- != 0 && dlen-- != 0) {
2333 1.5 ad if (nit && *src == '\0')
2334 1.5 ad break;
2335 1.5 ad else if (*src <= 0x20 || *src >= 0x7f) {
2336 1.1 ad if (hc)
2337 1.1 ad dst[i++] = ' ';
2338 1.1 ad } else {
2339 1.1 ad hc = 1;
2340 1.1 ad dst[i++] = *src;
2341 1.1 ad lc = i;
2342 1.1 ad }
2343 1.1 ad src++;
2344 1.1 ad }
2345 1.1 ad
2346 1.1 ad dst[lc] = '\0';
2347 1.1 ad }
2348 1.1 ad
2349 1.1 ad /*
2350 1.11 ad * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2351 1.11 ad */
2352 1.11 ad int
2353 1.11 ad iop_print_ident(struct iop_softc *sc, int tid)
2354 1.11 ad {
2355 1.11 ad struct {
2356 1.11 ad struct i2o_param_op_results pr;
2357 1.11 ad struct i2o_param_read_results prr;
2358 1.11 ad struct i2o_param_device_identity di;
2359 1.11 ad } __attribute__ ((__packed__)) p;
2360 1.11 ad char buf[32];
2361 1.11 ad int rv;
2362 1.11 ad
2363 1.16 ad rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2364 1.16 ad sizeof(p), NULL);
2365 1.11 ad if (rv != 0)
2366 1.11 ad return (rv);
2367 1.11 ad
2368 1.11 ad iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2369 1.11 ad sizeof(buf));
2370 1.11 ad printf(" <%s, ", buf);
2371 1.11 ad iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2372 1.11 ad sizeof(buf));
2373 1.11 ad printf("%s, ", buf);
2374 1.11 ad iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2375 1.11 ad printf("%s>", buf);
2376 1.11 ad
2377 1.11 ad return (0);
2378 1.11 ad }
2379 1.11 ad
2380 1.11 ad /*
2381 1.5 ad * Claim or unclaim the specified TID.
2382 1.1 ad */
2383 1.1 ad int
2384 1.5 ad iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2385 1.15 ad int flags)
2386 1.1 ad {
2387 1.5 ad struct iop_msg *im;
2388 1.11 ad struct i2o_util_claim mf;
2389 1.5 ad int rv, func;
2390 1.5 ad
2391 1.5 ad func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2392 1.15 ad im = iop_msg_alloc(sc, IM_WAIT);
2393 1.5 ad
2394 1.11 ad 	/* The claim and release messages are identical, so one structure serves both. */
2395 1.11 ad mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2396 1.11 ad mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2397 1.11 ad mf.msgictx = ii->ii_ictx;
2398 1.11 ad mf.msgtctx = im->im_tctx;
2399 1.11 ad mf.flags = flags;
2400 1.5 ad
2401 1.11 ad rv = iop_msg_post(sc, im, &mf, 5000);
2402 1.11 ad iop_msg_free(sc, im);
2403 1.5 ad return (rv);
2404 1.5 ad }
2405 1.5 ad
2406 1.5 ad /*
2407 1.5 ad * Perform an abort.
2408 1.5 ad */
2409 1.5 ad int iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2410 1.15 ad int tctxabort, int flags)
2411 1.5 ad {
2412 1.5 ad struct iop_msg *im;
2413 1.11 ad struct i2o_util_abort mf;
2414 1.5 ad int rv;
2415 1.5 ad
2416 1.15 ad im = iop_msg_alloc(sc, IM_WAIT);
2417 1.1 ad
2418 1.11 ad mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2419 1.11 ad mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2420 1.11 ad mf.msgictx = ii->ii_ictx;
2421 1.11 ad mf.msgtctx = im->im_tctx;
2422 1.11 ad mf.flags = (func << 24) | flags;
2423 1.11 ad mf.tctxabort = tctxabort;
2424 1.1 ad
2425 1.11 ad rv = iop_msg_post(sc, im, &mf, 5000);
2426 1.11 ad iop_msg_free(sc, im);
2427 1.5 ad return (rv);
2428 1.1 ad }
2429 1.1 ad
2430 1.1 ad /*
2431 1.11 ad * Enable or disable reception of events for the specified device.
2432 1.1 ad */
2433 1.5 ad int iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2434 1.5 ad {
2435 1.11 ad struct i2o_util_event_register mf;
2436 1.5 ad
2437 1.11 ad mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2438 1.11 ad mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2439 1.11 ad mf.msgictx = ii->ii_ictx;
2440 1.15 ad mf.msgtctx = 0;
2441 1.11 ad mf.eventmask = mask;
2442 1.5 ad
2443 1.11 ad /* This message is replied to only when events are signalled. */
2444 1.15 ad return (iop_post(sc, (u_int32_t *)&mf));
2445 1.5 ad }
2446 1.5 ad
2447 1.1 ad int
2448 1.5 ad iopopen(dev_t dev, int flag, int mode, struct proc *p)
2449 1.1 ad {
2450 1.5 ad struct iop_softc *sc;
2451 1.5 ad
2452 1.11 ad if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2453 1.11 ad return (ENXIO);
2454 1.11 ad if ((sc->sc_flags & IOP_ONLINE) == 0)
2455 1.1 ad return (ENXIO);
2456 1.5 ad if ((sc->sc_flags & IOP_OPEN) != 0)
2457 1.5 ad return (EBUSY);
2458 1.5 ad sc->sc_flags |= IOP_OPEN;
2459 1.5 ad
2460 1.5 ad return (0);
2461 1.1 ad }
2462 1.1 ad
2463 1.5 ad int
2464 1.5 ad iopclose(dev_t dev, int flag, int mode, struct proc *p)
2465 1.1 ad {
2466 1.5 ad struct iop_softc *sc;
2467 1.1 ad
2468 1.5 ad sc = device_lookup(&iop_cd, minor(dev));
2469 1.11 ad sc->sc_flags &= ~IOP_OPEN;
2470 1.15 ad
2471 1.5 ad return (0);
2472 1.1 ad }
2473 1.1 ad
2474 1.1 ad int
2475 1.5 ad iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
2476 1.1 ad {
2477 1.5 ad struct iop_softc *sc;
2478 1.5 ad struct iovec *iov;
2479 1.5 ad int rv, i;
2480 1.5 ad
2481 1.5 ad if (securelevel >= 2)
2482 1.5 ad return (EPERM);
2483 1.5 ad
2484 1.5 ad sc = device_lookup(&iop_cd, minor(dev));
2485 1.5 ad
2486 1.5 ad switch (cmd) {
2487 1.5 ad case IOPIOCPT:
2488 1.15 ad return (iop_passthrough(sc, (struct ioppt *)data, p));
2489 1.5 ad
2490 1.11 ad case IOPIOCGSTATUS:
2491 1.11 ad iov = (struct iovec *)data;
2492 1.11 ad i = sizeof(struct i2o_status);
2493 1.11 ad if (i > iov->iov_len)
2494 1.11 ad i = iov->iov_len;
2495 1.11 ad else
2496 1.11 ad iov->iov_len = i;
2497 1.11 ad if ((rv = iop_status_get(sc, 0)) == 0)
2498 1.11 ad rv = copyout(&sc->sc_status, iov->iov_base, i);
2499 1.11 ad return (rv);
2500 1.5 ad
2501 1.11 ad case IOPIOCGLCT:
2502 1.11 ad case IOPIOCGTIDMAP:
2503 1.11 ad case IOPIOCRECONFIG:
2504 1.11 ad break;
2505 1.5 ad
2506 1.11 ad default:
2507 1.11 ad #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2508 1.11 ad printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2509 1.11 ad #endif
2510 1.11 ad return (ENOTTY);
2511 1.11 ad }
2512 1.5 ad
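	/*
	 * The remaining ioctls read configuration state (or trigger a
	 * reconfiguration) and so must hold the configuration lock.
	 */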
2513 1.11 ad if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2514 1.11 ad return (rv);
2515 1.1 ad
2516 1.11 ad switch (cmd) {
2517 1.5 ad case IOPIOCGLCT:
2518 1.5 ad iov = (struct iovec *)data;
2519 1.11 ad i = le16toh(sc->sc_lct->tablesize) << 2;
2520 1.5 ad if (i > iov->iov_len)
2521 1.5 ad i = iov->iov_len;
2522 1.5 ad else
2523 1.5 ad iov->iov_len = i;
2524 1.11 ad rv = copyout(sc->sc_lct, iov->iov_base, i);
2525 1.5 ad break;
2526 1.5 ad
2527 1.5 ad case IOPIOCRECONFIG:
2528 1.11 ad rv = iop_reconfigure(sc, 0);
2529 1.9 ad break;
2530 1.9 ad
2531 1.9 ad case IOPIOCGTIDMAP:
2532 1.9 ad iov = (struct iovec *)data;
2533 1.11 ad i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2534 1.11 ad if (i > iov->iov_len)
2535 1.11 ad i = iov->iov_len;
2536 1.11 ad else
2537 1.11 ad iov->iov_len = i;
2538 1.11 ad rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2539 1.11 ad break;
2540 1.11 ad }
2541 1.11 ad
2542 1.11 ad lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2543 1.11 ad return (rv);
2544 1.11 ad }
2545 1.11 ad
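/*
 * Handle an IOPIOCPT pass-through request: copy in the caller-supplied
 * message frame, map any data buffers into it, post the message and wait
 * for the reply, then copy the reply frame back out to the caller.
 */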
2546 1.11 ad static int
2547 1.15 ad iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2548 1.11 ad {
2549 1.11 ad struct iop_msg *im;
2550 1.11 ad struct i2o_msg *mf;
2551 1.11 ad struct ioppt_buf *ptb;
2552 1.11 ad int rv, i, mapped;
2553 1.11 ad
2554 1.11 ad mf = NULL;
2555 1.11 ad im = NULL;
2556 1.11 ad 	mapped = 0;		/* set once the first buffer has been mapped */
2557 1.11 ad
2558 1.19 ad if (pt->pt_msglen > sc->sc_framesize ||
2559 1.11 ad pt->pt_msglen < sizeof(struct i2o_msg) ||
2560 1.11 ad pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2561 1.11 ad pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2562 1.11 ad pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2563 1.11 ad return (EINVAL);
2564 1.11 ad
2565 1.11 ad for (i = 0; i < pt->pt_nbufs; i++)
2566 1.11 ad if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2567 1.11 ad rv = ENOMEM;
2568 1.11 ad goto bad;
2569 1.11 ad }
2570 1.11 ad
2571 1.19 ad mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2572 1.11 ad if (mf == NULL)
2573 1.11 ad return (ENOMEM);
2574 1.11 ad
2575 1.11 ad if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2576 1.11 ad goto bad;
2577 1.11 ad
2578 1.15 ad im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2579 1.11 ad im->im_rb = (struct i2o_reply *)mf;
2580 1.11 ad mf->msgictx = IOP_ICTX;
2581 1.11 ad mf->msgtctx = im->im_tctx;
2582 1.11 ad
2583 1.11 ad for (i = 0; i < pt->pt_nbufs; i++) {
2584 1.11 ad ptb = &pt->pt_bufs[i];
2585 1.15 ad rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2586 1.15 ad ptb->ptb_datalen, ptb->ptb_out != 0, p);
2587 1.11 ad if (rv != 0)
2588 1.11 ad goto bad;
2589 1.11 ad mapped = 1;
2590 1.11 ad }
2591 1.11 ad
2592 1.11 ad if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2593 1.11 ad goto bad;
2594 1.11 ad
2595 1.11 ad i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2596 1.19 ad if (i > sc->sc_framesize)
2597 1.19 ad i = sc->sc_framesize;
2598 1.11 ad if (i > pt->pt_replylen)
2599 1.11 ad i = pt->pt_replylen;
2600 1.15 ad rv = copyout(im->im_rb, pt->pt_reply, i);
2601 1.9 ad
2602 1.11 ad bad:
2603 1.11 ad if (mapped != 0)
2604 1.11 ad iop_msg_unmap(sc, im);
2605 1.11 ad if (im != NULL)
2606 1.11 ad iop_msg_free(sc, im);
2607 1.11 ad if (mf != NULL)
2608 1.11 ad free(mf, M_DEVBUF);
2609 1.1 ad return (rv);
2610 1.5 ad }
2611