iop.c revision 1.10.2.17
1 1.10.2.17 christos /* $NetBSD: iop.c,v 1.10.2.17 2002/11/27 21:59:23 christos Exp $ */
2 1.1 ad
3 1.1 ad /*-
4 1.10.2.17 christos * Copyright (c) 2000, 2001, 2002 The NetBSD Foundation, Inc.
5 1.1 ad * All rights reserved.
6 1.1 ad *
7 1.1 ad * This code is derived from software contributed to The NetBSD Foundation
8 1.1 ad * by Andrew Doran.
9 1.1 ad *
10 1.1 ad * Redistribution and use in source and binary forms, with or without
11 1.1 ad * modification, are permitted provided that the following conditions
12 1.1 ad * are met:
13 1.1 ad * 1. Redistributions of source code must retain the above copyright
14 1.1 ad * notice, this list of conditions and the following disclaimer.
15 1.1 ad * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 ad * notice, this list of conditions and the following disclaimer in the
17 1.1 ad * documentation and/or other materials provided with the distribution.
18 1.1 ad * 3. All advertising materials mentioning features or use of this software
19 1.1 ad * must display the following acknowledgement:
20 1.1 ad * This product includes software developed by the NetBSD
21 1.1 ad * Foundation, Inc. and its contributors.
22 1.1 ad * 4. Neither the name of The NetBSD Foundation nor the names of its
23 1.1 ad * contributors may be used to endorse or promote products derived
24 1.1 ad * from this software without specific prior written permission.
25 1.1 ad *
26 1.1 ad * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 1.1 ad * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 1.1 ad * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 1.1 ad * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 1.1 ad * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 1.1 ad * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 1.1 ad * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 1.1 ad * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 1.1 ad * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 1.1 ad * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 1.1 ad * POSSIBILITY OF SUCH DAMAGE.
37 1.1 ad */
38 1.1 ad
39 1.1 ad /*
40 1.1 ad * Support for I2O IOPs (intelligent I/O processors).
41 1.1 ad */
42 1.10.2.8 nathanw
43 1.10.2.8 nathanw #include <sys/cdefs.h>
44 1.10.2.17 christos __KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.10.2.17 2002/11/27 21:59:23 christos Exp $");
45 1.1 ad
46 1.1 ad #include "opt_i2o.h"
47 1.5 ad #include "iop.h"
48 1.1 ad
49 1.1 ad #include <sys/param.h>
50 1.1 ad #include <sys/systm.h>
51 1.1 ad #include <sys/kernel.h>
52 1.1 ad #include <sys/device.h>
53 1.1 ad #include <sys/queue.h>
54 1.1 ad #include <sys/proc.h>
55 1.1 ad #include <sys/malloc.h>
56 1.1 ad #include <sys/ioctl.h>
57 1.1 ad #include <sys/endian.h>
58 1.5 ad #include <sys/conf.h>
59 1.5 ad #include <sys/kthread.h>
60 1.1 ad
61 1.4 thorpej #include <uvm/uvm_extern.h>
62 1.4 thorpej
63 1.1 ad #include <machine/bus.h>
64 1.1 ad
65 1.1 ad #include <dev/i2o/i2o.h>
66 1.10.2.2 nathanw #include <dev/i2o/iopio.h>
67 1.1 ad #include <dev/i2o/iopreg.h>
68 1.1 ad #include <dev/i2o/iopvar.h>
69 1.1 ad
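/*
 * Busy-wait for up to `ms' milliseconds for `cond' to become true,
 * re-testing roughly every 100us.
 */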
70 1.1 ad #define POLL(ms, cond) \
71 1.1 ad do { \
72 1.1 ad int i; \
73 1.1 ad for (i = (ms) * 10; i; i--) { \
74 1.1 ad if (cond) \
75 1.1 ad break; \
76 1.1 ad DELAY(100); \
77 1.1 ad } \
78 1.1 ad } while (/* CONSTCOND */0)
79 1.1 ad
80 1.1 ad #ifdef I2ODEBUG
81 1.1 ad #define DPRINTF(x) printf x
82 1.1 ad #else
83 1.1 ad #define DPRINTF(x)
84 1.1 ad #endif
85 1.1 ad
86 1.1 ad #ifdef I2OVERBOSE
87 1.5 ad #define IFVERBOSE(x) x
88 1.10.2.2 nathanw #define COMMENT(x) NULL
89 1.1 ad #else
90 1.1 ad #define IFVERBOSE(x)
91 1.10.2.2 nathanw #define COMMENT(x)
92 1.1 ad #endif
93 1.1 ad
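/*
 * Initiator contexts are kept in a small hash table, keyed on the
 * low-order bits of the context value.
 */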
94 1.5 ad #define IOP_ICTXHASH_NBUCKETS 16
95 1.5 ad #define IOP_ICTXHASH(ictx) (&iop_ictxhashtbl[(ictx) & iop_ictxhash])
96 1.10.2.2 nathanw
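/*
 * Maximum number of scatter/gather segments per transfer: one for each
 * page spanned by IOP_MAX_XFER, plus one to allow for a transfer that
 * does not start on a page boundary.
 */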
97 1.10.2.2 nathanw #define IOP_MAX_SEGS (((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)
98 1.10.2.2 nathanw
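/*
 * The low IOP_TCTX_SHIFT bits of a transaction context select the
 * message wrapper that a reply belongs to.
 */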
99 1.10.2.2 nathanw #define IOP_TCTX_SHIFT 12
100 1.10.2.2 nathanw #define IOP_TCTX_MASK ((1 << IOP_TCTX_SHIFT) - 1)
101 1.5 ad
102 1.5 ad static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
103 1.5 ad static u_long iop_ictxhash;
104 1.1 ad static void *iop_sdh;
105 1.5 ad static struct i2o_systab *iop_systab;
106 1.5 ad static int iop_systab_size;
107 1.1 ad
108 1.1 ad extern struct cfdriver iop_cd;
109 1.1 ad
110 1.10.2.14 nathanw dev_type_open(iopopen);
111 1.10.2.14 nathanw dev_type_close(iopclose);
112 1.10.2.14 nathanw dev_type_ioctl(iopioctl);
113 1.10.2.14 nathanw
114 1.10.2.14 nathanw const struct cdevsw iop_cdevsw = {
115 1.10.2.14 nathanw iopopen, iopclose, noread, nowrite, iopioctl,
116 1.10.2.16 nathanw nostop, notty, nopoll, nommap, nokqfilter,
117 1.10.2.14 nathanw };
118 1.10.2.14 nathanw
119 1.5 ad #define IC_CONFIGURE 0x01
120 1.10.2.2 nathanw #define IC_PRIORITY 0x02
121 1.1 ad
122 1.1 ad struct iop_class {
123 1.5 ad u_short ic_class;
124 1.5 ad u_short ic_flags;
125 1.10.2.2 nathanw #ifdef I2OVERBOSE
126 1.1 ad const char *ic_caption;
127 1.10.2.2 nathanw #endif
128 1.1 ad } static const iop_class[] = {
129 1.1 ad {
130 1.1 ad I2O_CLASS_EXECUTIVE,
131 1.1 ad 0,
132 1.5 ad COMMENT("executive")
133 1.1 ad },
134 1.1 ad {
135 1.1 ad I2O_CLASS_DDM,
136 1.1 ad 0,
137 1.5 ad COMMENT("device driver module")
138 1.1 ad },
139 1.1 ad {
140 1.1 ad I2O_CLASS_RANDOM_BLOCK_STORAGE,
141 1.10.2.2 nathanw IC_CONFIGURE | IC_PRIORITY,
142 1.1 ad IFVERBOSE("random block storage")
143 1.1 ad },
144 1.1 ad {
145 1.1 ad I2O_CLASS_SEQUENTIAL_STORAGE,
146 1.10.2.2 nathanw IC_CONFIGURE | IC_PRIORITY,
147 1.1 ad IFVERBOSE("sequential storage")
148 1.1 ad },
149 1.1 ad {
150 1.1 ad I2O_CLASS_LAN,
151 1.10.2.2 nathanw IC_CONFIGURE | IC_PRIORITY,
152 1.1 ad IFVERBOSE("LAN port")
153 1.1 ad },
154 1.1 ad {
155 1.1 ad I2O_CLASS_WAN,
156 1.10.2.2 nathanw IC_CONFIGURE | IC_PRIORITY,
157 1.1 ad IFVERBOSE("WAN port")
158 1.1 ad },
159 1.1 ad {
160 1.1 ad I2O_CLASS_FIBRE_CHANNEL_PORT,
161 1.1 ad IC_CONFIGURE,
162 1.1 ad IFVERBOSE("fibrechannel port")
163 1.1 ad },
164 1.1 ad {
165 1.1 ad I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
166 1.1 ad 0,
167 1.5 ad COMMENT("fibrechannel peripheral")
168 1.1 ad },
169 1.1 ad {
170 1.1 ad I2O_CLASS_SCSI_PERIPHERAL,
171 1.1 ad 0,
172 1.5 ad COMMENT("SCSI peripheral")
173 1.1 ad },
174 1.1 ad {
175 1.1 ad I2O_CLASS_ATE_PORT,
176 1.1 ad IC_CONFIGURE,
177 1.1 ad IFVERBOSE("ATE port")
178 1.1 ad },
179 1.1 ad {
180 1.1 ad I2O_CLASS_ATE_PERIPHERAL,
181 1.1 ad 0,
182 1.5 ad COMMENT("ATE peripheral")
183 1.1 ad },
184 1.1 ad {
185 1.1 ad I2O_CLASS_FLOPPY_CONTROLLER,
186 1.1 ad IC_CONFIGURE,
187 1.1 ad IFVERBOSE("floppy controller")
188 1.1 ad },
189 1.1 ad {
190 1.1 ad I2O_CLASS_FLOPPY_DEVICE,
191 1.1 ad 0,
192 1.5 ad COMMENT("floppy device")
193 1.1 ad },
194 1.1 ad {
195 1.1 ad I2O_CLASS_BUS_ADAPTER_PORT,
196 1.1 ad IC_CONFIGURE,
197 1.1 ad IFVERBOSE("bus adapter port")
198 1.1 ad },
199 1.1 ad };
200 1.1 ad
201 1.1 ad #if defined(I2ODEBUG) && defined(I2OVERBOSE)
202 1.10.2.2 nathanw static const char * const iop_status[] = {
203 1.1 ad "success",
204 1.1 ad "abort (dirty)",
205 1.1 ad "abort (no data transfer)",
206 1.1 ad "abort (partial transfer)",
207 1.1 ad "error (dirty)",
208 1.1 ad "error (no data transfer)",
209 1.1 ad "error (partial transfer)",
210 1.1 ad "undefined error code",
211 1.1 ad "process abort (dirty)",
212 1.1 ad "process abort (no data transfer)",
213 1.1 ad "process abort (partial transfer)",
214 1.1 ad "transaction error",
215 1.1 ad };
216 1.1 ad #endif
217 1.1 ad
218 1.5 ad static inline u_int32_t iop_inl(struct iop_softc *, int);
219 1.5 ad static inline void iop_outl(struct iop_softc *, int, u_int32_t);
220 1.5 ad
221 1.10.2.16 nathanw static inline u_int32_t iop_inl_msg(struct iop_softc *, int);
222 1.10.2.17 christos static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t);
223 1.10.2.16 nathanw
224 1.1 ad static void iop_config_interrupts(struct device *);
225 1.10.2.2 nathanw static void iop_configure_devices(struct iop_softc *, int, int);
226 1.1 ad static void iop_devinfo(int, char *);
227 1.1 ad static int iop_print(void *, const char *);
228 1.1 ad static void iop_shutdown(void *);
229 1.1 ad static int iop_submatch(struct device *, struct cfdata *, void *);
230 1.1 ad static int iop_vendor_print(void *, const char *);
231 1.1 ad
232 1.10.2.2 nathanw static void iop_adjqparam(struct iop_softc *, int);
233 1.9 ad static void iop_create_reconf_thread(void *);
234 1.10.2.2 nathanw static int iop_handle_reply(struct iop_softc *, u_int32_t);
235 1.1 ad static int iop_hrt_get(struct iop_softc *);
236 1.1 ad static int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
237 1.10.2.2 nathanw static void iop_intr_event(struct device *, struct iop_msg *, void *);
238 1.5 ad static int iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
239 1.5 ad u_int32_t);
240 1.10.2.2 nathanw static void iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
241 1.10.2.2 nathanw static void iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
242 1.1 ad static int iop_ofifo_init(struct iop_softc *);
243 1.10.2.4 nathanw static int iop_passthrough(struct iop_softc *, struct ioppt *,
244 1.10.2.4 nathanw struct proc *);
245 1.9 ad static void iop_reconf_thread(void *);
246 1.1 ad static void iop_release_mfa(struct iop_softc *, u_int32_t);
247 1.1 ad static int iop_reset(struct iop_softc *);
248 1.1 ad static int iop_systab_set(struct iop_softc *);
249 1.10.2.2 nathanw static void iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);
250 1.1 ad
251 1.1 ad #ifdef I2ODEBUG
252 1.10.2.2 nathanw static void iop_reply_print(struct iop_softc *, struct i2o_reply *);
253 1.1 ad #endif
254 1.5 ad
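/*
 * Read/write a register in the IOP's shared memory space, with bus
 * space barriers around each access.
 */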
255 1.5 ad static inline u_int32_t
256 1.5 ad iop_inl(struct iop_softc *sc, int off)
257 1.5 ad {
258 1.5 ad
259 1.5 ad bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
260 1.5 ad BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
261 1.5 ad return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
262 1.5 ad }
263 1.5 ad
264 1.5 ad static inline void
265 1.5 ad iop_outl(struct iop_softc *sc, int off, u_int32_t val)
266 1.5 ad {
267 1.5 ad
268 1.5 ad bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
269 1.5 ad bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
270 1.5 ad BUS_SPACE_BARRIER_WRITE);
271 1.5 ad }
272 1.5 ad
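/*
 * As above, but for the IOP's message frame window.
 */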
273 1.10.2.16 nathanw static inline u_int32_t
274 1.10.2.16 nathanw iop_inl_msg(struct iop_softc *sc, int off)
275 1.10.2.16 nathanw {
276 1.10.2.16 nathanw
277 1.10.2.16 nathanw bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
278 1.10.2.16 nathanw BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
279 1.10.2.16 nathanw return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
280 1.10.2.16 nathanw }
281 1.10.2.16 nathanw
282 1.10.2.16 nathanw static inline void
283 1.10.2.17 christos iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
284 1.10.2.16 nathanw {
285 1.10.2.16 nathanw
286 1.10.2.17 christos bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
287 1.10.2.17 christos bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
288 1.10.2.16 nathanw BUS_SPACE_BARRIER_WRITE);
289 1.10.2.16 nathanw }
290 1.10.2.16 nathanw
291 1.1 ad /*
292 1.10.2.2 nathanw * Initialise the IOP and our interface.
293 1.1 ad */
294 1.5 ad void
295 1.1 ad iop_init(struct iop_softc *sc, const char *intrstr)
296 1.1 ad {
297 1.10.2.2 nathanw struct iop_msg *im;
298 1.10.2.4 nathanw int rv, i, j, state, nsegs;
299 1.1 ad u_int32_t mask;
300 1.1 ad char ident[64];
301 1.1 ad
302 1.10.2.4 nathanw state = 0;
303 1.10.2.4 nathanw
304 1.10.2.4 nathanw printf("I2O adapter");
305 1.10.2.4 nathanw
306 1.10.2.2 nathanw if (iop_ictxhashtbl == NULL)
307 1.5 ad iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
308 1.5 ad M_DEVBUF, M_NOWAIT, &iop_ictxhash);
309 1.1 ad
310 1.10.2.4 nathanw /* Disable interrupts at the IOP. */
311 1.10.2.4 nathanw mask = iop_inl(sc, IOP_REG_INTR_MASK);
312 1.10.2.4 nathanw iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);
313 1.5 ad
314 1.10.2.4 nathanw /* Allocate a scratch DMA map for small miscellaneous shared data. */
315 1.10.2.4 nathanw if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
316 1.10.2.4 nathanw BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
317 1.10.2.4 nathanw printf("%s: cannot create scratch dmamap\n",
318 1.10.2.4 nathanw sc->sc_dv.dv_xname);
319 1.5 ad return;
320 1.1 ad }
321 1.10.2.4 nathanw
322 1.10.2.4 nathanw if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
323 1.10.2.4 nathanw sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
324 1.10.2.4 nathanw printf("%s: cannot alloc scratch dmamem\n",
325 1.10.2.4 nathanw sc->sc_dv.dv_xname);
326 1.10.2.4 nathanw goto bail_out;
327 1.10.2.4 nathanw }
328 1.10.2.4 nathanw state++;
329 1.10.2.4 nathanw
330 1.10.2.4 nathanw if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
331 1.10.2.4 nathanw &sc->sc_scr, 0)) {
332 1.10.2.4 nathanw printf("%s: cannot map scratch dmamem\n", sc->sc_dv.dv_xname);
333 1.10.2.4 nathanw goto bail_out;
334 1.10.2.4 nathanw }
335 1.10.2.4 nathanw state++;
336 1.10.2.4 nathanw
337 1.10.2.4 nathanw if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
338 1.10.2.4 nathanw PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
339 1.10.2.4 nathanw printf("%s: cannot load scratch dmamap\n", sc->sc_dv.dv_xname);
340 1.10.2.4 nathanw goto bail_out;
341 1.10.2.4 nathanw }
342 1.10.2.4 nathanw state++;
343 1.10.2.4 nathanw
344 1.10.2.9 nathanw #ifdef I2ODEBUG
345 1.10.2.9 nathanw /* So that our debug checks don't choke. */
346 1.10.2.9 nathanw sc->sc_framesize = 128;
347 1.10.2.9 nathanw #endif
348 1.10.2.9 nathanw
349 1.10.2.4 nathanw /* Reset the adapter and request status. */
350 1.10.2.4 nathanw if ((rv = iop_reset(sc)) != 0) {
351 1.10.2.4 nathanw printf("%s: not responding (reset)\n", sc->sc_dv.dv_xname);
352 1.10.2.4 nathanw goto bail_out;
353 1.10.2.4 nathanw }
354 1.10.2.4 nathanw
355 1.10.2.4 nathanw if ((rv = iop_status_get(sc, 1)) != 0) {
356 1.10.2.4 nathanw printf("%s: not responding (get status)\n",
357 1.10.2.4 nathanw sc->sc_dv.dv_xname);
358 1.10.2.4 nathanw goto bail_out;
359 1.10.2.4 nathanw }
360 1.10.2.4 nathanw
361 1.5 ad sc->sc_flags |= IOP_HAVESTATUS;
362 1.5 ad iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
363 1.1 ad ident, sizeof(ident));
364 1.5 ad printf(" <%s>\n", ident);
365 1.5 ad
366 1.5 ad #ifdef I2ODEBUG
367 1.5 ad printf("%s: orgid=0x%04x version=%d\n", sc->sc_dv.dv_xname,
368 1.5 ad le16toh(sc->sc_status.orgid),
369 1.5 ad (le32toh(sc->sc_status.segnumber) >> 12) & 15);
370 1.5 ad printf("%s: type want have cbase\n", sc->sc_dv.dv_xname);
371 1.5 ad printf("%s: mem %04x %04x %08x\n", sc->sc_dv.dv_xname,
372 1.5 ad le32toh(sc->sc_status.desiredprivmemsize),
373 1.5 ad le32toh(sc->sc_status.currentprivmemsize),
374 1.5 ad le32toh(sc->sc_status.currentprivmembase));
375 1.5 ad printf("%s: i/o %04x %04x %08x\n", sc->sc_dv.dv_xname,
376 1.5 ad le32toh(sc->sc_status.desiredpriviosize),
377 1.5 ad le32toh(sc->sc_status.currentpriviosize),
378 1.5 ad le32toh(sc->sc_status.currentpriviobase));
379 1.5 ad #endif
380 1.1 ad
381 1.10.2.2 nathanw sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
382 1.10.2.2 nathanw if (sc->sc_maxob > IOP_MAX_OUTBOUND)
383 1.10.2.2 nathanw sc->sc_maxob = IOP_MAX_OUTBOUND;
384 1.10.2.2 nathanw sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
385 1.10.2.2 nathanw if (sc->sc_maxib > IOP_MAX_INBOUND)
386 1.10.2.2 nathanw sc->sc_maxib = IOP_MAX_INBOUND;
387 1.10.2.7 nathanw sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
388 1.10.2.7 nathanw if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
389 1.10.2.7 nathanw sc->sc_framesize = IOP_MAX_MSG_SIZE;
390 1.10.2.7 nathanw
391 1.10.2.7 nathanw #if defined(I2ODEBUG) || defined(DIAGNOSTIC)
392 1.10.2.7 nathanw if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
393 1.10.2.7 nathanw printf("%s: frame size too small (%d)\n",
394 1.10.2.7 nathanw sc->sc_dv.dv_xname, sc->sc_framesize);
395 1.10.2.11 nathanw goto bail_out;
396 1.10.2.7 nathanw }
397 1.10.2.7 nathanw #endif
398 1.10.2.2 nathanw
399 1.10.2.2 nathanw /* Allocate message wrappers. */
400 1.10.2.10 nathanw im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
401 1.10.2.11 nathanw if (im == NULL) {
402 1.10.2.11 nathanw printf("%s: memory allocation failure\n", sc->sc_dv.dv_xname);
403 1.10.2.11 nathanw goto bail_out;
404 1.10.2.11 nathanw }
405 1.10.2.11 nathanw state++;
406 1.10.2.2 nathanw sc->sc_ims = im;
407 1.10.2.2 nathanw SLIST_INIT(&sc->sc_im_freelist);
408 1.10.2.2 nathanw
409 1.10.2.4 nathanw for (i = 0, state++; i < sc->sc_maxib; i++, im++) {
410 1.10.2.2 nathanw rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
411 1.10.2.2 nathanw IOP_MAX_SEGS, IOP_MAX_XFER, 0,
412 1.10.2.2 nathanw BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
413 1.10.2.2 nathanw &im->im_xfer[0].ix_map);
414 1.10.2.2 nathanw if (rv != 0) {
415 1.10.2.2 nathanw printf("%s: couldn't create dmamap (%d)\n",
416 1.10.2.2 nathanw sc->sc_dv.dv_xname, rv);
417 1.10.2.4 nathanw goto bail_out;
418 1.10.2.2 nathanw }
419 1.10.2.2 nathanw
420 1.10.2.2 nathanw im->im_tctx = i;
421 1.10.2.2 nathanw SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
422 1.10.2.2 nathanw }
423 1.1 ad
424 1.10.2.5 nathanw /* Initialise the IOP's outbound FIFO. */
425 1.5 ad if (iop_ofifo_init(sc) != 0) {
426 1.10.2.4 nathanw printf("%s: unable to init outbound FIFO\n",
427 1.10.2.4 nathanw sc->sc_dv.dv_xname);
428 1.10.2.4 nathanw goto bail_out;
429 1.5 ad }
430 1.1 ad
431 1.5 ad /*
432 1.5 ad * Defer further configuration until (a) interrupts are working and
433 1.5 ad * (b) we have enough information to build the system table.
434 1.5 ad */
435 1.1 ad config_interrupts((struct device *)sc, iop_config_interrupts);
436 1.1 ad
437 1.5 ad /* Configure shutdown hook before we start any device activity. */
438 1.1 ad if (iop_sdh == NULL)
439 1.1 ad iop_sdh = shutdownhook_establish(iop_shutdown, NULL);
440 1.1 ad
441 1.1 ad /* Ensure interrupts are enabled at the IOP. */
442 1.5 ad mask = iop_inl(sc, IOP_REG_INTR_MASK);
443 1.5 ad iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);
444 1.1 ad
445 1.1 ad if (intrstr != NULL)
446 1.1 ad printf("%s: interrupting at %s\n", sc->sc_dv.dv_xname,
447 1.1 ad intrstr);
448 1.1 ad
449 1.1 ad #ifdef I2ODEBUG
450 1.1 ad printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
451 1.10.2.2 nathanw sc->sc_dv.dv_xname, sc->sc_maxib,
452 1.10.2.2 nathanw le32toh(sc->sc_status.maxinboundmframes),
453 1.10.2.2 nathanw sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
454 1.1 ad #endif
455 1.1 ad
456 1.5 ad lockinit(&sc->sc_conflock, PRIBIO, "iopconf", hz * 30, 0);
457 1.10.2.4 nathanw return;
458 1.10.2.4 nathanw
459 1.10.2.4 nathanw bail_out:
460 1.10.2.4 nathanw if (state > 3) {
461 1.10.2.4 nathanw for (j = 0; j < i; j++)
462 1.10.2.4 nathanw bus_dmamap_destroy(sc->sc_dmat,
463 1.10.2.4 nathanw sc->sc_ims[j].im_xfer[0].ix_map);
464 1.10.2.4 nathanw free(sc->sc_ims, M_DEVBUF);
465 1.10.2.4 nathanw }
466 1.10.2.4 nathanw if (state > 2)
467 1.10.2.4 nathanw bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
468 1.10.2.4 nathanw if (state > 1)
469 1.10.2.4 nathanw bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
470 1.10.2.4 nathanw if (state > 0)
471 1.10.2.4 nathanw bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
472 1.10.2.4 nathanw bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
473 1.1 ad }
474 1.1 ad
475 1.1 ad /*
476 1.5 ad * Perform autoconfiguration tasks.
477 1.1 ad */
478 1.1 ad static void
479 1.1 ad iop_config_interrupts(struct device *self)
480 1.1 ad {
481 1.10.2.6 nathanw struct iop_attach_args ia;
482 1.5 ad struct iop_softc *sc, *iop;
483 1.5 ad struct i2o_systab_entry *ste;
484 1.5 ad int rv, i, niop;
485 1.1 ad
486 1.1 ad sc = (struct iop_softc *)self;
487 1.5 ad LIST_INIT(&sc->sc_iilist);
488 1.5 ad
489 1.5 ad printf("%s: configuring...\n", sc->sc_dv.dv_xname);
490 1.1 ad
491 1.5 ad if (iop_hrt_get(sc) != 0) {
492 1.5 ad printf("%s: unable to retrieve HRT\n", sc->sc_dv.dv_xname);
493 1.5 ad return;
494 1.5 ad }
495 1.1 ad
496 1.5 ad /*
497 1.5 ad * Build the system table.
498 1.5 ad */
499 1.5 ad if (iop_systab == NULL) {
500 1.5 ad for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
501 1.5 ad if ((iop = device_lookup(&iop_cd, i)) == NULL)
502 1.5 ad continue;
503 1.5 ad if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
504 1.5 ad continue;
505 1.10.2.2 nathanw if (iop_status_get(iop, 1) != 0) {
506 1.5 ad printf("%s: unable to retrieve status\n",
507 1.5 ad sc->sc_dv.dv_xname);
508 1.5 ad iop->sc_flags &= ~IOP_HAVESTATUS;
509 1.5 ad continue;
510 1.5 ad }
511 1.5 ad niop++;
512 1.5 ad }
513 1.5 ad if (niop == 0)
514 1.5 ad return;
515 1.5 ad
516 1.5 ad i = sizeof(struct i2o_systab_entry) * (niop - 1) +
517 1.5 ad sizeof(struct i2o_systab);
518 1.5 ad iop_systab_size = i;
519 1.10.2.10 nathanw iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);
if (iop_systab == NULL) {
	printf("%s: memory allocation failure\n",
	    sc->sc_dv.dv_xname);
	return;
}
520 1.5 ad
521 1.5 ad iop_systab->numentries = niop;
522 1.5 ad iop_systab->version = I2O_VERSION_11;
523 1.5 ad
524 1.5 ad for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
525 1.5 ad if ((iop = device_lookup(&iop_cd, i)) == NULL)
526 1.5 ad continue;
527 1.5 ad if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
528 1.5 ad continue;
529 1.5 ad
530 1.5 ad ste->orgid = iop->sc_status.orgid;
531 1.5 ad ste->iopid = iop->sc_dv.dv_unit + 2;
532 1.5 ad ste->segnumber =
533 1.5 ad htole32(le32toh(iop->sc_status.segnumber) & ~4095);
534 1.5 ad ste->iopcaps = iop->sc_status.iopcaps;
535 1.5 ad ste->inboundmsgframesize =
536 1.5 ad iop->sc_status.inboundmframesize;
537 1.5 ad ste->inboundmsgportaddresslow =
538 1.5 ad htole32(iop->sc_memaddr + IOP_REG_IFIFO);
539 1.5 ad ste++;
540 1.5 ad }
541 1.5 ad }
542 1.5 ad
543 1.10.2.2 nathanw /*
544 1.10.2.2 nathanw * Post the system table to the IOP and bring it to the OPERATIONAL
545 1.10.2.2 nathanw * state.
546 1.10.2.2 nathanw */
547 1.5 ad if (iop_systab_set(sc) != 0) {
548 1.5 ad printf("%s: unable to set system table\n", sc->sc_dv.dv_xname);
549 1.5 ad return;
550 1.5 ad }
551 1.5 ad if (iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_ENABLE, IOP_ICTX, 1,
552 1.10.2.2 nathanw 30000) != 0) {
553 1.5 ad printf("%s: unable to enable system\n", sc->sc_dv.dv_xname);
554 1.5 ad return;
555 1.5 ad }
556 1.5 ad
557 1.5 ad /*
558 1.5 ad * Set up an event handler for this IOP.
559 1.5 ad */
560 1.5 ad sc->sc_eventii.ii_dv = self;
561 1.5 ad sc->sc_eventii.ii_intr = iop_intr_event;
562 1.10.2.4 nathanw sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
563 1.5 ad sc->sc_eventii.ii_tid = I2O_TID_IOP;
564 1.10.2.2 nathanw iop_initiator_register(sc, &sc->sc_eventii);
565 1.10.2.2 nathanw
566 1.10.2.2 nathanw rv = iop_util_eventreg(sc, &sc->sc_eventii,
567 1.10.2.2 nathanw I2O_EVENT_EXEC_RESOURCE_LIMITS |
568 1.10.2.2 nathanw I2O_EVENT_EXEC_CONNECTION_FAIL |
569 1.10.2.2 nathanw I2O_EVENT_EXEC_ADAPTER_FAULT |
570 1.10.2.2 nathanw I2O_EVENT_EXEC_POWER_FAIL |
571 1.10.2.2 nathanw I2O_EVENT_EXEC_RESET_PENDING |
572 1.10.2.2 nathanw I2O_EVENT_EXEC_RESET_IMMINENT |
573 1.10.2.2 nathanw I2O_EVENT_EXEC_HARDWARE_FAIL |
574 1.10.2.2 nathanw I2O_EVENT_EXEC_XCT_CHANGE |
575 1.10.2.2 nathanw I2O_EVENT_EXEC_DDM_AVAILIBILITY |
576 1.10.2.2 nathanw I2O_EVENT_GEN_DEVICE_RESET |
577 1.10.2.2 nathanw I2O_EVENT_GEN_STATE_CHANGE |
578 1.10.2.2 nathanw I2O_EVENT_GEN_GENERAL_WARNING);
579 1.10.2.2 nathanw if (rv != 0) {
580 1.5 ad printf("%s: unable to register for events\n", sc->sc_dv.dv_xname);
581 1.5 ad return;
582 1.5 ad }
583 1.5 ad
584 1.10.2.6 nathanw /*
585 1.10.2.6 nathanw * Attempt to match and attach a product-specific extension.
586 1.10.2.6 nathanw */
587 1.1 ad ia.ia_class = I2O_CLASS_ANY;
588 1.1 ad ia.ia_tid = I2O_TID_IOP;
589 1.1 ad config_found_sm(self, &ia, iop_vendor_print, iop_submatch);
590 1.5 ad
591 1.10.2.6 nathanw /*
592 1.10.2.6 nathanw * Start device configuration.
593 1.10.2.6 nathanw */
594 1.10.2.2 nathanw lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL);
595 1.10.2.2 nathanw if ((rv = iop_reconfigure(sc, 0)) == -1) {
596 1.5 ad printf("%s: configure failed (%d)\n", sc->sc_dv.dv_xname, rv);
597 1.5 ad return;
598 1.5 ad }
599 1.10.2.2 nathanw lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
600 1.5 ad
601 1.9 ad kthread_create(iop_create_reconf_thread, sc);
602 1.9 ad }
603 1.9 ad
604 1.9 ad /*
605 1.9 ad * Create the reconfiguration thread. Called after the standard kernel
606 1.9 ad * threads have been created.
607 1.9 ad */
608 1.9 ad static void
609 1.9 ad iop_create_reconf_thread(void *cookie)
610 1.9 ad {
611 1.9 ad struct iop_softc *sc;
612 1.9 ad int rv;
613 1.9 ad
614 1.9 ad sc = cookie;
615 1.5 ad sc->sc_flags |= IOP_ONLINE;
616 1.10.2.2 nathanw
617 1.9 ad rv = kthread_create1(iop_reconf_thread, sc, &sc->sc_reconf_proc,
618 1.10.2.2 nathanw "%s", sc->sc_dv.dv_xname);
619 1.10.2.2 nathanw if (rv != 0) {
620 1.9 ad printf("%s: unable to create reconfiguration thread (%d)\n",
621 1.10.2.2 nathanw sc->sc_dv.dv_xname, rv);
622 1.10.2.2 nathanw return;
623 1.10.2.2 nathanw }
624 1.5 ad }
625 1.5 ad
626 1.5 ad /*
627 1.5 ad * Reconfiguration thread; listens for LCT change notification, and
628 1.10.2.3 nathanw * initiates re-configuration when one is received.
629 1.5 ad */
630 1.5 ad static void
631 1.9 ad iop_reconf_thread(void *cookie)
632 1.5 ad {
633 1.5 ad struct iop_softc *sc;
634 1.10.2.2 nathanw struct lwp *l;
635 1.5 ad struct i2o_lct lct;
636 1.5 ad u_int32_t chgind;
637 1.10.2.2 nathanw int rv;
638 1.5 ad
639 1.5 ad sc = cookie;
640 1.10.2.2 nathanw chgind = sc->sc_chgind + 1;
641 1.10.2.13 nathanw l = curlwp;
642 1.5 ad
643 1.5 ad for (;;) {
644 1.10.2.2 nathanw DPRINTF(("%s: async reconfig: requested 0x%08x\n",
645 1.10.2.2 nathanw sc->sc_dv.dv_xname, chgind));
646 1.5 ad
647 1.10.2.2 nathanw PHOLD(l);
648 1.10.2.2 nathanw rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
649 1.10.2.2 nathanw PRELE(l);
650 1.10.2.2 nathanw
651 1.10.2.2 nathanw DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
652 1.10.2.2 nathanw sc->sc_dv.dv_xname, le32toh(lct.changeindicator), rv));
653 1.10.2.2 nathanw
654 1.10.2.2 nathanw if (rv == 0 &&
655 1.10.2.2 nathanw lockmgr(&sc->sc_conflock, LK_EXCLUSIVE, NULL) == 0) {
656 1.10.2.2 nathanw iop_reconfigure(sc, le32toh(lct.changeindicator));
657 1.10.2.2 nathanw chgind = sc->sc_chgind + 1;
658 1.10.2.2 nathanw lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
659 1.5 ad }
660 1.5 ad
661 1.9 ad tsleep(iop_reconf_thread, PWAIT, "iopzzz", hz * 5);
662 1.5 ad }
663 1.5 ad }
664 1.5 ad
665 1.5 ad /*
666 1.5 ad * Reconfigure: find new and removed devices.
667 1.5 ad */
668 1.10.2.6 nathanw int
669 1.10.2.2 nathanw iop_reconfigure(struct iop_softc *sc, u_int chgind)
670 1.5 ad {
671 1.5 ad struct iop_msg *im;
672 1.10.2.2 nathanw struct i2o_hba_bus_scan mf;
673 1.5 ad struct i2o_lct_entry *le;
674 1.5 ad struct iop_initiator *ii, *nextii;
675 1.5 ad int rv, tid, i;
676 1.5 ad
677 1.1 ad /*
678 1.5 ad * If the reconfiguration request isn't the result of LCT change
679 1.5 ad * notification, then be more thorough: ask all bus ports to scan
680 1.5 ad * their busses. Wait up to 5 minutes for each bus port to complete
681 1.5 ad * the request.
682 1.1 ad */
683 1.5 ad if (chgind == 0) {
684 1.5 ad if ((rv = iop_lct_get(sc)) != 0) {
685 1.5 ad DPRINTF(("iop_reconfigure: unable to read LCT\n"));
686 1.10.2.2 nathanw return (rv);
687 1.5 ad }
688 1.5 ad
689 1.5 ad le = sc->sc_lct->entry;
690 1.5 ad for (i = 0; i < sc->sc_nlctent; i++, le++) {
691 1.5 ad if ((le16toh(le->classid) & 4095) !=
692 1.5 ad I2O_CLASS_BUS_ADAPTER_PORT)
693 1.5 ad continue;
694 1.10.2.4 nathanw tid = le16toh(le->localtid) & 4095;
695 1.5 ad
696 1.10.2.4 nathanw im = iop_msg_alloc(sc, IM_WAIT);
697 1.5 ad
698 1.10.2.2 nathanw mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
699 1.10.2.2 nathanw mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
700 1.10.2.2 nathanw mf.msgictx = IOP_ICTX;
701 1.10.2.2 nathanw mf.msgtctx = im->im_tctx;
702 1.5 ad
703 1.5 ad DPRINTF(("%s: scanning bus %d\n", sc->sc_dv.dv_xname,
704 1.5 ad tid));
705 1.5 ad
706 1.10.2.2 nathanw rv = iop_msg_post(sc, im, &mf, 5*60*1000);
707 1.10.2.2 nathanw iop_msg_free(sc, im);
708 1.10.2.2 nathanw #ifdef I2ODEBUG
709 1.10.2.2 nathanw if (rv != 0)
710 1.10.2.2 nathanw printf("%s: bus scan failed\n",
711 1.10.2.2 nathanw sc->sc_dv.dv_xname);
712 1.10.2.2 nathanw #endif
713 1.5 ad }
714 1.10.2.2 nathanw } else if (chgind <= sc->sc_chgind) {
715 1.5 ad DPRINTF(("%s: LCT unchanged (async)\n", sc->sc_dv.dv_xname));
716 1.10.2.2 nathanw return (0);
717 1.5 ad }
718 1.5 ad
719 1.5 ad /* Re-read the LCT and determine if it has changed. */
720 1.5 ad if ((rv = iop_lct_get(sc)) != 0) {
721 1.5 ad DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
722 1.10.2.2 nathanw return (rv);
723 1.5 ad }
724 1.5 ad DPRINTF(("%s: %d LCT entries\n", sc->sc_dv.dv_xname, sc->sc_nlctent));
725 1.5 ad
726 1.10.2.2 nathanw chgind = le32toh(sc->sc_lct->changeindicator);
727 1.10.2.2 nathanw if (chgind == sc->sc_chgind) {
728 1.5 ad DPRINTF(("%s: LCT unchanged\n", sc->sc_dv.dv_xname));
729 1.10.2.2 nathanw return (0);
730 1.5 ad }
731 1.5 ad DPRINTF(("%s: LCT changed\n", sc->sc_dv.dv_xname));
732 1.10.2.2 nathanw sc->sc_chgind = chgind;
733 1.5 ad
734 1.5 ad if (sc->sc_tidmap != NULL)
735 1.5 ad free(sc->sc_tidmap, M_DEVBUF);
736 1.5 ad sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
737 1.10.2.10 nathanw M_DEVBUF, M_NOWAIT|M_ZERO);
738 1.5 ad
739 1.10.2.2 nathanw /* Allow 1 queued command per device while we're configuring. */
740 1.10.2.2 nathanw iop_adjqparam(sc, 1);
741 1.10.2.2 nathanw
742 1.10.2.2 nathanw /*
743 1.10.2.2 nathanw * Match and attach child devices. We configure high-level devices
744 1.10.2.2 nathanw * first so that any claims will propagate throughout the LCT,
745 1.10.2.2 nathanw * hopefully masking off aliased devices as a result.
746 1.10.2.2 nathanw *
747 1.10.2.2 nathanw * Re-reading the LCT at this point is a little dangerous, but we'll
748 1.10.2.2 nathanw * trust the IOP (and the operator) to behave itself...
749 1.10.2.2 nathanw */
750 1.10.2.2 nathanw iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
751 1.10.2.2 nathanw IC_CONFIGURE | IC_PRIORITY);
752 1.10.2.2 nathanw if ((rv = iop_lct_get(sc)) != 0)
753 1.10.2.2 nathanw DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
754 1.10.2.2 nathanw iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
755 1.10.2.2 nathanw IC_CONFIGURE);
756 1.5 ad
757 1.5 ad for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
758 1.10.2.2 nathanw nextii = LIST_NEXT(ii, ii_list);
759 1.5 ad
760 1.5 ad /* Detach devices that were configured, but are now gone. */
761 1.5 ad for (i = 0; i < sc->sc_nlctent; i++)
762 1.5 ad if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
763 1.5 ad break;
764 1.5 ad if (i == sc->sc_nlctent ||
765 1.5 ad (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0)
766 1.5 ad config_detach(ii->ii_dv, DETACH_FORCE);
767 1.5 ad
768 1.5 ad /*
769 1.5 ad * Tell initiators that existed before the re-configuration
770 1.5 ad * to re-configure.
771 1.5 ad */
772 1.5 ad if (ii->ii_reconfig == NULL)
773 1.5 ad continue;
774 1.5 ad if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
775 1.5 ad printf("%s: %s failed reconfigure (%d)\n",
776 1.5 ad sc->sc_dv.dv_xname, ii->ii_dv->dv_xname, rv);
777 1.5 ad }
778 1.5 ad
779 1.10.2.2 nathanw /* Re-adjust queue parameters and return. */
780 1.10.2.2 nathanw if (sc->sc_nii != 0)
781 1.10.2.2 nathanw iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
782 1.10.2.2 nathanw / sc->sc_nii);
783 1.10.2.2 nathanw
784 1.10.2.2 nathanw return (0);
785 1.1 ad }
786 1.1 ad
787 1.1 ad /*
788 1.5 ad * Configure I2O devices into the system.
789 1.1 ad */
790 1.1 ad static void
791 1.10.2.2 nathanw iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
792 1.1 ad {
793 1.1 ad struct iop_attach_args ia;
794 1.5 ad struct iop_initiator *ii;
795 1.1 ad const struct i2o_lct_entry *le;
796 1.9 ad struct device *dv;
797 1.8 ad int i, j, nent;
798 1.10.2.2 nathanw u_int usertid;
799 1.1 ad
800 1.1 ad nent = sc->sc_nlctent;
801 1.1 ad for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
802 1.10.2.4 nathanw sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;
803 1.9 ad
804 1.10.2.2 nathanw /* Ignore the device if it's in use. */
805 1.10.2.2 nathanw usertid = le32toh(le->usertid) & 4095;
806 1.10.2.2 nathanw if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
807 1.1 ad continue;
808 1.1 ad
809 1.1 ad ia.ia_class = le16toh(le->classid) & 4095;
810 1.9 ad ia.ia_tid = sc->sc_tidmap[i].it_tid;
811 1.8 ad
812 1.8 ad /* Ignore uninteresting devices. */
813 1.8 ad for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
814 1.8 ad if (iop_class[j].ic_class == ia.ia_class)
815 1.8 ad break;
816 1.8 ad if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
817 1.10.2.2 nathanw (iop_class[j].ic_flags & mask) != maskval)
818 1.8 ad continue;
819 1.1 ad
820 1.1 ad /*
821 1.5 ad * Try to configure the device only if it's not already
822 1.5 ad * configured.
823 1.1 ad */
824 1.7 ad LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
825 1.9 ad if (ia.ia_tid == ii->ii_tid) {
826 1.9 ad sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
827 1.9 ad strcpy(sc->sc_tidmap[i].it_dvname,
828 1.9 ad ii->ii_dv->dv_xname);
829 1.10.2.2 nathanw break;
830 1.9 ad }
831 1.7 ad }
832 1.5 ad if (ii != NULL)
833 1.5 ad continue;
834 1.5 ad
835 1.9 ad dv = config_found_sm(&sc->sc_dv, &ia, iop_print, iop_submatch);
836 1.9 ad if (dv != NULL) {
837 1.10.2.2 nathanw sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
838 1.9 ad strcpy(sc->sc_tidmap[i].it_dvname, dv->dv_xname);
839 1.9 ad }
840 1.1 ad }
841 1.1 ad }
842 1.1 ad
843 1.10.2.2 nathanw /*
844 1.10.2.2 nathanw * Adjust queue parameters for all child devices.
845 1.10.2.2 nathanw */
846 1.10.2.2 nathanw static void
847 1.10.2.2 nathanw iop_adjqparam(struct iop_softc *sc, int mpi)
848 1.10.2.2 nathanw {
849 1.10.2.2 nathanw struct iop_initiator *ii;
850 1.10.2.2 nathanw
851 1.10.2.2 nathanw LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
852 1.10.2.2 nathanw if (ii->ii_adjqparam != NULL)
853 1.10.2.2 nathanw (*ii->ii_adjqparam)(ii->ii_dv, mpi);
854 1.10.2.2 nathanw }
855 1.10.2.2 nathanw
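/*
 * Produce a human-readable description of the given device class.
 */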
856 1.1 ad static void
857 1.1 ad iop_devinfo(int class, char *devinfo)
858 1.1 ad {
859 1.1 ad #ifdef I2OVERBOSE
860 1.1 ad int i;
861 1.1 ad
862 1.1 ad for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
863 1.1 ad if (class == iop_class[i].ic_class)
864 1.1 ad break;
865 1.1 ad
866 1.1 ad if (i == sizeof(iop_class) / sizeof(iop_class[0]))
867 1.1 ad sprintf(devinfo, "device (class 0x%x)", class);
868 1.1 ad else
869 1.1 ad strcpy(devinfo, iop_class[i].ic_caption);
870 1.1 ad #else
871 1.1 ad
872 1.1 ad sprintf(devinfo, "device (class 0x%x)", class);
873 1.1 ad #endif
874 1.1 ad }
875 1.1 ad
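/*
 * Autoconfiguration print routine for child devices.
 */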
876 1.1 ad static int
877 1.1 ad iop_print(void *aux, const char *pnp)
878 1.1 ad {
879 1.1 ad struct iop_attach_args *ia;
880 1.1 ad char devinfo[256];
881 1.1 ad
882 1.1 ad ia = aux;
883 1.1 ad
884 1.1 ad if (pnp != NULL) {
885 1.1 ad iop_devinfo(ia->ia_class, devinfo);
886 1.1 ad printf("%s at %s", devinfo, pnp);
887 1.1 ad }
888 1.1 ad printf(" tid %d", ia->ia_tid);
889 1.1 ad return (UNCONF);
890 1.1 ad }
891 1.1 ad
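/*
 * Print routine for the vendor-specific match; always quiet.
 */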
892 1.1 ad static int
893 1.1 ad iop_vendor_print(void *aux, const char *pnp)
894 1.1 ad {
895 1.1 ad
896 1.10.2.6 nathanw return (QUIET);
897 1.1 ad }
898 1.1 ad
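/*
 * Submatch routine: reject a potential child if the kernel config file
 * binds it to a different target ID.
 */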
899 1.1 ad static int
900 1.1 ad iop_submatch(struct device *parent, struct cfdata *cf, void *aux)
901 1.1 ad {
902 1.1 ad struct iop_attach_args *ia;
903 1.1 ad
904 1.1 ad ia = aux;
905 1.1 ad
906 1.1 ad if (cf->iopcf_tid != IOPCF_TID_DEFAULT && cf->iopcf_tid != ia->ia_tid)
907 1.1 ad return (0);
908 1.1 ad
909 1.10.2.15 nathanw return (config_match(parent, cf, aux));
910 1.1 ad }
911 1.1 ad
912 1.1 ad /*
913 1.1 ad * Shut down all configured IOPs.
914 1.1 ad */
915 1.1 ad static void
916 1.1 ad iop_shutdown(void *junk)
917 1.1 ad {
918 1.1 ad struct iop_softc *sc;
919 1.1 ad int i;
920 1.1 ad
921 1.10.2.2 nathanw printf("shutting down iop devices...");
922 1.1 ad
923 1.1 ad for (i = 0; i < iop_cd.cd_ndevs; i++) {
924 1.1 ad if ((sc = device_lookup(&iop_cd, i)) == NULL)
925 1.1 ad continue;
926 1.5 ad if ((sc->sc_flags & IOP_ONLINE) == 0)
927 1.5 ad continue;
928 1.10.2.16 nathanw
929 1.5 ad iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
930 1.5 ad 0, 5000);
931 1.10.2.16 nathanw
932 1.10.2.16 nathanw if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
933 1.10.2.16 nathanw /*
934 1.10.2.16 nathanw * Some AMI firmware revisions will go to sleep and
935 1.10.2.16 nathanw * never come back after this.
936 1.10.2.16 nathanw */
937 1.10.2.16 nathanw iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
938 1.10.2.16 nathanw IOP_ICTX, 0, 1000);
939 1.10.2.16 nathanw }
940 1.1 ad }
941 1.1 ad
942 1.1 ad /* Wait. Some boards could still be flushing, stupidly enough. */
943 1.1 ad delay(5000*1000);
944 1.10.2.6 nathanw printf(" done\n");
945 1.1 ad }
946 1.1 ad
947 1.1 ad /*
948 1.10.2.2 nathanw * Retrieve IOP status.
949 1.1 ad */
950 1.10.2.6 nathanw int
951 1.10.2.2 nathanw iop_status_get(struct iop_softc *sc, int nosleep)
952 1.1 ad {
953 1.10.2.2 nathanw struct i2o_exec_status_get mf;
954 1.10.2.4 nathanw struct i2o_status *st;
955 1.10.2.4 nathanw paddr_t pa;
956 1.10.2.2 nathanw int rv, i;
957 1.1 ad
958 1.10.2.4 nathanw pa = sc->sc_scr_seg->ds_addr;
959 1.10.2.4 nathanw st = (struct i2o_status *)sc->sc_scr;
960 1.10.2.4 nathanw
961 1.10.2.2 nathanw mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
962 1.10.2.2 nathanw mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
963 1.10.2.2 nathanw mf.reserved[0] = 0;
964 1.10.2.2 nathanw mf.reserved[1] = 0;
965 1.10.2.2 nathanw mf.reserved[2] = 0;
966 1.10.2.2 nathanw mf.reserved[3] = 0;
967 1.10.2.4 nathanw mf.addrlow = (u_int32_t)pa;
968 1.10.2.4 nathanw mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
969 1.10.2.2 nathanw mf.length = sizeof(sc->sc_status);
970 1.1 ad
971 1.10.2.4 nathanw memset(st, 0, sizeof(*st));
972 1.10.2.4 nathanw bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
973 1.10.2.4 nathanw BUS_DMASYNC_PREREAD);
974 1.1 ad
975 1.10.2.2 nathanw if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
976 1.1 ad return (rv);
977 1.1 ad
978 1.10.2.2 nathanw for (i = 25; i != 0; i--) {
979 1.10.2.4 nathanw bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
980 1.10.2.4 nathanw sizeof(*st), BUS_DMASYNC_POSTREAD);
981 1.10.2.4 nathanw if (st->syncbyte == 0xff)
982 1.10.2.2 nathanw break;
983 1.10.2.2 nathanw if (nosleep)
984 1.10.2.2 nathanw DELAY(100*1000);
985 1.10.2.2 nathanw else
986 1.10.2.2 nathanw tsleep(iop_status_get, PWAIT, "iopstat", hz / 10);
987 1.10.2.2 nathanw }
988 1.1 ad
989 1.10.2.9 nathanw if (st->syncbyte != 0xff) {
990 1.10.2.9 nathanw printf("%s: STATUS_GET timed out\n", sc->sc_dv.dv_xname);
991 1.10.2.2 nathanw rv = EIO;
992 1.10.2.9 nathanw } else {
993 1.10.2.4 nathanw memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
994 1.10.2.2 nathanw rv = 0;
995 1.10.2.4 nathanw }
996 1.10.2.4 nathanw
997 1.10.2.2 nathanw return (rv);
998 1.1 ad }
999 1.1 ad
1000 1.1 ad /*
1001 1.10.2.5 nathanw * Initialize and populate the IOP's outbound FIFO.
1002 1.1 ad */
1003 1.1 ad static int
1004 1.1 ad iop_ofifo_init(struct iop_softc *sc)
1005 1.1 ad {
1006 1.1 ad bus_addr_t addr;
1007 1.5 ad bus_dma_segment_t seg;
1008 1.10.2.2 nathanw struct i2o_exec_outbound_init *mf;
1009 1.5 ad int i, rseg, rv;
1010 1.10.2.4 nathanw u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;
1011 1.1 ad
1012 1.10.2.4 nathanw sw = (u_int32_t *)sc->sc_scr;
1013 1.1 ad
1014 1.10.2.2 nathanw mf = (struct i2o_exec_outbound_init *)mb;
1015 1.10.2.2 nathanw mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
1016 1.10.2.2 nathanw mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
1017 1.10.2.2 nathanw mf->msgictx = IOP_ICTX;
1018 1.10.2.4 nathanw mf->msgtctx = 0;
1019 1.10.2.2 nathanw mf->pagesize = PAGE_SIZE;
1020 1.10.2.7 nathanw mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);
1021 1.1 ad
1022 1.5 ad /*
1023 1.5 ad * The I2O spec says that there are two SGLs: one for the status
1024 1.5 ad * word, and one for a list of discarded MFAs. It continues to say
1025 1.5 ad * that if you don't want to get the list of MFAs, an IGNORE SGL is
1026 1.10.2.2 nathanw * necessary; this isn't the case (and is in fact a bad thing).
1027 1.5 ad */
1028 1.10.2.4 nathanw mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
1029 1.10.2.4 nathanw I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
1030 1.10.2.4 nathanw mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
1031 1.10.2.4 nathanw (u_int32_t)sc->sc_scr_seg->ds_addr;
1032 1.10.2.4 nathanw mb[0] += 2 << 16;
1033 1.10.2.4 nathanw
1034 1.10.2.4 nathanw *sw = 0;
1035 1.10.2.4 nathanw bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1036 1.10.2.4 nathanw BUS_DMASYNC_PREREAD);
1037 1.10.2.4 nathanw
1038 1.10.2.4 nathanw if ((rv = iop_post(sc, mb)) != 0)
1039 1.1 ad return (rv);
1040 1.1 ad
1041 1.10.2.4 nathanw POLL(5000,
1042 1.10.2.4 nathanw (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1043 1.10.2.4 nathanw BUS_DMASYNC_POSTREAD),
1044 1.10.2.4 nathanw *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));
1045 1.10.2.4 nathanw
1046 1.10.2.4 nathanw if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
1047 1.10.2.4 nathanw printf("%s: outbound FIFO init failed (%d)\n",
1048 1.10.2.4 nathanw sc->sc_dv.dv_xname, le32toh(*sw));
1049 1.5 ad return (EIO);
1050 1.1 ad }
1051 1.1 ad
1052 1.10.2.2 nathanw /* Allocate DMA safe memory for the reply frames. */
1053 1.1 ad if (sc->sc_rep_phys == 0) {
1054 1.10.2.7 nathanw sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;
1055 1.5 ad
1056 1.5 ad rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
1057 1.5 ad 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
1058 1.5 ad if (rv != 0) {
1059 1.5 ad printf("%s: dma alloc = %d\n", sc->sc_dv.dv_xname,
1060 1.5 ad rv);
1061 1.5 ad return (rv);
1062 1.5 ad }
1063 1.5 ad
1064 1.5 ad rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
1065 1.5 ad &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1066 1.5 ad if (rv != 0) {
1067 1.5 ad printf("%s: dma map = %d\n", sc->sc_dv.dv_xname, rv);
1068 1.5 ad return (rv);
1069 1.5 ad }
1070 1.5 ad
1071 1.5 ad rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
1072 1.5 ad sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
1073 1.5 ad if (rv != 0) {
1074 1.10.2.4 nathanw printf("%s: dma create = %d\n", sc->sc_dv.dv_xname,
1075 1.10.2.4 nathanw rv);
1076 1.5 ad return (rv);
1077 1.5 ad }
1078 1.5 ad
1079 1.10.2.4 nathanw rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
1080 1.10.2.4 nathanw sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
1081 1.5 ad if (rv != 0) {
1082 1.5 ad printf("%s: dma load = %d\n", sc->sc_dv.dv_xname, rv);
1083 1.5 ad return (rv);
1084 1.5 ad }
1085 1.5 ad
1086 1.5 ad sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
1087 1.1 ad }
1088 1.1 ad
1089 1.1 ad /* Populate the outbound FIFO. */
1090 1.10.2.2 nathanw for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
1091 1.5 ad iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
1092 1.10.2.7 nathanw addr += sc->sc_framesize;
1093 1.1 ad }
1094 1.1 ad
1095 1.1 ad return (0);
1096 1.1 ad }
1097 1.1 ad
1098 1.1 ad /*
1099 1.1 ad * Read the specified number of bytes from the IOP's hardware resource table.
1100 1.1 ad */
1101 1.1 ad static int
1102 1.1 ad iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
1103 1.1 ad {
1104 1.1 ad struct iop_msg *im;
1105 1.1 ad int rv;
1106 1.10.2.2 nathanw struct i2o_exec_hrt_get *mf;
1107 1.10.2.2 nathanw u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1108 1.1 ad
1109 1.10.2.4 nathanw im = iop_msg_alloc(sc, IM_WAIT);
1110 1.10.2.2 nathanw mf = (struct i2o_exec_hrt_get *)mb;
1111 1.10.2.2 nathanw mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
1112 1.10.2.2 nathanw mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
1113 1.10.2.2 nathanw mf->msgictx = IOP_ICTX;
1114 1.10.2.2 nathanw mf->msgtctx = im->im_tctx;
1115 1.1 ad
1116 1.10.2.4 nathanw iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
1117 1.10.2.2 nathanw rv = iop_msg_post(sc, im, mb, 30000);
1118 1.1 ad iop_msg_unmap(sc, im);
1119 1.10.2.2 nathanw iop_msg_free(sc, im);
1120 1.1 ad return (rv);
1121 1.1 ad }
1122 1.1 ad
1123 1.1 ad /*
1124 1.5 ad * Read the IOP's hardware resource table.
1125 1.1 ad */
1126 1.1 ad static int
1127 1.1 ad iop_hrt_get(struct iop_softc *sc)
1128 1.1 ad {
1129 1.1 ad struct i2o_hrt hrthdr, *hrt;
1130 1.1 ad int size, rv;
1131 1.1 ad
1132 1.10.2.13 nathanw PHOLD(curlwp);
1133 1.10.2.2 nathanw rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
1134 1.10.2.13 nathanw PRELE(curlwp);
1135 1.10.2.2 nathanw if (rv != 0)
1136 1.1 ad return (rv);
1137 1.1 ad
1138 1.5 ad DPRINTF(("%s: %d hrt entries\n", sc->sc_dv.dv_xname,
1139 1.5 ad le16toh(hrthdr.numentries)));
1140 1.5 ad
1141 1.5 ad size = sizeof(struct i2o_hrt) +
1142 1.10.2.4 nathanw (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
1143 1.1 ad hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);
if (hrt == NULL)
	return (ENOMEM);
1144 1.1 ad
1145 1.1 ad if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
1146 1.1 ad free(hrt, M_DEVBUF);
1147 1.1 ad return (rv);
1148 1.1 ad }
1149 1.1 ad
1150 1.1 ad if (sc->sc_hrt != NULL)
1151 1.1 ad free(sc->sc_hrt, M_DEVBUF);
1152 1.1 ad sc->sc_hrt = hrt;
1153 1.1 ad return (0);
1154 1.1 ad }
1155 1.1 ad
1156 1.1 ad /*
1157 1.1 ad * Request the specified number of bytes from the IOP's logical
1158 1.5 ad * configuration table. If a change indicator is specified, this
1159 1.10.2.2 nathanw * is a verbatim notification request, so the caller must be prepared
1160 1.5 ad * to wait indefinitely.
1161 1.1 ad */
1162 1.1 ad static int
1163 1.5 ad iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
1164 1.5 ad u_int32_t chgind)
1165 1.1 ad {
1166 1.1 ad struct iop_msg *im;
1167 1.10.2.2 nathanw struct i2o_exec_lct_notify *mf;
1168 1.1 ad int rv;
1169 1.10.2.2 nathanw u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1170 1.1 ad
1171 1.10.2.4 nathanw im = iop_msg_alloc(sc, IM_WAIT);
1172 1.1 ad memset(lct, 0, size);
1173 1.1 ad
1174 1.10.2.2 nathanw mf = (struct i2o_exec_lct_notify *)mb;
1175 1.10.2.2 nathanw mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
1176 1.10.2.2 nathanw mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
1177 1.10.2.2 nathanw mf->msgictx = IOP_ICTX;
1178 1.10.2.2 nathanw mf->msgtctx = im->im_tctx;
1179 1.10.2.2 nathanw mf->classid = I2O_CLASS_ANY;
1180 1.10.2.2 nathanw mf->changeindicator = chgind;
1181 1.5 ad
1182 1.9 ad #ifdef I2ODEBUG
1183 1.9 ad printf("iop_lct_get0: reading LCT");
1184 1.9 ad if (chgind != 0)
1185 1.9 ad printf(" (async)");
1186 1.9 ad printf("\n");
1187 1.9 ad #endif
1188 1.1 ad
1189 1.10.2.4 nathanw iop_msg_map(sc, im, mb, lct, size, 0, NULL);
1190 1.10.2.2 nathanw rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
1191 1.1 ad iop_msg_unmap(sc, im);
1192 1.10.2.2 nathanw iop_msg_free(sc, im);
1193 1.1 ad return (rv);
1194 1.1 ad }
1195 1.1 ad
1196 1.1 ad /*
1197 1.6 ad * Read the IOP's logical configuration table.
1198 1.1 ad */
1199 1.1 ad int
1200 1.1 ad iop_lct_get(struct iop_softc *sc)
1201 1.1 ad {
1202 1.5 ad int esize, size, rv;
1203 1.5 ad struct i2o_lct *lct;
1204 1.1 ad
1205 1.5 ad esize = le32toh(sc->sc_status.expectedlctsize);
1206 1.5 ad lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
1207 1.5 ad if (lct == NULL)
1208 1.1 ad return (ENOMEM);
1209 1.1 ad
1210 1.5 ad if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
1211 1.1 ad free(lct, M_DEVBUF);
1212 1.1 ad return (rv);
1213 1.1 ad }
1214 1.1 ad
1215 1.5 ad size = le16toh(lct->tablesize) << 2;
1216 1.5 ad if (esize != size) {
1217 1.1 ad free(lct, M_DEVBUF);
1218 1.5 ad lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
1219 1.5 ad if (lct == NULL)
1220 1.5 ad return (ENOMEM);
1221 1.5 ad
1222 1.5 ad if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
1223 1.5 ad free(lct, M_DEVBUF);
1224 1.5 ad return (rv);
1225 1.5 ad }
1226 1.1 ad }
1227 1.5 ad
1228 1.5 ad /* Swap in the new LCT. */
1229 1.1 ad if (sc->sc_lct != NULL)
1230 1.1 ad free(sc->sc_lct, M_DEVBUF);
1231 1.1 ad sc->sc_lct = lct;
1232 1.1 ad sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
1233 1.1 ad sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
1234 1.1 ad sizeof(struct i2o_lct_entry);
1235 1.1 ad return (0);
1236 1.1 ad }
1237 1.1 ad
1238 1.1 ad /*
1239 1.10.2.2 nathanw * Request the specified parameter group from the target. If an initiator
1240 1.10.2.2 nathanw * is specified (a) don't wait for the operation to complete, but instead
1241 1.10.2.2 nathanw * let the initiator's interrupt handler deal with the reply and (b) place a
1242 1.10.2.2 nathanw * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
1243 1.1 ad */
1244 1.1 ad int
1245 1.10.2.5 nathanw iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
1246 1.10.2.5 nathanw int size, struct iop_initiator *ii)
1247 1.1 ad {
1248 1.1 ad struct iop_msg *im;
1249 1.10.2.2 nathanw struct i2o_util_params_op *mf;
1250 1.10.2.2 nathanw struct i2o_reply *rf;
1251 1.10.2.5 nathanw int rv;
1252 1.10.2.2 nathanw struct iop_pgop *pgop;
1253 1.10.2.2 nathanw u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1254 1.1 ad
1255 1.10.2.4 nathanw im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
1256 1.10.2.2 nathanw if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
1257 1.10.2.2 nathanw iop_msg_free(sc, im);
1258 1.10.2.2 nathanw return (ENOMEM);
1259 1.10.2.2 nathanw }
1260 1.10.2.2 nathanw if ((rf = malloc(sizeof(*rf), M_DEVBUF, M_WAITOK)) == NULL) {
1261 1.10.2.2 nathanw iop_msg_free(sc, im);
1262 1.10.2.2 nathanw free(pgop, M_DEVBUF);
1263 1.10.2.2 nathanw return (ENOMEM);
1264 1.10.2.2 nathanw }
1265 1.10.2.2 nathanw im->im_dvcontext = pgop;
1266 1.10.2.2 nathanw im->im_rb = rf;
1267 1.1 ad
1268 1.10.2.2 nathanw mf = (struct i2o_util_params_op *)mb;
1269 1.10.2.2 nathanw mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1270 1.10.2.5 nathanw mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
1271 1.10.2.2 nathanw mf->msgictx = IOP_ICTX;
1272 1.10.2.2 nathanw mf->msgtctx = im->im_tctx;
1273 1.10.2.2 nathanw mf->flags = 0;
1274 1.10.2.2 nathanw
1275 1.10.2.2 nathanw pgop->olh.count = htole16(1);
1276 1.10.2.2 nathanw pgop->olh.reserved = htole16(0);
1277 1.10.2.5 nathanw pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
1278 1.10.2.2 nathanw pgop->oat.fieldcount = htole16(0xffff);
1279 1.10.2.2 nathanw pgop->oat.group = htole16(group);
1280 1.10.2.2 nathanw
1281 1.10.2.2 nathanw if (ii == NULL)
1282 1.10.2.13 nathanw PHOLD(curlwp);
1283 1.1 ad
1284 1.5 ad memset(buf, 0, size);
1285 1.10.2.4 nathanw iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
1286 1.10.2.5 nathanw iop_msg_map(sc, im, mb, buf, size, 0, NULL);
1287 1.10.2.2 nathanw rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));
1288 1.10.2.2 nathanw
1289 1.10.2.2 nathanw if (ii == NULL)
1290 1.10.2.13 nathanw PRELE(curlwp);
1291 1.10.2.2 nathanw
1292 1.10.2.2 nathanw /* Detect errors; let partial transfers count as success. */
1293 1.10.2.2 nathanw if (ii == NULL && rv == 0) {
1294 1.10.2.2 nathanw if (rf->reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
1295 1.10.2.2 nathanw le16toh(rf->detail) == I2O_DSC_UNKNOWN_ERROR)
1296 1.10.2.2 nathanw rv = 0;
1297 1.10.2.2 nathanw else
1298 1.10.2.2 nathanw rv = (rf->reqstatus != 0 ? EIO : 0);
1299 1.10.2.5 nathanw
1300 1.10.2.5 nathanw if (rv != 0)
1301 1.10.2.5 nathanw printf("%s: FIELD_GET failed for tid %d group %d\n",
1302 1.10.2.5 nathanw sc->sc_dv.dv_xname, tid, group);
1303 1.10.2.2 nathanw }
1304 1.10.2.2 nathanw
1305 1.10.2.2 nathanw if (ii == NULL || rv != 0) {
1306 1.10.2.2 nathanw iop_msg_unmap(sc, im);
1307 1.10.2.2 nathanw iop_msg_free(sc, im);
1308 1.10.2.2 nathanw free(pgop, M_DEVBUF);
1309 1.10.2.2 nathanw free(rf, M_DEVBUF);
1310 1.10.2.2 nathanw }
1311 1.1 ad
1312 1.1 ad return (rv);
1313 1.10.2.2 nathanw }
1314 1.1 ad
1315 1.1 ad /*
1316 1.10.2.5 nathanw * Set a single field in a scalar parameter group.
1317 1.10.2.5 nathanw */
1318 1.10.2.5 nathanw int
1319 1.10.2.5 nathanw iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
1320 1.10.2.5 nathanw int size, int field)
1321 1.10.2.5 nathanw {
1322 1.10.2.5 nathanw struct iop_msg *im;
1323 1.10.2.5 nathanw struct i2o_util_params_op *mf;
1324 1.10.2.5 nathanw struct iop_pgop *pgop;
1325 1.10.2.5 nathanw int rv, totsize;
1326 1.10.2.5 nathanw u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1327 1.10.2.5 nathanw
1328 1.10.2.5 nathanw totsize = sizeof(*pgop) + size;
1329 1.10.2.5 nathanw
1330 1.10.2.5 nathanw im = iop_msg_alloc(sc, IM_WAIT);
1331 1.10.2.5 nathanw if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
1332 1.10.2.5 nathanw iop_msg_free(sc, im);
1333 1.10.2.5 nathanw return (ENOMEM);
1334 1.10.2.5 nathanw }
1335 1.10.2.5 nathanw
1336 1.10.2.5 nathanw mf = (struct i2o_util_params_op *)mb;
1337 1.10.2.5 nathanw mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1338 1.10.2.5 nathanw mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1339 1.10.2.5 nathanw mf->msgictx = IOP_ICTX;
1340 1.10.2.5 nathanw mf->msgtctx = im->im_tctx;
1341 1.10.2.5 nathanw mf->flags = 0;
1342 1.10.2.5 nathanw
1343 1.10.2.5 nathanw pgop->olh.count = htole16(1);
1344 1.10.2.5 nathanw pgop->olh.reserved = htole16(0);
1345 1.10.2.5 nathanw pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
1346 1.10.2.5 nathanw pgop->oat.fieldcount = htole16(1);
1347 1.10.2.5 nathanw pgop->oat.group = htole16(group);
1348 1.10.2.5 nathanw pgop->oat.fields[0] = htole16(field);
1349 1.10.2.5 nathanw memcpy(pgop + 1, buf, size);
1350 1.10.2.5 nathanw
1351 1.10.2.5 nathanw iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
1352 1.10.2.5 nathanw rv = iop_msg_post(sc, im, mb, 30000);
1353 1.10.2.5 nathanw if (rv != 0)
1354 1.10.2.5 nathanw printf("%s: FIELD_SET failed for tid %d group %d\n",
1355 1.10.2.5 nathanw sc->sc_dv.dv_xname, tid, group);
1356 1.10.2.5 nathanw
1357 1.10.2.5 nathanw iop_msg_unmap(sc, im);
1358 1.10.2.5 nathanw iop_msg_free(sc, im);
1359 1.10.2.5 nathanw free(pgop, M_DEVBUF);
1360 1.10.2.5 nathanw return (rv);
1361 1.10.2.5 nathanw }
1362 1.10.2.5 nathanw
1363 1.10.2.5 nathanw /*
1364 1.10.2.5 nathanw * Delete all rows in a tabular parameter group.
1365 1.10.2.5 nathanw */
1366 1.10.2.5 nathanw int
1367 1.10.2.5 nathanw iop_table_clear(struct iop_softc *sc, int tid, int group)
1368 1.10.2.5 nathanw {
1369 1.10.2.5 nathanw struct iop_msg *im;
1370 1.10.2.5 nathanw struct i2o_util_params_op *mf;
1371 1.10.2.5 nathanw struct iop_pgop pgop;
1372 1.10.2.5 nathanw u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1373 1.10.2.5 nathanw int rv;
1374 1.10.2.5 nathanw
1375 1.10.2.5 nathanw im = iop_msg_alloc(sc, IM_WAIT);
1376 1.10.2.5 nathanw
1377 1.10.2.5 nathanw mf = (struct i2o_util_params_op *)mb;
1378 1.10.2.5 nathanw mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1379 1.10.2.5 nathanw mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1380 1.10.2.5 nathanw mf->msgictx = IOP_ICTX;
1381 1.10.2.5 nathanw mf->msgtctx = im->im_tctx;
1382 1.10.2.5 nathanw mf->flags = 0;
1383 1.10.2.5 nathanw
1384 1.10.2.5 nathanw pgop.olh.count = htole16(1);
1385 1.10.2.5 nathanw pgop.olh.reserved = htole16(0);
1386 1.10.2.5 nathanw pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
1387 1.10.2.5 nathanw pgop.oat.fieldcount = htole16(0);
1388 1.10.2.5 nathanw pgop.oat.group = htole16(group);
1389 1.10.2.5 nathanw pgop.oat.fields[0] = htole16(0);
1390 1.10.2.5 nathanw
1391 1.10.2.13 nathanw PHOLD(curlwp);
1392 1.10.2.5 nathanw iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
1393 1.10.2.5 nathanw rv = iop_msg_post(sc, im, mb, 30000);
1394 1.10.2.5 nathanw if (rv != 0)
1395 1.10.2.5 nathanw printf("%s: TABLE_CLEAR failed for tid %d group %d\n",
1396 1.10.2.5 nathanw sc->sc_dv.dv_xname, tid, group);
1397 1.10.2.5 nathanw
1398 1.10.2.5 nathanw iop_msg_unmap(sc, im);
1399 1.10.2.13 nathanw PRELE(curlwp);
1400 1.10.2.5 nathanw iop_msg_free(sc, im);
1401 1.10.2.5 nathanw return (rv);
1402 1.10.2.5 nathanw }
1403 1.10.2.5 nathanw
1404 1.10.2.5 nathanw /*
1405 1.10.2.5 nathanw * Add a single row to a tabular parameter group. The row can have only one
1406 1.10.2.5 nathanw * field.
1407 1.10.2.5 nathanw */
1408 1.10.2.5 nathanw int
1409 1.10.2.5 nathanw iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
1410 1.10.2.5 nathanw int size, int row)
1411 1.10.2.5 nathanw {
1412 1.10.2.5 nathanw struct iop_msg *im;
1413 1.10.2.5 nathanw struct i2o_util_params_op *mf;
1414 1.10.2.5 nathanw struct iop_pgop *pgop;
1415 1.10.2.5 nathanw int rv, totsize;
1416 1.10.2.5 nathanw u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1417 1.10.2.5 nathanw
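	/*
	 * The operation block carries two extra 16-bit fields beyond the
	 * single field slot in the structure (RowCount and KeyValue),
	 * followed by the row data itself; hence sizeof(u_int16_t) * 2.
	 */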
1418 1.10.2.5 nathanw totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;
1419 1.10.2.5 nathanw
1420 1.10.2.5 nathanw im = iop_msg_alloc(sc, IM_WAIT);
1421 1.10.2.5 nathanw if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
1422 1.10.2.5 nathanw iop_msg_free(sc, im);
1423 1.10.2.5 nathanw return (ENOMEM);
1424 1.10.2.5 nathanw }
1425 1.10.2.5 nathanw
1426 1.10.2.5 nathanw mf = (struct i2o_util_params_op *)mb;
1427 1.10.2.5 nathanw mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1428 1.10.2.5 nathanw mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1429 1.10.2.5 nathanw mf->msgictx = IOP_ICTX;
1430 1.10.2.5 nathanw mf->msgtctx = im->im_tctx;
1431 1.10.2.5 nathanw mf->flags = 0;
1432 1.10.2.5 nathanw
1433 1.10.2.5 nathanw pgop->olh.count = htole16(1);
1434 1.10.2.5 nathanw pgop->olh.reserved = htole16(0);
1435 1.10.2.5 nathanw pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
1436 1.10.2.5 nathanw pgop->oat.fieldcount = htole16(1);
1437 1.10.2.5 nathanw pgop->oat.group = htole16(group);
1438 1.10.2.5 nathanw pgop->oat.fields[0] = htole16(0); /* FieldIdx */
1439 1.10.2.5 nathanw pgop->oat.fields[1] = htole16(1); /* RowCount */
1440 1.10.2.5 nathanw pgop->oat.fields[2] = htole16(row); /* KeyValue */
1441 1.10.2.5 nathanw memcpy(&pgop->oat.fields[3], buf, size);
1442 1.10.2.5 nathanw
1443 1.10.2.5 nathanw iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
1444 1.10.2.5 nathanw rv = iop_msg_post(sc, im, mb, 30000);
1445 1.10.2.5 nathanw if (rv != 0)
1446 1.10.2.5 nathanw printf("%s: ADD_ROW failed for tid %d group %d row %d\n",
1447 1.10.2.5 nathanw sc->sc_dv.dv_xname, tid, group, row);
1448 1.10.2.5 nathanw
1449 1.10.2.5 nathanw iop_msg_unmap(sc, im);
1450 1.10.2.5 nathanw iop_msg_free(sc, im);
1451 1.10.2.5 nathanw free(pgop, M_DEVBUF);
1452 1.10.2.5 nathanw return (rv);
1453 1.10.2.5 nathanw }
1454 1.10.2.5 nathanw
1455 1.10.2.5 nathanw /*
1456 1.5 ad * Execute a simple command (no parameters).
1457 1.1 ad */
1458 1.1 ad int
1459 1.5 ad iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
1460 1.5 ad int async, int timo)
1461 1.1 ad {
1462 1.1 ad struct iop_msg *im;
1463 1.10.2.2 nathanw struct i2o_msg mf;
1464 1.5 ad int rv, fl;
1465 1.1 ad
1466 1.10.2.2 nathanw fl = (async != 0 ? IM_WAIT : IM_POLL);
1467 1.10.2.4 nathanw im = iop_msg_alloc(sc, fl);
1468 1.1 ad
1469 1.10.2.2 nathanw mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1470 1.10.2.2 nathanw mf.msgfunc = I2O_MSGFUNC(tid, function);
1471 1.10.2.2 nathanw mf.msgictx = ictx;
1472 1.10.2.2 nathanw mf.msgtctx = im->im_tctx;
1473 1.1 ad
1474 1.10.2.2 nathanw rv = iop_msg_post(sc, im, &mf, timo);
1475 1.10.2.2 nathanw iop_msg_free(sc, im);
1476 1.1 ad return (rv);
1477 1.1 ad }
1478 1.1 ad
1479 1.1 ad /*
1480 1.5 ad * Post the system table to the IOP.
1481 1.1 ad */
1482 1.1 ad static int
1483 1.1 ad iop_systab_set(struct iop_softc *sc)
1484 1.1 ad {
1485 1.10.2.2 nathanw struct i2o_exec_sys_tab_set *mf;
1486 1.1 ad struct iop_msg *im;
1487 1.10.2.2 nathanw bus_space_handle_t bsh;
1488 1.10.2.2 nathanw bus_addr_t boo;
1489 1.1 ad u_int32_t mema[2], ioa[2];
1490 1.1 ad int rv;
1491 1.10.2.2 nathanw u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1492 1.1 ad
1493 1.10.2.4 nathanw im = iop_msg_alloc(sc, IM_WAIT);
1494 1.10.2.2 nathanw
1495 1.10.2.2 nathanw mf = (struct i2o_exec_sys_tab_set *)mb;
1496 1.10.2.2 nathanw mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
1497 1.10.2.2 nathanw mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
1498 1.10.2.2 nathanw mf->msgictx = IOP_ICTX;
1499 1.10.2.2 nathanw mf->msgtctx = im->im_tctx;
1500 1.10.2.2 nathanw mf->iopid = (sc->sc_dv.dv_unit + 2) << 12;
1501 1.10.2.2 nathanw mf->segnumber = 0;
1502 1.10.2.2 nathanw
1503 1.10.2.2 nathanw mema[1] = sc->sc_status.desiredprivmemsize;
1504 1.10.2.2 nathanw ioa[1] = sc->sc_status.desiredpriviosize;
1505 1.10.2.2 nathanw
1506 1.10.2.2 nathanw if (mema[1] != 0) {
1507 1.10.2.2 nathanw rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
1508 1.10.2.2 nathanw le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
1509 1.10.2.2 nathanw mema[0] = htole32(boo);
1510 1.10.2.2 nathanw if (rv != 0) {
1511 1.10.2.2 nathanw printf("%s: can't alloc priv mem space, err = %d\n",
1512 1.10.2.2 nathanw sc->sc_dv.dv_xname, rv);
1513 1.10.2.2 nathanw mema[0] = 0;
1514 1.10.2.2 nathanw mema[1] = 0;
1515 1.10.2.2 nathanw }
1516 1.10.2.2 nathanw }
1517 1.1 ad
1518 1.10.2.2 nathanw if (ioa[1] != 0) {
1519 1.10.2.2 nathanw rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
1520 1.10.2.2 nathanw le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
1521 1.10.2.2 nathanw ioa[0] = htole32(boo);
1522 1.10.2.2 nathanw if (rv != 0) {
1523 1.10.2.2 nathanw printf("%s: can't alloc priv i/o space, err = %d\n",
1524 1.10.2.2 nathanw sc->sc_dv.dv_xname, rv);
1525 1.10.2.2 nathanw ioa[0] = 0;
1526 1.10.2.2 nathanw ioa[1] = 0;
1527 1.10.2.2 nathanw }
1528 1.10.2.2 nathanw }
1529 1.1 ad
1530 1.10.2.13 nathanw PHOLD(curlwp);
1531 1.10.2.4 nathanw iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
1532 1.10.2.4 nathanw iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
1533 1.10.2.4 nathanw iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
1534 1.10.2.2 nathanw rv = iop_msg_post(sc, im, mb, 5000);
1535 1.1 ad iop_msg_unmap(sc, im);
1536 1.10.2.2 nathanw iop_msg_free(sc, im);
1537 1.10.2.13 nathanw PRELE(curlwp);
1538 1.1 ad return (rv);
1539 1.1 ad }
1540 1.1 ad
1541 1.1 ad /*
1542 1.10.2.2 nathanw * Reset the IOP. Must be called with interrupts disabled.
1543 1.1 ad */
1544 1.1 ad static int
1545 1.1 ad iop_reset(struct iop_softc *sc)
1546 1.1 ad {
1547 1.10.2.4 nathanw u_int32_t mfa, *sw;
1548 1.10.2.2 nathanw struct i2o_exec_iop_reset mf;
1549 1.1 ad int rv;
1550 1.10.2.4 nathanw paddr_t pa;
1551 1.1 ad
1552 1.10.2.4 nathanw sw = (u_int32_t *)sc->sc_scr;
1553 1.10.2.4 nathanw pa = sc->sc_scr_seg->ds_addr;
1554 1.1 ad
1555 1.10.2.2 nathanw mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
1556 1.10.2.2 nathanw mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
1557 1.10.2.2 nathanw mf.reserved[0] = 0;
1558 1.10.2.2 nathanw mf.reserved[1] = 0;
1559 1.10.2.2 nathanw mf.reserved[2] = 0;
1560 1.10.2.2 nathanw mf.reserved[3] = 0;
1561 1.10.2.4 nathanw mf.statuslow = (u_int32_t)pa;
1562 1.10.2.4 nathanw mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);
1563 1.10.2.4 nathanw
1564 1.10.2.4 nathanw *sw = htole32(0);
1565 1.10.2.4 nathanw bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1566 1.10.2.4 nathanw BUS_DMASYNC_PREREAD);
1567 1.1 ad
1568 1.10.2.2 nathanw if ((rv = iop_post(sc, (u_int32_t *)&mf)))
1569 1.1 ad return (rv);
1570 1.1 ad
1571 1.10.2.4 nathanw POLL(2500,
1572 1.10.2.4 nathanw (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1573 1.10.2.4 nathanw BUS_DMASYNC_POSTREAD), *sw != 0));
1574 1.10.2.4 nathanw if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
1575 1.10.2.4 nathanw printf("%s: reset rejected, status 0x%x\n",
1576 1.10.2.4 nathanw sc->sc_dv.dv_xname, le32toh(*sw));
1577 1.1 ad return (EIO);
1578 1.1 ad }
1579 1.1 ad
1580 1.1 ad /*
1581 1.5 ad * IOP is now in the INIT state. Wait no more than 10 seconds for
1582 1.1 ad * the inbound queue to become responsive.
1583 1.1 ad */
1584 1.5 ad POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
1585 1.1 ad if (mfa == IOP_MFA_EMPTY) {
1586 1.1 ad printf("%s: reset failed\n", sc->sc_dv.dv_xname);
1587 1.1 ad return (EIO);
1588 1.1 ad }
1589 1.1 ad
1590 1.1 ad iop_release_mfa(sc, mfa);
1591 1.1 ad return (0);
1592 1.1 ad }
1593 1.1 ad
1594 1.1 ad /*
1595 1.10.2.2 nathanw * Register a new initiator. Must be called with the configuration lock
1596 1.10.2.2 nathanw * held.
1597 1.1 ad */
1598 1.10.2.2 nathanw void
1599 1.1 ad iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
1600 1.1 ad {
1601 1.10.2.2 nathanw static int ictxgen;
1602 1.10.2.2 nathanw int s;
1603 1.5 ad
1604 1.10.2.2 nathanw /* 0 is reserved (by us) for system messages. */
1605 1.10.2.2 nathanw ii->ii_ictx = ++ictxgen;
1606 1.1 ad
1607 1.10.2.2 nathanw /*
1608 1.10.2.2 nathanw * `Utility initiators' don't make it onto the per-IOP initiator list
1609 1.10.2.2 nathanw * (which is used only for configuration), but do get one slot on
1610 1.10.2.2 nathanw * the inbound queue.
1611 1.10.2.2 nathanw */
1612 1.10.2.2 nathanw if ((ii->ii_flags & II_UTILITY) == 0) {
1613 1.10.2.2 nathanw LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
1614 1.10.2.2 nathanw sc->sc_nii++;
1615 1.10.2.2 nathanw } else
1616 1.10.2.2 nathanw sc->sc_nuii++;
1617 1.1 ad
1618 1.10.2.2 nathanw s = splbio();
1619 1.10.2.2 nathanw LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
1620 1.10.2.2 nathanw splx(s);
1621 1.1 ad }
1622 1.1 ad
1623 1.1 ad /*
1624 1.10.2.2 nathanw * Unregister an initiator. Must be called with the configuration lock
1625 1.10.2.2 nathanw * held.
1626 1.1 ad */
1627 1.1 ad void
1628 1.1 ad iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
1629 1.1 ad {
1630 1.10.2.2 nathanw int s;
1631 1.1 ad
1632 1.10.2.2 nathanw if ((ii->ii_flags & II_UTILITY) == 0) {
1633 1.10.2.2 nathanw LIST_REMOVE(ii, ii_list);
1634 1.10.2.2 nathanw sc->sc_nii--;
1635 1.10.2.2 nathanw } else
1636 1.10.2.2 nathanw sc->sc_nuii--;
1637 1.10.2.2 nathanw
1638 1.10.2.2 nathanw s = splbio();
1639 1.5 ad LIST_REMOVE(ii, ii_hash);
1640 1.10.2.2 nathanw splx(s);
1641 1.1 ad }
1642 1.1 ad
1643 1.1 ad /*
1644 1.10.2.2 nathanw * Handle a reply frame from the IOP.
1645 1.1 ad */
1646 1.1 ad static int
1647 1.5 ad iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
1648 1.1 ad {
1649 1.1 ad struct iop_msg *im;
1650 1.1 ad struct i2o_reply *rb;
1651 1.10.2.2 nathanw struct i2o_fault_notify *fn;
1652 1.1 ad struct iop_initiator *ii;
1653 1.5 ad u_int off, ictx, tctx, status, size;
1654 1.1 ad
1655 1.1 ad off = (int)(rmfa - sc->sc_rep_phys);
1656 1.1 ad rb = (struct i2o_reply *)(sc->sc_rep + off);
1657 1.1 ad
1658 1.10.2.4 nathanw /* Perform reply queue DMA synchronisation. */
1659 1.10.2.2 nathanw bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
1660 1.10.2.7 nathanw sc->sc_framesize, BUS_DMASYNC_POSTREAD);
1661 1.10.2.2 nathanw if (--sc->sc_curib != 0)
1662 1.1 ad bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap,
1663 1.1 ad 0, sc->sc_rep_size, BUS_DMASYNC_PREREAD);
1664 1.1 ad
1665 1.1 ad #ifdef I2ODEBUG
1666 1.1 ad if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
1667 1.5 ad panic("iop_handle_reply: 64-bit reply");
1668 1.1 ad #endif
1669 1.1 ad /*
1670 1.1 ad * Find the initiator.
1671 1.1 ad */
1672 1.1 ad ictx = le32toh(rb->msgictx);
1673 1.1 ad if (ictx == IOP_ICTX)
1674 1.1 ad ii = NULL;
1675 1.1 ad else {
1676 1.5 ad ii = LIST_FIRST(IOP_ICTXHASH(ictx));
1677 1.5 ad for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
1678 1.5 ad if (ii->ii_ictx == ictx)
1679 1.5 ad break;
1680 1.5 ad if (ii == NULL) {
1681 1.1 ad #ifdef I2ODEBUG
1682 1.10.2.2 nathanw iop_reply_print(sc, rb);
1683 1.1 ad #endif
1684 1.10.2.2 nathanw printf("%s: WARNING: bad ictx returned (%x)\n",
1685 1.5 ad sc->sc_dv.dv_xname, ictx);
1686 1.5 ad return (-1);
1687 1.5 ad }
1688 1.1 ad }
1689 1.1 ad
1690 1.10.2.2 nathanw /*
1691 1.10.2.3 nathanw * If we received a transport failure notice, we've got to dig the
1692 1.10.2.2 nathanw * transaction context (if any) out of the original message frame,
1693 1.10.2.2 nathanw * and then release the original MFA back to the inbound FIFO.
1694 1.10.2.2 nathanw */
1695 1.10.2.2 nathanw if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
1696 1.10.2.2 nathanw status = I2O_STATUS_SUCCESS;
1697 1.10.2.2 nathanw
1698 1.10.2.2 nathanw fn = (struct i2o_fault_notify *)rb;
1699 1.10.2.16 nathanw tctx = iop_inl_msg(sc, fn->lowmfa + 12);
1700 1.10.2.2 nathanw iop_release_mfa(sc, fn->lowmfa);
1701 1.10.2.2 nathanw iop_tfn_print(sc, fn);
1702 1.10.2.2 nathanw } else {
1703 1.10.2.2 nathanw status = rb->reqstatus;
1704 1.10.2.2 nathanw tctx = le32toh(rb->msgtctx);
1705 1.10.2.2 nathanw }
1706 1.1 ad
1707 1.10.2.4 nathanw if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
1708 1.1 ad /*
1709 1.1 ad * This initiator tracks state using message wrappers.
1710 1.1 ad *
1711 1.1 ad * Find the originating message wrapper, and if requested
1712 1.1 ad * notify the initiator.
1713 1.1 ad */
1714 1.10.2.2 nathanw im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
1715 1.10.2.2 nathanw if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
1716 1.10.2.2 nathanw (im->im_flags & IM_ALLOCED) == 0 ||
1717 1.10.2.2 nathanw tctx != im->im_tctx) {
1718 1.10.2.2 nathanw printf("%s: WARNING: bad tctx returned (0x%08x, %p)\n",
1719 1.10.2.2 nathanw sc->sc_dv.dv_xname, tctx, im);
1720 1.10.2.2 nathanw if (im != NULL)
1721 1.10.2.2 nathanw printf("%s: flags=0x%08x tctx=0x%08x\n",
1722 1.10.2.2 nathanw sc->sc_dv.dv_xname, im->im_flags,
1723 1.10.2.2 nathanw im->im_tctx);
1724 1.5 ad #ifdef I2ODEBUG
1725 1.10.2.2 nathanw if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
1726 1.10.2.2 nathanw iop_reply_print(sc, rb);
1727 1.5 ad #endif
1728 1.5 ad return (-1);
1729 1.5 ad }
1730 1.10.2.2 nathanw
1731 1.10.2.2 nathanw if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1732 1.10.2.2 nathanw im->im_flags |= IM_FAIL;
1733 1.10.2.2 nathanw
1734 1.1 ad #ifdef I2ODEBUG
1735 1.1 ad if ((im->im_flags & IM_REPLIED) != 0)
1736 1.5 ad panic("%s: dup reply", sc->sc_dv.dv_xname);
1737 1.1 ad #endif
1738 1.1 ad im->im_flags |= IM_REPLIED;
1739 1.1 ad
1740 1.1 ad #ifdef I2ODEBUG
1741 1.10.2.2 nathanw if (status != I2O_STATUS_SUCCESS)
1742 1.10.2.2 nathanw iop_reply_print(sc, rb);
1743 1.1 ad #endif
1744 1.10.2.2 nathanw im->im_reqstatus = status;
1745 1.10.2.2 nathanw
1746 1.10.2.2 nathanw /* Copy the reply frame, if requested. */
1747 1.10.2.2 nathanw if (im->im_rb != NULL) {
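			/* Reply size in bytes: message size (in words) * 4. */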
1748 1.5 ad size = (le32toh(rb->msgflags) >> 14) & ~3;
1749 1.10.2.2 nathanw #ifdef I2ODEBUG
1750 1.10.2.7 nathanw if (size > sc->sc_framesize)
1751 1.10.2.2 nathanw panic("iop_handle_reply: reply too large");
1752 1.10.2.2 nathanw #endif
1753 1.10.2.2 nathanw memcpy(im->im_rb, rb, size);
1754 1.10.2.2 nathanw }
1755 1.10.2.2 nathanw
1756 1.10.2.2 nathanw /* Notify the initiator. */
1757 1.10.2.2 nathanw if ((im->im_flags & IM_WAIT) != 0)
1758 1.1 ad wakeup(im);
1759 1.10.2.2 nathanw else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL)
1760 1.1 ad (*ii->ii_intr)(ii->ii_dv, im, rb);
1761 1.1 ad } else {
1762 1.1 ad /*
1763 1.1 ad * This initiator discards message wrappers.
1764 1.1 ad *
1765 1.1 ad * Simply pass the reply frame to the initiator.
1766 1.1 ad */
1767 1.1 ad (*ii->ii_intr)(ii->ii_dv, NULL, rb);
1768 1.1 ad }
1769 1.1 ad
1770 1.1 ad return (status);
1771 1.1 ad }
1772 1.1 ad
1773 1.1 ad /*
1774 1.10.2.2 nathanw * Handle an interrupt from the IOP.
1775 1.1 ad */
1776 1.1 ad int
1777 1.1 ad iop_intr(void *arg)
1778 1.1 ad {
1779 1.1 ad struct iop_softc *sc;
1780 1.5 ad u_int32_t rmfa;
1781 1.1 ad
1782 1.1 ad sc = arg;
1783 1.1 ad
1784 1.5 ad if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0)
1785 1.5 ad return (0);
1786 1.5 ad
1787 1.5 ad for (;;) {
1788 1.5 ad /* Double read to account for IOP bug. */
1789 1.10.2.2 nathanw if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
1790 1.10.2.2 nathanw rmfa = iop_inl(sc, IOP_REG_OFIFO);
1791 1.10.2.2 nathanw if (rmfa == IOP_MFA_EMPTY)
1792 1.10.2.2 nathanw break;
1793 1.10.2.2 nathanw }
1794 1.5 ad iop_handle_reply(sc, rmfa);
1795 1.10.2.2 nathanw iop_outl(sc, IOP_REG_OFIFO, rmfa);
1796 1.1 ad }
1797 1.1 ad
1798 1.5 ad return (1);
1799 1.5 ad }
1800 1.5 ad
1801 1.5 ad /*
1802 1.5 ad * Handle an event signalled by the executive.
1803 1.5 ad */
1804 1.5 ad static void
1805 1.5 ad iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
1806 1.5 ad {
1807 1.5 ad struct i2o_util_event_register_reply *rb;
1808 1.5 ad struct iop_softc *sc;
1809 1.5 ad u_int event;
1810 1.5 ad
1811 1.5 ad sc = (struct iop_softc *)dv;
1812 1.5 ad rb = reply;
1813 1.5 ad
1814 1.10.2.2 nathanw if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1815 1.5 ad return;
1816 1.5 ad
1817 1.10.2.2 nathanw event = le32toh(rb->event);
1818 1.5 ad printf("%s: event 0x%08x received\n", dv->dv_xname, event);
1819 1.1 ad }
1820 1.1 ad
1821 1.1 ad /*
1822 1.1 ad * Allocate a message wrapper.
1823 1.1 ad */
1824 1.10.2.2 nathanw struct iop_msg *
1825 1.10.2.4 nathanw iop_msg_alloc(struct iop_softc *sc, int flags)
1826 1.1 ad {
1827 1.1 ad struct iop_msg *im;
1828 1.10.2.2 nathanw static u_int tctxgen;
1829 1.10.2.2 nathanw int s, i;
1830 1.1 ad
1831 1.1 ad #ifdef I2ODEBUG
1832 1.1 ad if ((flags & IM_SYSMASK) != 0)
1833 1.1 ad panic("iop_msg_alloc: system flags specified");
1834 1.1 ad #endif
1835 1.1 ad
1836 1.10.2.4 nathanw s = splbio();
1837 1.10.2.2 nathanw im = SLIST_FIRST(&sc->sc_im_freelist);
1838 1.10.2.2 nathanw #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
1839 1.10.2.2 nathanw if (im == NULL)
1840 1.10.2.2 nathanw panic("iop_msg_alloc: no free wrappers");
1841 1.10.2.2 nathanw #endif
1842 1.10.2.2 nathanw SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
1843 1.10.2.2 nathanw splx(s);
1844 1.1 ad
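	/*
	 * The low bits of im_tctx index this wrapper within sc_ims; the
	 * high bits hold a generation count so that stale replies can be
	 * detected when the wrapper is reused.
	 */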
1845 1.10.2.2 nathanw im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
1846 1.10.2.2 nathanw tctxgen += (1 << IOP_TCTX_SHIFT);
1847 1.1 ad im->im_flags = flags | IM_ALLOCED;
1848 1.10.2.2 nathanw im->im_rb = NULL;
1849 1.10.2.2 nathanw i = 0;
1850 1.10.2.2 nathanw do {
1851 1.10.2.2 nathanw im->im_xfer[i++].ix_size = 0;
1852 1.10.2.2 nathanw } while (i < IOP_MAX_MSG_XFERS);
1853 1.1 ad
1854 1.10.2.2 nathanw return (im);
1855 1.1 ad }
1856 1.1 ad
1857 1.1 ad /*
1858 1.1 ad * Free a message wrapper.
1859 1.1 ad */
1860 1.1 ad void
1861 1.10.2.2 nathanw iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
1862 1.1 ad {
1863 1.1 ad int s;
1864 1.1 ad
1865 1.1 ad #ifdef I2ODEBUG
1866 1.1 ad if ((im->im_flags & IM_ALLOCED) == 0)
1867 1.1 ad panic("iop_msg_free: wrapper not allocated");
1868 1.1 ad #endif
1869 1.1 ad
1870 1.1 ad im->im_flags = 0;
1871 1.10.2.2 nathanw s = splbio();
1872 1.10.2.2 nathanw SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
1873 1.1 ad splx(s);
1874 1.1 ad }
1875 1.1 ad
1876 1.1 ad /*
1877 1.5 ad * Map a data transfer. Write a scatter-gather list into the message frame.
1878 1.1 ad */
1879 1.1 ad int
1880 1.10.2.2 nathanw iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1881 1.10.2.4 nathanw void *xferaddr, int xfersize, int out, struct proc *up)
1882 1.1 ad {
1883 1.10.2.2 nathanw bus_dmamap_t dm;
1884 1.10.2.2 nathanw bus_dma_segment_t *ds;
1885 1.1 ad struct iop_xfer *ix;
1886 1.10.2.2 nathanw u_int rv, i, nsegs, flg, off, xn;
1887 1.10.2.2 nathanw u_int32_t *p;
1888 1.5 ad
1889 1.10.2.2 nathanw for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
1890 1.1 ad if (ix->ix_size == 0)
1891 1.1 ad break;
1892 1.10.2.2 nathanw
1893 1.1 ad #ifdef I2ODEBUG
1894 1.10.2.2 nathanw if (xfersize == 0)
1895 1.10.2.2 nathanw panic("iop_msg_map: null transfer");
1896 1.10.2.2 nathanw if (xfersize > IOP_MAX_XFER)
1897 1.10.2.2 nathanw panic("iop_msg_map: transfer too large");
1898 1.10.2.2 nathanw if (xn == IOP_MAX_MSG_XFERS)
1899 1.1 ad panic("iop_msg_map: too many xfers");
1900 1.1 ad #endif
1901 1.1 ad
1902 1.10.2.2 nathanw /*
1903 1.10.2.2 nathanw * Only the first DMA map is static.
1904 1.10.2.2 nathanw */
1905 1.10.2.2 nathanw if (xn != 0) {
1906 1.1 ad rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
1907 1.10.2.2 nathanw IOP_MAX_SEGS, IOP_MAX_XFER, 0,
1908 1.1 ad BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
1909 1.1 ad if (rv != 0)
1910 1.1 ad return (rv);
1911 1.1 ad }
1912 1.1 ad
1913 1.10.2.2 nathanw dm = ix->ix_map;
1914 1.10.2.4 nathanw rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
1915 1.10.2.4 nathanw (up == NULL ? BUS_DMA_NOWAIT : 0));
1916 1.1 ad if (rv != 0)
1917 1.10.2.2 nathanw goto bad;
1918 1.10.2.2 nathanw
1919 1.10.2.2 nathanw /*
1920 1.10.2.2 nathanw * How many SIMPLE SG elements can we fit in this message?
1921 1.10.2.2 nathanw */
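	/*
	 * The high 16 bits of mb[0] give the current message size in
	 * 32-bit words, which is also the word offset at which the SG
	 * list is appended; each SIMPLE element occupies two words.
	 */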
1922 1.10.2.2 nathanw off = mb[0] >> 16;
1923 1.10.2.2 nathanw p = mb + off;
1924 1.10.2.7 nathanw nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
1925 1.10.2.2 nathanw
1926 1.10.2.2 nathanw if (dm->dm_nsegs > nsegs) {
1927 1.10.2.2 nathanw bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
1928 1.10.2.2 nathanw rv = EFBIG;
1929 1.10.2.2 nathanw DPRINTF(("iop_msg_map: too many segs\n"));
1930 1.10.2.2 nathanw goto bad;
1931 1.10.2.2 nathanw }
1932 1.10.2.2 nathanw
1933 1.10.2.2 nathanw nsegs = dm->dm_nsegs;
1934 1.10.2.2 nathanw xfersize = 0;
1935 1.1 ad
1936 1.10.2.2 nathanw /*
1937 1.10.2.2 nathanw * Write out the SG list.
1938 1.10.2.2 nathanw */
1939 1.1 ad if (out)
1940 1.10.2.2 nathanw flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
1941 1.1 ad else
1942 1.10.2.2 nathanw flg = I2O_SGL_SIMPLE;
1943 1.1 ad
1944 1.10.2.2 nathanw for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
1945 1.10.2.2 nathanw p[0] = (u_int32_t)ds->ds_len | flg;
1946 1.10.2.2 nathanw p[1] = (u_int32_t)ds->ds_addr;
1947 1.10.2.2 nathanw xfersize += ds->ds_len;
1948 1.1 ad }
1949 1.1 ad
1950 1.10.2.2 nathanw p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
1951 1.10.2.2 nathanw p[1] = (u_int32_t)ds->ds_addr;
1952 1.10.2.2 nathanw xfersize += ds->ds_len;
1953 1.10.2.2 nathanw
1954 1.10.2.2 nathanw /* Fix up the transfer record, and sync the map. */
1955 1.10.2.2 nathanw ix->ix_flags = (out ? IX_OUT : IX_IN);
1956 1.10.2.2 nathanw ix->ix_size = xfersize;
1957 1.10.2.2 nathanw bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
1958 1.10.2.2 nathanw 	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
1959 1.10.2.2 nathanw
1960 1.1 ad /*
1961 1.1 ad * If this is the first xfer we've mapped for this message, adjust
1962 1.1 ad * the SGL offset field in the message header.
1963 1.1 ad */
1964 1.2 ad if ((im->im_flags & IM_SGLOFFADJ) == 0) {
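		/*
		 * (mb[0] >> 12) & 0xf0 places the message size (in words;
		 * it fits in the four-bit field at this point) into the
		 * SGL offset nibble, bits 4..7 of the header.
		 */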
1965 1.10.2.2 nathanw mb[0] += (mb[0] >> 12) & 0xf0;
1966 1.2 ad im->im_flags |= IM_SGLOFFADJ;
1967 1.2 ad }
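	/* nsegs << 17 adds two words per SG element to the size field. */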
1968 1.10.2.2 nathanw mb[0] += (nsegs << 17);
1969 1.10.2.2 nathanw return (0);
1970 1.10.2.2 nathanw
1971 1.10.2.2 nathanw bad:
1972 1.10.2.2 nathanw if (xn != 0)
1973 1.10.2.2 nathanw bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1974 1.10.2.2 nathanw return (rv);
1975 1.10.2.2 nathanw }
1976 1.10.2.2 nathanw
1977 1.10.2.2 nathanw /*
1978 1.10.2.2 nathanw * Map a block I/O data transfer (different in that there's only one per
1979 1.10.2.2 nathanw * message maximum, and PAGE addressing may be used). Write a scatter
1980 1.10.2.2 nathanw * gather list into the message frame.
1981 1.10.2.2 nathanw */
1982 1.10.2.2 nathanw int
1983 1.10.2.2 nathanw iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1984 1.10.2.2 nathanw void *xferaddr, int xfersize, int out)
1985 1.10.2.2 nathanw {
1986 1.10.2.2 nathanw bus_dma_segment_t *ds;
1987 1.10.2.2 nathanw bus_dmamap_t dm;
1988 1.10.2.2 nathanw struct iop_xfer *ix;
1989 1.10.2.2 nathanw u_int rv, i, nsegs, off, slen, tlen, flg;
1990 1.10.2.2 nathanw paddr_t saddr, eaddr;
1991 1.10.2.2 nathanw u_int32_t *p;
1992 1.10.2.2 nathanw
1993 1.10.2.2 nathanw #ifdef I2ODEBUG
1994 1.10.2.2 nathanw if (xfersize == 0)
1995 1.10.2.2 nathanw panic("iop_msg_map_bio: null transfer");
1996 1.10.2.2 nathanw if (xfersize > IOP_MAX_XFER)
1997 1.10.2.2 nathanw panic("iop_msg_map_bio: transfer too large");
1998 1.10.2.2 nathanw if ((im->im_flags & IM_SGLOFFADJ) != 0)
1999 1.10.2.2 nathanw panic("iop_msg_map_bio: SGLOFFADJ");
2000 1.10.2.2 nathanw #endif
2001 1.10.2.2 nathanw
2002 1.10.2.2 nathanw ix = im->im_xfer;
2003 1.10.2.2 nathanw dm = ix->ix_map;
2004 1.10.2.4 nathanw rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
2005 1.10.2.4 nathanw BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
2006 1.10.2.2 nathanw if (rv != 0)
2007 1.10.2.2 nathanw return (rv);
2008 1.10.2.2 nathanw
2009 1.10.2.2 nathanw off = mb[0] >> 16;
2010 1.10.2.7 nathanw nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
2011 1.10.2.2 nathanw
2012 1.10.2.2 nathanw /*
2013 1.10.2.2 nathanw * If the transfer is highly fragmented and won't fit using SIMPLE
2014 1.10.2.2 nathanw * elements, use PAGE_LIST elements instead. SIMPLE elements are
2015 1.10.2.2 nathanw * potentially more efficient, both for us and the IOP.
2016 1.10.2.2 nathanw */
2017 1.10.2.2 nathanw if (dm->dm_nsegs > nsegs) {
2018 1.10.2.2 nathanw nsegs = 1;
2019 1.10.2.2 nathanw p = mb + off + 1;
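		/*
		 * nsegs counts the words of the PAGE_LIST element: one for
		 * the element header written below, plus one per page
		 * address.
		 */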
2020 1.10.2.2 nathanw
2021 1.10.2.2 nathanw /* XXX This should be done with a bus_space flag. */
2022 1.10.2.2 nathanw for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2023 1.10.2.2 nathanw slen = ds->ds_len;
2024 1.10.2.2 nathanw saddr = ds->ds_addr;
2025 1.10.2.2 nathanw
2026 1.10.2.2 nathanw while (slen > 0) {
2027 1.10.2.2 nathanw eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2028 1.10.2.2 nathanw tlen = min(eaddr - saddr, slen);
2029 1.10.2.2 nathanw slen -= tlen;
2030 1.10.2.2 nathanw *p++ = le32toh(saddr);
2031 1.10.2.2 nathanw saddr = eaddr;
2032 1.10.2.2 nathanw nsegs++;
2033 1.10.2.2 nathanw }
2034 1.10.2.2 nathanw }
2035 1.10.2.2 nathanw
2036 1.10.2.2 nathanw mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2037 1.10.2.2 nathanw I2O_SGL_END;
2038 1.10.2.2 nathanw if (out)
2039 1.10.2.2 nathanw mb[off] |= I2O_SGL_DATA_OUT;
2040 1.10.2.2 nathanw } else {
2041 1.10.2.2 nathanw p = mb + off;
2042 1.10.2.2 nathanw nsegs = dm->dm_nsegs;
2043 1.10.2.2 nathanw
2044 1.10.2.2 nathanw if (out)
2045 1.10.2.2 nathanw flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2046 1.10.2.2 nathanw else
2047 1.10.2.2 nathanw flg = I2O_SGL_SIMPLE;
2048 1.10.2.2 nathanw
2049 1.10.2.2 nathanw for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2050 1.10.2.2 nathanw p[0] = (u_int32_t)ds->ds_len | flg;
2051 1.10.2.2 nathanw p[1] = (u_int32_t)ds->ds_addr;
2052 1.10.2.2 nathanw }
2053 1.10.2.2 nathanw
2054 1.10.2.2 nathanw p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2055 1.10.2.2 nathanw I2O_SGL_END;
2056 1.10.2.2 nathanw p[1] = (u_int32_t)ds->ds_addr;
2057 1.10.2.2 nathanw nsegs <<= 1;
2058 1.10.2.2 nathanw }
2059 1.10.2.2 nathanw
2060 1.10.2.2 nathanw /* Fix up the transfer record, and sync the map. */
2061 1.10.2.2 nathanw ix->ix_flags = (out ? IX_OUT : IX_IN);
2062 1.10.2.2 nathanw ix->ix_size = xfersize;
2063 1.10.2.2 nathanw bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2064 1.10.2.2 nathanw 	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2065 1.10.2.2 nathanw
2066 1.10.2.2 nathanw /*
2067 1.10.2.2 nathanw * Adjust the SGL offset and total message size fields. We don't
2068 1.10.2.2 nathanw * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2069 1.10.2.2 nathanw */
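	/*
	 * off << 4 places the SG list offset (in words) in bits 4..7 of
	 * the header; nsegs << 16 adds the length of the SG list to the
	 * message size field.
	 */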
2070 1.10.2.2 nathanw mb[0] += ((off << 4) + (nsegs << 16));
2071 1.1 ad return (0);
2072 1.1 ad }
2073 1.1 ad
2074 1.1 ad /*
2075 1.1 ad * Unmap all data transfers associated with a message wrapper.
2076 1.1 ad */
2077 1.1 ad void
2078 1.1 ad iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2079 1.1 ad {
2080 1.1 ad struct iop_xfer *ix;
2081 1.1 ad int i;
2082 1.10.2.2 nathanw
2083 1.10.2.2 nathanw #ifdef I2ODEBUG
2084 1.10.2.2 nathanw if (im->im_xfer[0].ix_size == 0)
2085 1.10.2.2 nathanw panic("iop_msg_unmap: no transfers mapped");
2086 1.10.2.2 nathanw #endif
2087 1.10.2.2 nathanw
2088 1.10.2.2 nathanw for (ix = im->im_xfer, i = 0;;) {
2089 1.1 ad bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2090 1.1 ad ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2091 1.1 ad BUS_DMASYNC_POSTREAD);
2092 1.1 ad bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2093 1.1 ad
2094 1.1 ad /* Only the first DMA map is static. */
2095 1.1 ad if (i != 0)
2096 1.1 ad bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2097 1.10.2.2 nathanw if ((++ix)->ix_size == 0)
2098 1.10.2.2 nathanw break;
2099 1.10.2.2 nathanw if (++i >= IOP_MAX_MSG_XFERS)
2100 1.10.2.2 nathanw break;
2101 1.1 ad }
2102 1.1 ad }
2103 1.1 ad
2104 1.10.2.2 nathanw /*
2105 1.10.2.2 nathanw * Post a message frame to the IOP's inbound queue.
2106 1.1 ad */
2107 1.1 ad int
2108 1.10.2.2 nathanw iop_post(struct iop_softc *sc, u_int32_t *mb)
2109 1.1 ad {
2110 1.10.2.2 nathanw u_int32_t mfa;
2111 1.10.2.2 nathanw int s;
2112 1.1 ad
2113 1.10.2.4 nathanw #ifdef I2ODEBUG
2114 1.10.2.7 nathanw if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2115 1.10.2.2 nathanw panic("iop_post: frame too large");
2116 1.10.2.4 nathanw #endif
2117 1.1 ad
2118 1.10.2.4 nathanw s = splbio();
2119 1.1 ad
2120 1.10.2.2 nathanw /* Allocate a slot with the IOP. */
2121 1.10.2.2 nathanw if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2122 1.10.2.2 nathanw if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2123 1.10.2.2 nathanw splx(s);
2124 1.10.2.2 nathanw printf("%s: mfa not forthcoming\n",
2125 1.10.2.2 nathanw sc->sc_dv.dv_xname);
2126 1.10.2.2 nathanw return (EAGAIN);
2127 1.10.2.2 nathanw }
2128 1.1 ad
2129 1.10.2.4 nathanw /* Perform reply buffer DMA synchronisation. */
2130 1.10.2.2 nathanw if (sc->sc_curib++ == 0)
2131 1.10.2.2 nathanw bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2132 1.10.2.2 nathanw sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2133 1.1 ad
2134 1.10.2.2 nathanw /* Copy out the message frame. */
2135 1.10.2.17 christos bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2136 1.10.2.16 nathanw mb[0] >> 16);
2137 1.10.2.17 christos bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
2138 1.10.2.16 nathanw (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2139 1.10.2.2 nathanw
2140 1.10.2.2 nathanw /* Post the MFA back to the IOP. */
2141 1.10.2.2 nathanw iop_outl(sc, IOP_REG_IFIFO, mfa);
2142 1.10.2.2 nathanw
2143 1.10.2.2 nathanw splx(s);
2144 1.10.2.2 nathanw return (0);
2145 1.10.2.2 nathanw }
2146 1.10.2.2 nathanw
2147 1.10.2.2 nathanw /*
2148 1.10.2.2 nathanw * Post a message to the IOP and deal with completion.
2149 1.10.2.2 nathanw */
2150 1.10.2.2 nathanw int
2151 1.10.2.2 nathanw iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2152 1.10.2.2 nathanw {
2153 1.10.2.2 nathanw u_int32_t *mb;
2154 1.10.2.2 nathanw int rv, s;
2155 1.10.2.2 nathanw
2156 1.10.2.2 nathanw mb = xmb;
2157 1.10.2.2 nathanw
2158 1.10.2.2 nathanw /* Terminate the scatter/gather list chain. */
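	/* The last element's flag word is two words before the end of the message. */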
2159 1.1 ad if ((im->im_flags & IM_SGLOFFADJ) != 0)
2160 1.10.2.2 nathanw mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2161 1.1 ad
2162 1.10.2.2 nathanw if ((rv = iop_post(sc, mb)) != 0)
2163 1.10.2.2 nathanw return (rv);
2164 1.1 ad
2165 1.10.2.4 nathanw if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2166 1.10.2.2 nathanw if ((im->im_flags & IM_POLL) != 0)
2167 1.10.2.2 nathanw iop_msg_poll(sc, im, timo);
2168 1.10.2.2 nathanw else
2169 1.10.2.2 nathanw iop_msg_wait(sc, im, timo);
2170 1.1 ad
2171 1.10.2.2 nathanw s = splbio();
2172 1.10.2.2 nathanw if ((im->im_flags & IM_REPLIED) != 0) {
2173 1.10.2.2 nathanw if ((im->im_flags & IM_NOSTATUS) != 0)
2174 1.10.2.2 nathanw rv = 0;
2175 1.10.2.2 nathanw else if ((im->im_flags & IM_FAIL) != 0)
2176 1.10.2.2 nathanw rv = ENXIO;
2177 1.10.2.2 nathanw else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2178 1.10.2.2 nathanw rv = EIO;
2179 1.10.2.2 nathanw else
2180 1.10.2.2 nathanw rv = 0;
2181 1.10.2.2 nathanw } else
2182 1.10.2.2 nathanw rv = EBUSY;
2183 1.2 ad splx(s);
2184 1.10.2.2 nathanw } else
2185 1.10.2.2 nathanw rv = 0;
2186 1.10.2.2 nathanw
2187 1.10.2.2 nathanw return (rv);
2188 1.10.2.2 nathanw }
2189 1.10.2.2 nathanw
2190 1.10.2.2 nathanw /*
2191 1.10.2.2 nathanw * Spin until the specified message is replied to.
2192 1.10.2.2 nathanw */
2193 1.10.2.2 nathanw static void
2194 1.10.2.2 nathanw iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2195 1.10.2.2 nathanw {
2196 1.10.2.2 nathanw u_int32_t rmfa;
2197 1.10.2.2 nathanw int s, status;
2198 1.10.2.2 nathanw
2199 1.10.2.4 nathanw s = splbio();
2200 1.1 ad
2201 1.1 ad /* Wait for completion. */
2202 1.1 ad for (timo *= 10; timo != 0; timo--) {
2203 1.5 ad if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2204 1.5 ad /* Double read to account for IOP bug. */
2205 1.5 ad rmfa = iop_inl(sc, IOP_REG_OFIFO);
2206 1.5 ad if (rmfa == IOP_MFA_EMPTY)
2207 1.5 ad rmfa = iop_inl(sc, IOP_REG_OFIFO);
2208 1.10.2.2 nathanw if (rmfa != IOP_MFA_EMPTY) {
2209 1.5 ad status = iop_handle_reply(sc, rmfa);
2210 1.10.2.2 nathanw
2211 1.10.2.2 nathanw /*
2212 1.10.2.2 nathanw * Return the reply frame to the IOP's
2213 1.10.2.2 nathanw * outbound FIFO.
2214 1.10.2.2 nathanw */
2215 1.10.2.2 nathanw iop_outl(sc, IOP_REG_OFIFO, rmfa);
2216 1.10.2.2 nathanw }
2217 1.5 ad }
2218 1.1 ad if ((im->im_flags & IM_REPLIED) != 0)
2219 1.1 ad break;
2220 1.1 ad DELAY(100);
2221 1.1 ad }
2222 1.1 ad
2223 1.1 ad if (timo == 0) {
2224 1.5 ad #ifdef I2ODEBUG
2225 1.5 ad printf("%s: poll - no reply\n", sc->sc_dv.dv_xname);
2226 1.10.2.2 nathanw if (iop_status_get(sc, 1) != 0)
2227 1.10.2.2 nathanw printf("iop_msg_poll: unable to retrieve status\n");
2228 1.5 ad else
2229 1.10.2.2 nathanw printf("iop_msg_poll: IOP state = %d\n",
2230 1.5 ad (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2231 1.5 ad #endif
2232 1.1 ad }
2233 1.1 ad
2234 1.1 ad splx(s);
2235 1.1 ad }
2236 1.1 ad
2237 1.1 ad /*
2238 1.10.2.2 nathanw * Sleep until the specified message is replied to.
2239 1.1 ad */
2240 1.10.2.2 nathanw static void
2241 1.1 ad iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2242 1.1 ad {
2243 1.10.2.2 nathanw int s, rv;
2244 1.1 ad
2245 1.5 ad s = splbio();
2246 1.5 ad if ((im->im_flags & IM_REPLIED) != 0) {
2247 1.5 ad splx(s);
2248 1.10.2.2 nathanw return;
2249 1.5 ad }
2250 1.10.2.12 nathanw rv = tsleep(im, PRIBIO, "iopmsg", mstohz(timo));
2251 1.5 ad splx(s);
2252 1.10.2.2 nathanw
2253 1.5 ad #ifdef I2ODEBUG
2254 1.5 ad if (rv != 0) {
2255 1.5 ad printf("iop_msg_wait: tsleep() == %d\n", rv);
2256 1.10.2.2 nathanw if (iop_status_get(sc, 0) != 0)
2257 1.5 ad printf("iop_msg_wait: unable to retrieve status\n");
2258 1.5 ad else
2259 1.5 ad printf("iop_msg_wait: IOP state = %d\n",
2260 1.5 ad (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2261 1.5 ad }
2262 1.5 ad #endif
2263 1.1 ad }
2264 1.1 ad
2265 1.1 ad /*
2266 1.1 ad * Release an unused message frame back to the IOP's inbound fifo.
2267 1.1 ad */
2268 1.1 ad static void
2269 1.1 ad iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2270 1.1 ad {
2271 1.1 ad
2272 1.1 ad /* Use the frame to issue a no-op. */
2273 1.10.2.17 christos iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2274 1.10.2.17 christos iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2275 1.10.2.17 christos iop_outl_msg(sc, mfa + 8, 0);
2276 1.10.2.17 christos iop_outl_msg(sc, mfa + 12, 0);
2277 1.1 ad
2278 1.5 ad iop_outl(sc, IOP_REG_IFIFO, mfa);
2279 1.1 ad }
2280 1.1 ad
2281 1.1 ad #ifdef I2ODEBUG
2282 1.1 ad /*
2283 1.10.2.2 nathanw * Dump a reply frame header.
2284 1.1 ad */
2285 1.1 ad static void
2286 1.10.2.2 nathanw iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2287 1.1 ad {
2288 1.5 ad u_int function, detail;
2289 1.1 ad #ifdef I2OVERBOSE
2290 1.1 ad const char *statusstr;
2291 1.1 ad #endif
2292 1.1 ad
2293 1.5 ad function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2294 1.1 ad detail = le16toh(rb->detail);
2295 1.1 ad
2296 1.5 ad printf("%s: reply:\n", sc->sc_dv.dv_xname);
2297 1.5 ad
2298 1.1 ad #ifdef I2OVERBOSE
2299 1.1 ad if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2300 1.1 ad statusstr = iop_status[rb->reqstatus];
2301 1.1 ad else
2302 1.1 ad statusstr = "undefined error code";
2303 1.1 ad
2304 1.5 ad printf("%s: function=0x%02x status=0x%02x (%s)\n",
2305 1.5 ad sc->sc_dv.dv_xname, function, rb->reqstatus, statusstr);
2306 1.1 ad #else
2307 1.5 ad printf("%s: function=0x%02x status=0x%02x\n",
2308 1.5 ad sc->sc_dv.dv_xname, function, rb->reqstatus);
2309 1.1 ad #endif
2310 1.5 ad printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2311 1.5 ad sc->sc_dv.dv_xname, detail, le32toh(rb->msgictx),
2312 1.5 ad le32toh(rb->msgtctx));
2313 1.5 ad printf("%s: tidi=%d tidt=%d flags=0x%02x\n", sc->sc_dv.dv_xname,
2314 1.5 ad (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2315 1.5 ad (le32toh(rb->msgflags) >> 8) & 0xff);
2316 1.1 ad }
2317 1.1 ad #endif
2318 1.1 ad
2319 1.1 ad /*
2320 1.10.2.2 nathanw * Dump a transport failure reply.
2321 1.10.2.2 nathanw */
2322 1.10.2.2 nathanw static void
2323 1.10.2.2 nathanw iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2324 1.10.2.2 nathanw {
2325 1.10.2.2 nathanw
2326 1.10.2.2 nathanw printf("%s: WARNING: transport failure:\n", sc->sc_dv.dv_xname);
2327 1.10.2.2 nathanw
2328 1.10.2.7 nathanw printf("%s: ictx=0x%08x tctx=0x%08x\n", sc->sc_dv.dv_xname,
2329 1.10.2.2 nathanw le32toh(fn->msgictx), le32toh(fn->msgtctx));
2330 1.10.2.2 nathanw printf("%s: failurecode=0x%02x severity=0x%02x\n",
2331 1.10.2.2 nathanw sc->sc_dv.dv_xname, fn->failurecode, fn->severity);
2332 1.10.2.2 nathanw printf("%s: highestver=0x%02x lowestver=0x%02x\n",
2333 1.10.2.2 nathanw sc->sc_dv.dv_xname, fn->highestver, fn->lowestver);
2334 1.10.2.2 nathanw }
2335 1.10.2.2 nathanw
2336 1.10.2.2 nathanw /*
2337 1.5 ad * Translate an I2O ASCII field into a C string.
2338 1.1 ad */
2339 1.1 ad void
2340 1.5 ad iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2341 1.1 ad {
2342 1.5 ad int hc, lc, i, nit;
2343 1.1 ad
2344 1.1 ad dlen--;
2345 1.1 ad lc = 0;
2346 1.1 ad hc = 0;
2347 1.1 ad i = 0;
2348 1.5 ad
2349 1.5 ad /*
2350 1.5 ad * DPT use NUL as a space, whereas AMI use it as a terminator. The
2351 1.5 ad * spec has nothing to say about it. Since AMI fields are usually
2352 1.5 ad 	 * filled with junk after the terminator, honour NUL as a terminator unless the IOP is from DPT.
2353 1.5 ad */
2354 1.5 ad nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2355 1.5 ad
2356 1.5 ad while (slen-- != 0 && dlen-- != 0) {
2357 1.5 ad if (nit && *src == '\0')
2358 1.5 ad break;
2359 1.5 ad else if (*src <= 0x20 || *src >= 0x7f) {
2360 1.1 ad if (hc)
2361 1.1 ad dst[i++] = ' ';
2362 1.1 ad } else {
2363 1.1 ad hc = 1;
2364 1.1 ad dst[i++] = *src;
2365 1.1 ad lc = i;
2366 1.1 ad }
2367 1.1 ad src++;
2368 1.1 ad }
2369 1.1 ad
2370 1.1 ad dst[lc] = '\0';
2371 1.1 ad }
2372 1.1 ad
2373 1.1 ad /*
2374 1.10.2.2 nathanw * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2375 1.10.2.2 nathanw */
2376 1.10.2.2 nathanw int
2377 1.10.2.2 nathanw iop_print_ident(struct iop_softc *sc, int tid)
2378 1.10.2.2 nathanw {
2379 1.10.2.2 nathanw struct {
2380 1.10.2.2 nathanw struct i2o_param_op_results pr;
2381 1.10.2.2 nathanw struct i2o_param_read_results prr;
2382 1.10.2.2 nathanw struct i2o_param_device_identity di;
2383 1.10.2.2 nathanw } __attribute__ ((__packed__)) p;
2384 1.10.2.2 nathanw char buf[32];
2385 1.10.2.2 nathanw int rv;
2386 1.10.2.2 nathanw
2387 1.10.2.5 nathanw rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2388 1.10.2.5 nathanw sizeof(p), NULL);
2389 1.10.2.2 nathanw if (rv != 0)
2390 1.10.2.2 nathanw return (rv);
2391 1.10.2.2 nathanw
2392 1.10.2.2 nathanw iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2393 1.10.2.2 nathanw sizeof(buf));
2394 1.10.2.2 nathanw printf(" <%s, ", buf);
2395 1.10.2.2 nathanw iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2396 1.10.2.2 nathanw sizeof(buf));
2397 1.10.2.2 nathanw printf("%s, ", buf);
2398 1.10.2.2 nathanw iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2399 1.10.2.2 nathanw printf("%s>", buf);
2400 1.10.2.2 nathanw
2401 1.10.2.2 nathanw return (0);
2402 1.10.2.2 nathanw }
2403 1.10.2.2 nathanw
2404 1.10.2.2 nathanw /*
2405 1.5 ad * Claim or unclaim the specified TID.
2406 1.1 ad */
2407 1.1 ad int
2408 1.5 ad iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2409 1.10.2.4 nathanw int flags)
2410 1.1 ad {
2411 1.5 ad struct iop_msg *im;
2412 1.10.2.2 nathanw struct i2o_util_claim mf;
2413 1.5 ad int rv, func;
2414 1.5 ad
2415 1.5 ad func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2416 1.10.2.4 nathanw im = iop_msg_alloc(sc, IM_WAIT);
2417 1.5 ad
2418 1.10.2.2 nathanw /* We can use the same structure, as they're identical. */
2419 1.10.2.2 nathanw mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2420 1.10.2.2 nathanw mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2421 1.10.2.2 nathanw mf.msgictx = ii->ii_ictx;
2422 1.10.2.2 nathanw mf.msgtctx = im->im_tctx;
2423 1.10.2.2 nathanw mf.flags = flags;
2424 1.5 ad
2425 1.10.2.2 nathanw rv = iop_msg_post(sc, im, &mf, 5000);
2426 1.10.2.2 nathanw iop_msg_free(sc, im);
2427 1.5 ad return (rv);
2428 1.5 ad }
2429 1.5 ad
2430 1.5 ad /*
2431 1.5 ad * Perform an abort.
2432 1.5 ad */
2433 1.5 ad int iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2434 1.10.2.4 nathanw int tctxabort, int flags)
2435 1.5 ad {
2436 1.5 ad struct iop_msg *im;
2437 1.10.2.2 nathanw struct i2o_util_abort mf;
2438 1.5 ad int rv;
2439 1.5 ad
2440 1.10.2.4 nathanw im = iop_msg_alloc(sc, IM_WAIT);
2441 1.1 ad
2442 1.10.2.2 nathanw mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2443 1.10.2.2 nathanw mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2444 1.10.2.2 nathanw mf.msgictx = ii->ii_ictx;
2445 1.10.2.2 nathanw mf.msgtctx = im->im_tctx;
2446 1.10.2.2 nathanw mf.flags = (func << 24) | flags;
2447 1.10.2.2 nathanw mf.tctxabort = tctxabort;
2448 1.1 ad
2449 1.10.2.2 nathanw rv = iop_msg_post(sc, im, &mf, 5000);
2450 1.10.2.2 nathanw iop_msg_free(sc, im);
2451 1.5 ad return (rv);
2452 1.1 ad }
2453 1.1 ad
2454 1.1 ad /*
2455 1.10.2.2 nathanw * Enable or disable reception of events for the specified device.
2456 1.1 ad */
2457 1.5 ad int iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2458 1.5 ad {
2459 1.10.2.2 nathanw struct i2o_util_event_register mf;
2460 1.5 ad
2461 1.10.2.2 nathanw mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2462 1.10.2.2 nathanw mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2463 1.10.2.2 nathanw mf.msgictx = ii->ii_ictx;
2464 1.10.2.4 nathanw mf.msgtctx = 0;
2465 1.10.2.2 nathanw mf.eventmask = mask;
2466 1.5 ad
2467 1.10.2.2 nathanw /* This message is replied to only when events are signalled. */
2468 1.10.2.4 nathanw return (iop_post(sc, (u_int32_t *)&mf));
2469 1.5 ad }
2470 1.5 ad
2471 1.1 ad int
2472 1.5 ad iopopen(dev_t dev, int flag, int mode, struct proc *p)
2473 1.1 ad {
2474 1.5 ad struct iop_softc *sc;
2475 1.5 ad
2476 1.10.2.2 nathanw if ((sc = device_lookup(&iop_cd, minor(dev))) == NULL)
2477 1.10.2.2 nathanw return (ENXIO);
2478 1.10.2.2 nathanw if ((sc->sc_flags & IOP_ONLINE) == 0)
2479 1.1 ad return (ENXIO);
2480 1.5 ad if ((sc->sc_flags & IOP_OPEN) != 0)
2481 1.5 ad return (EBUSY);
2482 1.5 ad sc->sc_flags |= IOP_OPEN;
2483 1.5 ad
2484 1.5 ad return (0);
2485 1.1 ad }
2486 1.1 ad
2487 1.5 ad int
2488 1.5 ad iopclose(dev_t dev, int flag, int mode, struct proc *p)
2489 1.1 ad {
2490 1.5 ad struct iop_softc *sc;
2491 1.1 ad
2492 1.5 ad sc = device_lookup(&iop_cd, minor(dev));
2493 1.10.2.2 nathanw sc->sc_flags &= ~IOP_OPEN;
2494 1.10.2.4 nathanw
2495 1.5 ad return (0);
2496 1.1 ad }
2497 1.1 ad
2498 1.1 ad int
2499 1.5 ad iopioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
2500 1.1 ad {
2501 1.5 ad struct iop_softc *sc;
2502 1.5 ad struct iovec *iov;
2503 1.5 ad int rv, i;
2504 1.5 ad
2505 1.5 ad if (securelevel >= 2)
2506 1.5 ad return (EPERM);
2507 1.5 ad
2508 1.5 ad sc = device_lookup(&iop_cd, minor(dev));
2509 1.5 ad
2510 1.5 ad switch (cmd) {
2511 1.5 ad case IOPIOCPT:
2512 1.10.2.4 nathanw return (iop_passthrough(sc, (struct ioppt *)data, p));
2513 1.9 ad
2514 1.10.2.2 nathanw case IOPIOCGSTATUS:
2515 1.10.2.2 nathanw iov = (struct iovec *)data;
2516 1.10.2.2 nathanw i = sizeof(struct i2o_status);
2517 1.10.2.2 nathanw if (i > iov->iov_len)
2518 1.10.2.2 nathanw i = iov->iov_len;
2519 1.10.2.2 nathanw else
2520 1.10.2.2 nathanw iov->iov_len = i;
2521 1.10.2.2 nathanw if ((rv = iop_status_get(sc, 0)) == 0)
2522 1.10.2.2 nathanw rv = copyout(&sc->sc_status, iov->iov_base, i);
2523 1.10.2.2 nathanw return (rv);
2524 1.5 ad
2525 1.10.2.2 nathanw case IOPIOCGLCT:
2526 1.10.2.2 nathanw case IOPIOCGTIDMAP:
2527 1.10.2.2 nathanw case IOPIOCRECONFIG:
2528 1.10.2.2 nathanw break;
2529 1.5 ad
2530 1.10.2.2 nathanw default:
2531 1.10.2.2 nathanw #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2532 1.10.2.2 nathanw printf("%s: unknown ioctl %lx\n", sc->sc_dv.dv_xname, cmd);
2533 1.10.2.2 nathanw #endif
2534 1.10.2.2 nathanw return (ENOTTY);
2535 1.10.2.2 nathanw }
2536 1.9 ad
2537 1.10.2.2 nathanw if ((rv = lockmgr(&sc->sc_conflock, LK_SHARED, NULL)) != 0)
2538 1.10.2.2 nathanw return (rv);
2539 1.1 ad
2540 1.10.2.2 nathanw switch (cmd) {
2541 1.5 ad case IOPIOCGLCT:
2542 1.5 ad iov = (struct iovec *)data;
2543 1.10.2.2 nathanw i = le16toh(sc->sc_lct->tablesize) << 2;
2544 1.5 ad if (i > iov->iov_len)
2545 1.5 ad i = iov->iov_len;
2546 1.5 ad else
2547 1.5 ad iov->iov_len = i;
2548 1.10.2.2 nathanw rv = copyout(sc->sc_lct, iov->iov_base, i);
2549 1.5 ad break;
2550 1.5 ad
2551 1.5 ad case IOPIOCRECONFIG:
2552 1.10.2.2 nathanw rv = iop_reconfigure(sc, 0);
2553 1.9 ad break;
2554 1.9 ad
2555 1.9 ad case IOPIOCGTIDMAP:
2556 1.9 ad iov = (struct iovec *)data;
2557 1.10.2.2 nathanw i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2558 1.10.2.2 nathanw if (i > iov->iov_len)
2559 1.10.2.2 nathanw i = iov->iov_len;
2560 1.10.2.2 nathanw else
2561 1.10.2.2 nathanw iov->iov_len = i;
2562 1.10.2.2 nathanw rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2563 1.5 ad break;
2564 1.10.2.2 nathanw }
2565 1.5 ad
2566 1.10.2.2 nathanw lockmgr(&sc->sc_conflock, LK_RELEASE, NULL);
2567 1.10.2.2 nathanw return (rv);
2568 1.10.2.2 nathanw }
2569 1.10.2.2 nathanw
2570 1.10.2.2 nathanw static int
2571 1.10.2.4 nathanw iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2572 1.10.2.2 nathanw {
2573 1.10.2.2 nathanw struct iop_msg *im;
2574 1.10.2.2 nathanw struct i2o_msg *mf;
2575 1.10.2.2 nathanw struct ioppt_buf *ptb;
2576 1.10.2.2 nathanw int rv, i, mapped;
2577 1.10.2.2 nathanw
2578 1.10.2.2 nathanw mf = NULL;
2579 1.10.2.2 nathanw im = NULL;
2580 1.10.2.2 nathanw 	mapped = 0;
2581 1.10.2.2 nathanw
2582 1.10.2.7 nathanw if (pt->pt_msglen > sc->sc_framesize ||
2583 1.10.2.2 nathanw pt->pt_msglen < sizeof(struct i2o_msg) ||
2584 1.10.2.2 nathanw pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2585 1.10.2.2 nathanw pt->pt_nbufs < 0 || pt->pt_replylen < 0 ||
2586 1.10.2.2 nathanw pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2587 1.10.2.2 nathanw return (EINVAL);
2588 1.10.2.2 nathanw
2589 1.10.2.2 nathanw for (i = 0; i < pt->pt_nbufs; i++)
2590 1.10.2.2 nathanw if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2591 1.10.2.2 nathanw rv = ENOMEM;
2592 1.10.2.2 nathanw goto bad;
2593 1.10.2.2 nathanw }
2594 1.10.2.2 nathanw
2595 1.10.2.7 nathanw mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2596 1.10.2.2 nathanw if (mf == NULL)
2597 1.10.2.2 nathanw return (ENOMEM);
2598 1.10.2.2 nathanw
2599 1.10.2.2 nathanw if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2600 1.10.2.2 nathanw goto bad;
2601 1.10.2.2 nathanw
2602 1.10.2.4 nathanw im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2603 1.10.2.2 nathanw im->im_rb = (struct i2o_reply *)mf;
2604 1.10.2.2 nathanw mf->msgictx = IOP_ICTX;
2605 1.10.2.2 nathanw mf->msgtctx = im->im_tctx;
2606 1.10.2.2 nathanw
2607 1.10.2.2 nathanw for (i = 0; i < pt->pt_nbufs; i++) {
2608 1.10.2.2 nathanw ptb = &pt->pt_bufs[i];
2609 1.10.2.4 nathanw rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2610 1.10.2.4 nathanw ptb->ptb_datalen, ptb->ptb_out != 0, p);
2611 1.10.2.2 nathanw if (rv != 0)
2612 1.10.2.2 nathanw goto bad;
2613 1.10.2.2 nathanw mapped = 1;
2614 1.5 ad }
2615 1.9 ad
2616 1.10.2.2 nathanw if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2617 1.10.2.2 nathanw goto bad;
2618 1.10.2.2 nathanw
2619 1.10.2.2 nathanw i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2620 1.10.2.7 nathanw if (i > sc->sc_framesize)
2621 1.10.2.7 nathanw i = sc->sc_framesize;
2622 1.10.2.2 nathanw if (i > pt->pt_replylen)
2623 1.10.2.2 nathanw i = pt->pt_replylen;
2624 1.10.2.4 nathanw rv = copyout(im->im_rb, pt->pt_reply, i);
2625 1.10.2.1 nathanw
2626 1.10.2.2 nathanw bad:
2627 1.10.2.2 nathanw if (mapped != 0)
2628 1.10.2.2 nathanw iop_msg_unmap(sc, im);
2629 1.10.2.2 nathanw if (im != NULL)
2630 1.10.2.2 nathanw iop_msg_free(sc, im);
2631 1.10.2.2 nathanw if (mf != NULL)
2632 1.10.2.2 nathanw free(mf, M_DEVBUF);
2633 1.1 ad return (rv);
2634 1.5 ad }
2635