1 1.93 ad /* $NetBSD: iop.c,v 1.93 2023/09/07 20:07:03 ad Exp $ */ 2 1.1 ad 3 1.1 ad /*- 4 1.93 ad * Copyright (c) 2000, 2001, 2002, 2007, 2023 The NetBSD Foundation, Inc. 5 1.1 ad * All rights reserved. 6 1.1 ad * 7 1.1 ad * This code is derived from software contributed to The NetBSD Foundation 8 1.1 ad * by Andrew Doran. 9 1.1 ad * 10 1.1 ad * Redistribution and use in source and binary forms, with or without 11 1.1 ad * modification, are permitted provided that the following conditions 12 1.1 ad * are met: 13 1.1 ad * 1. Redistributions of source code must retain the above copyright 14 1.1 ad * notice, this list of conditions and the following disclaimer. 15 1.1 ad * 2. Redistributions in binary form must reproduce the above copyright 16 1.1 ad * notice, this list of conditions and the following disclaimer in the 17 1.1 ad * documentation and/or other materials provided with the distribution. 18 1.1 ad * 19 1.1 ad * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 1.1 ad * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 1.1 ad * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 1.1 ad * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 1.1 ad * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 1.1 ad * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 1.1 ad * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 1.1 ad * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 1.1 ad * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 1.1 ad * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 1.1 ad * POSSIBILITY OF SUCH DAMAGE. 30 1.1 ad */ 31 1.1 ad 32 1.1 ad /* 33 1.1 ad * Support for I2O IOPs (intelligent I/O processors). 
34 1.1 ad */ 35 1.20 lukem 36 1.20 lukem #include <sys/cdefs.h> 37 1.93 ad __KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.93 2023/09/07 20:07:03 ad Exp $"); 38 1.1 ad 39 1.5 ad #include "iop.h" 40 1.1 ad 41 1.1 ad #include <sys/param.h> 42 1.1 ad #include <sys/systm.h> 43 1.1 ad #include <sys/kernel.h> 44 1.1 ad #include <sys/device.h> 45 1.1 ad #include <sys/queue.h> 46 1.1 ad #include <sys/proc.h> 47 1.1 ad #include <sys/malloc.h> 48 1.1 ad #include <sys/ioctl.h> 49 1.1 ad #include <sys/endian.h> 50 1.5 ad #include <sys/conf.h> 51 1.5 ad #include <sys/kthread.h> 52 1.60 elad #include <sys/kauth.h> 53 1.67 ad #include <sys/bus.h> 54 1.1 ad 55 1.1 ad #include <dev/i2o/i2o.h> 56 1.11 ad #include <dev/i2o/iopio.h> 57 1.1 ad #include <dev/i2o/iopreg.h> 58 1.1 ad #include <dev/i2o/iopvar.h> 59 1.1 ad 60 1.88 riastrad #include "ioconf.h" 61 1.44 drochner #include "locators.h" 62 1.44 drochner 63 1.1 ad #define POLL(ms, cond) \ 64 1.1 ad do { \ 65 1.48 christos int xi; \ 66 1.48 christos for (xi = (ms) * 10; xi; xi--) { \ 67 1.1 ad if (cond) \ 68 1.1 ad break; \ 69 1.1 ad DELAY(100); \ 70 1.1 ad } \ 71 1.1 ad } while (/* CONSTCOND */0); 72 1.1 ad 73 1.1 ad #ifdef I2ODEBUG 74 1.1 ad #define DPRINTF(x) printf x 75 1.1 ad #else 76 1.1 ad #define DPRINTF(x) 77 1.1 ad #endif 78 1.1 ad 79 1.5 ad #define IOP_ICTXHASH_NBUCKETS 16 80 1.5 ad #define IOP_ICTXHASH(ictx) (&iop_ictxhashtbl[(ictx) & iop_ictxhash]) 81 1.11 ad 82 1.11 ad #define IOP_MAX_SEGS (((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1) 83 1.11 ad 84 1.11 ad #define IOP_TCTX_SHIFT 12 85 1.11 ad #define IOP_TCTX_MASK ((1 << IOP_TCTX_SHIFT) - 1) 86 1.5 ad 87 1.5 ad static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl; 88 1.5 ad static u_long iop_ictxhash; 89 1.1 ad static void *iop_sdh; 90 1.5 ad static struct i2o_systab *iop_systab; 91 1.5 ad static int iop_systab_size; 92 1.1 ad 93 1.25 gehenna dev_type_open(iopopen); 94 1.25 gehenna dev_type_close(iopclose); 95 1.25 gehenna dev_type_ioctl(iopioctl); 96 1.25 gehenna 97 1.25 
gehenna const struct cdevsw iop_cdevsw = { 98 1.85 dholland .d_open = iopopen, 99 1.85 dholland .d_close = iopclose, 100 1.85 dholland .d_read = noread, 101 1.85 dholland .d_write = nowrite, 102 1.85 dholland .d_ioctl = iopioctl, 103 1.85 dholland .d_stop = nostop, 104 1.85 dholland .d_tty = notty, 105 1.85 dholland .d_poll = nopoll, 106 1.85 dholland .d_mmap = nommap, 107 1.85 dholland .d_kqfilter = nokqfilter, 108 1.86 dholland .d_discard = nodiscard, 109 1.93 ad .d_flag = D_OTHER | D_MPSAFE, 110 1.25 gehenna }; 111 1.25 gehenna 112 1.5 ad #define IC_CONFIGURE 0x01 113 1.11 ad #define IC_PRIORITY 0x02 114 1.1 ad 115 1.57 christos static struct iop_class { 116 1.5 ad u_short ic_class; 117 1.5 ad u_short ic_flags; 118 1.65 ad const char *ic_caption; 119 1.57 christos } const iop_class[] = { 120 1.47 perry { 121 1.1 ad I2O_CLASS_EXECUTIVE, 122 1.1 ad 0, 123 1.65 ad "executive" 124 1.1 ad }, 125 1.1 ad { 126 1.1 ad I2O_CLASS_DDM, 127 1.1 ad 0, 128 1.65 ad "device driver module" 129 1.1 ad }, 130 1.1 ad { 131 1.1 ad I2O_CLASS_RANDOM_BLOCK_STORAGE, 132 1.11 ad IC_CONFIGURE | IC_PRIORITY, 133 1.65 ad "random block storage" 134 1.1 ad }, 135 1.1 ad { 136 1.1 ad I2O_CLASS_SEQUENTIAL_STORAGE, 137 1.11 ad IC_CONFIGURE | IC_PRIORITY, 138 1.65 ad "sequential storage" 139 1.1 ad }, 140 1.1 ad { 141 1.1 ad I2O_CLASS_LAN, 142 1.11 ad IC_CONFIGURE | IC_PRIORITY, 143 1.65 ad "LAN port" 144 1.1 ad }, 145 1.1 ad { 146 1.1 ad I2O_CLASS_WAN, 147 1.11 ad IC_CONFIGURE | IC_PRIORITY, 148 1.65 ad "WAN port" 149 1.1 ad }, 150 1.1 ad { 151 1.1 ad I2O_CLASS_FIBRE_CHANNEL_PORT, 152 1.1 ad IC_CONFIGURE, 153 1.65 ad "fibrechannel port" 154 1.1 ad }, 155 1.1 ad { 156 1.1 ad I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL, 157 1.1 ad 0, 158 1.65 ad "fibrechannel peripheral" 159 1.1 ad }, 160 1.1 ad { 161 1.1 ad I2O_CLASS_SCSI_PERIPHERAL, 162 1.1 ad 0, 163 1.65 ad "SCSI peripheral" 164 1.1 ad }, 165 1.1 ad { 166 1.1 ad I2O_CLASS_ATE_PORT, 167 1.1 ad IC_CONFIGURE, 168 1.65 ad "ATE port" 169 1.1 ad }, 170 1.47 
perry { 171 1.1 ad I2O_CLASS_ATE_PERIPHERAL, 172 1.1 ad 0, 173 1.65 ad "ATE peripheral" 174 1.1 ad }, 175 1.47 perry { 176 1.1 ad I2O_CLASS_FLOPPY_CONTROLLER, 177 1.1 ad IC_CONFIGURE, 178 1.65 ad "floppy controller" 179 1.1 ad }, 180 1.1 ad { 181 1.1 ad I2O_CLASS_FLOPPY_DEVICE, 182 1.1 ad 0, 183 1.65 ad "floppy device" 184 1.1 ad }, 185 1.1 ad { 186 1.1 ad I2O_CLASS_BUS_ADAPTER_PORT, 187 1.1 ad IC_CONFIGURE, 188 1.65 ad "bus adapter port" 189 1.1 ad }, 190 1.1 ad }; 191 1.1 ad 192 1.83 joerg #ifdef I2ODEBUG 193 1.11 ad static const char * const iop_status[] = { 194 1.1 ad "success", 195 1.1 ad "abort (dirty)", 196 1.1 ad "abort (no data transfer)", 197 1.1 ad "abort (partial transfer)", 198 1.1 ad "error (dirty)", 199 1.1 ad "error (no data transfer)", 200 1.1 ad "error (partial transfer)", 201 1.1 ad "undefined error code", 202 1.1 ad "process abort (dirty)", 203 1.1 ad "process abort (no data transfer)", 204 1.1 ad "process abort (partial transfer)", 205 1.1 ad "transaction error", 206 1.1 ad }; 207 1.83 joerg #endif 208 1.1 ad 209 1.5 ad static inline u_int32_t iop_inl(struct iop_softc *, int); 210 1.5 ad static inline void iop_outl(struct iop_softc *, int, u_int32_t); 211 1.5 ad 212 1.29 msaitoh static inline u_int32_t iop_inl_msg(struct iop_softc *, int); 213 1.30 ad static inline void iop_outl_msg(struct iop_softc *, int, u_int32_t); 214 1.29 msaitoh 215 1.76 cegger static void iop_config_interrupts(device_t); 216 1.11 ad static void iop_configure_devices(struct iop_softc *, int, int); 217 1.43 itojun static void iop_devinfo(int, char *, size_t); 218 1.1 ad static int iop_print(void *, const char *); 219 1.1 ad static void iop_shutdown(void *); 220 1.1 ad 221 1.11 ad static void iop_adjqparam(struct iop_softc *, int); 222 1.11 ad static int iop_handle_reply(struct iop_softc *, u_int32_t); 223 1.1 ad static int iop_hrt_get(struct iop_softc *); 224 1.1 ad static int iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int); 225 1.76 cegger static void 
iop_intr_event(device_t, struct iop_msg *, void *); 226 1.5 ad static int iop_lct_get0(struct iop_softc *, struct i2o_lct *, int, 227 1.5 ad u_int32_t); 228 1.11 ad static void iop_msg_poll(struct iop_softc *, struct iop_msg *, int); 229 1.11 ad static void iop_msg_wait(struct iop_softc *, struct iop_msg *, int); 230 1.1 ad static int iop_ofifo_init(struct iop_softc *); 231 1.15 ad static int iop_passthrough(struct iop_softc *, struct ioppt *, 232 1.15 ad struct proc *); 233 1.9 ad static void iop_reconf_thread(void *); 234 1.1 ad static void iop_release_mfa(struct iop_softc *, u_int32_t); 235 1.1 ad static int iop_reset(struct iop_softc *); 236 1.42 ad static int iop_sys_enable(struct iop_softc *); 237 1.1 ad static int iop_systab_set(struct iop_softc *); 238 1.11 ad static void iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *); 239 1.1 ad 240 1.1 ad #ifdef I2ODEBUG 241 1.11 ad static void iop_reply_print(struct iop_softc *, struct i2o_reply *); 242 1.1 ad #endif 243 1.5 ad 244 1.5 ad static inline u_int32_t 245 1.5 ad iop_inl(struct iop_softc *sc, int off) 246 1.5 ad { 247 1.5 ad 248 1.5 ad bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4, 249 1.5 ad BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ); 250 1.5 ad return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off)); 251 1.5 ad } 252 1.5 ad 253 1.5 ad static inline void 254 1.5 ad iop_outl(struct iop_softc *sc, int off, u_int32_t val) 255 1.5 ad { 256 1.5 ad 257 1.5 ad bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val); 258 1.5 ad bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4, 259 1.5 ad BUS_SPACE_BARRIER_WRITE); 260 1.5 ad } 261 1.5 ad 262 1.29 msaitoh static inline u_int32_t 263 1.29 msaitoh iop_inl_msg(struct iop_softc *sc, int off) 264 1.29 msaitoh { 265 1.29 msaitoh 266 1.32 ad bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4, 267 1.29 msaitoh BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ); 268 1.32 ad return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off)); 269 1.29 msaitoh } 
270 1.29 msaitoh 271 1.29 msaitoh static inline void 272 1.30 ad iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val) 273 1.29 msaitoh { 274 1.29 msaitoh 275 1.30 ad bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val); 276 1.30 ad bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4, 277 1.29 msaitoh BUS_SPACE_BARRIER_WRITE); 278 1.29 msaitoh } 279 1.29 msaitoh 280 1.1 ad /* 281 1.11 ad * Initialise the IOP and our interface. 282 1.1 ad */ 283 1.5 ad void 284 1.1 ad iop_init(struct iop_softc *sc, const char *intrstr) 285 1.1 ad { 286 1.11 ad struct iop_msg *im; 287 1.40 mycroft int rv, i, j, state, nsegs; 288 1.1 ad u_int32_t mask; 289 1.1 ad char ident[64]; 290 1.1 ad 291 1.15 ad state = 0; 292 1.15 ad 293 1.15 ad printf("I2O adapter"); 294 1.15 ad 295 1.68 ad mutex_init(&sc->sc_intrlock, MUTEX_DEFAULT, IPL_VM); 296 1.68 ad mutex_init(&sc->sc_conflock, MUTEX_DEFAULT, IPL_NONE); 297 1.65 ad cv_init(&sc->sc_confcv, "iopconf"); 298 1.65 ad 299 1.71 ad if (iop_ictxhashtbl == NULL) { 300 1.5 ad iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST, 301 1.71 ad true, &iop_ictxhash); 302 1.71 ad } 303 1.1 ad 304 1.15 ad /* Disable interrupts at the IOP. */ 305 1.15 ad mask = iop_inl(sc, IOP_REG_INTR_MASK); 306 1.15 ad iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO); 307 1.5 ad 308 1.15 ad /* Allocate a scratch DMA map for small miscellaneous shared data. 
*/ 309 1.15 ad if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0, 310 1.15 ad BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) { 311 1.82 chs aprint_error_dev(sc->sc_dev, "cannot create scratch dmamap\n"); 312 1.5 ad return; 313 1.1 ad } 314 1.15 ad 315 1.15 ad if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0, 316 1.15 ad sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) { 317 1.82 chs aprint_error_dev(sc->sc_dev, "cannot alloc scratch dmamem\n"); 318 1.15 ad goto bail_out; 319 1.15 ad } 320 1.15 ad state++; 321 1.15 ad 322 1.15 ad if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE, 323 1.15 ad &sc->sc_scr, 0)) { 324 1.82 chs aprint_error_dev(sc->sc_dev, "cannot map scratch dmamem\n"); 325 1.15 ad goto bail_out; 326 1.15 ad } 327 1.15 ad state++; 328 1.15 ad 329 1.15 ad if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr, 330 1.15 ad PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) { 331 1.82 chs aprint_error_dev(sc->sc_dev, "cannot load scratch dmamap\n"); 332 1.15 ad goto bail_out; 333 1.15 ad } 334 1.15 ad state++; 335 1.15 ad 336 1.21 ad #ifdef I2ODEBUG 337 1.21 ad /* So that our debug checks don't choke. */ 338 1.21 ad sc->sc_framesize = 128; 339 1.21 ad #endif 340 1.21 ad 341 1.65 ad /* Avoid syncing the reply map until it's set up. */ 342 1.65 ad sc->sc_curib = 0x123; 343 1.65 ad 344 1.15 ad /* Reset the adapter and request status. 
*/ 345 1.15 ad if ((rv = iop_reset(sc)) != 0) { 346 1.82 chs aprint_error_dev(sc->sc_dev, "not responding (reset)\n"); 347 1.15 ad goto bail_out; 348 1.15 ad } 349 1.15 ad 350 1.15 ad if ((rv = iop_status_get(sc, 1)) != 0) { 351 1.82 chs aprint_error_dev(sc->sc_dev, "not responding (get status)\n"); 352 1.15 ad goto bail_out; 353 1.15 ad } 354 1.15 ad 355 1.5 ad sc->sc_flags |= IOP_HAVESTATUS; 356 1.5 ad iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid), 357 1.1 ad ident, sizeof(ident)); 358 1.5 ad printf(" <%s>\n", ident); 359 1.5 ad 360 1.5 ad #ifdef I2ODEBUG 361 1.69 cegger printf("%s: orgid=0x%04x version=%d\n", 362 1.82 chs device_xname(sc->sc_dev), 363 1.5 ad le16toh(sc->sc_status.orgid), 364 1.5 ad (le32toh(sc->sc_status.segnumber) >> 12) & 15); 365 1.82 chs printf("%s: type want have cbase\n", device_xname(sc->sc_dev)); 366 1.82 chs printf("%s: mem %04x %04x %08x\n", device_xname(sc->sc_dev), 367 1.5 ad le32toh(sc->sc_status.desiredprivmemsize), 368 1.5 ad le32toh(sc->sc_status.currentprivmemsize), 369 1.5 ad le32toh(sc->sc_status.currentprivmembase)); 370 1.82 chs printf("%s: i/o %04x %04x %08x\n", device_xname(sc->sc_dev), 371 1.5 ad le32toh(sc->sc_status.desiredpriviosize), 372 1.5 ad le32toh(sc->sc_status.currentpriviosize), 373 1.5 ad le32toh(sc->sc_status.currentpriviobase)); 374 1.5 ad #endif 375 1.1 ad 376 1.11 ad sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes); 377 1.11 ad if (sc->sc_maxob > IOP_MAX_OUTBOUND) 378 1.11 ad sc->sc_maxob = IOP_MAX_OUTBOUND; 379 1.11 ad sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes); 380 1.11 ad if (sc->sc_maxib > IOP_MAX_INBOUND) 381 1.11 ad sc->sc_maxib = IOP_MAX_INBOUND; 382 1.19 ad sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2; 383 1.19 ad if (sc->sc_framesize > IOP_MAX_MSG_SIZE) 384 1.19 ad sc->sc_framesize = IOP_MAX_MSG_SIZE; 385 1.19 ad 386 1.19 ad #if defined(I2ODEBUG) || defined(DIAGNOSTIC) 387 1.19 ad if (sc->sc_framesize < IOP_MIN_MSG_SIZE) { 388 1.82 
chs aprint_error_dev(sc->sc_dev, "frame size too small (%d)\n", 389 1.69 cegger sc->sc_framesize); 390 1.23 ad goto bail_out; 391 1.19 ad } 392 1.19 ad #endif 393 1.11 ad 394 1.11 ad /* Allocate message wrappers. */ 395 1.90 chs im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_WAITOK|M_ZERO); 396 1.23 ad state++; 397 1.11 ad sc->sc_ims = im; 398 1.11 ad SLIST_INIT(&sc->sc_im_freelist); 399 1.11 ad 400 1.40 mycroft for (i = 0; i < sc->sc_maxib; i++, im++) { 401 1.11 ad rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER, 402 1.11 ad IOP_MAX_SEGS, IOP_MAX_XFER, 0, 403 1.11 ad BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 404 1.11 ad &im->im_xfer[0].ix_map); 405 1.11 ad if (rv != 0) { 406 1.82 chs aprint_error_dev(sc->sc_dev, "couldn't create dmamap (%d)", rv); 407 1.40 mycroft goto bail_out3; 408 1.11 ad } 409 1.11 ad 410 1.11 ad im->im_tctx = i; 411 1.11 ad SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain); 412 1.65 ad cv_init(&im->im_cv, "iopmsg"); 413 1.11 ad } 414 1.1 ad 415 1.17 wiz /* Initialise the IOP's outbound FIFO. */ 416 1.5 ad if (iop_ofifo_init(sc) != 0) { 417 1.82 chs aprint_error_dev(sc->sc_dev, "unable to init oubound FIFO\n"); 418 1.40 mycroft goto bail_out3; 419 1.5 ad } 420 1.1 ad 421 1.5 ad /* 422 1.5 ad * Defer further configuration until (a) interrupts are working and 423 1.5 ad * (b) we have enough information to build the system table. 424 1.5 ad */ 425 1.82 chs config_interrupts(sc->sc_dev, iop_config_interrupts); 426 1.1 ad 427 1.5 ad /* Configure shutdown hook before we start any device activity. */ 428 1.1 ad if (iop_sdh == NULL) 429 1.1 ad iop_sdh = shutdownhook_establish(iop_shutdown, NULL); 430 1.1 ad 431 1.1 ad /* Ensure interrupts are enabled at the IOP. 
*/ 432 1.5 ad mask = iop_inl(sc, IOP_REG_INTR_MASK); 433 1.5 ad iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO); 434 1.1 ad 435 1.1 ad if (intrstr != NULL) 436 1.82 chs printf("%s: interrupting at %s\n", device_xname(sc->sc_dev), 437 1.1 ad intrstr); 438 1.1 ad 439 1.1 ad #ifdef I2ODEBUG 440 1.1 ad printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n", 441 1.82 chs device_xname(sc->sc_dev), sc->sc_maxib, 442 1.11 ad le32toh(sc->sc_status.maxinboundmframes), 443 1.11 ad sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes)); 444 1.1 ad #endif 445 1.1 ad 446 1.15 ad return; 447 1.15 ad 448 1.40 mycroft bail_out3: 449 1.15 ad if (state > 3) { 450 1.15 ad for (j = 0; j < i; j++) 451 1.15 ad bus_dmamap_destroy(sc->sc_dmat, 452 1.15 ad sc->sc_ims[j].im_xfer[0].ix_map); 453 1.15 ad free(sc->sc_ims, M_DEVBUF); 454 1.15 ad } 455 1.40 mycroft bail_out: 456 1.15 ad if (state > 2) 457 1.15 ad bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap); 458 1.15 ad if (state > 1) 459 1.15 ad bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE); 460 1.15 ad if (state > 0) 461 1.15 ad bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs); 462 1.15 ad bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap); 463 1.1 ad } 464 1.1 ad 465 1.1 ad /* 466 1.5 ad * Perform autoconfiguration tasks. 
467 1.1 ad */ 468 1.1 ad static void 469 1.76 cegger iop_config_interrupts(device_t self) 470 1.1 ad { 471 1.18 ad struct iop_attach_args ia; 472 1.5 ad struct iop_softc *sc, *iop; 473 1.5 ad struct i2o_systab_entry *ste; 474 1.5 ad int rv, i, niop; 475 1.49 drochner int locs[IOPCF_NLOCS]; 476 1.1 ad 477 1.54 thorpej sc = device_private(self); 478 1.65 ad mutex_enter(&sc->sc_conflock); 479 1.65 ad 480 1.5 ad LIST_INIT(&sc->sc_iilist); 481 1.5 ad 482 1.82 chs printf("%s: configuring...\n", device_xname(sc->sc_dev)); 483 1.1 ad 484 1.5 ad if (iop_hrt_get(sc) != 0) { 485 1.82 chs printf("%s: unable to retrieve HRT\n", device_xname(sc->sc_dev)); 486 1.65 ad mutex_exit(&sc->sc_conflock); 487 1.5 ad return; 488 1.5 ad } 489 1.1 ad 490 1.5 ad /* 491 1.5 ad * Build the system table. 492 1.5 ad */ 493 1.5 ad if (iop_systab == NULL) { 494 1.5 ad for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) { 495 1.73 tsutsui if ((iop = device_lookup_private(&iop_cd, i)) == NULL) 496 1.5 ad continue; 497 1.5 ad if ((iop->sc_flags & IOP_HAVESTATUS) == 0) 498 1.5 ad continue; 499 1.11 ad if (iop_status_get(iop, 1) != 0) { 500 1.82 chs aprint_error_dev(sc->sc_dev, "unable to retrieve status\n"); 501 1.5 ad iop->sc_flags &= ~IOP_HAVESTATUS; 502 1.5 ad continue; 503 1.5 ad } 504 1.5 ad niop++; 505 1.5 ad } 506 1.65 ad if (niop == 0) { 507 1.65 ad mutex_exit(&sc->sc_conflock); 508 1.5 ad return; 509 1.65 ad } 510 1.5 ad 511 1.5 ad i = sizeof(struct i2o_systab_entry) * (niop - 1) + 512 1.5 ad sizeof(struct i2o_systab); 513 1.5 ad iop_systab_size = i; 514 1.90 chs iop_systab = malloc(i, M_DEVBUF, M_WAITOK|M_ZERO); 515 1.5 ad iop_systab->numentries = niop; 516 1.5 ad iop_systab->version = I2O_VERSION_11; 517 1.5 ad 518 1.5 ad for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) { 519 1.73 tsutsui if ((iop = device_lookup_private(&iop_cd, i)) == NULL) 520 1.5 ad continue; 521 1.5 ad if ((iop->sc_flags & IOP_HAVESTATUS) == 0) 522 1.5 ad continue; 523 1.5 ad 524 1.5 ad ste->orgid = 
iop->sc_status.orgid; 525 1.82 chs ste->iopid = device_unit(iop->sc_dev) + 2; 526 1.5 ad ste->segnumber = 527 1.5 ad htole32(le32toh(iop->sc_status.segnumber) & ~4095); 528 1.5 ad ste->iopcaps = iop->sc_status.iopcaps; 529 1.5 ad ste->inboundmsgframesize = 530 1.5 ad iop->sc_status.inboundmframesize; 531 1.5 ad ste->inboundmsgportaddresslow = 532 1.5 ad htole32(iop->sc_memaddr + IOP_REG_IFIFO); 533 1.5 ad ste++; 534 1.5 ad } 535 1.5 ad } 536 1.5 ad 537 1.11 ad /* 538 1.11 ad * Post the system table to the IOP and bring it to the OPERATIONAL 539 1.11 ad * state. 540 1.11 ad */ 541 1.5 ad if (iop_systab_set(sc) != 0) { 542 1.82 chs aprint_error_dev(sc->sc_dev, "unable to set system table\n"); 543 1.65 ad mutex_exit(&sc->sc_conflock); 544 1.5 ad return; 545 1.5 ad } 546 1.42 ad if (iop_sys_enable(sc) != 0) { 547 1.82 chs aprint_error_dev(sc->sc_dev, "unable to enable system\n"); 548 1.65 ad mutex_exit(&sc->sc_conflock); 549 1.5 ad return; 550 1.5 ad } 551 1.5 ad 552 1.5 ad /* 553 1.5 ad * Set up an event handler for this IOP. 
554 1.5 ad */ 555 1.5 ad sc->sc_eventii.ii_dv = self; 556 1.5 ad sc->sc_eventii.ii_intr = iop_intr_event; 557 1.15 ad sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY; 558 1.5 ad sc->sc_eventii.ii_tid = I2O_TID_IOP; 559 1.11 ad iop_initiator_register(sc, &sc->sc_eventii); 560 1.11 ad 561 1.11 ad rv = iop_util_eventreg(sc, &sc->sc_eventii, 562 1.11 ad I2O_EVENT_EXEC_RESOURCE_LIMITS | 563 1.11 ad I2O_EVENT_EXEC_CONNECTION_FAIL | 564 1.11 ad I2O_EVENT_EXEC_ADAPTER_FAULT | 565 1.11 ad I2O_EVENT_EXEC_POWER_FAIL | 566 1.11 ad I2O_EVENT_EXEC_RESET_PENDING | 567 1.11 ad I2O_EVENT_EXEC_RESET_IMMINENT | 568 1.11 ad I2O_EVENT_EXEC_HARDWARE_FAIL | 569 1.11 ad I2O_EVENT_EXEC_XCT_CHANGE | 570 1.11 ad I2O_EVENT_EXEC_DDM_AVAILIBILITY | 571 1.11 ad I2O_EVENT_GEN_DEVICE_RESET | 572 1.11 ad I2O_EVENT_GEN_STATE_CHANGE | 573 1.11 ad I2O_EVENT_GEN_GENERAL_WARNING); 574 1.11 ad if (rv != 0) { 575 1.82 chs aprint_error_dev(sc->sc_dev, "unable to register for events"); 576 1.65 ad mutex_exit(&sc->sc_conflock); 577 1.5 ad return; 578 1.5 ad } 579 1.5 ad 580 1.18 ad /* 581 1.18 ad * Attempt to match and attach a product-specific extension. 582 1.18 ad */ 583 1.1 ad ia.ia_class = I2O_CLASS_ANY; 584 1.1 ad ia.ia_tid = I2O_TID_IOP; 585 1.49 drochner locs[IOPCF_TID] = I2O_TID_IOP; 586 1.91 thorpej config_found(self, &ia, iop_print, 587 1.92 thorpej CFARGS(.submatch = config_stdsubmatch, 588 1.92 thorpej .locators = locs)); 589 1.5 ad 590 1.18 ad /* 591 1.18 ad * Start device configuration. 
592 1.18 ad */ 593 1.63 ad if ((rv = iop_reconfigure(sc, 0)) == -1) 594 1.82 chs aprint_error_dev(sc->sc_dev, "configure failed (%d)\n", rv); 595 1.65 ad 596 1.9 ad 597 1.11 ad sc->sc_flags |= IOP_ONLINE; 598 1.66 ad rv = kthread_create(PRI_NONE, 0, NULL, iop_reconf_thread, sc, 599 1.82 chs &sc->sc_reconf_thread, "%s", device_xname(sc->sc_dev)); 600 1.65 ad mutex_exit(&sc->sc_conflock); 601 1.11 ad if (rv != 0) { 602 1.82 chs aprint_error_dev(sc->sc_dev, "unable to create reconfiguration thread (%d)", rv); 603 1.11 ad return; 604 1.11 ad } 605 1.5 ad } 606 1.5 ad 607 1.5 ad /* 608 1.5 ad * Reconfiguration thread; listens for LCT change notification, and 609 1.14 wiz * initiates re-configuration if received. 610 1.5 ad */ 611 1.5 ad static void 612 1.9 ad iop_reconf_thread(void *cookie) 613 1.5 ad { 614 1.5 ad struct iop_softc *sc; 615 1.5 ad struct i2o_lct lct; 616 1.5 ad u_int32_t chgind; 617 1.11 ad int rv; 618 1.5 ad 619 1.5 ad sc = cookie; 620 1.11 ad chgind = sc->sc_chgind + 1; 621 1.5 ad 622 1.5 ad for (;;) { 623 1.11 ad DPRINTF(("%s: async reconfig: requested 0x%08x\n", 624 1.82 chs device_xname(sc->sc_dev), chgind)); 625 1.5 ad 626 1.11 ad rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind); 627 1.11 ad 628 1.11 ad DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n", 629 1.82 chs device_xname(sc->sc_dev), le32toh(lct.changeindicator), rv)); 630 1.11 ad 631 1.65 ad mutex_enter(&sc->sc_conflock); 632 1.63 ad if (rv == 0) { 633 1.11 ad iop_reconfigure(sc, le32toh(lct.changeindicator)); 634 1.11 ad chgind = sc->sc_chgind + 1; 635 1.5 ad } 636 1.65 ad (void)cv_timedwait(&sc->sc_confcv, &sc->sc_conflock, hz * 5); 637 1.65 ad mutex_exit(&sc->sc_conflock); 638 1.5 ad } 639 1.5 ad } 640 1.5 ad 641 1.5 ad /* 642 1.5 ad * Reconfigure: find new and removed devices. 
643 1.5 ad */ 644 1.18 ad int 645 1.11 ad iop_reconfigure(struct iop_softc *sc, u_int chgind) 646 1.5 ad { 647 1.5 ad struct iop_msg *im; 648 1.11 ad struct i2o_hba_bus_scan mf; 649 1.5 ad struct i2o_lct_entry *le; 650 1.5 ad struct iop_initiator *ii, *nextii; 651 1.5 ad int rv, tid, i; 652 1.5 ad 653 1.65 ad KASSERT(mutex_owned(&sc->sc_conflock)); 654 1.65 ad 655 1.1 ad /* 656 1.5 ad * If the reconfiguration request isn't the result of LCT change 657 1.5 ad * notification, then be more thorough: ask all bus ports to scan 658 1.5 ad * their busses. Wait up to 5 minutes for each bus port to complete 659 1.5 ad * the request. 660 1.1 ad */ 661 1.5 ad if (chgind == 0) { 662 1.5 ad if ((rv = iop_lct_get(sc)) != 0) { 663 1.5 ad DPRINTF(("iop_reconfigure: unable to read LCT\n")); 664 1.11 ad return (rv); 665 1.5 ad } 666 1.5 ad 667 1.5 ad le = sc->sc_lct->entry; 668 1.5 ad for (i = 0; i < sc->sc_nlctent; i++, le++) { 669 1.5 ad if ((le16toh(le->classid) & 4095) != 670 1.5 ad I2O_CLASS_BUS_ADAPTER_PORT) 671 1.5 ad continue; 672 1.15 ad tid = le16toh(le->localtid) & 4095; 673 1.5 ad 674 1.15 ad im = iop_msg_alloc(sc, IM_WAIT); 675 1.5 ad 676 1.11 ad mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan); 677 1.11 ad mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN); 678 1.11 ad mf.msgictx = IOP_ICTX; 679 1.11 ad mf.msgtctx = im->im_tctx; 680 1.5 ad 681 1.82 chs DPRINTF(("%s: scanning bus %d\n", device_xname(sc->sc_dev), 682 1.5 ad tid)); 683 1.5 ad 684 1.11 ad rv = iop_msg_post(sc, im, &mf, 5*60*1000); 685 1.11 ad iop_msg_free(sc, im); 686 1.11 ad #ifdef I2ODEBUG 687 1.11 ad if (rv != 0) 688 1.82 chs aprint_error_dev(sc->sc_dev, "bus scan failed\n"); 689 1.11 ad #endif 690 1.5 ad } 691 1.11 ad } else if (chgind <= sc->sc_chgind) { 692 1.82 chs DPRINTF(("%s: LCT unchanged (async)\n", device_xname(sc->sc_dev))); 693 1.11 ad return (0); 694 1.5 ad } 695 1.5 ad 696 1.5 ad /* Re-read the LCT and determine if it has changed. 
*/ 697 1.5 ad if ((rv = iop_lct_get(sc)) != 0) { 698 1.5 ad DPRINTF(("iop_reconfigure: unable to re-read LCT\n")); 699 1.11 ad return (rv); 700 1.5 ad } 701 1.82 chs DPRINTF(("%s: %d LCT entries\n", device_xname(sc->sc_dev), sc->sc_nlctent)); 702 1.5 ad 703 1.11 ad chgind = le32toh(sc->sc_lct->changeindicator); 704 1.11 ad if (chgind == sc->sc_chgind) { 705 1.82 chs DPRINTF(("%s: LCT unchanged\n", device_xname(sc->sc_dev))); 706 1.11 ad return (0); 707 1.5 ad } 708 1.82 chs DPRINTF(("%s: LCT changed\n", device_xname(sc->sc_dev))); 709 1.11 ad sc->sc_chgind = chgind; 710 1.5 ad 711 1.5 ad if (sc->sc_tidmap != NULL) 712 1.5 ad free(sc->sc_tidmap, M_DEVBUF); 713 1.5 ad sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap), 714 1.90 chs M_DEVBUF, M_WAITOK|M_ZERO); 715 1.5 ad 716 1.11 ad /* Allow 1 queued command per device while we're configuring. */ 717 1.11 ad iop_adjqparam(sc, 1); 718 1.11 ad 719 1.11 ad /* 720 1.11 ad * Match and attach child devices. We configure high-level devices 721 1.11 ad * first so that any claims will propagate throughout the LCT, 722 1.11 ad * hopefully masking off aliased devices as a result. 723 1.11 ad * 724 1.11 ad * Re-reading the LCT at this point is a little dangerous, but we'll 725 1.11 ad * trust the IOP (and the operator) to behave itself... 726 1.11 ad */ 727 1.11 ad iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY, 728 1.11 ad IC_CONFIGURE | IC_PRIORITY); 729 1.58 christos if ((rv = iop_lct_get(sc)) != 0) { 730 1.11 ad DPRINTF(("iop_reconfigure: unable to re-read LCT\n")); 731 1.58 christos } 732 1.11 ad iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY, 733 1.11 ad IC_CONFIGURE); 734 1.5 ad 735 1.5 ad for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) { 736 1.11 ad nextii = LIST_NEXT(ii, ii_list); 737 1.5 ad 738 1.5 ad /* Detach devices that were configured, but are now gone. 
*/ 739 1.5 ad for (i = 0; i < sc->sc_nlctent; i++) 740 1.5 ad if (ii->ii_tid == sc->sc_tidmap[i].it_tid) 741 1.5 ad break; 742 1.5 ad if (i == sc->sc_nlctent || 743 1.52 bouyer (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) { 744 1.5 ad config_detach(ii->ii_dv, DETACH_FORCE); 745 1.52 bouyer continue; 746 1.52 bouyer } 747 1.5 ad 748 1.5 ad /* 749 1.5 ad * Tell initiators that existed before the re-configuration 750 1.5 ad * to re-configure. 751 1.5 ad */ 752 1.5 ad if (ii->ii_reconfig == NULL) 753 1.5 ad continue; 754 1.5 ad if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0) 755 1.82 chs aprint_error_dev(sc->sc_dev, "%s failed reconfigure (%d)\n", 756 1.69 cegger device_xname(ii->ii_dv), rv); 757 1.5 ad } 758 1.5 ad 759 1.11 ad /* Re-adjust queue parameters and return. */ 760 1.11 ad if (sc->sc_nii != 0) 761 1.11 ad iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE) 762 1.11 ad / sc->sc_nii); 763 1.11 ad 764 1.11 ad return (0); 765 1.1 ad } 766 1.1 ad 767 1.1 ad /* 768 1.5 ad * Configure I2O devices into the system. 769 1.1 ad */ 770 1.1 ad static void 771 1.11 ad iop_configure_devices(struct iop_softc *sc, int mask, int maskval) 772 1.1 ad { 773 1.1 ad struct iop_attach_args ia; 774 1.5 ad struct iop_initiator *ii; 775 1.1 ad const struct i2o_lct_entry *le; 776 1.76 cegger device_t dv; 777 1.8 ad int i, j, nent; 778 1.11 ad u_int usertid; 779 1.49 drochner int locs[IOPCF_NLOCS]; 780 1.1 ad 781 1.1 ad nent = sc->sc_nlctent; 782 1.1 ad for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) { 783 1.15 ad sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095; 784 1.9 ad 785 1.11 ad /* Ignore the device if it's in use. */ 786 1.11 ad usertid = le32toh(le->usertid) & 4095; 787 1.11 ad if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST) 788 1.1 ad continue; 789 1.1 ad 790 1.1 ad ia.ia_class = le16toh(le->classid) & 4095; 791 1.9 ad ia.ia_tid = sc->sc_tidmap[i].it_tid; 792 1.8 ad 793 1.8 ad /* Ignore uninteresting devices. 
*/ 794 1.8 ad for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++) 795 1.8 ad if (iop_class[j].ic_class == ia.ia_class) 796 1.8 ad break; 797 1.8 ad if (j < sizeof(iop_class) / sizeof(iop_class[0]) && 798 1.11 ad (iop_class[j].ic_flags & mask) != maskval) 799 1.8 ad continue; 800 1.1 ad 801 1.1 ad /* 802 1.5 ad * Try to configure the device only if it's not already 803 1.5 ad * configured. 804 1.1 ad */ 805 1.7 ad LIST_FOREACH(ii, &sc->sc_iilist, ii_list) { 806 1.9 ad if (ia.ia_tid == ii->ii_tid) { 807 1.9 ad sc->sc_tidmap[i].it_flags |= IT_CONFIGURED; 808 1.9 ad strcpy(sc->sc_tidmap[i].it_dvname, 809 1.69 cegger device_xname(ii->ii_dv)); 810 1.11 ad break; 811 1.9 ad } 812 1.7 ad } 813 1.5 ad if (ii != NULL) 814 1.5 ad continue; 815 1.5 ad 816 1.49 drochner locs[IOPCF_TID] = ia.ia_tid; 817 1.44 drochner 818 1.91 thorpej dv = config_found(sc->sc_dev, &ia, iop_print, 819 1.92 thorpej CFARGS(.submatch = config_stdsubmatch, 820 1.92 thorpej .locators = locs)); 821 1.9 ad if (dv != NULL) { 822 1.11 ad sc->sc_tidmap[i].it_flags |= IT_CONFIGURED; 823 1.69 cegger strcpy(sc->sc_tidmap[i].it_dvname, device_xname(dv)); 824 1.9 ad } 825 1.1 ad } 826 1.1 ad } 827 1.1 ad 828 1.11 ad /* 829 1.11 ad * Adjust queue parameters for all child devices. 
830 1.11 ad */ 831 1.11 ad static void 832 1.11 ad iop_adjqparam(struct iop_softc *sc, int mpi) 833 1.11 ad { 834 1.11 ad struct iop_initiator *ii; 835 1.11 ad 836 1.11 ad LIST_FOREACH(ii, &sc->sc_iilist, ii_list) 837 1.11 ad if (ii->ii_adjqparam != NULL) 838 1.11 ad (*ii->ii_adjqparam)(ii->ii_dv, mpi); 839 1.11 ad } 840 1.11 ad 841 1.1 ad static void 842 1.43 itojun iop_devinfo(int class, char *devinfo, size_t l) 843 1.1 ad { 844 1.1 ad int i; 845 1.1 ad 846 1.1 ad for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++) 847 1.1 ad if (class == iop_class[i].ic_class) 848 1.1 ad break; 849 1.47 perry 850 1.1 ad if (i == sizeof(iop_class) / sizeof(iop_class[0])) 851 1.43 itojun snprintf(devinfo, l, "device (class 0x%x)", class); 852 1.1 ad else 853 1.43 itojun strlcpy(devinfo, iop_class[i].ic_caption, l); 854 1.1 ad } 855 1.1 ad 856 1.1 ad static int 857 1.1 ad iop_print(void *aux, const char *pnp) 858 1.1 ad { 859 1.1 ad struct iop_attach_args *ia; 860 1.1 ad char devinfo[256]; 861 1.1 ad 862 1.1 ad ia = aux; 863 1.1 ad 864 1.1 ad if (pnp != NULL) { 865 1.43 itojun iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo)); 866 1.33 thorpej aprint_normal("%s at %s", devinfo, pnp); 867 1.1 ad } 868 1.33 thorpej aprint_normal(" tid %d", ia->ia_tid); 869 1.1 ad return (UNCONF); 870 1.1 ad } 871 1.1 ad 872 1.1 ad /* 873 1.1 ad * Shut down all configured IOPs. 
874 1.47 perry */ 875 1.1 ad static void 876 1.61 christos iop_shutdown(void *junk) 877 1.1 ad { 878 1.1 ad struct iop_softc *sc; 879 1.1 ad int i; 880 1.1 ad 881 1.11 ad printf("shutting down iop devices..."); 882 1.1 ad 883 1.1 ad for (i = 0; i < iop_cd.cd_ndevs; i++) { 884 1.73 tsutsui if ((sc = device_lookup_private(&iop_cd, i)) == NULL) 885 1.1 ad continue; 886 1.5 ad if ((sc->sc_flags & IOP_ONLINE) == 0) 887 1.5 ad continue; 888 1.27 ad 889 1.5 ad iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX, 890 1.12 ad 0, 5000); 891 1.27 ad 892 1.27 ad if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) { 893 1.27 ad /* 894 1.27 ad * Some AMI firmware revisions will go to sleep and 895 1.27 ad * never come back after this. 896 1.27 ad */ 897 1.27 ad iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR, 898 1.27 ad IOP_ICTX, 0, 1000); 899 1.27 ad } 900 1.1 ad } 901 1.1 ad 902 1.1 ad /* Wait. Some boards could still be flushing, stupidly enough. */ 903 1.1 ad delay(5000*1000); 904 1.18 ad printf(" done\n"); 905 1.1 ad } 906 1.1 ad 907 1.1 ad /* 908 1.11 ad * Retrieve IOP status. 
 */
/*
 * Issue an EXEC_STATUS_GET and cache the result in sc->sc_status.
 * Unlike normal transactions, the IOP DMAs the status record directly
 * into the scratch buffer rather than posting a reply to the outbound
 * FIFO; completion is detected by polling the record's sync byte.
 * If `nosleep' is set the routine busy-waits (usable before the system
 * can sleep); otherwise it kpauses between polls.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	/* The status record lands in the shared scratch DMA area. */
	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	/* Physical address of the scratch buffer, split into 32-bit halves. */
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	/*
	 * Clear the buffer so a stale sync byte can't be mistaken for
	 * completion.  NOTE(review): the PREWRITE sync precedes the CPU
	 * write and POSTWRITE follows it, which looks inverted relative
	 * to the usual bus_dma(9) idiom — confirm against bus_dma(9).
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREWRITE);
	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_POSTWRITE);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

	/* Poll up to 100 times (~10s) for the IOP to fill the record. */
	for (i = 100; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			kpause("iopstat", false, hz / 10, NULL);
	}

	if (st->syncbyte != 0xff) {
		aprint_error_dev(sc->sc_dev, "STATUS_GET timed out\n");
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
/*
 * Tell the IOP the reply-frame geometry (OUTBOUND_INIT), allocate DMA-safe
 * memory for the reply frames on first call, and post every reply MFA to
 * the outbound free FIFO.  Returns 0 on success or an errno.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	/* The IOP signals completion through the scratch status word. */
	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	/* Reply frame size is carried in 32-bit words in the high half. */
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_dmamap->dm_segs[0].ds_addr;
	/* Account for the two SGL words in the message-size field. */
	mb[0] += 2 << 16;

	/* Clear the status word before the IOP writes to it. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE);
	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	/* Poll (with a DMA sync before each read) for completion. */
	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		aprint_error_dev(sc->sc_dev, "outbound FIFO init failed (%d)\n",
		    le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		/* One frame per outstanding outbound message. */
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA alloc = %d\n",
			   rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA map = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA create = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA load = %d\n", rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;

		/* Now safe to sync the reply map. */
		sc->sc_curib = 0;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
1066 1.1 ad */ 1067 1.1 ad static int 1068 1.1 ad iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size) 1069 1.1 ad { 1070 1.1 ad struct iop_msg *im; 1071 1.1 ad int rv; 1072 1.11 ad struct i2o_exec_hrt_get *mf; 1073 1.11 ad u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1074 1.1 ad 1075 1.15 ad im = iop_msg_alloc(sc, IM_WAIT); 1076 1.11 ad mf = (struct i2o_exec_hrt_get *)mb; 1077 1.11 ad mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get); 1078 1.11 ad mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET); 1079 1.11 ad mf->msgictx = IOP_ICTX; 1080 1.11 ad mf->msgtctx = im->im_tctx; 1081 1.1 ad 1082 1.15 ad iop_msg_map(sc, im, mb, hrt, size, 0, NULL); 1083 1.11 ad rv = iop_msg_post(sc, im, mb, 30000); 1084 1.1 ad iop_msg_unmap(sc, im); 1085 1.11 ad iop_msg_free(sc, im); 1086 1.1 ad return (rv); 1087 1.1 ad } 1088 1.1 ad 1089 1.1 ad /* 1090 1.5 ad * Read the IOP's hardware resource table. 1091 1.1 ad */ 1092 1.1 ad static int 1093 1.1 ad iop_hrt_get(struct iop_softc *sc) 1094 1.1 ad { 1095 1.1 ad struct i2o_hrt hrthdr, *hrt; 1096 1.1 ad int size, rv; 1097 1.1 ad 1098 1.11 ad rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr)); 1099 1.11 ad if (rv != 0) 1100 1.1 ad return (rv); 1101 1.1 ad 1102 1.82 chs DPRINTF(("%s: %d hrt entries\n", device_xname(sc->sc_dev), 1103 1.5 ad le16toh(hrthdr.numentries))); 1104 1.5 ad 1105 1.47 perry size = sizeof(struct i2o_hrt) + 1106 1.15 ad (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry); 1107 1.90 chs hrt = malloc(size, M_DEVBUF, M_WAITOK); 1108 1.1 ad if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) { 1109 1.1 ad free(hrt, M_DEVBUF); 1110 1.1 ad return (rv); 1111 1.1 ad } 1112 1.1 ad 1113 1.1 ad if (sc->sc_hrt != NULL) 1114 1.1 ad free(sc->sc_hrt, M_DEVBUF); 1115 1.1 ad sc->sc_hrt = hrt; 1116 1.1 ad return (0); 1117 1.1 ad } 1118 1.1 ad 1119 1.1 ad /* 1120 1.1 ad * Request the specified number of bytes from the IOP's logical 1121 1.5 ad * configuration table. 
If a change indicator is specified, this 1122 1.11 ad * is a verbatim notification request, so the caller is prepared 1123 1.5 ad * to wait indefinitely. 1124 1.1 ad */ 1125 1.1 ad static int 1126 1.5 ad iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size, 1127 1.5 ad u_int32_t chgind) 1128 1.1 ad { 1129 1.1 ad struct iop_msg *im; 1130 1.11 ad struct i2o_exec_lct_notify *mf; 1131 1.1 ad int rv; 1132 1.11 ad u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1133 1.1 ad 1134 1.15 ad im = iop_msg_alloc(sc, IM_WAIT); 1135 1.1 ad memset(lct, 0, size); 1136 1.1 ad 1137 1.11 ad mf = (struct i2o_exec_lct_notify *)mb; 1138 1.11 ad mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify); 1139 1.11 ad mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY); 1140 1.11 ad mf->msgictx = IOP_ICTX; 1141 1.11 ad mf->msgtctx = im->im_tctx; 1142 1.11 ad mf->classid = I2O_CLASS_ANY; 1143 1.11 ad mf->changeindicator = chgind; 1144 1.5 ad 1145 1.9 ad #ifdef I2ODEBUG 1146 1.9 ad printf("iop_lct_get0: reading LCT"); 1147 1.9 ad if (chgind != 0) 1148 1.9 ad printf(" (async)"); 1149 1.9 ad printf("\n"); 1150 1.9 ad #endif 1151 1.1 ad 1152 1.15 ad iop_msg_map(sc, im, mb, lct, size, 0, NULL); 1153 1.11 ad rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0)); 1154 1.1 ad iop_msg_unmap(sc, im); 1155 1.11 ad iop_msg_free(sc, im); 1156 1.1 ad return (rv); 1157 1.1 ad } 1158 1.1 ad 1159 1.1 ad /* 1160 1.6 ad * Read the IOP's logical configuration table. 
1161 1.1 ad */ 1162 1.1 ad int 1163 1.1 ad iop_lct_get(struct iop_softc *sc) 1164 1.1 ad { 1165 1.5 ad int esize, size, rv; 1166 1.5 ad struct i2o_lct *lct; 1167 1.1 ad 1168 1.5 ad esize = le32toh(sc->sc_status.expectedlctsize); 1169 1.90 chs lct = malloc(esize, M_DEVBUF, M_WAITOK); 1170 1.5 ad if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) { 1171 1.1 ad free(lct, M_DEVBUF); 1172 1.1 ad return (rv); 1173 1.1 ad } 1174 1.1 ad 1175 1.5 ad size = le16toh(lct->tablesize) << 2; 1176 1.5 ad if (esize != size) { 1177 1.1 ad free(lct, M_DEVBUF); 1178 1.90 chs lct = malloc(size, M_DEVBUF, M_WAITOK); 1179 1.5 ad if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) { 1180 1.5 ad free(lct, M_DEVBUF); 1181 1.5 ad return (rv); 1182 1.5 ad } 1183 1.1 ad } 1184 1.5 ad 1185 1.5 ad /* Swap in the new LCT. */ 1186 1.1 ad if (sc->sc_lct != NULL) 1187 1.1 ad free(sc->sc_lct, M_DEVBUF); 1188 1.1 ad sc->sc_lct = lct; 1189 1.1 ad sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) - 1190 1.1 ad sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) / 1191 1.1 ad sizeof(struct i2o_lct_entry); 1192 1.1 ad return (0); 1193 1.1 ad } 1194 1.1 ad 1195 1.1 ad /* 1196 1.42 ad * Post a SYS_ENABLE message to the adapter. 
1197 1.42 ad */ 1198 1.42 ad int 1199 1.42 ad iop_sys_enable(struct iop_softc *sc) 1200 1.42 ad { 1201 1.42 ad struct iop_msg *im; 1202 1.42 ad struct i2o_msg mf; 1203 1.42 ad int rv; 1204 1.42 ad 1205 1.42 ad im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS); 1206 1.42 ad 1207 1.42 ad mf.msgflags = I2O_MSGFLAGS(i2o_msg); 1208 1.42 ad mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE); 1209 1.42 ad mf.msgictx = IOP_ICTX; 1210 1.42 ad mf.msgtctx = im->im_tctx; 1211 1.42 ad 1212 1.42 ad rv = iop_msg_post(sc, im, &mf, 30000); 1213 1.42 ad if (rv == 0) { 1214 1.42 ad if ((im->im_flags & IM_FAIL) != 0) 1215 1.42 ad rv = ENXIO; 1216 1.42 ad else if (im->im_reqstatus == I2O_STATUS_SUCCESS || 1217 1.42 ad (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER && 1218 1.42 ad im->im_detstatus == I2O_DSC_INVALID_REQUEST)) 1219 1.42 ad rv = 0; 1220 1.42 ad else 1221 1.42 ad rv = EIO; 1222 1.42 ad } 1223 1.42 ad 1224 1.42 ad iop_msg_free(sc, im); 1225 1.42 ad return (rv); 1226 1.42 ad } 1227 1.42 ad 1228 1.42 ad /* 1229 1.11 ad * Request the specified parameter group from the target. If an initiator 1230 1.11 ad * is specified (a) don't wait for the operation to complete, but instead 1231 1.11 ad * let the initiator's interrupt handler deal with the reply and (b) place a 1232 1.11 ad * pointer to the parameter group op in the wrapper's `im_dvcontext' field. 1233 1.1 ad */ 1234 1.1 ad int 1235 1.16 ad iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf, 1236 1.16 ad int size, struct iop_initiator *ii) 1237 1.1 ad { 1238 1.1 ad struct iop_msg *im; 1239 1.11 ad struct i2o_util_params_op *mf; 1240 1.16 ad int rv; 1241 1.11 ad struct iop_pgop *pgop; 1242 1.11 ad u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1243 1.1 ad 1244 1.15 ad im = iop_msg_alloc(sc, (ii == NULL ? 
IM_WAIT : 0) | IM_NOSTATUS); 1245 1.90 chs pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK); 1246 1.11 ad im->im_dvcontext = pgop; 1247 1.1 ad 1248 1.11 ad mf = (struct i2o_util_params_op *)mb; 1249 1.11 ad mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op); 1250 1.16 ad mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET); 1251 1.11 ad mf->msgictx = IOP_ICTX; 1252 1.11 ad mf->msgtctx = im->im_tctx; 1253 1.11 ad mf->flags = 0; 1254 1.11 ad 1255 1.11 ad pgop->olh.count = htole16(1); 1256 1.11 ad pgop->olh.reserved = htole16(0); 1257 1.16 ad pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET); 1258 1.11 ad pgop->oat.fieldcount = htole16(0xffff); 1259 1.11 ad pgop->oat.group = htole16(group); 1260 1.11 ad 1261 1.5 ad memset(buf, 0, size); 1262 1.15 ad iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL); 1263 1.16 ad iop_msg_map(sc, im, mb, buf, size, 0, NULL); 1264 1.11 ad rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0)); 1265 1.11 ad 1266 1.11 ad /* Detect errors; let partial transfers to count as success. */ 1267 1.11 ad if (ii == NULL && rv == 0) { 1268 1.42 ad if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER && 1269 1.42 ad im->im_detstatus == I2O_DSC_UNKNOWN_ERROR) 1270 1.11 ad rv = 0; 1271 1.11 ad else 1272 1.42 ad rv = (im->im_reqstatus != 0 ? EIO : 0); 1273 1.16 ad 1274 1.16 ad if (rv != 0) 1275 1.16 ad printf("%s: FIELD_GET failed for tid %d group %d\n", 1276 1.82 chs device_xname(sc->sc_dev), tid, group); 1277 1.11 ad } 1278 1.11 ad 1279 1.11 ad if (ii == NULL || rv != 0) { 1280 1.11 ad iop_msg_unmap(sc, im); 1281 1.11 ad iop_msg_free(sc, im); 1282 1.11 ad free(pgop, M_DEVBUF); 1283 1.11 ad } 1284 1.1 ad 1285 1.1 ad return (rv); 1286 1.11 ad } 1287 1.1 ad 1288 1.1 ad /* 1289 1.16 ad * Set a single field in a scalar parameter group. 
1290 1.16 ad */ 1291 1.16 ad int 1292 1.16 ad iop_field_set(struct iop_softc *sc, int tid, int group, void *buf, 1293 1.16 ad int size, int field) 1294 1.16 ad { 1295 1.16 ad struct iop_msg *im; 1296 1.16 ad struct i2o_util_params_op *mf; 1297 1.16 ad struct iop_pgop *pgop; 1298 1.16 ad int rv, totsize; 1299 1.16 ad u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1300 1.16 ad 1301 1.16 ad totsize = sizeof(*pgop) + size; 1302 1.16 ad 1303 1.16 ad im = iop_msg_alloc(sc, IM_WAIT); 1304 1.90 chs pgop = malloc(totsize, M_DEVBUF, M_WAITOK); 1305 1.16 ad mf = (struct i2o_util_params_op *)mb; 1306 1.16 ad mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op); 1307 1.16 ad mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET); 1308 1.16 ad mf->msgictx = IOP_ICTX; 1309 1.16 ad mf->msgtctx = im->im_tctx; 1310 1.16 ad mf->flags = 0; 1311 1.16 ad 1312 1.16 ad pgop->olh.count = htole16(1); 1313 1.16 ad pgop->olh.reserved = htole16(0); 1314 1.16 ad pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET); 1315 1.16 ad pgop->oat.fieldcount = htole16(1); 1316 1.16 ad pgop->oat.group = htole16(group); 1317 1.16 ad pgop->oat.fields[0] = htole16(field); 1318 1.16 ad memcpy(pgop + 1, buf, size); 1319 1.16 ad 1320 1.16 ad iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL); 1321 1.16 ad rv = iop_msg_post(sc, im, mb, 30000); 1322 1.16 ad if (rv != 0) 1323 1.82 chs aprint_error_dev(sc->sc_dev, "FIELD_SET failed for tid %d group %d\n", 1324 1.69 cegger tid, group); 1325 1.16 ad 1326 1.16 ad iop_msg_unmap(sc, im); 1327 1.16 ad iop_msg_free(sc, im); 1328 1.16 ad free(pgop, M_DEVBUF); 1329 1.16 ad return (rv); 1330 1.16 ad } 1331 1.16 ad 1332 1.16 ad /* 1333 1.16 ad * Delete all rows in a tablular parameter group. 
1334 1.16 ad */ 1335 1.16 ad int 1336 1.16 ad iop_table_clear(struct iop_softc *sc, int tid, int group) 1337 1.16 ad { 1338 1.16 ad struct iop_msg *im; 1339 1.16 ad struct i2o_util_params_op *mf; 1340 1.16 ad struct iop_pgop pgop; 1341 1.16 ad u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1342 1.16 ad int rv; 1343 1.16 ad 1344 1.16 ad im = iop_msg_alloc(sc, IM_WAIT); 1345 1.16 ad 1346 1.16 ad mf = (struct i2o_util_params_op *)mb; 1347 1.16 ad mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op); 1348 1.16 ad mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET); 1349 1.16 ad mf->msgictx = IOP_ICTX; 1350 1.16 ad mf->msgtctx = im->im_tctx; 1351 1.16 ad mf->flags = 0; 1352 1.16 ad 1353 1.16 ad pgop.olh.count = htole16(1); 1354 1.16 ad pgop.olh.reserved = htole16(0); 1355 1.16 ad pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR); 1356 1.16 ad pgop.oat.fieldcount = htole16(0); 1357 1.16 ad pgop.oat.group = htole16(group); 1358 1.16 ad pgop.oat.fields[0] = htole16(0); 1359 1.16 ad 1360 1.16 ad iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL); 1361 1.16 ad rv = iop_msg_post(sc, im, mb, 30000); 1362 1.16 ad if (rv != 0) 1363 1.82 chs aprint_error_dev(sc->sc_dev, "TABLE_CLEAR failed for tid %d group %d\n", 1364 1.69 cegger tid, group); 1365 1.16 ad 1366 1.16 ad iop_msg_unmap(sc, im); 1367 1.16 ad iop_msg_free(sc, im); 1368 1.16 ad return (rv); 1369 1.16 ad } 1370 1.16 ad 1371 1.16 ad /* 1372 1.16 ad * Add a single row to a tabular parameter group. The row can have only one 1373 1.16 ad * field. 
1374 1.16 ad */ 1375 1.16 ad int 1376 1.16 ad iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf, 1377 1.16 ad int size, int row) 1378 1.16 ad { 1379 1.16 ad struct iop_msg *im; 1380 1.16 ad struct i2o_util_params_op *mf; 1381 1.16 ad struct iop_pgop *pgop; 1382 1.16 ad int rv, totsize; 1383 1.16 ad u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)]; 1384 1.16 ad 1385 1.16 ad totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size; 1386 1.16 ad 1387 1.16 ad im = iop_msg_alloc(sc, IM_WAIT); 1388 1.90 chs pgop = malloc(totsize, M_DEVBUF, M_WAITOK); 1389 1.16 ad mf = (struct i2o_util_params_op *)mb; 1390 1.16 ad mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op); 1391 1.16 ad mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET); 1392 1.16 ad mf->msgictx = IOP_ICTX; 1393 1.16 ad mf->msgtctx = im->im_tctx; 1394 1.16 ad mf->flags = 0; 1395 1.16 ad 1396 1.16 ad pgop->olh.count = htole16(1); 1397 1.16 ad pgop->olh.reserved = htole16(0); 1398 1.16 ad pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD); 1399 1.16 ad pgop->oat.fieldcount = htole16(1); 1400 1.16 ad pgop->oat.group = htole16(group); 1401 1.16 ad pgop->oat.fields[0] = htole16(0); /* FieldIdx */ 1402 1.16 ad pgop->oat.fields[1] = htole16(1); /* RowCount */ 1403 1.16 ad pgop->oat.fields[2] = htole16(row); /* KeyValue */ 1404 1.16 ad memcpy(&pgop->oat.fields[3], buf, size); 1405 1.16 ad 1406 1.16 ad iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL); 1407 1.16 ad rv = iop_msg_post(sc, im, mb, 30000); 1408 1.16 ad if (rv != 0) 1409 1.82 chs aprint_error_dev(sc->sc_dev, "ADD_ROW failed for tid %d group %d row %d\n", 1410 1.69 cegger tid, group, row); 1411 1.16 ad 1412 1.16 ad iop_msg_unmap(sc, im); 1413 1.16 ad iop_msg_free(sc, im); 1414 1.16 ad free(pgop, M_DEVBUF); 1415 1.16 ad return (rv); 1416 1.16 ad } 1417 1.16 ad 1418 1.16 ad /* 1419 1.5 ad * Execute a simple command (no parameters). 
 */
/*
 * Post a parameterless message to `tid' and wait (sleeping if `async',
 * polling otherwise) up to `timo' ms for completion.
 */
int
iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
    int async, int timo)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv, fl;

	fl = (async != 0 ? IM_WAIT : IM_POLL);
	im = iop_msg_alloc(sc, fl);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(tid, function);
	mf.msgictx = ictx;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, timo);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Post the system table to the IOP.
 */
static int
iop_systab_set(struct iop_softc *sc)
{
	struct i2o_exec_sys_tab_set *mf;
	struct iop_msg *im;
	bus_space_handle_t bsh;
	bus_addr_t boo;
	u_int32_t mema[2], ioa[2];
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);

	mf = (struct i2o_exec_sys_tab_set *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	/* IOP IDs 0 and 1 are reserved; ours is unit number + 2. */
	mf->iopid = (device_unit(sc->sc_dev) + 2) << 12;
	mf->segnumber = 0;

	/* Sizes are already little-endian in the cached status record. */
	mema[1] = sc->sc_status.desiredprivmemsize;
	ioa[1] = sc->sc_status.desiredpriviosize;

	/* Grant the IOP the private memory space it asked for, if any. */
	if (mema[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
		mema[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "can't alloc priv mem space, err = %d\n", rv);
			mema[0] = 0;
			mema[1] = 0;
		}
	}

	/* Likewise for private I/O space. */
	if (ioa[1] != 0) {
		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
		ioa[0] = htole32(boo);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "can't alloc priv i/o space, err = %d\n", rv);
			ioa[0] = 0;
			ioa[1] = 0;
		}
	}

	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
	rv = iop_msg_post(sc, im, mb, 5000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Reset the IOP.  Must be called with interrupts disabled.
 */
static int
iop_reset(struct iop_softc *sc)
{
	u_int32_t mfa, *sw;
	struct i2o_exec_iop_reset mf;
	int rv;
	paddr_t pa;

	/* Reset status is reported through the scratch status word. */
	sw = (u_int32_t *)sc->sc_scr;
	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.statuslow = (u_int32_t)pa;
	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);

	/* Clear the status word before posting the reset. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE);
	*sw = htole32(0);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
		return (rv);

	/* Wait up to 2.5s for the IOP to acknowledge the reset. */
	POLL(2500,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD), *sw != 0));
	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
		aprint_error_dev(sc->sc_dev, "reset rejected, status 0x%x\n",
		    le32toh(*sw));
		return (EIO);
	}

	/*
	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
	 * the inbound queue to become responsive.
	 */
	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
	if (mfa == IOP_MFA_EMPTY) {
		aprint_error_dev(sc->sc_dev, "reset failed\n");
		return (EIO);
	}

	/* Give back the MFA we just pulled to probe the queue. */
	iop_release_mfa(sc, mfa);
	return (0);
}

/*
 * Register a new initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
{
	static int ictxgen;

	/* 0 is reserved (by us) for system messages. */
	ii->ii_ictx = ++ictxgen;

	/*
	 * `Utility initiators' don't make it onto the per-IOP initiator list
	 * (which is used only for configuration), but do get one slot on
	 * the inbound queue.
	 */
	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
		sc->sc_nii++;
	} else
		sc->sc_nuii++;

	cv_init(&ii->ii_cv, "iopevt");

	/* The ictx hash is walked from interrupt context. */
	mutex_spin_enter(&sc->sc_intrlock);
	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);
}

/*
 * Unregister an initiator.  Must be called with the configuration lock
 * held.
 */
void
iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
{

	if ((ii->ii_flags & II_UTILITY) == 0) {
		LIST_REMOVE(ii, ii_list);
		sc->sc_nii--;
	} else
		sc->sc_nuii--;

	mutex_spin_enter(&sc->sc_intrlock);
	LIST_REMOVE(ii, ii_hash);
	mutex_spin_exit(&sc->sc_intrlock);

	cv_destroy(&ii->ii_cv);
}

/*
 * Handle a reply frame from the IOP.  Called with sc_intrlock held;
 * returns the request status, or -1 if the reply could not be matched
 * to an initiator or message wrapper.
 */
static int
iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
{
	struct iop_msg *im;
	struct i2o_reply *rb;
	struct i2o_fault_notify *fn;
	struct iop_initiator *ii;
	u_int off, ictx, tctx, status, size;

	KASSERT(mutex_owned(&sc->sc_intrlock));

	/* The reply MFA is an offset into the reply-frame DMA region. */
	off = (int)(rmfa - sc->sc_rep_phys);
	rb = (struct i2o_reply *)((char *)sc->sc_rep + off);

	/* Perform reply queue DMA synchronisation. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);

#ifdef I2ODEBUG
	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
		panic("iop_handle_reply: 64-bit reply");
#endif
	/*
	 * Find the initiator.
	 */
	ictx = le32toh(rb->msgictx);
	if (ictx == IOP_ICTX)
		ii = NULL;
	else {
		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
			if (ii->ii_ictx == ictx)
				break;
		if (ii == NULL) {
#ifdef I2ODEBUG
			iop_reply_print(sc, rb);
#endif
			aprint_error_dev(sc->sc_dev, "WARNING: bad ictx returned (%x)\n",
			    ictx);
			return (-1);
		}
	}

	/*
	 * If we received a transport failure notice, we've got to dig the
	 * transaction context (if any) out of the original message frame,
	 * and then release the original MFA back to the inbound FIFO.
	 */
	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		status = I2O_STATUS_SUCCESS;

		fn = (struct i2o_fault_notify *)rb;
		/* The tctx sits at byte offset 12 of the failed frame. */
		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
		iop_release_mfa(sc, fn->lowmfa);
		iop_tfn_print(sc, fn);
	} else {
		status = rb->reqstatus;
		tctx = le32toh(rb->msgtctx);
	}

	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
		/*
		 * This initiator tracks state using message wrappers.
		 *
		 * Find the originating message wrapper, and if requested
		 * notify the initiator.
		 */
		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
		/*
		 * NOTE(review): the range test uses `>' against sc_maxib
		 * and the `im != NULL' test below can never fail given the
		 * pointer arithmetic above — confirm intent.
		 */
		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
		    (im->im_flags & IM_ALLOCED) == 0 ||
		    tctx != im->im_tctx) {
			aprint_error_dev(sc->sc_dev, "WARNING: bad tctx returned (0x%08x, %p)\n", tctx, im);
			if (im != NULL)
				aprint_error_dev(sc->sc_dev, "flags=0x%08x tctx=0x%08x\n",
				    im->im_flags, im->im_tctx);
#ifdef I2ODEBUG
			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
				iop_reply_print(sc, rb);
#endif
			return (-1);
		}

		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
			im->im_flags |= IM_FAIL;

#ifdef I2ODEBUG
		if ((im->im_flags & IM_REPLIED) != 0)
			panic("%s: dup reply", device_xname(sc->sc_dev));
#endif
		im->im_flags |= IM_REPLIED;

#ifdef I2ODEBUG
		if (status != I2O_STATUS_SUCCESS)
			iop_reply_print(sc, rb);
#endif
		im->im_reqstatus = status;
		im->im_detstatus = le16toh(rb->detail);

		/* Copy the reply frame, if requested. */
		if (im->im_rb != NULL) {
			/* Reply size in bytes, from the msgflags field. */
			size = (le32toh(rb->msgflags) >> 14) & ~3;
#ifdef I2ODEBUG
			if (size > sc->sc_framesize)
				panic("iop_handle_reply: reply too large");
#endif
			memcpy(im->im_rb, rb, size);
		}

		/* Notify the initiator. */
		if ((im->im_flags & IM_WAIT) != 0)
			cv_broadcast(&im->im_cv);
		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
			if (ii != NULL) {
				/* Drop the spin lock around the callback. */
				mutex_spin_exit(&sc->sc_intrlock);
				(*ii->ii_intr)(ii->ii_dv, im, rb);
				mutex_spin_enter(&sc->sc_intrlock);
			}
		}
	} else {
		/*
		 * This initiator discards message wrappers.
		 *
		 * Simply pass the reply frame to the initiator.
		 */
		if (ii != NULL) {
			mutex_spin_exit(&sc->sc_intrlock);
			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
			mutex_spin_enter(&sc->sc_intrlock);
		}
	}

	return (status);
}

/*
 * Handle an interrupt from the IOP.  Returns 1 if the interrupt was
 * ours, 0 otherwise.
 */
int
iop_intr(void *arg)
{
	struct iop_softc *sc;
	u_int32_t rmfa;

	sc = arg;

	mutex_spin_enter(&sc->sc_intrlock);

	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) {
		mutex_spin_exit(&sc->sc_intrlock);
		return (0);
	}

	/* Drain the outbound FIFO and return each reply MFA to the IOP. */
	for (;;) {
		/* Double read to account for IOP bug. */
		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
			rmfa = iop_inl(sc, IOP_REG_OFIFO);
			if (rmfa == IOP_MFA_EMPTY)
				break;
		}
		iop_handle_reply(sc, rmfa);
		iop_outl(sc, IOP_REG_OFIFO, rmfa);
	}

	mutex_spin_exit(&sc->sc_intrlock);
	return (1);
}

/*
 * Handle an event signalled by the executive.
1778 1.5 ad */ 1779 1.5 ad static void 1780 1.76 cegger iop_intr_event(device_t dv, struct iop_msg *im, void *reply) 1781 1.5 ad { 1782 1.5 ad struct i2o_util_event_register_reply *rb; 1783 1.5 ad u_int event; 1784 1.5 ad 1785 1.5 ad rb = reply; 1786 1.5 ad 1787 1.11 ad if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) 1788 1.5 ad return; 1789 1.5 ad 1790 1.11 ad event = le32toh(rb->event); 1791 1.69 cegger printf("%s: event 0x%08x received\n", device_xname(dv), event); 1792 1.1 ad } 1793 1.1 ad 1794 1.47 perry /* 1795 1.1 ad * Allocate a message wrapper. 1796 1.1 ad */ 1797 1.11 ad struct iop_msg * 1798 1.15 ad iop_msg_alloc(struct iop_softc *sc, int flags) 1799 1.1 ad { 1800 1.1 ad struct iop_msg *im; 1801 1.11 ad static u_int tctxgen; 1802 1.65 ad int i; 1803 1.1 ad 1804 1.1 ad #ifdef I2ODEBUG 1805 1.1 ad if ((flags & IM_SYSMASK) != 0) 1806 1.1 ad panic("iop_msg_alloc: system flags specified"); 1807 1.1 ad #endif 1808 1.1 ad 1809 1.65 ad mutex_spin_enter(&sc->sc_intrlock); 1810 1.11 ad im = SLIST_FIRST(&sc->sc_im_freelist); 1811 1.11 ad #if defined(DIAGNOSTIC) || defined(I2ODEBUG) 1812 1.11 ad if (im == NULL) 1813 1.11 ad panic("iop_msg_alloc: no free wrappers"); 1814 1.11 ad #endif 1815 1.11 ad SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain); 1816 1.65 ad mutex_spin_exit(&sc->sc_intrlock); 1817 1.1 ad 1818 1.11 ad im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen; 1819 1.11 ad tctxgen += (1 << IOP_TCTX_SHIFT); 1820 1.1 ad im->im_flags = flags | IM_ALLOCED; 1821 1.11 ad im->im_rb = NULL; 1822 1.11 ad i = 0; 1823 1.11 ad do { 1824 1.11 ad im->im_xfer[i++].ix_size = 0; 1825 1.11 ad } while (i < IOP_MAX_MSG_XFERS); 1826 1.1 ad 1827 1.11 ad return (im); 1828 1.1 ad } 1829 1.1 ad 1830 1.47 perry /* 1831 1.1 ad * Free a message wrapper. 
1832 1.1 ad */ 1833 1.1 ad void 1834 1.11 ad iop_msg_free(struct iop_softc *sc, struct iop_msg *im) 1835 1.1 ad { 1836 1.1 ad 1837 1.1 ad #ifdef I2ODEBUG 1838 1.1 ad if ((im->im_flags & IM_ALLOCED) == 0) 1839 1.1 ad panic("iop_msg_free: wrapper not allocated"); 1840 1.1 ad #endif 1841 1.1 ad 1842 1.1 ad im->im_flags = 0; 1843 1.65 ad mutex_spin_enter(&sc->sc_intrlock); 1844 1.11 ad SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain); 1845 1.65 ad mutex_spin_exit(&sc->sc_intrlock); 1846 1.1 ad } 1847 1.1 ad 1848 1.1 ad /* 1849 1.47 perry * Map a data transfer. Write a scatter-gather list into the message frame. 1850 1.1 ad */ 1851 1.1 ad int 1852 1.11 ad iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb, 1853 1.15 ad void *xferaddr, int xfersize, int out, struct proc *up) 1854 1.1 ad { 1855 1.11 ad bus_dmamap_t dm; 1856 1.11 ad bus_dma_segment_t *ds; 1857 1.1 ad struct iop_xfer *ix; 1858 1.11 ad u_int rv, i, nsegs, flg, off, xn; 1859 1.11 ad u_int32_t *p; 1860 1.5 ad 1861 1.11 ad for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++) 1862 1.1 ad if (ix->ix_size == 0) 1863 1.1 ad break; 1864 1.11 ad 1865 1.1 ad #ifdef I2ODEBUG 1866 1.11 ad if (xfersize == 0) 1867 1.11 ad panic("iop_msg_map: null transfer"); 1868 1.11 ad if (xfersize > IOP_MAX_XFER) 1869 1.11 ad panic("iop_msg_map: transfer too large"); 1870 1.11 ad if (xn == IOP_MAX_MSG_XFERS) 1871 1.1 ad panic("iop_msg_map: too many xfers"); 1872 1.1 ad #endif 1873 1.1 ad 1874 1.11 ad /* 1875 1.11 ad * Only the first DMA map is static. 1876 1.11 ad */ 1877 1.11 ad if (xn != 0) { 1878 1.1 ad rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER, 1879 1.11 ad IOP_MAX_SEGS, IOP_MAX_XFER, 0, 1880 1.1 ad BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map); 1881 1.1 ad if (rv != 0) 1882 1.1 ad return (rv); 1883 1.1 ad } 1884 1.1 ad 1885 1.11 ad dm = ix->ix_map; 1886 1.15 ad rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up, 1887 1.15 ad (up == NULL ? 
BUS_DMA_NOWAIT : 0)); 1888 1.11 ad if (rv != 0) 1889 1.11 ad goto bad; 1890 1.11 ad 1891 1.11 ad /* 1892 1.11 ad * How many SIMPLE SG elements can we fit in this message? 1893 1.11 ad */ 1894 1.11 ad off = mb[0] >> 16; 1895 1.11 ad p = mb + off; 1896 1.19 ad nsegs = ((sc->sc_framesize >> 2) - off) >> 1; 1897 1.11 ad 1898 1.11 ad if (dm->dm_nsegs > nsegs) { 1899 1.11 ad bus_dmamap_unload(sc->sc_dmat, ix->ix_map); 1900 1.11 ad rv = EFBIG; 1901 1.11 ad DPRINTF(("iop_msg_map: too many segs\n")); 1902 1.11 ad goto bad; 1903 1.11 ad } 1904 1.1 ad 1905 1.11 ad nsegs = dm->dm_nsegs; 1906 1.11 ad xfersize = 0; 1907 1.1 ad 1908 1.11 ad /* 1909 1.11 ad * Write out the SG list. 1910 1.11 ad */ 1911 1.1 ad if (out) 1912 1.11 ad flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT; 1913 1.1 ad else 1914 1.11 ad flg = I2O_SGL_SIMPLE; 1915 1.1 ad 1916 1.11 ad for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) { 1917 1.11 ad p[0] = (u_int32_t)ds->ds_len | flg; 1918 1.11 ad p[1] = (u_int32_t)ds->ds_addr; 1919 1.11 ad xfersize += ds->ds_len; 1920 1.1 ad } 1921 1.1 ad 1922 1.11 ad p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER; 1923 1.11 ad p[1] = (u_int32_t)ds->ds_addr; 1924 1.11 ad xfersize += ds->ds_len; 1925 1.11 ad 1926 1.11 ad /* Fix up the transfer record, and sync the map. */ 1927 1.11 ad ix->ix_flags = (out ? IX_OUT : IX_IN); 1928 1.11 ad ix->ix_size = xfersize; 1929 1.11 ad bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize, 1930 1.72 ad out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD); 1931 1.11 ad 1932 1.1 ad /* 1933 1.1 ad * If this is the first xfer we've mapped for this message, adjust 1934 1.1 ad * the SGL offset field in the message header. 
1935 1.1 ad */ 1936 1.2 ad if ((im->im_flags & IM_SGLOFFADJ) == 0) { 1937 1.11 ad mb[0] += (mb[0] >> 12) & 0xf0; 1938 1.2 ad im->im_flags |= IM_SGLOFFADJ; 1939 1.2 ad } 1940 1.11 ad mb[0] += (nsegs << 17); 1941 1.11 ad return (0); 1942 1.11 ad 1943 1.11 ad bad: 1944 1.11 ad if (xn != 0) 1945 1.11 ad bus_dmamap_destroy(sc->sc_dmat, ix->ix_map); 1946 1.11 ad return (rv); 1947 1.11 ad } 1948 1.11 ad 1949 1.11 ad /* 1950 1.11 ad * Map a block I/O data transfer (different in that there's only one per 1951 1.11 ad * message maximum, and PAGE addressing may be used). Write a scatter 1952 1.11 ad * gather list into the message frame. 1953 1.11 ad */ 1954 1.11 ad int 1955 1.11 ad iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb, 1956 1.11 ad void *xferaddr, int xfersize, int out) 1957 1.11 ad { 1958 1.11 ad bus_dma_segment_t *ds; 1959 1.11 ad bus_dmamap_t dm; 1960 1.11 ad struct iop_xfer *ix; 1961 1.11 ad u_int rv, i, nsegs, off, slen, tlen, flg; 1962 1.11 ad paddr_t saddr, eaddr; 1963 1.11 ad u_int32_t *p; 1964 1.11 ad 1965 1.11 ad #ifdef I2ODEBUG 1966 1.11 ad if (xfersize == 0) 1967 1.11 ad panic("iop_msg_map_bio: null transfer"); 1968 1.11 ad if (xfersize > IOP_MAX_XFER) 1969 1.11 ad panic("iop_msg_map_bio: transfer too large"); 1970 1.11 ad if ((im->im_flags & IM_SGLOFFADJ) != 0) 1971 1.11 ad panic("iop_msg_map_bio: SGLOFFADJ"); 1972 1.11 ad #endif 1973 1.11 ad 1974 1.11 ad ix = im->im_xfer; 1975 1.11 ad dm = ix->ix_map; 1976 1.15 ad rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL, 1977 1.15 ad BUS_DMA_NOWAIT | BUS_DMA_STREAMING); 1978 1.11 ad if (rv != 0) 1979 1.11 ad return (rv); 1980 1.11 ad 1981 1.11 ad off = mb[0] >> 16; 1982 1.19 ad nsegs = ((sc->sc_framesize >> 2) - off) >> 1; 1983 1.11 ad 1984 1.11 ad /* 1985 1.11 ad * If the transfer is highly fragmented and won't fit using SIMPLE 1986 1.11 ad * elements, use PAGE_LIST elements instead. SIMPLE elements are 1987 1.11 ad * potentially more efficient, both for us and the IOP. 
1988 1.11 ad */ 1989 1.11 ad if (dm->dm_nsegs > nsegs) { 1990 1.11 ad nsegs = 1; 1991 1.11 ad p = mb + off + 1; 1992 1.11 ad 1993 1.11 ad /* XXX This should be done with a bus_space flag. */ 1994 1.11 ad for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) { 1995 1.11 ad slen = ds->ds_len; 1996 1.11 ad saddr = ds->ds_addr; 1997 1.11 ad 1998 1.11 ad while (slen > 0) { 1999 1.11 ad eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1); 2000 1.89 riastrad tlen = uimin(eaddr - saddr, slen); 2001 1.11 ad slen -= tlen; 2002 1.11 ad *p++ = le32toh(saddr); 2003 1.11 ad saddr = eaddr; 2004 1.11 ad nsegs++; 2005 1.11 ad } 2006 1.11 ad } 2007 1.11 ad 2008 1.11 ad mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER | 2009 1.11 ad I2O_SGL_END; 2010 1.11 ad if (out) 2011 1.11 ad mb[off] |= I2O_SGL_DATA_OUT; 2012 1.11 ad } else { 2013 1.11 ad p = mb + off; 2014 1.13 ad nsegs = dm->dm_nsegs; 2015 1.11 ad 2016 1.11 ad if (out) 2017 1.11 ad flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT; 2018 1.11 ad else 2019 1.11 ad flg = I2O_SGL_SIMPLE; 2020 1.11 ad 2021 1.11 ad for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) { 2022 1.11 ad p[0] = (u_int32_t)ds->ds_len | flg; 2023 1.11 ad p[1] = (u_int32_t)ds->ds_addr; 2024 1.11 ad } 2025 1.11 ad 2026 1.11 ad p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER | 2027 1.11 ad I2O_SGL_END; 2028 1.11 ad p[1] = (u_int32_t)ds->ds_addr; 2029 1.11 ad nsegs <<= 1; 2030 1.11 ad } 2031 1.11 ad 2032 1.11 ad /* Fix up the transfer record, and sync the map. */ 2033 1.11 ad ix->ix_flags = (out ? IX_OUT : IX_IN); 2034 1.11 ad ix->ix_size = xfersize; 2035 1.11 ad bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize, 2036 1.72 ad out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD); 2037 1.11 ad 2038 1.11 ad /* 2039 1.11 ad * Adjust the SGL offset and total message size fields. We don't 2040 1.11 ad * set IM_SGLOFFADJ, since it's used only for SIMPLE elements. 
2041 1.11 ad */ 2042 1.11 ad mb[0] += ((off << 4) + (nsegs << 16)); 2043 1.1 ad return (0); 2044 1.1 ad } 2045 1.1 ad 2046 1.1 ad /* 2047 1.1 ad * Unmap all data transfers associated with a message wrapper. 2048 1.1 ad */ 2049 1.1 ad void 2050 1.1 ad iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im) 2051 1.1 ad { 2052 1.1 ad struct iop_xfer *ix; 2053 1.1 ad int i; 2054 1.11 ad 2055 1.47 perry #ifdef I2ODEBUG 2056 1.11 ad if (im->im_xfer[0].ix_size == 0) 2057 1.11 ad panic("iop_msg_unmap: no transfers mapped"); 2058 1.11 ad #endif 2059 1.11 ad 2060 1.11 ad for (ix = im->im_xfer, i = 0;;) { 2061 1.1 ad bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size, 2062 1.1 ad ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE : 2063 1.1 ad BUS_DMASYNC_POSTREAD); 2064 1.1 ad bus_dmamap_unload(sc->sc_dmat, ix->ix_map); 2065 1.1 ad 2066 1.1 ad /* Only the first DMA map is static. */ 2067 1.1 ad if (i != 0) 2068 1.1 ad bus_dmamap_destroy(sc->sc_dmat, ix->ix_map); 2069 1.87 msaitoh if (++i >= IOP_MAX_MSG_XFERS) 2070 1.87 msaitoh break; 2071 1.47 perry if ((++ix)->ix_size == 0) 2072 1.11 ad break; 2073 1.1 ad } 2074 1.1 ad } 2075 1.1 ad 2076 1.11 ad /* 2077 1.11 ad * Post a message frame to the IOP's inbound queue. 2078 1.1 ad */ 2079 1.1 ad int 2080 1.11 ad iop_post(struct iop_softc *sc, u_int32_t *mb) 2081 1.1 ad { 2082 1.11 ad u_int32_t mfa; 2083 1.11 ad 2084 1.15 ad #ifdef I2ODEBUG 2085 1.19 ad if ((mb[0] >> 16) > (sc->sc_framesize >> 2)) 2086 1.13 ad panic("iop_post: frame too large"); 2087 1.15 ad #endif 2088 1.13 ad 2089 1.65 ad mutex_spin_enter(&sc->sc_intrlock); 2090 1.11 ad 2091 1.11 ad /* Allocate a slot with the IOP. 
*/ 2092 1.11 ad if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) 2093 1.11 ad if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) { 2094 1.65 ad mutex_spin_exit(&sc->sc_intrlock); 2095 1.82 chs aprint_error_dev(sc->sc_dev, "mfa not forthcoming\n"); 2096 1.11 ad return (EAGAIN); 2097 1.11 ad } 2098 1.11 ad 2099 1.15 ad /* Perform reply buffer DMA synchronisation. */ 2100 1.72 ad if (sc->sc_rep_size != 0) { 2101 1.11 ad bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0, 2102 1.11 ad sc->sc_rep_size, BUS_DMASYNC_PREREAD); 2103 1.72 ad } 2104 1.1 ad 2105 1.11 ad /* Copy out the message frame. */ 2106 1.30 ad bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb, 2107 1.29 msaitoh mb[0] >> 16); 2108 1.30 ad bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, 2109 1.29 msaitoh (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE); 2110 1.11 ad 2111 1.11 ad /* Post the MFA back to the IOP. */ 2112 1.11 ad iop_outl(sc, IOP_REG_IFIFO, mfa); 2113 1.1 ad 2114 1.65 ad mutex_spin_exit(&sc->sc_intrlock); 2115 1.11 ad return (0); 2116 1.11 ad } 2117 1.1 ad 2118 1.11 ad /* 2119 1.11 ad * Post a message to the IOP and deal with completion. 2120 1.11 ad */ 2121 1.11 ad int 2122 1.11 ad iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo) 2123 1.11 ad { 2124 1.11 ad u_int32_t *mb; 2125 1.65 ad int rv; 2126 1.1 ad 2127 1.11 ad mb = xmb; 2128 1.1 ad 2129 1.11 ad /* Terminate the scatter/gather list chain. 
*/ 2130 1.1 ad if ((im->im_flags & IM_SGLOFFADJ) != 0) 2131 1.11 ad mb[(mb[0] >> 16) - 2] |= I2O_SGL_END; 2132 1.1 ad 2133 1.11 ad if ((rv = iop_post(sc, mb)) != 0) 2134 1.11 ad return (rv); 2135 1.1 ad 2136 1.15 ad if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) { 2137 1.11 ad if ((im->im_flags & IM_POLL) != 0) 2138 1.11 ad iop_msg_poll(sc, im, timo); 2139 1.11 ad else 2140 1.11 ad iop_msg_wait(sc, im, timo); 2141 1.1 ad 2142 1.65 ad mutex_spin_enter(&sc->sc_intrlock); 2143 1.11 ad if ((im->im_flags & IM_REPLIED) != 0) { 2144 1.11 ad if ((im->im_flags & IM_NOSTATUS) != 0) 2145 1.11 ad rv = 0; 2146 1.11 ad else if ((im->im_flags & IM_FAIL) != 0) 2147 1.11 ad rv = ENXIO; 2148 1.11 ad else if (im->im_reqstatus != I2O_STATUS_SUCCESS) 2149 1.11 ad rv = EIO; 2150 1.11 ad else 2151 1.11 ad rv = 0; 2152 1.11 ad } else 2153 1.11 ad rv = EBUSY; 2154 1.65 ad mutex_spin_exit(&sc->sc_intrlock); 2155 1.11 ad } else 2156 1.11 ad rv = 0; 2157 1.11 ad 2158 1.11 ad return (rv); 2159 1.11 ad } 2160 1.11 ad 2161 1.47 perry /* 2162 1.11 ad * Spin until the specified message is replied to. 2163 1.11 ad */ 2164 1.11 ad static void 2165 1.11 ad iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo) 2166 1.11 ad { 2167 1.11 ad u_int32_t rmfa; 2168 1.11 ad 2169 1.65 ad mutex_spin_enter(&sc->sc_intrlock); 2170 1.1 ad 2171 1.1 ad for (timo *= 10; timo != 0; timo--) { 2172 1.5 ad if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) { 2173 1.5 ad /* Double read to account for IOP bug. */ 2174 1.5 ad rmfa = iop_inl(sc, IOP_REG_OFIFO); 2175 1.5 ad if (rmfa == IOP_MFA_EMPTY) 2176 1.5 ad rmfa = iop_inl(sc, IOP_REG_OFIFO); 2177 1.11 ad if (rmfa != IOP_MFA_EMPTY) { 2178 1.35 simonb iop_handle_reply(sc, rmfa); 2179 1.11 ad 2180 1.11 ad /* 2181 1.11 ad * Return the reply frame to the IOP's 2182 1.11 ad * outbound FIFO. 
2183 1.11 ad */ 2184 1.11 ad iop_outl(sc, IOP_REG_OFIFO, rmfa); 2185 1.11 ad } 2186 1.5 ad } 2187 1.1 ad if ((im->im_flags & IM_REPLIED) != 0) 2188 1.1 ad break; 2189 1.65 ad mutex_spin_exit(&sc->sc_intrlock); 2190 1.1 ad DELAY(100); 2191 1.65 ad mutex_spin_enter(&sc->sc_intrlock); 2192 1.1 ad } 2193 1.1 ad 2194 1.1 ad if (timo == 0) { 2195 1.5 ad #ifdef I2ODEBUG 2196 1.82 chs printf("%s: poll - no reply\n", device_xname(sc->sc_dev)); 2197 1.11 ad if (iop_status_get(sc, 1) != 0) 2198 1.11 ad printf("iop_msg_poll: unable to retrieve status\n"); 2199 1.5 ad else 2200 1.11 ad printf("iop_msg_poll: IOP state = %d\n", 2201 1.47 perry (le32toh(sc->sc_status.segnumber) >> 16) & 0xff); 2202 1.5 ad #endif 2203 1.1 ad } 2204 1.1 ad 2205 1.65 ad mutex_spin_exit(&sc->sc_intrlock); 2206 1.1 ad } 2207 1.1 ad 2208 1.1 ad /* 2209 1.11 ad * Sleep until the specified message is replied to. 2210 1.1 ad */ 2211 1.11 ad static void 2212 1.61 christos iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo) 2213 1.1 ad { 2214 1.65 ad int rv; 2215 1.1 ad 2216 1.65 ad mutex_spin_enter(&sc->sc_intrlock); 2217 1.5 ad if ((im->im_flags & IM_REPLIED) != 0) { 2218 1.65 ad mutex_spin_exit(&sc->sc_intrlock); 2219 1.11 ad return; 2220 1.5 ad } 2221 1.65 ad rv = cv_timedwait(&im->im_cv, &sc->sc_intrlock, mstohz(timo)); 2222 1.65 ad mutex_spin_exit(&sc->sc_intrlock); 2223 1.11 ad 2224 1.5 ad #ifdef I2ODEBUG 2225 1.5 ad if (rv != 0) { 2226 1.5 ad printf("iop_msg_wait: tsleep() == %d\n", rv); 2227 1.11 ad if (iop_status_get(sc, 0) != 0) 2228 1.84 christos printf("%s: unable to retrieve status\n", __func__); 2229 1.5 ad else 2230 1.84 christos printf("%s: IOP state = %d\n", __func__, 2231 1.47 perry (le32toh(sc->sc_status.segnumber) >> 16) & 0xff); 2232 1.5 ad } 2233 1.84 christos #else 2234 1.84 christos __USE(rv); 2235 1.5 ad #endif 2236 1.1 ad } 2237 1.1 ad 2238 1.1 ad /* 2239 1.1 ad * Release an unused message frame back to the IOP's inbound fifo. 
2240 1.1 ad */ 2241 1.1 ad static void 2242 1.1 ad iop_release_mfa(struct iop_softc *sc, u_int32_t mfa) 2243 1.1 ad { 2244 1.1 ad 2245 1.1 ad /* Use the frame to issue a no-op. */ 2246 1.30 ad iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16)); 2247 1.30 ad iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP)); 2248 1.30 ad iop_outl_msg(sc, mfa + 8, 0); 2249 1.30 ad iop_outl_msg(sc, mfa + 12, 0); 2250 1.1 ad 2251 1.5 ad iop_outl(sc, IOP_REG_IFIFO, mfa); 2252 1.1 ad } 2253 1.1 ad 2254 1.1 ad #ifdef I2ODEBUG 2255 1.1 ad /* 2256 1.11 ad * Dump a reply frame header. 2257 1.1 ad */ 2258 1.1 ad static void 2259 1.11 ad iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb) 2260 1.1 ad { 2261 1.5 ad u_int function, detail; 2262 1.1 ad const char *statusstr; 2263 1.1 ad 2264 1.5 ad function = (le32toh(rb->msgfunc) >> 24) & 0xff; 2265 1.1 ad detail = le16toh(rb->detail); 2266 1.1 ad 2267 1.82 chs printf("%s: reply:\n", device_xname(sc->sc_dev)); 2268 1.5 ad 2269 1.1 ad if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0])) 2270 1.1 ad statusstr = iop_status[rb->reqstatus]; 2271 1.1 ad else 2272 1.1 ad statusstr = "undefined error code"; 2273 1.1 ad 2274 1.47 perry printf("%s: function=0x%02x status=0x%02x (%s)\n", 2275 1.82 chs device_xname(sc->sc_dev), function, rb->reqstatus, statusstr); 2276 1.5 ad printf("%s: detail=0x%04x ictx=0x%08x tctx=0x%08x\n", 2277 1.82 chs device_xname(sc->sc_dev), detail, le32toh(rb->msgictx), 2278 1.5 ad le32toh(rb->msgtctx)); 2279 1.82 chs printf("%s: tidi=%d tidt=%d flags=0x%02x\n", device_xname(sc->sc_dev), 2280 1.5 ad (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095, 2281 1.5 ad (le32toh(rb->msgflags) >> 8) & 0xff); 2282 1.1 ad } 2283 1.1 ad #endif 2284 1.1 ad 2285 1.1 ad /* 2286 1.11 ad * Dump a transport failure reply. 
2287 1.11 ad */ 2288 1.11 ad static void 2289 1.11 ad iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn) 2290 1.11 ad { 2291 1.11 ad 2292 1.82 chs printf("%s: WARNING: transport failure:\n", device_xname(sc->sc_dev)); 2293 1.11 ad 2294 1.82 chs printf("%s: ictx=0x%08x tctx=0x%08x\n", device_xname(sc->sc_dev), 2295 1.11 ad le32toh(fn->msgictx), le32toh(fn->msgtctx)); 2296 1.11 ad printf("%s: failurecode=0x%02x severity=0x%02x\n", 2297 1.82 chs device_xname(sc->sc_dev), fn->failurecode, fn->severity); 2298 1.11 ad printf("%s: highestver=0x%02x lowestver=0x%02x\n", 2299 1.82 chs device_xname(sc->sc_dev), fn->highestver, fn->lowestver); 2300 1.11 ad } 2301 1.11 ad 2302 1.11 ad /* 2303 1.5 ad * Translate an I2O ASCII field into a C string. 2304 1.1 ad */ 2305 1.1 ad void 2306 1.5 ad iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen) 2307 1.1 ad { 2308 1.5 ad int hc, lc, i, nit; 2309 1.1 ad 2310 1.1 ad dlen--; 2311 1.1 ad lc = 0; 2312 1.1 ad hc = 0; 2313 1.1 ad i = 0; 2314 1.5 ad 2315 1.5 ad /* 2316 1.5 ad * DPT use NUL as a space, whereas AMI use it as a terminator. The 2317 1.5 ad * spec has nothing to say about it. Since AMI fields are usually 2318 1.5 ad * filled with junk after the terminator, ... 2319 1.5 ad */ 2320 1.5 ad nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT); 2321 1.5 ad 2322 1.5 ad while (slen-- != 0 && dlen-- != 0) { 2323 1.5 ad if (nit && *src == '\0') 2324 1.5 ad break; 2325 1.5 ad else if (*src <= 0x20 || *src >= 0x7f) { 2326 1.1 ad if (hc) 2327 1.1 ad dst[i++] = ' '; 2328 1.1 ad } else { 2329 1.1 ad hc = 1; 2330 1.1 ad dst[i++] = *src; 2331 1.1 ad lc = i; 2332 1.1 ad } 2333 1.1 ad src++; 2334 1.1 ad } 2335 1.47 perry 2336 1.1 ad dst[lc] = '\0'; 2337 1.1 ad } 2338 1.1 ad 2339 1.1 ad /* 2340 1.11 ad * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it. 
2341 1.11 ad */ 2342 1.11 ad int 2343 1.11 ad iop_print_ident(struct iop_softc *sc, int tid) 2344 1.11 ad { 2345 1.11 ad struct { 2346 1.11 ad struct i2o_param_op_results pr; 2347 1.11 ad struct i2o_param_read_results prr; 2348 1.11 ad struct i2o_param_device_identity di; 2349 1.74 gmcgarry } __packed p; 2350 1.11 ad char buf[32]; 2351 1.11 ad int rv; 2352 1.11 ad 2353 1.16 ad rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p, 2354 1.16 ad sizeof(p), NULL); 2355 1.11 ad if (rv != 0) 2356 1.11 ad return (rv); 2357 1.11 ad 2358 1.11 ad iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf, 2359 1.11 ad sizeof(buf)); 2360 1.11 ad printf(" <%s, ", buf); 2361 1.11 ad iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf, 2362 1.11 ad sizeof(buf)); 2363 1.11 ad printf("%s, ", buf); 2364 1.11 ad iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf)); 2365 1.11 ad printf("%s>", buf); 2366 1.11 ad 2367 1.11 ad return (0); 2368 1.11 ad } 2369 1.11 ad 2370 1.11 ad /* 2371 1.5 ad * Claim or unclaim the specified TID. 2372 1.1 ad */ 2373 1.1 ad int 2374 1.5 ad iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release, 2375 1.15 ad int flags) 2376 1.1 ad { 2377 1.5 ad struct iop_msg *im; 2378 1.11 ad struct i2o_util_claim mf; 2379 1.5 ad int rv, func; 2380 1.5 ad 2381 1.5 ad func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM; 2382 1.15 ad im = iop_msg_alloc(sc, IM_WAIT); 2383 1.5 ad 2384 1.11 ad /* We can use the same structure, as they're identical. */ 2385 1.11 ad mf.msgflags = I2O_MSGFLAGS(i2o_util_claim); 2386 1.11 ad mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func); 2387 1.11 ad mf.msgictx = ii->ii_ictx; 2388 1.11 ad mf.msgtctx = im->im_tctx; 2389 1.11 ad mf.flags = flags; 2390 1.5 ad 2391 1.11 ad rv = iop_msg_post(sc, im, &mf, 5000); 2392 1.11 ad iop_msg_free(sc, im); 2393 1.5 ad return (rv); 2394 1.47 perry } 2395 1.5 ad 2396 1.5 ad /* 2397 1.5 ad * Perform an abort. 
2398 1.5 ad */ 2399 1.5 ad int iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func, 2400 1.15 ad int tctxabort, int flags) 2401 1.5 ad { 2402 1.5 ad struct iop_msg *im; 2403 1.11 ad struct i2o_util_abort mf; 2404 1.5 ad int rv; 2405 1.5 ad 2406 1.15 ad im = iop_msg_alloc(sc, IM_WAIT); 2407 1.1 ad 2408 1.11 ad mf.msgflags = I2O_MSGFLAGS(i2o_util_abort); 2409 1.11 ad mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT); 2410 1.11 ad mf.msgictx = ii->ii_ictx; 2411 1.11 ad mf.msgtctx = im->im_tctx; 2412 1.11 ad mf.flags = (func << 24) | flags; 2413 1.11 ad mf.tctxabort = tctxabort; 2414 1.1 ad 2415 1.11 ad rv = iop_msg_post(sc, im, &mf, 5000); 2416 1.11 ad iop_msg_free(sc, im); 2417 1.5 ad return (rv); 2418 1.1 ad } 2419 1.1 ad 2420 1.1 ad /* 2421 1.11 ad * Enable or disable reception of events for the specified device. 2422 1.1 ad */ 2423 1.5 ad int iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask) 2424 1.5 ad { 2425 1.11 ad struct i2o_util_event_register mf; 2426 1.5 ad 2427 1.11 ad mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register); 2428 1.11 ad mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER); 2429 1.11 ad mf.msgictx = ii->ii_ictx; 2430 1.15 ad mf.msgtctx = 0; 2431 1.11 ad mf.eventmask = mask; 2432 1.5 ad 2433 1.11 ad /* This message is replied to only when events are signalled. 
*/ 2434 1.15 ad return (iop_post(sc, (u_int32_t *)&mf)); 2435 1.5 ad } 2436 1.5 ad 2437 1.1 ad int 2438 1.61 christos iopopen(dev_t dev, int flag, int mode, struct lwp *l) 2439 1.1 ad { 2440 1.5 ad struct iop_softc *sc; 2441 1.93 ad int rv; 2442 1.5 ad 2443 1.73 tsutsui if ((sc = device_lookup_private(&iop_cd, minor(dev))) == NULL) 2444 1.11 ad return (ENXIO); 2445 1.93 ad 2446 1.93 ad mutex_enter(&sc->sc_conflock); 2447 1.11 ad if ((sc->sc_flags & IOP_ONLINE) == 0) 2448 1.93 ad rv = ENXIO; 2449 1.93 ad else if ((sc->sc_flags & IOP_OPEN) != 0) 2450 1.93 ad rv = EBUSY; 2451 1.93 ad else { 2452 1.93 ad sc->sc_flags |= IOP_OPEN; 2453 1.93 ad rv = 0; 2454 1.93 ad } 2455 1.93 ad mutex_exit(&sc->sc_conflock); 2456 1.5 ad 2457 1.93 ad return (rv); 2458 1.1 ad } 2459 1.1 ad 2460 1.5 ad int 2461 1.61 christos iopclose(dev_t dev, int flag, int mode, 2462 1.61 christos struct lwp *l) 2463 1.1 ad { 2464 1.5 ad struct iop_softc *sc; 2465 1.1 ad 2466 1.73 tsutsui sc = device_lookup_private(&iop_cd, minor(dev)); 2467 1.93 ad 2468 1.93 ad mutex_enter(&sc->sc_conflock); 2469 1.11 ad sc->sc_flags &= ~IOP_OPEN; 2470 1.93 ad mutex_exit(&sc->sc_conflock); 2471 1.15 ad 2472 1.5 ad return (0); 2473 1.1 ad } 2474 1.1 ad 2475 1.1 ad int 2476 1.64 christos iopioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l) 2477 1.1 ad { 2478 1.5 ad struct iop_softc *sc; 2479 1.5 ad struct iovec *iov; 2480 1.5 ad int rv, i; 2481 1.5 ad 2482 1.73 tsutsui sc = device_lookup_private(&iop_cd, minor(dev)); 2483 1.63 ad rv = 0; 2484 1.5 ad 2485 1.93 ad mutex_enter(&sc->sc_conflock); 2486 1.5 ad switch (cmd) { 2487 1.5 ad case IOPIOCPT: 2488 1.62 elad rv = kauth_authorize_device_passthru(l->l_cred, dev, 2489 1.62 elad KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data); 2490 1.93 ad if (rv) { 2491 1.93 ad mutex_exit(&sc->sc_conflock); 2492 1.60 elad return (rv); 2493 1.93 ad } 2494 1.56 christos 2495 1.93 ad rv = iop_passthrough(sc, (struct ioppt *)data, l->l_proc); 2496 1.93 ad 
mutex_exit(&sc->sc_conflock); 2497 1.93 ad return (rv); 2498 1.5 ad 2499 1.11 ad case IOPIOCGSTATUS: 2500 1.11 ad iov = (struct iovec *)data; 2501 1.11 ad i = sizeof(struct i2o_status); 2502 1.11 ad if (i > iov->iov_len) 2503 1.11 ad i = iov->iov_len; 2504 1.11 ad else 2505 1.11 ad iov->iov_len = i; 2506 1.11 ad if ((rv = iop_status_get(sc, 0)) == 0) 2507 1.11 ad rv = copyout(&sc->sc_status, iov->iov_base, i); 2508 1.93 ad mutex_exit(&sc->sc_conflock); 2509 1.11 ad return (rv); 2510 1.5 ad 2511 1.11 ad case IOPIOCGLCT: 2512 1.11 ad case IOPIOCGTIDMAP: 2513 1.11 ad case IOPIOCRECONFIG: 2514 1.11 ad break; 2515 1.5 ad 2516 1.11 ad default: 2517 1.11 ad #if defined(DIAGNOSTIC) || defined(I2ODEBUG) 2518 1.82 chs printf("%s: unknown ioctl %lx\n", device_xname(sc->sc_dev), cmd); 2519 1.11 ad #endif 2520 1.93 ad mutex_exit(&sc->sc_conflock); 2521 1.11 ad return (ENOTTY); 2522 1.11 ad } 2523 1.5 ad 2524 1.11 ad switch (cmd) { 2525 1.5 ad case IOPIOCGLCT: 2526 1.5 ad iov = (struct iovec *)data; 2527 1.11 ad i = le16toh(sc->sc_lct->tablesize) << 2; 2528 1.5 ad if (i > iov->iov_len) 2529 1.5 ad i = iov->iov_len; 2530 1.5 ad else 2531 1.5 ad iov->iov_len = i; 2532 1.11 ad rv = copyout(sc->sc_lct, iov->iov_base, i); 2533 1.5 ad break; 2534 1.5 ad 2535 1.5 ad case IOPIOCRECONFIG: 2536 1.63 ad rv = iop_reconfigure(sc, 0); 2537 1.9 ad break; 2538 1.9 ad 2539 1.9 ad case IOPIOCGTIDMAP: 2540 1.9 ad iov = (struct iovec *)data; 2541 1.11 ad i = sizeof(struct iop_tidmap) * sc->sc_nlctent; 2542 1.11 ad if (i > iov->iov_len) 2543 1.11 ad i = iov->iov_len; 2544 1.11 ad else 2545 1.11 ad iov->iov_len = i; 2546 1.11 ad rv = copyout(sc->sc_tidmap, iov->iov_base, i); 2547 1.11 ad break; 2548 1.11 ad } 2549 1.11 ad 2550 1.63 ad mutex_exit(&sc->sc_conflock); 2551 1.11 ad return (rv); 2552 1.11 ad } 2553 1.11 ad 2554 1.11 ad static int 2555 1.15 ad iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p) 2556 1.11 ad { 2557 1.11 ad struct iop_msg *im; 2558 1.11 ad struct i2o_msg 
*mf; 2559 1.11 ad struct ioppt_buf *ptb; 2560 1.11 ad int rv, i, mapped; 2561 1.11 ad 2562 1.93 ad KASSERT(mutex_owned(&sc->sc_conflock)); 2563 1.93 ad 2564 1.11 ad mf = NULL; 2565 1.11 ad im = NULL; 2566 1.11 ad mapped = 1; 2567 1.11 ad 2568 1.19 ad if (pt->pt_msglen > sc->sc_framesize || 2569 1.11 ad pt->pt_msglen < sizeof(struct i2o_msg) || 2570 1.11 ad pt->pt_nbufs > IOP_MAX_MSG_XFERS || 2571 1.57 christos pt->pt_nbufs < 0 || 2572 1.57 christos #if 0 2573 1.57 christos pt->pt_replylen < 0 || 2574 1.57 christos #endif 2575 1.11 ad pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000) 2576 1.11 ad return (EINVAL); 2577 1.11 ad 2578 1.11 ad for (i = 0; i < pt->pt_nbufs; i++) 2579 1.11 ad if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) { 2580 1.11 ad rv = ENOMEM; 2581 1.11 ad goto bad; 2582 1.11 ad } 2583 1.11 ad 2584 1.19 ad mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK); 2585 1.11 ad if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0) 2586 1.11 ad goto bad; 2587 1.11 ad 2588 1.15 ad im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS); 2589 1.11 ad im->im_rb = (struct i2o_reply *)mf; 2590 1.11 ad mf->msgictx = IOP_ICTX; 2591 1.11 ad mf->msgtctx = im->im_tctx; 2592 1.11 ad 2593 1.11 ad for (i = 0; i < pt->pt_nbufs; i++) { 2594 1.11 ad ptb = &pt->pt_bufs[i]; 2595 1.15 ad rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data, 2596 1.15 ad ptb->ptb_datalen, ptb->ptb_out != 0, p); 2597 1.11 ad if (rv != 0) 2598 1.11 ad goto bad; 2599 1.11 ad mapped = 1; 2600 1.11 ad } 2601 1.11 ad 2602 1.11 ad if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0) 2603 1.11 ad goto bad; 2604 1.11 ad 2605 1.11 ad i = (le32toh(im->im_rb->msgflags) >> 14) & ~3; 2606 1.19 ad if (i > sc->sc_framesize) 2607 1.19 ad i = sc->sc_framesize; 2608 1.11 ad if (i > pt->pt_replylen) 2609 1.11 ad i = pt->pt_replylen; 2610 1.15 ad rv = copyout(im->im_rb, pt->pt_reply, i); 2611 1.9 ad 2612 1.11 ad bad: 2613 1.11 ad if (mapped != 0) 2614 1.11 ad iop_msg_unmap(sc, im); 2615 1.11 ad if (im != NULL) 2616 1.11 ad 
iop_msg_free(sc, im); 2617 1.11 ad if (mf != NULL) 2618 1.11 ad free(mf, M_DEVBUF); 2619 1.1 ad return (rv); 2620 1.5 ad } 2621