1 1.100 andvar /* $NetBSD: xd.c,v 1.100 2025/09/06 21:20:20 andvar Exp $ */
2 1.1 pk
3 1.1 pk /*
4 1.1 pk * Copyright (c) 1995 Charles D. Cranor
5 1.1 pk * All rights reserved.
6 1.1 pk *
7 1.1 pk * Redistribution and use in source and binary forms, with or without
8 1.1 pk * modification, are permitted provided that the following conditions
9 1.1 pk * are met:
10 1.1 pk * 1. Redistributions of source code must retain the above copyright
11 1.1 pk * notice, this list of conditions and the following disclaimer.
12 1.1 pk * 2. Redistributions in binary form must reproduce the above copyright
13 1.1 pk * notice, this list of conditions and the following disclaimer in the
14 1.1 pk * documentation and/or other materials provided with the distribution.
15 1.1 pk *
16 1.1 pk * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 1.1 pk * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 1.1 pk * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 1.1 pk * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 1.1 pk * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 1.1 pk * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 1.1 pk * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 1.1 pk * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 1.1 pk * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 1.1 pk * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 1.1 pk */
27 1.1 pk
28 1.1 pk /*
29 1.1 pk *
30 1.1 pk * x d . c x y l o g i c s 7 5 3 / 7 0 5 3 v m e / s m d d r i v e r
31 1.1 pk *
32 1.87 chuck * author: Chuck Cranor <chuck@netbsd>
33 1.1 pk * started: 27-Feb-95
34 1.1 pk * references: [1] Xylogics Model 753 User's Manual
35 1.1 pk * part number: 166-753-001, Revision B, May 21, 1988.
36 1.1 pk * "Your Partner For Performance"
37 1.1 pk * [2] other NetBSD disk device drivers
38 1.1 pk *
39 1.1 pk * Special thanks go to Scott E. Campbell of Xylogics, Inc. for taking
40 1.1 pk * the time to answer some of my questions about the 753/7053.
41 1.1 pk *
42 1.1 pk * note: the 753 and the 7053 are programmed the same way, but are
43 1.1 pk * different sizes. the 753 is a 6U VME card, while the 7053 is a 9U
44 1.1 pk * VME card (found in many VME based suns).
45 1.1 pk */ 46 1.39 lukem 47 1.39 lukem #include <sys/cdefs.h> 48 1.100 andvar __KERNEL_RCSID(0, "$NetBSD: xd.c,v 1.100 2025/09/06 21:20:20 andvar Exp $"); 49 1.1 pk 50 1.1 pk #undef XDC_DEBUG /* full debug */ 51 1.1 pk #define XDC_DIAG /* extra sanity checks */ 52 1.1 pk #if defined(DIAGNOSTIC) && !defined(XDC_DIAG) 53 1.1 pk #define XDC_DIAG /* link in with master DIAG option */ 54 1.1 pk #endif 55 1.1 pk 56 1.1 pk #include <sys/param.h> 57 1.1 pk #include <sys/proc.h> 58 1.1 pk #include <sys/systm.h> 59 1.1 pk #include <sys/kernel.h> 60 1.1 pk #include <sys/file.h> 61 1.1 pk #include <sys/stat.h> 62 1.1 pk #include <sys/ioctl.h> 63 1.1 pk #include <sys/buf.h> 64 1.55 yamt #include <sys/bufq.h> 65 1.1 pk #include <sys/uio.h> 66 1.1 pk #include <sys/malloc.h> 67 1.1 pk #include <sys/device.h> 68 1.1 pk #include <sys/disklabel.h> 69 1.1 pk #include <sys/disk.h> 70 1.1 pk #include <sys/syslog.h> 71 1.1 pk #include <sys/dkbad.h> 72 1.1 pk #include <sys/conf.h> 73 1.62 yamt #include <sys/kauth.h> 74 1.1 pk 75 1.71 ad #include <sys/bus.h> 76 1.71 ad #include <sys/intr.h> 77 1.13 mrg 78 1.27 chs #if defined(__sparc__) || defined(sun3) 79 1.13 mrg #include <dev/sun/disklabel.h> 80 1.14 pk #endif 81 1.1 pk 82 1.30 pk #include <dev/vme/vmereg.h> 83 1.1 pk #include <dev/vme/vmevar.h> 84 1.1 pk 85 1.1 pk #include <dev/vme/xdreg.h> 86 1.1 pk #include <dev/vme/xdvar.h> 87 1.1 pk #include <dev/vme/xio.h> 88 1.1 pk 89 1.1 pk #include "locators.h" 90 1.1 pk 91 1.1 pk /* 92 1.1 pk * macros 93 1.1 pk */ 94 1.1 pk 95 1.1 pk /* 96 1.1 pk * XDC_TWAIT: add iorq "N" to tail of SC's wait queue 97 1.1 pk */ 98 1.1 pk #define XDC_TWAIT(SC, N) { \ 99 1.1 pk (SC)->waitq[(SC)->waitend] = (N); \ 100 1.1 pk (SC)->waitend = ((SC)->waitend + 1) % XDC_MAXIOPB; \ 101 1.1 pk (SC)->nwait++; \ 102 1.1 pk } 103 1.1 pk 104 1.1 pk /* 105 1.1 pk * XDC_HWAIT: add iorq "N" to head of SC's wait queue 106 1.1 pk */ 107 1.1 pk #define XDC_HWAIT(SC, N) { \ 108 1.1 pk (SC)->waithead = ((SC)->waithead == 0) ? \ 109 1.1 pk (XDC_MAXIOPB - 1) : ((SC)->waithead - 1); \ 110 1.1 pk (SC)->waitq[(SC)->waithead] = (N); \ 111 1.1 pk (SC)->nwait++; \ 112 1.1 pk } 113 1.1 pk 114 1.1 pk /* 115 1.1 pk * XDC_GET_WAITER: gets the first request waiting on the waitq 116 1.1 pk * and removes it (so it can be submitted) 117 1.1 pk */ 118 1.1 pk #define XDC_GET_WAITER(XDCSC, RQ) { \ 119 1.1 pk (RQ) = (XDCSC)->waitq[(XDCSC)->waithead]; \ 120 1.1 pk (XDCSC)->waithead = ((XDCSC)->waithead + 1) % XDC_MAXIOPB; \ 121 1.1 pk xdcsc->nwait--; \ 122 1.1 pk } 123 1.1 pk 124 1.1 pk /* 125 1.1 pk * XDC_FREE: add iorq "N" to SC's free list 126 1.1 pk */ 127 1.1 pk #define XDC_FREE(SC, N) { \ 128 1.1 pk (SC)->freereq[(SC)->nfree++] = (N); \ 129 1.1 pk (SC)->reqs[N].mode = 0; \ 130 1.1 pk if ((SC)->nfree == 1) wakeup(&(SC)->nfree); \ 131 1.1 pk } 132 1.1 pk 133 1.1 pk 134 1.1 pk /* 135 1.1 pk * XDC_RQALLOC: allocate an iorq off the free list (assume nfree > 0). 
136 1.1 pk */ 137 1.1 pk #define XDC_RQALLOC(XDCSC) (XDCSC)->freereq[--((XDCSC)->nfree)] 138 1.1 pk 139 1.1 pk /* 140 1.1 pk * XDC_GO: start iopb ADDR (DVMA addr in a u_long) on XDC 141 1.1 pk */ 142 1.1 pk #define XDC_GO(XDC, ADDR) { \ 143 1.1 pk (XDC)->xdc_iopbaddr0 = ((ADDR) & 0xff); \ 144 1.1 pk (ADDR) = ((ADDR) >> 8); \ 145 1.1 pk (XDC)->xdc_iopbaddr1 = ((ADDR) & 0xff); \ 146 1.1 pk (ADDR) = ((ADDR) >> 8); \ 147 1.1 pk (XDC)->xdc_iopbaddr2 = ((ADDR) & 0xff); \ 148 1.1 pk (ADDR) = ((ADDR) >> 8); \ 149 1.1 pk (XDC)->xdc_iopbaddr3 = (ADDR); \ 150 1.1 pk (XDC)->xdc_iopbamod = XDC_ADDRMOD; \ 151 1.1 pk (XDC)->xdc_csr = XDC_ADDIOPB; /* go! */ \ 152 1.1 pk } 153 1.1 pk 154 1.1 pk /* 155 1.1 pk * XDC_WAIT: wait for XDC's csr "BITS" to come on in "TIME". 156 1.1 pk * LCV is a counter. If it goes to zero then we timed out. 157 1.1 pk */ 158 1.1 pk #define XDC_WAIT(XDC, LCV, TIME, BITS) { \ 159 1.1 pk (LCV) = (TIME); \ 160 1.1 pk while ((LCV) > 0) { \ 161 1.1 pk if ((XDC)->xdc_csr & (BITS)) break; \ 162 1.1 pk (LCV) = (LCV) - 1; \ 163 1.1 pk DELAY(1); \ 164 1.1 pk } \ 165 1.1 pk } 166 1.1 pk 167 1.1 pk /* 168 1.1 pk * XDC_DONE: don't need IORQ, get error code and free (done after xdc_cmd) 169 1.1 pk */ 170 1.1 pk #define XDC_DONE(SC,RQ,ER) { \ 171 1.1 pk if ((RQ) == XD_ERR_FAIL) { \ 172 1.1 pk (ER) = (RQ); \ 173 1.1 pk } else { \ 174 1.1 pk if ((SC)->ndone-- == XDC_SUBWAITLIM) \ 175 1.1 pk wakeup(&(SC)->ndone); \ 176 1.64 christos (ER) = (SC)->reqs[RQ].errnum; \ 177 1.1 pk XDC_FREE((SC), (RQ)); \ 178 1.1 pk } \ 179 1.1 pk } 180 1.1 pk 181 1.1 pk /* 182 1.1 pk * XDC_ADVANCE: advance iorq's pointers by a number of sectors 183 1.1 pk */ 184 1.1 pk #define XDC_ADVANCE(IORQ, N) { \ 185 1.1 pk if (N) { \ 186 1.1 pk (IORQ)->sectcnt -= (N); \ 187 1.1 pk (IORQ)->blockno += (N); \ 188 1.1 pk (IORQ)->dbuf += ((N)*XDFM_BPS); \ 189 1.1 pk } \ 190 1.1 pk } 191 1.1 pk 192 1.1 pk /* 193 1.1 pk * note - addresses you can sleep on: 194 1.1 pk * [1] & of xd_softc's "state" (waiting for a chance to attach a drive) 195 1.1 pk * [2] & of xdc_softc's "nfree" (waiting for a free iorq/iopb) 196 1.1 pk * [3] & of xdc_softc's "ndone" (waiting for number of done iorq/iopb's 197 1.1 pk * to drop below XDC_SUBWAITLIM) 198 1.1 pk * [4] & an iorq (waiting for an XD_SUB_WAIT iorq to finish) 199 1.1 pk */ 200 1.1 pk 201 1.1 pk 202 1.1 pk /* 203 1.1 pk * function prototypes 204 1.1 pk * "xdc_*" functions are internal, all others are external interfaces 205 1.1 pk */ 206 1.1 pk 207 1.1 pk extern int pil_to_vme[]; /* from obio.c */ 208 1.1 pk 209 1.1 pk /* internals */ 210 1.56 perry int xdc_cmd(struct xdc_softc *, int, int, int, int, int, char *, int); 211 1.57 tsutsui const char *xdc_e2str(int); 212 1.56 perry int xdc_error(struct xdc_softc *, struct xd_iorq *, 213 1.56 perry struct xd_iopb *, int, int); 214 1.56 perry int xdc_ioctlcmd(struct xd_softc *, dev_t dev, struct xd_iocmd *); 215 1.56 perry void xdc_perror(struct xd_iorq *, struct xd_iopb *, int); 216 1.56 perry int xdc_piodriver(struct xdc_softc *, int, int); 217 1.56 perry int xdc_remove_iorq(struct xdc_softc *); 218 1.56 perry int xdc_reset(struct xdc_softc *, int, int, int, struct xd_softc *); 219 1.56 perry inline void xdc_rqinit(struct xd_iorq *, struct xdc_softc *, 220 1.56 perry struct xd_softc *, int, u_long, int, 221 1.66 christos void *, struct buf *); 222 1.56 perry void xdc_rqtopb(struct xd_iorq *, struct xd_iopb *, int, int); 223 1.56 perry void xdc_start(struct xdc_softc *, int); 224 1.56 perry int xdc_startbuf(struct xdc_softc *, struct xd_softc *, struct 
buf *); 225 1.56 perry int xdc_submit_iorq(struct xdc_softc *, int, int); 226 1.56 perry void xdc_tick(void *); 227 1.56 perry void xdc_xdreset(struct xdc_softc *, struct xd_softc *); 228 1.21 pk int xd_dmamem_alloc(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *, 229 1.66 christos int *, bus_size_t, void **, bus_addr_t *); 230 1.21 pk void xd_dmamem_free(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *, 231 1.66 christos int, bus_size_t, void *); 232 1.21 pk 233 1.1 pk 234 1.1 pk /* machine interrupt hook */ 235 1.56 perry int xdcintr(void *); 236 1.1 pk 237 1.1 pk /* autoconf */ 238 1.86 cegger int xdcmatch(device_t, cfdata_t, void *); 239 1.86 cegger void xdcattach(device_t, device_t, void *); 240 1.86 cegger int xdmatch(device_t, cfdata_t, void *); 241 1.86 cegger void xdattach(device_t, device_t, void *); 242 1.56 perry static int xdc_probe(void *, bus_space_tag_t, bus_space_handle_t); 243 1.1 pk 244 1.56 perry static void xddummystrat(struct buf *); 245 1.56 perry int xdgetdisklabel(struct xd_softc *, void *); 246 1.1 pk 247 1.1 pk /* XXX - think about this more.. xd_machdep? */ 248 1.56 perry void xdc_md_setup(void); 249 1.1 pk int XDC_DELAY; 250 1.18 thorpej 251 1.20 chs #if defined(__sparc__) 252 1.1 pk #include <sparc/sparc/vaddrs.h> 253 1.1 pk #include <sparc/sparc/cpuvar.h> 254 1.81 cegger void xdc_md_setup(void) 255 1.1 pk { 256 1.1 pk if (CPU_ISSUN4 && cpuinfo.cpu_type == CPUTYP_4_300) 257 1.1 pk XDC_DELAY = XDC_DELAY_4_300; 258 1.1 pk else 259 1.1 pk XDC_DELAY = XDC_DELAY_SPARC; 260 1.1 pk } 261 1.27 chs #elif defined(sun3) 262 1.81 cegger void xdc_md_setup(void) 263 1.1 pk { 264 1.1 pk XDC_DELAY = XDC_DELAY_SUN3; 265 1.1 pk } 266 1.18 thorpej #else 267 1.81 cegger void xdc_md_setup(void) 268 1.18 thorpej { 269 1.18 thorpej XDC_DELAY = 0; 270 1.18 thorpej } 271 1.1 pk #endif 272 1.18 thorpej 273 1.1 pk /* 274 1.4 thorpej * cfattach's: device driver interface to autoconfig 275 1.1 pk */ 276 1.1 pk 277 1.89 chs CFATTACH_DECL_NEW(xdc, sizeof(struct xdc_softc), 278 1.45 thorpej xdcmatch, xdcattach, NULL, NULL); 279 1.1 pk 280 1.89 chs CFATTACH_DECL_NEW(xd, sizeof(struct xd_softc), 281 1.45 thorpej xdmatch, xdattach, NULL, NULL); 282 1.1 pk 283 1.4 thorpej extern struct cfdriver xd_cd; 284 1.42 gehenna 285 1.42 gehenna dev_type_open(xdopen); 286 1.42 gehenna dev_type_close(xdclose); 287 1.42 gehenna dev_type_read(xdread); 288 1.42 gehenna dev_type_write(xdwrite); 289 1.42 gehenna dev_type_ioctl(xdioctl); 290 1.42 gehenna dev_type_strategy(xdstrategy); 291 1.42 gehenna dev_type_dump(xddump); 292 1.42 gehenna dev_type_size(xdsize); 293 1.42 gehenna 294 1.42 gehenna const struct bdevsw xd_bdevsw = { 295 1.91 dholland .d_open = xdopen, 296 1.91 dholland .d_close = xdclose, 297 1.91 dholland .d_strategy = xdstrategy, 298 1.91 dholland .d_ioctl = xdioctl, 299 1.91 dholland .d_dump = xddump, 300 1.91 dholland .d_psize = xdsize, 301 1.92 dholland .d_discard = nodiscard, 302 1.91 dholland .d_flag = D_DISK 303 1.42 gehenna }; 304 1.42 gehenna 305 1.42 gehenna const struct cdevsw xd_cdevsw = { 306 1.91 dholland .d_open = xdopen, 307 1.91 dholland .d_close = xdclose, 308 1.91 dholland .d_read = xdread, 309 1.91 dholland .d_write = xdwrite, 310 1.91 dholland .d_ioctl = xdioctl, 311 1.91 dholland .d_stop = nostop, 312 1.91 dholland .d_tty = notty, 313 1.91 dholland .d_poll = nopoll, 314 1.91 dholland .d_mmap = nommap, 315 1.91 dholland .d_kqfilter = nokqfilter, 316 1.93 dholland .d_discard = nodiscard, 317 1.91 dholland .d_flag = D_DISK 318 1.42 gehenna }; 319 1.1 pk 320 1.1 pk struct 
xdc_attach_args { /* this is the "aux" args to xdattach */ 321 1.1 pk int driveno; /* unit number */ 322 1.1 pk int fullmode; /* submit mode */ 323 1.1 pk int booting; /* are we booting or not? */ 324 1.1 pk }; 325 1.1 pk 326 1.1 pk /* 327 1.1 pk * dkdriver 328 1.1 pk */ 329 1.1 pk 330 1.95 mlelstv struct dkdriver xddkdriver = { 331 1.95 mlelstv .d_strategy = xdstrategy 332 1.95 mlelstv }; 333 1.1 pk 334 1.1 pk /* 335 1.1 pk * start: disk label fix code (XXX) 336 1.1 pk */ 337 1.1 pk 338 1.1 pk static void *xd_labeldata; 339 1.1 pk 340 1.1 pk static void 341 1.79 dsl xddummystrat(struct buf *bp) 342 1.1 pk { 343 1.1 pk if (bp->b_bcount != XDFM_BPS) 344 1.1 pk panic("xddummystrat"); 345 1.84 tsutsui memcpy(bp->b_data, xd_labeldata, XDFM_BPS); 346 1.72 ad bp->b_oflags |= BO_DONE; 347 1.72 ad bp->b_cflags &= ~BC_BUSY; 348 1.1 pk } 349 1.1 pk 350 1.1 pk int 351 1.79 dsl xdgetdisklabel(struct xd_softc *xd, void *b) 352 1.1 pk { 353 1.48 dsl const char *err; 354 1.27 chs #if defined(__sparc__) || defined(sun3) 355 1.1 pk struct sun_disklabel *sdl; 356 1.14 pk #endif 357 1.1 pk 358 1.1 pk /* We already have the label data in `b'; setup for dummy strategy */ 359 1.1 pk xd_labeldata = b; 360 1.1 pk 361 1.1 pk /* Required parameter for readdisklabel() */ 362 1.1 pk xd->sc_dk.dk_label->d_secsize = XDFM_BPS; 363 1.1 pk 364 1.89 chs err = readdisklabel(MAKEDISKDEV(0, device_unit(xd->sc_dev), RAW_PART), 365 1.1 pk xddummystrat, 366 1.1 pk xd->sc_dk.dk_label, xd->sc_dk.dk_cpulabel); 367 1.1 pk if (err) { 368 1.89 chs aprint_error_dev(xd->sc_dev, "%s\n", err); 369 1.1 pk return(XD_ERR_FAIL); 370 1.1 pk } 371 1.1 pk 372 1.27 chs #if defined(__sparc__) || defined(sun3) 373 1.1 pk /* Ok, we have the label; fill in `pcyl' if there's SunOS magic */ 374 1.1 pk sdl = (struct sun_disklabel *)xd->sc_dk.dk_cpulabel->cd_block; 375 1.14 pk if (sdl->sl_magic == SUN_DKMAGIC) { 376 1.1 pk xd->pcyl = sdl->sl_pcylinders; 377 1.14 pk } else 378 1.17 drochner #endif 379 1.14 pk { 380 1.1 pk printf("%s: WARNING: no `pcyl' in disk label.\n", 381 1.89 chs device_xname(xd->sc_dev)); 382 1.1 pk xd->pcyl = xd->sc_dk.dk_label->d_ncylinders + 383 1.1 pk xd->sc_dk.dk_label->d_acylinders; 384 1.1 pk printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n", 385 1.89 chs device_xname(xd->sc_dev), xd->pcyl); 386 1.1 pk } 387 1.1 pk 388 1.1 pk xd->ncyl = xd->sc_dk.dk_label->d_ncylinders; 389 1.1 pk xd->acyl = xd->sc_dk.dk_label->d_acylinders; 390 1.1 pk xd->nhead = xd->sc_dk.dk_label->d_ntracks; 391 1.1 pk xd->nsect = xd->sc_dk.dk_label->d_nsectors; 392 1.1 pk xd->sectpercyl = xd->nhead * xd->nsect; 393 1.1 pk xd->sc_dk.dk_label->d_secsize = XDFM_BPS; /* not handled by 394 1.1 pk * sun->bsd */ 395 1.1 pk return(XD_ERR_AOK); 396 1.1 pk } 397 1.1 pk 398 1.1 pk /* 399 1.1 pk * end: disk label fix code (XXX) 400 1.1 pk */ 401 1.1 pk 402 1.1 pk /* 403 1.21 pk * Shorthand for allocating, mapping and loading a DMA buffer 404 1.21 pk */ 405 1.21 pk int 406 1.79 dsl xd_dmamem_alloc(bus_dma_tag_t tag, bus_dmamap_t map, bus_dma_segment_t *seg, int *nsegp, bus_size_t len, void * *kvap, bus_addr_t *dmap) 407 1.21 pk { 408 1.21 pk int nseg; 409 1.21 pk int error; 410 1.21 pk 411 1.22 pk if ((error = bus_dmamem_alloc(tag, len, 0, 0, 412 1.21 pk seg, 1, &nseg, BUS_DMA_NOWAIT)) != 0) { 413 1.21 pk return (error); 414 1.21 pk } 415 1.21 pk 416 1.36 thorpej if ((error = bus_dmamem_map(tag, seg, nseg, 417 1.36 thorpej len, kvap, 418 1.36 thorpej BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 419 1.21 pk bus_dmamem_free(tag, seg, nseg); 420 1.21 pk return (error); 
421 1.21 pk } 422 1.21 pk 423 1.36 thorpej if ((error = bus_dmamap_load(tag, map, 424 1.36 thorpej *kvap, len, NULL, 425 1.36 thorpej BUS_DMA_NOWAIT)) != 0) { 426 1.36 thorpej bus_dmamem_unmap(tag, *kvap, len); 427 1.21 pk bus_dmamem_free(tag, seg, nseg); 428 1.21 pk return (error); 429 1.21 pk } 430 1.21 pk 431 1.21 pk *dmap = map->dm_segs[0].ds_addr; 432 1.21 pk *nsegp = nseg; 433 1.21 pk return (0); 434 1.21 pk } 435 1.21 pk 436 1.21 pk void 437 1.79 dsl xd_dmamem_free(bus_dma_tag_t tag, bus_dmamap_t map, bus_dma_segment_t *seg, int nseg, bus_size_t len, void * kva) 438 1.21 pk { 439 1.21 pk 440 1.21 pk bus_dmamap_unload(tag, map); 441 1.21 pk bus_dmamem_unmap(tag, kva, len); 442 1.21 pk bus_dmamem_free(tag, seg, nseg); 443 1.21 pk } 444 1.21 pk 445 1.21 pk 446 1.21 pk /* 447 1.1 pk * a u t o c o n f i g f u n c t i o n s 448 1.1 pk */ 449 1.1 pk 450 1.1 pk /* 451 1.1 pk * xdcmatch: determine if xdc is present or not. we do a 452 1.1 pk * soft reset to detect the xdc. 453 1.1 pk */ 454 1.1 pk 455 1.7 pk int 456 1.79 dsl xdc_probe(void *arg, bus_space_tag_t tag, bus_space_handle_t handle) 457 1.7 pk { 458 1.15 drochner struct xdc *xdc = (void *)handle; /* XXX */ 459 1.7 pk int del = 0; 460 1.7 pk 461 1.7 pk xdc->xdc_csr = XDC_RESET; 462 1.7 pk XDC_WAIT(xdc, del, XDC_RESETUSEC, XDC_RESET); 463 1.15 drochner return (del > 0 ? 0 : EIO); 464 1.7 pk } 465 1.7 pk 466 1.88 matt int 467 1.88 matt xdcmatch(device_t parent, cfdata_t cf, void *aux) 468 1.1 pk { 469 1.1 pk struct vme_attach_args *va = aux; 470 1.15 drochner vme_chipset_tag_t ct = va->va_vct; 471 1.15 drochner vme_am_t mod; 472 1.15 drochner int error; 473 1.15 drochner 474 1.30 pk mod = VME_AM_A16 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA; 475 1.30 pk if (vme_space_alloc(ct, va->r[0].offset, sizeof(struct xdc), mod)) 476 1.15 drochner return (0); 477 1.30 pk 478 1.15 drochner error = vme_probe(ct, va->r[0].offset, sizeof(struct xdc), 479 1.15 drochner mod, VME_D32, xdc_probe, 0); 480 1.15 drochner vme_space_free(va->va_vct, va->r[0].offset, sizeof(struct xdc), mod); 481 1.15 drochner 482 1.15 drochner return (error == 0); 483 1.1 pk } 484 1.1 pk 485 1.1 pk /* 486 1.1 pk * xdcattach: attach controller 487 1.1 pk */ 488 1.1 pk void 489 1.86 cegger xdcattach(device_t parent, device_t self, void *aux) 490 1.1 pk { 491 1.1 pk struct vme_attach_args *va = aux; 492 1.15 drochner vme_chipset_tag_t ct = va->va_vct; 493 1.15 drochner bus_space_tag_t bt; 494 1.1 pk bus_space_handle_t bh; 495 1.1 pk vme_intr_handle_t ih; 496 1.15 drochner vme_am_t mod; 497 1.76 drochner struct xdc_softc *xdc = device_private(self); 498 1.1 pk struct xdc_attach_args xa; 499 1.3 pk int lcv, rqno, error; 500 1.1 pk struct xd_iopb_ctrl *ctl; 501 1.3 pk bus_dma_segment_t seg; 502 1.3 pk int rseg; 503 1.15 drochner vme_mapresc_t resc; 504 1.52 mrg bus_addr_t busaddr; 505 1.1 pk 506 1.89 chs xdc->sc_dev = self; 507 1.18 thorpej xdc_md_setup(); 508 1.1 pk 509 1.1 pk /* get addressing and intr level stuff from autoconfig and load it 510 1.1 pk * into our xdc_softc. 
*/ 511 1.1 pk 512 1.15 drochner xdc->dmatag = va->va_bdt; 513 1.30 pk mod = VME_AM_A16 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA; 514 1.1 pk 515 1.30 pk if (vme_space_alloc(ct, va->r[0].offset, sizeof(struct xdc), mod)) 516 1.15 drochner panic("xdc: vme alloc"); 517 1.30 pk 518 1.15 drochner if (vme_space_map(ct, va->r[0].offset, sizeof(struct xdc), 519 1.15 drochner mod, VME_D32, 0, &bt, &bh, &resc) != 0) 520 1.2 pk panic("xdc: vme_map"); 521 1.1 pk 522 1.15 drochner xdc->xdc = (struct xdc *) bh; /* XXX */ 523 1.15 drochner xdc->ipl = va->ilevel; 524 1.15 drochner xdc->vector = va->ivector; 525 1.1 pk 526 1.1 pk for (lcv = 0; lcv < XDC_MAXDEV; lcv++) 527 1.89 chs xdc->sc_drives[lcv] = NULL; 528 1.1 pk 529 1.21 pk /* 530 1.21 pk * allocate and zero buffers 531 1.1 pk * 532 1.1 pk * note: we simplify the code by allocating the max number of iopbs and 533 1.1 pk * iorq's up front. thus, we avoid linked lists and the costs 534 1.21 pk * associated with them in exchange for wasting a little memory. 535 1.21 pk */ 536 1.1 pk 537 1.21 pk /* Get DMA handle for misc. transfers */ 538 1.30 pk if ((error = vme_dmamap_create( 539 1.30 pk ct, /* VME chip tag */ 540 1.30 pk MAXPHYS, /* size */ 541 1.30 pk VME_AM_A24, /* address modifier */ 542 1.30 pk VME_D32, /* data size */ 543 1.30 pk 0, /* swap */ 544 1.21 pk 1, /* nsegments */ 545 1.30 pk MAXPHYS, /* maxsegsz */ 546 1.21 pk 0, /* boundary */ 547 1.21 pk BUS_DMA_NOWAIT, 548 1.21 pk &xdc->auxmap)) != 0) { 549 1.30 pk 550 1.89 chs aprint_error_dev(xdc->sc_dev, "DMA buffer map create error %d\n", 551 1.75 cegger error); 552 1.3 pk return; 553 1.3 pk } 554 1.3 pk 555 1.30 pk 556 1.21 pk /* Get DMA handle for mapping iorq descriptors */ 557 1.30 pk if ((error = vme_dmamap_create( 558 1.30 pk ct, /* VME chip tag */ 559 1.21 pk XDC_MAXIOPB * sizeof(struct xd_iopb), 560 1.30 pk VME_AM_A24, /* address modifier */ 561 1.30 pk VME_D32, /* data size */ 562 1.30 pk 0, /* swap */ 563 1.21 pk 1, /* nsegments */ 564 1.3 pk XDC_MAXIOPB * sizeof(struct xd_iopb), 565 1.21 pk 0, /* boundary */ 566 1.21 pk BUS_DMA_NOWAIT, 567 1.21 pk &xdc->iopmap)) != 0) { 568 1.30 pk 569 1.89 chs aprint_error_dev(xdc->sc_dev, "DMA buffer map create error %d\n", 570 1.75 cegger error); 571 1.21 pk return; 572 1.21 pk } 573 1.21 pk 574 1.21 pk /* Get DMA buffer for iorq descriptors */ 575 1.21 pk if ((error = xd_dmamem_alloc(xdc->dmatag, xdc->iopmap, &seg, &rseg, 576 1.21 pk XDC_MAXIOPB * sizeof(struct xd_iopb), 577 1.66 christos (void **)&xdc->iopbase, 578 1.52 mrg &busaddr)) != 0) { 579 1.89 chs aprint_error_dev(xdc->sc_dev, "DMA buffer alloc error %d\n", 580 1.75 cegger error); 581 1.3 pk return; 582 1.3 pk } 583 1.53 mrg xdc->dvmaiopb = (struct xd_iopb *)(u_long)BUS_ADDR_PADDR(busaddr); 584 1.21 pk 585 1.82 cegger memset(xdc->iopbase, 0, XDC_MAXIOPB * sizeof(struct xd_iopb)); 586 1.1 pk 587 1.96 chs xdc->reqs = malloc(XDC_MAXIOPB * sizeof(struct xd_iorq), 588 1.96 chs M_DEVBUF, M_WAITOK|M_ZERO); 589 1.1 pk 590 1.1 pk /* init free list, iorq to iopb pointers, and non-zero fields in the 591 1.1 pk * iopb which never change. 
*/ 592 1.1 pk 593 1.1 pk for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) { 594 1.1 pk xdc->reqs[lcv].iopb = &xdc->iopbase[lcv]; 595 1.12 pk xdc->reqs[lcv].dmaiopb = &xdc->dvmaiopb[lcv]; 596 1.1 pk xdc->freereq[lcv] = lcv; 597 1.1 pk xdc->iopbase[lcv].fixd = 1; /* always the same */ 598 1.1 pk xdc->iopbase[lcv].naddrmod = XDC_ADDRMOD; /* always the same */ 599 1.1 pk xdc->iopbase[lcv].intr_vec = xdc->vector; /* always the same */ 600 1.3 pk 601 1.30 pk if ((error = vme_dmamap_create( 602 1.30 pk ct, /* VME chip tag */ 603 1.3 pk MAXPHYS, /* size */ 604 1.30 pk VME_AM_A24, /* address modifier */ 605 1.30 pk VME_D32, /* data size */ 606 1.30 pk 0, /* swap */ 607 1.3 pk 1, /* nsegments */ 608 1.3 pk MAXPHYS, /* maxsegsz */ 609 1.3 pk 0, /* boundary */ 610 1.3 pk BUS_DMA_NOWAIT, 611 1.30 pk &xdc->reqs[lcv].dmamap)) != 0) { 612 1.30 pk 613 1.89 chs aprint_error_dev(xdc->sc_dev, "DMA buffer map create error %d\n", 614 1.75 cegger error); 615 1.3 pk return; 616 1.3 pk } 617 1.1 pk } 618 1.1 pk xdc->nfree = XDC_MAXIOPB; 619 1.1 pk xdc->nrun = 0; 620 1.1 pk xdc->waithead = xdc->waitend = xdc->nwait = 0; 621 1.1 pk xdc->ndone = 0; 622 1.1 pk 623 1.1 pk /* init queue of waiting bufs */ 624 1.1 pk 625 1.58 yamt bufq_alloc(&xdc->sc_wq, "fcfs", 0); 626 1.68 ad callout_init(&xdc->sc_tick_ch, 0); 627 1.1 pk 628 1.1 pk /* 629 1.1 pk * section 7 of the manual tells us how to init the controller: 630 1.1 pk * - read controller parameters (6/0) 631 1.1 pk * - write controller parameters (5/0) 632 1.1 pk */ 633 1.1 pk 634 1.1 pk /* read controller parameters and insure we have a 753/7053 */ 635 1.1 pk 636 1.1 pk rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL); 637 1.1 pk if (rqno == XD_ERR_FAIL) { 638 1.1 pk printf(": couldn't read controller params\n"); 639 1.1 pk return; /* shouldn't ever happen */ 640 1.1 pk } 641 1.21 pk ctl = (struct xd_iopb_ctrl *) &xdc->iopbase[rqno]; 642 1.1 pk if (ctl->ctype != XDCT_753) { 643 1.64 christos if (xdc->reqs[rqno].errnum) 644 1.64 christos printf(": %s: ", xdc_e2str(xdc->reqs[rqno].errnum)); 645 1.1 pk printf(": doesn't identify as a 753/7053\n"); 646 1.3 pk XDC_DONE(xdc, rqno, error); 647 1.1 pk return; 648 1.1 pk } 649 1.1 pk printf(": Xylogics 753/7053, PROM=0x%x.%02x.%02x\n", 650 1.1 pk ctl->eprom_partno, ctl->eprom_lvl, ctl->eprom_rev); 651 1.3 pk XDC_DONE(xdc, rqno, error); 652 1.1 pk 653 1.1 pk /* now write controller parameters (xdc_cmd sets all params for us) */ 654 1.1 pk 655 1.1 pk rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_CTL, 0, 0, 0, 0, XD_SUB_POLL); 656 1.3 pk XDC_DONE(xdc, rqno, error); 657 1.3 pk if (error) { 658 1.89 chs aprint_error_dev(xdc->sc_dev, "controller config error: %s\n", 659 1.75 cegger xdc_e2str(error)); 660 1.1 pk return; 661 1.1 pk } 662 1.1 pk 663 1.1 pk /* link in interrupt with higher level software */ 664 1.32 scw vme_intr_map(ct, va->ilevel, va->ivector, &ih); 665 1.15 drochner vme_intr_establish(ct, ih, IPL_BIO, xdcintr, xdc); 666 1.26 cgd evcnt_attach_dynamic(&xdc->sc_intrcnt, EVCNT_TYPE_INTR, NULL, 667 1.89 chs device_xname(xdc->sc_dev), "intr"); 668 1.1 pk 669 1.1 pk 670 1.1 pk /* now we must look for disks using autoconfig */ 671 1.1 pk xa.fullmode = XD_SUB_POLL; 672 1.1 pk xa.booting = 1; 673 1.1 pk 674 1.1 pk for (xa.driveno = 0; xa.driveno < XDC_MAXDEV; xa.driveno++) 675 1.98 thorpej (void) config_found(self, (void *) &xa, NULL, CFARGS_NONE); 676 1.1 pk 677 1.1 pk /* start the watchdog clock */ 678 1.19 thorpej callout_reset(&xdc->sc_tick_ch, XDC_TICKCNT, xdc_tick, xdc); 679 1.1 pk 680 1.1 pk } 681 1.1 pk 682 1.1 pk /* 
683 1.1 pk * xdmatch: probe for disk. 684 1.1 pk * 685 1.1 pk * note: we almost always say disk is present. this allows us to 686 1.1 pk * spin up and configure a disk after the system is booted (we can 687 1.1 pk * call xdattach!). 688 1.1 pk */ 689 1.1 pk int 690 1.86 cegger xdmatch(device_t parent, cfdata_t cf, void *aux) 691 1.1 pk { 692 1.1 pk struct xdc_attach_args *xa = aux; 693 1.1 pk 694 1.1 pk /* looking for autoconf wildcard or exact match */ 695 1.1 pk 696 1.1 pk if (cf->cf_loc[XDCCF_DRIVE] != XDCCF_DRIVE_DEFAULT && 697 1.1 pk cf->cf_loc[XDCCF_DRIVE] != xa->driveno) 698 1.1 pk return 0; 699 1.1 pk 700 1.1 pk return 1; 701 1.1 pk 702 1.1 pk } 703 1.1 pk 704 1.1 pk /* 705 1.1 pk * xdattach: attach a disk. this can be called from autoconf and also 706 1.1 pk * from xdopen/xdstrategy. 707 1.1 pk */ 708 1.1 pk void 709 1.86 cegger xdattach(device_t parent, device_t self, void *aux) 710 1.1 pk { 711 1.76 drochner struct xd_softc *xd = device_private(self); 712 1.76 drochner struct xdc_softc *xdc = device_private(parent); 713 1.1 pk struct xdc_attach_args *xa = aux; 714 1.3 pk int rqno, spt = 0, mb, blk, lcv, fmode, s = 0, newstate; 715 1.1 pk struct xd_iopb_drive *driopb; 716 1.1 pk struct dkbad *dkb; 717 1.3 pk int rseg, error; 718 1.3 pk bus_dma_segment_t seg; 719 1.52 mrg bus_addr_t busaddr; 720 1.66 christos void * dmaddr; 721 1.67 mrg char * buf; 722 1.1 pk 723 1.89 chs xd->sc_dev = self; 724 1.89 chs 725 1.1 pk /* 726 1.1 pk * Always re-initialize the disk structure. We want statistics 727 1.1 pk * to start with a clean slate. 728 1.1 pk */ 729 1.82 cegger memset(&xd->sc_dk, 0, sizeof(xd->sc_dk)); 730 1.1 pk 731 1.1 pk /* if booting, init the xd_softc */ 732 1.1 pk 733 1.1 pk if (xa->booting) { 734 1.1 pk xd->state = XD_DRIVE_UNKNOWN; /* to start */ 735 1.1 pk xd->flags = 0; 736 1.1 pk xd->parent = xdc; 737 1.1 pk } 738 1.1 pk xd->xd_drive = xa->driveno; 739 1.1 pk fmode = xa->fullmode; 740 1.1 pk xdc->sc_drives[xa->driveno] = xd; 741 1.1 pk 742 1.1 pk /* if not booting, make sure we are the only process in the attach for 743 1.1 pk * this drive. if locked out, sleep on it. 
*/ 744 1.1 pk 745 1.1 pk if (!xa->booting) { 746 1.1 pk s = splbio(); 747 1.1 pk while (xd->state == XD_DRIVE_ATTACHING) { 748 1.1 pk if (tsleep(&xd->state, PRIBIO, "xdattach", 0)) { 749 1.1 pk splx(s); 750 1.1 pk return; 751 1.1 pk } 752 1.1 pk } 753 1.1 pk printf("%s at %s", 754 1.89 chs device_xname(xd->sc_dev), device_xname(xd->parent->sc_dev)); 755 1.1 pk } 756 1.3 pk 757 1.1 pk /* we now have control */ 758 1.1 pk xd->state = XD_DRIVE_ATTACHING; 759 1.1 pk newstate = XD_DRIVE_UNKNOWN; 760 1.1 pk 761 1.3 pk buf = NULL; 762 1.21 pk if ((error = xd_dmamem_alloc(xdc->dmatag, xdc->auxmap, &seg, &rseg, 763 1.21 pk XDFM_BPS, 764 1.66 christos (void **)&buf, 765 1.52 mrg &busaddr)) != 0) { 766 1.89 chs aprint_error_dev(xdc->sc_dev, "DMA buffer alloc error %d\n", 767 1.75 cegger error); 768 1.21 pk return; 769 1.3 pk } 770 1.66 christos dmaddr = (void *)(u_long)BUS_ADDR_PADDR(busaddr); 771 1.3 pk 772 1.1 pk /* first try and reset the drive */ 773 1.1 pk 774 1.1 pk rqno = xdc_cmd(xdc, XDCMD_RST, 0, xd->xd_drive, 0, 0, 0, fmode); 775 1.3 pk XDC_DONE(xdc, rqno, error); 776 1.3 pk if (error == XD_ERR_NRDY) { 777 1.1 pk printf(" drive %d: off-line\n", xa->driveno); 778 1.1 pk goto done; 779 1.1 pk } 780 1.3 pk if (error) { 781 1.3 pk printf(": ERROR 0x%02x (%s)\n", error, xdc_e2str(error)); 782 1.1 pk goto done; 783 1.1 pk } 784 1.1 pk printf(" drive %d: ready\n", xa->driveno); 785 1.1 pk 786 1.1 pk /* now set format parameters */ 787 1.1 pk 788 1.1 pk rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_FMT, xd->xd_drive, 0, 0, 0, fmode); 789 1.3 pk XDC_DONE(xdc, rqno, error); 790 1.3 pk if (error) { 791 1.89 chs aprint_error_dev(xd->sc_dev, "write format parameters failed: %s\n", 792 1.75 cegger xdc_e2str(error)); 793 1.1 pk goto done; 794 1.1 pk } 795 1.1 pk 796 1.1 pk /* get drive parameters */ 797 1.1 pk rqno = xdc_cmd(xdc, XDCMD_RDP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode); 798 1.1 pk if (rqno != XD_ERR_FAIL) { 799 1.21 pk driopb = (struct xd_iopb_drive *) &xdc->iopbase[rqno]; 800 1.1 pk spt = driopb->sectpertrk; 801 1.1 pk } 802 1.3 pk XDC_DONE(xdc, rqno, error); 803 1.3 pk if (error) { 804 1.89 chs aprint_error_dev(xd->sc_dev, "read drive parameters failed: %s\n", 805 1.75 cegger xdc_e2str(error)); 806 1.1 pk goto done; 807 1.1 pk } 808 1.1 pk 809 1.1 pk /* 810 1.1 pk * now set drive parameters (to semi-bogus values) so we can read the 811 1.1 pk * disk label. 
812 1.1 pk */ 813 1.1 pk xd->pcyl = xd->ncyl = 1; 814 1.1 pk xd->acyl = 0; 815 1.1 pk xd->nhead = 1; 816 1.1 pk xd->nsect = 1; 817 1.1 pk xd->sectpercyl = 1; 818 1.1 pk for (lcv = 0; lcv < 126; lcv++) /* init empty bad144 table */ 819 1.1 pk xd->dkb.bt_bad[lcv].bt_cyl = xd->dkb.bt_bad[lcv].bt_trksec = 0xffff; 820 1.1 pk rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode); 821 1.3 pk XDC_DONE(xdc, rqno, error); 822 1.3 pk if (error) { 823 1.89 chs aprint_error_dev(xd->sc_dev, "write drive parameters failed: %s\n", 824 1.75 cegger xdc_e2str(error)); 825 1.1 pk goto done; 826 1.1 pk } 827 1.1 pk 828 1.1 pk /* read disk label */ 829 1.3 pk rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive, 0, 1, dmaddr, fmode); 830 1.3 pk XDC_DONE(xdc, rqno, error); 831 1.3 pk if (error) { 832 1.89 chs aprint_error_dev(xd->sc_dev, "reading disk label failed: %s\n", 833 1.75 cegger xdc_e2str(error)); 834 1.1 pk goto done; 835 1.1 pk } 836 1.1 pk newstate = XD_DRIVE_NOLABEL; 837 1.1 pk 838 1.1 pk xd->hw_spt = spt; 839 1.1 pk /* Attach the disk: must be before getdisklabel to malloc label */ 840 1.89 chs disk_init(&xd->sc_dk, device_xname(xd->sc_dev), &xddkdriver); 841 1.1 pk disk_attach(&xd->sc_dk); 842 1.1 pk 843 1.3 pk if (xdgetdisklabel(xd, buf) != XD_ERR_AOK) 844 1.1 pk goto done; 845 1.1 pk 846 1.1 pk /* inform the user of what is up */ 847 1.89 chs printf("%s: <%s>, pcyl %d, hw_spt %d\n", device_xname(xd->sc_dev), 848 1.3 pk buf, xd->pcyl, spt); 849 1.1 pk mb = xd->ncyl * (xd->nhead * xd->nsect) / (1048576 / XDFM_BPS); 850 1.1 pk printf("%s: %dMB, %d cyl, %d head, %d sec, %d bytes/sec\n", 851 1.89 chs device_xname(xd->sc_dev), mb, xd->ncyl, xd->nhead, xd->nsect, 852 1.1 pk XDFM_BPS); 853 1.1 pk 854 1.1 pk /* now set the real drive parameters! */ 855 1.1 pk 856 1.1 pk rqno = xdc_cmd(xdc, XDCMD_WRP, XDFUN_DRV, xd->xd_drive, 0, 0, 0, fmode); 857 1.3 pk XDC_DONE(xdc, rqno, error); 858 1.3 pk if (error) { 859 1.89 chs aprint_error_dev(xd->sc_dev, "write real drive parameters failed: %s\n", 860 1.75 cegger xdc_e2str(error)); 861 1.1 pk goto done; 862 1.1 pk } 863 1.1 pk newstate = XD_DRIVE_ONLINE; 864 1.1 pk 865 1.1 pk /* 866 1.1 pk * read bad144 table. this table resides on the first sector of the 867 1.1 pk * last track of the disk (i.e. second cyl of "acyl" area). 
868 1.1 pk */ 869 1.1 pk 870 1.1 pk blk = (xd->ncyl + xd->acyl - 1) * (xd->nhead * xd->nsect) + /* last cyl */ 871 1.1 pk (xd->nhead - 1) * xd->nsect; /* last head */ 872 1.3 pk rqno = xdc_cmd(xdc, XDCMD_RD, 0, xd->xd_drive, blk, 1, dmaddr, fmode); 873 1.3 pk XDC_DONE(xdc, rqno, error); 874 1.3 pk if (error) { 875 1.89 chs aprint_error_dev(xd->sc_dev, "reading bad144 failed: %s\n", 876 1.75 cegger xdc_e2str(error)); 877 1.1 pk goto done; 878 1.1 pk } 879 1.1 pk 880 1.1 pk /* check dkbad for sanity */ 881 1.3 pk dkb = (struct dkbad *) buf; 882 1.1 pk for (lcv = 0; lcv < 126; lcv++) { 883 1.1 pk if ((dkb->bt_bad[lcv].bt_cyl == 0xffff || 884 1.1 pk dkb->bt_bad[lcv].bt_cyl == 0) && 885 1.1 pk dkb->bt_bad[lcv].bt_trksec == 0xffff) 886 1.1 pk continue; /* blank */ 887 1.1 pk if (dkb->bt_bad[lcv].bt_cyl >= xd->ncyl) 888 1.1 pk break; 889 1.1 pk if ((dkb->bt_bad[lcv].bt_trksec >> 8) >= xd->nhead) 890 1.1 pk break; 891 1.1 pk if ((dkb->bt_bad[lcv].bt_trksec & 0xff) >= xd->nsect) 892 1.1 pk break; 893 1.1 pk } 894 1.1 pk if (lcv != 126) { 895 1.89 chs aprint_error_dev(xd->sc_dev, "warning: invalid bad144 sector!\n"); 896 1.1 pk } else { 897 1.84 tsutsui memcpy(&xd->dkb, buf, XDFM_BPS); 898 1.1 pk } 899 1.1 pk 900 1.1 pk done: 901 1.3 pk if (buf != NULL) { 902 1.21 pk xd_dmamem_free(xdc->dmatag, xdc->auxmap, 903 1.21 pk &seg, rseg, XDFM_BPS, buf); 904 1.3 pk } 905 1.3 pk 906 1.1 pk xd->state = newstate; 907 1.1 pk if (!xa->booting) { 908 1.1 pk wakeup(&xd->state); 909 1.1 pk splx(s); 910 1.1 pk } 911 1.1 pk } 912 1.1 pk 913 1.1 pk /* 914 1.1 pk * end of autoconfig functions 915 1.1 pk */ 916 1.1 pk 917 1.1 pk /* 918 1.1 pk * { b , c } d e v s w f u n c t i o n s 919 1.1 pk */ 920 1.1 pk 921 1.1 pk /* 922 1.1 pk * xdclose: close device 923 1.1 pk */ 924 1.1 pk int 925 1.80 dsl xdclose(dev_t dev, int flag, int fmt, struct lwp *l) 926 1.1 pk { 927 1.76 drochner struct xd_softc *xd = device_lookup_private(&xd_cd, DISKUNIT(dev)); 928 1.1 pk int part = DISKPART(dev); 929 1.1 pk 930 1.1 pk /* clear mask bits */ 931 1.1 pk 932 1.1 pk switch (fmt) { 933 1.1 pk case S_IFCHR: 934 1.1 pk xd->sc_dk.dk_copenmask &= ~(1 << part); 935 1.1 pk break; 936 1.1 pk case S_IFBLK: 937 1.1 pk xd->sc_dk.dk_bopenmask &= ~(1 << part); 938 1.1 pk break; 939 1.1 pk } 940 1.1 pk xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask; 941 1.1 pk 942 1.1 pk return 0; 943 1.1 pk } 944 1.1 pk 945 1.1 pk /* 946 1.1 pk * xddump: crash dump system 947 1.1 pk */ 948 1.1 pk int 949 1.79 dsl xddump(dev_t dev, daddr_t blkno, void *va, size_t size) 950 1.1 pk { 951 1.1 pk int unit, part; 952 1.1 pk struct xd_softc *xd; 953 1.1 pk 954 1.1 pk unit = DISKUNIT(dev); 955 1.1 pk part = DISKPART(dev); 956 1.1 pk 957 1.76 drochner xd = device_lookup_private(&xd_cd, unit); 958 1.76 drochner if (!xd) 959 1.76 drochner return ENXIO; 960 1.1 pk 961 1.89 chs printf("%s%c: crash dump not supported (yet)\n", device_xname(xd->sc_dev), 962 1.1 pk 'a' + part); 963 1.1 pk 964 1.1 pk return ENXIO; 965 1.1 pk 966 1.1 pk /* outline: globals: "dumplo" == sector number of partition to start 967 1.1 pk * dump at (convert to physical sector with partition table) 968 1.1 pk * "dumpsize" == size of dump in clicks "physmem" == size of physical 969 1.1 pk * memory (clicks, ctob() to get bytes) (normal case: dumpsize == 970 1.1 pk * physmem) 971 1.1 pk * 972 1.1 pk * dump a copy of physical memory to the dump device starting at sector 973 1.1 pk * "dumplo" in the swap partition (make sure > 0). map in pages as 974 1.1 pk * we go. use polled I/O. 
975 1.1 pk * 976 1.1 pk * XXX how to handle NON_CONTIG? */ 977 1.1 pk 978 1.1 pk } 979 1.1 pk 980 1.73 elad static enum kauth_device_req 981 1.73 elad xd_getkauthreq(u_char cmd) 982 1.73 elad { 983 1.73 elad enum kauth_device_req req; 984 1.73 elad 985 1.73 elad switch (cmd) { 986 1.73 elad case XDCMD_WR: 987 1.73 elad case XDCMD_XWR: 988 1.74 elad req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_WRITE; 989 1.73 elad break; 990 1.73 elad 991 1.73 elad case XDCMD_RD: 992 1.74 elad req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_READ; 993 1.73 elad break; 994 1.73 elad 995 1.73 elad case XDCMD_RDP: 996 1.73 elad case XDCMD_XRD: 997 1.74 elad req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_READCONF; 998 1.73 elad break; 999 1.73 elad 1000 1.73 elad case XDCMD_WRP: 1001 1.73 elad case XDCMD_RST: 1002 1.74 elad req = KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_WRITECONF; 1003 1.73 elad break; 1004 1.73 elad 1005 1.73 elad case XDCMD_NOP: 1006 1.73 elad case XDCMD_SK: 1007 1.73 elad case XDCMD_TST: 1008 1.73 elad default: 1009 1.73 elad req = 0; 1010 1.73 elad break; 1011 1.73 elad } 1012 1.73 elad 1013 1.73 elad return (req); 1014 1.73 elad } 1015 1.73 elad 1016 1.1 pk /* 1017 1.1 pk * xdioctl: ioctls on XD drives. based on ioctl's of other netbsd disks. 1018 1.1 pk */ 1019 1.1 pk int 1020 1.88 matt xdioctl(dev_t dev, u_long command, void *addr, int flag, struct lwp *l) 1021 1.1 pk 1022 1.1 pk { 1023 1.1 pk struct xd_softc *xd; 1024 1.1 pk struct xd_iocmd *xio; 1025 1.1 pk int error, s, unit; 1026 1.33 fvdl #ifdef __HAVE_OLD_DISKLABEL 1027 1.35 fvdl struct disklabel newlabel; 1028 1.33 fvdl #endif 1029 1.35 fvdl struct disklabel *lp; 1030 1.1 pk 1031 1.1 pk unit = DISKUNIT(dev); 1032 1.1 pk 1033 1.76 drochner if ((xd = device_lookup_private(&xd_cd, unit)) == NULL) 1034 1.1 pk return (ENXIO); 1035 1.1 pk 1036 1.94 christos error = disk_ioctl(&xd->sc_dk, dev, command, addr, flag, l); 1037 1.94 christos if (error != EPASSTHROUGH) 1038 1.94 christos return error; 1039 1.94 christos 1040 1.1 pk /* switch on ioctl type */ 1041 1.1 pk 1042 1.1 pk switch (command) { 1043 1.1 pk case DIOCSBAD: /* set bad144 info */ 1044 1.1 pk if ((flag & FWRITE) == 0) 1045 1.1 pk return EBADF; 1046 1.1 pk s = splbio(); 1047 1.84 tsutsui memcpy(&xd->dkb, addr, sizeof(xd->dkb)); 1048 1.1 pk splx(s); 1049 1.1 pk return 0; 1050 1.1 pk 1051 1.1 pk case DIOCSDINFO: /* set disk label */ 1052 1.33 fvdl #ifdef __HAVE_OLD_DISKLABEL 1053 1.33 fvdl case ODIOCSDINFO: 1054 1.33 fvdl if (command == ODIOCSDINFO) { 1055 1.33 fvdl memset(&newlabel, 0, sizeof newlabel); 1056 1.33 fvdl memcpy(&newlabel, addr, sizeof (struct olddisklabel)); 1057 1.33 fvdl lp = &newlabel; 1058 1.33 fvdl } else 1059 1.33 fvdl #endif 1060 1.33 fvdl lp = (struct disklabel *)addr; 1061 1.33 fvdl 1062 1.1 pk if ((flag & FWRITE) == 0) 1063 1.1 pk return EBADF; 1064 1.1 pk error = setdisklabel(xd->sc_dk.dk_label, 1065 1.33 fvdl lp, /* xd->sc_dk.dk_openmask : */ 0, 1066 1.1 pk xd->sc_dk.dk_cpulabel); 1067 1.1 pk if (error == 0) { 1068 1.1 pk if (xd->state == XD_DRIVE_NOLABEL) 1069 1.1 pk xd->state = XD_DRIVE_ONLINE; 1070 1.1 pk } 1071 1.1 pk return error; 1072 1.1 pk 1073 1.1 pk case DIOCWLABEL: /* change write status of disk label */ 1074 1.1 pk if ((flag & FWRITE) == 0) 1075 1.1 pk return EBADF; 1076 1.1 pk if (*(int *) addr) 1077 1.1 pk xd->flags |= XD_WLABEL; 1078 1.1 pk else 1079 1.1 pk xd->flags &= ~XD_WLABEL; 1080 1.1 pk return 0; 1081 1.1 pk 1082 1.1 pk case DIOCWDINFO: /* write disk label */ 1083 1.33 fvdl #ifdef __HAVE_OLD_DISKLABEL 1084 1.33 fvdl case ODIOCWDINFO: 1085 1.33 fvdl if (command == 
ODIOCWDINFO) { 1086 1.33 fvdl memset(&newlabel, 0, sizeof newlabel); 1087 1.33 fvdl memcpy(&newlabel, addr, sizeof (struct olddisklabel)); 1088 1.33 fvdl lp = &newlabel; 1089 1.33 fvdl } else 1090 1.33 fvdl #endif 1091 1.33 fvdl lp = (struct disklabel *)addr; 1092 1.33 fvdl 1093 1.1 pk if ((flag & FWRITE) == 0) 1094 1.1 pk return EBADF; 1095 1.1 pk error = setdisklabel(xd->sc_dk.dk_label, 1096 1.33 fvdl lp, /* xd->sc_dk.dk_openmask : */ 0, 1097 1.1 pk xd->sc_dk.dk_cpulabel); 1098 1.1 pk if (error == 0) { 1099 1.1 pk if (xd->state == XD_DRIVE_NOLABEL) 1100 1.1 pk xd->state = XD_DRIVE_ONLINE; 1101 1.1 pk 1102 1.1 pk /* Simulate opening partition 0 so write succeeds. */ 1103 1.1 pk xd->sc_dk.dk_openmask |= (1 << 0); 1104 1.33 fvdl error = writedisklabel(MAKEDISKDEV(major(dev), 1105 1.33 fvdl DISKUNIT(dev), RAW_PART), 1106 1.1 pk xdstrategy, xd->sc_dk.dk_label, 1107 1.1 pk xd->sc_dk.dk_cpulabel); 1108 1.1 pk xd->sc_dk.dk_openmask = 1109 1.1 pk xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask; 1110 1.1 pk } 1111 1.1 pk return error; 1112 1.1 pk 1113 1.73 elad case DIOSXDCMD: { 1114 1.73 elad enum kauth_device_req req; 1115 1.73 elad 1116 1.1 pk xio = (struct xd_iocmd *) addr; 1117 1.73 elad req = xd_getkauthreq(xio->cmd); 1118 1.73 elad if ((error = kauth_authorize_device_passthru(l->l_cred, 1119 1.73 elad dev, req, xio)) != 0) 1120 1.1 pk return (error); 1121 1.1 pk return (xdc_ioctlcmd(xd, dev, xio)); 1122 1.73 elad } 1123 1.1 pk 1124 1.1 pk default: 1125 1.1 pk return ENOTTY; 1126 1.1 pk } 1127 1.1 pk } 1128 1.1 pk /* 1129 1.1 pk * xdopen: open drive 1130 1.1 pk */ 1131 1.1 pk 1132 1.1 pk int 1133 1.80 dsl xdopen(dev_t dev, int flag, int fmt, struct lwp *l) 1134 1.1 pk { 1135 1.1 pk int unit, part; 1136 1.1 pk struct xd_softc *xd; 1137 1.1 pk struct xdc_attach_args xa; 1138 1.1 pk 1139 1.1 pk /* first, could it be a valid target? */ 1140 1.1 pk 1141 1.1 pk unit = DISKUNIT(dev); 1142 1.76 drochner if ((xd = device_lookup_private(&xd_cd, unit)) == NULL) 1143 1.1 pk return (ENXIO); 1144 1.1 pk part = DISKPART(dev); 1145 1.1 pk 1146 1.1 pk /* do we need to attach the drive? 
*/ 1147 1.1 pk 1148 1.1 pk if (xd->state == XD_DRIVE_UNKNOWN) { 1149 1.1 pk xa.driveno = xd->xd_drive; 1150 1.1 pk xa.fullmode = XD_SUB_WAIT; 1151 1.1 pk xa.booting = 0; 1152 1.89 chs xdattach(xd->parent->sc_dev, xd->sc_dev, &xa); 1153 1.1 pk if (xd->state == XD_DRIVE_UNKNOWN) { 1154 1.1 pk return (EIO); 1155 1.1 pk } 1156 1.1 pk } 1157 1.1 pk /* check for partition */ 1158 1.1 pk 1159 1.1 pk if (part != RAW_PART && 1160 1.1 pk (part >= xd->sc_dk.dk_label->d_npartitions || 1161 1.1 pk xd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) { 1162 1.1 pk return (ENXIO); 1163 1.1 pk } 1164 1.1 pk /* set open masks */ 1165 1.1 pk 1166 1.1 pk switch (fmt) { 1167 1.1 pk case S_IFCHR: 1168 1.1 pk xd->sc_dk.dk_copenmask |= (1 << part); 1169 1.1 pk break; 1170 1.1 pk case S_IFBLK: 1171 1.1 pk xd->sc_dk.dk_bopenmask |= (1 << part); 1172 1.1 pk break; 1173 1.1 pk } 1174 1.1 pk xd->sc_dk.dk_openmask = xd->sc_dk.dk_copenmask | xd->sc_dk.dk_bopenmask; 1175 1.1 pk 1176 1.1 pk return 0; 1177 1.1 pk } 1178 1.1 pk 1179 1.1 pk int 1180 1.79 dsl xdread(dev_t dev, struct uio *uio, int flags) 1181 1.1 pk { 1182 1.1 pk 1183 1.1 pk return (physio(xdstrategy, NULL, dev, B_READ, minphys, uio)); 1184 1.1 pk } 1185 1.1 pk 1186 1.1 pk int 1187 1.79 dsl xdwrite(dev_t dev, struct uio *uio, int flags) 1188 1.1 pk { 1189 1.1 pk 1190 1.1 pk return (physio(xdstrategy, NULL, dev, B_WRITE, minphys, uio)); 1191 1.1 pk } 1192 1.1 pk 1193 1.1 pk 1194 1.1 pk /* 1195 1.1 pk * xdsize: return size of a partition for a dump 1196 1.1 pk */ 1197 1.1 pk 1198 1.1 pk int 1199 1.88 matt xdsize(dev_t dev) 1200 1.1 pk { 1201 1.1 pk struct xd_softc *xdsc; 1202 1.1 pk int unit, part, size, omask; 1203 1.1 pk 1204 1.1 pk /* valid unit? */ 1205 1.1 pk unit = DISKUNIT(dev); 1206 1.76 drochner if ((xdsc = device_lookup_private(&xd_cd, unit)) == NULL) 1207 1.1 pk return (-1); 1208 1.1 pk 1209 1.1 pk part = DISKPART(dev); 1210 1.1 pk omask = xdsc->sc_dk.dk_openmask & (1 << part); 1211 1.1 pk 1212 1.1 pk if (omask == 0 && xdopen(dev, 0, S_IFBLK, NULL) != 0) 1213 1.1 pk return (-1); 1214 1.1 pk 1215 1.1 pk /* do it */ 1216 1.1 pk if (xdsc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP) 1217 1.1 pk size = -1; /* only give valid size for swap partitions */ 1218 1.1 pk else 1219 1.1 pk size = xdsc->sc_dk.dk_label->d_partitions[part].p_size * 1220 1.1 pk (xdsc->sc_dk.dk_label->d_secsize / DEV_BSIZE); 1221 1.1 pk if (omask == 0 && xdclose(dev, 0, S_IFBLK, NULL) != 0) 1222 1.1 pk return (-1); 1223 1.1 pk return (size); 1224 1.1 pk } 1225 1.1 pk /* 1226 1.1 pk * xdstrategy: buffering system interface to xd. 1227 1.1 pk */ 1228 1.1 pk 1229 1.1 pk void 1230 1.88 matt xdstrategy(struct buf *bp) 1231 1.1 pk { 1232 1.1 pk struct xd_softc *xd; 1233 1.1 pk struct xdc_softc *parent; 1234 1.1 pk int s, unit; 1235 1.1 pk struct xdc_attach_args xa; 1236 1.1 pk 1237 1.1 pk unit = DISKUNIT(bp->b_dev); 1238 1.1 pk 1239 1.1 pk /* check for live device */ 1240 1.1 pk 1241 1.76 drochner if (!(xd = device_lookup_private(&xd_cd, unit)) || 1242 1.1 pk bp->b_blkno < 0 || 1243 1.1 pk (bp->b_bcount % xd->sc_dk.dk_label->d_secsize) != 0) { 1244 1.1 pk bp->b_error = EINVAL; 1245 1.69 ad goto done; 1246 1.1 pk } 1247 1.1 pk /* do we need to attach the drive? 
*/ 1248 1.1 pk 1249 1.1 pk if (xd->state == XD_DRIVE_UNKNOWN) { 1250 1.1 pk xa.driveno = xd->xd_drive; 1251 1.1 pk xa.fullmode = XD_SUB_WAIT; 1252 1.1 pk xa.booting = 0; 1253 1.89 chs xdattach(xd->parent->sc_dev, xd->sc_dev, &xa); 1254 1.1 pk if (xd->state == XD_DRIVE_UNKNOWN) { 1255 1.1 pk bp->b_error = EIO; 1256 1.69 ad goto done; 1257 1.1 pk } 1258 1.1 pk } 1259 1.1 pk if (xd->state != XD_DRIVE_ONLINE && DISKPART(bp->b_dev) != RAW_PART) { 1260 1.1 pk /* no I/O to unlabeled disks, unless raw partition */ 1261 1.1 pk bp->b_error = EIO; 1262 1.69 ad goto done; 1263 1.1 pk } 1264 1.1 pk /* short circuit zero length request */ 1265 1.1 pk 1266 1.1 pk if (bp->b_bcount == 0) 1267 1.1 pk goto done; 1268 1.1 pk 1269 1.1 pk /* check bounds with label (disksubr.c). Determine the size of the 1270 1.1 pk * transfer, and make sure it is within the boundaries of the 1271 1.1 pk * partition. Adjust transfer if needed, and signal errors or early 1272 1.1 pk * completion. */ 1273 1.1 pk 1274 1.49 thorpej if (bounds_check_with_label(&xd->sc_dk, bp, 1275 1.1 pk (xd->flags & XD_WLABEL) != 0) <= 0) 1276 1.1 pk goto done; 1277 1.1 pk 1278 1.1 pk /* 1279 1.1 pk * now we know we have a valid buf structure that we need to do I/O 1280 1.1 pk * on. 1281 1.1 pk * 1282 1.1 pk * note that we don't disksort because the controller has a sorting 1283 1.1 pk * algorithm built into the hardware. 1284 1.1 pk */ 1285 1.1 pk 1286 1.1 pk s = splbio(); /* protect the queues */ 1287 1.1 pk 1288 1.1 pk /* first, give jobs in front of us a chance */ 1289 1.1 pk parent = xd->parent; 1290 1.78 yamt while (parent->nfree > 0 && bufq_peek(parent->sc_wq) != NULL) 1291 1.1 pk if (xdc_startbuf(parent, NULL, NULL) != XD_ERR_AOK) 1292 1.1 pk break; 1293 1.1 pk 1294 1.1 pk /* if there are no free iorq's, then we just queue and return. the 1295 1.1 pk * buffs will get picked up later by xdcintr(). 1296 1.1 pk */ 1297 1.1 pk 1298 1.1 pk if (parent->nfree == 0) { 1299 1.78 yamt bufq_put(parent->sc_wq, bp); 1300 1.1 pk splx(s); 1301 1.1 pk return; 1302 1.1 pk } 1303 1.1 pk 1304 1.1 pk /* now we have free iopb's and we are at splbio... start 'em up */ 1305 1.1 pk if (xdc_startbuf(parent, xd, bp) != XD_ERR_AOK) { 1306 1.1 pk return; 1307 1.1 pk } 1308 1.1 pk 1309 1.1 pk /* done! */ 1310 1.1 pk 1311 1.1 pk splx(s); 1312 1.1 pk return; 1313 1.1 pk 1314 1.1 pk done: /* tells upper layers we are done with this 1315 1.1 pk * buf */ 1316 1.1 pk bp->b_resid = bp->b_bcount; 1317 1.1 pk biodone(bp); 1318 1.1 pk } 1319 1.1 pk /* 1320 1.1 pk * end of {b,c}devsw functions 1321 1.1 pk */ 1322 1.1 pk 1323 1.1 pk /* 1324 1.1 pk * i n t e r r u p t f u n c t i o n 1325 1.1 pk * 1326 1.1 pk * xdcintr: hardware interrupt. 
1327 1.1 pk */ 1328 1.1 pk int 1329 1.88 matt xdcintr(void *v) 1330 1.1 pk { 1331 1.1 pk struct xdc_softc *xdcsc = v; 1332 1.1 pk 1333 1.1 pk /* kick the event counter */ 1334 1.1 pk 1335 1.1 pk xdcsc->sc_intrcnt.ev_count++; 1336 1.1 pk 1337 1.1 pk /* remove as many done IOPBs as possible */ 1338 1.1 pk 1339 1.1 pk xdc_remove_iorq(xdcsc); 1340 1.1 pk 1341 1.1 pk /* start any iorq's already waiting */ 1342 1.1 pk 1343 1.1 pk xdc_start(xdcsc, XDC_MAXIOPB); 1344 1.1 pk 1345 1.1 pk /* fill up any remaining iorq's with queue'd buffers */ 1346 1.1 pk 1347 1.78 yamt while (xdcsc->nfree > 0 && bufq_peek(xdcsc->sc_wq) != NULL) 1348 1.1 pk if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK) 1349 1.1 pk break; 1350 1.1 pk 1351 1.1 pk return (1); 1352 1.1 pk } 1353 1.1 pk /* 1354 1.1 pk * end of interrupt function 1355 1.1 pk */ 1356 1.1 pk 1357 1.1 pk /* 1358 1.1 pk * i n t e r n a l f u n c t i o n s 1359 1.1 pk */ 1360 1.1 pk 1361 1.1 pk /* 1362 1.1 pk * xdc_rqinit: fill out the fields of an I/O request 1363 1.1 pk */ 1364 1.1 pk 1365 1.1 pk inline void 1366 1.79 dsl xdc_rqinit(struct xd_iorq *rq, struct xdc_softc *xdc, struct xd_softc *xd, int md, u_long blk, int cnt, void *db, struct buf *bp) 1367 1.1 pk { 1368 1.1 pk rq->xdc = xdc; 1369 1.1 pk rq->xd = xd; 1370 1.1 pk rq->ttl = XDC_MAXTTL + 10; 1371 1.1 pk rq->mode = md; 1372 1.64 christos rq->tries = rq->errnum = rq->lasterror = 0; 1373 1.1 pk rq->blockno = blk; 1374 1.1 pk rq->sectcnt = cnt; 1375 1.3 pk rq->dbuf = db; 1376 1.1 pk rq->buf = bp; 1377 1.1 pk } 1378 1.1 pk /* 1379 1.1 pk * xdc_rqtopb: load up an IOPB based on an iorq 1380 1.1 pk */ 1381 1.1 pk 1382 1.1 pk void 1383 1.88 matt xdc_rqtopb(struct xd_iorq *iorq, struct xd_iopb *iopb, int cmd, int subfun) 1384 1.1 pk { 1385 1.1 pk u_long block, dp; 1386 1.1 pk 1387 1.1 pk /* standard stuff */ 1388 1.1 pk 1389 1.1 pk iopb->errs = iopb->done = 0; 1390 1.1 pk iopb->comm = cmd; 1391 1.64 christos iopb->errnum = iopb->status = 0; 1392 1.1 pk iopb->subfun = subfun; 1393 1.1 pk if (iorq->xd) 1394 1.1 pk iopb->unit = iorq->xd->xd_drive; 1395 1.1 pk else 1396 1.1 pk iopb->unit = 0; 1397 1.1 pk 1398 1.1 pk /* check for alternate IOPB format */ 1399 1.1 pk 1400 1.1 pk if (cmd == XDCMD_WRP) { 1401 1.1 pk switch (subfun) { 1402 1.1 pk case XDFUN_CTL:{ 1403 1.1 pk struct xd_iopb_ctrl *ctrl = 1404 1.1 pk (struct xd_iopb_ctrl *) iopb; 1405 1.1 pk iopb->lll = 0; 1406 1.1 pk iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL) 1407 1.1 pk ? 
0 1408 1.1 pk : iorq->xdc->ipl; 1409 1.1 pk ctrl->param_a = XDPA_TMOD | XDPA_DACF; 1410 1.1 pk ctrl->param_b = XDPB_ROR | XDPB_TDT_3_2USEC; 1411 1.1 pk ctrl->param_c = XDPC_OVS | XDPC_COP | XDPC_ASR | 1412 1.1 pk XDPC_RBC | XDPC_ECC2; 1413 1.1 pk ctrl->throttle = XDC_THROTTLE; 1414 1.1 pk ctrl->delay = XDC_DELAY; 1415 1.1 pk break; 1416 1.1 pk } 1417 1.1 pk case XDFUN_DRV:{ 1418 1.1 pk struct xd_iopb_drive *drv = 1419 1.1 pk (struct xd_iopb_drive *)iopb; 1420 1.1 pk /* we assume that the disk label has the right 1421 1.1 pk * info */ 1422 1.1 pk if (XD_STATE(iorq->mode) == XD_SUB_POLL) 1423 1.1 pk drv->dparam_ipl = (XDC_DPARAM << 3); 1424 1.1 pk else 1425 1.1 pk drv->dparam_ipl = (XDC_DPARAM << 3) | 1426 1.1 pk iorq->xdc->ipl; 1427 1.1 pk drv->maxsect = iorq->xd->nsect - 1; 1428 1.1 pk drv->maxsector = drv->maxsect; 1429 1.1 pk /* note: maxsector != maxsect only if you are 1430 1.1 pk * doing cyl sparing */ 1431 1.1 pk drv->headoff = 0; 1432 1.1 pk drv->maxcyl = iorq->xd->pcyl - 1; 1433 1.1 pk drv->maxhead = iorq->xd->nhead - 1; 1434 1.1 pk break; 1435 1.1 pk } 1436 1.1 pk case XDFUN_FMT:{ 1437 1.1 pk struct xd_iopb_format *form = 1438 1.1 pk (struct xd_iopb_format *) iopb; 1439 1.1 pk if (XD_STATE(iorq->mode) == XD_SUB_POLL) 1440 1.1 pk form->interleave_ipl = (XDC_INTERLEAVE << 3); 1441 1.1 pk else 1442 1.1 pk form->interleave_ipl = (XDC_INTERLEAVE << 3) | 1443 1.1 pk iorq->xdc->ipl; 1444 1.1 pk form->field1 = XDFM_FIELD1; 1445 1.1 pk form->field2 = XDFM_FIELD2; 1446 1.1 pk form->field3 = XDFM_FIELD3; 1447 1.1 pk form->field4 = XDFM_FIELD4; 1448 1.1 pk form->bytespersec = XDFM_BPS; 1449 1.1 pk form->field6 = XDFM_FIELD6; 1450 1.1 pk form->field7 = XDFM_FIELD7; 1451 1.1 pk break; 1452 1.1 pk } 1453 1.1 pk } 1454 1.1 pk } else { 1455 1.1 pk 1456 1.1 pk /* normal IOPB case (harmless to RDP command) */ 1457 1.1 pk 1458 1.1 pk iopb->lll = 0; 1459 1.1 pk iopb->intl = (XD_STATE(iorq->mode) == XD_SUB_POLL) 1460 1.1 pk ? 0 1461 1.1 pk : iorq->xdc->ipl; 1462 1.1 pk iopb->sectcnt = iorq->sectcnt; 1463 1.1 pk block = iorq->blockno; 1464 1.1 pk if (iorq->xd == NULL || block == 0) { 1465 1.1 pk iopb->sectno = iopb->headno = iopb->cylno = 0; 1466 1.1 pk } else { 1467 1.1 pk iopb->sectno = block % iorq->xd->nsect; 1468 1.1 pk block = block / iorq->xd->nsect; 1469 1.1 pk iopb->headno = block % iorq->xd->nhead; 1470 1.1 pk block = block / iorq->xd->nhead; 1471 1.1 pk iopb->cylno = block; 1472 1.1 pk } 1473 1.3 pk dp = (u_long) iorq->dbuf; 1474 1.1 pk dp = iopb->daddr = (iorq->dbuf == NULL) ? 0 : dp; 1475 1.1 pk iopb->addrmod = ((dp + (XDFM_BPS * iorq->sectcnt)) > 0x1000000) 1476 1.1 pk ? XDC_ADDRMOD32 1477 1.1 pk : XDC_ADDRMOD; 1478 1.1 pk } 1479 1.1 pk } 1480 1.1 pk 1481 1.1 pk /* 1482 1.1 pk * xdc_cmd: front end for POLL'd and WAIT'd commands. Returns rqno. 1483 1.1 pk * If you've already got an IORQ, you can call submit directly (currently 1484 1.37 wiz * there is no need to do this). NORM requests are handled separately. 
1485 1.1 pk */ 1486 1.1 pk int 1487 1.88 matt xdc_cmd(struct xdc_softc *xdcsc, int cmd, int subfn, int unit, int block, 1488 1.88 matt int scnt, char *dptr, int fullmode) 1489 1.1 pk { 1490 1.1 pk int rqno, submode = XD_STATE(fullmode), retry; 1491 1.1 pk struct xd_iorq *iorq; 1492 1.1 pk struct xd_iopb *iopb; 1493 1.1 pk 1494 1.1 pk /* get iorq/iopb */ 1495 1.1 pk switch (submode) { 1496 1.1 pk case XD_SUB_POLL: 1497 1.1 pk while (xdcsc->nfree == 0) { 1498 1.1 pk if (xdc_piodriver(xdcsc, 0, 1) != XD_ERR_AOK) 1499 1.1 pk return (XD_ERR_FAIL); 1500 1.1 pk } 1501 1.1 pk break; 1502 1.1 pk case XD_SUB_WAIT: 1503 1.1 pk retry = 1; 1504 1.1 pk while (retry) { 1505 1.1 pk while (xdcsc->nfree == 0) { 1506 1.1 pk if (tsleep(&xdcsc->nfree, PRIBIO, "xdnfree", 0)) 1507 1.1 pk return (XD_ERR_FAIL); 1508 1.1 pk } 1509 1.1 pk while (xdcsc->ndone > XDC_SUBWAITLIM) { 1510 1.1 pk if (tsleep(&xdcsc->ndone, PRIBIO, "xdsubwait", 0)) 1511 1.1 pk return (XD_ERR_FAIL); 1512 1.1 pk } 1513 1.1 pk if (xdcsc->nfree) 1514 1.1 pk retry = 0; /* got it */ 1515 1.1 pk } 1516 1.1 pk break; 1517 1.1 pk default: 1518 1.1 pk return (XD_ERR_FAIL); /* illegal */ 1519 1.1 pk } 1520 1.1 pk if (xdcsc->nfree == 0) 1521 1.1 pk panic("xdcmd nfree"); 1522 1.1 pk rqno = XDC_RQALLOC(xdcsc); 1523 1.1 pk iorq = &xdcsc->reqs[rqno]; 1524 1.1 pk iopb = iorq->iopb; 1525 1.1 pk 1526 1.1 pk 1527 1.1 pk /* init iorq/iopb */ 1528 1.1 pk 1529 1.1 pk xdc_rqinit(iorq, xdcsc, 1530 1.1 pk (unit == XDC_NOUNIT) ? NULL : xdcsc->sc_drives[unit], 1531 1.1 pk fullmode, block, scnt, dptr, NULL); 1532 1.1 pk 1533 1.1 pk /* load IOPB from iorq */ 1534 1.1 pk 1535 1.1 pk xdc_rqtopb(iorq, iopb, cmd, subfn); 1536 1.1 pk 1537 1.1 pk /* submit it for processing */ 1538 1.1 pk 1539 1.1 pk xdc_submit_iorq(xdcsc, rqno, fullmode); /* error code will be in iorq */ 1540 1.1 pk 1541 1.1 pk return (rqno); 1542 1.1 pk } 1543 1.1 pk /* 1544 1.1 pk * xdc_startbuf 1545 1.1 pk * start a buffer running, assumes nfree > 0 1546 1.1 pk */ 1547 1.1 pk 1548 1.1 pk int 1549 1.88 matt xdc_startbuf(struct xdc_softc *xdcsc, struct xd_softc *xdsc, struct buf *bp) 1550 1.1 pk { 1551 1.1 pk int rqno, partno; 1552 1.1 pk struct xd_iorq *iorq; 1553 1.1 pk struct xd_iopb *iopb; 1554 1.1 pk u_long block; 1555 1.66 christos /* void *dbuf;*/ 1556 1.3 pk int error; 1557 1.1 pk 1558 1.1 pk if (!xdcsc->nfree) 1559 1.1 pk panic("xdc_startbuf free"); 1560 1.1 pk rqno = XDC_RQALLOC(xdcsc); 1561 1.1 pk iorq = &xdcsc->reqs[rqno]; 1562 1.1 pk iopb = iorq->iopb; 1563 1.1 pk 1564 1.1 pk /* get buf */ 1565 1.1 pk 1566 1.1 pk if (bp == NULL) { 1567 1.78 yamt bp = bufq_get(xdcsc->sc_wq); 1568 1.18 thorpej if (bp == NULL) 1569 1.1 pk panic("xdc_startbuf bp"); 1570 1.1 pk xdsc = xdcsc->sc_drives[DISKUNIT(bp->b_dev)]; 1571 1.1 pk } 1572 1.1 pk partno = DISKPART(bp->b_dev); 1573 1.1 pk #ifdef XDC_DEBUG 1574 1.89 chs printf("xdc_startbuf: %s%c: %s block %d\n", device_xname(xdsc->sc_dev), 1575 1.1 pk 'a' + partno, (bp->b_flags & B_READ) ? "read" : "write", bp->b_blkno); 1576 1.1 pk printf("xdc_startbuf: b_bcount %d, b_data 0x%x\n", 1577 1.1 pk bp->b_bcount, bp->b_data); 1578 1.1 pk #endif 1579 1.1 pk 1580 1.1 pk /* 1581 1.1 pk * load request. we have to calculate the correct block number based 1582 1.1 pk * on partition info. 1583 1.1 pk * 1584 1.1 pk * note that iorq points to the buffer as mapped into DVMA space, 1585 1.1 pk * where as the bp->b_data points to its non-DVMA mapping. 1586 1.1 pk */ 1587 1.1 pk 1588 1.1 pk block = bp->b_blkno + ((partno == RAW_PART) ? 
0 : 1589 1.1 pk xdsc->sc_dk.dk_label->d_partitions[partno].p_offset); 1590 1.1 pk 1591 1.3 pk error = bus_dmamap_load(xdcsc->dmatag, iorq->dmamap, 1592 1.3 pk bp->b_data, bp->b_bcount, 0, BUS_DMA_NOWAIT); 1593 1.3 pk if (error != 0) { 1594 1.89 chs aprint_error_dev(xdcsc->sc_dev, "warning: cannot load DMA map\n"); 1595 1.1 pk XDC_FREE(xdcsc, rqno); 1596 1.78 yamt bufq_put(xdcsc->sc_wq, bp); 1597 1.1 pk return (XD_ERR_FAIL); /* XXX: need some sort of 1598 1.1 pk * call-back scheme here? */ 1599 1.1 pk } 1600 1.9 thorpej bus_dmamap_sync(xdcsc->dmatag, iorq->dmamap, 0, 1601 1.9 thorpej iorq->dmamap->dm_mapsize, (bp->b_flags & B_READ) 1602 1.3 pk ? BUS_DMASYNC_PREREAD 1603 1.3 pk : BUS_DMASYNC_PREWRITE); 1604 1.1 pk 1605 1.1 pk /* init iorq and load iopb from it */ 1606 1.1 pk xdc_rqinit(iorq, xdcsc, xdsc, XD_SUB_NORM | XD_MODE_VERBO, block, 1607 1.3 pk bp->b_bcount / XDFM_BPS, 1608 1.66 christos (void *)(u_long)iorq->dmamap->dm_segs[0].ds_addr, 1609 1.3 pk bp); 1610 1.1 pk 1611 1.1 pk xdc_rqtopb(iorq, iopb, (bp->b_flags & B_READ) ? XDCMD_RD : XDCMD_WR, 0); 1612 1.1 pk 1613 1.1 pk /* Instrumentation. */ 1614 1.1 pk disk_busy(&xdsc->sc_dk); 1615 1.1 pk 1616 1.1 pk /* now submit [note that xdc_submit_iorq can never fail on NORM reqs] */ 1617 1.1 pk 1618 1.1 pk xdc_submit_iorq(xdcsc, rqno, XD_SUB_NORM); 1619 1.1 pk return (XD_ERR_AOK); 1620 1.1 pk } 1621 1.1 pk 1622 1.1 pk 1623 1.1 pk /* 1624 1.1 pk * xdc_submit_iorq: submit an iorq for processing. returns XD_ERR_AOK 1625 1.1 pk * if ok. if it fail returns an error code. type is XD_SUB_*. 1626 1.1 pk * 1627 1.1 pk * note: caller frees iorq in all cases except NORM 1628 1.1 pk * 1629 1.1 pk * return value: 1630 1.1 pk * NORM: XD_AOK (req pending), XD_FAIL (couldn't submit request) 1631 1.1 pk * WAIT: XD_AOK (success), <error-code> (failed) 1632 1.1 pk * POLL: <same as WAIT> 1633 1.1 pk * NOQ : <same as NORM> 1634 1.1 pk * 1635 1.1 pk * there are three sources for i/o requests: 1636 1.1 pk * [1] xdstrategy: normal block I/O, using "struct buf" system. 1637 1.1 pk * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts. 1638 1.1 pk * [3] open/ioctl: these are I/O requests done in the context of a process, 1639 1.1 pk * and the process should block until they are done. 1640 1.1 pk * 1641 1.1 pk * software state is stored in the iorq structure. each iorq has an 1642 1.1 pk * iopb structure. the hardware understands the iopb structure. 1643 1.1 pk * every command must go through an iopb. a 7053 can only handle 1644 1.1 pk * XDC_MAXIOPB (31) active iopbs at one time. iopbs are allocated in 1645 1.1 pk * DVMA space at boot up time. what happens if we run out of iopb's? 1646 1.1 pk * for i/o type [1], the buffers are queued at the "buff" layer and 1647 1.1 pk * picked up later by the interrupt routine. for case [2] the 1648 1.1 pk * programmed i/o driver is called with a special flag that says 1649 1.1 pk * return when one iopb is free. for case [3] the process can sleep 1650 1.54 wiz * on the iorq free list until some iopbs are available. 
1651 1.1 pk  */
1652 1.1 pk 
1653 1.1 pk 
1654 1.1 pk int
1655 1.88 matt xdc_submit_iorq(struct xdc_softc *xdcsc, int iorqno, int type)
1656 1.1 pk {
1657 1.1 pk 	u_long iopbaddr;
1658 1.1 pk 	struct xd_iorq *iorq = &xdcsc->reqs[iorqno];
1659 1.1 pk 
1660 1.1 pk #ifdef XDC_DEBUG
1661 1.89 chs 	printf("xdc_submit_iorq(%s, no=%d, type=%d)\n", device_xname(xdcsc->sc_dev),
1662 1.1 pk 	    iorqno, type);
1663 1.1 pk #endif
1664 1.1 pk 
1665 1.1 pk 	/* first check and see if controller is busy */
1666 1.1 pk 	if (xdcsc->xdc->xdc_csr & XDC_ADDING) {
1667 1.1 pk #ifdef XDC_DEBUG
1668 1.1 pk 		printf("xdc_submit_iorq: XDC not ready (ADDING)\n");
1669 1.1 pk #endif
1670 1.1 pk 		if (type == XD_SUB_NOQ)
1671 1.1 pk 			return (XD_ERR_FAIL);	/* failed */
1672 1.1 pk 		XDC_TWAIT(xdcsc, iorqno);	/* put at end of waitq */
1673 1.1 pk 		switch (type) {
1674 1.1 pk 		case XD_SUB_NORM:
1675 1.1 pk 			return XD_ERR_AOK;	/* success */
1676 1.1 pk 		case XD_SUB_WAIT:
1677 1.1 pk 			while (iorq->iopb->done == 0) {
1678 1.25 thorpej 				(void) tsleep(iorq, PRIBIO, "xdciorq", 0);
1679 1.1 pk 			}
1680 1.64 christos 			return (iorq->errnum);
1681 1.1 pk 		case XD_SUB_POLL:
1682 1.1 pk 			return (xdc_piodriver(xdcsc, iorqno, 0));
1683 1.1 pk 		default:
1684 1.1 pk 			panic("xdc_submit_iorq adding");
1685 1.1 pk 		}
1686 1.1 pk 	}
1687 1.1 pk #ifdef XDC_DEBUG
1688 1.1 pk 	{
1689 1.1 pk 		u_char *rio = (u_char *) iorq->iopb;
1690 1.1 pk 		int sz = sizeof(struct xd_iopb), lcv;
1691 1.1 pk 		printf("%s: aio #%d [",
1692 1.89 chs 		    device_xname(xdcsc->sc_dev), iorq - xdcsc->reqs);
1693 1.1 pk 		for (lcv = 0; lcv < sz; lcv++)
1694 1.1 pk 			printf(" %02x", rio[lcv]);
1695 1.1 pk 		printf("]\n");
1696 1.1 pk 	}
1697 1.1 pk #endif /* XDC_DEBUG */
1698 1.1 pk 
1699 1.1 pk 	/* controller not busy, start command */
1700 1.12 pk 	iopbaddr = (u_long) iorq->dmaiopb;
1701 1.1 pk 	XDC_GO(xdcsc->xdc, iopbaddr);	/* go! */
1702 1.1 pk 	xdcsc->nrun++;
1703 1.1 pk 	/* command now running, wrap it up */
1704 1.1 pk 	switch (type) {
1705 1.1 pk 	case XD_SUB_NORM:
1706 1.1 pk 	case XD_SUB_NOQ:
1707 1.1 pk 		return (XD_ERR_AOK);	/* success */
1708 1.1 pk 	case XD_SUB_WAIT:
1709 1.1 pk 		while (iorq->iopb->done == 0) {
1710 1.25 thorpej 			(void) tsleep(iorq, PRIBIO, "xdciorq", 0);
1711 1.1 pk 		}
1712 1.64 christos 		return (iorq->errnum);
1713 1.1 pk 	case XD_SUB_POLL:
1714 1.1 pk 		return (xdc_piodriver(xdcsc, iorqno, 0));
1715 1.1 pk 	default:
1716 1.1 pk 		panic("xdc_submit_iorq wrap up");
1717 1.1 pk 	}
1718 1.1 pk 	panic("xdc_submit_iorq");
1719 1.1 pk 	return 0;	/* not reached */
1720 1.1 pk }
1721 1.1 pk 
1722 1.1 pk 
1723 1.1 pk /*
1724 1.1 pk  * xdc_piodriver
1725 1.1 pk  *
1726 1.1 pk  * programmed i/o driver. this function takes over the computer
1727 1.1 pk  * and drains off all i/o requests. it returns the status of the iorq
1728 1.1 pk  * the caller is interested in. if freeone is true, then it returns
1729 1.1 pk  * when there is a free iorq.
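 *
 * the two calling patterns used elsewhere in this file are, roughly:
 *
 *	xdc_piodriver(xdcsc, 0, 1)	drain until at least one iorq is
 *					free again (xdc_cmd, XD_SUB_POLL
 *					with no free iorqs)
 *	xdc_piodriver(xdcsc, rqno, 0)	run until request "rqno" and all
 *					other pending work completes, then
 *					return that request's errnum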
1730 1.1 pk  */
1731 1.1 pk int
1732 1.88 matt xdc_piodriver(struct xdc_softc *xdcsc, int iorqno, int freeone)
1733 1.1 pk {
1734 1.11 pk 	int nreset = 0;
1735 1.11 pk 	int retval = 0;
1736 1.11 pk 	u_long count;
1737 1.11 pk 	struct xdc *xdc = xdcsc->xdc;
1738 1.1 pk #ifdef XDC_DEBUG
1739 1.89 chs 	printf("xdc_piodriver(%s, %d, freeone=%d)\n", device_xname(xdcsc->sc_dev),
1740 1.1 pk 	    iorqno, freeone);
1741 1.1 pk #endif
1742 1.1 pk 
1743 1.1 pk 	while (xdcsc->nwait || xdcsc->nrun) {
1744 1.1 pk #ifdef XDC_DEBUG
1745 1.1 pk 		printf("xdc_piodriver: wait=%d, run=%d\n",
1746 1.1 pk 		    xdcsc->nwait, xdcsc->nrun);
1747 1.1 pk #endif
1748 1.1 pk 		XDC_WAIT(xdc, count, XDC_MAXTIME, (XDC_REMIOPB | XDC_F_ERROR));
1749 1.1 pk #ifdef XDC_DEBUG
1750 1.1 pk 		printf("xdc_piodriver: done wait with count = %d\n", count);
1751 1.1 pk #endif
1752 1.1 pk 		/* we expect some progress soon */
1753 1.1 pk 		if (count == 0 && nreset >= 2) {
1754 1.1 pk 			xdc_reset(xdcsc, 0, XD_RSET_ALL, XD_ERR_FAIL, 0);
1755 1.1 pk #ifdef XDC_DEBUG
1756 1.1 pk 			printf("xdc_piodriver: timeout\n");
1757 1.1 pk #endif
1758 1.1 pk 			return (XD_ERR_FAIL);
1759 1.1 pk 		}
1760 1.1 pk 		if (count == 0) {
1761 1.1 pk 			if (xdc_reset(xdcsc, 0,
1762 1.1 pk 				      (nreset++ == 0) ? XD_RSET_NONE : iorqno,
1763 1.1 pk 				      XD_ERR_FAIL,
1764 1.1 pk 				      0) == XD_ERR_FAIL)
1765 1.1 pk 				return (XD_ERR_FAIL);	/* flushes all but POLL
1766 1.1 pk 							 * requests, resets */
1767 1.1 pk 			continue;
1768 1.1 pk 		}
1769 1.1 pk 		xdc_remove_iorq(xdcsc);	/* could resubmit request */
1770 1.1 pk 		if (freeone) {
1771 1.1 pk 			if (xdcsc->nrun < XDC_MAXIOPB) {
1772 1.1 pk #ifdef XDC_DEBUG
1773 1.1 pk 				printf("xdc_piodriver: done: one free\n");
1774 1.1 pk #endif
1775 1.1 pk 				return (XD_ERR_AOK);
1776 1.1 pk 			}
1777 1.1 pk 			continue;	/* don't xdc_start */
1778 1.1 pk 		}
1779 1.1 pk 		xdc_start(xdcsc, XDC_MAXIOPB);
1780 1.1 pk 	}
1781 1.1 pk 
1782 1.1 pk 	/* get return value */
1783 1.1 pk 
1784 1.64 christos 	retval = xdcsc->reqs[iorqno].errnum;
1785 1.1 pk 
1786 1.1 pk #ifdef XDC_DEBUG
1787 1.1 pk 	printf("xdc_piodriver: done, retval = 0x%x (%s)\n",
1788 1.64 christos 	    xdcsc->reqs[iorqno].errnum, xdc_e2str(xdcsc->reqs[iorqno].errnum));
1789 1.1 pk #endif
1790 1.1 pk 
1791 1.1 pk 	/* now that we've drained everything, start up any bufs that have
1792 1.1 pk 	 * queued */
1793 1.1 pk 
1794 1.78 yamt 	while (xdcsc->nfree > 0 && bufq_peek(xdcsc->sc_wq) != NULL)
1795 1.1 pk 		if (xdc_startbuf(xdcsc, NULL, NULL) != XD_ERR_AOK)
1796 1.1 pk 			break;
1797 1.1 pk 
1798 1.1 pk 	return (retval);
1799 1.1 pk }
1800 1.1 pk 
1801 1.1 pk /*
1802 1.1 pk  * xdc_xdreset: reset one drive. NOTE: assumes xdc was just reset.
1803 1.1 pk  * we steal iopb[0] for this, but we put it back when we are done.
1804 1.1 pk  */
1805 1.1 pk void
1806 1.88 matt xdc_xdreset(struct xdc_softc *xdcsc, struct xd_softc *xdsc)
1807 1.1 pk {
1808 1.1 pk 	struct xd_iopb tmpiopb;
1809 1.1 pk 	u_long addr;
1810 1.1 pk 	int del;
1811 1.84 tsutsui 	memcpy(&tmpiopb, xdcsc->iopbase, sizeof(tmpiopb));
1812 1.82 cegger 	memset(xdcsc->iopbase, 0, sizeof(tmpiopb));
1813 1.1 pk 	xdcsc->iopbase->comm = XDCMD_RST;
1814 1.1 pk 	xdcsc->iopbase->unit = xdsc->xd_drive;
1815 1.1 pk 	addr = (u_long) xdcsc->dvmaiopb;
1816 1.1 pk 	XDC_GO(xdcsc->xdc, addr);	/* go!
*/ 1817 1.1 pk XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_REMIOPB); 1818 1.1 pk if (del <= 0 || xdcsc->iopbase->errs) { 1819 1.89 chs printf("%s: off-line: %s\n", device_xname(xdcsc->sc_dev), 1820 1.64 christos xdc_e2str(xdcsc->iopbase->errnum)); 1821 1.1 pk xdcsc->xdc->xdc_csr = XDC_RESET; 1822 1.1 pk XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET); 1823 1.1 pk if (del <= 0) 1824 1.1 pk panic("xdc_reset"); 1825 1.1 pk } else { 1826 1.1 pk xdcsc->xdc->xdc_csr = XDC_CLRRIO; /* clear RIO */ 1827 1.1 pk } 1828 1.84 tsutsui memcpy(xdcsc->iopbase, &tmpiopb, sizeof(tmpiopb)); 1829 1.1 pk } 1830 1.1 pk 1831 1.1 pk 1832 1.1 pk /* 1833 1.1 pk * xdc_reset: reset everything: requests are marked as errors except 1834 1.1 pk * a polled request (which is resubmitted) 1835 1.1 pk */ 1836 1.1 pk int 1837 1.88 matt xdc_reset(struct xdc_softc *xdcsc, int quiet, int blastmode, int error, 1838 1.88 matt struct xd_softc *xdsc) 1839 1.1 pk 1840 1.1 pk { 1841 1.1 pk int del = 0, lcv, retval = XD_ERR_AOK; 1842 1.1 pk int oldfree = xdcsc->nfree; 1843 1.1 pk 1844 1.1 pk /* soft reset hardware */ 1845 1.1 pk 1846 1.1 pk if (!quiet) 1847 1.89 chs printf("%s: soft reset\n", device_xname(xdcsc->sc_dev)); 1848 1.1 pk xdcsc->xdc->xdc_csr = XDC_RESET; 1849 1.1 pk XDC_WAIT(xdcsc->xdc, del, XDC_RESETUSEC, XDC_RESET); 1850 1.1 pk if (del <= 0) { 1851 1.1 pk blastmode = XD_RSET_ALL; /* dead, flush all requests */ 1852 1.1 pk retval = XD_ERR_FAIL; 1853 1.1 pk } 1854 1.1 pk if (xdsc) 1855 1.1 pk xdc_xdreset(xdcsc, xdsc); 1856 1.1 pk 1857 1.1 pk /* fix queues based on "blast-mode" */ 1858 1.1 pk 1859 1.1 pk for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) { 1860 1.1 pk register struct xd_iorq *iorq = &xdcsc->reqs[lcv]; 1861 1.1 pk 1862 1.1 pk if (XD_STATE(iorq->mode) != XD_SUB_POLL && 1863 1.1 pk XD_STATE(iorq->mode) != XD_SUB_WAIT && 1864 1.1 pk XD_STATE(iorq->mode) != XD_SUB_NORM) 1865 1.1 pk /* is it active? */ 1866 1.1 pk continue; 1867 1.1 pk 1868 1.1 pk xdcsc->nrun--; /* it isn't running any more */ 1869 1.1 pk if (blastmode == XD_RSET_ALL || blastmode != lcv) { 1870 1.1 pk /* failed */ 1871 1.64 christos iorq->errnum = error; 1872 1.1 pk xdcsc->iopbase[lcv].done = xdcsc->iopbase[lcv].errs = 1; 1873 1.1 pk switch (XD_STATE(xdcsc->reqs[lcv].mode)) { 1874 1.1 pk case XD_SUB_NORM: 1875 1.1 pk iorq->buf->b_error = EIO; 1876 1.1 pk iorq->buf->b_resid = 1877 1.1 pk iorq->sectcnt * XDFM_BPS; 1878 1.3 pk 1879 1.9 thorpej bus_dmamap_sync(xdcsc->dmatag, iorq->dmamap, 0, 1880 1.9 thorpej iorq->dmamap->dm_mapsize, 1881 1.3 pk (iorq->buf->b_flags & B_READ) 1882 1.3 pk ? BUS_DMASYNC_POSTREAD 1883 1.3 pk : BUS_DMASYNC_POSTWRITE); 1884 1.3 pk 1885 1.3 pk bus_dmamap_unload(xdcsc->dmatag, iorq->dmamap); 1886 1.3 pk 1887 1.1 pk disk_unbusy(&xdcsc->reqs[lcv].xd->sc_dk, 1888 1.1 pk (xdcsc->reqs[lcv].buf->b_bcount - 1889 1.47 mrg xdcsc->reqs[lcv].buf->b_resid), 1890 1.47 mrg (iorq->buf->b_flags & B_READ)); 1891 1.1 pk biodone(iorq->buf); 1892 1.1 pk XDC_FREE(xdcsc, lcv); /* add to free list */ 1893 1.1 pk break; 1894 1.1 pk case XD_SUB_WAIT: 1895 1.1 pk wakeup(iorq); 1896 1.1 pk case XD_SUB_POLL: 1897 1.1 pk xdcsc->ndone++; 1898 1.1 pk iorq->mode = 1899 1.1 pk XD_NEWSTATE(iorq->mode, XD_SUB_DONE); 1900 1.1 pk break; 1901 1.1 pk } 1902 1.1 pk 1903 1.1 pk } else { 1904 1.1 pk 1905 1.1 pk /* resubmit, put at front of wait queue */ 1906 1.1 pk XDC_HWAIT(xdcsc, lcv); 1907 1.1 pk } 1908 1.1 pk } 1909 1.1 pk 1910 1.1 pk /* 1911 1.1 pk * now, if stuff is waiting, start it. 
1912 1.1 pk * since we just reset it should go 1913 1.1 pk */ 1914 1.1 pk xdc_start(xdcsc, XDC_MAXIOPB); 1915 1.1 pk 1916 1.1 pk /* ok, we did it */ 1917 1.1 pk if (oldfree == 0 && xdcsc->nfree) 1918 1.1 pk wakeup(&xdcsc->nfree); 1919 1.1 pk 1920 1.1 pk #ifdef XDC_DIAG 1921 1.1 pk del = xdcsc->nwait + xdcsc->nrun + xdcsc->nfree + xdcsc->ndone; 1922 1.1 pk if (del != XDC_MAXIOPB) 1923 1.1 pk printf("%s: diag: xdc_reset miscount (%d should be %d)!\n", 1924 1.89 chs device_xname(xdcsc->sc_dev), del, XDC_MAXIOPB); 1925 1.1 pk else 1926 1.1 pk if (xdcsc->ndone > XDC_MAXIOPB - XDC_SUBWAITLIM) 1927 1.1 pk printf("%s: diag: lots of done jobs (%d)\n", 1928 1.89 chs device_xname(xdcsc->sc_dev), xdcsc->ndone); 1929 1.1 pk #endif 1930 1.1 pk printf("RESET DONE\n"); 1931 1.1 pk return (retval); 1932 1.1 pk } 1933 1.1 pk /* 1934 1.1 pk * xdc_start: start all waiting buffers 1935 1.1 pk */ 1936 1.1 pk 1937 1.1 pk void 1938 1.88 matt xdc_start(struct xdc_softc *xdcsc, int maxio) 1939 1.1 pk 1940 1.1 pk { 1941 1.1 pk int rqno; 1942 1.1 pk while (maxio && xdcsc->nwait && 1943 1.1 pk (xdcsc->xdc->xdc_csr & XDC_ADDING) == 0) { 1944 1.1 pk XDC_GET_WAITER(xdcsc, rqno); /* note: rqno is an "out" 1945 1.1 pk * param */ 1946 1.1 pk if (xdc_submit_iorq(xdcsc, rqno, XD_SUB_NOQ) != XD_ERR_AOK) 1947 1.1 pk panic("xdc_start"); /* should never happen */ 1948 1.1 pk maxio--; 1949 1.1 pk } 1950 1.1 pk } 1951 1.1 pk /* 1952 1.1 pk * xdc_remove_iorq: remove "done" IOPB's. 1953 1.1 pk */ 1954 1.1 pk 1955 1.1 pk int 1956 1.88 matt xdc_remove_iorq(struct xdc_softc *xdcsc) 1957 1.1 pk { 1958 1.64 christos int errnum, rqno, comm, errs; 1959 1.1 pk struct xdc *xdc = xdcsc->xdc; 1960 1.1 pk struct xd_iopb *iopb; 1961 1.1 pk struct xd_iorq *iorq; 1962 1.1 pk struct buf *bp; 1963 1.1 pk 1964 1.1 pk if (xdc->xdc_csr & XDC_F_ERROR) { 1965 1.1 pk /* 1966 1.1 pk * FATAL ERROR: should never happen under normal use. This 1967 1.1 pk * error is so bad, you can't even tell which IOPB is bad, so 1968 1.1 pk * we dump them all. 1969 1.1 pk */ 1970 1.64 christos errnum = xdc->xdc_f_err; 1971 1.89 chs aprint_error_dev(xdcsc->sc_dev, "fatal error 0x%02x: %s\n", 1972 1.64 christos errnum, xdc_e2str(errnum)); 1973 1.64 christos if (xdc_reset(xdcsc, 0, XD_RSET_ALL, errnum, 0) != XD_ERR_AOK) { 1974 1.89 chs aprint_error_dev(xdcsc->sc_dev, "soft reset failed!\n"); 1975 1.1 pk panic("xdc_remove_iorq: controller DEAD"); 1976 1.1 pk } 1977 1.1 pk return (XD_ERR_AOK); 1978 1.1 pk } 1979 1.1 pk 1980 1.1 pk /* 1981 1.1 pk * get iopb that is done 1982 1.1 pk * 1983 1.1 pk * hmm... I used to read the address of the done IOPB off the VME 1984 1.1 pk * registers and calculate the rqno directly from that. that worked 1985 1.1 pk * until I started putting a load on the controller. when loaded, i 1986 1.1 pk * would get interrupts but neither the REMIOPB or F_ERROR bits would 1987 1.1 pk * be set, even after DELAY'ing a while! later on the timeout 1988 1.1 pk * routine would detect IOPBs that were marked "running" but their 1989 1.1 pk * "done" bit was set. rather than dealing directly with this 1990 1.1 pk * problem, it is just easier to look at all running IOPB's for the 1991 1.1 pk * done bit. 
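 * so the loop below simply walks all XDC_MAXIOPB slots and collects any
 * active request whose iopb "done" flag is set, whatever the csr says.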
1992 1.1 pk */ 1993 1.1 pk if (xdc->xdc_csr & XDC_REMIOPB) { 1994 1.1 pk xdc->xdc_csr = XDC_CLRRIO; 1995 1.1 pk } 1996 1.1 pk 1997 1.1 pk for (rqno = 0; rqno < XDC_MAXIOPB; rqno++) { 1998 1.1 pk iorq = &xdcsc->reqs[rqno]; 1999 1.1 pk if (iorq->mode == 0 || XD_STATE(iorq->mode) == XD_SUB_DONE) 2000 1.1 pk continue; /* free, or done */ 2001 1.1 pk iopb = &xdcsc->iopbase[rqno]; 2002 1.1 pk if (iopb->done == 0) 2003 1.1 pk continue; /* not done yet */ 2004 1.1 pk 2005 1.1 pk #ifdef XDC_DEBUG 2006 1.1 pk { 2007 1.1 pk u_char *rio = (u_char *) iopb; 2008 1.1 pk int sz = sizeof(struct xd_iopb), lcv; 2009 1.89 chs printf("%s: rio #%d [", device_xname(xdcsc->sc_dev), rqno); 2010 1.1 pk for (lcv = 0; lcv < sz; lcv++) 2011 1.1 pk printf(" %02x", rio[lcv]); 2012 1.1 pk printf("]\n"); 2013 1.1 pk } 2014 1.1 pk #endif /* XDC_DEBUG */ 2015 1.1 pk 2016 1.1 pk xdcsc->nrun--; 2017 1.1 pk 2018 1.1 pk comm = iopb->comm; 2019 1.1 pk errs = iopb->errs; 2020 1.1 pk 2021 1.1 pk if (errs) 2022 1.64 christos iorq->errnum = iopb->errnum; 2023 1.1 pk else 2024 1.64 christos iorq->errnum = 0; 2025 1.1 pk 2026 1.1 pk /* handle non-fatal errors */ 2027 1.1 pk 2028 1.1 pk if (errs && 2029 1.1 pk xdc_error(xdcsc, iorq, iopb, rqno, comm) == XD_ERR_AOK) 2030 1.1 pk continue; /* AOK: we resubmitted it */ 2031 1.1 pk 2032 1.1 pk 2033 1.1 pk /* this iorq is now done (hasn't been restarted or anything) */ 2034 1.1 pk 2035 1.1 pk if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror) 2036 1.1 pk xdc_perror(iorq, iopb, 0); 2037 1.1 pk 2038 1.1 pk /* now, if read/write check to make sure we got all the data 2039 1.1 pk * we needed. (this may not be the case if we got an error in 2040 1.1 pk * the middle of a multisector request). */ 2041 1.1 pk 2042 1.1 pk if ((iorq->mode & XD_MODE_B144) != 0 && errs == 0 && 2043 1.1 pk (comm == XDCMD_RD || comm == XDCMD_WR)) { 2044 1.1 pk /* we just successfully processed a bad144 sector 2045 1.1 pk * note: if we are in bad 144 mode, the pointers have 2046 1.1 pk * been advanced already (see above) and are pointing 2047 1.1 pk * at the bad144 sector. to exit bad144 mode, we 2048 1.1 pk * must advance the pointers 1 sector and issue a new 2049 1.1 pk * request if there are still sectors left to process 2050 1.1 pk * 2051 1.1 pk */ 2052 1.1 pk XDC_ADVANCE(iorq, 1); /* advance 1 sector */ 2053 1.1 pk 2054 1.1 pk /* exit b144 mode */ 2055 1.1 pk iorq->mode = iorq->mode & (~XD_MODE_B144); 2056 1.1 pk 2057 1.1 pk if (iorq->sectcnt) { /* more to go! 
*/ 2058 1.64 christos iorq->lasterror = iorq->errnum = iopb->errnum = 0; 2059 1.1 pk iopb->errs = iopb->done = 0; 2060 1.1 pk iorq->tries = 0; 2061 1.1 pk iopb->sectcnt = iorq->sectcnt; 2062 1.1 pk iopb->cylno = iorq->blockno / 2063 1.1 pk iorq->xd->sectpercyl; 2064 1.1 pk iopb->headno = 2065 1.1 pk (iorq->blockno / iorq->xd->nhead) % 2066 1.1 pk iorq->xd->nhead; 2067 1.1 pk iopb->sectno = iorq->blockno % XDFM_BPS; 2068 1.3 pk iopb->daddr = (u_long) iorq->dbuf; 2069 1.1 pk XDC_HWAIT(xdcsc, rqno); 2070 1.1 pk xdc_start(xdcsc, 1); /* resubmit */ 2071 1.1 pk continue; 2072 1.1 pk } 2073 1.1 pk } 2074 1.1 pk /* final cleanup, totally done with this request */ 2075 1.1 pk 2076 1.1 pk switch (XD_STATE(iorq->mode)) { 2077 1.1 pk case XD_SUB_NORM: 2078 1.1 pk bp = iorq->buf; 2079 1.1 pk if (errs) { 2080 1.1 pk bp->b_error = EIO; 2081 1.1 pk bp->b_resid = iorq->sectcnt * XDFM_BPS; 2082 1.1 pk } else { 2083 1.1 pk bp->b_resid = 0; /* done */ 2084 1.1 pk } 2085 1.9 thorpej bus_dmamap_sync(xdcsc->dmatag, iorq->dmamap, 0, 2086 1.9 thorpej iorq->dmamap->dm_mapsize, 2087 1.3 pk (bp->b_flags & B_READ) 2088 1.3 pk ? BUS_DMASYNC_POSTREAD 2089 1.3 pk : BUS_DMASYNC_POSTWRITE); 2090 1.3 pk bus_dmamap_unload(xdcsc->dmatag, iorq->dmamap); 2091 1.3 pk 2092 1.1 pk disk_unbusy(&iorq->xd->sc_dk, 2093 1.47 mrg (bp->b_bcount - bp->b_resid), 2094 1.47 mrg (bp->b_flags & B_READ)); 2095 1.1 pk XDC_FREE(xdcsc, rqno); 2096 1.1 pk biodone(bp); 2097 1.1 pk break; 2098 1.1 pk case XD_SUB_WAIT: 2099 1.1 pk iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE); 2100 1.1 pk xdcsc->ndone++; 2101 1.1 pk wakeup(iorq); 2102 1.1 pk break; 2103 1.1 pk case XD_SUB_POLL: 2104 1.1 pk iorq->mode = XD_NEWSTATE(iorq->mode, XD_SUB_DONE); 2105 1.1 pk xdcsc->ndone++; 2106 1.1 pk break; 2107 1.1 pk } 2108 1.1 pk } 2109 1.1 pk 2110 1.1 pk return (XD_ERR_AOK); 2111 1.1 pk } 2112 1.1 pk 2113 1.1 pk /* 2114 1.1 pk * xdc_perror: print error. 2115 1.1 pk * - if still_trying is true: we got an error, retried and got a 2116 1.1 pk * different error. in that case lasterror is the old error, 2117 1.64 christos * and errnum is the new one. 2118 1.1 pk * - if still_trying is not true, then if we ever had an error it 2119 1.64 christos * is in lasterror. also, if iorq->errnum == 0, then we recovered 2120 1.64 christos * from that error (otherwise iorq->errnum == iorq->lasterror). 2121 1.1 pk */ 2122 1.1 pk void 2123 1.88 matt xdc_perror(struct xd_iorq *iorq, struct xd_iopb *iopb, int still_trying) 2124 1.1 pk 2125 1.1 pk { 2126 1.1 pk 2127 1.1 pk int error = iorq->lasterror; 2128 1.1 pk 2129 1.89 chs printf("%s", (iorq->xd) ? device_xname(iorq->xd->sc_dev) 2130 1.89 chs : device_xname(iorq->xdc->sc_dev)); 2131 1.1 pk if (iorq->buf) 2132 1.77 cegger printf("%c: ", 'a' + (char)DISKPART(iorq->buf->b_dev)); 2133 1.1 pk if (iopb->comm == XDCMD_RD || iopb->comm == XDCMD_WR) 2134 1.1 pk printf("%s %d/%d/%d: ", 2135 1.1 pk (iopb->comm == XDCMD_RD) ? "read" : "write", 2136 1.1 pk iopb->cylno, iopb->headno, iopb->sectno); 2137 1.1 pk printf("%s", xdc_e2str(error)); 2138 1.1 pk 2139 1.1 pk if (still_trying) 2140 1.64 christos printf(" [still trying, new error=%s]", xdc_e2str(iorq->errnum)); 2141 1.1 pk else 2142 1.64 christos if (iorq->errnum == 0) 2143 1.1 pk printf(" [recovered in %d tries]", iorq->tries); 2144 1.1 pk 2145 1.1 pk printf("\n"); 2146 1.1 pk } 2147 1.1 pk 2148 1.1 pk /* 2149 1.1 pk * xdc_error: non-fatal error encountered... recover. 
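 * recovery, in order: reset-class errors (XD_ERA_RSET) force a reset of
 * the controller/drive first; a read/write that hit a sector listed in
 * the drive's bad144 table is redirected to the replacement sectors in
 * the alternate cylinders (sparc only); any remaining reset- or
 * hard-class error is retried up to XDC_MAXTRIES times before the
 * request is failed.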
2150 1.1 pk * return AOK if resubmitted, return FAIL if this iopb is done 2151 1.1 pk */ 2152 1.1 pk int 2153 1.88 matt xdc_error(struct xdc_softc *xdcsc, struct xd_iorq *iorq, struct xd_iopb *iopb, 2154 1.88 matt int rqno, int comm) 2155 1.1 pk 2156 1.1 pk { 2157 1.64 christos int errnum = iorq->errnum; 2158 1.64 christos int erract = errnum & XD_ERA_MASK; 2159 1.15 drochner int oldmode, advance; 2160 1.20 chs #ifdef __sparc__ 2161 1.15 drochner int i; 2162 1.15 drochner #endif 2163 1.1 pk 2164 1.1 pk if (erract == XD_ERA_RSET) { /* some errors require a reset */ 2165 1.1 pk oldmode = iorq->mode; 2166 1.1 pk iorq->mode = XD_SUB_DONE | (~XD_SUB_MASK & oldmode); 2167 1.1 pk xdcsc->ndone++; 2168 1.1 pk /* make xdc_start ignore us */ 2169 1.64 christos xdc_reset(xdcsc, 1, XD_RSET_NONE, errnum, iorq->xd); 2170 1.1 pk iorq->mode = oldmode; 2171 1.1 pk xdcsc->ndone--; 2172 1.1 pk } 2173 1.1 pk /* check for read/write to a sector in bad144 table if bad: redirect 2174 1.1 pk * request to bad144 area */ 2175 1.1 pk 2176 1.1 pk if ((comm == XDCMD_RD || comm == XDCMD_WR) && 2177 1.1 pk (iorq->mode & XD_MODE_B144) == 0) { 2178 1.1 pk advance = iorq->sectcnt - iopb->sectcnt; 2179 1.1 pk XDC_ADVANCE(iorq, advance); 2180 1.20 chs #ifdef __sparc__ 2181 1.1 pk if ((i = isbad(&iorq->xd->dkb, iorq->blockno / iorq->xd->sectpercyl, 2182 1.1 pk (iorq->blockno / iorq->xd->nsect) % iorq->xd->nhead, 2183 1.1 pk iorq->blockno % iorq->xd->nsect)) != -1) { 2184 1.1 pk iorq->mode |= XD_MODE_B144; /* enter bad144 mode & 2185 1.1 pk * redirect */ 2186 1.64 christos iopb->errnum = iopb->done = iopb->errs = 0; 2187 1.1 pk iopb->sectcnt = 1; 2188 1.1 pk iopb->cylno = (iorq->xd->ncyl + iorq->xd->acyl) - 2; 2189 1.1 pk /* second to last acyl */ 2190 1.1 pk i = iorq->xd->sectpercyl - 1 - i; /* follow bad144 2191 1.1 pk * standard */ 2192 1.1 pk iopb->headno = i / iorq->xd->nhead; 2193 1.1 pk iopb->sectno = i % iorq->xd->nhead; 2194 1.1 pk XDC_HWAIT(xdcsc, rqno); 2195 1.1 pk xdc_start(xdcsc, 1); /* resubmit */ 2196 1.1 pk return (XD_ERR_AOK); /* recovered! */ 2197 1.1 pk } 2198 1.15 drochner #endif 2199 1.1 pk } 2200 1.1 pk 2201 1.1 pk /* 2202 1.1 pk * it isn't a bad144 sector, must be real error! see if we can retry 2203 1.1 pk * it? 2204 1.1 pk */ 2205 1.1 pk if ((iorq->mode & XD_MODE_VERBO) && iorq->lasterror) 2206 1.1 pk xdc_perror(iorq, iopb, 1); /* inform of error state 2207 1.1 pk * change */ 2208 1.64 christos iorq->lasterror = errnum; 2209 1.1 pk 2210 1.1 pk if ((erract == XD_ERA_RSET || erract == XD_ERA_HARD) 2211 1.1 pk && iorq->tries < XDC_MAXTRIES) { /* retry? */ 2212 1.1 pk iorq->tries++; 2213 1.64 christos iorq->errnum = iopb->errnum = iopb->done = iopb->errs = 0; 2214 1.1 pk XDC_HWAIT(xdcsc, rqno); 2215 1.1 pk xdc_start(xdcsc, 1); /* restart */ 2216 1.1 pk return (XD_ERR_AOK); /* recovered! */ 2217 1.1 pk } 2218 1.1 pk 2219 1.1 pk /* failed to recover from this error */ 2220 1.1 pk return (XD_ERR_FAIL); 2221 1.1 pk } 2222 1.1 pk 2223 1.1 pk /* 2224 1.1 pk * xdc_tick: make sure xd is still alive and ticking (err, kicking). 
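 * runs off a callout every XDC_TICKCNT ticks: under XDC_DIAG it
 * cross-checks the nwait/nrun/nfree/ndone iorq accounting, then it
 * decrements the ttl of every active request and forces a soft reset
 * of the controller if any ttl has run down to zero (watchdog).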
2225 1.1 pk */ 2226 1.1 pk void 2227 1.88 matt xdc_tick(void *arg) 2228 1.1 pk 2229 1.1 pk { 2230 1.1 pk struct xdc_softc *xdcsc = arg; 2231 1.1 pk int lcv, s, reset = 0; 2232 1.1 pk #ifdef XDC_DIAG 2233 1.57 tsutsui int nwait, nrun, nfree, ndone, whd = 0; 2234 1.1 pk u_char fqc[XDC_MAXIOPB], wqc[XDC_MAXIOPB], mark[XDC_MAXIOPB]; 2235 1.1 pk s = splbio(); 2236 1.57 tsutsui nwait = xdcsc->nwait; 2237 1.57 tsutsui nrun = xdcsc->nrun; 2238 1.57 tsutsui nfree = xdcsc->nfree; 2239 1.57 tsutsui ndone = xdcsc->ndone; 2240 1.84 tsutsui memcpy(wqc, xdcsc->waitq, sizeof(wqc)); 2241 1.84 tsutsui memcpy(fqc, xdcsc->freereq, sizeof(fqc)); 2242 1.1 pk splx(s); 2243 1.57 tsutsui if (nwait + nrun + nfree + ndone != XDC_MAXIOPB) { 2244 1.1 pk printf("%s: diag: IOPB miscount (got w/f/r/d %d/%d/%d/%d, wanted %d)\n", 2245 1.89 chs device_xname(xdcsc->sc_dev), nwait, nfree, nrun, ndone, 2246 1.57 tsutsui XDC_MAXIOPB); 2247 1.82 cegger memset(mark, 0, sizeof(mark)); 2248 1.1 pk printf("FREE: "); 2249 1.57 tsutsui for (lcv = nfree; lcv > 0; lcv--) { 2250 1.1 pk printf("%d ", fqc[lcv - 1]); 2251 1.1 pk mark[fqc[lcv - 1]] = 1; 2252 1.1 pk } 2253 1.1 pk printf("\nWAIT: "); 2254 1.57 tsutsui lcv = nwait; 2255 1.1 pk while (lcv > 0) { 2256 1.1 pk printf("%d ", wqc[whd]); 2257 1.1 pk mark[wqc[whd]] = 1; 2258 1.1 pk whd = (whd + 1) % XDC_MAXIOPB; 2259 1.1 pk lcv--; 2260 1.1 pk } 2261 1.1 pk printf("\n"); 2262 1.1 pk for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) { 2263 1.1 pk if (mark[lcv] == 0) 2264 1.64 christos printf("MARK: running %d: mode %d done %d errs %d errnum 0x%x ttl %d buf %p\n", 2265 1.1 pk lcv, xdcsc->reqs[lcv].mode, 2266 1.1 pk xdcsc->iopbase[lcv].done, 2267 1.1 pk xdcsc->iopbase[lcv].errs, 2268 1.64 christos xdcsc->iopbase[lcv].errnum, 2269 1.1 pk xdcsc->reqs[lcv].ttl, xdcsc->reqs[lcv].buf); 2270 1.1 pk } 2271 1.1 pk } else 2272 1.57 tsutsui if (ndone > XDC_MAXIOPB - XDC_SUBWAITLIM) 2273 1.1 pk printf("%s: diag: lots of done jobs (%d)\n", 2274 1.89 chs device_xname(xdcsc->sc_dev), ndone); 2275 1.1 pk 2276 1.1 pk #endif 2277 1.1 pk #ifdef XDC_DEBUG 2278 1.1 pk printf("%s: tick: csr 0x%x, w/f/r/d %d/%d/%d/%d\n", 2279 1.89 chs device_xname(xdcsc->sc_dev), 2280 1.1 pk xdcsc->xdc->xdc_csr, xdcsc->nwait, xdcsc->nfree, xdcsc->nrun, 2281 1.1 pk xdcsc->ndone); 2282 1.1 pk for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) { 2283 1.1 pk if (xdcsc->reqs[lcv].mode) 2284 1.64 christos printf("running %d: mode %d done %d errs %d errnum 0x%x\n", 2285 1.1 pk lcv, 2286 1.1 pk xdcsc->reqs[lcv].mode, xdcsc->iopbase[lcv].done, 2287 1.64 christos xdcsc->iopbase[lcv].errs, xdcsc->iopbase[lcv].errnum); 2288 1.1 pk } 2289 1.1 pk #endif 2290 1.1 pk 2291 1.1 pk /* reduce ttl for each request if one goes to zero, reset xdc */ 2292 1.1 pk s = splbio(); 2293 1.1 pk for (lcv = 0; lcv < XDC_MAXIOPB; lcv++) { 2294 1.1 pk if (xdcsc->reqs[lcv].mode == 0 || 2295 1.1 pk XD_STATE(xdcsc->reqs[lcv].mode) == XD_SUB_DONE) 2296 1.1 pk continue; 2297 1.1 pk xdcsc->reqs[lcv].ttl--; 2298 1.1 pk if (xdcsc->reqs[lcv].ttl == 0) 2299 1.1 pk reset = 1; 2300 1.1 pk } 2301 1.1 pk if (reset) { 2302 1.89 chs printf("%s: watchdog timeout\n", device_xname(xdcsc->sc_dev)); 2303 1.1 pk xdc_reset(xdcsc, 0, XD_RSET_NONE, XD_ERR_FAIL, NULL); 2304 1.1 pk } 2305 1.1 pk splx(s); 2306 1.1 pk 2307 1.1 pk /* until next time */ 2308 1.1 pk 2309 1.19 thorpej callout_reset(&xdcsc->sc_tick_ch, XDC_TICKCNT, xdc_tick, xdcsc); 2310 1.1 pk } 2311 1.1 pk 2312 1.1 pk /* 2313 1.1 pk * xdc_ioctlcmd: this function provides a user level interface to the 2314 1.1 pk * controller via ioctl. 
this allows "format" programs to be written 2315 1.1 pk * in user code, and is also useful for some debugging. we return 2316 1.1 pk * an error code. called at user priority. 2317 1.1 pk */ 2318 1.1 pk int 2319 1.88 matt xdc_ioctlcmd(struct xd_softc *xd, dev_t dev, struct xd_iocmd *xio) 2320 1.1 pk 2321 1.1 pk { 2322 1.3 pk int s, rqno, dummy; 2323 1.67 mrg char *dvmabuf = NULL, *buf = NULL; 2324 1.1 pk struct xdc_softc *xdcsc; 2325 1.3 pk int rseg, error; 2326 1.3 pk bus_dma_segment_t seg; 2327 1.1 pk 2328 1.1 pk /* check sanity of requested command */ 2329 1.1 pk 2330 1.1 pk switch (xio->cmd) { 2331 1.1 pk 2332 1.1 pk case XDCMD_NOP: /* no op: everything should be zero */ 2333 1.1 pk if (xio->subfn || xio->dptr || xio->dlen || 2334 1.1 pk xio->block || xio->sectcnt) 2335 1.1 pk return (EINVAL); 2336 1.1 pk break; 2337 1.1 pk 2338 1.1 pk case XDCMD_RD: /* read / write sectors (up to XD_IOCMD_MAXS) */ 2339 1.1 pk case XDCMD_WR: 2340 1.1 pk if (xio->subfn || xio->sectcnt > XD_IOCMD_MAXS || 2341 1.1 pk xio->sectcnt * XDFM_BPS != xio->dlen || xio->dptr == NULL) 2342 1.1 pk return (EINVAL); 2343 1.1 pk break; 2344 1.1 pk 2345 1.1 pk case XDCMD_SK: /* seek: doesn't seem useful to export this */ 2346 1.1 pk return (EINVAL); 2347 1.1 pk 2348 1.1 pk case XDCMD_WRP: /* write parameters */ 2349 1.1 pk return (EINVAL);/* not useful, except maybe drive 2350 1.1 pk * parameters... but drive parameters should 2351 1.1 pk * go via disklabel changes */ 2352 1.1 pk 2353 1.1 pk case XDCMD_RDP: /* read parameters */ 2354 1.1 pk if (xio->subfn != XDFUN_DRV || 2355 1.1 pk xio->dlen || xio->block || xio->dptr) 2356 1.1 pk return (EINVAL); /* allow read drive params to 2357 1.1 pk * get hw_spt */ 2358 1.1 pk xio->sectcnt = xd->hw_spt; /* we already know the answer */ 2359 1.1 pk return (0); 2360 1.1 pk break; 2361 1.1 pk 2362 1.1 pk case XDCMD_XRD: /* extended read/write */ 2363 1.1 pk case XDCMD_XWR: 2364 1.1 pk 2365 1.1 pk switch (xio->subfn) { 2366 1.1 pk 2367 1.1 pk case XDFUN_THD:/* track headers */ 2368 1.1 pk if (xio->sectcnt != xd->hw_spt || 2369 1.1 pk (xio->block % xd->nsect) != 0 || 2370 1.1 pk xio->dlen != XD_IOCMD_HSZ * xd->hw_spt || 2371 1.1 pk xio->dptr == NULL) 2372 1.1 pk return (EINVAL); 2373 1.1 pk xio->sectcnt = 0; 2374 1.1 pk break; 2375 1.1 pk 2376 1.1 pk case XDFUN_FMT:/* NOTE: also XDFUN_VFY */ 2377 1.1 pk if (xio->cmd == XDCMD_XRD) 2378 1.1 pk return (EINVAL); /* no XDFUN_VFY */ 2379 1.1 pk if (xio->sectcnt || xio->dlen || 2380 1.1 pk (xio->block % xd->nsect) != 0 || xio->dptr) 2381 1.1 pk return (EINVAL); 2382 1.1 pk break; 2383 1.1 pk 2384 1.1 pk case XDFUN_HDR:/* header, header verify, data, data ECC */ 2385 1.1 pk return (EINVAL); /* not yet */ 2386 1.1 pk 2387 1.1 pk case XDFUN_DM: /* defect map */ 2388 1.1 pk case XDFUN_DMX:/* defect map (alternate location) */ 2389 1.1 pk if (xio->sectcnt || xio->dlen != XD_IOCMD_DMSZ || 2390 1.1 pk (xio->block % xd->nsect) != 0 || xio->dptr == NULL) 2391 1.1 pk return (EINVAL); 2392 1.1 pk break; 2393 1.1 pk 2394 1.1 pk default: 2395 1.1 pk return (EINVAL); 2396 1.1 pk } 2397 1.1 pk break; 2398 1.1 pk 2399 1.1 pk case XDCMD_TST: /* diagnostics */ 2400 1.1 pk return (EINVAL); 2401 1.1 pk 2402 1.1 pk default: 2403 1.1 pk return (EINVAL);/* ??? 
*/ 2404 1.1 pk } 2405 1.1 pk 2406 1.3 pk xdcsc = xd->parent; 2407 1.3 pk 2408 1.1 pk /* create DVMA buffer for request if needed */ 2409 1.3 pk if (xio->dlen) { 2410 1.52 mrg bus_addr_t busbuf; 2411 1.52 mrg 2412 1.21 pk if ((error = xd_dmamem_alloc(xdcsc->dmatag, xdcsc->auxmap, 2413 1.21 pk &seg, &rseg, 2414 1.67 mrg xio->dlen, (void **)&buf, 2415 1.52 mrg &busbuf)) != 0) { 2416 1.3 pk return (error); 2417 1.21 pk } 2418 1.66 christos dvmabuf = (void *)(u_long)BUS_ADDR_PADDR(busbuf); 2419 1.1 pk 2420 1.1 pk if (xio->cmd == XDCMD_WR || xio->cmd == XDCMD_XWR) { 2421 1.3 pk if ((error = copyin(xio->dptr, buf, xio->dlen)) != 0) { 2422 1.3 pk bus_dmamem_unmap(xdcsc->dmatag, buf, xio->dlen); 2423 1.3 pk bus_dmamem_free(xdcsc->dmatag, &seg, rseg); 2424 1.3 pk return (error); 2425 1.1 pk } 2426 1.1 pk } 2427 1.1 pk } 2428 1.3 pk 2429 1.1 pk /* do it! */ 2430 1.1 pk 2431 1.3 pk error = 0; 2432 1.1 pk s = splbio(); 2433 1.1 pk rqno = xdc_cmd(xdcsc, xio->cmd, xio->subfn, xd->xd_drive, xio->block, 2434 1.1 pk xio->sectcnt, dvmabuf, XD_SUB_WAIT); 2435 1.1 pk if (rqno == XD_ERR_FAIL) { 2436 1.3 pk error = EIO; 2437 1.1 pk goto done; 2438 1.1 pk } 2439 1.64 christos xio->errnum = xdcsc->reqs[rqno].errnum; 2440 1.1 pk xio->tries = xdcsc->reqs[rqno].tries; 2441 1.1 pk XDC_DONE(xdcsc, rqno, dummy); 2442 1.90 mrg __USE(dummy); 2443 1.1 pk 2444 1.1 pk if (xio->cmd == XDCMD_RD || xio->cmd == XDCMD_XRD) 2445 1.3 pk error = copyout(buf, xio->dptr, xio->dlen); 2446 1.1 pk 2447 1.1 pk done: 2448 1.1 pk splx(s); 2449 1.3 pk if (dvmabuf) { 2450 1.21 pk xd_dmamem_free(xdcsc->dmatag, xdcsc->auxmap, &seg, rseg, 2451 1.21 pk xio->dlen, buf); 2452 1.3 pk } 2453 1.3 pk return (error); 2454 1.1 pk } 2455 1.1 pk 2456 1.1 pk /* 2457 1.1 pk * xdc_e2str: convert error code number into an error string 2458 1.1 pk */ 2459 1.57 tsutsui const char * 2460 1.79 dsl xdc_e2str(int no) 2461 1.1 pk { 2462 1.1 pk switch (no) { 2463 1.1 pk case XD_ERR_FAIL: 2464 1.1 pk return ("Software fatal error"); 2465 1.1 pk case XD_ERR_AOK: 2466 1.1 pk return ("Successful completion"); 2467 1.1 pk case XD_ERR_ICYL: 2468 1.1 pk return ("Illegal cylinder address"); 2469 1.1 pk case XD_ERR_IHD: 2470 1.1 pk return ("Illegal head address"); 2471 1.1 pk case XD_ERR_ISEC: 2472 1.100 andvar return ("Illegal sector address"); 2473 1.1 pk case XD_ERR_CZER: 2474 1.1 pk return ("Count zero"); 2475 1.1 pk case XD_ERR_UIMP: 2476 1.1 pk return ("Unimplemented command"); 2477 1.1 pk case XD_ERR_IF1: 2478 1.1 pk return ("Illegal field length 1"); 2479 1.1 pk case XD_ERR_IF2: 2480 1.1 pk return ("Illegal field length 2"); 2481 1.1 pk case XD_ERR_IF3: 2482 1.1 pk return ("Illegal field length 3"); 2483 1.1 pk case XD_ERR_IF4: 2484 1.1 pk return ("Illegal field length 4"); 2485 1.1 pk case XD_ERR_IF5: 2486 1.1 pk return ("Illegal field length 5"); 2487 1.1 pk case XD_ERR_IF6: 2488 1.1 pk return ("Illegal field length 6"); 2489 1.1 pk case XD_ERR_IF7: 2490 1.1 pk return ("Illegal field length 7"); 2491 1.1 pk case XD_ERR_ISG: 2492 1.1 pk return ("Illegal scatter/gather length"); 2493 1.1 pk case XD_ERR_ISPT: 2494 1.1 pk return ("Not enough sectors per track"); 2495 1.1 pk case XD_ERR_ALGN: 2496 1.1 pk return ("Next IOPB address alignment error"); 2497 1.1 pk case XD_ERR_SGAL: 2498 1.1 pk return ("Scatter/gather address alignment error"); 2499 1.1 pk case XD_ERR_SGEC: 2500 1.1 pk return ("Scatter/gather with auto-ECC"); 2501 1.1 pk case XD_ERR_SECC: 2502 1.1 pk return ("Soft ECC corrected"); 2503 1.1 pk case XD_ERR_SIGN: 2504 1.1 pk return ("ECC ignored"); 2505 1.1 pk 
case XD_ERR_ASEK:
2506 1.1 pk 		return ("Auto-seek retry recovered");
2507 1.1 pk 	case XD_ERR_RTRY:
2508 1.1 pk 		return ("Soft retry recovered");
2509 1.1 pk 	case XD_ERR_HECC:
2510 1.1 pk 		return ("Hard data ECC");
2511 1.1 pk 	case XD_ERR_NHDR:
2512 1.1 pk 		return ("Header not found");
2513 1.1 pk 	case XD_ERR_NRDY:
2514 1.1 pk 		return ("Drive not ready");
2515 1.1 pk 	case XD_ERR_TOUT:
2516 1.1 pk 		return ("Operation timeout");
2517 1.1 pk 	case XD_ERR_VTIM:
2518 1.1 pk 		return ("VMEDMA timeout");
2519 1.1 pk 	case XD_ERR_DSEQ:
2520 1.1 pk 		return ("Disk sequencer error");
2521 1.1 pk 	case XD_ERR_HDEC:
2522 1.1 pk 		return ("Header ECC error");
2523 1.1 pk 	case XD_ERR_RVFY:
2524 1.1 pk 		return ("Read verify");
2525 1.1 pk 	case XD_ERR_VFER:
2526 1.1 pk 		return ("Fatal VMEDMA error");
2527 1.1 pk 	case XD_ERR_VBUS:
2528 1.1 pk 		return ("VMEbus error");
2529 1.1 pk 	case XD_ERR_DFLT:
2530 1.1 pk 		return ("Drive faulted");
2531 1.1 pk 	case XD_ERR_HECY:
2532 1.99 andvar 		return ("Header error/cylinder");
2533 1.1 pk 	case XD_ERR_HEHD:
2534 1.1 pk 		return ("Header error/head");
2535 1.1 pk 	case XD_ERR_NOCY:
2536 1.1 pk 		return ("Drive not on-cylinder");
2537 1.1 pk 	case XD_ERR_SEEK:
2538 1.1 pk 		return ("Seek error");
2539 1.1 pk 	case XD_ERR_ILSS:
2540 1.1 pk 		return ("Illegal sector size");
2541 1.1 pk 	case XD_ERR_SEC:
2542 1.1 pk 		return ("Soft ECC");
2543 1.1 pk 	case XD_ERR_WPER:
2544 1.1 pk 		return ("Write-protect error");
2545 1.1 pk 	case XD_ERR_IRAM:
2546 1.1 pk 		return ("IRAM self test failure");
2547 1.1 pk 	case XD_ERR_MT3:
2548 1.1 pk 		return ("Maintenance test 3 failure (DSKCEL RAM)");
2549 1.1 pk 	case XD_ERR_MT4:
2550 1.1 pk 		return ("Maintenance test 4 failure (header shift reg)");
2551 1.1 pk 	case XD_ERR_MT5:
2552 1.1 pk 		return ("Maintenance test 5 failure (VMEDMA regs)");
2553 1.1 pk 	case XD_ERR_MT6:
2554 1.1 pk 		return ("Maintenance test 6 failure (REGCEL chip)");
2555 1.1 pk 	case XD_ERR_MT7:
2556 1.1 pk 		return ("Maintenance test 7 failure (buffer parity)");
2557 1.1 pk 	case XD_ERR_MT8:
2558 1.1 pk 		return ("Maintenance test 8 failure (disk FIFO)");
2559 1.1 pk 	case XD_ERR_IOCK:
2560 1.1 pk 		return ("IOPB checksum miscompare");
2561 1.1 pk 	case XD_ERR_IODM:
2562 1.1 pk 		return ("IOPB DMA fatal");
2563 1.1 pk 	case XD_ERR_IOAL:
2564 1.1 pk 		return ("IOPB address alignment error");
2565 1.1 pk 	case XD_ERR_FIRM:
2566 1.1 pk 		return ("Firmware error");
2567 1.1 pk 	case XD_ERR_MMOD:
2568 1.1 pk 		return ("Illegal maintenance mode test number");
2569 1.1 pk 	case XD_ERR_ACFL:
2570 1.1 pk 		return ("ACFAIL asserted");
2571 1.1 pk 	default:
2572 1.1 pk 		return ("Unknown error");
2573 1.1 pk 	}
2574 1.1 pk }
2575