/*	$NetBSD: si_sebuf.c,v 1.33 2024/12/20 23:52:00 tsutsui Exp $	*/

/*-
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Gordon W. Ross.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Sun3/E SCSI driver (machine-dependent portion).
 * The machine-independent parts are in ncr5380sbc.c
 *
 * XXX - Mostly from the si driver.  Merge?
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: si_sebuf.c,v 1.33 2024/12/20 23:52:00 tsutsui Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/proc.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_debug.h>
#include <dev/scsipi/scsiconf.h>

#include <machine/autoconf.h>

/* #define DEBUG XXX */

#include <dev/ic/ncr5380reg.h>
#include <dev/ic/ncr5380var.h>

#include "sereg.h"
#include "sevar.h"

/*
 * Transfers smaller than this are done using PIO
 * (on the assumption that they're not worth the DMA overhead).
 */
#define MIN_DMA_LEN 128

/*
 * Transfers larger than 65535 bytes need to be split up.
 * (Some of the FIFO logic has only 16-bit counters.)
 * Make the size an integer multiple of the page size
 * to avoid buf/cluster remap problems.  (paranoid?)
 */
#define MAX_DMA_LEN 0xE000
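
/*
 * Note: 0xE000 is 57344 bytes, i.e. 7 * 8192 (or 14 * 4096), so the
 * limit is a whole number of pages and stays safely below the
 * 65535-byte ceiling imposed by the 16-bit FIFO/DMA counters
 * mentioned above.
 */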

/*
 * This structure is used to keep track of mapped DMA requests.
 */
struct se_dma_handle {
        int             dh_flags;
#define SIDH_BUSY       1       /* This DH is in use */
#define SIDH_OUT        2       /* DMA does data out (write) */
        u_char          *dh_addr;       /* KVA of start of buffer */
        int             dh_maplen;      /* Length of KVA mapping. */
        long            dh_dma;         /* Offset in DMA buffer. */
};

/*
 * The first structure member has to be the ncr5380_softc
 * so we can just cast to go back and forth between them.
 */
struct se_softc {
        struct ncr5380_softc    ncr_sc;
        volatile struct se_regs *sc_regs;
        int             sc_adapter_type;
        int             sc_adapter_iv;  /* int. vec */
        int             sc_options;     /* options for this instance */
        int             sc_reqlen;      /* requested transfer length */
        struct se_dma_handle *sc_dma;
        /* DMA command block for the OBIO controller. */
        void            *sc_dmacmd;
};

/* Options for disconnect/reselect, DMA, and interrupts. */
#define SE_NO_DISCONNECT    0xff
#define SE_NO_PARITY_CHK  0xff00
#define SE_FORCE_POLLING 0x10000
#define SE_DISABLE_DMA   0x20000

void se_dma_alloc(struct ncr5380_softc *);
void se_dma_free(struct ncr5380_softc *);
void se_dma_poll(struct ncr5380_softc *);

void se_dma_setup(struct ncr5380_softc *);
void se_dma_start(struct ncr5380_softc *);
void se_dma_eop(struct ncr5380_softc *);
void se_dma_stop(struct ncr5380_softc *);

void se_intr_on (struct ncr5380_softc *);
void se_intr_off(struct ncr5380_softc *);

static int  se_intr(void *);
static void se_reset(struct ncr5380_softc *);

/*
 * New-style autoconfig attachment
 */

static int  se_match(device_t, cfdata_t, void *);
static void se_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(si_sebuf, sizeof(struct se_softc),
    se_match, se_attach, NULL, NULL);

static void se_minphys(struct buf *);

/* Options for disconnect/reselect, DMA, and interrupts. */
int se_options = SE_DISABLE_DMA | SE_FORCE_POLLING | 0xff;
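
/*
 * For example, the default above decodes as
 *   SE_DISABLE_DMA | SE_FORCE_POLLING | 0xff == 0x300ff
 * i.e. DMA disabled, polled operation forced, and disconnect/reselect
 * disallowed for all eight targets.  Bits 8-15 (SE_NO_PARITY_CHK) form
 * a per-target parity-disable mask; se_attach() shifts them down by 8
 * before passing them to the MI code.
 */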

/* How long to wait for DMA before declaring an error. */
int se_dma_intr_timo = 500;     /* ticks (sec. X 100) */

int se_debug = 0;

static int
se_match(device_t parent, cfdata_t cf, void *args)
{
        struct sebuf_attach_args *aa = args;

        /* Match by name. */
        if (strcmp(aa->name, "se"))
                return 0;

        /* Anything else to check? */

        return 1;
}

static void
se_attach(device_t parent, device_t self, void *args)
{
        struct se_softc *sc = device_private(self);
        struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
        struct cfdata *cf = device_cfdata(self);
        struct sebuf_attach_args *aa = args;
        volatile struct se_regs *regs;
        int i;

        ncr_sc->sc_dev = self;

        /* Get options from config flags if specified. */
        if (cf->cf_flags)
                sc->sc_options = cf->cf_flags;
        else
                sc->sc_options = se_options;

        aprint_normal(": options=0x%x\n", sc->sc_options);

        sc->sc_adapter_type = aa->ca.ca_bustype;
        sc->sc_adapter_iv = aa->ca.ca_intvec;
        sc->sc_regs = regs = aa->regs;

        /*
         * MD function pointers used by the MI code.
         */
        ncr_sc->sc_pio_out = ncr5380_pio_out;
        ncr_sc->sc_pio_in = ncr5380_pio_in;

#if 0   /* XXX - not yet... */
        ncr_sc->sc_dma_alloc = se_dma_alloc;
        ncr_sc->sc_dma_free = se_dma_free;
        ncr_sc->sc_dma_setup = se_dma_setup;
        ncr_sc->sc_dma_start = se_dma_start;
        ncr_sc->sc_dma_poll = se_dma_poll;
        ncr_sc->sc_dma_eop = se_dma_eop;
        ncr_sc->sc_dma_stop = se_dma_stop;
        ncr_sc->sc_intr_on = se_intr_on;
        ncr_sc->sc_intr_off = se_intr_off;
#endif  /* XXX */

        /* Attach interrupt handler. */
        isr_add_vectored(se_intr, (void *)sc,
            aa->ca.ca_intpri, aa->ca.ca_intvec);

        /* Reset the hardware. */
        se_reset(ncr_sc);

        /* Do the common attach stuff. */

        /*
         * Support the "options" (config file flags).
         * Disconnect/reselect is a per-target mask.
         * Interrupts and DMA are per-controller.
         */
        ncr_sc->sc_no_disconnect =
            (sc->sc_options & SE_NO_DISCONNECT);
        ncr_sc->sc_parity_disable =
            (sc->sc_options & SE_NO_PARITY_CHK) >> 8;
        if (sc->sc_options & SE_FORCE_POLLING)
                ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;

#if 1   /* XXX - Temporary */
        /* XXX - In case we think DMA is completely broken... */
        if (sc->sc_options & SE_DISABLE_DMA) {
                /* Override this function pointer. */
                ncr_sc->sc_dma_alloc = NULL;
        }
#endif
        ncr_sc->sc_min_dma_len = MIN_DMA_LEN;

        /*
         * Initialize fields used by the MI code
         */
        ncr_sc->sci_r0 = &regs->ncrregs[0];
        ncr_sc->sci_r1 = &regs->ncrregs[1];
        ncr_sc->sci_r2 = &regs->ncrregs[2];
        ncr_sc->sci_r3 = &regs->ncrregs[3];
        ncr_sc->sci_r4 = &regs->ncrregs[4];
        ncr_sc->sci_r5 = &regs->ncrregs[5];
        ncr_sc->sci_r6 = &regs->ncrregs[6];
        ncr_sc->sci_r7 = &regs->ncrregs[7];

        ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

        /*
         * Allocate DMA handles.
         */
        i = SCI_OPENINGS * sizeof(struct se_dma_handle);
        sc->sc_dma = kmem_alloc(i, KM_SLEEP);
        for (i = 0; i < SCI_OPENINGS; i++)
                sc->sc_dma[i].dh_flags = 0;

        ncr_sc->sc_channel.chan_id = 7;
        ncr_sc->sc_adapter.adapt_minphys = se_minphys;

        /*
         * Initialize se board itself.
         */
        ncr5380_attach(ncr_sc);
}

static void
se_reset(struct ncr5380_softc *ncr_sc)
{
        struct se_softc *sc = (struct se_softc *)ncr_sc;
        volatile struct se_regs *se = sc->sc_regs;

#ifdef  DEBUG
        if (se_debug) {
                printf("%s\n", __func__);
        }
#endif

        /* The reset bits in the CSR are active low. */
        se->se_csr = 0;
        delay(10);
        se->se_csr = SE_CSR_SCSI_RES /* | SE_CSR_INTR_EN */ ;
        delay(10);

        /* Make sure the DMA engine is stopped. */
        se->dma_addr = 0;
        se->dma_cntr = 0;
        se->se_ivec = sc->sc_adapter_iv;
}

/*
 * This is called when the bus is going idle,
 * so we want to enable the SBC interrupts.
 * That is controlled by the DMA enable!
 * Who would have guessed!
 * What a NASTY trick!
 */
void
se_intr_on(struct ncr5380_softc *ncr_sc)
{
        struct se_softc *sc = (struct se_softc *)ncr_sc;
        volatile struct se_regs *se = sc->sc_regs;

        /* receive mode should be safer */
        se->se_csr &= ~SE_CSR_SEND;

        /* Clear the count so nothing happens. */
        se->dma_cntr = 0;

        /* Clear the start address too. (paranoid?) */
        se->dma_addr = 0;

        /* Finally, enable the DMA engine. */
        se->se_csr |= SE_CSR_INTR_EN;
}

/*
 * This is called when the bus is idle and we are
 * about to start playing with the SBC chip.
 */
void
se_intr_off(struct ncr5380_softc *ncr_sc)
{
        struct se_softc *sc = (struct se_softc *)ncr_sc;
        volatile struct se_regs *se = sc->sc_regs;

        se->se_csr &= ~SE_CSR_INTR_EN;
}

/*
 * This function is called during the COMMAND or MSG_IN phase
 * that precedes a DATA_IN or DATA_OUT phase, in case we need
 * to set up the DMA engine before the bus enters a DATA phase.
 *
 * On the VME version, set up the start address, but clear the
 * count (to make sure it stays idle) and set that later.
 * XXX: The VME adapter appears to suppress SBC interrupts
 * when the FIFO is not empty or the FIFO count is non-zero!
 * XXX: Need to copy data into the DMA buffer...
 */
void
se_dma_setup(struct ncr5380_softc *ncr_sc)
{
        struct se_softc *sc = (struct se_softc *)ncr_sc;
        struct sci_req *sr = ncr_sc->sc_current;
        struct se_dma_handle *dh = sr->sr_dma_hand;
        volatile struct se_regs *se = sc->sc_regs;
        long data_pa;
        int xlen;

        /*
         * Get the DMA mapping for this segment.
         * XXX - Should separate allocation and mapin.
         */
        data_pa = 0;    /* XXX se_dma_kvtopa(dh->dh_dma); */
        data_pa += (ncr_sc->sc_dataptr - dh->dh_addr);
        if (data_pa & 1)
                panic("%s: bad pa=0x%lx", __func__, data_pa);
        xlen = ncr_sc->sc_datalen;
        xlen &= ~1;             /* XXX: necessary? */
        sc->sc_reqlen = xlen;   /* XXX: or less? */

#ifdef  DEBUG
        if (se_debug & 2) {
                printf("%s: dh=%p, pa=0x%lx, xlen=0x%x\n",
                    __func__, dh, data_pa, xlen);
        }
#endif

        /* Set direction (send/recv) */
        if (dh->dh_flags & SIDH_OUT) {
                se->se_csr |= SE_CSR_SEND;
        } else {
                se->se_csr &= ~SE_CSR_SEND;
        }

        /* Load the start address. */
        se->dma_addr = (ushort)(data_pa & 0xFFFF);

        /*
         * Keep the count zero or it may start early!
         */
        se->dma_cntr = 0;
}


void
se_dma_start(struct ncr5380_softc *ncr_sc)
{
        struct se_softc *sc = (struct se_softc *)ncr_sc;
        struct sci_req *sr = ncr_sc->sc_current;
        struct se_dma_handle *dh = sr->sr_dma_hand;
        volatile struct se_regs *se = sc->sc_regs;
        int s, xlen;

        xlen = sc->sc_reqlen;

        /* This MAY be time critical (not sure). */
        s = splhigh();

        se->dma_cntr = (ushort)(xlen & 0xFFFF);

        /*
         * Acknowledge the phase change.  (After DMA setup!)
         * Put the SBIC into DMA mode, and start the transfer.
         */
        if (dh->dh_flags & SIDH_OUT) {
                *ncr_sc->sci_tcmd = PHASE_DATA_OUT;
                SCI_CLR_INTR(ncr_sc);
                *ncr_sc->sci_icmd = SCI_ICMD_DATA;
                *ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
                *ncr_sc->sci_dma_send = 0;      /* start it */
        } else {
                *ncr_sc->sci_tcmd = PHASE_DATA_IN;
                SCI_CLR_INTR(ncr_sc);
                *ncr_sc->sci_icmd = 0;
                *ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
                *ncr_sc->sci_irecv = 0;         /* start it */
        }

        /* Let'er rip! */
        se->se_csr |= SE_CSR_INTR_EN;

        splx(s);
        ncr_sc->sc_state |= NCR_DOINGDMA;

#ifdef  DEBUG
        if (se_debug & 2) {
                printf("%s: started, flags=0x%x\n",
                    __func__, ncr_sc->sc_state);
        }
#endif
}


void
se_dma_eop(struct ncr5380_softc *ncr_sc)
{

        /* Not needed - DMA was stopped prior to examining sci_csr */
}

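
/*
 * Figure out how much the DMA engine actually moved, then put the SBIC
 * back in PIO mode for the MI code.  The residual is whatever is left
 * in dma_cntr: for example, on a read where se_dma_start() loaded
 * 0x2000 and the register reads 0x0800 when we stop, ntrans is
 * 0x2000 - 0x0800 = 0x1800, and sc_dataptr/sc_datalen are advanced by
 * that amount.
 */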
void
se_dma_stop(struct ncr5380_softc *ncr_sc)
{
        struct se_softc *sc = (struct se_softc *)ncr_sc;
        struct sci_req *sr = ncr_sc->sc_current;
        struct se_dma_handle *dh = sr->sr_dma_hand;
        volatile struct se_regs *se = sc->sc_regs;
        int resid, ntrans;

        if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef  DEBUG
                printf("%s: DMA not running\n", __func__);
#endif
                return;
        }
        ncr_sc->sc_state &= ~NCR_DOINGDMA;

        /* First, halt the DMA engine. */
        se->se_csr &= ~SE_CSR_INTR_EN;  /* VME only */

        /* Set an impossible phase to prevent data movement? */
        *ncr_sc->sci_tcmd = PHASE_INVALID;

        /* Note that timeout may have set the error flag. */
        if (ncr_sc->sc_state & NCR_ABORTING)
                goto out;

        /* XXX: Wait for DMA to actually finish? */

        /*
         * Now try to figure out how much actually transferred
         */
        resid = se->dma_cntr & 0xFFFF;
        if (dh->dh_flags & SIDH_OUT)
                if ((resid > 0) && (resid < sc->sc_reqlen))
                        resid++;
        ntrans = sc->sc_reqlen - resid;

#ifdef  DEBUG
        if (se_debug & 2) {
                printf("%s: resid=0x%x ntrans=0x%x\n",
                    __func__, resid, ntrans);
        }
#endif

        if (ntrans < MIN_DMA_LEN) {
                printf("se: fifo count: 0x%x\n", resid);
                ncr_sc->sc_state |= NCR_ABORTING;
                goto out;
        }
        if (ntrans > ncr_sc->sc_datalen)
                panic("%s: excess transfer", __func__);

        /* Adjust data pointer */
        ncr_sc->sc_dataptr += ntrans;
        ncr_sc->sc_datalen -= ntrans;

out:
        se->dma_addr = 0;
        se->dma_cntr = 0;

        /* Put SBIC back in PIO mode. */
        *ncr_sc->sci_mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
        *ncr_sc->sci_icmd = 0;
}

/*****************************************************************/

static void
se_minphys(struct buf *bp)
{

        if (bp->b_bcount > MAX_DMA_LEN)
                bp->b_bcount = MAX_DMA_LEN;

        minphys(bp);
}


int
se_intr(void *arg)
{
        struct se_softc *sc = arg;
        volatile struct se_regs *se = sc->sc_regs;
        int claimed;
        u_short csr;

        claimed = 0;

        /* SBC interrupt? DMA interrupt? */
        csr = se->se_csr;
        NCR_TRACE("se_intr: csr=0x%x\n", csr);

        if (csr & SE_CSR_SBC_IP) {
                claimed = ncr5380_intr(&sc->ncr_sc);
#ifdef  DEBUG
                if (!claimed) {
                        printf("%s: spurious from SBC\n", __func__);
                }
#endif
                /* Yes, we DID cause this interrupt. */
                claimed = 1;
        }

        return claimed;
}


/*****************************************************************
 * Common functions for DMA
 ****************************************************************/

/*
 * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
 * for DMA transfer.  On the Sun3/E, this means we have to
 * allocate space in the DMA buffer for this transfer.
 */
void
se_dma_alloc(struct ncr5380_softc *ncr_sc)
{
        struct se_softc *sc = (struct se_softc *)ncr_sc;
        struct sci_req *sr = ncr_sc->sc_current;
        struct scsipi_xfer *xs = sr->sr_xs;
        struct se_dma_handle *dh;
        int i, xlen;
        u_long addr;

#ifdef  DIAGNOSTIC
        if (sr->sr_dma_hand != NULL)
                panic("%s: already have DMA handle", __func__);
#endif

        addr = (u_long)ncr_sc->sc_dataptr;
        xlen = ncr_sc->sc_datalen;

        /* If the DMA start addr is misaligned then do PIO */
        if ((addr & 1) || (xlen & 1)) {
                printf("%s: misaligned.\n", __func__);
                return;
        }

        /* Make sure our caller checked sc_min_dma_len. */
        if (xlen < MIN_DMA_LEN)
                panic("%s: xlen=0x%x", __func__, xlen);

        /*
         * Never attempt single transfers of more than 63k, because
         * our count register may be only 16 bits (an OBIO adapter).
         * This should never happen since it is already bounded by
         * minphys().  XXX - Should just segment these...
         */
        if (xlen > MAX_DMA_LEN) {
                printf("%s: excessive xlen=0x%x\n", __func__, xlen);
                ncr_sc->sc_datalen = xlen = MAX_DMA_LEN;
        }

        /*
         * Find a free DMA handle.  Guaranteed to find one since we
         * have as many DMA handles as the driver has processes.
         */
        for (i = 0; i < SCI_OPENINGS; i++) {
                if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
                        goto found;
        }
        panic("se: no free DMA handles.");
found:

        dh = &sc->sc_dma[i];
        dh->dh_flags = SIDH_BUSY;

        /* Copy the "write" flag for convenience. */
        if (xs->xs_control & XS_CTL_DATA_OUT)
                dh->dh_flags |= SIDH_OUT;

        dh->dh_addr = (uint8_t *)addr;
        dh->dh_maplen = xlen;
        dh->dh_dma = 0;         /* XXX - Allocate space in DMA buffer. */
        /* XXX: dh->dh_dma = alloc(xlen) */
        if (!dh->dh_dma) {
                /* Can't remap segment */
                printf("%s: can't remap %p/0x%x\n",
                    __func__, dh->dh_addr, dh->dh_maplen);
                dh->dh_flags = 0;
                return;
        }

        /* success */
        sr->sr_dma_hand = dh;
}


void
se_dma_free(struct ncr5380_softc *ncr_sc)
{
        struct sci_req *sr = ncr_sc->sc_current;
        struct se_dma_handle *dh = sr->sr_dma_hand;

#ifdef  DIAGNOSTIC
        if (dh == NULL)
                panic("%s: no DMA handle", __func__);
#endif

        if (ncr_sc->sc_state & NCR_DOINGDMA)
                panic("%s: free while in progress", __func__);

        if (dh->dh_flags & SIDH_BUSY) {
                /* XXX: Should separate allocation and mapping. */
                /* XXX: Give back the DMA space. */
                /* XXX: free((void *)dh->dh_dma, dh->dh_maplen); */
                dh->dh_dma = 0;
                dh->dh_flags = 0;
        }
        sr->sr_dma_hand = NULL;
}


#define CSR_MASK SE_CSR_SBC_IP
#define POLL_TIMO       50000   /* X100 = 5 sec. */

/*
 * Poll (spin-wait) for DMA completion.
 * Called right after xx_dma_start(), and
 * xx_dma_stop() will be called next.
 * Same for either VME or OBIO.
 */
void
se_dma_poll(struct ncr5380_softc *ncr_sc)
{
        struct se_softc *sc = (struct se_softc *)ncr_sc;
        struct sci_req *sr = ncr_sc->sc_current;
        volatile struct se_regs *se = sc->sc_regs;
        int tmo;

        /* Make sure DMA started successfully. */
        if (ncr_sc->sc_state & NCR_ABORTING)
                return;

        /*
         * XXX: The Sun driver waits for ~SE_CSR_DMA_ACTIVE here
         * XXX: (on obio) or even worse (on vme) a 10mS. delay!
         * XXX: I really doubt that is necessary...
         */

        /* Wait for any "DMA complete" or error bits. */
        tmo = POLL_TIMO;
        for (;;) {
                if (se->se_csr & CSR_MASK)
                        break;
                if (--tmo <= 0) {
                        printf("se: DMA timeout (while polling)\n");
                        /* Indicate timeout as MI code would. */
                        sr->sr_flags |= SR_OVERDUE;
                        break;
                }
                delay(100);
        }
        NCR_TRACE("se_dma_poll: waited %d\n",
            POLL_TIMO - tmo);

#ifdef  DEBUG
        if (se_debug & 2) {
                printf("%s: done, csr=0x%x\n", __func__, se->se_csr);
        }
#endif
}