/*	$NetBSD: wdc_obio.c,v 1.29 2003/10/08 11:12:36 bouyer Exp $	*/

/*-
 * Copyright (c) 1998, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Onno van der Linden.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: wdc_obio.c,v 1.29 2003/10/08 11:12:36 bouyer Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/autoconf.h>

#include <dev/ata/atareg.h>
#include <dev/ata/atavar.h>
#include <dev/ic/wdcvar.h>

#include <dev/ofw/openfirm.h>

#include <macppc/dev/dbdma.h>
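
/*
 * On-board IDE (wdc) attachment for macppc: the ATA cell found in the
 * ohare/Heathrow/Keylargo I/O controllers.  Registers are mapped through
 * obio, the interrupt is hooked up with intr_establish(), and DMA
 * transfers go through the controller's DBDMA engine.
 */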

#define WDC_REG_NPORTS		8
#define WDC_AUXREG_OFFSET	0x16
#define WDC_DEFAULT_PIO_IRQ	13	/* XXX */
#define WDC_DEFAULT_DMA_IRQ	2	/* XXX */

#define WDC_OPTIONS_DMA		0x01

/*
 * XXX This code currently doesn't even try to allow 32-bit data port use.
 */

struct wdc_obio_softc {
        struct wdc_softc sc_wdcdev;
        struct channel_softc *wdc_chanptr;
        struct channel_softc wdc_channel;
        dbdma_regmap_t *sc_dmareg;
        dbdma_command_t *sc_dmacmd;
        u_int sc_dmaconf[2];	/* per target value of CONFIG_REG */
        void *sc_ih;
};

int wdc_obio_probe __P((struct device *, struct cfdata *, void *));
void wdc_obio_attach __P((struct device *, struct device *, void *));
int wdc_obio_detach __P((struct device *, int));
int wdc_obio_dma_init __P((void *, int, int, void *, size_t, int));
void wdc_obio_dma_start __P((void *, int, int));
int wdc_obio_dma_finish __P((void *, int, int, int));

static void wdc_obio_select __P((struct channel_softc *, int));
static void adjust_timing __P((struct channel_softc *));
static void ata4_adjust_timing __P((struct channel_softc *));

CFATTACH_DECL(wdc_obio, sizeof(struct wdc_obio_softc),
    wdc_obio_probe, wdc_obio_attach, wdc_obio_detach, wdcactivate);

int
wdc_obio_probe(parent, match, aux)
        struct device *parent;
        struct cfdata *match;
        void *aux;
{
        struct confargs *ca = aux;
        char compat[32];

        /* XXX should not use name */
        if (strcmp(ca->ca_name, "ATA") == 0 ||
            strcmp(ca->ca_name, "ata") == 0 ||
            strcmp(ca->ca_name, "ata0") == 0 ||
            strcmp(ca->ca_name, "ide") == 0)
                return 1;

        memset(compat, 0, sizeof(compat));
        OF_getprop(ca->ca_node, "compatible", compat, sizeof(compat));
        if (strcmp(compat, "heathrow-ata") == 0 ||
            strcmp(compat, "keylargo-ata") == 0)
                return 1;

        return 0;
}

void
wdc_obio_attach(parent, self, aux)
        struct device *parent, *self;
        void *aux;
{
        struct wdc_obio_softc *sc = (void *)self;
        struct confargs *ca = aux;
        struct channel_softc *chp = &sc->wdc_channel;
        int intr;
        int use_dma = 0;
        char path[80];

        if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & WDC_OPTIONS_DMA) {
                if (ca->ca_nreg >= 16 || ca->ca_nintr == -1)
                        use_dma = 1;	/* XXX Doesn't work yet. */
        }

        if (ca->ca_nintr >= 4 && ca->ca_nreg >= 8) {
                intr = ca->ca_intr[0];
                printf(" irq %d", intr);
        } else if (ca->ca_nintr == -1) {
                intr = WDC_DEFAULT_PIO_IRQ;
                printf(" irq property not found; using %d", intr);
        } else {
                printf(": couldn't get irq property\n");
                return;
        }

        if (use_dma)
                printf(": DMA transfer");

        printf("\n");

        chp->cmd_iot = chp->ctl_iot =
            macppc_make_bus_space_tag(ca->ca_baseaddr + ca->ca_reg[0], 4);

        if (bus_space_map(chp->cmd_iot, 0, WDC_REG_NPORTS, 0, &chp->cmd_ioh) ||
            bus_space_subregion(chp->cmd_iot, chp->cmd_ioh,
                WDC_AUXREG_OFFSET, 1, &chp->ctl_ioh)) {
                printf("%s: couldn't map registers\n",
                    sc->sc_wdcdev.sc_dev.dv_xname);
                return;
        }
#if 0
        chp->data32iot = chp->cmd_iot;
        chp->data32ioh = chp->cmd_ioh;
#endif

        sc->sc_ih = intr_establish(intr, IST_LEVEL, IPL_BIO, wdcintr, chp);

        if (use_dma) {
                sc->sc_dmacmd = dbdma_alloc(sizeof(dbdma_command_t) * 20);
                sc->sc_dmareg = mapiodev(ca->ca_baseaddr + ca->ca_reg[2],
                    ca->ca_reg[3]);
                sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
                sc->sc_wdcdev.DMA_cap = 2;
                if (strcmp(ca->ca_name, "ata-4") == 0) {
                        sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
                        sc->sc_wdcdev.UDMA_cap = 4;
                        sc->sc_wdcdev.set_modes = ata4_adjust_timing;
                } else {
                        sc->sc_wdcdev.set_modes = adjust_timing;
                }
#ifdef notyet
                /* Minimum cycle time is 150ns (DMA MODE 1) on ohare. */
                if (ohare) {
                        sc->sc_wdcdev.PIO_cap = 3;
                        sc->sc_wdcdev.DMA_cap = 1;
                }
#endif
        } else {
                /* all non-DMA controllers can use adjust_timing */
                sc->sc_wdcdev.set_modes = adjust_timing;
        }

        sc->sc_wdcdev.PIO_cap = 4;
        sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
        sc->wdc_chanptr = chp;
        sc->sc_wdcdev.channels = &sc->wdc_chanptr;
        sc->sc_wdcdev.nchannels = 1;
        sc->sc_wdcdev.dma_arg = sc;
        sc->sc_wdcdev.dma_init = wdc_obio_dma_init;
        sc->sc_wdcdev.dma_start = wdc_obio_dma_start;
        sc->sc_wdcdev.dma_finish = wdc_obio_dma_finish;
        chp->channel = 0;
        chp->wdc = &sc->sc_wdcdev;
        chp->ch_queue = malloc(sizeof(struct channel_queue),
            M_DEVBUF, M_NOWAIT);
        if (chp->ch_queue == NULL) {
                printf("%s: can't allocate memory for command queue\n",
                    sc->sc_wdcdev.sc_dev.dv_xname);
                return;
        }

#define OHARE_FEATURE_REG	0xf3000038

        /* XXX Enable wdc1 by feature reg. */
        memset(path, 0, sizeof(path));
        OF_package_to_path(ca->ca_node, path, sizeof(path));
        if (strcmp(path, "/bandit@F2000000/ohare@10/ata@21000") == 0) {
                u_int x;

                x = in32rb(OHARE_FEATURE_REG);
                x |= 8;
                out32rb(OHARE_FEATURE_REG, x);
        }

        wdcattach(chp);
}

/* PIO/multiword DMA/Ultra-DMA timing parameters (minimum times in ns) */
struct ide_timings {
        int cycle;	/* minimum cycle time [ns] */
        int active;	/* minimum command active time [ns] */
};

static struct ide_timings pio_timing[5] = {
        { 600, 180 },	/* Mode 0 */
        { 390, 150 },	/*      1 */
        { 240, 105 },	/*      2 */
        { 180,  90 },	/*      3 */
        { 120,  75 }	/*      4 */
};

static struct ide_timings dma_timing[3] = {
        { 480, 240 },	/* Mode 0 */
        { 165,  90 },	/* Mode 1 */
        { 120,  75 }	/* Mode 2 */
};

static struct ide_timings udma_timing[5] = {
        { 120, 180 },	/* Mode 0 */
        {  90, 150 },	/* Mode 1 */
        {  60, 120 },	/* Mode 2 */
        {  45,  90 },	/* Mode 3 */
        {  30,  90 }	/* Mode 4 */
};

#define TIME_TO_TICK(time) howmany((time), 30)
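
/*
 * Nanosecond values from the tables above are converted to controller
 * clock ticks with howmany(), i.e. rounding up: TIME_TO_TICK (above)
 * assumes an approximately 30 ns tick on the pre-ata-4 cell, while
 * ATA4_TIME_TO_TICK (below) uses the Keylargo ata-4 cell's 15 ns clock.
 */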
#define PIO_REC_OFFSET	4
#define PIO_REC_MIN	1
#define PIO_ACT_MIN	1
#define DMA_REC_OFFSET	1
#define DMA_REC_MIN	1
#define DMA_ACT_MIN	1

#define ATA4_TIME_TO_TICK(time) howmany((time), 15)	/* 15 ns clock */

#define CONFIG_REG (0x200 >> 4)		/* IDE access timing register */

void
wdc_obio_select(chp, drive)
        struct channel_softc *chp;
        int drive;
{
        struct wdc_obio_softc *sc = (struct wdc_obio_softc *)chp->wdc;

        bus_space_write_4(chp->cmd_iot, chp->cmd_ioh,
            CONFIG_REG, sc->sc_dmaconf[drive]);
}
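
/*
 * adjust_timing() recomputes the per-drive CONFIG_REG value from the
 * PIO/DMA modes negotiated by the MI wdc code and caches it in
 * sc_dmaconf[].  The per-drive "select" hook is only installed when the
 * two drives need different register values.
 *
 * Worked example (PIO mode 4, no DMA): min_cycle = 120 ns and
 * min_active = 75 ns, so act_tick = howmany(75, 30) = 3,
 * cycle_tick = howmany(120, 30) = 4,
 * inact_tick = max(4 - 3 - PIO_REC_OFFSET, PIO_REC_MIN) = 1, and
 * conf = (1 << 5) | 3 = 0x23.
 */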

void
adjust_timing(chp)
        struct channel_softc *chp;
{
        struct wdc_obio_softc *sc = (struct wdc_obio_softc *)chp->wdc;
        int drive;
        int min_cycle, min_active;
        int cycle_tick, act_tick, inact_tick, half_tick;

        for (drive = 0; drive < 2; drive++) {
                u_int conf = 0;
                struct ata_drive_datas *drvp;

                drvp = &chp->ch_drive[drive];
                /* set up PIO mode timings */
                if (drvp->drive_flags & DRIVE) {
                        int piomode = drvp->PIO_mode;

                        min_cycle = pio_timing[piomode].cycle;
                        min_active = pio_timing[piomode].active;

                        cycle_tick = TIME_TO_TICK(min_cycle);
                        act_tick = TIME_TO_TICK(min_active);
                        if (act_tick < PIO_ACT_MIN)
                                act_tick = PIO_ACT_MIN;
                        inact_tick = cycle_tick - act_tick - PIO_REC_OFFSET;
                        if (inact_tick < PIO_REC_MIN)
                                inact_tick = PIO_REC_MIN;
                        /* mask: 0x000007ff */
                        conf |= (inact_tick << 5) | act_tick;
                }
                /* set up DMA mode timings */
                if (drvp->drive_flags & DRIVE_DMA) {
                        int dmamode = drvp->DMA_mode;

                        min_cycle = dma_timing[dmamode].cycle;
                        min_active = dma_timing[dmamode].active;
                        cycle_tick = TIME_TO_TICK(min_cycle);
                        act_tick = TIME_TO_TICK(min_active);
                        inact_tick = cycle_tick - act_tick - DMA_REC_OFFSET;
                        if (inact_tick < DMA_REC_MIN)
                                inact_tick = DMA_REC_MIN;
                        half_tick = 0;	/* XXX */
                        /* mask: 0xfffff800 */
                        conf |=
                            (half_tick << 21) |
                            (inact_tick << 16) | (act_tick << 11);
                }
#ifdef DEBUG
                if (conf) {
                        printf("conf[%d] = 0x%x, cyc = %d (%d ns), "
                            "act = %d (%d ns), inact = %d\n",
                            drive, conf, cycle_tick, min_cycle,
                            act_tick, min_active, inact_tick);
                }
#endif
                sc->sc_dmaconf[drive] = conf;
        }
        sc->sc_wdcdev.cap &= ~WDC_CAPABILITY_SELECT;
        sc->sc_wdcdev.select = 0;
        if (sc->sc_dmaconf[0]) {
                wdc_obio_select(chp, 0);
                if (sc->sc_dmaconf[1] &&
                    (sc->sc_dmaconf[0] != sc->sc_dmaconf[1])) {
                        sc->sc_wdcdev.select = wdc_obio_select;
                        sc->sc_wdcdev.cap |= WDC_CAPABILITY_SELECT;
                }
        } else if (sc->sc_dmaconf[1]) {
                wdc_obio_select(chp, 1);
        }
}
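
/*
 * Same as adjust_timing(), but for the Keylargo "ata-4" cell: ticks are
 * 15 ns, the CONFIG_REG field layout differs, and an extra UDMA timing
 * field is filled in when the drive negotiated an Ultra-DMA mode (the
 * 0x100000 bit set below presumably enables UDMA for that drive).
 */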

void
ata4_adjust_timing(chp)
        struct channel_softc *chp;
{
        struct wdc_obio_softc *sc = (struct wdc_obio_softc *)chp->wdc;
        int drive;
        int min_cycle, min_active;
        int cycle_tick, act_tick, inact_tick;

        for (drive = 0; drive < 2; drive++) {
                u_int conf = 0;
                struct ata_drive_datas *drvp;

                drvp = &chp->ch_drive[drive];
                /* set up PIO mode timings */
                if (drvp->drive_flags & DRIVE) {
                        int piomode = drvp->PIO_mode;

                        min_cycle = pio_timing[piomode].cycle;
                        min_active = pio_timing[piomode].active;

                        cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
                        act_tick = ATA4_TIME_TO_TICK(min_active);
                        inact_tick = cycle_tick - act_tick;
                        /* mask: 0x000003ff */
                        conf |= (inact_tick << 5) | act_tick;
                }
                /* set up DMA mode timings */
                if (drvp->drive_flags & DRIVE_DMA) {
                        int dmamode = drvp->DMA_mode;

                        min_cycle = dma_timing[dmamode].cycle;
                        min_active = dma_timing[dmamode].active;
                        cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
                        act_tick = ATA4_TIME_TO_TICK(min_active);
                        inact_tick = cycle_tick - act_tick;
                        /* mask: 0x001ffc00 */
                        conf |= (act_tick << 10) | (inact_tick << 15);
                }
                /* set up UDMA mode timings */
                if (drvp->drive_flags & DRIVE_UDMA) {
                        int udmamode = drvp->UDMA_mode;

                        min_cycle = udma_timing[udmamode].cycle;
                        min_active = udma_timing[udmamode].active;
                        act_tick = ATA4_TIME_TO_TICK(min_active);
                        cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
                        /* mask: 0x1ff00000 */
                        conf |= (cycle_tick << 21) | (act_tick << 25) | 0x100000;
                }
#ifdef DEBUG
                if (conf) {
                        printf("ata4 conf[%d] = 0x%x, cyc = %d (%d ns), "
                            "act = %d (%d ns), inact = %d\n",
                            drive, conf, cycle_tick, min_cycle,
                            act_tick, min_active, inact_tick);
                }
#endif
                sc->sc_dmaconf[drive] = conf;
        }
        sc->sc_wdcdev.cap &= ~WDC_CAPABILITY_SELECT;
        sc->sc_wdcdev.select = 0;
        if (sc->sc_dmaconf[0]) {
                wdc_obio_select(chp, 0);
                if (sc->sc_dmaconf[1] &&
                    (sc->sc_dmaconf[0] != sc->sc_dmaconf[1])) {
                        sc->sc_wdcdev.select = wdc_obio_select;
                        sc->sc_wdcdev.cap |= WDC_CAPABILITY_SELECT;
                }
        } else if (sc->sc_dmaconf[1]) {
                wdc_obio_select(chp, 1);
        }
}

int
wdc_obio_detach(self, flags)
        struct device *self;
        int flags;
{
        struct wdc_obio_softc *sc = (void *)self;
        int error;

        if ((error = wdcdetach(self, flags)) != 0)
                return error;

        intr_disestablish(sc->sc_ih);

        free(sc->wdc_channel.ch_queue, M_DEVBUF);

        /* Unmap our i/o space. */
        bus_space_unmap(sc->wdc_channel.cmd_iot, sc->wdc_channel.cmd_ioh,
            WDC_REG_NPORTS);

        /* Unmap DMA registers. */
        /* XXX unmapiodev(sc->sc_dmareg); */
        /* XXX free(sc->sc_dmacmd); */

        return 0;
}
438 1.1 tsubai
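/*
 * Build the DBDMA descriptor chain for a transfer: one INPUT_MORE or
 * OUTPUT_MORE descriptor per page of the kernel-virtual data buffer
 * (translated with vtophys(), so the buffer is assumed to be resident),
 * a *_LAST descriptor for the final piece, and a STOP command to
 * terminate the chain.  No descriptor crosses a page boundary.
 */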
int
wdc_obio_dma_init(v, channel, drive, databuf, datalen, flags)
        void *v;
        int channel, drive;
        void *databuf;
        size_t datalen;
        int flags;
{
        struct wdc_obio_softc *sc = v;
        vaddr_t va = (vaddr_t)databuf;
        dbdma_command_t *cmdp;
        u_int cmd, offset;
        int read = flags & WDC_DMA_READ;

        cmdp = sc->sc_dmacmd;
        cmd = read ? DBDMA_CMD_IN_MORE : DBDMA_CMD_OUT_MORE;

        offset = va & PGOFSET;

        /* if va is not page-aligned, set up the first page */
        if (offset != 0) {
                int rest = PAGE_SIZE - offset;	/* the rest of the page */

                if (datalen > rest) {	/* if it continues to the next page */
                        DBDMA_BUILD(cmdp, cmd, 0, rest, vtophys(va),
                            DBDMA_INT_NEVER, DBDMA_WAIT_NEVER,
                            DBDMA_BRANCH_NEVER);
                        datalen -= rest;
                        va += rest;
                        cmdp++;
                }
        }

        /* now va is page-aligned */
        while (datalen > PAGE_SIZE) {
                DBDMA_BUILD(cmdp, cmd, 0, PAGE_SIZE, vtophys(va),
                    DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
                datalen -= PAGE_SIZE;
                va += PAGE_SIZE;
                cmdp++;
        }

        /* the last page (datalen <= PAGE_SIZE here) */
        cmd = read ? DBDMA_CMD_IN_LAST : DBDMA_CMD_OUT_LAST;
        DBDMA_BUILD(cmdp, cmd, 0, datalen, vtophys(va),
            DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
        cmdp++;

        DBDMA_BUILD(cmdp, DBDMA_CMD_STOP, 0, 0, 0,
            DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);

        return 0;
}

void
wdc_obio_dma_start(v, channel, drive)
        void *v;
        int channel, drive;
{
        struct wdc_obio_softc *sc = v;

        dbdma_start(sc->sc_dmareg, sc->sc_dmacmd);
}

int
wdc_obio_dma_finish(v, channel, drive, read)
        void *v;
        int channel, drive;
        int read;
{
        struct wdc_obio_softc *sc = v;

        dbdma_stop(sc->sc_dmareg);
        return 0;
}