/*	$NetBSD: wdc_obio.c,v 1.17 2001/09/09 16:08:49 bouyer Exp $	*/
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum and by Onno van der Linden.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/device.h>
42 #include <sys/malloc.h>
43
44 #include <uvm/uvm_extern.h>
45
46 #include <machine/bus.h>
47 #include <machine/autoconf.h>
48
49 #include <dev/ata/atareg.h>
50 #include <dev/ata/atavar.h>
51 #include <dev/ic/wdcvar.h>
52
53 #include <dev/ofw/openfirm.h>
54
55 #include <macppc/dev/dbdma.h>
56
57 #define WDC_REG_NPORTS 8
58 #define WDC_AUXREG_OFFSET 0x16
59 #define WDC_DEFAULT_PIO_IRQ 13 /* XXX */
60 #define WDC_DEFAULT_DMA_IRQ 2 /* XXX */
61
62 #define WDC_OPTIONS_DMA 0x01
63
64 /*
65 * XXX This code currently doesn't even try to allow 32-bit data port use.
66 */
67
/*
 * Per-device softc for an obio-attached wdc (IDE) controller.
 * There is exactly one channel per controller instance.
 */
struct wdc_obio_softc {
	struct wdc_softc sc_wdcdev;	/* generic wdc device; must be first */
	struct channel_softc *wdc_chanptr; /* one-entry channel pointer array
					      fed to sc_wdcdev.channels */
	struct channel_softc wdc_channel; /* the single ATA channel itself */
	dbdma_regmap_t *sc_dmareg;	/* mapped DBDMA engine registers */
	dbdma_command_t *sc_dmacmd;	/* DBDMA command list (20 entries) */
	void *sc_ih;			/* interrupt handler cookie */
};
76
77 int wdc_obio_probe __P((struct device *, struct cfdata *, void *));
78 void wdc_obio_attach __P((struct device *, struct device *, void *));
79 int wdc_obio_detach __P((struct device *, int));
80 int wdc_obio_dma_init __P((void *, int, int, void *, size_t, int));
81 void wdc_obio_dma_start __P((void *, int, int));
82 int wdc_obio_dma_finish __P((void *, int, int, int));
83 static void adjust_timing __P((struct channel_softc *));
84 static void ata4_adjust_timing __P((struct channel_softc *));
85
/* Autoconfiguration glue: softc size and probe/attach/detach/activate. */
struct cfattach wdc_obio_ca = {
	sizeof(struct wdc_obio_softc), wdc_obio_probe, wdc_obio_attach,
	wdc_obio_detach, wdcactivate
};
90
91
92 int
93 wdc_obio_probe(parent, match, aux)
94 struct device *parent;
95 struct cfdata *match;
96 void *aux;
97 {
98 struct confargs *ca = aux;
99 char compat[32];
100
101 /* XXX should not use name */
102 if (strcmp(ca->ca_name, "ATA") == 0 ||
103 strcmp(ca->ca_name, "ata") == 0 ||
104 strcmp(ca->ca_name, "ata0") == 0 ||
105 strcmp(ca->ca_name, "ide") == 0)
106 return 1;
107
108 memset(compat, 0, sizeof(compat));
109 OF_getprop(ca->ca_node, "compatible", compat, sizeof(compat));
110 if (strcmp(compat, "heathrow-ata") == 0 ||
111 strcmp(compat, "keylargo-ata") == 0)
112 return 1;
113
114 return 0;
115 }
116
/*
 * Attach an obio wdc controller: pick an interrupt, map the command
 * and control registers, optionally set up DBDMA, and hand the channel
 * to the MI wdc layer.  Order matters here (map before intr_establish
 * before wdcattach); do not reorder.
 */
void
wdc_obio_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct wdc_obio_softc *sc = (void *)self;
	struct confargs *ca = aux;
	struct channel_softc *chp = &sc->wdc_channel;
	int intr;
	int use_dma = 0;
	char path[80];

	/* DMA only when enabled via cf_flags and the node looks capable. */
	if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & WDC_OPTIONS_DMA) {
		if (ca->ca_nreg >= 16 || ca->ca_nintr == -1)
			use_dma = 1;	/* XXX Don't work yet. */
	}

	/*
	 * Pick the interrupt line: from the OF properties when present
	 * (ca_nintr/ca_nreg are byte counts), otherwise fall back to the
	 * historical default when the property is absent entirely.
	 */
	if (ca->ca_nintr >= 4 && ca->ca_nreg >= 8) {
		intr = ca->ca_intr[0];
		printf(" irq %d", intr);
	} else if (ca->ca_nintr == -1) {
		intr = WDC_DEFAULT_PIO_IRQ;
		printf(" irq property not found; using %d", intr);
	} else {
		printf(": couldn't get irq property\n");
		return;
	}

	if (use_dma)
		printf(": DMA transfer");

	printf("\n");

	/* Both command and control blocks share one bus-space tag. */
	chp->cmd_iot = chp->ctl_iot =
		macppc_make_bus_space_tag(ca->ca_baseaddr + ca->ca_reg[0], 4);

	/* Control (aux) register lives inside the command block window. */
	if (bus_space_map(chp->cmd_iot, 0, WDC_REG_NPORTS, 0, &chp->cmd_ioh) ||
	    bus_space_subregion(chp->cmd_iot, chp->cmd_ioh,
		WDC_AUXREG_OFFSET, 1, &chp->ctl_ioh)) {
		printf("%s: couldn't map registers\n",
			sc->sc_wdcdev.sc_dev.dv_xname);
		return;
	}
#if 0
	chp->data32iot = chp->cmd_iot;
	chp->data32ioh = chp->cmd_ioh;
#endif

	sc->sc_ih = intr_establish(intr, IST_LEVEL, IPL_BIO, wdcintr, chp);

	if (use_dma) {
		/* DBDMA command list; ca_reg[2]/[3] hold the engine regs. */
		sc->sc_dmacmd = dbdma_alloc(sizeof(dbdma_command_t) * 20);
		sc->sc_dmareg = mapiodev(ca->ca_baseaddr + ca->ca_reg[2],
					 ca->ca_reg[3]);
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
		sc->sc_wdcdev.DMA_cap = 2;
		/* "ata-4" (Keylargo) controllers additionally do UDMA/66. */
		if (strcmp(ca->ca_name, "ata-4") == 0) {
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
			sc->sc_wdcdev.UDMA_cap = 4;
			sc->sc_wdcdev.set_modes = ata4_adjust_timing;
		} else {
			sc->sc_wdcdev.set_modes = adjust_timing;
		}
#ifdef notyet
		/* Minimum cycle time is 150ns (DMA MODE 1) on ohare. */
		if (ohare) {
			sc->sc_wdcdev.PIO_cap = 3;
			sc->sc_wdcdev.DMA_cap = 1;
		}
#endif
	} else {
		/* all non-dma controllers can use adjust_timing */
		sc->sc_wdcdev.set_modes = adjust_timing;
	}

	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
	sc->wdc_chanptr = chp;
	sc->sc_wdcdev.channels = &sc->wdc_chanptr;
	sc->sc_wdcdev.nchannels = 1;
	sc->sc_wdcdev.dma_arg = sc;
	sc->sc_wdcdev.dma_init = wdc_obio_dma_init;
	sc->sc_wdcdev.dma_start = wdc_obio_dma_start;
	sc->sc_wdcdev.dma_finish = wdc_obio_dma_finish;
	chp->channel = 0;
	chp->wdc = &sc->sc_wdcdev;
	chp->ch_queue = malloc(sizeof(struct channel_queue),
	    M_DEVBUF, M_NOWAIT);
	if (chp->ch_queue == NULL) {
		printf("%s: can't allocate memory for command queue",
		sc->sc_wdcdev.sc_dev.dv_xname);
		return;
	}

#define OHARE_FEATURE_REG	0xf3000038

	/* XXX Enable wdc1 by feature reg. */
	memset(path, 0, sizeof(path));
	OF_package_to_path(ca->ca_node, path, sizeof(path));
	if (strcmp(path, "/bandit@F2000000/ohare@10/ata@21000") == 0) {
		u_int x;

		x = in32rb(OHARE_FEATURE_REG);
		x |= 8;
		out32rb(OHARE_FEATURE_REG, x);
	}

	wdcattach(chp);
	sc->sc_wdcdev.set_modes(chp);

}
228
/*
 * PIO / multiword DMA / Ultra-DMA transfer timings, indexed by mode.
 * Values are from the ATA specifications.
 */
struct ide_timings {
	int cycle;	/* minimum cycle time [ns] */
	int active;	/* minimum command active time [ns] */
};
static struct ide_timings pio_timing[5] = {
	{ 600, 165 },    /* Mode 0 */
	{ 383, 125 },    /*      1 */
	{ 240, 100 },    /*      2 */
	{ 180,  80 },    /*      3 */
	{ 120,  70 }     /*      4 */
};
static struct ide_timings dma_timing[3] = {
	{ 480, 215 },	/* Mode 0 */
	{ 150,  80 },	/* Mode 1 */
	{ 120,  70 },	/* Mode 2 */
};

static struct ide_timings udma_timing[5] = {
	{114, 0},	/* Mode 0 */
	{ 75, 0},	/* Mode 1 */
	{ 55, 0},	/* Mode 2 */
	{ 45, 100},	/* Mode 3 */
	{ 25, 100}	/* Mode 4 */
};
254
255 #define TIME_TO_TICK(time) howmany((time), 30)
256 #define PIO_REC_OFFSET 4
257 #define PIO_REC_MIN 1
258 #define PIO_ACT_MIN 1
259 #define DMA_REC_OFFSET 1
260 #define DMA_REC_MIN 1
261 #define DMA_ACT_MIN 1
262
263 #define ATA4_TIME_TO_TICK(time) howmany((time) * 1000, 7500)
264
265
266 #define CONFIG_REG (0x200 >> 4) /* IDE access timing register */
267
268 void
269 adjust_timing(chp)
270 struct channel_softc *chp;
271 {
272 struct ata_drive_datas *drvp;
273 u_int conf;
274 int drive;
275 int piomode = -1, dmamode = -1;
276 int min_cycle, min_active;
277 int cycle_tick, act_tick, inact_tick, half_tick;
278
279
280 for (drive = 0; drive < 2; drive++) {
281 drvp = &chp->ch_drive[drive];
282 if ((drvp->drive_flags & DRIVE) == 0)
283 continue;
284 if (piomode == -1 || piomode > drvp->PIO_mode)
285 piomode = drvp->PIO_mode;
286 if (drvp->drive_flags & DRIVE_DMA) {
287 if (dmamode == -1 || dmamode > drvp->DMA_mode)
288 dmamode = drvp->DMA_mode;
289 }
290 }
291 if (piomode == -1)
292 return; /* No drive */
293 for (drive = 0; drive < 2; drive++) {
294 drvp = &chp->ch_drive[drive];
295 if (drvp->drive_flags & DRIVE) {
296 drvp->PIO_mode = piomode;
297 if (drvp->drive_flags & DRIVE_DMA)
298 drvp->DMA_mode = dmamode;
299 }
300 }
301 min_cycle = pio_timing[piomode].cycle;
302 min_active = pio_timing[piomode].active;
303
304 cycle_tick = TIME_TO_TICK(min_cycle);
305 act_tick = TIME_TO_TICK(min_active);
306 if (act_tick < PIO_ACT_MIN)
307 act_tick = PIO_ACT_MIN;
308 inact_tick = cycle_tick - act_tick - PIO_REC_OFFSET;
309 if (inact_tick < PIO_REC_MIN)
310 inact_tick = PIO_REC_MIN;
311 /* mask: 0x000007ff */
312 conf = (inact_tick << 5) | act_tick;
313 if (dmamode != -1) {
314 /* there are active DMA mode */
315
316 min_cycle = dma_timing[dmamode].cycle;
317 min_active = dma_timing[dmamode].active;
318 cycle_tick = TIME_TO_TICK(min_cycle);
319 act_tick = TIME_TO_TICK(min_active);
320 inact_tick = cycle_tick - act_tick - DMA_REC_OFFSET;
321 if (inact_tick < DMA_REC_MIN)
322 inact_tick = DMA_REC_MIN;
323 half_tick = 0; /* XXX */
324 /* mask: 0xfffff800 */
325 conf |=
326 (half_tick << 21) |
327 (inact_tick << 16) | (act_tick << 11);
328 }
329 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, CONFIG_REG, conf);
330 printf("conf = 0x%x, cyc = %d (%d ns), act = %d (%d ns), inact = %d\n",
331 conf, cycle_tick, min_cycle, act_tick, min_active, inact_tick);
332 wdc_print_modes(chp);
333 }
334
335 void
336 ata4_adjust_timing(chp)
337 struct channel_softc *chp;
338 {
339 struct ata_drive_datas *drvp;
340 u_int conf;
341 int drive;
342 int piomode = -1, dmamode = -1;
343 int min_cycle, min_active;
344 int cycle_tick, act_tick, inact_tick;
345 int udmamode = -1;
346
347
348 for (drive = 0; drive < 2; drive++) {
349 drvp = &chp->ch_drive[drive];
350 if ((drvp->drive_flags & DRIVE) == 0)
351 continue;
352 if (piomode == -1 || piomode > drvp->PIO_mode)
353 piomode = drvp->PIO_mode;
354 if (drvp->drive_flags & DRIVE_DMA) {
355 if (dmamode == -1 || dmamode > drvp->DMA_mode)
356 dmamode = drvp->DMA_mode;
357 }
358 if (drvp->drive_flags & DRIVE_UDMA) {
359 if (udmamode == -1 || udmamode > drvp->UDMA_mode)
360 udmamode = drvp->UDMA_mode;
361 }
362 }
363 if (piomode == -1)
364 return; /* No drive */
365 for (drive = 0; drive < 2; drive++) {
366 drvp = &chp->ch_drive[drive];
367 if (drvp->drive_flags & DRIVE) {
368 drvp->PIO_mode = piomode;
369 if (drvp->drive_flags & DRIVE_DMA)
370 drvp->DMA_mode = dmamode;
371 if (drvp->drive_flags & DRIVE_UDMA)
372 drvp->UDMA_mode = udmamode;
373 }
374 }
375 min_cycle = pio_timing[piomode].cycle;
376 min_active = pio_timing[piomode].active;
377
378 cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
379 act_tick = ATA4_TIME_TO_TICK(min_active);
380 inact_tick = cycle_tick - act_tick;
381 /* mask: 0x000003ff */
382 conf = (inact_tick << 5) | act_tick;
383 if (dmamode != -1) {
384 /* there are active DMA mode */
385
386 min_cycle = dma_timing[dmamode].cycle;
387 min_active = dma_timing[dmamode].active;
388 cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
389 act_tick = ATA4_TIME_TO_TICK(min_active);
390 inact_tick = cycle_tick - act_tick;
391 /* mask: 0x001ffc00 */
392 conf |= (act_tick << 10) | (inact_tick << 15);
393 }
394 if (udmamode != -1) {
395 min_cycle = udma_timing[udmamode].cycle;
396 min_active = udma_timing[udmamode].active;
397 act_tick = ATA4_TIME_TO_TICK(min_active);
398 cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
399 /* mask: 0x1ff00000 */
400 conf |= (cycle_tick << 21) | (act_tick << 25) | 0x100000;
401 }
402
403 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, CONFIG_REG, conf);
404 printf("ata4 conf = 0x%x, cyc = %d (%d ns), act = %d (%d ns), inact = %d\n",
405 conf, cycle_tick, min_cycle, act_tick, min_active, inact_tick);
406 wdc_print_modes(chp);
407 }
408
409 int
410 wdc_obio_detach(self, flags)
411 struct device *self;
412 int flags;
413 {
414 struct wdc_obio_softc *sc = (void *)self;
415 int error;
416
417 if ((error = wdcdetach(self, flags)) != 0)
418 return error;
419
420 intr_disestablish(sc->sc_ih);
421
422 free(sc->wdc_channel.ch_queue, M_DEVBUF);
423
424 /* Unmap our i/o space. */
425 bus_space_unmap(chp->cmd_iot, chp->cmd_ioh, WDC_REG_NPORTS);
426
427 /* Unmap DMA registers. */
428 /* XXX unmapiodev(sc->sc_dmareg); */
429 /* XXX free(sc->sc_dmacmd); */
430
431 return 0;
432 }
433
434 int
435 wdc_obio_dma_init(v, channel, drive, databuf, datalen, read)
436 void *v;
437 void *databuf;
438 size_t datalen;
439 int read;
440 {
441 struct wdc_obio_softc *sc = v;
442 vaddr_t va = (vaddr_t)databuf;
443 dbdma_command_t *cmdp;
444 u_int cmd, offset;
445
446 cmdp = sc->sc_dmacmd;
447 cmd = read ? DBDMA_CMD_IN_MORE : DBDMA_CMD_OUT_MORE;
448
449 offset = va & PGOFSET;
450
451 /* if va is not page-aligned, setup the first page */
452 if (offset != 0) {
453 int rest = NBPG - offset; /* the rest of the page */
454
455 if (datalen > rest) { /* if continues to next page */
456 DBDMA_BUILD(cmdp, cmd, 0, rest, vtophys(va),
457 DBDMA_INT_NEVER, DBDMA_WAIT_NEVER,
458 DBDMA_BRANCH_NEVER);
459 datalen -= rest;
460 va += rest;
461 cmdp++;
462 }
463 }
464
465 /* now va is page-aligned */
466 while (datalen > NBPG) {
467 DBDMA_BUILD(cmdp, cmd, 0, NBPG, vtophys(va),
468 DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
469 datalen -= NBPG;
470 va += NBPG;
471 cmdp++;
472 }
473
474 /* the last page (datalen <= NBPG here) */
475 cmd = read ? DBDMA_CMD_IN_LAST : DBDMA_CMD_OUT_LAST;
476 DBDMA_BUILD(cmdp, cmd, 0, datalen, vtophys(va),
477 DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
478 cmdp++;
479
480 DBDMA_BUILD(cmdp, DBDMA_CMD_STOP, 0, 0, 0,
481 DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
482
483 return 0;
484 }
485
486 void
487 wdc_obio_dma_start(v, channel, drive)
488 void *v;
489 int channel, drive;
490 {
491 struct wdc_obio_softc *sc = v;
492
493 dbdma_start(sc->sc_dmareg, sc->sc_dmacmd);
494 }
495
496 int
497 wdc_obio_dma_finish(v, channel, drive, read)
498 void *v;
499 int channel, drive;
500 int read;
501 {
502 struct wdc_obio_softc *sc = v;
503
504 dbdma_stop(sc->sc_dmareg);
505 return 0;
506 }
507