wdc_obio.c revision 1.9.4.1 1 /* $NetBSD: wdc_obio.c,v 1.9.4.1 2002/01/16 10:15:57 he Exp $ */
2
3 /*-
4 * Copyright (c) 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Charles M. Hannum and by Onno van der Linden.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/device.h>
42 #include <sys/malloc.h>
43
44 #include <vm/vm.h>
45
46 #include <machine/bus.h>
47 #include <machine/autoconf.h>
48
49 #include <dev/ata/atareg.h>
50 #include <dev/ata/atavar.h>
51 #include <dev/ic/wdcvar.h>
52
53 #include <macppc/dev/dbdma.h>
54
55 #define WDC_REG_NPORTS 8
56 #define WDC_AUXREG_OFFSET 0x16
57 #define WDC_DEFAULT_PIO_IRQ 13 /* XXX */
58 #define WDC_DEFAULT_DMA_IRQ 2 /* XXX */
59
60 #define WDC_OPTIONS_DMA 0x01
61
62 /*
63 * XXX This code currently doesn't even try to allow 32-bit data port use.
64 */
65
/*
 * Per-instance state for one OBIO IDE channel.
 */
struct wdc_obio_softc {
	struct wdc_softc sc_wdcdev;	/* generic wdc device; must be first */
	struct channel_softc *wdc_chanptr; /* one-entry channel array for sc_wdcdev.channels */
	struct channel_softc wdc_channel; /* the single channel itself */
	dbdma_regmap_t *sc_dmareg;	/* mapped DBDMA channel registers */
	dbdma_command_t *sc_dmacmd;	/* DBDMA command list (20 entries, see attach) */
	u_int sc_dmaconf[2];	/* per target value of CONFIG_REG */
	void *sc_ih;		/* interrupt handler cookie from intr_establish() */
};
75
/* Autoconfiguration entry points. */
int wdc_obio_probe __P((struct device *, struct cfdata *, void *));
void wdc_obio_attach __P((struct device *, struct device *, void *));
int wdc_obio_detach __P((struct device *, int));
/* DMA hooks installed into sc_wdcdev. */
int wdc_obio_dma_init __P((void *, int, int, void *, size_t, int));
void wdc_obio_dma_start __P((void *, int, int));
int wdc_obio_dma_finish __P((void *, int, int, int));

/* Timing-register programming helpers (see adjust_timing below). */
static void wdc_obio_select __P((struct channel_softc *, int));
static void adjust_timing __P((struct channel_softc *));
static void ata4_adjust_timing __P((struct channel_softc *));

/* Autoconf glue: softc size, match, attach, detach, activate. */
struct cfattach wdc_obio_ca = {
	sizeof(struct wdc_obio_softc), wdc_obio_probe, wdc_obio_attach,
	wdc_obio_detach, wdcactivate
};
91
92
93 int
94 wdc_obio_probe(parent, match, aux)
95 struct device *parent;
96 struct cfdata *match;
97 void *aux;
98 {
99 struct confargs *ca = aux;
100 char compat[32];
101
102 /* XXX should not use name */
103 if (strcmp(ca->ca_name, "ATA") == 0 ||
104 strcmp(ca->ca_name, "ata") == 0 ||
105 strcmp(ca->ca_name, "ata0") == 0 ||
106 strcmp(ca->ca_name, "ide") == 0)
107 return 1;
108
109 bzero(compat, sizeof(compat));
110 OF_getprop(ca->ca_node, "compatible", compat, sizeof(compat));
111 if (strcmp(compat, "heathrow-ata") == 0 ||
112 strcmp(compat, "keylargo-ata") == 0)
113 return 1;
114
115 return 0;
116 }
117
/*
 * Attach an OBIO IDE channel: read interrupt/register properties from
 * OpenFirmware, map the command/control registers, establish the
 * interrupt, optionally set up DBDMA resources, then hand the channel
 * to the MI wdc layer via wdcattach().
 */
void
wdc_obio_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct wdc_obio_softc *sc = (void *)self;
	struct confargs *ca = aux;
	struct channel_softc *chp = &sc->wdc_channel;
	int intr;
	int use_dma = 0;
	char path[80];

	/* DMA is only attempted when enabled through config-file flags. */
	if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & WDC_OPTIONS_DMA) {
		if (ca->ca_nreg >= 16 || ca->ca_nintr == -1)
			use_dma = 1;	/* XXX Don't work yet. */
	}

	/* Pick the interrupt from the OF properties, or fall back. */
	if (ca->ca_nintr >= 4 && ca->ca_nreg >= 8) {
		intr = ca->ca_intr[0];
		printf(" irq %d", intr);
	} else if (ca->ca_nintr == -1) {
		/* No "interrupts" property at all: use the historical IRQ. */
		intr = WDC_DEFAULT_PIO_IRQ;
		printf(" irq property not found; using %d", intr);
	} else {
		printf(": couldn't get irq property\n");
		return;
	}

	if (use_dma)
		printf(": DMA transfer");

	printf("\n");

	/*
	 * Build a bus-space tag rooted at this cell's register block.
	 * NOTE(review): the trailing 4 is presumably the byte stride
	 * between consecutive registers -- confirm against
	 * macppc_make_bus_space_tag().
	 */
	chp->cmd_iot = chp->ctl_iot =
	    macppc_make_bus_space_tag(ca->ca_baseaddr + ca->ca_reg[0], 4);

	/* Map the 8 command registers; aux/control is a subregion. */
	if (bus_space_map(chp->cmd_iot, 0, WDC_REG_NPORTS, 0, &chp->cmd_ioh) ||
	    bus_space_subregion(chp->cmd_iot, chp->cmd_ioh,
		WDC_AUXREG_OFFSET, 1, &chp->ctl_ioh)) {
		printf("%s: couldn't map registers\n",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		return;
	}
#if 0
	chp->data32iot = chp->cmd_iot;
	chp->data32ioh = chp->cmd_ioh;
#endif

	sc->sc_ih = intr_establish(intr, IST_LEVEL, IPL_BIO, wdcintr, chp);

	if (use_dma) {
		/* 20 DBDMA descriptors; see wdc_obio_dma_init(). */
		sc->sc_dmacmd = dbdma_alloc(sizeof(dbdma_command_t) * 20);
		/* Second register range holds the DBDMA channel regs. */
		sc->sc_dmareg = mapiodev(ca->ca_baseaddr + ca->ca_reg[2],
		    ca->ca_reg[3]);
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
		sc->sc_wdcdev.DMA_cap = 2;
		if (strcmp(ca->ca_name, "ata-4") == 0) {
			/* Keylargo ata-4 cell: UDMA-capable, 15 ns clock. */
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
			sc->sc_wdcdev.UDMA_cap = 4;
			sc->sc_wdcdev.set_modes = ata4_adjust_timing;
		} else {
			sc->sc_wdcdev.set_modes = adjust_timing;
		}
#ifdef notyet
		/* Minimum cycle time is 150ns (DMA MODE 1) on ohare. */
		if (ohare) {
			sc->sc_wdcdev.PIO_cap = 3;
			sc->sc_wdcdev.DMA_cap = 1;
		}
#endif
	} else {
		/* all non-dma controllers can use adjust_timing */
		sc->sc_wdcdev.set_modes = adjust_timing;
	}

	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
	/* Single-channel controller: one-entry channel table. */
	sc->wdc_chanptr = chp;
	sc->sc_wdcdev.channels = &sc->wdc_chanptr;
	sc->sc_wdcdev.nchannels = 1;
	sc->sc_wdcdev.dma_arg = sc;
	sc->sc_wdcdev.dma_init = wdc_obio_dma_init;
	sc->sc_wdcdev.dma_start = wdc_obio_dma_start;
	sc->sc_wdcdev.dma_finish = wdc_obio_dma_finish;
	chp->channel = 0;
	chp->wdc = &sc->sc_wdcdev;
	chp->ch_queue = malloc(sizeof(struct channel_queue),
	    M_DEVBUF, M_NOWAIT);
	if (chp->ch_queue == NULL) {
		printf("%s: can't allocate memory for command queue",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		return;
	}

#define OHARE_FEATURE_REG	0xf3000038

	/* XXX Enable wdc1 by feature reg. */
	bzero(path, sizeof(path));
	OF_package_to_path(ca->ca_node, path, sizeof(path));
	if (strcmp(path, "/bandit@F2000000/ohare@10/ata@21000") == 0) {
		u_int x;

		/* Set bit 3 of the ohare feature register on this machine. */
		x = in32rb(OHARE_FEATURE_REG);
		x |= 8;
		out32rb(OHARE_FEATURE_REG, x);
	}

	wdcattach(chp);
	/* Program initial timings for whatever modes wdcattach chose. */
	sc->sc_wdcdev.set_modes(chp);

}
229
/* Multiword DMA transfer timings */
struct ide_timings {
	int cycle;	/* minimum cycle time [ns] */
	int active;	/* minimum command active time [ns] */
};
/* PIO modes 0-4; NOTE(review): values appear to follow the ATA spec
 * minimums -- confirm against ATA-2/ATA-4 timing tables. */
static struct ide_timings pio_timing[5] = {
	{ 600, 180 },	/* Mode 0 */
	{ 390, 150 },	/*      1 */
	{ 240, 105 },	/*      2 */
	{ 180,  90 },	/*      3 */
	{ 120,  75 }	/*      4 */
};
/* Multiword DMA modes 0-2. */
static struct ide_timings dma_timing[3] = {
	{ 480, 240 },	/* Mode 0 */
	{ 165,  90 },	/* Mode 1 */
	{ 120,  75 }	/* Mode 2 */
};

/* Ultra-DMA modes 0-4 (ata-4 cell only). */
static struct ide_timings udma_timing[5] = {
	{120, 180},	/* Mode 0 */
	{ 90, 150},	/* Mode 1 */
	{ 60, 120},	/* Mode 2 */
	{ 45,  90},	/* Mode 3 */
	{ 30,  90}	/* Mode 4 */
};

/* Nanoseconds -> controller ticks, rounding up (30 ns per tick). */
#define TIME_TO_TICK(time) howmany((time), 30)
/* Fixed offsets/minimums applied when packing tick counts into
 * CONFIG_REG fields (see adjust_timing). */
#define PIO_REC_OFFSET 4
#define PIO_REC_MIN 1
#define PIO_ACT_MIN 1
#define DMA_REC_OFFSET 1
#define DMA_REC_MIN 1
#define DMA_ACT_MIN 1

#define ATA4_TIME_TO_TICK(time)  howmany((time), 15)	/* 15 ns clock */

#define CONFIG_REG (0x200 >> 4)		/* IDE access timing register */
267
268 void
269 wdc_obio_select(chp, drive)
270 struct channel_softc *chp;
271 int drive;
272 {
273 struct wdc_obio_softc *sc = (struct wdc_obio_softc *)chp->wdc;
274 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh,
275 CONFIG_REG, sc->sc_dmaconf[drive]);
276 }
277
/*
 * Compute the CONFIG_REG timing word for each drive on a
 * heathrow/ohare-style (non-ata-4) cell, from the drive's negotiated
 * PIO and multiword-DMA modes.  Ticks are units of the 30 ns clock
 * (TIME_TO_TICK).  If both drives end up needing different words,
 * install the per-drive select hook; otherwise program the register
 * once here.
 */
void
adjust_timing(chp)
	struct channel_softc *chp;
{
	struct wdc_obio_softc *sc = (struct wdc_obio_softc *)chp->wdc;
	int drive;
	int min_cycle, min_active;
	int cycle_tick, act_tick, inact_tick, half_tick;

	for (drive = 0; drive < 2; drive++) {
		u_int conf = 0;
		struct ata_drive_datas *drvp;

		drvp = &chp->ch_drive[drive];
		/* set up pio mode timings */
		if (drvp->drive_flags & DRIVE) {
			int piomode = drvp->PIO_mode;
			min_cycle = pio_timing[piomode].cycle;
			min_active = pio_timing[piomode].active;

			cycle_tick = TIME_TO_TICK(min_cycle);
			act_tick = TIME_TO_TICK(min_active);
			if (act_tick < PIO_ACT_MIN)
				act_tick = PIO_ACT_MIN;
			/* Recovery (inactive) time is what is left of the
			 * cycle, minus a fixed hardware offset, clamped. */
			inact_tick = cycle_tick - act_tick - PIO_REC_OFFSET;
			if (inact_tick < PIO_REC_MIN)
				inact_tick = PIO_REC_MIN;
			/* mask: 0x000007ff */
			conf |= (inact_tick << 5) | act_tick;
		}
		/* Set up dma mode timings */
		if (drvp->drive_flags & DRIVE_DMA) {
			int dmamode = drvp->DMA_mode;
			min_cycle = dma_timing[dmamode].cycle;
			min_active = dma_timing[dmamode].active;
			cycle_tick = TIME_TO_TICK(min_cycle);
			act_tick = TIME_TO_TICK(min_active);
			inact_tick = cycle_tick - act_tick - DMA_REC_OFFSET;
			if (inact_tick < DMA_REC_MIN)
				inact_tick = DMA_REC_MIN;
			half_tick = 0;	/* XXX */
			/* mask: 0xfffff800 */
			conf |=
			    (half_tick << 21) |
			    (inact_tick << 16) | (act_tick << 11);
		}
		if (conf) {
			printf("conf[%d] = 0x%x, cyc = %d (%d ns), act = %d (%d ns), inact = %d\n",
			    drive, conf, cycle_tick, min_cycle, act_tick, min_active, inact_tick);
		}
		sc->sc_dmaconf[drive] = conf;
	}
	/* Default: no per-drive select needed. */
	sc->sc_wdcdev.cap &= ~WDC_CAPABILITY_SELECT;
	sc->sc_wdcdev.select = 0;
	if (sc->sc_dmaconf[0]) {
		wdc_obio_select(chp,0);
		if (sc->sc_dmaconf[1] && (sc->sc_dmaconf[0] != sc->sc_dmaconf[1])) {
			/* Drives differ: reprogram CONFIG_REG on each select. */
			sc->sc_wdcdev.select = wdc_obio_select;
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_SELECT;
		}
	} else if (sc->sc_dmaconf[1]) {
		wdc_obio_select(chp,1);
	}
}
342
/*
 * Same as adjust_timing(), but for the keylargo ata-4 cell: the clock
 * is 15 ns (ATA4_TIME_TO_TICK), the field layout differs, and UDMA
 * modes are supported in addition to PIO and multiword DMA.
 */
void
ata4_adjust_timing(chp)
	struct channel_softc *chp;
{
	struct wdc_obio_softc *sc = (struct wdc_obio_softc *)chp->wdc;
	int drive;
	int min_cycle, min_active;
	int cycle_tick, act_tick, inact_tick;

	for (drive = 0; drive < 2; drive++) {
		u_int conf = 0;
		struct ata_drive_datas *drvp;

		drvp = &chp->ch_drive[drive];
		/* set up pio mode timings */

		if (drvp->drive_flags & DRIVE) {
			int piomode = drvp->PIO_mode;
			min_cycle = pio_timing[piomode].cycle;
			min_active = pio_timing[piomode].active;

			cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
			act_tick = ATA4_TIME_TO_TICK(min_active);
			/* No recovery offset/clamp here, unlike adjust_timing. */
			inact_tick = cycle_tick - act_tick;
			/* mask: 0x000003ff */
			conf |= (inact_tick << 5) | act_tick;
		}
		/* set up dma mode timings */
		if (drvp->drive_flags & DRIVE_DMA) {
			int dmamode = drvp->DMA_mode;
			min_cycle = dma_timing[dmamode].cycle;
			min_active = dma_timing[dmamode].active;
			cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
			act_tick = ATA4_TIME_TO_TICK(min_active);
			inact_tick = cycle_tick - act_tick;
			/* mask: 0x001ffc00 */
			conf |= (act_tick << 10) | (inact_tick << 15);
		}
		/* set up udma mode timings */
		if (drvp->drive_flags & DRIVE_UDMA) {
			int udmamode = drvp->UDMA_mode;
			min_cycle = udma_timing[udmamode].cycle;
			min_active = udma_timing[udmamode].active;
			act_tick = ATA4_TIME_TO_TICK(min_active);
			cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
			/* mask: 0x1ff00000
			 * NOTE(review): 0x100000 presumably enables UDMA for
			 * this drive -- confirm against keylargo docs. */
			conf |= (cycle_tick << 21) | (act_tick << 25) | 0x100000;
		}
		if (conf) {
			printf("ata4 conf[%d] = 0x%x, cyc = %d (%d ns), act = %d (%d ns), inact = %d\n",
			    drive, conf, cycle_tick, min_cycle, act_tick, min_active, inact_tick);
		}
		sc->sc_dmaconf[drive] = conf;
	}
	/* Default: no per-drive select needed. */
	sc->sc_wdcdev.cap &= ~WDC_CAPABILITY_SELECT;
	sc->sc_wdcdev.select = 0;
	if (sc->sc_dmaconf[0]) {
		wdc_obio_select(chp,0);
		if (sc->sc_dmaconf[1] && (sc->sc_dmaconf[0] != sc->sc_dmaconf[1])) {
			/* Drives differ: reprogram CONFIG_REG on each select. */
			sc->sc_wdcdev.select = wdc_obio_select;
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_SELECT;
		}
	} else if (sc->sc_dmaconf[1]) {
		wdc_obio_select(chp,1);
	}
}
409
410 int
411 wdc_obio_detach(self, flags)
412 struct device *self;
413 int flags;
414 {
415 struct wdc_obio_softc *sc = (void *)self;
416 struct channel_softc *chp = &sc->wdc_channel;
417 int error;
418
419 if ((error = wdcdetach(self, flags)) != 0)
420 return error;
421
422 intr_disestablish(sc->sc_ih);
423
424 free(sc->wdc_channel.ch_queue, M_DEVBUF);
425
426 /* Unmap our i/o space. */
427 bus_space_unmap(chp->cmd_iot, chp->cmd_ioh, WDC_REG_NPORTS);
428
429 /* Unmap DMA registers. */
430 /* XXX unmapiodev(sc->sc_dmareg); */
431 /* XXX free(sc->sc_dmacmd); */
432
433 return 0;
434 }
435
436 int
437 wdc_obio_dma_init(v, channel, drive, databuf, datalen, read)
438 void *v;
439 void *databuf;
440 size_t datalen;
441 int read;
442 {
443 struct wdc_obio_softc *sc = v;
444 vaddr_t va = (vaddr_t)databuf;
445 dbdma_command_t *cmdp;
446 u_int cmd, offset;
447
448 cmdp = sc->sc_dmacmd;
449 cmd = read ? DBDMA_CMD_IN_MORE : DBDMA_CMD_OUT_MORE;
450
451 offset = va & PGOFSET;
452
453 /* if va is not page-aligned, setup the first page */
454 if (offset != 0) {
455 int rest = NBPG - offset; /* the rest of the page */
456
457 if (datalen > rest) { /* if continues to next page */
458 DBDMA_BUILD(cmdp, cmd, 0, rest, vtophys(va),
459 DBDMA_INT_NEVER, DBDMA_WAIT_NEVER,
460 DBDMA_BRANCH_NEVER);
461 datalen -= rest;
462 va += rest;
463 cmdp++;
464 }
465 }
466
467 /* now va is page-aligned */
468 while (datalen > NBPG) {
469 DBDMA_BUILD(cmdp, cmd, 0, NBPG, vtophys(va),
470 DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
471 datalen -= NBPG;
472 va += NBPG;
473 cmdp++;
474 }
475
476 /* the last page (datalen <= NBPG here) */
477 cmd = read ? DBDMA_CMD_IN_LAST : DBDMA_CMD_OUT_LAST;
478 DBDMA_BUILD(cmdp, cmd, 0, datalen, vtophys(va),
479 DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
480 cmdp++;
481
482 DBDMA_BUILD(cmdp, DBDMA_CMD_STOP, 0, 0, 0,
483 DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
484
485 return 0;
486 }
487
488 void
489 wdc_obio_dma_start(v, channel, drive)
490 void *v;
491 int channel, drive;
492 {
493 struct wdc_obio_softc *sc = v;
494
495 dbdma_start(sc->sc_dmareg, sc->sc_dmacmd);
496 }
497
498 int
499 wdc_obio_dma_finish(v, channel, drive, read)
500 void *v;
501 int channel, drive;
502 int read;
503 {
504 struct wdc_obio_softc *sc = v;
505
506 dbdma_stop(sc->sc_dmareg);
507 return 0;
508 }
509