/*	$NetBSD: wdc_obio.c,v 1.37 2004/05/25 20:42:41 thorpej Exp $	*/

/*-
 * Copyright (c) 1998, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum and by Onno van der Linden.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: wdc_obio.c,v 1.37 2004/05/25 20:42:41 thorpej Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>

#include <machine/bus.h>
#include <machine/autoconf.h>

#include <dev/ata/atareg.h>
#include <dev/ata/atavar.h>
#include <dev/ic/wdcvar.h>

#include <dev/ofw/openfirm.h>

#include <macppc/dev/dbdma.h>

#define WDC_REG_NPORTS		8
#define WDC_AUXREG_OFFSET	0x16
#define WDC_DEFAULT_PIO_IRQ	13	/* XXX */
#define WDC_DEFAULT_DMA_IRQ	2	/* XXX */

#define WDC_OPTIONS_DMA		0x01
/*
 * XXX This code currently doesn't even try to allow 32-bit data port use.
 */

struct wdc_obio_softc {
	struct wdc_softc sc_wdcdev;
	struct wdc_channel *wdc_chanptr;
	struct wdc_channel wdc_channel;
	struct ata_queue wdc_chqueue;
	dbdma_regmap_t *sc_dmareg;
	dbdma_command_t *sc_dmacmd;
	u_int sc_dmaconf[2];	/* per-target value of CONFIG_REG */
	void *sc_ih;
};

int wdc_obio_probe __P((struct device *, struct cfdata *, void *));
void wdc_obio_attach __P((struct device *, struct device *, void *));
int wdc_obio_detach __P((struct device *, int));
int wdc_obio_dma_init __P((void *, int, int, void *, size_t, int));
void wdc_obio_dma_start __P((void *, int, int));
int wdc_obio_dma_finish __P((void *, int, int, int));

static void wdc_obio_select __P((struct wdc_channel *, int));
static void adjust_timing __P((struct wdc_channel *));
static void ata4_adjust_timing __P((struct wdc_channel *));

CFATTACH_DECL(wdc_obio, sizeof(struct wdc_obio_softc),
    wdc_obio_probe, wdc_obio_attach, wdc_obio_detach, wdcactivate);

int
wdc_obio_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct confargs *ca = aux;
	char compat[32];

	/* XXX should not use name */
	if (strcmp(ca->ca_name, "ATA") == 0 ||
	    strcmp(ca->ca_name, "ata") == 0 ||
	    strcmp(ca->ca_name, "ata0") == 0 ||
	    strcmp(ca->ca_name, "ide") == 0)
		return 1;

	memset(compat, 0, sizeof(compat));
	OF_getprop(ca->ca_node, "compatible", compat, sizeof(compat));
	if (strcmp(compat, "heathrow-ata") == 0 ||
	    strcmp(compat, "keylargo-ata") == 0)
		return 1;

	return 0;
}

void
wdc_obio_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct wdc_obio_softc *sc = (void *)self;
	struct confargs *ca = aux;
	struct wdc_channel *chp = &sc->wdc_channel;
	int intr, i;
	int use_dma = 0;
	char path[80];

	if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & WDC_OPTIONS_DMA) {
		if (ca->ca_nreg >= 16 || ca->ca_nintr == -1)
			use_dma = 1;	/* XXX Doesn't work yet. */
	}

	if (ca->ca_nintr >= 4 && ca->ca_nreg >= 8) {
		intr = ca->ca_intr[0];
		printf(" irq %d", intr);
	} else if (ca->ca_nintr == -1) {
		intr = WDC_DEFAULT_PIO_IRQ;
		printf(" irq property not found; using %d", intr);
	} else {
		printf(": couldn't get irq property\n");
		return;
	}

148
149 if (use_dma)
150 printf(": DMA transfer");
151
152 printf("\n");
153
154 chp->cmd_iot = chp->ctl_iot =
155 macppc_make_bus_space_tag(ca->ca_baseaddr + ca->ca_reg[0], 4);
156
157 if (bus_space_map(chp->cmd_iot, 0, WDC_REG_NPORTS, 0,
158 &chp->cmd_baseioh) ||
159 bus_space_subregion(chp->cmd_iot, chp->cmd_baseioh,
160 WDC_AUXREG_OFFSET, 1, &chp->ctl_ioh)) {
161 printf("%s: couldn't map registers\n",
162 sc->sc_wdcdev.sc_dev.dv_xname);
163 return;
164 }
165 for (i = 0; i < WDC_NREG; i++) {
166 if (bus_space_subregion(chp->cmd_iot, chp->cmd_baseioh, i,
167 i == 0 ? 4 : 1, &chp->cmd_iohs[i]) != 0) {
168 bus_space_unmap(chp->cmd_iot, chp->cmd_baseioh,
169 WDC_REG_NPORTS);
170 printf("%s: couldn't subregion registers\n",
171 sc->sc_wdcdev.sc_dev.dv_xname);
172 return;
173 }
174 }
175 wdc_init_shadow_regs(chp);
176 #if 0
177 chp->data32iot = chp->cmd_iot;
178 chp->data32ioh = chp->cmd_ioh;
179 #endif
180
181 sc->sc_ih = intr_establish(intr, IST_LEVEL, IPL_BIO, wdcintr, chp);
182
183 if (use_dma) {
184 sc->sc_dmacmd = dbdma_alloc(sizeof(dbdma_command_t) * 20);
185 sc->sc_dmareg = mapiodev(ca->ca_baseaddr + ca->ca_reg[2],
186 ca->ca_reg[3]);
187 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
188 sc->sc_wdcdev.DMA_cap = 2;
189 if (strcmp(ca->ca_name, "ata-4") == 0) {
190 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
191 sc->sc_wdcdev.UDMA_cap = 4;
192 sc->sc_wdcdev.set_modes = ata4_adjust_timing;
193 } else {
194 sc->sc_wdcdev.set_modes = adjust_timing;
195 }
196 #ifdef notyet
197 /* Minimum cycle time is 150ns (DMA MODE 1) on ohare. */
198 if (ohare) {
199 sc->sc_wdcdev.PIO_cap = 3;
200 sc->sc_wdcdev.DMA_cap = 1;
201 }
202 #endif
203 } else {
204 /* all non-DMA controllers can use adjust_timing */
205 sc->sc_wdcdev.set_modes = adjust_timing;
206 }
207
208 sc->sc_wdcdev.PIO_cap = 4;
209 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
210 sc->wdc_chanptr = chp;
211 sc->sc_wdcdev.channels = &sc->wdc_chanptr;
212 sc->sc_wdcdev.nchannels = 1;
213 sc->sc_wdcdev.dma_arg = sc;
214 sc->sc_wdcdev.dma_init = wdc_obio_dma_init;
215 sc->sc_wdcdev.dma_start = wdc_obio_dma_start;
216 sc->sc_wdcdev.dma_finish = wdc_obio_dma_finish;
217 chp->ch_channel = 0;
218 chp->ch_wdc = &sc->sc_wdcdev;
219 chp->ch_queue = &sc->wdc_chqueue;
220
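	/*
	 * The ohare I/O controller on early PCI PowerMacs apparently keeps
	 * its second ATA cell disabled until a bit in its feature register
	 * is set; the hardcoded address and path below are presumably
	 * specific to that chip and board layout (hence the XXX).
	 */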
#define OHARE_FEATURE_REG	0xf3000038

	/* XXX Enable wdc1 by feature reg. */
	memset(path, 0, sizeof(path));
	OF_package_to_path(ca->ca_node, path, sizeof(path));
	if (strcmp(path, "/bandit@F2000000/ohare@10/ata@21000") == 0) {
		u_int x;

		x = in32rb(OHARE_FEATURE_REG);
		x |= 8;
		out32rb(OHARE_FEATURE_REG, x);
	}

	wdcattach(chp);
}

/* IDE transfer timings (PIO, multiword DMA, Ultra DMA) */
struct ide_timings {
	int cycle;	/* minimum cycle time [ns] */
	int active;	/* minimum command active time [ns] */
};

static struct ide_timings pio_timing[5] = {
	{ 600, 180 },	/* Mode 0 */
	{ 390, 150 },	/*      1 */
	{ 240, 105 },	/*      2 */
	{ 180,  90 },	/*      3 */
	{ 120,  75 }	/*      4 */
};

static struct ide_timings dma_timing[3] = {
	{ 480, 240 },	/* Mode 0 */
	{ 165,  90 },	/* Mode 1 */
	{ 120,  75 }	/* Mode 2 */
};

static struct ide_timings udma_timing[5] = {
	{ 120, 180 },	/* Mode 0 */
	{  90, 150 },	/* Mode 1 */
	{  60, 120 },	/* Mode 2 */
	{  45,  90 },	/* Mode 3 */
	{  30,  90 }	/* Mode 4 */
};

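/*
 * The heathrow/ohare IDE cell counts timing in ticks of (apparently) a
 * 30 ns clock, while the keylargo ata-4 cell uses 15 ns ticks (see
 * ATA4_TIME_TO_TICK below).  TIME_TO_TICK() rounds a nanosecond value
 * up to whole ticks; for example, PIO mode 4 above (120 ns cycle, 75 ns
 * active) becomes a 4-tick cycle with a 3-tick active phase.  The
 * *_REC_OFFSET values are subtracted when deriving the recovery
 * (inactive) count, and the *_MIN values clamp the results to what is
 * presumably the smallest count the hardware accepts.
 */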
#define TIME_TO_TICK(time)	howmany((time), 30)
#define PIO_REC_OFFSET	4
#define PIO_REC_MIN	1
#define PIO_ACT_MIN	1
#define DMA_REC_OFFSET	1
#define DMA_REC_MIN	1
#define DMA_ACT_MIN	1

#define ATA4_TIME_TO_TICK(time)	howmany((time), 15)	/* 15 ns clock */

#define CONFIG_REG	(0x200 >> 4)	/* IDE access timing register */

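/*
 * Write the precomputed timing value for the given drive into the
 * controller's CONFIG_REG.  This is installed as the wdc "select" hook
 * only when both drives have timing values that differ, so the register
 * is reloaded on every drive switch; otherwise it is called just once
 * from the set_modes routines below.
 */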
void
wdc_obio_select(chp, drive)
	struct wdc_channel *chp;
	int drive;
{
	struct wdc_obio_softc *sc = (struct wdc_obio_softc *)chp->ch_wdc;

	bus_space_write_4(chp->cmd_iot, chp->cmd_baseioh,
	    CONFIG_REG, sc->sc_dmaconf[drive]);
}

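/*
 * Compute and program timings for the heathrow/ohare (pre-UDMA) IDE
 * cell.  For each drive, the PIO active/recovery tick counts go into
 * the low 11 bits of CONFIG_REG and the multiword DMA counts into the
 * upper bits, as noted by the mask comments below.
 */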
void
adjust_timing(chp)
	struct wdc_channel *chp;
{
	struct wdc_obio_softc *sc = (struct wdc_obio_softc *)chp->ch_wdc;
	int drive;
	int min_cycle = 0, min_active = 0;
	int cycle_tick = 0, act_tick = 0, inact_tick = 0, half_tick;

	for (drive = 0; drive < 2; drive++) {
		u_int conf = 0;
		struct ata_drive_datas *drvp;

		drvp = &chp->ch_drive[drive];
		/* set up PIO mode timings */
		if (drvp->drive_flags & DRIVE) {
			int piomode = drvp->PIO_mode;
			min_cycle = pio_timing[piomode].cycle;
			min_active = pio_timing[piomode].active;

			cycle_tick = TIME_TO_TICK(min_cycle);
			act_tick = TIME_TO_TICK(min_active);
			if (act_tick < PIO_ACT_MIN)
				act_tick = PIO_ACT_MIN;
			inact_tick = cycle_tick - act_tick - PIO_REC_OFFSET;
			if (inact_tick < PIO_REC_MIN)
				inact_tick = PIO_REC_MIN;
			/* mask: 0x000007ff */
			conf |= (inact_tick << 5) | act_tick;
		}
		/* set up DMA mode timings */
		if (drvp->drive_flags & DRIVE_DMA) {
			int dmamode = drvp->DMA_mode;
			min_cycle = dma_timing[dmamode].cycle;
			min_active = dma_timing[dmamode].active;
			cycle_tick = TIME_TO_TICK(min_cycle);
			act_tick = TIME_TO_TICK(min_active);
			inact_tick = cycle_tick - act_tick - DMA_REC_OFFSET;
			if (inact_tick < DMA_REC_MIN)
				inact_tick = DMA_REC_MIN;
			half_tick = 0;	/* XXX */
			/* mask: 0xfffff800 */
			conf |=
			    (half_tick << 21) |
			    (inact_tick << 16) | (act_tick << 11);
		}
#ifdef DEBUG
		if (conf) {
			printf("conf[%d] = 0x%x, cyc = %d (%d ns), act = %d (%d ns), inact = %d\n",
			    drive, conf, cycle_tick, min_cycle,
			    act_tick, min_active, inact_tick);
		}
#endif
		sc->sc_dmaconf[drive] = conf;
	}
	sc->sc_wdcdev.cap &= ~WDC_CAPABILITY_SELECT;
	sc->sc_wdcdev.select = 0;
	if (sc->sc_dmaconf[0]) {
		wdc_obio_select(chp, 0);
		if (sc->sc_dmaconf[1] &&
		    (sc->sc_dmaconf[0] != sc->sc_dmaconf[1])) {
			sc->sc_wdcdev.select = wdc_obio_select;
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_SELECT;
		}
	} else if (sc->sc_dmaconf[1]) {
		wdc_obio_select(chp, 1);
	}
}

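/*
 * Same idea as adjust_timing() above, but for the keylargo "ata-4"
 * cell: ticks are 15 ns, the field layout differs (PIO in bits 0-9,
 * multiword DMA in bits 10-20, Ultra DMA in bits 21-28 plus what is
 * presumably a UDMA enable bit at 0x100000), and no extra recovery
 * offsets are applied.
 */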
void
ata4_adjust_timing(chp)
	struct wdc_channel *chp;
{
	struct wdc_obio_softc *sc = (struct wdc_obio_softc *)chp->ch_wdc;
	int drive;
	int min_cycle = 0, min_active = 0;
	int cycle_tick = 0, act_tick = 0, inact_tick = 0;

	for (drive = 0; drive < 2; drive++) {
		u_int conf = 0;
		struct ata_drive_datas *drvp;

		drvp = &chp->ch_drive[drive];
		/* set up PIO mode timings */
		if (drvp->drive_flags & DRIVE) {
			int piomode = drvp->PIO_mode;
			min_cycle = pio_timing[piomode].cycle;
			min_active = pio_timing[piomode].active;

			cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
			act_tick = ATA4_TIME_TO_TICK(min_active);
			inact_tick = cycle_tick - act_tick;
			/* mask: 0x000003ff */
			conf |= (inact_tick << 5) | act_tick;
		}
		/* set up DMA mode timings */
		if (drvp->drive_flags & DRIVE_DMA) {
			int dmamode = drvp->DMA_mode;
			min_cycle = dma_timing[dmamode].cycle;
			min_active = dma_timing[dmamode].active;
			cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
			act_tick = ATA4_TIME_TO_TICK(min_active);
			inact_tick = cycle_tick - act_tick;
			/* mask: 0x001ffc00 */
			conf |= (act_tick << 10) | (inact_tick << 15);
		}
		/* set up UDMA mode timings */
		if (drvp->drive_flags & DRIVE_UDMA) {
			int udmamode = drvp->UDMA_mode;
			min_cycle = udma_timing[udmamode].cycle;
			min_active = udma_timing[udmamode].active;
			act_tick = ATA4_TIME_TO_TICK(min_active);
			cycle_tick = ATA4_TIME_TO_TICK(min_cycle);
			/* mask: 0x1ff00000 */
			conf |= (cycle_tick << 21) | (act_tick << 25) | 0x100000;
		}
#ifdef DEBUG
		if (conf) {
			printf("ata4 conf[%d] = 0x%x, cyc = %d (%d ns), act = %d (%d ns), inact = %d\n",
			    drive, conf, cycle_tick, min_cycle,
			    act_tick, min_active, inact_tick);
		}
#endif
		sc->sc_dmaconf[drive] = conf;
	}
	sc->sc_wdcdev.cap &= ~WDC_CAPABILITY_SELECT;
	sc->sc_wdcdev.select = 0;
	if (sc->sc_dmaconf[0]) {
		wdc_obio_select(chp, 0);
		if (sc->sc_dmaconf[1] &&
		    (sc->sc_dmaconf[0] != sc->sc_dmaconf[1])) {
			sc->sc_wdcdev.select = wdc_obio_select;
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_SELECT;
		}
	} else if (sc->sc_dmaconf[1]) {
		wdc_obio_select(chp, 1);
	}
}


int
wdc_obio_detach(self, flags)
	struct device *self;
	int flags;
{
	struct wdc_obio_softc *sc = (void *)self;
	struct wdc_channel *chp = &sc->wdc_channel;
	int error;

	if ((error = wdcdetach(self, flags)) != 0)
		return error;

	intr_disestablish(sc->sc_ih);

	/* Unmap our i/o space. */
	bus_space_unmap(chp->cmd_iot, chp->cmd_baseioh, WDC_REG_NPORTS);

	/* Unmap DMA registers. */
	/* XXX unmapiodev(sc->sc_dmareg); */
	/* XXX free(sc->sc_dmacmd); */

	return 0;
}

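/*
 * Build the DBDMA descriptor chain for one transfer: an optional
 * partial descriptor up to the first page boundary, one INPUT/OUTPUT
 * "MORE" descriptor per full page, a final "LAST" descriptor for the
 * remainder, and a STOP command terminating the chain.  Each descriptor
 * takes its physical address from vtophys(), so the buffer only needs
 * to be virtually contiguous.  Note that sc_dmacmd was allocated with
 * room for 20 descriptors in attach, which bounds the transfer size
 * this routine can describe.
 */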
int
wdc_obio_dma_init(v, channel, drive, databuf, datalen, flags)
	void *v;
	int channel, drive;
	void *databuf;
	size_t datalen;
	int flags;
{
	struct wdc_obio_softc *sc = v;
	vaddr_t va = (vaddr_t)databuf;
	dbdma_command_t *cmdp;
	u_int cmd, offset;
	int read = flags & WDC_DMA_READ;

	cmdp = sc->sc_dmacmd;
	cmd = read ? DBDMA_CMD_IN_MORE : DBDMA_CMD_OUT_MORE;

	offset = va & PGOFSET;

	/* if va is not page-aligned, set up the first page */
	if (offset != 0) {
		int rest = PAGE_SIZE - offset;	/* the rest of the page */

		if (datalen > rest) {		/* if it continues to the next page */
			DBDMA_BUILD(cmdp, cmd, 0, rest, vtophys(va),
			    DBDMA_INT_NEVER, DBDMA_WAIT_NEVER,
			    DBDMA_BRANCH_NEVER);
			datalen -= rest;
			va += rest;
			cmdp++;
		}
	}

	/* now va is page-aligned */
	while (datalen > PAGE_SIZE) {
		DBDMA_BUILD(cmdp, cmd, 0, PAGE_SIZE, vtophys(va),
		    DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
		datalen -= PAGE_SIZE;
		va += PAGE_SIZE;
		cmdp++;
	}

	/* the last page (datalen <= PAGE_SIZE here) */
	cmd = read ? DBDMA_CMD_IN_LAST : DBDMA_CMD_OUT_LAST;
	DBDMA_BUILD(cmdp, cmd, 0, datalen, vtophys(va),
	    DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);
	cmdp++;

	DBDMA_BUILD(cmdp, DBDMA_CMD_STOP, 0, 0, 0,
	    DBDMA_INT_NEVER, DBDMA_WAIT_NEVER, DBDMA_BRANCH_NEVER);

	return 0;
}

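/* Kick off the DBDMA engine on the previously built command chain. */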
void
wdc_obio_dma_start(v, channel, drive)
	void *v;
	int channel, drive;
{
	struct wdc_obio_softc *sc = v;

	dbdma_start(sc->sc_dmareg, sc->sc_dmacmd);
}

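/* Halt the DBDMA engine at the end of a transfer. */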
int
wdc_obio_dma_finish(v, channel, drive, read)
	void *v;
	int channel, drive;
	int read;
{
	struct wdc_obio_softc *sc = v;

	dbdma_stop(sc->sc_dmareg);
	return 0;
}