/*	$NetBSD: cmdide.c,v 1.31.2.1 2011/06/06 09:08:09 jruoho Exp $	*/

/*
 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cmdide.c,v 1.31.2.1 2011/06/06 09:08:09 jruoho Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_cmd_reg.h>


static int  cmdide_match(device_t, cfdata_t, void *);
static void cmdide_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(cmdide, sizeof(struct pciide_softc),
    cmdide_match, cmdide_attach, pciide_detach, NULL);

static void cmd_chip_map(struct pciide_softc*, const struct pci_attach_args*);
static void cmd0643_9_chip_map(struct pciide_softc*,
    const struct pci_attach_args*);
static void cmd0643_9_setup_channel(struct ata_channel*);
static void cmd_channel_map(const struct pci_attach_args *,
    struct pciide_softc *, int);
static int  cmd_pci_intr(void *);
static void cmd646_9_irqack(struct ata_channel *);
static void cmd680_chip_map(struct pciide_softc*,
    const struct pci_attach_args*);
static void cmd680_setup_channel(struct ata_channel*);
static void cmd680_channel_map(const struct pci_attach_args *,
    struct pciide_softc *, int);

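/*
 * Supported CMD Technology / Silicon Image controllers, each with the
 * chip_map routine used to set it up.
 */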
static const struct pciide_product_desc pciide_cmd_products[] = {
	{ PCI_PRODUCT_CMDTECH_640,
	  0,
	  "CMD Technology PCI0640",
	  cmd_chip_map
	},
	{ PCI_PRODUCT_CMDTECH_643,
	  0,
	  "CMD Technology PCI0643",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_646,
	  0,
	  "CMD Technology PCI0646",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_648,
	  0,
	  "CMD Technology PCI0648",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_649,
	  0,
	  "CMD Technology PCI0649",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_680,
	  0,
	  "Silicon Image 0680",
	  cmd680_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

static int
cmdide_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_CMDTECH) {
		if (pciide_lookup_product(pa->pa_id, pciide_cmd_products))
			return (2);
	}
	return (0);
}

static void
cmdide_attach(device_t parent, device_t self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct pciide_softc *sc = device_private(self);

	sc->sc_wdcdev.sc_atac.atac_dev = self;

	pciide_common_attach(sc, pa,
	    pciide_lookup_product(pa->pa_id, pciide_cmd_products));

}

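/*
 * Common channel setup for the CMD 064x family: work out the interface
 * register (faking it for chips strapped to identify as RAID controllers),
 * allocate or share the command queue, and map the channel.  Only the
 * second channel can be disabled; the first one is always enabled.
 */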
static void
cmd_channel_map(const struct pci_attach_args *pa, struct pciide_softc *sc,
    int channel)
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
	int interface, one_channel;

	/*
	 * The 0648/0649 can be told to identify as a RAID controller.
	 * In this case, we have to fake the interface register.
	 */
	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCIIDE_INTERFACE_SETTABLE(0) |
		    PCIIDE_INTERFACE_SETTABLE(1);
		if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
		    CMD_CONF_DSA1)
			interface |= PCIIDE_INTERFACE_PCI(0) |
			    PCIIDE_INTERFACE_PCI(1);
	} else {
		interface = PCI_INTERFACE(pa->pa_class);
	}

	sc->wdc_chanarray[channel] = &cp->ata_channel;
	cp->name = PCIIDE_CHANNEL_NAME(channel);
	cp->ata_channel.ch_channel = channel;
	cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;

	/*
	 * Older CMD64X chips don't have independent channels.
	 */
	switch (sc->sc_pp->ide_product) {
	case PCI_PRODUCT_CMDTECH_649:
		one_channel = 0;
		break;
	default:
		one_channel = 1;
		break;
	}

	if (channel > 0 && one_channel) {
		cp->ata_channel.ch_queue =
		    sc->pciide_channels[0].ata_channel.ch_queue;
	} else {
		cp->ata_channel.ch_queue =
		    malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT);
	}
	if (cp->ata_channel.ch_queue == NULL) {
		aprint_error("%s %s channel: "
		    "can't allocate memory for command queue",
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), cp->name);
		return;
	}
	cp->ata_channel.ch_ndrive = 2;

	aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "%s channel %s to %s mode\n", cp->name,
	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
	    "configured" : "wired",
	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
	    "native-PCI" : "compatibility");

	/*
	 * With a CMD PCI64x, if we get here, the first channel is enabled:
	 * there's no way to disable the first channel without disabling
	 * the whole device.
	 */
	if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
		aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "%s channel ignored (disabled)\n", cp->name);
		cp->ata_channel.ch_flags |= ATACH_DISABLED;
		return;
	}

	pciide_mapchan(pa, cp, interface, cmd_pci_intr);
}

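/*
 * Shared PCI interrupt handler: check the per-channel interrupt status
 * bits in CMD_CONF and CMD_ARTTIM23 and call wdcintr() for each native
 * channel that is asserting an interrupt.
 */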
static int
cmd_pci_intr(void *arg)
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct ata_channel *wdc_cp;
	int i, rv, crv;
	u_int32_t priirq, secirq;

	rv = 0;
	priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
	secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
	for (i = 0; i < sc->sc_wdcdev.sc_atac.atac_nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->ata_channel;
		/* If a compat channel, skip. */
		if (cp->compat)
			continue;
		if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
		    (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
			crv = wdcintr(wdc_cp);
			if (crv == 0) {
				aprint_error("%s:%d: bogus intr\n",
				    device_xname(
				      sc->sc_wdcdev.sc_atac.atac_dev), i);
				sc->sc_wdcdev.irqack(wdc_cp);
			} else
				rv = 1;
		}
	}
	return rv;
}

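/*
 * Chip mapping for the PCI0640: the hardware does not support DMA, so
 * both channels are set up for PIO only.
 */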
static void
cmd_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	int channel;

	/*
	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
	 * and the base address registers can be disabled at the
	 * hardware level. In this case, the device is wired
	 * in compat mode and its first channel is always enabled,
	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
	 * In fact, it seems that the first channel of the CMD PCI0640
	 * can't be disabled.
	 */

#ifdef PCIIDE_CMD064x_DISABLE
	if (pciide_chipen(sc, pa) == 0)
		return;
#endif

	aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "hardware does not support DMA\n");
	sc->sc_dma_ok = 0;

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16;

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cmd_channel_map(pa, sc, channel);
	}
}

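/*
 * Chip mapping for the 0643/0646/0648/0649: bus-master DMA, plus UltraDMA
 * on the chips and revisions that support it (up to mode 5 on the 0649).
 */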
static void
cmd0643_9_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	int channel;
	pcireg_t rev = PCI_REVISION(pa->pa_class);

	/*
	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
	 * and the base address registers can be disabled at the
	 * hardware level. In this case, the device is wired
	 * in compat mode and its first channel is always enabled,
	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
	 * In fact, it seems that the first channel of the CMD PCI0640
	 * can't be disabled.
	 */

#ifdef PCIIDE_CMD064x_DISABLE
	if (pciide_chipen(sc, pa) == 0)
		return;
#endif

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_CMDTECH_649:
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		case PCI_PRODUCT_CMDTECH_648:
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		case PCI_PRODUCT_CMDTECH_646:
			if (rev >= CMD0646U2_REV) {
				sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
			} else if (rev >= CMD0646U_REV) {
				/*
				 * Linux's driver claims that the 646U is
				 * broken with UDMA. Only enable it if we
				 * know what we're doing.
				 */
#ifdef PCIIDE_CMD0646U_ENABLEUDMA
				sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
#endif
				/* explicitly disable UDMA */
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(0), 0);
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(1), 0);
			}
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		default:
			sc->sc_wdcdev.irqack = pciide_irqack;
		}
	}

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_set_modes = cmd0643_9_setup_channel;

	ATADEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
	    DEBUG_PROBE);

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++)
		cmd_channel_map(pa, sc, channel);

	/*
	 * Note: this also makes sure we clear the irq disable and reset
	 * bits.
	 */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
	ATADEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
	    DEBUG_PROBE);
}

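/*
 * Per-drive mode setup for the 0643/0646/0648/0649: program the data
 * timing register from the PIO or multiword DMA timing tables, and the
 * UDMA timing/enable bits when UltraDMA is in use.
 */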
static void
cmd0643_9_setup_channel(struct ata_channel *chp)
{
	struct ata_drive_datas *drvp;
	u_int8_t tim;
	u_int32_t idedma_ctl, udma_reg;
	int drive, s;
	struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

	idedma_ctl = 0;
	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
		if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
			if (drvp->drive_flags & DRIVE_UDMA) {
				/* UltraDMA on a 646U2, 0648 or 0649 */
				s = splbio();
				drvp->drive_flags &= ~DRIVE_DMA;
				splx(s);
				udma_reg = pciide_pci_read(sc->sc_pc,
				    sc->sc_tag, CMD_UDMATIM(chp->ch_channel));
				if (drvp->UDMA_mode > 2 &&
				    (pciide_pci_read(sc->sc_pc, sc->sc_tag,
				    CMD_BICSR) &
				    CMD_BICSR_80(chp->ch_channel)) == 0)
					drvp->UDMA_mode = 2;
				if (drvp->UDMA_mode > 2)
					udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
				else if (sc->sc_wdcdev.sc_atac.atac_udma_cap > 2)
					udma_reg |= CMD_UDMATIM_UDMA33(drive);
				udma_reg |= CMD_UDMATIM_UDMA(drive);
				udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
				    CMD_UDMATIM_TIM_OFF(drive));
				udma_reg |=
				    (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
				    CMD_UDMATIM_TIM_OFF(drive));
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(chp->ch_channel), udma_reg);
			} else {
				/*
				 * Use multiword DMA.
				 * Timings will be used for both PIO and DMA,
				 * so adjust the DMA mode if needed.
				 * If we have a 0646U2/8/9, turn off UDMA.
				 */
				if (sc->sc_wdcdev.sc_atac.atac_cap & ATAC_CAP_UDMA) {
					udma_reg = pciide_pci_read(sc->sc_pc,
					    sc->sc_tag,
					    CMD_UDMATIM(chp->ch_channel));
					udma_reg &= ~CMD_UDMATIM_UDMA(drive);
					pciide_pci_write(sc->sc_pc, sc->sc_tag,
					    CMD_UDMATIM(chp->ch_channel),
					    udma_reg);
				}
				if (drvp->PIO_mode >= 3 &&
				    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
					drvp->DMA_mode = drvp->PIO_mode - 2;
				}
				tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
		pciide_pci_write(sc->sc_pc, sc->sc_tag,
		    CMD_DATA_TIM(chp->ch_channel, drive), tim);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
		    idedma_ctl);
	}
}

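/*
 * Interrupt acknowledge for the 0646/0648/0649: read the register that
 * holds the channel's interrupt status bit and write the value back,
 * which acknowledges the pending interrupt, then run the generic
 * pciide_irqack().
 */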
static void
cmd646_9_irqack(struct ata_channel *chp)
{
	u_int32_t priirq, secirq;
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

	if (chp->ch_channel == 0) {
		priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
		pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
	} else {
		secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
		pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
	}
	pciide_irqack(chp);
}

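/*
 * Chip mapping for the Silicon Image 0680: bus-master DMA and UltraDMA
 * up to mode 6, using the generic pciide interrupt acknowledge.
 */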
static void
cmd680_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_set_modes = cmd680_setup_channel;

	pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
	pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
	pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++)
		cmd680_channel_map(pa, sc, channel);
}

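/*
 * Map one 0680 channel: allocate its command queue and load the init_val
 * bytes into the channel's registers starting at 0xa2 + channel * 16
 * before mapping the channel.
 */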
static void
cmd680_channel_map(const struct pci_attach_args *pa, struct pciide_softc *sc,
    int channel)
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	int interface, i, reg;
	static const u_int8_t init_val[] =
	    { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
	      0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };

	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCIIDE_INTERFACE_SETTABLE(0) |
		    PCIIDE_INTERFACE_SETTABLE(1);
		interface |= PCIIDE_INTERFACE_PCI(0) |
		    PCIIDE_INTERFACE_PCI(1);
	} else {
		interface = PCI_INTERFACE(pa->pa_class);
	}

	sc->wdc_chanarray[channel] = &cp->ata_channel;
	cp->name = PCIIDE_CHANNEL_NAME(channel);
	cp->ata_channel.ch_channel = channel;
	cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;

	cp->ata_channel.ch_queue =
	    malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT);
	if (cp->ata_channel.ch_queue == NULL) {
		aprint_error("%s %s channel: "
		    "can't allocate memory for command queue",
		    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), cp->name);
		return;
	}
	cp->ata_channel.ch_ndrive = 2;

	/* XXX */
	reg = 0xa2 + channel * 16;
	for (i = 0; i < sizeof(init_val); i++)
		pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);

	aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "%s channel %s to %s mode\n", cp->name,
	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
	    "configured" : "wired",
	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
	    "native-PCI" : "compatibility");

	pciide_mapchan(pa, cp, interface, pciide_pci_intr);
}

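/*
 * Per-drive mode setup for the 0680: pick PIO, multiword DMA or UDMA
 * timing values from the tables below, program them into the per-channel
 * timing registers, then update the mode bits at 0x80 + channel * 4.
 */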
static void
cmd680_setup_channel(struct ata_channel *chp)
{
	struct ata_drive_datas *drvp;
	u_int8_t mode, off, scsc;
	u_int16_t val;
	u_int32_t idedma_ctl;
	int drive, s;
	struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t pa = sc->sc_tag;
	static const u_int8_t udma2_tbl[] =
	    { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
	static const u_int8_t udma_tbl[] =
	    { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
	static const u_int16_t dma_tbl[] =
	    { 0x2208, 0x10c2, 0x10c1 };
	static const u_int16_t pio_tbl[] =
	    { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };

	idedma_ctl = 0;
	pciide_channel_dma_setup(cp);
	mode = pciide_pci_read(pc, pa, 0x80 + chp->ch_channel * 4);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		mode &= ~(0x03 << (drive * 4));
		if (drvp->drive_flags & DRIVE_UDMA) {
			s = splbio();
			drvp->drive_flags &= ~DRIVE_DMA;
			splx(s);
			off = 0xa0 + chp->ch_channel * 16;
			if (drvp->UDMA_mode > 2 &&
			    (pciide_pci_read(pc, pa, off) & 0x01) == 0)
				drvp->UDMA_mode = 2;
			scsc = pciide_pci_read(pc, pa, 0x8a);
			if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
				pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
				scsc = pciide_pci_read(pc, pa, 0x8a);
				if ((scsc & 0x30) == 0)
					drvp->UDMA_mode = 5;
			}
			mode |= 0x03 << (drive * 4);
			off = 0xac + chp->ch_channel * 16 + drive * 2;
			val = pciide_pci_read(pc, pa, off) & ~0x3f;
			if (scsc & 0x30)
				val |= udma2_tbl[drvp->UDMA_mode];
			else
				val |= udma_tbl[drvp->UDMA_mode];
			pciide_pci_write(pc, pa, off, val);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if (drvp->drive_flags & DRIVE_DMA) {
			mode |= 0x02 << (drive * 4);
			off = 0xa8 + chp->ch_channel * 16 + drive * 2;
			val = dma_tbl[drvp->DMA_mode];
			pciide_pci_write(pc, pa, off, val & 0xff);
			pciide_pci_write(pc, pa, off+1, val >> 8);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			mode |= 0x01 << (drive * 4);
			off = 0xa4 + chp->ch_channel * 16 + drive * 2;
			val = pio_tbl[drvp->PIO_mode];
			pciide_pci_write(pc, pa, off, val & 0xff);
			pciide_pci_write(pc, pa, off+1, val >> 8);
		}
	}

	pciide_pci_write(pc, pa, 0x80 + chp->ch_channel * 4, mode);
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
		    idedma_ctl);
	}
}