/*	$NetBSD: cmdide.c,v 1.29 2009/10/19 18:41:14 bouyer Exp $	*/

/*
 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cmdide.c,v 1.29 2009/10/19 18:41:14 bouyer Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_cmd_reg.h>


static int  cmdide_match(device_t, cfdata_t, void *);
static void cmdide_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(cmdide, sizeof(struct pciide_softc),
    cmdide_match, cmdide_attach, NULL, NULL);

static void cmd_chip_map(struct pciide_softc*, struct pci_attach_args*);
static void cmd0643_9_chip_map(struct pciide_softc*, struct pci_attach_args*);
static void cmd0643_9_setup_channel(struct ata_channel*);
static void cmd_channel_map(struct pci_attach_args *, struct pciide_softc *,
    int);
static int  cmd_pci_intr(void *);
static void cmd646_9_irqack(struct ata_channel *);
static void cmd680_chip_map(struct pciide_softc*, struct pci_attach_args*);
static void cmd680_setup_channel(struct ata_channel*);
static void cmd680_channel_map(struct pci_attach_args *, struct pciide_softc *,
    int);

static const struct pciide_product_desc pciide_cmd_products[] = {
        { PCI_PRODUCT_CMDTECH_640,
          0,
          "CMD Technology PCI0640",
          cmd_chip_map
        },
        { PCI_PRODUCT_CMDTECH_643,
          0,
          "CMD Technology PCI0643",
          cmd0643_9_chip_map,
        },
        { PCI_PRODUCT_CMDTECH_646,
          0,
          "CMD Technology PCI0646",
          cmd0643_9_chip_map,
        },
        { PCI_PRODUCT_CMDTECH_648,
          0,
          "CMD Technology PCI0648",
          cmd0643_9_chip_map,
        },
        { PCI_PRODUCT_CMDTECH_649,
          0,
          "CMD Technology PCI0649",
          cmd0643_9_chip_map,
        },
        { PCI_PRODUCT_CMDTECH_680,
          0,
          "Silicon Image 0680",
          cmd680_chip_map,
        },
        { 0,
          0,
          NULL,
          NULL
        }
};

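/*
 * Autoconfiguration match: accept any CMD Technology (later Silicon Image)
 * IDE controller listed in pciide_cmd_products above.
 */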
static int
cmdide_match(device_t parent, cfdata_t match, void *aux)
{
        struct pci_attach_args *pa = aux;

        if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_CMDTECH) {
                if (pciide_lookup_product(pa->pa_id, pciide_cmd_products))
                        return (2);
        }
        return (0);
}

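/*
 * Attach: record the device in the softc and hand off to the common
 * pciide attachment code with the matching product descriptor.
 */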
static void
cmdide_attach(device_t parent, device_t self, void *aux)
{
        struct pci_attach_args *pa = aux;
        struct pciide_softc *sc = device_private(self);

        sc->sc_wdcdev.sc_atac.atac_dev = self;

        pciide_common_attach(sc, pa,
            pciide_lookup_product(pa->pa_id, pciide_cmd_products));

}

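/*
 * Map one IDE channel of a CMD064x: work out whether it is wired to
 * compatibility or native-PCI mode, set up the per-channel state and
 * command queue, and map the channel's registers.
 */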
static void
cmd_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc,
    int channel)
{
        struct pciide_channel *cp = &sc->pciide_channels[channel];
        bus_size_t cmdsize, ctlsize;
        u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
        int interface, one_channel;

        /*
         * The 0648/0649 can be told to identify as a RAID controller.
         * In this case, we have to fake the interface.
         */
        if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
                interface = PCIIDE_INTERFACE_SETTABLE(0) |
                    PCIIDE_INTERFACE_SETTABLE(1);
                if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
                    CMD_CONF_DSA1)
                        interface |= PCIIDE_INTERFACE_PCI(0) |
                            PCIIDE_INTERFACE_PCI(1);
        } else {
                interface = PCI_INTERFACE(pa->pa_class);
        }

        sc->wdc_chanarray[channel] = &cp->ata_channel;
        cp->name = PCIIDE_CHANNEL_NAME(channel);
        cp->ata_channel.ch_channel = channel;
        cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;

        /*
         * Older CMD64x chips don't have independent channels.
         */
        switch (sc->sc_pp->ide_product) {
        case PCI_PRODUCT_CMDTECH_649:
                one_channel = 0;
                break;
        default:
                one_channel = 1;
                break;
        }

        if (channel > 0 && one_channel) {
                cp->ata_channel.ch_queue =
                    sc->pciide_channels[0].ata_channel.ch_queue;
        } else {
                cp->ata_channel.ch_queue =
                    malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT);
        }
        if (cp->ata_channel.ch_queue == NULL) {
                aprint_error("%s %s channel: "
                    "can't allocate memory for command queue\n",
                    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), cp->name);
                return;
        }
        cp->ata_channel.ch_ndrive = 2;

        aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
            "%s channel %s to %s mode\n", cp->name,
            (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
            "configured" : "wired",
            (interface & PCIIDE_INTERFACE_PCI(channel)) ?
            "native-PCI" : "compatibility");

        /*
         * With a CMD PCI064x, if we get here the first channel is enabled:
         * there's no way to disable the first channel without disabling
         * the whole device.
         */
        if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
                aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
                    "%s channel ignored (disabled)\n", cp->name);
                cp->ata_channel.ch_flags |= ATACH_DISABLED;
                return;
        }

        pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
}

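/*
 * Shared PCI interrupt handler for native-PCI channels: check the
 * per-channel interrupt pending bits before dispatching to wdcintr().
 */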
static int
cmd_pci_intr(void *arg)
{
        struct pciide_softc *sc = arg;
        struct pciide_channel *cp;
        struct ata_channel *wdc_cp;
        int i, rv, crv;
        u_int32_t priirq, secirq;

        rv = 0;
        priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
        secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
        for (i = 0; i < sc->sc_wdcdev.sc_atac.atac_nchannels; i++) {
                cp = &sc->pciide_channels[i];
                wdc_cp = &cp->ata_channel;
                /* If a compat channel, skip. */
                if (cp->compat)
                        continue;
                if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
                    (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
                        crv = wdcintr(wdc_cp);
                        if (crv == 0) {
                                aprint_error("%s:%d: bogus intr\n",
                                    device_xname(
                                      sc->sc_wdcdev.sc_atac.atac_dev), i);
                                sc->sc_wdcdev.irqack(wdc_cp);
                        } else
                                rv = 1;
                }
        }
        return rv;
}

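/*
 * Chip mapping for the PCI0640: the hardware has no usable DMA engine,
 * so just map both channels for PIO.
 */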
static void
cmd_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
        int channel;

        /*
         * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
         * and the base address registers can be disabled at the
         * hardware level. In this case, the device is wired
         * in compat mode and its first channel is always enabled,
         * but we can't rely on PCI_COMMAND_IO_ENABLE.
         * In fact, it seems that the first channel of the CMD PCI0640
         * can't be disabled.
         */

#ifdef PCIIDE_CMD064x_DISABLE
        if (pciide_chipen(sc, pa) == 0)
                return;
#endif

        aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
            "hardware does not support DMA\n");
        sc->sc_dma_ok = 0;

        sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
        sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
        sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16;

        wdc_allocate_regs(&sc->sc_wdcdev);

        for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
             channel++) {
                cmd_channel_map(pa, sc, channel);
        }
}

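/*
 * Chip mapping for the PCI0643/0646/0648/0649 family: map the bus-master
 * DMA registers, enable multiword DMA and (where the chip and revision
 * allow it) Ultra-DMA, then map both channels.
 */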
static void
cmd0643_9_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
        int channel;
        pcireg_t rev = PCI_REVISION(pa->pa_class);

        /*
         * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
         * and the base address registers can be disabled at the
         * hardware level. In this case, the device is wired
         * in compat mode and its first channel is always enabled,
         * but we can't rely on PCI_COMMAND_IO_ENABLE.
         * In fact, it seems that the first channel of the CMD PCI0640
         * can't be disabled.
         */

#ifdef PCIIDE_CMD064x_DISABLE
        if (pciide_chipen(sc, pa) == 0)
                return;
#endif

        aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
            "bus-master DMA support present");
        pciide_mapreg_dma(sc, pa);
        aprint_verbose("\n");
        sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
        if (sc->sc_dma_ok) {
                sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
                switch (sc->sc_pp->ide_product) {
                case PCI_PRODUCT_CMDTECH_649:
                        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
                        sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
                        sc->sc_wdcdev.irqack = cmd646_9_irqack;
                        break;
                case PCI_PRODUCT_CMDTECH_648:
                        sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
                        sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
                        sc->sc_wdcdev.irqack = cmd646_9_irqack;
                        break;
                case PCI_PRODUCT_CMDTECH_646:
                        if (rev >= CMD0646U2_REV) {
                                sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
                                sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
                        } else if (rev >= CMD0646U_REV) {
                                /*
                                 * Linux's driver claims that the 646U is
                                 * broken with UDMA. Only enable it if we
                                 * know what we're doing.
                                 */
#ifdef PCIIDE_CMD0646U_ENABLEUDMA
                                sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
                                sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
#endif
                                /* explicitly disable UDMA */
                                pciide_pci_write(sc->sc_pc, sc->sc_tag,
                                    CMD_UDMATIM(0), 0);
                                pciide_pci_write(sc->sc_pc, sc->sc_tag,
                                    CMD_UDMATIM(1), 0);
                        }
                        sc->sc_wdcdev.irqack = cmd646_9_irqack;
                        break;
                default:
                        sc->sc_wdcdev.irqack = pciide_irqack;
                }
        }

        sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
        sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
        sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
        sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
        sc->sc_wdcdev.sc_atac.atac_set_modes = cmd0643_9_setup_channel;

        ATADEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
            pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
            pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
            DEBUG_PROBE);

        wdc_allocate_regs(&sc->sc_wdcdev);

        for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
             channel++)
                cmd_channel_map(pa, sc, channel);

        /*
         * Note: this also makes sure we clear the IRQ disable and reset
         * bits.
         */
        pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
        ATADEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
            pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
            pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
            DEBUG_PROBE);
}

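/*
 * Per-channel mode setup for the 0643/6/8/9: program the data timing
 * register for each drive's PIO or multiword DMA mode, and the UDMA
 * timing register when Ultra-DMA is in use.
 */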
static void
cmd0643_9_setup_channel(struct ata_channel *chp)
{
        struct ata_drive_datas *drvp;
        u_int8_t tim;
        u_int32_t idedma_ctl, udma_reg;
        int drive, s;
        struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
        struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

        idedma_ctl = 0;
        /* setup DMA if needed */
        pciide_channel_dma_setup(cp);

        for (drive = 0; drive < 2; drive++) {
                drvp = &chp->ch_drive[drive];
                /* If no drive, skip */
                if ((drvp->drive_flags & DRIVE) == 0)
                        continue;
                /* add timing values, setup DMA if needed */
                tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
                if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
                        if (drvp->drive_flags & DRIVE_UDMA) {
                                /* UltraDMA on a 646U2, 0648 or 0649 */
                                s = splbio();
                                drvp->drive_flags &= ~DRIVE_DMA;
                                splx(s);
                                udma_reg = pciide_pci_read(sc->sc_pc,
                                    sc->sc_tag, CMD_UDMATIM(chp->ch_channel));
                                if (drvp->UDMA_mode > 2 &&
                                    (pciide_pci_read(sc->sc_pc, sc->sc_tag,
                                    CMD_BICSR) &
                                    CMD_BICSR_80(chp->ch_channel)) == 0)
                                        drvp->UDMA_mode = 2;
                                if (drvp->UDMA_mode > 2)
                                        udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
                                else if (sc->sc_wdcdev.sc_atac.atac_udma_cap > 2)
                                        udma_reg |= CMD_UDMATIM_UDMA33(drive);
                                udma_reg |= CMD_UDMATIM_UDMA(drive);
                                udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
                                    CMD_UDMATIM_TIM_OFF(drive));
                                udma_reg |=
                                    (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
                                    CMD_UDMATIM_TIM_OFF(drive));
                                pciide_pci_write(sc->sc_pc, sc->sc_tag,
                                    CMD_UDMATIM(chp->ch_channel), udma_reg);
                        } else {
                                /*
                                 * Use multiword DMA.
                                 * Timings will be used for both PIO and DMA,
                                 * so adjust the DMA mode if needed.
                                 * If we have a 0646U2/8/9, turn off UDMA.
                                 */
                                if (sc->sc_wdcdev.sc_atac.atac_cap &
                                    ATAC_CAP_UDMA) {
                                        udma_reg = pciide_pci_read(sc->sc_pc,
                                            sc->sc_tag,
                                            CMD_UDMATIM(chp->ch_channel));
                                        udma_reg &= ~CMD_UDMATIM_UDMA(drive);
                                        pciide_pci_write(sc->sc_pc, sc->sc_tag,
                                            CMD_UDMATIM(chp->ch_channel),
                                            udma_reg);
                                }
                                if (drvp->PIO_mode >= 3 &&
                                    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
                                        drvp->DMA_mode = drvp->PIO_mode - 2;
                                }
                                tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
                        }
                        idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
                }
                pciide_pci_write(sc->sc_pc, sc->sc_tag,
                    CMD_DATA_TIM(chp->ch_channel, drive), tim);
        }
        if (idedma_ctl != 0) {
                /* Add software bits in status register */
                bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
                    idedma_ctl);
        }
}

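/*
 * Interrupt acknowledge for the 0646/8/9: read the register that latches
 * the channel's interrupt status (CMD_CONF for channel 0, CMD_ARTTIM23
 * for channel 1) and write it back, then do the generic bus-master ack.
 */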
static void
cmd646_9_irqack(struct ata_channel *chp)
{
        u_int32_t priirq, secirq;
        struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

        if (chp->ch_channel == 0) {
                priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
                pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
        } else {
                secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
                pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
        }
        pciide_irqack(chp);
}

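/*
 * Chip mapping for the Silicon Image SiI0680: supports Ultra-DMA up to
 * mode 6 when the bus-master DMA registers map successfully.
 */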
static void
cmd680_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
        int channel;

        if (pciide_chipen(sc, pa) == 0)
                return;

        aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
            "bus-master DMA support present");
        pciide_mapreg_dma(sc, pa);
        aprint_verbose("\n");
        sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
        if (sc->sc_dma_ok) {
                sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
                sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
                sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
                sc->sc_wdcdev.irqack = pciide_irqack;
        }

        sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
        sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
        sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
        sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
        sc->sc_wdcdev.sc_atac.atac_set_modes = cmd680_setup_channel;

        pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
        pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
        pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
            pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);

        wdc_allocate_regs(&sc->sc_wdcdev);

        for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
             channel++)
                cmd680_channel_map(pa, sc, channel);
}

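/*
 * Map one channel of the SiI0680: allocate its command queue, write an
 * initial set of values into the channel's per-drive timing registers
 * (init_val below), and map the channel's registers.
 */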
static void
cmd680_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc,
    int channel)
{
        struct pciide_channel *cp = &sc->pciide_channels[channel];
        bus_size_t cmdsize, ctlsize;
        int interface, i, reg;
        static const u_int8_t init_val[] =
            { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
              0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };

        if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
                interface = PCIIDE_INTERFACE_SETTABLE(0) |
                    PCIIDE_INTERFACE_SETTABLE(1);
                interface |= PCIIDE_INTERFACE_PCI(0) |
                    PCIIDE_INTERFACE_PCI(1);
        } else {
                interface = PCI_INTERFACE(pa->pa_class);
        }

        sc->wdc_chanarray[channel] = &cp->ata_channel;
        cp->name = PCIIDE_CHANNEL_NAME(channel);
        cp->ata_channel.ch_channel = channel;
        cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;

        cp->ata_channel.ch_queue =
            malloc(sizeof(struct ata_queue), M_DEVBUF, M_NOWAIT);
        if (cp->ata_channel.ch_queue == NULL) {
                aprint_error("%s %s channel: "
                    "can't allocate memory for command queue\n",
                    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), cp->name);
                return;
        }
        cp->ata_channel.ch_ndrive = 2;

        /* XXX */
        reg = 0xa2 + channel * 16;
        for (i = 0; i < sizeof(init_val); i++)
                pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);

        aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
            "%s channel %s to %s mode\n", cp->name,
            (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
            "configured" : "wired",
            (interface & PCIIDE_INTERFACE_PCI(channel)) ?
            "native-PCI" : "compatibility");

        pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
}

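/*
 * Per-channel mode setup for the SiI0680: for each drive pick UDMA,
 * multiword DMA or PIO, and program the corresponding timing registers
 * from the lookup tables below.
 */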
static void
cmd680_setup_channel(struct ata_channel *chp)
{
        struct ata_drive_datas *drvp;
        u_int8_t mode, off, scsc;
        u_int16_t val;
        u_int32_t idedma_ctl;
        int drive, s;
        struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
        struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);
        pci_chipset_tag_t pc = sc->sc_pc;
        pcitag_t pa = sc->sc_tag;
        static const u_int8_t udma2_tbl[] =
            { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
        static const u_int8_t udma_tbl[] =
            { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
        static const u_int16_t dma_tbl[] =
            { 0x2208, 0x10c2, 0x10c1 };
        static const u_int16_t pio_tbl[] =
            { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };

        idedma_ctl = 0;
        pciide_channel_dma_setup(cp);
        mode = pciide_pci_read(pc, pa, 0x80 + chp->ch_channel * 4);

        for (drive = 0; drive < 2; drive++) {
                drvp = &chp->ch_drive[drive];
                /* If no drive, skip */
                if ((drvp->drive_flags & DRIVE) == 0)
                        continue;
                mode &= ~(0x03 << (drive * 4));
                if (drvp->drive_flags & DRIVE_UDMA) {
                        s = splbio();
                        drvp->drive_flags &= ~DRIVE_DMA;
                        splx(s);
                        off = 0xa0 + chp->ch_channel * 16;
                        if (drvp->UDMA_mode > 2 &&
                            (pciide_pci_read(pc, pa, off) & 0x01) == 0)
                                drvp->UDMA_mode = 2;
                        scsc = pciide_pci_read(pc, pa, 0x8a);
                        if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
                                pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
                                scsc = pciide_pci_read(pc, pa, 0x8a);
                                if ((scsc & 0x30) == 0)
                                        drvp->UDMA_mode = 5;
                        }
                        mode |= 0x03 << (drive * 4);
                        off = 0xac + chp->ch_channel * 16 + drive * 2;
                        val = pciide_pci_read(pc, pa, off) & ~0x3f;
                        if (scsc & 0x30)
                                val |= udma2_tbl[drvp->UDMA_mode];
                        else
                                val |= udma_tbl[drvp->UDMA_mode];
                        pciide_pci_write(pc, pa, off, val);
                        idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
                } else if (drvp->drive_flags & DRIVE_DMA) {
                        mode |= 0x02 << (drive * 4);
                        off = 0xa8 + chp->ch_channel * 16 + drive * 2;
                        val = dma_tbl[drvp->DMA_mode];
                        pciide_pci_write(pc, pa, off, val & 0xff);
                        pciide_pci_write(pc, pa, off + 1, val >> 8);
                        idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
                } else {
                        mode |= 0x01 << (drive * 4);
                        off = 0xa4 + chp->ch_channel * 16 + drive * 2;
                        val = pio_tbl[drvp->PIO_mode];
                        pciide_pci_write(pc, pa, off, val & 0xff);
                        pciide_pci_write(pc, pa, off + 1, val >> 8);
                }
        }

        pciide_pci_write(pc, pa, 0x80 + chp->ch_channel * 4, mode);
        if (idedma_ctl != 0) {
                /* Add software bits in status register */
                bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
                    idedma_ctl);
        }
}