pciide.c revision 1.68.2.25 1 /* $NetBSD: pciide.c,v 1.68.2.25 2001/07/02 13:47:25 jhawk Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82
83 #define DEBUG_DMA 0x01
84 #define DEBUG_XFERS 0x02
85 #define DEBUG_FUNCS 0x08
86 #define DEBUG_PROBE 0x10
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
89 #define WDCDEBUG_PRINT(args, level) \
90 if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98
99 #include <machine/endian.h>
100
101 #include <vm/vm.h>
102 #include <vm/vm_param.h>
103 #include <vm/vm_kern.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/cy82c693var.h>
121
122 #include "opt_pciide.h"
123
124 /* inlines for reading/writing 8-bit PCI registers */
125 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
126 int));
127 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
128 int, u_int8_t));
129
130 static __inline u_int8_t
131 pciide_pci_read(pc, pa, reg)
132 pci_chipset_tag_t pc;
133 pcitag_t pa;
134 int reg;
135 {
136
137 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
138 ((reg & 0x03) * 8) & 0xff);
139 }
140
141 static __inline void
142 pciide_pci_write(pc, pa, reg, val)
143 pci_chipset_tag_t pc;
144 pcitag_t pa;
145 int reg;
146 u_int8_t val;
147 {
148 pcireg_t pcival;
149
150 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
151 pcival &= ~(0xff << ((reg & 0x03) * 8));
152 pcival |= (val << ((reg & 0x03) * 8));
153 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
154 }
155
156 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
157
158 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159 void piix_setup_channel __P((struct channel_softc*));
160 void piix3_4_setup_channel __P((struct channel_softc*));
161 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
162 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
163 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164
165 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
166 void amd7x6_setup_channel __P((struct channel_softc*));
167
168 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
169 void apollo_setup_channel __P((struct channel_softc*));
170
171 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
172 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
173 void cmd0643_9_setup_channel __P((struct channel_softc*));
174 void cmd_channel_map __P((struct pci_attach_args *,
175 struct pciide_softc *, int));
176 int cmd_pci_intr __P((void *));
177 void cmd646_9_irqack __P((struct channel_softc *));
178
179 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
180 void cy693_setup_channel __P((struct channel_softc*));
181
182 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
183 void sis_setup_channel __P((struct channel_softc*));
184
185 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
186 void acer_setup_channel __P((struct channel_softc*));
187 int acer_pci_intr __P((void *));
188
189 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
190 void pdc202xx_setup_channel __P((struct channel_softc*));
191 int pdc202xx_pci_intr __P((void *));
192 int pdc20265_pci_intr __P((void *));
193
194 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
195 void opti_setup_channel __P((struct channel_softc*));
196
197 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
198 void hpt_setup_channel __P((struct channel_softc*));
199 int hpt_pci_intr __P((void *));
200
201 void pciide_channel_dma_setup __P((struct pciide_channel *));
202 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
203 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
204 void pciide_dma_start __P((void*, int, int));
205 int pciide_dma_finish __P((void*, int, int, int));
206 void pciide_irqack __P((struct channel_softc *));
207 void pciide_print_modes __P((struct pciide_channel *));
208
209 struct pciide_product_desc {
210 u_int32_t ide_product;
211 int ide_flags;
212 const char *ide_name;
213 /* map and setup chip, probe drives */
214 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
215 };
216
217 /* Flags for ide_flags */
218 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
219
220 /* Default product description for devices not known from this controller */
221 const struct pciide_product_desc default_product_desc = {
222 0,
223 0,
224 "Generic PCI IDE controller",
225 default_chip_map,
226 };
227
228 const struct pciide_product_desc pciide_intel_products[] = {
229 { PCI_PRODUCT_INTEL_82092AA,
230 0,
231 "Intel 82092AA IDE controller",
232 default_chip_map,
233 },
234 { PCI_PRODUCT_INTEL_82371FB_IDE,
235 0,
236 "Intel 82371FB IDE controller (PIIX)",
237 piix_chip_map,
238 },
239 { PCI_PRODUCT_INTEL_82371SB_IDE,
240 0,
241 "Intel 82371SB IDE Interface (PIIX3)",
242 piix_chip_map,
243 },
244 { PCI_PRODUCT_INTEL_82371AB_IDE,
245 0,
246 "Intel 82371AB IDE controller (PIIX4)",
247 piix_chip_map,
248 },
249 { PCI_PRODUCT_INTEL_82801AA_IDE,
250 0,
251 "Intel 82801AA IDE Controller (ICH)",
252 piix_chip_map,
253 },
254 { PCI_PRODUCT_INTEL_82801AB_IDE,
255 0,
256 "Intel 82801AB IDE Controller (ICH0)",
257 piix_chip_map,
258 },
259 { PCI_PRODUCT_INTEL_82801BA_IDE,
260 0,
261 "Intel 82801BA IDE Controller (ICH2)",
262 piix_chip_map,
263 },
264 { PCI_PRODUCT_INTEL_82801BAM_IDE,
265 0,
266 "Intel 82801BAM IDE Controller (ICH2)",
267 piix_chip_map,
268 },
269 { 0,
270 0,
271 NULL,
272 NULL
273 }
274 };
275
276 const struct pciide_product_desc pciide_amd_products[] = {
277 { PCI_PRODUCT_AMD_PBC756_IDE,
278 0,
279 "Advanced Micro Devices AMD756 IDE Controller",
280 amd7x6_chip_map
281 },
282 { PCI_PRODUCT_AMD_PBC766_IDE,
283 0,
284 "Advanced Micro Devices AMD766 IDE Controller",
285 amd7x6_chip_map
286 },
287 { 0,
288 0,
289 NULL,
290 NULL
291 }
292 };
293
294 const struct pciide_product_desc pciide_cmd_products[] = {
295 { PCI_PRODUCT_CMDTECH_640,
296 0,
297 "CMD Technology PCI0640",
298 cmd_chip_map
299 },
300 { PCI_PRODUCT_CMDTECH_643,
301 0,
302 "CMD Technology PCI0643",
303 cmd0643_9_chip_map,
304 },
305 { PCI_PRODUCT_CMDTECH_646,
306 0,
307 "CMD Technology PCI0646",
308 cmd0643_9_chip_map,
309 },
310 { PCI_PRODUCT_CMDTECH_648,
311 IDE_PCI_CLASS_OVERRIDE,
312 "CMD Technology PCI0648",
313 cmd0643_9_chip_map,
314 },
315 { PCI_PRODUCT_CMDTECH_649,
316 IDE_PCI_CLASS_OVERRIDE,
317 "CMD Technology PCI0649",
318 cmd0643_9_chip_map,
319 },
320 { 0,
321 0,
322 NULL,
323 NULL
324 }
325 };
326
327 const struct pciide_product_desc pciide_via_products[] = {
328 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
329 0,
330 NULL,
331 apollo_chip_map,
332 },
333 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
334 0,
335 NULL,
336 apollo_chip_map,
337 },
338 { 0,
339 0,
340 NULL,
341 NULL
342 }
343 };
344
345 const struct pciide_product_desc pciide_cypress_products[] = {
346 { PCI_PRODUCT_CONTAQ_82C693,
347 0,
348 "Cypress 82C693 IDE Controller",
349 cy693_chip_map,
350 },
351 { 0,
352 0,
353 NULL,
354 NULL
355 }
356 };
357
358 const struct pciide_product_desc pciide_sis_products[] = {
359 { PCI_PRODUCT_SIS_5597_IDE,
360 0,
361 "Silicon Integrated System 5597/5598 IDE controller",
362 sis_chip_map,
363 },
364 { 0,
365 0,
366 NULL,
367 NULL
368 }
369 };
370
371 const struct pciide_product_desc pciide_acer_products[] = {
372 { PCI_PRODUCT_ALI_M5229,
373 0,
374 "Acer Labs M5229 UDMA IDE Controller",
375 acer_chip_map,
376 },
377 { 0,
378 0,
379 NULL,
380 NULL
381 }
382 };
383
384 const struct pciide_product_desc pciide_promise_products[] = {
385 { PCI_PRODUCT_PROMISE_ULTRA33,
386 IDE_PCI_CLASS_OVERRIDE,
387 "Promise Ultra33/ATA Bus Master IDE Accelerator",
388 pdc202xx_chip_map,
389 },
390 { PCI_PRODUCT_PROMISE_ULTRA66,
391 IDE_PCI_CLASS_OVERRIDE,
392 "Promise Ultra66/ATA Bus Master IDE Accelerator",
393 pdc202xx_chip_map,
394 },
395 { PCI_PRODUCT_PROMISE_ULTRA100,
396 IDE_PCI_CLASS_OVERRIDE,
397 "Promise Ultra100/ATA Bus Master IDE Accelerator",
398 pdc202xx_chip_map,
399 },
400 { PCI_PRODUCT_PROMISE_ULTRA100X,
401 IDE_PCI_CLASS_OVERRIDE,
402 "Promise Ultra100/ATA Bus Master IDE Accelerator",
403 pdc202xx_chip_map,
404 },
405 { 0,
406 0,
407 NULL,
408 NULL
409 }
410 };
411
412 const struct pciide_product_desc pciide_opti_products[] = {
413 { PCI_PRODUCT_OPTI_82C621,
414 0,
415 "OPTi 82c621 PCI IDE controller",
416 opti_chip_map,
417 },
418 { PCI_PRODUCT_OPTI_82C568,
419 0,
420 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
421 opti_chip_map,
422 },
423 { PCI_PRODUCT_OPTI_82D568,
424 0,
425 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
426 opti_chip_map,
427 },
428 { 0,
429 0,
430 NULL,
431 NULL
432 }
433 };
434
435 const struct pciide_product_desc pciide_triones_products[] = {
436 { PCI_PRODUCT_TRIONES_HPT366,
437 IDE_PCI_CLASS_OVERRIDE,
438 NULL,
439 hpt_chip_map,
440 },
441 { 0,
442 0,
443 NULL,
444 NULL
445 }
446 };
447
448 struct pciide_vendor_desc {
449 u_int32_t ide_vendor;
450 const struct pciide_product_desc *ide_products;
451 };
452
453 const struct pciide_vendor_desc pciide_vendors[] = {
454 { PCI_VENDOR_INTEL, pciide_intel_products },
455 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
456 { PCI_VENDOR_VIATECH, pciide_via_products },
457 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
458 { PCI_VENDOR_SIS, pciide_sis_products },
459 { PCI_VENDOR_ALI, pciide_acer_products },
460 { PCI_VENDOR_PROMISE, pciide_promise_products },
461 { PCI_VENDOR_AMD, pciide_amd_products },
462 { PCI_VENDOR_OPTI, pciide_opti_products },
463 { PCI_VENDOR_TRIONES, pciide_triones_products },
464 { 0, NULL }
465 };
466
467 /* options passed via the 'flags' config keyword */
468 #define PCIIDE_OPTIONS_DMA 0x01
469
470 int pciide_match __P((struct device *, struct cfdata *, void *));
471 void pciide_attach __P((struct device *, struct device *, void *));
472
473 struct cfattach pciide_ca = {
474 sizeof(struct pciide_softc), pciide_match, pciide_attach
475 };
476 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
477 int pciide_mapregs_compat __P(( struct pci_attach_args *,
478 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
479 int pciide_mapregs_native __P((struct pci_attach_args *,
480 struct pciide_channel *, bus_size_t *, bus_size_t *,
481 int (*pci_intr) __P((void *))));
482 void pciide_mapreg_dma __P((struct pciide_softc *,
483 struct pci_attach_args *));
484 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
485 void pciide_mapchan __P((struct pci_attach_args *,
486 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
487 int (*pci_intr) __P((void *))));
488 int pciide_chan_candisable __P((struct pciide_channel *));
489 void pciide_map_compat_intr __P(( struct pci_attach_args *,
490 struct pciide_channel *, int, int));
491 int pciide_print __P((void *, const char *pnp));
492 int pciide_compat_intr __P((void *));
493 int pciide_pci_intr __P((void *));
494 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
495
496 const struct pciide_product_desc *
497 pciide_lookup_product(id)
498 u_int32_t id;
499 {
500 const struct pciide_product_desc *pp;
501 const struct pciide_vendor_desc *vp;
502
503 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
504 if (PCI_VENDOR(id) == vp->ide_vendor)
505 break;
506
507 if ((pp = vp->ide_products) == NULL)
508 return NULL;
509
510 for (; pp->chip_map != NULL; pp++)
511 if (PCI_PRODUCT(id) == pp->ide_product)
512 break;
513
514 if (pp->chip_map == NULL)
515 return NULL;
516 return pp;
517 }
518
519 int
520 pciide_match(parent, match, aux)
521 struct device *parent;
522 struct cfdata *match;
523 void *aux;
524 {
525 struct pci_attach_args *pa = aux;
526 const struct pciide_product_desc *pp;
527
528 /*
529 * Check the ID register to see that it's a PCI IDE controller.
530 * If it is, we assume that we can deal with it; it _should_
531 * work in a standardized way...
532 */
533 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
534 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
535 return (1);
536 }
537
538 /*
539 * Some controllers (e.g. promise Utra-33) don't claim to be PCI IDE
540 * controllers. Let see if we can deal with it anyway.
541 */
542 pp = pciide_lookup_product(pa->pa_id);
543 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
544 return (1);
545 }
546
547 return (0);
548 }
549
550 void
551 pciide_attach(parent, self, aux)
552 struct device *parent, *self;
553 void *aux;
554 {
555 struct pci_attach_args *pa = aux;
556 pci_chipset_tag_t pc = pa->pa_pc;
557 pcitag_t tag = pa->pa_tag;
558 struct pciide_softc *sc = (struct pciide_softc *)self;
559 pcireg_t csr;
560 char devinfo[256];
561 const char *displaydev;
562
563 sc->sc_pp = pciide_lookup_product(pa->pa_id);
564 if (sc->sc_pp == NULL) {
565 sc->sc_pp = &default_product_desc;
566 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
567 displaydev = devinfo;
568 } else
569 displaydev = sc->sc_pp->ide_name;
570
571 /* if displaydev == NULL, printf is done in chip-specific map */
572 if (displaydev)
573 printf(": %s (rev. 0x%02x)\n", displaydev,
574 PCI_REVISION(pa->pa_class));
575
576 sc->sc_pc = pa->pa_pc;
577 sc->sc_tag = pa->pa_tag;
578 #ifdef WDCDEBUG
579 if (wdcdebug_pciide_mask & DEBUG_PROBE)
580 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
581 #endif
582 sc->sc_pp->chip_map(sc, pa);
583
584 if (sc->sc_dma_ok) {
585 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
586 csr |= PCI_COMMAND_MASTER_ENABLE;
587 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
588 }
589 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
590 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
591 }
592
593 /* tell wether the chip is enabled or not */
594 int
595 pciide_chipen(sc, pa)
596 struct pciide_softc *sc;
597 struct pci_attach_args *pa;
598 {
599 pcireg_t csr;
600 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
601 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
602 PCI_COMMAND_STATUS_REG);
603 printf("%s: device disabled (at %s)\n",
604 sc->sc_wdcdev.sc_dev.dv_xname,
605 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
606 "device" : "bridge");
607 return 0;
608 }
609 return 1;
610 }
611
612 int
613 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
614 struct pci_attach_args *pa;
615 struct pciide_channel *cp;
616 int compatchan;
617 bus_size_t *cmdsizep, *ctlsizep;
618 {
619 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
620 struct channel_softc *wdc_cp = &cp->wdc_channel;
621
622 cp->compat = 1;
623 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
624 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
625
626 wdc_cp->cmd_iot = pa->pa_iot;
627 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
628 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
629 printf("%s: couldn't map %s channel cmd regs\n",
630 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
631 return (0);
632 }
633
634 wdc_cp->ctl_iot = pa->pa_iot;
635 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
636 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
637 printf("%s: couldn't map %s channel ctl regs\n",
638 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
639 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
640 PCIIDE_COMPAT_CMD_SIZE);
641 return (0);
642 }
643
644 return (1);
645 }
646
647 int
648 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
649 struct pci_attach_args * pa;
650 struct pciide_channel *cp;
651 bus_size_t *cmdsizep, *ctlsizep;
652 int (*pci_intr) __P((void *));
653 {
654 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
655 struct channel_softc *wdc_cp = &cp->wdc_channel;
656 const char *intrstr;
657 pci_intr_handle_t intrhandle;
658
659 cp->compat = 0;
660
661 if (sc->sc_pci_ih == NULL) {
662 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
663 pa->pa_intrline, &intrhandle) != 0) {
664 printf("%s: couldn't map native-PCI interrupt\n",
665 sc->sc_wdcdev.sc_dev.dv_xname);
666 return 0;
667 }
668 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
669 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
670 intrhandle, IPL_BIO, pci_intr, sc);
671 if (sc->sc_pci_ih != NULL) {
672 printf("%s: using %s for native-PCI interrupt\n",
673 sc->sc_wdcdev.sc_dev.dv_xname,
674 intrstr ? intrstr : "unknown interrupt");
675 } else {
676 printf("%s: couldn't establish native-PCI interrupt",
677 sc->sc_wdcdev.sc_dev.dv_xname);
678 if (intrstr != NULL)
679 printf(" at %s", intrstr);
680 printf("\n");
681 return 0;
682 }
683 }
684 cp->ih = sc->sc_pci_ih;
685 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
686 PCI_MAPREG_TYPE_IO, 0,
687 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
688 printf("%s: couldn't map %s channel cmd regs\n",
689 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
690 return 0;
691 }
692
693 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
694 PCI_MAPREG_TYPE_IO, 0,
695 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
696 printf("%s: couldn't map %s channel ctl regs\n",
697 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
698 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
699 return 0;
700 }
701 /*
702 * In native mode, 4 bytes of I/O space are mapped for the control
703 * register, the control register is at offset 2. Pass the generic
704 * code a handle for only one byte at the rigth offset.
705 */
706 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
707 &wdc_cp->ctl_ioh) != 0) {
708 printf("%s: unable to subregion %s channel ctl regs\n",
709 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
710 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
711 bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep);
712 return 0;
713 }
714 return (1);
715 }
716
717 void
718 pciide_mapreg_dma(sc, pa)
719 struct pciide_softc *sc;
720 struct pci_attach_args *pa;
721 {
722 pcireg_t maptype;
723
724 /*
725 * Map DMA registers
726 *
727 * Note that sc_dma_ok is the right variable to test to see if
728 * DMA can be done. If the interface doesn't support DMA,
729 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
730 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
731 * non-zero if the interface supports DMA and the registers
732 * could be mapped.
733 *
734 * XXX Note that despite the fact that the Bus Master IDE specs
735 * XXX say that "The bus master IDE function uses 16 bytes of IO
736 * XXX space," some controllers (at least the United
737 * XXX Microelectronics UM8886BF) place it in memory space.
738 */
739 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
740 PCIIDE_REG_BUS_MASTER_DMA);
741
742 switch (maptype) {
743 case PCI_MAPREG_TYPE_IO:
744 case PCI_MAPREG_MEM_TYPE_32BIT:
745 sc->sc_dma_ok = (pci_mapreg_map(pa,
746 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
747 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
748 sc->sc_dmat = pa->pa_dmat;
749 if (sc->sc_dma_ok == 0) {
750 printf(", but unused (couldn't map registers)");
751 } else {
752 sc->sc_wdcdev.dma_arg = sc;
753 sc->sc_wdcdev.dma_init = pciide_dma_init;
754 sc->sc_wdcdev.dma_start = pciide_dma_start;
755 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
756 }
757 break;
758
759 default:
760 sc->sc_dma_ok = 0;
761 printf(", but unsupported register maptype (0x%x)", maptype);
762 }
763 }
764
765 int
766 pciide_compat_intr(arg)
767 void *arg;
768 {
769 struct pciide_channel *cp = arg;
770
771 #ifdef DIAGNOSTIC
772 /* should only be called for a compat channel */
773 if (cp->compat == 0)
774 panic("pciide compat intr called for non-compat chan %p\n", cp);
775 #endif
776 return (wdcintr(&cp->wdc_channel));
777 }
778
779 int
780 pciide_pci_intr(arg)
781 void *arg;
782 {
783 struct pciide_softc *sc = arg;
784 struct pciide_channel *cp;
785 struct channel_softc *wdc_cp;
786 int i, rv, crv;
787
788 rv = 0;
789 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
790 cp = &sc->pciide_channels[i];
791 wdc_cp = &cp->wdc_channel;
792
793 /* If a compat channel skip. */
794 if (cp->compat)
795 continue;
796 /* if this channel not waiting for intr, skip */
797 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
798 continue;
799
800 crv = wdcintr(wdc_cp);
801 if (crv == 0)
802 ; /* leave rv alone */
803 else if (crv == 1)
804 rv = 1; /* claim the intr */
805 else if (rv == 0) /* crv should be -1 in this case */
806 rv = crv; /* if we've done no better, take it */
807 }
808 return (rv);
809 }
810
811 void
812 pciide_channel_dma_setup(cp)
813 struct pciide_channel *cp;
814 {
815 int drive;
816 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
817 struct ata_drive_datas *drvp;
818
819 for (drive = 0; drive < 2; drive++) {
820 drvp = &cp->wdc_channel.ch_drive[drive];
821 /* If no drive, skip */
822 if ((drvp->drive_flags & DRIVE) == 0)
823 continue;
824 /* setup DMA if needed */
825 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
826 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
827 sc->sc_dma_ok == 0) {
828 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
829 continue;
830 }
831 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
832 != 0) {
833 /* Abort DMA setup */
834 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
835 continue;
836 }
837 }
838 }
839
840 int
841 pciide_dma_table_setup(sc, channel, drive)
842 struct pciide_softc *sc;
843 int channel, drive;
844 {
845 bus_dma_segment_t seg;
846 int error, rseg;
847 const bus_size_t dma_table_size =
848 sizeof(struct idedma_table) * NIDEDMA_TABLES;
849 struct pciide_dma_maps *dma_maps =
850 &sc->pciide_channels[channel].dma_maps[drive];
851
852 /* If table was already allocated, just return */
853 if (dma_maps->dma_table)
854 return 0;
855
856 /* Allocate memory for the DMA tables and map it */
857 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
858 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
859 BUS_DMA_NOWAIT)) != 0) {
860 printf("%s:%d: unable to allocate table DMA for "
861 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
862 channel, drive, error);
863 return error;
864 }
865 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
866 dma_table_size,
867 (caddr_t *)&dma_maps->dma_table,
868 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
869 printf("%s:%d: unable to map table DMA for"
870 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
871 channel, drive, error);
872 return error;
873 }
874 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
875 "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
876 seg.ds_addr), DEBUG_PROBE);
877
878 /* Create and load table DMA map for this disk */
879 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
880 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
881 &dma_maps->dmamap_table)) != 0) {
882 printf("%s:%d: unable to create table DMA map for "
883 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
884 channel, drive, error);
885 return error;
886 }
887 if ((error = bus_dmamap_load(sc->sc_dmat,
888 dma_maps->dmamap_table,
889 dma_maps->dma_table,
890 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
891 printf("%s:%d: unable to load table DMA map for "
892 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
893 channel, drive, error);
894 return error;
895 }
896 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
897 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
898 /* Create a xfer DMA map for this drive */
899 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
900 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
901 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
902 &dma_maps->dmamap_xfer)) != 0) {
903 printf("%s:%d: unable to create xfer DMA map for "
904 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
905 channel, drive, error);
906 return error;
907 }
908 return 0;
909 }
910
911 int
912 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
913 void *v;
914 int channel, drive;
915 void *databuf;
916 size_t datalen;
917 int flags;
918 {
919 struct pciide_softc *sc = v;
920 int error, seg;
921 struct pciide_dma_maps *dma_maps =
922 &sc->pciide_channels[channel].dma_maps[drive];
923
924 error = bus_dmamap_load(sc->sc_dmat,
925 dma_maps->dmamap_xfer,
926 databuf, datalen, NULL, BUS_DMA_NOWAIT);
927 if (error) {
928 printf("%s:%d: unable to load xfer DMA map for"
929 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
930 channel, drive, error);
931 return error;
932 }
933
934 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
935 dma_maps->dmamap_xfer->dm_mapsize,
936 (flags & WDC_DMA_READ) ?
937 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
938
939 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
940 #ifdef DIAGNOSTIC
941 /* A segment must not cross a 64k boundary */
942 {
943 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
944 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
945 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
946 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
947 printf("pciide_dma: segment %d physical addr 0x%lx"
948 " len 0x%lx not properly aligned\n",
949 seg, phys, len);
950 panic("pciide_dma: buf align");
951 }
952 }
953 #endif
954 dma_maps->dma_table[seg].base_addr =
955 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
956 dma_maps->dma_table[seg].byte_count =
957 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
958 IDEDMA_BYTE_COUNT_MASK);
959 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
960 seg, le32toh(dma_maps->dma_table[seg].byte_count),
961 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
962
963 }
964 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
965 htole32(IDEDMA_BYTE_COUNT_EOT);
966
967 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
968 dma_maps->dmamap_table->dm_mapsize,
969 BUS_DMASYNC_PREWRITE);
970
971 /* Maps are ready. Start DMA function */
972 #ifdef DIAGNOSTIC
973 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
974 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
975 dma_maps->dmamap_table->dm_segs[0].ds_addr);
976 panic("pciide_dma_init: table align");
977 }
978 #endif
979
980 /* Clear status bits */
981 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
982 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
983 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
984 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
985 /* Write table addr */
986 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
987 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
988 dma_maps->dmamap_table->dm_segs[0].ds_addr);
989 /* set read/write */
990 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
991 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
992 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
993 /* remember flags */
994 dma_maps->dma_flags = flags;
995 return 0;
996 }
997
998 void
999 pciide_dma_start(v, channel, drive)
1000 void *v;
1001 int channel, drive;
1002 {
1003 struct pciide_softc *sc = v;
1004
1005 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1006 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1007 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1008 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1009 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1010 }
1011
1012 int
1013 pciide_dma_finish(v, channel, drive, force)
1014 void *v;
1015 int channel, drive;
1016 int force;
1017 {
1018 struct pciide_softc *sc = v;
1019 u_int8_t status;
1020 int error = 0;
1021 struct pciide_dma_maps *dma_maps =
1022 &sc->pciide_channels[channel].dma_maps[drive];
1023
1024 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1025 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1026 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1027 DEBUG_XFERS);
1028
1029 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1030 return WDC_DMAST_NOIRQ;
1031
1032 /* stop DMA channel */
1033 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1034 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1035 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1036 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1037
1038 /* Unload the map of the data buffer */
1039 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1040 dma_maps->dmamap_xfer->dm_mapsize,
1041 (dma_maps->dma_flags & WDC_DMA_READ) ?
1042 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1043 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1044
1045 if ((status & IDEDMA_CTL_ERR) != 0) {
1046 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1047 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1048 error |= WDC_DMAST_ERR;
1049 }
1050
1051 if ((status & IDEDMA_CTL_INTR) == 0) {
1052 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1053 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1054 drive, status);
1055 error |= WDC_DMAST_NOIRQ;
1056 }
1057
1058 if ((status & IDEDMA_CTL_ACT) != 0) {
1059 /* data underrun, may be a valid condition for ATAPI */
1060 error |= WDC_DMAST_UNDER;
1061 }
1062 return error;
1063 }
1064
1065 void
1066 pciide_irqack(chp)
1067 struct channel_softc *chp;
1068 {
1069 struct pciide_channel *cp = (struct pciide_channel*)chp;
1070 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1071
1072 /* clear status bits in IDE DMA registers */
1073 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1074 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1075 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1076 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1077 }
1078
1079 /* some common code used by several chip_map */
1080 int
1081 pciide_chansetup(sc, channel, interface)
1082 struct pciide_softc *sc;
1083 int channel;
1084 pcireg_t interface;
1085 {
1086 struct pciide_channel *cp = &sc->pciide_channels[channel];
1087 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1088 cp->name = PCIIDE_CHANNEL_NAME(channel);
1089 cp->wdc_channel.channel = channel;
1090 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1091 cp->wdc_channel.ch_queue =
1092 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1093 if (cp->wdc_channel.ch_queue == NULL) {
1094 printf("%s %s channel: "
1095 "can't allocate memory for command queue",
1096 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1097 return 0;
1098 }
1099 printf("%s: %s channel %s to %s mode\n",
1100 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1101 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1102 "configured" : "wired",
1103 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1104 "native-PCI" : "compatibility");
1105 return 1;
1106 }
1107
1108 /* some common code used by several chip channel_map */
1109 void
1110 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1111 struct pci_attach_args *pa;
1112 struct pciide_channel *cp;
1113 pcireg_t interface;
1114 bus_size_t *cmdsizep, *ctlsizep;
1115 int (*pci_intr) __P((void *));
1116 {
1117 struct channel_softc *wdc_cp = &cp->wdc_channel;
1118
1119 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1120 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1121 pci_intr);
1122 else
1123 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1124 wdc_cp->channel, cmdsizep, ctlsizep);
1125
1126 if (cp->hw_ok == 0)
1127 return;
1128 wdc_cp->data32iot = wdc_cp->cmd_iot;
1129 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1130 wdcattach(wdc_cp);
1131 }
1132
1133 /*
1134 * Generic code to call to know if a channel can be disabled. Return 1
1135 * if channel can be disabled, 0 if not
1136 */
1137 int
1138 pciide_chan_candisable(cp)
1139 struct pciide_channel *cp;
1140 {
1141 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1142 struct channel_softc *wdc_cp = &cp->wdc_channel;
1143
1144 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1145 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1146 printf("%s: disabling %s channel (no drives)\n",
1147 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1148 cp->hw_ok = 0;
1149 return 1;
1150 }
1151 return 0;
1152 }
1153
1154 /*
1155 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1156 * Set hw_ok=0 on failure
1157 */
1158 void
1159 pciide_map_compat_intr(pa, cp, compatchan, interface)
1160 struct pci_attach_args *pa;
1161 struct pciide_channel *cp;
1162 int compatchan, interface;
1163 {
1164 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1165 struct channel_softc *wdc_cp = &cp->wdc_channel;
1166
1167 if (cp->hw_ok == 0)
1168 return;
1169 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1170 return;
1171
1172 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1173 pa, compatchan, pciide_compat_intr, cp);
1174 if (cp->ih == NULL) {
1175 printf("%s: no compatibility interrupt for use by %s "
1176 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1177 cp->hw_ok = 0;
1178 }
1179 }
1180
1181 void
1182 pciide_print_modes(cp)
1183 struct pciide_channel *cp;
1184 {
1185 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1186 int drive;
1187 struct channel_softc *chp;
1188 struct ata_drive_datas *drvp;
1189
1190 chp = &cp->wdc_channel;
1191 for (drive = 0; drive < 2; drive++) {
1192 drvp = &chp->ch_drive[drive];
1193 if ((drvp->drive_flags & DRIVE) == 0)
1194 continue;
1195 printf("%s(%s:%d:%d): using PIO mode %d",
1196 drvp->drv_softc->dv_xname,
1197 sc->sc_wdcdev.sc_dev.dv_xname,
1198 chp->channel, drive, drvp->PIO_mode);
1199 if (drvp->drive_flags & DRIVE_DMA)
1200 printf(", DMA mode %d", drvp->DMA_mode);
1201 if (drvp->drive_flags & DRIVE_UDMA)
1202 printf(", Ultra-DMA mode %d", drvp->UDMA_mode);
1203 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA))
1204 printf(" (using DMA data transfers)");
1205 printf("\n");
1206 }
1207 }
1208
1209 void
1210 default_chip_map(sc, pa)
1211 struct pciide_softc *sc;
1212 struct pci_attach_args *pa;
1213 {
1214 struct pciide_channel *cp;
1215 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1216 pcireg_t csr;
1217 int channel, drive;
1218 struct ata_drive_datas *drvp;
1219 u_int8_t idedma_ctl;
1220 bus_size_t cmdsize, ctlsize;
1221 char *failreason;
1222
1223 if (pciide_chipen(sc, pa) == 0)
1224 return;
1225
1226 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1227 printf("%s: bus-master DMA support present",
1228 sc->sc_wdcdev.sc_dev.dv_xname);
1229 if (sc->sc_pp == &default_product_desc &&
1230 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1231 PCIIDE_OPTIONS_DMA) == 0) {
1232 printf(", but unused (no driver support)");
1233 sc->sc_dma_ok = 0;
1234 } else {
1235 pciide_mapreg_dma(sc, pa);
1236 if (sc->sc_dma_ok != 0)
1237 printf(", used without full driver "
1238 "support");
1239 }
1240 } else {
1241 printf("%s: hardware does not support DMA",
1242 sc->sc_wdcdev.sc_dev.dv_xname);
1243 sc->sc_dma_ok = 0;
1244 }
1245 printf("\n");
1246 if (sc->sc_dma_ok) {
1247 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1248 sc->sc_wdcdev.irqack = pciide_irqack;
1249 }
1250 sc->sc_wdcdev.PIO_cap = 0;
1251 sc->sc_wdcdev.DMA_cap = 0;
1252
1253 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1254 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1255 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1256
1257 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1258 cp = &sc->pciide_channels[channel];
1259 if (pciide_chansetup(sc, channel, interface) == 0)
1260 continue;
1261 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1262 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1263 &ctlsize, pciide_pci_intr);
1264 } else {
1265 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1266 channel, &cmdsize, &ctlsize);
1267 }
1268 if (cp->hw_ok == 0)
1269 continue;
1270 /*
1271 * Check to see if something appears to be there.
1272 */
1273 failreason = NULL;
1274 if (!wdcprobe(&cp->wdc_channel)) {
1275 failreason = "not responding; disabled or no drives?";
1276 goto next;
1277 }
1278 /*
1279 * Now, make sure it's actually attributable to this PCI IDE
1280 * channel by trying to access the channel again while the
1281 * PCI IDE controller's I/O space is disabled. (If the
1282 * channel no longer appears to be there, it belongs to
1283 * this controller.) YUCK!
1284 */
1285 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1286 PCI_COMMAND_STATUS_REG);
1287 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1288 csr & ~PCI_COMMAND_IO_ENABLE);
1289 if (wdcprobe(&cp->wdc_channel))
1290 failreason = "other hardware responding at addresses";
1291 pci_conf_write(sc->sc_pc, sc->sc_tag,
1292 PCI_COMMAND_STATUS_REG, csr);
1293 next:
1294 if (failreason) {
1295 printf("%s: %s channel ignored (%s)\n",
1296 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1297 failreason);
1298 cp->hw_ok = 0;
1299 bus_space_unmap(cp->wdc_channel.cmd_iot,
1300 cp->wdc_channel.cmd_ioh, cmdsize);
1301 bus_space_unmap(cp->wdc_channel.ctl_iot,
1302 cp->wdc_channel.ctl_ioh, ctlsize);
1303 } else {
1304 pciide_map_compat_intr(pa, cp, channel, interface);
1305 }
1306 if (cp->hw_ok) {
1307 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1308 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1309 wdcattach(&cp->wdc_channel);
1310 }
1311 }
1312
1313 if (sc->sc_dma_ok == 0)
1314 return;
1315
1316 /* Allocate DMA maps */
1317 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1318 idedma_ctl = 0;
1319 cp = &sc->pciide_channels[channel];
1320 for (drive = 0; drive < 2; drive++) {
1321 drvp = &cp->wdc_channel.ch_drive[drive];
1322 /* If no drive, skip */
1323 if ((drvp->drive_flags & DRIVE) == 0)
1324 continue;
1325 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1326 continue;
1327 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1328 /* Abort DMA setup */
1329 printf("%s:%d:%d: can't allocate DMA maps, "
1330 "using PIO transfers\n",
1331 sc->sc_wdcdev.sc_dev.dv_xname,
1332 channel, drive);
1333 drvp->drive_flags &= ~DRIVE_DMA;
1334 }
1335 printf("%s:%d:%d: using DMA data transfers\n",
1336 sc->sc_wdcdev.sc_dev.dv_xname,
1337 channel, drive);
1338 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1339 }
1340 if (idedma_ctl != 0) {
1341 /* Add software bits in status register */
1342 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1343 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1344 idedma_ctl);
1345 }
1346 }
1347 }
1348
1349 void
1350 piix_chip_map(sc, pa)
1351 struct pciide_softc *sc;
1352 struct pci_attach_args *pa;
1353 {
1354 struct pciide_channel *cp;
1355 int channel;
1356 u_int32_t idetim;
1357 bus_size_t cmdsize, ctlsize;
1358
1359 if (pciide_chipen(sc, pa) == 0)
1360 return;
1361
1362 printf("%s: bus-master DMA support present",
1363 sc->sc_wdcdev.sc_dev.dv_xname);
1364 pciide_mapreg_dma(sc, pa);
1365 printf("\n");
1366 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1367 WDC_CAPABILITY_MODE;
1368 if (sc->sc_dma_ok) {
1369 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1370 sc->sc_wdcdev.irqack = pciide_irqack;
1371 switch(sc->sc_pp->ide_product) {
1372 case PCI_PRODUCT_INTEL_82371AB_IDE:
1373 case PCI_PRODUCT_INTEL_82801AA_IDE:
1374 case PCI_PRODUCT_INTEL_82801AB_IDE:
1375 case PCI_PRODUCT_INTEL_82801BA_IDE:
1376 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1377 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1378 }
1379 }
1380 sc->sc_wdcdev.PIO_cap = 4;
1381 sc->sc_wdcdev.DMA_cap = 2;
1382 switch(sc->sc_pp->ide_product) {
1383 case PCI_PRODUCT_INTEL_82801AA_IDE:
1384 sc->sc_wdcdev.UDMA_cap = 4;
1385 break;
1386 case PCI_PRODUCT_INTEL_82801BA_IDE:
1387 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1388 sc->sc_wdcdev.UDMA_cap = 5;
1389 break;
1390 default:
1391 sc->sc_wdcdev.UDMA_cap = 2;
1392 }
1393 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1394 sc->sc_wdcdev.set_modes = piix_setup_channel;
1395 else
1396 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1397 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1398 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1399
1400 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1401 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1402 DEBUG_PROBE);
1403 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1404 WDCDEBUG_PRINT((", sidetim=0x%x",
1405 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1406 DEBUG_PROBE);
1407 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1408 WDCDEBUG_PRINT((", udamreg 0x%x",
1409 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1410 DEBUG_PROBE);
1411 }
1412 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1413 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1414 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1415 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1416 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1417 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1418 DEBUG_PROBE);
1419 }
1420
1421 }
1422 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1423
1424 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1425 cp = &sc->pciide_channels[channel];
1426 /* PIIX is compat-only */
1427 if (pciide_chansetup(sc, channel, 0) == 0)
1428 continue;
1429 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1430 if ((PIIX_IDETIM_READ(idetim, channel) &
1431 PIIX_IDETIM_IDE) == 0) {
1432 printf("%s: %s channel ignored (disabled)\n",
1433 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1434 continue;
1435 }
1436 /* PIIX are compat-only pciide devices */
1437 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1438 if (cp->hw_ok == 0)
1439 continue;
1440 if (pciide_chan_candisable(cp)) {
1441 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1442 channel);
1443 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1444 idetim);
1445 }
1446 pciide_map_compat_intr(pa, cp, channel, 0);
1447 if (cp->hw_ok == 0)
1448 continue;
1449 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1450 }
1451
1452 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1453 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1454 DEBUG_PROBE);
1455 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1456 WDCDEBUG_PRINT((", sidetim=0x%x",
1457 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1458 DEBUG_PROBE);
1459 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1460 WDCDEBUG_PRINT((", udamreg 0x%x",
1461 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1462 DEBUG_PROBE);
1463 }
1464 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1465 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1466 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1467 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1468 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1469 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1470 DEBUG_PROBE);
1471 }
1472 }
1473 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1474 }
1475
1476 void
1477 piix_setup_channel(chp)
1478 struct channel_softc *chp;
1479 {
1480 u_int8_t mode[2], drive;
1481 u_int32_t oidetim, idetim, idedma_ctl;
1482 struct pciide_channel *cp = (struct pciide_channel*)chp;
1483 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1484 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1485
1486 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1487 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1488 idedma_ctl = 0;
1489
1490 /* set up new idetim: Enable IDE registers decode */
1491 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1492 chp->channel);
1493
1494 /* setup DMA */
1495 pciide_channel_dma_setup(cp);
1496
1497 /*
1498 * Here we have to mess up with drives mode: PIIX can't have
1499 * different timings for master and slave drives.
1500 * We need to find the best combination.
1501 */
1502
1503 /* If both drives supports DMA, take the lower mode */
1504 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1505 (drvp[1].drive_flags & DRIVE_DMA)) {
1506 mode[0] = mode[1] =
1507 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1508 drvp[0].DMA_mode = mode[0];
1509 drvp[1].DMA_mode = mode[1];
1510 goto ok;
1511 }
1512 /*
1513 * If only one drive supports DMA, use its mode, and
1514 * put the other one in PIO mode 0 if mode not compatible
1515 */
1516 if (drvp[0].drive_flags & DRIVE_DMA) {
1517 mode[0] = drvp[0].DMA_mode;
1518 mode[1] = drvp[1].PIO_mode;
1519 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1520 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1521 mode[1] = drvp[1].PIO_mode = 0;
1522 goto ok;
1523 }
1524 if (drvp[1].drive_flags & DRIVE_DMA) {
1525 mode[1] = drvp[1].DMA_mode;
1526 mode[0] = drvp[0].PIO_mode;
1527 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1528 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1529 mode[0] = drvp[0].PIO_mode = 0;
1530 goto ok;
1531 }
1532 /*
1533 * If both drives are not DMA, takes the lower mode, unless
1534 * one of them is PIO mode < 2
1535 */
1536 if (drvp[0].PIO_mode < 2) {
1537 mode[0] = drvp[0].PIO_mode = 0;
1538 mode[1] = drvp[1].PIO_mode;
1539 } else if (drvp[1].PIO_mode < 2) {
1540 mode[1] = drvp[1].PIO_mode = 0;
1541 mode[0] = drvp[0].PIO_mode;
1542 } else {
1543 mode[0] = mode[1] =
1544 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1545 drvp[0].PIO_mode = mode[0];
1546 drvp[1].PIO_mode = mode[1];
1547 }
1548 ok: /* The modes are setup */
1549 for (drive = 0; drive < 2; drive++) {
1550 if (drvp[drive].drive_flags & DRIVE_DMA) {
1551 idetim |= piix_setup_idetim_timings(
1552 mode[drive], 1, chp->channel);
1553 goto end;
1554 }
1555 }
1556 /* If we are there, none of the drives are DMA */
1557 if (mode[0] >= 2)
1558 idetim |= piix_setup_idetim_timings(
1559 mode[0], 0, chp->channel);
1560 else
1561 idetim |= piix_setup_idetim_timings(
1562 mode[1], 0, chp->channel);
1563 end: /*
1564 * timing mode is now set up in the controller. Enable
1565 * it per-drive
1566 */
1567 for (drive = 0; drive < 2; drive++) {
1568 /* If no drive, skip */
1569 if ((drvp[drive].drive_flags & DRIVE) == 0)
1570 continue;
1571 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1572 if (drvp[drive].drive_flags & DRIVE_DMA)
1573 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1574 }
1575 if (idedma_ctl != 0) {
1576 /* Add software bits in status register */
1577 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1578 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1579 idedma_ctl);
1580 }
1581 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1582 pciide_print_modes(cp);
1583 }
1584
1585 void
1586 piix3_4_setup_channel(chp)
1587 struct channel_softc *chp;
1588 {
1589 struct ata_drive_datas *drvp;
1590 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1591 struct pciide_channel *cp = (struct pciide_channel*)chp;
1592 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1593 int drive;
1594 int channel = chp->channel;
1595
1596 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1597 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1598 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1599 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1600 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1601 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1602 PIIX_SIDETIM_RTC_MASK(channel));
1603
1604 idedma_ctl = 0;
1605 /* If channel disabled, no need to go further */
1606 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1607 return;
1608 /* set up new idetim: Enable IDE registers decode */
1609 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1610
1611 /* setup DMA if needed */
1612 pciide_channel_dma_setup(cp);
1613
1614 for (drive = 0; drive < 2; drive++) {
1615 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1616 PIIX_UDMATIM_SET(0x3, channel, drive));
1617 drvp = &chp->ch_drive[drive];
1618 /* If no drive, skip */
1619 if ((drvp->drive_flags & DRIVE) == 0)
1620 continue;
1621 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1622 (drvp->drive_flags & DRIVE_UDMA) == 0))
1623 goto pio;
1624
1625 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1626 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1627 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1628 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1629 ideconf |= PIIX_CONFIG_PINGPONG;
1630 }
1631 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1632 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1633 /* setup Ultra/100 */
1634 if (drvp->UDMA_mode > 2 &&
1635 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1636 drvp->UDMA_mode = 2;
1637 if (drvp->UDMA_mode > 4) {
1638 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1639 } else {
1640 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1641 if (drvp->UDMA_mode > 2) {
1642 ideconf |= PIIX_CONFIG_UDMA66(channel,
1643 drive);
1644 } else {
1645 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1646 drive);
1647 }
1648 }
1649 }
1650 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1651 /* setup Ultra/66 */
1652 if (drvp->UDMA_mode > 2 &&
1653 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1654 drvp->UDMA_mode = 2;
1655 if (drvp->UDMA_mode > 2)
1656 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1657 else
1658 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1659 }
1660 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1661 (drvp->drive_flags & DRIVE_UDMA)) {
1662 /* use Ultra/DMA */
1663 drvp->drive_flags &= ~DRIVE_DMA;
1664 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive);
1665 udmareg |= PIIX_UDMATIM_SET(
1666 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1667 } else {
1668 /* use Multiword DMA */
1669 drvp->drive_flags &= ~DRIVE_UDMA;
1670 if (drive == 0) {
1671 idetim |= piix_setup_idetim_timings(
1672 drvp->DMA_mode, 1, channel);
1673 } else {
1674 sidetim |= piix_setup_sidetim_timings(
1675 drvp->DMA_mode, 1, channel);
1676 idetim =PIIX_IDETIM_SET(idetim,
1677 PIIX_IDETIM_SITRE, channel);
1678 }
1679 }
1680 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1681
1682 pio: /* use PIO mode */
1683 idetim |= piix_setup_idetim_drvs(drvp);
1684 if (drive == 0) {
1685 idetim |= piix_setup_idetim_timings(
1686 drvp->PIO_mode, 0, channel);
1687 } else {
1688 sidetim |= piix_setup_sidetim_timings(
1689 drvp->PIO_mode, 0, channel);
1690 idetim =PIIX_IDETIM_SET(idetim,
1691 PIIX_IDETIM_SITRE, channel);
1692 }
1693 }
1694 if (idedma_ctl != 0) {
1695 /* Add software bits in status register */
1696 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1697 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1698 idedma_ctl);
1699 }
1700 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1701 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1702 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1703 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1704 pciide_print_modes(cp);
1705 }
1706
1707
1708 /* setup ISP and RTC fields, based on mode */
1709 static u_int32_t
1710 piix_setup_idetim_timings(mode, dma, channel)
1711 u_int8_t mode;
1712 u_int8_t dma;
1713 u_int8_t channel;
1714 {
1715
1716 if (dma)
1717 return PIIX_IDETIM_SET(0,
1718 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1719 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1720 channel);
1721 else
1722 return PIIX_IDETIM_SET(0,
1723 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1724 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1725 channel);
1726 }
1727
1728 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1729 static u_int32_t
1730 piix_setup_idetim_drvs(drvp)
1731 struct ata_drive_datas *drvp;
1732 {
1733 u_int32_t ret = 0;
1734 struct channel_softc *chp = drvp->chnl_softc;
1735 u_int8_t channel = chp->channel;
1736 u_int8_t drive = drvp->drive;
1737
1738 /*
1739 * If drive is using UDMA, timings setups are independant
1740 * So just check DMA and PIO here.
1741 */
1742 if (drvp->drive_flags & DRIVE_DMA) {
1743 /* if mode = DMA mode 0, use compatible timings */
1744 if ((drvp->drive_flags & DRIVE_DMA) &&
1745 drvp->DMA_mode == 0) {
1746 drvp->PIO_mode = 0;
1747 return ret;
1748 }
1749 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1750 /*
1751 * PIO and DMA timings are the same, use fast timings for PIO
1752 * too, else use compat timings.
1753 */
1754 if ((piix_isp_pio[drvp->PIO_mode] !=
1755 piix_isp_dma[drvp->DMA_mode]) ||
1756 (piix_rtc_pio[drvp->PIO_mode] !=
1757 piix_rtc_dma[drvp->DMA_mode]))
1758 drvp->PIO_mode = 0;
1759 /* if PIO mode <= 2, use compat timings for PIO */
1760 if (drvp->PIO_mode <= 2) {
1761 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1762 channel);
1763 return ret;
1764 }
1765 }
1766
1767 /*
1768 	 * Now set up PIO modes. If the mode is < 2, use compatible timings;
1769 	 * otherwise enable fast timings. Enable IORDY and prefetch/post
1770 	 * if PIO mode >= 3.
1771 */
1772
1773 if (drvp->PIO_mode < 2)
1774 return ret;
1775
1776 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1777 if (drvp->PIO_mode >= 3) {
1778 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1779 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1780 }
1781 return ret;
1782 }
1783
1784 /* setup values in SIDETIM registers, based on mode */
1785 static u_int32_t
1786 piix_setup_sidetim_timings(mode, dma, channel)
1787 u_int8_t mode;
1788 u_int8_t dma;
1789 u_int8_t channel;
1790 {
1791 if (dma)
1792 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1793 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1794 else
1795 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1796 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1797 }
1798
1799 void
1800 amd7x6_chip_map(sc, pa)
1801 struct pciide_softc *sc;
1802 struct pci_attach_args *pa;
1803 {
1804 struct pciide_channel *cp;
1805 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1806 int channel;
1807 pcireg_t chanenable;
1808 bus_size_t cmdsize, ctlsize;
1809
1810 if (pciide_chipen(sc, pa) == 0)
1811 return;
1812 printf("%s: bus-master DMA support present",
1813 sc->sc_wdcdev.sc_dev.dv_xname);
1814 pciide_mapreg_dma(sc, pa);
1815 printf("\n");
1816 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1817 WDC_CAPABILITY_MODE;
1818 if (sc->sc_dma_ok) {
1819 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1820 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1821 sc->sc_wdcdev.irqack = pciide_irqack;
1822 }
1823 sc->sc_wdcdev.PIO_cap = 4;
1824 sc->sc_wdcdev.DMA_cap = 2;
1825
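	/*
	 * The AMD-766 can do Ultra/DMA mode 5 (ATA100); the older
	 * AMD-756 tops out at mode 4 (ATA66).
	 */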
1826 if (sc->sc_pp->ide_product == PCI_PRODUCT_AMD_PBC766_IDE)
1827 sc->sc_wdcdev.UDMA_cap = 5;
1828 else
1829 sc->sc_wdcdev.UDMA_cap = 4;
1830 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
1831 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1832 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1833 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
1834
1835 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
1836 DEBUG_PROBE);
1837 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1838 cp = &sc->pciide_channels[channel];
1839 if (pciide_chansetup(sc, channel, interface) == 0)
1840 continue;
1841
1842 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
1843 printf("%s: %s channel ignored (disabled)\n",
1844 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1845 continue;
1846 }
1847 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1848 pciide_pci_intr);
1849
1850 if (pciide_chan_candisable(cp))
1851 chanenable &= ~AMD7X6_CHAN_EN(channel);
1852 pciide_map_compat_intr(pa, cp, channel, interface);
1853 if (cp->hw_ok == 0)
1854 continue;
1855
1856 amd7x6_setup_channel(&cp->wdc_channel);
1857 }
1858 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
1859 chanenable);
1860 return;
1861 }
1862
1863 void
1864 amd7x6_setup_channel(chp)
1865 struct channel_softc *chp;
1866 {
1867 u_int32_t udmatim_reg, datatim_reg;
1868 u_int8_t idedma_ctl;
1869 int mode, drive;
1870 struct ata_drive_datas *drvp;
1871 struct pciide_channel *cp = (struct pciide_channel*)chp;
1872 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1873 #ifndef PCIIDE_AMD756_ENABLEDMA
1874 int rev = PCI_REVISION(
1875 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1876 #endif
1877
1878 idedma_ctl = 0;
1879 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
1880 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
1881 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
1882 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
1883
1884 /* setup DMA if needed */
1885 pciide_channel_dma_setup(cp);
1886
1887 for (drive = 0; drive < 2; drive++) {
1888 drvp = &chp->ch_drive[drive];
1889 /* If no drive, skip */
1890 if ((drvp->drive_flags & DRIVE) == 0)
1891 continue;
1892 /* add timing values, setup DMA if needed */
1893 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1894 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1895 mode = drvp->PIO_mode;
1896 goto pio;
1897 }
1898 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1899 (drvp->drive_flags & DRIVE_UDMA)) {
1900 /* use Ultra/DMA */
1901 drvp->drive_flags &= ~DRIVE_DMA;
1902 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
1903 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
1904 AMD7X6_UDMA_TIME(chp->channel, drive,
1905 amd7x6_udma_tim[drvp->UDMA_mode]);
1906 /* can use PIO timings, MW DMA unused */
1907 mode = drvp->PIO_mode;
1908 } else {
1909 /* use Multiword DMA, but only if revision is OK */
1910 drvp->drive_flags &= ~DRIVE_UDMA;
1911 #ifndef PCIIDE_AMD756_ENABLEDMA
1912 /*
1913 			 * The workaround doesn't seem to be necessary
1914 			 * with all drives, so it can be disabled with
1915 			 * PCIIDE_AMD756_ENABLEDMA. The bug causes a hard
1916 			 * hang if triggered.
1917 */
1918 if (sc->sc_pp->ide_product ==
1919 PCI_PRODUCT_AMD_PBC756_IDE &&
1920 AMD756_CHIPREV_DISABLEDMA(rev)) {
1921 printf("%s:%d:%d: multi-word DMA disabled due "
1922 "to chip revision\n",
1923 sc->sc_wdcdev.sc_dev.dv_xname,
1924 chp->channel, drive);
1925 mode = drvp->PIO_mode;
1926 drvp->drive_flags &= ~DRIVE_DMA;
1927 goto pio;
1928 }
1929 #endif
1930 /* mode = min(pio, dma+2) */
1931 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
1932 mode = drvp->PIO_mode;
1933 else
1934 mode = drvp->DMA_mode + 2;
1935 }
1936 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1937
1938 pio: /* setup PIO mode */
1939 if (mode <= 2) {
1940 drvp->DMA_mode = 0;
1941 drvp->PIO_mode = 0;
1942 mode = 0;
1943 } else {
1944 drvp->PIO_mode = mode;
1945 drvp->DMA_mode = mode - 2;
1946 }
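		/*
		 * Program the pulse width and recovery time for this drive.
		 * The same DATATIM fields appear to serve both PIO and
		 * multiword DMA cycles, which is why the PIO and DMA modes
		 * were reconciled above (mode = min(pio, dma + 2)); this is
		 * an inference from the code, not from chip documentation.
		 */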
1947 datatim_reg |=
1948 AMD7X6_DATATIM_PULSE(chp->channel, drive,
1949 amd7x6_pio_set[mode]) |
1950 AMD7X6_DATATIM_RECOV(chp->channel, drive,
1951 amd7x6_pio_rec[mode]);
1952 }
1953 if (idedma_ctl != 0) {
1954 /* Add software bits in status register */
1955 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1956 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1957 idedma_ctl);
1958 }
1959 pciide_print_modes(cp);
1960 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
1961 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
1962 }
1963
1964 void
1965 apollo_chip_map(sc, pa)
1966 struct pciide_softc *sc;
1967 struct pci_attach_args *pa;
1968 {
1969 struct pciide_channel *cp;
1970 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1971 int channel;
1972 u_int32_t ideconf;
1973 bus_size_t cmdsize, ctlsize;
1974 pcitag_t pcib_tag;
1975 pcireg_t pcib_id, pcib_class;
1976
1977 if (pciide_chipen(sc, pa) == 0)
1978 return;
1979 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
1980 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
1981 /* and read ID and rev of the ISA bridge */
1982 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
1983 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
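	/*
	 * The ISA bridge's product ID and revision (read above) are used
	 * below to identify the Apollo variant and pick the UDMA
	 * capability; the IDE function's own ID apparently isn't enough
	 * to tell the chips apart.
	 */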
1984 printf(": VIA Technologies ");
1985 switch (PCI_PRODUCT(pcib_id)) {
1986 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
1987 printf("VT82C586 (Apollo VP) ");
1988 		if (PCI_REVISION(pcib_class) >= 0x02) {
1989 printf("ATA33 controller\n");
1990 sc->sc_wdcdev.UDMA_cap = 2;
1991 } else {
1992 printf("controller\n");
1993 sc->sc_wdcdev.UDMA_cap = 0;
1994 }
1995 break;
1996 case PCI_PRODUCT_VIATECH_VT82C596A:
1997 printf("VT82C596A (Apollo Pro) ");
1998 if (PCI_REVISION(pcib_class) >= 0x12) {
1999 printf("ATA66 controller\n");
2000 sc->sc_wdcdev.UDMA_cap = 4;
2001 } else {
2002 printf("ATA33 controller\n");
2003 sc->sc_wdcdev.UDMA_cap = 2;
2004 }
2005 break;
2006 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2007 printf("VT82C686A (Apollo KX133) ");
2008 if (PCI_REVISION(pcib_class) >= 0x40) {
2009 printf("ATA100 controller\n");
2010 sc->sc_wdcdev.UDMA_cap = 5;
2011 } else {
2012 printf("ATA66 controller\n");
2013 sc->sc_wdcdev.UDMA_cap = 4;
2014 }
2015 break;
2016 default:
2017 printf("unknown ATA controller\n");
2018 sc->sc_wdcdev.UDMA_cap = 0;
2019 }
2020
2021 printf("%s: bus-master DMA support present",
2022 sc->sc_wdcdev.sc_dev.dv_xname);
2023 pciide_mapreg_dma(sc, pa);
2024 printf("\n");
2025 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2026 WDC_CAPABILITY_MODE;
2027 if (sc->sc_dma_ok) {
2028 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2029 sc->sc_wdcdev.irqack = pciide_irqack;
2030 if (sc->sc_wdcdev.UDMA_cap > 0)
2031 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2032 }
2033 sc->sc_wdcdev.PIO_cap = 4;
2034 sc->sc_wdcdev.DMA_cap = 2;
2035 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2036 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2037 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2038
2039 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2040 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2041 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2042 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2043 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2044 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2045 DEBUG_PROBE);
2046
2047 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2048 cp = &sc->pciide_channels[channel];
2049 if (pciide_chansetup(sc, channel, interface) == 0)
2050 continue;
2051
2052 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2053 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2054 printf("%s: %s channel ignored (disabled)\n",
2055 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2056 continue;
2057 }
2058 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2059 pciide_pci_intr);
2060 if (cp->hw_ok == 0)
2061 continue;
2062 if (pciide_chan_candisable(cp)) {
2063 ideconf &= ~APO_IDECONF_EN(channel);
2064 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2065 ideconf);
2066 }
2067 pciide_map_compat_intr(pa, cp, channel, interface);
2068
2069 if (cp->hw_ok == 0)
2070 continue;
2071 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2072 }
2073 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2074 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2075 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2076 }
2077
2078 void
2079 apollo_setup_channel(chp)
2080 struct channel_softc *chp;
2081 {
2082 u_int32_t udmatim_reg, datatim_reg;
2083 u_int8_t idedma_ctl;
2084 int mode, drive;
2085 struct ata_drive_datas *drvp;
2086 struct pciide_channel *cp = (struct pciide_channel*)chp;
2087 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2088
2089 idedma_ctl = 0;
2090 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2091 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2092 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2093 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2094
2095 /* setup DMA if needed */
2096 pciide_channel_dma_setup(cp);
2097
2098 for (drive = 0; drive < 2; drive++) {
2099 drvp = &chp->ch_drive[drive];
2100 /* If no drive, skip */
2101 if ((drvp->drive_flags & DRIVE) == 0)
2102 continue;
2103 /* add timing values, setup DMA if needed */
2104 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2105 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2106 mode = drvp->PIO_mode;
2107 goto pio;
2108 }
2109 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2110 (drvp->drive_flags & DRIVE_UDMA)) {
2111 /* use Ultra/DMA */
2112 drvp->drive_flags &= ~DRIVE_DMA;
2113 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2114 APO_UDMA_EN_MTH(chp->channel, drive);
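			/*
			 * Pick the UDMA timing table that matches the
			 * controller's capability; on the ATA66/ATA100
			 * capable parts the CLK66 bit also has to be set to
			 * select the faster UDMA base clock (inferred from
			 * the branches below).
			 */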
2115 if (sc->sc_wdcdev.UDMA_cap == 5) {
2116 /* 686b */
2117 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2118 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2119 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2120 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2121 /* 596b or 686a */
2122 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2123 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2124 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2125 } else {
2126 /* 596a or 586b */
2127 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2128 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2129 }
2130 /* can use PIO timings, MW DMA unused */
2131 mode = drvp->PIO_mode;
2132 } else {
2133 /* use Multiword DMA */
2134 drvp->drive_flags &= ~DRIVE_UDMA;
2135 /* mode = min(pio, dma+2) */
2136 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2137 mode = drvp->PIO_mode;
2138 else
2139 mode = drvp->DMA_mode + 2;
2140 }
2141 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2142
2143 pio: /* setup PIO mode */
2144 if (mode <= 2) {
2145 drvp->DMA_mode = 0;
2146 drvp->PIO_mode = 0;
2147 mode = 0;
2148 } else {
2149 drvp->PIO_mode = mode;
2150 drvp->DMA_mode = mode - 2;
2151 }
2152 datatim_reg |=
2153 APO_DATATIM_PULSE(chp->channel, drive,
2154 apollo_pio_set[mode]) |
2155 APO_DATATIM_RECOV(chp->channel, drive,
2156 apollo_pio_rec[mode]);
2157 }
2158 if (idedma_ctl != 0) {
2159 /* Add software bits in status register */
2160 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2161 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2162 idedma_ctl);
2163 }
2164 pciide_print_modes(cp);
2165 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2166 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2167 }
2168
2169 void
2170 cmd_channel_map(pa, sc, channel)
2171 struct pci_attach_args *pa;
2172 struct pciide_softc *sc;
2173 int channel;
2174 {
2175 struct pciide_channel *cp = &sc->pciide_channels[channel];
2176 bus_size_t cmdsize, ctlsize;
2177 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2178 int interface;
2179
2180 /*
2181 * The 0648/0649 can be told to identify as a RAID controller.
2182 	 * In this case, we have to fake the interface.
2183 */
2184 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2185 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2186 PCIIDE_INTERFACE_SETTABLE(1);
2187 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2188 CMD_CONF_DSA1)
2189 interface |= PCIIDE_INTERFACE_PCI(0) |
2190 PCIIDE_INTERFACE_PCI(1);
2191 } else {
2192 interface = PCI_INTERFACE(pa->pa_class);
2193 }
2194
2195 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2196 cp->name = PCIIDE_CHANNEL_NAME(channel);
2197 cp->wdc_channel.channel = channel;
2198 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2199
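	/*
	 * Both channels share channel 0's command queue, so commands on
	 * the two channels are serialized; presumably these CMD chips
	 * can't safely run both channels at the same time.
	 */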
2200 if (channel > 0) {
2201 cp->wdc_channel.ch_queue =
2202 sc->pciide_channels[0].wdc_channel.ch_queue;
2203 } else {
2204 cp->wdc_channel.ch_queue =
2205 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2206 }
2207 if (cp->wdc_channel.ch_queue == NULL) {
2208 printf("%s %s channel: "
2209 		    "can't allocate memory for command queue\n",
2210 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2211 return;
2212 }
2213
2214 printf("%s: %s channel %s to %s mode\n",
2215 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2216 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2217 "configured" : "wired",
2218 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2219 "native-PCI" : "compatibility");
2220
2221 /*
2222 	 * With a CMD PCI064x, if we get here the first channel is enabled:
2223 	 * there's no way to disable the first channel without disabling
2224 	 * the whole device.
2225 */
2226 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2227 printf("%s: %s channel ignored (disabled)\n",
2228 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2229 return;
2230 }
2231
2232 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2233 if (cp->hw_ok == 0)
2234 return;
2235 if (channel == 1) {
2236 if (pciide_chan_candisable(cp)) {
2237 ctrl &= ~CMD_CTRL_2PORT;
2238 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2239 CMD_CTRL, ctrl);
2240 }
2241 }
2242 pciide_map_compat_intr(pa, cp, channel, interface);
2243 }
2244
2245 int
2246 cmd_pci_intr(arg)
2247 void *arg;
2248 {
2249 struct pciide_softc *sc = arg;
2250 struct pciide_channel *cp;
2251 struct channel_softc *wdc_cp;
2252 int i, rv, crv;
2253 u_int32_t priirq, secirq;
2254
2255 rv = 0;
2256 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2257 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
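	/*
	 * The primary channel's interrupt-pending bit lives in CMD_CONF
	 * (CMD_CONF_DRV0_INTR) and the secondary's in CMD_ARTTIM23
	 * (CMD_ARTTIM23_IRQ); only call wdcintr() for a channel whose
	 * bit is set.
	 */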
2258 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2259 cp = &sc->pciide_channels[i];
2260 wdc_cp = &cp->wdc_channel;
2261 		/* If a compat channel, skip. */
2262 if (cp->compat)
2263 continue;
2264 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2265 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2266 crv = wdcintr(wdc_cp);
2267 if (crv == 0)
2268 printf("%s:%d: bogus intr\n",
2269 sc->sc_wdcdev.sc_dev.dv_xname, i);
2270 else
2271 rv = 1;
2272 }
2273 }
2274 return rv;
2275 }
2276
2277 void
2278 cmd_chip_map(sc, pa)
2279 struct pciide_softc *sc;
2280 struct pci_attach_args *pa;
2281 {
2282 int channel;
2283
2284 /*
2285 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2286 	 * and the base address registers can be disabled at the
2287 * hardware level. In this case, the device is wired
2288 * in compat mode and its first channel is always enabled,
2289 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2290 * In fact, it seems that the first channel of the CMD PCI0640
2291 * can't be disabled.
2292 */
2293
2294 #ifdef PCIIDE_CMD064x_DISABLE
2295 if (pciide_chipen(sc, pa) == 0)
2296 return;
2297 #endif
2298
2299 printf("%s: hardware does not support DMA\n",
2300 sc->sc_wdcdev.sc_dev.dv_xname);
2301 sc->sc_dma_ok = 0;
2302
2303 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2304 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2305 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2306
2307 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2308 cmd_channel_map(pa, sc, channel);
2309 }
2310 }
2311
2312 void
2313 cmd0643_9_chip_map(sc, pa)
2314 struct pciide_softc *sc;
2315 struct pci_attach_args *pa;
2316 {
2317 struct pciide_channel *cp;
2318 int channel;
2319 int rev = PCI_REVISION(
2320 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2321
2322 /*
2323 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2324 	 * and the base address registers can be disabled at the
2325 * hardware level. In this case, the device is wired
2326 * in compat mode and its first channel is always enabled,
2327 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2328 * In fact, it seems that the first channel of the CMD PCI0640
2329 * can't be disabled.
2330 */
2331
2332 #ifdef PCIIDE_CMD064x_DISABLE
2333 if (pciide_chipen(sc, pa) == 0)
2334 return;
2335 #endif
2336 printf("%s: bus-master DMA support present",
2337 sc->sc_wdcdev.sc_dev.dv_xname);
2338 pciide_mapreg_dma(sc, pa);
2339 printf("\n");
2340 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2341 WDC_CAPABILITY_MODE;
2342 if (sc->sc_dma_ok) {
2343 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2344 switch (sc->sc_pp->ide_product) {
2345 case PCI_PRODUCT_CMDTECH_649:
2346 case PCI_PRODUCT_CMDTECH_648:
2347 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2348 sc->sc_wdcdev.UDMA_cap = 4;
2349 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2350 break;
2351 case PCI_PRODUCT_CMDTECH_646:
2352 if (rev >= CMD0646U2_REV) {
2353 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2354 sc->sc_wdcdev.UDMA_cap = 2;
2355 } else if (rev >= CMD0646U_REV) {
2356 /*
2357 * Linux's driver claims that the 646U is broken
2358 * with UDMA. Only enable it if we know what we're
2359 * doing
2360 */
2361 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2362 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2363 sc->sc_wdcdev.UDMA_cap = 2;
2364 #endif
2365 			/* explicitly disable UDMA */
2366 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2367 CMD_UDMATIM(0), 0);
2368 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2369 CMD_UDMATIM(1), 0);
2370 }
2371 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2372 break;
2373 default:
2374 sc->sc_wdcdev.irqack = pciide_irqack;
2375 }
2376 }
2377
2378 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2379 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2380 sc->sc_wdcdev.PIO_cap = 4;
2381 sc->sc_wdcdev.DMA_cap = 2;
2382 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2383
2384 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2385 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2386 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2387 DEBUG_PROBE);
2388
2389 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2390 cp = &sc->pciide_channels[channel];
2391 cmd_channel_map(pa, sc, channel);
2392 if (cp->hw_ok == 0)
2393 continue;
2394 cmd0643_9_setup_channel(&cp->wdc_channel);
2395 }
2396 /*
2397 	 * Note: this also makes sure we clear the IRQ disable and reset
2398 	 * bits.
2399 */
2400 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2401 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2402 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2403 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2404 DEBUG_PROBE);
2405 }
2406
2407 void
2408 cmd0643_9_setup_channel(chp)
2409 struct channel_softc *chp;
2410 {
2411 struct ata_drive_datas *drvp;
2412 u_int8_t tim;
2413 u_int32_t idedma_ctl, udma_reg;
2414 int drive;
2415 struct pciide_channel *cp = (struct pciide_channel*)chp;
2416 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2417
2418 idedma_ctl = 0;
2419 /* setup DMA if needed */
2420 pciide_channel_dma_setup(cp);
2421
2422 for (drive = 0; drive < 2; drive++) {
2423 drvp = &chp->ch_drive[drive];
2424 /* If no drive, skip */
2425 if ((drvp->drive_flags & DRIVE) == 0)
2426 continue;
2427 /* add timing values, setup DMA if needed */
2428 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2429 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2430 if (drvp->drive_flags & DRIVE_UDMA) {
2431 /* UltraDMA on a 646U2, 0648 or 0649 */
2432 drvp->drive_flags &= ~DRIVE_DMA;
2433 udma_reg = pciide_pci_read(sc->sc_pc,
2434 sc->sc_tag, CMD_UDMATIM(chp->channel));
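				/*
				 * UDMA modes above 2 need an 80-conductor
				 * cable; if CMD_BICSR doesn't report one on
				 * this channel, fall back to mode 2 (ATA33).
				 */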
2435 if (drvp->UDMA_mode > 2 &&
2436 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2437 CMD_BICSR) &
2438 CMD_BICSR_80(chp->channel)) == 0)
2439 drvp->UDMA_mode = 2;
2440 if (drvp->UDMA_mode > 2)
2441 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2442 else if (sc->sc_wdcdev.UDMA_cap > 2)
2443 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2444 udma_reg |= CMD_UDMATIM_UDMA(drive);
2445 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2446 CMD_UDMATIM_TIM_OFF(drive));
2447 udma_reg |=
2448 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2449 CMD_UDMATIM_TIM_OFF(drive));
2450 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2451 CMD_UDMATIM(chp->channel), udma_reg);
2452 } else {
2453 /*
2454 * use Multiword DMA.
2455 * Timings will be used for both PIO and DMA,
2456 				 * so adjust the DMA mode if needed.
2457 				 * If we have a 0646U2/8/9, turn off UDMA.
2458 */
2459 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2460 udma_reg = pciide_pci_read(sc->sc_pc,
2461 sc->sc_tag,
2462 CMD_UDMATIM(chp->channel));
2463 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2464 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2465 CMD_UDMATIM(chp->channel),
2466 udma_reg);
2467 }
2468 if (drvp->PIO_mode >= 3 &&
2469 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2470 drvp->DMA_mode = drvp->PIO_mode - 2;
2471 }
2472 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2473 }
2474 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2475 }
2476 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2477 CMD_DATA_TIM(chp->channel, drive), tim);
2478 }
2479 if (idedma_ctl != 0) {
2480 /* Add software bits in status register */
2481 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2482 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2483 idedma_ctl);
2484 }
2485 pciide_print_modes(cp);
2486 }
2487
2488 void
2489 cmd646_9_irqack(chp)
2490 struct channel_softc *chp;
2491 {
2492 u_int32_t priirq, secirq;
2493 struct pciide_channel *cp = (struct pciide_channel*)chp;
2494 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2495
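	/*
	 * Reading the register and writing the same value back clears the
	 * channel's interrupt bit; the bits are presumably write-1-to-clear,
	 * which is why no masking is done here.
	 */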
2496 if (chp->channel == 0) {
2497 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2498 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2499 } else {
2500 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2501 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2502 }
2503 pciide_irqack(chp);
2504 }
2505
2506 void
2507 cy693_chip_map(sc, pa)
2508 struct pciide_softc *sc;
2509 struct pci_attach_args *pa;
2510 {
2511 struct pciide_channel *cp;
2512 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2513 bus_size_t cmdsize, ctlsize;
2514
2515 if (pciide_chipen(sc, pa) == 0)
2516 return;
2517 /*
2518 	 * This chip has 2 PCI IDE functions, one for primary and one for
2519 	 * secondary, so we need to call pciide_mapregs_compat() with
2520 	 * the real channel.
2521 */
2522 if (pa->pa_function == 1) {
2523 sc->sc_cy_compatchan = 0;
2524 } else if (pa->pa_function == 2) {
2525 sc->sc_cy_compatchan = 1;
2526 } else {
2527 printf("%s: unexpected PCI function %d\n",
2528 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2529 return;
2530 }
2531 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2532 printf("%s: bus-master DMA support present",
2533 sc->sc_wdcdev.sc_dev.dv_xname);
2534 pciide_mapreg_dma(sc, pa);
2535 } else {
2536 printf("%s: hardware does not support DMA",
2537 sc->sc_wdcdev.sc_dev.dv_xname);
2538 sc->sc_dma_ok = 0;
2539 }
2540 printf("\n");
2541
2542 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2543 if (sc->sc_cy_handle == NULL) {
2544 printf("%s: unable to map hyperCache control registers\n",
2545 sc->sc_wdcdev.sc_dev.dv_xname);
2546 sc->sc_dma_ok = 0;
2547 }
2548
2549 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2550 WDC_CAPABILITY_MODE;
2551 if (sc->sc_dma_ok) {
2552 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2553 sc->sc_wdcdev.irqack = pciide_irqack;
2554 }
2555 sc->sc_wdcdev.PIO_cap = 4;
2556 sc->sc_wdcdev.DMA_cap = 2;
2557 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2558
2559 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2560 sc->sc_wdcdev.nchannels = 1;
2561
2562 /* Only one channel for this chip; if we are here it's enabled */
2563 cp = &sc->pciide_channels[0];
2564 sc->wdc_chanarray[0] = &cp->wdc_channel;
2565 cp->name = PCIIDE_CHANNEL_NAME(0);
2566 cp->wdc_channel.channel = 0;
2567 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2568 cp->wdc_channel.ch_queue =
2569 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2570 if (cp->wdc_channel.ch_queue == NULL) {
2571 printf("%s primary channel: "
2572 		    "can't allocate memory for command queue\n",
2573 sc->sc_wdcdev.sc_dev.dv_xname);
2574 return;
2575 }
2576 printf("%s: primary channel %s to ",
2577 sc->sc_wdcdev.sc_dev.dv_xname,
2578 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2579 "configured" : "wired");
2580 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2581 printf("native-PCI");
2582 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2583 pciide_pci_intr);
2584 } else {
2585 printf("compatibility");
2586 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2587 &cmdsize, &ctlsize);
2588 }
2589 printf(" mode\n");
2590 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2591 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2592 wdcattach(&cp->wdc_channel);
2593 if (pciide_chan_candisable(cp)) {
2594 pci_conf_write(sc->sc_pc, sc->sc_tag,
2595 PCI_COMMAND_STATUS_REG, 0);
2596 }
2597 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2598 if (cp->hw_ok == 0)
2599 return;
2600 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2601 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2602 cy693_setup_channel(&cp->wdc_channel);
2603 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2604 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2605 }
2606
2607 void
2608 cy693_setup_channel(chp)
2609 struct channel_softc *chp;
2610 {
2611 struct ata_drive_datas *drvp;
2612 int drive;
2613 u_int32_t cy_cmd_ctrl;
2614 u_int32_t idedma_ctl;
2615 struct pciide_channel *cp = (struct pciide_channel*)chp;
2616 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2617 int dma_mode = -1;
2618
2619 cy_cmd_ctrl = idedma_ctl = 0;
2620
2621 /* setup DMA if needed */
2622 pciide_channel_dma_setup(cp);
2623
2624 for (drive = 0; drive < 2; drive++) {
2625 drvp = &chp->ch_drive[drive];
2626 /* If no drive, skip */
2627 if ((drvp->drive_flags & DRIVE) == 0)
2628 continue;
2629 /* add timing values, setup DMA if needed */
2630 if (drvp->drive_flags & DRIVE_DMA) {
2631 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2632 /* use Multiword DMA */
2633 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2634 dma_mode = drvp->DMA_mode;
2635 }
2636 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2637 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2638 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2639 CY_CMD_CTRL_IOW_REC_OFF(drive));
2640 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2641 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2642 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2643 CY_CMD_CTRL_IOR_REC_OFF(drive));
2644 }
2645 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2646 chp->ch_drive[0].DMA_mode = dma_mode;
2647 chp->ch_drive[1].DMA_mode = dma_mode;
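	/*
	 * Both drives get the slowest multiword DMA mode negotiated above;
	 * the chip apparently has only one DMA timing setting per channel,
	 * written below through the hyperCache control registers.
	 */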
2648
2649 if (dma_mode == -1)
2650 dma_mode = 0;
2651
2652 if (sc->sc_cy_handle != NULL) {
2653 /* Note: `multiple' is implied. */
2654 cy82c693_write(sc->sc_cy_handle,
2655 (sc->sc_cy_compatchan == 0) ?
2656 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2657 }
2658
2659 pciide_print_modes(cp);
2660
2661 if (idedma_ctl != 0) {
2662 /* Add software bits in status register */
2663 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2664 IDEDMA_CTL, idedma_ctl);
2665 }
2666 }
2667
2668 void
2669 sis_chip_map(sc, pa)
2670 struct pciide_softc *sc;
2671 struct pci_attach_args *pa;
2672 {
2673 struct pciide_channel *cp;
2674 int channel;
2675 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2676 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2677 pcireg_t rev = PCI_REVISION(pa->pa_class);
2678 bus_size_t cmdsize, ctlsize;
2679
2680 if (pciide_chipen(sc, pa) == 0)
2681 return;
2682 printf("%s: bus-master DMA support present",
2683 sc->sc_wdcdev.sc_dev.dv_xname);
2684 pciide_mapreg_dma(sc, pa);
2685 printf("\n");
2686 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2687 WDC_CAPABILITY_MODE;
2688 if (sc->sc_dma_ok) {
2689 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2690 sc->sc_wdcdev.irqack = pciide_irqack;
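		/*
		 * Only chip revisions newer than 0xd0 get Ultra/DMA;
		 * older revisions are limited to multiword DMA.
		 */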
2691 if (rev > 0xd0)
2692 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2693 }
2694
2695 sc->sc_wdcdev.PIO_cap = 4;
2696 sc->sc_wdcdev.DMA_cap = 2;
2697 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2698 sc->sc_wdcdev.UDMA_cap = 2;
2699 sc->sc_wdcdev.set_modes = sis_setup_channel;
2700
2701 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2702 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2703
2704 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2705 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2706 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2707
2708 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2709 cp = &sc->pciide_channels[channel];
2710 if (pciide_chansetup(sc, channel, interface) == 0)
2711 continue;
2712 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2713 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2714 printf("%s: %s channel ignored (disabled)\n",
2715 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2716 continue;
2717 }
2718 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2719 pciide_pci_intr);
2720 if (cp->hw_ok == 0)
2721 continue;
2722 if (pciide_chan_candisable(cp)) {
2723 if (channel == 0)
2724 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2725 else
2726 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2727 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2728 sis_ctr0);
2729 }
2730 pciide_map_compat_intr(pa, cp, channel, interface);
2731 if (cp->hw_ok == 0)
2732 continue;
2733 sis_setup_channel(&cp->wdc_channel);
2734 }
2735 }
2736
2737 void
2738 sis_setup_channel(chp)
2739 struct channel_softc *chp;
2740 {
2741 struct ata_drive_datas *drvp;
2742 int drive;
2743 u_int32_t sis_tim;
2744 u_int32_t idedma_ctl;
2745 struct pciide_channel *cp = (struct pciide_channel*)chp;
2746 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2747
2748 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2749 "channel %d 0x%x\n", chp->channel,
2750 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2751 DEBUG_PROBE);
2752 sis_tim = 0;
2753 idedma_ctl = 0;
2754 /* setup DMA if needed */
2755 pciide_channel_dma_setup(cp);
2756
2757 for (drive = 0; drive < 2; drive++) {
2758 drvp = &chp->ch_drive[drive];
2759 /* If no drive, skip */
2760 if ((drvp->drive_flags & DRIVE) == 0)
2761 continue;
2762 /* add timing values, setup DMA if needed */
2763 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2764 (drvp->drive_flags & DRIVE_UDMA) == 0)
2765 goto pio;
2766
2767 if (drvp->drive_flags & DRIVE_UDMA) {
2768 /* use Ultra/DMA */
2769 drvp->drive_flags &= ~DRIVE_DMA;
2770 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2771 SIS_TIM_UDMA_TIME_OFF(drive);
2772 sis_tim |= SIS_TIM_UDMA_EN(drive);
2773 } else {
2774 /*
2775 			 * use Multiword DMA.
2776 			 * Timings will be used for both PIO and DMA,
2777 			 * so adjust the DMA mode if needed.
2778 */
2779 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2780 drvp->PIO_mode = drvp->DMA_mode + 2;
2781 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2782 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2783 drvp->PIO_mode - 2 : 0;
2784 if (drvp->DMA_mode == 0)
2785 drvp->PIO_mode = 0;
2786 }
2787 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2788 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2789 SIS_TIM_ACT_OFF(drive);
2790 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2791 SIS_TIM_REC_OFF(drive);
2792 }
2793 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2794 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2795 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2796 if (idedma_ctl != 0) {
2797 /* Add software bits in status register */
2798 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2799 IDEDMA_CTL, idedma_ctl);
2800 }
2801 pciide_print_modes(cp);
2802 }
2803
2804 void
2805 acer_chip_map(sc, pa)
2806 struct pciide_softc *sc;
2807 struct pci_attach_args *pa;
2808 {
2809 struct pciide_channel *cp;
2810 int channel;
2811 pcireg_t cr, interface;
2812 bus_size_t cmdsize, ctlsize;
2813 pcireg_t rev = PCI_REVISION(pa->pa_class);
2814
2815 if (pciide_chipen(sc, pa) == 0)
2816 return;
2817 printf("%s: bus-master DMA support present",
2818 sc->sc_wdcdev.sc_dev.dv_xname);
2819 pciide_mapreg_dma(sc, pa);
2820 printf("\n");
2821 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2822 WDC_CAPABILITY_MODE;
2823 if (sc->sc_dma_ok) {
2824 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2825 if (rev >= 0x20)
2826 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2827 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2828 sc->sc_wdcdev.irqack = pciide_irqack;
2829 }
2830
2831 sc->sc_wdcdev.PIO_cap = 4;
2832 sc->sc_wdcdev.DMA_cap = 2;
2833 sc->sc_wdcdev.UDMA_cap = 2;
2834 sc->sc_wdcdev.set_modes = acer_setup_channel;
2835 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2836 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2837
2838 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2839 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2840 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2841
2842 /* Enable "microsoft register bits" R/W. */
2843 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2844 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2845 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2846 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2847 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2848 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2849 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2850 ~ACER_CHANSTATUSREGS_RO);
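	/*
	 * On this chip the per-channel enable bits are reported through
	 * the programming-interface byte of the class register; turn the
	 * reporting on, then re-read the register to learn which channels
	 * are really enabled.
	 */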
2851 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2852 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2853 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2854 /* Don't use cr, re-read the real register content instead */
2855 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2856 PCI_CLASS_REG));
2857
2858 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2859 cp = &sc->pciide_channels[channel];
2860 if (pciide_chansetup(sc, channel, interface) == 0)
2861 continue;
2862 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2863 printf("%s: %s channel ignored (disabled)\n",
2864 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2865 continue;
2866 }
2867 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2868 acer_pci_intr);
2869 if (cp->hw_ok == 0)
2870 continue;
2871 if (pciide_chan_candisable(cp)) {
2872 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
2873 pci_conf_write(sc->sc_pc, sc->sc_tag,
2874 PCI_CLASS_REG, cr);
2875 }
2876 pciide_map_compat_intr(pa, cp, channel, interface);
2877 acer_setup_channel(&cp->wdc_channel);
2878 }
2879 }
2880
2881 void
2882 acer_setup_channel(chp)
2883 struct channel_softc *chp;
2884 {
2885 struct ata_drive_datas *drvp;
2886 int drive;
2887 u_int32_t acer_fifo_udma;
2888 u_int32_t idedma_ctl;
2889 struct pciide_channel *cp = (struct pciide_channel*)chp;
2890 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2891
2892 idedma_ctl = 0;
2893 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
2894 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
2895 acer_fifo_udma), DEBUG_PROBE);
2896 /* setup DMA if needed */
2897 pciide_channel_dma_setup(cp);
2898
2899 for (drive = 0; drive < 2; drive++) {
2900 drvp = &chp->ch_drive[drive];
2901 /* If no drive, skip */
2902 if ((drvp->drive_flags & DRIVE) == 0)
2903 continue;
2904 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
2905 "channel %d drive %d 0x%x\n", chp->channel, drive,
2906 pciide_pci_read(sc->sc_pc, sc->sc_tag,
2907 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
2908 /* clear FIFO/DMA mode */
2909 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
2910 ACER_UDMA_EN(chp->channel, drive) |
2911 ACER_UDMA_TIM(chp->channel, drive, 0x7));
2912
2913 /* add timing values, setup DMA if needed */
2914 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2915 (drvp->drive_flags & DRIVE_UDMA) == 0) {
2916 acer_fifo_udma |=
2917 ACER_FTH_OPL(chp->channel, drive, 0x1);
2918 goto pio;
2919 }
2920
2921 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
2922 if (drvp->drive_flags & DRIVE_UDMA) {
2923 /* use Ultra/DMA */
2924 drvp->drive_flags &= ~DRIVE_DMA;
2925 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
2926 acer_fifo_udma |=
2927 ACER_UDMA_TIM(chp->channel, drive,
2928 acer_udma[drvp->UDMA_mode]);
2929 } else {
2930 /*
2931 			 * use Multiword DMA.
2932 			 * Timings will be used for both PIO and DMA,
2933 			 * so adjust the DMA mode if needed.
2934 */
2935 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2936 drvp->PIO_mode = drvp->DMA_mode + 2;
2937 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2938 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2939 drvp->PIO_mode - 2 : 0;
2940 if (drvp->DMA_mode == 0)
2941 drvp->PIO_mode = 0;
2942 }
2943 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2944 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
2945 ACER_IDETIM(chp->channel, drive),
2946 acer_pio[drvp->PIO_mode]);
2947 }
2948 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
2949 acer_fifo_udma), DEBUG_PROBE);
2950 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
2951 if (idedma_ctl != 0) {
2952 /* Add software bits in status register */
2953 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2954 IDEDMA_CTL, idedma_ctl);
2955 }
2956 pciide_print_modes(cp);
2957 }
2958
2959 int
2960 acer_pci_intr(arg)
2961 void *arg;
2962 {
2963 struct pciide_softc *sc = arg;
2964 struct pciide_channel *cp;
2965 struct channel_softc *wdc_cp;
2966 int i, rv, crv;
2967 u_int32_t chids;
2968
2969 rv = 0;
2970 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
2971 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2972 cp = &sc->pciide_channels[i];
2973 wdc_cp = &cp->wdc_channel;
2974 		/* If a compat channel, skip. */
2975 if (cp->compat)
2976 continue;
2977 if (chids & ACER_CHIDS_INT(i)) {
2978 crv = wdcintr(wdc_cp);
2979 if (crv == 0)
2980 printf("%s:%d: bogus intr\n",
2981 sc->sc_wdcdev.sc_dev.dv_xname, i);
2982 else
2983 rv = 1;
2984 }
2985 }
2986 return rv;
2987 }
2988
2989 void
2990 hpt_chip_map(sc, pa)
2991 struct pciide_softc *sc;
2992 struct pci_attach_args *pa;
2993 {
2994 struct pciide_channel *cp;
2995 int i, compatchan, revision;
2996 pcireg_t interface;
2997 bus_size_t cmdsize, ctlsize;
2998
2999 if (pciide_chipen(sc, pa) == 0)
3000 return;
3001 revision = PCI_REVISION(pa->pa_class);
3002 printf(": Triones/Highpoint ");
3003 if (revision == HPT370_REV)
3004 printf("HPT370 IDE Controller\n");
3005 else
3006 printf("HPT366 IDE Controller\n");
3007
3008 /*
3009 	 * When the chip is in native mode it identifies itself as a
3010 	 * 'misc mass storage' device. Fake the interface in this case.
3011 */
3012 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3013 interface = PCI_INTERFACE(pa->pa_class);
3014 } else {
3015 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3016 PCIIDE_INTERFACE_PCI(0);
3017 if (revision == HPT370_REV)
3018 interface |= PCIIDE_INTERFACE_PCI(1);
3019 }
3020
3021 printf("%s: bus-master DMA support present",
3022 sc->sc_wdcdev.sc_dev.dv_xname);
3023 pciide_mapreg_dma(sc, pa);
3024 printf("\n");
3025 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3026 WDC_CAPABILITY_MODE;
3027 if (sc->sc_dma_ok) {
3028 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3029 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3030 sc->sc_wdcdev.irqack = pciide_irqack;
3031 }
3032 sc->sc_wdcdev.PIO_cap = 4;
3033 sc->sc_wdcdev.DMA_cap = 2;
3034
3035 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3036 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3037 if (revision == HPT366_REV) {
3038 sc->sc_wdcdev.UDMA_cap = 4;
3039 /*
3040 * The 366 has 2 PCI IDE functions, one for primary and one
3041 * for secondary. So we need to call pciide_mapregs_compat()
3042 * with the real channel
3043 */
3044 if (pa->pa_function == 0) {
3045 compatchan = 0;
3046 } else if (pa->pa_function == 1) {
3047 compatchan = 1;
3048 } else {
3049 printf("%s: unexpected PCI function %d\n",
3050 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3051 return;
3052 }
3053 sc->sc_wdcdev.nchannels = 1;
3054 } else {
3055 sc->sc_wdcdev.nchannels = 2;
3056 sc->sc_wdcdev.UDMA_cap = 5;
3057 }
3058 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3059 cp = &sc->pciide_channels[i];
3060 if (sc->sc_wdcdev.nchannels > 1) {
3061 compatchan = i;
3062 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3063 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3064 printf("%s: %s channel ignored (disabled)\n",
3065 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3066 continue;
3067 }
3068 }
3069 if (pciide_chansetup(sc, i, interface) == 0)
3070 continue;
3071 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3072 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3073 &ctlsize, hpt_pci_intr);
3074 } else {
3075 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3076 &cmdsize, &ctlsize);
3077 }
3078 if (cp->hw_ok == 0)
3079 return;
3080 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3081 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3082 wdcattach(&cp->wdc_channel);
3083 hpt_setup_channel(&cp->wdc_channel);
3084 }
3085 if (revision == HPT370_REV) {
3086 /*
3087 		 * The HPT370 has a bit to disable interrupts; make sure
3088 		 * to clear it.
3089 */
3090 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3091 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3092 ~HPT_CSEL_IRQDIS);
3093 }
3094 return;
3095 }
3096
3097 void
3098 hpt_setup_channel(chp)
3099 struct channel_softc *chp;
3100 {
3101 struct ata_drive_datas *drvp;
3102 int drive;
3103 int cable;
3104 u_int32_t before, after;
3105 u_int32_t idedma_ctl;
3106 struct pciide_channel *cp = (struct pciide_channel*)chp;
3107 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3108
3109 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
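	/*
	 * HPT_CSEL reports the cable type: if the CBLID bit for this
	 * channel is set, the drive is limited to UDMA mode 2 in the loop
	 * below (presumably because no 80-conductor cable was detected).
	 */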
3110
3111 /* setup DMA if needed */
3112 pciide_channel_dma_setup(cp);
3113
3114 idedma_ctl = 0;
3115
3116 /* Per drive settings */
3117 for (drive = 0; drive < 2; drive++) {
3118 drvp = &chp->ch_drive[drive];
3119 /* If no drive, skip */
3120 if ((drvp->drive_flags & DRIVE) == 0)
3121 continue;
3122 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3123 HPT_IDETIM(chp->channel, drive));
3124
3125 /* add timing values, setup DMA if needed */
3126 if (drvp->drive_flags & DRIVE_UDMA) {
3127 /* use Ultra/DMA */
3128 drvp->drive_flags &= ~DRIVE_DMA;
3129 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3130 drvp->UDMA_mode > 2)
3131 drvp->UDMA_mode = 2;
3132 after = (sc->sc_wdcdev.nchannels == 2) ?
3133 hpt370_udma[drvp->UDMA_mode] :
3134 hpt366_udma[drvp->UDMA_mode];
3135 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3136 } else if (drvp->drive_flags & DRIVE_DMA) {
3137 /*
3138 * use Multiword DMA.
3139 * Timings will be used for both PIO and DMA, so adjust
3140 * DMA mode if needed
3141 */
3142 if (drvp->PIO_mode >= 3 &&
3143 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3144 drvp->DMA_mode = drvp->PIO_mode - 2;
3145 }
3146 after = (sc->sc_wdcdev.nchannels == 2) ?
3147 hpt370_dma[drvp->DMA_mode] :
3148 hpt366_dma[drvp->DMA_mode];
3149 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3150 } else {
3151 /* PIO only */
3152 after = (sc->sc_wdcdev.nchannels == 2) ?
3153 hpt370_pio[drvp->PIO_mode] :
3154 hpt366_pio[drvp->PIO_mode];
3155 }
3156 pci_conf_write(sc->sc_pc, sc->sc_tag,
3157 HPT_IDETIM(chp->channel, drive), after);
3158 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3159 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3160 after, before), DEBUG_PROBE);
3161 }
3162 if (idedma_ctl != 0) {
3163 /* Add software bits in status register */
3164 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3165 IDEDMA_CTL, idedma_ctl);
3166 }
3167 pciide_print_modes(cp);
3168 }
3169
3170 int
3171 hpt_pci_intr(arg)
3172 void *arg;
3173 {
3174 struct pciide_softc *sc = arg;
3175 struct pciide_channel *cp;
3176 struct channel_softc *wdc_cp;
3177 int rv = 0;
3178 int dmastat, i, crv;
3179
3180 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3181 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3182 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3183 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3184 continue;
3185 cp = &sc->pciide_channels[i];
3186 wdc_cp = &cp->wdc_channel;
3187 crv = wdcintr(wdc_cp);
3188 if (crv == 0) {
3189 printf("%s:%d: bogus intr\n",
3190 sc->sc_wdcdev.sc_dev.dv_xname, i);
3191 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3192 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3193 } else
3194 rv = 1;
3195 }
3196 return rv;
3197 }
3198
3199
3200 /* Macros to test product */
3201 #define PDC_IS_262(sc) \
3202 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3203 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3204 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3205 #define PDC_IS_265(sc) \
3206 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3207 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3208
3209 void
3210 pdc202xx_chip_map(sc, pa)
3211 struct pciide_softc *sc;
3212 struct pci_attach_args *pa;
3213 {
3214 struct pciide_channel *cp;
3215 int channel;
3216 pcireg_t interface, st, mode;
3217 bus_size_t cmdsize, ctlsize;
3218
3219 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3220 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
3221 DEBUG_PROBE);
3222 if (pciide_chipen(sc, pa) == 0)
3223 return;
3224
3225 /* turn off RAID mode */
3226 st &= ~PDC2xx_STATE_IDERAID;
3227
3228 /*
3229 	 * We can't rely on the PCI_CLASS_REG content if the chip was in
3230 	 * RAID mode, so we have to fake the interface.
3231 */
3232 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3233 if (st & PDC2xx_STATE_NATIVE)
3234 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3235
3236 printf("%s: bus-master DMA support present",
3237 sc->sc_wdcdev.sc_dev.dv_xname);
3238 pciide_mapreg_dma(sc, pa);
3239 printf("\n");
3240 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3241 WDC_CAPABILITY_MODE;
3242 if (sc->sc_dma_ok) {
3243 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3244 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3245 sc->sc_wdcdev.irqack = pciide_irqack;
3246 }
3247 sc->sc_wdcdev.PIO_cap = 4;
3248 sc->sc_wdcdev.DMA_cap = 2;
3249 if (PDC_IS_265(sc))
3250 sc->sc_wdcdev.UDMA_cap = 5;
3251 else if (PDC_IS_262(sc))
3252 sc->sc_wdcdev.UDMA_cap = 4;
3253 else
3254 sc->sc_wdcdev.UDMA_cap = 2;
3255 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3256 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3257 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3258
3259 /* setup failsafe defaults */
3260 mode = 0;
3261 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3262 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3263 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3264 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3265 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3266 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3267 "initial timings 0x%x, now 0x%x\n", channel,
3268 pci_conf_read(sc->sc_pc, sc->sc_tag,
3269 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3270 DEBUG_PROBE);
3271 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3272 mode | PDC2xx_TIM_IORDYp);
3273 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3274 "initial timings 0x%x, now 0x%x\n", channel,
3275 pci_conf_read(sc->sc_pc, sc->sc_tag,
3276 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3277 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3278 mode);
3279 }
3280
3281 mode = PDC2xx_SCR_DMA;
3282 if (PDC_IS_262(sc)) {
3283 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3284 } else {
3285 /* the BIOS set it up this way */
3286 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3287 }
3288 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3289 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3290 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
3291 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3292 DEBUG_PROBE);
3293 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3294
3295 /* controller initial state register is OK even without BIOS */
3296 /* Set DMA mode to IDE DMA compatibility */
3297 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3298 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
3299 DEBUG_PROBE);
3300 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3301 mode | 0x1);
3302 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3303 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3304 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3305 mode | 0x1);
3306
3307 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3308 cp = &sc->pciide_channels[channel];
3309 if (pciide_chansetup(sc, channel, interface) == 0)
3310 continue;
3311 if ((st & (PDC_IS_262(sc) ?
3312 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3313 printf("%s: %s channel ignored (disabled)\n",
3314 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3315 continue;
3316 }
3317 if (PDC_IS_265(sc))
3318 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3319 pdc20265_pci_intr);
3320 else
3321 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3322 pdc202xx_pci_intr);
3323 if (cp->hw_ok == 0)
3324 continue;
3325 if (pciide_chan_candisable(cp))
3326 st &= ~(PDC_IS_262(sc) ?
3327 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3328 pciide_map_compat_intr(pa, cp, channel, interface);
3329 pdc202xx_setup_channel(&cp->wdc_channel);
3330 }
3331 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3332 DEBUG_PROBE);
3333 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3334 return;
3335 }
3336
3337 void
3338 pdc202xx_setup_channel(chp)
3339 struct channel_softc *chp;
3340 {
3341 struct ata_drive_datas *drvp;
3342 int drive;
3343 pcireg_t mode, st;
3344 u_int32_t idedma_ctl, scr, atapi;
3345 struct pciide_channel *cp = (struct pciide_channel*)chp;
3346 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3347 int channel = chp->channel;
3348
3349 /* setup DMA if needed */
3350 pciide_channel_dma_setup(cp);
3351
3352 idedma_ctl = 0;
3353 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3354 sc->sc_wdcdev.sc_dev.dv_xname,
3355 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3356 DEBUG_PROBE);
3357
3358 /* Per channel settings */
3359 if (PDC_IS_262(sc)) {
3360 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3361 PDC262_U66);
3362 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3363 		/* Trim UDMA mode */
3364 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3365 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3366 chp->ch_drive[0].UDMA_mode <= 2) ||
3367 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3368 chp->ch_drive[1].UDMA_mode <= 2)) {
3369 if (chp->ch_drive[0].UDMA_mode > 2)
3370 chp->ch_drive[0].UDMA_mode = 2;
3371 if (chp->ch_drive[1].UDMA_mode > 2)
3372 chp->ch_drive[1].UDMA_mode = 2;
3373 }
3374 /* Set U66 if needed */
3375 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3376 chp->ch_drive[0].UDMA_mode > 2) ||
3377 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3378 chp->ch_drive[1].UDMA_mode > 2))
3379 scr |= PDC262_U66_EN(channel);
3380 else
3381 scr &= ~PDC262_U66_EN(channel);
3382 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3383 PDC262_U66, scr);
3384 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3385 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3386 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3387 PDC262_ATAPI(channel))), DEBUG_PROBE);
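		/*
		 * If there is an ATAPI device on the channel and one drive
		 * runs Ultra/DMA while the other only does multiword DMA,
		 * UDMA is turned off in the per-channel ATAPI register;
		 * otherwise it is enabled. Mixing the two presumably
		 * confuses the chip.
		 */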
3388 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3389 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3390 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3391 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3392 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3393 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3394 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3395 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3396 atapi = 0;
3397 else
3398 atapi = PDC262_ATAPI_UDMA;
3399 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3400 PDC262_ATAPI(channel), atapi);
3401 }
3402 }
3403 for (drive = 0; drive < 2; drive++) {
3404 drvp = &chp->ch_drive[drive];
3405 /* If no drive, skip */
3406 if ((drvp->drive_flags & DRIVE) == 0)
3407 continue;
3408 mode = 0;
3409 if (drvp->drive_flags & DRIVE_UDMA) {
3410 /* use Ultra/DMA */
3411 drvp->drive_flags &= ~DRIVE_DMA;
3412 mode = PDC2xx_TIM_SET_MB(mode,
3413 pdc2xx_udma_mb[drvp->UDMA_mode]);
3414 mode = PDC2xx_TIM_SET_MC(mode,
3415 pdc2xx_udma_mc[drvp->UDMA_mode]);
3416 drvp->drive_flags &= ~DRIVE_DMA;
3417 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3418 } else if (drvp->drive_flags & DRIVE_DMA) {
3419 mode = PDC2xx_TIM_SET_MB(mode,
3420 pdc2xx_dma_mb[drvp->DMA_mode]);
3421 mode = PDC2xx_TIM_SET_MC(mode,
3422 pdc2xx_dma_mc[drvp->DMA_mode]);
3423 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3424 } else {
3425 mode = PDC2xx_TIM_SET_MB(mode,
3426 pdc2xx_dma_mb[0]);
3427 mode = PDC2xx_TIM_SET_MC(mode,
3428 pdc2xx_dma_mc[0]);
3429 }
3430 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3431 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3432 if (drvp->drive_flags & DRIVE_ATA)
3433 mode |= PDC2xx_TIM_PRE;
3434 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3435 if (drvp->PIO_mode >= 3) {
3436 mode |= PDC2xx_TIM_IORDY;
3437 if (drive == 0)
3438 mode |= PDC2xx_TIM_IORDYp;
3439 }
3440 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3441 "timings 0x%x\n",
3442 sc->sc_wdcdev.sc_dev.dv_xname,
3443 chp->channel, drive, mode), DEBUG_PROBE);
3444 pci_conf_write(sc->sc_pc, sc->sc_tag,
3445 PDC2xx_TIM(chp->channel, drive), mode);
3446 }
3447 if (idedma_ctl != 0) {
3448 /* Add software bits in status register */
3449 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3450 IDEDMA_CTL, idedma_ctl);
3451 }
3452 pciide_print_modes(cp);
3453 }
3454
3455 int
3456 pdc202xx_pci_intr(arg)
3457 void *arg;
3458 {
3459 struct pciide_softc *sc = arg;
3460 struct pciide_channel *cp;
3461 struct channel_softc *wdc_cp;
3462 int i, rv, crv;
3463 u_int32_t scr;
3464
3465 rv = 0;
3466 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3467 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3468 cp = &sc->pciide_channels[i];
3469 wdc_cp = &cp->wdc_channel;
3470 		/* If a compat channel, skip. */
3471 if (cp->compat)
3472 continue;
3473 if (scr & PDC2xx_SCR_INT(i)) {
3474 crv = wdcintr(wdc_cp);
3475 if (crv == 0)
3476 printf("%s:%d: bogus intr (reg 0x%x)\n",
3477 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3478 else
3479 rv = 1;
3480 }
3481 }
3482 return rv;
3483 }
3484
int
pdc20265_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int i, rv, crv;
	u_int32_t dmastat;

	rv = 0;
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;
		/* If a compat channel, skip. */
		if (cp->compat)
			continue;
		/*
		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously,
		 * but it does assert INT in IDEDMA_CTL even for non-DMA ops.
		 * So use that instead (it costs 2 register reads instead of
		 * 1, but there is no other way to do it).
		 */
		dmastat = bus_space_read_1(sc->sc_dma_iot,
		    sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
		if ((dmastat & IDEDMA_CTL_INTR) == 0)
			continue;
		crv = wdcintr(wdc_cp);
		if (crv == 0)
			printf("%s:%d: bogus intr\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, i);
		else
			rv = 1;
	}
	return rv;
}

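/*
 * Map and attach an OPTi IDE controller: map the bus-master DMA
 * registers, advertise PIO mode 4 (and DMA mode 2 when DMA is usable),
 * and attach each channel unless the chip's init control register
 * marks the second channel as disabled.
 */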
void
opti_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface;
	u_int8_t init_ctrl;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;
	printf("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	printf("\n");

	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.PIO_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.DMA_cap = 2;
	}
	sc->sc_wdcdev.set_modes = opti_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
	    OPTI_REG_INIT_CONTROL);

	interface = PCI_INTERFACE(pa->pa_class);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if (channel == 1 &&
		    (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
			printf("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		opti_setup_channel(&cp->wdc_channel);
	}
}

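/*
 * Per-channel mode setup for OPTi controllers.  Both drives on a
 * channel have to share one `Address Setup Time' value, so the faster
 * drive is slowed down to match before the timing and enhanced-mode
 * registers are programmed.
 */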
void
opti_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int drive, spd;
	int mode[2];
	u_int8_t rv, mr;

	/*
	 * The `Delay' and `Address Setup Time' fields of the
	 * Miscellaneous Register are always zero initially.
	 */
	mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
	mr &= ~(OPTI_MISC_DELAY_MASK |
	    OPTI_MISC_ADDR_SETUP_MASK |
	    OPTI_MISC_INDEX_MASK);

	/* Prime the control register before setting timing values */
	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);

	/* Determine the clock rate of the PCI bus the chip is attached to */
	spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
	spd &= OPTI_STRAP_PCI_SPEED_MASK;

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0) {
			mode[drive] = -1;
			continue;
		}

		if ((drvp->drive_flags & DRIVE_DMA)) {
			/*
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed
			 */
			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
				drvp->PIO_mode = drvp->DMA_mode + 2;
			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
				    drvp->PIO_mode - 2 : 0;
			if (drvp->DMA_mode == 0)
				drvp->PIO_mode = 0;

			mode[drive] = drvp->DMA_mode + 5;
		} else
			mode[drive] = drvp->PIO_mode;

		if (drive && mode[0] >= 0 &&
		    (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
			/*
			 * Can't have two drives using different values
			 * for `Address Setup Time'.
			 * Slow down the faster drive to compensate.
			 */
			int d = (opti_tim_as[spd][mode[0]] >
			    opti_tim_as[spd][mode[1]]) ? 0 : 1;

			mode[d] = mode[1-d];
			chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
			chp->ch_drive[d].DMA_mode = 0;
			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
		}
	}

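	/* Program the per-drive timings with the modes settled on above. */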
	for (drive = 0; drive < 2; drive++) {
		int m;
		if ((m = mode[drive]) < 0)
			continue;

		/* Set the Address Setup Time and select appropriate index */
		rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
		rv |= OPTI_MISC_INDEX(drive);
		opti_write_config(chp, OPTI_REG_MISC, mr | rv);

		/* Set the pulse width and recovery timing parameters */
		rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
		rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
		opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
		opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);

		/* Set the Enhanced Mode register appropriately */
		rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
		rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
		rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
		pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
	}

	/* Finally, enable the timings */
	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);

	pciide_print_modes(cp);
}
