1 /*	$NetBSD: pciide.c,v 1.68.2.30 2002/02/06 14:17:51 he Exp $	*/
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82
83 #define DEBUG_DMA 0x01
84 #define DEBUG_XFERS 0x02
85 #define DEBUG_FUNCS 0x08
86 #define DEBUG_PROBE 0x10
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
89 #define WDCDEBUG_PRINT(args, level) \
90 if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
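
/*
 * To trace the probe or DMA setup paths, set wdcdebug_pciide_mask to a
 * combination of the DEBUG_* bits above (e.g. DEBUG_PROBE | DEBUG_DMA),
 * either here or at run time from the kernel debugger.
 */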
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98
99 #include <machine/endian.h>
100
101 #include <vm/vm.h>
102 #include <vm/vm_param.h>
103 #include <vm/vm_kern.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/cy82c693var.h>
122
123 #include "opt_pciide.h"
124
125 /* inlines for reading/writing 8-bit PCI registers */
126 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
127 int));
128 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
129 int, u_int8_t));
130
131 static __inline u_int8_t
132 pciide_pci_read(pc, pa, reg)
133 pci_chipset_tag_t pc;
134 pcitag_t pa;
135 int reg;
136 {
137
138 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
139 ((reg & 0x03) * 8) & 0xff);
140 }
141
142 static __inline void
143 pciide_pci_write(pc, pa, reg, val)
144 pci_chipset_tag_t pc;
145 pcitag_t pa;
146 int reg;
147 u_int8_t val;
148 {
149 pcireg_t pcival;
150
151 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
152 pcival &= ~(0xff << ((reg & 0x03) * 8));
153 pcival |= (val << ((reg & 0x03) * 8));
154 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
155 }
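
/*
 * Usage sketch (hypothetical register offset 0x53): update a single
 * byte-wide chip register without disturbing its neighbours in the
 * same config dword:
 *
 *	reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x53);
 *	pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x53, reg | 0x01);
 */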
156
157 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
158
159 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
160 void piix_setup_channel __P((struct channel_softc*));
161 void piix3_4_setup_channel __P((struct channel_softc*));
162 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
163 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
164 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
165
166 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
167 void amd7x6_setup_channel __P((struct channel_softc*));
168
169 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
170 void apollo_setup_channel __P((struct channel_softc*));
171
172 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
173 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_setup_channel __P((struct channel_softc*));
175 void cmd_channel_map __P((struct pci_attach_args *,
176 struct pciide_softc *, int));
177 int cmd_pci_intr __P((void *));
178 void cmd646_9_irqack __P((struct channel_softc *));
179
180 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
181 void cy693_setup_channel __P((struct channel_softc*));
182
183 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
184 void sis_setup_channel __P((struct channel_softc*));
185
186 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
187 void acer_setup_channel __P((struct channel_softc*));
188 int acer_pci_intr __P((void *));
189
190 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
191 void pdc202xx_setup_channel __P((struct channel_softc*));
192 int pdc202xx_pci_intr __P((void *));
193 int pdc20265_pci_intr __P((void *));
194
195 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
196 void opti_setup_channel __P((struct channel_softc*));
197
198 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
199 void hpt_setup_channel __P((struct channel_softc*));
200 int hpt_pci_intr __P((void *));
201
202 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
203 void acard_setup_channel __P((struct channel_softc*));
204 int acard_pci_intr __P((void *));
205
206 void pciide_channel_dma_setup __P((struct pciide_channel *));
207 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
208 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
209 void pciide_dma_start __P((void*, int, int));
210 int pciide_dma_finish __P((void*, int, int, int));
211 void pciide_irqack __P((struct channel_softc *));
212 void pciide_print_modes __P((struct pciide_channel *));
213
214 struct pciide_product_desc {
215 u_int32_t ide_product;
216 int ide_flags;
217 const char *ide_name;
218 /* map and setup chip, probe drives */
219 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
220 };
221
222 /* Flags for ide_flags */
223 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
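
/*
 * IDE_PCI_CLASS_OVERRIDE lets pciide_match() accept a known product even
 * when its PCI class/subclass is not mass storage/IDE, as with the Promise,
 * HPT, Acard and newer CMD chips below.
 */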
224
225 /* Default product description for devices not known from this controller */
226 const struct pciide_product_desc default_product_desc = {
227 0,
228 0,
229 "Generic PCI IDE controller",
230 default_chip_map,
231 };
232
233 const struct pciide_product_desc pciide_intel_products[] = {
234 { PCI_PRODUCT_INTEL_82092AA,
235 0,
236 "Intel 82092AA IDE controller",
237 default_chip_map,
238 },
239 { PCI_PRODUCT_INTEL_82371FB_IDE,
240 0,
241 "Intel 82371FB IDE controller (PIIX)",
242 piix_chip_map,
243 },
244 { PCI_PRODUCT_INTEL_82371SB_IDE,
245 0,
246 "Intel 82371SB IDE Interface (PIIX3)",
247 piix_chip_map,
248 },
249 { PCI_PRODUCT_INTEL_82371AB_IDE,
250 0,
251 "Intel 82371AB IDE controller (PIIX4)",
252 piix_chip_map,
253 },
254 { PCI_PRODUCT_INTEL_82801AA_IDE,
255 0,
256 "Intel 82801AA IDE Controller (ICH)",
257 piix_chip_map,
258 },
259 { PCI_PRODUCT_INTEL_82801AB_IDE,
260 0,
261 "Intel 82801AB IDE Controller (ICH0)",
262 piix_chip_map,
263 },
264 { PCI_PRODUCT_INTEL_82801BA_IDE,
265 0,
266 "Intel 82801BA IDE Controller (ICH2)",
267 piix_chip_map,
268 },
269 { PCI_PRODUCT_INTEL_82801BAM_IDE,
270 0,
271 "Intel 82801BAM IDE Controller (ICH2)",
272 piix_chip_map,
273 },
274 { 0,
275 0,
276 NULL,
277 NULL
278 }
279 };
280
281 const struct pciide_product_desc pciide_amd_products[] = {
282 { PCI_PRODUCT_AMD_PBC756_IDE,
283 0,
284 "Advanced Micro Devices AMD756 IDE Controller",
285 amd7x6_chip_map
286 },
287 { PCI_PRODUCT_AMD_PBC766_IDE,
288 0,
289 "Advanced Micro Devices AMD766 IDE Controller",
290 amd7x6_chip_map
291 },
292 { 0,
293 0,
294 NULL,
295 NULL
296 }
297 };
298
299 const struct pciide_product_desc pciide_cmd_products[] = {
300 { PCI_PRODUCT_CMDTECH_640,
301 0,
302 "CMD Technology PCI0640",
303 cmd_chip_map
304 },
305 { PCI_PRODUCT_CMDTECH_643,
306 0,
307 "CMD Technology PCI0643",
308 cmd0643_9_chip_map,
309 },
310 { PCI_PRODUCT_CMDTECH_646,
311 0,
312 "CMD Technology PCI0646",
313 cmd0643_9_chip_map,
314 },
315 { PCI_PRODUCT_CMDTECH_648,
316 IDE_PCI_CLASS_OVERRIDE,
317 "CMD Technology PCI0648",
318 cmd0643_9_chip_map,
319 },
320 { PCI_PRODUCT_CMDTECH_649,
321 IDE_PCI_CLASS_OVERRIDE,
322 "CMD Technology PCI0649",
323 cmd0643_9_chip_map,
324 },
325 { 0,
326 0,
327 NULL,
328 NULL
329 }
330 };
331
332 const struct pciide_product_desc pciide_via_products[] = {
333 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
334 0,
335 NULL,
336 apollo_chip_map,
337 },
338 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
339 0,
340 NULL,
341 apollo_chip_map,
342 },
343 { 0,
344 0,
345 NULL,
346 NULL
347 }
348 };
349
350 const struct pciide_product_desc pciide_cypress_products[] = {
351 { PCI_PRODUCT_CONTAQ_82C693,
352 0,
353 "Cypress 82C693 IDE Controller",
354 cy693_chip_map,
355 },
356 { 0,
357 0,
358 NULL,
359 NULL
360 }
361 };
362
363 const struct pciide_product_desc pciide_sis_products[] = {
364 { PCI_PRODUCT_SIS_5597_IDE,
365 0,
366 "Silicon Integrated System 5597/5598 IDE controller",
367 sis_chip_map,
368 },
369 { 0,
370 0,
371 NULL,
372 NULL
373 }
374 };
375
376 const struct pciide_product_desc pciide_acer_products[] = {
377 { PCI_PRODUCT_ALI_M5229,
378 0,
379 "Acer Labs M5229 UDMA IDE Controller",
380 acer_chip_map,
381 },
382 { 0,
383 0,
384 NULL,
385 NULL
386 }
387 };
388
389 const struct pciide_product_desc pciide_promise_products[] = {
390 { PCI_PRODUCT_PROMISE_ULTRA33,
391 IDE_PCI_CLASS_OVERRIDE,
392 "Promise Ultra33/ATA Bus Master IDE Accelerator",
393 pdc202xx_chip_map,
394 },
395 { PCI_PRODUCT_PROMISE_ULTRA66,
396 IDE_PCI_CLASS_OVERRIDE,
397 "Promise Ultra66/ATA Bus Master IDE Accelerator",
398 pdc202xx_chip_map,
399 },
400 { PCI_PRODUCT_PROMISE_ULTRA100,
401 IDE_PCI_CLASS_OVERRIDE,
402 "Promise Ultra100/ATA Bus Master IDE Accelerator",
403 pdc202xx_chip_map,
404 },
405 { PCI_PRODUCT_PROMISE_ULTRA100X,
406 IDE_PCI_CLASS_OVERRIDE,
407 "Promise Ultra100/ATA Bus Master IDE Accelerator",
408 pdc202xx_chip_map,
409 },
410 { 0,
411 0,
412 NULL,
413 NULL
414 }
415 };
416
417 const struct pciide_product_desc pciide_opti_products[] = {
418 { PCI_PRODUCT_OPTI_82C621,
419 0,
420 "OPTi 82c621 PCI IDE controller",
421 opti_chip_map,
422 },
423 { PCI_PRODUCT_OPTI_82C568,
424 0,
425 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
426 opti_chip_map,
427 },
428 { PCI_PRODUCT_OPTI_82D568,
429 0,
430 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
431 opti_chip_map,
432 },
433 { 0,
434 0,
435 NULL,
436 NULL
437 }
438 };
439
440 const struct pciide_product_desc pciide_triones_products[] = {
441 { PCI_PRODUCT_TRIONES_HPT366,
442 IDE_PCI_CLASS_OVERRIDE,
443 NULL,
444 hpt_chip_map,
445 },
446 { 0,
447 0,
448 NULL,
449 NULL
450 }
451 };
452
453 const struct pciide_product_desc pciide_acard_products[] = {
454 { PCI_PRODUCT_ACARD_ATP850U,
455 IDE_PCI_CLASS_OVERRIDE,
456 "Acard ATP850U Ultra33 IDE Controller",
457 acard_chip_map,
458 },
459 { PCI_PRODUCT_ACARD_ATP860,
460 IDE_PCI_CLASS_OVERRIDE,
461 "Acard ATP860 Ultra66 IDE Controller",
462 acard_chip_map,
463 },
464 { PCI_PRODUCT_ACARD_ATP860A,
465 IDE_PCI_CLASS_OVERRIDE,
466 "Acard ATP860-A Ultra66 IDE Controller",
467 acard_chip_map,
468 },
469 { 0,
470 0,
471 NULL,
472 }
473 };
474
475 struct pciide_vendor_desc {
476 u_int32_t ide_vendor;
477 const struct pciide_product_desc *ide_products;
478 };
479
480 const struct pciide_vendor_desc pciide_vendors[] = {
481 { PCI_VENDOR_INTEL, pciide_intel_products },
482 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
483 { PCI_VENDOR_VIATECH, pciide_via_products },
484 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
485 { PCI_VENDOR_SIS, pciide_sis_products },
486 { PCI_VENDOR_ALI, pciide_acer_products },
487 { PCI_VENDOR_PROMISE, pciide_promise_products },
488 { PCI_VENDOR_AMD, pciide_amd_products },
489 { PCI_VENDOR_OPTI, pciide_opti_products },
490 { PCI_VENDOR_TRIONES, pciide_triones_products },
491 { PCI_VENDOR_ACARD, pciide_acard_products },
492 { 0, NULL }
493 };
494
495 /* options passed via the 'flags' config keyword */
496 #define PCIIDE_OPTIONS_DMA 0x01
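
/*
 * PCIIDE_OPTIONS_DMA ("flags 0x0001" on the pciide line of the kernel
 * config) tells default_chip_map() to use bus-master DMA on an otherwise
 * unknown controller; without it, generic controllers stay in PIO mode.
 */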
497
498 int pciide_match __P((struct device *, struct cfdata *, void *));
499 void pciide_attach __P((struct device *, struct device *, void *));
500
501 struct cfattach pciide_ca = {
502 sizeof(struct pciide_softc), pciide_match, pciide_attach
503 };
504 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
505 int pciide_mapregs_compat __P(( struct pci_attach_args *,
506 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
507 int pciide_mapregs_native __P((struct pci_attach_args *,
508 struct pciide_channel *, bus_size_t *, bus_size_t *,
509 int (*pci_intr) __P((void *))));
510 void pciide_mapreg_dma __P((struct pciide_softc *,
511 struct pci_attach_args *));
512 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
513 void pciide_mapchan __P((struct pci_attach_args *,
514 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
515 int (*pci_intr) __P((void *))));
516 int pciide_chan_candisable __P((struct pciide_channel *));
517 void pciide_map_compat_intr __P(( struct pci_attach_args *,
518 struct pciide_channel *, int, int));
519 int pciide_print __P((void *, const char *pnp));
520 int pciide_compat_intr __P((void *));
521 int pciide_pci_intr __P((void *));
522 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
523
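/*
 * Look up a PCI id in the tables above: find the vendor entry first, then
 * scan that vendor's product list. The vendor list is terminated by a NULL
 * ide_products pointer and each product list by a NULL chip_map; if no
 * match is found NULL is returned and pciide_attach() falls back to the
 * generic default_product_desc.
 */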
524 const struct pciide_product_desc *
525 pciide_lookup_product(id)
526 u_int32_t id;
527 {
528 const struct pciide_product_desc *pp;
529 const struct pciide_vendor_desc *vp;
530
531 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
532 if (PCI_VENDOR(id) == vp->ide_vendor)
533 break;
534
535 if ((pp = vp->ide_products) == NULL)
536 return NULL;
537
538 for (; pp->chip_map != NULL; pp++)
539 if (PCI_PRODUCT(id) == pp->ide_product)
540 break;
541
542 if (pp->chip_map == NULL)
543 return NULL;
544 return pp;
545 }
546
547 int
548 pciide_match(parent, match, aux)
549 struct device *parent;
550 struct cfdata *match;
551 void *aux;
552 {
553 struct pci_attach_args *pa = aux;
554 const struct pciide_product_desc *pp;
555
556 /*
557 * Check the ID register to see that it's a PCI IDE controller.
558 * If it is, we assume that we can deal with it; it _should_
559 * work in a standardized way...
560 */
561 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
562 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
563 return (1);
564 }
565
566 /*
567	 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
568	 * controllers. Let's see if we can deal with them anyway.
569 */
570 pp = pciide_lookup_product(pa->pa_id);
571 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
572 return (1);
573 }
574
575 return (0);
576 }
577
578 void
579 pciide_attach(parent, self, aux)
580 struct device *parent, *self;
581 void *aux;
582 {
583 struct pci_attach_args *pa = aux;
584 pci_chipset_tag_t pc = pa->pa_pc;
585 pcitag_t tag = pa->pa_tag;
586 struct pciide_softc *sc = (struct pciide_softc *)self;
587 pcireg_t csr;
588 char devinfo[256];
589 const char *displaydev;
590
591 sc->sc_pp = pciide_lookup_product(pa->pa_id);
592 if (sc->sc_pp == NULL) {
593 sc->sc_pp = &default_product_desc;
594 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
595 displaydev = devinfo;
596 } else
597 displaydev = sc->sc_pp->ide_name;
598
599 /* if displaydev == NULL, printf is done in chip-specific map */
600 if (displaydev)
601 printf(": %s (rev. 0x%02x)\n", displaydev,
602 PCI_REVISION(pa->pa_class));
603
604 sc->sc_pc = pa->pa_pc;
605 sc->sc_tag = pa->pa_tag;
606 #ifdef WDCDEBUG
607 if (wdcdebug_pciide_mask & DEBUG_PROBE)
608 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
609 #endif
610 sc->sc_pp->chip_map(sc, pa);
611
612 if (sc->sc_dma_ok) {
613 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
614 csr |= PCI_COMMAND_MASTER_ENABLE;
615 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
616 }
617 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
618 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
619 }
620
621 /* tell whether the chip is enabled or not */
622 int
623 pciide_chipen(sc, pa)
624 struct pciide_softc *sc;
625 struct pci_attach_args *pa;
626 {
627 pcireg_t csr;
628 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
629 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
630 PCI_COMMAND_STATUS_REG);
631 printf("%s: device disabled (at %s)\n",
632 sc->sc_wdcdev.sc_dev.dv_xname,
633 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
634 "device" : "bridge");
635 return 0;
636 }
637 return 1;
638 }
639
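/*
 * Map a channel in compatibility mode: the command and control blocks live
 * at the fixed legacy addresses given by PCIIDE_COMPAT_CMD_BASE() and
 * PCIIDE_COMPAT_CTL_BASE(); the compat interrupt is hooked up separately
 * by pciide_map_compat_intr().
 */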
640 int
641 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
642 struct pci_attach_args *pa;
643 struct pciide_channel *cp;
644 int compatchan;
645 bus_size_t *cmdsizep, *ctlsizep;
646 {
647 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
648 struct channel_softc *wdc_cp = &cp->wdc_channel;
649
650 cp->compat = 1;
651 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
652 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
653
654 wdc_cp->cmd_iot = pa->pa_iot;
655 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
656 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
657 printf("%s: couldn't map %s channel cmd regs\n",
658 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
659 return (0);
660 }
661
662 wdc_cp->ctl_iot = pa->pa_iot;
663 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
664 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
665 printf("%s: couldn't map %s channel ctl regs\n",
666 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
667 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
668 PCIIDE_COMPAT_CMD_SIZE);
669 return (0);
670 }
671
672 return (1);
673 }
674
675 int
676 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
677 struct pci_attach_args * pa;
678 struct pciide_channel *cp;
679 bus_size_t *cmdsizep, *ctlsizep;
680 int (*pci_intr) __P((void *));
681 {
682 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
683 struct channel_softc *wdc_cp = &cp->wdc_channel;
684 const char *intrstr;
685 pci_intr_handle_t intrhandle;
686
687 cp->compat = 0;
688
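	/*
	 * Both channels share a single native-PCI interrupt, so establish
	 * the handler only once and reuse sc_pci_ih afterwards.
	 */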
689 if (sc->sc_pci_ih == NULL) {
690 if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
691 pa->pa_intrline, &intrhandle) != 0) {
692 printf("%s: couldn't map native-PCI interrupt\n",
693 sc->sc_wdcdev.sc_dev.dv_xname);
694 return 0;
695 }
696 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
697 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
698 intrhandle, IPL_BIO, pci_intr, sc);
699 if (sc->sc_pci_ih != NULL) {
700 printf("%s: using %s for native-PCI interrupt\n",
701 sc->sc_wdcdev.sc_dev.dv_xname,
702 intrstr ? intrstr : "unknown interrupt");
703 } else {
704 printf("%s: couldn't establish native-PCI interrupt",
705 sc->sc_wdcdev.sc_dev.dv_xname);
706 if (intrstr != NULL)
707 printf(" at %s", intrstr);
708 printf("\n");
709 return 0;
710 }
711 }
712 cp->ih = sc->sc_pci_ih;
713 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
714 PCI_MAPREG_TYPE_IO, 0,
715 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
716 printf("%s: couldn't map %s channel cmd regs\n",
717 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
718 return 0;
719 }
720
721 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
722 PCI_MAPREG_TYPE_IO, 0,
723 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
724 printf("%s: couldn't map %s channel ctl regs\n",
725 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
726 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
727 return 0;
728 }
729 /*
730 * In native mode, 4 bytes of I/O space are mapped for the control
731	 * register; the control register itself is at offset 2. Pass the
732	 * generic code a handle for only one byte at the right offset.
733 */
734 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
735 &wdc_cp->ctl_ioh) != 0) {
736 printf("%s: unable to subregion %s channel ctl regs\n",
737 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
738 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
739 		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
740 return 0;
741 }
742 return (1);
743 }
744
745 void
746 pciide_mapreg_dma(sc, pa)
747 struct pciide_softc *sc;
748 struct pci_attach_args *pa;
749 {
750 pcireg_t maptype;
751
752 /*
753 * Map DMA registers
754 *
755 * Note that sc_dma_ok is the right variable to test to see if
756 * DMA can be done. If the interface doesn't support DMA,
757 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
758 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
759 * non-zero if the interface supports DMA and the registers
760 * could be mapped.
761 *
762 * XXX Note that despite the fact that the Bus Master IDE specs
763 * XXX say that "The bus master IDE function uses 16 bytes of IO
764 * XXX space," some controllers (at least the United
765 * XXX Microelectronics UM8886BF) place it in memory space.
766 */
767 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
768 PCIIDE_REG_BUS_MASTER_DMA);
769
770 switch (maptype) {
771 case PCI_MAPREG_TYPE_IO:
772 case PCI_MAPREG_MEM_TYPE_32BIT:
773 sc->sc_dma_ok = (pci_mapreg_map(pa,
774 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
775 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
776 sc->sc_dmat = pa->pa_dmat;
777 if (sc->sc_dma_ok == 0) {
778 printf(", but unused (couldn't map registers)");
779 } else {
780 sc->sc_wdcdev.dma_arg = sc;
781 sc->sc_wdcdev.dma_init = pciide_dma_init;
782 sc->sc_wdcdev.dma_start = pciide_dma_start;
783 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
784 }
785 break;
786
787 default:
788 sc->sc_dma_ok = 0;
789 printf(", but unsupported register maptype (0x%x)", maptype);
790 }
791 }
792
793 int
794 pciide_compat_intr(arg)
795 void *arg;
796 {
797 struct pciide_channel *cp = arg;
798
799 #ifdef DIAGNOSTIC
800 /* should only be called for a compat channel */
801 if (cp->compat == 0)
802 panic("pciide compat intr called for non-compat chan %p\n", cp);
803 #endif
804 return (wdcintr(&cp->wdc_channel));
805 }
806
807 int
808 pciide_pci_intr(arg)
809 void *arg;
810 {
811 struct pciide_softc *sc = arg;
812 struct pciide_channel *cp;
813 struct channel_softc *wdc_cp;
814 int i, rv, crv;
815
816 rv = 0;
817 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
818 cp = &sc->pciide_channels[i];
819 wdc_cp = &cp->wdc_channel;
820
821 		/* If a compat channel, skip. */
822 if (cp->compat)
823 continue;
824 		/* if this channel is not waiting for an intr, skip */
825 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
826 continue;
827
828 crv = wdcintr(wdc_cp);
829 if (crv == 0)
830 ; /* leave rv alone */
831 else if (crv == 1)
832 rv = 1; /* claim the intr */
833 else if (rv == 0) /* crv should be -1 in this case */
834 rv = crv; /* if we've done no better, take it */
835 }
836 return (rv);
837 }
838
839 void
840 pciide_channel_dma_setup(cp)
841 struct pciide_channel *cp;
842 {
843 int drive;
844 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
845 struct ata_drive_datas *drvp;
846
847 for (drive = 0; drive < 2; drive++) {
848 drvp = &cp->wdc_channel.ch_drive[drive];
849 /* If no drive, skip */
850 if ((drvp->drive_flags & DRIVE) == 0)
851 continue;
852 /* setup DMA if needed */
853 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
854 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
855 sc->sc_dma_ok == 0) {
856 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
857 continue;
858 }
859 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
860 != 0) {
861 /* Abort DMA setup */
862 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
863 continue;
864 }
865 }
866 }
867
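/*
 * Allocate and map the physical region descriptor (PRD) table the
 * bus-master engine uses for one drive: an array of {base address, byte
 * count} entries, the last one flagged with IDEDMA_BYTE_COUNT_EOT, plus
 * DMA maps for the table itself and for the data transfers.
 */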
868 int
869 pciide_dma_table_setup(sc, channel, drive)
870 struct pciide_softc *sc;
871 int channel, drive;
872 {
873 bus_dma_segment_t seg;
874 int error, rseg;
875 const bus_size_t dma_table_size =
876 sizeof(struct idedma_table) * NIDEDMA_TABLES;
877 struct pciide_dma_maps *dma_maps =
878 &sc->pciide_channels[channel].dma_maps[drive];
879
880 /* If table was already allocated, just return */
881 if (dma_maps->dma_table)
882 return 0;
883
884 /* Allocate memory for the DMA tables and map it */
885 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
886 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
887 BUS_DMA_NOWAIT)) != 0) {
888 printf("%s:%d: unable to allocate table DMA for "
889 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
890 channel, drive, error);
891 return error;
892 }
893 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
894 dma_table_size,
895 (caddr_t *)&dma_maps->dma_table,
896 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
897 printf("%s:%d: unable to map table DMA for"
898 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
899 channel, drive, error);
900 return error;
901 }
902 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
903 "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
904 seg.ds_addr), DEBUG_PROBE);
905
906 /* Create and load table DMA map for this disk */
907 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
908 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
909 &dma_maps->dmamap_table)) != 0) {
910 printf("%s:%d: unable to create table DMA map for "
911 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
912 channel, drive, error);
913 return error;
914 }
915 if ((error = bus_dmamap_load(sc->sc_dmat,
916 dma_maps->dmamap_table,
917 dma_maps->dma_table,
918 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
919 printf("%s:%d: unable to load table DMA map for "
920 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
921 channel, drive, error);
922 return error;
923 }
924 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
925 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
926 	/* Create an xfer DMA map for this drive */
927 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
928 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
929 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
930 &dma_maps->dmamap_xfer)) != 0) {
931 printf("%s:%d: unable to create xfer DMA map for "
932 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
933 channel, drive, error);
934 return error;
935 }
936 return 0;
937 }
938
939 int
940 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
941 void *v;
942 int channel, drive;
943 void *databuf;
944 size_t datalen;
945 int flags;
946 {
947 struct pciide_softc *sc = v;
948 int error, seg;
949 struct pciide_dma_maps *dma_maps =
950 &sc->pciide_channels[channel].dma_maps[drive];
951
952 error = bus_dmamap_load(sc->sc_dmat,
953 dma_maps->dmamap_xfer,
954 databuf, datalen, NULL, BUS_DMA_NOWAIT);
955 if (error) {
956 printf("%s:%d: unable to load xfer DMA map for"
957 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
958 channel, drive, error);
959 return error;
960 }
961
962 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
963 dma_maps->dmamap_xfer->dm_mapsize,
964 (flags & WDC_DMA_READ) ?
965 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
966
967 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
968 #ifdef DIAGNOSTIC
969 /* A segment must not cross a 64k boundary */
970 {
971 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
972 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
973 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
974 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
975 printf("pciide_dma: segment %d physical addr 0x%lx"
976 " len 0x%lx not properly aligned\n",
977 seg, phys, len);
978 panic("pciide_dma: buf align");
979 }
980 }
981 #endif
982 dma_maps->dma_table[seg].base_addr =
983 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
984 dma_maps->dma_table[seg].byte_count =
985 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
986 IDEDMA_BYTE_COUNT_MASK);
987 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
988 seg, le32toh(dma_maps->dma_table[seg].byte_count),
989 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
990
991 }
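	/* Flag the last entry so the controller stops fetching descriptors. */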
992 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
993 htole32(IDEDMA_BYTE_COUNT_EOT);
994
995 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
996 dma_maps->dmamap_table->dm_mapsize,
997 BUS_DMASYNC_PREWRITE);
998
999 /* Maps are ready. Start DMA function */
1000 #ifdef DIAGNOSTIC
1001 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1002 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1003 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1004 panic("pciide_dma_init: table align");
1005 }
1006 #endif
1007
1008 /* Clear status bits */
1009 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1010 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1011 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1012 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1013 /* Write table addr */
1014 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1015 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1016 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1017 /* set read/write */
1018 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1019 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1020 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1021 /* remember flags */
1022 dma_maps->dma_flags = flags;
1023 return 0;
1024 }
1025
1026 void
1027 pciide_dma_start(v, channel, drive)
1028 void *v;
1029 int channel, drive;
1030 {
1031 struct pciide_softc *sc = v;
1032
1033 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1034 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1035 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1036 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1037 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1038 }
1039
1040 int
1041 pciide_dma_finish(v, channel, drive, force)
1042 void *v;
1043 int channel, drive;
1044 int force;
1045 {
1046 struct pciide_softc *sc = v;
1047 u_int8_t status;
1048 int error = 0;
1049 struct pciide_dma_maps *dma_maps =
1050 &sc->pciide_channels[channel].dma_maps[drive];
1051
1052 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1053 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1054 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1055 DEBUG_XFERS);
1056
1057 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1058 return WDC_DMAST_NOIRQ;
1059
1060 /* stop DMA channel */
1061 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1062 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1063 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1064 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1065
1066 /* Unload the map of the data buffer */
1067 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1068 dma_maps->dmamap_xfer->dm_mapsize,
1069 (dma_maps->dma_flags & WDC_DMA_READ) ?
1070 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1071 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1072
1073 if ((status & IDEDMA_CTL_ERR) != 0) {
1074 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1075 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1076 error |= WDC_DMAST_ERR;
1077 }
1078
1079 if ((status & IDEDMA_CTL_INTR) == 0) {
1080 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1081 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1082 drive, status);
1083 error |= WDC_DMAST_NOIRQ;
1084 }
1085
1086 if ((status & IDEDMA_CTL_ACT) != 0) {
1087 /* data underrun, may be a valid condition for ATAPI */
1088 error |= WDC_DMAST_UNDER;
1089 }
1090 return error;
1091 }
1092
1093 void
1094 pciide_irqack(chp)
1095 struct channel_softc *chp;
1096 {
1097 struct pciide_channel *cp = (struct pciide_channel*)chp;
1098 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1099
1100 /* clear status bits in IDE DMA registers */
1101 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1102 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1103 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1104 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1105 }
1106
1107 /* some common code used by several chip_map */
1108 int
1109 pciide_chansetup(sc, channel, interface)
1110 struct pciide_softc *sc;
1111 int channel;
1112 pcireg_t interface;
1113 {
1114 struct pciide_channel *cp = &sc->pciide_channels[channel];
1115 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1116 cp->name = PCIIDE_CHANNEL_NAME(channel);
1117 cp->wdc_channel.channel = channel;
1118 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1119 cp->wdc_channel.ch_queue =
1120 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1121 if (cp->wdc_channel.ch_queue == NULL) {
1122 printf("%s %s channel: "
1123 "can't allocate memory for command queue",
1124 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1125 return 0;
1126 }
1127 printf("%s: %s channel %s to %s mode\n",
1128 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1129 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1130 "configured" : "wired",
1131 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1132 "native-PCI" : "compatibility");
1133 return 1;
1134 }
1135
1136 /* some common code used by several chip channel_map */
1137 void
1138 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1139 struct pci_attach_args *pa;
1140 struct pciide_channel *cp;
1141 pcireg_t interface;
1142 bus_size_t *cmdsizep, *ctlsizep;
1143 int (*pci_intr) __P((void *));
1144 {
1145 struct channel_softc *wdc_cp = &cp->wdc_channel;
1146
1147 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1148 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1149 pci_intr);
1150 else
1151 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1152 wdc_cp->channel, cmdsizep, ctlsizep);
1153
1154 if (cp->hw_ok == 0)
1155 return;
1156 wdc_cp->data32iot = wdc_cp->cmd_iot;
1157 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1158 wdcattach(wdc_cp);
1159 }
1160
1161 /*
1162  * Generic code to determine whether a channel can be disabled. Returns 1
1163  * if the channel can be disabled, 0 if not.
1164 */
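/*
 * (Chip-specific maps then clear the channel-enable bit in their own
 * configuration registers, as piix_chip_map() does with PIIX_IDETIM_IDE.)
 */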
1165 int
1166 pciide_chan_candisable(cp)
1167 struct pciide_channel *cp;
1168 {
1169 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1170 struct channel_softc *wdc_cp = &cp->wdc_channel;
1171
1172 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1173 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1174 printf("%s: disabling %s channel (no drives)\n",
1175 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1176 cp->hw_ok = 0;
1177 return 1;
1178 }
1179 return 0;
1180 }
1181
1182 /*
1183 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1184 * Set hw_ok=0 on failure
1185 */
1186 void
1187 pciide_map_compat_intr(pa, cp, compatchan, interface)
1188 struct pci_attach_args *pa;
1189 struct pciide_channel *cp;
1190 int compatchan, interface;
1191 {
1192 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1193 struct channel_softc *wdc_cp = &cp->wdc_channel;
1194
1195 if (cp->hw_ok == 0)
1196 return;
1197 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1198 return;
1199
1200 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1201 pa, compatchan, pciide_compat_intr, cp);
1202 if (cp->ih == NULL) {
1203 printf("%s: no compatibility interrupt for use by %s "
1204 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1205 cp->hw_ok = 0;
1206 }
1207 }
1208
1209 void
1210 pciide_print_modes(cp)
1211 struct pciide_channel *cp;
1212 {
1213 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1214 int drive;
1215 struct channel_softc *chp;
1216 struct ata_drive_datas *drvp;
1217
1218 chp = &cp->wdc_channel;
1219 for (drive = 0; drive < 2; drive++) {
1220 drvp = &chp->ch_drive[drive];
1221 if ((drvp->drive_flags & DRIVE) == 0)
1222 continue;
1223 printf("%s(%s:%d:%d): using PIO mode %d",
1224 drvp->drv_softc->dv_xname,
1225 sc->sc_wdcdev.sc_dev.dv_xname,
1226 chp->channel, drive, drvp->PIO_mode);
1227 if (drvp->drive_flags & DRIVE_DMA)
1228 printf(", DMA mode %d", drvp->DMA_mode);
1229 if (drvp->drive_flags & DRIVE_UDMA)
1230 printf(", Ultra-DMA mode %d", drvp->UDMA_mode);
1231 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA))
1232 printf(" (using DMA data transfers)");
1233 printf("\n");
1234 }
1235 }
1236
1237 void
1238 default_chip_map(sc, pa)
1239 struct pciide_softc *sc;
1240 struct pci_attach_args *pa;
1241 {
1242 struct pciide_channel *cp;
1243 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1244 pcireg_t csr;
1245 int channel, drive;
1246 struct ata_drive_datas *drvp;
1247 u_int8_t idedma_ctl;
1248 bus_size_t cmdsize, ctlsize;
1249 char *failreason;
1250
1251 if (pciide_chipen(sc, pa) == 0)
1252 return;
1253
1254 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1255 printf("%s: bus-master DMA support present",
1256 sc->sc_wdcdev.sc_dev.dv_xname);
1257 if (sc->sc_pp == &default_product_desc &&
1258 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1259 PCIIDE_OPTIONS_DMA) == 0) {
1260 printf(", but unused (no driver support)");
1261 sc->sc_dma_ok = 0;
1262 } else {
1263 pciide_mapreg_dma(sc, pa);
1264 if (sc->sc_dma_ok != 0)
1265 printf(", used without full driver "
1266 "support");
1267 }
1268 } else {
1269 printf("%s: hardware does not support DMA",
1270 sc->sc_wdcdev.sc_dev.dv_xname);
1271 sc->sc_dma_ok = 0;
1272 }
1273 printf("\n");
1274 if (sc->sc_dma_ok) {
1275 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1276 sc->sc_wdcdev.irqack = pciide_irqack;
1277 }
1278 sc->sc_wdcdev.PIO_cap = 0;
1279 sc->sc_wdcdev.DMA_cap = 0;
1280
1281 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1282 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1283 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1284
1285 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1286 cp = &sc->pciide_channels[channel];
1287 if (pciide_chansetup(sc, channel, interface) == 0)
1288 continue;
1289 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1290 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1291 &ctlsize, pciide_pci_intr);
1292 } else {
1293 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1294 channel, &cmdsize, &ctlsize);
1295 }
1296 if (cp->hw_ok == 0)
1297 continue;
1298 /*
1299 * Check to see if something appears to be there.
1300 */
1301 failreason = NULL;
1302 if (!wdcprobe(&cp->wdc_channel)) {
1303 failreason = "not responding; disabled or no drives?";
1304 goto next;
1305 }
1306 /*
1307 * Now, make sure it's actually attributable to this PCI IDE
1308 * channel by trying to access the channel again while the
1309 * PCI IDE controller's I/O space is disabled. (If the
1310 * channel no longer appears to be there, it belongs to
1311 * this controller.) YUCK!
1312 */
1313 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1314 PCI_COMMAND_STATUS_REG);
1315 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1316 csr & ~PCI_COMMAND_IO_ENABLE);
1317 if (wdcprobe(&cp->wdc_channel))
1318 failreason = "other hardware responding at addresses";
1319 pci_conf_write(sc->sc_pc, sc->sc_tag,
1320 PCI_COMMAND_STATUS_REG, csr);
1321 next:
1322 if (failreason) {
1323 printf("%s: %s channel ignored (%s)\n",
1324 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1325 failreason);
1326 cp->hw_ok = 0;
1327 bus_space_unmap(cp->wdc_channel.cmd_iot,
1328 cp->wdc_channel.cmd_ioh, cmdsize);
1329 bus_space_unmap(cp->wdc_channel.ctl_iot,
1330 cp->wdc_channel.ctl_ioh, ctlsize);
1331 } else {
1332 pciide_map_compat_intr(pa, cp, channel, interface);
1333 }
1334 if (cp->hw_ok) {
1335 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1336 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1337 wdcattach(&cp->wdc_channel);
1338 }
1339 }
1340
1341 if (sc->sc_dma_ok == 0)
1342 return;
1343
1344 /* Allocate DMA maps */
1345 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1346 idedma_ctl = 0;
1347 cp = &sc->pciide_channels[channel];
1348 for (drive = 0; drive < 2; drive++) {
1349 drvp = &cp->wdc_channel.ch_drive[drive];
1350 /* If no drive, skip */
1351 if ((drvp->drive_flags & DRIVE) == 0)
1352 continue;
1353 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1354 continue;
1355 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1356 /* Abort DMA setup */
1357 printf("%s:%d:%d: can't allocate DMA maps, "
1358 "using PIO transfers\n",
1359 sc->sc_wdcdev.sc_dev.dv_xname,
1360 channel, drive);
1361 				drvp->drive_flags &= ~DRIVE_DMA;
				continue;
1362 }
1363 printf("%s:%d:%d: using DMA data transfers\n",
1364 sc->sc_wdcdev.sc_dev.dv_xname,
1365 channel, drive);
1366 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1367 }
1368 if (idedma_ctl != 0) {
1369 /* Add software bits in status register */
1370 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1371 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1372 idedma_ctl);
1373 }
1374 }
1375 }
1376
1377 void
1378 piix_chip_map(sc, pa)
1379 struct pciide_softc *sc;
1380 struct pci_attach_args *pa;
1381 {
1382 struct pciide_channel *cp;
1383 int channel;
1384 u_int32_t idetim;
1385 bus_size_t cmdsize, ctlsize;
1386
1387 if (pciide_chipen(sc, pa) == 0)
1388 return;
1389
1390 printf("%s: bus-master DMA support present",
1391 sc->sc_wdcdev.sc_dev.dv_xname);
1392 pciide_mapreg_dma(sc, pa);
1393 printf("\n");
1394 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1395 WDC_CAPABILITY_MODE;
1396 if (sc->sc_dma_ok) {
1397 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1398 sc->sc_wdcdev.irqack = pciide_irqack;
1399 switch(sc->sc_pp->ide_product) {
1400 case PCI_PRODUCT_INTEL_82371AB_IDE:
1401 case PCI_PRODUCT_INTEL_82801AA_IDE:
1402 case PCI_PRODUCT_INTEL_82801AB_IDE:
1403 case PCI_PRODUCT_INTEL_82801BA_IDE:
1404 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1405 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1406 }
1407 }
1408 sc->sc_wdcdev.PIO_cap = 4;
1409 sc->sc_wdcdev.DMA_cap = 2;
1410 switch(sc->sc_pp->ide_product) {
1411 case PCI_PRODUCT_INTEL_82801AA_IDE:
1412 sc->sc_wdcdev.UDMA_cap = 4;
1413 break;
1414 case PCI_PRODUCT_INTEL_82801BA_IDE:
1415 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1416 sc->sc_wdcdev.UDMA_cap = 5;
1417 break;
1418 default:
1419 sc->sc_wdcdev.UDMA_cap = 2;
1420 }
1421 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1422 sc->sc_wdcdev.set_modes = piix_setup_channel;
1423 else
1424 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1425 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1426 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1427
1428 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1429 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1430 DEBUG_PROBE);
1431 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1432 WDCDEBUG_PRINT((", sidetim=0x%x",
1433 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1434 DEBUG_PROBE);
1435 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1436 WDCDEBUG_PRINT((", udamreg 0x%x",
1437 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1438 DEBUG_PROBE);
1439 }
1440 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1441 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1442 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1443 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1444 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1445 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1446 DEBUG_PROBE);
1447 }
1448
1449 }
1450 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1451
1452 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1453 cp = &sc->pciide_channels[channel];
1454 /* PIIX is compat-only */
1455 if (pciide_chansetup(sc, channel, 0) == 0)
1456 continue;
1457 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1458 if ((PIIX_IDETIM_READ(idetim, channel) &
1459 PIIX_IDETIM_IDE) == 0) {
1460 printf("%s: %s channel ignored (disabled)\n",
1461 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1462 continue;
1463 }
1464 		/* PIIX chips are compat-only pciide devices */
1465 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1466 if (cp->hw_ok == 0)
1467 continue;
1468 if (pciide_chan_candisable(cp)) {
1469 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1470 channel);
1471 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1472 idetim);
1473 }
1474 pciide_map_compat_intr(pa, cp, channel, 0);
1475 if (cp->hw_ok == 0)
1476 continue;
1477 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1478 }
1479
1480 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1481 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1482 DEBUG_PROBE);
1483 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1484 WDCDEBUG_PRINT((", sidetim=0x%x",
1485 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1486 DEBUG_PROBE);
1487 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1488 WDCDEBUG_PRINT((", udamreg 0x%x",
1489 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1490 DEBUG_PROBE);
1491 }
1492 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1493 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1494 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1495 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1496 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1497 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1498 DEBUG_PROBE);
1499 }
1500 }
1501 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1502 }
1503
1504 void
1505 piix_setup_channel(chp)
1506 struct channel_softc *chp;
1507 {
1508 u_int8_t mode[2], drive;
1509 u_int32_t oidetim, idetim, idedma_ctl;
1510 struct pciide_channel *cp = (struct pciide_channel*)chp;
1511 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1512 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1513
1514 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1515 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1516 idedma_ctl = 0;
1517
1518 /* set up new idetim: Enable IDE registers decode */
1519 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1520 chp->channel);
1521
1522 /* setup DMA */
1523 pciide_channel_dma_setup(cp);
1524
1525 /*
1526 	 * Here we have to mess with the drives' modes: the PIIX can't have
1527 	 * different timings for master and slave drives, so we need to find
1528 	 * the best combination.
1529 */
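	/*
	 * For example, if only the master is DMA-capable, the channel timing
	 * is taken from its DMA mode and the slave falls back to PIO mode 0
	 * unless its PIO timings happen to match (see the cases below).
	 */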
1530
1531 	/* If both drives support DMA, take the lower mode */
1532 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1533 (drvp[1].drive_flags & DRIVE_DMA)) {
1534 mode[0] = mode[1] =
1535 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1536 drvp[0].DMA_mode = mode[0];
1537 drvp[1].DMA_mode = mode[1];
1538 goto ok;
1539 }
1540 /*
1541 	 * If only one drive supports DMA, use its mode, and put the other
1542 	 * one in PIO mode 0 if its mode is not compatible.
1543 */
1544 if (drvp[0].drive_flags & DRIVE_DMA) {
1545 mode[0] = drvp[0].DMA_mode;
1546 mode[1] = drvp[1].PIO_mode;
1547 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1548 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1549 mode[1] = drvp[1].PIO_mode = 0;
1550 goto ok;
1551 }
1552 if (drvp[1].drive_flags & DRIVE_DMA) {
1553 mode[1] = drvp[1].DMA_mode;
1554 mode[0] = drvp[0].PIO_mode;
1555 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1556 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1557 mode[0] = drvp[0].PIO_mode = 0;
1558 goto ok;
1559 }
1560 /*
1561 	 * If neither drive is using DMA, take the lower mode, unless one of
1562 	 * them is below PIO mode 2.
1563 */
1564 if (drvp[0].PIO_mode < 2) {
1565 mode[0] = drvp[0].PIO_mode = 0;
1566 mode[1] = drvp[1].PIO_mode;
1567 } else if (drvp[1].PIO_mode < 2) {
1568 mode[1] = drvp[1].PIO_mode = 0;
1569 mode[0] = drvp[0].PIO_mode;
1570 } else {
1571 mode[0] = mode[1] =
1572 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1573 drvp[0].PIO_mode = mode[0];
1574 drvp[1].PIO_mode = mode[1];
1575 }
1576 ok:	/* The modes are set up */
1577 for (drive = 0; drive < 2; drive++) {
1578 if (drvp[drive].drive_flags & DRIVE_DMA) {
1579 idetim |= piix_setup_idetim_timings(
1580 mode[drive], 1, chp->channel);
1581 goto end;
1582 }
1583 }
1584 	/* If we get here, neither drive is using DMA */
1585 if (mode[0] >= 2)
1586 idetim |= piix_setup_idetim_timings(
1587 mode[0], 0, chp->channel);
1588 else
1589 idetim |= piix_setup_idetim_timings(
1590 mode[1], 0, chp->channel);
1591 end: /*
1592 * timing mode is now set up in the controller. Enable
1593 * it per-drive
1594 */
1595 for (drive = 0; drive < 2; drive++) {
1596 /* If no drive, skip */
1597 if ((drvp[drive].drive_flags & DRIVE) == 0)
1598 continue;
1599 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1600 if (drvp[drive].drive_flags & DRIVE_DMA)
1601 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1602 }
1603 if (idedma_ctl != 0) {
1604 /* Add software bits in status register */
1605 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1606 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1607 idedma_ctl);
1608 }
1609 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1610 pciide_print_modes(cp);
1611 }
1612
1613 void
1614 piix3_4_setup_channel(chp)
1615 struct channel_softc *chp;
1616 {
1617 struct ata_drive_datas *drvp;
1618 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1619 struct pciide_channel *cp = (struct pciide_channel*)chp;
1620 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1621 int drive;
1622 int channel = chp->channel;
1623
1624 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1625 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1626 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1627 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1628 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1629 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1630 PIIX_SIDETIM_RTC_MASK(channel));
1631
1632 idedma_ctl = 0;
1633 /* If channel disabled, no need to go further */
1634 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1635 return;
1636 /* set up new idetim: Enable IDE registers decode */
1637 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1638
1639 /* setup DMA if needed */
1640 pciide_channel_dma_setup(cp);
1641
1642 for (drive = 0; drive < 2; drive++) {
1643 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1644 PIIX_UDMATIM_SET(0x3, channel, drive));
1645 drvp = &chp->ch_drive[drive];
1646 /* If no drive, skip */
1647 if ((drvp->drive_flags & DRIVE) == 0)
1648 continue;
1649 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1650 (drvp->drive_flags & DRIVE_UDMA) == 0))
1651 goto pio;
1652
1653 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1654 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1655 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1656 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1657 ideconf |= PIIX_CONFIG_PINGPONG;
1658 }
1659 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1660 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1661 /* setup Ultra/100 */
1662 if (drvp->UDMA_mode > 2 &&
1663 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1664 drvp->UDMA_mode = 2;
1665 if (drvp->UDMA_mode > 4) {
1666 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1667 } else {
1668 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1669 if (drvp->UDMA_mode > 2) {
1670 ideconf |= PIIX_CONFIG_UDMA66(channel,
1671 drive);
1672 } else {
1673 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1674 drive);
1675 }
1676 }
1677 }
1678 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1679 /* setup Ultra/66 */
1680 if (drvp->UDMA_mode > 2 &&
1681 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1682 drvp->UDMA_mode = 2;
1683 if (drvp->UDMA_mode > 2)
1684 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1685 else
1686 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1687 }
1688 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1689 (drvp->drive_flags & DRIVE_UDMA)) {
1690 /* use Ultra/DMA */
1691 drvp->drive_flags &= ~DRIVE_DMA;
1692 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive);
1693 udmareg |= PIIX_UDMATIM_SET(
1694 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1695 } else {
1696 /* use Multiword DMA */
1697 drvp->drive_flags &= ~DRIVE_UDMA;
1698 if (drive == 0) {
1699 idetim |= piix_setup_idetim_timings(
1700 drvp->DMA_mode, 1, channel);
1701 } else {
1702 sidetim |= piix_setup_sidetim_timings(
1703 drvp->DMA_mode, 1, channel);
1704 idetim =PIIX_IDETIM_SET(idetim,
1705 PIIX_IDETIM_SITRE, channel);
1706 }
1707 }
1708 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1709
1710 pio: /* use PIO mode */
1711 idetim |= piix_setup_idetim_drvs(drvp);
1712 if (drive == 0) {
1713 idetim |= piix_setup_idetim_timings(
1714 drvp->PIO_mode, 0, channel);
1715 } else {
1716 sidetim |= piix_setup_sidetim_timings(
1717 drvp->PIO_mode, 0, channel);
1718 idetim =PIIX_IDETIM_SET(idetim,
1719 PIIX_IDETIM_SITRE, channel);
1720 }
1721 }
1722 if (idedma_ctl != 0) {
1723 /* Add software bits in status register */
1724 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1725 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1726 idedma_ctl);
1727 }
1728 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1729 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1730 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1731 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1732 pciide_print_modes(cp);
1733 }
1734
1735
1736 /* setup ISP and RTC fields, based on mode */
1737 static u_int32_t
1738 piix_setup_idetim_timings(mode, dma, channel)
1739 u_int8_t mode;
1740 u_int8_t dma;
1741 u_int8_t channel;
1742 {
1743
1744 if (dma)
1745 return PIIX_IDETIM_SET(0,
1746 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1747 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1748 channel);
1749 else
1750 return PIIX_IDETIM_SET(0,
1751 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1752 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1753 channel);
1754 }
1755
1756 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1757 static u_int32_t
1758 piix_setup_idetim_drvs(drvp)
1759 struct ata_drive_datas *drvp;
1760 {
1761 u_int32_t ret = 0;
1762 struct channel_softc *chp = drvp->chnl_softc;
1763 u_int8_t channel = chp->channel;
1764 u_int8_t drive = drvp->drive;
1765
1766 /*
1767 	 * If the drive is using UDMA, the timing setup is independent,
1768 	 * so just check DMA and PIO here.
1769 */
1770 if (drvp->drive_flags & DRIVE_DMA) {
1771 /* if mode = DMA mode 0, use compatible timings */
1772 if ((drvp->drive_flags & DRIVE_DMA) &&
1773 drvp->DMA_mode == 0) {
1774 drvp->PIO_mode = 0;
1775 return ret;
1776 }
1777 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1778 		/*
1779 		 * If PIO and DMA timings are the same, use fast timings for
1780 		 * PIO too; otherwise use compat timings.
1781 		 */
1782 if ((piix_isp_pio[drvp->PIO_mode] !=
1783 piix_isp_dma[drvp->DMA_mode]) ||
1784 (piix_rtc_pio[drvp->PIO_mode] !=
1785 piix_rtc_dma[drvp->DMA_mode]))
1786 drvp->PIO_mode = 0;
1787 /* if PIO mode <= 2, use compat timings for PIO */
1788 if (drvp->PIO_mode <= 2) {
1789 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1790 channel);
1791 return ret;
1792 }
1793 }
1794
1795 	/*
1796 	 * Now set up PIO modes. If the PIO mode is < 2, use compat
1797 	 * timings; otherwise enable fast timings, and enable IORDY and
1798 	 * prefetch/post if the PIO mode is >= 3.
1799 	 */
1800
1801 if (drvp->PIO_mode < 2)
1802 return ret;
1803
1804 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1805 if (drvp->PIO_mode >= 3) {
1806 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1807 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1808 }
1809 return ret;
1810 }
1811
1812 /* setup values in SIDETIM registers, based on mode */
1813 static u_int32_t
1814 piix_setup_sidetim_timings(mode, dma, channel)
1815 u_int8_t mode;
1816 u_int8_t dma;
1817 u_int8_t channel;
1818 {
1819 if (dma)
1820 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1821 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1822 else
1823 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1824 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1825 }
1826
1827 void
1828 amd7x6_chip_map(sc, pa)
1829 struct pciide_softc *sc;
1830 struct pci_attach_args *pa;
1831 {
1832 struct pciide_channel *cp;
1833 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1834 int channel;
1835 pcireg_t chanenable;
1836 bus_size_t cmdsize, ctlsize;
1837
1838 if (pciide_chipen(sc, pa) == 0)
1839 return;
1840 printf("%s: bus-master DMA support present",
1841 sc->sc_wdcdev.sc_dev.dv_xname);
1842 pciide_mapreg_dma(sc, pa);
1843 printf("\n");
1844 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1845 WDC_CAPABILITY_MODE;
1846 if (sc->sc_dma_ok) {
1847 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1848 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1849 sc->sc_wdcdev.irqack = pciide_irqack;
1850 }
1851 sc->sc_wdcdev.PIO_cap = 4;
1852 sc->sc_wdcdev.DMA_cap = 2;
1853
1854 if (sc->sc_pp->ide_product == PCI_PRODUCT_AMD_PBC766_IDE)
1855 sc->sc_wdcdev.UDMA_cap = 5;
1856 else
1857 sc->sc_wdcdev.UDMA_cap = 4;
1858 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
1859 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1860 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1861 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
1862
1863 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
1864 DEBUG_PROBE);
1865 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1866 cp = &sc->pciide_channels[channel];
1867 if (pciide_chansetup(sc, channel, interface) == 0)
1868 continue;
1869
1870 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
1871 printf("%s: %s channel ignored (disabled)\n",
1872 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1873 continue;
1874 }
1875 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1876 pciide_pci_intr);
1877
1878 if (pciide_chan_candisable(cp))
1879 chanenable &= ~AMD7X6_CHAN_EN(channel);
1880 pciide_map_compat_intr(pa, cp, channel, interface);
1881 if (cp->hw_ok == 0)
1882 continue;
1883
1884 amd7x6_setup_channel(&cp->wdc_channel);
1885 }
1886 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
1887 chanenable);
1888 return;
1889 }
1890
1891 void
1892 amd7x6_setup_channel(chp)
1893 struct channel_softc *chp;
1894 {
1895 u_int32_t udmatim_reg, datatim_reg;
1896 u_int8_t idedma_ctl;
1897 int mode, drive;
1898 struct ata_drive_datas *drvp;
1899 struct pciide_channel *cp = (struct pciide_channel*)chp;
1900 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1901 #ifndef PCIIDE_AMD756_ENABLEDMA
1902 int rev = PCI_REVISION(
1903 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1904 #endif
1905
1906 idedma_ctl = 0;
1907 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
1908 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
1909 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
1910 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
1911
1912 /* setup DMA if needed */
1913 pciide_channel_dma_setup(cp);
1914
1915 for (drive = 0; drive < 2; drive++) {
1916 drvp = &chp->ch_drive[drive];
1917 /* If no drive, skip */
1918 if ((drvp->drive_flags & DRIVE) == 0)
1919 continue;
1920 /* add timing values, setup DMA if needed */
1921 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1922 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1923 mode = drvp->PIO_mode;
1924 goto pio;
1925 }
1926 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1927 (drvp->drive_flags & DRIVE_UDMA)) {
1928 /* use Ultra/DMA */
1929 drvp->drive_flags &= ~DRIVE_DMA;
1930 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
1931 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
1932 AMD7X6_UDMA_TIME(chp->channel, drive,
1933 amd7x6_udma_tim[drvp->UDMA_mode]);
1934 /* can use PIO timings, MW DMA unused */
1935 mode = drvp->PIO_mode;
1936 } else {
1937 /* use Multiword DMA, but only if revision is OK */
1938 drvp->drive_flags &= ~DRIVE_UDMA;
1939 #ifndef PCIIDE_AMD756_ENABLEDMA
1940 			/*
1941 			 * The workaround (disabling MW DMA) doesn't seem to be
1942 			 * necessary with all drives, so it can be turned off
1943 			 * with PCIIDE_AMD756_ENABLEDMA. The chip bug causes a
1944 			 * hard hang if triggered.
1945 			 */
1946 if (sc->sc_pp->ide_product ==
1947 PCI_PRODUCT_AMD_PBC756_IDE &&
1948 AMD756_CHIPREV_DISABLEDMA(rev)) {
1949 printf("%s:%d:%d: multi-word DMA disabled due "
1950 "to chip revision\n",
1951 sc->sc_wdcdev.sc_dev.dv_xname,
1952 chp->channel, drive);
1953 mode = drvp->PIO_mode;
1954 drvp->drive_flags &= ~DRIVE_DMA;
1955 goto pio;
1956 }
1957 #endif
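			/*
			 * The data-port timing register appears to be shared
			 * by PIO and multi-word DMA on these chips, so a
			 * single compatible mode is chosen; the driver treats
			 * MW DMA mode n as equivalent to PIO mode n+2 here.
			 */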
1958 /* mode = min(pio, dma+2) */
1959 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
1960 mode = drvp->PIO_mode;
1961 else
1962 mode = drvp->DMA_mode + 2;
1963 }
1964 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1965
1966 pio: /* setup PIO mode */
1967 if (mode <= 2) {
1968 drvp->DMA_mode = 0;
1969 drvp->PIO_mode = 0;
1970 mode = 0;
1971 } else {
1972 drvp->PIO_mode = mode;
1973 drvp->DMA_mode = mode - 2;
1974 }
1975 datatim_reg |=
1976 AMD7X6_DATATIM_PULSE(chp->channel, drive,
1977 amd7x6_pio_set[mode]) |
1978 AMD7X6_DATATIM_RECOV(chp->channel, drive,
1979 amd7x6_pio_rec[mode]);
1980 }
1981 if (idedma_ctl != 0) {
1982 /* Add software bits in status register */
1983 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1984 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1985 idedma_ctl);
1986 }
1987 pciide_print_modes(cp);
1988 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
1989 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
1990 }
1991
1992 void
1993 apollo_chip_map(sc, pa)
1994 struct pciide_softc *sc;
1995 struct pci_attach_args *pa;
1996 {
1997 struct pciide_channel *cp;
1998 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1999 int channel;
2000 u_int32_t ideconf;
2001 bus_size_t cmdsize, ctlsize;
2002 pcitag_t pcib_tag;
2003 pcireg_t pcib_id, pcib_class;
2004
2005 if (pciide_chipen(sc, pa) == 0)
2006 return;
2007 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2008 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2009 /* and read ID and rev of the ISA bridge */
2010 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2011 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
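	/*
	 * The VIA IDE function appears to report the same product ID
	 * across chip generations, so the ID and revision of the
	 * companion ISA bridge (function 0) are what presumably identify
	 * the actual chip and its UDMA capability below.
	 */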
2012 printf(": VIA Technologies ");
2013 switch (PCI_PRODUCT(pcib_id)) {
2014 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2015 printf("VT82C586 (Apollo VP) ");
2016 		if (PCI_REVISION(pcib_class) >= 0x02) {
2017 printf("ATA33 controller\n");
2018 sc->sc_wdcdev.UDMA_cap = 2;
2019 } else {
2020 printf("controller\n");
2021 sc->sc_wdcdev.UDMA_cap = 0;
2022 }
2023 break;
2024 case PCI_PRODUCT_VIATECH_VT82C596A:
2025 printf("VT82C596A (Apollo Pro) ");
2026 if (PCI_REVISION(pcib_class) >= 0x12) {
2027 printf("ATA66 controller\n");
2028 sc->sc_wdcdev.UDMA_cap = 4;
2029 } else {
2030 printf("ATA33 controller\n");
2031 sc->sc_wdcdev.UDMA_cap = 2;
2032 }
2033 break;
2034 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2035 printf("VT82C686A (Apollo KX133) ");
2036 if (PCI_REVISION(pcib_class) >= 0x40) {
2037 printf("ATA100 controller\n");
2038 sc->sc_wdcdev.UDMA_cap = 5;
2039 } else {
2040 printf("ATA66 controller\n");
2041 sc->sc_wdcdev.UDMA_cap = 4;
2042 }
2043 break;
2044 default:
2045 printf("unknown ATA controller\n");
2046 sc->sc_wdcdev.UDMA_cap = 0;
2047 }
2048
2049 printf("%s: bus-master DMA support present",
2050 sc->sc_wdcdev.sc_dev.dv_xname);
2051 pciide_mapreg_dma(sc, pa);
2052 printf("\n");
2053 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2054 WDC_CAPABILITY_MODE;
2055 if (sc->sc_dma_ok) {
2056 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2057 sc->sc_wdcdev.irqack = pciide_irqack;
2058 if (sc->sc_wdcdev.UDMA_cap > 0)
2059 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2060 }
2061 sc->sc_wdcdev.PIO_cap = 4;
2062 sc->sc_wdcdev.DMA_cap = 2;
2063 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2064 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2065 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2066
2067 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2068 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2069 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2070 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2071 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2072 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2073 DEBUG_PROBE);
2074
2075 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2076 cp = &sc->pciide_channels[channel];
2077 if (pciide_chansetup(sc, channel, interface) == 0)
2078 continue;
2079
2080 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2081 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2082 printf("%s: %s channel ignored (disabled)\n",
2083 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2084 continue;
2085 }
2086 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2087 pciide_pci_intr);
2088 if (cp->hw_ok == 0)
2089 continue;
2090 if (pciide_chan_candisable(cp)) {
2091 ideconf &= ~APO_IDECONF_EN(channel);
2092 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2093 ideconf);
2094 }
2095 pciide_map_compat_intr(pa, cp, channel, interface);
2096
2097 if (cp->hw_ok == 0)
2098 continue;
2099 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2100 }
2101 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2102 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2103 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2104 }
2105
2106 void
2107 apollo_setup_channel(chp)
2108 struct channel_softc *chp;
2109 {
2110 u_int32_t udmatim_reg, datatim_reg;
2111 u_int8_t idedma_ctl;
2112 int mode, drive;
2113 struct ata_drive_datas *drvp;
2114 struct pciide_channel *cp = (struct pciide_channel*)chp;
2115 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2116
2117 idedma_ctl = 0;
2118 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2119 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2120 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2121 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2122
2123 /* setup DMA if needed */
2124 pciide_channel_dma_setup(cp);
2125
2126 for (drive = 0; drive < 2; drive++) {
2127 drvp = &chp->ch_drive[drive];
2128 /* If no drive, skip */
2129 if ((drvp->drive_flags & DRIVE) == 0)
2130 continue;
2131 /* add timing values, setup DMA if needed */
2132 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2133 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2134 mode = drvp->PIO_mode;
2135 goto pio;
2136 }
2137 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2138 (drvp->drive_flags & DRIVE_UDMA)) {
2139 /* use Ultra/DMA */
2140 drvp->drive_flags &= ~DRIVE_DMA;
2141 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2142 APO_UDMA_EN_MTH(chp->channel, drive);
2143 if (sc->sc_wdcdev.UDMA_cap == 5) {
2144 /* 686b */
2145 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2146 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2147 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2148 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2149 /* 596b or 686a */
2150 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2151 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2152 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2153 } else {
2154 /* 596a or 586b */
2155 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2156 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2157 }
2158 /* can use PIO timings, MW DMA unused */
2159 mode = drvp->PIO_mode;
2160 } else {
2161 /* use Multiword DMA */
2162 drvp->drive_flags &= ~DRIVE_UDMA;
2163 /* mode = min(pio, dma+2) */
2164 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2165 mode = drvp->PIO_mode;
2166 else
2167 mode = drvp->DMA_mode + 2;
2168 }
2169 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2170
2171 pio: /* setup PIO mode */
2172 if (mode <= 2) {
2173 drvp->DMA_mode = 0;
2174 drvp->PIO_mode = 0;
2175 mode = 0;
2176 } else {
2177 drvp->PIO_mode = mode;
2178 drvp->DMA_mode = mode - 2;
2179 }
2180 datatim_reg |=
2181 APO_DATATIM_PULSE(chp->channel, drive,
2182 apollo_pio_set[mode]) |
2183 APO_DATATIM_RECOV(chp->channel, drive,
2184 apollo_pio_rec[mode]);
2185 }
2186 if (idedma_ctl != 0) {
2187 /* Add software bits in status register */
2188 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2189 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2190 idedma_ctl);
2191 }
2192 pciide_print_modes(cp);
2193 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2194 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2195 }
2196
2197 void
2198 cmd_channel_map(pa, sc, channel)
2199 struct pci_attach_args *pa;
2200 struct pciide_softc *sc;
2201 int channel;
2202 {
2203 struct pciide_channel *cp = &sc->pciide_channels[channel];
2204 bus_size_t cmdsize, ctlsize;
2205 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2206 int interface;
2207
2208 	/*
2209 	 * The 0648/0649 can be told to identify as a RAID controller.
2210 	 * In this case, we have to fake the interface.
2211 	 */
2212 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2213 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2214 PCIIDE_INTERFACE_SETTABLE(1);
2215 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2216 CMD_CONF_DSA1)
2217 interface |= PCIIDE_INTERFACE_PCI(0) |
2218 PCIIDE_INTERFACE_PCI(1);
2219 } else {
2220 interface = PCI_INTERFACE(pa->pa_class);
2221 }
2222
2223 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2224 cp->name = PCIIDE_CHANNEL_NAME(channel);
2225 cp->wdc_channel.channel = channel;
2226 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2227
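	/*
	 * The secondary channel shares the primary channel's command
	 * queue, presumably because the two CMD64x channels cannot run
	 * transfers concurrently and must be serialized.
	 */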
2228 if (channel > 0) {
2229 cp->wdc_channel.ch_queue =
2230 sc->pciide_channels[0].wdc_channel.ch_queue;
2231 } else {
2232 cp->wdc_channel.ch_queue =
2233 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2234 }
2235 if (cp->wdc_channel.ch_queue == NULL) {
2236 printf("%s %s channel: "
2237 "can't allocate memory for command queue",
2238 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2239 return;
2240 }
2241
2242 printf("%s: %s channel %s to %s mode\n",
2243 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2244 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2245 "configured" : "wired",
2246 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2247 "native-PCI" : "compatibility");
2248
2249 /*
2250 * with a CMD PCI64x, if we get here, the first channel is enabled:
2251 * there's no way to disable the first channel without disabling
2252 * the whole device
2253 */
2254 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2255 printf("%s: %s channel ignored (disabled)\n",
2256 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2257 return;
2258 }
2259
2260 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2261 if (cp->hw_ok == 0)
2262 return;
2263 if (channel == 1) {
2264 if (pciide_chan_candisable(cp)) {
2265 ctrl &= ~CMD_CTRL_2PORT;
2266 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2267 CMD_CTRL, ctrl);
2268 }
2269 }
2270 pciide_map_compat_intr(pa, cp, channel, interface);
2271 }
2272
2273 int
2274 cmd_pci_intr(arg)
2275 void *arg;
2276 {
2277 struct pciide_softc *sc = arg;
2278 struct pciide_channel *cp;
2279 struct channel_softc *wdc_cp;
2280 int i, rv, crv;
2281 u_int32_t priirq, secirq;
2282
2283 rv = 0;
2284 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2285 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
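	/*
	 * The per-channel interrupt status lives in two different
	 * registers: CMD_CONF carries the primary-channel bit
	 * (CMD_CONF_DRV0_INTR) and CMD_ARTTIM23 the secondary-channel
	 * bit (CMD_ARTTIM23_IRQ).
	 */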
2286 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2287 cp = &sc->pciide_channels[i];
2288 wdc_cp = &cp->wdc_channel;
2289 /* If a compat channel skip. */
2290 if (cp->compat)
2291 continue;
2292 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2293 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2294 crv = wdcintr(wdc_cp);
2295 if (crv == 0)
2296 printf("%s:%d: bogus intr\n",
2297 sc->sc_wdcdev.sc_dev.dv_xname, i);
2298 else
2299 rv = 1;
2300 }
2301 }
2302 return rv;
2303 }
2304
2305 void
2306 cmd_chip_map(sc, pa)
2307 struct pciide_softc *sc;
2308 struct pci_attach_args *pa;
2309 {
2310 int channel;
2311
2312 /*
2313 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2314 	 * and the base address registers can be disabled at
2315 	 * the hardware level. In this case, the device is wired
2316 * in compat mode and its first channel is always enabled,
2317 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2318 * In fact, it seems that the first channel of the CMD PCI0640
2319 * can't be disabled.
2320 */
2321
2322 #ifdef PCIIDE_CMD064x_DISABLE
2323 if (pciide_chipen(sc, pa) == 0)
2324 return;
2325 #endif
2326
2327 printf("%s: hardware does not support DMA\n",
2328 sc->sc_wdcdev.sc_dev.dv_xname);
2329 sc->sc_dma_ok = 0;
2330
2331 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2332 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2333 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2334
2335 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2336 cmd_channel_map(pa, sc, channel);
2337 }
2338 }
2339
2340 void
2341 cmd0643_9_chip_map(sc, pa)
2342 struct pciide_softc *sc;
2343 struct pci_attach_args *pa;
2344 {
2345 struct pciide_channel *cp;
2346 int channel;
2347 int rev = PCI_REVISION(
2348 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2349
2350 /*
2351 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2352 	 * and the base address registers can be disabled at
2353 	 * the hardware level. In this case, the device is wired
2354 * in compat mode and its first channel is always enabled,
2355 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2356 * In fact, it seems that the first channel of the CMD PCI0640
2357 * can't be disabled.
2358 */
2359
2360 #ifdef PCIIDE_CMD064x_DISABLE
2361 if (pciide_chipen(sc, pa) == 0)
2362 return;
2363 #endif
2364 printf("%s: bus-master DMA support present",
2365 sc->sc_wdcdev.sc_dev.dv_xname);
2366 pciide_mapreg_dma(sc, pa);
2367 printf("\n");
2368 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2369 WDC_CAPABILITY_MODE;
2370 if (sc->sc_dma_ok) {
2371 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2372 switch (sc->sc_pp->ide_product) {
2373 case PCI_PRODUCT_CMDTECH_649:
2374 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2375 sc->sc_wdcdev.UDMA_cap = 5;
2376 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2377 break;
2378 case PCI_PRODUCT_CMDTECH_648:
2379 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2380 sc->sc_wdcdev.UDMA_cap = 4;
2381 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2382 break;
2383 case PCI_PRODUCT_CMDTECH_646:
2384 if (rev >= CMD0646U2_REV) {
2385 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2386 sc->sc_wdcdev.UDMA_cap = 2;
2387 } else if (rev >= CMD0646U_REV) {
2388 /*
2389 * Linux's driver claims that the 646U is broken
2390 * with UDMA. Only enable it if we know what we're
2391 * doing
2392 */
2393 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2394 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2395 sc->sc_wdcdev.UDMA_cap = 2;
2396 #endif
2397 				/* explicitly disable UDMA */
2398 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2399 CMD_UDMATIM(0), 0);
2400 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2401 CMD_UDMATIM(1), 0);
2402 }
2403 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2404 break;
2405 default:
2406 sc->sc_wdcdev.irqack = pciide_irqack;
2407 }
2408 }
2409
2410 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2411 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2412 sc->sc_wdcdev.PIO_cap = 4;
2413 sc->sc_wdcdev.DMA_cap = 2;
2414 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2415
2416 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2417 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2418 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2419 DEBUG_PROBE);
2420
2421 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2422 cp = &sc->pciide_channels[channel];
2423 cmd_channel_map(pa, sc, channel);
2424 if (cp->hw_ok == 0)
2425 continue;
2426 cmd0643_9_setup_channel(&cp->wdc_channel);
2427 }
2428 /*
2429 	 * Note: this also makes sure we clear the IRQ disable and reset
2430 	 * bits.
2431 */
2432 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2433 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2434 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2435 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2436 DEBUG_PROBE);
2437 }
2438
2439 void
2440 cmd0643_9_setup_channel(chp)
2441 struct channel_softc *chp;
2442 {
2443 struct ata_drive_datas *drvp;
2444 u_int8_t tim;
2445 u_int32_t idedma_ctl, udma_reg;
2446 int drive;
2447 struct pciide_channel *cp = (struct pciide_channel*)chp;
2448 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2449
2450 idedma_ctl = 0;
2451 /* setup DMA if needed */
2452 pciide_channel_dma_setup(cp);
2453
2454 for (drive = 0; drive < 2; drive++) {
2455 drvp = &chp->ch_drive[drive];
2456 /* If no drive, skip */
2457 if ((drvp->drive_flags & DRIVE) == 0)
2458 continue;
2459 /* add timing values, setup DMA if needed */
2460 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2461 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2462 if (drvp->drive_flags & DRIVE_UDMA) {
2463 /* UltraDMA on a 646U2, 0648 or 0649 */
2464 drvp->drive_flags &= ~DRIVE_DMA;
2465 udma_reg = pciide_pci_read(sc->sc_pc,
2466 sc->sc_tag, CMD_UDMATIM(chp->channel));
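				/*
				 * CMD_BICSR_80 apparently reports an
				 * 80-conductor cable on this channel; if it
				 * is not set, cap the drive at UDMA 2.
				 */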
2467 if (drvp->UDMA_mode > 2 &&
2468 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2469 CMD_BICSR) &
2470 CMD_BICSR_80(chp->channel)) == 0)
2471 drvp->UDMA_mode = 2;
2472 if (drvp->UDMA_mode > 2)
2473 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2474 else if (sc->sc_wdcdev.UDMA_cap > 2)
2475 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2476 udma_reg |= CMD_UDMATIM_UDMA(drive);
2477 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2478 CMD_UDMATIM_TIM_OFF(drive));
2479 udma_reg |=
2480 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2481 CMD_UDMATIM_TIM_OFF(drive));
2482 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2483 CMD_UDMATIM(chp->channel), udma_reg);
2484 } else {
2485 				/*
2486 				 * Use multiword DMA. Timings will be used
2487 				 * for both PIO and DMA, so adjust the DMA
2488 				 * mode if needed. If we have a 0646U2/8/9,
2489 				 * turn off UDMA.
2490 				 */
2491 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2492 udma_reg = pciide_pci_read(sc->sc_pc,
2493 sc->sc_tag,
2494 CMD_UDMATIM(chp->channel));
2495 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2496 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2497 CMD_UDMATIM(chp->channel),
2498 udma_reg);
2499 }
2500 if (drvp->PIO_mode >= 3 &&
2501 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2502 drvp->DMA_mode = drvp->PIO_mode - 2;
2503 }
2504 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2505 }
2506 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2507 }
2508 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2509 CMD_DATA_TIM(chp->channel, drive), tim);
2510 }
2511 if (idedma_ctl != 0) {
2512 /* Add software bits in status register */
2513 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2514 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2515 idedma_ctl);
2516 }
2517 pciide_print_modes(cp);
2518 }
2519
2520 void
2521 cmd646_9_irqack(chp)
2522 struct channel_softc *chp;
2523 {
2524 u_int32_t priirq, secirq;
2525 struct pciide_channel *cp = (struct pciide_channel*)chp;
2526 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2527
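	/*
	 * Reading the per-channel interrupt register and writing the same
	 * value back appears to be what clears the latched interrupt bit
	 * on the 646/648/649 before the generic acknowledge.
	 */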
2528 if (chp->channel == 0) {
2529 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2530 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2531 } else {
2532 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2533 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2534 }
2535 pciide_irqack(chp);
2536 }
2537
2538 void
2539 cy693_chip_map(sc, pa)
2540 struct pciide_softc *sc;
2541 struct pci_attach_args *pa;
2542 {
2543 struct pciide_channel *cp;
2544 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2545 bus_size_t cmdsize, ctlsize;
2546
2547 if (pciide_chipen(sc, pa) == 0)
2548 return;
2549 /*
2550 * this chip has 2 PCI IDE functions, one for primary and one for
2551 * secondary. So we need to call pciide_mapregs_compat() with
2552 * the real channel
2553 */
2554 if (pa->pa_function == 1) {
2555 sc->sc_cy_compatchan = 0;
2556 } else if (pa->pa_function == 2) {
2557 sc->sc_cy_compatchan = 1;
2558 } else {
2559 printf("%s: unexpected PCI function %d\n",
2560 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2561 return;
2562 }
2563 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2564 printf("%s: bus-master DMA support present",
2565 sc->sc_wdcdev.sc_dev.dv_xname);
2566 pciide_mapreg_dma(sc, pa);
2567 } else {
2568 printf("%s: hardware does not support DMA",
2569 sc->sc_wdcdev.sc_dev.dv_xname);
2570 sc->sc_dma_ok = 0;
2571 }
2572 printf("\n");
2573
2574 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2575 if (sc->sc_cy_handle == NULL) {
2576 printf("%s: unable to map hyperCache control registers\n",
2577 sc->sc_wdcdev.sc_dev.dv_xname);
2578 sc->sc_dma_ok = 0;
2579 }
2580
2581 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2582 WDC_CAPABILITY_MODE;
2583 if (sc->sc_dma_ok) {
2584 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2585 sc->sc_wdcdev.irqack = pciide_irqack;
2586 }
2587 sc->sc_wdcdev.PIO_cap = 4;
2588 sc->sc_wdcdev.DMA_cap = 2;
2589 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2590
2591 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2592 sc->sc_wdcdev.nchannels = 1;
2593
2594 /* Only one channel for this chip; if we are here it's enabled */
2595 cp = &sc->pciide_channels[0];
2596 sc->wdc_chanarray[0] = &cp->wdc_channel;
2597 cp->name = PCIIDE_CHANNEL_NAME(0);
2598 cp->wdc_channel.channel = 0;
2599 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2600 cp->wdc_channel.ch_queue =
2601 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2602 if (cp->wdc_channel.ch_queue == NULL) {
2603 printf("%s primary channel: "
2604 "can't allocate memory for command queue",
2605 sc->sc_wdcdev.sc_dev.dv_xname);
2606 return;
2607 }
2608 printf("%s: primary channel %s to ",
2609 sc->sc_wdcdev.sc_dev.dv_xname,
2610 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2611 "configured" : "wired");
2612 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2613 printf("native-PCI");
2614 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2615 pciide_pci_intr);
2616 } else {
2617 printf("compatibility");
2618 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2619 &cmdsize, &ctlsize);
2620 }
2621 printf(" mode\n");
2622 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2623 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2624 wdcattach(&cp->wdc_channel);
2625 if (pciide_chan_candisable(cp)) {
2626 pci_conf_write(sc->sc_pc, sc->sc_tag,
2627 PCI_COMMAND_STATUS_REG, 0);
2628 }
2629 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2630 if (cp->hw_ok == 0)
2631 return;
2632 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2633 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2634 cy693_setup_channel(&cp->wdc_channel);
2635 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2636 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2637 }
2638
2639 void
2640 cy693_setup_channel(chp)
2641 struct channel_softc *chp;
2642 {
2643 struct ata_drive_datas *drvp;
2644 int drive;
2645 u_int32_t cy_cmd_ctrl;
2646 u_int32_t idedma_ctl;
2647 struct pciide_channel *cp = (struct pciide_channel*)chp;
2648 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2649 int dma_mode = -1;
2650
2651 cy_cmd_ctrl = idedma_ctl = 0;
2652
2653 /* setup DMA if needed */
2654 pciide_channel_dma_setup(cp);
2655
2656 for (drive = 0; drive < 2; drive++) {
2657 drvp = &chp->ch_drive[drive];
2658 /* If no drive, skip */
2659 if ((drvp->drive_flags & DRIVE) == 0)
2660 continue;
2661 /* add timing values, setup DMA if needed */
2662 if (drvp->drive_flags & DRIVE_DMA) {
2663 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2664 /* use Multiword DMA */
2665 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2666 dma_mode = drvp->DMA_mode;
2667 }
2668 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2669 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2670 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2671 CY_CMD_CTRL_IOW_REC_OFF(drive));
2672 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2673 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2674 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2675 CY_CMD_CTRL_IOR_REC_OFF(drive));
2676 }
2677 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
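	/*
	 * Only one multiword DMA timing exists per channel (note the
	 * single cy82c693_write() below), so both drives are forced to
	 * the slowest DMA mode negotiated above.
	 */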
2678 chp->ch_drive[0].DMA_mode = dma_mode;
2679 chp->ch_drive[1].DMA_mode = dma_mode;
2680
2681 if (dma_mode == -1)
2682 dma_mode = 0;
2683
2684 if (sc->sc_cy_handle != NULL) {
2685 /* Note: `multiple' is implied. */
2686 cy82c693_write(sc->sc_cy_handle,
2687 (sc->sc_cy_compatchan == 0) ?
2688 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2689 }
2690
2691 pciide_print_modes(cp);
2692
2693 if (idedma_ctl != 0) {
2694 /* Add software bits in status register */
2695 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2696 IDEDMA_CTL, idedma_ctl);
2697 }
2698 }
2699
2700 void
2701 sis_chip_map(sc, pa)
2702 struct pciide_softc *sc;
2703 struct pci_attach_args *pa;
2704 {
2705 struct pciide_channel *cp;
2706 int channel;
2707 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2708 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2709 pcireg_t rev = PCI_REVISION(pa->pa_class);
2710 bus_size_t cmdsize, ctlsize;
2711 pcitag_t pchb_tag;
2712 pcireg_t pchb_id, pchb_class;
2713
2714 if (pciide_chipen(sc, pa) == 0)
2715 return;
2716 printf("%s: bus-master DMA support present",
2717 sc->sc_wdcdev.sc_dev.dv_xname);
2718 pciide_mapreg_dma(sc, pa);
2719 printf("\n");
2720
2721 /* get a PCI tag for the host bridge (function 0 of the same device) */
2722 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2723 /* and read ID and rev of the ISA bridge */
2724 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2725 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2726
2727 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2728 WDC_CAPABILITY_MODE;
2729 if (sc->sc_dma_ok) {
2730 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2731 sc->sc_wdcdev.irqack = pciide_irqack;
2732 		/*
2733 		 * Controllers associated with a rev 0x2 530 Host-to-PCI
2734 		 * bridge have problems with UDMA (info provided by Christos).
2735 		 */
2736 if (rev >= 0xd0 &&
2737 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
2738 PCI_REVISION(pchb_class) >= 0x03))
2739 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2740 }
2741
2742 sc->sc_wdcdev.PIO_cap = 4;
2743 sc->sc_wdcdev.DMA_cap = 2;
2744 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2745 sc->sc_wdcdev.UDMA_cap = 2;
2746 sc->sc_wdcdev.set_modes = sis_setup_channel;
2747
2748 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2749 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2750
2751 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2752 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2753 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2754
2755 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2756 cp = &sc->pciide_channels[channel];
2757 if (pciide_chansetup(sc, channel, interface) == 0)
2758 continue;
2759 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2760 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2761 printf("%s: %s channel ignored (disabled)\n",
2762 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2763 continue;
2764 }
2765 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2766 pciide_pci_intr);
2767 if (cp->hw_ok == 0)
2768 continue;
2769 if (pciide_chan_candisable(cp)) {
2770 if (channel == 0)
2771 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2772 else
2773 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2774 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2775 sis_ctr0);
2776 }
2777 pciide_map_compat_intr(pa, cp, channel, interface);
2778 if (cp->hw_ok == 0)
2779 continue;
2780 sis_setup_channel(&cp->wdc_channel);
2781 }
2782 }
2783
2784 void
2785 sis_setup_channel(chp)
2786 struct channel_softc *chp;
2787 {
2788 struct ata_drive_datas *drvp;
2789 int drive;
2790 u_int32_t sis_tim;
2791 u_int32_t idedma_ctl;
2792 struct pciide_channel *cp = (struct pciide_channel*)chp;
2793 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2794
2795 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2796 "channel %d 0x%x\n", chp->channel,
2797 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2798 DEBUG_PROBE);
2799 sis_tim = 0;
2800 idedma_ctl = 0;
2801 /* setup DMA if needed */
2802 pciide_channel_dma_setup(cp);
2803
2804 for (drive = 0; drive < 2; drive++) {
2805 drvp = &chp->ch_drive[drive];
2806 /* If no drive, skip */
2807 if ((drvp->drive_flags & DRIVE) == 0)
2808 continue;
2809 /* add timing values, setup DMA if needed */
2810 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2811 (drvp->drive_flags & DRIVE_UDMA) == 0)
2812 goto pio;
2813
2814 if (drvp->drive_flags & DRIVE_UDMA) {
2815 /* use Ultra/DMA */
2816 drvp->drive_flags &= ~DRIVE_DMA;
2817 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2818 SIS_TIM_UDMA_TIME_OFF(drive);
2819 sis_tim |= SIS_TIM_UDMA_EN(drive);
2820 } else {
2821 /*
2822 * use Multiword DMA
2823 * Timings will be used for both PIO and DMA,
2824 * so adjust DMA mode if needed
2825 */
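			/*
			 * e.g. a drive reporting PIO 4 / MW DMA 1 ends up
			 * at PIO 3 / MW DMA 1, and a drive limited to
			 * MW DMA 0 falls back to PIO 0.
			 */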
2826 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2827 drvp->PIO_mode = drvp->DMA_mode + 2;
2828 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2829 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2830 drvp->PIO_mode - 2 : 0;
2831 if (drvp->DMA_mode == 0)
2832 drvp->PIO_mode = 0;
2833 }
2834 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2835 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2836 SIS_TIM_ACT_OFF(drive);
2837 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2838 SIS_TIM_REC_OFF(drive);
2839 }
2840 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2841 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2842 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2843 if (idedma_ctl != 0) {
2844 /* Add software bits in status register */
2845 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2846 IDEDMA_CTL, idedma_ctl);
2847 }
2848 pciide_print_modes(cp);
2849 }
2850
2851 void
2852 acer_chip_map(sc, pa)
2853 struct pciide_softc *sc;
2854 struct pci_attach_args *pa;
2855 {
2856 struct pciide_channel *cp;
2857 int channel;
2858 pcireg_t cr, interface;
2859 bus_size_t cmdsize, ctlsize;
2860 pcireg_t rev = PCI_REVISION(pa->pa_class);
2861
2862 if (pciide_chipen(sc, pa) == 0)
2863 return;
2864 printf("%s: bus-master DMA support present",
2865 sc->sc_wdcdev.sc_dev.dv_xname);
2866 pciide_mapreg_dma(sc, pa);
2867 printf("\n");
2868 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2869 WDC_CAPABILITY_MODE;
2870 if (sc->sc_dma_ok) {
2871 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2872 if (rev >= 0x20)
2873 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2874 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2875 sc->sc_wdcdev.irqack = pciide_irqack;
2876 }
2877
2878 sc->sc_wdcdev.PIO_cap = 4;
2879 sc->sc_wdcdev.DMA_cap = 2;
2880 sc->sc_wdcdev.UDMA_cap = 2;
2881 sc->sc_wdcdev.set_modes = acer_setup_channel;
2882 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2883 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2884
2885 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2886 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2887 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2888
2889 /* Enable "microsoft register bits" R/W. */
2890 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2891 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2892 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2893 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2894 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2895 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2896 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2897 ~ACER_CHANSTATUSREGS_RO);
2898 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2899 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2900 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2901 /* Don't use cr, re-read the real register content instead */
2902 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2903 PCI_CLASS_REG));
2904
2905 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2906 cp = &sc->pciide_channels[channel];
2907 if (pciide_chansetup(sc, channel, interface) == 0)
2908 continue;
2909 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2910 printf("%s: %s channel ignored (disabled)\n",
2911 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2912 continue;
2913 }
2914 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2915 acer_pci_intr);
2916 if (cp->hw_ok == 0)
2917 continue;
2918 if (pciide_chan_candisable(cp)) {
2919 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
2920 pci_conf_write(sc->sc_pc, sc->sc_tag,
2921 PCI_CLASS_REG, cr);
2922 }
2923 pciide_map_compat_intr(pa, cp, channel, interface);
2924 acer_setup_channel(&cp->wdc_channel);
2925 }
2926 }
2927
2928 void
2929 acer_setup_channel(chp)
2930 struct channel_softc *chp;
2931 {
2932 struct ata_drive_datas *drvp;
2933 int drive;
2934 u_int32_t acer_fifo_udma;
2935 u_int32_t idedma_ctl;
2936 struct pciide_channel *cp = (struct pciide_channel*)chp;
2937 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2938
2939 idedma_ctl = 0;
2940 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
2941 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
2942 acer_fifo_udma), DEBUG_PROBE);
2943 /* setup DMA if needed */
2944 pciide_channel_dma_setup(cp);
2945
2946 for (drive = 0; drive < 2; drive++) {
2947 drvp = &chp->ch_drive[drive];
2948 /* If no drive, skip */
2949 if ((drvp->drive_flags & DRIVE) == 0)
2950 continue;
2951 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
2952 "channel %d drive %d 0x%x\n", chp->channel, drive,
2953 pciide_pci_read(sc->sc_pc, sc->sc_tag,
2954 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
2955 /* clear FIFO/DMA mode */
2956 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
2957 ACER_UDMA_EN(chp->channel, drive) |
2958 ACER_UDMA_TIM(chp->channel, drive, 0x7));
2959
2960 /* add timing values, setup DMA if needed */
2961 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2962 (drvp->drive_flags & DRIVE_UDMA) == 0) {
2963 acer_fifo_udma |=
2964 ACER_FTH_OPL(chp->channel, drive, 0x1);
2965 goto pio;
2966 }
2967
2968 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
2969 if (drvp->drive_flags & DRIVE_UDMA) {
2970 /* use Ultra/DMA */
2971 drvp->drive_flags &= ~DRIVE_DMA;
2972 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
2973 acer_fifo_udma |=
2974 ACER_UDMA_TIM(chp->channel, drive,
2975 acer_udma[drvp->UDMA_mode]);
2976 } else {
2977 /*
2978 * use Multiword DMA
2979 * Timings will be used for both PIO and DMA,
2980 * so adjust DMA mode if needed
2981 */
2982 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2983 drvp->PIO_mode = drvp->DMA_mode + 2;
2984 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2985 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2986 drvp->PIO_mode - 2 : 0;
2987 if (drvp->DMA_mode == 0)
2988 drvp->PIO_mode = 0;
2989 }
2990 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2991 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
2992 ACER_IDETIM(chp->channel, drive),
2993 acer_pio[drvp->PIO_mode]);
2994 }
2995 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
2996 acer_fifo_udma), DEBUG_PROBE);
2997 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
2998 if (idedma_ctl != 0) {
2999 /* Add software bits in status register */
3000 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3001 IDEDMA_CTL, idedma_ctl);
3002 }
3003 pciide_print_modes(cp);
3004 }
3005
3006 int
3007 acer_pci_intr(arg)
3008 void *arg;
3009 {
3010 struct pciide_softc *sc = arg;
3011 struct pciide_channel *cp;
3012 struct channel_softc *wdc_cp;
3013 int i, rv, crv;
3014 u_int32_t chids;
3015
3016 rv = 0;
3017 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3018 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3019 cp = &sc->pciide_channels[i];
3020 wdc_cp = &cp->wdc_channel;
3021 /* If a compat channel skip. */
3022 if (cp->compat)
3023 continue;
3024 if (chids & ACER_CHIDS_INT(i)) {
3025 crv = wdcintr(wdc_cp);
3026 if (crv == 0)
3027 printf("%s:%d: bogus intr\n",
3028 sc->sc_wdcdev.sc_dev.dv_xname, i);
3029 else
3030 rv = 1;
3031 }
3032 }
3033 return rv;
3034 }
3035
3036 void
3037 hpt_chip_map(sc, pa)
3038 struct pciide_softc *sc;
3039 struct pci_attach_args *pa;
3040 {
3041 struct pciide_channel *cp;
3042 int i, compatchan, revision;
3043 pcireg_t interface;
3044 bus_size_t cmdsize, ctlsize;
3045
3046 if (pciide_chipen(sc, pa) == 0)
3047 return;
3048 revision = PCI_REVISION(pa->pa_class);
3049 printf(": Triones/Highpoint ");
3050 if (revision == HPT370_REV)
3051 printf("HPT370 IDE Controller\n");
3052 else if (revision == HPT370A_REV)
3053 printf("HPT370A IDE Controller\n");
3054 else if (revision == HPT366_REV)
3055 printf("HPT366 IDE Controller\n");
3056 else
3057 printf("unknown HPT IDE controller rev %d\n", revision);
3058
3059 	/*
3060 	 * When the chip is in native mode it identifies itself as a
3061 	 * 'misc mass storage' device. Fake the interface in this case.
3062 	 */
3063 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3064 interface = PCI_INTERFACE(pa->pa_class);
3065 } else {
3066 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3067 PCIIDE_INTERFACE_PCI(0);
3068 if (revision == HPT370_REV || revision == HPT370A_REV)
3069 interface |= PCIIDE_INTERFACE_PCI(1);
3070 }
3071
3072 printf("%s: bus-master DMA support present",
3073 sc->sc_wdcdev.sc_dev.dv_xname);
3074 pciide_mapreg_dma(sc, pa);
3075 printf("\n");
3076 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3077 WDC_CAPABILITY_MODE;
3078 if (sc->sc_dma_ok) {
3079 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3080 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3081 sc->sc_wdcdev.irqack = pciide_irqack;
3082 }
3083 sc->sc_wdcdev.PIO_cap = 4;
3084 sc->sc_wdcdev.DMA_cap = 2;
3085
3086 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3087 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3088 if (revision == HPT366_REV) {
3089 sc->sc_wdcdev.UDMA_cap = 4;
3090 /*
3091 * The 366 has 2 PCI IDE functions, one for primary and one
3092 * for secondary. So we need to call pciide_mapregs_compat()
3093 * with the real channel
3094 */
3095 if (pa->pa_function == 0) {
3096 compatchan = 0;
3097 } else if (pa->pa_function == 1) {
3098 compatchan = 1;
3099 } else {
3100 printf("%s: unexpected PCI function %d\n",
3101 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3102 return;
3103 }
3104 sc->sc_wdcdev.nchannels = 1;
3105 } else {
3106 sc->sc_wdcdev.nchannels = 2;
3107 sc->sc_wdcdev.UDMA_cap = 5;
3108 }
3109 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3110 cp = &sc->pciide_channels[i];
3111 if (sc->sc_wdcdev.nchannels > 1) {
3112 compatchan = i;
3113 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3114 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3115 printf("%s: %s channel ignored (disabled)\n",
3116 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3117 continue;
3118 }
3119 }
3120 if (pciide_chansetup(sc, i, interface) == 0)
3121 continue;
3122 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3123 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3124 &ctlsize, hpt_pci_intr);
3125 } else {
3126 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3127 &cmdsize, &ctlsize);
3128 }
3129 if (cp->hw_ok == 0)
3130 return;
3131 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3132 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3133 wdcattach(&cp->wdc_channel);
3134 hpt_setup_channel(&cp->wdc_channel);
3135 }
3136 if (revision == HPT370_REV || revision == HPT370A_REV) {
3137 /*
3138 * HPT370_REV has a bit to disable interrupts, make sure
3139 * to clear it
3140 */
3141 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3142 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3143 ~HPT_CSEL_IRQDIS);
3144 }
3145 return;
3146 }
3147
3148 void
3149 hpt_setup_channel(chp)
3150 struct channel_softc *chp;
3151 {
3152 struct ata_drive_datas *drvp;
3153 int drive;
3154 int cable;
3155 u_int32_t before, after;
3156 u_int32_t idedma_ctl;
3157 struct pciide_channel *cp = (struct pciide_channel*)chp;
3158 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3159
3160 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
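	/*
	 * HPT_CSEL holds the cable detection bits; a set CBLID bit for a
	 * channel apparently means a 40-conductor cable, so UDMA is
	 * capped at mode 2 for that channel below.
	 */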
3161
3162 /* setup DMA if needed */
3163 pciide_channel_dma_setup(cp);
3164
3165 idedma_ctl = 0;
3166
3167 /* Per drive settings */
3168 for (drive = 0; drive < 2; drive++) {
3169 drvp = &chp->ch_drive[drive];
3170 /* If no drive, skip */
3171 if ((drvp->drive_flags & DRIVE) == 0)
3172 continue;
3173 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3174 HPT_IDETIM(chp->channel, drive));
3175
3176 /* add timing values, setup DMA if needed */
3177 if (drvp->drive_flags & DRIVE_UDMA) {
3178 /* use Ultra/DMA */
3179 drvp->drive_flags &= ~DRIVE_DMA;
3180 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3181 drvp->UDMA_mode > 2)
3182 drvp->UDMA_mode = 2;
3183 after = (sc->sc_wdcdev.nchannels == 2) ?
3184 hpt370_udma[drvp->UDMA_mode] :
3185 hpt366_udma[drvp->UDMA_mode];
3186 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3187 } else if (drvp->drive_flags & DRIVE_DMA) {
3188 /*
3189 * use Multiword DMA.
3190 * Timings will be used for both PIO and DMA, so adjust
3191 * DMA mode if needed
3192 */
3193 if (drvp->PIO_mode >= 3 &&
3194 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3195 drvp->DMA_mode = drvp->PIO_mode - 2;
3196 }
3197 after = (sc->sc_wdcdev.nchannels == 2) ?
3198 hpt370_dma[drvp->DMA_mode] :
3199 hpt366_dma[drvp->DMA_mode];
3200 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3201 } else {
3202 /* PIO only */
3203 after = (sc->sc_wdcdev.nchannels == 2) ?
3204 hpt370_pio[drvp->PIO_mode] :
3205 hpt366_pio[drvp->PIO_mode];
3206 }
3207 pci_conf_write(sc->sc_pc, sc->sc_tag,
3208 HPT_IDETIM(chp->channel, drive), after);
3209 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3210 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3211 after, before), DEBUG_PROBE);
3212 }
3213 if (idedma_ctl != 0) {
3214 /* Add software bits in status register */
3215 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3216 IDEDMA_CTL, idedma_ctl);
3217 }
3218 pciide_print_modes(cp);
3219 }
3220
3221 int
3222 hpt_pci_intr(arg)
3223 void *arg;
3224 {
3225 struct pciide_softc *sc = arg;
3226 struct pciide_channel *cp;
3227 struct channel_softc *wdc_cp;
3228 int rv = 0;
3229 int dmastat, i, crv;
3230
3231 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3232 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3233 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3234 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3235 continue;
3236 cp = &sc->pciide_channels[i];
3237 wdc_cp = &cp->wdc_channel;
3238 crv = wdcintr(wdc_cp);
3239 if (crv == 0) {
3240 printf("%s:%d: bogus intr\n",
3241 sc->sc_wdcdev.sc_dev.dv_xname, i);
3242 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3243 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3244 } else
3245 rv = 1;
3246 }
3247 return rv;
3248 }
3249
3250
3251 /* Macros to test product */
3252 #define PDC_IS_262(sc) \
3253 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3254 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3255 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3256 #define PDC_IS_265(sc) \
3257 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3258 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3259
3260 void
3261 pdc202xx_chip_map(sc, pa)
3262 struct pciide_softc *sc;
3263 struct pci_attach_args *pa;
3264 {
3265 struct pciide_channel *cp;
3266 int channel;
3267 pcireg_t interface, st, mode;
3268 bus_size_t cmdsize, ctlsize;
3269
3270 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3271 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
3272 DEBUG_PROBE);
3273 if (pciide_chipen(sc, pa) == 0)
3274 return;
3275
3276 /* turn off RAID mode */
3277 st &= ~PDC2xx_STATE_IDERAID;
3278
3279 	/*
3280 	 * We can't rely on the PCI_CLASS_REG content if the chip was in
3281 	 * RAID mode. We have to fake the interface.
3282 	 */
3283 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3284 if (st & PDC2xx_STATE_NATIVE)
3285 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3286
3287 printf("%s: bus-master DMA support present",
3288 sc->sc_wdcdev.sc_dev.dv_xname);
3289 pciide_mapreg_dma(sc, pa);
3290 printf("\n");
3291 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3292 WDC_CAPABILITY_MODE;
3293 if (sc->sc_dma_ok) {
3294 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3295 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3296 sc->sc_wdcdev.irqack = pciide_irqack;
3297 }
3298 sc->sc_wdcdev.PIO_cap = 4;
3299 sc->sc_wdcdev.DMA_cap = 2;
3300 if (PDC_IS_265(sc))
3301 sc->sc_wdcdev.UDMA_cap = 5;
3302 else if (PDC_IS_262(sc))
3303 sc->sc_wdcdev.UDMA_cap = 4;
3304 else
3305 sc->sc_wdcdev.UDMA_cap = 2;
3306 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3307 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3308 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3309
3310 	/* set up failsafe defaults (slowest timings, table index 0) */
3311 mode = 0;
3312 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3313 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3314 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3315 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3316 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3317 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3318 "initial timings 0x%x, now 0x%x\n", channel,
3319 pci_conf_read(sc->sc_pc, sc->sc_tag,
3320 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3321 DEBUG_PROBE);
3322 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3323 mode | PDC2xx_TIM_IORDYp);
3324 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3325 "initial timings 0x%x, now 0x%x\n", channel,
3326 pci_conf_read(sc->sc_pc, sc->sc_tag,
3327 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3328 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3329 mode);
3330 }
3331
3332 mode = PDC2xx_SCR_DMA;
3333 if (PDC_IS_262(sc)) {
3334 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3335 } else {
3336 /* the BIOS set it up this way */
3337 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3338 }
3339 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3340 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3341 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
3342 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3343 DEBUG_PROBE);
3344 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3345
3346 /* controller initial state register is OK even without BIOS */
3347 /* Set DMA mode to IDE DMA compatibility */
3348 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3349 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
3350 DEBUG_PROBE);
3351 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3352 mode | 0x1);
3353 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3354 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3355 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3356 mode | 0x1);
3357
3358 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3359 cp = &sc->pciide_channels[channel];
3360 if (pciide_chansetup(sc, channel, interface) == 0)
3361 continue;
3362 if ((st & (PDC_IS_262(sc) ?
3363 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3364 printf("%s: %s channel ignored (disabled)\n",
3365 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3366 continue;
3367 }
3368 if (PDC_IS_265(sc))
3369 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3370 pdc20265_pci_intr);
3371 else
3372 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3373 pdc202xx_pci_intr);
3374 if (cp->hw_ok == 0)
3375 continue;
3376 if (pciide_chan_candisable(cp))
3377 st &= ~(PDC_IS_262(sc) ?
3378 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3379 pciide_map_compat_intr(pa, cp, channel, interface);
3380 pdc202xx_setup_channel(&cp->wdc_channel);
3381 }
3382 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3383 DEBUG_PROBE);
3384 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3385 return;
3386 }
3387
3388 void
3389 pdc202xx_setup_channel(chp)
3390 struct channel_softc *chp;
3391 {
3392 struct ata_drive_datas *drvp;
3393 int drive;
3394 pcireg_t mode, st;
3395 u_int32_t idedma_ctl, scr, atapi;
3396 struct pciide_channel *cp = (struct pciide_channel*)chp;
3397 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3398 int channel = chp->channel;
3399
3400 /* setup DMA if needed */
3401 pciide_channel_dma_setup(cp);
3402
3403 idedma_ctl = 0;
3404 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3405 sc->sc_wdcdev.sc_dev.dv_xname,
3406 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3407 DEBUG_PROBE);
3408
3409 /* Per channel settings */
3410 if (PDC_IS_262(sc)) {
3411 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3412 PDC262_U66);
3413 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
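		/*
		 * PDC262_STATE_80P() is used here as the cable indication:
		 * when it is set for this channel, or when either drive has
		 * already settled on UDMA 2 or below, both drives are
		 * trimmed to UDMA 2 and the 66 MHz clock stays disabled.
		 */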
3414 		/* Trim the UDMA mode */
3415 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3416 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3417 chp->ch_drive[0].UDMA_mode <= 2) ||
3418 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3419 chp->ch_drive[1].UDMA_mode <= 2)) {
3420 if (chp->ch_drive[0].UDMA_mode > 2)
3421 chp->ch_drive[0].UDMA_mode = 2;
3422 if (chp->ch_drive[1].UDMA_mode > 2)
3423 chp->ch_drive[1].UDMA_mode = 2;
3424 }
3425 /* Set U66 if needed */
3426 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3427 chp->ch_drive[0].UDMA_mode > 2) ||
3428 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3429 chp->ch_drive[1].UDMA_mode > 2))
3430 scr |= PDC262_U66_EN(channel);
3431 else
3432 scr &= ~PDC262_U66_EN(channel);
3433 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3434 PDC262_U66, scr);
3435 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3436 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3437 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3438 PDC262_ATAPI(channel))), DEBUG_PROBE);
3439 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3440 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3441 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3442 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3443 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3444 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3445 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3446 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3447 atapi = 0;
3448 else
3449 atapi = PDC262_ATAPI_UDMA;
3450 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3451 PDC262_ATAPI(channel), atapi);
3452 }
3453 }
3454 for (drive = 0; drive < 2; drive++) {
3455 drvp = &chp->ch_drive[drive];
3456 /* If no drive, skip */
3457 if ((drvp->drive_flags & DRIVE) == 0)
3458 continue;
3459 mode = 0;
3460 if (drvp->drive_flags & DRIVE_UDMA) {
3461 /* use Ultra/DMA */
3462 drvp->drive_flags &= ~DRIVE_DMA;
3463 mode = PDC2xx_TIM_SET_MB(mode,
3464 pdc2xx_udma_mb[drvp->UDMA_mode]);
3465 mode = PDC2xx_TIM_SET_MC(mode,
3466 pdc2xx_udma_mc[drvp->UDMA_mode]);
3467 drvp->drive_flags &= ~DRIVE_DMA;
3468 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3469 } else if (drvp->drive_flags & DRIVE_DMA) {
3470 mode = PDC2xx_TIM_SET_MB(mode,
3471 pdc2xx_dma_mb[drvp->DMA_mode]);
3472 mode = PDC2xx_TIM_SET_MC(mode,
3473 pdc2xx_dma_mc[drvp->DMA_mode]);
3474 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3475 } else {
3476 mode = PDC2xx_TIM_SET_MB(mode,
3477 pdc2xx_dma_mb[0]);
3478 mode = PDC2xx_TIM_SET_MC(mode,
3479 pdc2xx_dma_mc[0]);
3480 }
3481 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3482 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3483 if (drvp->drive_flags & DRIVE_ATA)
3484 mode |= PDC2xx_TIM_PRE;
3485 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
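		/* PIO modes 3 and up use IORDY flow control */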
3486 if (drvp->PIO_mode >= 3) {
3487 mode |= PDC2xx_TIM_IORDY;
3488 if (drive == 0)
3489 mode |= PDC2xx_TIM_IORDYp;
3490 }
3491 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3492 "timings 0x%x\n",
3493 sc->sc_wdcdev.sc_dev.dv_xname,
3494 chp->channel, drive, mode), DEBUG_PROBE);
3495 pci_conf_write(sc->sc_pc, sc->sc_tag,
3496 PDC2xx_TIM(chp->channel, drive), mode);
3497 }
3498 if (idedma_ctl != 0) {
3499 /* Add software bits in status register */
3500 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3501 IDEDMA_CTL, idedma_ctl);
3502 }
3503 pciide_print_modes(cp);
3504 }
3505
3506 int
3507 pdc202xx_pci_intr(arg)
3508 void *arg;
3509 {
3510 struct pciide_softc *sc = arg;
3511 struct pciide_channel *cp;
3512 struct channel_softc *wdc_cp;
3513 int i, rv, crv;
3514 u_int32_t scr;
3515
3516 rv = 0;
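	/*
	 * A single read of the SCR register gives the interrupt status
	 * bits of all channels; check each channel's bit below.
	 */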
3517 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3518 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3519 cp = &sc->pciide_channels[i];
3520 wdc_cp = &cp->wdc_channel;
		/* If a compat channel, skip. */
3522 if (cp->compat)
3523 continue;
3524 if (scr & PDC2xx_SCR_INT(i)) {
3525 crv = wdcintr(wdc_cp);
3526 if (crv == 0)
3527 printf("%s:%d: bogus intr (reg 0x%x)\n",
3528 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3529 else
3530 rv = 1;
3531 }
3532 }
3533 return rv;
3534 }
3535
3536 int
3537 pdc20265_pci_intr(arg)
3538 void *arg;
3539 {
3540 struct pciide_softc *sc = arg;
3541 struct pciide_channel *cp;
3542 struct channel_softc *wdc_cp;
3543 int i, rv, crv;
3544 u_int32_t dmastat;
3545
3546 rv = 0;
3547 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3548 cp = &sc->pciide_channels[i];
3549 wdc_cp = &cp->wdc_channel;
		/* If a compat channel, skip. */
3551 if (cp->compat)
3552 continue;
		/*
		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously,
		 * but it does assert INT in IDEDMA_CTL even for non-DMA
		 * operations, so use that instead (it takes two register
		 * reads instead of one, but there is no other way to do it).
		 */
3559 dmastat = bus_space_read_1(sc->sc_dma_iot,
3560 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3561 if((dmastat & IDEDMA_CTL_INTR) == 0)
3562 continue;
3563 crv = wdcintr(wdc_cp);
3564 if (crv == 0)
3565 printf("%s:%d: bogus intr\n",
3566 sc->sc_wdcdev.sc_dev.dv_xname, i);
3567 else
3568 rv = 1;
3569 }
3570 return rv;
3571 }
3572
3573 void
3574 opti_chip_map(sc, pa)
3575 struct pciide_softc *sc;
3576 struct pci_attach_args *pa;
3577 {
3578 struct pciide_channel *cp;
3579 bus_size_t cmdsize, ctlsize;
3580 pcireg_t interface;
3581 u_int8_t init_ctrl;
3582 int channel;
3583
3584 if (pciide_chipen(sc, pa) == 0)
3585 return;
3586 printf("%s: bus-master DMA support present",
3587 sc->sc_wdcdev.sc_dev.dv_xname);
3588
3589 /*
3590 * XXXSCW:
3591 * There seem to be a couple of buggy revisions/implementations
3592 * of the OPTi pciide chipset. This kludge seems to fix one of
3593 * the reported problems (PR/11644) but still fails for the
3594 * other (PR/13151), although the latter may be due to other
3595 * issues too...
3596 */
3597 if (PCI_REVISION(pa->pa_class) <= 0x12) {
3598 printf(" but disabled due to chip rev. <= 0x12");
3599 sc->sc_dma_ok = 0;
3600 sc->sc_wdcdev.cap = 0;
3601 } else {
3602 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32;
3603 pciide_mapreg_dma(sc, pa);
3604 }
3605 printf("\n");
3606
3607 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
3608 sc->sc_wdcdev.PIO_cap = 4;
3609 if (sc->sc_dma_ok) {
3610 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3611 sc->sc_wdcdev.irqack = pciide_irqack;
3612 sc->sc_wdcdev.DMA_cap = 2;
3613 }
3614 sc->sc_wdcdev.set_modes = opti_setup_channel;
3615
3616 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3617 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3618
	/*
	 * Read the initialization control register; it records whether
	 * the firmware has disabled the second channel.
	 */
	init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
	    OPTI_REG_INIT_CONTROL);
3621
3622 interface = PCI_INTERFACE(pa->pa_class);
3623
3624 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3625 cp = &sc->pciide_channels[channel];
3626 if (pciide_chansetup(sc, channel, interface) == 0)
3627 continue;
3628 if (channel == 1 &&
3629 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3630 printf("%s: %s channel ignored (disabled)\n",
3631 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3632 continue;
3633 }
3634 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3635 pciide_pci_intr);
3636 if (cp->hw_ok == 0)
3637 continue;
3638 pciide_map_compat_intr(pa, cp, channel, interface);
3639 if (cp->hw_ok == 0)
3640 continue;
3641 opti_setup_channel(&cp->wdc_channel);
3642 }
3643 }
3644
3645 void
3646 opti_setup_channel(chp)
3647 struct channel_softc *chp;
3648 {
3649 struct ata_drive_datas *drvp;
3650 struct pciide_channel *cp = (struct pciide_channel*)chp;
3651 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3652 int drive, spd;
3653 int mode[2];
3654 u_int8_t rv, mr;
3655
	/*
	 * Start with the `Delay', `Address Setup Time' and index fields
	 * of the Miscellaneous Register cleared; the address setup time
	 * and index are programmed per drive below.
	 */
	mr = opti_read_config(chp, OPTI_REG_MISC) &
	    ~(OPTI_MISC_DELAY_MASK |
	      OPTI_MISC_ADDR_SETUP_MASK |
	      OPTI_MISC_INDEX_MASK);
3664
3665 /* Prime the control register before setting timing values */
3666 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3667
	/* Determine the clock rate of the PCI bus the chip is attached to */
3669 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3670 spd &= OPTI_STRAP_PCI_SPEED_MASK;
3671
3672 /* setup DMA if needed */
3673 pciide_channel_dma_setup(cp);
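	/*
	 * Two passes below: first compute a timing-table index for each
	 * drive, reconciling its PIO and DMA modes and the shared
	 * `Address Setup Time', then program the controller registers
	 * from the opti_tim_* tables.
	 */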
3674
3675 for (drive = 0; drive < 2; drive++) {
3676 drvp = &chp->ch_drive[drive];
3677 /* If no drive, skip */
3678 if ((drvp->drive_flags & DRIVE) == 0) {
3679 mode[drive] = -1;
3680 continue;
3681 }
3682
3683 if ((drvp->drive_flags & DRIVE_DMA)) {
3684 /*
3685 * Timings will be used for both PIO and DMA,
3686 * so adjust DMA mode if needed
3687 */
3688 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3689 drvp->PIO_mode = drvp->DMA_mode + 2;
3690 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3691 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3692 drvp->PIO_mode - 2 : 0;
3693 if (drvp->DMA_mode == 0)
3694 drvp->PIO_mode = 0;
3695
3696 mode[drive] = drvp->DMA_mode + 5;
3697 } else
3698 mode[drive] = drvp->PIO_mode;
3699
3700 if (drive && mode[0] >= 0 &&
3701 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3702 /*
3703 * Can't have two drives using different values
3704 * for `Address Setup Time'.
3705 * Slow down the faster drive to compensate.
3706 */
3707 int d = (opti_tim_as[spd][mode[0]] >
3708 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3709
3710 mode[d] = mode[1-d];
3711 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3712 chp->ch_drive[d].DMA_mode = 0;
			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3714 }
3715 }
3716
3717 for (drive = 0; drive < 2; drive++) {
3718 int m;
3719 if ((m = mode[drive]) < 0)
3720 continue;
3721
3722 /* Set the Address Setup Time and select appropriate index */
3723 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3724 rv |= OPTI_MISC_INDEX(drive);
3725 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3726
3727 /* Set the pulse width and recovery timing parameters */
3728 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3729 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3730 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3731 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3732
3733 /* Set the Enhanced Mode register appropriately */
3734 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3735 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3736 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3737 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3738 }
3739
3740 /* Finally, enable the timings */
3741 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3742
3743 pciide_print_modes(cp);
3744 }
3745
/* the ATP850 has a different register layout from the later ATP86x parts */
#define ACARD_IS_850(sc) \
	((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
3748
3749 void
3750 acard_chip_map(sc, pa)
3751 struct pciide_softc *sc;
3752 struct pci_attach_args *pa;
3753 {
3754 struct pciide_channel *cp;
3755 int i;
3756 pcireg_t interface;
3757 bus_size_t cmdsize, ctlsize;
3758
3759 if (pciide_chipen(sc, pa) == 0)
3760 return;
3761
	/*
	 * When the chip is in native mode it identifies itself as a
	 * `misc mass storage' device rather than IDE, so fake the
	 * programming interface in that case.
	 */
3766 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3767 interface = PCI_INTERFACE(pa->pa_class);
3768 } else {
3769 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3770 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3771 }
3772
3773 printf("%s: bus-master DMA support present",
3774 sc->sc_wdcdev.sc_dev.dv_xname);
3775 pciide_mapreg_dma(sc, pa);
3776 printf("\n");
3777 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3778 WDC_CAPABILITY_MODE;
3779
3780 if (sc->sc_dma_ok) {
3781 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3782 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3783 sc->sc_wdcdev.irqack = pciide_irqack;
3784 }
3785 sc->sc_wdcdev.PIO_cap = 4;
3786 sc->sc_wdcdev.DMA_cap = 2;
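	/* the ATP850 tops out at Ultra/DMA mode 2; later parts do mode 4 */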
3787 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
3788
3789 sc->sc_wdcdev.set_modes = acard_setup_channel;
3790 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3791 sc->sc_wdcdev.nchannels = 2;
3792
3793 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3794 cp = &sc->pciide_channels[i];
3795 if (pciide_chansetup(sc, i, interface) == 0)
3796 continue;
3797 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3798 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3799 &ctlsize, pciide_pci_intr);
3800 } else {
3801 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
3802 &cmdsize, &ctlsize);
3803 }
3804 if (cp->hw_ok == 0)
3805 return;
3806 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3807 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3808 wdcattach(&cp->wdc_channel);
3809 acard_setup_channel(&cp->wdc_channel);
3810 }
3811 if (!ACARD_IS_850(sc)) {
3812 u_int32_t reg;
3813 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
3814 reg &= ~ATP860_CTRL_INT;
3815 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
3816 }
3817 }
3818
3819 void
3820 acard_setup_channel(chp)
3821 struct channel_softc *chp;
3822 {
3823 struct ata_drive_datas *drvp;
3824 struct pciide_channel *cp = (struct pciide_channel*)chp;
3825 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3826 int channel = chp->channel;
3827 int drive;
3828 u_int32_t idetime, udma_mode;
3829 u_int32_t idedma_ctl;
3830
3831 /* setup DMA if needed */
3832 pciide_channel_dma_setup(cp);
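	/*
	 * The ATP850 and the later ATP86x parts lay out their timing and
	 * Ultra/DMA registers differently, hence the two cases below.
	 */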
3833
3834 if (ACARD_IS_850(sc)) {
3835 idetime = 0;
3836 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
3837 udma_mode &= ~ATP850_UDMA_MASK(channel);
3838 } else {
3839 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
3840 idetime &= ~ATP860_SETTIME_MASK(channel);
3841 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
3842 udma_mode &= ~ATP860_UDMA_MASK(channel);
3843
		/* check for an 80-conductor cable */
3845 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
3846 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
3847 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
3848 & ATP860_CTRL_80P(chp->channel)) {
3849 if (chp->ch_drive[0].UDMA_mode > 2)
3850 chp->ch_drive[0].UDMA_mode = 2;
3851 if (chp->ch_drive[1].UDMA_mode > 2)
3852 chp->ch_drive[1].UDMA_mode = 2;
3853 }
3854 }
3855 }
3856
3857 idedma_ctl = 0;
3858
3859 /* Per drive settings */
3860 for (drive = 0; drive < 2; drive++) {
3861 drvp = &chp->ch_drive[drive];
3862 /* If no drive, skip */
3863 if ((drvp->drive_flags & DRIVE) == 0)
3864 continue;
3865 /* add timing values, setup DMA if needed */
3866 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
3867 (drvp->drive_flags & DRIVE_UDMA)) {
3868 /* use Ultra/DMA */
3869 if (ACARD_IS_850(sc)) {
3870 idetime |= ATP850_SETTIME(drive,
3871 acard_act_udma[drvp->UDMA_mode],
3872 acard_rec_udma[drvp->UDMA_mode]);
3873 udma_mode |= ATP850_UDMA_MODE(channel, drive,
3874 acard_udma_conf[drvp->UDMA_mode]);
3875 } else {
3876 idetime |= ATP860_SETTIME(channel, drive,
3877 acard_act_udma[drvp->UDMA_mode],
3878 acard_rec_udma[drvp->UDMA_mode]);
3879 udma_mode |= ATP860_UDMA_MODE(channel, drive,
3880 acard_udma_conf[drvp->UDMA_mode]);
3881 }
3882 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3883 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
3884 (drvp->drive_flags & DRIVE_DMA)) {
3885 /* use Multiword DMA */
3886 drvp->drive_flags &= ~DRIVE_UDMA;
3887 if (ACARD_IS_850(sc)) {
3888 idetime |= ATP850_SETTIME(drive,
3889 acard_act_dma[drvp->DMA_mode],
3890 acard_rec_dma[drvp->DMA_mode]);
3891 } else {
3892 idetime |= ATP860_SETTIME(channel, drive,
3893 acard_act_dma[drvp->DMA_mode],
3894 acard_rec_dma[drvp->DMA_mode]);
3895 }
3896 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3897 } else {
3898 /* PIO only */
3899 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
3900 if (ACARD_IS_850(sc)) {
3901 idetime |= ATP850_SETTIME(drive,
3902 acard_act_pio[drvp->PIO_mode],
3903 acard_rec_pio[drvp->PIO_mode]);
3904 } else {
3905 idetime |= ATP860_SETTIME(channel, drive,
3906 acard_act_pio[drvp->PIO_mode],
3907 acard_rec_pio[drvp->PIO_mode]);
3908 }
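			/* make sure this channel is enabled in the control register */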
3909 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
3910 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
3911 | ATP8x0_CTRL_EN(channel));
3912 }
3913 }
3914
3915 if (idedma_ctl != 0) {
3916 /* Add software bits in status register */
3917 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3918 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
3919 }
3920 pciide_print_modes(cp);
3921
3922 if (ACARD_IS_850(sc)) {
3923 pci_conf_write(sc->sc_pc, sc->sc_tag,
3924 ATP850_IDETIME(channel), idetime);
3925 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
3926 } else {
3927 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
3928 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
3929 }
3930 }
3931
3932 int
3933 acard_pci_intr(arg)
3934 void *arg;
3935 {
3936 struct pciide_softc *sc = arg;
3937 struct pciide_channel *cp;
3938 struct channel_softc *wdc_cp;
3939 int rv = 0;
3940 int dmastat, i, crv;
3941
3942 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3943 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3944 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3945 if ((dmastat & IDEDMA_CTL_INTR) == 0)
3946 continue;
3947 cp = &sc->pciide_channels[i];
3948 wdc_cp = &cp->wdc_channel;
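		/*
		 * If no command is pending on this channel, still run the
		 * interrupt handler and explicitly acknowledge the DMA
		 * interrupt by writing the status bits back.
		 */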
3949 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
3950 (void)wdcintr(wdc_cp);
3951 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3952 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3953 continue;
3954 }
3955 crv = wdcintr(wdc_cp);
3956 if (crv == 0)
3957 printf("%s:%d: bogus intr\n",
3958 sc->sc_wdcdev.sc_dev.dv_xname, i);
3959 else if (crv == 1)
3960 rv = 1;
3961 else if (rv == 0)
3962 rv = crv;
3963 }
3964 return rv;
3965 }
3966