1 /* $NetBSD: pciide.c,v 1.135 2001/11/15 20:48:17 bouyer Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 *
34 */
35
36
37 /*
38 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
39 *
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by Christopher G. Demetriou
51 * for the NetBSD Project.
52 * 4. The name of the author may not be used to endorse or promote products
53 * derived from this software without specific prior written permission
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65 */
66
67 /*
68 * PCI IDE controller driver.
69 *
70 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71 * sys/dev/pci/ppb.c, revision 1.16).
72 *
73 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75 * 5/16/94" from the PCI SIG.
76 *
77 */
78
79 #include <sys/cdefs.h>
80 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.135 2001/11/15 20:48:17 bouyer Exp $");
81
82 #ifndef WDCDEBUG
83 #define WDCDEBUG
84 #endif
85
86 #define DEBUG_DMA 0x01
87 #define DEBUG_XFERS 0x02
88 #define DEBUG_FUNCS 0x08
89 #define DEBUG_PROBE 0x10
90 #ifdef WDCDEBUG
91 int wdcdebug_pciide_mask = 0;
92 #define WDCDEBUG_PRINT(args, level) \
93 if (wdcdebug_pciide_mask & (level)) printf args
94 #else
95 #define WDCDEBUG_PRINT(args, level)
96 #endif
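/*
 * Debug output is compiled in unconditionally (WDCDEBUG is forced on above),
 * but it is only emitted at run time when the corresponding DEBUG_* bits are
 * set in wdcdebug_pciide_mask, e.g. from the kernel debugger.
 */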
97 #include <sys/param.h>
98 #include <sys/systm.h>
99 #include <sys/device.h>
100 #include <sys/malloc.h>
101
102 #include <uvm/uvm_extern.h>
103
104 #include <machine/endian.h>
105
106 #include <dev/pci/pcireg.h>
107 #include <dev/pci/pcivar.h>
108 #include <dev/pci/pcidevs.h>
109 #include <dev/pci/pciidereg.h>
110 #include <dev/pci/pciidevar.h>
111 #include <dev/pci/pciide_piix_reg.h>
112 #include <dev/pci/pciide_amd_reg.h>
113 #include <dev/pci/pciide_apollo_reg.h>
114 #include <dev/pci/pciide_cmd_reg.h>
115 #include <dev/pci/pciide_cy693_reg.h>
116 #include <dev/pci/pciide_sis_reg.h>
117 #include <dev/pci/pciide_acer_reg.h>
118 #include <dev/pci/pciide_pdc202xx_reg.h>
119 #include <dev/pci/pciide_opti_reg.h>
120 #include <dev/pci/pciide_hpt_reg.h>
121 #include <dev/pci/pciide_acard_reg.h>
122 #include <dev/pci/cy82c693var.h>
123
124 #include "opt_pciide.h"
125
126 /* inlines for reading/writing 8-bit PCI registers */
127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
128 int));
129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
130 int, u_int8_t));
131
132 static __inline u_int8_t
133 pciide_pci_read(pc, pa, reg)
134 pci_chipset_tag_t pc;
135 pcitag_t pa;
136 int reg;
137 {
138
139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
140 ((reg & 0x03) * 8) & 0xff);
141 }
142
143 static __inline void
144 pciide_pci_write(pc, pa, reg, val)
145 pci_chipset_tag_t pc;
146 pcitag_t pa;
147 int reg;
148 u_int8_t val;
149 {
150 pcireg_t pcival;
151
152 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
153 pcival &= ~(0xff << ((reg & 0x03) * 8));
154 pcival |= (val << ((reg & 0x03) * 8));
155 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
156 }
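/*
 * PCI configuration space is only accessible as aligned 32-bit words through
 * pci_conf_read()/pci_conf_write(), so the byte-wide helpers above shift and
 * mask within the containing dword (read-modify-write on store).
 */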
157
158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159
160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
161 void piix_setup_channel __P((struct channel_softc*));
162 void piix3_4_setup_channel __P((struct channel_softc*));
163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
166
167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void amd7x6_setup_channel __P((struct channel_softc*));
169
170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void apollo_setup_channel __P((struct channel_softc*));
172
173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_setup_channel __P((struct channel_softc*));
176 void cmd_channel_map __P((struct pci_attach_args *,
177 struct pciide_softc *, int));
178 int cmd_pci_intr __P((void *));
179 void cmd646_9_irqack __P((struct channel_softc *));
180
181 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cy693_setup_channel __P((struct channel_softc*));
183
184 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
185 void sis_setup_channel __P((struct channel_softc*));
186 static int sis_hostbr_match __P(( struct pci_attach_args *));
187
188 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void acer_setup_channel __P((struct channel_softc*));
190 int acer_pci_intr __P((void *));
191 static int acer_isabr_match __P(( struct pci_attach_args *));
192
193 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
194 void pdc202xx_setup_channel __P((struct channel_softc*));
195 int pdc202xx_pci_intr __P((void *));
196 int pdc20265_pci_intr __P((void *));
197
198 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
199 void opti_setup_channel __P((struct channel_softc*));
200
201 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
202 void hpt_setup_channel __P((struct channel_softc*));
203 int hpt_pci_intr __P((void *));
204
205 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void acard_setup_channel __P((struct channel_softc*));
207 int acard_pci_intr __P((void *));
208
209 #ifdef PCIIDE_WINBOND_ENABLE
210 void winbond_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
211 #endif
212
213 void pciide_channel_dma_setup __P((struct pciide_channel *));
214 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
215 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
216 void pciide_dma_start __P((void*, int, int));
217 int pciide_dma_finish __P((void*, int, int, int));
218 void pciide_irqack __P((struct channel_softc *));
219 void pciide_print_modes __P((struct pciide_channel *));
220
221 struct pciide_product_desc {
222 u_int32_t ide_product;
223 int ide_flags;
224 const char *ide_name;
225 /* map and setup chip, probe drives */
226 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
227 };
228
229 /* Flags for ide_flags */
230 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
231 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
232
233 /* Default product description for devices not specifically known to this driver */
234 const struct pciide_product_desc default_product_desc = {
235 0,
236 0,
237 "Generic PCI IDE controller",
238 default_chip_map,
239 };
240
241 const struct pciide_product_desc pciide_intel_products[] = {
242 { PCI_PRODUCT_INTEL_82092AA,
243 0,
244 "Intel 82092AA IDE controller",
245 default_chip_map,
246 },
247 { PCI_PRODUCT_INTEL_82371FB_IDE,
248 0,
249 "Intel 82371FB IDE controller (PIIX)",
250 piix_chip_map,
251 },
252 { PCI_PRODUCT_INTEL_82371SB_IDE,
253 0,
254 "Intel 82371SB IDE Interface (PIIX3)",
255 piix_chip_map,
256 },
257 { PCI_PRODUCT_INTEL_82371AB_IDE,
258 0,
259 "Intel 82371AB IDE controller (PIIX4)",
260 piix_chip_map,
261 },
262 { PCI_PRODUCT_INTEL_82440MX_IDE,
263 0,
264 "Intel 82440MX IDE controller",
265 piix_chip_map
266 },
267 { PCI_PRODUCT_INTEL_82801AA_IDE,
268 0,
269 "Intel 82801AA IDE Controller (ICH)",
270 piix_chip_map,
271 },
272 { PCI_PRODUCT_INTEL_82801AB_IDE,
273 0,
274 "Intel 82801AB IDE Controller (ICH0)",
275 piix_chip_map,
276 },
277 { PCI_PRODUCT_INTEL_82801BA_IDE,
278 0,
279 "Intel 82801BA IDE Controller (ICH2)",
280 piix_chip_map,
281 },
282 { PCI_PRODUCT_INTEL_82801BAM_IDE,
283 0,
284 "Intel 82801BAM IDE Controller (ICH2)",
285 piix_chip_map,
286 },
287 { 0,
288 0,
289 NULL,
290 NULL
291 }
292 };
293
294 const struct pciide_product_desc pciide_amd_products[] = {
295 { PCI_PRODUCT_AMD_PBC756_IDE,
296 0,
297 "Advanced Micro Devices AMD756 IDE Controller",
298 amd7x6_chip_map
299 },
300 { PCI_PRODUCT_AMD_PBC766_IDE,
301 0,
302 "Advanced Micro Devices AMD766 IDE Controller",
303 amd7x6_chip_map
304 },
305 { 0,
306 0,
307 NULL,
308 NULL
309 }
310 };
311
312 const struct pciide_product_desc pciide_cmd_products[] = {
313 { PCI_PRODUCT_CMDTECH_640,
314 0,
315 "CMD Technology PCI0640",
316 cmd_chip_map
317 },
318 { PCI_PRODUCT_CMDTECH_643,
319 0,
320 "CMD Technology PCI0643",
321 cmd0643_9_chip_map,
322 },
323 { PCI_PRODUCT_CMDTECH_646,
324 0,
325 "CMD Technology PCI0646",
326 cmd0643_9_chip_map,
327 },
328 { PCI_PRODUCT_CMDTECH_648,
329 IDE_PCI_CLASS_OVERRIDE,
330 "CMD Technology PCI0648",
331 cmd0643_9_chip_map,
332 },
333 { PCI_PRODUCT_CMDTECH_649,
334 IDE_PCI_CLASS_OVERRIDE,
335 "CMD Technology PCI0649",
336 cmd0643_9_chip_map,
337 },
338 { 0,
339 0,
340 NULL,
341 NULL
342 }
343 };
344
345 const struct pciide_product_desc pciide_via_products[] = {
346 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
347 0,
348 NULL,
349 apollo_chip_map,
350 },
351 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
352 0,
353 NULL,
354 apollo_chip_map,
355 },
356 { 0,
357 0,
358 NULL,
359 NULL
360 }
361 };
362
363 const struct pciide_product_desc pciide_cypress_products[] = {
364 { PCI_PRODUCT_CONTAQ_82C693,
365 IDE_16BIT_IOSPACE,
366 "Cypress 82C693 IDE Controller",
367 cy693_chip_map,
368 },
369 { 0,
370 0,
371 NULL,
372 NULL
373 }
374 };
375
376 const struct pciide_product_desc pciide_sis_products[] = {
377 { PCI_PRODUCT_SIS_5597_IDE,
378 0,
379 "Silicon Integrated System 5597/5598 IDE controller",
380 sis_chip_map,
381 },
382 { 0,
383 0,
384 NULL,
385 NULL
386 }
387 };
388
389 const struct pciide_product_desc pciide_acer_products[] = {
390 { PCI_PRODUCT_ALI_M5229,
391 0,
392 "Acer Labs M5229 UDMA IDE Controller",
393 acer_chip_map,
394 },
395 { 0,
396 0,
397 NULL,
398 NULL
399 }
400 };
401
402 const struct pciide_product_desc pciide_promise_products[] = {
403 { PCI_PRODUCT_PROMISE_ULTRA33,
404 IDE_PCI_CLASS_OVERRIDE,
405 "Promise Ultra33/ATA Bus Master IDE Accelerator",
406 pdc202xx_chip_map,
407 },
408 { PCI_PRODUCT_PROMISE_ULTRA66,
409 IDE_PCI_CLASS_OVERRIDE,
410 "Promise Ultra66/ATA Bus Master IDE Accelerator",
411 pdc202xx_chip_map,
412 },
413 { PCI_PRODUCT_PROMISE_ULTRA100,
414 IDE_PCI_CLASS_OVERRIDE,
415 "Promise Ultra100/ATA Bus Master IDE Accelerator",
416 pdc202xx_chip_map,
417 },
418 { PCI_PRODUCT_PROMISE_ULTRA100X,
419 IDE_PCI_CLASS_OVERRIDE,
420 "Promise Ultra100/ATA Bus Master IDE Accelerator",
421 pdc202xx_chip_map,
422 },
423 { 0,
424 0,
425 NULL,
426 NULL
427 }
428 };
429
430 const struct pciide_product_desc pciide_opti_products[] = {
431 { PCI_PRODUCT_OPTI_82C621,
432 0,
433 "OPTi 82c621 PCI IDE controller",
434 opti_chip_map,
435 },
436 { PCI_PRODUCT_OPTI_82C568,
437 0,
438 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
439 opti_chip_map,
440 },
441 { PCI_PRODUCT_OPTI_82D568,
442 0,
443 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
444 opti_chip_map,
445 },
446 { 0,
447 0,
448 NULL,
449 NULL
450 }
451 };
452
453 const struct pciide_product_desc pciide_triones_products[] = {
454 { PCI_PRODUCT_TRIONES_HPT366,
455 IDE_PCI_CLASS_OVERRIDE,
456 NULL,
457 hpt_chip_map,
458 },
459 { 0,
460 0,
461 NULL,
462 NULL
463 }
464 };
465
466 const struct pciide_product_desc pciide_acard_products[] = {
467 { PCI_PRODUCT_ACARD_ATP850U,
468 IDE_PCI_CLASS_OVERRIDE,
469 "Acard ATP850U Ultra33 IDE Controller",
470 acard_chip_map,
471 },
472 { PCI_PRODUCT_ACARD_ATP860,
473 IDE_PCI_CLASS_OVERRIDE,
474 "Acard ATP860 Ultra66 IDE Controller",
475 acard_chip_map,
476 },
477 { PCI_PRODUCT_ACARD_ATP860A,
478 IDE_PCI_CLASS_OVERRIDE,
479 "Acard ATP860-A Ultra66 IDE Controller",
480 acard_chip_map,
481 },
482 { 0,
483 0,
484 NULL,
485 NULL
486 }
487 };
488
489 #ifdef PCIIDE_SERVERWORKS_ENABLE
490 const struct pciide_product_desc pciide_serverworks_products[] = {
491 { PCI_PRODUCT_SERVERWORKS_IDE,
492 0,
493 "ServerWorks ROSB4 IDE Controller",
494 piix_chip_map,
495 },
496 { 0,
497 0,
498 NULL,
499 }
500 };
501 #endif
502
503 #ifdef PCIIDE_WINBOND_ENABLE
504 const struct pciide_product_desc pciide_winbond_products[] = {
505 { PCI_PRODUCT_WINBOND_W83C553F_1,
506 0,
507 "Winbond W83C553F IDE controller",
508 winbond_chip_map,
509 },
510 { 0,
511 0,
512 NULL,
513 }
514 };
515 #endif
516
517 struct pciide_vendor_desc {
518 u_int32_t ide_vendor;
519 const struct pciide_product_desc *ide_products;
520 };
521
522 const struct pciide_vendor_desc pciide_vendors[] = {
523 { PCI_VENDOR_INTEL, pciide_intel_products },
524 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
525 { PCI_VENDOR_VIATECH, pciide_via_products },
526 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
527 { PCI_VENDOR_SIS, pciide_sis_products },
528 { PCI_VENDOR_ALI, pciide_acer_products },
529 { PCI_VENDOR_PROMISE, pciide_promise_products },
530 { PCI_VENDOR_AMD, pciide_amd_products },
531 { PCI_VENDOR_OPTI, pciide_opti_products },
532 { PCI_VENDOR_TRIONES, pciide_triones_products },
533 { PCI_VENDOR_ACARD, pciide_acard_products },
534 #ifdef PCIIDE_SERVERWORKS_ENABLE
535 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
536 #endif
537 #ifdef PCIIDE_WINBOND_ENABLE
538 { PCI_VENDOR_WINBOND, pciide_winbond_products },
539 #endif
540 { 0, NULL }
541 };
542
543 /* options passed via the 'flags' config keyword */
544 #define PCIIDE_OPTIONS_DMA 0x01
545 #define PCIIDE_OPTIONS_NODMA 0x02
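/*
 * These bits come from the cf_flags member of the device's config entry,
 * i.e. the "flags" locator in the kernel config file; for example a line
 * such as "pciide* at pci? dev ? function ? flags 0x0002" (illustrative
 * syntax) would force DMA off even if the controller supports it.
 */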
546
547 int pciide_match __P((struct device *, struct cfdata *, void *));
548 void pciide_attach __P((struct device *, struct device *, void *));
549
550 struct cfattach pciide_ca = {
551 sizeof(struct pciide_softc), pciide_match, pciide_attach
552 };
553 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
554 int pciide_mapregs_compat __P(( struct pci_attach_args *,
555 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
556 int pciide_mapregs_native __P((struct pci_attach_args *,
557 struct pciide_channel *, bus_size_t *, bus_size_t *,
558 int (*pci_intr) __P((void *))));
559 void pciide_mapreg_dma __P((struct pciide_softc *,
560 struct pci_attach_args *));
561 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
562 void pciide_mapchan __P((struct pci_attach_args *,
563 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
564 int (*pci_intr) __P((void *))));
565 int pciide_chan_candisable __P((struct pciide_channel *));
566 void pciide_map_compat_intr __P(( struct pci_attach_args *,
567 struct pciide_channel *, int, int));
568 int pciide_compat_intr __P((void *));
569 int pciide_pci_intr __P((void *));
570 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
571
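/*
 * Look up a PCI id in the vendor/product tables above: first find the
 * vendor's product list (the vendor table is terminated by a NULL
 * ide_products pointer), then scan that list for the product (product lists
 * are terminated by an entry with a NULL chip_map).  Returns NULL if the
 * device is not known to this driver.
 */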
572 const struct pciide_product_desc *
573 pciide_lookup_product(id)
574 u_int32_t id;
575 {
576 const struct pciide_product_desc *pp;
577 const struct pciide_vendor_desc *vp;
578
579 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
580 if (PCI_VENDOR(id) == vp->ide_vendor)
581 break;
582
583 if ((pp = vp->ide_products) == NULL)
584 return NULL;
585
586 for (; pp->chip_map != NULL; pp++)
587 if (PCI_PRODUCT(id) == pp->ide_product)
588 break;
589
590 if (pp->chip_map == NULL)
591 return NULL;
592 return pp;
593 }
594
595 int
596 pciide_match(parent, match, aux)
597 struct device *parent;
598 struct cfdata *match;
599 void *aux;
600 {
601 struct pci_attach_args *pa = aux;
602 const struct pciide_product_desc *pp;
603
604 /*
605 * Check the ID register to see that it's a PCI IDE controller.
606 * If it is, we assume that we can deal with it; it _should_
607 * work in a standardized way...
608 */
609 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
610 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
611 return (1);
612 }
613
614 /*
615 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
616 * controllers. Let's see if we can deal with them anyway.
617 */
618 pp = pciide_lookup_product(pa->pa_id);
619 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
620 return (1);
621 }
622
623 return (0);
624 }
625
626 void
627 pciide_attach(parent, self, aux)
628 struct device *parent, *self;
629 void *aux;
630 {
631 struct pci_attach_args *pa = aux;
632 pci_chipset_tag_t pc = pa->pa_pc;
633 pcitag_t tag = pa->pa_tag;
634 struct pciide_softc *sc = (struct pciide_softc *)self;
635 pcireg_t csr;
636 char devinfo[256];
637 const char *displaydev;
638
639 sc->sc_pp = pciide_lookup_product(pa->pa_id);
640 if (sc->sc_pp == NULL) {
641 sc->sc_pp = &default_product_desc;
642 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
643 displaydev = devinfo;
644 } else
645 displaydev = sc->sc_pp->ide_name;
646
647 /* if displaydev == NULL, printf is done in chip-specific map */
648 if (displaydev)
649 printf(": %s (rev. 0x%02x)\n", displaydev,
650 PCI_REVISION(pa->pa_class));
651
652 sc->sc_pc = pa->pa_pc;
653 sc->sc_tag = pa->pa_tag;
654 #ifdef WDCDEBUG
655 if (wdcdebug_pciide_mask & DEBUG_PROBE)
656 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
657 #endif
658 sc->sc_pp->chip_map(sc, pa);
659
660 if (sc->sc_dma_ok) {
661 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
662 csr |= PCI_COMMAND_MASTER_ENABLE;
663 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
664 }
665 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
666 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
667 }
668
669 /* tell whether the chip is enabled or not */
670 int
671 pciide_chipen(sc, pa)
672 struct pciide_softc *sc;
673 struct pci_attach_args *pa;
674 {
675 pcireg_t csr;
676 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
677 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
678 PCI_COMMAND_STATUS_REG);
679 printf("%s: device disabled (at %s)\n",
680 sc->sc_wdcdev.sc_dev.dv_xname,
681 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
682 "device" : "bridge");
683 return 0;
684 }
685 return 1;
686 }
687
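/*
 * Map a channel in compatibility mode: the registers live at the fixed
 * legacy ISA addresses (0x1f0/0x3f6 for the primary channel, 0x170/0x376
 * for the secondary), as supplied by the PCIIDE_COMPAT_*_BASE macros.
 */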
688 int
689 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
690 struct pci_attach_args *pa;
691 struct pciide_channel *cp;
692 int compatchan;
693 bus_size_t *cmdsizep, *ctlsizep;
694 {
695 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
696 struct channel_softc *wdc_cp = &cp->wdc_channel;
697
698 cp->compat = 1;
699 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
700 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
701
702 wdc_cp->cmd_iot = pa->pa_iot;
703 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
704 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
705 printf("%s: couldn't map %s channel cmd regs\n",
706 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
707 return (0);
708 }
709
710 wdc_cp->ctl_iot = pa->pa_iot;
711 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
712 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
713 printf("%s: couldn't map %s channel ctl regs\n",
714 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
715 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
716 PCIIDE_COMPAT_CMD_SIZE);
717 return (0);
718 }
719
720 return (1);
721 }
722
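/*
 * Map a channel in native-PCI mode: the command and control blocks come from
 * the channel's pair of PCI I/O BARs (see the PCI IDE spec cited above), and
 * all channels share the single PCI interrupt, which is established once and
 * cached in sc_pci_ih.
 */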
723 int
724 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
725 struct pci_attach_args * pa;
726 struct pciide_channel *cp;
727 bus_size_t *cmdsizep, *ctlsizep;
728 int (*pci_intr) __P((void *));
729 {
730 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
731 struct channel_softc *wdc_cp = &cp->wdc_channel;
732 const char *intrstr;
733 pci_intr_handle_t intrhandle;
734
735 cp->compat = 0;
736
737 if (sc->sc_pci_ih == NULL) {
738 if (pci_intr_map(pa, &intrhandle) != 0) {
739 printf("%s: couldn't map native-PCI interrupt\n",
740 sc->sc_wdcdev.sc_dev.dv_xname);
741 return 0;
742 }
743 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
744 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
745 intrhandle, IPL_BIO, pci_intr, sc);
746 if (sc->sc_pci_ih != NULL) {
747 printf("%s: using %s for native-PCI interrupt\n",
748 sc->sc_wdcdev.sc_dev.dv_xname,
749 intrstr ? intrstr : "unknown interrupt");
750 } else {
751 printf("%s: couldn't establish native-PCI interrupt",
752 sc->sc_wdcdev.sc_dev.dv_xname);
753 if (intrstr != NULL)
754 printf(" at %s", intrstr);
755 printf("\n");
756 return 0;
757 }
758 }
759 cp->ih = sc->sc_pci_ih;
760 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
761 PCI_MAPREG_TYPE_IO, 0,
762 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
763 printf("%s: couldn't map %s channel cmd regs\n",
764 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
765 return 0;
766 }
767
768 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
769 PCI_MAPREG_TYPE_IO, 0,
770 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
771 printf("%s: couldn't map %s channel ctl regs\n",
772 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
773 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
774 return 0;
775 }
776 /*
777 * In native mode, 4 bytes of I/O space are mapped for the control
778 * register; the control register itself is at offset 2. Pass the generic
779 * code a handle for only one byte at the right offset.
780 */
781 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
782 &wdc_cp->ctl_ioh) != 0) {
783 printf("%s: unable to subregion %s channel ctl regs\n",
784 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
785 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
786 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
787 return 0;
788 }
789 return (1);
790 }
791
792 void
793 pciide_mapreg_dma(sc, pa)
794 struct pciide_softc *sc;
795 struct pci_attach_args *pa;
796 {
797 pcireg_t maptype;
798 bus_addr_t addr;
799
800 /*
801 * Map DMA registers
802 *
803 * Note that sc_dma_ok is the right variable to test to see if
804 * DMA can be done. If the interface doesn't support DMA,
805 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
806 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
807 * non-zero if the interface supports DMA and the registers
808 * could be mapped.
809 *
810 * XXX Note that despite the fact that the Bus Master IDE specs
811 * XXX say that "The bus master IDE function uses 16 bytes of IO
812 * XXX space," some controllers (at least the United
813 * XXX Microelectronics UM8886BF) place it in memory space.
814 */
815 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
816 PCIIDE_REG_BUS_MASTER_DMA);
817
818 switch (maptype) {
819 case PCI_MAPREG_TYPE_IO:
820 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
821 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
822 &addr, NULL, NULL) == 0);
823 if (sc->sc_dma_ok == 0) {
824 printf(", but unused (couldn't query registers)");
825 break;
826 }
827 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
828 && addr >= 0x10000) {
829 sc->sc_dma_ok = 0;
830 printf(", but unused (registers at unsafe address "
831 "%#lx)", (unsigned long)addr);
832 break;
833 }
834 /* FALLTHROUGH */
835
836 case PCI_MAPREG_MEM_TYPE_32BIT:
837 sc->sc_dma_ok = (pci_mapreg_map(pa,
838 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
839 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
840 sc->sc_dmat = pa->pa_dmat;
841 if (sc->sc_dma_ok == 0) {
842 printf(", but unused (couldn't map registers)");
843 } else {
844 sc->sc_wdcdev.dma_arg = sc;
845 sc->sc_wdcdev.dma_init = pciide_dma_init;
846 sc->sc_wdcdev.dma_start = pciide_dma_start;
847 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
848 }
849
850 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
851 PCIIDE_OPTIONS_NODMA) {
852 printf(", but unused (forced off by config file)");
853 sc->sc_dma_ok = 0;
854 }
855 break;
856
857 default:
858 sc->sc_dma_ok = 0;
859 printf(", but unsupported register maptype (0x%x)", maptype);
860 }
861 }
862
863 int
864 pciide_compat_intr(arg)
865 void *arg;
866 {
867 struct pciide_channel *cp = arg;
868
869 #ifdef DIAGNOSTIC
870 /* should only be called for a compat channel */
871 if (cp->compat == 0)
872 panic("pciide compat intr called for non-compat chan %p\n", cp);
873 #endif
874 return (wdcintr(&cp->wdc_channel));
875 }
876
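/*
 * Shared native-PCI interrupt handler: poll every native channel that is
 * waiting for an interrupt and let wdcintr() decide whether it was ours.
 */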
877 int
878 pciide_pci_intr(arg)
879 void *arg;
880 {
881 struct pciide_softc *sc = arg;
882 struct pciide_channel *cp;
883 struct channel_softc *wdc_cp;
884 int i, rv, crv;
885
886 rv = 0;
887 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
888 cp = &sc->pciide_channels[i];
889 wdc_cp = &cp->wdc_channel;
890
891 /* If a compat channel, skip. */
892 if (cp->compat)
893 continue;
894 /* if this channel is not waiting for an intr, skip */
895 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
896 continue;
897
898 crv = wdcintr(wdc_cp);
899 if (crv == 0)
900 ; /* leave rv alone */
901 else if (crv == 1)
902 rv = 1; /* claim the intr */
903 else if (rv == 0) /* crv should be -1 in this case */
904 rv = crv; /* if we've done no better, take it */
905 }
906 return (rv);
907 }
908
909 void
910 pciide_channel_dma_setup(cp)
911 struct pciide_channel *cp;
912 {
913 int drive;
914 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
915 struct ata_drive_datas *drvp;
916
917 for (drive = 0; drive < 2; drive++) {
918 drvp = &cp->wdc_channel.ch_drive[drive];
919 /* If no drive, skip */
920 if ((drvp->drive_flags & DRIVE) == 0)
921 continue;
922 /* setup DMA if needed */
923 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
924 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
925 sc->sc_dma_ok == 0) {
926 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
927 continue;
928 }
929 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
930 != 0) {
931 /* Abort DMA setup */
932 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
933 continue;
934 }
935 }
936 }
937
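/*
 * Allocate and map the physical region descriptor (PRD) table used by the
 * bus-master DMA engine: one base-address/byte-count entry per DMA segment,
 * with the EOT bit set in the last entry (see the Bus Master IDE spec
 * referenced above).  Each drive gets its own table and DMA maps.
 */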
938 int
939 pciide_dma_table_setup(sc, channel, drive)
940 struct pciide_softc *sc;
941 int channel, drive;
942 {
943 bus_dma_segment_t seg;
944 int error, rseg;
945 const bus_size_t dma_table_size =
946 sizeof(struct idedma_table) * NIDEDMA_TABLES;
947 struct pciide_dma_maps *dma_maps =
948 &sc->pciide_channels[channel].dma_maps[drive];
949
950 /* If table was already allocated, just return */
951 if (dma_maps->dma_table)
952 return 0;
953
954 /* Allocate memory for the DMA tables and map it */
955 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
956 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
957 BUS_DMA_NOWAIT)) != 0) {
958 printf("%s:%d: unable to allocate table DMA for "
959 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
960 channel, drive, error);
961 return error;
962 }
963 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
964 dma_table_size,
965 (caddr_t *)&dma_maps->dma_table,
966 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
967 printf("%s:%d: unable to map table DMA for"
968 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
969 channel, drive, error);
970 return error;
971 }
972 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
973 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
974 (unsigned long)seg.ds_addr), DEBUG_PROBE);
975
976 /* Create and load table DMA map for this disk */
977 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
978 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
979 &dma_maps->dmamap_table)) != 0) {
980 printf("%s:%d: unable to create table DMA map for "
981 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
982 channel, drive, error);
983 return error;
984 }
985 if ((error = bus_dmamap_load(sc->sc_dmat,
986 dma_maps->dmamap_table,
987 dma_maps->dma_table,
988 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
989 printf("%s:%d: unable to load table DMA map for "
990 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
991 channel, drive, error);
992 return error;
993 }
994 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
995 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
996 DEBUG_PROBE);
997 /* Create a xfer DMA map for this drive */
998 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
999 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1000 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1001 &dma_maps->dmamap_xfer)) != 0) {
1002 printf("%s:%d: unable to create xfer DMA map for "
1003 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1004 channel, drive, error);
1005 return error;
1006 }
1007 return 0;
1008 }
1009
1010 int
1011 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1012 void *v;
1013 int channel, drive;
1014 void *databuf;
1015 size_t datalen;
1016 int flags;
1017 {
1018 struct pciide_softc *sc = v;
1019 int error, seg;
1020 struct pciide_dma_maps *dma_maps =
1021 &sc->pciide_channels[channel].dma_maps[drive];
1022
1023 error = bus_dmamap_load(sc->sc_dmat,
1024 dma_maps->dmamap_xfer,
1025 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1026 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1027 if (error) {
1028 printf("%s:%d: unable to load xfer DMA map for"
1029 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1030 channel, drive, error);
1031 return error;
1032 }
1033
1034 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1035 dma_maps->dmamap_xfer->dm_mapsize,
1036 (flags & WDC_DMA_READ) ?
1037 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1038
1039 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1040 #ifdef DIAGNOSTIC
1041 /* A segment must not cross a 64k boundary */
1042 {
1043 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1044 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1045 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1046 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1047 printf("pciide_dma: segment %d physical addr 0x%lx"
1048 " len 0x%lx not properly aligned\n",
1049 seg, phys, len);
1050 panic("pciide_dma: buf align");
1051 }
1052 }
1053 #endif
1054 dma_maps->dma_table[seg].base_addr =
1055 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1056 dma_maps->dma_table[seg].byte_count =
1057 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1058 IDEDMA_BYTE_COUNT_MASK);
1059 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1060 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1061 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1062
1063 }
1064 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1065 htole32(IDEDMA_BYTE_COUNT_EOT);
1066
1067 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1068 dma_maps->dmamap_table->dm_mapsize,
1069 BUS_DMASYNC_PREWRITE);
1070
1071 /* Maps are ready; program the controller's DMA registers */
1072 #ifdef DIAGNOSTIC
1073 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1074 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1075 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1076 panic("pciide_dma_init: table align");
1077 }
1078 #endif
1079
1080 /* Clear status bits */
1081 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1082 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1083 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1084 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1085 /* Write table addr */
1086 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1087 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1088 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1089 /* set read/write */
1090 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1091 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1092 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1093 /* remember flags */
1094 dma_maps->dma_flags = flags;
1095 return 0;
1096 }
1097
1098 void
1099 pciide_dma_start(v, channel, drive)
1100 void *v;
1101 int channel, drive;
1102 {
1103 struct pciide_softc *sc = v;
1104
1105 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1106 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1107 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1108 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1109 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1110 }
1111
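/*
 * Called at the end of a transfer (or to poll/abort one when "force" is
 * set): stop the engine, unload the data map and translate the controller
 * status bits into WDC_DMAST_* flags for the wdc layer.
 */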
1112 int
1113 pciide_dma_finish(v, channel, drive, force)
1114 void *v;
1115 int channel, drive;
1116 int force;
1117 {
1118 struct pciide_softc *sc = v;
1119 u_int8_t status;
1120 int error = 0;
1121 struct pciide_dma_maps *dma_maps =
1122 &sc->pciide_channels[channel].dma_maps[drive];
1123
1124 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1125 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1126 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1127 DEBUG_XFERS);
1128
1129 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1130 return WDC_DMAST_NOIRQ;
1131
1132 /* stop DMA channel */
1133 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1134 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1135 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1136 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1137
1138 /* Unload the map of the data buffer */
1139 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1140 dma_maps->dmamap_xfer->dm_mapsize,
1141 (dma_maps->dma_flags & WDC_DMA_READ) ?
1142 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1143 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1144
1145 if ((status & IDEDMA_CTL_ERR) != 0) {
1146 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1147 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1148 error |= WDC_DMAST_ERR;
1149 }
1150
1151 if ((status & IDEDMA_CTL_INTR) == 0) {
1152 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1153 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1154 drive, status);
1155 error |= WDC_DMAST_NOIRQ;
1156 }
1157
1158 if ((status & IDEDMA_CTL_ACT) != 0) {
1159 /* data underrun, may be a valid condition for ATAPI */
1160 error |= WDC_DMAST_UNDER;
1161 }
1162 return error;
1163 }
1164
1165 void
1166 pciide_irqack(chp)
1167 struct channel_softc *chp;
1168 {
1169 struct pciide_channel *cp = (struct pciide_channel*)chp;
1170 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1171
1172 /* clear status bits in IDE DMA registers */
1173 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1174 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1175 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1176 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1177 }
1178
1179 /* some common code used by several chip_map */
1180 int
1181 pciide_chansetup(sc, channel, interface)
1182 struct pciide_softc *sc;
1183 int channel;
1184 pcireg_t interface;
1185 {
1186 struct pciide_channel *cp = &sc->pciide_channels[channel];
1187 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1188 cp->name = PCIIDE_CHANNEL_NAME(channel);
1189 cp->wdc_channel.channel = channel;
1190 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1191 cp->wdc_channel.ch_queue =
1192 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1193 if (cp->wdc_channel.ch_queue == NULL) {
1194 printf("%s %s channel: "
1195 "can't allocate memory for command queue",
1196 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1197 return 0;
1198 }
1199 printf("%s: %s channel %s to %s mode\n",
1200 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1201 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1202 "configured" : "wired",
1203 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1204 "native-PCI" : "compatibility");
1205 return 1;
1206 }
1207
1208 /* some common code used by several chip channel_map */
1209 void
1210 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1211 struct pci_attach_args *pa;
1212 struct pciide_channel *cp;
1213 pcireg_t interface;
1214 bus_size_t *cmdsizep, *ctlsizep;
1215 int (*pci_intr) __P((void *));
1216 {
1217 struct channel_softc *wdc_cp = &cp->wdc_channel;
1218
1219 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1220 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1221 pci_intr);
1222 else
1223 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1224 wdc_cp->channel, cmdsizep, ctlsizep);
1225
1226 if (cp->hw_ok == 0)
1227 return;
1228 wdc_cp->data32iot = wdc_cp->cmd_iot;
1229 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1230 wdcattach(wdc_cp);
1231 }
1232
1233 /*
1234 * Generic code to determine whether a channel can be disabled. Returns 1
1235 * if the channel can be disabled, 0 if not.
1236 */
1237 int
1238 pciide_chan_candisable(cp)
1239 struct pciide_channel *cp;
1240 {
1241 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1242 struct channel_softc *wdc_cp = &cp->wdc_channel;
1243
1244 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1245 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1246 printf("%s: disabling %s channel (no drives)\n",
1247 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1248 cp->hw_ok = 0;
1249 return 1;
1250 }
1251 return 0;
1252 }
1253
1254 /*
1255 * Generic code to map the compat interrupt if hw_ok != 0 and this is a
1256 * compat channel. Sets hw_ok = 0 on failure.
1257 */
1258 void
1259 pciide_map_compat_intr(pa, cp, compatchan, interface)
1260 struct pci_attach_args *pa;
1261 struct pciide_channel *cp;
1262 int compatchan, interface;
1263 {
1264 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1265 struct channel_softc *wdc_cp = &cp->wdc_channel;
1266
1267 if (cp->hw_ok == 0)
1268 return;
1269 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1270 return;
1271
1272 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1273 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1274 pa, compatchan, pciide_compat_intr, cp);
1275 if (cp->ih == NULL) {
1276 #endif
1277 printf("%s: no compatibility interrupt for use by %s "
1278 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1279 cp->hw_ok = 0;
1280 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1281 }
1282 #endif
1283 }
1284
1285 void
1286 pciide_print_modes(cp)
1287 struct pciide_channel *cp;
1288 {
1289 wdc_print_modes(&cp->wdc_channel);
1290 }
1291
1292 void
1293 default_chip_map(sc, pa)
1294 struct pciide_softc *sc;
1295 struct pci_attach_args *pa;
1296 {
1297 struct pciide_channel *cp;
1298 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1299 pcireg_t csr;
1300 int channel, drive;
1301 struct ata_drive_datas *drvp;
1302 u_int8_t idedma_ctl;
1303 bus_size_t cmdsize, ctlsize;
1304 char *failreason;
1305
1306 if (pciide_chipen(sc, pa) == 0)
1307 return;
1308
1309 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1310 printf("%s: bus-master DMA support present",
1311 sc->sc_wdcdev.sc_dev.dv_xname);
1312 if (sc->sc_pp == &default_product_desc &&
1313 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1314 PCIIDE_OPTIONS_DMA) == 0) {
1315 printf(", but unused (no driver support)");
1316 sc->sc_dma_ok = 0;
1317 } else {
1318 pciide_mapreg_dma(sc, pa);
1319 if (sc->sc_dma_ok != 0)
1320 printf(", used without full driver "
1321 "support");
1322 }
1323 } else {
1324 printf("%s: hardware does not support DMA",
1325 sc->sc_wdcdev.sc_dev.dv_xname);
1326 sc->sc_dma_ok = 0;
1327 }
1328 printf("\n");
1329 if (sc->sc_dma_ok) {
1330 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1331 sc->sc_wdcdev.irqack = pciide_irqack;
1332 }
1333 sc->sc_wdcdev.PIO_cap = 0;
1334 sc->sc_wdcdev.DMA_cap = 0;
1335
1336 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1337 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1338 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1339
1340 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1341 cp = &sc->pciide_channels[channel];
1342 if (pciide_chansetup(sc, channel, interface) == 0)
1343 continue;
1344 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1345 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1346 &ctlsize, pciide_pci_intr);
1347 } else {
1348 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1349 channel, &cmdsize, &ctlsize);
1350 }
1351 if (cp->hw_ok == 0)
1352 continue;
1353 /*
1354 * Check to see if something appears to be there.
1355 */
1356 failreason = NULL;
1357 if (!wdcprobe(&cp->wdc_channel)) {
1358 failreason = "not responding; disabled or no drives?";
1359 goto next;
1360 }
1361 /*
1362 * Now, make sure it's actually attributable to this PCI IDE
1363 * channel by trying to access the channel again while the
1364 * PCI IDE controller's I/O space is disabled. (If the
1365 * channel no longer appears to be there, it belongs to
1366 * this controller.) YUCK!
1367 */
1368 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1369 PCI_COMMAND_STATUS_REG);
1370 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1371 csr & ~PCI_COMMAND_IO_ENABLE);
1372 if (wdcprobe(&cp->wdc_channel))
1373 failreason = "other hardware responding at addresses";
1374 pci_conf_write(sc->sc_pc, sc->sc_tag,
1375 PCI_COMMAND_STATUS_REG, csr);
1376 next:
1377 if (failreason) {
1378 printf("%s: %s channel ignored (%s)\n",
1379 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1380 failreason);
1381 cp->hw_ok = 0;
1382 bus_space_unmap(cp->wdc_channel.cmd_iot,
1383 cp->wdc_channel.cmd_ioh, cmdsize);
1384 bus_space_unmap(cp->wdc_channel.ctl_iot,
1385 cp->wdc_channel.ctl_ioh, ctlsize);
1386 } else {
1387 pciide_map_compat_intr(pa, cp, channel, interface);
1388 }
1389 if (cp->hw_ok) {
1390 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1391 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1392 wdcattach(&cp->wdc_channel);
1393 }
1394 }
1395
1396 if (sc->sc_dma_ok == 0)
1397 return;
1398
1399 /* Allocate DMA maps */
1400 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1401 idedma_ctl = 0;
1402 cp = &sc->pciide_channels[channel];
1403 for (drive = 0; drive < 2; drive++) {
1404 drvp = &cp->wdc_channel.ch_drive[drive];
1405 /* If no drive, skip */
1406 if ((drvp->drive_flags & DRIVE) == 0)
1407 continue;
1408 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1409 continue;
1410 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1411 /* Abort DMA setup */
1412 printf("%s:%d:%d: can't allocate DMA maps, "
1413 "using PIO transfers\n",
1414 sc->sc_wdcdev.sc_dev.dv_xname,
1415 channel, drive);
1416 drvp->drive_flags &= ~DRIVE_DMA;
     continue;
1417 }
1418 printf("%s:%d:%d: using DMA data transfers\n",
1419 sc->sc_wdcdev.sc_dev.dv_xname,
1420 channel, drive);
1421 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1422 }
1423 if (idedma_ctl != 0) {
1424 /* Add software bits in status register */
1425 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1426 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1427 idedma_ctl);
1428 }
1429 }
1430 }
1431
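/*
 * Intel PIIX/ICH family.  The chip_map routine maps the channels (always in
 * compatibility mode on these chips), while the per-channel setup routines
 * below translate the negotiated PIO/DMA/UDMA modes into the IDETIM register
 * (plus SIDETIM, UDMAREG and CONFIG on PIIX3/4 and later).
 */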
1432 void
1433 piix_chip_map(sc, pa)
1434 struct pciide_softc *sc;
1435 struct pci_attach_args *pa;
1436 {
1437 struct pciide_channel *cp;
1438 int channel;
1439 u_int32_t idetim;
1440 bus_size_t cmdsize, ctlsize;
1441
1442 if (pciide_chipen(sc, pa) == 0)
1443 return;
1444
1445 printf("%s: bus-master DMA support present",
1446 sc->sc_wdcdev.sc_dev.dv_xname);
1447 pciide_mapreg_dma(sc, pa);
1448 printf("\n");
1449 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1450 WDC_CAPABILITY_MODE;
1451 if (sc->sc_dma_ok) {
1452 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1453 sc->sc_wdcdev.irqack = pciide_irqack;
1454 switch(sc->sc_pp->ide_product) {
1455 case PCI_PRODUCT_INTEL_82371AB_IDE:
1456 case PCI_PRODUCT_INTEL_82440MX_IDE:
1457 case PCI_PRODUCT_INTEL_82801AA_IDE:
1458 case PCI_PRODUCT_INTEL_82801AB_IDE:
1459 case PCI_PRODUCT_INTEL_82801BA_IDE:
1460 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1461 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1462 }
1463 }
1464 sc->sc_wdcdev.PIO_cap = 4;
1465 sc->sc_wdcdev.DMA_cap = 2;
1466 switch(sc->sc_pp->ide_product) {
1467 case PCI_PRODUCT_INTEL_82801AA_IDE:
1468 sc->sc_wdcdev.UDMA_cap = 4;
1469 break;
1470 case PCI_PRODUCT_INTEL_82801BA_IDE:
1471 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1472 sc->sc_wdcdev.UDMA_cap = 5;
1473 break;
1474 default:
1475 sc->sc_wdcdev.UDMA_cap = 2;
1476 }
1477 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1478 sc->sc_wdcdev.set_modes = piix_setup_channel;
1479 else
1480 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1481 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1482 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1483
1484 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1485 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1486 DEBUG_PROBE);
1487 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1488 WDCDEBUG_PRINT((", sidetim=0x%x",
1489 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1490 DEBUG_PROBE);
1491 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1492 WDCDEBUG_PRINT((", udamreg 0x%x",
1493 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1494 DEBUG_PROBE);
1495 }
1496 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1497 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1498 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1499 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1500 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1501 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1502 DEBUG_PROBE);
1503 }
1504
1505 }
1506 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1507
1508 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1509 cp = &sc->pciide_channels[channel];
1510 /* PIIX is compat-only */
1511 if (pciide_chansetup(sc, channel, 0) == 0)
1512 continue;
1513 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1514 if ((PIIX_IDETIM_READ(idetim, channel) &
1515 PIIX_IDETIM_IDE) == 0) {
1516 printf("%s: %s channel ignored (disabled)\n",
1517 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1518 continue;
1519 }
1520 /* PIIX are compat-only pciide devices */
1521 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1522 if (cp->hw_ok == 0)
1523 continue;
1524 if (pciide_chan_candisable(cp)) {
1525 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1526 channel);
1527 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1528 idetim);
1529 }
1530 pciide_map_compat_intr(pa, cp, channel, 0);
1531 if (cp->hw_ok == 0)
1532 continue;
1533 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1534 }
1535
1536 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1537 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1538 DEBUG_PROBE);
1539 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1540 WDCDEBUG_PRINT((", sidetim=0x%x",
1541 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1542 DEBUG_PROBE);
1543 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1544 WDCDEBUG_PRINT((", udamreg 0x%x",
1545 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1546 DEBUG_PROBE);
1547 }
1548 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1549 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1550 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1551 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1552 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1553 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1554 DEBUG_PROBE);
1555 }
1556 }
1557 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1558 }
1559
1560 void
1561 piix_setup_channel(chp)
1562 struct channel_softc *chp;
1563 {
1564 u_int8_t mode[2], drive;
1565 u_int32_t oidetim, idetim, idedma_ctl;
1566 struct pciide_channel *cp = (struct pciide_channel*)chp;
1567 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1568 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1569
1570 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1571 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1572 idedma_ctl = 0;
1573
1574 /* set up new idetim: Enable IDE registers decode */
1575 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1576 chp->channel);
1577
1578 /* setup DMA */
1579 pciide_channel_dma_setup(cp);
1580
1581 /*
1582 * Here we have to mess with the drives' modes: the PIIX can't have
1583 * different timings for master and slave drives.
1584 * We need to find the best combination.
1585 */
1586
1587 /* If both drives support DMA, take the lower mode */
1588 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1589 (drvp[1].drive_flags & DRIVE_DMA)) {
1590 mode[0] = mode[1] =
1591 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1592 drvp[0].DMA_mode = mode[0];
1593 drvp[1].DMA_mode = mode[1];
1594 goto ok;
1595 }
1596 /*
1597 * If only one drive supports DMA, use its mode, and
1598 * put the other one in PIO mode 0 if its mode is not compatible
1599 */
1600 if (drvp[0].drive_flags & DRIVE_DMA) {
1601 mode[0] = drvp[0].DMA_mode;
1602 mode[1] = drvp[1].PIO_mode;
1603 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1604 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1605 mode[1] = drvp[1].PIO_mode = 0;
1606 goto ok;
1607 }
1608 if (drvp[1].drive_flags & DRIVE_DMA) {
1609 mode[1] = drvp[1].DMA_mode;
1610 mode[0] = drvp[0].PIO_mode;
1611 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1612 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1613 mode[0] = drvp[0].PIO_mode = 0;
1614 goto ok;
1615 }
1616 /*
1617 * If neither drive uses DMA, take the lower mode, unless
1618 * one of them is PIO mode < 2
1619 */
1620 if (drvp[0].PIO_mode < 2) {
1621 mode[0] = drvp[0].PIO_mode = 0;
1622 mode[1] = drvp[1].PIO_mode;
1623 } else if (drvp[1].PIO_mode < 2) {
1624 mode[1] = drvp[1].PIO_mode = 0;
1625 mode[0] = drvp[0].PIO_mode;
1626 } else {
1627 mode[0] = mode[1] =
1628 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1629 drvp[0].PIO_mode = mode[0];
1630 drvp[1].PIO_mode = mode[1];
1631 }
1632 ok: /* The modes are setup */
1633 for (drive = 0; drive < 2; drive++) {
1634 if (drvp[drive].drive_flags & DRIVE_DMA) {
1635 idetim |= piix_setup_idetim_timings(
1636 mode[drive], 1, chp->channel);
1637 goto end;
1638 }
1639 }
1640 /* If we get here, neither drive uses DMA */
1641 if (mode[0] >= 2)
1642 idetim |= piix_setup_idetim_timings(
1643 mode[0], 0, chp->channel);
1644 else
1645 idetim |= piix_setup_idetim_timings(
1646 mode[1], 0, chp->channel);
1647 end: /*
1648 * timing mode is now set up in the controller. Enable
1649 * it per-drive
1650 */
1651 for (drive = 0; drive < 2; drive++) {
1652 /* If no drive, skip */
1653 if ((drvp[drive].drive_flags & DRIVE) == 0)
1654 continue;
1655 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1656 if (drvp[drive].drive_flags & DRIVE_DMA)
1657 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1658 }
1659 if (idedma_ctl != 0) {
1660 /* Add software bits in status register */
1661 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1662 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1663 idedma_ctl);
1664 }
1665 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1666 pciide_print_modes(cp);
1667 }
1668
1669 void
1670 piix3_4_setup_channel(chp)
1671 struct channel_softc *chp;
1672 {
1673 struct ata_drive_datas *drvp;
1674 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1675 struct pciide_channel *cp = (struct pciide_channel*)chp;
1676 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1677 int drive;
1678 int channel = chp->channel;
1679
1680 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1681 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1682 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1683 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1684 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1685 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1686 PIIX_SIDETIM_RTC_MASK(channel));
1687
1688 idedma_ctl = 0;
1689 /* If channel disabled, no need to go further */
1690 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1691 return;
1692 /* set up new idetim: Enable IDE registers decode */
1693 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1694
1695 /* setup DMA if needed */
1696 pciide_channel_dma_setup(cp);
1697
1698 for (drive = 0; drive < 2; drive++) {
1699 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1700 PIIX_UDMATIM_SET(0x3, channel, drive));
1701 drvp = &chp->ch_drive[drive];
1702 /* If no drive, skip */
1703 if ((drvp->drive_flags & DRIVE) == 0)
1704 continue;
1705 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1706 (drvp->drive_flags & DRIVE_UDMA) == 0))
1707 goto pio;
1708
1709 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1710 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1711 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1712 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1713 ideconf |= PIIX_CONFIG_PINGPONG;
1714 }
1715 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1716 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1717 /* setup Ultra/100 */
1718 if (drvp->UDMA_mode > 2 &&
1719 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1720 drvp->UDMA_mode = 2;
1721 if (drvp->UDMA_mode > 4) {
1722 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1723 } else {
1724 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1725 if (drvp->UDMA_mode > 2) {
1726 ideconf |= PIIX_CONFIG_UDMA66(channel,
1727 drive);
1728 } else {
1729 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1730 drive);
1731 }
1732 }
1733 }
1734 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1735 /* setup Ultra/66 */
1736 if (drvp->UDMA_mode > 2 &&
1737 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1738 drvp->UDMA_mode = 2;
1739 if (drvp->UDMA_mode > 2)
1740 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1741 else
1742 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1743 }
1744 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1745 (drvp->drive_flags & DRIVE_UDMA)) {
1746 /* use Ultra/DMA */
1747 drvp->drive_flags &= ~DRIVE_DMA;
1748 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive);
1749 udmareg |= PIIX_UDMATIM_SET(
1750 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1751 } else {
1752 /* use Multiword DMA */
1753 drvp->drive_flags &= ~DRIVE_UDMA;
1754 if (drive == 0) {
1755 idetim |= piix_setup_idetim_timings(
1756 drvp->DMA_mode, 1, channel);
1757 } else {
1758 sidetim |= piix_setup_sidetim_timings(
1759 drvp->DMA_mode, 1, channel);
1760 idetim =PIIX_IDETIM_SET(idetim,
1761 PIIX_IDETIM_SITRE, channel);
1762 }
1763 }
1764 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1765
1766 pio: /* use PIO mode */
1767 idetim |= piix_setup_idetim_drvs(drvp);
1768 if (drive == 0) {
1769 idetim |= piix_setup_idetim_timings(
1770 drvp->PIO_mode, 0, channel);
1771 } else {
1772 sidetim |= piix_setup_sidetim_timings(
1773 drvp->PIO_mode, 0, channel);
1774 idetim =PIIX_IDETIM_SET(idetim,
1775 PIIX_IDETIM_SITRE, channel);
1776 }
1777 }
1778 if (idedma_ctl != 0) {
1779 /* Add software bits in status register */
1780 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1781 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1782 idedma_ctl);
1783 }
1784 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1785 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1786 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1787 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1788 pciide_print_modes(cp);
1789 }
1790
1791
1792 /* setup ISP and RTC fields, based on mode */
1793 static u_int32_t
1794 piix_setup_idetim_timings(mode, dma, channel)
1795 u_int8_t mode;
1796 u_int8_t dma;
1797 u_int8_t channel;
1798 {
1799
1800 if (dma)
1801 return PIIX_IDETIM_SET(0,
1802 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1803 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1804 channel);
1805 else
1806 return PIIX_IDETIM_SET(0,
1807 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1808 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1809 channel);
1810 }
1811
1812 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1813 static u_int32_t
1814 piix_setup_idetim_drvs(drvp)
1815 struct ata_drive_datas *drvp;
1816 {
1817 u_int32_t ret = 0;
1818 struct channel_softc *chp = drvp->chnl_softc;
1819 u_int8_t channel = chp->channel;
1820 u_int8_t drive = drvp->drive;
1821
1822 	/*
1823 	 * If the drive is using UDMA, the timing setup is independent,
1824 	 * so just check DMA and PIO here.
1825 	 */
1826 if (drvp->drive_flags & DRIVE_DMA) {
1827 /* if mode = DMA mode 0, use compatible timings */
1828 if ((drvp->drive_flags & DRIVE_DMA) &&
1829 drvp->DMA_mode == 0) {
1830 drvp->PIO_mode = 0;
1831 return ret;
1832 }
1833 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1834 		/*
1835 		 * If PIO and DMA timings are the same, use fast timings for
1836 		 * PIO too; otherwise use compatible timings.
1837 		 */
1838 if ((piix_isp_pio[drvp->PIO_mode] !=
1839 piix_isp_dma[drvp->DMA_mode]) ||
1840 (piix_rtc_pio[drvp->PIO_mode] !=
1841 piix_rtc_dma[drvp->DMA_mode]))
1842 drvp->PIO_mode = 0;
1843 /* if PIO mode <= 2, use compat timings for PIO */
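		/*
		 * DTE presumably makes the fast timings apply to DMA only,
		 * so PIO keeps using compatible timings.
		 */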
1844 if (drvp->PIO_mode <= 2) {
1845 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1846 channel);
1847 return ret;
1848 }
1849 }
1850
1851 /*
1852 * Now setup PIO modes. If mode < 2, use compat timings.
1853 * Else enable fast timings. Enable IORDY and prefetch/post
1854 * if PIO mode >= 3.
1855 */
1856
1857 if (drvp->PIO_mode < 2)
1858 return ret;
1859
1860 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1861 if (drvp->PIO_mode >= 3) {
1862 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1863 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1864 }
1865 return ret;
1866 }
1867
1868 /* setup values in SIDETIM registers, based on mode */
1869 static u_int32_t
1870 piix_setup_sidetim_timings(mode, dma, channel)
1871 u_int8_t mode;
1872 u_int8_t dma;
1873 u_int8_t channel;
1874 {
1875 if (dma)
1876 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1877 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1878 else
1879 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1880 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1881 }
1882
1883 void
1884 amd7x6_chip_map(sc, pa)
1885 struct pciide_softc *sc;
1886 struct pci_attach_args *pa;
1887 {
1888 struct pciide_channel *cp;
1889 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1890 int channel;
1891 pcireg_t chanenable;
1892 bus_size_t cmdsize, ctlsize;
1893
1894 if (pciide_chipen(sc, pa) == 0)
1895 return;
1896 printf("%s: bus-master DMA support present",
1897 sc->sc_wdcdev.sc_dev.dv_xname);
1898 pciide_mapreg_dma(sc, pa);
1899 printf("\n");
1900 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1901 WDC_CAPABILITY_MODE;
1902 if (sc->sc_dma_ok) {
1903 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1904 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1905 sc->sc_wdcdev.irqack = pciide_irqack;
1906 }
1907 sc->sc_wdcdev.PIO_cap = 4;
1908 sc->sc_wdcdev.DMA_cap = 2;
1909
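	/*
	 * The 766 can do UDMA/100; the other chips handled here appear to
	 * top out at UDMA/66.
	 */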
1910 if (sc->sc_pp->ide_product == PCI_PRODUCT_AMD_PBC766_IDE)
1911 sc->sc_wdcdev.UDMA_cap = 5;
1912 else
1913 sc->sc_wdcdev.UDMA_cap = 4;
1914 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
1915 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1916 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1917 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
1918
1919 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
1920 DEBUG_PROBE);
1921 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1922 cp = &sc->pciide_channels[channel];
1923 if (pciide_chansetup(sc, channel, interface) == 0)
1924 continue;
1925
1926 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
1927 printf("%s: %s channel ignored (disabled)\n",
1928 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1929 continue;
1930 }
1931 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1932 pciide_pci_intr);
1933
1934 if (pciide_chan_candisable(cp))
1935 chanenable &= ~AMD7X6_CHAN_EN(channel);
1936 pciide_map_compat_intr(pa, cp, channel, interface);
1937 if (cp->hw_ok == 0)
1938 continue;
1939
1940 amd7x6_setup_channel(&cp->wdc_channel);
1941 }
1942 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
1943 chanenable);
1944 return;
1945 }
1946
1947 void
1948 amd7x6_setup_channel(chp)
1949 struct channel_softc *chp;
1950 {
1951 u_int32_t udmatim_reg, datatim_reg;
1952 u_int8_t idedma_ctl;
1953 int mode, drive;
1954 struct ata_drive_datas *drvp;
1955 struct pciide_channel *cp = (struct pciide_channel*)chp;
1956 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1957 #ifndef PCIIDE_AMD756_ENABLEDMA
1958 int rev = PCI_REVISION(
1959 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1960 #endif
1961
1962 idedma_ctl = 0;
1963 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
1964 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
1965 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
1966 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
1967
1968 /* setup DMA if needed */
1969 pciide_channel_dma_setup(cp);
1970
1971 for (drive = 0; drive < 2; drive++) {
1972 drvp = &chp->ch_drive[drive];
1973 /* If no drive, skip */
1974 if ((drvp->drive_flags & DRIVE) == 0)
1975 continue;
1976 /* add timing values, setup DMA if needed */
1977 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1978 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1979 mode = drvp->PIO_mode;
1980 goto pio;
1981 }
1982 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1983 (drvp->drive_flags & DRIVE_UDMA)) {
1984 /* use Ultra/DMA */
1985 drvp->drive_flags &= ~DRIVE_DMA;
1986 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
1987 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
1988 AMD7X6_UDMA_TIME(chp->channel, drive,
1989 amd7x6_udma_tim[drvp->UDMA_mode]);
1990 /* can use PIO timings, MW DMA unused */
1991 mode = drvp->PIO_mode;
1992 } else {
1993 /* use Multiword DMA, but only if revision is OK */
1994 drvp->drive_flags &= ~DRIVE_UDMA;
1995 #ifndef PCIIDE_AMD756_ENABLEDMA
1996 			/*
1997 			 * The workaround (disabling multi-word DMA) doesn't
1998 			 * seem to be necessary with all drives, so it can be
1999 			 * turned off by defining PCIIDE_AMD756_ENABLEDMA.
2000 			 * The hardware bug causes a hard hang if triggered.
2001 			 */
2002 if (sc->sc_pp->ide_product ==
2003 PCI_PRODUCT_AMD_PBC756_IDE &&
2004 AMD756_CHIPREV_DISABLEDMA(rev)) {
2005 printf("%s:%d:%d: multi-word DMA disabled due "
2006 "to chip revision\n",
2007 sc->sc_wdcdev.sc_dev.dv_xname,
2008 chp->channel, drive);
2009 mode = drvp->PIO_mode;
2010 drvp->drive_flags &= ~DRIVE_DMA;
2011 goto pio;
2012 }
2013 #endif
2014 /* mode = min(pio, dma+2) */
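			/*
			 * A single timing per drive apparently covers both
			 * PIO and multi-word DMA; MW DMA mode n roughly
			 * matches PIO mode n+2, so pick the slower.
			 */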
2015 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2016 mode = drvp->PIO_mode;
2017 else
2018 mode = drvp->DMA_mode + 2;
2019 }
2020 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2021
2022 pio: /* setup PIO mode */
2023 if (mode <= 2) {
2024 drvp->DMA_mode = 0;
2025 drvp->PIO_mode = 0;
2026 mode = 0;
2027 } else {
2028 drvp->PIO_mode = mode;
2029 drvp->DMA_mode = mode - 2;
2030 }
2031 datatim_reg |=
2032 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2033 amd7x6_pio_set[mode]) |
2034 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2035 amd7x6_pio_rec[mode]);
2036 }
2037 if (idedma_ctl != 0) {
2038 /* Add software bits in status register */
2039 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2040 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2041 idedma_ctl);
2042 }
2043 pciide_print_modes(cp);
2044 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
2045 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
2046 }
2047
2048 void
2049 apollo_chip_map(sc, pa)
2050 struct pciide_softc *sc;
2051 struct pci_attach_args *pa;
2052 {
2053 struct pciide_channel *cp;
2054 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2055 int channel;
2056 u_int32_t ideconf;
2057 bus_size_t cmdsize, ctlsize;
2058 pcitag_t pcib_tag;
2059 pcireg_t pcib_id, pcib_class;
2060
2061 if (pciide_chipen(sc, pa) == 0)
2062 return;
2063 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2064 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2065 /* and read ID and rev of the ISA bridge */
2066 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2067 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2068 printf(": VIA Technologies ");
2069 switch (PCI_PRODUCT(pcib_id)) {
2070 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2071 printf("VT82C586 (Apollo VP) ");
2072 		if (PCI_REVISION(pcib_class) >= 0x02) {
2073 printf("ATA33 controller\n");
2074 sc->sc_wdcdev.UDMA_cap = 2;
2075 } else {
2076 printf("controller\n");
2077 sc->sc_wdcdev.UDMA_cap = 0;
2078 }
2079 break;
2080 case PCI_PRODUCT_VIATECH_VT82C596A:
2081 printf("VT82C596A (Apollo Pro) ");
2082 if (PCI_REVISION(pcib_class) >= 0x12) {
2083 printf("ATA66 controller\n");
2084 sc->sc_wdcdev.UDMA_cap = 4;
2085 } else {
2086 printf("ATA33 controller\n");
2087 sc->sc_wdcdev.UDMA_cap = 2;
2088 }
2089 break;
2090 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2091 printf("VT82C686A (Apollo KX133) ");
2092 if (PCI_REVISION(pcib_class) >= 0x40) {
2093 printf("ATA100 controller\n");
2094 sc->sc_wdcdev.UDMA_cap = 5;
2095 } else {
2096 printf("ATA66 controller\n");
2097 sc->sc_wdcdev.UDMA_cap = 4;
2098 }
2099 break;
2100 case PCI_PRODUCT_VIATECH_VT8233:
2101 printf("VT8233 ATA100 controller\n");
2102 sc->sc_wdcdev.UDMA_cap = 5;
2103 break;
2104 default:
2105 printf("unknown ATA controller\n");
2106 sc->sc_wdcdev.UDMA_cap = 0;
2107 }
2108
2109 printf("%s: bus-master DMA support present",
2110 sc->sc_wdcdev.sc_dev.dv_xname);
2111 pciide_mapreg_dma(sc, pa);
2112 printf("\n");
2113 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2114 WDC_CAPABILITY_MODE;
2115 if (sc->sc_dma_ok) {
2116 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2117 sc->sc_wdcdev.irqack = pciide_irqack;
2118 if (sc->sc_wdcdev.UDMA_cap > 0)
2119 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2120 }
2121 sc->sc_wdcdev.PIO_cap = 4;
2122 sc->sc_wdcdev.DMA_cap = 2;
2123 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2124 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2125 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2126
2127 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2128 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2129 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2130 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2131 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2132 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2133 DEBUG_PROBE);
2134
2135 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2136 cp = &sc->pciide_channels[channel];
2137 if (pciide_chansetup(sc, channel, interface) == 0)
2138 continue;
2139
2140 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2141 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2142 printf("%s: %s channel ignored (disabled)\n",
2143 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2144 continue;
2145 }
2146 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2147 pciide_pci_intr);
2148 if (cp->hw_ok == 0)
2149 continue;
2150 if (pciide_chan_candisable(cp)) {
2151 ideconf &= ~APO_IDECONF_EN(channel);
2152 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2153 ideconf);
2154 }
2155 pciide_map_compat_intr(pa, cp, channel, interface);
2156
2157 if (cp->hw_ok == 0)
2158 continue;
2159 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2160 }
2161 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2162 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2163 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2164 }
2165
2166 void
2167 apollo_setup_channel(chp)
2168 struct channel_softc *chp;
2169 {
2170 u_int32_t udmatim_reg, datatim_reg;
2171 u_int8_t idedma_ctl;
2172 int mode, drive;
2173 struct ata_drive_datas *drvp;
2174 struct pciide_channel *cp = (struct pciide_channel*)chp;
2175 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2176
2177 idedma_ctl = 0;
2178 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2179 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2180 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2181 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2182
2183 /* setup DMA if needed */
2184 pciide_channel_dma_setup(cp);
2185
2186 for (drive = 0; drive < 2; drive++) {
2187 drvp = &chp->ch_drive[drive];
2188 /* If no drive, skip */
2189 if ((drvp->drive_flags & DRIVE) == 0)
2190 continue;
2191 /* add timing values, setup DMA if needed */
2192 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2193 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2194 mode = drvp->PIO_mode;
2195 goto pio;
2196 }
2197 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2198 (drvp->drive_flags & DRIVE_UDMA)) {
2199 /* use Ultra/DMA */
2200 drvp->drive_flags &= ~DRIVE_DMA;
2201 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2202 APO_UDMA_EN_MTH(chp->channel, drive);
2203 if (sc->sc_wdcdev.UDMA_cap == 5) {
2204 /* 686b */
2205 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2206 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2207 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2208 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2209 /* 596b or 686a */
2210 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2211 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2212 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2213 } else {
2214 /* 596a or 586b */
2215 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2216 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2217 }
2218 /* can use PIO timings, MW DMA unused */
2219 mode = drvp->PIO_mode;
2220 } else {
2221 /* use Multiword DMA */
2222 drvp->drive_flags &= ~DRIVE_UDMA;
2223 /* mode = min(pio, dma+2) */
2224 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2225 mode = drvp->PIO_mode;
2226 else
2227 mode = drvp->DMA_mode + 2;
2228 }
2229 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2230
2231 pio: /* setup PIO mode */
2232 if (mode <= 2) {
2233 drvp->DMA_mode = 0;
2234 drvp->PIO_mode = 0;
2235 mode = 0;
2236 } else {
2237 drvp->PIO_mode = mode;
2238 drvp->DMA_mode = mode - 2;
2239 }
2240 datatim_reg |=
2241 APO_DATATIM_PULSE(chp->channel, drive,
2242 apollo_pio_set[mode]) |
2243 APO_DATATIM_RECOV(chp->channel, drive,
2244 apollo_pio_rec[mode]);
2245 }
2246 if (idedma_ctl != 0) {
2247 /* Add software bits in status register */
2248 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2249 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2250 idedma_ctl);
2251 }
2252 pciide_print_modes(cp);
2253 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2254 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2255 }
2256
2257 void
2258 cmd_channel_map(pa, sc, channel)
2259 struct pci_attach_args *pa;
2260 struct pciide_softc *sc;
2261 int channel;
2262 {
2263 struct pciide_channel *cp = &sc->pciide_channels[channel];
2264 bus_size_t cmdsize, ctlsize;
2265 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2266 int interface;
2267
2268 /*
2269 * The 0648/0649 can be told to identify as a RAID controller.
2270 	 * In this case, we have to fake the interface.
2271 */
2272 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2273 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2274 PCIIDE_INTERFACE_SETTABLE(1);
2275 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2276 CMD_CONF_DSA1)
2277 interface |= PCIIDE_INTERFACE_PCI(0) |
2278 PCIIDE_INTERFACE_PCI(1);
2279 } else {
2280 interface = PCI_INTERFACE(pa->pa_class);
2281 }
2282
2283 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2284 cp->name = PCIIDE_CHANNEL_NAME(channel);
2285 cp->wdc_channel.channel = channel;
2286 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2287
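	/*
	 * Both channels share a single command queue, presumably because
	 * the controller can't run them at the same time.
	 */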
2288 if (channel > 0) {
2289 cp->wdc_channel.ch_queue =
2290 sc->pciide_channels[0].wdc_channel.ch_queue;
2291 } else {
2292 cp->wdc_channel.ch_queue =
2293 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2294 }
2295 if (cp->wdc_channel.ch_queue == NULL) {
2296 printf("%s %s channel: "
2297 		    "can't allocate memory for command queue\n",
2298 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2299 return;
2300 }
2301
2302 printf("%s: %s channel %s to %s mode\n",
2303 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2304 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2305 "configured" : "wired",
2306 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2307 "native-PCI" : "compatibility");
2308
2309 /*
2310 * with a CMD PCI64x, if we get here, the first channel is enabled:
2311 * there's no way to disable the first channel without disabling
2312 * the whole device
2313 */
2314 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2315 printf("%s: %s channel ignored (disabled)\n",
2316 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2317 return;
2318 }
2319
2320 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2321 if (cp->hw_ok == 0)
2322 return;
2323 if (channel == 1) {
2324 if (pciide_chan_candisable(cp)) {
2325 ctrl &= ~CMD_CTRL_2PORT;
2326 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2327 CMD_CTRL, ctrl);
2328 }
2329 }
2330 pciide_map_compat_intr(pa, cp, channel, interface);
2331 }
2332
2333 int
2334 cmd_pci_intr(arg)
2335 void *arg;
2336 {
2337 struct pciide_softc *sc = arg;
2338 struct pciide_channel *cp;
2339 struct channel_softc *wdc_cp;
2340 int i, rv, crv;
2341 u_int32_t priirq, secirq;
2342
2343 rv = 0;
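	/*
	 * The primary channel's interrupt status is reported in CMD_CONF,
	 * the secondary channel's in CMD_ARTTIM23.
	 */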
2344 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2345 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2346 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2347 cp = &sc->pciide_channels[i];
2348 wdc_cp = &cp->wdc_channel;
2349 /* If a compat channel skip. */
2350 if (cp->compat)
2351 continue;
2352 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2353 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2354 crv = wdcintr(wdc_cp);
2355 if (crv == 0)
2356 printf("%s:%d: bogus intr\n",
2357 sc->sc_wdcdev.sc_dev.dv_xname, i);
2358 else
2359 rv = 1;
2360 }
2361 }
2362 return rv;
2363 }
2364
2365 void
2366 cmd_chip_map(sc, pa)
2367 struct pciide_softc *sc;
2368 struct pci_attach_args *pa;
2369 {
2370 int channel;
2371
2372 /*
2373 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2374 	 * and the base address registers can be disabled at
2375 	 * the hardware level. In this case, the device is wired
2376 * in compat mode and its first channel is always enabled,
2377 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2378 * In fact, it seems that the first channel of the CMD PCI0640
2379 * can't be disabled.
2380 */
2381
2382 #ifdef PCIIDE_CMD064x_DISABLE
2383 if (pciide_chipen(sc, pa) == 0)
2384 return;
2385 #endif
2386
2387 printf("%s: hardware does not support DMA\n",
2388 sc->sc_wdcdev.sc_dev.dv_xname);
2389 sc->sc_dma_ok = 0;
2390
2391 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2392 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2393 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2394
2395 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2396 cmd_channel_map(pa, sc, channel);
2397 }
2398 }
2399
2400 void
2401 cmd0643_9_chip_map(sc, pa)
2402 struct pciide_softc *sc;
2403 struct pci_attach_args *pa;
2404 {
2405 struct pciide_channel *cp;
2406 int channel;
2407 int rev = PCI_REVISION(
2408 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2409
2410 /*
2411 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2412 	 * and the base address registers can be disabled at
2413 	 * the hardware level. In this case, the device is wired
2414 * in compat mode and its first channel is always enabled,
2415 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2416 * In fact, it seems that the first channel of the CMD PCI0640
2417 * can't be disabled.
2418 */
2419
2420 #ifdef PCIIDE_CMD064x_DISABLE
2421 if (pciide_chipen(sc, pa) == 0)
2422 return;
2423 #endif
2424 printf("%s: bus-master DMA support present",
2425 sc->sc_wdcdev.sc_dev.dv_xname);
2426 pciide_mapreg_dma(sc, pa);
2427 printf("\n");
2428 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2429 WDC_CAPABILITY_MODE;
2430 if (sc->sc_dma_ok) {
2431 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2432 switch (sc->sc_pp->ide_product) {
2433 case PCI_PRODUCT_CMDTECH_649:
2434 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2435 sc->sc_wdcdev.UDMA_cap = 5;
2436 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2437 break;
2438 case PCI_PRODUCT_CMDTECH_648:
2439 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2440 sc->sc_wdcdev.UDMA_cap = 4;
2441 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2442 break;
2443 case PCI_PRODUCT_CMDTECH_646:
2444 if (rev >= CMD0646U2_REV) {
2445 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2446 sc->sc_wdcdev.UDMA_cap = 2;
2447 } else if (rev >= CMD0646U_REV) {
2448 /*
2449 * Linux's driver claims that the 646U is broken
2450 * with UDMA. Only enable it if we know what we're
2451 * doing
2452 */
2453 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2454 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2455 sc->sc_wdcdev.UDMA_cap = 2;
2456 #endif
2457 			/* explicitly disable UDMA */
2458 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2459 CMD_UDMATIM(0), 0);
2460 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2461 CMD_UDMATIM(1), 0);
2462 }
2463 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2464 break;
2465 default:
2466 sc->sc_wdcdev.irqack = pciide_irqack;
2467 }
2468 }
2469
2470 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2471 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2472 sc->sc_wdcdev.PIO_cap = 4;
2473 sc->sc_wdcdev.DMA_cap = 2;
2474 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2475
2476 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2477 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2478 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2479 DEBUG_PROBE);
2480
2481 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2482 cp = &sc->pciide_channels[channel];
2483 cmd_channel_map(pa, sc, channel);
2484 if (cp->hw_ok == 0)
2485 continue;
2486 cmd0643_9_setup_channel(&cp->wdc_channel);
2487 }
2488 /*
2489 * note - this also makes sure we clear the irq disable and reset
2490 * bits
2491 */
2492 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2493 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2494 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2495 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2496 DEBUG_PROBE);
2497 }
2498
2499 void
2500 cmd0643_9_setup_channel(chp)
2501 struct channel_softc *chp;
2502 {
2503 struct ata_drive_datas *drvp;
2504 u_int8_t tim;
2505 u_int32_t idedma_ctl, udma_reg;
2506 int drive;
2507 struct pciide_channel *cp = (struct pciide_channel*)chp;
2508 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2509
2510 idedma_ctl = 0;
2511 /* setup DMA if needed */
2512 pciide_channel_dma_setup(cp);
2513
2514 for (drive = 0; drive < 2; drive++) {
2515 drvp = &chp->ch_drive[drive];
2516 /* If no drive, skip */
2517 if ((drvp->drive_flags & DRIVE) == 0)
2518 continue;
2519 /* add timing values, setup DMA if needed */
2520 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2521 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2522 if (drvp->drive_flags & DRIVE_UDMA) {
2523 /* UltraDMA on a 646U2, 0648 or 0649 */
2524 drvp->drive_flags &= ~DRIVE_DMA;
2525 udma_reg = pciide_pci_read(sc->sc_pc,
2526 sc->sc_tag, CMD_UDMATIM(chp->channel));
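				/*
				 * If BICSR doesn't report an 80-conductor
				 * cable for this channel, limit the drive
				 * to UDMA/33.
				 */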
2527 if (drvp->UDMA_mode > 2 &&
2528 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2529 CMD_BICSR) &
2530 CMD_BICSR_80(chp->channel)) == 0)
2531 drvp->UDMA_mode = 2;
2532 if (drvp->UDMA_mode > 2)
2533 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2534 else if (sc->sc_wdcdev.UDMA_cap > 2)
2535 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2536 udma_reg |= CMD_UDMATIM_UDMA(drive);
2537 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2538 CMD_UDMATIM_TIM_OFF(drive));
2539 udma_reg |=
2540 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2541 CMD_UDMATIM_TIM_OFF(drive));
2542 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2543 CMD_UDMATIM(chp->channel), udma_reg);
2544 } else {
2545 /*
2546 * use Multiword DMA.
2547 * Timings will be used for both PIO and DMA,
2548 				 * so adjust DMA mode if needed.
2549 				 * If we have a 0646U2/8/9, turn off UDMA.
2550 */
2551 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2552 udma_reg = pciide_pci_read(sc->sc_pc,
2553 sc->sc_tag,
2554 CMD_UDMATIM(chp->channel));
2555 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2556 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2557 CMD_UDMATIM(chp->channel),
2558 udma_reg);
2559 }
2560 if (drvp->PIO_mode >= 3 &&
2561 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2562 drvp->DMA_mode = drvp->PIO_mode - 2;
2563 }
2564 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2565 }
2566 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2567 }
2568 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2569 CMD_DATA_TIM(chp->channel, drive), tim);
2570 }
2571 if (idedma_ctl != 0) {
2572 /* Add software bits in status register */
2573 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2574 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2575 idedma_ctl);
2576 }
2577 pciide_print_modes(cp);
2578 }
2579
2580 void
2581 cmd646_9_irqack(chp)
2582 struct channel_softc *chp;
2583 {
2584 u_int32_t priirq, secirq;
2585 struct pciide_channel *cp = (struct pciide_channel*)chp;
2586 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2587
2588 if (chp->channel == 0) {
2589 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2590 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2591 } else {
2592 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2593 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2594 }
2595 pciide_irqack(chp);
2596 }
2597
2598 void
2599 cy693_chip_map(sc, pa)
2600 struct pciide_softc *sc;
2601 struct pci_attach_args *pa;
2602 {
2603 struct pciide_channel *cp;
2604 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2605 bus_size_t cmdsize, ctlsize;
2606
2607 if (pciide_chipen(sc, pa) == 0)
2608 return;
2609 /*
2610 * this chip has 2 PCI IDE functions, one for primary and one for
2611 * secondary. So we need to call pciide_mapregs_compat() with
2612 * the real channel
2613 */
2614 if (pa->pa_function == 1) {
2615 sc->sc_cy_compatchan = 0;
2616 } else if (pa->pa_function == 2) {
2617 sc->sc_cy_compatchan = 1;
2618 } else {
2619 printf("%s: unexpected PCI function %d\n",
2620 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2621 return;
2622 }
2623 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2624 printf("%s: bus-master DMA support present",
2625 sc->sc_wdcdev.sc_dev.dv_xname);
2626 pciide_mapreg_dma(sc, pa);
2627 } else {
2628 printf("%s: hardware does not support DMA",
2629 sc->sc_wdcdev.sc_dev.dv_xname);
2630 sc->sc_dma_ok = 0;
2631 }
2632 printf("\n");
2633
2634 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2635 if (sc->sc_cy_handle == NULL) {
2636 printf("%s: unable to map hyperCache control registers\n",
2637 sc->sc_wdcdev.sc_dev.dv_xname);
2638 sc->sc_dma_ok = 0;
2639 }
2640
2641 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2642 WDC_CAPABILITY_MODE;
2643 if (sc->sc_dma_ok) {
2644 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2645 sc->sc_wdcdev.irqack = pciide_irqack;
2646 }
2647 sc->sc_wdcdev.PIO_cap = 4;
2648 sc->sc_wdcdev.DMA_cap = 2;
2649 sc->sc_wdcdev.set_modes = cy693_setup_channel;
2650
2651 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2652 sc->sc_wdcdev.nchannels = 1;
2653
2654 /* Only one channel for this chip; if we are here it's enabled */
2655 cp = &sc->pciide_channels[0];
2656 sc->wdc_chanarray[0] = &cp->wdc_channel;
2657 cp->name = PCIIDE_CHANNEL_NAME(0);
2658 cp->wdc_channel.channel = 0;
2659 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2660 cp->wdc_channel.ch_queue =
2661 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2662 if (cp->wdc_channel.ch_queue == NULL) {
2663 printf("%s primary channel: "
2664 		    "can't allocate memory for command queue\n",
2665 sc->sc_wdcdev.sc_dev.dv_xname);
2666 return;
2667 }
2668 printf("%s: primary channel %s to ",
2669 sc->sc_wdcdev.sc_dev.dv_xname,
2670 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2671 "configured" : "wired");
2672 if (interface & PCIIDE_INTERFACE_PCI(0)) {
2673 printf("native-PCI");
2674 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2675 pciide_pci_intr);
2676 } else {
2677 printf("compatibility");
2678 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2679 &cmdsize, &ctlsize);
2680 }
2681 printf(" mode\n");
2682 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2683 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2684 wdcattach(&cp->wdc_channel);
2685 if (pciide_chan_candisable(cp)) {
2686 pci_conf_write(sc->sc_pc, sc->sc_tag,
2687 PCI_COMMAND_STATUS_REG, 0);
2688 }
2689 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2690 if (cp->hw_ok == 0)
2691 return;
2692 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2693 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2694 cy693_setup_channel(&cp->wdc_channel);
2695 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2696 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2697 }
2698
2699 void
2700 cy693_setup_channel(chp)
2701 struct channel_softc *chp;
2702 {
2703 struct ata_drive_datas *drvp;
2704 int drive;
2705 u_int32_t cy_cmd_ctrl;
2706 u_int32_t idedma_ctl;
2707 struct pciide_channel *cp = (struct pciide_channel*)chp;
2708 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2709 int dma_mode = -1;
2710
2711 cy_cmd_ctrl = idedma_ctl = 0;
2712
2713 /* setup DMA if needed */
2714 pciide_channel_dma_setup(cp);
2715
2716 for (drive = 0; drive < 2; drive++) {
2717 drvp = &chp->ch_drive[drive];
2718 /* If no drive, skip */
2719 if ((drvp->drive_flags & DRIVE) == 0)
2720 continue;
2721 /* add timing values, setup DMA if needed */
2722 if (drvp->drive_flags & DRIVE_DMA) {
2723 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2724 /* use Multiword DMA */
2725 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2726 dma_mode = drvp->DMA_mode;
2727 }
2728 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2729 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2730 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2731 CY_CMD_CTRL_IOW_REC_OFF(drive));
2732 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2733 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2734 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2735 CY_CMD_CTRL_IOR_REC_OFF(drive));
2736 }
2737 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
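	/*
	 * There is only one DMA timing per channel, so both drives get the
	 * slowest multi-word DMA mode negotiated above.
	 */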
2738 chp->ch_drive[0].DMA_mode = dma_mode;
2739 chp->ch_drive[1].DMA_mode = dma_mode;
2740
2741 if (dma_mode == -1)
2742 dma_mode = 0;
2743
2744 if (sc->sc_cy_handle != NULL) {
2745 /* Note: `multiple' is implied. */
2746 cy82c693_write(sc->sc_cy_handle,
2747 (sc->sc_cy_compatchan == 0) ?
2748 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2749 }
2750
2751 pciide_print_modes(cp);
2752
2753 if (idedma_ctl != 0) {
2754 /* Add software bits in status register */
2755 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2756 IDEDMA_CTL, idedma_ctl);
2757 }
2758 }
2759
2760 static int
2761 sis_hostbr_match(pa)
2762 struct pci_attach_args *pa;
2763 {
2764 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
2765 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
2766 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
2767 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
2768 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735)));
2769 }
2770
2771 void
2772 sis_chip_map(sc, pa)
2773 struct pciide_softc *sc;
2774 struct pci_attach_args *pa;
2775 {
2776 struct pciide_channel *cp;
2777 int channel;
2778 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2779 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2780 pcireg_t rev = PCI_REVISION(pa->pa_class);
2781 bus_size_t cmdsize, ctlsize;
2782 pcitag_t pchb_tag;
2783 pcireg_t pchb_id, pchb_class;
2784
2785 if (pciide_chipen(sc, pa) == 0)
2786 return;
2787 printf("%s: bus-master DMA support present",
2788 sc->sc_wdcdev.sc_dev.dv_xname);
2789 pciide_mapreg_dma(sc, pa);
2790 printf("\n");
2791
2792 /* get a PCI tag for the host bridge (function 0 of the same device) */
2793 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2794 	/* and read ID and rev of the host bridge */
2795 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2796 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2797
2798 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2799 WDC_CAPABILITY_MODE;
2800 if (sc->sc_dma_ok) {
2801 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2802 sc->sc_wdcdev.irqack = pciide_irqack;
2803 /*
2804 		 * controllers associated with a rev 0x2 530 Host-to-PCI bridge
2805 * have problems with UDMA (info provided by Christos)
2806 */
2807 if (rev >= 0xd0 &&
2808 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
2809 PCI_REVISION(pchb_class) >= 0x03))
2810 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2811 }
2812
2813 sc->sc_wdcdev.PIO_cap = 4;
2814 sc->sc_wdcdev.DMA_cap = 2;
2815 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2816 /*
2817 		 * Use UDMA/100 on chipsets with one of the newer host
2818 		 * bridges (SiS 645/650/730/735), UDMA/33 on the others.
2819 */
2820 sc->sc_wdcdev.UDMA_cap =
2821 pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
2822 sc->sc_wdcdev.set_modes = sis_setup_channel;
2823
2824 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2825 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2826
2827 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2828 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2829 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2830
2831 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2832 cp = &sc->pciide_channels[channel];
2833 if (pciide_chansetup(sc, channel, interface) == 0)
2834 continue;
2835 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2836 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2837 printf("%s: %s channel ignored (disabled)\n",
2838 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2839 continue;
2840 }
2841 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2842 pciide_pci_intr);
2843 if (cp->hw_ok == 0)
2844 continue;
2845 if (pciide_chan_candisable(cp)) {
2846 if (channel == 0)
2847 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2848 else
2849 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2850 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2851 sis_ctr0);
2852 }
2853 pciide_map_compat_intr(pa, cp, channel, interface);
2854 if (cp->hw_ok == 0)
2855 continue;
2856 sis_setup_channel(&cp->wdc_channel);
2857 }
2858 }
2859
2860 void
2861 sis_setup_channel(chp)
2862 struct channel_softc *chp;
2863 {
2864 struct ata_drive_datas *drvp;
2865 int drive;
2866 u_int32_t sis_tim;
2867 u_int32_t idedma_ctl;
2868 struct pciide_channel *cp = (struct pciide_channel*)chp;
2869 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2870
2871 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2872 "channel %d 0x%x\n", chp->channel,
2873 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2874 DEBUG_PROBE);
2875 sis_tim = 0;
2876 idedma_ctl = 0;
2877 /* setup DMA if needed */
2878 pciide_channel_dma_setup(cp);
2879
2880 for (drive = 0; drive < 2; drive++) {
2881 drvp = &chp->ch_drive[drive];
2882 /* If no drive, skip */
2883 if ((drvp->drive_flags & DRIVE) == 0)
2884 continue;
2885 /* add timing values, setup DMA if needed */
2886 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2887 (drvp->drive_flags & DRIVE_UDMA) == 0)
2888 goto pio;
2889
2890 if (drvp->drive_flags & DRIVE_UDMA) {
2891 /* use Ultra/DMA */
2892 drvp->drive_flags &= ~DRIVE_DMA;
2893 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2894 SIS_TIM_UDMA_TIME_OFF(drive);
2895 sis_tim |= SIS_TIM_UDMA_EN(drive);
2896 } else {
2897 /*
2898 * use Multiword DMA
2899 * Timings will be used for both PIO and DMA,
2900 * so adjust DMA mode if needed
2901 */
2902 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2903 drvp->PIO_mode = drvp->DMA_mode + 2;
2904 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2905 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2906 drvp->PIO_mode - 2 : 0;
2907 if (drvp->DMA_mode == 0)
2908 drvp->PIO_mode = 0;
2909 }
2910 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2911 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2912 SIS_TIM_ACT_OFF(drive);
2913 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2914 SIS_TIM_REC_OFF(drive);
2915 }
2916 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2917 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2918 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2919 if (idedma_ctl != 0) {
2920 /* Add software bits in status register */
2921 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2922 IDEDMA_CTL, idedma_ctl);
2923 }
2924 pciide_print_modes(cp);
2925 }
2926
2927 static int
2928 acer_isabr_match(pa)
2929 struct pci_attach_args *pa;
2930 {
2931 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ALI) &&
2932 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ALI_M1543));
2933 }
2934
2935 void
2936 acer_chip_map(sc, pa)
2937 struct pciide_softc *sc;
2938 struct pci_attach_args *pa;
2939 {
2940 struct pci_attach_args isa_pa;
2941 struct pciide_channel *cp;
2942 int channel;
2943 pcireg_t cr, interface;
2944 bus_size_t cmdsize, ctlsize;
2945 pcireg_t rev = PCI_REVISION(pa->pa_class);
2946
2947 if (pciide_chipen(sc, pa) == 0)
2948 return;
2949 printf("%s: bus-master DMA support present",
2950 sc->sc_wdcdev.sc_dev.dv_xname);
2951 pciide_mapreg_dma(sc, pa);
2952 printf("\n");
2953 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2954 WDC_CAPABILITY_MODE;
2955 if (sc->sc_dma_ok) {
2956 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2957 if (rev >= 0x20) {
2958 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2959 if (rev >= 0xC4)
2960 sc->sc_wdcdev.UDMA_cap = 5;
2961 else if (rev >= 0xC2)
2962 sc->sc_wdcdev.UDMA_cap = 4;
2963 else
2964 sc->sc_wdcdev.UDMA_cap = 2;
2965 }
2966 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2967 sc->sc_wdcdev.irqack = pciide_irqack;
2968 }
2969
2970 sc->sc_wdcdev.PIO_cap = 4;
2971 sc->sc_wdcdev.DMA_cap = 2;
2972 sc->sc_wdcdev.set_modes = acer_setup_channel;
2973 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2974 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2975
2976 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2977 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2978 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2979
2980 /* Enable "microsoft register bits" R/W. */
2981 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2982 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2983 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2984 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2985 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2986 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2987 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2988 ~ACER_CHANSTATUSREGS_RO);
2989 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2990 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2991 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2992 /* Don't use cr, re-read the real register content instead */
2993 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2994 PCI_CLASS_REG));
2995
2996 /* From linux: enable "Cable Detection" */
2997 if (rev >= 0xC2) {
2998 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
2999 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3000 | ACER_0x4B_CDETECT);
3001 /* set south-bridge's enable bit, m1533, 0x79 */
3002 if (pci_find_device(&isa_pa, acer_isabr_match) == 0) {
3003 printf("%s: can't find PCI/ISA bridge, downgrading "
3004 "to Ultra/33\n", sc->sc_wdcdev.sc_dev.dv_xname);
3005 sc->sc_wdcdev.UDMA_cap = 2;
3006 } else {
3007 if (rev == 0xC2)
3008 /* 1543C-B0 (m1533, 0x79, bit 2) */
3009 pciide_pci_write(isa_pa.pa_pc, isa_pa.pa_tag,
3010 ACER_0x79,
3011 pciide_pci_read(isa_pa.pa_pc, isa_pa.pa_tag,
3012 ACER_0x79)
3013 | ACER_0x79_REVC2_EN);
3014 else
3015 /* 1553/1535 (m1533, 0x79, bit 1) */
3016 pciide_pci_write(isa_pa.pa_pc, isa_pa.pa_tag,
3017 ACER_0x79,
3018 pciide_pci_read(isa_pa.pa_pc, isa_pa.pa_tag,
3019 ACER_0x79)
3020 | ACER_0x79_EN);
3021 }
3022 }
3023
3024 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3025 cp = &sc->pciide_channels[channel];
3026 if (pciide_chansetup(sc, channel, interface) == 0)
3027 continue;
3028 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3029 printf("%s: %s channel ignored (disabled)\n",
3030 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3031 continue;
3032 }
3033 		/* newer controllers seem to lack the ACER_CHIDS register. Sigh */
3034 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3035 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3036 if (cp->hw_ok == 0)
3037 continue;
3038 if (pciide_chan_candisable(cp)) {
3039 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3040 pci_conf_write(sc->sc_pc, sc->sc_tag,
3041 PCI_CLASS_REG, cr);
3042 }
3043 pciide_map_compat_intr(pa, cp, channel, interface);
3044 acer_setup_channel(&cp->wdc_channel);
3045 }
3046 }
3047
3048 void
3049 acer_setup_channel(chp)
3050 struct channel_softc *chp;
3051 {
3052 struct ata_drive_datas *drvp;
3053 int drive;
3054 u_int32_t acer_fifo_udma;
3055 u_int32_t idedma_ctl;
3056 struct pciide_channel *cp = (struct pciide_channel*)chp;
3057 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3058
3059 idedma_ctl = 0;
3060 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3061 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3062 acer_fifo_udma), DEBUG_PROBE);
3063 /* setup DMA if needed */
3064 pciide_channel_dma_setup(cp);
3065
3066 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3067 	    DRIVE_UDMA) {	/* check for an 80-conductor cable */
3068 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3069 ACER_0x4A_80PIN(chp->channel)) {
3070 if (chp->ch_drive[0].UDMA_mode > 2)
3071 chp->ch_drive[0].UDMA_mode = 2;
3072 if (chp->ch_drive[1].UDMA_mode > 2)
3073 chp->ch_drive[1].UDMA_mode = 2;
3074 }
3075 }
3076
3077 for (drive = 0; drive < 2; drive++) {
3078 drvp = &chp->ch_drive[drive];
3079 /* If no drive, skip */
3080 if ((drvp->drive_flags & DRIVE) == 0)
3081 continue;
3082 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3083 "channel %d drive %d 0x%x\n", chp->channel, drive,
3084 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3085 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3086 /* clear FIFO/DMA mode */
3087 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3088 ACER_UDMA_EN(chp->channel, drive) |
3089 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3090
3091 /* add timing values, setup DMA if needed */
3092 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3093 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3094 acer_fifo_udma |=
3095 ACER_FTH_OPL(chp->channel, drive, 0x1);
3096 goto pio;
3097 }
3098
3099 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3100 if (drvp->drive_flags & DRIVE_UDMA) {
3101 /* use Ultra/DMA */
3102 drvp->drive_flags &= ~DRIVE_DMA;
3103 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3104 acer_fifo_udma |=
3105 ACER_UDMA_TIM(chp->channel, drive,
3106 acer_udma[drvp->UDMA_mode]);
3107 /* XXX disable if one drive < UDMA3 ? */
3108 if (drvp->UDMA_mode >= 3) {
3109 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3110 ACER_0x4B,
3111 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3112 ACER_0x4B) | ACER_0x4B_UDMA66);
3113 }
3114 } else {
3115 /*
3116 * use Multiword DMA
3117 * Timings will be used for both PIO and DMA,
3118 * so adjust DMA mode if needed
3119 */
3120 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3121 drvp->PIO_mode = drvp->DMA_mode + 2;
3122 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3123 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3124 drvp->PIO_mode - 2 : 0;
3125 if (drvp->DMA_mode == 0)
3126 drvp->PIO_mode = 0;
3127 }
3128 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3129 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3130 ACER_IDETIM(chp->channel, drive),
3131 acer_pio[drvp->PIO_mode]);
3132 }
3133 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3134 acer_fifo_udma), DEBUG_PROBE);
3135 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3136 if (idedma_ctl != 0) {
3137 /* Add software bits in status register */
3138 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3139 IDEDMA_CTL, idedma_ctl);
3140 }
3141 pciide_print_modes(cp);
3142 }
3143
3144 int
3145 acer_pci_intr(arg)
3146 void *arg;
3147 {
3148 struct pciide_softc *sc = arg;
3149 struct pciide_channel *cp;
3150 struct channel_softc *wdc_cp;
3151 int i, rv, crv;
3152 u_int32_t chids;
3153
3154 rv = 0;
3155 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3156 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3157 cp = &sc->pciide_channels[i];
3158 wdc_cp = &cp->wdc_channel;
3159 /* If a compat channel skip. */
3160 if (cp->compat)
3161 continue;
3162 if (chids & ACER_CHIDS_INT(i)) {
3163 crv = wdcintr(wdc_cp);
3164 if (crv == 0)
3165 printf("%s:%d: bogus intr\n",
3166 sc->sc_wdcdev.sc_dev.dv_xname, i);
3167 else
3168 rv = 1;
3169 }
3170 }
3171 return rv;
3172 }
3173
3174 void
3175 hpt_chip_map(sc, pa)
3176 struct pciide_softc *sc;
3177 struct pci_attach_args *pa;
3178 {
3179 struct pciide_channel *cp;
3180 int i, compatchan, revision;
3181 pcireg_t interface;
3182 bus_size_t cmdsize, ctlsize;
3183
3184 if (pciide_chipen(sc, pa) == 0)
3185 return;
3186 revision = PCI_REVISION(pa->pa_class);
3187 printf(": Triones/Highpoint ");
3188 if (revision == HPT370_REV)
3189 printf("HPT370 IDE Controller\n");
3190 else if (revision == HPT370A_REV)
3191 printf("HPT370A IDE Controller\n");
3192 else if (revision == HPT366_REV)
3193 printf("HPT366 IDE Controller\n");
3194 else
3195 printf("unknown HPT IDE controller rev %d\n", revision);
3196
3197 /*
3198 * when the chip is in native mode it identifies itself as a
3199 	 * 'misc mass storage' device. Fake the interface in this case.
3200 */
3201 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3202 interface = PCI_INTERFACE(pa->pa_class);
3203 } else {
3204 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3205 PCIIDE_INTERFACE_PCI(0);
3206 if (revision == HPT370_REV || revision == HPT370A_REV)
3207 interface |= PCIIDE_INTERFACE_PCI(1);
3208 }
3209
3210 printf("%s: bus-master DMA support present",
3211 sc->sc_wdcdev.sc_dev.dv_xname);
3212 pciide_mapreg_dma(sc, pa);
3213 printf("\n");
3214 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3215 WDC_CAPABILITY_MODE;
3216 if (sc->sc_dma_ok) {
3217 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3218 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3219 sc->sc_wdcdev.irqack = pciide_irqack;
3220 }
3221 sc->sc_wdcdev.PIO_cap = 4;
3222 sc->sc_wdcdev.DMA_cap = 2;
3223
3224 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3225 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3226 if (revision == HPT366_REV) {
3227 sc->sc_wdcdev.UDMA_cap = 4;
3228 /*
3229 * The 366 has 2 PCI IDE functions, one for primary and one
3230 * for secondary. So we need to call pciide_mapregs_compat()
3231 * with the real channel
3232 */
3233 if (pa->pa_function == 0) {
3234 compatchan = 0;
3235 } else if (pa->pa_function == 1) {
3236 compatchan = 1;
3237 } else {
3238 printf("%s: unexpected PCI function %d\n",
3239 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3240 return;
3241 }
3242 sc->sc_wdcdev.nchannels = 1;
3243 } else {
3244 sc->sc_wdcdev.nchannels = 2;
3245 sc->sc_wdcdev.UDMA_cap = 5;
3246 }
3247 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3248 cp = &sc->pciide_channels[i];
3249 if (sc->sc_wdcdev.nchannels > 1) {
3250 compatchan = i;
3251 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3252 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3253 printf("%s: %s channel ignored (disabled)\n",
3254 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3255 continue;
3256 }
3257 }
3258 if (pciide_chansetup(sc, i, interface) == 0)
3259 continue;
3260 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3261 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3262 &ctlsize, hpt_pci_intr);
3263 } else {
3264 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3265 &cmdsize, &ctlsize);
3266 }
3267 if (cp->hw_ok == 0)
3268 return;
3269 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3270 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3271 wdcattach(&cp->wdc_channel);
3272 hpt_setup_channel(&cp->wdc_channel);
3273 }
3274 if (revision == HPT370_REV || revision == HPT370A_REV) {
3275 /*
3276 		 * HPT370 has a bit to disable interrupts; make sure
3277 		 * to clear it.
3278 */
3279 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3280 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3281 ~HPT_CSEL_IRQDIS);
3282 }
3283 return;
3284 }
3285
3286 void
3287 hpt_setup_channel(chp)
3288 struct channel_softc *chp;
3289 {
3290 struct ata_drive_datas *drvp;
3291 int drive;
3292 int cable;
3293 u_int32_t before, after;
3294 u_int32_t idedma_ctl;
3295 struct pciide_channel *cp = (struct pciide_channel*)chp;
3296 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3297
3298 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3299
3300 /* setup DMA if needed */
3301 pciide_channel_dma_setup(cp);
3302
3303 idedma_ctl = 0;
3304
3305 /* Per drive settings */
3306 for (drive = 0; drive < 2; drive++) {
3307 drvp = &chp->ch_drive[drive];
3308 /* If no drive, skip */
3309 if ((drvp->drive_flags & DRIVE) == 0)
3310 continue;
3311 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3312 HPT_IDETIM(chp->channel, drive));
3313
3314 /* add timing values, setup DMA if needed */
3315 if (drvp->drive_flags & DRIVE_UDMA) {
3316 /* use Ultra/DMA */
3317 drvp->drive_flags &= ~DRIVE_DMA;
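			/*
			 * A set CBLID bit apparently means no 80-conductor
			 * cable, so limit the drive to UDMA/33.
			 */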
3318 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3319 drvp->UDMA_mode > 2)
3320 drvp->UDMA_mode = 2;
3321 after = (sc->sc_wdcdev.nchannels == 2) ?
3322 hpt370_udma[drvp->UDMA_mode] :
3323 hpt366_udma[drvp->UDMA_mode];
3324 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3325 } else if (drvp->drive_flags & DRIVE_DMA) {
3326 /*
3327 * use Multiword DMA.
3328 * Timings will be used for both PIO and DMA, so adjust
3329 * DMA mode if needed
3330 */
3331 if (drvp->PIO_mode >= 3 &&
3332 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3333 drvp->DMA_mode = drvp->PIO_mode - 2;
3334 }
3335 after = (sc->sc_wdcdev.nchannels == 2) ?
3336 hpt370_dma[drvp->DMA_mode] :
3337 hpt366_dma[drvp->DMA_mode];
3338 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3339 } else {
3340 /* PIO only */
3341 after = (sc->sc_wdcdev.nchannels == 2) ?
3342 hpt370_pio[drvp->PIO_mode] :
3343 hpt366_pio[drvp->PIO_mode];
3344 }
3345 pci_conf_write(sc->sc_pc, sc->sc_tag,
3346 HPT_IDETIM(chp->channel, drive), after);
3347 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3348 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3349 after, before), DEBUG_PROBE);
3350 }
3351 if (idedma_ctl != 0) {
3352 /* Add software bits in status register */
3353 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3354 IDEDMA_CTL, idedma_ctl);
3355 }
3356 pciide_print_modes(cp);
3357 }
3358
3359 int
3360 hpt_pci_intr(arg)
3361 void *arg;
3362 {
3363 struct pciide_softc *sc = arg;
3364 struct pciide_channel *cp;
3365 struct channel_softc *wdc_cp;
3366 int rv = 0;
3367 int dmastat, i, crv;
3368
3369 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3370 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3371 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3372 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3373 continue;
3374 cp = &sc->pciide_channels[i];
3375 wdc_cp = &cp->wdc_channel;
3376 crv = wdcintr(wdc_cp);
3377 if (crv == 0) {
3378 printf("%s:%d: bogus intr\n",
3379 sc->sc_wdcdev.sc_dev.dv_xname, i);
3380 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3381 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3382 } else
3383 rv = 1;
3384 }
3385 return rv;
3386 }
3387
3388
3389 /* Macros to test product */
3390 #define PDC_IS_262(sc) \
3391 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3392 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3393 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3394 #define PDC_IS_265(sc) \
3395 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3396 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3397
3398 void
3399 pdc202xx_chip_map(sc, pa)
3400 struct pciide_softc *sc;
3401 struct pci_attach_args *pa;
3402 {
3403 struct pciide_channel *cp;
3404 int channel;
3405 pcireg_t interface, st, mode;
3406 bus_size_t cmdsize, ctlsize;
3407
3408 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3409 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
3410 DEBUG_PROBE);
3411 if (pciide_chipen(sc, pa) == 0)
3412 return;
3413
3414 /* turn off RAID mode */
3415 st &= ~PDC2xx_STATE_IDERAID;
3416
3417 /*
3418 	 * can't rely on the PCI_CLASS_REG content if the chip was in RAID
3419 	 * mode. We have to fake the interface.
3420 */
3421 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3422 if (st & PDC2xx_STATE_NATIVE)
3423 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3424
3425 printf("%s: bus-master DMA support present",
3426 sc->sc_wdcdev.sc_dev.dv_xname);
3427 pciide_mapreg_dma(sc, pa);
3428 printf("\n");
3429 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3430 WDC_CAPABILITY_MODE;
3431 if (sc->sc_dma_ok) {
3432 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3433 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3434 sc->sc_wdcdev.irqack = pciide_irqack;
3435 }
3436 sc->sc_wdcdev.PIO_cap = 4;
3437 sc->sc_wdcdev.DMA_cap = 2;
3438 if (PDC_IS_265(sc))
3439 sc->sc_wdcdev.UDMA_cap = 5;
3440 else if (PDC_IS_262(sc))
3441 sc->sc_wdcdev.UDMA_cap = 4;
3442 else
3443 sc->sc_wdcdev.UDMA_cap = 2;
3444 sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3445 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3446 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3447
3448 /* setup failsafe defaults */
3449 mode = 0;
3450 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3451 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3452 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3453 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3454 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3455 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3456 "initial timings 0x%x, now 0x%x\n", channel,
3457 pci_conf_read(sc->sc_pc, sc->sc_tag,
3458 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3459 DEBUG_PROBE);
3460 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3461 mode | PDC2xx_TIM_IORDYp);
3462 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3463 "initial timings 0x%x, now 0x%x\n", channel,
3464 pci_conf_read(sc->sc_pc, sc->sc_tag,
3465 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3466 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3467 mode);
3468 }
3469
3470 mode = PDC2xx_SCR_DMA;
3471 if (PDC_IS_262(sc)) {
3472 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3473 } else {
3474 /* the BIOS set it up this way */
3475 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3476 }
3477 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3478 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3479 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, now 0x%x\n",
3480 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3481 DEBUG_PROBE);
3482 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3483
3484 /* controller initial state register is OK even without BIOS */
3485 /* Set DMA mode to IDE DMA compatibility */
3486 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3487 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
3488 DEBUG_PROBE);
3489 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3490 mode | 0x1);
3491 mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3492 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3493 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3494 mode | 0x1);
3495
3496 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3497 cp = &sc->pciide_channels[channel];
3498 if (pciide_chansetup(sc, channel, interface) == 0)
3499 continue;
3500 if ((st & (PDC_IS_262(sc) ?
3501 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3502 printf("%s: %s channel ignored (disabled)\n",
3503 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3504 continue;
3505 }
3506 if (PDC_IS_265(sc))
3507 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3508 pdc20265_pci_intr);
3509 else
3510 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3511 pdc202xx_pci_intr);
3512 if (cp->hw_ok == 0)
3513 continue;
3514 if (pciide_chan_candisable(cp))
3515 st &= ~(PDC_IS_262(sc) ?
3516 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3517 pciide_map_compat_intr(pa, cp, channel, interface);
3518 pdc202xx_setup_channel(&cp->wdc_channel);
3519 }
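/* Write the controller state back, with any disabled channels turned off. */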
3520 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3521 DEBUG_PROBE);
3522 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3523 return;
3524 }
3525
3526 void
3527 pdc202xx_setup_channel(chp)
3528 struct channel_softc *chp;
3529 {
3530 struct ata_drive_datas *drvp;
3531 int drive;
3532 pcireg_t mode, st;
3533 u_int32_t idedma_ctl, scr, atapi;
3534 struct pciide_channel *cp = (struct pciide_channel*)chp;
3535 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3536 int channel = chp->channel;
3537
3538 /* setup DMA if needed */
3539 pciide_channel_dma_setup(cp);
3540
3541 idedma_ctl = 0;
3542 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3543 sc->sc_wdcdev.sc_dev.dv_xname,
3544 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3545 DEBUG_PROBE);
3546
3547 /* Per channel settings */
3548 if (PDC_IS_262(sc)) {
3549 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3550 PDC262_U66);
3551 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3552 /* Trim UDMA mode */
3553 if ((st & PDC262_STATE_80P(channel)) != 0 ||
3554 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3555 chp->ch_drive[0].UDMA_mode <= 2) ||
3556 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3557 chp->ch_drive[1].UDMA_mode <= 2)) {
3558 if (chp->ch_drive[0].UDMA_mode > 2)
3559 chp->ch_drive[0].UDMA_mode = 2;
3560 if (chp->ch_drive[1].UDMA_mode > 2)
3561 chp->ch_drive[1].UDMA_mode = 2;
3562 }
3563 /* Set U66 if needed */
3564 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3565 chp->ch_drive[0].UDMA_mode > 2) ||
3566 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3567 chp->ch_drive[1].UDMA_mode > 2))
3568 scr |= PDC262_U66_EN(channel);
3569 else
3570 scr &= ~PDC262_U66_EN(channel);
3571 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3572 PDC262_U66, scr);
3573 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3574 sc->sc_wdcdev.sc_dev.dv_xname, channel,
3575 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3576 PDC262_ATAPI(channel))), DEBUG_PROBE);
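/*
 * With an ATAPI device on the channel, only set the per-channel
 * UDMA bit if that does not break a companion drive which is
 * limited to multiword DMA.
 */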
3577 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3578 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3579 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3580 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3581 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3582 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3583 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3584 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3585 atapi = 0;
3586 else
3587 atapi = PDC262_ATAPI_UDMA;
3588 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3589 PDC262_ATAPI(channel), atapi);
3590 }
3591 }
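/* Per-drive timing registers */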
3592 for (drive = 0; drive < 2; drive++) {
3593 drvp = &chp->ch_drive[drive];
3594 /* If no drive, skip */
3595 if ((drvp->drive_flags & DRIVE) == 0)
3596 continue;
3597 mode = 0;
3598 if (drvp->drive_flags & DRIVE_UDMA) {
3599 /* use Ultra/DMA */
3600 drvp->drive_flags &= ~DRIVE_DMA;
3601 mode = PDC2xx_TIM_SET_MB(mode,
3602 pdc2xx_udma_mb[drvp->UDMA_mode]);
3603 mode = PDC2xx_TIM_SET_MC(mode,
3604 pdc2xx_udma_mc[drvp->UDMA_mode]);
3606 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3607 } else if (drvp->drive_flags & DRIVE_DMA) {
3608 mode = PDC2xx_TIM_SET_MB(mode,
3609 pdc2xx_dma_mb[drvp->DMA_mode]);
3610 mode = PDC2xx_TIM_SET_MC(mode,
3611 pdc2xx_dma_mc[drvp->DMA_mode]);
3612 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3613 } else {
3614 mode = PDC2xx_TIM_SET_MB(mode,
3615 pdc2xx_dma_mb[0]);
3616 mode = PDC2xx_TIM_SET_MC(mode,
3617 pdc2xx_dma_mc[0]);
3618 }
3619 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3620 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3621 if (drvp->drive_flags & DRIVE_ATA)
3622 mode |= PDC2xx_TIM_PRE;
3623 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3624 if (drvp->PIO_mode >= 3) {
3625 mode |= PDC2xx_TIM_IORDY;
3626 if (drive == 0)
3627 mode |= PDC2xx_TIM_IORDYp;
3628 }
3629 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3630 "timings 0x%x\n",
3631 sc->sc_wdcdev.sc_dev.dv_xname,
3632 chp->channel, drive, mode), DEBUG_PROBE);
3633 pci_conf_write(sc->sc_pc, sc->sc_tag,
3634 PDC2xx_TIM(chp->channel, drive), mode);
3635 }
3636 if (idedma_ctl != 0) {
3637 /* Add software bits in status register */
3638 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3639 IDEDMA_CTL, idedma_ctl);
3640 }
3641 pciide_print_modes(cp);
3642 }
3643
3644 int
3645 pdc202xx_pci_intr(arg)
3646 void *arg;
3647 {
3648 struct pciide_softc *sc = arg;
3649 struct pciide_channel *cp;
3650 struct channel_softc *wdc_cp;
3651 int i, rv, crv;
3652 u_int32_t scr;
3653
3654 rv = 0;
3655 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
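/* PDC2xx_SCR_INT(i) tells us whether channel i is interrupting. */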
3656 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3657 cp = &sc->pciide_channels[i];
3658 wdc_cp = &cp->wdc_channel;
3659 /* If a compat channel, skip. */
3660 if (cp->compat)
3661 continue;
3662 if (scr & PDC2xx_SCR_INT(i)) {
3663 crv = wdcintr(wdc_cp);
3664 if (crv == 0)
3665 printf("%s:%d: bogus intr (reg 0x%x)\n",
3666 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3667 else
3668 rv = 1;
3669 }
3670 }
3671 return rv;
3672 }
3673
3674 int
3675 pdc20265_pci_intr(arg)
3676 void *arg;
3677 {
3678 struct pciide_softc *sc = arg;
3679 struct pciide_channel *cp;
3680 struct channel_softc *wdc_cp;
3681 int i, rv, crv;
3682 u_int32_t dmastat;
3683
3684 rv = 0;
3685 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3686 cp = &sc->pciide_channels[i];
3687 wdc_cp = &cp->wdc_channel;
3688 /* If a compat channel, skip. */
3689 if (cp->compat)
3690 continue;
3691 /*
3692 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously,
3693 * but it does assert INT in IDEDMA_CTL even for non-DMA ops.
3694 * So use IDEDMA_CTL instead (this costs two register reads
3695 * instead of one, but there is no other way).
3696 */
3697 dmastat = bus_space_read_1(sc->sc_dma_iot,
3698 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3699 if ((dmastat & IDEDMA_CTL_INTR) == 0)
3700 continue;
3701 crv = wdcintr(wdc_cp);
3702 if (crv == 0)
3703 printf("%s:%d: bogus intr\n",
3704 sc->sc_wdcdev.sc_dev.dv_xname, i);
3705 else
3706 rv = 1;
3707 }
3708 return rv;
3709 }
3710
3711 void
3712 opti_chip_map(sc, pa)
3713 struct pciide_softc *sc;
3714 struct pci_attach_args *pa;
3715 {
3716 struct pciide_channel *cp;
3717 bus_size_t cmdsize, ctlsize;
3718 pcireg_t interface;
3719 u_int8_t init_ctrl;
3720 int channel;
3721
3722 if (pciide_chipen(sc, pa) == 0)
3723 return;
3724 printf("%s: bus-master DMA support present",
3725 sc->sc_wdcdev.sc_dev.dv_xname);
3726
3727 /*
3728 * XXXSCW:
3729 * There seem to be a couple of buggy revisions/implementations
3730 * of the OPTi pciide chipset. This kludge seems to fix one of
3731 * the reported problems (PR/11644) but still fails for the
3732 * other (PR/13151), although the latter may be due to other
3733 * issues too...
3734 */
3735 if (PCI_REVISION(pa->pa_class) <= 0x12) {
3736 printf(" but disabled due to chip rev. <= 0x12");
3737 sc->sc_dma_ok = 0;
3738 sc->sc_wdcdev.cap = 0;
3739 } else {
3740 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32;
3741 pciide_mapreg_dma(sc, pa);
3742 }
3743 printf("\n");
3744
3745 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
3746 sc->sc_wdcdev.PIO_cap = 4;
3747 if (sc->sc_dma_ok) {
3748 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3749 sc->sc_wdcdev.irqack = pciide_irqack;
3750 sc->sc_wdcdev.DMA_cap = 2;
3751 }
3752 sc->sc_wdcdev.set_modes = opti_setup_channel;
3753
3754 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3755 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3756
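/* INIT_CONTROL is checked below to see whether the second channel has been disabled. */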
3757 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3758 OPTI_REG_INIT_CONTROL);
3759
3760 interface = PCI_INTERFACE(pa->pa_class);
3761
3762 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3763 cp = &sc->pciide_channels[channel];
3764 if (pciide_chansetup(sc, channel, interface) == 0)
3765 continue;
3766 if (channel == 1 &&
3767 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3768 printf("%s: %s channel ignored (disabled)\n",
3769 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3770 continue;
3771 }
3772 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3773 pciide_pci_intr);
3774 if (cp->hw_ok == 0)
3775 continue;
3776 pciide_map_compat_intr(pa, cp, channel, interface);
3777 if (cp->hw_ok == 0)
3778 continue;
3779 opti_setup_channel(&cp->wdc_channel);
3780 }
3781 }
3782
3783 void
3784 opti_setup_channel(chp)
3785 struct channel_softc *chp;
3786 {
3787 struct ata_drive_datas *drvp;
3788 struct pciide_channel *cp = (struct pciide_channel*)chp;
3789 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3790 int drive, spd;
3791 int mode[2];
3792 u_int8_t rv, mr;
3793
3794 /*
3795 * The `Delay' and `Address Setup Time' fields of the
3796 * Miscellaneous Register are always zero initially.
3797 */
3798 mr = opti_read_config(chp, OPTI_REG_MISC) &
3799 ~(OPTI_MISC_DELAY_MASK |
3800 OPTI_MISC_ADDR_SETUP_MASK |
3801 OPTI_MISC_INDEX_MASK);
3802
3803 /* Prime the control register before setting timing values */
3804 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3805
3806 /* Determine the clock rate of the PCI bus the chip is attached to */
3807 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3808 spd &= OPTI_STRAP_PCI_SPEED_MASK;
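/* spd now selects the opti_tim_* table row matching the PCI bus clock. */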
3809
3810 /* setup DMA if needed */
3811 pciide_channel_dma_setup(cp);
3812
3813 for (drive = 0; drive < 2; drive++) {
3814 drvp = &chp->ch_drive[drive];
3815 /* If no drive, skip */
3816 if ((drvp->drive_flags & DRIVE) == 0) {
3817 mode[drive] = -1;
3818 continue;
3819 }
3820
3821 if ((drvp->drive_flags & DRIVE_DMA)) {
3822 /*
3823 * Timings will be used for both PIO and DMA,
3824 * so adjust DMA mode if needed
3825 */
3826 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3827 drvp->PIO_mode = drvp->DMA_mode + 2;
3828 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3829 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3830 drvp->PIO_mode - 2 : 0;
3831 if (drvp->DMA_mode == 0)
3832 drvp->PIO_mode = 0;
3833
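/*
 * DMA timing entries follow the five PIO entries in the
 * opti_tim_* tables, hence the +5 offset.
 */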
3834 mode[drive] = drvp->DMA_mode + 5;
3835 } else
3836 mode[drive] = drvp->PIO_mode;
3837
3838 if (drive && mode[0] >= 0 &&
3839 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3840 /*
3841 * Can't have two drives using different values
3842 * for `Address Setup Time'.
3843 * Slow down the faster drive to compensate.
3844 */
3845 int d = (opti_tim_as[spd][mode[0]] >
3846 opti_tim_as[spd][mode[1]]) ? 0 : 1;
3847
3848 mode[d] = mode[1-d];
3849 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3850 chp->ch_drive[d].DMA_mode = 0;
3851 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3852 }
3853 }
3854
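/* Now program the chip with the timings chosen above, one drive at a time. */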
3855 for (drive = 0; drive < 2; drive++) {
3856 int m;
3857 if ((m = mode[drive]) < 0)
3858 continue;
3859
3860 /* Set the Address Setup Time and select appropriate index */
3861 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3862 rv |= OPTI_MISC_INDEX(drive);
3863 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3864
3865 /* Set the pulse width and recovery timing parameters */
3866 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3867 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3868 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3869 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3870
3871 /* Set the Enhanced Mode register appropriately */
3872 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3873 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3874 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3875 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3876 }
3877
3878 /* Finally, enable the timings */
3879 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3880
3881 pciide_print_modes(cp);
3882 }
3883
3884 #define ACARD_IS_850(sc) \
3885 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
3886
3887 void
3888 acard_chip_map(sc, pa)
3889 struct pciide_softc *sc;
3890 struct pci_attach_args *pa;
3891 {
3892 struct pciide_channel *cp;
3893 int i;
3894 pcireg_t interface;
3895 bus_size_t cmdsize, ctlsize;
3896
3897 if (pciide_chipen(sc, pa) == 0)
3898 return;
3899
3900 /*
3901 * When the chip is in native mode it identifies itself as a
3902 * 'misc mass storage' device; fake the interface in that case.
3903 */
3904 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3905 interface = PCI_INTERFACE(pa->pa_class);
3906 } else {
3907 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3908 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3909 }
3910
3911 printf("%s: bus-master DMA support present",
3912 sc->sc_wdcdev.sc_dev.dv_xname);
3913 pciide_mapreg_dma(sc, pa);
3914 printf("\n");
3915 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3916 WDC_CAPABILITY_MODE;
3917
3918 if (sc->sc_dma_ok) {
3919 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3920 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3921 sc->sc_wdcdev.irqack = pciide_irqack;
3922 }
3923 sc->sc_wdcdev.PIO_cap = 4;
3924 sc->sc_wdcdev.DMA_cap = 2;
3925 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
3926
3927 sc->sc_wdcdev.set_modes = acard_setup_channel;
3928 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3929 sc->sc_wdcdev.nchannels = 2;
3930
3931 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3932 cp = &sc->pciide_channels[i];
3933 if (pciide_chansetup(sc, i, interface) == 0)
3934 continue;
3935 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3936 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3937 &ctlsize, pciide_pci_intr);
3938 } else {
3939 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
3940 &cmdsize, &ctlsize);
3941 }
3942 if (cp->hw_ok == 0)
3943 return;
3944 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3945 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3946 wdcattach(&cp->wdc_channel);
3947 acard_setup_channel(&cp->wdc_channel);
3948 }
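/* On the non-ATP850 chips, clear the ATP860_CTRL_INT bit in the control register. */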
3949 if (!ACARD_IS_850(sc)) {
3950 u_int32_t reg;
3951 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
3952 reg &= ~ATP860_CTRL_INT;
3953 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
3954 }
3955 }
3956
3957 void
3958 acard_setup_channel(chp)
3959 struct channel_softc *chp;
3960 {
3961 struct ata_drive_datas *drvp;
3962 struct pciide_channel *cp = (struct pciide_channel*)chp;
3963 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3964 int channel = chp->channel;
3965 int drive;
3966 u_int32_t idetime, udma_mode;
3967 u_int32_t idedma_ctl;
3968
3969 /* setup DMA if needed */
3970 pciide_channel_dma_setup(cp);
3971
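/*
 * The ATP850 has a per-channel ATP850_IDETIME register and keeps its
 * UDMA modes in ATP850_UDMA; the newer chips use shared ATP860_IDETIME
 * and ATP860_UDMA registers with per-channel fields.
 */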
3972 if (ACARD_IS_850(sc)) {
3973 idetime = 0;
3974 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
3975 udma_mode &= ~ATP850_UDMA_MASK(channel);
3976 } else {
3977 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
3978 idetime &= ~ATP860_SETTIME_MASK(channel);
3979 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
3980 udma_mode &= ~ATP860_UDMA_MASK(channel);
3981
3982 /* check for an 80-wire cable */
3983 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
3984 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
3985 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
3986 & ATP860_CTRL_80P(chp->channel)) {
3987 if (chp->ch_drive[0].UDMA_mode > 2)
3988 chp->ch_drive[0].UDMA_mode = 2;
3989 if (chp->ch_drive[1].UDMA_mode > 2)
3990 chp->ch_drive[1].UDMA_mode = 2;
3991 }
3992 }
3993 }
3994
3995 idedma_ctl = 0;
3996
3997 /* Per drive settings */
3998 for (drive = 0; drive < 2; drive++) {
3999 drvp = &chp->ch_drive[drive];
4000 /* If no drive, skip */
4001 if ((drvp->drive_flags & DRIVE) == 0)
4002 continue;
4003 /* add timing values, setup DMA if needed */
4004 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4005 (drvp->drive_flags & DRIVE_UDMA)) {
4006 /* use Ultra/DMA */
4007 if (ACARD_IS_850(sc)) {
4008 idetime |= ATP850_SETTIME(drive,
4009 acard_act_udma[drvp->UDMA_mode],
4010 acard_rec_udma[drvp->UDMA_mode]);
4011 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4012 acard_udma_conf[drvp->UDMA_mode]);
4013 } else {
4014 idetime |= ATP860_SETTIME(channel, drive,
4015 acard_act_udma[drvp->UDMA_mode],
4016 acard_rec_udma[drvp->UDMA_mode]);
4017 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4018 acard_udma_conf[drvp->UDMA_mode]);
4019 }
4020 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4021 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4022 (drvp->drive_flags & DRIVE_DMA)) {
4023 /* use Multiword DMA */
4024 drvp->drive_flags &= ~DRIVE_UDMA;
4025 if (ACARD_IS_850(sc)) {
4026 idetime |= ATP850_SETTIME(drive,
4027 acard_act_dma[drvp->DMA_mode],
4028 acard_rec_dma[drvp->DMA_mode]);
4029 } else {
4030 idetime |= ATP860_SETTIME(channel, drive,
4031 acard_act_dma[drvp->DMA_mode],
4032 acard_rec_dma[drvp->DMA_mode]);
4033 }
4034 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4035 } else {
4036 /* PIO only */
4037 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4038 if (ACARD_IS_850(sc)) {
4039 idetime |= ATP850_SETTIME(drive,
4040 acard_act_pio[drvp->PIO_mode],
4041 acard_rec_pio[drvp->PIO_mode]);
4042 } else {
4043 idetime |= ATP860_SETTIME(channel, drive,
4044 acard_act_pio[drvp->PIO_mode],
4045 acard_rec_pio[drvp->PIO_mode]);
4046 }
4047 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4048 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4049 | ATP8x0_CTRL_EN(channel));
4050 }
4051 }
4052
4053 if (idedma_ctl != 0) {
4054 /* Add software bits in status register */
4055 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4056 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4057 }
4058 pciide_print_modes(cp);
4059
4060 if (ACARD_IS_850(sc)) {
4061 pci_conf_write(sc->sc_pc, sc->sc_tag,
4062 ATP850_IDETIME(channel), idetime);
4063 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4064 } else {
4065 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4066 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4067 }
4068 }
4069
4070 int
4071 acard_pci_intr(arg)
4072 void *arg;
4073 {
4074 struct pciide_softc *sc = arg;
4075 struct pciide_channel *cp;
4076 struct channel_softc *wdc_cp;
4077 int rv = 0;
4078 int dmastat, i, crv;
4079
4080 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4081 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4082 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4083 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4084 continue;
4085 cp = &sc->pciide_channels[i];
4086 wdc_cp = &cp->wdc_channel;
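/*
 * If the channel is not waiting for an interrupt, drain it with
 * wdcintr() and write the DMA status back to clear the interrupt bit.
 */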
4087 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4088 (void)wdcintr(wdc_cp);
4089 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4090 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4091 continue;
4092 }
4093 crv = wdcintr(wdc_cp);
4094 if (crv == 0)
4095 printf("%s:%d: bogus intr\n",
4096 sc->sc_wdcdev.sc_dev.dv_xname, i);
4097 else if (crv == 1)
4098 rv = 1;
4099 else if (rv == 0)
4100 rv = crv;
4101 }
4102 return rv;
4103 }
4104