/*	$NetBSD: pciide.c,v 1.188 2003/04/04 11:01:48 kent Exp $	*/
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.188 2003/04/04 11:01:48 kent Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/pciide_i31244_reg.h>
123 #include <dev/pci/pciide_sii3112_reg.h>
124 #include <dev/pci/cy82c693var.h>
125
126 #include "opt_pciide.h"
127
128 /* inlines for reading/writing 8-bit PCI registers */
129 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
130 int));
131 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
132 int, u_int8_t));
133
134 static __inline u_int8_t
135 pciide_pci_read(pc, pa, reg)
136 pci_chipset_tag_t pc;
137 pcitag_t pa;
138 int reg;
139 {
140
141 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
142 ((reg & 0x03) * 8) & 0xff);
143 }
144
145 static __inline void
146 pciide_pci_write(pc, pa, reg, val)
147 pci_chipset_tag_t pc;
148 pcitag_t pa;
149 int reg;
150 u_int8_t val;
151 {
152 pcireg_t pcival;
153
154 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
155 pcival &= ~(0xff << ((reg & 0x03) * 8));
156 pcival |= (val << ((reg & 0x03) * 8));
157 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
158 }
159
160 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
161
162 void sata_setup_channel __P((struct channel_softc*));
163
164 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
165 void piix_setup_channel __P((struct channel_softc*));
166 void piix3_4_setup_channel __P((struct channel_softc*));
167 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
168 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
169 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
170
171 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
172 void amd7x6_setup_channel __P((struct channel_softc*));
173
174 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void apollo_setup_channel __P((struct channel_softc*));
176
177 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
178 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
179 void cmd0643_9_setup_channel __P((struct channel_softc*));
180 void cmd_channel_map __P((struct pci_attach_args *,
181 struct pciide_softc *, int));
182 int cmd_pci_intr __P((void *));
183 void cmd646_9_irqack __P((struct channel_softc *));
184 void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
185 void cmd680_setup_channel __P((struct channel_softc*));
186 void cmd680_channel_map __P((struct pci_attach_args *,
187 struct pciide_softc *, int));
188
189 void cmd3112_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
190 void cmd3112_setup_channel __P((struct channel_softc*));
191
192 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void cy693_setup_channel __P((struct channel_softc*));
194
195 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
196 void sis_setup_channel __P((struct channel_softc*));
197 void sis96x_setup_channel __P((struct channel_softc*));
198 static int sis_hostbr_match __P(( struct pci_attach_args *));
199 static int sis_south_match __P(( struct pci_attach_args *));
200
201 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
202 void acer_setup_channel __P((struct channel_softc*));
203 int acer_pci_intr __P((void *));
204
205 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void pdc202xx_setup_channel __P((struct channel_softc*));
207 void pdc20268_setup_channel __P((struct channel_softc*));
208 int pdc202xx_pci_intr __P((void *));
209 int pdc20265_pci_intr __P((void *));
210
211 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
212 void opti_setup_channel __P((struct channel_softc*));
213
214 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
215 void hpt_setup_channel __P((struct channel_softc*));
216 int hpt_pci_intr __P((void *));
217
218 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
219 void acard_setup_channel __P((struct channel_softc*));
220 int acard_pci_intr __P((void *));
221
222 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
223 void serverworks_setup_channel __P((struct channel_softc*));
224 int serverworks_pci_intr __P((void *));
225
226 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
227 void sl82c105_setup_channel __P((struct channel_softc*));
228
229 void pciide_channel_dma_setup __P((struct pciide_channel *));
230 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
231 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
232 void pciide_dma_start __P((void*, int, int));
233 int pciide_dma_finish __P((void*, int, int, int));
234 void pciide_irqack __P((struct channel_softc *));
235 void pciide_print_modes __P((struct pciide_channel *));
236
237 void artisea_chip_map __P((struct pciide_softc*, struct pci_attach_args *));
238
239 struct pciide_product_desc {
240 u_int32_t ide_product;
241 int ide_flags;
242 const char *ide_name;
243 /* map and setup chip, probe drives */
244 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
245 };
246
247 /* Flags for ide_flags */
248 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
249 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
250
/* Default product description for devices not known to this driver */
252 const struct pciide_product_desc default_product_desc = {
253 0,
254 0,
255 "Generic PCI IDE controller",
256 default_chip_map,
257 };
258
259 const struct pciide_product_desc pciide_intel_products[] = {
260 { PCI_PRODUCT_INTEL_82092AA,
261 0,
262 "Intel 82092AA IDE controller",
263 default_chip_map,
264 },
265 { PCI_PRODUCT_INTEL_82371FB_IDE,
266 0,
267 "Intel 82371FB IDE controller (PIIX)",
268 piix_chip_map,
269 },
270 { PCI_PRODUCT_INTEL_82371SB_IDE,
271 0,
272 "Intel 82371SB IDE Interface (PIIX3)",
273 piix_chip_map,
274 },
275 { PCI_PRODUCT_INTEL_82371AB_IDE,
276 0,
277 "Intel 82371AB IDE controller (PIIX4)",
278 piix_chip_map,
279 },
280 { PCI_PRODUCT_INTEL_82440MX_IDE,
281 0,
282 "Intel 82440MX IDE controller",
283 piix_chip_map
284 },
285 { PCI_PRODUCT_INTEL_82801AA_IDE,
286 0,
287 "Intel 82801AA IDE Controller (ICH)",
288 piix_chip_map,
289 },
290 { PCI_PRODUCT_INTEL_82801AB_IDE,
291 0,
292 "Intel 82801AB IDE Controller (ICH0)",
293 piix_chip_map,
294 },
295 { PCI_PRODUCT_INTEL_82801BA_IDE,
296 0,
297 "Intel 82801BA IDE Controller (ICH2)",
298 piix_chip_map,
299 },
300 { PCI_PRODUCT_INTEL_82801BAM_IDE,
301 0,
302 "Intel 82801BAM IDE Controller (ICH2)",
303 piix_chip_map,
304 },
305 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
306 0,
307 "Intel 82801CA IDE Controller",
308 piix_chip_map,
309 },
310 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
311 0,
312 "Intel 82801CA IDE Controller",
313 piix_chip_map,
314 },
315 { PCI_PRODUCT_INTEL_82801DB_IDE,
316 0,
317 "Intel 82801DB IDE Controller (ICH4)",
318 piix_chip_map,
319 },
320 { PCI_PRODUCT_INTEL_82801DBM_IDE,
321 0,
322 "Intel 82801DBM IDE Controller (ICH4M)",
323 piix_chip_map,
324 },
325 { PCI_PRODUCT_INTEL_31244,
326 0,
327 "Intel 31244 Serial ATA Controller",
328 artisea_chip_map,
329 },
330 { 0,
331 0,
332 NULL,
333 NULL
334 }
335 };
336
337 const struct pciide_product_desc pciide_amd_products[] = {
338 { PCI_PRODUCT_AMD_PBC756_IDE,
339 0,
340 "Advanced Micro Devices AMD756 IDE Controller",
341 amd7x6_chip_map
342 },
343 { PCI_PRODUCT_AMD_PBC766_IDE,
344 0,
345 "Advanced Micro Devices AMD766 IDE Controller",
346 amd7x6_chip_map
347 },
348 { PCI_PRODUCT_AMD_PBC768_IDE,
349 0,
350 "Advanced Micro Devices AMD768 IDE Controller",
351 amd7x6_chip_map
352 },
353 { PCI_PRODUCT_AMD_PBC8111_IDE,
354 0,
355 "Advanced Micro Devices AMD8111 IDE Controller",
356 amd7x6_chip_map
357 },
358 { 0,
359 0,
360 NULL,
361 NULL
362 }
363 };
364
365 const struct pciide_product_desc pciide_nvidia_products[] = {
366 { PCI_PRODUCT_NVIDIA_NFORCE_ATA100,
367 0,
368 "NVIDIA nForce IDE Controller",
369 amd7x6_chip_map
370 },
371 { PCI_PRODUCT_NVIDIA_NFORCE2_ATA133,
372 0,
373 "NVIDIA nForce2 IDE Controller",
374 amd7x6_chip_map
375 },
376 { 0,
377 0,
378 NULL,
379 NULL
380 }
381 };
382
383 const struct pciide_product_desc pciide_cmd_products[] = {
384 { PCI_PRODUCT_CMDTECH_640,
385 0,
386 "CMD Technology PCI0640",
387 cmd_chip_map
388 },
389 { PCI_PRODUCT_CMDTECH_643,
390 0,
391 "CMD Technology PCI0643",
392 cmd0643_9_chip_map,
393 },
394 { PCI_PRODUCT_CMDTECH_646,
395 0,
396 "CMD Technology PCI0646",
397 cmd0643_9_chip_map,
398 },
399 { PCI_PRODUCT_CMDTECH_648,
400 IDE_PCI_CLASS_OVERRIDE,
401 "CMD Technology PCI0648",
402 cmd0643_9_chip_map,
403 },
404 { PCI_PRODUCT_CMDTECH_649,
405 IDE_PCI_CLASS_OVERRIDE,
406 "CMD Technology PCI0649",
407 cmd0643_9_chip_map,
408 },
409 { PCI_PRODUCT_CMDTECH_680,
410 IDE_PCI_CLASS_OVERRIDE,
411 "Silicon Image 0680",
412 cmd680_chip_map,
413 },
414 { PCI_PRODUCT_CMDTECH_3112,
415 IDE_PCI_CLASS_OVERRIDE,
416 "Silicon Image SATALink 3112",
417 cmd3112_chip_map,
418 },
419 { 0,
420 0,
421 NULL,
422 NULL
423 }
424 };
425
426 const struct pciide_product_desc pciide_via_products[] = {
427 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
428 0,
429 NULL,
430 apollo_chip_map,
431 },
432 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
433 0,
434 NULL,
435 apollo_chip_map,
436 },
437 { 0,
438 0,
439 NULL,
440 NULL
441 }
442 };
443
444 const struct pciide_product_desc pciide_cypress_products[] = {
445 { PCI_PRODUCT_CONTAQ_82C693,
446 IDE_16BIT_IOSPACE,
447 "Cypress 82C693 IDE Controller",
448 cy693_chip_map,
449 },
450 { 0,
451 0,
452 NULL,
453 NULL
454 }
455 };
456
457 const struct pciide_product_desc pciide_sis_products[] = {
458 { PCI_PRODUCT_SIS_5597_IDE,
459 0,
460 NULL,
461 sis_chip_map,
462 },
463 { 0,
464 0,
465 NULL,
466 NULL
467 }
468 };
469
470 const struct pciide_product_desc pciide_acer_products[] = {
471 { PCI_PRODUCT_ALI_M5229,
472 0,
473 "Acer Labs M5229 UDMA IDE Controller",
474 acer_chip_map,
475 },
476 { 0,
477 0,
478 NULL,
479 NULL
480 }
481 };
482
483 const struct pciide_product_desc pciide_promise_products[] = {
484 { PCI_PRODUCT_PROMISE_ULTRA33,
485 IDE_PCI_CLASS_OVERRIDE,
486 "Promise Ultra33/ATA Bus Master IDE Accelerator",
487 pdc202xx_chip_map,
488 },
489 { PCI_PRODUCT_PROMISE_ULTRA66,
490 IDE_PCI_CLASS_OVERRIDE,
491 "Promise Ultra66/ATA Bus Master IDE Accelerator",
492 pdc202xx_chip_map,
493 },
494 { PCI_PRODUCT_PROMISE_ULTRA100,
495 IDE_PCI_CLASS_OVERRIDE,
496 "Promise Ultra100/ATA Bus Master IDE Accelerator",
497 pdc202xx_chip_map,
498 },
499 { PCI_PRODUCT_PROMISE_ULTRA100X,
500 IDE_PCI_CLASS_OVERRIDE,
501 "Promise Ultra100/ATA Bus Master IDE Accelerator",
502 pdc202xx_chip_map,
503 },
504 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
505 IDE_PCI_CLASS_OVERRIDE,
506 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
507 pdc202xx_chip_map,
508 },
509 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
510 IDE_PCI_CLASS_OVERRIDE,
511 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
512 pdc202xx_chip_map,
513 },
514 { PCI_PRODUCT_PROMISE_ULTRA133,
515 IDE_PCI_CLASS_OVERRIDE,
516 "Promise Ultra133/ATA Bus Master IDE Accelerator",
517 pdc202xx_chip_map,
518 },
519 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
520 IDE_PCI_CLASS_OVERRIDE,
521 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
522 pdc202xx_chip_map,
523 },
524 { PCI_PRODUCT_PROMISE_MBULTRA133,
525 IDE_PCI_CLASS_OVERRIDE,
526 "Promise Ultra133/ATA Bus Master IDE Accelerator (MB)",
527 pdc202xx_chip_map,
528 },
529 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
530 IDE_PCI_CLASS_OVERRIDE,
531 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
532 pdc202xx_chip_map,
533 },
534 { PCI_PRODUCT_PROMISE_FASTTRAK133LITE,
535 IDE_PCI_CLASS_OVERRIDE,
536 "Promise Fasttrak133 Lite Bus Master IDE Accelerator",
537 pdc202xx_chip_map,
538 },
539 { PCI_PRODUCT_PROMISE_SATA150TX2PLUS,
540 IDE_PCI_CLASS_OVERRIDE,
541 "Promise Serial ATA/150 TX2plus Bus Master IDE Accelerator",
542 pdc202xx_chip_map,
543 },
544 { 0,
545 0,
546 NULL,
547 NULL
548 }
549 };
550
551 const struct pciide_product_desc pciide_opti_products[] = {
552 { PCI_PRODUCT_OPTI_82C621,
553 0,
554 "OPTi 82c621 PCI IDE controller",
555 opti_chip_map,
556 },
557 { PCI_PRODUCT_OPTI_82C568,
558 0,
559 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
560 opti_chip_map,
561 },
562 { PCI_PRODUCT_OPTI_82D568,
563 0,
564 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
565 opti_chip_map,
566 },
567 { 0,
568 0,
569 NULL,
570 NULL
571 }
572 };
573
574 const struct pciide_product_desc pciide_triones_products[] = {
575 { PCI_PRODUCT_TRIONES_HPT366,
576 IDE_PCI_CLASS_OVERRIDE,
577 NULL,
578 hpt_chip_map,
579 },
580 { PCI_PRODUCT_TRIONES_HPT372,
581 IDE_PCI_CLASS_OVERRIDE,
582 NULL,
583 hpt_chip_map
584 },
585 { PCI_PRODUCT_TRIONES_HPT374,
586 IDE_PCI_CLASS_OVERRIDE,
587 NULL,
588 hpt_chip_map
589 },
590 { 0,
591 0,
592 NULL,
593 NULL
594 }
595 };
596
597 const struct pciide_product_desc pciide_acard_products[] = {
598 { PCI_PRODUCT_ACARD_ATP850U,
599 IDE_PCI_CLASS_OVERRIDE,
600 "Acard ATP850U Ultra33 IDE Controller",
601 acard_chip_map,
602 },
603 { PCI_PRODUCT_ACARD_ATP860,
604 IDE_PCI_CLASS_OVERRIDE,
605 "Acard ATP860 Ultra66 IDE Controller",
606 acard_chip_map,
607 },
608 { PCI_PRODUCT_ACARD_ATP860A,
609 IDE_PCI_CLASS_OVERRIDE,
610 "Acard ATP860-A Ultra66 IDE Controller",
611 acard_chip_map,
612 },
613 { 0,
614 0,
615 NULL,
616 NULL
617 }
618 };
619
620 const struct pciide_product_desc pciide_serverworks_products[] = {
621 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
622 0,
623 "ServerWorks OSB4 IDE Controller",
624 serverworks_chip_map,
625 },
626 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
627 0,
628 "ServerWorks CSB5 IDE Controller",
629 serverworks_chip_map,
630 },
631 { PCI_PRODUCT_SERVERWORKS_CSB6_IDE,
632 0,
633 "ServerWorks CSB6 RAID/IDE Controller",
634 serverworks_chip_map,
635 },
636 { 0,
637 0,
638 NULL,
639 }
640 };
641
642 const struct pciide_product_desc pciide_symphony_products[] = {
643 { PCI_PRODUCT_SYMPHONY_82C105,
644 0,
645 "Symphony Labs 82C105 IDE controller",
646 sl82c105_chip_map,
647 },
648 { 0,
649 0,
650 NULL,
651 }
652 };
653
654 const struct pciide_product_desc pciide_winbond_products[] = {
655 { PCI_PRODUCT_WINBOND_W83C553F_1,
656 0,
657 "Winbond W83C553F IDE controller",
658 sl82c105_chip_map,
659 },
660 { 0,
661 0,
662 NULL,
663 }
664 };
665
666 struct pciide_vendor_desc {
667 u_int32_t ide_vendor;
668 const struct pciide_product_desc *ide_products;
669 };
670
671 const struct pciide_vendor_desc pciide_vendors[] = {
672 { PCI_VENDOR_INTEL, pciide_intel_products },
673 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
674 { PCI_VENDOR_VIATECH, pciide_via_products },
675 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
676 { PCI_VENDOR_SIS, pciide_sis_products },
677 { PCI_VENDOR_ALI, pciide_acer_products },
678 { PCI_VENDOR_PROMISE, pciide_promise_products },
679 { PCI_VENDOR_AMD, pciide_amd_products },
680 { PCI_VENDOR_OPTI, pciide_opti_products },
681 { PCI_VENDOR_TRIONES, pciide_triones_products },
682 { PCI_VENDOR_ACARD, pciide_acard_products },
683 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
684 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
685 { PCI_VENDOR_WINBOND, pciide_winbond_products },
686 { PCI_VENDOR_NVIDIA, pciide_nvidia_products },
687 { 0, NULL }
688 };
689
690 /* options passed via the 'flags' config keyword */
691 #define PCIIDE_OPTIONS_DMA 0x01
692 #define PCIIDE_OPTIONS_NODMA 0x02
693
694 int pciide_match __P((struct device *, struct cfdata *, void *));
695 void pciide_attach __P((struct device *, struct device *, void *));
696
697 CFATTACH_DECL(pciide, sizeof(struct pciide_softc),
698 pciide_match, pciide_attach, NULL, NULL);
699
700 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
701 int pciide_mapregs_compat __P(( struct pci_attach_args *,
702 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
703 int pciide_mapregs_native __P((struct pci_attach_args *,
704 struct pciide_channel *, bus_size_t *, bus_size_t *,
705 int (*pci_intr) __P((void *))));
706 void pciide_mapreg_dma __P((struct pciide_softc *,
707 struct pci_attach_args *));
708 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
709 void pciide_mapchan __P((struct pci_attach_args *,
710 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
711 int (*pci_intr) __P((void *))));
712 int pciide_chan_candisable __P((struct pciide_channel *));
713 void pciide_map_compat_intr __P(( struct pci_attach_args *,
714 struct pciide_channel *, int, int));
715 int pciide_compat_intr __P((void *));
716 int pciide_pci_intr __P((void *));
717 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
718
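/*
 * Look up the product descriptor for a given PCI ID. Returns NULL if
 * the vendor or the product is not known to this driver.
 */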
719 const struct pciide_product_desc *
720 pciide_lookup_product(id)
721 u_int32_t id;
722 {
723 const struct pciide_product_desc *pp;
724 const struct pciide_vendor_desc *vp;
725
726 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
727 if (PCI_VENDOR(id) == vp->ide_vendor)
728 break;
729
730 if ((pp = vp->ide_products) == NULL)
731 return NULL;
732
733 for (; pp->chip_map != NULL; pp++)
734 if (PCI_PRODUCT(id) == pp->ide_product)
735 break;
736
737 if (pp->chip_map == NULL)
738 return NULL;
739 return pp;
740 }
741
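/*
 * Match function: accept any device of PCI class mass storage,
 * subclass IDE, plus the known controllers that report another class
 * (flagged IDE_PCI_CLASS_OVERRIDE in the product tables).
 */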
742 int
743 pciide_match(parent, match, aux)
744 struct device *parent;
745 struct cfdata *match;
746 void *aux;
747 {
748 struct pci_attach_args *pa = aux;
749 const struct pciide_product_desc *pp;
750
	/*
	 * Check the class register to see that it's a PCI IDE controller.
	 * If it is, we assume that we can deal with it; it _should_
	 * work in a standardized way...
	 */
756 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
757 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
758 return (1);
759 }
760
	/*
	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be
	 * PCI IDE controllers. Let's see if we can deal with them anyway.
	 */
765 pp = pciide_lookup_product(pa->pa_id);
766 if (pp != NULL && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
767 return (1);
768 }
769
770 return (0);
771 }
772
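/*
 * Attach function: identify the controller, set DMA defaults, run the
 * chip-specific map routine and, if DMA ended up usable, enable PCI
 * bus mastering.
 */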
773 void
774 pciide_attach(parent, self, aux)
775 struct device *parent, *self;
776 void *aux;
777 {
778 struct pci_attach_args *pa = aux;
779 pci_chipset_tag_t pc = pa->pa_pc;
780 pcitag_t tag = pa->pa_tag;
781 struct pciide_softc *sc = (struct pciide_softc *)self;
782 pcireg_t csr;
783 char devinfo[256];
784 const char *displaydev;
785
786 sc->sc_pci_vendor = PCI_VENDOR(pa->pa_id);
787 sc->sc_pp = pciide_lookup_product(pa->pa_id);
788 if (sc->sc_pp == NULL) {
789 sc->sc_pp = &default_product_desc;
790 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
791 displaydev = devinfo;
792 } else
793 displaydev = sc->sc_pp->ide_name;
794
795 /* if displaydev == NULL, printf is done in chip-specific map */
796 if (displaydev)
797 printf(": %s (rev. 0x%02x)\n", displaydev,
798 PCI_REVISION(pa->pa_class));
799
800 sc->sc_pc = pa->pa_pc;
801 sc->sc_tag = pa->pa_tag;
802
803 /* Set up DMA defaults; these might be adjusted by chip_map. */
804 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
805 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;
806
807 #ifdef WDCDEBUG
808 if (wdcdebug_pciide_mask & DEBUG_PROBE)
809 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
810 #endif
811 sc->sc_pp->chip_map(sc, pa);
812
813 if (sc->sc_dma_ok) {
814 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
815 csr |= PCI_COMMAND_MASTER_ENABLE;
816 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
817 }
818 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
819 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
820 }
821
822 /* tell whether the chip is enabled or not */
823 int
824 pciide_chipen(sc, pa)
825 struct pciide_softc *sc;
826 struct pci_attach_args *pa;
827 {
828 pcireg_t csr;
829 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
830 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
831 PCI_COMMAND_STATUS_REG);
832 printf("%s: device disabled (at %s)\n",
833 sc->sc_wdcdev.sc_dev.dv_xname,
834 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
835 "device" : "bridge");
836 return 0;
837 }
838 return 1;
839 }
840
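/*
 * Map the command and control registers of a channel running in
 * compatibility mode, at the fixed legacy addresses given by
 * PCIIDE_COMPAT_CMD_BASE()/PCIIDE_COMPAT_CTL_BASE().
 */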
841 int
842 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
843 struct pci_attach_args *pa;
844 struct pciide_channel *cp;
845 int compatchan;
846 bus_size_t *cmdsizep, *ctlsizep;
847 {
848 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
849 struct channel_softc *wdc_cp = &cp->wdc_channel;
850
851 cp->compat = 1;
852 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
853 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
854
855 wdc_cp->cmd_iot = pa->pa_iot;
856 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
857 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
858 printf("%s: couldn't map %s channel cmd regs\n",
859 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
860 return (0);
861 }
862
863 wdc_cp->ctl_iot = pa->pa_iot;
864 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
865 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
866 printf("%s: couldn't map %s channel ctl regs\n",
867 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
868 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
869 PCIIDE_COMPAT_CMD_SIZE);
870 return (0);
871 }
872
873 return (1);
874 }
875
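/*
 * Map the command and control registers of a native-PCI channel from
 * the corresponding BARs, and establish the interrupt shared by all
 * native channels of the controller (done only once, on first call).
 */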
876 int
877 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
878 struct pci_attach_args * pa;
879 struct pciide_channel *cp;
880 bus_size_t *cmdsizep, *ctlsizep;
881 int (*pci_intr) __P((void *));
882 {
883 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
884 struct channel_softc *wdc_cp = &cp->wdc_channel;
885 const char *intrstr;
886 pci_intr_handle_t intrhandle;
887
888 cp->compat = 0;
889
890 if (sc->sc_pci_ih == NULL) {
891 if (pci_intr_map(pa, &intrhandle) != 0) {
892 printf("%s: couldn't map native-PCI interrupt\n",
893 sc->sc_wdcdev.sc_dev.dv_xname);
894 return 0;
895 }
896 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
897 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
898 intrhandle, IPL_BIO, pci_intr, sc);
899 if (sc->sc_pci_ih != NULL) {
900 printf("%s: using %s for native-PCI interrupt\n",
901 sc->sc_wdcdev.sc_dev.dv_xname,
902 intrstr ? intrstr : "unknown interrupt");
903 } else {
904 printf("%s: couldn't establish native-PCI interrupt",
905 sc->sc_wdcdev.sc_dev.dv_xname);
906 if (intrstr != NULL)
907 printf(" at %s", intrstr);
908 printf("\n");
909 return 0;
910 }
911 }
912 cp->ih = sc->sc_pci_ih;
913 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
914 PCI_MAPREG_TYPE_IO, 0,
915 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
916 printf("%s: couldn't map %s channel cmd regs\n",
917 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
918 return 0;
919 }
920
921 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
922 PCI_MAPREG_TYPE_IO, 0,
923 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
924 printf("%s: couldn't map %s channel ctl regs\n",
925 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
926 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
927 return 0;
928 }
	/*
	 * In native mode, 4 bytes of I/O space are mapped for the control
	 * register; the control register itself is at offset 2. Pass the
	 * generic code a handle for only one byte at the right offset.
	 */
934 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
935 &wdc_cp->ctl_ioh) != 0) {
936 printf("%s: unable to subregion %s channel ctl regs\n",
937 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
938 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
940 return 0;
941 }
942 return (1);
943 }
944
945 void
946 pciide_mapreg_dma(sc, pa)
947 struct pciide_softc *sc;
948 struct pci_attach_args *pa;
949 {
950 pcireg_t maptype;
951 bus_addr_t addr;
952
953 /*
954 * Map DMA registers
955 *
956 * Note that sc_dma_ok is the right variable to test to see if
957 * DMA can be done. If the interface doesn't support DMA,
958 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
959 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
960 * non-zero if the interface supports DMA and the registers
961 * could be mapped.
962 *
963 * XXX Note that despite the fact that the Bus Master IDE specs
964 * XXX say that "The bus master IDE function uses 16 bytes of IO
965 * XXX space," some controllers (at least the United
966 * XXX Microelectronics UM8886BF) place it in memory space.
967 */
968 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
969 PCIIDE_REG_BUS_MASTER_DMA);
970
971 switch (maptype) {
972 case PCI_MAPREG_TYPE_IO:
973 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
974 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
975 &addr, NULL, NULL) == 0);
976 if (sc->sc_dma_ok == 0) {
977 printf(", but unused (couldn't query registers)");
978 break;
979 }
980 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
981 && addr >= 0x10000) {
982 sc->sc_dma_ok = 0;
983 printf(", but unused (registers at unsafe address "
984 "%#lx)", (unsigned long)addr);
985 break;
986 }
987 /* FALLTHROUGH */
988
989 case PCI_MAPREG_MEM_TYPE_32BIT:
990 sc->sc_dma_ok = (pci_mapreg_map(pa,
991 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
992 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
993 sc->sc_dmat = pa->pa_dmat;
994 if (sc->sc_dma_ok == 0) {
995 printf(", but unused (couldn't map registers)");
996 } else {
997 sc->sc_wdcdev.dma_arg = sc;
998 sc->sc_wdcdev.dma_init = pciide_dma_init;
999 sc->sc_wdcdev.dma_start = pciide_dma_start;
1000 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
1001 }
1002
1003 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1004 PCIIDE_OPTIONS_NODMA) {
1005 printf(", but unused (forced off by config file)");
1006 sc->sc_dma_ok = 0;
1007 }
1008 break;
1009
1010 default:
1011 sc->sc_dma_ok = 0;
1012 printf(", but unsupported register maptype (0x%x)", maptype);
1013 }
1014 }
1015
1016 int
1017 pciide_compat_intr(arg)
1018 void *arg;
1019 {
1020 struct pciide_channel *cp = arg;
1021
1022 #ifdef DIAGNOSTIC
1023 /* should only be called for a compat channel */
1024 if (cp->compat == 0)
1025 panic("pciide compat intr called for non-compat chan %p", cp);
1026 #endif
1027 return (wdcintr(&cp->wdc_channel));
1028 }
1029
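/*
 * Interrupt handler for native-PCI channels: poll every non-compat
 * channel that is waiting for an interrupt and let wdcintr() decide
 * whether to claim it.
 */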
1030 int
1031 pciide_pci_intr(arg)
1032 void *arg;
1033 {
1034 struct pciide_softc *sc = arg;
1035 struct pciide_channel *cp;
1036 struct channel_softc *wdc_cp;
1037 int i, rv, crv;
1038
1039 rv = 0;
1040 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
1041 cp = &sc->pciide_channels[i];
1042 wdc_cp = &cp->wdc_channel;
1043
		/* If this is a compat channel, skip it. */
1045 if (cp->compat)
1046 continue;
		/* If this channel is not waiting for an interrupt, skip it. */
1048 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
1049 continue;
1050
1051 crv = wdcintr(wdc_cp);
1052 if (crv == 0)
1053 ; /* leave rv alone */
1054 else if (crv == 1)
1055 rv = 1; /* claim the intr */
1056 else if (rv == 0) /* crv should be -1 in this case */
1057 rv = crv; /* if we've done no better, take it */
1058 }
1059 return (rv);
1060 }
1061
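/*
 * Per-channel DMA setup: allocate the DMA tables of each drive that
 * negotiated (Ultra-)DMA, clearing the DMA flags if this fails.
 */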
1062 void
1063 pciide_channel_dma_setup(cp)
1064 struct pciide_channel *cp;
1065 {
1066 int drive;
1067 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1068 struct ata_drive_datas *drvp;
1069
1070 for (drive = 0; drive < 2; drive++) {
1071 drvp = &cp->wdc_channel.ch_drive[drive];
1072 /* If no drive, skip */
1073 if ((drvp->drive_flags & DRIVE) == 0)
1074 continue;
1075 /* setup DMA if needed */
1076 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1077 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
1078 sc->sc_dma_ok == 0) {
1079 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1080 continue;
1081 }
1082 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1083 != 0) {
1084 /* Abort DMA setup */
1085 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1086 continue;
1087 }
1088 }
1089 }
1090
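/*
 * Allocate, map and load the descriptor table of one drive, and create
 * the DMA map that will be used for its data transfers.
 */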
1091 int
1092 pciide_dma_table_setup(sc, channel, drive)
1093 struct pciide_softc *sc;
1094 int channel, drive;
1095 {
1096 bus_dma_segment_t seg;
1097 int error, rseg;
1098 const bus_size_t dma_table_size =
1099 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1100 struct pciide_dma_maps *dma_maps =
1101 &sc->pciide_channels[channel].dma_maps[drive];
1102
1103 /* If table was already allocated, just return */
1104 if (dma_maps->dma_table)
1105 return 0;
1106
1107 /* Allocate memory for the DMA tables and map it */
1108 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1109 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1110 BUS_DMA_NOWAIT)) != 0) {
1111 printf("%s:%d: unable to allocate table DMA for "
1112 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1113 channel, drive, error);
1114 return error;
1115 }
1116 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1117 dma_table_size,
1118 (caddr_t *)&dma_maps->dma_table,
1119 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s:%d: unable to map table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
1123 return error;
1124 }
1125 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1126 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1127 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1128
1129 /* Create and load table DMA map for this disk */
1130 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1131 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1132 &dma_maps->dmamap_table)) != 0) {
1133 printf("%s:%d: unable to create table DMA map for "
1134 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1135 channel, drive, error);
1136 return error;
1137 }
1138 if ((error = bus_dmamap_load(sc->sc_dmat,
1139 dma_maps->dmamap_table,
1140 dma_maps->dma_table,
1141 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1142 printf("%s:%d: unable to load table DMA map for "
1143 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1144 channel, drive, error);
1145 return error;
1146 }
1147 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1148 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1149 DEBUG_PROBE);
	/* Create an xfer DMA map for this drive */
1151 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1152 NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary,
1153 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1154 &dma_maps->dmamap_xfer)) != 0) {
1155 printf("%s:%d: unable to create xfer DMA map for "
1156 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1157 channel, drive, error);
1158 return error;
1159 }
1160 return 0;
1161 }
1162
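/*
 * Prepare a DMA transfer: load the data buffer into the xfer DMA map,
 * fill the descriptor table (one entry per segment, EOT flag on the
 * last one), then program the bus-master registers with the table
 * address and the transfer direction.
 */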
1163 int
1164 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1165 void *v;
1166 int channel, drive;
1167 void *databuf;
1168 size_t datalen;
1169 int flags;
1170 {
1171 struct pciide_softc *sc = v;
1172 int error, seg;
1173 struct pciide_dma_maps *dma_maps =
1174 &sc->pciide_channels[channel].dma_maps[drive];
1175
1176 error = bus_dmamap_load(sc->sc_dmat,
1177 dma_maps->dmamap_xfer,
1178 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1179 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1180 if (error) {
		printf("%s:%d: unable to load xfer DMA map for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
1184 return error;
1185 }
1186
1187 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1188 dma_maps->dmamap_xfer->dm_mapsize,
1189 (flags & WDC_DMA_READ) ?
1190 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1191
1192 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1193 #ifdef DIAGNOSTIC
1194 /* A segment must not cross a 64k boundary */
1195 {
1196 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1197 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1198 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1199 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1200 printf("pciide_dma: segment %d physical addr 0x%lx"
1201 " len 0x%lx not properly aligned\n",
1202 seg, phys, len);
1203 panic("pciide_dma: buf align");
1204 }
1205 }
1206 #endif
1207 dma_maps->dma_table[seg].base_addr =
1208 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1209 dma_maps->dma_table[seg].byte_count =
1210 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1211 IDEDMA_BYTE_COUNT_MASK);
1212 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1213 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1214 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1215
1216 }
1217 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1218 htole32(IDEDMA_BYTE_COUNT_EOT);
1219
1220 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1221 dma_maps->dmamap_table->dm_mapsize,
1222 BUS_DMASYNC_PREWRITE);
1223
1224 /* Maps are ready. Start DMA function */
1225 #ifdef DIAGNOSTIC
1226 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1227 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1228 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1229 panic("pciide_dma_init: table align");
1230 }
1231 #endif
1232
1233 /* Clear status bits */
1234 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1235 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1236 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1237 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1238 /* Write table addr */
1239 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1240 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1241 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1242 /* set read/write */
1243 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1244 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1245 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1246 /* remember flags */
1247 dma_maps->dma_flags = flags;
1248 return 0;
1249 }
1250
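/* Start the bus-master engine of the given channel. */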
1251 void
1252 pciide_dma_start(v, channel, drive)
1253 void *v;
1254 int channel, drive;
1255 {
1256 struct pciide_softc *sc = v;
1257
1258 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1259 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1260 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1261 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1262 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1263 }
1264
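/*
 * Complete (or, if "force", abort) a DMA transfer: stop the bus-master
 * engine, unload the data buffer map and translate the status bits
 * into WDC_DMAST_* flags for the caller.
 */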
1265 int
1266 pciide_dma_finish(v, channel, drive, force)
1267 void *v;
1268 int channel, drive;
1269 int force;
1270 {
1271 struct pciide_softc *sc = v;
1272 u_int8_t status;
1273 int error = 0;
1274 struct pciide_dma_maps *dma_maps =
1275 &sc->pciide_channels[channel].dma_maps[drive];
1276
1277 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1278 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1279 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1280 DEBUG_XFERS);
1281
1282 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1283 return WDC_DMAST_NOIRQ;
1284
1285 /* stop DMA channel */
1286 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1287 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1288 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1289 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1290
1291 /* Unload the map of the data buffer */
1292 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1293 dma_maps->dmamap_xfer->dm_mapsize,
1294 (dma_maps->dma_flags & WDC_DMA_READ) ?
1295 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1296 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1297
1298 if ((status & IDEDMA_CTL_ERR) != 0) {
1299 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1300 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1301 error |= WDC_DMAST_ERR;
1302 }
1303
1304 if ((status & IDEDMA_CTL_INTR) == 0) {
1305 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1306 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1307 drive, status);
1308 error |= WDC_DMAST_NOIRQ;
1309 }
1310
1311 if ((status & IDEDMA_CTL_ACT) != 0) {
1312 /* data underrun, may be a valid condition for ATAPI */
1313 error |= WDC_DMAST_UNDER;
1314 }
1315 return error;
1316 }
1317
1318 void
1319 pciide_irqack(chp)
1320 struct channel_softc *chp;
1321 {
1322 struct pciide_channel *cp = (struct pciide_channel*)chp;
1323 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1324
1325 /* clear status bits in IDE DMA registers */
1326 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1327 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1328 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1329 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1330 }
1331
/* Common code used by several chip_map routines */
1333 int
1334 pciide_chansetup(sc, channel, interface)
1335 struct pciide_softc *sc;
1336 int channel;
1337 pcireg_t interface;
1338 {
1339 struct pciide_channel *cp = &sc->pciide_channels[channel];
1340 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1341 cp->name = PCIIDE_CHANNEL_NAME(channel);
1342 cp->wdc_channel.channel = channel;
1343 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1344 cp->wdc_channel.ch_queue =
1345 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1346 if (cp->wdc_channel.ch_queue == NULL) {
		printf("%s %s channel: "
		    "can't allocate memory for command queue\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1350 return 0;
1351 }
1352 printf("%s: %s channel %s to %s mode\n",
1353 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1354 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1355 "configured" : "wired",
1356 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1357 "native-PCI" : "compatibility");
1358 return 1;
1359 }
1360
1361 /* some common code used by several chip channel_map */
1362 void
1363 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1364 struct pci_attach_args *pa;
1365 struct pciide_channel *cp;
1366 pcireg_t interface;
1367 bus_size_t *cmdsizep, *ctlsizep;
1368 int (*pci_intr) __P((void *));
1369 {
1370 struct channel_softc *wdc_cp = &cp->wdc_channel;
1371
1372 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1373 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1374 pci_intr);
1375 else
1376 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1377 wdc_cp->channel, cmdsizep, ctlsizep);
1378
1379 if (cp->hw_ok == 0)
1380 return;
1381 wdc_cp->data32iot = wdc_cp->cmd_iot;
1382 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1383 wdcattach(wdc_cp);
1384 }
1385
/*
 * Generic helper to decide whether a channel can be disabled. Returns 1
 * if the channel can be disabled, 0 if not.
 */
1390 int
1391 pciide_chan_candisable(cp)
1392 struct pciide_channel *cp;
1393 {
1394 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1395 struct channel_softc *wdc_cp = &cp->wdc_channel;
1396
1397 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1398 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1399 printf("%s: disabling %s channel (no drives)\n",
1400 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1401 cp->hw_ok = 0;
1402 return 1;
1403 }
1404 return 0;
1405 }
1406
/*
 * Generic code to map the compatibility interrupt if hw_ok=1 and this is
 * a compatibility channel. Sets hw_ok=0 on failure.
 */
1411 void
1412 pciide_map_compat_intr(pa, cp, compatchan, interface)
1413 struct pci_attach_args *pa;
1414 struct pciide_channel *cp;
1415 int compatchan, interface;
1416 {
1417 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1418 struct channel_softc *wdc_cp = &cp->wdc_channel;
1419
1420 if (cp->hw_ok == 0)
1421 return;
1422 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1423 return;
1424
1425 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1426 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1427 pa, compatchan, pciide_compat_intr, cp);
1428 if (cp->ih == NULL) {
1429 #endif
1430 printf("%s: no compatibility interrupt for use by %s "
1431 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1432 cp->hw_ok = 0;
1433 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1434 }
1435 #endif
1436 }
1437
1438 void
1439 pciide_print_modes(cp)
1440 struct pciide_channel *cp;
1441 {
1442 wdc_print_modes(&cp->wdc_channel);
1443 }
1444
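/*
 * Generic chip map: probe and attach the channels of a controller with
 * no chip-specific support, using bus-master DMA only when the chip is
 * known or DMA is explicitly enabled in the config file.
 */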
1445 void
1446 default_chip_map(sc, pa)
1447 struct pciide_softc *sc;
1448 struct pci_attach_args *pa;
1449 {
1450 struct pciide_channel *cp;
1451 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1452 pcireg_t csr;
1453 int channel, drive;
1454 struct ata_drive_datas *drvp;
1455 u_int8_t idedma_ctl;
1456 bus_size_t cmdsize, ctlsize;
1457 char *failreason;
1458
1459 if (pciide_chipen(sc, pa) == 0)
1460 return;
1461
1462 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1463 printf("%s: bus-master DMA support present",
1464 sc->sc_wdcdev.sc_dev.dv_xname);
1465 if (sc->sc_pp == &default_product_desc &&
1466 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1467 PCIIDE_OPTIONS_DMA) == 0) {
1468 printf(", but unused (no driver support)");
1469 sc->sc_dma_ok = 0;
1470 } else {
1471 pciide_mapreg_dma(sc, pa);
1472 if (sc->sc_dma_ok != 0)
1473 printf(", used without full driver "
1474 "support");
1475 }
1476 } else {
1477 printf("%s: hardware does not support DMA",
1478 sc->sc_wdcdev.sc_dev.dv_xname);
1479 sc->sc_dma_ok = 0;
1480 }
1481 printf("\n");
1482 if (sc->sc_dma_ok) {
1483 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1484 sc->sc_wdcdev.irqack = pciide_irqack;
1485 }
1486 sc->sc_wdcdev.PIO_cap = 0;
1487 sc->sc_wdcdev.DMA_cap = 0;
1488
1489 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1490 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1491 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1492
1493 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1494 cp = &sc->pciide_channels[channel];
1495 if (pciide_chansetup(sc, channel, interface) == 0)
1496 continue;
1497 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1498 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1499 &ctlsize, pciide_pci_intr);
1500 } else {
1501 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1502 channel, &cmdsize, &ctlsize);
1503 }
1504 if (cp->hw_ok == 0)
1505 continue;
1506 /*
1507 * Check to see if something appears to be there.
1508 */
1509 failreason = NULL;
1510 if (!wdcprobe(&cp->wdc_channel)) {
1511 failreason = "not responding; disabled or no drives?";
1512 goto next;
1513 }
1514 /*
1515 * Now, make sure it's actually attributable to this PCI IDE
1516 * channel by trying to access the channel again while the
1517 * PCI IDE controller's I/O space is disabled. (If the
1518 * channel no longer appears to be there, it belongs to
1519 * this controller.) YUCK!
1520 */
1521 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1522 PCI_COMMAND_STATUS_REG);
1523 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1524 csr & ~PCI_COMMAND_IO_ENABLE);
1525 if (wdcprobe(&cp->wdc_channel))
1526 failreason = "other hardware responding at addresses";
1527 pci_conf_write(sc->sc_pc, sc->sc_tag,
1528 PCI_COMMAND_STATUS_REG, csr);
1529 next:
1530 if (failreason) {
1531 printf("%s: %s channel ignored (%s)\n",
1532 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1533 failreason);
1534 cp->hw_ok = 0;
1535 bus_space_unmap(cp->wdc_channel.cmd_iot,
1536 cp->wdc_channel.cmd_ioh, cmdsize);
1537 if (interface & PCIIDE_INTERFACE_PCI(channel))
1538 bus_space_unmap(cp->wdc_channel.ctl_iot,
1539 cp->ctl_baseioh, ctlsize);
1540 else
1541 bus_space_unmap(cp->wdc_channel.ctl_iot,
1542 cp->wdc_channel.ctl_ioh, ctlsize);
1543 } else {
1544 pciide_map_compat_intr(pa, cp, channel, interface);
1545 }
1546 if (cp->hw_ok) {
1547 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1548 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1549 wdcattach(&cp->wdc_channel);
1550 }
1551 }
1552
1553 if (sc->sc_dma_ok == 0)
1554 return;
1555
1556 /* Allocate DMA maps */
1557 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1558 idedma_ctl = 0;
1559 cp = &sc->pciide_channels[channel];
1560 for (drive = 0; drive < 2; drive++) {
1561 drvp = &cp->wdc_channel.ch_drive[drive];
1562 /* If no drive, skip */
1563 if ((drvp->drive_flags & DRIVE) == 0)
1564 continue;
1565 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1566 continue;
			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
				/* Abort DMA setup; fall back to PIO. */
				printf("%s:%d:%d: can't allocate DMA maps, "
				    "using PIO transfers\n",
				    sc->sc_wdcdev.sc_dev.dv_xname,
				    channel, drive);
				drvp->drive_flags &= ~DRIVE_DMA;
				continue;
			}
1575 printf("%s:%d:%d: using DMA data transfers\n",
1576 sc->sc_wdcdev.sc_dev.dv_xname,
1577 channel, drive);
1578 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1579 }
1580 if (idedma_ctl != 0) {
1581 /* Add software bits in status register */
1582 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1583 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1584 idedma_ctl);
1585 }
1586 }
1587 }
1588
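/*
 * Common Serial ATA channel setup: transfer modes are meaningless on
 * S-ATA, so this only flags per-drive DMA in the bus-master status
 * register.
 */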
1589 void
1590 sata_setup_channel(chp)
1591 struct channel_softc *chp;
1592 {
1593 struct ata_drive_datas *drvp;
1594 int drive;
1595 u_int32_t idedma_ctl;
1596 struct pciide_channel *cp = (struct pciide_channel*)chp;
1597 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
1598
1599 /* setup DMA if needed */
1600 pciide_channel_dma_setup(cp);
1601
1602 idedma_ctl = 0;
1603
1604 for (drive = 0; drive < 2; drive++) {
1605 drvp = &chp->ch_drive[drive];
1606 /* If no drive, skip */
1607 if ((drvp->drive_flags & DRIVE) == 0)
1608 continue;
1609 if (drvp->drive_flags & DRIVE_UDMA) {
1610 /* use Ultra/DMA */
1611 drvp->drive_flags &= ~DRIVE_DMA;
1612 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1613 } else if (drvp->drive_flags & DRIVE_DMA) {
1614 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1615 }
1616 }
1617
1618 /*
1619 * Nothing to do to setup modes; it is meaningless in S-ATA
1620 * (but many S-ATA drives still want to get the SET_FEATURE
1621 * command).
1622 */
1623 if (idedma_ctl != 0) {
1624 /* Add software bits in status register */
1625 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1626 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1627 idedma_ctl);
1628 }
1629 pciide_print_modes(cp);
1630 }
1631
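/*
 * Chip map for the Intel PIIX/ICH family: set the capabilities
 * according to the exact product, then configure each (compat-only)
 * channel.
 */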
1632 void
1633 piix_chip_map(sc, pa)
1634 struct pciide_softc *sc;
1635 struct pci_attach_args *pa;
1636 {
1637 struct pciide_channel *cp;
1638 int channel;
1639 u_int32_t idetim;
1640 bus_size_t cmdsize, ctlsize;
1641
1642 if (pciide_chipen(sc, pa) == 0)
1643 return;
1644
1645 printf("%s: bus-master DMA support present",
1646 sc->sc_wdcdev.sc_dev.dv_xname);
1647 pciide_mapreg_dma(sc, pa);
1648 printf("\n");
1649 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1650 WDC_CAPABILITY_MODE;
1651 if (sc->sc_dma_ok) {
1652 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1653 sc->sc_wdcdev.irqack = pciide_irqack;
1654 switch(sc->sc_pp->ide_product) {
1655 case PCI_PRODUCT_INTEL_82371AB_IDE:
1656 case PCI_PRODUCT_INTEL_82440MX_IDE:
1657 case PCI_PRODUCT_INTEL_82801AA_IDE:
1658 case PCI_PRODUCT_INTEL_82801AB_IDE:
1659 case PCI_PRODUCT_INTEL_82801BA_IDE:
1660 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1661 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1662 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1663 case PCI_PRODUCT_INTEL_82801DB_IDE:
1664 case PCI_PRODUCT_INTEL_82801DBM_IDE:
1665 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1666 }
1667 }
1668 sc->sc_wdcdev.PIO_cap = 4;
1669 sc->sc_wdcdev.DMA_cap = 2;
1670 switch(sc->sc_pp->ide_product) {
1671 case PCI_PRODUCT_INTEL_82801AA_IDE:
1672 sc->sc_wdcdev.UDMA_cap = 4;
1673 break;
1674 case PCI_PRODUCT_INTEL_82801BA_IDE:
1675 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1676 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1677 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1678 case PCI_PRODUCT_INTEL_82801DB_IDE:
1679 case PCI_PRODUCT_INTEL_82801DBM_IDE:
1680 sc->sc_wdcdev.UDMA_cap = 5;
1681 break;
1682 default:
1683 sc->sc_wdcdev.UDMA_cap = 2;
1684 }
1685 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1686 sc->sc_wdcdev.set_modes = piix_setup_channel;
1687 else
1688 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1689 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1690 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1691
1692 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1693 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1694 DEBUG_PROBE);
1695 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1696 WDCDEBUG_PRINT((", sidetim=0x%x",
1697 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1698 DEBUG_PROBE);
1699 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udmareg 0x%x",
1701 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1702 DEBUG_PROBE);
1703 }
1704 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1705 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1706 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1707 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1708 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1709 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1710 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1711 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE) {
1712 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1713 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1714 DEBUG_PROBE);
1715 }
1716
1717 }
1718 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1719
1720 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1721 cp = &sc->pciide_channels[channel];
1722 /* PIIX is compat-only */
1723 if (pciide_chansetup(sc, channel, 0) == 0)
1724 continue;
1725 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1726 if ((PIIX_IDETIM_READ(idetim, channel) &
1727 PIIX_IDETIM_IDE) == 0) {
1728 printf("%s: %s channel ignored (disabled)\n",
1729 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1730 continue;
1731 }
1732 /* PIIX are compat-only pciide devices */
1733 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1734 if (cp->hw_ok == 0)
1735 continue;
1736 if (pciide_chan_candisable(cp)) {
1737 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1738 channel);
1739 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1740 idetim);
1741 }
1742 pciide_map_compat_intr(pa, cp, channel, 0);
1743 if (cp->hw_ok == 0)
1744 continue;
1745 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1746 }
1747
1748 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1749 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1750 DEBUG_PROBE);
1751 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1752 WDCDEBUG_PRINT((", sidetim=0x%x",
1753 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1754 DEBUG_PROBE);
1755 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udmareg 0x%x",
1757 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1758 DEBUG_PROBE);
1759 }
1760 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1761 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1762 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1763 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1764 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1765 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1766 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1767 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE) {
1768 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1769 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1770 DEBUG_PROBE);
1771 }
1772 }
1773 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1774 }
1775
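/*
 * Set the timings of an 82371FB (PIIX) channel. The PIIX has a single
 * timing register per channel, so a compromise mode is chosen for both
 * drives.
 */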
1776 void
1777 piix_setup_channel(chp)
1778 struct channel_softc *chp;
1779 {
1780 u_int8_t mode[2], drive;
1781 u_int32_t oidetim, idetim, idedma_ctl;
1782 struct pciide_channel *cp = (struct pciide_channel*)chp;
1783 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1784 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1785
1786 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1787 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1788 idedma_ctl = 0;
1789
	/* Set up new idetim: enable IDE register decoding. */
1791 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1792 chp->channel);
1793
1794 /* setup DMA */
1795 pciide_channel_dma_setup(cp);
1796
1797 /*
* Here we have to mess with the drive modes: the PIIX can't have
* different timings for master and slave drives, so we need to
* find the best combination.
1801 */
1802
/* If both drives support DMA, use the lower mode */
1804 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1805 (drvp[1].drive_flags & DRIVE_DMA)) {
1806 mode[0] = mode[1] =
1807 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1808 drvp[0].DMA_mode = mode[0];
1809 drvp[1].DMA_mode = mode[1];
1810 goto ok;
1811 }
1812 /*
1813 * If only one drive supports DMA, use its mode, and
* put the other one in PIO mode 0 if the timings are not compatible.
1815 */
1816 if (drvp[0].drive_flags & DRIVE_DMA) {
1817 mode[0] = drvp[0].DMA_mode;
1818 mode[1] = drvp[1].PIO_mode;
1819 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1820 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1821 mode[1] = drvp[1].PIO_mode = 0;
1822 goto ok;
1823 }
1824 if (drvp[1].drive_flags & DRIVE_DMA) {
1825 mode[1] = drvp[1].DMA_mode;
1826 mode[0] = drvp[0].PIO_mode;
1827 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1828 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1829 mode[0] = drvp[0].PIO_mode = 0;
1830 goto ok;
1831 }
1832 /*
* If neither drive uses DMA, take the lower mode, unless
* one of them is below PIO mode 2.
1835 */
1836 if (drvp[0].PIO_mode < 2) {
1837 mode[0] = drvp[0].PIO_mode = 0;
1838 mode[1] = drvp[1].PIO_mode;
1839 } else if (drvp[1].PIO_mode < 2) {
1840 mode[1] = drvp[1].PIO_mode = 0;
1841 mode[0] = drvp[0].PIO_mode;
1842 } else {
1843 mode[0] = mode[1] =
1844 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1845 drvp[0].PIO_mode = mode[0];
1846 drvp[1].PIO_mode = mode[1];
1847 }
ok: /* The modes are set up */
1849 for (drive = 0; drive < 2; drive++) {
1850 if (drvp[drive].drive_flags & DRIVE_DMA) {
1851 idetim |= piix_setup_idetim_timings(
1852 mode[drive], 1, chp->channel);
1853 goto end;
1854 }
1855 }
/* If we get here, neither drive uses DMA */
1857 if (mode[0] >= 2)
1858 idetim |= piix_setup_idetim_timings(
1859 mode[0], 0, chp->channel);
1860 else
1861 idetim |= piix_setup_idetim_timings(
1862 mode[1], 0, chp->channel);
1863 end: /*
* The timing mode is now set up in the controller.
* Enable it per-drive.
1866 */
1867 for (drive = 0; drive < 2; drive++) {
1868 /* If no drive, skip */
1869 if ((drvp[drive].drive_flags & DRIVE) == 0)
1870 continue;
1871 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1872 if (drvp[drive].drive_flags & DRIVE_DMA)
1873 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1874 }
1875 if (idedma_ctl != 0) {
1876 /* Add software bits in status register */
1877 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1878 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1879 idedma_ctl);
1880 }
1881 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1882 pciide_print_modes(cp);
1883 }
1884
1885 void
1886 piix3_4_setup_channel(chp)
1887 struct channel_softc *chp;
1888 {
1889 struct ata_drive_datas *drvp;
1890 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1891 struct pciide_channel *cp = (struct pciide_channel*)chp;
1892 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1893 int drive;
1894 int channel = chp->channel;
1895
1896 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1897 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1898 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1899 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1900 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1901 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1902 PIIX_SIDETIM_RTC_MASK(channel));
1903
1904 idedma_ctl = 0;
1905 /* If channel disabled, no need to go further */
1906 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1907 return;
/* set up new idetim: enable IDE register decoding */
1909 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1910
1911 /* setup DMA if needed */
1912 pciide_channel_dma_setup(cp);
1913
1914 for (drive = 0; drive < 2; drive++) {
1915 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1916 PIIX_UDMATIM_SET(0x3, channel, drive));
1917 drvp = &chp->ch_drive[drive];
1918 /* If no drive, skip */
1919 if ((drvp->drive_flags & DRIVE) == 0)
1920 continue;
1921 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1922 (drvp->drive_flags & DRIVE_UDMA) == 0))
1923 goto pio;
1924
1925 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1926 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1927 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1928 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1929 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1930 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1931 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1932 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE) {
1933 ideconf |= PIIX_CONFIG_PINGPONG;
1934 }
1935 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1936 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1937 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1938 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1939 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1940 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE) {
1941 /* setup Ultra/100 */
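/*
* The CR bit presumably reports an 80-conductor cable (an
* assumption based on the macro name); without it the drive is
* capped at UDMA2 below.
*/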
1942 if (drvp->UDMA_mode > 2 &&
1943 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1944 drvp->UDMA_mode = 2;
1945 if (drvp->UDMA_mode > 4) {
1946 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1947 } else {
1948 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1949 if (drvp->UDMA_mode > 2) {
1950 ideconf |= PIIX_CONFIG_UDMA66(channel,
1951 drive);
1952 } else {
1953 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1954 drive);
1955 }
1956 }
1957 }
1958 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1959 /* setup Ultra/66 */
1960 if (drvp->UDMA_mode > 2 &&
1961 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1962 drvp->UDMA_mode = 2;
1963 if (drvp->UDMA_mode > 2)
1964 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1965 else
1966 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1967 }
1968 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1969 (drvp->drive_flags & DRIVE_UDMA)) {
1970 /* use Ultra/DMA */
1971 drvp->drive_flags &= ~DRIVE_DMA;
udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1973 udmareg |= PIIX_UDMATIM_SET(
1974 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1975 } else {
1976 /* use Multiword DMA */
1977 drvp->drive_flags &= ~DRIVE_UDMA;
1978 if (drive == 0) {
1979 idetim |= piix_setup_idetim_timings(
1980 drvp->DMA_mode, 1, channel);
1981 } else {
1982 sidetim |= piix_setup_sidetim_timings(
1983 drvp->DMA_mode, 1, channel);
idetim = PIIX_IDETIM_SET(idetim,
1985 PIIX_IDETIM_SITRE, channel);
1986 }
1987 }
1988 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
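/*
* Note: drives set up for DMA/UDMA fall through to the PIO setup
* below as well, so PIO timings are always programmed.
*/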
1989
1990 pio: /* use PIO mode */
1991 idetim |= piix_setup_idetim_drvs(drvp);
1992 if (drive == 0) {
1993 idetim |= piix_setup_idetim_timings(
1994 drvp->PIO_mode, 0, channel);
1995 } else {
1996 sidetim |= piix_setup_sidetim_timings(
1997 drvp->PIO_mode, 0, channel);
idetim = PIIX_IDETIM_SET(idetim,
1999 PIIX_IDETIM_SITRE, channel);
2000 }
2001 }
2002 if (idedma_ctl != 0) {
2003 /* Add software bits in status register */
2004 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2005 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
2006 idedma_ctl);
2007 }
2008 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
2009 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
2010 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
2011 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
2012 pciide_print_modes(cp);
2013 }
2014
2015
2016 /* setup ISP and RTC fields, based on mode */
2017 static u_int32_t
2018 piix_setup_idetim_timings(mode, dma, channel)
2019 u_int8_t mode;
2020 u_int8_t dma;
2021 u_int8_t channel;
2022 {
2023
2024 if (dma)
2025 return PIIX_IDETIM_SET(0,
2026 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
2027 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
2028 channel);
2029 else
2030 return PIIX_IDETIM_SET(0,
2031 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
2032 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
2033 channel);
2034 }
2035
2036 /* setup DTE, PPE, IE and TIME field based on PIO mode */
2037 static u_int32_t
2038 piix_setup_idetim_drvs(drvp)
2039 struct ata_drive_datas *drvp;
2040 {
2041 u_int32_t ret = 0;
2042 struct channel_softc *chp = drvp->chnl_softc;
2043 u_int8_t channel = chp->channel;
2044 u_int8_t drive = drvp->drive;
2045
2046 /*
* If the drive is using UDMA, the timing setups are independent,
* so just check DMA and PIO here.
2049 */
2050 if (drvp->drive_flags & DRIVE_DMA) {
2051 /* if mode = DMA mode 0, use compatible timings */
2052 if ((drvp->drive_flags & DRIVE_DMA) &&
2053 drvp->DMA_mode == 0) {
2054 drvp->PIO_mode = 0;
2055 return ret;
2056 }
2057 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2058 /*
* If the PIO and DMA timings are the same, use fast timings for
* PIO too; otherwise fall back to compat timings (PIO mode 0).
2061 */
2062 if ((piix_isp_pio[drvp->PIO_mode] !=
2063 piix_isp_dma[drvp->DMA_mode]) ||
2064 (piix_rtc_pio[drvp->PIO_mode] !=
2065 piix_rtc_dma[drvp->DMA_mode]))
2066 drvp->PIO_mode = 0;
2067 /* if PIO mode <= 2, use compat timings for PIO */
2068 if (drvp->PIO_mode <= 2) {
2069 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
2070 channel);
2071 return ret;
2072 }
2073 }
2074
2075 /*
2076 * Now setup PIO modes. If mode < 2, use compat timings.
2077 * Else enable fast timings. Enable IORDY and prefetch/post
2078 * if PIO mode >= 3.
2079 */
2080
2081 if (drvp->PIO_mode < 2)
2082 return ret;
2083
2084 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2085 if (drvp->PIO_mode >= 3) {
2086 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
2087 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
2088 }
2089 return ret;
2090 }
2091
2092 /* setup values in SIDETIM registers, based on mode */
2093 static u_int32_t
2094 piix_setup_sidetim_timings(mode, dma, channel)
2095 u_int8_t mode;
2096 u_int8_t dma;
2097 u_int8_t channel;
2098 {
2099 if (dma)
2100 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
2101 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
2102 else
2103 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
2104 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
2105 }
2106
2107 void
2108 amd7x6_chip_map(sc, pa)
2109 struct pciide_softc *sc;
2110 struct pci_attach_args *pa;
2111 {
2112 struct pciide_channel *cp;
2113 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2114 int channel;
2115 pcireg_t chanenable;
2116 bus_size_t cmdsize, ctlsize;
2117
2118 if (pciide_chipen(sc, pa) == 0)
2119 return;
2120 printf("%s: bus-master DMA support present",
2121 sc->sc_wdcdev.sc_dev.dv_xname);
2122 pciide_mapreg_dma(sc, pa);
2123 printf("\n");
2124 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2125 WDC_CAPABILITY_MODE;
2126 if (sc->sc_dma_ok) {
2127 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2128 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2129 sc->sc_wdcdev.irqack = pciide_irqack;
2130 }
2131 sc->sc_wdcdev.PIO_cap = 4;
2132 sc->sc_wdcdev.DMA_cap = 2;
2133
2134 switch (sc->sc_pci_vendor) {
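/*
* The nVidia nForce IDE appears to use the same programming
* interface as the AMD 7x6 series, just at a different
* config-space offset, hence the shared code with a per-vendor
* register base.
*/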
2135 case PCI_VENDOR_AMD:
2136 switch (sc->sc_pp->ide_product) {
2137 case PCI_PRODUCT_AMD_PBC766_IDE:
2138 case PCI_PRODUCT_AMD_PBC768_IDE:
2139 case PCI_PRODUCT_AMD_PBC8111_IDE:
2140 sc->sc_wdcdev.UDMA_cap = 5;
2141 break;
2142 default:
2143 sc->sc_wdcdev.UDMA_cap = 4;
2144 }
2145 sc->sc_amd_regbase = AMD7X6_AMD_REGBASE;
2146 break;
2147
2148 case PCI_VENDOR_NVIDIA:
2149 switch (sc->sc_pp->ide_product) {
2150 case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
2151 sc->sc_wdcdev.UDMA_cap = 5;
2152 break;
2153 case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
2154 sc->sc_wdcdev.UDMA_cap = 6;
2155 break;
2156 }
2157 sc->sc_amd_regbase = AMD7X6_NVIDIA_REGBASE;
2158 break;
2159
2160 default:
2161 panic("amd7x6_chip_map: unknown vendor");
2162 }
2163 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2164 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2165 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2166 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag,
2167 AMD7X6_CHANSTATUS_EN(sc));
2168
2169 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2170 DEBUG_PROBE);
2171 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2172 cp = &sc->pciide_channels[channel];
2173 if (pciide_chansetup(sc, channel, interface) == 0)
2174 continue;
2175
2176 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2177 printf("%s: %s channel ignored (disabled)\n",
2178 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2179 continue;
2180 }
2181 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2182 pciide_pci_intr);
2183
2184 if (pciide_chan_candisable(cp))
2185 chanenable &= ~AMD7X6_CHAN_EN(channel);
2186 pciide_map_compat_intr(pa, cp, channel, interface);
2187 if (cp->hw_ok == 0)
2188 continue;
2189
2190 amd7x6_setup_channel(&cp->wdc_channel);
2191 }
2192 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN(sc),
2193 chanenable);
2194 return;
2195 }
2196
2197 void
2198 amd7x6_setup_channel(chp)
2199 struct channel_softc *chp;
2200 {
2201 u_int32_t udmatim_reg, datatim_reg;
2202 u_int8_t idedma_ctl;
2203 int mode, drive;
2204 struct ata_drive_datas *drvp;
2205 struct pciide_channel *cp = (struct pciide_channel*)chp;
2206 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2207 #ifndef PCIIDE_AMD756_ENABLEDMA
2208 int rev = PCI_REVISION(
2209 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2210 #endif
2211
2212 idedma_ctl = 0;
2213 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc));
2214 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc));
2215 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2216 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2217
2218 /* setup DMA if needed */
2219 pciide_channel_dma_setup(cp);
2220
2221 for (drive = 0; drive < 2; drive++) {
2222 drvp = &chp->ch_drive[drive];
2223 /* If no drive, skip */
2224 if ((drvp->drive_flags & DRIVE) == 0)
2225 continue;
2226 /* add timing values, setup DMA if needed */
2227 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2228 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2229 mode = drvp->PIO_mode;
2230 goto pio;
2231 }
2232 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2233 (drvp->drive_flags & DRIVE_UDMA)) {
2234 /* use Ultra/DMA */
2235 drvp->drive_flags &= ~DRIVE_DMA;
2236 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2237 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2238 AMD7X6_UDMA_TIME(chp->channel, drive,
2239 amd7x6_udma_tim[drvp->UDMA_mode]);
2240 /* can use PIO timings, MW DMA unused */
2241 mode = drvp->PIO_mode;
2242 } else {
2243 /* use Multiword DMA, but only if revision is OK */
2244 drvp->drive_flags &= ~DRIVE_UDMA;
2245 #ifndef PCIIDE_AMD756_ENABLEDMA
2246 /*
* The workaround doesn't seem to be necessary with all
* drives, so it can be disabled by defining
* PCIIDE_AMD756_ENABLEDMA. The bug causes a hard hang
* if triggered.
2251 */
2252 if (sc->sc_pci_vendor == PCI_VENDOR_AMD &&
2253 sc->sc_pp->ide_product ==
2254 PCI_PRODUCT_AMD_PBC756_IDE &&
2255 AMD756_CHIPREV_DISABLEDMA(rev)) {
2256 printf("%s:%d:%d: multi-word DMA disabled due "
2257 "to chip revision\n",
2258 sc->sc_wdcdev.sc_dev.dv_xname,
2259 chp->channel, drive);
2260 mode = drvp->PIO_mode;
2261 drvp->drive_flags &= ~DRIVE_DMA;
2262 goto pio;
2263 }
2264 #endif
2265 /* mode = min(pio, dma+2) */
if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2267 mode = drvp->PIO_mode;
2268 else
2269 mode = drvp->DMA_mode + 2;
2270 }
2271 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2272
2273 pio: /* setup PIO mode */
2274 if (mode <= 2) {
2275 drvp->DMA_mode = 0;
2276 drvp->PIO_mode = 0;
2277 mode = 0;
2278 } else {
2279 drvp->PIO_mode = mode;
2280 drvp->DMA_mode = mode - 2;
2281 }
2282 datatim_reg |=
2283 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2284 amd7x6_pio_set[mode]) |
2285 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2286 amd7x6_pio_rec[mode]);
2287 }
2288 if (idedma_ctl != 0) {
2289 /* Add software bits in status register */
2290 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2291 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2292 idedma_ctl);
2293 }
2294 pciide_print_modes(cp);
2295 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc), datatim_reg);
2296 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc), udmatim_reg);
2297 }
2298
2299 void
2300 apollo_chip_map(sc, pa)
2301 struct pciide_softc *sc;
2302 struct pci_attach_args *pa;
2303 {
2304 struct pciide_channel *cp;
2305 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2306 int channel;
2307 u_int32_t ideconf;
2308 bus_size_t cmdsize, ctlsize;
2309 pcitag_t pcib_tag;
2310 pcireg_t pcib_id, pcib_class;
2311
2312 if (pciide_chipen(sc, pa) == 0)
2313 return;
2314 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2315 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2316 /* and read ID and rev of the ISA bridge */
2317 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2318 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2319 printf(": VIA Technologies ");
2320 switch (PCI_PRODUCT(pcib_id)) {
2321 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2322 printf("VT82C586 (Apollo VP) ");
if (PCI_REVISION(pcib_class) >= 0x02) {
2324 printf("ATA33 controller\n");
2325 sc->sc_wdcdev.UDMA_cap = 2;
2326 } else {
2327 printf("controller\n");
2328 sc->sc_wdcdev.UDMA_cap = 0;
2329 }
2330 break;
2331 case PCI_PRODUCT_VIATECH_VT82C596A:
2332 printf("VT82C596A (Apollo Pro) ");
2333 if (PCI_REVISION(pcib_class) >= 0x12) {
2334 printf("ATA66 controller\n");
2335 sc->sc_wdcdev.UDMA_cap = 4;
2336 } else {
2337 printf("ATA33 controller\n");
2338 sc->sc_wdcdev.UDMA_cap = 2;
2339 }
2340 break;
2341 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2342 printf("VT82C686A (Apollo KX133) ");
2343 if (PCI_REVISION(pcib_class) >= 0x40) {
2344 printf("ATA100 controller\n");
2345 sc->sc_wdcdev.UDMA_cap = 5;
2346 } else {
2347 printf("ATA66 controller\n");
2348 sc->sc_wdcdev.UDMA_cap = 4;
2349 }
2350 break;
2351 case PCI_PRODUCT_VIATECH_VT8231:
2352 printf("VT8231 ATA100 controller\n");
2353 sc->sc_wdcdev.UDMA_cap = 5;
2354 break;
2355 case PCI_PRODUCT_VIATECH_VT8233:
2356 printf("VT8233 ATA100 controller\n");
2357 sc->sc_wdcdev.UDMA_cap = 5;
2358 break;
2359 case PCI_PRODUCT_VIATECH_VT8233A:
2360 printf("VT8233A ATA133 controller\n");
2361 sc->sc_wdcdev.UDMA_cap = 6;
2362 break;
2363 case PCI_PRODUCT_VIATECH_VT8235:
2364 printf("VT8235 ATA133 controller\n");
2365 sc->sc_wdcdev.UDMA_cap = 6;
2366 break;
2367 default:
2368 printf("unknown ATA controller\n");
2369 sc->sc_wdcdev.UDMA_cap = 0;
2370 }
2371
2372 printf("%s: bus-master DMA support present",
2373 sc->sc_wdcdev.sc_dev.dv_xname);
2374 pciide_mapreg_dma(sc, pa);
2375 printf("\n");
2376 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2377 WDC_CAPABILITY_MODE;
2378 if (sc->sc_dma_ok) {
2379 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2380 sc->sc_wdcdev.irqack = pciide_irqack;
2381 if (sc->sc_wdcdev.UDMA_cap > 0)
2382 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2383 }
2384 sc->sc_wdcdev.PIO_cap = 4;
2385 sc->sc_wdcdev.DMA_cap = 2;
2386 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2387 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2388 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2389
2390 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2391 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2392 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2393 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2394 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2395 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2396 DEBUG_PROBE);
2397
2398 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2399 cp = &sc->pciide_channels[channel];
2400 if (pciide_chansetup(sc, channel, interface) == 0)
2401 continue;
2402
2403 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2404 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2405 printf("%s: %s channel ignored (disabled)\n",
2406 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2407 continue;
2408 }
2409 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2410 pciide_pci_intr);
2411 if (cp->hw_ok == 0)
2412 continue;
2413 if (pciide_chan_candisable(cp)) {
2414 ideconf &= ~APO_IDECONF_EN(channel);
2415 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2416 ideconf);
2417 }
2418 pciide_map_compat_intr(pa, cp, channel, interface);
2419
2420 if (cp->hw_ok == 0)
2421 continue;
2422 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2423 }
2424 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2425 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2426 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2427 }
2428
2429 void
2430 apollo_setup_channel(chp)
2431 struct channel_softc *chp;
2432 {
2433 u_int32_t udmatim_reg, datatim_reg;
2434 u_int8_t idedma_ctl;
2435 int mode, drive;
2436 struct ata_drive_datas *drvp;
2437 struct pciide_channel *cp = (struct pciide_channel*)chp;
2438 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2439
2440 idedma_ctl = 0;
2441 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2442 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2443 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2444 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2445
2446 /* setup DMA if needed */
2447 pciide_channel_dma_setup(cp);
2448
2449 for (drive = 0; drive < 2; drive++) {
2450 drvp = &chp->ch_drive[drive];
2451 /* If no drive, skip */
2452 if ((drvp->drive_flags & DRIVE) == 0)
2453 continue;
2454 /* add timing values, setup DMA if needed */
2455 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2456 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2457 mode = drvp->PIO_mode;
2458 goto pio;
2459 }
2460 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2461 (drvp->drive_flags & DRIVE_UDMA)) {
2462 /* use Ultra/DMA */
2463 drvp->drive_flags &= ~DRIVE_DMA;
2464 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2465 APO_UDMA_EN_MTH(chp->channel, drive);
2466 if (sc->sc_wdcdev.UDMA_cap == 6) {
2467 /* 8233a */
2468 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2469 drive, apollo_udma133_tim[drvp->UDMA_mode]);
2470 } else if (sc->sc_wdcdev.UDMA_cap == 5) {
2471 /* 686b */
2472 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2473 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2474 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2475 /* 596b or 686a */
2476 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2477 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2478 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2479 } else {
2480 /* 596a or 586b */
2481 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2482 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2483 }
2484 /* can use PIO timings, MW DMA unused */
2485 mode = drvp->PIO_mode;
2486 } else {
2487 /* use Multiword DMA */
2488 drvp->drive_flags &= ~DRIVE_UDMA;
2489 /* mode = min(pio, dma+2) */
if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2491 mode = drvp->PIO_mode;
2492 else
2493 mode = drvp->DMA_mode + 2;
2494 }
2495 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2496
2497 pio: /* setup PIO mode */
2498 if (mode <= 2) {
2499 drvp->DMA_mode = 0;
2500 drvp->PIO_mode = 0;
2501 mode = 0;
2502 } else {
2503 drvp->PIO_mode = mode;
2504 drvp->DMA_mode = mode - 2;
2505 }
2506 datatim_reg |=
2507 APO_DATATIM_PULSE(chp->channel, drive,
2508 apollo_pio_set[mode]) |
2509 APO_DATATIM_RECOV(chp->channel, drive,
2510 apollo_pio_rec[mode]);
2511 }
2512 if (idedma_ctl != 0) {
2513 /* Add software bits in status register */
2514 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2515 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2516 idedma_ctl);
2517 }
2518 pciide_print_modes(cp);
2519 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2520 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2521 }
2522
2523 void
2524 cmd_channel_map(pa, sc, channel)
2525 struct pci_attach_args *pa;
2526 struct pciide_softc *sc;
2527 int channel;
2528 {
2529 struct pciide_channel *cp = &sc->pciide_channels[channel];
2530 bus_size_t cmdsize, ctlsize;
2531 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2532 int interface, one_channel;
2533
2534 /*
2535 * The 0648/0649 can be told to identify as a RAID controller.
* In this case, we have to fake the programming interface.
2537 */
2538 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2539 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2540 PCIIDE_INTERFACE_SETTABLE(1);
2541 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2542 CMD_CONF_DSA1)
2543 interface |= PCIIDE_INTERFACE_PCI(0) |
2544 PCIIDE_INTERFACE_PCI(1);
2545 } else {
2546 interface = PCI_INTERFACE(pa->pa_class);
2547 }
2548
2549 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2550 cp->name = PCIIDE_CHANNEL_NAME(channel);
2551 cp->wdc_channel.channel = channel;
2552 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2553
2554 /*
* Older CMD64x controllers don't have independent channels.
2556 */
2557 switch (sc->sc_pp->ide_product) {
2558 case PCI_PRODUCT_CMDTECH_649:
2559 one_channel = 0;
2560 break;
2561 default:
2562 one_channel = 1;
2563 break;
2564 }
2565
2566 if (channel > 0 && one_channel) {
2567 cp->wdc_channel.ch_queue =
2568 sc->pciide_channels[0].wdc_channel.ch_queue;
2569 } else {
2570 cp->wdc_channel.ch_queue =
2571 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2572 }
2573 if (cp->wdc_channel.ch_queue == NULL) {
2574 printf("%s %s channel: "
2575 "can't allocate memory for command queue",
2576 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2577 return;
2578 }
2579
2580 printf("%s: %s channel %s to %s mode\n",
2581 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2582 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2583 "configured" : "wired",
2584 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2585 "native-PCI" : "compatibility");
2586
2587 /*
* With a CMD PCI64x, if we get here, the first channel is enabled:
* there's no way to disable the first channel without disabling
* the whole device.
2591 */
2592 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2593 printf("%s: %s channel ignored (disabled)\n",
2594 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2595 return;
2596 }
2597
2598 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2599 if (cp->hw_ok == 0)
2600 return;
2601 if (channel == 1) {
2602 if (pciide_chan_candisable(cp)) {
2603 ctrl &= ~CMD_CTRL_2PORT;
2604 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2605 CMD_CTRL, ctrl);
2606 }
2607 }
2608 pciide_map_compat_intr(pa, cp, channel, interface);
2609 }
2610
2611 int
2612 cmd_pci_intr(arg)
2613 void *arg;
2614 {
2615 struct pciide_softc *sc = arg;
2616 struct pciide_channel *cp;
2617 struct channel_softc *wdc_cp;
2618 int i, rv, crv;
2619 u_int32_t priirq, secirq;
2620
2621 rv = 0;
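/*
* The per-channel interrupt pending bits live in different
* registers: CMD_CONF for the primary channel, CMD_ARTTIM23 for
* the secondary (see the tests below).
*/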
2622 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2623 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2624 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2625 cp = &sc->pciide_channels[i];
2626 wdc_cp = &cp->wdc_channel;
2627 /* If a compat channel skip. */
2628 if (cp->compat)
2629 continue;
2630 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2631 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2632 crv = wdcintr(wdc_cp);
2633 if (crv == 0)
2634 printf("%s:%d: bogus intr\n",
2635 sc->sc_wdcdev.sc_dev.dv_xname, i);
2636 else
2637 rv = 1;
2638 }
2639 }
2640 return rv;
2641 }
2642
2643 void
2644 cmd_chip_map(sc, pa)
2645 struct pciide_softc *sc;
2646 struct pci_attach_args *pa;
2647 {
2648 int channel;
2649
2650 /*
2651 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
* and the base address registers can be disabled at the
* hardware level. In this case, the device is wired
2654 * in compat mode and its first channel is always enabled,
2655 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2656 * In fact, it seems that the first channel of the CMD PCI0640
2657 * can't be disabled.
2658 */
2659
2660 #ifdef PCIIDE_CMD064x_DISABLE
2661 if (pciide_chipen(sc, pa) == 0)
2662 return;
2663 #endif
2664
2665 printf("%s: hardware does not support DMA\n",
2666 sc->sc_wdcdev.sc_dev.dv_xname);
2667 sc->sc_dma_ok = 0;
2668
2669 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2670 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2671 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2672
2673 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2674 cmd_channel_map(pa, sc, channel);
2675 }
2676 }
2677
2678 void
2679 cmd0643_9_chip_map(sc, pa)
2680 struct pciide_softc *sc;
2681 struct pci_attach_args *pa;
2682 {
2683 struct pciide_channel *cp;
2684 int channel;
2685 pcireg_t rev = PCI_REVISION(pa->pa_class);
2686
2687 /*
2688 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
* and the base address registers can be disabled at the
* hardware level. In this case, the device is wired
2691 * in compat mode and its first channel is always enabled,
2692 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2693 * In fact, it seems that the first channel of the CMD PCI0640
2694 * can't be disabled.
2695 */
2696
2697 #ifdef PCIIDE_CMD064x_DISABLE
2698 if (pciide_chipen(sc, pa) == 0)
2699 return;
2700 #endif
2701 printf("%s: bus-master DMA support present",
2702 sc->sc_wdcdev.sc_dev.dv_xname);
2703 pciide_mapreg_dma(sc, pa);
2704 printf("\n");
2705 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2706 WDC_CAPABILITY_MODE;
2707 if (sc->sc_dma_ok) {
2708 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2709 switch (sc->sc_pp->ide_product) {
2710 case PCI_PRODUCT_CMDTECH_649:
2711 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2712 sc->sc_wdcdev.UDMA_cap = 5;
2713 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2714 break;
2715 case PCI_PRODUCT_CMDTECH_648:
2716 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2717 sc->sc_wdcdev.UDMA_cap = 4;
2718 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2719 break;
2720 case PCI_PRODUCT_CMDTECH_646:
2721 if (rev >= CMD0646U2_REV) {
2722 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2723 sc->sc_wdcdev.UDMA_cap = 2;
2724 } else if (rev >= CMD0646U_REV) {
2725 /*
2726 * Linux's driver claims that the 646U is broken
2727 * with UDMA. Only enable it if we know what we're
2728 * doing
2729 */
2730 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2731 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2732 sc->sc_wdcdev.UDMA_cap = 2;
2733 #endif
2734 /* explicitly disable UDMA */
2735 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2736 CMD_UDMATIM(0), 0);
2737 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2738 CMD_UDMATIM(1), 0);
2739 }
2740 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2741 break;
2742 default:
2743 sc->sc_wdcdev.irqack = pciide_irqack;
2744 }
2745 }
2746
2747 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2748 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2749 sc->sc_wdcdev.PIO_cap = 4;
2750 sc->sc_wdcdev.DMA_cap = 2;
2751 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2752
2753 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2754 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2755 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2756 DEBUG_PROBE);
2757
2758 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2759 cp = &sc->pciide_channels[channel];
2760 cmd_channel_map(pa, sc, channel);
2761 if (cp->hw_ok == 0)
2762 continue;
2763 cmd0643_9_setup_channel(&cp->wdc_channel);
2764 }
2765 /*
2766 * note - this also makes sure we clear the irq disable and reset
2767 * bits
2768 */
2769 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2770 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2771 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2772 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2773 DEBUG_PROBE);
2774 }
2775
2776 void
2777 cmd0643_9_setup_channel(chp)
2778 struct channel_softc *chp;
2779 {
2780 struct ata_drive_datas *drvp;
2781 u_int8_t tim;
2782 u_int32_t idedma_ctl, udma_reg;
2783 int drive;
2784 struct pciide_channel *cp = (struct pciide_channel*)chp;
2785 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2786
2787 idedma_ctl = 0;
2788 /* setup DMA if needed */
2789 pciide_channel_dma_setup(cp);
2790
2791 for (drive = 0; drive < 2; drive++) {
2792 drvp = &chp->ch_drive[drive];
2793 /* If no drive, skip */
2794 if ((drvp->drive_flags & DRIVE) == 0)
2795 continue;
2796 /* add timing values, setup DMA if needed */
2797 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2798 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2799 if (drvp->drive_flags & DRIVE_UDMA) {
2800 /* UltraDMA on a 646U2, 0648 or 0649 */
2801 drvp->drive_flags &= ~DRIVE_DMA;
2802 udma_reg = pciide_pci_read(sc->sc_pc,
2803 sc->sc_tag, CMD_UDMATIM(chp->channel));
2804 if (drvp->UDMA_mode > 2 &&
2805 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2806 CMD_BICSR) &
2807 CMD_BICSR_80(chp->channel)) == 0)
2808 drvp->UDMA_mode = 2;
2809 if (drvp->UDMA_mode > 2)
2810 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2811 else if (sc->sc_wdcdev.UDMA_cap > 2)
2812 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2813 udma_reg |= CMD_UDMATIM_UDMA(drive);
2814 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2815 CMD_UDMATIM_TIM_OFF(drive));
2816 udma_reg |=
2817 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2818 CMD_UDMATIM_TIM_OFF(drive));
2819 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2820 CMD_UDMATIM(chp->channel), udma_reg);
2821 } else {
2822 /*
* Use Multiword DMA.
* Timings will be used for both PIO and DMA,
* so adjust the DMA mode if needed.
* If we have a 0646U2/8/9, turn off UDMA.
2827 */
2828 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2829 udma_reg = pciide_pci_read(sc->sc_pc,
2830 sc->sc_tag,
2831 CMD_UDMATIM(chp->channel));
2832 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2833 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2834 CMD_UDMATIM(chp->channel),
2835 udma_reg);
2836 }
2837 if (drvp->PIO_mode >= 3 &&
2838 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2839 drvp->DMA_mode = drvp->PIO_mode - 2;
2840 }
2841 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2842 }
2843 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2844 }
2845 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2846 CMD_DATA_TIM(chp->channel, drive), tim);
2847 }
2848 if (idedma_ctl != 0) {
2849 /* Add software bits in status register */
2850 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2851 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2852 idedma_ctl);
2853 }
2854 pciide_print_modes(cp);
2855 }
2856
2857 void
2858 cmd646_9_irqack(chp)
2859 struct channel_softc *chp;
2860 {
2861 u_int32_t priirq, secirq;
2862 struct pciide_channel *cp = (struct pciide_channel*)chp;
2863 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
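/*
* Reading the register and writing the same value back presumably
* clears the latched interrupt bit (assuming write-one-to-clear
* semantics for the IRQ bits).
*/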
2864
2865 if (chp->channel == 0) {
2866 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2867 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2868 } else {
2869 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2870 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2871 }
2872 pciide_irqack(chp);
2873 }
2874
2875 void
2876 cmd680_chip_map(sc, pa)
2877 struct pciide_softc *sc;
2878 struct pci_attach_args *pa;
2879 {
2880 struct pciide_channel *cp;
2881 int channel;
2882
2883 if (pciide_chipen(sc, pa) == 0)
2884 return;
2885 printf("%s: bus-master DMA support present",
2886 sc->sc_wdcdev.sc_dev.dv_xname);
2887 pciide_mapreg_dma(sc, pa);
2888 printf("\n");
2889 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2890 WDC_CAPABILITY_MODE;
2891 if (sc->sc_dma_ok) {
2892 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2893 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2894 sc->sc_wdcdev.UDMA_cap = 6;
2895 sc->sc_wdcdev.irqack = pciide_irqack;
2896 }
2897
2898 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2899 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2900 sc->sc_wdcdev.PIO_cap = 4;
2901 sc->sc_wdcdev.DMA_cap = 2;
2902 sc->sc_wdcdev.set_modes = cmd680_setup_channel;
2903
2904 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
2905 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
2906 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
2907 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);
2908 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2909 cp = &sc->pciide_channels[channel];
2910 cmd680_channel_map(pa, sc, channel);
2911 if (cp->hw_ok == 0)
2912 continue;
2913 cmd680_setup_channel(&cp->wdc_channel);
2914 }
2915 }
2916
2917 void
2918 cmd680_channel_map(pa, sc, channel)
2919 struct pci_attach_args *pa;
2920 struct pciide_softc *sc;
2921 int channel;
2922 {
2923 struct pciide_channel *cp = &sc->pciide_channels[channel];
2924 bus_size_t cmdsize, ctlsize;
2925 int interface, i, reg;
2926 static const u_int8_t init_val[] =
2927 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
2928 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };
2929
2930 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2931 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2932 PCIIDE_INTERFACE_SETTABLE(1);
2933 interface |= PCIIDE_INTERFACE_PCI(0) |
2934 PCIIDE_INTERFACE_PCI(1);
2935 } else {
2936 interface = PCI_INTERFACE(pa->pa_class);
2937 }
2938
2939 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2940 cp->name = PCIIDE_CHANNEL_NAME(channel);
2941 cp->wdc_channel.channel = channel;
2942 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2943
2944 cp->wdc_channel.ch_queue =
2945 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2946 if (cp->wdc_channel.ch_queue == NULL) {
2947 printf("%s %s channel: "
2948 "can't allocate memory for command queue",
2949 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2950 return;
2951 }
2952
2953 /* XXX */
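/*
* These writes look like conservative default values for the
* per-channel timing registers also used by cmd680_setup_channel()
* below (0xa4 PIO, 0xa8 DMA, 0xac UDMA); the exact register layout
* is not documented here, so this is an assumption.
*/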
2954 reg = 0xa2 + channel * 16;
2955 for (i = 0; i < sizeof(init_val); i++)
2956 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);
2957
2958 printf("%s: %s channel %s to %s mode\n",
2959 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2960 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2961 "configured" : "wired",
2962 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2963 "native-PCI" : "compatibility");
2964
2965 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
2966 if (cp->hw_ok == 0)
2967 return;
2968 pciide_map_compat_intr(pa, cp, channel, interface);
2969 }
2970
2971 void
2972 cmd680_setup_channel(chp)
2973 struct channel_softc *chp;
2974 {
2975 struct ata_drive_datas *drvp;
2976 u_int8_t mode, off, scsc;
2977 u_int16_t val;
2978 u_int32_t idedma_ctl;
2979 int drive;
2980 struct pciide_channel *cp = (struct pciide_channel*)chp;
2981 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2982 pci_chipset_tag_t pc = sc->sc_pc;
2983 pcitag_t pa = sc->sc_tag;
2984 static const u_int8_t udma2_tbl[] =
2985 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
2986 static const u_int8_t udma_tbl[] =
2987 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
2988 static const u_int16_t dma_tbl[] =
2989 { 0x2208, 0x10c2, 0x10c1 };
2990 static const u_int16_t pio_tbl[] =
2991 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
2992
2993 idedma_ctl = 0;
2994 pciide_channel_dma_setup(cp);
2995 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4);
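/*
* Each drive has a 2-bit mode field in this register:
* 01 = PIO, 10 = DMA, 11 = UDMA (as programmed below).
*/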
2996
2997 for (drive = 0; drive < 2; drive++) {
2998 drvp = &chp->ch_drive[drive];
2999 /* If no drive, skip */
3000 if ((drvp->drive_flags & DRIVE) == 0)
3001 continue;
3002 mode &= ~(0x03 << (drive * 4));
3003 if (drvp->drive_flags & DRIVE_UDMA) {
3004 drvp->drive_flags &= ~DRIVE_DMA;
3005 off = 0xa0 + chp->channel * 16;
3006 if (drvp->UDMA_mode > 2 &&
3007 (pciide_pci_read(pc, pa, off) & 0x01) == 0)
3008 drvp->UDMA_mode = 2;
3009 scsc = pciide_pci_read(pc, pa, 0x8a);
3010 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
3011 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
3012 scsc = pciide_pci_read(pc, pa, 0x8a);
3013 if ((scsc & 0x30) == 0)
3014 drvp->UDMA_mode = 5;
3015 }
3016 mode |= 0x03 << (drive * 4);
3017 off = 0xac + chp->channel * 16 + drive * 2;
3018 val = pciide_pci_read(pc, pa, off) & ~0x3f;
3019 if (scsc & 0x30)
3020 val |= udma2_tbl[drvp->UDMA_mode];
3021 else
3022 val |= udma_tbl[drvp->UDMA_mode];
3023 pciide_pci_write(pc, pa, off, val);
3024 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3025 } else if (drvp->drive_flags & DRIVE_DMA) {
3026 mode |= 0x02 << (drive * 4);
3027 off = 0xa8 + chp->channel * 16 + drive * 2;
3028 val = dma_tbl[drvp->DMA_mode];
3029 pciide_pci_write(pc, pa, off, val & 0xff);
3030 pciide_pci_write(pc, pa, off, val >> 8);
3031 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3032 } else {
3033 mode |= 0x01 << (drive * 4);
3034 off = 0xa4 + chp->channel * 16 + drive * 2;
3035 val = pio_tbl[drvp->PIO_mode];
3036 pciide_pci_write(pc, pa, off, val & 0xff);
3037 pciide_pci_write(pc, pa, off, val >> 8);
3038 }
3039 }
3040
3041 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
3042 if (idedma_ctl != 0) {
3043 /* Add software bits in status register */
3044 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3045 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3046 idedma_ctl);
3047 }
3048 pciide_print_modes(cp);
3049 }
3050
3051 void
3052 cmd3112_chip_map(sc, pa)
3053 struct pciide_softc *sc;
3054 struct pci_attach_args *pa;
3055 {
3056 struct pciide_channel *cp;
3057 bus_size_t cmdsize, ctlsize;
3058 pcireg_t interface;
3059 int channel;
3060
3061 if (pciide_chipen(sc, pa) == 0)
3062 return;
3063
3064 printf("%s: bus-master DMA support present",
3065 sc->sc_wdcdev.sc_dev.dv_xname);
3066 pciide_mapreg_dma(sc, pa);
3067 printf("\n");
3068
3069 /*
* Revisions <= 0x01 of the 3112 have a bug that can cause data
3071 * corruption if DMA transfers cross an 8K boundary. This is
3072 * apparently hard to tickle, but we'll go ahead and play it
3073 * safe.
3074 */
3075 if (PCI_REVISION(pa->pa_class) <= 0x01) {
3076 sc->sc_dma_maxsegsz = 8192;
3077 sc->sc_dma_boundary = 8192;
3078 }
3079
3080 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3081 WDC_CAPABILITY_MODE;
3082 sc->sc_wdcdev.PIO_cap = 4;
3083 if (sc->sc_dma_ok) {
3084 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3085 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3086 sc->sc_wdcdev.irqack = pciide_irqack;
3087 sc->sc_wdcdev.DMA_cap = 2;
3088 sc->sc_wdcdev.UDMA_cap = 6;
3089 }
3090 sc->sc_wdcdev.set_modes = cmd3112_setup_channel;
3091
3092 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3093 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3094
3095 /*
3096 * The 3112 can be told to identify as a RAID controller.
* In this case, we have to fake the programming interface.
3098 */
3099 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3100 interface = PCI_INTERFACE(pa->pa_class);
3101 } else {
3102 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3103 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3104 }
3105
3106 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3107 cp = &sc->pciide_channels[channel];
3108 if (pciide_chansetup(sc, channel, interface) == 0)
3109 continue;
3110 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3111 pciide_pci_intr);
3112 if (cp->hw_ok == 0)
3113 continue;
3114 pciide_map_compat_intr(pa, cp, channel, interface);
3115 cmd3112_setup_channel(&cp->wdc_channel);
3116 }
3117 }
3118
3119 void
3120 cmd3112_setup_channel(chp)
3121 struct channel_softc *chp;
3122 {
3123 struct ata_drive_datas *drvp;
3124 int drive;
3125 u_int32_t idedma_ctl, dtm;
3126 struct pciide_channel *cp = (struct pciide_channel*)chp;
3127 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
3128
3129 /* setup DMA if needed */
3130 pciide_channel_dma_setup(cp);
3131
3132 idedma_ctl = 0;
3133 dtm = 0;
3134
3135 for (drive = 0; drive < 2; drive++) {
3136 drvp = &chp->ch_drive[drive];
3137 /* If no drive, skip */
3138 if ((drvp->drive_flags & DRIVE) == 0)
3139 continue;
3140 if (drvp->drive_flags & DRIVE_UDMA) {
3141 /* use Ultra/DMA */
3142 drvp->drive_flags &= ~DRIVE_DMA;
3143 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3144 dtm |= DTM_IDEx_DMA;
3145 } else if (drvp->drive_flags & DRIVE_DMA) {
3146 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3147 dtm |= DTM_IDEx_DMA;
3148 } else {
3149 dtm |= DTM_IDEx_PIO;
3150 }
3151 }
3152
3153 /*
* Nothing to do to set up modes; they are meaningless for S-ATA
3155 * (but many S-ATA drives still want to get the SET_FEATURE
3156 * command).
3157 */
3158 if (idedma_ctl != 0) {
3159 /* Add software bits in status register */
3160 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3161 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3162 idedma_ctl);
3163 }
3164 pci_conf_write(sc->sc_pc, sc->sc_tag,
3165 chp->channel == 0 ? SII3112_DTM_IDE0 : SII3112_DTM_IDE1, dtm);
3166 pciide_print_modes(cp);
3167 }
3168
3169 void
3170 cy693_chip_map(sc, pa)
3171 struct pciide_softc *sc;
3172 struct pci_attach_args *pa;
3173 {
3174 struct pciide_channel *cp;
3175 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3176 bus_size_t cmdsize, ctlsize;
3177
3178 if (pciide_chipen(sc, pa) == 0)
3179 return;
3180 /*
* This chip has 2 PCI IDE functions, one for primary and one for
* secondary, so we need to call pciide_mapregs_compat() with
* the real channel.
3184 */
3185 if (pa->pa_function == 1) {
3186 sc->sc_cy_compatchan = 0;
3187 } else if (pa->pa_function == 2) {
3188 sc->sc_cy_compatchan = 1;
3189 } else {
3190 printf("%s: unexpected PCI function %d\n",
3191 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3192 return;
3193 }
3194 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
3195 printf("%s: bus-master DMA support present",
3196 sc->sc_wdcdev.sc_dev.dv_xname);
3197 pciide_mapreg_dma(sc, pa);
3198 } else {
3199 printf("%s: hardware does not support DMA",
3200 sc->sc_wdcdev.sc_dev.dv_xname);
3201 sc->sc_dma_ok = 0;
3202 }
3203 printf("\n");
3204
3205 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
3206 if (sc->sc_cy_handle == NULL) {
3207 printf("%s: unable to map hyperCache control registers\n",
3208 sc->sc_wdcdev.sc_dev.dv_xname);
3209 sc->sc_dma_ok = 0;
3210 }
3211
3212 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3213 WDC_CAPABILITY_MODE;
3214 if (sc->sc_dma_ok) {
3215 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3216 sc->sc_wdcdev.irqack = pciide_irqack;
3217 }
3218 sc->sc_wdcdev.PIO_cap = 4;
3219 sc->sc_wdcdev.DMA_cap = 2;
3220 sc->sc_wdcdev.set_modes = cy693_setup_channel;
3221
3222 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3223 sc->sc_wdcdev.nchannels = 1;
3224
3225 /* Only one channel for this chip; if we are here it's enabled */
3226 cp = &sc->pciide_channels[0];
3227 sc->wdc_chanarray[0] = &cp->wdc_channel;
3228 cp->name = PCIIDE_CHANNEL_NAME(0);
3229 cp->wdc_channel.channel = 0;
3230 cp->wdc_channel.wdc = &sc->sc_wdcdev;
3231 cp->wdc_channel.ch_queue =
3232 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
3233 if (cp->wdc_channel.ch_queue == NULL) {
3234 printf("%s primary channel: "
3235 "can't allocate memory for command queue",
3236 sc->sc_wdcdev.sc_dev.dv_xname);
3237 return;
3238 }
3239 printf("%s: primary channel %s to ",
3240 sc->sc_wdcdev.sc_dev.dv_xname,
3241 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
3242 "configured" : "wired");
3243 if (interface & PCIIDE_INTERFACE_PCI(0)) {
3244 printf("native-PCI");
3245 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
3246 pciide_pci_intr);
3247 } else {
3248 printf("compatibility");
3249 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
3250 &cmdsize, &ctlsize);
3251 }
3252 printf(" mode\n");
3253 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3254 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3255 wdcattach(&cp->wdc_channel);
3256 if (pciide_chan_candisable(cp)) {
3257 pci_conf_write(sc->sc_pc, sc->sc_tag,
3258 PCI_COMMAND_STATUS_REG, 0);
3259 }
3260 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
3261 if (cp->hw_ok == 0)
3262 return;
3263 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
3264 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
3265 cy693_setup_channel(&cp->wdc_channel);
3266 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
3267 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
3268 }
3269
3270 void
3271 cy693_setup_channel(chp)
3272 struct channel_softc *chp;
3273 {
3274 struct ata_drive_datas *drvp;
3275 int drive;
3276 u_int32_t cy_cmd_ctrl;
3277 u_int32_t idedma_ctl;
3278 struct pciide_channel *cp = (struct pciide_channel*)chp;
3279 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3280 int dma_mode = -1;
3281
3282 cy_cmd_ctrl = idedma_ctl = 0;
3283
3284 /* setup DMA if needed */
3285 pciide_channel_dma_setup(cp);
3286
3287 for (drive = 0; drive < 2; drive++) {
3288 drvp = &chp->ch_drive[drive];
3289 /* If no drive, skip */
3290 if ((drvp->drive_flags & DRIVE) == 0)
3291 continue;
3292 /* add timing values, setup DMA if needed */
3293 if (drvp->drive_flags & DRIVE_DMA) {
3294 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3295 /* use Multiword DMA */
3296 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
3297 dma_mode = drvp->DMA_mode;
3298 }
3299 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3300 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
3301 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3302 CY_CMD_CTRL_IOW_REC_OFF(drive));
3303 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3304 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
3305 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3306 CY_CMD_CTRL_IOR_REC_OFF(drive));
3307 }
3308 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
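/*
* The chip has a single DMA timing per channel, so both drives are
* given the lowest of the two negotiated DMA modes.
*/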
3309 chp->ch_drive[0].DMA_mode = dma_mode;
3310 chp->ch_drive[1].DMA_mode = dma_mode;
3311
3312 if (dma_mode == -1)
3313 dma_mode = 0;
3314
3315 if (sc->sc_cy_handle != NULL) {
3316 /* Note: `multiple' is implied. */
3317 cy82c693_write(sc->sc_cy_handle,
3318 (sc->sc_cy_compatchan == 0) ?
3319 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
3320 }
3321
3322 pciide_print_modes(cp);
3323
3324 if (idedma_ctl != 0) {
3325 /* Add software bits in status register */
3326 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3327 IDEDMA_CTL, idedma_ctl);
3328 }
3329 }
3330
3331 static struct sis_hostbr_type {
3332 u_int16_t id;
3333 u_int8_t rev;
3334 u_int8_t udma_mode;
3335 char *name;
3336 u_int8_t type;
3337 #define SIS_TYPE_NOUDMA 0
3338 #define SIS_TYPE_66 1
3339 #define SIS_TYPE_100OLD 2
3340 #define SIS_TYPE_100NEW 3
3341 #define SIS_TYPE_133OLD 4
3342 #define SIS_TYPE_133NEW 5
3343 #define SIS_TYPE_SOUTH 6
3344 } sis_hostbr_type[] = {
/* Most of the info here is from sos (at) freebsd.org */
3346 {PCI_PRODUCT_SIS_530HB, 0x00, 4, "530", SIS_TYPE_66},
3347 #if 0
3348 /*
* Controllers associated with a rev 0x2 530 Host to PCI Bridge
* have problems with UDMA (info provided by Christos).
3351 */
3352 {PCI_PRODUCT_SIS_530HB, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA},
3353 #endif
3354 {PCI_PRODUCT_SIS_540HB, 0x00, 4, "540", SIS_TYPE_66},
3355 {PCI_PRODUCT_SIS_550HB, 0x00, 4, "550", SIS_TYPE_66},
3356 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66},
3357 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66},
3358 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW},
3359 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW},
3360 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW},
3361 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH},
3362 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH},
3363 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH},
3364 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH},
3365 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH},
3366 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH},
3367 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH},
3368 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH},
3369 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH},
3370 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD},
3371 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW},
3372 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW},
3373 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH},
3374 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW},
3375 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH},
3376 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH},
3377 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH},
3378 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", SIS_TYPE_SOUTH},
3379 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH},
3380 {PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH},
3381 /*
* From sos (at) freebsd.org: the 0x961 ID will never be found in the real world
3383 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW},
3384 */
3385 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW},
3386 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW},
3387 };
3388
3389 static struct sis_hostbr_type *sis_hostbr_type_match;
3390
3391 static int
3392 sis_hostbr_match(pa)
3393 struct pci_attach_args *pa;
3394 {
3395 int i;
3396 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS)
3397 return 0;
3398 sis_hostbr_type_match = NULL;
3399 for (i = 0;
3400 i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]);
3401 i++) {
3402 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id &&
3403 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev)
3404 sis_hostbr_type_match = &sis_hostbr_type[i];
3405 }
3406 return (sis_hostbr_type_match != NULL);
3407 }
3408
static int
sis_south_match(pa)
struct pci_attach_args *pa;
3411 {
3412 return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS &&
3413 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 &&
3414 PCI_REVISION(pa->pa_class) >= 0x10);
3415 }
3416
3417 void
3418 sis_chip_map(sc, pa)
3419 struct pciide_softc *sc;
3420 struct pci_attach_args *pa;
3421 {
3422 struct pciide_channel *cp;
3423 int channel;
3424 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
3425 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3426 pcireg_t rev = PCI_REVISION(pa->pa_class);
3427 bus_size_t cmdsize, ctlsize;
3428
3429 if (pciide_chipen(sc, pa) == 0)
3430 return;
3431 printf(": Silicon Integrated System ");
3432 pci_find_device(NULL, sis_hostbr_match);
3433 if (sis_hostbr_type_match) {
3434 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) {
3435 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57,
3436 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3437 SIS_REG_57) & 0x7f);
3438 if (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
3439 PCI_ID_REG)) == SIS_PRODUCT_5518) {
3440 printf("96X UDMA%d",
3441 sis_hostbr_type_match->udma_mode);
3442 sc->sis_type = SIS_TYPE_133NEW;
3443 sc->sc_wdcdev.UDMA_cap =
3444 sis_hostbr_type_match->udma_mode;
3445 } else {
3446 if (pci_find_device(NULL, sis_south_match)) {
3447 sc->sis_type = SIS_TYPE_133OLD;
3448 sc->sc_wdcdev.UDMA_cap =
3449 sis_hostbr_type_match->udma_mode;
3450 } else {
3451 sc->sis_type = SIS_TYPE_100NEW;
3452 sc->sc_wdcdev.UDMA_cap =
3453 sis_hostbr_type_match->udma_mode;
3454 }
3455 }
3456 } else {
3457 sc->sis_type = sis_hostbr_type_match->type;
3458 sc->sc_wdcdev.UDMA_cap =
3459 sis_hostbr_type_match->udma_mode;
3460 }
printf("%s", sis_hostbr_type_match->name);
3462 } else {
3463 printf("5597/5598");
3464 if (rev >= 0xd0) {
3465 sc->sc_wdcdev.UDMA_cap = 2;
3466 sc->sis_type = SIS_TYPE_66;
3467 } else {
3468 sc->sc_wdcdev.UDMA_cap = 0;
3469 sc->sis_type = SIS_TYPE_NOUDMA;
3470 }
3471 }
3472 printf(" IDE controller (rev. 0x%02x)\n", PCI_REVISION(pa->pa_class));
3473 printf("%s: bus-master DMA support present",
3474 sc->sc_wdcdev.sc_dev.dv_xname);
3475 pciide_mapreg_dma(sc, pa);
3476 printf("\n");
3477
3478 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3479 WDC_CAPABILITY_MODE;
3480 if (sc->sc_dma_ok) {
3481 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3482 sc->sc_wdcdev.irqack = pciide_irqack;
3483 if (sc->sis_type >= SIS_TYPE_66)
3484 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3485 }
3486
3487 sc->sc_wdcdev.PIO_cap = 4;
3488 sc->sc_wdcdev.DMA_cap = 2;
3489
3490 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3491 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
switch (sc->sis_type) {
3493 case SIS_TYPE_NOUDMA:
3494 case SIS_TYPE_66:
3495 case SIS_TYPE_100OLD:
3496 sc->sc_wdcdev.set_modes = sis_setup_channel;
3497 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
3498 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
3499 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC);
3500 break;
3501 case SIS_TYPE_100NEW:
3502 case SIS_TYPE_133OLD:
3503 sc->sc_wdcdev.set_modes = sis_setup_channel;
3504 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49,
3505 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01);
3506 break;
3507 case SIS_TYPE_133NEW:
3508 sc->sc_wdcdev.set_modes = sis96x_setup_channel;
3509 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50,
3510 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7);
3511 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52,
3512 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7);
3513 break;
3514 }
3515
3516
3517 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3518 cp = &sc->pciide_channels[channel];
3519 if (pciide_chansetup(sc, channel, interface) == 0)
3520 continue;
3521 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
3522 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
3523 printf("%s: %s channel ignored (disabled)\n",
3524 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3525 continue;
3526 }
3527 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3528 pciide_pci_intr);
3529 if (cp->hw_ok == 0)
3530 continue;
3531 if (pciide_chan_candisable(cp)) {
3532 if (channel == 0)
3533 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
3534 else
3535 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
3536 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
3537 sis_ctr0);
3538 }
3539 pciide_map_compat_intr(pa, cp, channel, interface);
3540 if (cp->hw_ok == 0)
3541 continue;
3542 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3543 }
3544 }
3545
3546 void
3547 sis96x_setup_channel(chp)
3548 struct channel_softc *chp;
3549 {
3550 struct ata_drive_datas *drvp;
3551 int drive;
3552 u_int32_t sis_tim;
3553 u_int32_t idedma_ctl;
3554 int regtim;
3555 struct pciide_channel *cp = (struct pciide_channel*)chp;
3556 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3557
3558 sis_tim = 0;
3559 idedma_ctl = 0;
3560 /* setup DMA if needed */
3561 pciide_channel_dma_setup(cp);
3562
3563 for (drive = 0; drive < 2; drive++) {
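/*
 * SIS_TIM133() derives the config-space offset of this drive's timing
 * register from the value of register 0x57, the channel and the drive.
 */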
3564 regtim = SIS_TIM133(
3565 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57),
3566 chp->channel, drive);
3567 drvp = &chp->ch_drive[drive];
3568 /* If no drive, skip */
3569 if ((drvp->drive_flags & DRIVE) == 0)
3570 continue;
3571 /* add timing values, setup DMA if needed */
3572 if (drvp->drive_flags & DRIVE_UDMA) {
3573 /* use Ultra/DMA */
3574 drvp->drive_flags &= ~DRIVE_DMA;
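/* clamp to UDMA2 if the cable register reports an Ultra/33-only cable */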
3575 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3576 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) {
3577 if (drvp->UDMA_mode > 2)
3578 drvp->UDMA_mode = 2;
3579 }
3580 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode];
3581 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3582 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3583 } else if (drvp->drive_flags & DRIVE_DMA) {
3584 /*
3585 * use Multiword DMA
3586 * Timings will be used for both PIO and DMA,
3587 * so adjust DMA mode if needed
3588 */
3589 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3590 drvp->PIO_mode = drvp->DMA_mode + 2;
3591 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3592 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3593 drvp->PIO_mode - 2 : 0;
3594 sis_tim |= sis_dma133new_tim[drvp->DMA_mode];
3595 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3596 } else {
3597 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3598 }
3599 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for "
3600 "channel %d drive %d: 0x%x (reg 0x%x)\n",
3601 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE);
3602 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim);
3603 }
3604 if (idedma_ctl != 0) {
3605 /* Add software bits in status register */
3606 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3607 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3608 idedma_ctl);
3609 }
3610 pciide_print_modes(cp);
3611 }
3612
3613 void
3614 sis_setup_channel(chp)
3615 struct channel_softc *chp;
3616 {
3617 struct ata_drive_datas *drvp;
3618 int drive;
3619 u_int32_t sis_tim;
3620 u_int32_t idedma_ctl;
3621 struct pciide_channel *cp = (struct pciide_channel*)chp;
3622 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3623
3624 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3625 "channel %d 0x%x\n", chp->channel,
3626 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3627 DEBUG_PROBE);
3628 sis_tim = 0;
3629 idedma_ctl = 0;
3630 /* setup DMA if needed */
3631 pciide_channel_dma_setup(cp);
3632
3633 for (drive = 0; drive < 2; drive++) {
3634 drvp = &chp->ch_drive[drive];
3635 /* If no drive, skip */
3636 if ((drvp->drive_flags & DRIVE) == 0)
3637 continue;
3638 /* add timing values, setup DMA if needed */
3639 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3640 (drvp->drive_flags & DRIVE_UDMA) == 0)
3641 goto pio;
3642
3643 if (drvp->drive_flags & DRIVE_UDMA) {
3644 /* use Ultra/DMA */
3645 drvp->drive_flags &= ~DRIVE_DMA;
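/* clamp to UDMA2 if the cable register reports an Ultra/33-only cable */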
3646 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3647 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) {
3648 if (drvp->UDMA_mode > 2)
3649 drvp->UDMA_mode = 2;
3650 }
3651 switch (sc->sis_type) {
3652 case SIS_TYPE_66:
3653 case SIS_TYPE_100OLD:
3654 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] <<
3655 SIS_TIM66_UDMA_TIME_OFF(drive);
3656 break;
3657 case SIS_TYPE_100NEW:
3658 sis_tim |=
3659 sis_udma100new_tim[drvp->UDMA_mode] <<
3660 SIS_TIM100_UDMA_TIME_OFF(drive);
break;	/* don't fall through and merge in the 133OLD timings */
3661 case SIS_TYPE_133OLD:
3662 sis_tim |=
3663 sis_udma133old_tim[drvp->UDMA_mode] <<
3664 SIS_TIM100_UDMA_TIME_OFF(drive);
3665 break;
3666 default:
3667 printf("unknown SiS IDE type %d\n",
3668 sc->sis_type);
3669 }
3670 } else {
3671 /*
3672 * use Multiword DMA
3673 * Timings will be used for both PIO and DMA,
3674 * so adjust DMA mode if needed
3675 */
3676 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3677 drvp->PIO_mode = drvp->DMA_mode + 2;
3678 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3679 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3680 drvp->PIO_mode - 2 : 0;
3681 if (drvp->DMA_mode == 0)
3682 drvp->PIO_mode = 0;
3683 }
3684 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3685 pio: switch (sc->sis_type) {
3686 case SIS_TYPE_NOUDMA:
3687 case SIS_TYPE_66:
3688 case SIS_TYPE_100OLD:
3689 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3690 SIS_TIM66_ACT_OFF(drive);
3691 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3692 SIS_TIM66_REC_OFF(drive);
3693 break;
3694 case SIS_TYPE_100NEW:
3695 case SIS_TYPE_133OLD:
3696 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3697 SIS_TIM100_ACT_OFF(drive);
3698 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3699 SIS_TIM100_REC_OFF(drive);
3700 break;
3701 default:
3702 printf("unknown SiS IDE type %d\n",
3703 sc->sis_type);
3704 }
3705 }
3706 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3707 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3708 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3709 if (idedma_ctl != 0) {
3710 /* Add software bits in status register */
3711 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3712 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3713 idedma_ctl);
3714 }
3715 pciide_print_modes(cp);
3716 }
3717
3718 void
3719 acer_chip_map(sc, pa)
3720 struct pciide_softc *sc;
3721 struct pci_attach_args *pa;
3722 {
3723 struct pciide_channel *cp;
3724 int channel;
3725 pcireg_t cr, interface;
3726 bus_size_t cmdsize, ctlsize;
3727 pcireg_t rev = PCI_REVISION(pa->pa_class);
3728
3729 if (pciide_chipen(sc, pa) == 0)
3730 return;
3731 printf("%s: bus-master DMA support present",
3732 sc->sc_wdcdev.sc_dev.dv_xname);
3733 pciide_mapreg_dma(sc, pa);
3734 printf("\n");
3735 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3736 WDC_CAPABILITY_MODE;
3737 if (sc->sc_dma_ok) {
3738 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3739 if (rev >= 0x20) {
3740 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3741 if (rev >= 0xC4)
3742 sc->sc_wdcdev.UDMA_cap = 5;
3743 else if (rev >= 0xC2)
3744 sc->sc_wdcdev.UDMA_cap = 4;
3745 else
3746 sc->sc_wdcdev.UDMA_cap = 2;
3747 }
3748 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3749 sc->sc_wdcdev.irqack = pciide_irqack;
3750 }
3751
3752 sc->sc_wdcdev.PIO_cap = 4;
3753 sc->sc_wdcdev.DMA_cap = 2;
3754 sc->sc_wdcdev.set_modes = acer_setup_channel;
3755 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3756 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3757
3758 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3759 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3760 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3761
3762 /* Enable "microsoft register bits" R/W. */
3763 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3764 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3765 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3766 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3767 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3768 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3769 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3770 ~ACER_CHANSTATUSREGS_RO);
3771 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3772 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3773 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3774 /* Don't use cr, re-read the real register content instead */
3775 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3776 PCI_CLASS_REG));
3777
3778 /* From linux: enable "Cable Detection" */
3779 if (rev >= 0xC2) {
3780 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3781 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3782 | ACER_0x4B_CDETECT);
3783 }
3784
3785 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3786 cp = &sc->pciide_channels[channel];
3787 if (pciide_chansetup(sc, channel, interface) == 0)
3788 continue;
3789 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3790 printf("%s: %s channel ignored (disabled)\n",
3791 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3792 continue;
3793 }
3794 /* newer controllers seem to lack the ACER_CHIDS register. Sigh */
3795 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3796 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3797 if (cp->hw_ok == 0)
3798 continue;
3799 if (pciide_chan_candisable(cp)) {
3800 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3801 pci_conf_write(sc->sc_pc, sc->sc_tag,
3802 PCI_CLASS_REG, cr);
3803 }
3804 pciide_map_compat_intr(pa, cp, channel, interface);
3805 acer_setup_channel(&cp->wdc_channel);
3806 }
3807 }
3808
3809 void
3810 acer_setup_channel(chp)
3811 struct channel_softc *chp;
3812 {
3813 struct ata_drive_datas *drvp;
3814 int drive;
3815 u_int32_t acer_fifo_udma;
3816 u_int32_t idedma_ctl;
3817 struct pciide_channel *cp = (struct pciide_channel*)chp;
3818 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3819
3820 idedma_ctl = 0;
3821 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3822 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3823 acer_fifo_udma), DEBUG_PROBE);
3824 /* setup DMA if needed */
3825 pciide_channel_dma_setup(cp);
3826
3827 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3828 DRIVE_UDMA) { /* check 80 pins cable */
3829 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3830 ACER_0x4A_80PIN(chp->channel)) {
3831 if (chp->ch_drive[0].UDMA_mode > 2)
3832 chp->ch_drive[0].UDMA_mode = 2;
3833 if (chp->ch_drive[1].UDMA_mode > 2)
3834 chp->ch_drive[1].UDMA_mode = 2;
3835 }
3836 }
3837
3838 for (drive = 0; drive < 2; drive++) {
3839 drvp = &chp->ch_drive[drive];
3840 /* If no drive, skip */
3841 if ((drvp->drive_flags & DRIVE) == 0)
3842 continue;
3843 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3844 "channel %d drive %d 0x%x\n", chp->channel, drive,
3845 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3846 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3847 /* clear FIFO/DMA mode */
3848 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3849 ACER_UDMA_EN(chp->channel, drive) |
3850 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3851
3852 /* add timing values, setup DMA if needed */
3853 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3854 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3855 acer_fifo_udma |=
3856 ACER_FTH_OPL(chp->channel, drive, 0x1);
3857 goto pio;
3858 }
3859
3860 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3861 if (drvp->drive_flags & DRIVE_UDMA) {
3862 /* use Ultra/DMA */
3863 drvp->drive_flags &= ~DRIVE_DMA;
3864 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3865 acer_fifo_udma |=
3866 ACER_UDMA_TIM(chp->channel, drive,
3867 acer_udma[drvp->UDMA_mode]);
3868 /* XXX disable if one drive < UDMA3 ? */
3869 if (drvp->UDMA_mode >= 3) {
3870 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3871 ACER_0x4B,
3872 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3873 ACER_0x4B) | ACER_0x4B_UDMA66);
3874 }
3875 } else {
3876 /*
3877 * use Multiword DMA
3878 * Timings will be used for both PIO and DMA,
3879 * so adjust DMA mode if needed
3880 */
3881 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3882 drvp->PIO_mode = drvp->DMA_mode + 2;
3883 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3884 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3885 drvp->PIO_mode - 2 : 0;
3886 if (drvp->DMA_mode == 0)
3887 drvp->PIO_mode = 0;
3888 }
3889 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3890 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3891 ACER_IDETIM(chp->channel, drive),
3892 acer_pio[drvp->PIO_mode]);
3893 }
3894 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3895 acer_fifo_udma), DEBUG_PROBE);
3896 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3897 if (idedma_ctl != 0) {
3898 /* Add software bits in status register */
3899 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3900 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3901 idedma_ctl);
3902 }
3903 pciide_print_modes(cp);
3904 }
3905
3906 int
3907 acer_pci_intr(arg)
3908 void *arg;
3909 {
3910 struct pciide_softc *sc = arg;
3911 struct pciide_channel *cp;
3912 struct channel_softc *wdc_cp;
3913 int i, rv, crv;
3914 u_int32_t chids;
3915
3916 rv = 0;
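/*
 * ACER_CHIDS holds per-channel interrupt status bits; only channels
 * flagged as interrupting are passed to wdcintr().
 */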
3917 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3918 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3919 cp = &sc->pciide_channels[i];
3920 wdc_cp = &cp->wdc_channel;
3921 /* If a compat channel, skip. */
3922 if (cp->compat)
3923 continue;
3924 if (chids & ACER_CHIDS_INT(i)) {
3925 crv = wdcintr(wdc_cp);
3926 if (crv == 0)
3927 printf("%s:%d: bogus intr\n",
3928 sc->sc_wdcdev.sc_dev.dv_xname, i);
3929 else
3930 rv = 1;
3931 }
3932 }
3933 return rv;
3934 }
3935
3936 void
3937 hpt_chip_map(sc, pa)
3938 struct pciide_softc *sc;
3939 struct pci_attach_args *pa;
3940 {
3941 struct pciide_channel *cp;
3942 int i, compatchan, revision;
3943 pcireg_t interface;
3944 bus_size_t cmdsize, ctlsize;
3945
3946 if (pciide_chipen(sc, pa) == 0)
3947 return;
3948 revision = PCI_REVISION(pa->pa_class);
3949 printf(": Triones/Highpoint ");
3950 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3951 printf("HPT374 IDE Controller\n");
3952 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
3953 printf("HPT372 IDE Controller\n");
3954 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3955 if (revision == HPT372_REV)
3956 printf("HPT372 IDE Controller\n");
3957 else if (revision == HPT370_REV)
3958 printf("HPT370 IDE Controller\n");
3959 else if (revision == HPT370A_REV)
3960 printf("HPT370A IDE Controller\n");
3961 else if (revision == HPT366_REV)
3962 printf("HPT366 IDE Controller\n");
3963 else
3964 printf("unknown HPT IDE controller rev %d\n", revision);
3965 } else
3966 printf("unknown HPT IDE controller 0x%x\n",
3967 sc->sc_pp->ide_product);
3968
3969 /*
3970 * when the chip is in native mode it identifies itself as a
3971 * 'misc mass storage' device. Fake the interface in this case.
3972 */
3973 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3974 interface = PCI_INTERFACE(pa->pa_class);
3975 } else {
3976 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3977 PCIIDE_INTERFACE_PCI(0);
3978 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3979 (revision == HPT370_REV || revision == HPT370A_REV ||
3980 revision == HPT372_REV)) ||
3981 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3982 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3983 interface |= PCIIDE_INTERFACE_PCI(1);
3984 }
3985
3986 printf("%s: bus-master DMA support present",
3987 sc->sc_wdcdev.sc_dev.dv_xname);
3988 pciide_mapreg_dma(sc, pa);
3989 printf("\n");
3990 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3991 WDC_CAPABILITY_MODE;
3992 if (sc->sc_dma_ok) {
3993 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3994 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3995 sc->sc_wdcdev.irqack = pciide_irqack;
3996 }
3997 sc->sc_wdcdev.PIO_cap = 4;
3998 sc->sc_wdcdev.DMA_cap = 2;
3999
4000 sc->sc_wdcdev.set_modes = hpt_setup_channel;
4001 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4002 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4003 revision == HPT366_REV) {
4004 sc->sc_wdcdev.UDMA_cap = 4;
4005 /*
4006 * The 366 has 2 PCI IDE functions, one for primary and one
4007 * for secondary. So we need to call pciide_mapregs_compat()
4008 * with the real channel
4009 */
4010 if (pa->pa_function == 0) {
4011 compatchan = 0;
4012 } else if (pa->pa_function == 1) {
4013 compatchan = 1;
4014 } else {
4015 printf("%s: unexpected PCI function %d\n",
4016 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
4017 return;
4018 }
4019 sc->sc_wdcdev.nchannels = 1;
4020 } else {
4021 sc->sc_wdcdev.nchannels = 2;
4022 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
4023 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4024 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4025 revision == HPT372_REV))
4026 sc->sc_wdcdev.UDMA_cap = 6;
4027 else
4028 sc->sc_wdcdev.UDMA_cap = 5;
4029 }
4030 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4031 cp = &sc->pciide_channels[i];
4032 if (sc->sc_wdcdev.nchannels > 1) {
4033 compatchan = i;
4034 if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
4035 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
4036 printf("%s: %s channel ignored (disabled)\n",
4037 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4038 continue;
4039 }
4040 }
4041 if (pciide_chansetup(sc, i, interface) == 0)
4042 continue;
4043 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4044 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4045 &ctlsize, hpt_pci_intr);
4046 } else {
4047 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
4048 &cmdsize, &ctlsize);
4049 }
4050 if (cp->hw_ok == 0)
4051 return;
4052 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4053 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4054 wdcattach(&cp->wdc_channel);
4055 hpt_setup_channel(&cp->wdc_channel);
4056 }
4057 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4058 (revision == HPT370_REV || revision == HPT370A_REV ||
4059 revision == HPT372_REV)) ||
4060 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4061 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
4062 /*
4063 * HPT370_REV and higher have a bit to disable interrupts;
4064 * make sure to clear it
4065 */
4066 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
4067 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
4068 ~HPT_CSEL_IRQDIS);
4069 }
4070 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
4071 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4072 revision == HPT372_REV) ||
4073 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4074 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
4075 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
4076 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
4077 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
4078 return;
4079 }
4080
4081 void
4082 hpt_setup_channel(chp)
4083 struct channel_softc *chp;
4084 {
4085 struct ata_drive_datas *drvp;
4086 int drive;
4087 int cable;
4088 u_int32_t before, after;
4089 u_int32_t idedma_ctl;
4090 struct pciide_channel *cp = (struct pciide_channel*)chp;
4091 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4092 int revision =
4093 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
4094
4095 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
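/* the per-channel cable-ID bits in HPT_CSEL are used to clamp UDMA below */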
4096
4097 /* setup DMA if needed */
4098 pciide_channel_dma_setup(cp);
4099
4100 idedma_ctl = 0;
4101
4102 /* Per drive settings */
4103 for (drive = 0; drive < 2; drive++) {
4104 drvp = &chp->ch_drive[drive];
4105 /* If no drive, skip */
4106 if ((drvp->drive_flags & DRIVE) == 0)
4107 continue;
4108 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
4109 HPT_IDETIM(chp->channel, drive));
4110
4111 /* add timing values, setup DMA if needed */
4112 if (drvp->drive_flags & DRIVE_UDMA) {
4113 /* use Ultra/DMA */
4114 drvp->drive_flags &= ~DRIVE_DMA;
4115 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
4116 drvp->UDMA_mode > 2)
4117 drvp->UDMA_mode = 2;
4118 switch (sc->sc_pp->ide_product) {
4119 case PCI_PRODUCT_TRIONES_HPT374:
4120 after = hpt374_udma[drvp->UDMA_mode];
4121 break;
4122 case PCI_PRODUCT_TRIONES_HPT372:
4123 after = hpt372_udma[drvp->UDMA_mode];
4124 break;
4125 case PCI_PRODUCT_TRIONES_HPT366:
4126 default:
4127 switch(revision) {
4128 case HPT372_REV:
4129 after = hpt372_udma[drvp->UDMA_mode];
4130 break;
4131 case HPT370_REV:
4132 case HPT370A_REV:
4133 after = hpt370_udma[drvp->UDMA_mode];
4134 break;
4135 case HPT366_REV:
4136 default:
4137 after = hpt366_udma[drvp->UDMA_mode];
4138 break;
4139 }
4140 }
4141 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4142 } else if (drvp->drive_flags & DRIVE_DMA) {
4143 /*
4144 * use Multiword DMA.
4145 * Timings will be used for both PIO and DMA, so adjust
4146 * DMA mode if needed
4147 */
4148 if (drvp->PIO_mode >= 3 &&
4149 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
4150 drvp->DMA_mode = drvp->PIO_mode - 2;
4151 }
4152 switch (sc->sc_pp->ide_product) {
4153 case PCI_PRODUCT_TRIONES_HPT374:
4154 after = hpt374_dma[drvp->DMA_mode];
4155 break;
4156 case PCI_PRODUCT_TRIONES_HPT372:
4157 after = hpt372_dma[drvp->DMA_mode];
4158 break;
4159 case PCI_PRODUCT_TRIONES_HPT366:
4160 default:
4161 switch(revision) {
4162 case HPT372_REV:
4163 after = hpt372_dma[drvp->DMA_mode];
4164 break;
4165 case HPT370_REV:
4166 case HPT370A_REV:
4167 after = hpt370_dma[drvp->DMA_mode];
4168 break;
4169 case HPT366_REV:
4170 default:
4171 after = hpt366_dma[drvp->DMA_mode];
4172 break;
4173 }
4174 }
4175 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4176 } else {
4177 /* PIO only */
4178 switch (sc->sc_pp->ide_product) {
4179 case PCI_PRODUCT_TRIONES_HPT374:
4180 after = hpt374_pio[drvp->PIO_mode];
4181 break;
4182 case PCI_PRODUCT_TRIONES_HPT372:
4183 after = hpt372_pio[drvp->PIO_mode];
4184 break;
4185 case PCI_PRODUCT_TRIONES_HPT366:
4186 default:
4187 switch(revision) {
4188 case HPT372_REV:
4189 after = hpt372_pio[drvp->PIO_mode];
4190 break;
4191 case HPT370_REV:
4192 case HPT370A_REV:
4193 after = hpt370_pio[drvp->PIO_mode];
4194 break;
4195 case HPT366_REV:
4196 default:
4197 after = hpt366_pio[drvp->PIO_mode];
4198 break;
4199 }
4200 }
4201 }
4202 pci_conf_write(sc->sc_pc, sc->sc_tag,
4203 HPT_IDETIM(chp->channel, drive), after);
4204 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
4205 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
4206 after, before), DEBUG_PROBE);
4207 }
4208 if (idedma_ctl != 0) {
4209 /* Add software bits in status register */
4210 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4211 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4212 idedma_ctl);
4213 }
4214 pciide_print_modes(cp);
4215 }
4216
4217 int
4218 hpt_pci_intr(arg)
4219 void *arg;
4220 {
4221 struct pciide_softc *sc = arg;
4222 struct pciide_channel *cp;
4223 struct channel_softc *wdc_cp;
4224 int rv = 0;
4225 int dmastat, i, crv;
4226
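/*
 * Poll each channel's bus-master DMA status; only channels showing
 * IDEDMA_CTL_INTR set (with no transfer active) get their interrupt
 * serviced.
 */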
4227 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4228 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4229 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4230 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4231 IDEDMA_CTL_INTR)
4232 continue;
4233 cp = &sc->pciide_channels[i];
4234 wdc_cp = &cp->wdc_channel;
4235 crv = wdcintr(wdc_cp);
4236 if (crv == 0) {
4237 printf("%s:%d: bogus intr\n",
4238 sc->sc_wdcdev.sc_dev.dv_xname, i);
4239 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4240 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4241 } else
4242 rv = 1;
4243 }
4244 return rv;
4245 }
4246
4247
4248 /* Macros to test product */
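/*
 * Each macro matches a progressively newer subset of the Promise family:
 * PDC_IS_262 covers Ultra/66 and later, PDC_IS_265 Ultra/100 and later,
 * PDC_IS_268 the TX2-style parts and later, PDC_IS_276 the Ultra/133 parts.
 */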
4249 #define PDC_IS_262(sc) \
4250 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
4251 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
4252 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
4253 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4254 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4255 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4256 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4257 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4258 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4259 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4260 #define PDC_IS_265(sc) \
4261 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
4262 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
4263 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4264 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4265 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4266 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4267 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4268 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4269 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4270 #define PDC_IS_268(sc) \
4271 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4272 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4273 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4274 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4275 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4276 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4277 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4278 #define PDC_IS_276(sc) \
4279 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4280 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4281 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4282 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4283 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4284
4285 void
4286 pdc202xx_chip_map(sc, pa)
4287 struct pciide_softc *sc;
4288 struct pci_attach_args *pa;
4289 {
4290 struct pciide_channel *cp;
4291 int channel;
4292 pcireg_t interface, st, mode;
4293 bus_size_t cmdsize, ctlsize;
4294
4295 if (!PDC_IS_268(sc)) {
4296 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4297 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
4298 st), DEBUG_PROBE);
4299 }
4300 if (pciide_chipen(sc, pa) == 0)
4301 return;
4302
4303 /* turn off RAID mode */
4304 if (!PDC_IS_268(sc))
4305 st &= ~PDC2xx_STATE_IDERAID;
4306
4307 /*
4308 * can't rely on the PCI_CLASS_REG content if the chip was in RAID
4309 * mode; we have to fake the interface.
4310 */
4311 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
4312 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
4313 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4314
4315 printf("%s: bus-master DMA support present",
4316 sc->sc_wdcdev.sc_dev.dv_xname);
4317 pciide_mapreg_dma(sc, pa);
4318 printf("\n");
4319 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4320 WDC_CAPABILITY_MODE;
4321 if (sc->sc_dma_ok) {
4322 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4323 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4324 sc->sc_wdcdev.irqack = pciide_irqack;
4325 }
4326 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
4327 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
4328 sc->sc_wdcdev.cap |= WDC_CAPABILITY_RAID;
4329 sc->sc_wdcdev.PIO_cap = 4;
4330 sc->sc_wdcdev.DMA_cap = 2;
4331 if (PDC_IS_276(sc))
4332 sc->sc_wdcdev.UDMA_cap = 6;
4333 else if (PDC_IS_265(sc))
4334 sc->sc_wdcdev.UDMA_cap = 5;
4335 else if (PDC_IS_262(sc))
4336 sc->sc_wdcdev.UDMA_cap = 4;
4337 else
4338 sc->sc_wdcdev.UDMA_cap = 2;
4339 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
4340 pdc20268_setup_channel : pdc202xx_setup_channel;
4341 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4342 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4343
4344 if (!PDC_IS_268(sc)) {
4345 /* setup failsafe defaults */
4346 mode = 0;
4347 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
4348 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
4349 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
4350 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
4351 for (channel = 0;
4352 channel < sc->sc_wdcdev.nchannels;
4353 channel++) {
4354 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4355 "drive 0 initial timings 0x%x, now 0x%x\n",
4356 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4357 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
4358 DEBUG_PROBE);
4359 pci_conf_write(sc->sc_pc, sc->sc_tag,
4360 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
4361 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4362 "drive 1 initial timings 0x%x, now 0x%x\n",
4363 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4364 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
4365 pci_conf_write(sc->sc_pc, sc->sc_tag,
4366 PDC2xx_TIM(channel, 1), mode);
4367 }
4368
4369 mode = PDC2xx_SCR_DMA;
4370 if (PDC_IS_262(sc)) {
4371 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
4372 } else {
4373 /* the BIOS set it up this way */
4374 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
4375 }
4376 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
4377 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
4378 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
4379 "now 0x%x\n",
4380 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4381 PDC2xx_SCR),
4382 mode), DEBUG_PROBE);
4383 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4384 PDC2xx_SCR, mode);
4385
4386 /* controller initial state register is OK even without BIOS */
4387 /* Set DMA mode to IDE DMA compatibility */
4388 mode =
4389 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
4390 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
4391 DEBUG_PROBE);
4392 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
4393 mode | 0x1);
4394 mode =
4395 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
4396 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
4397 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
4398 mode | 0x1);
4399 }
4400
4401 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4402 cp = &sc->pciide_channels[channel];
4403 if (pciide_chansetup(sc, channel, interface) == 0)
4404 continue;
4405 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
4406 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
4407 printf("%s: %s channel ignored (disabled)\n",
4408 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4409 continue;
4410 }
4411 if (PDC_IS_265(sc))
4412 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4413 pdc20265_pci_intr);
4414 else
4415 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4416 pdc202xx_pci_intr);
4417 if (cp->hw_ok == 0)
4418 continue;
4419 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
4420 st &= ~(PDC_IS_262(sc) ?
4421 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
4422 pciide_map_compat_intr(pa, cp, channel, interface);
4423 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
4424 }
4425 if (!PDC_IS_268(sc)) {
4426 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
4427 "0x%x\n", st), DEBUG_PROBE);
4428 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
4429 }
4430 return;
4431 }
4432
4433 void
4434 pdc202xx_setup_channel(chp)
4435 struct channel_softc *chp;
4436 {
4437 struct ata_drive_datas *drvp;
4438 int drive;
4439 pcireg_t mode, st;
4440 u_int32_t idedma_ctl, scr, atapi;
4441 struct pciide_channel *cp = (struct pciide_channel*)chp;
4442 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4443 int channel = chp->channel;
4444
4445 /* setup DMA if needed */
4446 pciide_channel_dma_setup(cp);
4447
4448 idedma_ctl = 0;
4449 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
4450 sc->sc_wdcdev.sc_dev.dv_xname,
4451 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
4452 DEBUG_PROBE);
4453
4454 /* Per channel settings */
4455 if (PDC_IS_262(sc)) {
4456 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4457 PDC262_U66);
4458 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4459 /* Trim UDMA mode */
4460 if ((st & PDC262_STATE_80P(channel)) != 0 ||
4461 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4462 chp->ch_drive[0].UDMA_mode <= 2) ||
4463 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4464 chp->ch_drive[1].UDMA_mode <= 2)) {
4465 if (chp->ch_drive[0].UDMA_mode > 2)
4466 chp->ch_drive[0].UDMA_mode = 2;
4467 if (chp->ch_drive[1].UDMA_mode > 2)
4468 chp->ch_drive[1].UDMA_mode = 2;
4469 }
4470 /* Set U66 if needed */
4471 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4472 chp->ch_drive[0].UDMA_mode > 2) ||
4473 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4474 chp->ch_drive[1].UDMA_mode > 2))
4475 scr |= PDC262_U66_EN(channel);
4476 else
4477 scr &= ~PDC262_U66_EN(channel);
4478 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4479 PDC262_U66, scr);
4480 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
4481 sc->sc_wdcdev.sc_dev.dv_xname, channel,
4482 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4483 PDC262_ATAPI(channel))), DEBUG_PROBE);
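/*
 * Enable the channel's ATAPI UDMA bit unless one drive runs UDMA
 * while its mate runs plain multiword DMA.
 */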
4484 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4485 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4486 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4487 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4488 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
4489 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4490 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4491 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4492 atapi = 0;
4493 else
4494 atapi = PDC262_ATAPI_UDMA;
4495 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4496 PDC262_ATAPI(channel), atapi);
4497 }
4498 }
4499 for (drive = 0; drive < 2; drive++) {
4500 drvp = &chp->ch_drive[drive];
4501 /* If no drive, skip */
4502 if ((drvp->drive_flags & DRIVE) == 0)
4503 continue;
4504 mode = 0;
4505 if (drvp->drive_flags & DRIVE_UDMA) {
4506 /* use Ultra/DMA */
4507 drvp->drive_flags &= ~DRIVE_DMA;
4508 mode = PDC2xx_TIM_SET_MB(mode,
4509 pdc2xx_udma_mb[drvp->UDMA_mode]);
4510 mode = PDC2xx_TIM_SET_MC(mode,
4511 pdc2xx_udma_mc[drvp->UDMA_mode]);
4512 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4513 } else if (drvp->drive_flags & DRIVE_DMA) {
4514 mode = PDC2xx_TIM_SET_MB(mode,
4515 pdc2xx_dma_mb[drvp->DMA_mode]);
4516 mode = PDC2xx_TIM_SET_MC(mode,
4517 pdc2xx_dma_mc[drvp->DMA_mode]);
4518 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4519 } else {
4520 mode = PDC2xx_TIM_SET_MB(mode,
4521 pdc2xx_dma_mb[0]);
4522 mode = PDC2xx_TIM_SET_MC(mode,
4523 pdc2xx_dma_mc[0]);
4524 }
4525 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
4526 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
4527 if (drvp->drive_flags & DRIVE_ATA)
4528 mode |= PDC2xx_TIM_PRE;
4529 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
4530 if (drvp->PIO_mode >= 3) {
4531 mode |= PDC2xx_TIM_IORDY;
4532 if (drive == 0)
4533 mode |= PDC2xx_TIM_IORDYp;
4534 }
4535 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
4536 "timings 0x%x\n",
4537 sc->sc_wdcdev.sc_dev.dv_xname,
4538 chp->channel, drive, mode), DEBUG_PROBE);
4539 pci_conf_write(sc->sc_pc, sc->sc_tag,
4540 PDC2xx_TIM(chp->channel, drive), mode);
4541 }
4542 if (idedma_ctl != 0) {
4543 /* Add software bits in status register */
4544 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4545 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4546 idedma_ctl);
4547 }
4548 pciide_print_modes(cp);
4549 }
4550
4551 void
4552 pdc20268_setup_channel(chp)
4553 struct channel_softc *chp;
4554 {
4555 struct ata_drive_datas *drvp;
4556 int drive;
4557 u_int32_t idedma_ctl;
4558 struct pciide_channel *cp = (struct pciide_channel*)chp;
4559 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4560 int u100;
4561
4562 /* setup DMA if needed */
4563 pciide_channel_dma_setup(cp);
4564
4565 idedma_ctl = 0;
4566
4567 /* I don't know what this is for; FreeBSD does it ... */
4568 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4569 IDEDMA_CMD + 0x1, 0x0b);
4570
4571 /*
4572 * I don't know what this is for; FreeBSD checks it ... it is not
4573 * cable-type detection.
4574 */
4575 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4576 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
4577
4578 for (drive = 0; drive < 2; drive++) {
4579 drvp = &chp->ch_drive[drive];
4580 /* If no drive, skip */
4581 if ((drvp->drive_flags & DRIVE) == 0)
4582 continue;
4583 if (drvp->drive_flags & DRIVE_UDMA) {
4584 /* use Ultra/DMA */
4585 drvp->drive_flags &= ~DRIVE_DMA;
4586 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4587 if (drvp->UDMA_mode > 2 && u100 == 0)
4588 drvp->UDMA_mode = 2;
4589 } else if (drvp->drive_flags & DRIVE_DMA) {
4590 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4591 }
4592 }
4593 /* nothing to do to set up modes; the controller snoops the SET_FEATURES command */
4594 if (idedma_ctl != 0) {
4595 /* Add software bits in status register */
4596 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4597 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4598 idedma_ctl);
4599 }
4600 pciide_print_modes(cp);
4601 }
4602
4603 int
4604 pdc202xx_pci_intr(arg)
4605 void *arg;
4606 {
4607 struct pciide_softc *sc = arg;
4608 struct pciide_channel *cp;
4609 struct channel_softc *wdc_cp;
4610 int i, rv, crv;
4611 u_int32_t scr;
4612
4613 rv = 0;
4614 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4615 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4616 cp = &sc->pciide_channels[i];
4617 wdc_cp = &cp->wdc_channel;
4618 /* If a compat channel, skip. */
4619 if (cp->compat)
4620 continue;
4621 if (scr & PDC2xx_SCR_INT(i)) {
4622 crv = wdcintr(wdc_cp);
4623 if (crv == 0)
4624 printf("%s:%d: bogus intr (reg 0x%x)\n",
4625 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4626 else
4627 rv = 1;
4628 }
4629 }
4630 return rv;
4631 }
4632
4633 int
4634 pdc20265_pci_intr(arg)
4635 void *arg;
4636 {
4637 struct pciide_softc *sc = arg;
4638 struct pciide_channel *cp;
4639 struct channel_softc *wdc_cp;
4640 int i, rv, crv;
4641 u_int32_t dmastat;
4642
4643 rv = 0;
4644 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4645 cp = &sc->pciide_channels[i];
4646 wdc_cp = &cp->wdc_channel;
4647 /* If a compat channel, skip. */
4648 if (cp->compat)
4649 continue;
4650 /*
4651 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
4652 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops,
4653 * so use that instead (requires 2 register reads instead of 1,
4654 * but there is no other way to do it).
4655 */
4656 dmastat = bus_space_read_1(sc->sc_dma_iot,
4657 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4658 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4659 continue;
4660 crv = wdcintr(wdc_cp);
4661 if (crv == 0)
4662 printf("%s:%d: bogus intr\n",
4663 sc->sc_wdcdev.sc_dev.dv_xname, i);
4664 else
4665 rv = 1;
4666 }
4667 return rv;
4668 }
4669
4670 void
4671 opti_chip_map(sc, pa)
4672 struct pciide_softc *sc;
4673 struct pci_attach_args *pa;
4674 {
4675 struct pciide_channel *cp;
4676 bus_size_t cmdsize, ctlsize;
4677 pcireg_t interface;
4678 u_int8_t init_ctrl;
4679 int channel;
4680
4681 if (pciide_chipen(sc, pa) == 0)
4682 return;
4683 printf("%s: bus-master DMA support present",
4684 sc->sc_wdcdev.sc_dev.dv_xname);
4685
4686 /*
4687 * XXXSCW:
4688 * There seem to be a couple of buggy revisions/implementations
4689 * of the OPTi pciide chipset. This kludge seems to fix one of
4690 * the reported problems (PR/11644) but still fails for the
4691 * other (PR/13151), although the latter may be due to other
4692 * issues too...
4693 */
4694 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4695 printf(" but disabled due to chip rev. <= 0x12");
4696 sc->sc_dma_ok = 0;
4697 } else
4698 pciide_mapreg_dma(sc, pa);
4699
4700 printf("\n");
4701
4702 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4703 WDC_CAPABILITY_MODE;
4704 sc->sc_wdcdev.PIO_cap = 4;
4705 if (sc->sc_dma_ok) {
4706 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4707 sc->sc_wdcdev.irqack = pciide_irqack;
4708 sc->sc_wdcdev.DMA_cap = 2;
4709 }
4710 sc->sc_wdcdev.set_modes = opti_setup_channel;
4711
4712 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4713 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4714
4715 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4716 OPTI_REG_INIT_CONTROL);
4717
4718 interface = PCI_INTERFACE(pa->pa_class);
4719
4720 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4721 cp = &sc->pciide_channels[channel];
4722 if (pciide_chansetup(sc, channel, interface) == 0)
4723 continue;
4724 if (channel == 1 &&
4725 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4726 printf("%s: %s channel ignored (disabled)\n",
4727 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4728 continue;
4729 }
4730 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4731 pciide_pci_intr);
4732 if (cp->hw_ok == 0)
4733 continue;
4734 pciide_map_compat_intr(pa, cp, channel, interface);
4735 if (cp->hw_ok == 0)
4736 continue;
4737 opti_setup_channel(&cp->wdc_channel);
4738 }
4739 }
4740
4741 void
4742 opti_setup_channel(chp)
4743 struct channel_softc *chp;
4744 {
4745 struct ata_drive_datas *drvp;
4746 struct pciide_channel *cp = (struct pciide_channel*)chp;
4747 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4748 int drive, spd;
4749 int mode[2];
4750 u_int8_t rv, mr;
4751
4752 /*
4753 * The `Delay' and `Address Setup Time' fields of the
4754 * Miscellaneous Register are always zero initially.
4755 */
4756 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4757 mr &= ~(OPTI_MISC_DELAY_MASK |
4758 OPTI_MISC_ADDR_SETUP_MASK |
4759 OPTI_MISC_INDEX_MASK);
4760
4761 /* Prime the control register before setting timing values */
4762 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4763
4764 /* Determine the clockrate of the PCIbus the chip is attached to */
4765 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4766 spd &= OPTI_STRAP_PCI_SPEED_MASK;
4767
4768 /* setup DMA if needed */
4769 pciide_channel_dma_setup(cp);
4770
4771 for (drive = 0; drive < 2; drive++) {
4772 drvp = &chp->ch_drive[drive];
4773 /* If no drive, skip */
4774 if ((drvp->drive_flags & DRIVE) == 0) {
4775 mode[drive] = -1;
4776 continue;
4777 }
4778
4779 if ((drvp->drive_flags & DRIVE_DMA)) {
4780 /*
4781 * Timings will be used for both PIO and DMA,
4782 * so adjust DMA mode if needed
4783 */
4784 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4785 drvp->PIO_mode = drvp->DMA_mode + 2;
4786 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4787 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4788 drvp->PIO_mode - 2 : 0;
4789 if (drvp->DMA_mode == 0)
4790 drvp->PIO_mode = 0;
4791
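/*
 * DMA entries follow the five PIO entries in the opti_tim_* tables,
 * hence the offset of 5.
 */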
4792 mode[drive] = drvp->DMA_mode + 5;
4793 } else
4794 mode[drive] = drvp->PIO_mode;
4795
4796 if (drive && mode[0] >= 0 &&
4797 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4798 /*
4799 * Can't have two drives using different values
4800 * for `Address Setup Time'.
4801 * Slow down the faster drive to compensate.
4802 */
4803 int d = (opti_tim_as[spd][mode[0]] >
4804 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4805
4806 mode[d] = mode[1-d];
4807 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4808 chp->ch_drive[d].DMA_mode = 0;
4809 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4810 }
4811 }
4812
4813 for (drive = 0; drive < 2; drive++) {
4814 int m;
4815 if ((m = mode[drive]) < 0)
4816 continue;
4817
4818 /* Set the Address Setup Time and select appropriate index */
4819 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4820 rv |= OPTI_MISC_INDEX(drive);
4821 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4822
4823 /* Set the pulse width and recovery timing parameters */
4824 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4825 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4826 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4827 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4828
4829 /* Set the Enhanced Mode register appropriately */
4830 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4831 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4832 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4833 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4834 }
4835
4836 /* Finally, enable the timings */
4837 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4838
4839 pciide_print_modes(cp);
4840 }
4841
4842 #define ACARD_IS_850(sc) \
4843 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4844
4845 void
4846 acard_chip_map(sc, pa)
4847 struct pciide_softc *sc;
4848 struct pci_attach_args *pa;
4849 {
4850 struct pciide_channel *cp;
4851 int i;
4852 pcireg_t interface;
4853 bus_size_t cmdsize, ctlsize;
4854
4855 if (pciide_chipen(sc, pa) == 0)
4856 return;
4857
4858 /*
4859 * when the chip is in native mode it identifies itself as a
4860 * 'misc mass storage' device. Fake the interface in this case.
4861 */
4862 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4863 interface = PCI_INTERFACE(pa->pa_class);
4864 } else {
4865 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4866 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4867 }
4868
4869 printf("%s: bus-master DMA support present",
4870 sc->sc_wdcdev.sc_dev.dv_xname);
4871 pciide_mapreg_dma(sc, pa);
4872 printf("\n");
4873 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4874 WDC_CAPABILITY_MODE;
4875
4876 if (sc->sc_dma_ok) {
4877 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4878 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4879 sc->sc_wdcdev.irqack = pciide_irqack;
4880 }
4881 sc->sc_wdcdev.PIO_cap = 4;
4882 sc->sc_wdcdev.DMA_cap = 2;
4883 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4884
4885 sc->sc_wdcdev.set_modes = acard_setup_channel;
4886 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4887 sc->sc_wdcdev.nchannels = 2;
4888
4889 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4890 cp = &sc->pciide_channels[i];
4891 if (pciide_chansetup(sc, i, interface) == 0)
4892 continue;
4893 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4894 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4895 &ctlsize, pciide_pci_intr);
4896 } else {
4897 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4898 &cmdsize, &ctlsize);
4899 }
4900 if (cp->hw_ok == 0)
4901 return;
4902 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4903 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4904 wdcattach(&cp->wdc_channel);
4905 acard_setup_channel(&cp->wdc_channel);
4906 }
4907 if (!ACARD_IS_850(sc)) {
4908 u_int32_t reg;
4909 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4910 reg &= ~ATP860_CTRL_INT;
4911 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4912 }
4913 }
4914
4915 void
4916 acard_setup_channel(chp)
4917 struct channel_softc *chp;
4918 {
4919 struct ata_drive_datas *drvp;
4920 struct pciide_channel *cp = (struct pciide_channel*)chp;
4921 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4922 int channel = chp->channel;
4923 int drive;
4924 u_int32_t idetime, udma_mode;
4925 u_int32_t idedma_ctl;
4926
4927 /* setup DMA if needed */
4928 pciide_channel_dma_setup(cp);
4929
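/*
 * The ATP850 keeps timing and UDMA settings in per-channel registers;
 * the ATP860 family packs both channels into shared registers and also
 * provides 80-wire cable detection.
 */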
4930 if (ACARD_IS_850(sc)) {
4931 idetime = 0;
4932 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4933 udma_mode &= ~ATP850_UDMA_MASK(channel);
4934 } else {
4935 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4936 idetime &= ~ATP860_SETTIME_MASK(channel);
4937 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4938 udma_mode &= ~ATP860_UDMA_MASK(channel);
4939
4940 /* check 80 pins cable */
4941 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4942 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4943 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4944 & ATP860_CTRL_80P(chp->channel)) {
4945 if (chp->ch_drive[0].UDMA_mode > 2)
4946 chp->ch_drive[0].UDMA_mode = 2;
4947 if (chp->ch_drive[1].UDMA_mode > 2)
4948 chp->ch_drive[1].UDMA_mode = 2;
4949 }
4950 }
4951 }
4952
4953 idedma_ctl = 0;
4954
4955 /* Per drive settings */
4956 for (drive = 0; drive < 2; drive++) {
4957 drvp = &chp->ch_drive[drive];
4958 /* If no drive, skip */
4959 if ((drvp->drive_flags & DRIVE) == 0)
4960 continue;
4961 /* add timing values, setup DMA if needed */
4962 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4963 (drvp->drive_flags & DRIVE_UDMA)) {
4964 /* use Ultra/DMA */
4965 if (ACARD_IS_850(sc)) {
4966 idetime |= ATP850_SETTIME(drive,
4967 acard_act_udma[drvp->UDMA_mode],
4968 acard_rec_udma[drvp->UDMA_mode]);
4969 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4970 acard_udma_conf[drvp->UDMA_mode]);
4971 } else {
4972 idetime |= ATP860_SETTIME(channel, drive,
4973 acard_act_udma[drvp->UDMA_mode],
4974 acard_rec_udma[drvp->UDMA_mode]);
4975 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4976 acard_udma_conf[drvp->UDMA_mode]);
4977 }
4978 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4979 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4980 (drvp->drive_flags & DRIVE_DMA)) {
4981 /* use Multiword DMA */
4982 drvp->drive_flags &= ~DRIVE_UDMA;
4983 if (ACARD_IS_850(sc)) {
4984 idetime |= ATP850_SETTIME(drive,
4985 acard_act_dma[drvp->DMA_mode],
4986 acard_rec_dma[drvp->DMA_mode]);
4987 } else {
4988 idetime |= ATP860_SETTIME(channel, drive,
4989 acard_act_dma[drvp->DMA_mode],
4990 acard_rec_dma[drvp->DMA_mode]);
4991 }
4992 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4993 } else {
4994 /* PIO only */
4995 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4996 if (ACARD_IS_850(sc)) {
4997 idetime |= ATP850_SETTIME(drive,
4998 acard_act_pio[drvp->PIO_mode],
4999 acard_rec_pio[drvp->PIO_mode]);
5000 } else {
5001 idetime |= ATP860_SETTIME(channel, drive,
5002 acard_act_pio[drvp->PIO_mode],
5003 acard_rec_pio[drvp->PIO_mode]);
5004 }
5005 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
5006 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
5007 | ATP8x0_CTRL_EN(channel));
5008 }
5009 }
5010
5011 if (idedma_ctl != 0) {
5012 /* Add software bits in status register */
5013 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5014 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
5015 }
5016 pciide_print_modes(cp);
5017
5018 if (ACARD_IS_850(sc)) {
5019 pci_conf_write(sc->sc_pc, sc->sc_tag,
5020 ATP850_IDETIME(channel), idetime);
5021 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
5022 } else {
5023 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
5024 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
5025 }
5026 }
5027
5028 int
5029 acard_pci_intr(arg)
5030 void *arg;
5031 {
5032 struct pciide_softc *sc = arg;
5033 struct pciide_channel *cp;
5034 struct channel_softc *wdc_cp;
5035 int rv = 0;
5036 int dmastat, i, crv;
5037
5038 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5039 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5040 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
5041 if ((dmastat & IDEDMA_CTL_INTR) == 0)
5042 continue;
5043 cp = &sc->pciide_channels[i];
5044 wdc_cp = &cp->wdc_channel;
5045 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
5046 (void)wdcintr(wdc_cp);
5047 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5048 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
5049 continue;
5050 }
5051 crv = wdcintr(wdc_cp);
5052 if (crv == 0)
5053 printf("%s:%d: bogus intr\n",
5054 sc->sc_wdcdev.sc_dev.dv_xname, i);
5055 else if (crv == 1)
5056 rv = 1;
5057 else if (rv == 0)
5058 rv = crv;
5059 }
5060 return rv;
5061 }
5062
5063 static int
5064 sl82c105_bugchk(struct pci_attach_args *pa)
5065 {
5066
5067 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
5068 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
5069 return (0);
5070
5071 if (PCI_REVISION(pa->pa_class) <= 0x05)
5072 return (1);
5073
5074 return (0);
5075 }
5076
5077 void
5078 sl82c105_chip_map(sc, pa)
5079 struct pciide_softc *sc;
5080 struct pci_attach_args *pa;
5081 {
5082 struct pciide_channel *cp;
5083 bus_size_t cmdsize, ctlsize;
5084 pcireg_t interface, idecr;
5085 int channel;
5086
5087 if (pciide_chipen(sc, pa) == 0)
5088 return;
5089
5090 printf("%s: bus-master DMA support present",
5091 sc->sc_wdcdev.sc_dev.dv_xname);
5092
5093 /*
5094 * Check to see if we're part of the Winbond 83c553 Southbridge.
5095 * If so, we need to disable DMA on rev. <= 5 of that chip.
5096 */
5097 if (pci_find_device(pa, sl82c105_bugchk)) {
5098 printf(" but disabled due to 83c553 rev. <= 0x05");
5099 sc->sc_dma_ok = 0;
5100 } else
5101 pciide_mapreg_dma(sc, pa);
5102 printf("\n");
5103
5104 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
5105 WDC_CAPABILITY_MODE;
5106 sc->sc_wdcdev.PIO_cap = 4;
5107 if (sc->sc_dma_ok) {
5108 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
5109 sc->sc_wdcdev.irqack = pciide_irqack;
5110 sc->sc_wdcdev.DMA_cap = 2;
5111 }
5112 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
5113
5114 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5115 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
5116
5117 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
5118
5119 interface = PCI_INTERFACE(pa->pa_class);
5120
5121 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
5122 cp = &sc->pciide_channels[channel];
5123 if (pciide_chansetup(sc, channel, interface) == 0)
5124 continue;
5125 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
5126 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
5127 printf("%s: %s channel ignored (disabled)\n",
5128 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
5129 continue;
5130 }
5131 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
5132 pciide_pci_intr);
5133 if (cp->hw_ok == 0)
5134 continue;
5135 pciide_map_compat_intr(pa, cp, channel, interface);
5136 if (cp->hw_ok == 0)
5137 continue;
5138 sl82c105_setup_channel(&cp->wdc_channel);
5139 }
5140 }
5141
5142 void
5143 sl82c105_setup_channel(chp)
5144 struct channel_softc *chp;
5145 {
5146 struct ata_drive_datas *drvp;
5147 struct pciide_channel *cp = (struct pciide_channel*)chp;
5148 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5149 int pxdx_reg, drive;
5150 pcireg_t pxdx;
5151
5152 /* Set up DMA if needed. */
5153 pciide_channel_dma_setup(cp);
5154
5155 for (drive = 0; drive < 2; drive++) {
5156 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
5157 : SYMPH_P1D0CR) + (drive * 4);
5158
5159 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
5160
5161 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
5162 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
5163
5164 drvp = &chp->ch_drive[drive];
5165 /* If no drive, skip. */
5166 if ((drvp->drive_flags & DRIVE) == 0) {
5167 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
5168 continue;
5169 }
5170
5171 if (drvp->drive_flags & DRIVE_DMA) {
5172 /*
5173 * Timings will be used for both PIO and DMA,
5174 * so adjust DMA mode if needed.
5175 */
5176 if (drvp->PIO_mode >= 3) {
5177 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
5178 drvp->DMA_mode = drvp->PIO_mode - 2;
5179 if (drvp->DMA_mode < 1) {
5180 /*
5181 * Can't mix both PIO and DMA.
5182 * Disable DMA.
5183 */
5184 drvp->drive_flags &= ~DRIVE_DMA;
5185 }
5186 } else {
5187 /*
5188 * Can't mix both PIO and DMA. Disable
5189 * DMA.
5190 */
5191 drvp->drive_flags &= ~DRIVE_DMA;
5192 }
5193 }
5194
5195 if (drvp->drive_flags & DRIVE_DMA) {
5196 /* Use multi-word DMA. */
5197 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
5198 PxDx_CMD_ON_SHIFT;
5199 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
5200 } else {
5201 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
5202 PxDx_CMD_ON_SHIFT;
5203 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
5204 }
5205
5206 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
5207
5208 /* ...and set the mode for this drive. */
5209 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
5210 }
5211
5212 pciide_print_modes(cp);
5213 }
5214
5215 void
5216 serverworks_chip_map(sc, pa)
5217 struct pciide_softc *sc;
5218 struct pci_attach_args *pa;
5219 {
5220 struct pciide_channel *cp;
5221 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
5222 pcitag_t pcib_tag;
5223 int channel;
5224 bus_size_t cmdsize, ctlsize;
5225
5226 if (pciide_chipen(sc, pa) == 0)
5227 return;
5228
5229 printf("%s: bus-master DMA support present",
5230 sc->sc_wdcdev.sc_dev.dv_xname);
5231 pciide_mapreg_dma(sc, pa);
5232 printf("\n");
5233 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
5234 WDC_CAPABILITY_MODE;
5235
5236 if (sc->sc_dma_ok) {
5237 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
5238 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
5239 sc->sc_wdcdev.irqack = pciide_irqack;
5240 }
5241 sc->sc_wdcdev.PIO_cap = 4;
5242 sc->sc_wdcdev.DMA_cap = 2;
5243 switch (sc->sc_pp->ide_product) {
5244 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
5245 sc->sc_wdcdev.UDMA_cap = 2;
5246 break;
5247 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
5248 if (PCI_REVISION(pa->pa_class) < 0x92)
5249 sc->sc_wdcdev.UDMA_cap = 4;
5250 else
5251 sc->sc_wdcdev.UDMA_cap = 5;
5252 break;
5253 case PCI_PRODUCT_SERVERWORKS_CSB6_IDE:
5254 sc->sc_wdcdev.UDMA_cap = 5;
5255 break;
5256 }
5257
5258 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
5259 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5260 sc->sc_wdcdev.nchannels = 2;
5261
5262 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
5263 cp = &sc->pciide_channels[channel];
5264 if (pciide_chansetup(sc, channel, interface) == 0)
5265 continue;
5266 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
5267 serverworks_pci_intr);
5268 if (cp->hw_ok == 0)
5269 return;
5270 pciide_map_compat_intr(pa, cp, channel, interface);
5271 if (cp->hw_ok == 0)
5272 return;
5273 serverworks_setup_channel(&cp->wdc_channel);
5274 }
5275
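	/*
	 * XXX Undocumented: tweak register 0x64 on function 0 of this
	 * device (presumably the southbridge function) by clearing bit 13
	 * and setting bit 14.
	 */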
5276 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
5277 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
5278 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
5279 }
5280
5281 void
5282 serverworks_setup_channel(chp)
5283 struct channel_softc *chp;
5284 {
5285 struct ata_drive_datas *drvp;
5286 struct pciide_channel *cp = (struct pciide_channel*)chp;
5287 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5288 int channel = chp->channel;
5289 int drive, unit;
5290 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
5291 u_int32_t idedma_ctl;
5292 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
5293 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
5294
5295 /* setup DMA if needed */
5296 pciide_channel_dma_setup(cp);
5297
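	/*
	 * Register layout, as the masks below imply: 0x40 holds PIO
	 * timings and 0x44 MW DMA timings, one 16-bit field per channel;
	 * 0x48 and 0x54 hold per-drive PIO/UDMA mode nibbles starting at
	 * bit 16, and the low bits of 0x54 are per-drive UDMA enables.
	 */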
5298 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
5299 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
5300 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
5301 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
5302
5303 pio_time &= ~(0xffff << (16 * channel));
5304 dma_time &= ~(0xffff << (16 * channel));
5305 pio_mode &= ~(0xff << (8 * channel + 16));
5306 udma_mode &= ~(0xff << (8 * channel + 16));
5307 udma_mode &= ~(3 << (2 * channel));
5308
5309 idedma_ctl = 0;
5310
5311 /* Per drive settings */
5312 for (drive = 0; drive < 2; drive++) {
5313 drvp = &chp->ch_drive[drive];
5314 /* If no drive, skip */
5315 if ((drvp->drive_flags & DRIVE) == 0)
5316 continue;
5317 unit = drive + 2 * channel;
5318 /* add timing values, setup DMA if needed */
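		/*
		 * "unit" numbers the drives 0-3 across both channels;
		 * within a channel's 16-bit timing field drive 0 occupies
		 * the high byte, hence the (unit ^ 1) byte index.
		 */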
5319 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
5320 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
5321 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
5322 (drvp->drive_flags & DRIVE_UDMA)) {
5323 /* use Ultra/DMA, check for 80-pin cable */
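			/*
			 * 80-wire cable presence appears to be reported in
			 * the subsystem ID register, one bit per channel
			 * (bit 14 for channel 0, bit 15 for channel 1); if
			 * it is clear, cap UDMA at mode 2 (ATA/33).
			 */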
5324 			if (drvp->UDMA_mode > 2 &&
5325 			    (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
			    PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
5326 drvp->UDMA_mode = 2;
5327 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
5328 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
5329 udma_mode |= 1 << unit;
5330 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5331 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
5332 (drvp->drive_flags & DRIVE_DMA)) {
5333 /* use Multiword DMA */
5334 drvp->drive_flags &= ~DRIVE_UDMA;
5335 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
5336 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5337 } else {
5338 /* PIO only */
5339 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
5340 }
5341 }
5342
5343 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
5344 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
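	/*
	 * The OSB4 apparently does not implement the PIO mode register
	 * at 0x48, so skip that write on it.
	 */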
5345 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
5346 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
5347 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
5348
5349 if (idedma_ctl != 0) {
5350 /* Add software bits in status register */
5351 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5352 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
5353 }
5354 pciide_print_modes(cp);
5355 }
5356
5357 int
5358 serverworks_pci_intr(arg)
5359 void *arg;
5360 {
5361 struct pciide_softc *sc = arg;
5362 struct pciide_channel *cp;
5363 struct channel_softc *wdc_cp;
5364 int rv = 0;
5365 int dmastat, i, crv;
5366
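	/*
	 * Check each channel's bus-master status: hand the interrupt to
	 * wdcintr() only when the interrupt bit is set and the channel is
	 * no longer active; a bogus assertion is simply acknowledged by
	 * writing the status back.
	 */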
5367 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5368 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5369 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
5370 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
5371 IDEDMA_CTL_INTR)
5372 continue;
5373 cp = &sc->pciide_channels[i];
5374 wdc_cp = &cp->wdc_channel;
5375 crv = wdcintr(wdc_cp);
5376 if (crv == 0) {
5377 printf("%s:%d: bogus intr\n",
5378 sc->sc_wdcdev.sc_dev.dv_xname, i);
5379 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5380 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
5381 } else
5382 rv = 1;
5383 }
5384 return rv;
5385 }
5386
5387 void
5388 artisea_chip_map(sc, pa)
5389 struct pciide_softc *sc;
5390 struct pci_attach_args *pa;
5391 {
5392 struct pciide_channel *cp;
5393 bus_size_t cmdsize, ctlsize;
5394 pcireg_t interface;
5395 int channel;
5396
5397 if (pciide_chipen(sc, pa) == 0)
5398 return;
5399
5400 	printf("%s: bus-master DMA support present",
5401 sc->sc_wdcdev.sc_dev.dv_xname);
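	/*
	 * DMA is deliberately left disabled on revision 0 parts unless the
	 * kernel is built with PCIIDE_I31244_ENABLEDMA defined, presumably
	 * because of a DMA erratum in early steppings.
	 */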
5402 #ifndef PCIIDE_I31244_ENABLEDMA
5403 if (PCI_REVISION(pa->pa_class) == 0) {
5404 printf(" but disabled due to rev. 0");
5405 sc->sc_dma_ok = 0;
5406 } else
5407 #endif
5408 pciide_mapreg_dma(sc, pa);
5409 printf("\n");
5410
5411 /*
5412 * XXX Configure LEDs to show activity.
5413 */
5414
5415 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
5416 WDC_CAPABILITY_MODE;
5417 sc->sc_wdcdev.PIO_cap = 4;
5418 if (sc->sc_dma_ok) {
5419 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
5420 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
5421 sc->sc_wdcdev.irqack = pciide_irqack;
5422 sc->sc_wdcdev.DMA_cap = 2;
5423 sc->sc_wdcdev.UDMA_cap = 6;
5424 }
5425 sc->sc_wdcdev.set_modes = sata_setup_channel;
5426
5427 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5428 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
5429
5430 interface = PCI_INTERFACE(pa->pa_class);
5431
5432 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
5433 cp = &sc->pciide_channels[channel];
5434 if (pciide_chansetup(sc, channel, interface) == 0)
5435 continue;
5436 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
5437 pciide_pci_intr);
5438 if (cp->hw_ok == 0)
5439 continue;
5440 pciide_map_compat_intr(pa, cp, channel, interface);
5441 sata_setup_channel(&cp->wdc_channel);
5442 }
5443 }
5444