pciide.c revision 1.192 1 /* $NetBSD: pciide.c,v 1.192 2003/05/17 21:52:04 thorpej Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.192 2003/05/17 21:52:04 thorpej Exp $");
80
/*
 * Debug support.
 *
 * NOTE(review): WDCDEBUG is forced on here unconditionally, which
 * compiles the debug code into every build — confirm this is intended
 * and not a leftover development hack.
 */
#ifndef WDCDEBUG
#define WDCDEBUG
#endif

/* Bits for wdcdebug_pciide_mask, selecting which messages are printed. */
#define DEBUG_DMA   0x01
#define DEBUG_XFERS 0x02
#define DEBUG_FUNCS 0x08
#define DEBUG_PROBE 0x10
#ifdef WDCDEBUG
int wdcdebug_pciide_mask = 0;
/*
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement; the previous bare-"if" expansion could bind a following
 * "else" at the call site (dangling-else hazard).
 */
#define WDCDEBUG_PRINT(args, level) \
	do { \
		if (wdcdebug_pciide_mask & (level)) \
			printf args; \
	} while (0)
#else
#define WDCDEBUG_PRINT(args, level)
#endif
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/pciide_i31244_reg.h>
123 #include <dev/pci/pciide_sii3112_reg.h>
124 #include <dev/pci/cy82c693var.h>
125
126 #include "opt_pciide.h"
127
128 static const char dmaerrfmt[] =
129 "%s:%d: unable to %s table DMA map for drive %d, error=%d\n";
130
131 /* inlines for reading/writing 8-bit PCI registers */
132 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
133 int));
134 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
135 int, u_int8_t));
136
137 static __inline u_int8_t
138 pciide_pci_read(pc, pa, reg)
139 pci_chipset_tag_t pc;
140 pcitag_t pa;
141 int reg;
142 {
143
144 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
145 ((reg & 0x03) * 8) & 0xff);
146 }
147
148 static __inline void
149 pciide_pci_write(pc, pa, reg, val)
150 pci_chipset_tag_t pc;
151 pcitag_t pa;
152 int reg;
153 u_int8_t val;
154 {
155 pcireg_t pcival;
156
157 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
158 pcival &= ~(0xff << ((reg & 0x03) * 8));
159 pcival |= (val << ((reg & 0x03) * 8));
160 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
161 }
162
163 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
164
165 void sata_setup_channel __P((struct channel_softc*));
166
167 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void piix_setup_channel __P((struct channel_softc*));
169 void piix3_4_setup_channel __P((struct channel_softc*));
170 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
171 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
172 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
173
174 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void amd7x6_setup_channel __P((struct channel_softc*));
176
177 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
178 void apollo_setup_channel __P((struct channel_softc*));
179
180 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
181 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cmd0643_9_setup_channel __P((struct channel_softc*));
183 void cmd_channel_map __P((struct pci_attach_args *,
184 struct pciide_softc *, int));
185 int cmd_pci_intr __P((void *));
186 void cmd646_9_irqack __P((struct channel_softc *));
187 void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
188 void cmd680_setup_channel __P((struct channel_softc*));
189 void cmd680_channel_map __P((struct pci_attach_args *,
190 struct pciide_softc *, int));
191
192 void cmd3112_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void cmd3112_setup_channel __P((struct channel_softc*));
194
195 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
196 void cy693_setup_channel __P((struct channel_softc*));
197
198 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
199 void sis_setup_channel __P((struct channel_softc*));
200 void sis96x_setup_channel __P((struct channel_softc*));
201 static int sis_hostbr_match __P(( struct pci_attach_args *));
202 static int sis_south_match __P(( struct pci_attach_args *));
203
204 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
205 void acer_setup_channel __P((struct channel_softc*));
206 int acer_pci_intr __P((void *));
207
208 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
209 void pdc202xx_setup_channel __P((struct channel_softc*));
210 void pdc20268_setup_channel __P((struct channel_softc*));
211 int pdc202xx_pci_intr __P((void *));
212 int pdc20265_pci_intr __P((void *));
213 static void pdc20262_dma_start __P((void*, int, int));
214 static int pdc20262_dma_finish __P((void*, int, int, int));
215
216 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
217 void opti_setup_channel __P((struct channel_softc*));
218
219 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
220 void hpt_setup_channel __P((struct channel_softc*));
221 int hpt_pci_intr __P((void *));
222
223 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
224 void acard_setup_channel __P((struct channel_softc*));
225 int acard_pci_intr __P((void *));
226
227 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
228 void serverworks_setup_channel __P((struct channel_softc*));
229 int serverworks_pci_intr __P((void *));
230
231 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
232 void sl82c105_setup_channel __P((struct channel_softc*));
233
234 void pciide_channel_dma_setup __P((struct pciide_channel *));
235 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
236 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
237 void pciide_dma_start __P((void*, int, int));
238 int pciide_dma_finish __P((void*, int, int, int));
239 void pciide_irqack __P((struct channel_softc *));
240 void pciide_print_modes __P((struct pciide_channel *));
241
242 void artisea_chip_map __P((struct pciide_softc*, struct pci_attach_args *));
243
244 struct pciide_product_desc {
245 u_int32_t ide_product;
246 int ide_flags;
247 const char *ide_name;
248 /* map and setup chip, probe drives */
249 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
250 };
251
252 /* Flags for ide_flags */
253 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
254 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
255
256 /* Default product description for devices not known from this controller */
257 const struct pciide_product_desc default_product_desc = {
258 0,
259 0,
260 "Generic PCI IDE controller",
261 default_chip_map,
262 };
263
264 const struct pciide_product_desc pciide_intel_products[] = {
265 { PCI_PRODUCT_INTEL_82092AA,
266 0,
267 "Intel 82092AA IDE controller",
268 default_chip_map,
269 },
270 { PCI_PRODUCT_INTEL_82371FB_IDE,
271 0,
272 "Intel 82371FB IDE controller (PIIX)",
273 piix_chip_map,
274 },
275 { PCI_PRODUCT_INTEL_82371SB_IDE,
276 0,
277 "Intel 82371SB IDE Interface (PIIX3)",
278 piix_chip_map,
279 },
280 { PCI_PRODUCT_INTEL_82371AB_IDE,
281 0,
282 "Intel 82371AB IDE controller (PIIX4)",
283 piix_chip_map,
284 },
285 { PCI_PRODUCT_INTEL_82440MX_IDE,
286 0,
287 "Intel 82440MX IDE controller",
288 piix_chip_map
289 },
290 { PCI_PRODUCT_INTEL_82801AA_IDE,
291 0,
292 "Intel 82801AA IDE Controller (ICH)",
293 piix_chip_map,
294 },
295 { PCI_PRODUCT_INTEL_82801AB_IDE,
296 0,
297 "Intel 82801AB IDE Controller (ICH0)",
298 piix_chip_map,
299 },
300 { PCI_PRODUCT_INTEL_82801BA_IDE,
301 0,
302 "Intel 82801BA IDE Controller (ICH2)",
303 piix_chip_map,
304 },
305 { PCI_PRODUCT_INTEL_82801BAM_IDE,
306 0,
307 "Intel 82801BAM IDE Controller (ICH2-M)",
308 piix_chip_map,
309 },
310 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
311 0,
312 "Intel 82801CA IDE Controller (ICH3)",
313 piix_chip_map,
314 },
315 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
316 0,
317 "Intel 82801CA IDE Controller (ICH3)",
318 piix_chip_map,
319 },
320 { PCI_PRODUCT_INTEL_82801DB_IDE,
321 0,
322 "Intel 82801DB IDE Controller (ICH4)",
323 piix_chip_map,
324 },
325 { PCI_PRODUCT_INTEL_82801DBM_IDE,
326 0,
327 "Intel 82801DBM IDE Controller (ICH4-M)",
328 piix_chip_map,
329 },
330 { PCI_PRODUCT_INTEL_31244,
331 0,
332 "Intel 31244 Serial ATA Controller",
333 artisea_chip_map,
334 },
335 { 0,
336 0,
337 NULL,
338 NULL
339 }
340 };
341
342 const struct pciide_product_desc pciide_amd_products[] = {
343 { PCI_PRODUCT_AMD_PBC756_IDE,
344 0,
345 "Advanced Micro Devices AMD756 IDE Controller",
346 amd7x6_chip_map
347 },
348 { PCI_PRODUCT_AMD_PBC766_IDE,
349 0,
350 "Advanced Micro Devices AMD766 IDE Controller",
351 amd7x6_chip_map
352 },
353 { PCI_PRODUCT_AMD_PBC768_IDE,
354 0,
355 "Advanced Micro Devices AMD768 IDE Controller",
356 amd7x6_chip_map
357 },
358 { PCI_PRODUCT_AMD_PBC8111_IDE,
359 0,
360 "Advanced Micro Devices AMD8111 IDE Controller",
361 amd7x6_chip_map
362 },
363 { 0,
364 0,
365 NULL,
366 NULL
367 }
368 };
369
370 const struct pciide_product_desc pciide_nvidia_products[] = {
371 { PCI_PRODUCT_NVIDIA_NFORCE_ATA100,
372 0,
373 "NVIDIA nForce IDE Controller",
374 amd7x6_chip_map
375 },
376 { PCI_PRODUCT_NVIDIA_NFORCE2_ATA133,
377 0,
378 "NVIDIA nForce2 IDE Controller",
379 amd7x6_chip_map
380 },
381 { 0,
382 0,
383 NULL,
384 NULL
385 }
386 };
387
388 const struct pciide_product_desc pciide_cmd_products[] = {
389 { PCI_PRODUCT_CMDTECH_640,
390 0,
391 "CMD Technology PCI0640",
392 cmd_chip_map
393 },
394 { PCI_PRODUCT_CMDTECH_643,
395 0,
396 "CMD Technology PCI0643",
397 cmd0643_9_chip_map,
398 },
399 { PCI_PRODUCT_CMDTECH_646,
400 0,
401 "CMD Technology PCI0646",
402 cmd0643_9_chip_map,
403 },
404 { PCI_PRODUCT_CMDTECH_648,
405 IDE_PCI_CLASS_OVERRIDE,
406 "CMD Technology PCI0648",
407 cmd0643_9_chip_map,
408 },
409 { PCI_PRODUCT_CMDTECH_649,
410 IDE_PCI_CLASS_OVERRIDE,
411 "CMD Technology PCI0649",
412 cmd0643_9_chip_map,
413 },
414 { PCI_PRODUCT_CMDTECH_680,
415 IDE_PCI_CLASS_OVERRIDE,
416 "Silicon Image 0680",
417 cmd680_chip_map,
418 },
419 { PCI_PRODUCT_CMDTECH_3112,
420 IDE_PCI_CLASS_OVERRIDE,
421 "Silicon Image SATALink 3112",
422 cmd3112_chip_map,
423 },
424 { 0,
425 0,
426 NULL,
427 NULL
428 }
429 };
430
431 const struct pciide_product_desc pciide_via_products[] = {
432 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
433 0,
434 NULL,
435 apollo_chip_map,
436 },
437 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
438 0,
439 NULL,
440 apollo_chip_map,
441 },
442 { 0,
443 0,
444 NULL,
445 NULL
446 }
447 };
448
449 const struct pciide_product_desc pciide_cypress_products[] = {
450 { PCI_PRODUCT_CONTAQ_82C693,
451 IDE_16BIT_IOSPACE,
452 "Cypress 82C693 IDE Controller",
453 cy693_chip_map,
454 },
455 { 0,
456 0,
457 NULL,
458 NULL
459 }
460 };
461
462 const struct pciide_product_desc pciide_sis_products[] = {
463 { PCI_PRODUCT_SIS_5597_IDE,
464 0,
465 NULL,
466 sis_chip_map,
467 },
468 { 0,
469 0,
470 NULL,
471 NULL
472 }
473 };
474
475 const struct pciide_product_desc pciide_acer_products[] = {
476 { PCI_PRODUCT_ALI_M5229,
477 0,
478 "Acer Labs M5229 UDMA IDE Controller",
479 acer_chip_map,
480 },
481 { 0,
482 0,
483 NULL,
484 NULL
485 }
486 };
487
488 const struct pciide_product_desc pciide_promise_products[] = {
489 { PCI_PRODUCT_PROMISE_ULTRA33,
490 IDE_PCI_CLASS_OVERRIDE,
491 "Promise Ultra33/ATA Bus Master IDE Accelerator",
492 pdc202xx_chip_map,
493 },
494 { PCI_PRODUCT_PROMISE_ULTRA66,
495 IDE_PCI_CLASS_OVERRIDE,
496 "Promise Ultra66/ATA Bus Master IDE Accelerator",
497 pdc202xx_chip_map,
498 },
499 { PCI_PRODUCT_PROMISE_ULTRA100,
500 IDE_PCI_CLASS_OVERRIDE,
501 "Promise Ultra100/ATA Bus Master IDE Accelerator",
502 pdc202xx_chip_map,
503 },
504 { PCI_PRODUCT_PROMISE_ULTRA100X,
505 IDE_PCI_CLASS_OVERRIDE,
506 "Promise Ultra100/ATA Bus Master IDE Accelerator",
507 pdc202xx_chip_map,
508 },
509 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
510 IDE_PCI_CLASS_OVERRIDE,
511 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
512 pdc202xx_chip_map,
513 },
514 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
515 IDE_PCI_CLASS_OVERRIDE,
516 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
517 pdc202xx_chip_map,
518 },
519 { PCI_PRODUCT_PROMISE_ULTRA133,
520 IDE_PCI_CLASS_OVERRIDE,
521 "Promise Ultra133/ATA Bus Master IDE Accelerator",
522 pdc202xx_chip_map,
523 },
524 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
525 IDE_PCI_CLASS_OVERRIDE,
526 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
527 pdc202xx_chip_map,
528 },
529 { PCI_PRODUCT_PROMISE_MBULTRA133,
530 IDE_PCI_CLASS_OVERRIDE,
531 "Promise Ultra133/ATA Bus Master IDE Accelerator (MB)",
532 pdc202xx_chip_map,
533 },
534 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
535 IDE_PCI_CLASS_OVERRIDE,
536 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
537 pdc202xx_chip_map,
538 },
539 { PCI_PRODUCT_PROMISE_FASTTRAK133LITE,
540 IDE_PCI_CLASS_OVERRIDE,
541 "Promise Fasttrak133 Lite Bus Master IDE Accelerator",
542 pdc202xx_chip_map,
543 },
544 { PCI_PRODUCT_PROMISE_SATA150TX2PLUS,
545 IDE_PCI_CLASS_OVERRIDE,
546 "Promise Serial ATA/150 TX2plus Bus Master IDE Accelerator",
547 pdc202xx_chip_map,
548 },
549 { 0,
550 0,
551 NULL,
552 NULL
553 }
554 };
555
556 const struct pciide_product_desc pciide_opti_products[] = {
557 { PCI_PRODUCT_OPTI_82C621,
558 0,
559 "OPTi 82c621 PCI IDE controller",
560 opti_chip_map,
561 },
562 { PCI_PRODUCT_OPTI_82C568,
563 0,
564 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
565 opti_chip_map,
566 },
567 { PCI_PRODUCT_OPTI_82D568,
568 0,
569 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
570 opti_chip_map,
571 },
572 { 0,
573 0,
574 NULL,
575 NULL
576 }
577 };
578
579 const struct pciide_product_desc pciide_triones_products[] = {
580 { PCI_PRODUCT_TRIONES_HPT366,
581 IDE_PCI_CLASS_OVERRIDE,
582 NULL,
583 hpt_chip_map,
584 },
585 { PCI_PRODUCT_TRIONES_HPT372,
586 IDE_PCI_CLASS_OVERRIDE,
587 NULL,
588 hpt_chip_map
589 },
590 { PCI_PRODUCT_TRIONES_HPT374,
591 IDE_PCI_CLASS_OVERRIDE,
592 NULL,
593 hpt_chip_map
594 },
595 { 0,
596 0,
597 NULL,
598 NULL
599 }
600 };
601
602 const struct pciide_product_desc pciide_acard_products[] = {
603 { PCI_PRODUCT_ACARD_ATP850U,
604 IDE_PCI_CLASS_OVERRIDE,
605 "Acard ATP850U Ultra33 IDE Controller",
606 acard_chip_map,
607 },
608 { PCI_PRODUCT_ACARD_ATP860,
609 IDE_PCI_CLASS_OVERRIDE,
610 "Acard ATP860 Ultra66 IDE Controller",
611 acard_chip_map,
612 },
613 { PCI_PRODUCT_ACARD_ATP860A,
614 IDE_PCI_CLASS_OVERRIDE,
615 "Acard ATP860-A Ultra66 IDE Controller",
616 acard_chip_map,
617 },
618 { 0,
619 0,
620 NULL,
621 NULL
622 }
623 };
624
625 const struct pciide_product_desc pciide_serverworks_products[] = {
626 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
627 0,
628 "ServerWorks OSB4 IDE Controller",
629 serverworks_chip_map,
630 },
631 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
632 0,
633 "ServerWorks CSB5 IDE Controller",
634 serverworks_chip_map,
635 },
636 { PCI_PRODUCT_SERVERWORKS_CSB6_IDE,
637 0,
638 "ServerWorks CSB6 RAID/IDE Controller",
639 serverworks_chip_map,
640 },
641 { 0,
642 0,
643 NULL,
644 }
645 };
646
647 const struct pciide_product_desc pciide_symphony_products[] = {
648 { PCI_PRODUCT_SYMPHONY_82C105,
649 0,
650 "Symphony Labs 82C105 IDE controller",
651 sl82c105_chip_map,
652 },
653 { 0,
654 0,
655 NULL,
656 }
657 };
658
659 const struct pciide_product_desc pciide_winbond_products[] = {
660 { PCI_PRODUCT_WINBOND_W83C553F_1,
661 0,
662 "Winbond W83C553F IDE controller",
663 sl82c105_chip_map,
664 },
665 { 0,
666 0,
667 NULL,
668 }
669 };
670
671 struct pciide_vendor_desc {
672 u_int32_t ide_vendor;
673 const struct pciide_product_desc *ide_products;
674 };
675
676 const struct pciide_vendor_desc pciide_vendors[] = {
677 { PCI_VENDOR_INTEL, pciide_intel_products },
678 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
679 { PCI_VENDOR_VIATECH, pciide_via_products },
680 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
681 { PCI_VENDOR_SIS, pciide_sis_products },
682 { PCI_VENDOR_ALI, pciide_acer_products },
683 { PCI_VENDOR_PROMISE, pciide_promise_products },
684 { PCI_VENDOR_AMD, pciide_amd_products },
685 { PCI_VENDOR_OPTI, pciide_opti_products },
686 { PCI_VENDOR_TRIONES, pciide_triones_products },
687 { PCI_VENDOR_ACARD, pciide_acard_products },
688 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
689 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
690 { PCI_VENDOR_WINBOND, pciide_winbond_products },
691 { PCI_VENDOR_NVIDIA, pciide_nvidia_products },
692 { 0, NULL }
693 };
694
695 /* options passed via the 'flags' config keyword */
696 #define PCIIDE_OPTIONS_DMA 0x01
697 #define PCIIDE_OPTIONS_NODMA 0x02
698
699 int pciide_match __P((struct device *, struct cfdata *, void *));
700 void pciide_attach __P((struct device *, struct device *, void *));
701
702 CFATTACH_DECL(pciide, sizeof(struct pciide_softc),
703 pciide_match, pciide_attach, NULL, NULL);
704
705 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
706 int pciide_mapregs_compat __P(( struct pci_attach_args *,
707 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
708 int pciide_mapregs_native __P((struct pci_attach_args *,
709 struct pciide_channel *, bus_size_t *, bus_size_t *,
710 int (*pci_intr) __P((void *))));
711 void pciide_mapreg_dma __P((struct pciide_softc *,
712 struct pci_attach_args *));
713 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
714 void pciide_mapchan __P((struct pci_attach_args *,
715 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
716 int (*pci_intr) __P((void *))));
717 int pciide_chan_candisable __P((struct pciide_channel *));
718 void pciide_map_compat_intr __P(( struct pci_attach_args *,
719 struct pciide_channel *, int, int));
720 int pciide_compat_intr __P((void *));
721 int pciide_pci_intr __P((void *));
722 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
723
724 const struct pciide_product_desc *
725 pciide_lookup_product(id)
726 u_int32_t id;
727 {
728 const struct pciide_product_desc *pp;
729 const struct pciide_vendor_desc *vp;
730
731 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
732 if (PCI_VENDOR(id) == vp->ide_vendor)
733 break;
734
735 if ((pp = vp->ide_products) == NULL)
736 return NULL;
737
738 for (; pp->chip_map != NULL; pp++)
739 if (PCI_PRODUCT(id) == pp->ide_product)
740 break;
741
742 if (pp->chip_map == NULL)
743 return NULL;
744 return pp;
745 }
746
747 int
748 pciide_match(parent, match, aux)
749 struct device *parent;
750 struct cfdata *match;
751 void *aux;
752 {
753 struct pci_attach_args *pa = aux;
754 const struct pciide_product_desc *pp;
755
756 /*
757 * Check the ID register to see that it's a PCI IDE controller.
758 * If it is, we assume that we can deal with it; it _should_
759 * work in a standardized way...
760 */
761 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
762 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
763 return (1);
764 }
765
766 /*
767 * Some controllers (e.g. promise Utra-33) don't claim to be PCI IDE
768 * controllers. Let see if we can deal with it anyway.
769 */
770 pp = pciide_lookup_product(pa->pa_id);
771 if (pp != NULL && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
772 return (1);
773 }
774
775 return (0);
776 }
777
778 void
779 pciide_attach(parent, self, aux)
780 struct device *parent, *self;
781 void *aux;
782 {
783 struct pci_attach_args *pa = aux;
784 pci_chipset_tag_t pc = pa->pa_pc;
785 pcitag_t tag = pa->pa_tag;
786 struct pciide_softc *sc = (struct pciide_softc *)self;
787 pcireg_t csr;
788 char devinfo[256];
789 const char *displaydev;
790
791 aprint_naive(": disk controller\n");
792
793 sc->sc_pci_vendor = PCI_VENDOR(pa->pa_id);
794 sc->sc_pp = pciide_lookup_product(pa->pa_id);
795 if (sc->sc_pp == NULL) {
796 sc->sc_pp = &default_product_desc;
797 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
798 displaydev = devinfo;
799 } else
800 displaydev = sc->sc_pp->ide_name;
801
802 /* if displaydev == NULL, printf is done in chip-specific map */
803 if (displaydev)
804 aprint_normal(": %s (rev. 0x%02x)\n", displaydev,
805 PCI_REVISION(pa->pa_class));
806
807 sc->sc_pc = pa->pa_pc;
808 sc->sc_tag = pa->pa_tag;
809
810 /* Set up DMA defaults; these might be adjusted by chip_map. */
811 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
812 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;
813
814 #ifdef WDCDEBUG
815 if (wdcdebug_pciide_mask & DEBUG_PROBE)
816 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
817 #endif
818 sc->sc_pp->chip_map(sc, pa);
819
820 if (sc->sc_dma_ok) {
821 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
822 csr |= PCI_COMMAND_MASTER_ENABLE;
823 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
824 }
825 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
826 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
827 }
828
829 /* tell whether the chip is enabled or not */
830 int
831 pciide_chipen(sc, pa)
832 struct pciide_softc *sc;
833 struct pci_attach_args *pa;
834 {
835 pcireg_t csr;
836 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
837 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
838 PCI_COMMAND_STATUS_REG);
839 aprint_normal("%s: device disabled (at %s)\n",
840 sc->sc_wdcdev.sc_dev.dv_xname,
841 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
842 "device" : "bridge");
843 return 0;
844 }
845 return 1;
846 }
847
848 int
849 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
850 struct pci_attach_args *pa;
851 struct pciide_channel *cp;
852 int compatchan;
853 bus_size_t *cmdsizep, *ctlsizep;
854 {
855 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
856 struct channel_softc *wdc_cp = &cp->wdc_channel;
857
858 cp->compat = 1;
859 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
860 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
861
862 wdc_cp->cmd_iot = pa->pa_iot;
863 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
864 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
865 aprint_error("%s: couldn't map %s channel cmd regs\n",
866 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
867 return (0);
868 }
869
870 wdc_cp->ctl_iot = pa->pa_iot;
871 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
872 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
873 aprint_error("%s: couldn't map %s channel ctl regs\n",
874 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
875 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
876 PCIIDE_COMPAT_CMD_SIZE);
877 return (0);
878 }
879
880 return (1);
881 }
882
883 int
884 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
885 struct pci_attach_args * pa;
886 struct pciide_channel *cp;
887 bus_size_t *cmdsizep, *ctlsizep;
888 int (*pci_intr) __P((void *));
889 {
890 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
891 struct channel_softc *wdc_cp = &cp->wdc_channel;
892 const char *intrstr;
893 pci_intr_handle_t intrhandle;
894
895 cp->compat = 0;
896
897 if (sc->sc_pci_ih == NULL) {
898 if (pci_intr_map(pa, &intrhandle) != 0) {
899 aprint_error("%s: couldn't map native-PCI interrupt\n",
900 sc->sc_wdcdev.sc_dev.dv_xname);
901 return 0;
902 }
903 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
904 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
905 intrhandle, IPL_BIO, pci_intr, sc);
906 if (sc->sc_pci_ih != NULL) {
907 aprint_normal("%s: using %s for native-PCI interrupt\n",
908 sc->sc_wdcdev.sc_dev.dv_xname,
909 intrstr ? intrstr : "unknown interrupt");
910 } else {
911 aprint_error(
912 "%s: couldn't establish native-PCI interrupt",
913 sc->sc_wdcdev.sc_dev.dv_xname);
914 if (intrstr != NULL)
915 aprint_normal(" at %s", intrstr);
916 aprint_normal("\n");
917 return 0;
918 }
919 }
920 cp->ih = sc->sc_pci_ih;
921 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
922 PCI_MAPREG_TYPE_IO, 0,
923 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
924 aprint_error("%s: couldn't map %s channel cmd regs\n",
925 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
926 return 0;
927 }
928
929 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
930 PCI_MAPREG_TYPE_IO, 0,
931 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
932 aprint_error("%s: couldn't map %s channel ctl regs\n",
933 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
934 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
935 return 0;
936 }
937 /*
938 * In native mode, 4 bytes of I/O space are mapped for the control
939 * register, the control register is at offset 2. Pass the generic
940 * code a handle for only one byte at the right offset.
941 */
942 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
943 &wdc_cp->ctl_ioh) != 0) {
944 aprint_error("%s: unable to subregion %s channel ctl regs\n",
945 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
946 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
947 bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep);
948 return 0;
949 }
950 return (1);
951 }
952
953 void
954 pciide_mapreg_dma(sc, pa)
955 struct pciide_softc *sc;
956 struct pci_attach_args *pa;
957 {
958 pcireg_t maptype;
959 bus_addr_t addr;
960
961 /*
962 * Map DMA registers
963 *
964 * Note that sc_dma_ok is the right variable to test to see if
965 * DMA can be done. If the interface doesn't support DMA,
966 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
967 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
968 * non-zero if the interface supports DMA and the registers
969 * could be mapped.
970 *
971 * XXX Note that despite the fact that the Bus Master IDE specs
972 * XXX say that "The bus master IDE function uses 16 bytes of IO
973 * XXX space," some controllers (at least the United
974 * XXX Microelectronics UM8886BF) place it in memory space.
975 */
976 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
977 PCIIDE_REG_BUS_MASTER_DMA);
978
979 switch (maptype) {
980 case PCI_MAPREG_TYPE_IO:
981 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
982 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
983 &addr, NULL, NULL) == 0);
984 if (sc->sc_dma_ok == 0) {
985 aprint_normal(
986 ", but unused (couldn't query registers)");
987 break;
988 }
989 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
990 && addr >= 0x10000) {
991 sc->sc_dma_ok = 0;
992 aprint_normal(
993 ", but unused (registers at unsafe address "
994 "%#lx)", (unsigned long)addr);
995 break;
996 }
997 /* FALLTHROUGH */
998
999 case PCI_MAPREG_MEM_TYPE_32BIT:
1000 sc->sc_dma_ok = (pci_mapreg_map(pa,
1001 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
1002 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
1003 sc->sc_dmat = pa->pa_dmat;
1004 if (sc->sc_dma_ok == 0) {
1005 aprint_normal(", but unused (couldn't map registers)");
1006 } else {
1007 sc->sc_wdcdev.dma_arg = sc;
1008 sc->sc_wdcdev.dma_init = pciide_dma_init;
1009 sc->sc_wdcdev.dma_start = pciide_dma_start;
1010 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
1011 }
1012
1013 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1014 PCIIDE_OPTIONS_NODMA) {
1015 aprint_normal(
1016 ", but unused (forced off by config file)");
1017 sc->sc_dma_ok = 0;
1018 }
1019 break;
1020
1021 default:
1022 sc->sc_dma_ok = 0;
1023 aprint_normal(
1024 ", but unsupported register maptype (0x%x)", maptype);
1025 }
1026 }
1027
1028 int
1029 pciide_compat_intr(arg)
1030 void *arg;
1031 {
1032 struct pciide_channel *cp = arg;
1033
1034 #ifdef DIAGNOSTIC
1035 /* should only be called for a compat channel */
1036 if (cp->compat == 0)
1037 panic("pciide compat intr called for non-compat chan %p", cp);
1038 #endif
1039 return (wdcintr(&cp->wdc_channel));
1040 }
1041
1042 int
1043 pciide_pci_intr(arg)
1044 void *arg;
1045 {
1046 struct pciide_softc *sc = arg;
1047 struct pciide_channel *cp;
1048 struct channel_softc *wdc_cp;
1049 int i, rv, crv;
1050
1051 rv = 0;
1052 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
1053 cp = &sc->pciide_channels[i];
1054 wdc_cp = &cp->wdc_channel;
1055
1056 /* If a compat channel skip. */
1057 if (cp->compat)
1058 continue;
1059 /* if this channel not waiting for intr, skip */
1060 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
1061 continue;
1062
1063 crv = wdcintr(wdc_cp);
1064 if (crv == 0)
1065 ; /* leave rv alone */
1066 else if (crv == 1)
1067 rv = 1; /* claim the intr */
1068 else if (rv == 0) /* crv should be -1 in this case */
1069 rv = crv; /* if we've done no better, take it */
1070 }
1071 return (rv);
1072 }
1073
1074 void
1075 pciide_channel_dma_setup(cp)
1076 struct pciide_channel *cp;
1077 {
1078 int drive;
1079 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1080 struct ata_drive_datas *drvp;
1081
1082 for (drive = 0; drive < 2; drive++) {
1083 drvp = &cp->wdc_channel.ch_drive[drive];
1084 /* If no drive, skip */
1085 if ((drvp->drive_flags & DRIVE) == 0)
1086 continue;
1087 /* setup DMA if needed */
1088 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1089 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
1090 sc->sc_dma_ok == 0) {
1091 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1092 continue;
1093 }
1094 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1095 != 0) {
1096 /* Abort DMA setup */
1097 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1098 continue;
1099 }
1100 }
1101 }
1102
1103 int
1104 pciide_dma_table_setup(sc, channel, drive)
1105 struct pciide_softc *sc;
1106 int channel, drive;
1107 {
1108 bus_dma_segment_t seg;
1109 int error, rseg;
1110 const bus_size_t dma_table_size =
1111 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1112 struct pciide_dma_maps *dma_maps =
1113 &sc->pciide_channels[channel].dma_maps[drive];
1114
1115 /* If table was already allocated, just return */
1116 if (dma_maps->dma_table)
1117 return 0;
1118
1119 /* Allocate memory for the DMA tables and map it */
1120 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1121 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1122 BUS_DMA_NOWAIT)) != 0) {
1123 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1124 "allocate", drive, error);
1125 return error;
1126 }
1127 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1128 dma_table_size,
1129 (caddr_t *)&dma_maps->dma_table,
1130 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1131 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1132 "map", drive, error);
1133 return error;
1134 }
1135 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1136 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1137 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1138 /* Create and load table DMA map for this disk */
1139 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1140 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1141 &dma_maps->dmamap_table)) != 0) {
1142 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1143 "create", drive, error);
1144 return error;
1145 }
1146 if ((error = bus_dmamap_load(sc->sc_dmat,
1147 dma_maps->dmamap_table,
1148 dma_maps->dma_table,
1149 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1150 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1151 "load", drive, error);
1152 return error;
1153 }
1154 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1155 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1156 DEBUG_PROBE);
1157 /* Create a xfer DMA map for this drive */
1158 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1159 NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary,
1160 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1161 &dma_maps->dmamap_xfer)) != 0) {
1162 aprint_error(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1163 "create xfer", drive, error);
1164 return error;
1165 }
1166 return 0;
1167 }
1168
/*
 * Prepare a bus-master DMA transfer for (channel, drive): load the data
 * buffer into the xfer DMA map, build the controller's descriptor table
 * from the resulting segments, and program the table address and the
 * transfer direction into the bus-master registers.
 *
 * flags carries the WDC_DMA_* bits; it is saved in dma_maps->dma_flags
 * for use by pciide_dma_finish().  Returns 0 on success or the bus_dma
 * error code from the buffer load.
 */
int
pciide_dma_init(v, channel, drive, databuf, datalen, flags)
	void *v;
	int channel, drive;
	void *databuf;
	size_t datalen;
	int flags;
{
	struct pciide_softc *sc = v;
	int error, seg;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	/* Load the data buffer into the per-drive xfer map. */
	error = bus_dmamap_load(sc->sc_dmat,
	    dma_maps->dmamap_xfer,
	    databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
	if (error) {
		printf(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
		    "load xfer", drive, error);
		return error;
	}

	/* Sync the buffer for the device before the transfer starts. */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
	    dma_maps->dmamap_xfer->dm_mapsize,
	    (flags & WDC_DMA_READ) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	/* Turn each DMA segment into one descriptor-table entry. */
	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
#ifdef DIAGNOSTIC
		/* A segment must not cross a 64k boundary */
		{
		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
			printf("pciide_dma: segment %d physical addr 0x%lx"
			    " len 0x%lx not properly aligned\n",
			    seg, phys, len);
			panic("pciide_dma: buf align");
		}
		}
#endif
		/* Descriptors are little-endian in memory. */
		dma_maps->dma_table[seg].base_addr =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
		dma_maps->dma_table[seg].byte_count =
		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
		    IDEDMA_BYTE_COUNT_MASK);
		WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
		    seg, le32toh(dma_maps->dma_table[seg].byte_count),
		    le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);

	}
	/* Flag the last descriptor as end-of-table. */
	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
	    htole32(IDEDMA_BYTE_COUNT_EOT);

	/* Make the descriptor table visible to the device. */
	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
	    dma_maps->dmamap_table->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* Maps are ready. Start DMA function */
#ifdef DIAGNOSTIC
	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
		printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
		    (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
		panic("pciide_dma_init: table align");
	}
#endif

	/* Clear status bits */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
	/* Write table addr */
	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
	/* set read/write */
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
	    (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
	/* remember flags */
	dma_maps->dma_flags = flags;
	return 0;
}
1255
1256 void
1257 pciide_dma_start(v, channel, drive)
1258 void *v;
1259 int channel, drive;
1260 {
1261 struct pciide_softc *sc = v;
1262
1263 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1264 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1265 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1266 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1267 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1268 }
1269
1270 int
1271 pciide_dma_finish(v, channel, drive, force)
1272 void *v;
1273 int channel, drive;
1274 int force;
1275 {
1276 struct pciide_softc *sc = v;
1277 u_int8_t status;
1278 int error = 0;
1279 struct pciide_dma_maps *dma_maps =
1280 &sc->pciide_channels[channel].dma_maps[drive];
1281
1282 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1283 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1284 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1285 DEBUG_XFERS);
1286
1287 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1288 return WDC_DMAST_NOIRQ;
1289
1290 /* stop DMA channel */
1291 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1292 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1293 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1294 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1295
1296 /* Unload the map of the data buffer */
1297 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1298 dma_maps->dmamap_xfer->dm_mapsize,
1299 (dma_maps->dma_flags & WDC_DMA_READ) ?
1300 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1301 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1302
1303 if ((status & IDEDMA_CTL_ERR) != 0) {
1304 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1305 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1306 error |= WDC_DMAST_ERR;
1307 }
1308
1309 if ((status & IDEDMA_CTL_INTR) == 0) {
1310 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1311 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1312 drive, status);
1313 error |= WDC_DMAST_NOIRQ;
1314 }
1315
1316 if ((status & IDEDMA_CTL_ACT) != 0) {
1317 /* data underrun, may be a valid condition for ATAPI */
1318 error |= WDC_DMAST_UNDER;
1319 }
1320 return error;
1321 }
1322
1323 void
1324 pciide_irqack(chp)
1325 struct channel_softc *chp;
1326 {
1327 struct pciide_channel *cp = (struct pciide_channel*)chp;
1328 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1329
1330 /* clear status bits in IDE DMA registers */
1331 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1332 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1333 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1334 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1335 }
1336
1337 /* some common code used by several chip_map */
1338 int
1339 pciide_chansetup(sc, channel, interface)
1340 struct pciide_softc *sc;
1341 int channel;
1342 pcireg_t interface;
1343 {
1344 struct pciide_channel *cp = &sc->pciide_channels[channel];
1345 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1346 cp->name = PCIIDE_CHANNEL_NAME(channel);
1347 cp->wdc_channel.channel = channel;
1348 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1349 cp->wdc_channel.ch_queue =
1350 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1351 if (cp->wdc_channel.ch_queue == NULL) {
1352 aprint_error("%s %s channel: "
1353 "can't allocate memory for command queue",
1354 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1355 return 0;
1356 }
1357 aprint_normal("%s: %s channel %s to %s mode\n",
1358 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1359 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1360 "configured" : "wired",
1361 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1362 "native-PCI" : "compatibility");
1363 return 1;
1364 }
1365
1366 /* some common code used by several chip channel_map */
1367 void
1368 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1369 struct pci_attach_args *pa;
1370 struct pciide_channel *cp;
1371 pcireg_t interface;
1372 bus_size_t *cmdsizep, *ctlsizep;
1373 int (*pci_intr) __P((void *));
1374 {
1375 struct channel_softc *wdc_cp = &cp->wdc_channel;
1376
1377 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1378 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1379 pci_intr);
1380 else
1381 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1382 wdc_cp->channel, cmdsizep, ctlsizep);
1383
1384 if (cp->hw_ok == 0)
1385 return;
1386 wdc_cp->data32iot = wdc_cp->cmd_iot;
1387 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1388 wdcattach(wdc_cp);
1389 }
1390
1391 /*
1392 * Generic code to call to know if a channel can be disabled. Return 1
1393 * if channel can be disabled, 0 if not
1394 */
1395 int
1396 pciide_chan_candisable(cp)
1397 struct pciide_channel *cp;
1398 {
1399 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1400 struct channel_softc *wdc_cp = &cp->wdc_channel;
1401
1402 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1403 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1404 aprint_normal("%s: disabling %s channel (no drives)\n",
1405 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1406 cp->hw_ok = 0;
1407 return 1;
1408 }
1409 return 0;
1410 }
1411
1412 /*
1413 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1414 * Set hw_ok=0 on failure
1415 */
/*
 * Establish the legacy interrupt for a compatibility channel.  A no-op
 * for channels that are already dead (hw_ok == 0) or wired native-PCI;
 * on failure hw_ok is cleared.  On ports without the machdep
 * compat-intr hook the failure path runs unconditionally.
 */
void
pciide_map_compat_intr(pa, cp, compatchan, interface)
	struct pci_attach_args *pa;
	struct pciide_channel *cp;
	int compatchan, interface;
{
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct channel_softc *wdc_cp = &cp->wdc_channel;

	if (cp->hw_ok == 0)
		return;
	/* Native-PCI channels get their interrupt over the PCI bus. */
	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
		return;

#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
	cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
	    pa, compatchan, pciide_compat_intr, cp);
	if (cp->ih == NULL) {
#endif
		/* NOTE: the #ifdef straddles the if-brace on purpose. */
		aprint_error("%s: no compatibility interrupt for use by %s "
		    "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		cp->hw_ok = 0;
#ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
	}
#endif
}
1442
1443 void
1444 pciide_print_modes(cp)
1445 struct pciide_channel *cp;
1446 {
1447 wdc_print_modes(&cp->wdc_channel);
1448 }
1449
1450 void
1451 default_chip_map(sc, pa)
1452 struct pciide_softc *sc;
1453 struct pci_attach_args *pa;
1454 {
1455 struct pciide_channel *cp;
1456 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1457 pcireg_t csr;
1458 int channel, drive;
1459 struct ata_drive_datas *drvp;
1460 u_int8_t idedma_ctl;
1461 bus_size_t cmdsize, ctlsize;
1462 char *failreason;
1463
1464 if (pciide_chipen(sc, pa) == 0)
1465 return;
1466
1467 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1468 aprint_normal("%s: bus-master DMA support present",
1469 sc->sc_wdcdev.sc_dev.dv_xname);
1470 if (sc->sc_pp == &default_product_desc &&
1471 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1472 PCIIDE_OPTIONS_DMA) == 0) {
1473 aprint_normal(", but unused (no driver support)");
1474 sc->sc_dma_ok = 0;
1475 } else {
1476 pciide_mapreg_dma(sc, pa);
1477 if (sc->sc_dma_ok != 0)
1478 aprint_normal(", used without full driver "
1479 "support");
1480 }
1481 } else {
1482 aprint_normal("%s: hardware does not support DMA",
1483 sc->sc_wdcdev.sc_dev.dv_xname);
1484 sc->sc_dma_ok = 0;
1485 }
1486 aprint_normal("\n");
1487 if (sc->sc_dma_ok) {
1488 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1489 sc->sc_wdcdev.irqack = pciide_irqack;
1490 }
1491 sc->sc_wdcdev.PIO_cap = 0;
1492 sc->sc_wdcdev.DMA_cap = 0;
1493
1494 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1495 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1496 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1497
1498 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1499 cp = &sc->pciide_channels[channel];
1500 if (pciide_chansetup(sc, channel, interface) == 0)
1501 continue;
1502 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1503 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1504 &ctlsize, pciide_pci_intr);
1505 } else {
1506 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1507 channel, &cmdsize, &ctlsize);
1508 }
1509 if (cp->hw_ok == 0)
1510 continue;
1511 /*
1512 * Check to see if something appears to be there.
1513 */
1514 failreason = NULL;
1515 if (!wdcprobe(&cp->wdc_channel)) {
1516 failreason = "not responding; disabled or no drives?";
1517 goto next;
1518 }
1519 /*
1520 * Now, make sure it's actually attributable to this PCI IDE
1521 * channel by trying to access the channel again while the
1522 * PCI IDE controller's I/O space is disabled. (If the
1523 * channel no longer appears to be there, it belongs to
1524 * this controller.) YUCK!
1525 */
1526 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1527 PCI_COMMAND_STATUS_REG);
1528 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1529 csr & ~PCI_COMMAND_IO_ENABLE);
1530 if (wdcprobe(&cp->wdc_channel))
1531 failreason = "other hardware responding at addresses";
1532 pci_conf_write(sc->sc_pc, sc->sc_tag,
1533 PCI_COMMAND_STATUS_REG, csr);
1534 next:
1535 if (failreason) {
1536 aprint_error("%s: %s channel ignored (%s)\n",
1537 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1538 failreason);
1539 cp->hw_ok = 0;
1540 bus_space_unmap(cp->wdc_channel.cmd_iot,
1541 cp->wdc_channel.cmd_ioh, cmdsize);
1542 if (interface & PCIIDE_INTERFACE_PCI(channel))
1543 bus_space_unmap(cp->wdc_channel.ctl_iot,
1544 cp->ctl_baseioh, ctlsize);
1545 else
1546 bus_space_unmap(cp->wdc_channel.ctl_iot,
1547 cp->wdc_channel.ctl_ioh, ctlsize);
1548 } else {
1549 pciide_map_compat_intr(pa, cp, channel, interface);
1550 }
1551 if (cp->hw_ok) {
1552 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1553 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1554 wdcattach(&cp->wdc_channel);
1555 }
1556 }
1557
1558 if (sc->sc_dma_ok == 0)
1559 return;
1560
1561 /* Allocate DMA maps */
1562 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1563 idedma_ctl = 0;
1564 cp = &sc->pciide_channels[channel];
1565 for (drive = 0; drive < 2; drive++) {
1566 drvp = &cp->wdc_channel.ch_drive[drive];
1567 /* If no drive, skip */
1568 if ((drvp->drive_flags & DRIVE) == 0)
1569 continue;
1570 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1571 continue;
1572 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1573 /* Abort DMA setup */
1574 aprint_error(
1575 "%s:%d:%d: can't allocate DMA maps, "
1576 "using PIO transfers\n",
1577 sc->sc_wdcdev.sc_dev.dv_xname,
1578 channel, drive);
1579 drvp->drive_flags &= ~DRIVE_DMA;
1580 }
1581 aprint_normal("%s:%d:%d: using DMA data transfers\n",
1582 sc->sc_wdcdev.sc_dev.dv_xname,
1583 channel, drive);
1584 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1585 }
1586 if (idedma_ctl != 0) {
1587 /* Add software bits in status register */
1588 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1589 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1590 idedma_ctl);
1591 }
1592 }
1593 }
1594
1595 void
1596 sata_setup_channel(chp)
1597 struct channel_softc *chp;
1598 {
1599 struct ata_drive_datas *drvp;
1600 int drive;
1601 u_int32_t idedma_ctl;
1602 struct pciide_channel *cp = (struct pciide_channel*)chp;
1603 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
1604
1605 /* setup DMA if needed */
1606 pciide_channel_dma_setup(cp);
1607
1608 idedma_ctl = 0;
1609
1610 for (drive = 0; drive < 2; drive++) {
1611 drvp = &chp->ch_drive[drive];
1612 /* If no drive, skip */
1613 if ((drvp->drive_flags & DRIVE) == 0)
1614 continue;
1615 if (drvp->drive_flags & DRIVE_UDMA) {
1616 /* use Ultra/DMA */
1617 drvp->drive_flags &= ~DRIVE_DMA;
1618 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1619 } else if (drvp->drive_flags & DRIVE_DMA) {
1620 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1621 }
1622 }
1623
1624 /*
1625 * Nothing to do to setup modes; it is meaningless in S-ATA
1626 * (but many S-ATA drives still want to get the SET_FEATURE
1627 * command).
1628 */
1629 if (idedma_ctl != 0) {
1630 /* Add software bits in status register */
1631 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1632 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1633 idedma_ctl);
1634 }
1635 pciide_print_modes(cp);
1636 }
1637
/*
 * chip_map routine for the Intel PIIX/PIIX3/PIIX4 and ICH families.
 * Sets the controller capabilities according to the exact product,
 * selects the per-channel mode-setup routine, and maps/configures each
 * channel (PIIX parts are compatibility-mode only).
 */
void
piix_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	u_int32_t idetim;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		/* These later parts also support Ultra-DMA. */
		switch(sc->sc_pp->ide_product) {
		case PCI_PRODUCT_INTEL_82371AB_IDE:
		case PCI_PRODUCT_INTEL_82440MX_IDE:
		case PCI_PRODUCT_INTEL_82801AA_IDE:
		case PCI_PRODUCT_INTEL_82801AB_IDE:
		case PCI_PRODUCT_INTEL_82801BA_IDE:
		case PCI_PRODUCT_INTEL_82801BAM_IDE:
		case PCI_PRODUCT_INTEL_82801CA_IDE_1:
		case PCI_PRODUCT_INTEL_82801CA_IDE_2:
		case PCI_PRODUCT_INTEL_82801DB_IDE:
		case PCI_PRODUCT_INTEL_82801DBM_IDE:
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
		}
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	/* Maximum UDMA mode depends on the exact chip. */
	switch(sc->sc_pp->ide_product) {
	case PCI_PRODUCT_INTEL_82801AA_IDE:
		sc->sc_wdcdev.UDMA_cap = 4;
		break;
	case PCI_PRODUCT_INTEL_82801BA_IDE:
	case PCI_PRODUCT_INTEL_82801BAM_IDE:
	case PCI_PRODUCT_INTEL_82801CA_IDE_1:
	case PCI_PRODUCT_INTEL_82801CA_IDE_2:
	case PCI_PRODUCT_INTEL_82801DB_IDE:
	case PCI_PRODUCT_INTEL_82801DBM_IDE:
		sc->sc_wdcdev.UDMA_cap = 5;
		break;
	default:
		sc->sc_wdcdev.UDMA_cap = 2;
	}
	/* The original PIIX has a different timing-register layout. */
	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
		sc->sc_wdcdev.set_modes = piix_setup_channel;
	else
		sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* Dump the timing registers as found at probe time. */
	WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udamreg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE) {
			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}

	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		/* PIIX is compat-only */
		if (pciide_chansetup(sc, channel, 0) == 0)
			continue;
		idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
		/* Skip channels turned off in the timing register. */
		if ((PIIX_IDETIM_READ(idetim, channel) &
		    PIIX_IDETIM_IDE) == 0) {
			aprint_normal("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		/* PIIX are compat-only pciide devices */
		pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		/* No drives: turn off the register decode for the channel. */
		if (pciide_chan_candisable(cp)) {
			idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
			    channel);
			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
			    idetim);
		}
		pciide_map_compat_intr(pa, cp, channel, 0);
		if (cp->hw_ok == 0)
			continue;
		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
	}

	/* Dump the timing registers again after our configuration. */
	WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		WDCDEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
			WDCDEBUG_PRINT((", udamreg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE) {
			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}
	}
	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
}
1781
/*
 * Per-channel mode setup for the original PIIX (82371FB).  The PIIX
 * has a single timing register per channel shared by both drives, so
 * the two drives' modes must be negotiated down to one compatible
 * timing before it is programmed.
 */
void
piix_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int8_t mode[2], drive;
	u_int32_t oidetim, idetim, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;

	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
	idedma_ctl = 0;

	/* set up new idetim: Enable IDE registers decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
	    chp->channel);

	/* setup DMA */
	pciide_channel_dma_setup(cp);

	/*
	 * Here we have to mess up with drives mode: PIIX can't have
	 * different timings for master and slave drives.
	 * We need to find the best combination.
	 */

	/* If both drives supports DMA, take the lower mode */
	if ((drvp[0].drive_flags & DRIVE_DMA) &&
	    (drvp[1].drive_flags & DRIVE_DMA)) {
		mode[0] = mode[1] =
		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
		    drvp[0].DMA_mode = mode[0];
		    drvp[1].DMA_mode = mode[1];
		goto ok;
	}
	/*
	 * If only one drive supports DMA, use its mode, and
	 * put the other one in PIO mode 0 if mode not compatible
	 */
	if (drvp[0].drive_flags & DRIVE_DMA) {
		mode[0] = drvp[0].DMA_mode;
		mode[1] = drvp[1].PIO_mode;
		/* Demote PIO drive if its timings clash with the DMA ones. */
		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
			mode[1] = drvp[1].PIO_mode = 0;
		goto ok;
	}
	if (drvp[1].drive_flags & DRIVE_DMA) {
		mode[1] = drvp[1].DMA_mode;
		mode[0] = drvp[0].PIO_mode;
		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
			mode[0] = drvp[0].PIO_mode = 0;
		goto ok;
	}
	/*
	 * If both drives are not DMA, takes the lower mode, unless
	 * one of them is PIO mode < 2
	 */
	if (drvp[0].PIO_mode < 2) {
		mode[0] = drvp[0].PIO_mode = 0;
		mode[1] = drvp[1].PIO_mode;
	} else if (drvp[1].PIO_mode < 2) {
		mode[1] = drvp[1].PIO_mode = 0;
		mode[0] = drvp[0].PIO_mode;
	} else {
		mode[0] = mode[1] =
		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
		drvp[0].PIO_mode = mode[0];
		drvp[1].PIO_mode = mode[1];
	}
ok:	/* The modes are setup */
	/* If any drive uses DMA, program the (shared) DMA timings. */
	for (drive = 0; drive < 2; drive++) {
		if (drvp[drive].drive_flags & DRIVE_DMA) {
			idetim |= piix_setup_idetim_timings(
			    mode[drive], 1, chp->channel);
			goto end;
		}
	}
	/* If we are there, none of the drives are DMA */
	if (mode[0] >= 2)
		idetim |= piix_setup_idetim_timings(
		    mode[0], 0, chp->channel);
	else
		idetim |= piix_setup_idetim_timings(
		    mode[1], 0, chp->channel);
end:	/*
	 * timing mode is now set up in the controller. Enable
	 * it per-drive
	 */
	for (drive = 0; drive < 2; drive++) {
		/* If no drive, skip */
		if ((drvp[drive].drive_flags & DRIVE) == 0)
			continue;
		idetim |= piix_setup_idetim_drvs(&drvp[drive]);
		if (drvp[drive].drive_flags & DRIVE_DMA)
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
	pciide_print_modes(cp);
}
1890
/*
 * Per-channel mode setup for PIIX3/PIIX4 and the ICH family.  These
 * chips have separate slave timings (SIDETIM) and, on the parts with
 * WDC_CAPABILITY_UDMA, Ultra-DMA control/timing registers, so each
 * drive is programmed individually.  Note that drives without (U)DMA
 * jump to the pio: label, and DMA drives deliberately fall through to
 * it so their PIO timings get set as well.
 */
void
piix3_4_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int drive;
	int channel = chp->channel;

	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
	sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
	udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
	/* Start from a clean slate for this channel's timing fields. */
	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
	sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
	    PIIX_SIDETIM_RTC_MASK(channel));

	idedma_ctl = 0;
	/* If channel disabled, no need to go further */
	if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
		return;
	/* set up new idetim: Enable IDE registers decode */
	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		/* Clear this drive's UDMA enable and timing fields. */
		udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
		    PIIX_UDMATIM_SET(0x3, channel, drive));
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0))
			goto pio;

		/* ICH parts: enable the ping-pong buffer. */
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE) {
			ideconf |= PIIX_CONFIG_PINGPONG;
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE) {
			/* setup Ultra/100 */
			/* Modes above UDMA2 need the 80-conductor cable. */
			if (drvp->UDMA_mode > 2 &&
			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
				drvp->UDMA_mode = 2;
			if (drvp->UDMA_mode > 4) {
				ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
			} else {
				ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
				if (drvp->UDMA_mode > 2) {
					ideconf |= PIIX_CONFIG_UDMA66(channel,
					    drive);
				} else {
					ideconf &= ~PIIX_CONFIG_UDMA66(channel,
					    drive);
				}
			}
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
			/* setup Ultra/66 */
			if (drvp->UDMA_mode > 2 &&
			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
				drvp->UDMA_mode = 2;
			if (drvp->UDMA_mode > 2)
				ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
			else
				ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
		}
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive);
			udmareg |= PIIX_UDMATIM_SET(
			    piix4_sct_udma[drvp->UDMA_mode], channel, drive);
		} else {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			if (drive == 0) {
				idetim |= piix_setup_idetim_timings(
				    drvp->DMA_mode, 1, channel);
			} else {
				sidetim |= piix_setup_sidetim_timings(
				    drvp->DMA_mode, 1, channel);
				idetim =PIIX_IDETIM_SET(idetim,
				    PIIX_IDETIM_SITRE, channel);
			}
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

pio:		/* use PIO mode */
		idetim |= piix_setup_idetim_drvs(drvp);
		if (drive == 0) {
			idetim |= piix_setup_idetim_timings(
			    drvp->PIO_mode, 0, channel);
		} else {
			sidetim |= piix_setup_sidetim_timings(
			    drvp->PIO_mode, 0, channel);
			idetim =PIIX_IDETIM_SET(idetim,
			    PIIX_IDETIM_SITRE, channel);
		}
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
		    idedma_ctl);
	}
	/* Commit everything to the configuration registers. */
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
	pciide_print_modes(cp);
}
2020
2021
2022 /* setup ISP and RTC fields, based on mode */
2023 static u_int32_t
2024 piix_setup_idetim_timings(mode, dma, channel)
2025 u_int8_t mode;
2026 u_int8_t dma;
2027 u_int8_t channel;
2028 {
2029
2030 if (dma)
2031 return PIIX_IDETIM_SET(0,
2032 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
2033 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
2034 channel);
2035 else
2036 return PIIX_IDETIM_SET(0,
2037 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
2038 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
2039 channel);
2040 }
2041
2042 /* setup DTE, PPE, IE and TIME field based on PIO mode */
2043 static u_int32_t
2044 piix_setup_idetim_drvs(drvp)
2045 struct ata_drive_datas *drvp;
2046 {
2047 u_int32_t ret = 0;
2048 struct channel_softc *chp = drvp->chnl_softc;
2049 u_int8_t channel = chp->channel;
2050 u_int8_t drive = drvp->drive;
2051
2052 /*
2053 * If drive is using UDMA, timings setups are independant
2054 * So just check DMA and PIO here.
2055 */
2056 if (drvp->drive_flags & DRIVE_DMA) {
2057 /* if mode = DMA mode 0, use compatible timings */
2058 if ((drvp->drive_flags & DRIVE_DMA) &&
2059 drvp->DMA_mode == 0) {
2060 drvp->PIO_mode = 0;
2061 return ret;
2062 }
2063 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2064 /*
2065 * PIO and DMA timings are the same, use fast timings for PIO
2066 * too, else use compat timings.
2067 */
2068 if ((piix_isp_pio[drvp->PIO_mode] !=
2069 piix_isp_dma[drvp->DMA_mode]) ||
2070 (piix_rtc_pio[drvp->PIO_mode] !=
2071 piix_rtc_dma[drvp->DMA_mode]))
2072 drvp->PIO_mode = 0;
2073 /* if PIO mode <= 2, use compat timings for PIO */
2074 if (drvp->PIO_mode <= 2) {
2075 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
2076 channel);
2077 return ret;
2078 }
2079 }
2080
2081 /*
2082 * Now setup PIO modes. If mode < 2, use compat timings.
2083 * Else enable fast timings. Enable IORDY and prefetch/post
2084 * if PIO mode >= 3.
2085 */
2086
2087 if (drvp->PIO_mode < 2)
2088 return ret;
2089
2090 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2091 if (drvp->PIO_mode >= 3) {
2092 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
2093 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
2094 }
2095 return ret;
2096 }
2097
2098 /* setup values in SIDETIM registers, based on mode */
2099 static u_int32_t
2100 piix_setup_sidetim_timings(mode, dma, channel)
2101 u_int8_t mode;
2102 u_int8_t dma;
2103 u_int8_t channel;
2104 {
2105 if (dma)
2106 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
2107 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
2108 else
2109 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
2110 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
2111 }
2112
/*
 * Map and configure an AMD 756/766/768/8111 or NVIDIA nForce IDE
 * controller: probe for bus-master DMA, select the maximum UDMA mode
 * and register base per vendor/product, then attach and program each
 * enabled channel.
 */
void
amd7x6_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	int channel;
	pcireg_t chanenable;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;
	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;

	/* Per-vendor: pick the UDMA ceiling and the timing register base. */
	switch (sc->sc_pci_vendor) {
	case PCI_VENDOR_AMD:
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_AMD_PBC766_IDE:
		case PCI_PRODUCT_AMD_PBC768_IDE:
		case PCI_PRODUCT_AMD_PBC8111_IDE:
			sc->sc_wdcdev.UDMA_cap = 5;
			break;
		default:
			sc->sc_wdcdev.UDMA_cap = 4;
		}
		sc->sc_amd_regbase = AMD7X6_AMD_REGBASE;
		break;

	case PCI_VENDOR_NVIDIA:
		/*
		 * NOTE(review): no default case here, so an unlisted
		 * NVIDIA product leaves UDMA_cap unchanged -- confirm
		 * this is intentional.
		 */
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
			sc->sc_wdcdev.UDMA_cap = 5;
			break;
		case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
			sc->sc_wdcdev.UDMA_cap = 6;
			break;
		}
		sc->sc_amd_regbase = AMD7X6_NVIDIA_REGBASE;
		break;

	default:
		panic("amd7x6_chip_map: unknown vendor");
	}
	sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	/* read the channel enable bits from PCI config space */
	chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    AMD7X6_CHANSTATUS_EN(sc));

	WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
	    DEBUG_PROBE);
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		/* skip channels disabled in the chip's enable register */
		if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
			aprint_normal("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);

		/* turn the channel off in hardware if it can't be used */
		if (pciide_chan_candisable(cp))
			chanenable &= ~AMD7X6_CHAN_EN(channel);
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;

		amd7x6_setup_channel(&cp->wdc_channel);
	}
	/* write back possibly-updated channel enable bits */
	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN(sc),
	    chanenable);
	return;
}
2202
/*
 * Program the per-drive timing and UDMA control registers of one
 * AMD 7x6/nForce channel according to the modes negotiated in the
 * drives' ata_drive_datas.
 */
void
amd7x6_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int32_t udmatim_reg, datatim_reg;
	u_int8_t idedma_ctl;
	int mode, drive;
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
#ifndef PCIIDE_AMD756_ENABLEDMA
	/* chip revision, needed for the AMD756 MW DMA workaround below */
	int rev = PCI_REVISION(
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
#endif

	idedma_ctl = 0;
	/* read current timings and clear this channel's fields */
	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc));
	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc));
	datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
	udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
			mode = drvp->PIO_mode;
			goto pio;
		}
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
			    AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
			    AMD7X6_UDMA_TIME(chp->channel, drive,
				amd7x6_udma_tim[drvp->UDMA_mode]);
			/* can use PIO timings, MW DMA unused */
			mode = drvp->PIO_mode;
		} else {
			/* use Multiword DMA, but only if revision is OK */
			drvp->drive_flags &= ~DRIVE_UDMA;
#ifndef PCIIDE_AMD756_ENABLEDMA
			/*
			 * The workaround doesn't seem to be necessary
			 * with all drives, so it can be disabled by
			 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if
			 * triggered.
			 */
			if (sc->sc_pci_vendor == PCI_VENDOR_AMD &&
			    sc->sc_pp->ide_product ==
			    PCI_PRODUCT_AMD_PBC756_IDE &&
			    AMD756_CHIPREV_DISABLEDMA(rev)) {
				aprint_normal(
				    "%s:%d:%d: multi-word DMA disabled due "
				    "to chip revision\n",
				    sc->sc_wdcdev.sc_dev.dv_xname,
				    chp->channel, drive);
				mode = drvp->PIO_mode;
				drvp->drive_flags &= ~DRIVE_DMA;
				goto pio;
			}
#endif
			/* mode = min(pio, dma+2) */
			if (drvp->PIO_mode <= (drvp->DMA_mode +2))
				mode = drvp->PIO_mode;
			else
				mode = drvp->DMA_mode + 2;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

pio:		/* setup PIO mode */
		if (mode <= 2) {
			/* modes 0-2 get the default (compatible) timings */
			drvp->DMA_mode = 0;
			drvp->PIO_mode = 0;
			mode = 0;
		} else {
			drvp->PIO_mode = mode;
			drvp->DMA_mode = mode - 2;
		}
		datatim_reg |=
		    AMD7X6_DATATIM_PULSE(chp->channel, drive,
			amd7x6_pio_set[mode]) |
		    AMD7X6_DATATIM_RECOV(chp->channel, drive,
			amd7x6_pio_rec[mode]);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pciide_print_modes(cp);
	/* commit the new timings to PCI config space */
	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc), datatim_reg);
	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc), udmatim_reg);
}
2305
/*
 * Map a VIA Apollo IDE controller.  The IDE function itself doesn't
 * identify the chip generation, so the ISA bridge (function 0 of the
 * same device) is probed and its product/revision used to derive the
 * maximum UDMA mode supported.
 */
void
apollo_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	int channel;
	u_int32_t ideconf;
	bus_size_t cmdsize, ctlsize;
	pcitag_t pcib_tag;
	pcireg_t pcib_id, pcib_class;

	if (pciide_chipen(sc, pa) == 0)
		return;
	/* get a PCI tag for the ISA bridge (function 0 of the same device) */
	pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
	/* and read ID and rev of the ISA bridge */
	pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
	pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
	aprint_normal(": VIA Technologies ");
	/* bridge product (and sometimes revision) => UDMA capability */
	switch (PCI_PRODUCT(pcib_id)) {
	case PCI_PRODUCT_VIATECH_VT82C586_ISA:
		aprint_normal("VT82C586 (Apollo VP) ");
		if(PCI_REVISION(pcib_class) >= 0x02) {
			aprint_normal("ATA33 controller\n");
			sc->sc_wdcdev.UDMA_cap = 2;
		} else {
			aprint_normal("controller\n");
			sc->sc_wdcdev.UDMA_cap = 0;
		}
		break;
	case PCI_PRODUCT_VIATECH_VT82C596A:
		aprint_normal("VT82C596A (Apollo Pro) ");
		if (PCI_REVISION(pcib_class) >= 0x12) {
			aprint_normal("ATA66 controller\n");
			sc->sc_wdcdev.UDMA_cap = 4;
		} else {
			aprint_normal("ATA33 controller\n");
			sc->sc_wdcdev.UDMA_cap = 2;
		}
		break;
	case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
		aprint_normal("VT82C686A (Apollo KX133) ");
		if (PCI_REVISION(pcib_class) >= 0x40) {
			aprint_normal("ATA100 controller\n");
			sc->sc_wdcdev.UDMA_cap = 5;
		} else {
			aprint_normal("ATA66 controller\n");
			sc->sc_wdcdev.UDMA_cap = 4;
		}
		break;
	case PCI_PRODUCT_VIATECH_VT8231:
		aprint_normal("VT8231 ATA100 controller\n");
		sc->sc_wdcdev.UDMA_cap = 5;
		break;
	case PCI_PRODUCT_VIATECH_VT8233:
		aprint_normal("VT8233 ATA100 controller\n");
		sc->sc_wdcdev.UDMA_cap = 5;
		break;
	case PCI_PRODUCT_VIATECH_VT8233A:
		aprint_normal("VT8233A ATA133 controller\n");
		sc->sc_wdcdev.UDMA_cap = 6;
		break;
	case PCI_PRODUCT_VIATECH_VT8235:
		aprint_normal("VT8235 ATA133 controller\n");
		sc->sc_wdcdev.UDMA_cap = 6;
		break;
	default:
		/* unknown bridge: be conservative, no UDMA */
		aprint_normal("unknown ATA controller\n");
		sc->sc_wdcdev.UDMA_cap = 0;
	}

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		/* only advertise UDMA if the chip generation has it */
		if (sc->sc_wdcdev.UDMA_cap > 0)
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.set_modes = apollo_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
	    "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
	    DEBUG_PROBE);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		/* skip channels disabled in the IDE config register */
		ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
		if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
			aprint_normal("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		/* disable the channel in hardware if it can't be used */
		if (pciide_chan_candisable(cp)) {
			ideconf &= ~APO_IDECONF_EN(channel);
			pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
			    ideconf);
		}
		pciide_map_compat_intr(pa, cp, channel, interface);

		if (cp->hw_ok == 0)
			continue;
		apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
	}
	WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
}
2435
/*
 * Program the per-drive data timing and UDMA registers of one VIA
 * Apollo channel.  The UDMA timing table is chosen from the chip
 * generation detected at map time (sc_wdcdev.UDMA_cap).
 */
void
apollo_setup_channel(chp)
	struct channel_softc *chp;
{
	u_int32_t udmatim_reg, datatim_reg;
	u_int8_t idedma_ctl;
	int mode, drive;
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	idedma_ctl = 0;
	/* read current timings and clear this channel's fields */
	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
	datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
			mode = drvp->PIO_mode;
			goto pio;
		}
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
			    APO_UDMA_EN_MTH(chp->channel, drive);
			/* pick the timing table for this chip generation */
			if (sc->sc_wdcdev.UDMA_cap == 6) {
				/* 8233a */
				udmatim_reg |= APO_UDMA_TIME(chp->channel,
				    drive, apollo_udma133_tim[drvp->UDMA_mode]);
			} else if (sc->sc_wdcdev.UDMA_cap == 5) {
				/* 686b */
				udmatim_reg |= APO_UDMA_TIME(chp->channel,
				    drive, apollo_udma100_tim[drvp->UDMA_mode]);
			} else if (sc->sc_wdcdev.UDMA_cap == 4) {
				/* 596b or 686a */
				udmatim_reg |= APO_UDMA_CLK66(chp->channel);
				udmatim_reg |= APO_UDMA_TIME(chp->channel,
				    drive, apollo_udma66_tim[drvp->UDMA_mode]);
			} else {
				/* 596a or 586b */
				udmatim_reg |= APO_UDMA_TIME(chp->channel,
				    drive, apollo_udma33_tim[drvp->UDMA_mode]);
			}
			/* can use PIO timings, MW DMA unused */
			mode = drvp->PIO_mode;
		} else {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			/* mode = min(pio, dma+2) */
			if (drvp->PIO_mode <= (drvp->DMA_mode +2))
				mode = drvp->PIO_mode;
			else
				mode = drvp->DMA_mode + 2;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

pio:		/* setup PIO mode */
		if (mode <= 2) {
			/* modes 0-2 get the default (compatible) timings */
			drvp->DMA_mode = 0;
			drvp->PIO_mode = 0;
			mode = 0;
		} else {
			drvp->PIO_mode = mode;
			drvp->DMA_mode = mode - 2;
		}
		datatim_reg |=
		    APO_DATATIM_PULSE(chp->channel, drive,
			apollo_pio_set[mode]) |
		    APO_DATATIM_RECOV(chp->channel, drive,
			apollo_pio_rec[mode]);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pciide_print_modes(cp);
	/* commit the new timings to PCI config space */
	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
}
2529
/*
 * Map one channel of a CMD64x controller: fake the programming
 * interface when the chip identifies as RAID, share the command queue
 * on chips without independent channels, and honour the 2PORT enable
 * bit for the secondary channel.
 */
void
cmd_channel_map(pa, sc, channel)
	struct pci_attach_args *pa;
	struct pciide_softc *sc;
	int channel;
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	bus_size_t cmdsize, ctlsize;
	u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
	int interface, one_channel;

	/*
	 * The 0648/0649 can be told to identify as a RAID controller.
	 * In this case, we have to fake interface
	 */
	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCIIDE_INTERFACE_SETTABLE(0) |
		    PCIIDE_INTERFACE_SETTABLE(1);
		if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
		    CMD_CONF_DSA1)
			interface |= PCIIDE_INTERFACE_PCI(0) |
			    PCIIDE_INTERFACE_PCI(1);
	} else {
		interface = PCI_INTERFACE(pa->pa_class);
	}

	sc->wdc_chanarray[channel] = &cp->wdc_channel;
	cp->name = PCIIDE_CHANNEL_NAME(channel);
	cp->wdc_channel.channel = channel;
	cp->wdc_channel.wdc = &sc->sc_wdcdev;

	/*
	 * Older CMD64X doesn't have independent channels
	 */
	switch (sc->sc_pp->ide_product) {
	case PCI_PRODUCT_CMDTECH_649:
		one_channel = 0;
		break;
	default:
		one_channel = 1;
		break;
	}

	/*
	 * On single-queue chips the secondary channel shares the
	 * primary's command queue; otherwise allocate a fresh one.
	 */
	if (channel > 0 && one_channel) {
		cp->wdc_channel.ch_queue =
		    sc->pciide_channels[0].wdc_channel.ch_queue;
	} else {
		cp->wdc_channel.ch_queue =
		    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
	}
	/* also catches a NULL queue inherited from a failed channel 0 */
	if (cp->wdc_channel.ch_queue == NULL) {
		aprint_error("%s %s channel: "
		    "can't allocate memory for command queue",
		sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return;
	}

	aprint_normal("%s: %s channel %s to %s mode\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
	    "configured" : "wired",
	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
	    "native-PCI" : "compatibility");

	/*
	 * with a CMD PCI64x, if we get here, the first channel is enabled:
	 * there's no way to disable the first channel without disabling
	 * the whole device
	 */
	if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
		aprint_normal("%s: %s channel ignored (disabled)\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return;
	}

	pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
	if (cp->hw_ok == 0)
		return;
	/* the secondary channel may be disabled via the 2PORT bit */
	if (channel == 1) {
		if (pciide_chan_candisable(cp)) {
			ctrl &= ~CMD_CTRL_2PORT;
			pciide_pci_write(pa->pa_pc, pa->pa_tag,
			    CMD_CTRL, ctrl);
		}
	}
	pciide_map_compat_intr(pa, cp, channel, interface);
}
2617
2618 int
2619 cmd_pci_intr(arg)
2620 void *arg;
2621 {
2622 struct pciide_softc *sc = arg;
2623 struct pciide_channel *cp;
2624 struct channel_softc *wdc_cp;
2625 int i, rv, crv;
2626 u_int32_t priirq, secirq;
2627
2628 rv = 0;
2629 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2630 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2631 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2632 cp = &sc->pciide_channels[i];
2633 wdc_cp = &cp->wdc_channel;
2634 /* If a compat channel skip. */
2635 if (cp->compat)
2636 continue;
2637 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2638 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2639 crv = wdcintr(wdc_cp);
2640 if (crv == 0)
2641 printf("%s:%d: bogus intr\n",
2642 sc->sc_wdcdev.sc_dev.dv_xname, i);
2643 else
2644 rv = 1;
2645 }
2646 }
2647 return rv;
2648 }
2649
2650 void
2651 cmd_chip_map(sc, pa)
2652 struct pciide_softc *sc;
2653 struct pci_attach_args *pa;
2654 {
2655 int channel;
2656
2657 /*
2658 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2659 * and base adresses registers can be disabled at
2660 * hardware level. In this case, the device is wired
2661 * in compat mode and its first channel is always enabled,
2662 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2663 * In fact, it seems that the first channel of the CMD PCI0640
2664 * can't be disabled.
2665 */
2666
2667 #ifdef PCIIDE_CMD064x_DISABLE
2668 if (pciide_chipen(sc, pa) == 0)
2669 return;
2670 #endif
2671
2672 aprint_normal("%s: hardware does not support DMA\n",
2673 sc->sc_wdcdev.sc_dev.dv_xname);
2674 sc->sc_dma_ok = 0;
2675
2676 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2677 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2678 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2679
2680 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2681 cmd_channel_map(pa, sc, channel);
2682 }
2683 }
2684
/*
 * Map a CMD 0643/0646/0648/0649 controller: probe for bus-master DMA,
 * select the UDMA capability per product (and, for the 0646, per chip
 * revision), then map and program each channel.
 */
void
cmd0643_9_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t rev = PCI_REVISION(pa->pa_class);

	/*
	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
	 * and base addresses registers can be disabled at
	 * hardware level. In this case, the device is wired
	 * in compat mode and its first channel is always enabled,
	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
	 * In fact, it seems that the first channel of the CMD PCI0640
	 * can't be disabled.
	 */

#ifdef PCIIDE_CMD064x_DISABLE
	if (pciide_chipen(sc, pa) == 0)
		return;
#endif
	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		/* per-product UDMA capability */
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_CMDTECH_649:
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
			sc->sc_wdcdev.UDMA_cap = 5;
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		case PCI_PRODUCT_CMDTECH_648:
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
			sc->sc_wdcdev.UDMA_cap = 4;
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		case PCI_PRODUCT_CMDTECH_646:
			/* 0646: UDMA support depends on the chip revision */
			if (rev >= CMD0646U2_REV) {
				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
				sc->sc_wdcdev.UDMA_cap = 2;
			} else if (rev >= CMD0646U_REV) {
			/*
			 * Linux's driver claims that the 646U is broken
			 * with UDMA. Only enable it if we know what we're
			 * doing
			 */
#ifdef PCIIDE_CMD0646U_ENABLEUDMA
				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
				sc->sc_wdcdev.UDMA_cap = 2;
#endif
				/* explicitly disable UDMA */
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(0), 0);
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(1), 0);
			}
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		default:
			sc->sc_wdcdev.irqack = pciide_irqack;
		}
	}

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;

	WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
	    DEBUG_PROBE);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		cmd_channel_map(pa, sc, channel);
		if (cp->hw_ok == 0)
			continue;
		cmd0643_9_setup_channel(&cp->wdc_channel);
	}
	/*
	 * note - this also makes sure we clear the irq disable and reset
	 * bits
	 */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
	WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
	    DEBUG_PROBE);
}
2782
/*
 * Program the per-drive timing registers of one CMD 0643/6/8/9 channel.
 * The same data timing register serves both PIO and MW DMA, so the DMA
 * mode may be lowered to keep a single timing value valid for both.
 */
void
cmd0643_9_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	u_int8_t tim;
	u_int32_t idedma_ctl, udma_reg;
	int drive;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	idedma_ctl = 0;
	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
		if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
			if (drvp->drive_flags & DRIVE_UDMA) {
				/* UltraDMA on a 646U2, 0648 or 0649 */
				drvp->drive_flags &= ~DRIVE_DMA;
				udma_reg = pciide_pci_read(sc->sc_pc,
				    sc->sc_tag, CMD_UDMATIM(chp->channel));
				/*
				 * cap at UDMA2 unless the 80-wire cable
				 * bit is set for this channel
				 */
				if (drvp->UDMA_mode > 2 &&
				    (pciide_pci_read(sc->sc_pc, sc->sc_tag,
				    CMD_BICSR) &
				    CMD_BICSR_80(chp->channel)) == 0)
					drvp->UDMA_mode = 2;
				if (drvp->UDMA_mode > 2)
					udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
				else if (sc->sc_wdcdev.UDMA_cap > 2)
					udma_reg |= CMD_UDMATIM_UDMA33(drive);
				udma_reg |= CMD_UDMATIM_UDMA(drive);
				udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
				    CMD_UDMATIM_TIM_OFF(drive));
				udma_reg |=
				    (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
				    CMD_UDMATIM_TIM_OFF(drive));
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(chp->channel), udma_reg);
			} else {
				/*
				 * use Multiword DMA.
				 * Timings will be used for both PIO and DMA,
				 * so adjust DMA mode if needed
				 * if we have a 0646U2/8/9, turn off UDMA
				 */
				if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
					udma_reg = pciide_pci_read(sc->sc_pc,
					    sc->sc_tag,
					    CMD_UDMATIM(chp->channel));
					udma_reg &= ~CMD_UDMATIM_UDMA(drive);
					pciide_pci_write(sc->sc_pc, sc->sc_tag,
					    CMD_UDMATIM(chp->channel),
					    udma_reg);
				}
				if (drvp->PIO_mode >= 3 &&
				    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
					drvp->DMA_mode = drvp->PIO_mode - 2;
				}
				tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
		/* commit the data timing for this drive */
		pciide_pci_write(sc->sc_pc, sc->sc_tag,
		    CMD_DATA_TIM(chp->channel, drive), tim);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pciide_print_modes(cp);
}
2863
2864 void
2865 cmd646_9_irqack(chp)
2866 struct channel_softc *chp;
2867 {
2868 u_int32_t priirq, secirq;
2869 struct pciide_channel *cp = (struct pciide_channel*)chp;
2870 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2871
2872 if (chp->channel == 0) {
2873 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2874 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2875 } else {
2876 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2877 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2878 }
2879 pciide_irqack(chp);
2880 }
2881
/*
 * Map a CMD/SiI 0680 controller: probe for bus-master DMA, advertise
 * up to UDMA mode 6, initialize global chip registers, then map and
 * program each channel.
 */
void
cmd680_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;
	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.UDMA_cap = 6;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.set_modes = cmd680_setup_channel;

	/*
	 * Initialize chip-global config registers.  NOTE(review):
	 * 0x80/0x84 appear to be per-channel mode registers and 0x8a
	 * the clock/system-config register (bit 0x01 set below; also
	 * tested in cmd680_setup_channel) -- confirm against the
	 * SiI0680 datasheet.
	 */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
	pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
	pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		cmd680_channel_map(pa, sc, channel);
		if (cp->hw_ok == 0)
			continue;
		cmd680_setup_channel(&cp->wdc_channel);
	}
}
2923
/*
 * Map one channel of a CMD/SiI 0680: fake a native-PCI interface when
 * the chip identifies as RAID, allocate the command queue, write the
 * channel's default timing register block, then map the channel.
 */
void
cmd680_channel_map(pa, sc, channel)
	struct pci_attach_args *pa;
	struct pciide_softc *sc;
	int channel;
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	bus_size_t cmdsize, ctlsize;
	int interface, i, reg;
	/* default per-channel timing register values; see writes below */
	static const u_int8_t init_val[] =
	    { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
	      0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };

	/* RAID personality: fake a fully-settable native-PCI interface */
	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCIIDE_INTERFACE_SETTABLE(0) |
		    PCIIDE_INTERFACE_SETTABLE(1);
		interface |= PCIIDE_INTERFACE_PCI(0) |
		    PCIIDE_INTERFACE_PCI(1);
	} else {
		interface = PCI_INTERFACE(pa->pa_class);
	}

	sc->wdc_chanarray[channel] = &cp->wdc_channel;
	cp->name = PCIIDE_CHANNEL_NAME(channel);
	cp->wdc_channel.channel = channel;
	cp->wdc_channel.wdc = &sc->sc_wdcdev;

	cp->wdc_channel.ch_queue =
	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
	if (cp->wdc_channel.ch_queue == NULL) {
		aprint_error("%s %s channel: "
		    "can't allocate memory for command queue",
		sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
		return;
	}

	/* XXX */
	/* write the channel's default timings (register block at 0xa2/0xb2) */
	reg = 0xa2 + channel * 16;
	for (i = 0; i < sizeof(init_val); i++)
		pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);

	aprint_normal("%s: %s channel %s to %s mode\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
	    "configured" : "wired",
	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
	    "native-PCI" : "compatibility");

	pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
	if (cp->hw_ok == 0)
		return;
	pciide_map_compat_intr(pa, cp, channel, interface);
}
2977
2978 void
2979 cmd680_setup_channel(chp)
2980 struct channel_softc *chp;
2981 {
2982 struct ata_drive_datas *drvp;
2983 u_int8_t mode, off, scsc;
2984 u_int16_t val;
2985 u_int32_t idedma_ctl;
2986 int drive;
2987 struct pciide_channel *cp = (struct pciide_channel*)chp;
2988 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2989 pci_chipset_tag_t pc = sc->sc_pc;
2990 pcitag_t pa = sc->sc_tag;
2991 static const u_int8_t udma2_tbl[] =
2992 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
2993 static const u_int8_t udma_tbl[] =
2994 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
2995 static const u_int16_t dma_tbl[] =
2996 { 0x2208, 0x10c2, 0x10c1 };
2997 static const u_int16_t pio_tbl[] =
2998 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
2999
3000 idedma_ctl = 0;
3001 pciide_channel_dma_setup(cp);
3002 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4);
3003
3004 for (drive = 0; drive < 2; drive++) {
3005 drvp = &chp->ch_drive[drive];
3006 /* If no drive, skip */
3007 if ((drvp->drive_flags & DRIVE) == 0)
3008 continue;
3009 mode &= ~(0x03 << (drive * 4));
3010 if (drvp->drive_flags & DRIVE_UDMA) {
3011 drvp->drive_flags &= ~DRIVE_DMA;
3012 off = 0xa0 + chp->channel * 16;
3013 if (drvp->UDMA_mode > 2 &&
3014 (pciide_pci_read(pc, pa, off) & 0x01) == 0)
3015 drvp->UDMA_mode = 2;
3016 scsc = pciide_pci_read(pc, pa, 0x8a);
3017 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
3018 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
3019 scsc = pciide_pci_read(pc, pa, 0x8a);
3020 if ((scsc & 0x30) == 0)
3021 drvp->UDMA_mode = 5;
3022 }
3023 mode |= 0x03 << (drive * 4);
3024 off = 0xac + chp->channel * 16 + drive * 2;
3025 val = pciide_pci_read(pc, pa, off) & ~0x3f;
3026 if (scsc & 0x30)
3027 val |= udma2_tbl[drvp->UDMA_mode];
3028 else
3029 val |= udma_tbl[drvp->UDMA_mode];
3030 pciide_pci_write(pc, pa, off, val);
3031 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3032 } else if (drvp->drive_flags & DRIVE_DMA) {
3033 mode |= 0x02 << (drive * 4);
3034 off = 0xa8 + chp->channel * 16 + drive * 2;
3035 val = dma_tbl[drvp->DMA_mode];
3036 pciide_pci_write(pc, pa, off, val & 0xff);
3037 pciide_pci_write(pc, pa, off, val >> 8);
3038 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3039 } else {
3040 mode |= 0x01 << (drive * 4);
3041 off = 0xa4 + chp->channel * 16 + drive * 2;
3042 val = pio_tbl[drvp->PIO_mode];
3043 pciide_pci_write(pc, pa, off, val & 0xff);
3044 pciide_pci_write(pc, pa, off, val >> 8);
3045 }
3046 }
3047
3048 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
3049 if (idedma_ctl != 0) {
3050 /* Add software bits in status register */
3051 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3052 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3053 idedma_ctl);
3054 }
3055 pciide_print_modes(cp);
3056 }
3057
/*
 * Attach-time setup for the Silicon Image SiI3112 S-ATA controller:
 * map the bus-master DMA registers, advertise PIO4/DMA2/UDMA6
 * capabilities, work around an early-revision DMA erratum, and map
 * both channels.
 */
void
cmd3112_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");

	/*
	 * Rev. <= 0x01 of the 3112 have a bug that can cause data
	 * corruption if DMA transfers cross an 8K boundary. This is
	 * apparently hard to tickle, but we'll go ahead and play it
	 * safe.
	 */
	if (PCI_REVISION(pa->pa_class) <= 0x01) {
		sc->sc_dma_maxsegsz = 8192;
		sc->sc_dma_boundary = 8192;
	}

	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.PIO_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.DMA_cap = 2;
		sc->sc_wdcdev.UDMA_cap = 6;
	}
	sc->sc_wdcdev.set_modes = cmd3112_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/*
	 * The 3112 can be told to identify as a RAID controller.
	 * In this case, we have to fake interface
	 */
	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCI_INTERFACE(pa->pa_class);
	} else {
		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
	}

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		pciide_map_compat_intr(pa, cp, channel, interface);
		cmd3112_setup_channel(&cp->wdc_channel);
	}
}
3125
3126 void
3127 cmd3112_setup_channel(chp)
3128 struct channel_softc *chp;
3129 {
3130 struct ata_drive_datas *drvp;
3131 int drive;
3132 u_int32_t idedma_ctl, dtm;
3133 struct pciide_channel *cp = (struct pciide_channel*)chp;
3134 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
3135
3136 /* setup DMA if needed */
3137 pciide_channel_dma_setup(cp);
3138
3139 idedma_ctl = 0;
3140 dtm = 0;
3141
3142 for (drive = 0; drive < 2; drive++) {
3143 drvp = &chp->ch_drive[drive];
3144 /* If no drive, skip */
3145 if ((drvp->drive_flags & DRIVE) == 0)
3146 continue;
3147 if (drvp->drive_flags & DRIVE_UDMA) {
3148 /* use Ultra/DMA */
3149 drvp->drive_flags &= ~DRIVE_DMA;
3150 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3151 dtm |= DTM_IDEx_DMA;
3152 } else if (drvp->drive_flags & DRIVE_DMA) {
3153 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3154 dtm |= DTM_IDEx_DMA;
3155 } else {
3156 dtm |= DTM_IDEx_PIO;
3157 }
3158 }
3159
3160 /*
3161 * Nothing to do to setup modes; it is meaningless in S-ATA
3162 * (but many S-ATA drives still want to get the SET_FEATURE
3163 * command).
3164 */
3165 if (idedma_ctl != 0) {
3166 /* Add software bits in status register */
3167 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3168 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3169 idedma_ctl);
3170 }
3171 pci_conf_write(sc->sc_pc, sc->sc_tag,
3172 chp->channel == 0 ? SII3112_DTM_IDE0 : SII3112_DTM_IDE1, dtm);
3173 pciide_print_modes(cp);
3174 }
3175
/*
 * Attach-time setup for the Cypress CY82C693 IDE controller.  This
 * chip exposes one IDE channel per PCI function (function 1 ->
 * primary, function 2 -> secondary), so only a single wdc channel is
 * attached here and the compat mapping uses the real channel number
 * (sc_cy_compatchan).
 */
void
cy693_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;
	/*
	 * this chip has 2 PCI IDE functions, one for primary and one for
	 * secondary. So we need to call pciide_mapregs_compat() with
	 * the real channel
	 */
	if (pa->pa_function == 1) {
		sc->sc_cy_compatchan = 0;
	} else if (pa->pa_function == 2) {
		sc->sc_cy_compatchan = 1;
	} else {
		aprint_error("%s: unexpected PCI function %d\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
		return;
	}
	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
		aprint_normal("%s: bus-master DMA support present",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		pciide_mapreg_dma(sc, pa);
	} else {
		aprint_normal("%s: hardware does not support DMA",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		sc->sc_dma_ok = 0;
	}
	aprint_normal("\n");

	/*
	 * The hyperCache handle is needed by cy693_setup_channel() to
	 * program the DMA mode; without it we must run without DMA.
	 */
	sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
	if (sc->sc_cy_handle == NULL) {
		aprint_error("%s: unable to map hyperCache control registers\n",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		sc->sc_dma_ok = 0;
	}

	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.set_modes = cy693_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = 1;

	/* Only one channel for this chip; if we are here it's enabled */
	cp = &sc->pciide_channels[0];
	sc->wdc_chanarray[0] = &cp->wdc_channel;
	cp->name = PCIIDE_CHANNEL_NAME(0);
	cp->wdc_channel.channel = 0;
	cp->wdc_channel.wdc = &sc->sc_wdcdev;
	cp->wdc_channel.ch_queue =
	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
	if (cp->wdc_channel.ch_queue == NULL) {
		aprint_error("%s primary channel: "
		    "can't allocate memory for command queue",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		return;
	}
	aprint_normal("%s: primary channel %s to ",
	    sc->sc_wdcdev.sc_dev.dv_xname,
	    (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
	    "configured" : "wired");
	if (interface & PCIIDE_INTERFACE_PCI(0)) {
		aprint_normal("native-PCI");
		cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
		    pciide_pci_intr);
	} else {
		aprint_normal("compatibility");
		cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
		    &cmdsize, &ctlsize);
	}
	aprint_normal(" mode\n");
	cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
	cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
	wdcattach(&cp->wdc_channel);
	/*
	 * NOTE(review): the PCI function is turned off here *after*
	 * wdcattach() — presumably pciide_chan_candisable() is only true
	 * when no drives were found; confirm before restructuring.
	 */
	if (pciide_chan_candisable(cp)) {
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    PCI_COMMAND_STATUS_REG, 0);
	}
	pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
	if (cp->hw_ok == 0)
		return;
	WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
	cy693_setup_channel(&cp->wdc_channel);
	WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
}
3276
3277 void
3278 cy693_setup_channel(chp)
3279 struct channel_softc *chp;
3280 {
3281 struct ata_drive_datas *drvp;
3282 int drive;
3283 u_int32_t cy_cmd_ctrl;
3284 u_int32_t idedma_ctl;
3285 struct pciide_channel *cp = (struct pciide_channel*)chp;
3286 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3287 int dma_mode = -1;
3288
3289 cy_cmd_ctrl = idedma_ctl = 0;
3290
3291 /* setup DMA if needed */
3292 pciide_channel_dma_setup(cp);
3293
3294 for (drive = 0; drive < 2; drive++) {
3295 drvp = &chp->ch_drive[drive];
3296 /* If no drive, skip */
3297 if ((drvp->drive_flags & DRIVE) == 0)
3298 continue;
3299 /* add timing values, setup DMA if needed */
3300 if (drvp->drive_flags & DRIVE_DMA) {
3301 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3302 /* use Multiword DMA */
3303 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
3304 dma_mode = drvp->DMA_mode;
3305 }
3306 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3307 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
3308 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3309 CY_CMD_CTRL_IOW_REC_OFF(drive));
3310 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3311 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
3312 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3313 CY_CMD_CTRL_IOR_REC_OFF(drive));
3314 }
3315 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
3316 chp->ch_drive[0].DMA_mode = dma_mode;
3317 chp->ch_drive[1].DMA_mode = dma_mode;
3318
3319 if (dma_mode == -1)
3320 dma_mode = 0;
3321
3322 if (sc->sc_cy_handle != NULL) {
3323 /* Note: `multiple' is implied. */
3324 cy82c693_write(sc->sc_cy_handle,
3325 (sc->sc_cy_compatchan == 0) ?
3326 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
3327 }
3328
3329 pciide_print_modes(cp);
3330
3331 if (idedma_ctl != 0) {
3332 /* Add software bits in status register */
3333 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3334 IDEDMA_CTL, idedma_ctl);
3335 }
3336 }
3337
/*
 * Known SiS host bridges and the IDE feature level they imply.
 * `rev' is the minimum PCI revision for an entry to apply;
 * sis_hostbr_match() scans the whole table and keeps the *last*
 * match, so a later entry with a higher `rev' overrides an earlier
 * one for the same product (e.g. the two 630 entries).
 */
static struct sis_hostbr_type {
	u_int16_t id;		/* PCI product ID of the host bridge */
	u_int8_t rev;		/* minimum PCI revision to match */
	u_int8_t udma_mode;	/* highest UDMA mode supported */
	char *name;
	u_int8_t type;		/* one of the SIS_TYPE_* values below */
#define SIS_TYPE_NOUDMA	0
#define SIS_TYPE_66	1
#define SIS_TYPE_100OLD	2
#define SIS_TYPE_100NEW 3
#define SIS_TYPE_133OLD 4
#define SIS_TYPE_133NEW 5
#define SIS_TYPE_SOUTH	6
} sis_hostbr_type[] = {
	/* Most infos here are from sos (at) freebsd.org */
	{PCI_PRODUCT_SIS_530HB, 0x00, 4, "530", SIS_TYPE_66},
#if 0
	/*
	 * controllers associated to a rev 0x2 530 Host to PCI Bridge
	 * have problems with UDMA (info provided by Christos)
	 */
	{PCI_PRODUCT_SIS_530HB, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA},
#endif
	{PCI_PRODUCT_SIS_540HB, 0x00, 4, "540", SIS_TYPE_66},
	{PCI_PRODUCT_SIS_550HB, 0x00, 4, "550", SIS_TYPE_66},
	{PCI_PRODUCT_SIS_620,   0x00, 4, "620", SIS_TYPE_66},
	{PCI_PRODUCT_SIS_630,   0x00, 4, "630", SIS_TYPE_66},
	{PCI_PRODUCT_SIS_630,   0x30, 5, "630S", SIS_TYPE_100NEW},
	{PCI_PRODUCT_SIS_633,   0x00, 5, "633", SIS_TYPE_100NEW},
	{PCI_PRODUCT_SIS_635,   0x00, 5, "635", SIS_TYPE_100NEW},
	{PCI_PRODUCT_SIS_640,   0x00, 4, "640", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_645,   0x00, 6, "645", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_646,   0x00, 6, "645DX", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_648,   0x00, 6, "648", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_650,   0x00, 6, "650", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_651,   0x00, 6, "651", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_652,   0x00, 6, "652", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_655,   0x00, 6, "655", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_658,   0x00, 6, "658", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_730,   0x00, 5, "730", SIS_TYPE_100OLD},
	{PCI_PRODUCT_SIS_733,   0x00, 5, "733", SIS_TYPE_100NEW},
	{PCI_PRODUCT_SIS_735,   0x00, 5, "735", SIS_TYPE_100NEW},
	{PCI_PRODUCT_SIS_740,   0x00, 5, "740", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_745,   0x00, 5, "745", SIS_TYPE_100NEW},
	{PCI_PRODUCT_SIS_746,   0x00, 6, "746", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_748,   0x00, 6, "748", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_750,   0x00, 6, "750", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_751,   0x00, 6, "751", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_752,   0x00, 6, "752", SIS_TYPE_SOUTH},
	{PCI_PRODUCT_SIS_755,   0x00, 6, "755", SIS_TYPE_SOUTH},
	/*
	 * From sos (at) freebsd.org: the 0x961 ID will never be found in real world
	 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW},
	 */
	{PCI_PRODUCT_SIS_962,   0x00, 6, "962", SIS_TYPE_133NEW},
	{PCI_PRODUCT_SIS_963,   0x00, 6, "963", SIS_TYPE_133NEW},
};

/* Set by sis_hostbr_match() to the matching table entry, if any. */
static struct sis_hostbr_type *sis_hostbr_type_match;
3397
3398 static int
3399 sis_hostbr_match(pa)
3400 struct pci_attach_args *pa;
3401 {
3402 int i;
3403 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS)
3404 return 0;
3405 sis_hostbr_type_match = NULL;
3406 for (i = 0;
3407 i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]);
3408 i++) {
3409 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id &&
3410 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev)
3411 sis_hostbr_type_match = &sis_hostbr_type[i];
3412 }
3413 return (sis_hostbr_type_match != NULL);
3414 }
3415
3416 static int sis_south_match(pa)
3417 struct pci_attach_args *pa;
3418 {
3419 return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS &&
3420 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 &&
3421 PCI_REVISION(pa->pa_class) >= 0x10);
3422 }
3423
/*
 * Attach-time setup for SiS IDE controllers.  The feature level
 * (sc->sis_type) cannot be derived from the IDE function itself: it
 * is determined by probing the bus for the host bridge
 * (sis_hostbr_match) and, for SIS_TYPE_SOUTH bridges, for the 85C503
 * south bridge (sis_south_match).
 */
void
sis_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcireg_t rev = PCI_REVISION(pa->pa_class);
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;
	aprint_normal(": Silicon Integrated System ");
	/* probe for the host bridge; sets sis_hostbr_type_match */
	pci_find_device(NULL, sis_hostbr_match);
	if (sis_hostbr_type_match) {
		if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) {
			pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57,
			    pciide_pci_read(sc->sc_pc, sc->sc_tag,
			    SIS_REG_57) & 0x7f);
			/* with bit 7 of reg 0x57 clear, a 96x shows its own ID */
			if (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
			    PCI_ID_REG)) == SIS_PRODUCT_5518) {
				aprint_normal("96X UDMA%d",
				    sis_hostbr_type_match->udma_mode);
				sc->sis_type = SIS_TYPE_133NEW;
				sc->sc_wdcdev.UDMA_cap =
				    sis_hostbr_type_match->udma_mode;
			} else {
				if (pci_find_device(NULL, sis_south_match)) {
					sc->sis_type = SIS_TYPE_133OLD;
					sc->sc_wdcdev.UDMA_cap =
					    sis_hostbr_type_match->udma_mode;
				} else {
					sc->sis_type = SIS_TYPE_100NEW;
					sc->sc_wdcdev.UDMA_cap =
					    sis_hostbr_type_match->udma_mode;
				}
			}
		} else {
			sc->sis_type = sis_hostbr_type_match->type;
			sc->sc_wdcdev.UDMA_cap =
			    sis_hostbr_type_match->udma_mode;
		}
		/*
		 * `name' is used as the format string; it comes from the
		 * static table above and contains no % sequences.
		 */
		aprint_normal(sis_hostbr_type_match->name);
	} else {
		/* no known host bridge found: assume a plain 5597/5598 */
		aprint_normal("5597/5598");
		if (rev >= 0xd0) {
			sc->sc_wdcdev.UDMA_cap = 2;
			sc->sis_type = SIS_TYPE_66;
		} else {
			sc->sc_wdcdev.UDMA_cap = 0;
			sc->sis_type = SIS_TYPE_NOUDMA;
		}
	}
	aprint_normal(" IDE controller (rev. 0x%02x)\n",
	    PCI_REVISION(pa->pa_class));
	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");

	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		if (sc->sis_type >= SIS_TYPE_66)
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
	}

	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
	/* per-type chip quirks and mode-setting routine selection */
	switch(sc->sis_type) {
	case SIS_TYPE_NOUDMA:
	case SIS_TYPE_66:
	case SIS_TYPE_100OLD:
		sc->sc_wdcdev.set_modes = sis_setup_channel;
		pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
		    SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC);
		break;
	case SIS_TYPE_100NEW:
	case SIS_TYPE_133OLD:
		sc->sc_wdcdev.set_modes = sis_setup_channel;
		pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01);
		break;
	case SIS_TYPE_133NEW:
		sc->sc_wdcdev.set_modes = sis96x_setup_channel;
		pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7);
		pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7);
		break;
	}


	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		/* honor the per-channel enable bits in SIS_CTRL0 */
		if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
		    (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
			aprint_normal("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		if (pciide_chan_candisable(cp)) {
			if (channel == 0)
				sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
			else
				sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
			pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
			    sis_ctr0);
		}
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
	}
}
3553
/*
 * Per-channel timing setup for the SiS 96x family (SIS_TYPE_133NEW).
 * Each drive has its own timing register; its location is computed by
 * SIS_TIM133() from the contents of register 0x57.
 */
void
sis96x_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	int drive;
	u_int32_t sis_tim;
	u_int32_t idedma_ctl;
	int regtim;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	/*
	 * NOTE(review): sis_tim is only cleared once, before the loop, so
	 * drive 1's register also receives drive 0's timing bits — confirm
	 * whether it should be reset per drive.
	 */
	sis_tim = 0;
	idedma_ctl = 0;
	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		regtim = SIS_TIM133(
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57),
		    chp->channel, drive);
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		if (drvp->drive_flags & DRIVE_UDMA) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			/* cable check: cap at UDMA2 if the 33-only bit is set */
			if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
			    SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) {
				if (drvp->UDMA_mode > 2)
					drvp->UDMA_mode = 2;
			}
			sis_tim |= sis_udma133new_tim[drvp->UDMA_mode];
			sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if (drvp->drive_flags & DRIVE_DMA) {
			/*
			 * use Multiword DMA
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed
			 */
			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
				drvp->PIO_mode = drvp->DMA_mode + 2;
			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
				    drvp->PIO_mode - 2 : 0;
			sis_tim |= sis_dma133new_tim[drvp->DMA_mode];
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
		}
		WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for "
		    "channel %d drive %d: 0x%x (reg 0x%x)\n",
		    chp->channel, drive, sis_tim, regtim), DEBUG_PROBE);
		pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL+ (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pciide_print_modes(cp);
}
3620
3621 void
3622 sis_setup_channel(chp)
3623 struct channel_softc *chp;
3624 {
3625 struct ata_drive_datas *drvp;
3626 int drive;
3627 u_int32_t sis_tim;
3628 u_int32_t idedma_ctl;
3629 struct pciide_channel *cp = (struct pciide_channel*)chp;
3630 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3631
3632 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3633 "channel %d 0x%x\n", chp->channel,
3634 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3635 DEBUG_PROBE);
3636 sis_tim = 0;
3637 idedma_ctl = 0;
3638 /* setup DMA if needed */
3639 pciide_channel_dma_setup(cp);
3640
3641 for (drive = 0; drive < 2; drive++) {
3642 drvp = &chp->ch_drive[drive];
3643 /* If no drive, skip */
3644 if ((drvp->drive_flags & DRIVE) == 0)
3645 continue;
3646 /* add timing values, setup DMA if needed */
3647 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3648 (drvp->drive_flags & DRIVE_UDMA) == 0)
3649 goto pio;
3650
3651 if (drvp->drive_flags & DRIVE_UDMA) {
3652 /* use Ultra/DMA */
3653 drvp->drive_flags &= ~DRIVE_DMA;
3654 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3655 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) {
3656 if (drvp->UDMA_mode > 2)
3657 drvp->UDMA_mode = 2;
3658 }
3659 switch (sc->sis_type) {
3660 case SIS_TYPE_66:
3661 case SIS_TYPE_100OLD:
3662 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] <<
3663 SIS_TIM66_UDMA_TIME_OFF(drive);
3664 break;
3665 case SIS_TYPE_100NEW:
3666 sis_tim |=
3667 sis_udma100new_tim[drvp->UDMA_mode] <<
3668 SIS_TIM100_UDMA_TIME_OFF(drive);
3669 case SIS_TYPE_133OLD:
3670 sis_tim |=
3671 sis_udma133old_tim[drvp->UDMA_mode] <<
3672 SIS_TIM100_UDMA_TIME_OFF(drive);
3673 break;
3674 default:
3675 aprint_error("unknown SiS IDE type %d\n",
3676 sc->sis_type);
3677 }
3678 } else {
3679 /*
3680 * use Multiword DMA
3681 * Timings will be used for both PIO and DMA,
3682 * so adjust DMA mode if needed
3683 */
3684 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3685 drvp->PIO_mode = drvp->DMA_mode + 2;
3686 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3687 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3688 drvp->PIO_mode - 2 : 0;
3689 if (drvp->DMA_mode == 0)
3690 drvp->PIO_mode = 0;
3691 }
3692 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3693 pio: switch (sc->sis_type) {
3694 case SIS_TYPE_NOUDMA:
3695 case SIS_TYPE_66:
3696 case SIS_TYPE_100OLD:
3697 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3698 SIS_TIM66_ACT_OFF(drive);
3699 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3700 SIS_TIM66_REC_OFF(drive);
3701 break;
3702 case SIS_TYPE_100NEW:
3703 case SIS_TYPE_133OLD:
3704 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3705 SIS_TIM100_ACT_OFF(drive);
3706 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3707 SIS_TIM100_REC_OFF(drive);
3708 break;
3709 default:
3710 aprint_error("unknown SiS IDE type %d\n",
3711 sc->sis_type);
3712 }
3713 }
3714 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3715 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3716 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3717 if (idedma_ctl != 0) {
3718 /* Add software bits in status register */
3719 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3720 IDEDMA_CTL+ (IDEDMA_SCH_OFFSET * chp->channel),
3721 idedma_ctl);
3722 }
3723 pciide_print_modes(cp);
3724 }
3725
/*
 * Attach-time setup for Acer Labs (ALi) IDE controllers: map the
 * bus-master DMA registers, derive the UDMA capability from the chip
 * revision, enable the CD-ROM DMA and channel-status chip bits, and
 * map both channels.
 */
void
acer_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t cr, interface;
	bus_size_t cmdsize, ctlsize;
	pcireg_t rev = PCI_REVISION(pa->pa_class);

	if (pciide_chipen(sc, pa) == 0)
		return;
	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
		/* UDMA capability depends on the chip revision */
		if (rev >= 0x20) {
			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
			if (rev >= 0xC4)
				sc->sc_wdcdev.UDMA_cap = 5;
			else if (rev >= 0xC2)
				sc->sc_wdcdev.UDMA_cap = 4;
			else
				sc->sc_wdcdev.UDMA_cap = 2;
		}
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}

	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.set_modes = acer_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* enable CD-ROM DMA and keep the FIFO enabled */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
	    (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
	    ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);

	/* Enable "microsoft register bits" R/W. */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
	    ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
	    ~ACER_CHANSTATUSREGS_RO);
	cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
	cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
	/* Don't use cr, re-read the real register content instead */
	interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
	    PCI_CLASS_REG));

	/* From linux: enable "Cable Detection" */
	if (rev >= 0xC2) {
		pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
		    | ACER_0x4B_CDETECT);
	}

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
			aprint_normal("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		/* newer controllers seems to lack the ACER_CHIDS. Sigh */
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		if (pciide_chan_candisable(cp)) {
			cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    PCI_CLASS_REG, cr);
		}
		pciide_map_compat_intr(pa, cp, channel, interface);
		acer_setup_channel(&cp->wdc_channel);
	}
}
3816
/*
 * Per-channel mode setup for Acer Labs IDE controllers.  Programs the
 * combined FIFO-threshold/UDMA register (ACER_FTH_UDMA) and the
 * per-drive PIO timing register (ACER_IDETIM), then latches the
 * software DMA status bits.
 */
void
acer_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	int drive;
	u_int32_t acer_fifo_udma;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;

	idedma_ctl = 0;
	acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
	WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
	    acer_fifo_udma), DEBUG_PROBE);
	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
	    DRIVE_UDMA) { /* check 80 pins cable */
		if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
		    ACER_0x4A_80PIN(chp->channel)) {
			/* not an 80-pin cable: both drives capped at UDMA2 */
			if (chp->ch_drive[0].UDMA_mode > 2)
				chp->ch_drive[0].UDMA_mode = 2;
			if (chp->ch_drive[1].UDMA_mode > 2)
				chp->ch_drive[1].UDMA_mode = 2;
		}
	}

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
		    "channel %d drive %d 0x%x\n", chp->channel, drive,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag,
		    ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
		/* clear FIFO/DMA mode */
		acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
		    ACER_UDMA_EN(chp->channel, drive) |
		    ACER_UDMA_TIM(chp->channel, drive, 0x7));

		/* add timing values, setup DMA if needed */
		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) {
			acer_fifo_udma |=
			    ACER_FTH_OPL(chp->channel, drive, 0x1);
			goto pio;
		}

		acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
		if (drvp->drive_flags & DRIVE_UDMA) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
			acer_fifo_udma |=
			    ACER_UDMA_TIM(chp->channel, drive,
				acer_udma[drvp->UDMA_mode]);
			/* XXX disable if one drive < UDMA3 ? */
			if (drvp->UDMA_mode >= 3) {
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    ACER_0x4B,
				    pciide_pci_read(sc->sc_pc, sc->sc_tag,
					ACER_0x4B) | ACER_0x4B_UDMA66);
			}
		} else {
			/*
			 * use Multiword DMA
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed
			 */
			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
				drvp->PIO_mode = drvp->DMA_mode + 2;
			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
				    drvp->PIO_mode - 2 : 0;
			if (drvp->DMA_mode == 0)
				drvp->PIO_mode = 0;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
pio:		pciide_pci_write(sc->sc_pc, sc->sc_tag,
		    ACER_IDETIM(chp->channel, drive),
		    acer_pio[drvp->PIO_mode]);
	}
	WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
	    acer_fifo_udma), DEBUG_PROBE);
	pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pciide_print_modes(cp);
}
3913
3914 int
3915 acer_pci_intr(arg)
3916 void *arg;
3917 {
3918 struct pciide_softc *sc = arg;
3919 struct pciide_channel *cp;
3920 struct channel_softc *wdc_cp;
3921 int i, rv, crv;
3922 u_int32_t chids;
3923
3924 rv = 0;
3925 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3926 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3927 cp = &sc->pciide_channels[i];
3928 wdc_cp = &cp->wdc_channel;
3929 /* If a compat channel skip. */
3930 if (cp->compat)
3931 continue;
3932 if (chids & ACER_CHIDS_INT(i)) {
3933 crv = wdcintr(wdc_cp);
3934 if (crv == 0)
3935 printf("%s:%d: bogus intr\n",
3936 sc->sc_wdcdev.sc_dev.dv_xname, i);
3937 else
3938 rv = 1;
3939 }
3940 }
3941 return rv;
3942 }
3943
/*
 * Chip map (probe/attach glue) for the Triones/Highpoint HPT36x/37x
 * family.  Several distinct chips share the HPT366 PCI product ID and
 * are told apart by PCI revision; the HPT372 and HPT374 have their own
 * product IDs.  Sets controller capabilities, maps and attaches each
 * channel, then applies family-wide register fixups.
 */
void
hpt_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int i, compatchan, revision;
	pcireg_t interface;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;
	revision = PCI_REVISION(pa->pa_class);
	aprint_normal(": Triones/Highpoint ");
	if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
		aprint_normal("HPT374 IDE Controller\n");
	else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
		aprint_normal("HPT372 IDE Controller\n");
	else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
		/* the HPT366 product ID covers four chip revisions */
		if (revision == HPT372_REV)
			aprint_normal("HPT372 IDE Controller\n");
		else if (revision == HPT370_REV)
			aprint_normal("HPT370 IDE Controller\n");
		else if (revision == HPT370A_REV)
			aprint_normal("HPT370A IDE Controller\n");
		else if (revision == HPT366_REV)
			aprint_normal("HPT366 IDE Controller\n");
		else
			aprint_normal("unknown HPT IDE controller rev %d\n",
			    revision);
	} else
		aprint_normal("unknown HPT IDE controller 0x%x\n",
		    sc->sc_pp->ide_product);

	/*
	 * when the chip is in native mode it identifies itself as a
	 * 'misc mass storage'. Fake interface in this case.
	 */
	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCI_INTERFACE(pa->pa_class);
	} else {
		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
		    PCIIDE_INTERFACE_PCI(0);
		/* HPT370 and later are dual-channel single-function parts */
		if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
		    (revision == HPT370_REV || revision == HPT370A_REV ||
		    revision == HPT372_REV)) ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
			interface |= PCIIDE_INTERFACE_PCI(1);
	}

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;

	sc->sc_wdcdev.set_modes = hpt_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
	    revision == HPT366_REV) {
		sc->sc_wdcdev.UDMA_cap = 4;
		/*
		 * The 366 has 2 PCI IDE functions, one for primary and one
		 * for secondary. So we need to call pciide_mapregs_compat()
		 * with the real channel
		 */
		if (pa->pa_function == 0) {
			compatchan = 0;
		} else if (pa->pa_function == 1) {
			compatchan = 1;
		} else {
			aprint_error("%s: unexpected PCI function %d\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
			return;
		}
		sc->sc_wdcdev.nchannels = 1;
	} else {
		sc->sc_wdcdev.nchannels = 2;
		if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
		    (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
		    revision == HPT372_REV))
			sc->sc_wdcdev.UDMA_cap = 6;
		else
			sc->sc_wdcdev.UDMA_cap = 5;
	}
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		if (sc->sc_wdcdev.nchannels > 1) {
			compatchan = i;
			/* honor the per-channel enable bit on dual parts */
			if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
			   HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
				aprint_normal(
				    "%s: %s channel ignored (disabled)\n",
				    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
				continue;
			}
		}
		if (pciide_chansetup(sc, i, interface) == 0)
			continue;
		if (interface & PCIIDE_INTERFACE_PCI(i)) {
			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
			    &ctlsize, hpt_pci_intr);
		} else {
			cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
			    &cmdsize, &ctlsize);
		}
		if (cp->hw_ok == 0)
			return;
		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
		wdcattach(&cp->wdc_channel);
		hpt_setup_channel(&cp->wdc_channel);
	}
	if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
	    (revision == HPT370_REV || revision == HPT370A_REV ||
	    revision == HPT372_REV)) ||
	    sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
	    sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
		/*
		 * HPT370_REV and higher have a bit to disable interrupts,
		 * make sure to clear it
		 */
		pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
		    ~HPT_CSEL_IRQDIS);
	}
	/* set clocks, etc (mandatory on 372/4, optional otherwise) */
	if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
	    revision == HPT372_REV ) ||
	    sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
	    sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
		pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
		    (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
		     HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
	return;
}
4090
/*
 * Per-channel mode setup for the HPT36x/37x family.  For each present
 * drive, pick the timing word from the per-chip (and, under the shared
 * HPT366 product ID, per-revision) UDMA/DMA/PIO tables and write it to
 * the drive's HPT_IDETIM config register.
 */
void
hpt_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	int drive;
	int cable;
	u_int32_t before, after;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int revision =
	    PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));

	/* cable-type bits live in HPT_CSEL; used to cap UDMA below */
	cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	idedma_ctl = 0;

	/* Per drive settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* old timing value, only reported in the debug printout */
		before = pci_conf_read(sc->sc_pc, sc->sc_tag,
					HPT_IDETIM(chp->channel, drive));

		/* add timing values, setup DMA if needed */
		if (drvp->drive_flags & DRIVE_UDMA) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			/* cap at UDMA2 when the cable bit says 40-wire */
			if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
			    drvp->UDMA_mode > 2)
				drvp->UDMA_mode = 2;
			switch (sc->sc_pp->ide_product) {
			case PCI_PRODUCT_TRIONES_HPT374:
				after = hpt374_udma[drvp->UDMA_mode];
				break;
			case PCI_PRODUCT_TRIONES_HPT372:
				after = hpt372_udma[drvp->UDMA_mode];
				break;
			case PCI_PRODUCT_TRIONES_HPT366:
			default:
				switch(revision) {
				case HPT372_REV:
					after = hpt372_udma[drvp->UDMA_mode];
					break;
				case HPT370_REV:
				case HPT370A_REV:
					after = hpt370_udma[drvp->UDMA_mode];
					break;
				case HPT366_REV:
				default:
					after = hpt366_udma[drvp->UDMA_mode];
					break;
				}
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if (drvp->drive_flags & DRIVE_DMA) {
			/*
			 * use Multiword DMA.
			 * Timings will be used for both PIO and DMA, so adjust
			 * DMA mode if needed
			 */
			if (drvp->PIO_mode >= 3 &&
			    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
				drvp->DMA_mode = drvp->PIO_mode - 2;
			}
			switch (sc->sc_pp->ide_product) {
			case PCI_PRODUCT_TRIONES_HPT374:
				after = hpt374_dma[drvp->DMA_mode];
				break;
			case PCI_PRODUCT_TRIONES_HPT372:
				after = hpt372_dma[drvp->DMA_mode];
				break;
			case PCI_PRODUCT_TRIONES_HPT366:
			default:
				switch(revision) {
				case HPT372_REV:
					after = hpt372_dma[drvp->DMA_mode];
					break;
				case HPT370_REV:
				case HPT370A_REV:
					after = hpt370_dma[drvp->DMA_mode];
					break;
				case HPT366_REV:
				default:
					after = hpt366_dma[drvp->DMA_mode];
					break;
				}
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			/* PIO only */
			switch (sc->sc_pp->ide_product) {
			case PCI_PRODUCT_TRIONES_HPT374:
				after = hpt374_pio[drvp->PIO_mode];
				break;
			case PCI_PRODUCT_TRIONES_HPT372:
				after = hpt372_pio[drvp->PIO_mode];
				break;
			case PCI_PRODUCT_TRIONES_HPT366:
			default:
				switch(revision) {
				case HPT372_REV:
					after = hpt372_pio[drvp->PIO_mode];
					break;
				case HPT370_REV:
				case HPT370A_REV:
					after = hpt370_pio[drvp->PIO_mode];
					break;
				case HPT366_REV:
				default:
					after = hpt366_pio[drvp->PIO_mode];
					break;
				}
			}
		}
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    HPT_IDETIM(chp->channel, drive), after);
		WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
		    "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
		    after, before), DEBUG_PROBE);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pciide_print_modes(cp);
}
4226
4227 int
4228 hpt_pci_intr(arg)
4229 void *arg;
4230 {
4231 struct pciide_softc *sc = arg;
4232 struct pciide_channel *cp;
4233 struct channel_softc *wdc_cp;
4234 int rv = 0;
4235 int dmastat, i, crv;
4236
4237 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4238 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4239 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4240 if((dmastat & ( IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4241 IDEDMA_CTL_INTR)
4242 continue;
4243 cp = &sc->pciide_channels[i];
4244 wdc_cp = &cp->wdc_channel;
4245 crv = wdcintr(wdc_cp);
4246 if (crv == 0) {
4247 printf("%s:%d: bogus intr\n",
4248 sc->sc_wdcdev.sc_dev.dv_xname, i);
4249 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4250 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4251 } else
4252 rv = 1;
4253 }
4254 return rv;
4255 }
4256
4257
/* Macros to test product */
/*
 * Group the Promise product IDs by controller generation (named after
 * the PDC202xx part numbers).  Each macro is true for that generation
 * and every later one, so e.g. PDC_IS_262() means "PDC20262 or newer":
 *   262 - Ultra/66 and up          265 - Ultra/100 and up
 *   268 - Ultra/100TX2 and up      276 - Ultra/133 and up
 */
#define PDC_IS_262(sc)							\
	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
#define PDC_IS_265(sc)							\
	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
#define PDC_IS_268(sc)							\
	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
#define PDC_IS_276(sc)							\
	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 ||	\
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4294
/*
 * Chip map for the Promise PDC202xx family.  Note: the PDC2xx_STATE
 * register ("st") exists only on pre-20268 parts, so every use of st
 * is guarded by !PDC_IS_268(sc); st is deliberately left uninitialized
 * on 20268-and-later chips.
 */
void
pdc202xx_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t interface, st, mode;
	bus_size_t cmdsize, ctlsize;

	if (!PDC_IS_268(sc)) {
		st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
		WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
		    st), DEBUG_PROBE);
	}
	if (pciide_chipen(sc, pa) == 0)
		return;

	/* turn off RAID mode */
	if (!PDC_IS_268(sc))
		st &= ~PDC2xx_STATE_IDERAID;

	/*
	 * can't rely on the PCI_CLASS_REG content if the chip was in raid
	 * mode. We have to fake interface
	 */
	interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
	if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
		interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	/* chips sold as RAID parts are flagged so wd(4) can know */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_RAID;
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	/* UDMA capability grows with controller generation */
	if (PDC_IS_276(sc))
		sc->sc_wdcdev.UDMA_cap = 6;
	else if (PDC_IS_265(sc))
		sc->sc_wdcdev.UDMA_cap = 5;
	else if (PDC_IS_262(sc))
		sc->sc_wdcdev.UDMA_cap = 4;
	else
		sc->sc_wdcdev.UDMA_cap = 2;
	sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
	    pdc20268_setup_channel : pdc202xx_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* these parts need the ATAPI register managed around LBA48 DMA */
	if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 ||
	    sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||
	    sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X) {
		sc->sc_wdcdev.dma_start = pdc20262_dma_start;
		sc->sc_wdcdev.dma_finish = pdc20262_dma_finish;
	}

	if (!PDC_IS_268(sc)) {
		/* setup failsafe defaults */
		mode = 0;
		mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
		mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
		mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
		mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
		for (channel = 0;
		     channel < sc->sc_wdcdev.nchannels;
		     channel++) {
			WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
			    "drive 0 initial timings  0x%x, now 0x%x\n",
			    channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
			    PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
			    DEBUG_PROBE);
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
			WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
			    "drive 1 initial timings  0x%x, now 0x%x\n",
			    channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
			    PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
			pci_conf_write(sc->sc_pc, sc->sc_tag,
			    PDC2xx_TIM(channel, 1), mode);
		}

		mode = PDC2xx_SCR_DMA;
		if (PDC_IS_262(sc)) {
			mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
		} else {
			/* the BIOS set it up this way */
			mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
		}
		mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
		mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
		WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR  0x%x, "
		    "now 0x%x\n",
		    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
			PDC2xx_SCR),
		    mode), DEBUG_PROBE);
		bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
		    PDC2xx_SCR, mode);

		/* controller initial state register is OK even without BIOS */
		/* Set DMA mode to IDE DMA compatibility */
		mode =
		    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
		WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
		    DEBUG_PROBE);
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
		    mode | 0x1);
		mode =
		    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
		WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
		    mode | 0x1);
	}

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		/* the per-channel enable bit moved between generations */
		if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
		    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
			aprint_normal("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		if (PDC_IS_265(sc))
			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
			    pdc20265_pci_intr);
		else
			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
			    pdc202xx_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
			st &= ~(PDC_IS_262(sc) ?
			    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
		pciide_map_compat_intr(pa, cp, channel, interface);
		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
	}
	if (!PDC_IS_268(sc)) {
		WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
		    "0x%x\n", st), DEBUG_PROBE);
		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
	}
	return;
}
4449
/*
 * Per-channel mode setup for pre-20268 Promise chips.  On 262-class
 * parts this also manages the U66 clock enable and the per-channel
 * ATAPI register; then the per-drive timing word is assembled from the
 * pdc2xx_* tables and written to the PDC2xx_TIM config register.
 */
void
pdc202xx_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	int drive;
	pcireg_t mode, st;
	u_int32_t idedma_ctl, scr, atapi;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int channel = chp->channel;

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	idedma_ctl = 0;
	WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
	    sc->sc_wdcdev.sc_dev.dv_xname,
	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
	    DEBUG_PROBE);

	/* Per channel settings */
	if (PDC_IS_262(sc)) {
		scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    PDC262_U66);
		st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
		/* Trim UDMA mode */
		/*
		 * Cap both drives at UDMA2 when the 80-pin cable bit is
		 * not set, or when either drive already runs at <= UDMA2
		 * (both drives share the channel clock).
		 */
		if ((st & PDC262_STATE_80P(channel)) != 0 ||
		    (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
		    chp->ch_drive[0].UDMA_mode <= 2) ||
		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
		    chp->ch_drive[1].UDMA_mode <= 2)) {
			if (chp->ch_drive[0].UDMA_mode > 2)
				chp->ch_drive[0].UDMA_mode = 2;
			if (chp->ch_drive[1].UDMA_mode > 2)
				chp->ch_drive[1].UDMA_mode = 2;
		}
		/* Set U66 if needed */
		if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
		    chp->ch_drive[0].UDMA_mode > 2) ||
		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
		    chp->ch_drive[1].UDMA_mode > 2))
			scr |= PDC262_U66_EN(channel);
		else
			scr &= ~PDC262_U66_EN(channel);
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    PDC262_U66, scr);
		WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, channel,
		    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
		    PDC262_ATAPI(channel))), DEBUG_PROBE);
		if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
		    chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
			/*
			 * Clear the UDMA bit only when exactly one drive
			 * does UDMA while the other does multiword DMA.
			 */
			if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
			    !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
			    (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
			    ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
			    !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
			    (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
				atapi = 0;
			else
				atapi = PDC262_ATAPI_UDMA;
			bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
			    PDC262_ATAPI(channel), atapi);
		}
	}
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		mode = 0;
		if (drvp->drive_flags & DRIVE_UDMA) {
			/* use Ultra/DMA */
			drvp->drive_flags &= ~DRIVE_DMA;
			mode = PDC2xx_TIM_SET_MB(mode,
			   pdc2xx_udma_mb[drvp->UDMA_mode]);
			mode = PDC2xx_TIM_SET_MC(mode,
			   pdc2xx_udma_mc[drvp->UDMA_mode]);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if (drvp->drive_flags & DRIVE_DMA) {
			mode = PDC2xx_TIM_SET_MB(mode,
			   pdc2xx_dma_mb[drvp->DMA_mode]);
			mode = PDC2xx_TIM_SET_MC(mode,
			   pdc2xx_dma_mc[drvp->DMA_mode]);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			/* PIO only: failsafe MB/MC values */
			mode = PDC2xx_TIM_SET_MB(mode,
			   pdc2xx_dma_mb[0]);
			mode = PDC2xx_TIM_SET_MC(mode,
			   pdc2xx_dma_mc[0]);
		}
		mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
		mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
		if (drvp->drive_flags & DRIVE_ATA)
			mode |= PDC2xx_TIM_PRE;
		mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
		if (drvp->PIO_mode >= 3) {
			mode |= PDC2xx_TIM_IORDY;
			/* the IORDYp bit is only set for drive 0 */
			if (drive == 0)
				mode |= PDC2xx_TIM_IORDYp;
		}
		WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
		    "timings 0x%x\n",
		    sc->sc_wdcdev.sc_dev.dv_xname,
		    chp->channel, drive, mode), DEBUG_PROBE);
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    PDC2xx_TIM(chp->channel, drive), mode);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
		    idedma_ctl);
	}
	pciide_print_modes(cp);
}
4567
4568 void
4569 pdc20268_setup_channel(chp)
4570 struct channel_softc *chp;
4571 {
4572 struct ata_drive_datas *drvp;
4573 int drive;
4574 u_int32_t idedma_ctl;
4575 struct pciide_channel *cp = (struct pciide_channel*)chp;
4576 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4577 int u100;
4578
4579 /* setup DMA if needed */
4580 pciide_channel_dma_setup(cp);
4581
4582 idedma_ctl = 0;
4583
4584 /* I don't know what this is for, FreeBSD does it ... */
4585 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4586 IDEDMA_CMD + 0x1, 0x0b);
4587
4588 /*
4589 * I don't know what this is for; FreeBSD checks this ... this is not
4590 * cable type detect.
4591 */
4592 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4593 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
4594
4595 for (drive = 0; drive < 2; drive++) {
4596 drvp = &chp->ch_drive[drive];
4597 /* If no drive, skip */
4598 if ((drvp->drive_flags & DRIVE) == 0)
4599 continue;
4600 if (drvp->drive_flags & DRIVE_UDMA) {
4601 /* use Ultra/DMA */
4602 drvp->drive_flags &= ~DRIVE_DMA;
4603 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4604 if (drvp->UDMA_mode > 2 && u100 == 0)
4605 drvp->UDMA_mode = 2;
4606 } else if (drvp->drive_flags & DRIVE_DMA) {
4607 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4608 }
4609 }
4610 /* nothing to do to setup modes, the controller snoop SET_FEATURE cmd */
4611 if (idedma_ctl != 0) {
4612 /* Add software bits in status register */
4613 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4614 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4615 idedma_ctl);
4616 }
4617 pciide_print_modes(cp);
4618 }
4619
4620 int
4621 pdc202xx_pci_intr(arg)
4622 void *arg;
4623 {
4624 struct pciide_softc *sc = arg;
4625 struct pciide_channel *cp;
4626 struct channel_softc *wdc_cp;
4627 int i, rv, crv;
4628 u_int32_t scr;
4629
4630 rv = 0;
4631 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4632 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4633 cp = &sc->pciide_channels[i];
4634 wdc_cp = &cp->wdc_channel;
4635 /* If a compat channel skip. */
4636 if (cp->compat)
4637 continue;
4638 if (scr & PDC2xx_SCR_INT(i)) {
4639 crv = wdcintr(wdc_cp);
4640 if (crv == 0)
4641 printf("%s:%d: bogus intr (reg 0x%x)\n",
4642 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4643 else
4644 rv = 1;
4645 }
4646 }
4647 return rv;
4648 }
4649
4650 int
4651 pdc20265_pci_intr(arg)
4652 void *arg;
4653 {
4654 struct pciide_softc *sc = arg;
4655 struct pciide_channel *cp;
4656 struct channel_softc *wdc_cp;
4657 int i, rv, crv;
4658 u_int32_t dmastat;
4659
4660 rv = 0;
4661 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4662 cp = &sc->pciide_channels[i];
4663 wdc_cp = &cp->wdc_channel;
4664 /* If a compat channel skip. */
4665 if (cp->compat)
4666 continue;
4667 /*
4668 * The Ultra/100 seems to assert PDC2xx_SCR_INT * spuriously,
4669 * however it asserts INT in IDEDMA_CTL even for non-DMA ops.
4670 * So use it instead (requires 2 reg reads instead of 1,
4671 * but we can't do it another way).
4672 */
4673 dmastat = bus_space_read_1(sc->sc_dma_iot,
4674 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4675 if((dmastat & IDEDMA_CTL_INTR) == 0)
4676 continue;
4677 crv = wdcintr(wdc_cp);
4678 if (crv == 0)
4679 printf("%s:%d: bogus intr\n",
4680 sc->sc_wdcdev.sc_dev.dv_xname, i);
4681 else
4682 rv = 1;
4683 }
4684 return rv;
4685 }
4686
4687 static void
4688 pdc20262_dma_start(v, channel, drive)
4689 void *v;
4690 int channel, drive;
4691 {
4692 struct pciide_softc *sc = v;
4693 struct pciide_dma_maps *dma_maps =
4694 &sc->pciide_channels[channel].dma_maps[drive];
4695 int atapi;
4696
4697 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4698 atapi = (dma_maps->dma_flags & WDC_DMA_READ) ?
4699 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE;
4700 atapi |= dma_maps->dmamap_xfer->dm_mapsize >> 1;
4701 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4702 PDC262_ATAPI(channel), atapi);
4703 }
4704
4705 pciide_dma_start(v, channel, drive);
4706 }
4707
/*
 * dma_finish hook for the Promise Ultra/66 and Ultra/100 variants:
 * after an LBA48 transfer, restore the per-channel ATAPI register
 * that pdc20262_dma_start() overwrote.  Returns the status of the
 * generic pciide_dma_finish().
 */
int
pdc20262_dma_finish(v, channel, drive, force)
	void *v;
	int channel, drive;
	int force;
{
	struct pciide_softc *sc = v;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];
	struct channel_softc *chp;
	int atapi, error;

	error = pciide_dma_finish(v, channel, drive, force);

	if (dma_maps->dma_flags & WDC_DMA_LBA48) {
		chp = sc->wdc_chanarray[channel];
		atapi = 0;
		if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
		    chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
			/*
			 * Restore the UDMA bit unless exactly one drive
			 * does UDMA while the other does multiword DMA
			 * (De Morgan inverse of the atapi = 0 condition
			 * in pdc202xx_setup_channel()).
			 */
			if ((!(chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
			    (chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
			    !(chp->ch_drive[1].drive_flags & DRIVE_DMA)) &&
			    (!(chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
			    (chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
			    !(chp->ch_drive[0].drive_flags & DRIVE_DMA)))
				atapi = PDC262_ATAPI_UDMA;
		}
		bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
		    PDC262_ATAPI(channel), atapi);
	}

	return error;
}
4741
/*
 * Chip map for OPTi pciide controllers.  DMA is disabled entirely on
 * chip revisions <= 0x12 (see the XXXSCW note below); otherwise each
 * enabled channel is mapped and its modes programmed.
 */
void
opti_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface;
	u_int8_t init_ctrl;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;
	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);

	/*
	 * XXXSCW:
	 * There seem to be a couple of buggy revisions/implementations
	 * of the OPTi pciide chipset. This kludge seems to fix one of
	 * the reported problems (PR/11644) but still fails for the
	 * other (PR/13151), although the latter may be due to other
	 * issues too...
	 */
	if (PCI_REVISION(pa->pa_class) <= 0x12) {
		aprint_normal(" but disabled due to chip rev. <= 0x12");
		sc->sc_dma_ok = 0;
	} else
		pciide_mapreg_dma(sc, pa);

	aprint_normal("\n");

	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
	    WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.PIO_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.DMA_cap = 2;
	}
	sc->sc_wdcdev.set_modes = opti_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* read once; holds the channel-2 disable strap checked below */
	init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
	    OPTI_REG_INIT_CONTROL);

	interface = PCI_INTERFACE(pa->pa_class);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if (channel == 1 &&
		    (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
			aprint_normal("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		opti_setup_channel(&cp->wdc_channel);
	}
}
4812
/*
 * Per-channel mode setup for OPTi controllers.  mode[drive] holds a
 * combined table index: the PIO mode for PIO-only drives, DMA mode + 5
 * for DMA drives, or -1 when no drive is present.  Both drives on a
 * channel must share one `Address Setup Time' value, so the faster
 * drive is slowed down if they disagree.
 */
void
opti_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int drive, spd;
	int mode[2];
	u_int8_t rv, mr;

	/*
	 * The `Delay' and `Address Setup Time' fields of the
	 * Miscellaneous Register are always zero initially.
	 */
	mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
	mr &= ~(OPTI_MISC_DELAY_MASK |
		OPTI_MISC_ADDR_SETUP_MASK |
		OPTI_MISC_INDEX_MASK);

	/* Prime the control register before setting timing values */
	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);

	/* Determine the clockrate of the PCIbus the chip is attached to */
	spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
	spd &= OPTI_STRAP_PCI_SPEED_MASK;

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0) {
			mode[drive] = -1;
			continue;
		}

		if ((drvp->drive_flags & DRIVE_DMA)) {
			/*
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed
			 */
			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
				drvp->PIO_mode = drvp->DMA_mode + 2;
			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
				    drvp->PIO_mode - 2 : 0;
			if (drvp->DMA_mode == 0)
				drvp->PIO_mode = 0;

			/* DMA entries follow the 5 PIO entries in tables */
			mode[drive] = drvp->DMA_mode + 5;
		} else
			mode[drive] = drvp->PIO_mode;

		if (drive && mode[0] >= 0 &&
		    (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
			/*
			 * Can't have two drives using different values
			 * for `Address Setup Time'.
			 * Slow down the faster drive to compensate.
			 */
			int d = (opti_tim_as[spd][mode[0]] >
				 opti_tim_as[spd][mode[1]]) ?  0 : 1;

			mode[d] = mode[1-d];
			chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
			chp->ch_drive[d].DMA_mode = 0;
			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
		}
	}

	for (drive = 0; drive < 2; drive++) {
		int m;
		if ((m = mode[drive]) < 0)
			continue;

		/* Set the Address Setup Time and select appropriate index */
		rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
		rv |= OPTI_MISC_INDEX(drive);
		opti_write_config(chp, OPTI_REG_MISC, mr | rv);

		/* Set the pulse width and recovery timing parameters */
		rv  = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
		rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
		opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
		opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);

		/* Set the Enhanced Mode register appropriately */
		rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
		rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
		rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
		pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
	}

	/* Finally, enable the timings */
	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);

	pciide_print_modes(cp);
}
4913
/*
 * True when the attached controller is the ACARD ATP850U variant.
 * The 850 uses different timing/UDMA registers and a lower UDMA
 * capability than the later ATP860-family chips (see acard_chip_map
 * and acard_setup_channel).
 */
#define ACARD_IS_850(sc) \
	((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4916
/*
 * Attach-time setup for ACARD ATP850U/ATP860 IDE controllers: map the
 * bus-master DMA registers, advertise driver capabilities, then map
 * and attach both channels.
 */
void
acard_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int i;
	pcireg_t interface;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	/*
	 * when the chip is in native mode it identifies itself as a
	 * 'misc mass storage'. Fake interface in this case.
	 */
	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCI_INTERFACE(pa->pa_class);
	} else {
		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
	}

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	/* The ATP850U tops out at UDMA mode 2; the 860 family at mode 4. */
	sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;

	sc->sc_wdcdev.set_modes = acard_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = 2;

	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		if (pciide_chansetup(sc, i, interface) == 0)
			continue;
		/* Map the channel in native or compat mode as appropriate. */
		if (interface & PCIIDE_INTERFACE_PCI(i)) {
			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
			    &ctlsize, pciide_pci_intr);
		} else {
			cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
			    &cmdsize, &ctlsize);
		}
		if (cp->hw_ok == 0)
			return;
		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
		wdcattach(&cp->wdc_channel);
		acard_setup_channel(&cp->wdc_channel);
	}
	/*
	 * NOTE(review): on ATP860-family chips the ATP860_CTRL_INT bit is
	 * cleared here -- presumably selecting the desired interrupt
	 * behaviour; confirm against the ATP860 documentation.
	 */
	if (!ACARD_IS_850(sc)) {
		u_int32_t reg;
		reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
		reg &= ~ATP860_CTRL_INT;
		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
	}
}
4986
/*
 * Per-channel mode setup for ACARD controllers.  Builds the IDE timing
 * and UDMA mode register values for both drives on the channel and
 * writes them back; the register layout differs between the ATP850U
 * and the ATP860-family chips.
 */
void
acard_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int channel = chp->channel;
	int drive;
	u_int32_t idetime, udma_mode;
	u_int32_t idedma_ctl;

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	/* Read current timing/UDMA registers, clearing this channel's bits. */
	if (ACARD_IS_850(sc)) {
		idetime = 0;
		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
		udma_mode &= ~ATP850_UDMA_MASK(channel);
	} else {
		idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
		idetime &= ~ATP860_SETTIME_MASK(channel);
		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
		udma_mode &= ~ATP860_UDMA_MASK(channel);

		/* check 80 pins cable */
		if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
			if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
			    & ATP860_CTRL_80P(chp->channel)) {
				/*
				 * Bit set apparently means no 80-conductor
				 * cable: cap both drives at UDMA mode 2
				 * (NOTE(review): confirm bit polarity).
				 */
				if (chp->ch_drive[0].UDMA_mode > 2)
					chp->ch_drive[0].UDMA_mode = 2;
				if (chp->ch_drive[1].UDMA_mode > 2)
					chp->ch_drive[1].UDMA_mode = 2;
			}
		}
	}

	idedma_ctl = 0;

	/* Per drive settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			if (ACARD_IS_850(sc)) {
				idetime |= ATP850_SETTIME(drive,
				    acard_act_udma[drvp->UDMA_mode],
				    acard_rec_udma[drvp->UDMA_mode]);
				udma_mode |= ATP850_UDMA_MODE(channel, drive,
				    acard_udma_conf[drvp->UDMA_mode]);
			} else {
				idetime |= ATP860_SETTIME(channel, drive,
				    acard_act_udma[drvp->UDMA_mode],
				    acard_rec_udma[drvp->UDMA_mode]);
				udma_mode |= ATP860_UDMA_MODE(channel, drive,
				    acard_udma_conf[drvp->UDMA_mode]);
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
		    (drvp->drive_flags & DRIVE_DMA)) {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			if (ACARD_IS_850(sc)) {
				idetime |= ATP850_SETTIME(drive,
				    acard_act_dma[drvp->DMA_mode],
				    acard_rec_dma[drvp->DMA_mode]);
			} else {
				idetime |= ATP860_SETTIME(channel, drive,
				    acard_act_dma[drvp->DMA_mode],
				    acard_rec_dma[drvp->DMA_mode]);
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			/* PIO only */
			drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
			if (ACARD_IS_850(sc)) {
				idetime |= ATP850_SETTIME(drive,
				    acard_act_pio[drvp->PIO_mode],
				    acard_rec_pio[drvp->PIO_mode]);
			} else {
				idetime |= ATP860_SETTIME(channel, drive,
				    acard_act_pio[drvp->PIO_mode],
				    acard_rec_pio[drvp->PIO_mode]);
			}
			/*
			 * NOTE(review): the channel-enable bit is only set
			 * in this PIO path -- verify whether DMA/UDMA paths
			 * rely on it being set already.
			 */
			pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
			    pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
			    | ATP8x0_CTRL_EN(channel));
		}
	}

	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
	}
	pciide_print_modes(cp);

	/* Write the assembled values back to the chip. */
	if (ACARD_IS_850(sc)) {
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    ATP850_IDETIME(channel), idetime);
		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
	} else {
		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
	}
}
5099
/*
 * Interrupt handler for ACARD controllers.  Scans both channels'
 * IDEDMA status registers and dispatches to wdcintr() for each channel
 * whose interrupt bit is set.  Returns 1 if any channel claimed the
 * interrupt, 0 if none did, or the first non-zero wdcintr() result
 * otherwise.
 */
int
acard_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int rv = 0;
	int dmastat, i, crv;

	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
		/* Skip channels that didn't interrupt. */
		if ((dmastat & IDEDMA_CTL_INTR) == 0)
			continue;
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;
		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
			/*
			 * Unexpected interrupt: run the handler anyway and
			 * acknowledge by writing the status back.
			 */
			(void)wdcintr(wdc_cp);
			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
			continue;
		}
		crv = wdcintr(wdc_cp);
		if (crv == 0)
			printf("%s:%d: bogus intr\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, i);
		else if (crv == 1)
			rv = 1;
		else if (rv == 0)
			rv = crv;
	}
	return rv;
}
5134
5135 static int
5136 sl82c105_bugchk(struct pci_attach_args *pa)
5137 {
5138
5139 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
5140 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
5141 return (0);
5142
5143 if (PCI_REVISION(pa->pa_class) <= 0x05)
5144 return (1);
5145
5146 return (0);
5147 }
5148
/*
 * Attach-time setup for the Symphony Labs SL82C105 IDE controller.
 * DMA is disabled entirely when the chip sits next to a buggy Winbond
 * 83c553 rev. <= 5 southbridge.
 */
void
sl82c105_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface, idecr;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);

	/*
	 * Check to see if we're part of the Winbond 83c553 Southbridge.
	 * If so, we need to disable DMA on rev. <= 5 of that chip.
	 */
	if (pci_find_device(pa, sl82c105_bugchk)) {
		aprint_normal(" but disabled due to 83c553 rev. <= 0x05");
		sc->sc_dma_ok = 0;
	} else
		pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");

	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
	    WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.PIO_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.DMA_cap = 2;
	}
	sc->sc_wdcdev.set_modes = sl82c105_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/* IDECSR holds the per-port enable bits checked below. */
	idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);

	interface = PCI_INTERFACE(pa->pa_class);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		/* Skip channels that firmware left disabled. */
		if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
		    (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
			aprint_normal("%s: %s channel ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		sl82c105_setup_channel(&cp->wdc_channel);
	}
}
5213
/*
 * Per-channel mode setup for the SL82C105.  Each drive has its own
 * timing register (PxDxCR); program the command-on/command-off times
 * for either multi-word DMA or PIO.
 */
void
sl82c105_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int pxdx_reg, drive;
	pcireg_t pxdx;

	/* Set up DMA if needed. */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		/* Per-drive timing registers are 4 bytes apart. */
		pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
						: SYMPH_P1D0CR) + (drive * 4);

		pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);

		/* Clear all timing and control bits we might set below. */
		pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
		pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);

		drvp = &chp->ch_drive[drive];
		/* If no drive, skip. */
		if ((drvp->drive_flags & DRIVE) == 0) {
			/* Still write the cleared register back. */
			pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
			continue;
		}

		if (drvp->drive_flags & DRIVE_DMA) {
			/*
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed.
			 */
			if (drvp->PIO_mode >= 3) {
				if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
					drvp->DMA_mode = drvp->PIO_mode - 2;
				if (drvp->DMA_mode < 1) {
					/*
					 * Can't mix both PIO and DMA.
					 * Disable DMA.
					 */
					drvp->drive_flags &= ~DRIVE_DMA;
				}
			} else {
				/*
				 * Can't mix both PIO and DMA. Disable
				 * DMA.
				 */
				drvp->drive_flags &= ~DRIVE_DMA;
			}
		}

		if (drvp->drive_flags & DRIVE_DMA) {
			/* Use multi-word DMA. */
			pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
			    PxDx_CMD_ON_SHIFT;
			pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
		} else {
			/* PIO (possibly downgraded from DMA above). */
			pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
			    PxDx_CMD_ON_SHIFT;
			pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
		}

		/* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */

		/* ...and set the mode for this drive. */
		pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
	}

	pciide_print_modes(cp);
}
5286
/*
 * Attach-time setup for ServerWorks OSB4/CSB5/CSB6 IDE controllers.
 * The UDMA capability depends on the exact product (and, for CSB5, on
 * the chip revision).
 */
void
serverworks_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcitag_t pcib_tag;
	int channel;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	/* Per-product UDMA capability. */
	switch (sc->sc_pp->ide_product) {
	case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
		sc->sc_wdcdev.UDMA_cap = 2;
		break;
	case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
		/* Early CSB5 revisions only do UDMA 4. */
		if (PCI_REVISION(pa->pa_class) < 0x92)
			sc->sc_wdcdev.UDMA_cap = 4;
		else
			sc->sc_wdcdev.UDMA_cap = 5;
		break;
	case PCI_PRODUCT_SERVERWORKS_CSB6_IDE:
		sc->sc_wdcdev.UDMA_cap = 5;
		break;
	}

	sc->sc_wdcdev.set_modes = serverworks_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = 2;

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    serverworks_pci_intr);
		if (cp->hw_ok == 0)
			return;
		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			return;
		serverworks_setup_channel(&cp->wdc_channel);
	}

	/*
	 * NOTE(review): undocumented tweak to config register 0x64 of
	 * function 0 on the same device (clear bit 13, set bit 14);
	 * meaning unknown from this file alone -- confirm against
	 * ServerWorks documentation before touching.
	 */
	pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
	pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
	    (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
}
5352
/*
 * Per-channel mode setup for ServerWorks controllers.  PIO, DMA and
 * UDMA timings for both channels share four config registers (0x40,
 * 0x44, 0x48, 0x54); clear this channel's fields and repack them for
 * each present drive.
 */
void
serverworks_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int channel = chp->channel;
	int drive, unit;
	u_int32_t pio_time, dma_time, pio_mode, udma_mode;
	u_int32_t idedma_ctl;
	/* Raw active/recovery timing bytes indexed by PIO/DMA mode. */
	static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
	static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
	dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
	pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
	udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);

	/* Clear this channel's fields in each shared register. */
	pio_time &= ~(0xffff << (16 * channel));
	dma_time &= ~(0xffff << (16 * channel));
	pio_mode &= ~(0xff << (8 * channel + 16));
	udma_mode &= ~(0xff << (8 * channel + 16));
	udma_mode &= ~(3 << (2 * channel));

	idedma_ctl = 0;

	/* Per drive settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* Linear unit number 0..3 across both channels. */
		unit = drive + 2 * channel;
		/* add timing values, setup DMA if needed */
		pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
		pio_mode |= drvp->PIO_mode << (4 * unit + 16);
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA, check for 80-pin cable */
			/*
			 * NOTE(review): the cable indication is read from a
			 * bit in the subsystem ID -- presumably a vendor
			 * convention; confirm before changing.
			 */
			if (drvp->UDMA_mode > 2 &&
			    (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
				drvp->UDMA_mode = 2;
			dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
			udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
			udma_mode |= 1 << unit;
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
		    (drvp->drive_flags & DRIVE_DMA)) {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			/* PIO only */
			drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
		}
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
	/* OSB4 has no PIO mode register at 0x48. */
	if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
		pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);

	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
	}
	pciide_print_modes(cp);
}
5428
/*
 * Interrupt handler for ServerWorks controllers.  Dispatches to
 * wdcintr() for each channel whose IDEDMA status shows an interrupt
 * pending with no DMA transfer active.  Returns 1 if any channel
 * claimed the interrupt, 0 otherwise.
 */
int
serverworks_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int rv = 0;
	int dmastat, i, crv;

	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
		/* Only interrupt-pending with transfer complete counts. */
		if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
		    IDEDMA_CTL_INTR)
			continue;
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;
		crv = wdcintr(wdc_cp);
		if (crv == 0) {
			/* Spurious: complain and ack the status bits. */
			printf("%s:%d: bogus intr\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, i);
			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
		} else
			rv = 1;
	}
	return rv;
}
5458
5459 void
5460 artisea_chip_map(sc, pa)
5461 struct pciide_softc *sc;
5462 struct pci_attach_args *pa;
5463 {
5464 struct pciide_channel *cp;
5465 bus_size_t cmdsize, ctlsize;
5466 pcireg_t interface;
5467 int channel;
5468
5469 if (pciide_chipen(sc, pa) == 0)
5470 return;
5471
5472 aprint_normal("%s: bus-master DMA support resent",
5473 sc->sc_wdcdev.sc_dev.dv_xname);
5474 #ifndef PCIIDE_I31244_ENABLEDMA
5475 if (PCI_REVISION(pa->pa_class) == 0) {
5476 aprint_normal(" but disabled due to rev. 0");
5477 sc->sc_dma_ok = 0;
5478 } else
5479 #endif
5480 pciide_mapreg_dma(sc, pa);
5481 aprint_normal("\n");
5482
5483 /*
5484 * XXX Configure LEDs to show activity.
5485 */
5486
5487 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
5488 WDC_CAPABILITY_MODE;
5489 sc->sc_wdcdev.PIO_cap = 4;
5490 if (sc->sc_dma_ok) {
5491 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
5492 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
5493 sc->sc_wdcdev.irqack = pciide_irqack;
5494 sc->sc_wdcdev.DMA_cap = 2;
5495 sc->sc_wdcdev.UDMA_cap = 6;
5496 }
5497 sc->sc_wdcdev.set_modes = sata_setup_channel;
5498
5499 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5500 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
5501
5502 interface = PCI_INTERFACE(pa->pa_class);
5503
5504 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
5505 cp = &sc->pciide_channels[channel];
5506 if (pciide_chansetup(sc, channel, interface) == 0)
5507 continue;
5508 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
5509 pciide_pci_intr);
5510 if (cp->hw_ok == 0)
5511 continue;
5512 pciide_map_compat_intr(pa, cp, channel, interface);
5513 sata_setup_channel(&cp->wdc_channel);
5514 }
5515 }
5516