1 /* $NetBSD: pciide.c,v 1.191 2003/04/28 05:20:31 nakayama Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.191 2003/04/28 05:20:31 nakayama Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
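/*
 * wdcdebug_pciide_mask selects which of the DEBUG_* categories above are
 * reported by the WDCDEBUG_PRINT() macro below; it defaults to 0 (silent)
 * and is normally changed from the debugger or by patching the kernel.
 */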
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/pciide_i31244_reg.h>
123 #include <dev/pci/pciide_sii3112_reg.h>
124 #include <dev/pci/cy82c693var.h>
125
126 #include "opt_pciide.h"
127
128 static const char dmaerrfmt[] =
129 "%s:%d: unable to %s table DMA map for drive %d, error=%d\n";
130
131 /* inlines for reading/writing 8-bit PCI registers */
132 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
133 int));
134 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
135 int, u_int8_t));
136
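/*
 * PCI configuration space is only accessible in 32-bit quantities through
 * pci_conf_read()/pci_conf_write(), so these helpers synthesize byte-wide
 * accesses: the read shifts the requested byte out of the aligned 32-bit
 * word, and the write does a read-modify-write of the word containing it.
 */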
137 static __inline u_int8_t
138 pciide_pci_read(pc, pa, reg)
139 pci_chipset_tag_t pc;
140 pcitag_t pa;
141 int reg;
142 {
143
144 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
145 ((reg & 0x03) * 8) & 0xff);
146 }
147
148 static __inline void
149 pciide_pci_write(pc, pa, reg, val)
150 pci_chipset_tag_t pc;
151 pcitag_t pa;
152 int reg;
153 u_int8_t val;
154 {
155 pcireg_t pcival;
156
157 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
158 pcival &= ~(0xff << ((reg & 0x03) * 8));
159 pcival |= (val << ((reg & 0x03) * 8));
160 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
161 }
162
163 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
164
165 void sata_setup_channel __P((struct channel_softc*));
166
167 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void piix_setup_channel __P((struct channel_softc*));
169 void piix3_4_setup_channel __P((struct channel_softc*));
170 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
171 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
172 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
173
174 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void amd7x6_setup_channel __P((struct channel_softc*));
176
177 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
178 void apollo_setup_channel __P((struct channel_softc*));
179
180 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
181 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cmd0643_9_setup_channel __P((struct channel_softc*));
183 void cmd_channel_map __P((struct pci_attach_args *,
184 struct pciide_softc *, int));
185 int cmd_pci_intr __P((void *));
186 void cmd646_9_irqack __P((struct channel_softc *));
187 void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
188 void cmd680_setup_channel __P((struct channel_softc*));
189 void cmd680_channel_map __P((struct pci_attach_args *,
190 struct pciide_softc *, int));
191
192 void cmd3112_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void cmd3112_setup_channel __P((struct channel_softc*));
194
195 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
196 void cy693_setup_channel __P((struct channel_softc*));
197
198 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
199 void sis_setup_channel __P((struct channel_softc*));
200 void sis96x_setup_channel __P((struct channel_softc*));
201 static int sis_hostbr_match __P(( struct pci_attach_args *));
202 static int sis_south_match __P(( struct pci_attach_args *));
203
204 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
205 void acer_setup_channel __P((struct channel_softc*));
206 int acer_pci_intr __P((void *));
207
208 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
209 void pdc202xx_setup_channel __P((struct channel_softc*));
210 void pdc20268_setup_channel __P((struct channel_softc*));
211 int pdc202xx_pci_intr __P((void *));
212 int pdc20265_pci_intr __P((void *));
213 static void pdc20262_dma_start __P((void*, int, int));
214 static int pdc20262_dma_finish __P((void*, int, int, int));
215
216 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
217 void opti_setup_channel __P((struct channel_softc*));
218
219 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
220 void hpt_setup_channel __P((struct channel_softc*));
221 int hpt_pci_intr __P((void *));
222
223 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
224 void acard_setup_channel __P((struct channel_softc*));
225 int acard_pci_intr __P((void *));
226
227 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
228 void serverworks_setup_channel __P((struct channel_softc*));
229 int serverworks_pci_intr __P((void *));
230
231 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
232 void sl82c105_setup_channel __P((struct channel_softc*));
233
234 void pciide_channel_dma_setup __P((struct pciide_channel *));
235 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
236 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
237 void pciide_dma_start __P((void*, int, int));
238 int pciide_dma_finish __P((void*, int, int, int));
239 void pciide_irqack __P((struct channel_softc *));
240 void pciide_print_modes __P((struct pciide_channel *));
241
242 void artisea_chip_map __P((struct pciide_softc*, struct pci_attach_args *));
243
244 struct pciide_product_desc {
245 u_int32_t ide_product;
246 int ide_flags;
247 const char *ide_name;
248 /* map and setup chip, probe drives */
249 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
250 };
251
252 /* Flags for ide_flags */
253 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
254 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
255
256 /* Default product description for devices not specifically known to this driver */
257 const struct pciide_product_desc default_product_desc = {
258 0,
259 0,
260 "Generic PCI IDE controller",
261 default_chip_map,
262 };
263
264 const struct pciide_product_desc pciide_intel_products[] = {
265 { PCI_PRODUCT_INTEL_82092AA,
266 0,
267 "Intel 82092AA IDE controller",
268 default_chip_map,
269 },
270 { PCI_PRODUCT_INTEL_82371FB_IDE,
271 0,
272 "Intel 82371FB IDE controller (PIIX)",
273 piix_chip_map,
274 },
275 { PCI_PRODUCT_INTEL_82371SB_IDE,
276 0,
277 "Intel 82371SB IDE Interface (PIIX3)",
278 piix_chip_map,
279 },
280 { PCI_PRODUCT_INTEL_82371AB_IDE,
281 0,
282 "Intel 82371AB IDE controller (PIIX4)",
283 piix_chip_map,
284 },
285 { PCI_PRODUCT_INTEL_82440MX_IDE,
286 0,
287 "Intel 82440MX IDE controller",
288 piix_chip_map
289 },
290 { PCI_PRODUCT_INTEL_82801AA_IDE,
291 0,
292 "Intel 82801AA IDE Controller (ICH)",
293 piix_chip_map,
294 },
295 { PCI_PRODUCT_INTEL_82801AB_IDE,
296 0,
297 "Intel 82801AB IDE Controller (ICH0)",
298 piix_chip_map,
299 },
300 { PCI_PRODUCT_INTEL_82801BA_IDE,
301 0,
302 "Intel 82801BA IDE Controller (ICH2)",
303 piix_chip_map,
304 },
305 { PCI_PRODUCT_INTEL_82801BAM_IDE,
306 0,
307 "Intel 82801BAM IDE Controller (ICH2-M)",
308 piix_chip_map,
309 },
310 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
311 0,
312 "Intel 82801CA IDE Controller (ICH3)",
313 piix_chip_map,
314 },
315 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
316 0,
317 "Intel 82801CA IDE Controller (ICH3)",
318 piix_chip_map,
319 },
320 { PCI_PRODUCT_INTEL_82801DB_IDE,
321 0,
322 "Intel 82801DB IDE Controller (ICH4)",
323 piix_chip_map,
324 },
325 { PCI_PRODUCT_INTEL_82801DBM_IDE,
326 0,
327 "Intel 82801DBM IDE Controller (ICH4-M)",
328 piix_chip_map,
329 },
330 { PCI_PRODUCT_INTEL_31244,
331 0,
332 "Intel 31244 Serial ATA Controller",
333 artisea_chip_map,
334 },
335 { 0,
336 0,
337 NULL,
338 NULL
339 }
340 };
341
342 const struct pciide_product_desc pciide_amd_products[] = {
343 { PCI_PRODUCT_AMD_PBC756_IDE,
344 0,
345 "Advanced Micro Devices AMD756 IDE Controller",
346 amd7x6_chip_map
347 },
348 { PCI_PRODUCT_AMD_PBC766_IDE,
349 0,
350 "Advanced Micro Devices AMD766 IDE Controller",
351 amd7x6_chip_map
352 },
353 { PCI_PRODUCT_AMD_PBC768_IDE,
354 0,
355 "Advanced Micro Devices AMD768 IDE Controller",
356 amd7x6_chip_map
357 },
358 { PCI_PRODUCT_AMD_PBC8111_IDE,
359 0,
360 "Advanced Micro Devices AMD8111 IDE Controller",
361 amd7x6_chip_map
362 },
363 { 0,
364 0,
365 NULL,
366 NULL
367 }
368 };
369
370 const struct pciide_product_desc pciide_nvidia_products[] = {
371 { PCI_PRODUCT_NVIDIA_NFORCE_ATA100,
372 0,
373 "NVIDIA nForce IDE Controller",
374 amd7x6_chip_map
375 },
376 { PCI_PRODUCT_NVIDIA_NFORCE2_ATA133,
377 0,
378 "NVIDIA nForce2 IDE Controller",
379 amd7x6_chip_map
380 },
381 { 0,
382 0,
383 NULL,
384 NULL
385 }
386 };
387
388 const struct pciide_product_desc pciide_cmd_products[] = {
389 { PCI_PRODUCT_CMDTECH_640,
390 0,
391 "CMD Technology PCI0640",
392 cmd_chip_map
393 },
394 { PCI_PRODUCT_CMDTECH_643,
395 0,
396 "CMD Technology PCI0643",
397 cmd0643_9_chip_map,
398 },
399 { PCI_PRODUCT_CMDTECH_646,
400 0,
401 "CMD Technology PCI0646",
402 cmd0643_9_chip_map,
403 },
404 { PCI_PRODUCT_CMDTECH_648,
405 IDE_PCI_CLASS_OVERRIDE,
406 "CMD Technology PCI0648",
407 cmd0643_9_chip_map,
408 },
409 { PCI_PRODUCT_CMDTECH_649,
410 IDE_PCI_CLASS_OVERRIDE,
411 "CMD Technology PCI0649",
412 cmd0643_9_chip_map,
413 },
414 { PCI_PRODUCT_CMDTECH_680,
415 IDE_PCI_CLASS_OVERRIDE,
416 "Silicon Image 0680",
417 cmd680_chip_map,
418 },
419 { PCI_PRODUCT_CMDTECH_3112,
420 IDE_PCI_CLASS_OVERRIDE,
421 "Silicon Image SATALink 3112",
422 cmd3112_chip_map,
423 },
424 { 0,
425 0,
426 NULL,
427 NULL
428 }
429 };
430
431 const struct pciide_product_desc pciide_via_products[] = {
432 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
433 0,
434 NULL,
435 apollo_chip_map,
436 },
437 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
438 0,
439 NULL,
440 apollo_chip_map,
441 },
442 { 0,
443 0,
444 NULL,
445 NULL
446 }
447 };
448
449 const struct pciide_product_desc pciide_cypress_products[] = {
450 { PCI_PRODUCT_CONTAQ_82C693,
451 IDE_16BIT_IOSPACE,
452 "Cypress 82C693 IDE Controller",
453 cy693_chip_map,
454 },
455 { 0,
456 0,
457 NULL,
458 NULL
459 }
460 };
461
462 const struct pciide_product_desc pciide_sis_products[] = {
463 { PCI_PRODUCT_SIS_5597_IDE,
464 0,
465 NULL,
466 sis_chip_map,
467 },
468 { 0,
469 0,
470 NULL,
471 NULL
472 }
473 };
474
475 const struct pciide_product_desc pciide_acer_products[] = {
476 { PCI_PRODUCT_ALI_M5229,
477 0,
478 "Acer Labs M5229 UDMA IDE Controller",
479 acer_chip_map,
480 },
481 { 0,
482 0,
483 NULL,
484 NULL
485 }
486 };
487
488 const struct pciide_product_desc pciide_promise_products[] = {
489 { PCI_PRODUCT_PROMISE_ULTRA33,
490 IDE_PCI_CLASS_OVERRIDE,
491 "Promise Ultra33/ATA Bus Master IDE Accelerator",
492 pdc202xx_chip_map,
493 },
494 { PCI_PRODUCT_PROMISE_ULTRA66,
495 IDE_PCI_CLASS_OVERRIDE,
496 "Promise Ultra66/ATA Bus Master IDE Accelerator",
497 pdc202xx_chip_map,
498 },
499 { PCI_PRODUCT_PROMISE_ULTRA100,
500 IDE_PCI_CLASS_OVERRIDE,
501 "Promise Ultra100/ATA Bus Master IDE Accelerator",
502 pdc202xx_chip_map,
503 },
504 { PCI_PRODUCT_PROMISE_ULTRA100X,
505 IDE_PCI_CLASS_OVERRIDE,
506 "Promise Ultra100/ATA Bus Master IDE Accelerator",
507 pdc202xx_chip_map,
508 },
509 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
510 IDE_PCI_CLASS_OVERRIDE,
511 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
512 pdc202xx_chip_map,
513 },
514 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
515 IDE_PCI_CLASS_OVERRIDE,
516 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
517 pdc202xx_chip_map,
518 },
519 { PCI_PRODUCT_PROMISE_ULTRA133,
520 IDE_PCI_CLASS_OVERRIDE,
521 "Promise Ultra133/ATA Bus Master IDE Accelerator",
522 pdc202xx_chip_map,
523 },
524 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
525 IDE_PCI_CLASS_OVERRIDE,
526 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
527 pdc202xx_chip_map,
528 },
529 { PCI_PRODUCT_PROMISE_MBULTRA133,
530 IDE_PCI_CLASS_OVERRIDE,
531 "Promise Ultra133/ATA Bus Master IDE Accelerator (MB)",
532 pdc202xx_chip_map,
533 },
534 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
535 IDE_PCI_CLASS_OVERRIDE,
536 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
537 pdc202xx_chip_map,
538 },
539 { PCI_PRODUCT_PROMISE_FASTTRAK133LITE,
540 IDE_PCI_CLASS_OVERRIDE,
541 "Promise Fasttrak133 Lite Bus Master IDE Accelerator",
542 pdc202xx_chip_map,
543 },
544 { PCI_PRODUCT_PROMISE_SATA150TX2PLUS,
545 IDE_PCI_CLASS_OVERRIDE,
546 "Promise Serial ATA/150 TX2plus Bus Master IDE Accelerator",
547 pdc202xx_chip_map,
548 },
549 { 0,
550 0,
551 NULL,
552 NULL
553 }
554 };
555
556 const struct pciide_product_desc pciide_opti_products[] = {
557 { PCI_PRODUCT_OPTI_82C621,
558 0,
559 "OPTi 82c621 PCI IDE controller",
560 opti_chip_map,
561 },
562 { PCI_PRODUCT_OPTI_82C568,
563 0,
564 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
565 opti_chip_map,
566 },
567 { PCI_PRODUCT_OPTI_82D568,
568 0,
569 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
570 opti_chip_map,
571 },
572 { 0,
573 0,
574 NULL,
575 NULL
576 }
577 };
578
579 const struct pciide_product_desc pciide_triones_products[] = {
580 { PCI_PRODUCT_TRIONES_HPT366,
581 IDE_PCI_CLASS_OVERRIDE,
582 NULL,
583 hpt_chip_map,
584 },
585 { PCI_PRODUCT_TRIONES_HPT372,
586 IDE_PCI_CLASS_OVERRIDE,
587 NULL,
588 hpt_chip_map
589 },
590 { PCI_PRODUCT_TRIONES_HPT374,
591 IDE_PCI_CLASS_OVERRIDE,
592 NULL,
593 hpt_chip_map
594 },
595 { 0,
596 0,
597 NULL,
598 NULL
599 }
600 };
601
602 const struct pciide_product_desc pciide_acard_products[] = {
603 { PCI_PRODUCT_ACARD_ATP850U,
604 IDE_PCI_CLASS_OVERRIDE,
605 "Acard ATP850U Ultra33 IDE Controller",
606 acard_chip_map,
607 },
608 { PCI_PRODUCT_ACARD_ATP860,
609 IDE_PCI_CLASS_OVERRIDE,
610 "Acard ATP860 Ultra66 IDE Controller",
611 acard_chip_map,
612 },
613 { PCI_PRODUCT_ACARD_ATP860A,
614 IDE_PCI_CLASS_OVERRIDE,
615 "Acard ATP860-A Ultra66 IDE Controller",
616 acard_chip_map,
617 },
618 { 0,
619 0,
620 NULL,
621 NULL
622 }
623 };
624
625 const struct pciide_product_desc pciide_serverworks_products[] = {
626 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
627 0,
628 "ServerWorks OSB4 IDE Controller",
629 serverworks_chip_map,
630 },
631 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
632 0,
633 "ServerWorks CSB5 IDE Controller",
634 serverworks_chip_map,
635 },
636 { PCI_PRODUCT_SERVERWORKS_CSB6_IDE,
637 0,
638 "ServerWorks CSB6 RAID/IDE Controller",
639 serverworks_chip_map,
640 },
641 { 0,
642 0,
643 NULL,
644 }
645 };
646
647 const struct pciide_product_desc pciide_symphony_products[] = {
648 { PCI_PRODUCT_SYMPHONY_82C105,
649 0,
650 "Symphony Labs 82C105 IDE controller",
651 sl82c105_chip_map,
652 },
653 { 0,
654 0,
655 NULL,
656 }
657 };
658
659 const struct pciide_product_desc pciide_winbond_products[] = {
660 { PCI_PRODUCT_WINBOND_W83C553F_1,
661 0,
662 "Winbond W83C553F IDE controller",
663 sl82c105_chip_map,
664 },
665 { 0,
666 0,
667 NULL,
668 }
669 };
670
671 struct pciide_vendor_desc {
672 u_int32_t ide_vendor;
673 const struct pciide_product_desc *ide_products;
674 };
675
676 const struct pciide_vendor_desc pciide_vendors[] = {
677 { PCI_VENDOR_INTEL, pciide_intel_products },
678 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
679 { PCI_VENDOR_VIATECH, pciide_via_products },
680 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
681 { PCI_VENDOR_SIS, pciide_sis_products },
682 { PCI_VENDOR_ALI, pciide_acer_products },
683 { PCI_VENDOR_PROMISE, pciide_promise_products },
684 { PCI_VENDOR_AMD, pciide_amd_products },
685 { PCI_VENDOR_OPTI, pciide_opti_products },
686 { PCI_VENDOR_TRIONES, pciide_triones_products },
687 { PCI_VENDOR_ACARD, pciide_acard_products },
688 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
689 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
690 { PCI_VENDOR_WINBOND, pciide_winbond_products },
691 { PCI_VENDOR_NVIDIA, pciide_nvidia_products },
692 { 0, NULL }
693 };
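/*
 * pciide_lookup_product() below first matches the vendor in pciide_vendors
 * and then scans that vendor's product list; both lists are terminated by
 * an entry with a NULL chip_map.  An entry with a NULL ide_name leaves the
 * attach banner to the chip-specific map routine.
 */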
694
695 /* options passed via the 'flags' config keyword */
696 #define PCIIDE_OPTIONS_DMA 0x01
697 #define PCIIDE_OPTIONS_NODMA 0x02
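/*
 * For example (hypothetical config fragment), DMA could be forced off for
 * all pciide instances with something like:
 *
 *	pciide*	at pci? dev ? function ? flags 0x0002
 */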
698
699 int pciide_match __P((struct device *, struct cfdata *, void *));
700 void pciide_attach __P((struct device *, struct device *, void *));
701
702 CFATTACH_DECL(pciide, sizeof(struct pciide_softc),
703 pciide_match, pciide_attach, NULL, NULL);
704
705 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
706 int pciide_mapregs_compat __P(( struct pci_attach_args *,
707 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
708 int pciide_mapregs_native __P((struct pci_attach_args *,
709 struct pciide_channel *, bus_size_t *, bus_size_t *,
710 int (*pci_intr) __P((void *))));
711 void pciide_mapreg_dma __P((struct pciide_softc *,
712 struct pci_attach_args *));
713 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
714 void pciide_mapchan __P((struct pci_attach_args *,
715 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
716 int (*pci_intr) __P((void *))));
717 int pciide_chan_candisable __P((struct pciide_channel *));
718 void pciide_map_compat_intr __P(( struct pci_attach_args *,
719 struct pciide_channel *, int, int));
720 int pciide_compat_intr __P((void *));
721 int pciide_pci_intr __P((void *));
722 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
723
724 const struct pciide_product_desc *
725 pciide_lookup_product(id)
726 u_int32_t id;
727 {
728 const struct pciide_product_desc *pp;
729 const struct pciide_vendor_desc *vp;
730
731 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
732 if (PCI_VENDOR(id) == vp->ide_vendor)
733 break;
734
735 if ((pp = vp->ide_products) == NULL)
736 return NULL;
737
738 for (; pp->chip_map != NULL; pp++)
739 if (PCI_PRODUCT(id) == pp->ide_product)
740 break;
741
742 if (pp->chip_map == NULL)
743 return NULL;
744 return pp;
745 }
746
747 int
748 pciide_match(parent, match, aux)
749 struct device *parent;
750 struct cfdata *match;
751 void *aux;
752 {
753 struct pci_attach_args *pa = aux;
754 const struct pciide_product_desc *pp;
755
756 /*
757 * Check the ID register to see that it's a PCI IDE controller.
758 * If it is, we assume that we can deal with it; it _should_
759 * work in a standardized way...
760 */
761 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
762 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
763 return (1);
764 }
765
766 /*
767 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
768 * controllers. Let's see if we can deal with them anyway.
769 */
770 pp = pciide_lookup_product(pa->pa_id);
771 if (pp != NULL && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
772 return (1);
773 }
774
775 return (0);
776 }
777
778 void
779 pciide_attach(parent, self, aux)
780 struct device *parent, *self;
781 void *aux;
782 {
783 struct pci_attach_args *pa = aux;
784 pci_chipset_tag_t pc = pa->pa_pc;
785 pcitag_t tag = pa->pa_tag;
786 struct pciide_softc *sc = (struct pciide_softc *)self;
787 pcireg_t csr;
788 char devinfo[256];
789 const char *displaydev;
790
791 sc->sc_pci_vendor = PCI_VENDOR(pa->pa_id);
792 sc->sc_pp = pciide_lookup_product(pa->pa_id);
793 if (sc->sc_pp == NULL) {
794 sc->sc_pp = &default_product_desc;
795 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
796 displaydev = devinfo;
797 } else
798 displaydev = sc->sc_pp->ide_name;
799
800 /* if displaydev == NULL, printf is done in chip-specific map */
801 if (displaydev)
802 printf(": %s (rev. 0x%02x)\n", displaydev,
803 PCI_REVISION(pa->pa_class));
804
805 sc->sc_pc = pa->pa_pc;
806 sc->sc_tag = pa->pa_tag;
807
808 /* Set up DMA defaults; these might be adjusted by chip_map. */
809 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX;
810 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN;
811
812 #ifdef WDCDEBUG
813 if (wdcdebug_pciide_mask & DEBUG_PROBE)
814 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
815 #endif
816 sc->sc_pp->chip_map(sc, pa);
817
818 if (sc->sc_dma_ok) {
819 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
820 csr |= PCI_COMMAND_MASTER_ENABLE;
821 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
822 }
823 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
824 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
825 }
826
827 /* tell whether the chip is enabled or not */
828 int
829 pciide_chipen(sc, pa)
830 struct pciide_softc *sc;
831 struct pci_attach_args *pa;
832 {
833 pcireg_t csr;
834 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
835 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
836 PCI_COMMAND_STATUS_REG);
837 printf("%s: device disabled (at %s)\n",
838 sc->sc_wdcdev.sc_dev.dv_xname,
839 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
840 "device" : "bridge");
841 return 0;
842 }
843 return 1;
844 }
845
846 int
847 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
848 struct pci_attach_args *pa;
849 struct pciide_channel *cp;
850 int compatchan;
851 bus_size_t *cmdsizep, *ctlsizep;
852 {
853 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
854 struct channel_softc *wdc_cp = &cp->wdc_channel;
855
856 cp->compat = 1;
857 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
858 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
859
860 wdc_cp->cmd_iot = pa->pa_iot;
861 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
862 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
863 printf("%s: couldn't map %s channel cmd regs\n",
864 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
865 return (0);
866 }
867
868 wdc_cp->ctl_iot = pa->pa_iot;
869 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
870 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
871 printf("%s: couldn't map %s channel ctl regs\n",
872 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
873 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
874 PCIIDE_COMPAT_CMD_SIZE);
875 return (0);
876 }
877
878 return (1);
879 }
880
881 int
882 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
883 struct pci_attach_args * pa;
884 struct pciide_channel *cp;
885 bus_size_t *cmdsizep, *ctlsizep;
886 int (*pci_intr) __P((void *));
887 {
888 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
889 struct channel_softc *wdc_cp = &cp->wdc_channel;
890 const char *intrstr;
891 pci_intr_handle_t intrhandle;
892
893 cp->compat = 0;
894
895 if (sc->sc_pci_ih == NULL) {
896 if (pci_intr_map(pa, &intrhandle) != 0) {
897 printf("%s: couldn't map native-PCI interrupt\n",
898 sc->sc_wdcdev.sc_dev.dv_xname);
899 return 0;
900 }
901 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
902 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
903 intrhandle, IPL_BIO, pci_intr, sc);
904 if (sc->sc_pci_ih != NULL) {
905 printf("%s: using %s for native-PCI interrupt\n",
906 sc->sc_wdcdev.sc_dev.dv_xname,
907 intrstr ? intrstr : "unknown interrupt");
908 } else {
909 printf("%s: couldn't establish native-PCI interrupt",
910 sc->sc_wdcdev.sc_dev.dv_xname);
911 if (intrstr != NULL)
912 printf(" at %s", intrstr);
913 printf("\n");
914 return 0;
915 }
916 }
917 cp->ih = sc->sc_pci_ih;
918 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
919 PCI_MAPREG_TYPE_IO, 0,
920 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
921 printf("%s: couldn't map %s channel cmd regs\n",
922 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
923 return 0;
924 }
925
926 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
927 PCI_MAPREG_TYPE_IO, 0,
928 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
929 printf("%s: couldn't map %s channel ctl regs\n",
930 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
931 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
932 return 0;
933 }
934 /*
935 * In native mode, 4 bytes of I/O space are mapped for the control
936 * register block; the control register itself is at offset 2. Pass the
937 * generic code a handle for only one byte at the right offset.
938 */
939 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
940 &wdc_cp->ctl_ioh) != 0) {
941 printf("%s: unable to subregion %s channel ctl regs\n",
942 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
943 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
944 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
945 return 0;
946 }
947 return (1);
948 }
949
950 void
951 pciide_mapreg_dma(sc, pa)
952 struct pciide_softc *sc;
953 struct pci_attach_args *pa;
954 {
955 pcireg_t maptype;
956 bus_addr_t addr;
957
958 /*
959 * Map DMA registers
960 *
961 * Note that sc_dma_ok is the right variable to test to see if
962 * DMA can be done. If the interface doesn't support DMA,
963 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
964 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
965 * non-zero if the interface supports DMA and the registers
966 * could be mapped.
967 *
968 * XXX Note that despite the fact that the Bus Master IDE specs
969 * XXX say that "The bus master IDE function uses 16 bytes of IO
970 * XXX space," some controllers (at least the United
971 * XXX Microelectronics UM8886BF) place it in memory space.
972 */
973 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
974 PCIIDE_REG_BUS_MASTER_DMA);
975
976 switch (maptype) {
977 case PCI_MAPREG_TYPE_IO:
978 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
979 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
980 &addr, NULL, NULL) == 0);
981 if (sc->sc_dma_ok == 0) {
982 printf(", but unused (couldn't query registers)");
983 break;
984 }
985 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
986 && addr >= 0x10000) {
987 sc->sc_dma_ok = 0;
988 printf(", but unused (registers at unsafe address "
989 "%#lx)", (unsigned long)addr);
990 break;
991 }
992 /* FALLTHROUGH */
993
994 case PCI_MAPREG_MEM_TYPE_32BIT:
995 sc->sc_dma_ok = (pci_mapreg_map(pa,
996 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
997 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
998 sc->sc_dmat = pa->pa_dmat;
999 if (sc->sc_dma_ok == 0) {
1000 printf(", but unused (couldn't map registers)");
1001 } else {
1002 sc->sc_wdcdev.dma_arg = sc;
1003 sc->sc_wdcdev.dma_init = pciide_dma_init;
1004 sc->sc_wdcdev.dma_start = pciide_dma_start;
1005 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
1006 }
1007
1008 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1009 PCIIDE_OPTIONS_NODMA) {
1010 printf(", but unused (forced off by config file)");
1011 sc->sc_dma_ok = 0;
1012 }
1013 break;
1014
1015 default:
1016 sc->sc_dma_ok = 0;
1017 printf(", but unsupported register maptype (0x%x)", maptype);
1018 }
1019 }
1020
1021 int
1022 pciide_compat_intr(arg)
1023 void *arg;
1024 {
1025 struct pciide_channel *cp = arg;
1026
1027 #ifdef DIAGNOSTIC
1028 /* should only be called for a compat channel */
1029 if (cp->compat == 0)
1030 panic("pciide compat intr called for non-compat chan %p", cp);
1031 #endif
1032 return (wdcintr(&cp->wdc_channel));
1033 }
1034
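/*
 * Native-PCI interrupt handler, shared by all channels of a controller
 * (and possibly with other devices on the same interrupt line): poll every
 * non-compat channel and only hand it to wdcintr() if that channel is
 * actually waiting for an interrupt.
 */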
1035 int
1036 pciide_pci_intr(arg)
1037 void *arg;
1038 {
1039 struct pciide_softc *sc = arg;
1040 struct pciide_channel *cp;
1041 struct channel_softc *wdc_cp;
1042 int i, rv, crv;
1043
1044 rv = 0;
1045 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
1046 cp = &sc->pciide_channels[i];
1047 wdc_cp = &cp->wdc_channel;
1048
1049 /* If a compat channel, skip. */
1050 if (cp->compat)
1051 continue;
1052 /* if this channel is not waiting for an interrupt, skip */
1053 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
1054 continue;
1055
1056 crv = wdcintr(wdc_cp);
1057 if (crv == 0)
1058 ; /* leave rv alone */
1059 else if (crv == 1)
1060 rv = 1; /* claim the intr */
1061 else if (rv == 0) /* crv should be -1 in this case */
1062 rv = crv; /* if we've done no better, take it */
1063 }
1064 return (rv);
1065 }
1066
1067 void
1068 pciide_channel_dma_setup(cp)
1069 struct pciide_channel *cp;
1070 {
1071 int drive;
1072 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1073 struct ata_drive_datas *drvp;
1074
1075 for (drive = 0; drive < 2; drive++) {
1076 drvp = &cp->wdc_channel.ch_drive[drive];
1077 /* If no drive, skip */
1078 if ((drvp->drive_flags & DRIVE) == 0)
1079 continue;
1080 /* setup DMA if needed */
1081 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1082 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
1083 sc->sc_dma_ok == 0) {
1084 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1085 continue;
1086 }
1087 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1088 != 0) {
1089 /* Abort DMA setup */
1090 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1091 continue;
1092 }
1093 }
1094 }
1095
1096 int
1097 pciide_dma_table_setup(sc, channel, drive)
1098 struct pciide_softc *sc;
1099 int channel, drive;
1100 {
1101 bus_dma_segment_t seg;
1102 int error, rseg;
1103 const bus_size_t dma_table_size =
1104 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1105 struct pciide_dma_maps *dma_maps =
1106 &sc->pciide_channels[channel].dma_maps[drive];
1107
1108 /* If table was already allocated, just return */
1109 if (dma_maps->dma_table)
1110 return 0;
1111
1112 /* Allocate memory for the DMA tables and map it */
1113 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1114 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1115 BUS_DMA_NOWAIT)) != 0) {
1116 printf(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1117 "allocate", drive, error);
1118 return error;
1119 }
1120 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1121 dma_table_size,
1122 (caddr_t *)&dma_maps->dma_table,
1123 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1124 printf(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1125 "map", drive, error);
1126 return error;
1127 }
1128 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1129 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1130 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1131 /* Create and load table DMA map for this disk */
1132 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1133 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1134 &dma_maps->dmamap_table)) != 0) {
1135 printf(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1136 "create", drive, error);
1137 return error;
1138 }
1139 if ((error = bus_dmamap_load(sc->sc_dmat,
1140 dma_maps->dmamap_table,
1141 dma_maps->dma_table,
1142 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1143 printf(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1144 "load", drive, error);
1145 return error;
1146 }
1147 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1148 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1149 DEBUG_PROBE);
1150 /* Create a xfer DMA map for this drive */
1151 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1152 NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary,
1153 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1154 &dma_maps->dmamap_xfer)) != 0) {
1155 printf(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1156 "create xfer", drive, error);
1157 return error;
1158 }
1159 return 0;
1160 }
1161
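/*
 * pciide_dma_init() below loads the transfer buffer into dmamap_xfer and
 * fills the descriptor table with one (base address, byte count) entry per
 * DMA segment, marking the last entry with IDEDMA_BYTE_COUNT_EOT as the
 * Bus Master IDE specification requires, before programming the channel's
 * bus-master registers with the table address and transfer direction.
 */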
1162 int
1163 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1164 void *v;
1165 int channel, drive;
1166 void *databuf;
1167 size_t datalen;
1168 int flags;
1169 {
1170 struct pciide_softc *sc = v;
1171 int error, seg;
1172 struct pciide_dma_maps *dma_maps =
1173 &sc->pciide_channels[channel].dma_maps[drive];
1174
1175 error = bus_dmamap_load(sc->sc_dmat,
1176 dma_maps->dmamap_xfer,
1177 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1178 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1179 if (error) {
1180 printf(dmaerrfmt, sc->sc_wdcdev.sc_dev.dv_xname, channel,
1181 "load xfer", drive, error);
1182 return error;
1183 }
1184
1185 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1186 dma_maps->dmamap_xfer->dm_mapsize,
1187 (flags & WDC_DMA_READ) ?
1188 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1189
1190 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1191 #ifdef DIAGNOSTIC
1192 /* A segment must not cross a 64k boundary */
1193 {
1194 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1195 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1196 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1197 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1198 printf("pciide_dma: segment %d physical addr 0x%lx"
1199 " len 0x%lx not properly aligned\n",
1200 seg, phys, len);
1201 panic("pciide_dma: buf align");
1202 }
1203 }
1204 #endif
1205 dma_maps->dma_table[seg].base_addr =
1206 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1207 dma_maps->dma_table[seg].byte_count =
1208 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1209 IDEDMA_BYTE_COUNT_MASK);
1210 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1211 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1212 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1213
1214 }
1215 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1216 htole32(IDEDMA_BYTE_COUNT_EOT);
1217
1218 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1219 dma_maps->dmamap_table->dm_mapsize,
1220 BUS_DMASYNC_PREWRITE);
1221
1222 /* Maps are ready. Start DMA function */
1223 #ifdef DIAGNOSTIC
1224 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1225 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1226 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1227 panic("pciide_dma_init: table align");
1228 }
1229 #endif
1230
1231 /* Clear status bits */
1232 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1233 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1234 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1235 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1236 /* Write table addr */
1237 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1238 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1239 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1240 /* set read/write */
1241 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1242 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1243 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1244 /* remember flags */
1245 dma_maps->dma_flags = flags;
1246 return 0;
1247 }
1248
1249 void
1250 pciide_dma_start(v, channel, drive)
1251 void *v;
1252 int channel, drive;
1253 {
1254 struct pciide_softc *sc = v;
1255
1256 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1257 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1258 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1259 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1260 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1261 }
1262
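/*
 * Stop the bus master and decode the ending status: WDC_DMAST_NOIRQ if the
 * controller never raised the interrupt, WDC_DMAST_ERR on a bus-master
 * error, and WDC_DMAST_UNDER if the engine is still active (a possible
 * underrun, which can be a valid condition for ATAPI).
 */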
1263 int
1264 pciide_dma_finish(v, channel, drive, force)
1265 void *v;
1266 int channel, drive;
1267 int force;
1268 {
1269 struct pciide_softc *sc = v;
1270 u_int8_t status;
1271 int error = 0;
1272 struct pciide_dma_maps *dma_maps =
1273 &sc->pciide_channels[channel].dma_maps[drive];
1274
1275 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1276 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1277 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1278 DEBUG_XFERS);
1279
1280 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1281 return WDC_DMAST_NOIRQ;
1282
1283 /* stop DMA channel */
1284 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1285 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1286 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1287 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1288
1289 /* Unload the map of the data buffer */
1290 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1291 dma_maps->dmamap_xfer->dm_mapsize,
1292 (dma_maps->dma_flags & WDC_DMA_READ) ?
1293 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1294 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1295
1296 if ((status & IDEDMA_CTL_ERR) != 0) {
1297 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1298 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1299 error |= WDC_DMAST_ERR;
1300 }
1301
1302 if ((status & IDEDMA_CTL_INTR) == 0) {
1303 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1304 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1305 drive, status);
1306 error |= WDC_DMAST_NOIRQ;
1307 }
1308
1309 if ((status & IDEDMA_CTL_ACT) != 0) {
1310 /* data underrun, may be a valid condition for ATAPI */
1311 error |= WDC_DMAST_UNDER;
1312 }
1313 return error;
1314 }
1315
1316 void
1317 pciide_irqack(chp)
1318 struct channel_softc *chp;
1319 {
1320 struct pciide_channel *cp = (struct pciide_channel*)chp;
1321 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1322
1323 /* clear status bits in IDE DMA registers */
1324 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1325 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1326 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1327 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1328 }
1329
1330 /* Some common code used by several chip_map routines */
1331 int
1332 pciide_chansetup(sc, channel, interface)
1333 struct pciide_softc *sc;
1334 int channel;
1335 pcireg_t interface;
1336 {
1337 struct pciide_channel *cp = &sc->pciide_channels[channel];
1338 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1339 cp->name = PCIIDE_CHANNEL_NAME(channel);
1340 cp->wdc_channel.channel = channel;
1341 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1342 cp->wdc_channel.ch_queue =
1343 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1344 if (cp->wdc_channel.ch_queue == NULL) {
1345 printf("%s %s channel: "
1346 "can't allocate memory for command queue",
1347 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1348 return 0;
1349 }
1350 printf("%s: %s channel %s to %s mode\n",
1351 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1352 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1353 "configured" : "wired",
1354 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1355 "native-PCI" : "compatibility");
1356 return 1;
1357 }
1358
1359 /* Some common code used by several chip_map routines to map and attach a channel */
1360 void
1361 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1362 struct pci_attach_args *pa;
1363 struct pciide_channel *cp;
1364 pcireg_t interface;
1365 bus_size_t *cmdsizep, *ctlsizep;
1366 int (*pci_intr) __P((void *));
1367 {
1368 struct channel_softc *wdc_cp = &cp->wdc_channel;
1369
1370 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1371 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1372 pci_intr);
1373 else
1374 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1375 wdc_cp->channel, cmdsizep, ctlsizep);
1376
1377 if (cp->hw_ok == 0)
1378 return;
1379 wdc_cp->data32iot = wdc_cp->cmd_iot;
1380 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1381 wdcattach(wdc_cp);
1382 }
1383
1384 /*
1385 * Generic code to determine whether a channel can be disabled. Returns 1
1386 * if the channel can be disabled, 0 if not.
1387 */
1388 int
1389 pciide_chan_candisable(cp)
1390 struct pciide_channel *cp;
1391 {
1392 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1393 struct channel_softc *wdc_cp = &cp->wdc_channel;
1394
1395 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1396 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1397 printf("%s: disabling %s channel (no drives)\n",
1398 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1399 cp->hw_ok = 0;
1400 return 1;
1401 }
1402 return 0;
1403 }
1404
1405 /*
1406 * Generic code to map the compat interrupt if hw_ok=1 and this is a compat
1407 * channel.  Sets hw_ok=0 on failure.
1408 */
1409 void
1410 pciide_map_compat_intr(pa, cp, compatchan, interface)
1411 struct pci_attach_args *pa;
1412 struct pciide_channel *cp;
1413 int compatchan, interface;
1414 {
1415 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1416 struct channel_softc *wdc_cp = &cp->wdc_channel;
1417
1418 if (cp->hw_ok == 0)
1419 return;
1420 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1421 return;
1422
1423 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1424 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1425 pa, compatchan, pciide_compat_intr, cp);
1426 if (cp->ih == NULL) {
1427 #endif
1428 printf("%s: no compatibility interrupt for use by %s "
1429 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1430 cp->hw_ok = 0;
1431 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1432 }
1433 #endif
1434 }
1435
1436 void
1437 pciide_print_modes(cp)
1438 struct pciide_channel *cp;
1439 {
1440 wdc_print_modes(&cp->wdc_channel);
1441 }
1442
1443 void
1444 default_chip_map(sc, pa)
1445 struct pciide_softc *sc;
1446 struct pci_attach_args *pa;
1447 {
1448 struct pciide_channel *cp;
1449 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1450 pcireg_t csr;
1451 int channel, drive;
1452 struct ata_drive_datas *drvp;
1453 u_int8_t idedma_ctl;
1454 bus_size_t cmdsize, ctlsize;
1455 char *failreason;
1456
1457 if (pciide_chipen(sc, pa) == 0)
1458 return;
1459
1460 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1461 printf("%s: bus-master DMA support present",
1462 sc->sc_wdcdev.sc_dev.dv_xname);
1463 if (sc->sc_pp == &default_product_desc &&
1464 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1465 PCIIDE_OPTIONS_DMA) == 0) {
1466 printf(", but unused (no driver support)");
1467 sc->sc_dma_ok = 0;
1468 } else {
1469 pciide_mapreg_dma(sc, pa);
1470 if (sc->sc_dma_ok != 0)
1471 printf(", used without full driver "
1472 "support");
1473 }
1474 } else {
1475 printf("%s: hardware does not support DMA",
1476 sc->sc_wdcdev.sc_dev.dv_xname);
1477 sc->sc_dma_ok = 0;
1478 }
1479 printf("\n");
1480 if (sc->sc_dma_ok) {
1481 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1482 sc->sc_wdcdev.irqack = pciide_irqack;
1483 }
1484 sc->sc_wdcdev.PIO_cap = 0;
1485 sc->sc_wdcdev.DMA_cap = 0;
1486
1487 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1488 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1489 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1490
1491 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1492 cp = &sc->pciide_channels[channel];
1493 if (pciide_chansetup(sc, channel, interface) == 0)
1494 continue;
1495 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1496 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1497 &ctlsize, pciide_pci_intr);
1498 } else {
1499 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1500 channel, &cmdsize, &ctlsize);
1501 }
1502 if (cp->hw_ok == 0)
1503 continue;
1504 /*
1505 * Check to see if something appears to be there.
1506 */
1507 failreason = NULL;
1508 if (!wdcprobe(&cp->wdc_channel)) {
1509 failreason = "not responding; disabled or no drives?";
1510 goto next;
1511 }
1512 /*
1513 * Now, make sure it's actually attributable to this PCI IDE
1514 * channel by trying to access the channel again while the
1515 * PCI IDE controller's I/O space is disabled. (If the
1516 * channel no longer appears to be there, it belongs to
1517 * this controller.) YUCK!
1518 */
1519 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1520 PCI_COMMAND_STATUS_REG);
1521 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1522 csr & ~PCI_COMMAND_IO_ENABLE);
1523 if (wdcprobe(&cp->wdc_channel))
1524 failreason = "other hardware responding at addresses";
1525 pci_conf_write(sc->sc_pc, sc->sc_tag,
1526 PCI_COMMAND_STATUS_REG, csr);
1527 next:
1528 if (failreason) {
1529 printf("%s: %s channel ignored (%s)\n",
1530 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1531 failreason);
1532 cp->hw_ok = 0;
1533 bus_space_unmap(cp->wdc_channel.cmd_iot,
1534 cp->wdc_channel.cmd_ioh, cmdsize);
1535 if (interface & PCIIDE_INTERFACE_PCI(channel))
1536 bus_space_unmap(cp->wdc_channel.ctl_iot,
1537 cp->ctl_baseioh, ctlsize);
1538 else
1539 bus_space_unmap(cp->wdc_channel.ctl_iot,
1540 cp->wdc_channel.ctl_ioh, ctlsize);
1541 } else {
1542 pciide_map_compat_intr(pa, cp, channel, interface);
1543 }
1544 if (cp->hw_ok) {
1545 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1546 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1547 wdcattach(&cp->wdc_channel);
1548 }
1549 }
1550
1551 if (sc->sc_dma_ok == 0)
1552 return;
1553
1554 /* Allocate DMA maps */
1555 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1556 idedma_ctl = 0;
1557 cp = &sc->pciide_channels[channel];
1558 for (drive = 0; drive < 2; drive++) {
1559 drvp = &cp->wdc_channel.ch_drive[drive];
1560 /* If no drive, skip */
1561 if ((drvp->drive_flags & DRIVE) == 0)
1562 continue;
1563 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1564 continue;
1565 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1566 /* Abort DMA setup */
1567 printf("%s:%d:%d: can't allocate DMA maps, "
1568 "using PIO transfers\n",
1569 sc->sc_wdcdev.sc_dev.dv_xname,
1570 channel, drive);
1571 drvp->drive_flags &= ~DRIVE_DMA;
continue;
1572 }
1573 printf("%s:%d:%d: using DMA data transfers\n",
1574 sc->sc_wdcdev.sc_dev.dv_xname,
1575 channel, drive);
1576 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1577 }
1578 if (idedma_ctl != 0) {
1579 /* Add software bits in status register */
1580 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1581 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1582 idedma_ctl);
1583 }
1584 }
1585 }
1586
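/*
 * Channel setup helper intended for the Serial ATA controllers handled in
 * this file: transfer-mode tuning is a no-op on S-ATA, so this only flags
 * the drives for (U)DMA in the bus-master status register and reports the
 * resulting modes.
 */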
1587 void
1588 sata_setup_channel(chp)
1589 struct channel_softc *chp;
1590 {
1591 struct ata_drive_datas *drvp;
1592 int drive;
1593 u_int32_t idedma_ctl;
1594 struct pciide_channel *cp = (struct pciide_channel*)chp;
1595 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
1596
1597 /* setup DMA if needed */
1598 pciide_channel_dma_setup(cp);
1599
1600 idedma_ctl = 0;
1601
1602 for (drive = 0; drive < 2; drive++) {
1603 drvp = &chp->ch_drive[drive];
1604 /* If no drive, skip */
1605 if ((drvp->drive_flags & DRIVE) == 0)
1606 continue;
1607 if (drvp->drive_flags & DRIVE_UDMA) {
1608 /* use Ultra/DMA */
1609 drvp->drive_flags &= ~DRIVE_DMA;
1610 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1611 } else if (drvp->drive_flags & DRIVE_DMA) {
1612 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1613 }
1614 }
1615
1616 /*
1617 * Nothing to do to set up modes; mode selection is meaningless on S-ATA
1618 * (but many S-ATA drives still want to see the SET_FEATURE
1619 * command).
1620 */
1621 if (idedma_ctl != 0) {
1622 /* Add software bits in status register */
1623 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1624 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1625 idedma_ctl);
1626 }
1627 pciide_print_modes(cp);
1628 }
1629
1630 void
1631 piix_chip_map(sc, pa)
1632 struct pciide_softc *sc;
1633 struct pci_attach_args *pa;
1634 {
1635 struct pciide_channel *cp;
1636 int channel;
1637 u_int32_t idetim;
1638 bus_size_t cmdsize, ctlsize;
1639
1640 if (pciide_chipen(sc, pa) == 0)
1641 return;
1642
1643 printf("%s: bus-master DMA support present",
1644 sc->sc_wdcdev.sc_dev.dv_xname);
1645 pciide_mapreg_dma(sc, pa);
1646 printf("\n");
1647 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1648 WDC_CAPABILITY_MODE;
1649 if (sc->sc_dma_ok) {
1650 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1651 sc->sc_wdcdev.irqack = pciide_irqack;
1652 switch(sc->sc_pp->ide_product) {
1653 case PCI_PRODUCT_INTEL_82371AB_IDE:
1654 case PCI_PRODUCT_INTEL_82440MX_IDE:
1655 case PCI_PRODUCT_INTEL_82801AA_IDE:
1656 case PCI_PRODUCT_INTEL_82801AB_IDE:
1657 case PCI_PRODUCT_INTEL_82801BA_IDE:
1658 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1659 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1660 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1661 case PCI_PRODUCT_INTEL_82801DB_IDE:
1662 case PCI_PRODUCT_INTEL_82801DBM_IDE:
1663 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1664 }
1665 }
1666 sc->sc_wdcdev.PIO_cap = 4;
1667 sc->sc_wdcdev.DMA_cap = 2;
1668 switch(sc->sc_pp->ide_product) {
1669 case PCI_PRODUCT_INTEL_82801AA_IDE:
1670 sc->sc_wdcdev.UDMA_cap = 4;
1671 break;
1672 case PCI_PRODUCT_INTEL_82801BA_IDE:
1673 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1674 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1675 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1676 case PCI_PRODUCT_INTEL_82801DB_IDE:
1677 case PCI_PRODUCT_INTEL_82801DBM_IDE:
1678 sc->sc_wdcdev.UDMA_cap = 5;
1679 break;
1680 default:
1681 sc->sc_wdcdev.UDMA_cap = 2;
1682 }
1683 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1684 sc->sc_wdcdev.set_modes = piix_setup_channel;
1685 else
1686 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1687 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1688 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1689
1690 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1691 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1692 DEBUG_PROBE);
1693 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1694 WDCDEBUG_PRINT((", sidetim=0x%x",
1695 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1696 DEBUG_PROBE);
1697 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1698 WDCDEBUG_PRINT((", udamreg 0x%x",
1699 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1700 DEBUG_PROBE);
1701 }
1702 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1703 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1704 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1705 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1706 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1707 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1708 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1709 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE) {
1710 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1711 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1712 DEBUG_PROBE);
1713 }
1714
1715 }
1716 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1717
1718 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1719 cp = &sc->pciide_channels[channel];
1720 /* PIIX is compat-only */
1721 if (pciide_chansetup(sc, channel, 0) == 0)
1722 continue;
1723 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1724 if ((PIIX_IDETIM_READ(idetim, channel) &
1725 PIIX_IDETIM_IDE) == 0) {
1726 printf("%s: %s channel ignored (disabled)\n",
1727 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1728 continue;
1729 }
1730 /* PIIX are compat-only pciide devices */
1731 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1732 if (cp->hw_ok == 0)
1733 continue;
1734 if (pciide_chan_candisable(cp)) {
1735 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1736 channel);
1737 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1738 idetim);
1739 }
1740 pciide_map_compat_intr(pa, cp, channel, 0);
1741 if (cp->hw_ok == 0)
1742 continue;
1743 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1744 }
1745
1746 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1747 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1748 DEBUG_PROBE);
1749 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1750 WDCDEBUG_PRINT((", sidetim=0x%x",
1751 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1752 DEBUG_PROBE);
1753 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1754 WDCDEBUG_PRINT((", udamreg 0x%x",
1755 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1756 DEBUG_PROBE);
1757 }
1758 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1759 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1760 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1761 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1762 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1763 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1764 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1765 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE) {
1766 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1767 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1768 DEBUG_PROBE);
1769 }
1770 }
1771 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1772 }
1773
1774 void
1775 piix_setup_channel(chp)
1776 struct channel_softc *chp;
1777 {
1778 u_int8_t mode[2], drive;
1779 u_int32_t oidetim, idetim, idedma_ctl;
1780 struct pciide_channel *cp = (struct pciide_channel*)chp;
1781 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1782 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1783
1784 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1785 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1786 idedma_ctl = 0;
1787
1788 /* set up new idetim: Enable IDE registers decode */
1789 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1790 chp->channel);
1791
1792 /* setup DMA */
1793 pciide_channel_dma_setup(cp);
1794
1795 /*
1796 	 * Here we have to mess with the drive modes: the PIIX can't use
1797 	 * different timings for the master and slave drives, so we need
1798 	 * to find the best combination.
1799 */
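	/*
	 * Illustrative example (derived from the logic below, not from a
	 * datasheet): with the master at MW DMA 2 and the slave at MW DMA 1,
	 * both drives end up programmed for MW DMA 1; with the master at
	 * MW DMA 2 and a slave whose PIO ISP/RTC values don't match the DMA
	 * ones, the slave is pushed back to PIO mode 0.
	 */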
1800
1801 	/* If both drives support DMA, take the lower mode */
1802 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1803 (drvp[1].drive_flags & DRIVE_DMA)) {
1804 mode[0] = mode[1] =
1805 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1806 drvp[0].DMA_mode = mode[0];
1807 drvp[1].DMA_mode = mode[1];
1808 goto ok;
1809 }
1810 /*
1811 	 * If only one drive supports DMA, use its mode, and put the
1812 	 * other one in PIO mode 0 if its timings are not compatible.
1813 */
1814 if (drvp[0].drive_flags & DRIVE_DMA) {
1815 mode[0] = drvp[0].DMA_mode;
1816 mode[1] = drvp[1].PIO_mode;
1817 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1818 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1819 mode[1] = drvp[1].PIO_mode = 0;
1820 goto ok;
1821 }
1822 if (drvp[1].drive_flags & DRIVE_DMA) {
1823 mode[1] = drvp[1].DMA_mode;
1824 mode[0] = drvp[0].PIO_mode;
1825 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1826 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1827 mode[0] = drvp[0].PIO_mode = 0;
1828 goto ok;
1829 }
1830 /*
1831 	 * If neither drive is using DMA, take the lower mode, unless
1832 	 * one of them is below PIO mode 2.
1833 */
1834 if (drvp[0].PIO_mode < 2) {
1835 mode[0] = drvp[0].PIO_mode = 0;
1836 mode[1] = drvp[1].PIO_mode;
1837 } else if (drvp[1].PIO_mode < 2) {
1838 mode[1] = drvp[1].PIO_mode = 0;
1839 mode[0] = drvp[0].PIO_mode;
1840 } else {
1841 mode[0] = mode[1] =
1842 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1843 drvp[0].PIO_mode = mode[0];
1844 drvp[1].PIO_mode = mode[1];
1845 }
1846 ok:	/* The modes are set up */
1847 for (drive = 0; drive < 2; drive++) {
1848 if (drvp[drive].drive_flags & DRIVE_DMA) {
1849 idetim |= piix_setup_idetim_timings(
1850 mode[drive], 1, chp->channel);
1851 goto end;
1852 }
1853 }
1854 	/* If we get here, neither drive is using DMA */
1855 if (mode[0] >= 2)
1856 idetim |= piix_setup_idetim_timings(
1857 mode[0], 0, chp->channel);
1858 else
1859 idetim |= piix_setup_idetim_timings(
1860 mode[1], 0, chp->channel);
1861 end: /*
1862 	 * The timing mode is now set up in the controller; enable
1863 	 * it per-drive.
1864 */
1865 for (drive = 0; drive < 2; drive++) {
1866 /* If no drive, skip */
1867 if ((drvp[drive].drive_flags & DRIVE) == 0)
1868 continue;
1869 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1870 if (drvp[drive].drive_flags & DRIVE_DMA)
1871 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1872 }
1873 if (idedma_ctl != 0) {
1874 /* Add software bits in status register */
1875 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1876 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1877 idedma_ctl);
1878 }
1879 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1880 pciide_print_modes(cp);
1881 }
1882
1883 void
1884 piix3_4_setup_channel(chp)
1885 struct channel_softc *chp;
1886 {
1887 struct ata_drive_datas *drvp;
1888 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1889 struct pciide_channel *cp = (struct pciide_channel*)chp;
1890 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1891 int drive;
1892 int channel = chp->channel;
1893
1894 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1895 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1896 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1897 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1898 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1899 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1900 PIIX_SIDETIM_RTC_MASK(channel));
1901
1902 idedma_ctl = 0;
1903 /* If channel disabled, no need to go further */
1904 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1905 return;
1906 	/* set up new idetim: enable decoding of the IDE registers */
1907 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1908
1909 /* setup DMA if needed */
1910 pciide_channel_dma_setup(cp);
1911
1912 for (drive = 0; drive < 2; drive++) {
1913 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1914 PIIX_UDMATIM_SET(0x3, channel, drive));
1915 drvp = &chp->ch_drive[drive];
1916 /* If no drive, skip */
1917 if ((drvp->drive_flags & DRIVE) == 0)
1918 continue;
1919 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1920 (drvp->drive_flags & DRIVE_UDMA) == 0))
1921 goto pio;
1922
1923 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1924 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1925 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1926 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1927 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1928 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1929 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1930 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE) {
1931 ideconf |= PIIX_CONFIG_PINGPONG;
1932 }
1933 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1934 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1935 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1936 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1937 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
1938 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE) {
1939 /* setup Ultra/100 */
1940 if (drvp->UDMA_mode > 2 &&
1941 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1942 drvp->UDMA_mode = 2;
1943 if (drvp->UDMA_mode > 4) {
1944 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1945 } else {
1946 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1947 if (drvp->UDMA_mode > 2) {
1948 ideconf |= PIIX_CONFIG_UDMA66(channel,
1949 drive);
1950 } else {
1951 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1952 drive);
1953 }
1954 }
1955 }
1956 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1957 /* setup Ultra/66 */
1958 if (drvp->UDMA_mode > 2 &&
1959 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1960 drvp->UDMA_mode = 2;
1961 if (drvp->UDMA_mode > 2)
1962 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1963 else
1964 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1965 }
1966 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1967 (drvp->drive_flags & DRIVE_UDMA)) {
1968 /* use Ultra/DMA */
1969 drvp->drive_flags &= ~DRIVE_DMA;
1970 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1971 udmareg |= PIIX_UDMATIM_SET(
1972 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1973 } else {
1974 /* use Multiword DMA */
1975 drvp->drive_flags &= ~DRIVE_UDMA;
1976 if (drive == 0) {
1977 idetim |= piix_setup_idetim_timings(
1978 drvp->DMA_mode, 1, channel);
1979 } else {
1980 sidetim |= piix_setup_sidetim_timings(
1981 drvp->DMA_mode, 1, channel);
1982 				idetim = PIIX_IDETIM_SET(idetim,
1983 PIIX_IDETIM_SITRE, channel);
1984 }
1985 }
1986 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1987
1988 pio: /* use PIO mode */
1989 idetim |= piix_setup_idetim_drvs(drvp);
1990 if (drive == 0) {
1991 idetim |= piix_setup_idetim_timings(
1992 drvp->PIO_mode, 0, channel);
1993 } else {
1994 sidetim |= piix_setup_sidetim_timings(
1995 drvp->PIO_mode, 0, channel);
1996 			idetim = PIIX_IDETIM_SET(idetim,
1997 PIIX_IDETIM_SITRE, channel);
1998 }
1999 }
2000 if (idedma_ctl != 0) {
2001 /* Add software bits in status register */
2002 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2003 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
2004 idedma_ctl);
2005 }
2006 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
2007 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
2008 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
2009 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
2010 pciide_print_modes(cp);
2011 }
2012
2013
2014 /* setup ISP and RTC fields, based on mode */
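/*
 * For reference: in Intel's PIIX documentation, ISP is the IORDY sample point
 * and RTC the recovery time, both expressed in PCI clocks; the piix_isp_xxx
 * and piix_rtc_xxx tables used here are assumed to hold the register
 * encodings of those values.
 */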
2015 static u_int32_t
2016 piix_setup_idetim_timings(mode, dma, channel)
2017 u_int8_t mode;
2018 u_int8_t dma;
2019 u_int8_t channel;
2020 {
2021
2022 if (dma)
2023 return PIIX_IDETIM_SET(0,
2024 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
2025 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
2026 channel);
2027 else
2028 return PIIX_IDETIM_SET(0,
2029 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
2030 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
2031 channel);
2032 }
2033
2034 /* setup DTE, PPE, IE and TIME fields based on PIO mode */
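/*
 * For reference (PIIX datasheet terminology, to the best of my knowledge):
 * DTE enables DMA-only timing for the drive, PPE enables prefetch and
 * posting, IE enables the IORDY sample point, and TIME selects the fast
 * timing bank for the drive.
 */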
2035 static u_int32_t
2036 piix_setup_idetim_drvs(drvp)
2037 struct ata_drive_datas *drvp;
2038 {
2039 u_int32_t ret = 0;
2040 struct channel_softc *chp = drvp->chnl_softc;
2041 u_int8_t channel = chp->channel;
2042 u_int8_t drive = drvp->drive;
2043
2044 /*
2045 	 * If the drive is using UDMA, the timing setups are independent,
2046 	 * so just check DMA and PIO here.
2047 */
2048 if (drvp->drive_flags & DRIVE_DMA) {
2049 		/* if the drive is in DMA mode 0, use compatible timings */
2050 if ((drvp->drive_flags & DRIVE_DMA) &&
2051 drvp->DMA_mode == 0) {
2052 drvp->PIO_mode = 0;
2053 return ret;
2054 }
2055 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2056 /*
2057 		 * If the PIO and DMA timings are the same, use fast timings
2058 		 * for PIO too; otherwise fall back to compat timings for PIO.
2059 */
2060 if ((piix_isp_pio[drvp->PIO_mode] !=
2061 piix_isp_dma[drvp->DMA_mode]) ||
2062 (piix_rtc_pio[drvp->PIO_mode] !=
2063 piix_rtc_dma[drvp->DMA_mode]))
2064 drvp->PIO_mode = 0;
2065 /* if PIO mode <= 2, use compat timings for PIO */
2066 if (drvp->PIO_mode <= 2) {
2067 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
2068 channel);
2069 return ret;
2070 }
2071 }
2072
2073 /*
2074 	 * Now set up the PIO mode. If mode < 2, use compat timings.
2075 * Else enable fast timings. Enable IORDY and prefetch/post
2076 * if PIO mode >= 3.
2077 */
2078
2079 if (drvp->PIO_mode < 2)
2080 return ret;
2081
2082 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2083 if (drvp->PIO_mode >= 3) {
2084 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
2085 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
2086 }
2087 return ret;
2088 }
2089
2090 /* setup values in SIDETIM registers, based on mode */
2091 static u_int32_t
2092 piix_setup_sidetim_timings(mode, dma, channel)
2093 u_int8_t mode;
2094 u_int8_t dma;
2095 u_int8_t channel;
2096 {
2097 if (dma)
2098 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
2099 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
2100 else
2101 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
2102 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
2103 }
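/*
 * Note: the per-channel IDETIM register only holds one ISP/RTC pair; on the
 * PIIX3/4 the slave drive can get its own ISP/RTC via SIDETIM when the SITRE
 * bit is set, which is what piix3_4_setup_channel() above relies on.
 */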
2104
2105 void
2106 amd7x6_chip_map(sc, pa)
2107 struct pciide_softc *sc;
2108 struct pci_attach_args *pa;
2109 {
2110 struct pciide_channel *cp;
2111 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2112 int channel;
2113 pcireg_t chanenable;
2114 bus_size_t cmdsize, ctlsize;
2115
2116 if (pciide_chipen(sc, pa) == 0)
2117 return;
2118 printf("%s: bus-master DMA support present",
2119 sc->sc_wdcdev.sc_dev.dv_xname);
2120 pciide_mapreg_dma(sc, pa);
2121 printf("\n");
2122 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2123 WDC_CAPABILITY_MODE;
2124 if (sc->sc_dma_ok) {
2125 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2126 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2127 sc->sc_wdcdev.irqack = pciide_irqack;
2128 }
2129 sc->sc_wdcdev.PIO_cap = 4;
2130 sc->sc_wdcdev.DMA_cap = 2;
2131
2132 switch (sc->sc_pci_vendor) {
2133 case PCI_VENDOR_AMD:
2134 switch (sc->sc_pp->ide_product) {
2135 case PCI_PRODUCT_AMD_PBC766_IDE:
2136 case PCI_PRODUCT_AMD_PBC768_IDE:
2137 case PCI_PRODUCT_AMD_PBC8111_IDE:
2138 sc->sc_wdcdev.UDMA_cap = 5;
2139 break;
2140 default:
2141 sc->sc_wdcdev.UDMA_cap = 4;
2142 }
2143 sc->sc_amd_regbase = AMD7X6_AMD_REGBASE;
2144 break;
2145
2146 case PCI_VENDOR_NVIDIA:
2147 switch (sc->sc_pp->ide_product) {
2148 case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
2149 sc->sc_wdcdev.UDMA_cap = 5;
2150 break;
2151 case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
2152 sc->sc_wdcdev.UDMA_cap = 6;
2153 break;
2154 }
2155 sc->sc_amd_regbase = AMD7X6_NVIDIA_REGBASE;
2156 break;
2157
2158 default:
2159 panic("amd7x6_chip_map: unknown vendor");
2160 }
2161 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2162 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2163 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2164 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag,
2165 AMD7X6_CHANSTATUS_EN(sc));
2166
2167 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2168 DEBUG_PROBE);
2169 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2170 cp = &sc->pciide_channels[channel];
2171 if (pciide_chansetup(sc, channel, interface) == 0)
2172 continue;
2173
2174 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2175 printf("%s: %s channel ignored (disabled)\n",
2176 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2177 continue;
2178 }
2179 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2180 pciide_pci_intr);
2181
2182 if (pciide_chan_candisable(cp))
2183 chanenable &= ~AMD7X6_CHAN_EN(channel);
2184 pciide_map_compat_intr(pa, cp, channel, interface);
2185 if (cp->hw_ok == 0)
2186 continue;
2187
2188 amd7x6_setup_channel(&cp->wdc_channel);
2189 }
2190 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN(sc),
2191 chanenable);
2192 return;
2193 }
2194
2195 void
2196 amd7x6_setup_channel(chp)
2197 struct channel_softc *chp;
2198 {
2199 u_int32_t udmatim_reg, datatim_reg;
2200 u_int8_t idedma_ctl;
2201 int mode, drive;
2202 struct ata_drive_datas *drvp;
2203 struct pciide_channel *cp = (struct pciide_channel*)chp;
2204 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2205 #ifndef PCIIDE_AMD756_ENABLEDMA
2206 int rev = PCI_REVISION(
2207 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2208 #endif
2209
2210 idedma_ctl = 0;
2211 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc));
2212 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc));
2213 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2214 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2215
2216 /* setup DMA if needed */
2217 pciide_channel_dma_setup(cp);
2218
2219 for (drive = 0; drive < 2; drive++) {
2220 drvp = &chp->ch_drive[drive];
2221 /* If no drive, skip */
2222 if ((drvp->drive_flags & DRIVE) == 0)
2223 continue;
2224 /* add timing values, setup DMA if needed */
2225 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2226 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2227 mode = drvp->PIO_mode;
2228 goto pio;
2229 }
2230 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2231 (drvp->drive_flags & DRIVE_UDMA)) {
2232 /* use Ultra/DMA */
2233 drvp->drive_flags &= ~DRIVE_DMA;
2234 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2235 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2236 AMD7X6_UDMA_TIME(chp->channel, drive,
2237 amd7x6_udma_tim[drvp->UDMA_mode]);
2238 /* can use PIO timings, MW DMA unused */
2239 mode = drvp->PIO_mode;
2240 } else {
2241 /* use Multiword DMA, but only if revision is OK */
2242 drvp->drive_flags &= ~DRIVE_UDMA;
2243 #ifndef PCIIDE_AMD756_ENABLEDMA
2244 /*
2245 			 * The workaround doesn't seem to be necessary
2246 			 * with all drives, so it can be disabled with
2247 			 * PCIIDE_AMD756_ENABLEDMA. The bug causes a hard
2248 			 * hang if triggered.
2249 */
2250 if (sc->sc_pci_vendor == PCI_VENDOR_AMD &&
2251 sc->sc_pp->ide_product ==
2252 PCI_PRODUCT_AMD_PBC756_IDE &&
2253 AMD756_CHIPREV_DISABLEDMA(rev)) {
2254 printf("%s:%d:%d: multi-word DMA disabled due "
2255 "to chip revision\n",
2256 sc->sc_wdcdev.sc_dev.dv_xname,
2257 chp->channel, drive);
2258 mode = drvp->PIO_mode;
2259 drvp->drive_flags &= ~DRIVE_DMA;
2260 goto pio;
2261 }
2262 #endif
2263 /* mode = min(pio, dma+2) */
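			/*
			 * A sketch of the reasoning, not a datasheet rule:
			 * the shared timing register roughly pairs MW DMA
			 * mode n with PIO mode n+2, so the slower of the two
			 * is programmed to keep both transfer types safe.
			 */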
2264 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2265 mode = drvp->PIO_mode;
2266 else
2267 mode = drvp->DMA_mode + 2;
2268 }
2269 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2270
2271 pio: /* setup PIO mode */
2272 if (mode <= 2) {
2273 drvp->DMA_mode = 0;
2274 drvp->PIO_mode = 0;
2275 mode = 0;
2276 } else {
2277 drvp->PIO_mode = mode;
2278 drvp->DMA_mode = mode - 2;
2279 }
2280 datatim_reg |=
2281 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2282 amd7x6_pio_set[mode]) |
2283 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2284 amd7x6_pio_rec[mode]);
2285 }
2286 if (idedma_ctl != 0) {
2287 /* Add software bits in status register */
2288 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2289 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2290 idedma_ctl);
2291 }
2292 pciide_print_modes(cp);
2293 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc), datatim_reg);
2294 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc), udmatim_reg);
2295 }
2296
2297 void
2298 apollo_chip_map(sc, pa)
2299 struct pciide_softc *sc;
2300 struct pci_attach_args *pa;
2301 {
2302 struct pciide_channel *cp;
2303 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2304 int channel;
2305 u_int32_t ideconf;
2306 bus_size_t cmdsize, ctlsize;
2307 pcitag_t pcib_tag;
2308 pcireg_t pcib_id, pcib_class;
2309
2310 if (pciide_chipen(sc, pa) == 0)
2311 return;
2312 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2313 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2314 /* and read ID and rev of the ISA bridge */
2315 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2316 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2317 printf(": VIA Technologies ");
2318 switch (PCI_PRODUCT(pcib_id)) {
2319 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2320 printf("VT82C586 (Apollo VP) ");
2321 if(PCI_REVISION(pcib_class) >= 0x02) {
2322 printf("ATA33 controller\n");
2323 sc->sc_wdcdev.UDMA_cap = 2;
2324 } else {
2325 printf("controller\n");
2326 sc->sc_wdcdev.UDMA_cap = 0;
2327 }
2328 break;
2329 case PCI_PRODUCT_VIATECH_VT82C596A:
2330 printf("VT82C596A (Apollo Pro) ");
2331 if (PCI_REVISION(pcib_class) >= 0x12) {
2332 printf("ATA66 controller\n");
2333 sc->sc_wdcdev.UDMA_cap = 4;
2334 } else {
2335 printf("ATA33 controller\n");
2336 sc->sc_wdcdev.UDMA_cap = 2;
2337 }
2338 break;
2339 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2340 printf("VT82C686A (Apollo KX133) ");
2341 if (PCI_REVISION(pcib_class) >= 0x40) {
2342 printf("ATA100 controller\n");
2343 sc->sc_wdcdev.UDMA_cap = 5;
2344 } else {
2345 printf("ATA66 controller\n");
2346 sc->sc_wdcdev.UDMA_cap = 4;
2347 }
2348 break;
2349 case PCI_PRODUCT_VIATECH_VT8231:
2350 printf("VT8231 ATA100 controller\n");
2351 sc->sc_wdcdev.UDMA_cap = 5;
2352 break;
2353 case PCI_PRODUCT_VIATECH_VT8233:
2354 printf("VT8233 ATA100 controller\n");
2355 sc->sc_wdcdev.UDMA_cap = 5;
2356 break;
2357 case PCI_PRODUCT_VIATECH_VT8233A:
2358 printf("VT8233A ATA133 controller\n");
2359 sc->sc_wdcdev.UDMA_cap = 6;
2360 break;
2361 case PCI_PRODUCT_VIATECH_VT8235:
2362 printf("VT8235 ATA133 controller\n");
2363 sc->sc_wdcdev.UDMA_cap = 6;
2364 break;
2365 default:
2366 printf("unknown ATA controller\n");
2367 sc->sc_wdcdev.UDMA_cap = 0;
2368 }
2369
2370 printf("%s: bus-master DMA support present",
2371 sc->sc_wdcdev.sc_dev.dv_xname);
2372 pciide_mapreg_dma(sc, pa);
2373 printf("\n");
2374 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2375 WDC_CAPABILITY_MODE;
2376 if (sc->sc_dma_ok) {
2377 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2378 sc->sc_wdcdev.irqack = pciide_irqack;
2379 if (sc->sc_wdcdev.UDMA_cap > 0)
2380 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2381 }
2382 sc->sc_wdcdev.PIO_cap = 4;
2383 sc->sc_wdcdev.DMA_cap = 2;
2384 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2385 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2386 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2387
2388 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2389 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2390 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2391 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2392 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2393 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2394 DEBUG_PROBE);
2395
2396 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2397 cp = &sc->pciide_channels[channel];
2398 if (pciide_chansetup(sc, channel, interface) == 0)
2399 continue;
2400
2401 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2402 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2403 printf("%s: %s channel ignored (disabled)\n",
2404 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2405 continue;
2406 }
2407 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2408 pciide_pci_intr);
2409 if (cp->hw_ok == 0)
2410 continue;
2411 if (pciide_chan_candisable(cp)) {
2412 ideconf &= ~APO_IDECONF_EN(channel);
2413 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2414 ideconf);
2415 }
2416 pciide_map_compat_intr(pa, cp, channel, interface);
2417
2418 if (cp->hw_ok == 0)
2419 continue;
2420 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2421 }
2422 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2423 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2424 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2425 }
2426
2427 void
2428 apollo_setup_channel(chp)
2429 struct channel_softc *chp;
2430 {
2431 u_int32_t udmatim_reg, datatim_reg;
2432 u_int8_t idedma_ctl;
2433 int mode, drive;
2434 struct ata_drive_datas *drvp;
2435 struct pciide_channel *cp = (struct pciide_channel*)chp;
2436 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2437
2438 idedma_ctl = 0;
2439 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2440 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2441 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2442 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2443
2444 /* setup DMA if needed */
2445 pciide_channel_dma_setup(cp);
2446
2447 for (drive = 0; drive < 2; drive++) {
2448 drvp = &chp->ch_drive[drive];
2449 /* If no drive, skip */
2450 if ((drvp->drive_flags & DRIVE) == 0)
2451 continue;
2452 /* add timing values, setup DMA if needed */
2453 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2454 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2455 mode = drvp->PIO_mode;
2456 goto pio;
2457 }
2458 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2459 (drvp->drive_flags & DRIVE_UDMA)) {
2460 /* use Ultra/DMA */
2461 drvp->drive_flags &= ~DRIVE_DMA;
2462 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2463 APO_UDMA_EN_MTH(chp->channel, drive);
2464 if (sc->sc_wdcdev.UDMA_cap == 6) {
2465 /* 8233a */
2466 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2467 drive, apollo_udma133_tim[drvp->UDMA_mode]);
2468 } else if (sc->sc_wdcdev.UDMA_cap == 5) {
2469 /* 686b */
2470 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2471 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2472 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2473 /* 596b or 686a */
2474 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2475 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2476 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2477 } else {
2478 /* 596a or 586b */
2479 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2480 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2481 }
2482 /* can use PIO timings, MW DMA unused */
2483 mode = drvp->PIO_mode;
2484 } else {
2485 /* use Multiword DMA */
2486 drvp->drive_flags &= ~DRIVE_UDMA;
2487 /* mode = min(pio, dma+2) */
2488 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2489 mode = drvp->PIO_mode;
2490 else
2491 mode = drvp->DMA_mode + 2;
2492 }
2493 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2494
2495 pio: /* setup PIO mode */
2496 if (mode <= 2) {
2497 drvp->DMA_mode = 0;
2498 drvp->PIO_mode = 0;
2499 mode = 0;
2500 } else {
2501 drvp->PIO_mode = mode;
2502 drvp->DMA_mode = mode - 2;
2503 }
2504 datatim_reg |=
2505 APO_DATATIM_PULSE(chp->channel, drive,
2506 apollo_pio_set[mode]) |
2507 APO_DATATIM_RECOV(chp->channel, drive,
2508 apollo_pio_rec[mode]);
2509 }
2510 if (idedma_ctl != 0) {
2511 /* Add software bits in status register */
2512 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2513 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2514 idedma_ctl);
2515 }
2516 pciide_print_modes(cp);
2517 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2518 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2519 }
2520
2521 void
2522 cmd_channel_map(pa, sc, channel)
2523 struct pci_attach_args *pa;
2524 struct pciide_softc *sc;
2525 int channel;
2526 {
2527 struct pciide_channel *cp = &sc->pciide_channels[channel];
2528 bus_size_t cmdsize, ctlsize;
2529 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2530 int interface, one_channel;
2531
2532 /*
2533 * The 0648/0649 can be told to identify as a RAID controller.
2534 	 * In this case, we have to fake the interface value.
2535 */
2536 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2537 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2538 PCIIDE_INTERFACE_SETTABLE(1);
2539 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2540 CMD_CONF_DSA1)
2541 interface |= PCIIDE_INTERFACE_PCI(0) |
2542 PCIIDE_INTERFACE_PCI(1);
2543 } else {
2544 interface = PCI_INTERFACE(pa->pa_class);
2545 }
2546
2547 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2548 cp->name = PCIIDE_CHANNEL_NAME(channel);
2549 cp->wdc_channel.channel = channel;
2550 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2551
2552 /*
2553 	 * Older CMD64X chips don't have independent channels
2554 */
2555 switch (sc->sc_pp->ide_product) {
2556 case PCI_PRODUCT_CMDTECH_649:
2557 one_channel = 0;
2558 break;
2559 default:
2560 one_channel = 1;
2561 break;
2562 }
2563
2564 if (channel > 0 && one_channel) {
2565 cp->wdc_channel.ch_queue =
2566 sc->pciide_channels[0].wdc_channel.ch_queue;
2567 } else {
2568 cp->wdc_channel.ch_queue =
2569 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2570 }
2571 if (cp->wdc_channel.ch_queue == NULL) {
2572 printf("%s %s channel: "
2573 "can't allocate memory for command queue",
2574 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2575 return;
2576 }
2577
2578 printf("%s: %s channel %s to %s mode\n",
2579 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2580 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2581 "configured" : "wired",
2582 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2583 "native-PCI" : "compatibility");
2584
2585 /*
2586 * with a CMD PCI64x, if we get here, the first channel is enabled:
2587 * there's no way to disable the first channel without disabling
2588 * the whole device
2589 */
2590 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2591 printf("%s: %s channel ignored (disabled)\n",
2592 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2593 return;
2594 }
2595
2596 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2597 if (cp->hw_ok == 0)
2598 return;
2599 if (channel == 1) {
2600 if (pciide_chan_candisable(cp)) {
2601 ctrl &= ~CMD_CTRL_2PORT;
2602 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2603 CMD_CTRL, ctrl);
2604 }
2605 }
2606 pciide_map_compat_intr(pa, cp, channel, interface);
2607 }
2608
2609 int
2610 cmd_pci_intr(arg)
2611 void *arg;
2612 {
2613 struct pciide_softc *sc = arg;
2614 struct pciide_channel *cp;
2615 struct channel_softc *wdc_cp;
2616 int i, rv, crv;
2617 u_int32_t priirq, secirq;
2618
2619 rv = 0;
2620 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2621 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2622 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2623 cp = &sc->pciide_channels[i];
2624 wdc_cp = &cp->wdc_channel;
2625 		/* If a compat channel, skip. */
2626 if (cp->compat)
2627 continue;
2628 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2629 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2630 crv = wdcintr(wdc_cp);
2631 if (crv == 0)
2632 printf("%s:%d: bogus intr\n",
2633 sc->sc_wdcdev.sc_dev.dv_xname, i);
2634 else
2635 rv = 1;
2636 }
2637 }
2638 return rv;
2639 }
2640
2641 void
2642 cmd_chip_map(sc, pa)
2643 struct pciide_softc *sc;
2644 struct pci_attach_args *pa;
2645 {
2646 int channel;
2647
2648 /*
2649 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2650 	 * and the base address registers can be disabled at
2651 * hardware level. In this case, the device is wired
2652 * in compat mode and its first channel is always enabled,
2653 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2654 * In fact, it seems that the first channel of the CMD PCI0640
2655 * can't be disabled.
2656 */
2657
2658 #ifdef PCIIDE_CMD064x_DISABLE
2659 if (pciide_chipen(sc, pa) == 0)
2660 return;
2661 #endif
2662
2663 printf("%s: hardware does not support DMA\n",
2664 sc->sc_wdcdev.sc_dev.dv_xname);
2665 sc->sc_dma_ok = 0;
2666
2667 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2668 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2669 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2670
2671 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2672 cmd_channel_map(pa, sc, channel);
2673 }
2674 }
2675
2676 void
2677 cmd0643_9_chip_map(sc, pa)
2678 struct pciide_softc *sc;
2679 struct pci_attach_args *pa;
2680 {
2681 struct pciide_channel *cp;
2682 int channel;
2683 pcireg_t rev = PCI_REVISION(pa->pa_class);
2684
2685 /*
2686 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2687 	 * and the base address registers can be disabled at
2688 * hardware level. In this case, the device is wired
2689 * in compat mode and its first channel is always enabled,
2690 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2691 * In fact, it seems that the first channel of the CMD PCI0640
2692 * can't be disabled.
2693 */
2694
2695 #ifdef PCIIDE_CMD064x_DISABLE
2696 if (pciide_chipen(sc, pa) == 0)
2697 return;
2698 #endif
2699 printf("%s: bus-master DMA support present",
2700 sc->sc_wdcdev.sc_dev.dv_xname);
2701 pciide_mapreg_dma(sc, pa);
2702 printf("\n");
2703 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2704 WDC_CAPABILITY_MODE;
2705 if (sc->sc_dma_ok) {
2706 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2707 switch (sc->sc_pp->ide_product) {
2708 case PCI_PRODUCT_CMDTECH_649:
2709 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2710 sc->sc_wdcdev.UDMA_cap = 5;
2711 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2712 break;
2713 case PCI_PRODUCT_CMDTECH_648:
2714 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2715 sc->sc_wdcdev.UDMA_cap = 4;
2716 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2717 break;
2718 case PCI_PRODUCT_CMDTECH_646:
2719 if (rev >= CMD0646U2_REV) {
2720 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2721 sc->sc_wdcdev.UDMA_cap = 2;
2722 } else if (rev >= CMD0646U_REV) {
2723 /*
2724 * Linux's driver claims that the 646U is broken
2725 * with UDMA. Only enable it if we know what we're
2726 * doing
2727 */
2728 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2729 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2730 sc->sc_wdcdev.UDMA_cap = 2;
2731 #endif
2732 /* explicitly disable UDMA */
2733 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2734 CMD_UDMATIM(0), 0);
2735 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2736 CMD_UDMATIM(1), 0);
2737 }
2738 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2739 break;
2740 default:
2741 sc->sc_wdcdev.irqack = pciide_irqack;
2742 }
2743 }
2744
2745 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2746 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2747 sc->sc_wdcdev.PIO_cap = 4;
2748 sc->sc_wdcdev.DMA_cap = 2;
2749 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2750
2751 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2752 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2753 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2754 DEBUG_PROBE);
2755
2756 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2757 cp = &sc->pciide_channels[channel];
2758 cmd_channel_map(pa, sc, channel);
2759 if (cp->hw_ok == 0)
2760 continue;
2761 cmd0643_9_setup_channel(&cp->wdc_channel);
2762 }
2763 /*
2764 * note - this also makes sure we clear the irq disable and reset
2765 * bits
2766 */
2767 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2768 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2769 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2770 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2771 DEBUG_PROBE);
2772 }
2773
2774 void
2775 cmd0643_9_setup_channel(chp)
2776 struct channel_softc *chp;
2777 {
2778 struct ata_drive_datas *drvp;
2779 u_int8_t tim;
2780 u_int32_t idedma_ctl, udma_reg;
2781 int drive;
2782 struct pciide_channel *cp = (struct pciide_channel*)chp;
2783 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2784
2785 idedma_ctl = 0;
2786 /* setup DMA if needed */
2787 pciide_channel_dma_setup(cp);
2788
2789 for (drive = 0; drive < 2; drive++) {
2790 drvp = &chp->ch_drive[drive];
2791 /* If no drive, skip */
2792 if ((drvp->drive_flags & DRIVE) == 0)
2793 continue;
2794 /* add timing values, setup DMA if needed */
2795 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2796 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2797 if (drvp->drive_flags & DRIVE_UDMA) {
2798 /* UltraDMA on a 646U2, 0648 or 0649 */
2799 drvp->drive_flags &= ~DRIVE_DMA;
2800 udma_reg = pciide_pci_read(sc->sc_pc,
2801 sc->sc_tag, CMD_UDMATIM(chp->channel));
2802 if (drvp->UDMA_mode > 2 &&
2803 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2804 CMD_BICSR) &
2805 CMD_BICSR_80(chp->channel)) == 0)
2806 drvp->UDMA_mode = 2;
2807 if (drvp->UDMA_mode > 2)
2808 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2809 else if (sc->sc_wdcdev.UDMA_cap > 2)
2810 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2811 udma_reg |= CMD_UDMATIM_UDMA(drive);
2812 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2813 CMD_UDMATIM_TIM_OFF(drive));
2814 udma_reg |=
2815 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2816 CMD_UDMATIM_TIM_OFF(drive));
2817 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2818 CMD_UDMATIM(chp->channel), udma_reg);
2819 } else {
2820 /*
2821 				 * use Multiword DMA.
2822 				 * Timings will be used for both PIO and DMA,
2823 				 * so adjust DMA mode if needed.
2824 				 * If we have a 0646U2/8/9, turn off UDMA.
2825 */
2826 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2827 udma_reg = pciide_pci_read(sc->sc_pc,
2828 sc->sc_tag,
2829 CMD_UDMATIM(chp->channel));
2830 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2831 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2832 CMD_UDMATIM(chp->channel),
2833 udma_reg);
2834 }
2835 if (drvp->PIO_mode >= 3 &&
2836 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2837 drvp->DMA_mode = drvp->PIO_mode - 2;
2838 }
2839 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2840 }
2841 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2842 }
2843 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2844 CMD_DATA_TIM(chp->channel, drive), tim);
2845 }
2846 if (idedma_ctl != 0) {
2847 /* Add software bits in status register */
2848 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2849 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2850 idedma_ctl);
2851 }
2852 pciide_print_modes(cp);
2853 }
2854
2855 void
2856 cmd646_9_irqack(chp)
2857 struct channel_softc *chp;
2858 {
2859 u_int32_t priirq, secirq;
2860 struct pciide_channel *cp = (struct pciide_channel*)chp;
2861 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2862
2863 if (chp->channel == 0) {
2864 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2865 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2866 } else {
2867 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2868 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2869 }
2870 pciide_irqack(chp);
2871 }
2872
2873 void
2874 cmd680_chip_map(sc, pa)
2875 struct pciide_softc *sc;
2876 struct pci_attach_args *pa;
2877 {
2878 struct pciide_channel *cp;
2879 int channel;
2880
2881 if (pciide_chipen(sc, pa) == 0)
2882 return;
2883 printf("%s: bus-master DMA support present",
2884 sc->sc_wdcdev.sc_dev.dv_xname);
2885 pciide_mapreg_dma(sc, pa);
2886 printf("\n");
2887 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2888 WDC_CAPABILITY_MODE;
2889 if (sc->sc_dma_ok) {
2890 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2891 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2892 sc->sc_wdcdev.UDMA_cap = 6;
2893 sc->sc_wdcdev.irqack = pciide_irqack;
2894 }
2895
2896 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2897 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2898 sc->sc_wdcdev.PIO_cap = 4;
2899 sc->sc_wdcdev.DMA_cap = 2;
2900 sc->sc_wdcdev.set_modes = cmd680_setup_channel;
2901
2902 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
2903 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
2904 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
2905 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);
2906 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2907 cp = &sc->pciide_channels[channel];
2908 cmd680_channel_map(pa, sc, channel);
2909 if (cp->hw_ok == 0)
2910 continue;
2911 cmd680_setup_channel(&cp->wdc_channel);
2912 }
2913 }
2914
2915 void
2916 cmd680_channel_map(pa, sc, channel)
2917 struct pci_attach_args *pa;
2918 struct pciide_softc *sc;
2919 int channel;
2920 {
2921 struct pciide_channel *cp = &sc->pciide_channels[channel];
2922 bus_size_t cmdsize, ctlsize;
2923 int interface, i, reg;
2924 static const u_int8_t init_val[] =
2925 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
2926 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };
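	/*
	 * The loop below writes these 14 bytes to a 16-byte block of
	 * per-channel registers starting at 0xa2; they are presumably safe
	 * power-up timing defaults and are kept verbatim here.
	 */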
2927
2928 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2929 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2930 PCIIDE_INTERFACE_SETTABLE(1);
2931 interface |= PCIIDE_INTERFACE_PCI(0) |
2932 PCIIDE_INTERFACE_PCI(1);
2933 } else {
2934 interface = PCI_INTERFACE(pa->pa_class);
2935 }
2936
2937 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2938 cp->name = PCIIDE_CHANNEL_NAME(channel);
2939 cp->wdc_channel.channel = channel;
2940 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2941
2942 cp->wdc_channel.ch_queue =
2943 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2944 if (cp->wdc_channel.ch_queue == NULL) {
2945 printf("%s %s channel: "
2946 "can't allocate memory for command queue",
2947 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2948 return;
2949 }
2950
2951 /* XXX */
2952 reg = 0xa2 + channel * 16;
2953 for (i = 0; i < sizeof(init_val); i++)
2954 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);
2955
2956 printf("%s: %s channel %s to %s mode\n",
2957 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2958 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2959 "configured" : "wired",
2960 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2961 "native-PCI" : "compatibility");
2962
2963 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
2964 if (cp->hw_ok == 0)
2965 return;
2966 pciide_map_compat_intr(pa, cp, channel, interface);
2967 }
2968
2969 void
2970 cmd680_setup_channel(chp)
2971 struct channel_softc *chp;
2972 {
2973 struct ata_drive_datas *drvp;
2974 u_int8_t mode, off, scsc;
2975 u_int16_t val;
2976 u_int32_t idedma_ctl;
2977 int drive;
2978 struct pciide_channel *cp = (struct pciide_channel*)chp;
2979 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2980 pci_chipset_tag_t pc = sc->sc_pc;
2981 pcitag_t pa = sc->sc_tag;
2982 static const u_int8_t udma2_tbl[] =
2983 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
2984 static const u_int8_t udma_tbl[] =
2985 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
2986 static const u_int16_t dma_tbl[] =
2987 { 0x2208, 0x10c2, 0x10c1 };
2988 static const u_int16_t pio_tbl[] =
2989 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
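	/*
	 * These tables are indexed by mode number: udma2_tbl and udma_tbl by
	 * UDMA mode 0-6 (the former used when scsc indicates the faster
	 * clock), dma_tbl by MW DMA mode 0-2 and pio_tbl by PIO mode 0-4.
	 */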
2990
2991 idedma_ctl = 0;
2992 pciide_channel_dma_setup(cp);
2993 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4);
2994
2995 for (drive = 0; drive < 2; drive++) {
2996 drvp = &chp->ch_drive[drive];
2997 /* If no drive, skip */
2998 if ((drvp->drive_flags & DRIVE) == 0)
2999 continue;
3000 mode &= ~(0x03 << (drive * 4));
3001 if (drvp->drive_flags & DRIVE_UDMA) {
3002 drvp->drive_flags &= ~DRIVE_DMA;
3003 off = 0xa0 + chp->channel * 16;
3004 if (drvp->UDMA_mode > 2 &&
3005 (pciide_pci_read(pc, pa, off) & 0x01) == 0)
3006 drvp->UDMA_mode = 2;
3007 scsc = pciide_pci_read(pc, pa, 0x8a);
3008 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
3009 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
3010 scsc = pciide_pci_read(pc, pa, 0x8a);
3011 if ((scsc & 0x30) == 0)
3012 drvp->UDMA_mode = 5;
3013 }
3014 mode |= 0x03 << (drive * 4);
3015 off = 0xac + chp->channel * 16 + drive * 2;
3016 val = pciide_pci_read(pc, pa, off) & ~0x3f;
3017 if (scsc & 0x30)
3018 val |= udma2_tbl[drvp->UDMA_mode];
3019 else
3020 val |= udma_tbl[drvp->UDMA_mode];
3021 pciide_pci_write(pc, pa, off, val);
3022 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3023 } else if (drvp->drive_flags & DRIVE_DMA) {
3024 mode |= 0x02 << (drive * 4);
3025 off = 0xa8 + chp->channel * 16 + drive * 2;
3026 val = dma_tbl[drvp->DMA_mode];
3027 pciide_pci_write(pc, pa, off, val & 0xff);
3028 pciide_pci_write(pc, pa, off, val >> 8);
3029 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3030 } else {
3031 mode |= 0x01 << (drive * 4);
3032 off = 0xa4 + chp->channel * 16 + drive * 2;
3033 val = pio_tbl[drvp->PIO_mode];
3034 pciide_pci_write(pc, pa, off, val & 0xff);
3035 pciide_pci_write(pc, pa, off, val >> 8);
3036 }
3037 }
3038
3039 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
3040 if (idedma_ctl != 0) {
3041 /* Add software bits in status register */
3042 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3043 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3044 idedma_ctl);
3045 }
3046 pciide_print_modes(cp);
3047 }
3048
3049 void
3050 cmd3112_chip_map(sc, pa)
3051 struct pciide_softc *sc;
3052 struct pci_attach_args *pa;
3053 {
3054 struct pciide_channel *cp;
3055 bus_size_t cmdsize, ctlsize;
3056 pcireg_t interface;
3057 int channel;
3058
3059 if (pciide_chipen(sc, pa) == 0)
3060 return;
3061
3062 printf("%s: bus-master DMA support present",
3063 sc->sc_wdcdev.sc_dev.dv_xname);
3064 pciide_mapreg_dma(sc, pa);
3065 printf("\n");
3066
3067 /*
3068 	 * Revisions <= 0x01 of the 3112 have a bug that can cause data
3069 * corruption if DMA transfers cross an 8K boundary. This is
3070 * apparently hard to tickle, but we'll go ahead and play it
3071 * safe.
3072 */
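	/*
	 * sc_dma_maxsegsz and sc_dma_boundary are presumably honoured when
	 * the DMA maps are created, so no transfer segment should cross an
	 * 8K boundary on these early revisions.
	 */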
3073 if (PCI_REVISION(pa->pa_class) <= 0x01) {
3074 sc->sc_dma_maxsegsz = 8192;
3075 sc->sc_dma_boundary = 8192;
3076 }
3077
3078 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3079 WDC_CAPABILITY_MODE;
3080 sc->sc_wdcdev.PIO_cap = 4;
3081 if (sc->sc_dma_ok) {
3082 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3083 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3084 sc->sc_wdcdev.irqack = pciide_irqack;
3085 sc->sc_wdcdev.DMA_cap = 2;
3086 sc->sc_wdcdev.UDMA_cap = 6;
3087 }
3088 sc->sc_wdcdev.set_modes = cmd3112_setup_channel;
3089
3090 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3091 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3092
3093 /*
3094 * The 3112 can be told to identify as a RAID controller.
3095 	 * In this case, we have to fake the interface value.
3096 */
3097 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3098 interface = PCI_INTERFACE(pa->pa_class);
3099 } else {
3100 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3101 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3102 }
3103
3104 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3105 cp = &sc->pciide_channels[channel];
3106 if (pciide_chansetup(sc, channel, interface) == 0)
3107 continue;
3108 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3109 pciide_pci_intr);
3110 if (cp->hw_ok == 0)
3111 continue;
3112 pciide_map_compat_intr(pa, cp, channel, interface);
3113 cmd3112_setup_channel(&cp->wdc_channel);
3114 }
3115 }
3116
3117 void
3118 cmd3112_setup_channel(chp)
3119 struct channel_softc *chp;
3120 {
3121 struct ata_drive_datas *drvp;
3122 int drive;
3123 u_int32_t idedma_ctl, dtm;
3124 struct pciide_channel *cp = (struct pciide_channel*)chp;
3125 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc;
3126
3127 /* setup DMA if needed */
3128 pciide_channel_dma_setup(cp);
3129
3130 idedma_ctl = 0;
3131 dtm = 0;
3132
3133 for (drive = 0; drive < 2; drive++) {
3134 drvp = &chp->ch_drive[drive];
3135 /* If no drive, skip */
3136 if ((drvp->drive_flags & DRIVE) == 0)
3137 continue;
3138 if (drvp->drive_flags & DRIVE_UDMA) {
3139 /* use Ultra/DMA */
3140 drvp->drive_flags &= ~DRIVE_DMA;
3141 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3142 dtm |= DTM_IDEx_DMA;
3143 } else if (drvp->drive_flags & DRIVE_DMA) {
3144 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3145 dtm |= DTM_IDEx_DMA;
3146 } else {
3147 dtm |= DTM_IDEx_PIO;
3148 }
3149 }
3150
3151 /*
3152 	 * Nothing to do to set up modes; mode selection is meaningless on
3153 	 * S-ATA (but many S-ATA drives still want to get the SET_FEATURES
3154 	 * command).
3155 */
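	/*
	 * The DTM register written below only selects the PIO or DMA datapath
	 * per channel; the actual link speed is negotiated by the S-ATA PHY,
	 * which is presumably why no per-mode timings are needed here.
	 */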
3156 if (idedma_ctl != 0) {
3157 /* Add software bits in status register */
3158 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3159 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3160 idedma_ctl);
3161 }
3162 pci_conf_write(sc->sc_pc, sc->sc_tag,
3163 chp->channel == 0 ? SII3112_DTM_IDE0 : SII3112_DTM_IDE1, dtm);
3164 pciide_print_modes(cp);
3165 }
3166
3167 void
3168 cy693_chip_map(sc, pa)
3169 struct pciide_softc *sc;
3170 struct pci_attach_args *pa;
3171 {
3172 struct pciide_channel *cp;
3173 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3174 bus_size_t cmdsize, ctlsize;
3175
3176 if (pciide_chipen(sc, pa) == 0)
3177 return;
3178 /*
3179 	 * This chip has 2 PCI IDE functions, one for primary and one for
3180 	 * secondary, so we need to call pciide_mapregs_compat() with
3181 	 * the real channel.
3182 */
3183 if (pa->pa_function == 1) {
3184 sc->sc_cy_compatchan = 0;
3185 } else if (pa->pa_function == 2) {
3186 sc->sc_cy_compatchan = 1;
3187 } else {
3188 printf("%s: unexpected PCI function %d\n",
3189 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3190 return;
3191 }
3192 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
3193 printf("%s: bus-master DMA support present",
3194 sc->sc_wdcdev.sc_dev.dv_xname);
3195 pciide_mapreg_dma(sc, pa);
3196 } else {
3197 printf("%s: hardware does not support DMA",
3198 sc->sc_wdcdev.sc_dev.dv_xname);
3199 sc->sc_dma_ok = 0;
3200 }
3201 printf("\n");
3202
3203 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
3204 if (sc->sc_cy_handle == NULL) {
3205 printf("%s: unable to map hyperCache control registers\n",
3206 sc->sc_wdcdev.sc_dev.dv_xname);
3207 sc->sc_dma_ok = 0;
3208 }
3209
3210 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3211 WDC_CAPABILITY_MODE;
3212 if (sc->sc_dma_ok) {
3213 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3214 sc->sc_wdcdev.irqack = pciide_irqack;
3215 }
3216 sc->sc_wdcdev.PIO_cap = 4;
3217 sc->sc_wdcdev.DMA_cap = 2;
3218 sc->sc_wdcdev.set_modes = cy693_setup_channel;
3219
3220 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3221 sc->sc_wdcdev.nchannels = 1;
3222
3223 /* Only one channel for this chip; if we are here it's enabled */
3224 cp = &sc->pciide_channels[0];
3225 sc->wdc_chanarray[0] = &cp->wdc_channel;
3226 cp->name = PCIIDE_CHANNEL_NAME(0);
3227 cp->wdc_channel.channel = 0;
3228 cp->wdc_channel.wdc = &sc->sc_wdcdev;
3229 cp->wdc_channel.ch_queue =
3230 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
3231 if (cp->wdc_channel.ch_queue == NULL) {
3232 printf("%s primary channel: "
3233 "can't allocate memory for command queue",
3234 sc->sc_wdcdev.sc_dev.dv_xname);
3235 return;
3236 }
3237 printf("%s: primary channel %s to ",
3238 sc->sc_wdcdev.sc_dev.dv_xname,
3239 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
3240 "configured" : "wired");
3241 if (interface & PCIIDE_INTERFACE_PCI(0)) {
3242 printf("native-PCI");
3243 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
3244 pciide_pci_intr);
3245 } else {
3246 printf("compatibility");
3247 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
3248 &cmdsize, &ctlsize);
3249 }
3250 printf(" mode\n");
3251 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3252 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3253 wdcattach(&cp->wdc_channel);
3254 if (pciide_chan_candisable(cp)) {
3255 pci_conf_write(sc->sc_pc, sc->sc_tag,
3256 PCI_COMMAND_STATUS_REG, 0);
3257 }
3258 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
3259 if (cp->hw_ok == 0)
3260 return;
3261 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
3262 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
3263 cy693_setup_channel(&cp->wdc_channel);
3264 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
3265 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
3266 }
3267
3268 void
3269 cy693_setup_channel(chp)
3270 struct channel_softc *chp;
3271 {
3272 struct ata_drive_datas *drvp;
3273 int drive;
3274 u_int32_t cy_cmd_ctrl;
3275 u_int32_t idedma_ctl;
3276 struct pciide_channel *cp = (struct pciide_channel*)chp;
3277 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3278 int dma_mode = -1;
3279
3280 cy_cmd_ctrl = idedma_ctl = 0;
3281
3282 /* setup DMA if needed */
3283 pciide_channel_dma_setup(cp);
3284
3285 for (drive = 0; drive < 2; drive++) {
3286 drvp = &chp->ch_drive[drive];
3287 /* If no drive, skip */
3288 if ((drvp->drive_flags & DRIVE) == 0)
3289 continue;
3290 /* add timing values, setup DMA if needed */
3291 if (drvp->drive_flags & DRIVE_DMA) {
3292 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3293 /* use Multiword DMA */
3294 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
3295 dma_mode = drvp->DMA_mode;
3296 }
3297 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3298 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
3299 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3300 CY_CMD_CTRL_IOW_REC_OFF(drive));
3301 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3302 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
3303 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3304 CY_CMD_CTRL_IOR_REC_OFF(drive));
3305 }
3306 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
3307 chp->ch_drive[0].DMA_mode = dma_mode;
3308 chp->ch_drive[1].DMA_mode = dma_mode;
3309
3310 if (dma_mode == -1)
3311 dma_mode = 0;
3312
3313 if (sc->sc_cy_handle != NULL) {
3314 /* Note: `multiple' is implied. */
3315 cy82c693_write(sc->sc_cy_handle,
3316 (sc->sc_cy_compatchan == 0) ?
3317 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
3318 }
3319
3320 pciide_print_modes(cp);
3321
3322 if (idedma_ctl != 0) {
3323 /* Add software bits in status register */
3324 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3325 IDEDMA_CTL, idedma_ctl);
3326 }
3327 }
3328
3329 static struct sis_hostbr_type {
3330 u_int16_t id;
3331 u_int8_t rev;
3332 u_int8_t udma_mode;
3333 char *name;
3334 u_int8_t type;
3335 #define SIS_TYPE_NOUDMA 0
3336 #define SIS_TYPE_66 1
3337 #define SIS_TYPE_100OLD 2
3338 #define SIS_TYPE_100NEW 3
3339 #define SIS_TYPE_133OLD 4
3340 #define SIS_TYPE_133NEW 5
3341 #define SIS_TYPE_SOUTH 6
3342 } sis_hostbr_type[] = {
3343 	/* Most of the info here is from sos (at) freebsd.org */
3344 {PCI_PRODUCT_SIS_530HB, 0x00, 4, "530", SIS_TYPE_66},
3345 #if 0
3346 /*
3347 	 * Controllers associated with a rev 0x2 530 Host to PCI Bridge
3348 * have problems with UDMA (info provided by Christos)
3349 */
3350 {PCI_PRODUCT_SIS_530HB, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA},
3351 #endif
3352 {PCI_PRODUCT_SIS_540HB, 0x00, 4, "540", SIS_TYPE_66},
3353 {PCI_PRODUCT_SIS_550HB, 0x00, 4, "550", SIS_TYPE_66},
3354 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66},
3355 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66},
3356 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW},
3357 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW},
3358 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW},
3359 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH},
3360 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH},
3361 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH},
3362 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH},
3363 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH},
3364 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH},
3365 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH},
3366 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH},
3367 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH},
3368 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD},
3369 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW},
3370 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW},
3371 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH},
3372 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW},
3373 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH},
3374 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH},
3375 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH},
3376 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", SIS_TYPE_SOUTH},
3377 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH},
3378 {PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH},
3379 /*
3380 	 * From sos (at) freebsd.org: the 0x961 ID will never be found in the real world
3381 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW},
3382 */
3383 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW},
3384 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW},
3385 };
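/*
 * sis_hostbr_match() below walks the whole table and keeps the last entry
 * whose product ID matches and whose minimum revision is satisfied, so e.g.
 * a 630 at revision 0x30 or later picks up the "630S" entry rather than the
 * plain "630" one.
 */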
3386
3387 static struct sis_hostbr_type *sis_hostbr_type_match;
3388
3389 static int
3390 sis_hostbr_match(pa)
3391 struct pci_attach_args *pa;
3392 {
3393 int i;
3394 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS)
3395 return 0;
3396 sis_hostbr_type_match = NULL;
3397 for (i = 0;
3398 i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]);
3399 i++) {
3400 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id &&
3401 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev)
3402 sis_hostbr_type_match = &sis_hostbr_type[i];
3403 }
3404 return (sis_hostbr_type_match != NULL);
3405 }
3406
3407 static int sis_south_match(pa)
3408 struct pci_attach_args *pa;
3409 {
3410 	return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS &&
3411 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 &&
3412 PCI_REVISION(pa->pa_class) >= 0x10);
3413 }
3414
3415 void
3416 sis_chip_map(sc, pa)
3417 struct pciide_softc *sc;
3418 struct pci_attach_args *pa;
3419 {
3420 struct pciide_channel *cp;
3421 int channel;
3422 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
3423 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3424 pcireg_t rev = PCI_REVISION(pa->pa_class);
3425 bus_size_t cmdsize, ctlsize;
3426
3427 if (pciide_chipen(sc, pa) == 0)
3428 return;
3429 printf(": Silicon Integrated System ");
3430 pci_find_device(NULL, sis_hostbr_match);
3431 if (sis_hostbr_type_match) {
3432 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) {
3433 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57,
3434 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3435 SIS_REG_57) & 0x7f);
3436 if (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
3437 PCI_ID_REG)) == SIS_PRODUCT_5518) {
3438 printf("96X UDMA%d",
3439 sis_hostbr_type_match->udma_mode);
3440 sc->sis_type = SIS_TYPE_133NEW;
3441 sc->sc_wdcdev.UDMA_cap =
3442 sis_hostbr_type_match->udma_mode;
3443 } else {
3444 if (pci_find_device(NULL, sis_south_match)) {
3445 sc->sis_type = SIS_TYPE_133OLD;
3446 sc->sc_wdcdev.UDMA_cap =
3447 sis_hostbr_type_match->udma_mode;
3448 } else {
3449 sc->sis_type = SIS_TYPE_100NEW;
3450 sc->sc_wdcdev.UDMA_cap =
3451 sis_hostbr_type_match->udma_mode;
3452 }
3453 }
3454 } else {
3455 sc->sis_type = sis_hostbr_type_match->type;
3456 sc->sc_wdcdev.UDMA_cap =
3457 sis_hostbr_type_match->udma_mode;
3458 }
3459 		printf("%s", sis_hostbr_type_match->name);
3460 } else {
3461 printf("5597/5598");
3462 if (rev >= 0xd0) {
3463 sc->sc_wdcdev.UDMA_cap = 2;
3464 sc->sis_type = SIS_TYPE_66;
3465 } else {
3466 sc->sc_wdcdev.UDMA_cap = 0;
3467 sc->sis_type = SIS_TYPE_NOUDMA;
3468 }
3469 }
3470 printf(" IDE controller (rev. 0x%02x)\n", PCI_REVISION(pa->pa_class));
3471 printf("%s: bus-master DMA support present",
3472 sc->sc_wdcdev.sc_dev.dv_xname);
3473 pciide_mapreg_dma(sc, pa);
3474 printf("\n");
3475
3476 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3477 WDC_CAPABILITY_MODE;
3478 if (sc->sc_dma_ok) {
3479 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3480 sc->sc_wdcdev.irqack = pciide_irqack;
3481 if (sc->sis_type >= SIS_TYPE_66)
3482 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3483 }
3484
3485 sc->sc_wdcdev.PIO_cap = 4;
3486 sc->sc_wdcdev.DMA_cap = 2;
3487
3488 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3489 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3490 switch(sc->sis_type) {
3491 case SIS_TYPE_NOUDMA:
3492 case SIS_TYPE_66:
3493 case SIS_TYPE_100OLD:
3494 sc->sc_wdcdev.set_modes = sis_setup_channel;
3495 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
3496 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
3497 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC);
3498 break;
3499 case SIS_TYPE_100NEW:
3500 case SIS_TYPE_133OLD:
3501 sc->sc_wdcdev.set_modes = sis_setup_channel;
3502 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49,
3503 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01);
3504 break;
3505 case SIS_TYPE_133NEW:
3506 sc->sc_wdcdev.set_modes = sis96x_setup_channel;
3507 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50,
3508 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7);
3509 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52,
3510 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7);
3511 break;
3512 }
3513
3514
3515 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3516 cp = &sc->pciide_channels[channel];
3517 if (pciide_chansetup(sc, channel, interface) == 0)
3518 continue;
3519 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
3520 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
3521 printf("%s: %s channel ignored (disabled)\n",
3522 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3523 continue;
3524 }
3525 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3526 pciide_pci_intr);
3527 if (cp->hw_ok == 0)
3528 continue;
3529 if (pciide_chan_candisable(cp)) {
3530 if (channel == 0)
3531 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
3532 else
3533 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
3534 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
3535 sis_ctr0);
3536 }
3537 pciide_map_compat_intr(pa, cp, channel, interface);
3538 if (cp->hw_ok == 0)
3539 continue;
3540 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3541 }
3542 }
3543
3544 void
3545 sis96x_setup_channel(chp)
3546 struct channel_softc *chp;
3547 {
3548 struct ata_drive_datas *drvp;
3549 int drive;
3550 u_int32_t sis_tim;
3551 u_int32_t idedma_ctl;
3552 int regtim;
3553 struct pciide_channel *cp = (struct pciide_channel*)chp;
3554 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3555
3556 sis_tim = 0;
3557 idedma_ctl = 0;
3558 /* setup DMA if needed */
3559 pciide_channel_dma_setup(cp);
3560
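	/*
	 * Each drive has its own 32-bit timing register in PCI config
	 * space; SIS_TIM133() derives its offset from the value of
	 * register 0x57 and the channel/drive numbers.
	 */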
3561 for (drive = 0; drive < 2; drive++) {
3562 regtim = SIS_TIM133(
3563 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57),
3564 chp->channel, drive);
3565 drvp = &chp->ch_drive[drive];
3566 /* If no drive, skip */
3567 if ((drvp->drive_flags & DRIVE) == 0)
3568 continue;
3569 /* add timing values, setup DMA if needed */
3570 if (drvp->drive_flags & DRIVE_UDMA) {
3571 /* use Ultra/DMA */
3572 drvp->drive_flags &= ~DRIVE_DMA;
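			/*
			 * The SIS96x_REG_CBL_33 bit presumably flags a
			 * 40-conductor cable, so cap the mode at UDMA2.
			 */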
3573 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3574 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) {
3575 if (drvp->UDMA_mode > 2)
3576 drvp->UDMA_mode = 2;
3577 }
3578 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode];
3579 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3580 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3581 } else if (drvp->drive_flags & DRIVE_DMA) {
3582 /*
3583 * use Multiword DMA
3584 * Timings will be used for both PIO and DMA,
3585 * so adjust DMA mode if needed
3586 */
3587 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3588 drvp->PIO_mode = drvp->DMA_mode + 2;
3589 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3590 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3591 drvp->PIO_mode - 2 : 0;
3592 sis_tim |= sis_dma133new_tim[drvp->DMA_mode];
3593 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3594 } else {
3595 sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
3596 }
3597 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for "
3598 "channel %d drive %d: 0x%x (reg 0x%x)\n",
3599 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE);
3600 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim);
3601 }
3602 if (idedma_ctl != 0) {
3603 /* Add software bits in status register */
3604 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3605 IDEDMA_CTL+ (IDEDMA_SCH_OFFSET * chp->channel),
3606 idedma_ctl);
3607 }
3608 pciide_print_modes(cp);
3609 }
3610
3611 void
3612 sis_setup_channel(chp)
3613 struct channel_softc *chp;
3614 {
3615 struct ata_drive_datas *drvp;
3616 int drive;
3617 u_int32_t sis_tim;
3618 u_int32_t idedma_ctl;
3619 struct pciide_channel *cp = (struct pciide_channel*)chp;
3620 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3621
3622 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3623 "channel %d 0x%x\n", chp->channel,
3624 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3625 DEBUG_PROBE);
3626 sis_tim = 0;
3627 idedma_ctl = 0;
3628 /* setup DMA if needed */
3629 pciide_channel_dma_setup(cp);
3630
3631 for (drive = 0; drive < 2; drive++) {
3632 drvp = &chp->ch_drive[drive];
3633 /* If no drive, skip */
3634 if ((drvp->drive_flags & DRIVE) == 0)
3635 continue;
3636 /* add timing values, setup DMA if needed */
3637 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3638 (drvp->drive_flags & DRIVE_UDMA) == 0)
3639 goto pio;
3640
3641 if (drvp->drive_flags & DRIVE_UDMA) {
3642 /* use Ultra/DMA */
3643 drvp->drive_flags &= ~DRIVE_DMA;
3644 if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
3645 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) {
3646 if (drvp->UDMA_mode > 2)
3647 drvp->UDMA_mode = 2;
3648 }
3649 switch (sc->sis_type) {
3650 case SIS_TYPE_66:
3651 case SIS_TYPE_100OLD:
3652 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] <<
3653 SIS_TIM66_UDMA_TIME_OFF(drive);
3654 break;
			case SIS_TYPE_100NEW:
				sis_tim |=
				    sis_udma100new_tim[drvp->UDMA_mode] <<
				    SIS_TIM100_UDMA_TIME_OFF(drive);
				break;
3659 case SIS_TYPE_133OLD:
3660 sis_tim |=
3661 sis_udma133old_tim[drvp->UDMA_mode] <<
3662 SIS_TIM100_UDMA_TIME_OFF(drive);
3663 break;
3664 default:
3665 printf("unknown SiS IDE type %d\n",
3666 sc->sis_type);
3667 }
3668 } else {
3669 /*
3670 * use Multiword DMA
3671 * Timings will be used for both PIO and DMA,
3672 * so adjust DMA mode if needed
3673 */
3674 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3675 drvp->PIO_mode = drvp->DMA_mode + 2;
3676 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3677 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3678 drvp->PIO_mode - 2 : 0;
3679 if (drvp->DMA_mode == 0)
3680 drvp->PIO_mode = 0;
3681 }
3682 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3683 pio: switch (sc->sis_type) {
3684 case SIS_TYPE_NOUDMA:
3685 case SIS_TYPE_66:
3686 case SIS_TYPE_100OLD:
3687 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3688 SIS_TIM66_ACT_OFF(drive);
3689 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3690 SIS_TIM66_REC_OFF(drive);
3691 break;
3692 case SIS_TYPE_100NEW:
3693 case SIS_TYPE_133OLD:
3694 sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3695 SIS_TIM100_ACT_OFF(drive);
3696 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3697 SIS_TIM100_REC_OFF(drive);
3698 break;
3699 default:
3700 printf("unknown SiS IDE type %d\n",
3701 sc->sis_type);
3702 }
3703 }
3704 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3705 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3706 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3707 if (idedma_ctl != 0) {
3708 /* Add software bits in status register */
3709 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3710 IDEDMA_CTL+ (IDEDMA_SCH_OFFSET * chp->channel),
3711 idedma_ctl);
3712 }
3713 pciide_print_modes(cp);
3714 }
3715
3716 void
3717 acer_chip_map(sc, pa)
3718 struct pciide_softc *sc;
3719 struct pci_attach_args *pa;
3720 {
3721 struct pciide_channel *cp;
3722 int channel;
3723 pcireg_t cr, interface;
3724 bus_size_t cmdsize, ctlsize;
3725 pcireg_t rev = PCI_REVISION(pa->pa_class);
3726
3727 if (pciide_chipen(sc, pa) == 0)
3728 return;
3729 printf("%s: bus-master DMA support present",
3730 sc->sc_wdcdev.sc_dev.dv_xname);
3731 pciide_mapreg_dma(sc, pa);
3732 printf("\n");
3733 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3734 WDC_CAPABILITY_MODE;
3735 if (sc->sc_dma_ok) {
3736 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3737 if (rev >= 0x20) {
3738 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3739 if (rev >= 0xC4)
3740 sc->sc_wdcdev.UDMA_cap = 5;
3741 else if (rev >= 0xC2)
3742 sc->sc_wdcdev.UDMA_cap = 4;
3743 else
3744 sc->sc_wdcdev.UDMA_cap = 2;
3745 }
3746 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3747 sc->sc_wdcdev.irqack = pciide_irqack;
3748 }
3749
3750 sc->sc_wdcdev.PIO_cap = 4;
3751 sc->sc_wdcdev.DMA_cap = 2;
3752 sc->sc_wdcdev.set_modes = acer_setup_channel;
3753 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3754 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3755
3756 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3757 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3758 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3759
3760 /* Enable "microsoft register bits" R/W. */
3761 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3762 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3763 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3764 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3765 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3766 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3767 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3768 ~ACER_CHANSTATUSREGS_RO);
3769 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3770 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3771 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3772 /* Don't use cr, re-read the real register content instead */
3773 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3774 PCI_CLASS_REG));
3775
3776 /* From linux: enable "Cable Detection" */
3777 if (rev >= 0xC2) {
3778 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3779 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3780 | ACER_0x4B_CDETECT);
3781 }
3782
3783 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3784 cp = &sc->pciide_channels[channel];
3785 if (pciide_chansetup(sc, channel, interface) == 0)
3786 continue;
3787 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3788 printf("%s: %s channel ignored (disabled)\n",
3789 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3790 continue;
3791 }
		/* Newer controllers seem to lack the ACER_CHIDS register. Sigh. */
3793 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3794 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3795 if (cp->hw_ok == 0)
3796 continue;
3797 if (pciide_chan_candisable(cp)) {
3798 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3799 pci_conf_write(sc->sc_pc, sc->sc_tag,
3800 PCI_CLASS_REG, cr);
3801 }
3802 pciide_map_compat_intr(pa, cp, channel, interface);
3803 acer_setup_channel(&cp->wdc_channel);
3804 }
3805 }
3806
3807 void
3808 acer_setup_channel(chp)
3809 struct channel_softc *chp;
3810 {
3811 struct ata_drive_datas *drvp;
3812 int drive;
3813 u_int32_t acer_fifo_udma;
3814 u_int32_t idedma_ctl;
3815 struct pciide_channel *cp = (struct pciide_channel*)chp;
3816 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3817
3818 idedma_ctl = 0;
3819 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3820 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3821 acer_fifo_udma), DEBUG_PROBE);
3822 /* setup DMA if needed */
3823 pciide_channel_dma_setup(cp);
3824
3825 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3826 DRIVE_UDMA) { /* check 80 pins cable */
3827 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3828 ACER_0x4A_80PIN(chp->channel)) {
3829 if (chp->ch_drive[0].UDMA_mode > 2)
3830 chp->ch_drive[0].UDMA_mode = 2;
3831 if (chp->ch_drive[1].UDMA_mode > 2)
3832 chp->ch_drive[1].UDMA_mode = 2;
3833 }
3834 }
3835
3836 for (drive = 0; drive < 2; drive++) {
3837 drvp = &chp->ch_drive[drive];
3838 /* If no drive, skip */
3839 if ((drvp->drive_flags & DRIVE) == 0)
3840 continue;
3841 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3842 "channel %d drive %d 0x%x\n", chp->channel, drive,
3843 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3844 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3845 /* clear FIFO/DMA mode */
3846 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3847 ACER_UDMA_EN(chp->channel, drive) |
3848 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3849
3850 /* add timing values, setup DMA if needed */
3851 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3852 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3853 acer_fifo_udma |=
3854 ACER_FTH_OPL(chp->channel, drive, 0x1);
3855 goto pio;
3856 }
3857
3858 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3859 if (drvp->drive_flags & DRIVE_UDMA) {
3860 /* use Ultra/DMA */
3861 drvp->drive_flags &= ~DRIVE_DMA;
3862 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3863 acer_fifo_udma |=
3864 ACER_UDMA_TIM(chp->channel, drive,
3865 acer_udma[drvp->UDMA_mode]);
3866 /* XXX disable if one drive < UDMA3 ? */
3867 if (drvp->UDMA_mode >= 3) {
3868 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3869 ACER_0x4B,
3870 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3871 ACER_0x4B) | ACER_0x4B_UDMA66);
3872 }
3873 } else {
3874 /*
3875 * use Multiword DMA
3876 * Timings will be used for both PIO and DMA,
3877 * so adjust DMA mode if needed
3878 */
3879 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3880 drvp->PIO_mode = drvp->DMA_mode + 2;
3881 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3882 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3883 drvp->PIO_mode - 2 : 0;
3884 if (drvp->DMA_mode == 0)
3885 drvp->PIO_mode = 0;
3886 }
3887 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3888 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3889 ACER_IDETIM(chp->channel, drive),
3890 acer_pio[drvp->PIO_mode]);
3891 }
3892 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3893 acer_fifo_udma), DEBUG_PROBE);
3894 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3895 if (idedma_ctl != 0) {
3896 /* Add software bits in status register */
3897 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3898 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3899 idedma_ctl);
3900 }
3901 pciide_print_modes(cp);
3902 }
3903
3904 int
3905 acer_pci_intr(arg)
3906 void *arg;
3907 {
3908 struct pciide_softc *sc = arg;
3909 struct pciide_channel *cp;
3910 struct channel_softc *wdc_cp;
3911 int i, rv, crv;
3912 u_int32_t chids;
3913
3914 rv = 0;
3915 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3916 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3917 cp = &sc->pciide_channels[i];
3918 wdc_cp = &cp->wdc_channel;
		/* If a compat channel, skip. */
3920 if (cp->compat)
3921 continue;
3922 if (chids & ACER_CHIDS_INT(i)) {
3923 crv = wdcintr(wdc_cp);
3924 if (crv == 0)
3925 printf("%s:%d: bogus intr\n",
3926 sc->sc_wdcdev.sc_dev.dv_xname, i);
3927 else
3928 rv = 1;
3929 }
3930 }
3931 return rv;
3932 }
3933
3934 void
3935 hpt_chip_map(sc, pa)
3936 struct pciide_softc *sc;
3937 struct pci_attach_args *pa;
3938 {
3939 struct pciide_channel *cp;
3940 int i, compatchan, revision;
3941 pcireg_t interface;
3942 bus_size_t cmdsize, ctlsize;
3943
3944 if (pciide_chipen(sc, pa) == 0)
3945 return;
3946 revision = PCI_REVISION(pa->pa_class);
3947 printf(": Triones/Highpoint ");
3948 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3949 printf("HPT374 IDE Controller\n");
3950 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
3951 printf("HPT372 IDE Controller\n");
3952 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3953 if (revision == HPT372_REV)
3954 printf("HPT372 IDE Controller\n");
3955 else if (revision == HPT370_REV)
3956 printf("HPT370 IDE Controller\n");
3957 else if (revision == HPT370A_REV)
3958 printf("HPT370A IDE Controller\n");
3959 else if (revision == HPT366_REV)
3960 printf("HPT366 IDE Controller\n");
3961 else
3962 printf("unknown HPT IDE controller rev %d\n", revision);
3963 } else
3964 printf("unknown HPT IDE controller 0x%x\n",
3965 sc->sc_pp->ide_product);
3966
	/*
	 * When the chip is in native mode it identifies itself as
	 * 'misc mass storage'. Fake the interface in this case.
	 */
3971 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3972 interface = PCI_INTERFACE(pa->pa_class);
3973 } else {
3974 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3975 PCIIDE_INTERFACE_PCI(0);
3976 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3977 (revision == HPT370_REV || revision == HPT370A_REV ||
3978 revision == HPT372_REV)) ||
3979 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3980 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3981 interface |= PCIIDE_INTERFACE_PCI(1);
3982 }
3983
3984 printf("%s: bus-master DMA support present",
3985 sc->sc_wdcdev.sc_dev.dv_xname);
3986 pciide_mapreg_dma(sc, pa);
3987 printf("\n");
3988 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3989 WDC_CAPABILITY_MODE;
3990 if (sc->sc_dma_ok) {
3991 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3992 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3993 sc->sc_wdcdev.irqack = pciide_irqack;
3994 }
3995 sc->sc_wdcdev.PIO_cap = 4;
3996 sc->sc_wdcdev.DMA_cap = 2;
3997
3998 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3999 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4000 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4001 revision == HPT366_REV) {
4002 sc->sc_wdcdev.UDMA_cap = 4;
4003 /*
4004 * The 366 has 2 PCI IDE functions, one for primary and one
4005 * for secondary. So we need to call pciide_mapregs_compat()
4006 * with the real channel
4007 */
4008 if (pa->pa_function == 0) {
4009 compatchan = 0;
4010 } else if (pa->pa_function == 1) {
4011 compatchan = 1;
4012 } else {
4013 printf("%s: unexpected PCI function %d\n",
4014 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
4015 return;
4016 }
4017 sc->sc_wdcdev.nchannels = 1;
4018 } else {
4019 sc->sc_wdcdev.nchannels = 2;
4020 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
4021 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4022 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4023 revision == HPT372_REV))
4024 sc->sc_wdcdev.UDMA_cap = 6;
4025 else
4026 sc->sc_wdcdev.UDMA_cap = 5;
4027 }
4028 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4029 cp = &sc->pciide_channels[i];
4030 if (sc->sc_wdcdev.nchannels > 1) {
4031 compatchan = i;
4032 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
4033 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
4034 printf("%s: %s channel ignored (disabled)\n",
4035 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4036 continue;
4037 }
4038 }
4039 if (pciide_chansetup(sc, i, interface) == 0)
4040 continue;
4041 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4042 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4043 &ctlsize, hpt_pci_intr);
4044 } else {
4045 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
4046 &cmdsize, &ctlsize);
4047 }
4048 if (cp->hw_ok == 0)
4049 return;
4050 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4051 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4052 wdcattach(&cp->wdc_channel);
4053 hpt_setup_channel(&cp->wdc_channel);
4054 }
4055 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4056 (revision == HPT370_REV || revision == HPT370A_REV ||
4057 revision == HPT372_REV)) ||
4058 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4059 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
		/*
		 * HPT370_REV and higher have a bit to disable interrupts;
		 * make sure to clear it.
		 */
4064 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
4065 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
4066 ~HPT_CSEL_IRQDIS);
4067 }
4068 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
4069 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
4070 revision == HPT372_REV ) ||
4071 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
4072 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
4073 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
4074 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
4075 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
4076 return;
4077 }
4078
4079 void
4080 hpt_setup_channel(chp)
4081 struct channel_softc *chp;
4082 {
4083 struct ata_drive_datas *drvp;
4084 int drive;
4085 int cable;
4086 u_int32_t before, after;
4087 u_int32_t idedma_ctl;
4088 struct pciide_channel *cp = (struct pciide_channel*)chp;
4089 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4090 int revision =
4091 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
4092
4093 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
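	/*
	 * HPT_CSEL holds the per-channel cable-ID bits used below to cap
	 * Ultra-DMA modes.
	 */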
4094
4095 /* setup DMA if needed */
4096 pciide_channel_dma_setup(cp);
4097
4098 idedma_ctl = 0;
4099
4100 /* Per drive settings */
4101 for (drive = 0; drive < 2; drive++) {
4102 drvp = &chp->ch_drive[drive];
4103 /* If no drive, skip */
4104 if ((drvp->drive_flags & DRIVE) == 0)
4105 continue;
4106 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
4107 HPT_IDETIM(chp->channel, drive));
4108
4109 /* add timing values, setup DMA if needed */
4110 if (drvp->drive_flags & DRIVE_UDMA) {
4111 /* use Ultra/DMA */
4112 drvp->drive_flags &= ~DRIVE_DMA;
4113 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
4114 drvp->UDMA_mode > 2)
4115 drvp->UDMA_mode = 2;
4116 switch (sc->sc_pp->ide_product) {
4117 case PCI_PRODUCT_TRIONES_HPT374:
4118 after = hpt374_udma[drvp->UDMA_mode];
4119 break;
4120 case PCI_PRODUCT_TRIONES_HPT372:
4121 after = hpt372_udma[drvp->UDMA_mode];
4122 break;
4123 case PCI_PRODUCT_TRIONES_HPT366:
4124 default:
4125 switch(revision) {
4126 case HPT372_REV:
4127 after = hpt372_udma[drvp->UDMA_mode];
4128 break;
4129 case HPT370_REV:
4130 case HPT370A_REV:
4131 after = hpt370_udma[drvp->UDMA_mode];
4132 break;
4133 case HPT366_REV:
4134 default:
4135 after = hpt366_udma[drvp->UDMA_mode];
4136 break;
4137 }
4138 }
4139 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4140 } else if (drvp->drive_flags & DRIVE_DMA) {
4141 /*
4142 * use Multiword DMA.
4143 * Timings will be used for both PIO and DMA, so adjust
4144 * DMA mode if needed
4145 */
4146 if (drvp->PIO_mode >= 3 &&
4147 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
4148 drvp->DMA_mode = drvp->PIO_mode - 2;
4149 }
4150 switch (sc->sc_pp->ide_product) {
4151 case PCI_PRODUCT_TRIONES_HPT374:
4152 after = hpt374_dma[drvp->DMA_mode];
4153 break;
4154 case PCI_PRODUCT_TRIONES_HPT372:
4155 after = hpt372_dma[drvp->DMA_mode];
4156 break;
4157 case PCI_PRODUCT_TRIONES_HPT366:
4158 default:
4159 switch(revision) {
4160 case HPT372_REV:
4161 after = hpt372_dma[drvp->DMA_mode];
4162 break;
4163 case HPT370_REV:
4164 case HPT370A_REV:
4165 after = hpt370_dma[drvp->DMA_mode];
4166 break;
4167 case HPT366_REV:
4168 default:
4169 after = hpt366_dma[drvp->DMA_mode];
4170 break;
4171 }
4172 }
4173 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4174 } else {
4175 /* PIO only */
4176 switch (sc->sc_pp->ide_product) {
4177 case PCI_PRODUCT_TRIONES_HPT374:
4178 after = hpt374_pio[drvp->PIO_mode];
4179 break;
4180 case PCI_PRODUCT_TRIONES_HPT372:
4181 after = hpt372_pio[drvp->PIO_mode];
4182 break;
4183 case PCI_PRODUCT_TRIONES_HPT366:
4184 default:
4185 switch(revision) {
4186 case HPT372_REV:
4187 after = hpt372_pio[drvp->PIO_mode];
4188 break;
4189 case HPT370_REV:
4190 case HPT370A_REV:
4191 after = hpt370_pio[drvp->PIO_mode];
4192 break;
4193 case HPT366_REV:
4194 default:
4195 after = hpt366_pio[drvp->PIO_mode];
4196 break;
4197 }
4198 }
4199 }
4200 pci_conf_write(sc->sc_pc, sc->sc_tag,
4201 HPT_IDETIM(chp->channel, drive), after);
4202 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
4203 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
4204 after, before), DEBUG_PROBE);
4205 }
4206 if (idedma_ctl != 0) {
4207 /* Add software bits in status register */
4208 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4209 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4210 idedma_ctl);
4211 }
4212 pciide_print_modes(cp);
4213 }
4214
4215 int
4216 hpt_pci_intr(arg)
4217 void *arg;
4218 {
4219 struct pciide_softc *sc = arg;
4220 struct pciide_channel *cp;
4221 struct channel_softc *wdc_cp;
4222 int rv = 0;
4223 int dmastat, i, crv;
4224
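	/*
	 * Rather than reading a global interrupt status register, poll
	 * each channel's IDEDMA_CTL and only service a channel when INTR
	 * is set and ACT is clear.
	 */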
4225 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4226 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4227 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4228 if((dmastat & ( IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4229 IDEDMA_CTL_INTR)
4230 continue;
4231 cp = &sc->pciide_channels[i];
4232 wdc_cp = &cp->wdc_channel;
4233 crv = wdcintr(wdc_cp);
4234 if (crv == 0) {
4235 printf("%s:%d: bogus intr\n",
4236 sc->sc_wdcdev.sc_dev.dv_xname, i);
4237 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4238 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4239 } else
4240 rv = 1;
4241 }
4242 return rv;
4243 }
4244
4245
4246 /* Macros to test product */
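/*
 * Each macro matches a Promise controller generation and everything newer:
 * PDC_IS_262 covers the Ultra/66 family and later, PDC_IS_265 the Ultra/100
 * and later, PDC_IS_268 the Ultra/100TX2 and later, and PDC_IS_276 the
 * Ultra/133 family.
 */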
4247 #define PDC_IS_262(sc) \
4248 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
4249 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
4250 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
4251 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4252 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4253 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4254 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4255 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4256 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4257 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4258 #define PDC_IS_265(sc) \
4259 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
4260 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
4261 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4262 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4263 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4264 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4265 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4266 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4267 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4268 #define PDC_IS_268(sc) \
4269 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
4270 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
4271 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4272 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4273 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4274 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4275 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4276 #define PDC_IS_276(sc) \
4277 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
4278 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
4279 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
4280 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
4281 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
4282
4283 void
4284 pdc202xx_chip_map(sc, pa)
4285 struct pciide_softc *sc;
4286 struct pci_attach_args *pa;
4287 {
4288 struct pciide_channel *cp;
4289 int channel;
4290 pcireg_t interface, st, mode;
4291 bus_size_t cmdsize, ctlsize;
4292
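	/*
	 * The PDC2xx_STATE register is only read for pre-20268 chips;
	 * all uses of `st' below are likewise guarded so that 20268-style
	 * chips never touch it.
	 */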
4293 if (!PDC_IS_268(sc)) {
4294 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4295 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
4296 st), DEBUG_PROBE);
4297 }
4298 if (pciide_chipen(sc, pa) == 0)
4299 return;
4300
4301 /* turn off RAID mode */
4302 if (!PDC_IS_268(sc))
4303 st &= ~PDC2xx_STATE_IDERAID;
4304
	/*
	 * Can't rely on the PCI_CLASS_REG content if the chip was in RAID
	 * mode; we have to fake the interface.
	 */
4309 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
4310 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
4311 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4312
4313 printf("%s: bus-master DMA support present",
4314 sc->sc_wdcdev.sc_dev.dv_xname);
4315 pciide_mapreg_dma(sc, pa);
4316 printf("\n");
4317 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4318 WDC_CAPABILITY_MODE;
4319 if (sc->sc_dma_ok) {
4320 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4321 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4322 sc->sc_wdcdev.irqack = pciide_irqack;
4323 }
4324 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
4325 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
4326 sc->sc_wdcdev.cap |= WDC_CAPABILITY_RAID;
4327 sc->sc_wdcdev.PIO_cap = 4;
4328 sc->sc_wdcdev.DMA_cap = 2;
4329 if (PDC_IS_276(sc))
4330 sc->sc_wdcdev.UDMA_cap = 6;
4331 else if (PDC_IS_265(sc))
4332 sc->sc_wdcdev.UDMA_cap = 5;
4333 else if (PDC_IS_262(sc))
4334 sc->sc_wdcdev.UDMA_cap = 4;
4335 else
4336 sc->sc_wdcdev.UDMA_cap = 2;
4337 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
4338 pdc20268_setup_channel : pdc202xx_setup_channel;
4339 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4340 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4341
4342 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 ||
4343 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||
4344 sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X) {
4345 sc->sc_wdcdev.dma_start = pdc20262_dma_start;
4346 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish;
4347 }
4348
4349 if (!PDC_IS_268(sc)) {
4350 /* setup failsafe defaults */
4351 mode = 0;
4352 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
4353 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
4354 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
4355 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
4356 for (channel = 0;
4357 channel < sc->sc_wdcdev.nchannels;
4358 channel++) {
4359 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4360 "drive 0 initial timings 0x%x, now 0x%x\n",
4361 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4362 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
4363 DEBUG_PROBE);
4364 pci_conf_write(sc->sc_pc, sc->sc_tag,
4365 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
4366 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
4367 "drive 1 initial timings 0x%x, now 0x%x\n",
4368 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
4369 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
4370 pci_conf_write(sc->sc_pc, sc->sc_tag,
4371 PDC2xx_TIM(channel, 1), mode);
4372 }
4373
4374 mode = PDC2xx_SCR_DMA;
4375 if (PDC_IS_262(sc)) {
4376 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
4377 } else {
4378 /* the BIOS set it up this way */
4379 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
4380 }
4381 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
4382 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
4383 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
4384 "now 0x%x\n",
4385 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4386 PDC2xx_SCR),
4387 mode), DEBUG_PROBE);
4388 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4389 PDC2xx_SCR, mode);
4390
4391 /* controller initial state register is OK even without BIOS */
4392 /* Set DMA mode to IDE DMA compatibility */
4393 mode =
4394 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
4395 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
4396 DEBUG_PROBE);
4397 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
4398 mode | 0x1);
4399 mode =
4400 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
4401 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
4402 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
4403 mode | 0x1);
4404 }
4405
4406 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4407 cp = &sc->pciide_channels[channel];
4408 if (pciide_chansetup(sc, channel, interface) == 0)
4409 continue;
4410 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
4411 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
4412 printf("%s: %s channel ignored (disabled)\n",
4413 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4414 continue;
4415 }
4416 if (PDC_IS_265(sc))
4417 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4418 pdc20265_pci_intr);
4419 else
4420 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4421 pdc202xx_pci_intr);
4422 if (cp->hw_ok == 0)
4423 continue;
4424 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
4425 st &= ~(PDC_IS_262(sc) ?
4426 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
4427 pciide_map_compat_intr(pa, cp, channel, interface);
4428 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
4429 }
4430 if (!PDC_IS_268(sc)) {
4431 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
4432 "0x%x\n", st), DEBUG_PROBE);
4433 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
4434 }
4435 return;
4436 }
4437
4438 void
4439 pdc202xx_setup_channel(chp)
4440 struct channel_softc *chp;
4441 {
4442 struct ata_drive_datas *drvp;
4443 int drive;
4444 pcireg_t mode, st;
4445 u_int32_t idedma_ctl, scr, atapi;
4446 struct pciide_channel *cp = (struct pciide_channel*)chp;
4447 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4448 int channel = chp->channel;
4449
4450 /* setup DMA if needed */
4451 pciide_channel_dma_setup(cp);
4452
4453 idedma_ctl = 0;
4454 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
4455 sc->sc_wdcdev.sc_dev.dv_xname,
4456 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
4457 DEBUG_PROBE);
4458
4459 /* Per channel settings */
4460 if (PDC_IS_262(sc)) {
4461 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4462 PDC262_U66);
4463 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4464 /* Trim UDMA mode */
4465 if ((st & PDC262_STATE_80P(channel)) != 0 ||
4466 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4467 chp->ch_drive[0].UDMA_mode <= 2) ||
4468 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4469 chp->ch_drive[1].UDMA_mode <= 2)) {
4470 if (chp->ch_drive[0].UDMA_mode > 2)
4471 chp->ch_drive[0].UDMA_mode = 2;
4472 if (chp->ch_drive[1].UDMA_mode > 2)
4473 chp->ch_drive[1].UDMA_mode = 2;
4474 }
4475 /* Set U66 if needed */
4476 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4477 chp->ch_drive[0].UDMA_mode > 2) ||
4478 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4479 chp->ch_drive[1].UDMA_mode > 2))
4480 scr |= PDC262_U66_EN(channel);
4481 else
4482 scr &= ~PDC262_U66_EN(channel);
4483 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4484 PDC262_U66, scr);
4485 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
4486 sc->sc_wdcdev.sc_dev.dv_xname, channel,
4487 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4488 PDC262_ATAPI(channel))), DEBUG_PROBE);
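		/*
		 * If the channel has an ATAPI device and one drive uses
		 * Ultra-DMA while the other uses plain multiword DMA,
		 * clear the channel's ATAPI UDMA bit; otherwise set it.
		 */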
4489 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4490 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4491 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4492 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4493 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
4494 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4495 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4496 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4497 atapi = 0;
4498 else
4499 atapi = PDC262_ATAPI_UDMA;
4500 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4501 PDC262_ATAPI(channel), atapi);
4502 }
4503 }
4504 for (drive = 0; drive < 2; drive++) {
4505 drvp = &chp->ch_drive[drive];
4506 /* If no drive, skip */
4507 if ((drvp->drive_flags & DRIVE) == 0)
4508 continue;
4509 mode = 0;
4510 if (drvp->drive_flags & DRIVE_UDMA) {
4511 /* use Ultra/DMA */
4512 drvp->drive_flags &= ~DRIVE_DMA;
4513 mode = PDC2xx_TIM_SET_MB(mode,
4514 pdc2xx_udma_mb[drvp->UDMA_mode]);
4515 mode = PDC2xx_TIM_SET_MC(mode,
4516 pdc2xx_udma_mc[drvp->UDMA_mode]);
4517 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4518 } else if (drvp->drive_flags & DRIVE_DMA) {
4519 mode = PDC2xx_TIM_SET_MB(mode,
4520 pdc2xx_dma_mb[drvp->DMA_mode]);
4521 mode = PDC2xx_TIM_SET_MC(mode,
4522 pdc2xx_dma_mc[drvp->DMA_mode]);
4523 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4524 } else {
4525 mode = PDC2xx_TIM_SET_MB(mode,
4526 pdc2xx_dma_mb[0]);
4527 mode = PDC2xx_TIM_SET_MC(mode,
4528 pdc2xx_dma_mc[0]);
4529 }
4530 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
4531 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
4532 if (drvp->drive_flags & DRIVE_ATA)
4533 mode |= PDC2xx_TIM_PRE;
4534 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
4535 if (drvp->PIO_mode >= 3) {
4536 mode |= PDC2xx_TIM_IORDY;
4537 if (drive == 0)
4538 mode |= PDC2xx_TIM_IORDYp;
4539 }
4540 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
4541 "timings 0x%x\n",
4542 sc->sc_wdcdev.sc_dev.dv_xname,
4543 chp->channel, drive, mode), DEBUG_PROBE);
4544 pci_conf_write(sc->sc_pc, sc->sc_tag,
4545 PDC2xx_TIM(chp->channel, drive), mode);
4546 }
4547 if (idedma_ctl != 0) {
4548 /* Add software bits in status register */
4549 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4550 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4551 idedma_ctl);
4552 }
4553 pciide_print_modes(cp);
4554 }
4555
4556 void
4557 pdc20268_setup_channel(chp)
4558 struct channel_softc *chp;
4559 {
4560 struct ata_drive_datas *drvp;
4561 int drive;
4562 u_int32_t idedma_ctl;
4563 struct pciide_channel *cp = (struct pciide_channel*)chp;
4564 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4565 int u100;
4566
4567 /* setup DMA if needed */
4568 pciide_channel_dma_setup(cp);
4569
4570 idedma_ctl = 0;
4571
4572 /* I don't know what this is for, FreeBSD does it ... */
4573 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4574 IDEDMA_CMD + 0x1, 0x0b);
4575
	/*
	 * I don't know what this is for; FreeBSD checks it ... this is not
	 * cable type detection.
	 */
4580 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4581 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
4582
4583 for (drive = 0; drive < 2; drive++) {
4584 drvp = &chp->ch_drive[drive];
4585 /* If no drive, skip */
4586 if ((drvp->drive_flags & DRIVE) == 0)
4587 continue;
4588 if (drvp->drive_flags & DRIVE_UDMA) {
4589 /* use Ultra/DMA */
4590 drvp->drive_flags &= ~DRIVE_DMA;
4591 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4592 if (drvp->UDMA_mode > 2 && u100 == 0)
4593 drvp->UDMA_mode = 2;
4594 } else if (drvp->drive_flags & DRIVE_DMA) {
4595 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4596 }
4597 }
	/* Nothing to do to set up modes; the controller snoops the SET FEATURES command. */
4599 if (idedma_ctl != 0) {
4600 /* Add software bits in status register */
4601 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4602 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4603 idedma_ctl);
4604 }
4605 pciide_print_modes(cp);
4606 }
4607
4608 int
4609 pdc202xx_pci_intr(arg)
4610 void *arg;
4611 {
4612 struct pciide_softc *sc = arg;
4613 struct pciide_channel *cp;
4614 struct channel_softc *wdc_cp;
4615 int i, rv, crv;
4616 u_int32_t scr;
4617
4618 rv = 0;
4619 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4620 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4621 cp = &sc->pciide_channels[i];
4622 wdc_cp = &cp->wdc_channel;
		/* If a compat channel, skip. */
4624 if (cp->compat)
4625 continue;
4626 if (scr & PDC2xx_SCR_INT(i)) {
4627 crv = wdcintr(wdc_cp);
4628 if (crv == 0)
4629 printf("%s:%d: bogus intr (reg 0x%x)\n",
4630 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4631 else
4632 rv = 1;
4633 }
4634 }
4635 return rv;
4636 }
4637
4638 int
4639 pdc20265_pci_intr(arg)
4640 void *arg;
4641 {
4642 struct pciide_softc *sc = arg;
4643 struct pciide_channel *cp;
4644 struct channel_softc *wdc_cp;
4645 int i, rv, crv;
4646 u_int32_t dmastat;
4647
4648 rv = 0;
4649 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4650 cp = &sc->pciide_channels[i];
4651 wdc_cp = &cp->wdc_channel;
		/* If a compat channel, skip. */
4653 if (cp->compat)
4654 continue;
		/*
		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
		 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops,
		 * so use that instead (requires 2 register reads instead of 1,
		 * but we can't do it another way).
		 */
4661 dmastat = bus_space_read_1(sc->sc_dma_iot,
4662 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4663 if((dmastat & IDEDMA_CTL_INTR) == 0)
4664 continue;
4665 crv = wdcintr(wdc_cp);
4666 if (crv == 0)
4667 printf("%s:%d: bogus intr\n",
4668 sc->sc_wdcdev.sc_dev.dv_xname, i);
4669 else
4670 rv = 1;
4671 }
4672 return rv;
4673 }
4674
4675 static void
4676 pdc20262_dma_start(v, channel, drive)
4677 void *v;
4678 int channel, drive;
4679 {
4680 struct pciide_softc *sc = v;
4681 struct pciide_dma_maps *dma_maps =
4682 &sc->pciide_channels[channel].dma_maps[drive];
4683 int atapi;
4684
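	/*
	 * For LBA48 transfers, load the PDC262 ATAPI register with the
	 * LBA48 read/write opcode and the transfer size in 16-bit words
	 * (dm_mapsize / 2) before starting the DMA engine;
	 * pdc20262_dma_finish() puts the register back afterwards.
	 */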
4685 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4686 atapi = (dma_maps->dma_flags & WDC_DMA_READ) ?
4687 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE;
4688 atapi |= dma_maps->dmamap_xfer->dm_mapsize >> 1;
4689 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4690 PDC262_ATAPI(channel), atapi);
4691 }
4692
4693 pciide_dma_start(v, channel, drive);
4694 }
4695
4696 int
4697 pdc20262_dma_finish(v, channel, drive, force)
4698 void *v;
4699 int channel, drive;
4700 int force;
4701 {
4702 struct pciide_softc *sc = v;
4703 struct pciide_dma_maps *dma_maps =
4704 &sc->pciide_channels[channel].dma_maps[drive];
4705 struct channel_softc *chp;
4706 int atapi, error;
4707
4708 error = pciide_dma_finish(v, channel, drive, force);
4709
4710 if (dma_maps->dma_flags & WDC_DMA_LBA48) {
4711 chp = sc->wdc_chanarray[channel];
4712 atapi = 0;
4713 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4714 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4715 if ((!(chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4716 (chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4717 !(chp->ch_drive[1].drive_flags & DRIVE_DMA)) &&
4718 (!(chp->ch_drive[1].drive_flags & DRIVE_UDMA) ||
4719 (chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4720 !(chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4721 atapi = PDC262_ATAPI_UDMA;
4722 }
4723 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4724 PDC262_ATAPI(channel), atapi);
4725 }
4726
4727 return error;
4728 }
4729
4730 void
4731 opti_chip_map(sc, pa)
4732 struct pciide_softc *sc;
4733 struct pci_attach_args *pa;
4734 {
4735 struct pciide_channel *cp;
4736 bus_size_t cmdsize, ctlsize;
4737 pcireg_t interface;
4738 u_int8_t init_ctrl;
4739 int channel;
4740
4741 if (pciide_chipen(sc, pa) == 0)
4742 return;
4743 printf("%s: bus-master DMA support present",
4744 sc->sc_wdcdev.sc_dev.dv_xname);
4745
4746 /*
4747 * XXXSCW:
4748 * There seem to be a couple of buggy revisions/implementations
4749 * of the OPTi pciide chipset. This kludge seems to fix one of
4750 * the reported problems (PR/11644) but still fails for the
4751 * other (PR/13151), although the latter may be due to other
4752 * issues too...
4753 */
4754 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4755 printf(" but disabled due to chip rev. <= 0x12");
4756 sc->sc_dma_ok = 0;
4757 } else
4758 pciide_mapreg_dma(sc, pa);
4759
4760 printf("\n");
4761
4762 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4763 WDC_CAPABILITY_MODE;
4764 sc->sc_wdcdev.PIO_cap = 4;
4765 if (sc->sc_dma_ok) {
4766 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4767 sc->sc_wdcdev.irqack = pciide_irqack;
4768 sc->sc_wdcdev.DMA_cap = 2;
4769 }
4770 sc->sc_wdcdev.set_modes = opti_setup_channel;
4771
4772 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4773 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4774
4775 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4776 OPTI_REG_INIT_CONTROL);
4777
4778 interface = PCI_INTERFACE(pa->pa_class);
4779
4780 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4781 cp = &sc->pciide_channels[channel];
4782 if (pciide_chansetup(sc, channel, interface) == 0)
4783 continue;
4784 if (channel == 1 &&
4785 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4786 printf("%s: %s channel ignored (disabled)\n",
4787 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4788 continue;
4789 }
4790 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4791 pciide_pci_intr);
4792 if (cp->hw_ok == 0)
4793 continue;
4794 pciide_map_compat_intr(pa, cp, channel, interface);
4795 if (cp->hw_ok == 0)
4796 continue;
4797 opti_setup_channel(&cp->wdc_channel);
4798 }
4799 }
4800
4801 void
4802 opti_setup_channel(chp)
4803 struct channel_softc *chp;
4804 {
4805 struct ata_drive_datas *drvp;
4806 struct pciide_channel *cp = (struct pciide_channel*)chp;
4807 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4808 int drive, spd;
4809 int mode[2];
4810 u_int8_t rv, mr;
4811
4812 /*
4813 * The `Delay' and `Address Setup Time' fields of the
4814 * Miscellaneous Register are always zero initially.
4815 */
4816 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4817 mr &= ~(OPTI_MISC_DELAY_MASK |
4818 OPTI_MISC_ADDR_SETUP_MASK |
4819 OPTI_MISC_INDEX_MASK);
4820
4821 /* Prime the control register before setting timing values */
4822 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4823
4824 /* Determine the clockrate of the PCIbus the chip is attached to */
4825 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4826 spd &= OPTI_STRAP_PCI_SPEED_MASK;
4827
4828 /* setup DMA if needed */
4829 pciide_channel_dma_setup(cp);
4830
4831 for (drive = 0; drive < 2; drive++) {
4832 drvp = &chp->ch_drive[drive];
4833 /* If no drive, skip */
4834 if ((drvp->drive_flags & DRIVE) == 0) {
4835 mode[drive] = -1;
4836 continue;
4837 }
4838
4839 if ((drvp->drive_flags & DRIVE_DMA)) {
4840 /*
4841 * Timings will be used for both PIO and DMA,
4842 * so adjust DMA mode if needed
4843 */
4844 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4845 drvp->PIO_mode = drvp->DMA_mode + 2;
4846 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4847 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4848 drvp->PIO_mode - 2 : 0;
4849 if (drvp->DMA_mode == 0)
4850 drvp->PIO_mode = 0;
4851
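			/*
			 * Encode multiword DMA modes as 5..7 so a single
			 * index covers both the PIO (0..4) and DMA entries
			 * in the opti_tim_* tables.
			 */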
4852 mode[drive] = drvp->DMA_mode + 5;
4853 } else
4854 mode[drive] = drvp->PIO_mode;
4855
4856 if (drive && mode[0] >= 0 &&
4857 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4858 /*
4859 * Can't have two drives using different values
4860 * for `Address Setup Time'.
4861 * Slow down the faster drive to compensate.
4862 */
4863 int d = (opti_tim_as[spd][mode[0]] >
4864 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4865
4866 mode[d] = mode[1-d];
4867 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4868 chp->ch_drive[d].DMA_mode = 0;
4869 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4870 }
4871 }
4872
4873 for (drive = 0; drive < 2; drive++) {
4874 int m;
4875 if ((m = mode[drive]) < 0)
4876 continue;
4877
4878 /* Set the Address Setup Time and select appropriate index */
4879 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4880 rv |= OPTI_MISC_INDEX(drive);
4881 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4882
4883 /* Set the pulse width and recovery timing parameters */
4884 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4885 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4886 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4887 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4888
4889 /* Set the Enhanced Mode register appropriately */
4890 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4891 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4892 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4893 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4894 }
4895
4896 /* Finally, enable the timings */
4897 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4898
4899 pciide_print_modes(cp);
4900 }
4901
4902 #define ACARD_IS_850(sc) \
4903 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4904
4905 void
4906 acard_chip_map(sc, pa)
4907 struct pciide_softc *sc;
4908 struct pci_attach_args *pa;
4909 {
4910 struct pciide_channel *cp;
4911 int i;
4912 pcireg_t interface;
4913 bus_size_t cmdsize, ctlsize;
4914
4915 if (pciide_chipen(sc, pa) == 0)
4916 return;
4917
	/*
	 * When the chip is in native mode it identifies itself as
	 * 'misc mass storage'. Fake the interface in this case.
	 */
4922 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4923 interface = PCI_INTERFACE(pa->pa_class);
4924 } else {
4925 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4926 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4927 }
4928
4929 printf("%s: bus-master DMA support present",
4930 sc->sc_wdcdev.sc_dev.dv_xname);
4931 pciide_mapreg_dma(sc, pa);
4932 printf("\n");
4933 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4934 WDC_CAPABILITY_MODE;
4935
4936 if (sc->sc_dma_ok) {
4937 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4938 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4939 sc->sc_wdcdev.irqack = pciide_irqack;
4940 }
4941 sc->sc_wdcdev.PIO_cap = 4;
4942 sc->sc_wdcdev.DMA_cap = 2;
4943 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4944
4945 sc->sc_wdcdev.set_modes = acard_setup_channel;
4946 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4947 sc->sc_wdcdev.nchannels = 2;
4948
4949 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4950 cp = &sc->pciide_channels[i];
4951 if (pciide_chansetup(sc, i, interface) == 0)
4952 continue;
4953 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4954 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4955 &ctlsize, pciide_pci_intr);
4956 } else {
4957 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4958 &cmdsize, &ctlsize);
4959 }
4960 if (cp->hw_ok == 0)
4961 return;
4962 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4963 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4964 wdcattach(&cp->wdc_channel);
4965 acard_setup_channel(&cp->wdc_channel);
4966 }
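	/*
	 * On the non-850 chips, clear the ATP860_CTRL_INT bit in the
	 * control register; judging by its name it is interrupt-related,
	 * but its exact meaning is not documented here.
	 */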
4967 if (!ACARD_IS_850(sc)) {
4968 u_int32_t reg;
4969 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4970 reg &= ~ATP860_CTRL_INT;
4971 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4972 }
4973 }
4974
4975 void
4976 acard_setup_channel(chp)
4977 struct channel_softc *chp;
4978 {
4979 struct ata_drive_datas *drvp;
4980 struct pciide_channel *cp = (struct pciide_channel*)chp;
4981 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4982 int channel = chp->channel;
4983 int drive;
4984 u_int32_t idetime, udma_mode;
4985 u_int32_t idedma_ctl;
4986
4987 /* setup DMA if needed */
4988 pciide_channel_dma_setup(cp);
4989
4990 if (ACARD_IS_850(sc)) {
4991 idetime = 0;
4992 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4993 udma_mode &= ~ATP850_UDMA_MASK(channel);
4994 } else {
4995 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4996 idetime &= ~ATP860_SETTIME_MASK(channel);
4997 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4998 udma_mode &= ~ATP860_UDMA_MASK(channel);
4999
5000 /* check 80 pins cable */
5001 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
5002 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
5003 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
5004 & ATP860_CTRL_80P(chp->channel)) {
5005 if (chp->ch_drive[0].UDMA_mode > 2)
5006 chp->ch_drive[0].UDMA_mode = 2;
5007 if (chp->ch_drive[1].UDMA_mode > 2)
5008 chp->ch_drive[1].UDMA_mode = 2;
5009 }
5010 }
5011 }
5012
5013 idedma_ctl = 0;
5014
5015 /* Per drive settings */
5016 for (drive = 0; drive < 2; drive++) {
5017 drvp = &chp->ch_drive[drive];
5018 /* If no drive, skip */
5019 if ((drvp->drive_flags & DRIVE) == 0)
5020 continue;
5021 /* add timing values, setup DMA if needed */
5022 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
5023 (drvp->drive_flags & DRIVE_UDMA)) {
5024 /* use Ultra/DMA */
5025 if (ACARD_IS_850(sc)) {
5026 idetime |= ATP850_SETTIME(drive,
5027 acard_act_udma[drvp->UDMA_mode],
5028 acard_rec_udma[drvp->UDMA_mode]);
5029 udma_mode |= ATP850_UDMA_MODE(channel, drive,
5030 acard_udma_conf[drvp->UDMA_mode]);
5031 } else {
5032 idetime |= ATP860_SETTIME(channel, drive,
5033 acard_act_udma[drvp->UDMA_mode],
5034 acard_rec_udma[drvp->UDMA_mode]);
5035 udma_mode |= ATP860_UDMA_MODE(channel, drive,
5036 acard_udma_conf[drvp->UDMA_mode]);
5037 }
5038 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5039 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
5040 (drvp->drive_flags & DRIVE_DMA)) {
5041 /* use Multiword DMA */
5042 drvp->drive_flags &= ~DRIVE_UDMA;
5043 if (ACARD_IS_850(sc)) {
5044 idetime |= ATP850_SETTIME(drive,
5045 acard_act_dma[drvp->DMA_mode],
5046 acard_rec_dma[drvp->DMA_mode]);
5047 } else {
5048 idetime |= ATP860_SETTIME(channel, drive,
5049 acard_act_dma[drvp->DMA_mode],
5050 acard_rec_dma[drvp->DMA_mode]);
5051 }
5052 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5053 } else {
5054 /* PIO only */
5055 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
5056 if (ACARD_IS_850(sc)) {
5057 idetime |= ATP850_SETTIME(drive,
5058 acard_act_pio[drvp->PIO_mode],
5059 acard_rec_pio[drvp->PIO_mode]);
5060 } else {
5061 idetime |= ATP860_SETTIME(channel, drive,
5062 acard_act_pio[drvp->PIO_mode],
5063 acard_rec_pio[drvp->PIO_mode]);
5064 }
5065 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
5066 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
5067 | ATP8x0_CTRL_EN(channel));
5068 }
5069 }
5070
5071 if (idedma_ctl != 0) {
5072 /* Add software bits in status register */
5073 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5074 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
5075 }
5076 pciide_print_modes(cp);
5077
5078 if (ACARD_IS_850(sc)) {
5079 pci_conf_write(sc->sc_pc, sc->sc_tag,
5080 ATP850_IDETIME(channel), idetime);
5081 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
5082 } else {
5083 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
5084 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
5085 }
5086 }
5087
5088 int
5089 acard_pci_intr(arg)
5090 void *arg;
5091 {
5092 struct pciide_softc *sc = arg;
5093 struct pciide_channel *cp;
5094 struct channel_softc *wdc_cp;
5095 int rv = 0;
5096 int dmastat, i, crv;
5097
5098 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5099 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5100 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
5101 if ((dmastat & IDEDMA_CTL_INTR) == 0)
5102 continue;
5103 cp = &sc->pciide_channels[i];
5104 wdc_cp = &cp->wdc_channel;
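		/*
		 * If the channel isn't waiting for an interrupt, call
		 * wdcintr() to drain it and ack the DMA status bit, but
		 * don't report the interrupt as handled.
		 */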
5105 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
5106 (void)wdcintr(wdc_cp);
5107 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5108 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
5109 continue;
5110 }
5111 crv = wdcintr(wdc_cp);
5112 if (crv == 0)
5113 printf("%s:%d: bogus intr\n",
5114 sc->sc_wdcdev.sc_dev.dv_xname, i);
5115 else if (crv == 1)
5116 rv = 1;
5117 else if (rv == 0)
5118 rv = crv;
5119 }
5120 return rv;
5121 }
5122
5123 static int
5124 sl82c105_bugchk(struct pci_attach_args *pa)
5125 {
5126
5127 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
5128 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
5129 return (0);
5130
5131 if (PCI_REVISION(pa->pa_class) <= 0x05)
5132 return (1);
5133
5134 return (0);
5135 }
5136
5137 void
5138 sl82c105_chip_map(sc, pa)
5139 struct pciide_softc *sc;
5140 struct pci_attach_args *pa;
5141 {
5142 struct pciide_channel *cp;
5143 bus_size_t cmdsize, ctlsize;
5144 pcireg_t interface, idecr;
5145 int channel;
5146
5147 if (pciide_chipen(sc, pa) == 0)
5148 return;
5149
5150 printf("%s: bus-master DMA support present",
5151 sc->sc_wdcdev.sc_dev.dv_xname);
5152
5153 /*
5154 * Check to see if we're part of the Winbond 83c553 Southbridge.
5155 * If so, we need to disable DMA on rev. <= 5 of that chip.
5156 */
5157 if (pci_find_device(pa, sl82c105_bugchk)) {
5158 printf(" but disabled due to 83c553 rev. <= 0x05");
5159 sc->sc_dma_ok = 0;
5160 } else
5161 pciide_mapreg_dma(sc, pa);
5162 printf("\n");
5163
5164 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
5165 WDC_CAPABILITY_MODE;
5166 sc->sc_wdcdev.PIO_cap = 4;
5167 if (sc->sc_dma_ok) {
5168 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
5169 sc->sc_wdcdev.irqack = pciide_irqack;
5170 sc->sc_wdcdev.DMA_cap = 2;
5171 }
5172 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
5173
5174 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5175 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
5176
5177 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
5178
5179 interface = PCI_INTERFACE(pa->pa_class);
5180
5181 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
5182 cp = &sc->pciide_channels[channel];
5183 if (pciide_chansetup(sc, channel, interface) == 0)
5184 continue;
5185 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
5186 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
5187 printf("%s: %s channel ignored (disabled)\n",
5188 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
5189 continue;
5190 }
5191 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
5192 pciide_pci_intr);
5193 if (cp->hw_ok == 0)
5194 continue;
5195 pciide_map_compat_intr(pa, cp, channel, interface);
5196 if (cp->hw_ok == 0)
5197 continue;
5198 sl82c105_setup_channel(&cp->wdc_channel);
5199 }
5200 }
5201
5202 void
5203 sl82c105_setup_channel(chp)
5204 struct channel_softc *chp;
5205 {
5206 struct ata_drive_datas *drvp;
5207 struct pciide_channel *cp = (struct pciide_channel*)chp;
5208 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5209 int pxdx_reg, drive;
5210 pcireg_t pxdx;
5211
5212 /* Set up DMA if needed. */
5213 pciide_channel_dma_setup(cp);
5214
5215 for (drive = 0; drive < 2; drive++) {
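		/*
		 * Per-drive timing registers are laid out 4 bytes apart,
		 * starting at SYMPH_P0D0CR (channel 0) or SYMPH_P1D0CR
		 * (channel 1).
		 */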
5216 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
5217 : SYMPH_P1D0CR) + (drive * 4);
5218
5219 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
5220
5221 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
5222 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
5223
5224 drvp = &chp->ch_drive[drive];
5225 /* If no drive, skip. */
5226 if ((drvp->drive_flags & DRIVE) == 0) {
5227 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
5228 continue;
5229 }
5230
5231 if (drvp->drive_flags & DRIVE_DMA) {
5232 /*
5233 * Timings will be used for both PIO and DMA,
5234 * so adjust DMA mode if needed.
5235 */
5236 if (drvp->PIO_mode >= 3) {
5237 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
5238 drvp->DMA_mode = drvp->PIO_mode - 2;
5239 if (drvp->DMA_mode < 1) {
5240 /*
5241 * Can't mix both PIO and DMA.
5242 * Disable DMA.
5243 */
5244 drvp->drive_flags &= ~DRIVE_DMA;
5245 }
5246 } else {
5247 /*
5248 * Can't mix both PIO and DMA. Disable
5249 * DMA.
5250 */
5251 drvp->drive_flags &= ~DRIVE_DMA;
5252 }
5253 }
5254
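		/*
		 * DRIVE_DMA is still set here only if the MW DMA mode fits
		 * the PIO timing constraints checked above; pick the command
		 * on/off values from the matching table.
		 */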
5255 if (drvp->drive_flags & DRIVE_DMA) {
5256 /* Use multi-word DMA. */
5257 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
5258 PxDx_CMD_ON_SHIFT;
5259 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
5260 } else {
5261 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
5262 PxDx_CMD_ON_SHIFT;
5263 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
5264 }
5265
5266 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
5267
5268 /* ...and set the mode for this drive. */
5269 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
5270 }
5271
5272 pciide_print_modes(cp);
5273 }
5274
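/*
 * ServerWorks OSB4/CSB5/CSB6 IDE.  All three support PIO mode 4 and
 * MW DMA mode 2; the UDMA limit depends on the chip: mode 2 on the
 * OSB4, mode 4 on CSB5 revisions below 0x92 (mode 5 from 0x92 on),
 * and mode 5 on the CSB6.
 */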
5275 void
5276 serverworks_chip_map(sc, pa)
5277 struct pciide_softc *sc;
5278 struct pci_attach_args *pa;
5279 {
5280 struct pciide_channel *cp;
5281 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
5282 pcitag_t pcib_tag;
5283 int channel;
5284 bus_size_t cmdsize, ctlsize;
5285
5286 if (pciide_chipen(sc, pa) == 0)
5287 return;
5288
5289 printf("%s: bus-master DMA support present",
5290 sc->sc_wdcdev.sc_dev.dv_xname);
5291 pciide_mapreg_dma(sc, pa);
5292 printf("\n");
5293 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
5294 WDC_CAPABILITY_MODE;
5295
5296 if (sc->sc_dma_ok) {
5297 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
5298 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
5299 sc->sc_wdcdev.irqack = pciide_irqack;
5300 }
5301 sc->sc_wdcdev.PIO_cap = 4;
5302 sc->sc_wdcdev.DMA_cap = 2;
5303 switch (sc->sc_pp->ide_product) {
5304 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
5305 sc->sc_wdcdev.UDMA_cap = 2;
5306 break;
5307 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
5308 if (PCI_REVISION(pa->pa_class) < 0x92)
5309 sc->sc_wdcdev.UDMA_cap = 4;
5310 else
5311 sc->sc_wdcdev.UDMA_cap = 5;
5312 break;
5313 case PCI_PRODUCT_SERVERWORKS_CSB6_IDE:
5314 sc->sc_wdcdev.UDMA_cap = 5;
5315 break;
5316 }
5317
5318 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
5319 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5320 sc->sc_wdcdev.nchannels = 2;
5321
5322 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
5323 cp = &sc->pciide_channels[channel];
5324 if (pciide_chansetup(sc, channel, interface) == 0)
5325 continue;
5326 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
5327 serverworks_pci_intr);
5328 if (cp->hw_ok == 0)
5329 return;
5330 pciide_map_compat_intr(pa, cp, channel, interface);
5331 if (cp->hw_ok == 0)
5332 return;
5333 serverworks_setup_channel(&cp->wdc_channel);
5334 }
5335
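	/*
	 * Adjust the southbridge (device function 0) configuration
	 * register 0x64: clear bit 0x2000 and set bit 0x4000.  What
	 * these bits control is not spelled out here.
	 */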
5336 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
5337 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
5338 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
5339 }
5340
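/*
 * Per-channel mode setup for the ServerWorks controllers.  Timing and
 * mode state lives in four config registers, as read and written back
 * below: 0x40 (PIO timing), 0x44 (MW DMA timing), 0x48 (PIO mode, not
 * written on the OSB4) and 0x54 (UDMA mode and enable bits).
 */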
5341 void
5342 serverworks_setup_channel(chp)
5343 struct channel_softc *chp;
5344 {
5345 struct ata_drive_datas *drvp;
5346 struct pciide_channel *cp = (struct pciide_channel*)chp;
5347 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
5348 int channel = chp->channel;
5349 int drive, unit;
5350 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
5351 u_int32_t idedma_ctl;
5352 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
5353 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
5354
5355 /* setup DMA if needed */
5356 pciide_channel_dma_setup(cp);
5357
5358 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
5359 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
5360 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
5361 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
5362
5363 pio_time &= ~(0xffff << (16 * channel));
5364 dma_time &= ~(0xffff << (16 * channel));
5365 pio_mode &= ~(0xff << (8 * channel + 16));
5366 udma_mode &= ~(0xff << (8 * channel + 16));
5367 udma_mode &= ~(3 << (2 * channel));
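	/*
	 * Each channel owns 16 bits of the timing registers and its own
	 * mode/enable fields; clear them here and rebuild them from the
	 * per-drive settings below.
	 */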
5368
5369 idedma_ctl = 0;
5370
5371 /* Per drive settings */
5372 for (drive = 0; drive < 2; drive++) {
5373 drvp = &chp->ch_drive[drive];
5374 /* If no drive, skip */
5375 if ((drvp->drive_flags & DRIVE) == 0)
5376 continue;
5377 unit = drive + 2 * channel;
5378 /* add timing values, setup DMA if needed */
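		/*
		 * "unit" numbers the drives 0-3 across both channels; the
		 * timing registers store a channel's two drive bytes in
		 * reversed order, hence the "unit ^ 1" byte index.
		 */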
5379 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
5380 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
5381 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
5382 (drvp->drive_flags & DRIVE_UDMA)) {
5383 /* use Ultra/DMA, check for 80-pin cable */
5384 if (drvp->UDMA_mode > 2 &&
5385 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
5386 drvp->UDMA_mode = 2;
5387 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
5388 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
5389 udma_mode |= 1 << unit;
5390 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5391 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
5392 (drvp->drive_flags & DRIVE_DMA)) {
5393 /* use Multiword DMA */
5394 drvp->drive_flags &= ~DRIVE_UDMA;
5395 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
5396 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
5397 } else {
5398 /* PIO only */
5399 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
5400 }
5401 }
5402
5403 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
5404 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
5405 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
5406 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
5407 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
5408
5409 if (idedma_ctl != 0) {
5410 /* Add software bits in status register */
5411 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5412 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
5413 }
5414 pciide_print_modes(cp);
5415 }
5416
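/*
 * Shared interrupt handler: wdcintr() is called only for channels whose
 * bus-master status shows an interrupt pending (INTR set, ACT clear);
 * a bogus interrupt is acknowledged by writing the status back.
 */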
5417 int
5418 serverworks_pci_intr(arg)
5419 void *arg;
5420 {
5421 struct pciide_softc *sc = arg;
5422 struct pciide_channel *cp;
5423 struct channel_softc *wdc_cp;
5424 int rv = 0;
5425 int dmastat, i, crv;
5426
5427 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
5428 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5429 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
5430 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
5431 IDEDMA_CTL_INTR)
5432 continue;
5433 cp = &sc->pciide_channels[i];
5434 wdc_cp = &cp->wdc_channel;
5435 crv = wdcintr(wdc_cp);
5436 if (crv == 0) {
5437 printf("%s:%d: bogus intr\n",
5438 sc->sc_wdcdev.sc_dev.dv_xname, i);
5439 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
5440 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
5441 } else
5442 rv = 1;
5443 }
5444 return rv;
5445 }
5446
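/*
 * Artisea (Intel i31244) SATA controller.  DMA is normally disabled on
 * revision 0 parts (override with the PCIIDE_I31244_ENABLEDMA option);
 * when usable, the controller is advertised as UDMA mode 6 capable and
 * the generic SATA channel setup is used.
 */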
5447 void
5448 artisea_chip_map(sc, pa)
5449 struct pciide_softc *sc;
5450 struct pci_attach_args *pa;
5451 {
5452 struct pciide_channel *cp;
5453 bus_size_t cmdsize, ctlsize;
5454 pcireg_t interface;
5455 int channel;
5456
5457 if (pciide_chipen(sc, pa) == 0)
5458 return;
5459
5460 printf("%s: bus-master DMA support present",
5461 sc->sc_wdcdev.sc_dev.dv_xname);
5462 #ifndef PCIIDE_I31244_ENABLEDMA
5463 if (PCI_REVISION(pa->pa_class) == 0) {
5464 printf(" but disabled due to rev. 0");
5465 sc->sc_dma_ok = 0;
5466 } else
5467 #endif
5468 pciide_mapreg_dma(sc, pa);
5469 printf("\n");
5470
5471 /*
5472 * XXX Configure LEDs to show activity.
5473 */
5474
5475 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
5476 WDC_CAPABILITY_MODE;
5477 sc->sc_wdcdev.PIO_cap = 4;
5478 if (sc->sc_dma_ok) {
5479 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
5480 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
5481 sc->sc_wdcdev.irqack = pciide_irqack;
5482 sc->sc_wdcdev.DMA_cap = 2;
5483 sc->sc_wdcdev.UDMA_cap = 6;
5484 }
5485 sc->sc_wdcdev.set_modes = sata_setup_channel;
5486
5487 sc->sc_wdcdev.channels = sc->wdc_chanarray;
5488 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
5489
5490 interface = PCI_INTERFACE(pa->pa_class);
5491
5492 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
5493 cp = &sc->pciide_channels[channel];
5494 if (pciide_chansetup(sc, channel, interface) == 0)
5495 continue;
5496 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
5497 pciide_pci_intr);
5498 if (cp->hw_ok == 0)
5499 continue;
5500 pciide_map_compat_intr(pa, cp, channel, interface);
5501 sata_setup_channel(&cp->wdc_channel);
5502 }
5503 }
5504