1 /* $NetBSD: pciide.c,v 1.180 2003/01/27 18:21:23 thorpej Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.180 2003/01/27 18:21:23 thorpej Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/cy82c693var.h>
123
124 #include "opt_pciide.h"
125
126 /* inlines for reading/writing 8-bit PCI registers */
127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
128 int));
129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
130 int, u_int8_t));
131
132 static __inline u_int8_t
133 pciide_pci_read(pc, pa, reg)
134 pci_chipset_tag_t pc;
135 pcitag_t pa;
136 int reg;
137 {
138
139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
140 ((reg & 0x03) * 8) & 0xff);
141 }
142
143 static __inline void
144 pciide_pci_write(pc, pa, reg, val)
145 pci_chipset_tag_t pc;
146 pcitag_t pa;
147 int reg;
148 u_int8_t val;
149 {
150 pcireg_t pcival;
151
152 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
153 pcival &= ~(0xff << ((reg & 0x03) * 8));
154 pcival |= (val << ((reg & 0x03) * 8));
155 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
156 }
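
/*
 * Illustrative use of the helpers above (config offset 0x41 is a made-up
 * example register): set bit 7 of a byte-wide register without touching
 * the other bytes of its 32-bit dword:
 *
 *	u_int8_t val = pciide_pci_read(pc, tag, 0x41);
 *	pciide_pci_write(pc, tag, 0x41, val | 0x80);
 *
 * pci_conf_read()/pci_conf_write() only operate on aligned 32-bit words,
 * so both helpers round the offset down and shift/mask the wanted byte;
 * pciide_pci_write() does a read-modify-write of the containing dword.
 */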
157
158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159
160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
161 void piix_setup_channel __P((struct channel_softc*));
162 void piix3_4_setup_channel __P((struct channel_softc*));
163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
166
167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void amd7x6_setup_channel __P((struct channel_softc*));
169
170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void apollo_setup_channel __P((struct channel_softc*));
172
173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_setup_channel __P((struct channel_softc*));
176 void cmd_channel_map __P((struct pci_attach_args *,
177 struct pciide_softc *, int));
178 int cmd_pci_intr __P((void *));
179 void cmd646_9_irqack __P((struct channel_softc *));
180 void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
181 void cmd680_setup_channel __P((struct channel_softc*));
182 void cmd680_channel_map __P((struct pci_attach_args *,
183 struct pciide_softc *, int));
184
185 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
186 void cy693_setup_channel __P((struct channel_softc*));
187
188 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void sis_setup_channel __P((struct channel_softc*));
190 static int sis_hostbr_match __P(( struct pci_attach_args *));
191
192 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void acer_setup_channel __P((struct channel_softc*));
194 int acer_pci_intr __P((void *));
195
196 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
197 void pdc202xx_setup_channel __P((struct channel_softc*));
198 void pdc20268_setup_channel __P((struct channel_softc*));
199 int pdc202xx_pci_intr __P((void *));
200 int pdc20265_pci_intr __P((void *));
201
202 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
203 void opti_setup_channel __P((struct channel_softc*));
204
205 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void hpt_setup_channel __P((struct channel_softc*));
207 int hpt_pci_intr __P((void *));
208
209 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
210 void acard_setup_channel __P((struct channel_softc*));
211 int acard_pci_intr __P((void *));
212
213 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
214 void serverworks_setup_channel __P((struct channel_softc*));
215 int serverworks_pci_intr __P((void *));
216
217 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
218 void sl82c105_setup_channel __P((struct channel_softc*));
219
220 void pciide_channel_dma_setup __P((struct pciide_channel *));
221 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
222 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
223 void pciide_dma_start __P((void*, int, int));
224 int pciide_dma_finish __P((void*, int, int, int));
225 void pciide_irqack __P((struct channel_softc *));
226 void pciide_print_modes __P((struct pciide_channel *));
227
228 struct pciide_product_desc {
229 u_int32_t ide_product;
230 int ide_flags;
231 const char *ide_name;
232 /* map and setup chip, probe drives */
233 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
234 };
235
236 /* Flags for ide_flags */
237 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
238 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARs ignore upper word */
239
240 /* Default product description for devices not known to this driver */
241 const struct pciide_product_desc default_product_desc = {
242 0,
243 0,
244 "Generic PCI IDE controller",
245 default_chip_map,
246 };
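
/*
 * Unrecognized vendor/product IDs that still advertise the PCI IDE
 * programming interface fall back to default_product_desc above:
 * default_chip_map() maps the channels generically and leaves bus-master
 * DMA unused unless the kernel config file sets PCIIDE_OPTIONS_DMA.
 */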
247
248 const struct pciide_product_desc pciide_intel_products[] = {
249 { PCI_PRODUCT_INTEL_82092AA,
250 0,
251 "Intel 82092AA IDE controller",
252 default_chip_map,
253 },
254 { PCI_PRODUCT_INTEL_82371FB_IDE,
255 0,
256 "Intel 82371FB IDE controller (PIIX)",
257 piix_chip_map,
258 },
259 { PCI_PRODUCT_INTEL_82371SB_IDE,
260 0,
261 "Intel 82371SB IDE Interface (PIIX3)",
262 piix_chip_map,
263 },
264 { PCI_PRODUCT_INTEL_82371AB_IDE,
265 0,
266 "Intel 82371AB IDE controller (PIIX4)",
267 piix_chip_map,
268 },
269 { PCI_PRODUCT_INTEL_82440MX_IDE,
270 0,
271 "Intel 82440MX IDE controller",
272 piix_chip_map
273 },
274 { PCI_PRODUCT_INTEL_82801AA_IDE,
275 0,
276 "Intel 82801AA IDE Controller (ICH)",
277 piix_chip_map,
278 },
279 { PCI_PRODUCT_INTEL_82801AB_IDE,
280 0,
281 "Intel 82801AB IDE Controller (ICH0)",
282 piix_chip_map,
283 },
284 { PCI_PRODUCT_INTEL_82801BA_IDE,
285 0,
286 "Intel 82801BA IDE Controller (ICH2)",
287 piix_chip_map,
288 },
289 { PCI_PRODUCT_INTEL_82801BAM_IDE,
290 0,
291 "Intel 82801BAM IDE Controller (ICH2)",
292 piix_chip_map,
293 },
294 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
295 0,
296 "Intel 82801CA IDE Controller",
297 piix_chip_map,
298 },
299 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
300 0,
301 "Intel 82801CA IDE Controller",
302 piix_chip_map,
303 },
304 { PCI_PRODUCT_INTEL_82801DB_IDE,
305 0,
306 "Intel 82801DB IDE Controller (ICH4)",
307 piix_chip_map,
308 },
309 { 0,
310 0,
311 NULL,
312 NULL
313 }
314 };
315
316 const struct pciide_product_desc pciide_amd_products[] = {
317 { PCI_PRODUCT_AMD_PBC756_IDE,
318 0,
319 "Advanced Micro Devices AMD756 IDE Controller",
320 amd7x6_chip_map
321 },
322 { PCI_PRODUCT_AMD_PBC766_IDE,
323 0,
324 "Advanced Micro Devices AMD766 IDE Controller",
325 amd7x6_chip_map
326 },
327 { PCI_PRODUCT_AMD_PBC768_IDE,
328 0,
329 "Advanced Micro Devices AMD768 IDE Controller",
330 amd7x6_chip_map
331 },
332 { PCI_PRODUCT_AMD_PBC8111_IDE,
333 0,
334 "Advanced Micro Devices AMD8111 IDE Controller",
335 amd7x6_chip_map
336 },
337 { 0,
338 0,
339 NULL,
340 NULL
341 }
342 };
343
344 const struct pciide_product_desc pciide_nvidia_products[] = {
345 { PCI_PRODUCT_NVIDIA_NFORCE_ATA100,
346 0,
347 "NVIDIA nForce IDE Controller",
348 amd7x6_chip_map
349 },
350 { PCI_PRODUCT_NVIDIA_NFORCE2_ATA133,
351 0,
352 "NVIDIA nForce2 IDE Controller",
353 amd7x6_chip_map
354 },
355 { 0,
356 0,
357 NULL,
358 NULL
359 }
360 };
361
362 const struct pciide_product_desc pciide_cmd_products[] = {
363 { PCI_PRODUCT_CMDTECH_640,
364 0,
365 "CMD Technology PCI0640",
366 cmd_chip_map
367 },
368 { PCI_PRODUCT_CMDTECH_643,
369 0,
370 "CMD Technology PCI0643",
371 cmd0643_9_chip_map,
372 },
373 { PCI_PRODUCT_CMDTECH_646,
374 0,
375 "CMD Technology PCI0646",
376 cmd0643_9_chip_map,
377 },
378 { PCI_PRODUCT_CMDTECH_648,
379 IDE_PCI_CLASS_OVERRIDE,
380 "CMD Technology PCI0648",
381 cmd0643_9_chip_map,
382 },
383 { PCI_PRODUCT_CMDTECH_649,
384 IDE_PCI_CLASS_OVERRIDE,
385 "CMD Technology PCI0649",
386 cmd0643_9_chip_map,
387 },
388 { PCI_PRODUCT_CMDTECH_680,
389 IDE_PCI_CLASS_OVERRIDE,
390 "Silicon Image 0680",
391 cmd680_chip_map,
392 },
393 { 0,
394 0,
395 NULL,
396 NULL
397 }
398 };
399
400 const struct pciide_product_desc pciide_via_products[] = {
401 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
402 0,
403 NULL,
404 apollo_chip_map,
405 },
406 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
407 0,
408 NULL,
409 apollo_chip_map,
410 },
411 { 0,
412 0,
413 NULL,
414 NULL
415 }
416 };
417
418 const struct pciide_product_desc pciide_cypress_products[] = {
419 { PCI_PRODUCT_CONTAQ_82C693,
420 IDE_16BIT_IOSPACE,
421 "Cypress 82C693 IDE Controller",
422 cy693_chip_map,
423 },
424 { 0,
425 0,
426 NULL,
427 NULL
428 }
429 };
430
431 const struct pciide_product_desc pciide_sis_products[] = {
432 { PCI_PRODUCT_SIS_5597_IDE,
433 0,
434 "Silicon Integrated System 5597/5598 IDE controller",
435 sis_chip_map,
436 },
437 { 0,
438 0,
439 NULL,
440 NULL
441 }
442 };
443
444 const struct pciide_product_desc pciide_acer_products[] = {
445 { PCI_PRODUCT_ALI_M5229,
446 0,
447 "Acer Labs M5229 UDMA IDE Controller",
448 acer_chip_map,
449 },
450 { 0,
451 0,
452 NULL,
453 NULL
454 }
455 };
456
457 const struct pciide_product_desc pciide_promise_products[] = {
458 { PCI_PRODUCT_PROMISE_ULTRA33,
459 IDE_PCI_CLASS_OVERRIDE,
460 "Promise Ultra33/ATA Bus Master IDE Accelerator",
461 pdc202xx_chip_map,
462 },
463 { PCI_PRODUCT_PROMISE_ULTRA66,
464 IDE_PCI_CLASS_OVERRIDE,
465 "Promise Ultra66/ATA Bus Master IDE Accelerator",
466 pdc202xx_chip_map,
467 },
468 { PCI_PRODUCT_PROMISE_ULTRA100,
469 IDE_PCI_CLASS_OVERRIDE,
470 "Promise Ultra100/ATA Bus Master IDE Accelerator",
471 pdc202xx_chip_map,
472 },
473 { PCI_PRODUCT_PROMISE_ULTRA100X,
474 IDE_PCI_CLASS_OVERRIDE,
475 "Promise Ultra100/ATA Bus Master IDE Accelerator",
476 pdc202xx_chip_map,
477 },
478 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
479 IDE_PCI_CLASS_OVERRIDE,
480 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
481 pdc202xx_chip_map,
482 },
483 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
484 IDE_PCI_CLASS_OVERRIDE,
485 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
486 pdc202xx_chip_map,
487 },
488 { PCI_PRODUCT_PROMISE_ULTRA133,
489 IDE_PCI_CLASS_OVERRIDE,
490 "Promise Ultra133/ATA Bus Master IDE Accelerator",
491 pdc202xx_chip_map,
492 },
493 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
494 IDE_PCI_CLASS_OVERRIDE,
495 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
496 pdc202xx_chip_map,
497 },
498 { PCI_PRODUCT_PROMISE_MBULTRA133,
499 IDE_PCI_CLASS_OVERRIDE,
500 "Promise Ultra133/ATA Bus Master IDE Accelerator (MB)",
501 pdc202xx_chip_map,
502 },
503 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
504 IDE_PCI_CLASS_OVERRIDE,
505 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
506 pdc202xx_chip_map,
507 },
508 { PCI_PRODUCT_PROMISE_FASTTRAK133LITE,
509 IDE_PCI_CLASS_OVERRIDE,
510 "Promise Fasttrak133 Lite Bus Master IDE Accelerator",
511 pdc202xx_chip_map,
512 },
513 { PCI_PRODUCT_PROMISE_SATA150TX2PLUS,
514 IDE_PCI_CLASS_OVERRIDE,
515 "Promise Serial ATA/150 TX2plus Bus Master IDE Accelerator",
516 pdc202xx_chip_map,
517 },
518 { 0,
519 0,
520 NULL,
521 NULL
522 }
523 };
524
525 const struct pciide_product_desc pciide_opti_products[] = {
526 { PCI_PRODUCT_OPTI_82C621,
527 0,
528 "OPTi 82c621 PCI IDE controller",
529 opti_chip_map,
530 },
531 { PCI_PRODUCT_OPTI_82C568,
532 0,
533 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
534 opti_chip_map,
535 },
536 { PCI_PRODUCT_OPTI_82D568,
537 0,
538 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
539 opti_chip_map,
540 },
541 { 0,
542 0,
543 NULL,
544 NULL
545 }
546 };
547
548 const struct pciide_product_desc pciide_triones_products[] = {
549 { PCI_PRODUCT_TRIONES_HPT366,
550 IDE_PCI_CLASS_OVERRIDE,
551 NULL,
552 hpt_chip_map,
553 },
554 { PCI_PRODUCT_TRIONES_HPT372,
555 IDE_PCI_CLASS_OVERRIDE,
556 NULL,
557 hpt_chip_map
558 },
559 { PCI_PRODUCT_TRIONES_HPT374,
560 IDE_PCI_CLASS_OVERRIDE,
561 NULL,
562 hpt_chip_map
563 },
564 { 0,
565 0,
566 NULL,
567 NULL
568 }
569 };
570
571 const struct pciide_product_desc pciide_acard_products[] = {
572 { PCI_PRODUCT_ACARD_ATP850U,
573 IDE_PCI_CLASS_OVERRIDE,
574 "Acard ATP850U Ultra33 IDE Controller",
575 acard_chip_map,
576 },
577 { PCI_PRODUCT_ACARD_ATP860,
578 IDE_PCI_CLASS_OVERRIDE,
579 "Acard ATP860 Ultra66 IDE Controller",
580 acard_chip_map,
581 },
582 { PCI_PRODUCT_ACARD_ATP860A,
583 IDE_PCI_CLASS_OVERRIDE,
584 "Acard ATP860-A Ultra66 IDE Controller",
585 acard_chip_map,
586 },
587 { 0,
588 0,
589 NULL,
590 NULL
591 }
592 };
593
594 const struct pciide_product_desc pciide_serverworks_products[] = {
595 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
596 0,
597 "ServerWorks OSB4 IDE Controller",
598 serverworks_chip_map,
599 },
600 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
601 0,
602 "ServerWorks CSB5 IDE Controller",
603 serverworks_chip_map,
604 },
605 { 0,
606 0,
607 NULL,
608 }
609 };
610
611 const struct pciide_product_desc pciide_symphony_products[] = {
612 { PCI_PRODUCT_SYMPHONY_82C105,
613 0,
614 "Symphony Labs 82C105 IDE controller",
615 sl82c105_chip_map,
616 },
617 { 0,
618 0,
619 NULL,
620 }
621 };
622
623 const struct pciide_product_desc pciide_winbond_products[] = {
624 { PCI_PRODUCT_WINBOND_W83C553F_1,
625 0,
626 "Winbond W83C553F IDE controller",
627 sl82c105_chip_map,
628 },
629 { 0,
630 0,
631 NULL,
632 }
633 };
634
635 struct pciide_vendor_desc {
636 u_int32_t ide_vendor;
637 const struct pciide_product_desc *ide_products;
638 };
639
640 const struct pciide_vendor_desc pciide_vendors[] = {
641 { PCI_VENDOR_INTEL, pciide_intel_products },
642 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
643 { PCI_VENDOR_VIATECH, pciide_via_products },
644 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
645 { PCI_VENDOR_SIS, pciide_sis_products },
646 { PCI_VENDOR_ALI, pciide_acer_products },
647 { PCI_VENDOR_PROMISE, pciide_promise_products },
648 { PCI_VENDOR_AMD, pciide_amd_products },
649 { PCI_VENDOR_OPTI, pciide_opti_products },
650 { PCI_VENDOR_TRIONES, pciide_triones_products },
651 { PCI_VENDOR_ACARD, pciide_acard_products },
652 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
653 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
654 { PCI_VENDOR_WINBOND, pciide_winbond_products },
655 { PCI_VENDOR_NVIDIA, pciide_nvidia_products },
656 { 0, NULL }
657 };
658
659 /* options passed via the 'flags' config keyword */
660 #define PCIIDE_OPTIONS_DMA 0x01
661 #define PCIIDE_OPTIONS_NODMA 0x02
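
/*
 * These bits are taken from the "flags" directive on the pciide line of
 * the kernel config file, e.g. (value shown only as an example):
 *
 *	pciide* at pci? dev ? function ? flags 0x0002	# force DMA off
 *
 * and are read back through sc_wdcdev.sc_dev.dv_cfdata->cf_flags.
 */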
662
663 int pciide_match __P((struct device *, struct cfdata *, void *));
664 void pciide_attach __P((struct device *, struct device *, void *));
665
666 CFATTACH_DECL(pciide, sizeof(struct pciide_softc),
667 pciide_match, pciide_attach, NULL, NULL);
668
669 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
670 int pciide_mapregs_compat __P(( struct pci_attach_args *,
671 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
672 int pciide_mapregs_native __P((struct pci_attach_args *,
673 struct pciide_channel *, bus_size_t *, bus_size_t *,
674 int (*pci_intr) __P((void *))));
675 void pciide_mapreg_dma __P((struct pciide_softc *,
676 struct pci_attach_args *));
677 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
678 void pciide_mapchan __P((struct pci_attach_args *,
679 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
680 int (*pci_intr) __P((void *))));
681 int pciide_chan_candisable __P((struct pciide_channel *));
682 void pciide_map_compat_intr __P(( struct pci_attach_args *,
683 struct pciide_channel *, int, int));
684 int pciide_compat_intr __P((void *));
685 int pciide_pci_intr __P((void *));
686 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
687
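/*
 * Resolve a PCI id to a product descriptor: scan the vendor table
 * (terminated by a NULL ide_products pointer), then that vendor's
 * product list (terminated by a NULL chip_map). Returns NULL when
 * nothing matches, in which case pciide_attach() falls back to
 * default_product_desc.
 */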
688 const struct pciide_product_desc *
689 pciide_lookup_product(id)
690 u_int32_t id;
691 {
692 const struct pciide_product_desc *pp;
693 const struct pciide_vendor_desc *vp;
694
695 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
696 if (PCI_VENDOR(id) == vp->ide_vendor)
697 break;
698
699 if ((pp = vp->ide_products) == NULL)
700 return NULL;
701
702 for (; pp->chip_map != NULL; pp++)
703 if (PCI_PRODUCT(id) == pp->ide_product)
704 break;
705
706 if (pp->chip_map == NULL)
707 return NULL;
708 return pp;
709 }
710
711 int
712 pciide_match(parent, match, aux)
713 struct device *parent;
714 struct cfdata *match;
715 void *aux;
716 {
717 struct pci_attach_args *pa = aux;
718 const struct pciide_product_desc *pp;
719
720 /*
721 * Check the ID register to see that it's a PCI IDE controller.
722 * If it is, we assume that we can deal with it; it _should_
723 * work in a standardized way...
724 */
725 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
726 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
727 return (1);
728 }
729
730 /*
731 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
732 * controllers. Let's see if we can deal with them anyway.
733 */
734 pp = pciide_lookup_product(pa->pa_id);
735 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
736 return (1);
737 }
738
739 return (0);
740 }
741
742 void
743 pciide_attach(parent, self, aux)
744 struct device *parent, *self;
745 void *aux;
746 {
747 struct pci_attach_args *pa = aux;
748 pci_chipset_tag_t pc = pa->pa_pc;
749 pcitag_t tag = pa->pa_tag;
750 struct pciide_softc *sc = (struct pciide_softc *)self;
751 pcireg_t csr;
752 char devinfo[256];
753 const char *displaydev;
754
755 sc->sc_pci_vendor = PCI_VENDOR(pa->pa_id);
756 sc->sc_pp = pciide_lookup_product(pa->pa_id);
757 if (sc->sc_pp == NULL) {
758 sc->sc_pp = &default_product_desc;
759 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
760 displaydev = devinfo;
761 } else
762 displaydev = sc->sc_pp->ide_name;
763
764 /* if displaydev == NULL, printf is done in chip-specific map */
765 if (displaydev)
766 printf(": %s (rev. 0x%02x)\n", displaydev,
767 PCI_REVISION(pa->pa_class));
768
769 sc->sc_pc = pa->pa_pc;
770 sc->sc_tag = pa->pa_tag;
771 #ifdef WDCDEBUG
772 if (wdcdebug_pciide_mask & DEBUG_PROBE)
773 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
774 #endif
775 sc->sc_pp->chip_map(sc, pa);
776
777 if (sc->sc_dma_ok) {
778 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
779 csr |= PCI_COMMAND_MASTER_ENABLE;
780 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
781 }
782 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
783 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
784 }
785
786 /* tell whether the chip is enabled or not */
787 int
788 pciide_chipen(sc, pa)
789 struct pciide_softc *sc;
790 struct pci_attach_args *pa;
791 {
792 pcireg_t csr;
793 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
794 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
795 PCI_COMMAND_STATUS_REG);
796 printf("%s: device disabled (at %s)\n",
797 sc->sc_wdcdev.sc_dev.dv_xname,
798 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
799 "device" : "bridge");
800 return 0;
801 }
802 return 1;
803 }
804
805 int
806 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
807 struct pci_attach_args *pa;
808 struct pciide_channel *cp;
809 int compatchan;
810 bus_size_t *cmdsizep, *ctlsizep;
811 {
812 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
813 struct channel_softc *wdc_cp = &cp->wdc_channel;
814
815 cp->compat = 1;
816 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
817 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
818
819 wdc_cp->cmd_iot = pa->pa_iot;
820 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
821 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
822 printf("%s: couldn't map %s channel cmd regs\n",
823 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
824 return (0);
825 }
826
827 wdc_cp->ctl_iot = pa->pa_iot;
828 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
829 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
830 printf("%s: couldn't map %s channel ctl regs\n",
831 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
832 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
833 PCIIDE_COMPAT_CMD_SIZE);
834 return (0);
835 }
836
837 return (1);
838 }
839
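/*
 * Map the registers of a native-mode channel. Per the PCI IDE controller
 * spec cited at the top of this file, a native-mode channel decodes its
 * command and control blocks through PCI BARs: BAR0/BAR1 for the primary
 * channel, BAR2/BAR3 for the secondary one, with BAR4 holding the 16-byte
 * bus-master DMA block shared by both channels. PCIIDE_REG_CMD_BASE() and
 * PCIIDE_REG_CTL_BASE() turn a channel number into the matching
 * config-space BAR offset.
 */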
840 int
841 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
842 struct pci_attach_args * pa;
843 struct pciide_channel *cp;
844 bus_size_t *cmdsizep, *ctlsizep;
845 int (*pci_intr) __P((void *));
846 {
847 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
848 struct channel_softc *wdc_cp = &cp->wdc_channel;
849 const char *intrstr;
850 pci_intr_handle_t intrhandle;
851
852 cp->compat = 0;
853
854 if (sc->sc_pci_ih == NULL) {
855 if (pci_intr_map(pa, &intrhandle) != 0) {
856 printf("%s: couldn't map native-PCI interrupt\n",
857 sc->sc_wdcdev.sc_dev.dv_xname);
858 return 0;
859 }
860 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
861 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
862 intrhandle, IPL_BIO, pci_intr, sc);
863 if (sc->sc_pci_ih != NULL) {
864 printf("%s: using %s for native-PCI interrupt\n",
865 sc->sc_wdcdev.sc_dev.dv_xname,
866 intrstr ? intrstr : "unknown interrupt");
867 } else {
868 printf("%s: couldn't establish native-PCI interrupt",
869 sc->sc_wdcdev.sc_dev.dv_xname);
870 if (intrstr != NULL)
871 printf(" at %s", intrstr);
872 printf("\n");
873 return 0;
874 }
875 }
876 cp->ih = sc->sc_pci_ih;
877 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
878 PCI_MAPREG_TYPE_IO, 0,
879 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
880 printf("%s: couldn't map %s channel cmd regs\n",
881 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
882 return 0;
883 }
884
885 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
886 PCI_MAPREG_TYPE_IO, 0,
887 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
888 printf("%s: couldn't map %s channel ctl regs\n",
889 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
890 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
891 return 0;
892 }
893 /*
894 * In native mode, 4 bytes of I/O space are mapped for the control
895 * register, the control register is at offset 2. Pass the generic
896 * code a handle for only one byte at the right offset.
897 */
898 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
899 &wdc_cp->ctl_ioh) != 0) {
900 printf("%s: unable to subregion %s channel ctl regs\n",
901 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
902 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
903 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
904 return 0;
905 }
906 return (1);
907 }
908
909 void
910 pciide_mapreg_dma(sc, pa)
911 struct pciide_softc *sc;
912 struct pci_attach_args *pa;
913 {
914 pcireg_t maptype;
915 bus_addr_t addr;
916
917 /*
918 * Map DMA registers
919 *
920 * Note that sc_dma_ok is the right variable to test to see if
921 * DMA can be done. If the interface doesn't support DMA,
922 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
923 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
924 * non-zero if the interface supports DMA and the registers
925 * could be mapped.
926 *
927 * XXX Note that despite the fact that the Bus Master IDE specs
928 * XXX say that "The bus master IDE function uses 16 bytes of IO
929 * XXX space," some controllers (at least the United
930 * XXX Microelectronics UM8886BF) place it in memory space.
931 */
932 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
933 PCIIDE_REG_BUS_MASTER_DMA);
934
935 switch (maptype) {
936 case PCI_MAPREG_TYPE_IO:
937 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
938 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
939 &addr, NULL, NULL) == 0);
940 if (sc->sc_dma_ok == 0) {
941 printf(", but unused (couldn't query registers)");
942 break;
943 }
944 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
945 && addr >= 0x10000) {
946 sc->sc_dma_ok = 0;
947 printf(", but unused (registers at unsafe address "
948 "%#lx)", (unsigned long)addr);
949 break;
950 }
951 /* FALLTHROUGH */
952
953 case PCI_MAPREG_MEM_TYPE_32BIT:
954 sc->sc_dma_ok = (pci_mapreg_map(pa,
955 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
956 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
957 sc->sc_dmat = pa->pa_dmat;
958 if (sc->sc_dma_ok == 0) {
959 printf(", but unused (couldn't map registers)");
960 } else {
961 sc->sc_wdcdev.dma_arg = sc;
962 sc->sc_wdcdev.dma_init = pciide_dma_init;
963 sc->sc_wdcdev.dma_start = pciide_dma_start;
964 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
965 }
966
967 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
968 PCIIDE_OPTIONS_NODMA) {
969 printf(", but unused (forced off by config file)");
970 sc->sc_dma_ok = 0;
971 }
972 break;
973
974 default:
975 sc->sc_dma_ok = 0;
976 printf(", but unsupported register maptype (0x%x)", maptype);
977 }
978 }
979
980 int
981 pciide_compat_intr(arg)
982 void *arg;
983 {
984 struct pciide_channel *cp = arg;
985
986 #ifdef DIAGNOSTIC
987 /* should only be called for a compat channel */
988 if (cp->compat == 0)
989 panic("pciide compat intr called for non-compat chan %p", cp);
990 #endif
991 return (wdcintr(&cp->wdc_channel));
992 }
993
994 int
995 pciide_pci_intr(arg)
996 void *arg;
997 {
998 struct pciide_softc *sc = arg;
999 struct pciide_channel *cp;
1000 struct channel_softc *wdc_cp;
1001 int i, rv, crv;
1002
1003 rv = 0;
1004 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
1005 cp = &sc->pciide_channels[i];
1006 wdc_cp = &cp->wdc_channel;
1007
1008 /* If a compat channel, skip. */
1009 if (cp->compat)
1010 continue;
1011 /* if this channel is not waiting for an intr, skip */
1012 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
1013 continue;
1014
1015 crv = wdcintr(wdc_cp);
1016 if (crv == 0)
1017 ; /* leave rv alone */
1018 else if (crv == 1)
1019 rv = 1; /* claim the intr */
1020 else if (rv == 0) /* crv should be -1 in this case */
1021 rv = crv; /* if we've done no better, take it */
1022 }
1023 return (rv);
1024 }
1025
1026 void
1027 pciide_channel_dma_setup(cp)
1028 struct pciide_channel *cp;
1029 {
1030 int drive;
1031 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1032 struct ata_drive_datas *drvp;
1033
1034 for (drive = 0; drive < 2; drive++) {
1035 drvp = &cp->wdc_channel.ch_drive[drive];
1036 /* If no drive, skip */
1037 if ((drvp->drive_flags & DRIVE) == 0)
1038 continue;
1039 /* setup DMA if needed */
1040 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1041 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
1042 sc->sc_dma_ok == 0) {
1043 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1044 continue;
1045 }
1046 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1047 != 0) {
1048 /* Abort DMA setup */
1049 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1050 continue;
1051 }
1052 }
1053 }
1054
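/*
 * Allocate the per-drive DMA state. dma_table is the physical region
 * descriptor (PRD) table described in the bus-master IDE spec referenced
 * above: each struct idedma_table entry holds the little-endian physical
 * base address and byte count of one data segment, and
 * IDEDMA_BYTE_COUNT_EOT marks the final entry. Neither the table nor a
 * data segment may cross a 64KB boundary, hence the IDEDMA_TBL_ALIGN and
 * IDEDMA_BYTE_COUNT_ALIGN constraints passed to bus_dma below.
 */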
1055 int
1056 pciide_dma_table_setup(sc, channel, drive)
1057 struct pciide_softc *sc;
1058 int channel, drive;
1059 {
1060 bus_dma_segment_t seg;
1061 int error, rseg;
1062 const bus_size_t dma_table_size =
1063 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1064 struct pciide_dma_maps *dma_maps =
1065 &sc->pciide_channels[channel].dma_maps[drive];
1066
1067 /* If table was already allocated, just return */
1068 if (dma_maps->dma_table)
1069 return 0;
1070
1071 /* Allocate memory for the DMA tables and map it */
1072 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1073 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1074 BUS_DMA_NOWAIT)) != 0) {
1075 printf("%s:%d: unable to allocate table DMA for "
1076 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1077 channel, drive, error);
1078 return error;
1079 }
1080 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1081 dma_table_size,
1082 (caddr_t *)&dma_maps->dma_table,
1083 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1084 printf("%s:%d: unable to map table DMA for "
1085 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1086 channel, drive, error);
1087 return error;
1088 }
1089 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1090 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1091 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1092
1093 /* Create and load table DMA map for this disk */
1094 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1095 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1096 &dma_maps->dmamap_table)) != 0) {
1097 printf("%s:%d: unable to create table DMA map for "
1098 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1099 channel, drive, error);
1100 return error;
1101 }
1102 if ((error = bus_dmamap_load(sc->sc_dmat,
1103 dma_maps->dmamap_table,
1104 dma_maps->dma_table,
1105 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1106 printf("%s:%d: unable to load table DMA map for "
1107 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1108 channel, drive, error);
1109 return error;
1110 }
1111 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1112 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1113 DEBUG_PROBE);
1114 /* Create an xfer DMA map for this drive */
1115 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1116 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1117 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1118 &dma_maps->dmamap_xfer)) != 0) {
1119 printf("%s:%d: unable to create xfer DMA map for "
1120 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1121 channel, drive, error);
1122 return error;
1123 }
1124 return 0;
1125 }
1126
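/*
 * pciide_dma_init/start/finish implement the usual bus-master IDE
 * programming sequence: load and sync the xfer DMA map and fill in the
 * PRD table, clear the per-channel status bits in IDEDMA_CTL, write the
 * table's physical address to IDEDMA_TBL and the transfer direction to
 * IDEDMA_CMD, then set IDEDMA_CMD_START once the drive command has been
 * issued. On completion the IDEDMA_CTL status is read and checked for
 * the interrupt, error and still-active bits, the engine is stopped and
 * the xfer map is unloaded.
 */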
1127 int
1128 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1129 void *v;
1130 int channel, drive;
1131 void *databuf;
1132 size_t datalen;
1133 int flags;
1134 {
1135 struct pciide_softc *sc = v;
1136 int error, seg;
1137 struct pciide_dma_maps *dma_maps =
1138 &sc->pciide_channels[channel].dma_maps[drive];
1139
1140 error = bus_dmamap_load(sc->sc_dmat,
1141 dma_maps->dmamap_xfer,
1142 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1143 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1144 if (error) {
1145 printf("%s:%d: unable to load xfer DMA map for "
1146 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1147 channel, drive, error);
1148 return error;
1149 }
1150
1151 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1152 dma_maps->dmamap_xfer->dm_mapsize,
1153 (flags & WDC_DMA_READ) ?
1154 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1155
1156 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1157 #ifdef DIAGNOSTIC
1158 /* A segment must not cross a 64k boundary */
1159 {
1160 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1161 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1162 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1163 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1164 printf("pciide_dma: segment %d physical addr 0x%lx"
1165 " len 0x%lx not properly aligned\n",
1166 seg, phys, len);
1167 panic("pciide_dma: buf align");
1168 }
1169 }
1170 #endif
1171 dma_maps->dma_table[seg].base_addr =
1172 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1173 dma_maps->dma_table[seg].byte_count =
1174 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1175 IDEDMA_BYTE_COUNT_MASK);
1176 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1177 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1178 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1179
1180 }
1181 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1182 htole32(IDEDMA_BYTE_COUNT_EOT);
1183
1184 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1185 dma_maps->dmamap_table->dm_mapsize,
1186 BUS_DMASYNC_PREWRITE);
1187
1188 /* Maps are ready. Program the DMA engine. */
1189 #ifdef DIAGNOSTIC
1190 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1191 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1192 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1193 panic("pciide_dma_init: table align");
1194 }
1195 #endif
1196
1197 /* Clear status bits */
1198 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1199 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1200 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1201 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1202 /* Write table addr */
1203 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1204 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1205 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1206 /* set read/write */
1207 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1208 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1209 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1210 /* remember flags */
1211 dma_maps->dma_flags = flags;
1212 return 0;
1213 }
1214
1215 void
1216 pciide_dma_start(v, channel, drive)
1217 void *v;
1218 int channel, drive;
1219 {
1220 struct pciide_softc *sc = v;
1221
1222 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1223 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1224 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1225 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1226 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1227 }
1228
1229 int
1230 pciide_dma_finish(v, channel, drive, force)
1231 void *v;
1232 int channel, drive;
1233 int force;
1234 {
1235 struct pciide_softc *sc = v;
1236 u_int8_t status;
1237 int error = 0;
1238 struct pciide_dma_maps *dma_maps =
1239 &sc->pciide_channels[channel].dma_maps[drive];
1240
1241 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1242 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1243 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1244 DEBUG_XFERS);
1245
1246 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1247 return WDC_DMAST_NOIRQ;
1248
1249 /* stop DMA channel */
1250 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1251 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1252 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1253 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1254
1255 /* Unload the map of the data buffer */
1256 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1257 dma_maps->dmamap_xfer->dm_mapsize,
1258 (dma_maps->dma_flags & WDC_DMA_READ) ?
1259 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1260 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1261
1262 if ((status & IDEDMA_CTL_ERR) != 0) {
1263 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1264 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1265 error |= WDC_DMAST_ERR;
1266 }
1267
1268 if ((status & IDEDMA_CTL_INTR) == 0) {
1269 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1270 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1271 drive, status);
1272 error |= WDC_DMAST_NOIRQ;
1273 }
1274
1275 if ((status & IDEDMA_CTL_ACT) != 0) {
1276 /* data underrun, may be a valid condition for ATAPI */
1277 error |= WDC_DMAST_UNDER;
1278 }
1279 return error;
1280 }
1281
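/*
 * The interrupt and error bits of IDEDMA_CTL (the bus-master status
 * register) are write-one-to-clear, so reading the register and writing
 * the value straight back, as done here and in pciide_dma_init(),
 * acknowledges a pending interrupt without disturbing the software
 * "drive DMA capable" bits.
 */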
1282 void
1283 pciide_irqack(chp)
1284 struct channel_softc *chp;
1285 {
1286 struct pciide_channel *cp = (struct pciide_channel*)chp;
1287 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1288
1289 /* clear status bits in IDE DMA registers */
1290 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1291 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1292 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1293 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1294 }
1295
1296 /* some common code used by several chip_map */
1297 int
1298 pciide_chansetup(sc, channel, interface)
1299 struct pciide_softc *sc;
1300 int channel;
1301 pcireg_t interface;
1302 {
1303 struct pciide_channel *cp = &sc->pciide_channels[channel];
1304 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1305 cp->name = PCIIDE_CHANNEL_NAME(channel);
1306 cp->wdc_channel.channel = channel;
1307 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1308 cp->wdc_channel.ch_queue =
1309 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1310 if (cp->wdc_channel.ch_queue == NULL) {
1311 printf("%s: %s channel: "
1312 "can't allocate memory for command queue\n",
1313 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1314 return 0;
1315 }
1316 printf("%s: %s channel %s to %s mode\n",
1317 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1318 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1319 "configured" : "wired",
1320 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1321 "native-PCI" : "compatibility");
1322 return 1;
1323 }
1324
1325 /* some common code used by several chip channel_map */
1326 void
1327 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1328 struct pci_attach_args *pa;
1329 struct pciide_channel *cp;
1330 pcireg_t interface;
1331 bus_size_t *cmdsizep, *ctlsizep;
1332 int (*pci_intr) __P((void *));
1333 {
1334 struct channel_softc *wdc_cp = &cp->wdc_channel;
1335
1336 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1337 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1338 pci_intr);
1339 else
1340 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1341 wdc_cp->channel, cmdsizep, ctlsizep);
1342
1343 if (cp->hw_ok == 0)
1344 return;
1345 wdc_cp->data32iot = wdc_cp->cmd_iot;
1346 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1347 wdcattach(wdc_cp);
1348 }
1349
1350 /*
1351 * Generic code to determine whether a channel can be disabled. Returns 1
1352 * if the channel can be disabled, 0 if not.
1353 */
1354 int
1355 pciide_chan_candisable(cp)
1356 struct pciide_channel *cp;
1357 {
1358 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1359 struct channel_softc *wdc_cp = &cp->wdc_channel;
1360
1361 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1362 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1363 printf("%s: disabling %s channel (no drives)\n",
1364 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1365 cp->hw_ok = 0;
1366 return 1;
1367 }
1368 return 0;
1369 }
1370
1371 /*
1372 * Generic code to map the compat intr if hw_ok=1 and this is a compat channel.
1373 * Sets hw_ok=0 on failure.
1374 */
1375 void
1376 pciide_map_compat_intr(pa, cp, compatchan, interface)
1377 struct pci_attach_args *pa;
1378 struct pciide_channel *cp;
1379 int compatchan, interface;
1380 {
1381 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1382 struct channel_softc *wdc_cp = &cp->wdc_channel;
1383
1384 if (cp->hw_ok == 0)
1385 return;
1386 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1387 return;
1388
1389 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1390 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1391 pa, compatchan, pciide_compat_intr, cp);
1392 if (cp->ih == NULL) {
1393 #endif
1394 printf("%s: no compatibility interrupt for use by %s "
1395 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1396 cp->hw_ok = 0;
1397 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1398 }
1399 #endif
1400 }
1401
1402 void
1403 pciide_print_modes(cp)
1404 struct pciide_channel *cp;
1405 {
1406 wdc_print_modes(&cp->wdc_channel);
1407 }
1408
1409 void
1410 default_chip_map(sc, pa)
1411 struct pciide_softc *sc;
1412 struct pci_attach_args *pa;
1413 {
1414 struct pciide_channel *cp;
1415 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1416 pcireg_t csr;
1417 int channel, drive;
1418 struct ata_drive_datas *drvp;
1419 u_int8_t idedma_ctl;
1420 bus_size_t cmdsize, ctlsize;
1421 char *failreason;
1422
1423 if (pciide_chipen(sc, pa) == 0)
1424 return;
1425
1426 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1427 printf("%s: bus-master DMA support present",
1428 sc->sc_wdcdev.sc_dev.dv_xname);
1429 if (sc->sc_pp == &default_product_desc &&
1430 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1431 PCIIDE_OPTIONS_DMA) == 0) {
1432 printf(", but unused (no driver support)");
1433 sc->sc_dma_ok = 0;
1434 } else {
1435 pciide_mapreg_dma(sc, pa);
1436 if (sc->sc_dma_ok != 0)
1437 printf(", used without full driver "
1438 "support");
1439 }
1440 } else {
1441 printf("%s: hardware does not support DMA",
1442 sc->sc_wdcdev.sc_dev.dv_xname);
1443 sc->sc_dma_ok = 0;
1444 }
1445 printf("\n");
1446 if (sc->sc_dma_ok) {
1447 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1448 sc->sc_wdcdev.irqack = pciide_irqack;
1449 }
1450 sc->sc_wdcdev.PIO_cap = 0;
1451 sc->sc_wdcdev.DMA_cap = 0;
1452
1453 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1454 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1455 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1456
1457 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1458 cp = &sc->pciide_channels[channel];
1459 if (pciide_chansetup(sc, channel, interface) == 0)
1460 continue;
1461 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1462 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1463 &ctlsize, pciide_pci_intr);
1464 } else {
1465 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1466 channel, &cmdsize, &ctlsize);
1467 }
1468 if (cp->hw_ok == 0)
1469 continue;
1470 /*
1471 * Check to see if something appears to be there.
1472 */
1473 failreason = NULL;
1474 if (!wdcprobe(&cp->wdc_channel)) {
1475 failreason = "not responding; disabled or no drives?";
1476 goto next;
1477 }
1478 /*
1479 * Now, make sure it's actually attributable to this PCI IDE
1480 * channel by trying to access the channel again while the
1481 * PCI IDE controller's I/O space is disabled. (If the
1482 * channel no longer appears to be there, it belongs to
1483 * this controller.) YUCK!
1484 */
1485 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1486 PCI_COMMAND_STATUS_REG);
1487 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1488 csr & ~PCI_COMMAND_IO_ENABLE);
1489 if (wdcprobe(&cp->wdc_channel))
1490 failreason = "other hardware responding at addresses";
1491 pci_conf_write(sc->sc_pc, sc->sc_tag,
1492 PCI_COMMAND_STATUS_REG, csr);
1493 next:
1494 if (failreason) {
1495 printf("%s: %s channel ignored (%s)\n",
1496 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1497 failreason);
1498 cp->hw_ok = 0;
1499 bus_space_unmap(cp->wdc_channel.cmd_iot,
1500 cp->wdc_channel.cmd_ioh, cmdsize);
1501 if (interface & PCIIDE_INTERFACE_PCI(channel))
1502 bus_space_unmap(cp->wdc_channel.ctl_iot,
1503 cp->ctl_baseioh, ctlsize);
1504 else
1505 bus_space_unmap(cp->wdc_channel.ctl_iot,
1506 cp->wdc_channel.ctl_ioh, ctlsize);
1507 } else {
1508 pciide_map_compat_intr(pa, cp, channel, interface);
1509 }
1510 if (cp->hw_ok) {
1511 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1512 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1513 wdcattach(&cp->wdc_channel);
1514 }
1515 }
1516
1517 if (sc->sc_dma_ok == 0)
1518 return;
1519
1520 /* Allocate DMA maps */
1521 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1522 idedma_ctl = 0;
1523 cp = &sc->pciide_channels[channel];
1524 for (drive = 0; drive < 2; drive++) {
1525 drvp = &cp->wdc_channel.ch_drive[drive];
1526 /* If no drive, skip */
1527 if ((drvp->drive_flags & DRIVE) == 0)
1528 continue;
1529 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1530 continue;
1531 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1532 /* Abort DMA setup */
1533 printf("%s:%d:%d: can't allocate DMA maps, "
1534 "using PIO transfers\n",
1535 sc->sc_wdcdev.sc_dev.dv_xname,
1536 channel, drive);
1537 drvp->drive_flags &= ~DRIVE_DMA;
continue;
1538 }
1539 printf("%s:%d:%d: using DMA data transfers\n",
1540 sc->sc_wdcdev.sc_dev.dv_xname,
1541 channel, drive);
1542 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1543 }
1544 if (idedma_ctl != 0) {
1545 /* Add software bits in status register */
1546 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1547 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1548 idedma_ctl);
1549 }
1550 }
1551 }
1552
1553 void
1554 piix_chip_map(sc, pa)
1555 struct pciide_softc *sc;
1556 struct pci_attach_args *pa;
1557 {
1558 struct pciide_channel *cp;
1559 int channel;
1560 u_int32_t idetim;
1561 bus_size_t cmdsize, ctlsize;
1562
1563 if (pciide_chipen(sc, pa) == 0)
1564 return;
1565
1566 printf("%s: bus-master DMA support present",
1567 sc->sc_wdcdev.sc_dev.dv_xname);
1568 pciide_mapreg_dma(sc, pa);
1569 printf("\n");
1570 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1571 WDC_CAPABILITY_MODE;
1572 if (sc->sc_dma_ok) {
1573 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1574 sc->sc_wdcdev.irqack = pciide_irqack;
1575 switch(sc->sc_pp->ide_product) {
1576 case PCI_PRODUCT_INTEL_82371AB_IDE:
1577 case PCI_PRODUCT_INTEL_82440MX_IDE:
1578 case PCI_PRODUCT_INTEL_82801AA_IDE:
1579 case PCI_PRODUCT_INTEL_82801AB_IDE:
1580 case PCI_PRODUCT_INTEL_82801BA_IDE:
1581 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1582 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1583 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1584 case PCI_PRODUCT_INTEL_82801DB_IDE:
1585 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1586 }
1587 }
1588 sc->sc_wdcdev.PIO_cap = 4;
1589 sc->sc_wdcdev.DMA_cap = 2;
1590 switch(sc->sc_pp->ide_product) {
1591 case PCI_PRODUCT_INTEL_82801AA_IDE:
1592 sc->sc_wdcdev.UDMA_cap = 4;
1593 break;
1594 case PCI_PRODUCT_INTEL_82801BA_IDE:
1595 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1596 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1597 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1598 case PCI_PRODUCT_INTEL_82801DB_IDE:
1599 sc->sc_wdcdev.UDMA_cap = 5;
1600 break;
1601 default:
1602 sc->sc_wdcdev.UDMA_cap = 2;
1603 }
1604 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1605 sc->sc_wdcdev.set_modes = piix_setup_channel;
1606 else
1607 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1608 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1609 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1610
1611 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1612 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1613 DEBUG_PROBE);
1614 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1615 WDCDEBUG_PRINT((", sidetim=0x%x",
1616 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1617 DEBUG_PROBE);
1618 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1619 WDCDEBUG_PRINT((", udmareg 0x%x",
1620 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1621 DEBUG_PROBE);
1622 }
1623 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1624 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1625 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1626 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1627 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1628 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1629 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1630 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1631 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1632 DEBUG_PROBE);
1633 }
1634
1635 }
1636 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1637
1638 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1639 cp = &sc->pciide_channels[channel];
1640 /* PIIX is compat-only */
1641 if (pciide_chansetup(sc, channel, 0) == 0)
1642 continue;
1643 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1644 if ((PIIX_IDETIM_READ(idetim, channel) &
1645 PIIX_IDETIM_IDE) == 0) {
1646 printf("%s: %s channel ignored (disabled)\n",
1647 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1648 continue;
1649 }
1650 /* PIIX are compat-only pciide devices */
1651 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1652 if (cp->hw_ok == 0)
1653 continue;
1654 if (pciide_chan_candisable(cp)) {
1655 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1656 channel);
1657 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1658 idetim);
1659 }
1660 pciide_map_compat_intr(pa, cp, channel, 0);
1661 if (cp->hw_ok == 0)
1662 continue;
1663 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1664 }
1665
1666 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1667 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1668 DEBUG_PROBE);
1669 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1670 WDCDEBUG_PRINT((", sidetim=0x%x",
1671 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1672 DEBUG_PROBE);
1673 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1674 WDCDEBUG_PRINT((", udmareg 0x%x",
1675 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1676 DEBUG_PROBE);
1677 }
1678 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1679 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1680 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1681 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1682 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1683 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1684 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1685 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1686 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1687 DEBUG_PROBE);
1688 }
1689 }
1690 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1691 }
1692
1693 void
1694 piix_setup_channel(chp)
1695 struct channel_softc *chp;
1696 {
1697 u_int8_t mode[2], drive;
1698 u_int32_t oidetim, idetim, idedma_ctl;
1699 struct pciide_channel *cp = (struct pciide_channel*)chp;
1700 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1701 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1702
1703 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1704 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1705 idedma_ctl = 0;
1706
1707 /* set up new idetim: Enable IDE registers decode */
1708 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1709 chp->channel);
1710
1711 /* setup DMA */
1712 pciide_channel_dma_setup(cp);
1713
1714 /*
1715 * Here we have to mess with the drives' modes: the PIIX can't have
1716 * different timings for the master and slave drives.
1717 * We need to find the best combination.
1718 */
1719
1720 /* If both drives support DMA, take the lower mode */
1721 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1722 (drvp[1].drive_flags & DRIVE_DMA)) {
1723 mode[0] = mode[1] =
1724 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1725 drvp[0].DMA_mode = mode[0];
1726 drvp[1].DMA_mode = mode[1];
1727 goto ok;
1728 }
1729 /*
1730 * If only one drive supports DMA, use its mode, and
1731 * put the other one in PIO mode 0 if its mode is not compatible.
1732 */
1733 if (drvp[0].drive_flags & DRIVE_DMA) {
1734 mode[0] = drvp[0].DMA_mode;
1735 mode[1] = drvp[1].PIO_mode;
1736 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1737 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1738 mode[1] = drvp[1].PIO_mode = 0;
1739 goto ok;
1740 }
1741 if (drvp[1].drive_flags & DRIVE_DMA) {
1742 mode[1] = drvp[1].DMA_mode;
1743 mode[0] = drvp[0].PIO_mode;
1744 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1745 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1746 mode[0] = drvp[0].PIO_mode = 0;
1747 goto ok;
1748 }
1749 /*
1750 * If neither drive is using DMA, take the lower mode, unless
1751 * one of them is in PIO mode < 2.
1752 */
1753 if (drvp[0].PIO_mode < 2) {
1754 mode[0] = drvp[0].PIO_mode = 0;
1755 mode[1] = drvp[1].PIO_mode;
1756 } else if (drvp[1].PIO_mode < 2) {
1757 mode[1] = drvp[1].PIO_mode = 0;
1758 mode[0] = drvp[0].PIO_mode;
1759 } else {
1760 mode[0] = mode[1] =
1761 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1762 drvp[0].PIO_mode = mode[0];
1763 drvp[1].PIO_mode = mode[1];
1764 }
1765 ok: /* The modes are set up */
1766 for (drive = 0; drive < 2; drive++) {
1767 if (drvp[drive].drive_flags & DRIVE_DMA) {
1768 idetim |= piix_setup_idetim_timings(
1769 mode[drive], 1, chp->channel);
1770 goto end;
1771 }
1772 }
1773 /* If we get here, neither drive is using DMA */
1774 if (mode[0] >= 2)
1775 idetim |= piix_setup_idetim_timings(
1776 mode[0], 0, chp->channel);
1777 else
1778 idetim |= piix_setup_idetim_timings(
1779 mode[1], 0, chp->channel);
1780 end: /*
1781 * The timing mode is now set up in the controller. Enable
1782 * it per-drive.
1783 */
1784 for (drive = 0; drive < 2; drive++) {
1785 /* If no drive, skip */
1786 if ((drvp[drive].drive_flags & DRIVE) == 0)
1787 continue;
1788 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1789 if (drvp[drive].drive_flags & DRIVE_DMA)
1790 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1791 }
1792 if (idedma_ctl != 0) {
1793 /* Add software bits in status register */
1794 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1795 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1796 idedma_ctl);
1797 }
1798 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1799 pciide_print_modes(cp);
1800 }
1801
1802 void
1803 piix3_4_setup_channel(chp)
1804 struct channel_softc *chp;
1805 {
1806 struct ata_drive_datas *drvp;
1807 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1808 struct pciide_channel *cp = (struct pciide_channel*)chp;
1809 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1810 int drive;
1811 int channel = chp->channel;
1812
1813 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1814 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1815 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1816 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1817 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1818 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1819 PIIX_SIDETIM_RTC_MASK(channel));
1820
1821 idedma_ctl = 0;
1822 /* If channel disabled, no need to go further */
1823 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1824 return;
1825 /* set up new idetim: Enable IDE registers decode */
1826 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1827
1828 /* setup DMA if needed */
1829 pciide_channel_dma_setup(cp);
1830
1831 for (drive = 0; drive < 2; drive++) {
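		/*
		 * Start from a clean slate: clear this drive's UDMA enable
		 * and timing bits; they are set again below if UDMA is used.
		 */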
1832 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1833 PIIX_UDMATIM_SET(0x3, channel, drive));
1834 drvp = &chp->ch_drive[drive];
1835 /* If no drive, skip */
1836 if ((drvp->drive_flags & DRIVE) == 0)
1837 continue;
1838 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1839 (drvp->drive_flags & DRIVE_UDMA) == 0))
1840 goto pio;
1841
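		/*
		 * ICH-family parts: enable the chip's ping-pong data
		 * buffering (PIIX_CONFIG_PINGPONG) whenever this drive
		 * uses DMA or UDMA.
		 */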
1842 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1843 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1844 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1845 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1846 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1847 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1848 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1849 ideconf |= PIIX_CONFIG_PINGPONG;
1850 }
1851 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1852 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1853 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1854 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1855 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1856 /* setup Ultra/100 */
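			/*
			 * PIIX_CONFIG_CR is the 80-conductor cable report
			 * bit for this drive; if it is clear, cap the drive
			 * at UDMA2 (Ultra/33).
			 */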
1857 if (drvp->UDMA_mode > 2 &&
1858 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1859 drvp->UDMA_mode = 2;
1860 if (drvp->UDMA_mode > 4) {
1861 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1862 } else {
1863 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1864 if (drvp->UDMA_mode > 2) {
1865 ideconf |= PIIX_CONFIG_UDMA66(channel,
1866 drive);
1867 } else {
1868 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1869 drive);
1870 }
1871 }
1872 }
1873 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1874 /* setup Ultra/66 */
1875 if (drvp->UDMA_mode > 2 &&
1876 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1877 drvp->UDMA_mode = 2;
1878 if (drvp->UDMA_mode > 2)
1879 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1880 else
1881 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1882 }
1883 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1884 (drvp->drive_flags & DRIVE_UDMA)) {
1885 /* use Ultra/DMA */
1886 drvp->drive_flags &= ~DRIVE_DMA;
1887 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1888 udmareg |= PIIX_UDMATIM_SET(
1889 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1890 } else {
1891 /* use Multiword DMA */
1892 drvp->drive_flags &= ~DRIVE_UDMA;
1893 if (drive == 0) {
1894 idetim |= piix_setup_idetim_timings(
1895 drvp->DMA_mode, 1, channel);
1896 } else {
1897 sidetim |= piix_setup_sidetim_timings(
1898 drvp->DMA_mode, 1, channel);
1899 				idetim = PIIX_IDETIM_SET(idetim,
1900 PIIX_IDETIM_SITRE, channel);
1901 }
1902 }
1903 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
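		/*
		 * Fall through: the PIO timing fields below are also
		 * programmed for drives using DMA or UDMA.
		 */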
1904
1905 pio: /* use PIO mode */
1906 idetim |= piix_setup_idetim_drvs(drvp);
1907 if (drive == 0) {
1908 idetim |= piix_setup_idetim_timings(
1909 drvp->PIO_mode, 0, channel);
1910 } else {
1911 sidetim |= piix_setup_sidetim_timings(
1912 drvp->PIO_mode, 0, channel);
1913 			idetim = PIIX_IDETIM_SET(idetim,
1914 PIIX_IDETIM_SITRE, channel);
1915 }
1916 }
1917 if (idedma_ctl != 0) {
1918 /* Add software bits in status register */
1919 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1920 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1921 idedma_ctl);
1922 }
1923 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1924 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1925 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1926 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1927 pciide_print_modes(cp);
1928 }
1929
1930
1931 /* setup ISP and RTC fields, based on mode */
1932 static u_int32_t
1933 piix_setup_idetim_timings(mode, dma, channel)
1934 u_int8_t mode;
1935 u_int8_t dma;
1936 u_int8_t channel;
1937 {
1938
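	/*
	 * ISP is the IORDY sample point and RTC the recovery time count;
	 * the values come from the piix_isp_pio/dma and piix_rtc_pio/dma
	 * tables, indexed by mode.
	 */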
1939 if (dma)
1940 return PIIX_IDETIM_SET(0,
1941 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1942 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1943 channel);
1944 else
1945 return PIIX_IDETIM_SET(0,
1946 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1947 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1948 channel);
1949 }
1950
1951 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1952 static u_int32_t
1953 piix_setup_idetim_drvs(drvp)
1954 struct ata_drive_datas *drvp;
1955 {
1956 u_int32_t ret = 0;
1957 struct channel_softc *chp = drvp->chnl_softc;
1958 u_int8_t channel = chp->channel;
1959 u_int8_t drive = drvp->drive;
1960
1961 	/*
1962 	 * If the drive is using UDMA, the timing setup is independent,
1963 	 * so just check DMA and PIO here.
1964 	 */
1965 if (drvp->drive_flags & DRIVE_DMA) {
1966 		/* if the drive is in DMA mode 0, use compatible timings */
1967 if ((drvp->drive_flags & DRIVE_DMA) &&
1968 drvp->DMA_mode == 0) {
1969 drvp->PIO_mode = 0;
1970 return ret;
1971 }
1972 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1973 		/*
1974 		 * If PIO and DMA timings are the same, use fast timings for
1975 		 * PIO too; otherwise fall back to compat timings.
1976 		 */
1977 if ((piix_isp_pio[drvp->PIO_mode] !=
1978 piix_isp_dma[drvp->DMA_mode]) ||
1979 (piix_rtc_pio[drvp->PIO_mode] !=
1980 piix_rtc_dma[drvp->DMA_mode]))
1981 drvp->PIO_mode = 0;
1982 /* if PIO mode <= 2, use compat timings for PIO */
1983 if (drvp->PIO_mode <= 2) {
1984 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1985 channel);
1986 return ret;
1987 }
1988 }
1989
1990 	/*
1991 	 * Now set up PIO modes. If the mode is < 2, use compat timings;
1992 	 * else enable fast timings. Also enable IORDY and prefetch/post
1993 	 * if the PIO mode is >= 3.
1994 	 */
1995
1996 if (drvp->PIO_mode < 2)
1997 return ret;
1998
1999 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2000 if (drvp->PIO_mode >= 3) {
2001 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
2002 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
2003 }
2004 return ret;
2005 }
2006
2007 /* setup values in SIDETIM registers, based on mode */
2008 static u_int32_t
2009 piix_setup_sidetim_timings(mode, dma, channel)
2010 u_int8_t mode;
2011 u_int8_t dma;
2012 u_int8_t channel;
2013 {
2014 if (dma)
2015 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
2016 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
2017 else
2018 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
2019 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
2020 }
2021
2022 void
2023 amd7x6_chip_map(sc, pa)
2024 struct pciide_softc *sc;
2025 struct pci_attach_args *pa;
2026 {
2027 struct pciide_channel *cp;
2028 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2029 int channel;
2030 pcireg_t chanenable;
2031 bus_size_t cmdsize, ctlsize;
2032
2033 if (pciide_chipen(sc, pa) == 0)
2034 return;
2035 printf("%s: bus-master DMA support present",
2036 sc->sc_wdcdev.sc_dev.dv_xname);
2037 pciide_mapreg_dma(sc, pa);
2038 printf("\n");
2039 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2040 WDC_CAPABILITY_MODE;
2041 if (sc->sc_dma_ok) {
2042 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2043 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2044 sc->sc_wdcdev.irqack = pciide_irqack;
2045 }
2046 sc->sc_wdcdev.PIO_cap = 4;
2047 sc->sc_wdcdev.DMA_cap = 2;
2048
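	/*
	 * The UDMA capability and the timing register base differ between
	 * the AMD and nVidia (nForce) flavours of this IDE core.
	 */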
2049 switch (sc->sc_pci_vendor) {
2050 case PCI_VENDOR_AMD:
2051 switch (sc->sc_pp->ide_product) {
2052 case PCI_PRODUCT_AMD_PBC766_IDE:
2053 case PCI_PRODUCT_AMD_PBC768_IDE:
2054 case PCI_PRODUCT_AMD_PBC8111_IDE:
2055 sc->sc_wdcdev.UDMA_cap = 5;
2056 break;
2057 default:
2058 sc->sc_wdcdev.UDMA_cap = 4;
2059 }
2060 sc->sc_amd_regbase = AMD7X6_AMD_REGBASE;
2061 break;
2062
2063 case PCI_VENDOR_NVIDIA:
2064 switch (sc->sc_pp->ide_product) {
2065 case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
2066 sc->sc_wdcdev.UDMA_cap = 5;
2067 break;
2068 case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
2069 sc->sc_wdcdev.UDMA_cap = 6;
2070 break;
2071 }
2072 sc->sc_amd_regbase = AMD7X6_NVIDIA_REGBASE;
2073 break;
2074
2075 default:
2076 panic("amd7x6_chip_map: unknown vendor");
2077 }
2078 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2079 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2080 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2081 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag,
2082 AMD7X6_CHANSTATUS_EN(sc));
2083
2084 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2085 DEBUG_PROBE);
2086 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2087 cp = &sc->pciide_channels[channel];
2088 if (pciide_chansetup(sc, channel, interface) == 0)
2089 continue;
2090
2091 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2092 printf("%s: %s channel ignored (disabled)\n",
2093 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2094 continue;
2095 }
2096 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2097 pciide_pci_intr);
2098
2099 if (pciide_chan_candisable(cp))
2100 chanenable &= ~AMD7X6_CHAN_EN(channel);
2101 pciide_map_compat_intr(pa, cp, channel, interface);
2102 if (cp->hw_ok == 0)
2103 continue;
2104
2105 amd7x6_setup_channel(&cp->wdc_channel);
2106 }
2107 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN(sc),
2108 chanenable);
2109 return;
2110 }
2111
2112 void
2113 amd7x6_setup_channel(chp)
2114 struct channel_softc *chp;
2115 {
2116 u_int32_t udmatim_reg, datatim_reg;
2117 u_int8_t idedma_ctl;
2118 int mode, drive;
2119 struct ata_drive_datas *drvp;
2120 struct pciide_channel *cp = (struct pciide_channel*)chp;
2121 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2122 #ifndef PCIIDE_AMD756_ENABLEDMA
2123 int rev = PCI_REVISION(
2124 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2125 #endif
2126
2127 idedma_ctl = 0;
2128 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc));
2129 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc));
2130 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2131 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2132
2133 /* setup DMA if needed */
2134 pciide_channel_dma_setup(cp);
2135
2136 for (drive = 0; drive < 2; drive++) {
2137 drvp = &chp->ch_drive[drive];
2138 /* If no drive, skip */
2139 if ((drvp->drive_flags & DRIVE) == 0)
2140 continue;
2141 /* add timing values, setup DMA if needed */
2142 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2143 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2144 mode = drvp->PIO_mode;
2145 goto pio;
2146 }
2147 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2148 (drvp->drive_flags & DRIVE_UDMA)) {
2149 /* use Ultra/DMA */
2150 drvp->drive_flags &= ~DRIVE_DMA;
2151 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2152 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2153 AMD7X6_UDMA_TIME(chp->channel, drive,
2154 amd7x6_udma_tim[drvp->UDMA_mode]);
2155 /* can use PIO timings, MW DMA unused */
2156 mode = drvp->PIO_mode;
2157 } else {
2158 /* use Multiword DMA, but only if revision is OK */
2159 drvp->drive_flags &= ~DRIVE_UDMA;
2160 #ifndef PCIIDE_AMD756_ENABLEDMA
2161 			/*
2162 			 * This workaround doesn't seem to be necessary
2163 			 * with all drives, so it can be disabled with
2164 			 * PCIIDE_AMD756_ENABLEDMA. The hardware bug causes
2165 			 * a hard hang if triggered.
2166 			 */
2167 if (sc->sc_pci_vendor == PCI_VENDOR_AMD &&
2168 sc->sc_pp->ide_product ==
2169 PCI_PRODUCT_AMD_PBC756_IDE &&
2170 AMD756_CHIPREV_DISABLEDMA(rev)) {
2171 printf("%s:%d:%d: multi-word DMA disabled due "
2172 "to chip revision\n",
2173 sc->sc_wdcdev.sc_dev.dv_xname,
2174 chp->channel, drive);
2175 mode = drvp->PIO_mode;
2176 drvp->drive_flags &= ~DRIVE_DMA;
2177 goto pio;
2178 }
2179 #endif
2180 /* mode = min(pio, dma+2) */
2181 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2182 mode = drvp->PIO_mode;
2183 else
2184 mode = drvp->DMA_mode + 2;
2185 }
2186 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2187
2188 pio: /* setup PIO mode */
2189 if (mode <= 2) {
2190 drvp->DMA_mode = 0;
2191 drvp->PIO_mode = 0;
2192 mode = 0;
2193 } else {
2194 drvp->PIO_mode = mode;
2195 drvp->DMA_mode = mode - 2;
2196 }
2197 datatim_reg |=
2198 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2199 amd7x6_pio_set[mode]) |
2200 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2201 amd7x6_pio_rec[mode]);
2202 }
2203 if (idedma_ctl != 0) {
2204 /* Add software bits in status register */
2205 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2206 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2207 idedma_ctl);
2208 }
2209 pciide_print_modes(cp);
2210 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc), datatim_reg);
2211 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc), udmatim_reg);
2212 }
2213
2214 void
2215 apollo_chip_map(sc, pa)
2216 struct pciide_softc *sc;
2217 struct pci_attach_args *pa;
2218 {
2219 struct pciide_channel *cp;
2220 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2221 int channel;
2222 u_int32_t ideconf;
2223 bus_size_t cmdsize, ctlsize;
2224 pcitag_t pcib_tag;
2225 pcireg_t pcib_id, pcib_class;
2226
2227 if (pciide_chipen(sc, pa) == 0)
2228 return;
2229 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2230 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2231 /* and read ID and rev of the ISA bridge */
2232 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2233 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2234 printf(": VIA Technologies ");
2235 switch (PCI_PRODUCT(pcib_id)) {
2236 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2237 printf("VT82C586 (Apollo VP) ");
2238 		if (PCI_REVISION(pcib_class) >= 0x02) {
2239 printf("ATA33 controller\n");
2240 sc->sc_wdcdev.UDMA_cap = 2;
2241 } else {
2242 printf("controller\n");
2243 sc->sc_wdcdev.UDMA_cap = 0;
2244 }
2245 break;
2246 case PCI_PRODUCT_VIATECH_VT82C596A:
2247 printf("VT82C596A (Apollo Pro) ");
2248 if (PCI_REVISION(pcib_class) >= 0x12) {
2249 printf("ATA66 controller\n");
2250 sc->sc_wdcdev.UDMA_cap = 4;
2251 } else {
2252 printf("ATA33 controller\n");
2253 sc->sc_wdcdev.UDMA_cap = 2;
2254 }
2255 break;
2256 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2257 printf("VT82C686A (Apollo KX133) ");
2258 if (PCI_REVISION(pcib_class) >= 0x40) {
2259 printf("ATA100 controller\n");
2260 sc->sc_wdcdev.UDMA_cap = 5;
2261 } else {
2262 printf("ATA66 controller\n");
2263 sc->sc_wdcdev.UDMA_cap = 4;
2264 }
2265 break;
2266 case PCI_PRODUCT_VIATECH_VT8231:
2267 printf("VT8231 ATA100 controller\n");
2268 sc->sc_wdcdev.UDMA_cap = 5;
2269 break;
2270 case PCI_PRODUCT_VIATECH_VT8233:
2271 printf("VT8233 ATA100 controller\n");
2272 sc->sc_wdcdev.UDMA_cap = 5;
2273 break;
2274 case PCI_PRODUCT_VIATECH_VT8233A:
2275 printf("VT8233A ATA133 controller\n");
2276 sc->sc_wdcdev.UDMA_cap = 6;
2277 break;
2278 case PCI_PRODUCT_VIATECH_VT8235:
2279 printf("VT8235 ATA133 controller\n");
2280 sc->sc_wdcdev.UDMA_cap = 6;
2281 break;
2282 default:
2283 printf("unknown ATA controller\n");
2284 sc->sc_wdcdev.UDMA_cap = 0;
2285 }
2286
2287 printf("%s: bus-master DMA support present",
2288 sc->sc_wdcdev.sc_dev.dv_xname);
2289 pciide_mapreg_dma(sc, pa);
2290 printf("\n");
2291 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2292 WDC_CAPABILITY_MODE;
2293 if (sc->sc_dma_ok) {
2294 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2295 sc->sc_wdcdev.irqack = pciide_irqack;
2296 if (sc->sc_wdcdev.UDMA_cap > 0)
2297 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2298 }
2299 sc->sc_wdcdev.PIO_cap = 4;
2300 sc->sc_wdcdev.DMA_cap = 2;
2301 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2302 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2303 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2304
2305 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2306 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2307 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2308 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2309 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2310 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2311 DEBUG_PROBE);
2312
2313 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2314 cp = &sc->pciide_channels[channel];
2315 if (pciide_chansetup(sc, channel, interface) == 0)
2316 continue;
2317
2318 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2319 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2320 printf("%s: %s channel ignored (disabled)\n",
2321 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2322 continue;
2323 }
2324 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2325 pciide_pci_intr);
2326 if (cp->hw_ok == 0)
2327 continue;
2328 if (pciide_chan_candisable(cp)) {
2329 ideconf &= ~APO_IDECONF_EN(channel);
2330 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2331 ideconf);
2332 }
2333 pciide_map_compat_intr(pa, cp, channel, interface);
2334
2335 if (cp->hw_ok == 0)
2336 continue;
2337 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2338 }
2339 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2340 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2341 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2342 }
2343
2344 void
2345 apollo_setup_channel(chp)
2346 struct channel_softc *chp;
2347 {
2348 u_int32_t udmatim_reg, datatim_reg;
2349 u_int8_t idedma_ctl;
2350 int mode, drive;
2351 struct ata_drive_datas *drvp;
2352 struct pciide_channel *cp = (struct pciide_channel*)chp;
2353 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2354
2355 idedma_ctl = 0;
2356 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2357 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2358 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2359 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2360
2361 /* setup DMA if needed */
2362 pciide_channel_dma_setup(cp);
2363
2364 for (drive = 0; drive < 2; drive++) {
2365 drvp = &chp->ch_drive[drive];
2366 /* If no drive, skip */
2367 if ((drvp->drive_flags & DRIVE) == 0)
2368 continue;
2369 /* add timing values, setup DMA if needed */
2370 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2371 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2372 mode = drvp->PIO_mode;
2373 goto pio;
2374 }
2375 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2376 (drvp->drive_flags & DRIVE_UDMA)) {
2377 /* use Ultra/DMA */
2378 drvp->drive_flags &= ~DRIVE_DMA;
2379 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2380 APO_UDMA_EN_MTH(chp->channel, drive);
2381 if (sc->sc_wdcdev.UDMA_cap == 6) {
2382 /* 8233a */
2383 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2384 drive, apollo_udma133_tim[drvp->UDMA_mode]);
2385 } else if (sc->sc_wdcdev.UDMA_cap == 5) {
2386 /* 686b */
2387 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2388 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2389 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2390 /* 596b or 686a */
2391 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2392 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2393 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2394 } else {
2395 /* 596a or 586b */
2396 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2397 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2398 }
2399 /* can use PIO timings, MW DMA unused */
2400 mode = drvp->PIO_mode;
2401 } else {
2402 /* use Multiword DMA */
2403 drvp->drive_flags &= ~DRIVE_UDMA;
2404 /* mode = min(pio, dma+2) */
2405 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2406 mode = drvp->PIO_mode;
2407 else
2408 mode = drvp->DMA_mode + 2;
2409 }
2410 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2411
2412 pio: /* setup PIO mode */
2413 if (mode <= 2) {
2414 drvp->DMA_mode = 0;
2415 drvp->PIO_mode = 0;
2416 mode = 0;
2417 } else {
2418 drvp->PIO_mode = mode;
2419 drvp->DMA_mode = mode - 2;
2420 }
2421 datatim_reg |=
2422 APO_DATATIM_PULSE(chp->channel, drive,
2423 apollo_pio_set[mode]) |
2424 APO_DATATIM_RECOV(chp->channel, drive,
2425 apollo_pio_rec[mode]);
2426 }
2427 if (idedma_ctl != 0) {
2428 /* Add software bits in status register */
2429 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2430 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2431 idedma_ctl);
2432 }
2433 pciide_print_modes(cp);
2434 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2435 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2436 }
2437
2438 void
2439 cmd_channel_map(pa, sc, channel)
2440 struct pci_attach_args *pa;
2441 struct pciide_softc *sc;
2442 int channel;
2443 {
2444 struct pciide_channel *cp = &sc->pciide_channels[channel];
2445 bus_size_t cmdsize, ctlsize;
2446 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2447 int interface, one_channel;
2448
2449 	/*
2450 	 * The 0648/0649 can be told to identify as a RAID controller.
2451 	 * In this case, we have to fake the interface value.
2452 	 */
2453 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2454 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2455 PCIIDE_INTERFACE_SETTABLE(1);
2456 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2457 CMD_CONF_DSA1)
2458 interface |= PCIIDE_INTERFACE_PCI(0) |
2459 PCIIDE_INTERFACE_PCI(1);
2460 } else {
2461 interface = PCI_INTERFACE(pa->pa_class);
2462 }
2463
2464 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2465 cp->name = PCIIDE_CHANNEL_NAME(channel);
2466 cp->wdc_channel.channel = channel;
2467 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2468
2469 	/*
2470 	 * Older CMD64x chips don't have independent channels.
2471 	 */
2472 switch (sc->sc_pp->ide_product) {
2473 case PCI_PRODUCT_CMDTECH_649:
2474 one_channel = 0;
2475 break;
2476 default:
2477 one_channel = 1;
2478 break;
2479 }
2480
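	/*
	 * Channels that are not independent share the first channel's
	 * command queue, which serializes requests between the two
	 * channels.
	 */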
2481 if (channel > 0 && one_channel) {
2482 cp->wdc_channel.ch_queue =
2483 sc->pciide_channels[0].wdc_channel.ch_queue;
2484 } else {
2485 cp->wdc_channel.ch_queue =
2486 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2487 }
2488 if (cp->wdc_channel.ch_queue == NULL) {
2489 printf("%s %s channel: "
2490 		    "can't allocate memory for command queue\n",
2491 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2492 return;
2493 }
2494
2495 printf("%s: %s channel %s to %s mode\n",
2496 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2497 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2498 "configured" : "wired",
2499 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2500 "native-PCI" : "compatibility");
2501
2502 	/*
2503 	 * With a CMD PCI64x, if we get here the first channel is enabled:
2504 	 * there's no way to disable it without disabling the whole
2505 	 * device.
2506 	 */
2507 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2508 printf("%s: %s channel ignored (disabled)\n",
2509 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2510 return;
2511 }
2512
2513 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2514 if (cp->hw_ok == 0)
2515 return;
2516 if (channel == 1) {
2517 if (pciide_chan_candisable(cp)) {
2518 ctrl &= ~CMD_CTRL_2PORT;
2519 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2520 CMD_CTRL, ctrl);
2521 }
2522 }
2523 pciide_map_compat_intr(pa, cp, channel, interface);
2524 }
2525
2526 int
2527 cmd_pci_intr(arg)
2528 void *arg;
2529 {
2530 struct pciide_softc *sc = arg;
2531 struct pciide_channel *cp;
2532 struct channel_softc *wdc_cp;
2533 int i, rv, crv;
2534 u_int32_t priirq, secirq;
2535
2536 rv = 0;
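	/*
	 * The primary channel reports its interrupt status in CMD_CONF,
	 * the secondary channel in CMD_ARTTIM23.
	 */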
2537 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2538 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2539 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2540 cp = &sc->pciide_channels[i];
2541 wdc_cp = &cp->wdc_channel;
2542 		/* Skip compat channels. */
2543 if (cp->compat)
2544 continue;
2545 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2546 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2547 crv = wdcintr(wdc_cp);
2548 if (crv == 0)
2549 printf("%s:%d: bogus intr\n",
2550 sc->sc_wdcdev.sc_dev.dv_xname, i);
2551 else
2552 rv = 1;
2553 }
2554 }
2555 return rv;
2556 }
2557
2558 void
2559 cmd_chip_map(sc, pa)
2560 struct pciide_softc *sc;
2561 struct pci_attach_args *pa;
2562 {
2563 int channel;
2564
2565 	/*
2566 	 * On a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE and the
2567 	 * base address registers can be disabled at the hardware
2568 	 * level. In this case, the device is wired in compat mode and
2569 	 * its first channel is always enabled, but we can't rely on
2570 	 * PCI_COMMAND_IO_ENABLE.
2571 	 * In fact, it seems that the first channel of the CMD PCI0640
2572 	 * can't be disabled at all.
2573 	 */
2574
2575 #ifdef PCIIDE_CMD064x_DISABLE
2576 if (pciide_chipen(sc, pa) == 0)
2577 return;
2578 #endif
2579
2580 printf("%s: hardware does not support DMA\n",
2581 sc->sc_wdcdev.sc_dev.dv_xname);
2582 sc->sc_dma_ok = 0;
2583
2584 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2585 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2586 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2587
2588 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2589 cmd_channel_map(pa, sc, channel);
2590 }
2591 }
2592
2593 void
2594 cmd0643_9_chip_map(sc, pa)
2595 struct pciide_softc *sc;
2596 struct pci_attach_args *pa;
2597 {
2598 struct pciide_channel *cp;
2599 int channel;
2600 pcireg_t rev = PCI_REVISION(pa->pa_class);
2601
2602 	/*
2603 	 * On a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE and the
2604 	 * base address registers can be disabled at the hardware
2605 	 * level. In this case, the device is wired in compat mode and
2606 	 * its first channel is always enabled, but we can't rely on
2607 	 * PCI_COMMAND_IO_ENABLE.
2608 	 * In fact, it seems that the first channel of the CMD PCI0640
2609 	 * can't be disabled at all.
2610 	 */
2611
2612 #ifdef PCIIDE_CMD064x_DISABLE
2613 if (pciide_chipen(sc, pa) == 0)
2614 return;
2615 #endif
2616 printf("%s: bus-master DMA support present",
2617 sc->sc_wdcdev.sc_dev.dv_xname);
2618 pciide_mapreg_dma(sc, pa);
2619 printf("\n");
2620 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2621 WDC_CAPABILITY_MODE;
2622 if (sc->sc_dma_ok) {
2623 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2624 switch (sc->sc_pp->ide_product) {
2625 case PCI_PRODUCT_CMDTECH_649:
2626 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2627 sc->sc_wdcdev.UDMA_cap = 5;
2628 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2629 break;
2630 case PCI_PRODUCT_CMDTECH_648:
2631 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2632 sc->sc_wdcdev.UDMA_cap = 4;
2633 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2634 break;
2635 case PCI_PRODUCT_CMDTECH_646:
2636 if (rev >= CMD0646U2_REV) {
2637 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2638 sc->sc_wdcdev.UDMA_cap = 2;
2639 } else if (rev >= CMD0646U_REV) {
2640 /*
2641 * Linux's driver claims that the 646U is broken
2642 * with UDMA. Only enable it if we know what we're
2643 * doing
2644 */
2645 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2646 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2647 sc->sc_wdcdev.UDMA_cap = 2;
2648 #endif
2649 /* explicitly disable UDMA */
2650 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2651 CMD_UDMATIM(0), 0);
2652 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2653 CMD_UDMATIM(1), 0);
2654 }
2655 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2656 break;
2657 default:
2658 sc->sc_wdcdev.irqack = pciide_irqack;
2659 }
2660 }
2661
2662 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2663 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2664 sc->sc_wdcdev.PIO_cap = 4;
2665 sc->sc_wdcdev.DMA_cap = 2;
2666 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2667
2668 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2669 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2670 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2671 DEBUG_PROBE);
2672
2673 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2674 cp = &sc->pciide_channels[channel];
2675 cmd_channel_map(pa, sc, channel);
2676 if (cp->hw_ok == 0)
2677 continue;
2678 cmd0643_9_setup_channel(&cp->wdc_channel);
2679 }
2680 	/*
2681 	 * Note: this also makes sure we clear the IRQ disable and reset
2682 	 * bits.
2683 	 */
2684 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2685 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2686 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2687 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2688 DEBUG_PROBE);
2689 }
2690
2691 void
2692 cmd0643_9_setup_channel(chp)
2693 struct channel_softc *chp;
2694 {
2695 struct ata_drive_datas *drvp;
2696 u_int8_t tim;
2697 u_int32_t idedma_ctl, udma_reg;
2698 int drive;
2699 struct pciide_channel *cp = (struct pciide_channel*)chp;
2700 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2701
2702 idedma_ctl = 0;
2703 /* setup DMA if needed */
2704 pciide_channel_dma_setup(cp);
2705
2706 for (drive = 0; drive < 2; drive++) {
2707 drvp = &chp->ch_drive[drive];
2708 /* If no drive, skip */
2709 if ((drvp->drive_flags & DRIVE) == 0)
2710 continue;
2711 /* add timing values, setup DMA if needed */
2712 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2713 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2714 if (drvp->drive_flags & DRIVE_UDMA) {
2715 /* UltraDMA on a 646U2, 0648 or 0649 */
2716 drvp->drive_flags &= ~DRIVE_DMA;
2717 udma_reg = pciide_pci_read(sc->sc_pc,
2718 sc->sc_tag, CMD_UDMATIM(chp->channel));
2719 if (drvp->UDMA_mode > 2 &&
2720 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2721 CMD_BICSR) &
2722 CMD_BICSR_80(chp->channel)) == 0)
2723 drvp->UDMA_mode = 2;
2724 if (drvp->UDMA_mode > 2)
2725 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2726 else if (sc->sc_wdcdev.UDMA_cap > 2)
2727 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2728 udma_reg |= CMD_UDMATIM_UDMA(drive);
2729 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2730 CMD_UDMATIM_TIM_OFF(drive));
2731 udma_reg |=
2732 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2733 CMD_UDMATIM_TIM_OFF(drive));
2734 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2735 CMD_UDMATIM(chp->channel), udma_reg);
2736 } else {
2737 				/*
2738 				 * Use Multiword DMA. Timings will be used
2739 				 * for both PIO and DMA, so adjust the DMA
2740 				 * mode if needed. If we have a 0646U2/8/9,
2741 				 * turn off UDMA.
2742 				 */
2743 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2744 udma_reg = pciide_pci_read(sc->sc_pc,
2745 sc->sc_tag,
2746 CMD_UDMATIM(chp->channel));
2747 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2748 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2749 CMD_UDMATIM(chp->channel),
2750 udma_reg);
2751 }
2752 if (drvp->PIO_mode >= 3 &&
2753 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2754 drvp->DMA_mode = drvp->PIO_mode - 2;
2755 }
2756 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2757 }
2758 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2759 }
2760 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2761 CMD_DATA_TIM(chp->channel, drive), tim);
2762 }
2763 if (idedma_ctl != 0) {
2764 /* Add software bits in status register */
2765 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2766 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2767 idedma_ctl);
2768 }
2769 pciide_print_modes(cp);
2770 }
2771
2772 void
2773 cmd646_9_irqack(chp)
2774 struct channel_softc *chp;
2775 {
2776 u_int32_t priirq, secirq;
2777 struct pciide_channel *cp = (struct pciide_channel*)chp;
2778 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2779
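	/*
	 * Read the channel's interrupt status register and write the value
	 * back to acknowledge the pending interrupt bit, then do the
	 * generic DMA-level ack.
	 */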
2780 if (chp->channel == 0) {
2781 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2782 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2783 } else {
2784 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2785 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2786 }
2787 pciide_irqack(chp);
2788 }
2789
2790 void
2791 cmd680_chip_map(sc, pa)
2792 struct pciide_softc *sc;
2793 struct pci_attach_args *pa;
2794 {
2795 struct pciide_channel *cp;
2796 int channel;
2797
2798 if (pciide_chipen(sc, pa) == 0)
2799 return;
2800 printf("%s: bus-master DMA support present",
2801 sc->sc_wdcdev.sc_dev.dv_xname);
2802 pciide_mapreg_dma(sc, pa);
2803 printf("\n");
2804 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2805 WDC_CAPABILITY_MODE;
2806 if (sc->sc_dma_ok) {
2807 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2808 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2809 sc->sc_wdcdev.UDMA_cap = 6;
2810 sc->sc_wdcdev.irqack = pciide_irqack;
2811 }
2812
2813 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2814 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2815 sc->sc_wdcdev.PIO_cap = 4;
2816 sc->sc_wdcdev.DMA_cap = 2;
2817 sc->sc_wdcdev.set_modes = cmd680_setup_channel;
2818
2819 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
2820 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
2821 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
2822 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);
2823 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2824 cp = &sc->pciide_channels[channel];
2825 cmd680_channel_map(pa, sc, channel);
2826 if (cp->hw_ok == 0)
2827 continue;
2828 cmd680_setup_channel(&cp->wdc_channel);
2829 }
2830 }
2831
2832 void
2833 cmd680_channel_map(pa, sc, channel)
2834 struct pci_attach_args *pa;
2835 struct pciide_softc *sc;
2836 int channel;
2837 {
2838 struct pciide_channel *cp = &sc->pciide_channels[channel];
2839 bus_size_t cmdsize, ctlsize;
2840 int interface, i, reg;
2841 static const u_int8_t init_val[] =
2842 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
2843 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };
2844
2845 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2846 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2847 PCIIDE_INTERFACE_SETTABLE(1);
2848 interface |= PCIIDE_INTERFACE_PCI(0) |
2849 PCIIDE_INTERFACE_PCI(1);
2850 } else {
2851 interface = PCI_INTERFACE(pa->pa_class);
2852 }
2853
2854 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2855 cp->name = PCIIDE_CHANNEL_NAME(channel);
2856 cp->wdc_channel.channel = channel;
2857 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2858
2859 cp->wdc_channel.ch_queue =
2860 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2861 if (cp->wdc_channel.ch_queue == NULL) {
2862 printf("%s %s channel: "
2863 		    "can't allocate memory for command queue\n",
2864 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2865 return;
2866 }
2867
2868 /* XXX */
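	/*
	 * Program this channel's timing registers (at 0xa2 + channel * 16)
	 * with the default values from init_val above.
	 */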
2869 reg = 0xa2 + channel * 16;
2870 for (i = 0; i < sizeof(init_val); i++)
2871 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);
2872
2873 printf("%s: %s channel %s to %s mode\n",
2874 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2875 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2876 "configured" : "wired",
2877 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2878 "native-PCI" : "compatibility");
2879
2880 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
2881 if (cp->hw_ok == 0)
2882 return;
2883 pciide_map_compat_intr(pa, cp, channel, interface);
2884 }
2885
2886 void
2887 cmd680_setup_channel(chp)
2888 struct channel_softc *chp;
2889 {
2890 struct ata_drive_datas *drvp;
2891 u_int8_t mode, off, scsc;
2892 u_int16_t val;
2893 u_int32_t idedma_ctl;
2894 int drive;
2895 struct pciide_channel *cp = (struct pciide_channel*)chp;
2896 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2897 pci_chipset_tag_t pc = sc->sc_pc;
2898 pcitag_t pa = sc->sc_tag;
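	/*
	 * Timing values indexed by mode: udma2_tbl is used when bits 0x30
	 * are set in register 0x8a (apparently the faster base clock needed
	 * for UDMA mode 6), udma_tbl otherwise; dma_tbl and pio_tbl hold
	 * the multiword DMA and PIO timings written below.
	 */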
2899 static const u_int8_t udma2_tbl[] =
2900 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
2901 static const u_int8_t udma_tbl[] =
2902 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
2903 static const u_int16_t dma_tbl[] =
2904 { 0x2208, 0x10c2, 0x10c1 };
2905 static const u_int16_t pio_tbl[] =
2906 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
2907
2908 idedma_ctl = 0;
2909 pciide_channel_dma_setup(cp);
2910 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4);
2911
2912 for (drive = 0; drive < 2; drive++) {
2913 drvp = &chp->ch_drive[drive];
2914 /* If no drive, skip */
2915 if ((drvp->drive_flags & DRIVE) == 0)
2916 continue;
2917 mode &= ~(0x03 << (drive * 4));
2918 if (drvp->drive_flags & DRIVE_UDMA) {
2919 drvp->drive_flags &= ~DRIVE_DMA;
2920 off = 0xa0 + chp->channel * 16;
2921 if (drvp->UDMA_mode > 2 &&
2922 (pciide_pci_read(pc, pa, off) & 0x01) == 0)
2923 drvp->UDMA_mode = 2;
2924 scsc = pciide_pci_read(pc, pa, 0x8a);
2925 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
2926 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
2927 scsc = pciide_pci_read(pc, pa, 0x8a);
2928 if ((scsc & 0x30) == 0)
2929 drvp->UDMA_mode = 5;
2930 }
2931 mode |= 0x03 << (drive * 4);
2932 off = 0xac + chp->channel * 16 + drive * 2;
2933 val = pciide_pci_read(pc, pa, off) & ~0x3f;
2934 if (scsc & 0x30)
2935 val |= udma2_tbl[drvp->UDMA_mode];
2936 else
2937 val |= udma_tbl[drvp->UDMA_mode];
2938 pciide_pci_write(pc, pa, off, val);
2939 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2940 } else if (drvp->drive_flags & DRIVE_DMA) {
2941 mode |= 0x02 << (drive * 4);
2942 off = 0xa8 + chp->channel * 16 + drive * 2;
2943 val = dma_tbl[drvp->DMA_mode];
2944 pciide_pci_write(pc, pa, off, val & 0xff);
2945 pciide_pci_write(pc, pa, off, val >> 8);
2946 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2947 } else {
2948 mode |= 0x01 << (drive * 4);
2949 off = 0xa4 + chp->channel * 16 + drive * 2;
2950 val = pio_tbl[drvp->PIO_mode];
2951 pciide_pci_write(pc, pa, off, val & 0xff);
2952 pciide_pci_write(pc, pa, off, val >> 8);
2953 }
2954 }
2955
2956 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
2957 if (idedma_ctl != 0) {
2958 /* Add software bits in status register */
2959 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2960 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2961 idedma_ctl);
2962 }
2963 pciide_print_modes(cp);
2964 }
2965
2966 void
2967 cy693_chip_map(sc, pa)
2968 struct pciide_softc *sc;
2969 struct pci_attach_args *pa;
2970 {
2971 struct pciide_channel *cp;
2972 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2973 bus_size_t cmdsize, ctlsize;
2974
2975 if (pciide_chipen(sc, pa) == 0)
2976 return;
2977 	/*
2978 	 * This chip has two PCI IDE functions, one for the primary and
2979 	 * one for the secondary channel, so we need to call
2980 	 * pciide_mapregs_compat() with the real channel.
2981 	 */
2982 if (pa->pa_function == 1) {
2983 sc->sc_cy_compatchan = 0;
2984 } else if (pa->pa_function == 2) {
2985 sc->sc_cy_compatchan = 1;
2986 } else {
2987 printf("%s: unexpected PCI function %d\n",
2988 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2989 return;
2990 }
2991 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2992 printf("%s: bus-master DMA support present",
2993 sc->sc_wdcdev.sc_dev.dv_xname);
2994 pciide_mapreg_dma(sc, pa);
2995 } else {
2996 printf("%s: hardware does not support DMA",
2997 sc->sc_wdcdev.sc_dev.dv_xname);
2998 sc->sc_dma_ok = 0;
2999 }
3000 printf("\n");
3001
3002 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
3003 if (sc->sc_cy_handle == NULL) {
3004 printf("%s: unable to map hyperCache control registers\n",
3005 sc->sc_wdcdev.sc_dev.dv_xname);
3006 sc->sc_dma_ok = 0;
3007 }
3008
3009 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3010 WDC_CAPABILITY_MODE;
3011 if (sc->sc_dma_ok) {
3012 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3013 sc->sc_wdcdev.irqack = pciide_irqack;
3014 }
3015 sc->sc_wdcdev.PIO_cap = 4;
3016 sc->sc_wdcdev.DMA_cap = 2;
3017 sc->sc_wdcdev.set_modes = cy693_setup_channel;
3018
3019 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3020 sc->sc_wdcdev.nchannels = 1;
3021
3022 /* Only one channel for this chip; if we are here it's enabled */
3023 cp = &sc->pciide_channels[0];
3024 sc->wdc_chanarray[0] = &cp->wdc_channel;
3025 cp->name = PCIIDE_CHANNEL_NAME(0);
3026 cp->wdc_channel.channel = 0;
3027 cp->wdc_channel.wdc = &sc->sc_wdcdev;
3028 cp->wdc_channel.ch_queue =
3029 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
3030 if (cp->wdc_channel.ch_queue == NULL) {
3031 printf("%s primary channel: "
3032 		    "can't allocate memory for command queue\n",
3033 sc->sc_wdcdev.sc_dev.dv_xname);
3034 return;
3035 }
3036 printf("%s: primary channel %s to ",
3037 sc->sc_wdcdev.sc_dev.dv_xname,
3038 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
3039 "configured" : "wired");
3040 if (interface & PCIIDE_INTERFACE_PCI(0)) {
3041 printf("native-PCI");
3042 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
3043 pciide_pci_intr);
3044 } else {
3045 printf("compatibility");
3046 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
3047 &cmdsize, &ctlsize);
3048 }
3049 printf(" mode\n");
3050 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3051 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3052 wdcattach(&cp->wdc_channel);
3053 if (pciide_chan_candisable(cp)) {
3054 pci_conf_write(sc->sc_pc, sc->sc_tag,
3055 PCI_COMMAND_STATUS_REG, 0);
3056 }
3057 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
3058 if (cp->hw_ok == 0)
3059 return;
3060 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
3061 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
3062 cy693_setup_channel(&cp->wdc_channel);
3063 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
3064 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
3065 }
3066
3067 void
3068 cy693_setup_channel(chp)
3069 struct channel_softc *chp;
3070 {
3071 struct ata_drive_datas *drvp;
3072 int drive;
3073 u_int32_t cy_cmd_ctrl;
3074 u_int32_t idedma_ctl;
3075 struct pciide_channel *cp = (struct pciide_channel*)chp;
3076 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3077 int dma_mode = -1;
3078
3079 cy_cmd_ctrl = idedma_ctl = 0;
3080
3081 /* setup DMA if needed */
3082 pciide_channel_dma_setup(cp);
3083
3084 for (drive = 0; drive < 2; drive++) {
3085 drvp = &chp->ch_drive[drive];
3086 /* If no drive, skip */
3087 if ((drvp->drive_flags & DRIVE) == 0)
3088 continue;
3089 /* add timing values, setup DMA if needed */
3090 if (drvp->drive_flags & DRIVE_DMA) {
3091 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3092 /* use Multiword DMA */
3093 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
3094 dma_mode = drvp->DMA_mode;
3095 }
3096 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3097 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
3098 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3099 CY_CMD_CTRL_IOW_REC_OFF(drive));
3100 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3101 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
3102 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3103 CY_CMD_CTRL_IOR_REC_OFF(drive));
3104 }
3105 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
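	/*
	 * There is only one DMA timing setting per channel, so both drives
	 * get the lowest common multiword DMA mode computed above.
	 */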
3106 chp->ch_drive[0].DMA_mode = dma_mode;
3107 chp->ch_drive[1].DMA_mode = dma_mode;
3108
3109 if (dma_mode == -1)
3110 dma_mode = 0;
3111
3112 if (sc->sc_cy_handle != NULL) {
3113 /* Note: `multiple' is implied. */
3114 cy82c693_write(sc->sc_cy_handle,
3115 (sc->sc_cy_compatchan == 0) ?
3116 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
3117 }
3118
3119 pciide_print_modes(cp);
3120
3121 if (idedma_ctl != 0) {
3122 /* Add software bits in status register */
3123 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3124 IDEDMA_CTL, idedma_ctl);
3125 }
3126 }
3127
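/*
 * Match one of the SiS host bridges that indicates UDMA/100 capability;
 * used below in sis_chip_map() to pick UDMA_cap.
 */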
3128 static int
3129 sis_hostbr_match(pa)
3130 struct pci_attach_args *pa;
3131 {
3132 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
3133 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
3134 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
3135 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
3136 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735) ||
3137 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_745)));
3138 }
3139
3140 void
3141 sis_chip_map(sc, pa)
3142 struct pciide_softc *sc;
3143 struct pci_attach_args *pa;
3144 {
3145 struct pciide_channel *cp;
3146 int channel;
3147 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
3148 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3149 pcireg_t rev = PCI_REVISION(pa->pa_class);
3150 bus_size_t cmdsize, ctlsize;
3151 pcitag_t pchb_tag;
3152 pcireg_t pchb_id, pchb_class;
3153
3154 if (pciide_chipen(sc, pa) == 0)
3155 return;
3156 printf("%s: bus-master DMA support present",
3157 sc->sc_wdcdev.sc_dev.dv_xname);
3158 pciide_mapreg_dma(sc, pa);
3159 printf("\n");
3160
3161 /* get a PCI tag for the host bridge (function 0 of the same device) */
3162 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
3163 /* and read ID and rev of the ISA bridge */
3164 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
3165 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
3166
3167 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3168 WDC_CAPABILITY_MODE;
3169 if (sc->sc_dma_ok) {
3170 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3171 sc->sc_wdcdev.irqack = pciide_irqack;
3172 		/*
3173 		 * Controllers associated with a rev 0x02 SiS 530 host-to-PCI
3174 		 * bridge have problems with UDMA (info provided by Christos).
3175 		 */
3176 if (rev >= 0xd0 &&
3177 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
3178 PCI_REVISION(pchb_class) >= 0x03))
3179 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3180 }
3181
3182 sc->sc_wdcdev.PIO_cap = 4;
3183 sc->sc_wdcdev.DMA_cap = 2;
3184 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
3185 		/*
3186 		 * Use UDMA/100 when paired with one of the host bridges
3187 		 * matched by sis_hostbr_match(), and UDMA/33 otherwise.
3188 		 */
3189 sc->sc_wdcdev.UDMA_cap =
3190 pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
3191 sc->sc_wdcdev.set_modes = sis_setup_channel;
3192
3193 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3194 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3195
3196 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
3197 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
3198 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
3199
3200 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3201 cp = &sc->pciide_channels[channel];
3202 if (pciide_chansetup(sc, channel, interface) == 0)
3203 continue;
3204 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
3205 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
3206 printf("%s: %s channel ignored (disabled)\n",
3207 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3208 continue;
3209 }
3210 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3211 pciide_pci_intr);
3212 if (cp->hw_ok == 0)
3213 continue;
3214 if (pciide_chan_candisable(cp)) {
3215 if (channel == 0)
3216 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
3217 else
3218 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
3219 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
3220 sis_ctr0);
3221 }
3222 pciide_map_compat_intr(pa, cp, channel, interface);
3223 if (cp->hw_ok == 0)
3224 continue;
3225 sis_setup_channel(&cp->wdc_channel);
3226 }
3227 }
3228
3229 void
3230 sis_setup_channel(chp)
3231 struct channel_softc *chp;
3232 {
3233 struct ata_drive_datas *drvp;
3234 int drive;
3235 u_int32_t sis_tim;
3236 u_int32_t idedma_ctl;
3237 struct pciide_channel *cp = (struct pciide_channel*)chp;
3238 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3239
3240 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3241 "channel %d 0x%x\n", chp->channel,
3242 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3243 DEBUG_PROBE);
3244 sis_tim = 0;
3245 idedma_ctl = 0;
3246 /* setup DMA if needed */
3247 pciide_channel_dma_setup(cp);
3248
3249 for (drive = 0; drive < 2; drive++) {
3250 drvp = &chp->ch_drive[drive];
3251 /* If no drive, skip */
3252 if ((drvp->drive_flags & DRIVE) == 0)
3253 continue;
3254 /* add timing values, setup DMA if needed */
3255 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3256 (drvp->drive_flags & DRIVE_UDMA) == 0)
3257 goto pio;
3258
3259 if (drvp->drive_flags & DRIVE_UDMA) {
3260 /* use Ultra/DMA */
3261 drvp->drive_flags &= ~DRIVE_DMA;
3262 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
3263 SIS_TIM_UDMA_TIME_OFF(drive);
3264 sis_tim |= SIS_TIM_UDMA_EN(drive);
3265 } else {
3266 			/*
3267 			 * Use Multiword DMA. Timings will be used for
3268 			 * both PIO and DMA, so adjust the DMA mode if
3269 			 * needed.
3270 			 */
3271 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3272 drvp->PIO_mode = drvp->DMA_mode + 2;
3273 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3274 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3275 drvp->PIO_mode - 2 : 0;
3276 if (drvp->DMA_mode == 0)
3277 drvp->PIO_mode = 0;
3278 }
3279 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3280 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3281 SIS_TIM_ACT_OFF(drive);
3282 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3283 SIS_TIM_REC_OFF(drive);
3284 }
3285 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3286 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3287 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3288 if (idedma_ctl != 0) {
3289 /* Add software bits in status register */
3290 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3291 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3292 idedma_ctl);
3293 }
3294 pciide_print_modes(cp);
3295 }
3296
3297 void
3298 acer_chip_map(sc, pa)
3299 struct pciide_softc *sc;
3300 struct pci_attach_args *pa;
3301 {
3302 struct pciide_channel *cp;
3303 int channel;
3304 pcireg_t cr, interface;
3305 bus_size_t cmdsize, ctlsize;
3306 pcireg_t rev = PCI_REVISION(pa->pa_class);
3307
3308 if (pciide_chipen(sc, pa) == 0)
3309 return;
3310 printf("%s: bus-master DMA support present",
3311 sc->sc_wdcdev.sc_dev.dv_xname);
3312 pciide_mapreg_dma(sc, pa);
3313 printf("\n");
3314 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3315 WDC_CAPABILITY_MODE;
3316 if (sc->sc_dma_ok) {
3317 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3318 if (rev >= 0x20) {
3319 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3320 if (rev >= 0xC4)
3321 sc->sc_wdcdev.UDMA_cap = 5;
3322 else if (rev >= 0xC2)
3323 sc->sc_wdcdev.UDMA_cap = 4;
3324 else
3325 sc->sc_wdcdev.UDMA_cap = 2;
3326 }
3327 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3328 sc->sc_wdcdev.irqack = pciide_irqack;
3329 }
3330
3331 sc->sc_wdcdev.PIO_cap = 4;
3332 sc->sc_wdcdev.DMA_cap = 2;
3333 sc->sc_wdcdev.set_modes = acer_setup_channel;
3334 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3335 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3336
3337 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3338 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3339 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3340
3341 /* Enable "microsoft register bits" R/W. */
3342 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3343 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3344 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3345 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3346 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3347 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3348 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3349 ~ACER_CHANSTATUSREGS_RO);
3350 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3351 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3352 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3353 /* Don't use cr, re-read the real register content instead */
3354 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3355 PCI_CLASS_REG));
3356
3357 /* From linux: enable "Cable Detection" */
3358 if (rev >= 0xC2) {
3359 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3360 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3361 | ACER_0x4B_CDETECT);
3362 }
3363
3364 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3365 cp = &sc->pciide_channels[channel];
3366 if (pciide_chansetup(sc, channel, interface) == 0)
3367 continue;
3368 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3369 printf("%s: %s channel ignored (disabled)\n",
3370 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3371 continue;
3372 }
3373 		/* newer controllers seem to lack the ACER_CHIDS register. Sigh */
3374 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3375 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3376 if (cp->hw_ok == 0)
3377 continue;
3378 if (pciide_chan_candisable(cp)) {
3379 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3380 pci_conf_write(sc->sc_pc, sc->sc_tag,
3381 PCI_CLASS_REG, cr);
3382 }
3383 pciide_map_compat_intr(pa, cp, channel, interface);
3384 acer_setup_channel(&cp->wdc_channel);
3385 }
3386 }
3387
3388 void
3389 acer_setup_channel(chp)
3390 struct channel_softc *chp;
3391 {
3392 struct ata_drive_datas *drvp;
3393 int drive;
3394 u_int32_t acer_fifo_udma;
3395 u_int32_t idedma_ctl;
3396 struct pciide_channel *cp = (struct pciide_channel*)chp;
3397 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3398
3399 idedma_ctl = 0;
3400 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3401 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3402 acer_fifo_udma), DEBUG_PROBE);
3403 /* setup DMA if needed */
3404 pciide_channel_dma_setup(cp);
3405
3406 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3407 DRIVE_UDMA) { /* check 80 pins cable */
3408 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3409 ACER_0x4A_80PIN(chp->channel)) {
3410 if (chp->ch_drive[0].UDMA_mode > 2)
3411 chp->ch_drive[0].UDMA_mode = 2;
3412 if (chp->ch_drive[1].UDMA_mode > 2)
3413 chp->ch_drive[1].UDMA_mode = 2;
3414 }
3415 }
3416
3417 for (drive = 0; drive < 2; drive++) {
3418 drvp = &chp->ch_drive[drive];
3419 /* If no drive, skip */
3420 if ((drvp->drive_flags & DRIVE) == 0)
3421 continue;
3422 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3423 "channel %d drive %d 0x%x\n", chp->channel, drive,
3424 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3425 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3426 /* clear FIFO/DMA mode */
3427 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3428 ACER_UDMA_EN(chp->channel, drive) |
3429 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3430
3431 /* add timing values, setup DMA if needed */
3432 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3433 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3434 acer_fifo_udma |=
3435 ACER_FTH_OPL(chp->channel, drive, 0x1);
3436 goto pio;
3437 }
3438
3439 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3440 if (drvp->drive_flags & DRIVE_UDMA) {
3441 /* use Ultra/DMA */
3442 drvp->drive_flags &= ~DRIVE_DMA;
3443 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3444 acer_fifo_udma |=
3445 ACER_UDMA_TIM(chp->channel, drive,
3446 acer_udma[drvp->UDMA_mode]);
3447 /* XXX disable if one drive < UDMA3 ? */
3448 if (drvp->UDMA_mode >= 3) {
3449 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3450 ACER_0x4B,
3451 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3452 ACER_0x4B) | ACER_0x4B_UDMA66);
3453 }
3454 } else {
3455 			/*
3456 			 * Use Multiword DMA. Timings will be used for
3457 			 * both PIO and DMA, so adjust the DMA mode if
3458 			 * needed.
3459 			 */
3460 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3461 drvp->PIO_mode = drvp->DMA_mode + 2;
3462 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3463 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3464 drvp->PIO_mode - 2 : 0;
3465 if (drvp->DMA_mode == 0)
3466 drvp->PIO_mode = 0;
3467 }
3468 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3469 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3470 ACER_IDETIM(chp->channel, drive),
3471 acer_pio[drvp->PIO_mode]);
3472 }
3473 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3474 acer_fifo_udma), DEBUG_PROBE);
3475 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3476 if (idedma_ctl != 0) {
3477 /* Add software bits in status register */
3478 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3479 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3480 idedma_ctl);
3481 }
3482 pciide_print_modes(cp);
3483 }
3484
3485 int
3486 acer_pci_intr(arg)
3487 void *arg;
3488 {
3489 struct pciide_softc *sc = arg;
3490 struct pciide_channel *cp;
3491 struct channel_softc *wdc_cp;
3492 int i, rv, crv;
3493 u_int32_t chids;
3494
3495 rv = 0;
3496 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3497 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3498 cp = &sc->pciide_channels[i];
3499 wdc_cp = &cp->wdc_channel;
3500 		/* Skip compat channels. */
3501 if (cp->compat)
3502 continue;
3503 if (chids & ACER_CHIDS_INT(i)) {
3504 crv = wdcintr(wdc_cp);
3505 if (crv == 0)
3506 printf("%s:%d: bogus intr\n",
3507 sc->sc_wdcdev.sc_dev.dv_xname, i);
3508 else
3509 rv = 1;
3510 }
3511 }
3512 return rv;
3513 }
3514
3515 void
3516 hpt_chip_map(sc, pa)
3517 struct pciide_softc *sc;
3518 struct pci_attach_args *pa;
3519 {
3520 struct pciide_channel *cp;
3521 int i, compatchan, revision;
3522 pcireg_t interface;
3523 bus_size_t cmdsize, ctlsize;
3524
3525 if (pciide_chipen(sc, pa) == 0)
3526 return;
3527 revision = PCI_REVISION(pa->pa_class);
3528 printf(": Triones/Highpoint ");
3529 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3530 printf("HPT374 IDE Controller\n");
3531 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
3532 printf("HPT372 IDE Controller\n");
3533 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3534 if (revision == HPT372_REV)
3535 printf("HPT372 IDE Controller\n");
3536 else if (revision == HPT370_REV)
3537 printf("HPT370 IDE Controller\n");
3538 else if (revision == HPT370A_REV)
3539 printf("HPT370A IDE Controller\n");
3540 else if (revision == HPT366_REV)
3541 printf("HPT366 IDE Controller\n");
3542 else
3543 printf("unknown HPT IDE controller rev %d\n", revision);
3544 } else
3545 printf("unknown HPT IDE controller 0x%x\n",
3546 sc->sc_pp->ide_product);
3547
3548 /*
3549 	 * When the chip is in native mode it identifies itself as
3550 	 * 'misc mass storage'.  Fake the interface in this case.
3551 */
3552 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3553 interface = PCI_INTERFACE(pa->pa_class);
3554 } else {
3555 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3556 PCIIDE_INTERFACE_PCI(0);
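		/*
		 * On the HPT370 and newer parts this PCI function drives
		 * both channels, so mark channel 1 as native as well.  The
		 * original HPT366 has one channel per PCI function (see the
		 * compat-channel handling below).
		 */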
3557 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3558 (revision == HPT370_REV || revision == HPT370A_REV ||
3559 revision == HPT372_REV)) ||
3560 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3561 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3562 interface |= PCIIDE_INTERFACE_PCI(1);
3563 }
3564
3565 printf("%s: bus-master DMA support present",
3566 sc->sc_wdcdev.sc_dev.dv_xname);
3567 pciide_mapreg_dma(sc, pa);
3568 printf("\n");
3569 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3570 WDC_CAPABILITY_MODE;
3571 if (sc->sc_dma_ok) {
3572 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3573 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3574 sc->sc_wdcdev.irqack = pciide_irqack;
3575 }
3576 sc->sc_wdcdev.PIO_cap = 4;
3577 sc->sc_wdcdev.DMA_cap = 2;
3578
3579 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3580 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3581 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3582 revision == HPT366_REV) {
3583 sc->sc_wdcdev.UDMA_cap = 4;
3584 /*
3585 * The 366 has 2 PCI IDE functions, one for primary and one
3586 * for secondary. So we need to call pciide_mapregs_compat()
3587 * with the real channel
3588 		 * with the real channel.
3589 if (pa->pa_function == 0) {
3590 compatchan = 0;
3591 } else if (pa->pa_function == 1) {
3592 compatchan = 1;
3593 } else {
3594 printf("%s: unexpected PCI function %d\n",
3595 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3596 return;
3597 }
3598 sc->sc_wdcdev.nchannels = 1;
3599 } else {
3600 sc->sc_wdcdev.nchannels = 2;
3601 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
3602 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3603 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3604 revision == HPT372_REV))
3605 sc->sc_wdcdev.UDMA_cap = 6;
3606 else
3607 sc->sc_wdcdev.UDMA_cap = 5;
3608 }
3609 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3610 cp = &sc->pciide_channels[i];
3611 if (sc->sc_wdcdev.nchannels > 1) {
3612 compatchan = i;
3613 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3614 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3615 printf("%s: %s channel ignored (disabled)\n",
3616 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3617 continue;
3618 }
3619 }
3620 if (pciide_chansetup(sc, i, interface) == 0)
3621 continue;
3622 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3623 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3624 &ctlsize, hpt_pci_intr);
3625 } else {
3626 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3627 &cmdsize, &ctlsize);
3628 }
3629 if (cp->hw_ok == 0)
3630 return;
3631 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3632 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3633 wdcattach(&cp->wdc_channel);
3634 hpt_setup_channel(&cp->wdc_channel);
3635 }
3636 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3637 (revision == HPT370_REV || revision == HPT370A_REV ||
3638 revision == HPT372_REV)) ||
3639 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3640 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
3641 /*
3642 		 * HPT370_REV and higher have a bit to disable interrupts;
3643 		 * make sure to clear it.
3644 */
3645 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3646 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3647 ~HPT_CSEL_IRQDIS);
3648 }
3649 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
3650 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3651 revision == HPT372_REV ) ||
3652 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3653 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3654 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
3655 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
3656 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
3657 return;
3658 }
3659
3660 void
3661 hpt_setup_channel(chp)
3662 struct channel_softc *chp;
3663 {
3664 struct ata_drive_datas *drvp;
3665 int drive;
3666 int cable;
3667 u_int32_t before, after;
3668 u_int32_t idedma_ctl;
3669 struct pciide_channel *cp = (struct pciide_channel*)chp;
3670 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3671 int revision =
3672 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
3673
3674 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
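	/*
	 * HPT_CSEL holds the per-channel cable-detect (CBLID) bits; a set
	 * bit caps Ultra-DMA at mode 2 (Ultra/33) below.
	 */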
3675
3676 /* setup DMA if needed */
3677 pciide_channel_dma_setup(cp);
3678
3679 idedma_ctl = 0;
3680
3681 /* Per drive settings */
3682 for (drive = 0; drive < 2; drive++) {
3683 drvp = &chp->ch_drive[drive];
3684 /* If no drive, skip */
3685 if ((drvp->drive_flags & DRIVE) == 0)
3686 continue;
3687 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3688 HPT_IDETIM(chp->channel, drive));
3689
3690 /* add timing values, setup DMA if needed */
3691 if (drvp->drive_flags & DRIVE_UDMA) {
3692 /* use Ultra/DMA */
3693 drvp->drive_flags &= ~DRIVE_DMA;
3694 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3695 drvp->UDMA_mode > 2)
3696 drvp->UDMA_mode = 2;
3697 switch (sc->sc_pp->ide_product) {
3698 case PCI_PRODUCT_TRIONES_HPT374:
3699 after = hpt374_udma[drvp->UDMA_mode];
3700 break;
3701 case PCI_PRODUCT_TRIONES_HPT372:
3702 after = hpt372_udma[drvp->UDMA_mode];
3703 break;
3704 case PCI_PRODUCT_TRIONES_HPT366:
3705 default:
3706 				switch (revision) {
3707 case HPT372_REV:
3708 after = hpt372_udma[drvp->UDMA_mode];
3709 break;
3710 case HPT370_REV:
3711 case HPT370A_REV:
3712 after = hpt370_udma[drvp->UDMA_mode];
3713 break;
3714 case HPT366_REV:
3715 default:
3716 after = hpt366_udma[drvp->UDMA_mode];
3717 break;
3718 }
3719 }
3720 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3721 } else if (drvp->drive_flags & DRIVE_DMA) {
3722 /*
3723 * use Multiword DMA.
3724 * Timings will be used for both PIO and DMA, so adjust
3725 * DMA mode if needed
3726 */
3727 if (drvp->PIO_mode >= 3 &&
3728 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3729 drvp->DMA_mode = drvp->PIO_mode - 2;
3730 }
3731 switch (sc->sc_pp->ide_product) {
3732 case PCI_PRODUCT_TRIONES_HPT374:
3733 after = hpt374_dma[drvp->DMA_mode];
3734 break;
3735 case PCI_PRODUCT_TRIONES_HPT372:
3736 after = hpt372_dma[drvp->DMA_mode];
3737 break;
3738 case PCI_PRODUCT_TRIONES_HPT366:
3739 default:
3740 				switch (revision) {
3741 case HPT372_REV:
3742 after = hpt372_dma[drvp->DMA_mode];
3743 break;
3744 case HPT370_REV:
3745 case HPT370A_REV:
3746 after = hpt370_dma[drvp->DMA_mode];
3747 break;
3748 case HPT366_REV:
3749 default:
3750 after = hpt366_dma[drvp->DMA_mode];
3751 break;
3752 }
3753 }
3754 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3755 } else {
3756 /* PIO only */
3757 switch (sc->sc_pp->ide_product) {
3758 case PCI_PRODUCT_TRIONES_HPT374:
3759 after = hpt374_pio[drvp->PIO_mode];
3760 break;
3761 case PCI_PRODUCT_TRIONES_HPT372:
3762 after = hpt372_pio[drvp->PIO_mode];
3763 break;
3764 case PCI_PRODUCT_TRIONES_HPT366:
3765 default:
3766 				switch (revision) {
3767 case HPT372_REV:
3768 after = hpt372_pio[drvp->PIO_mode];
3769 break;
3770 case HPT370_REV:
3771 case HPT370A_REV:
3772 after = hpt370_pio[drvp->PIO_mode];
3773 break;
3774 case HPT366_REV:
3775 default:
3776 after = hpt366_pio[drvp->PIO_mode];
3777 break;
3778 }
3779 }
3780 }
3781 pci_conf_write(sc->sc_pc, sc->sc_tag,
3782 HPT_IDETIM(chp->channel, drive), after);
3783 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3784 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3785 after, before), DEBUG_PROBE);
3786 }
3787 if (idedma_ctl != 0) {
3788 /* Add software bits in status register */
3789 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3790 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3791 idedma_ctl);
3792 }
3793 pciide_print_modes(cp);
3794 }
3795
3796 int
3797 hpt_pci_intr(arg)
3798 void *arg;
3799 {
3800 struct pciide_softc *sc = arg;
3801 struct pciide_channel *cp;
3802 struct channel_softc *wdc_cp;
3803 int rv = 0;
3804 int dmastat, i, crv;
3805
3806 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3807 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3808 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
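		/*
		 * Skip channels whose bus-master status does not show the
		 * interrupt bit set with the active bit clear.
		 */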
3809 		if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
3810 IDEDMA_CTL_INTR)
3811 continue;
3812 cp = &sc->pciide_channels[i];
3813 wdc_cp = &cp->wdc_channel;
3814 crv = wdcintr(wdc_cp);
3815 if (crv == 0) {
3816 printf("%s:%d: bogus intr\n",
3817 sc->sc_wdcdev.sc_dev.dv_xname, i);
3818 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3819 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3820 } else
3821 rv = 1;
3822 }
3823 return rv;
3824 }
3825
3826
3827 /* Macros to test product family (each matches that generation and newer) */
3828 #define PDC_IS_262(sc) \
3829 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3830 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3831 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3832 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3833 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3834 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3835 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3836 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
3837 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
3838 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
3839 #define PDC_IS_265(sc) \
3840 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3841 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3842 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3843 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3844 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3845 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3846 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
3847 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
3848 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
3849 #define PDC_IS_268(sc) \
3850 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3851 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3852 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3853 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3854 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
3855 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
3856 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
3857 #define PDC_IS_276(sc) \
3858 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3859 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3860 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
3861 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
3862 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
3863
3864 void
3865 pdc202xx_chip_map(sc, pa)
3866 struct pciide_softc *sc;
3867 struct pci_attach_args *pa;
3868 {
3869 struct pciide_channel *cp;
3870 int channel;
3871 pcireg_t interface, st, mode;
3872 bus_size_t cmdsize, ctlsize;
3873
3874 if (!PDC_IS_268(sc)) {
3875 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3876 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
3877 st), DEBUG_PROBE);
3878 }
3879 if (pciide_chipen(sc, pa) == 0)
3880 return;
3881
3882 /* turn off RAID mode */
3883 if (!PDC_IS_268(sc))
3884 st &= ~PDC2xx_STATE_IDERAID;
3885
3886 /*
3887 	 * Can't rely on the PCI_CLASS_REG content if the chip was in RAID
3888 	 * mode.  We have to fake the interface.
3889 */
3890 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3891 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
3892 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3893
3894 printf("%s: bus-master DMA support present",
3895 sc->sc_wdcdev.sc_dev.dv_xname);
3896 pciide_mapreg_dma(sc, pa);
3897 printf("\n");
3898 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3899 WDC_CAPABILITY_MODE;
3900 if (sc->sc_dma_ok) {
3901 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3902 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3903 sc->sc_wdcdev.irqack = pciide_irqack;
3904 }
3905 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
3906 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
3907 sc->sc_wdcdev.cap |= WDC_CAPABILITY_RAID;
3908 sc->sc_wdcdev.PIO_cap = 4;
3909 sc->sc_wdcdev.DMA_cap = 2;
3910 if (PDC_IS_276(sc))
3911 sc->sc_wdcdev.UDMA_cap = 6;
3912 else if (PDC_IS_265(sc))
3913 sc->sc_wdcdev.UDMA_cap = 5;
3914 else if (PDC_IS_262(sc))
3915 sc->sc_wdcdev.UDMA_cap = 4;
3916 else
3917 sc->sc_wdcdev.UDMA_cap = 2;
3918 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
3919 pdc20268_setup_channel : pdc202xx_setup_channel;
3920 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3921 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3922
3923 if (!PDC_IS_268(sc)) {
3924 /* setup failsafe defaults */
3925 mode = 0;
3926 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3927 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3928 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3929 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3930 for (channel = 0;
3931 channel < sc->sc_wdcdev.nchannels;
3932 channel++) {
3933 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3934 "drive 0 initial timings 0x%x, now 0x%x\n",
3935 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3936 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3937 DEBUG_PROBE);
3938 pci_conf_write(sc->sc_pc, sc->sc_tag,
3939 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
3940 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3941 "drive 1 initial timings 0x%x, now 0x%x\n",
3942 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3943 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3944 pci_conf_write(sc->sc_pc, sc->sc_tag,
3945 PDC2xx_TIM(channel, 1), mode);
3946 }
3947
3948 mode = PDC2xx_SCR_DMA;
3949 if (PDC_IS_262(sc)) {
3950 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3951 } else {
3952 /* the BIOS set it up this way */
3953 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3954 }
3955 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3956 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3957 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
3958 "now 0x%x\n",
3959 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3960 PDC2xx_SCR),
3961 mode), DEBUG_PROBE);
3962 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3963 PDC2xx_SCR, mode);
3964
3965 /* controller initial state register is OK even without BIOS */
3966 /* Set DMA mode to IDE DMA compatibility */
3967 mode =
3968 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3969 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3970 DEBUG_PROBE);
3971 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3972 mode | 0x1);
3973 mode =
3974 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3975 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3976 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3977 mode | 0x1);
3978 }
3979
3980 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3981 cp = &sc->pciide_channels[channel];
3982 if (pciide_chansetup(sc, channel, interface) == 0)
3983 continue;
3984 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
3985 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3986 printf("%s: %s channel ignored (disabled)\n",
3987 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3988 continue;
3989 }
3990 if (PDC_IS_265(sc))
3991 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3992 pdc20265_pci_intr);
3993 else
3994 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3995 pdc202xx_pci_intr);
3996 if (cp->hw_ok == 0)
3997 continue;
3998 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
3999 st &= ~(PDC_IS_262(sc) ?
4000 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
4001 pciide_map_compat_intr(pa, cp, channel, interface);
4002 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
4003 }
4004 if (!PDC_IS_268(sc)) {
4005 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
4006 "0x%x\n", st), DEBUG_PROBE);
4007 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
4008 }
4009 return;
4010 }
4011
4012 void
4013 pdc202xx_setup_channel(chp)
4014 struct channel_softc *chp;
4015 {
4016 struct ata_drive_datas *drvp;
4017 int drive;
4018 pcireg_t mode, st;
4019 u_int32_t idedma_ctl, scr, atapi;
4020 struct pciide_channel *cp = (struct pciide_channel*)chp;
4021 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4022 int channel = chp->channel;
4023
4024 /* setup DMA if needed */
4025 pciide_channel_dma_setup(cp);
4026
4027 idedma_ctl = 0;
4028 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
4029 sc->sc_wdcdev.sc_dev.dv_xname,
4030 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
4031 DEBUG_PROBE);
4032
4033 /* Per channel settings */
4034 if (PDC_IS_262(sc)) {
4035 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4036 PDC262_U66);
4037 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4038 /* Trim UDMA mode */
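		/*
		 * Cap both drives at UDMA2 when the channel's 80P status bit
		 * is set, or when either UDMA drive is already limited to
		 * mode 2; the U66 clock is then left disabled below.
		 */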
4039 if ((st & PDC262_STATE_80P(channel)) != 0 ||
4040 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4041 chp->ch_drive[0].UDMA_mode <= 2) ||
4042 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4043 chp->ch_drive[1].UDMA_mode <= 2)) {
4044 if (chp->ch_drive[0].UDMA_mode > 2)
4045 chp->ch_drive[0].UDMA_mode = 2;
4046 if (chp->ch_drive[1].UDMA_mode > 2)
4047 chp->ch_drive[1].UDMA_mode = 2;
4048 }
4049 /* Set U66 if needed */
4050 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4051 chp->ch_drive[0].UDMA_mode > 2) ||
4052 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4053 chp->ch_drive[1].UDMA_mode > 2))
4054 scr |= PDC262_U66_EN(channel);
4055 else
4056 scr &= ~PDC262_U66_EN(channel);
4057 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4058 PDC262_U66, scr);
4059 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
4060 sc->sc_wdcdev.sc_dev.dv_xname, channel,
4061 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4062 PDC262_ATAPI(channel))), DEBUG_PROBE);
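		/*
		 * With an ATAPI device on the channel: if one drive uses
		 * Ultra-DMA while the other is limited to multiword DMA,
		 * leave the UDMA bit clear in the ATAPI register; otherwise
		 * set it.  The bit apparently applies to the whole channel.
		 */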
4063 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4064 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4065 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4066 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4067 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
4068 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4069 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4070 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4071 atapi = 0;
4072 else
4073 atapi = PDC262_ATAPI_UDMA;
4074 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4075 PDC262_ATAPI(channel), atapi);
4076 }
4077 }
4078 for (drive = 0; drive < 2; drive++) {
4079 drvp = &chp->ch_drive[drive];
4080 /* If no drive, skip */
4081 if ((drvp->drive_flags & DRIVE) == 0)
4082 continue;
4083 mode = 0;
4084 if (drvp->drive_flags & DRIVE_UDMA) {
4085 /* use Ultra/DMA */
4086 drvp->drive_flags &= ~DRIVE_DMA;
4087 mode = PDC2xx_TIM_SET_MB(mode,
4088 pdc2xx_udma_mb[drvp->UDMA_mode]);
4089 mode = PDC2xx_TIM_SET_MC(mode,
4090 pdc2xx_udma_mc[drvp->UDMA_mode]);
4091 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4092 } else if (drvp->drive_flags & DRIVE_DMA) {
4093 mode = PDC2xx_TIM_SET_MB(mode,
4094 pdc2xx_dma_mb[drvp->DMA_mode]);
4095 mode = PDC2xx_TIM_SET_MC(mode,
4096 pdc2xx_dma_mc[drvp->DMA_mode]);
4097 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4098 } else {
4099 mode = PDC2xx_TIM_SET_MB(mode,
4100 pdc2xx_dma_mb[0]);
4101 mode = PDC2xx_TIM_SET_MC(mode,
4102 pdc2xx_dma_mc[0]);
4103 }
4104 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
4105 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
4106 if (drvp->drive_flags & DRIVE_ATA)
4107 mode |= PDC2xx_TIM_PRE;
4108 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
4109 if (drvp->PIO_mode >= 3) {
4110 mode |= PDC2xx_TIM_IORDY;
4111 if (drive == 0)
4112 mode |= PDC2xx_TIM_IORDYp;
4113 }
4114 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
4115 "timings 0x%x\n",
4116 sc->sc_wdcdev.sc_dev.dv_xname,
4117 chp->channel, drive, mode), DEBUG_PROBE);
4118 pci_conf_write(sc->sc_pc, sc->sc_tag,
4119 PDC2xx_TIM(chp->channel, drive), mode);
4120 }
4121 if (idedma_ctl != 0) {
4122 /* Add software bits in status register */
4123 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4124 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4125 idedma_ctl);
4126 }
4127 pciide_print_modes(cp);
4128 }
4129
4130 void
4131 pdc20268_setup_channel(chp)
4132 struct channel_softc *chp;
4133 {
4134 struct ata_drive_datas *drvp;
4135 int drive;
4136 u_int32_t idedma_ctl;
4137 struct pciide_channel *cp = (struct pciide_channel*)chp;
4138 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4139 int u100;
4140
4141 /* setup DMA if needed */
4142 pciide_channel_dma_setup(cp);
4143
4144 idedma_ctl = 0;
4145
4146 	/* I don't know what this is for; FreeBSD does it ... */
4147 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4148 IDEDMA_CMD + 0x1, 0x0b);
4149
4150 /*
4151 	 * I don't know what this is for; FreeBSD checks this ... it is not
4152 	 * cable-type detection.
4153 */
4154 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4155 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
4156
4157 for (drive = 0; drive < 2; drive++) {
4158 drvp = &chp->ch_drive[drive];
4159 /* If no drive, skip */
4160 if ((drvp->drive_flags & DRIVE) == 0)
4161 continue;
4162 if (drvp->drive_flags & DRIVE_UDMA) {
4163 /* use Ultra/DMA */
4164 drvp->drive_flags &= ~DRIVE_DMA;
4165 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4166 if (drvp->UDMA_mode > 2 && u100 == 0)
4167 drvp->UDMA_mode = 2;
4168 } else if (drvp->drive_flags & DRIVE_DMA) {
4169 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4170 }
4171 }
4172 	/* Nothing to do to set up modes; the controller snoops SET_FEATURES. */
4173 if (idedma_ctl != 0) {
4174 /* Add software bits in status register */
4175 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4176 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4177 idedma_ctl);
4178 }
4179 pciide_print_modes(cp);
4180 }
4181
4182 int
4183 pdc202xx_pci_intr(arg)
4184 void *arg;
4185 {
4186 struct pciide_softc *sc = arg;
4187 struct pciide_channel *cp;
4188 struct channel_softc *wdc_cp;
4189 int i, rv, crv;
4190 u_int32_t scr;
4191
4192 rv = 0;
4193 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4194 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4195 cp = &sc->pciide_channels[i];
4196 wdc_cp = &cp->wdc_channel;
4197 		/* If a compat channel, skip. */
4198 if (cp->compat)
4199 continue;
4200 if (scr & PDC2xx_SCR_INT(i)) {
4201 crv = wdcintr(wdc_cp);
4202 if (crv == 0)
4203 printf("%s:%d: bogus intr (reg 0x%x)\n",
4204 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4205 else
4206 rv = 1;
4207 }
4208 }
4209 return rv;
4210 }
4211
4212 int
4213 pdc20265_pci_intr(arg)
4214 void *arg;
4215 {
4216 struct pciide_softc *sc = arg;
4217 struct pciide_channel *cp;
4218 struct channel_softc *wdc_cp;
4219 int i, rv, crv;
4220 u_int32_t dmastat;
4221
4222 rv = 0;
4223 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4224 cp = &sc->pciide_channels[i];
4225 wdc_cp = &cp->wdc_channel;
4226 		/* If a compat channel, skip. */
4227 if (cp->compat)
4228 continue;
4229 /*
4230 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
4231 		 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops.
4232 * So use it instead (requires 2 reg reads instead of 1,
4233 * but we can't do it another way).
4234 */
4235 dmastat = bus_space_read_1(sc->sc_dma_iot,
4236 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4237 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
4238 continue;
4239 crv = wdcintr(wdc_cp);
4240 if (crv == 0)
4241 printf("%s:%d: bogus intr\n",
4242 sc->sc_wdcdev.sc_dev.dv_xname, i);
4243 else
4244 rv = 1;
4245 }
4246 return rv;
4247 }
4248
4249 void
4250 opti_chip_map(sc, pa)
4251 struct pciide_softc *sc;
4252 struct pci_attach_args *pa;
4253 {
4254 struct pciide_channel *cp;
4255 bus_size_t cmdsize, ctlsize;
4256 pcireg_t interface;
4257 u_int8_t init_ctrl;
4258 int channel;
4259
4260 if (pciide_chipen(sc, pa) == 0)
4261 return;
4262 printf("%s: bus-master DMA support present",
4263 sc->sc_wdcdev.sc_dev.dv_xname);
4264
4265 /*
4266 * XXXSCW:
4267 * There seem to be a couple of buggy revisions/implementations
4268 * of the OPTi pciide chipset. This kludge seems to fix one of
4269 * the reported problems (PR/11644) but still fails for the
4270 * other (PR/13151), although the latter may be due to other
4271 * issues too...
4272 */
4273 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4274 printf(" but disabled due to chip rev. <= 0x12");
4275 sc->sc_dma_ok = 0;
4276 } else
4277 pciide_mapreg_dma(sc, pa);
4278
4279 printf("\n");
4280
4281 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4282 WDC_CAPABILITY_MODE;
4283 sc->sc_wdcdev.PIO_cap = 4;
4284 if (sc->sc_dma_ok) {
4285 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4286 sc->sc_wdcdev.irqack = pciide_irqack;
4287 sc->sc_wdcdev.DMA_cap = 2;
4288 }
4289 sc->sc_wdcdev.set_modes = opti_setup_channel;
4290
4291 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4292 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4293
4294 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4295 OPTI_REG_INIT_CONTROL);
4296
4297 interface = PCI_INTERFACE(pa->pa_class);
4298
4299 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4300 cp = &sc->pciide_channels[channel];
4301 if (pciide_chansetup(sc, channel, interface) == 0)
4302 continue;
4303 if (channel == 1 &&
4304 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4305 printf("%s: %s channel ignored (disabled)\n",
4306 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4307 continue;
4308 }
4309 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4310 pciide_pci_intr);
4311 if (cp->hw_ok == 0)
4312 continue;
4313 pciide_map_compat_intr(pa, cp, channel, interface);
4314 if (cp->hw_ok == 0)
4315 continue;
4316 opti_setup_channel(&cp->wdc_channel);
4317 }
4318 }
4319
4320 void
4321 opti_setup_channel(chp)
4322 struct channel_softc *chp;
4323 {
4324 struct ata_drive_datas *drvp;
4325 struct pciide_channel *cp = (struct pciide_channel*)chp;
4326 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4327 int drive, spd;
4328 int mode[2];
4329 u_int8_t rv, mr;
4330
4331 /*
4332 * The `Delay' and `Address Setup Time' fields of the
4333 * Miscellaneous Register are always zero initially.
4334 */
4335 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4336 mr &= ~(OPTI_MISC_DELAY_MASK |
4337 OPTI_MISC_ADDR_SETUP_MASK |
4338 OPTI_MISC_INDEX_MASK);
4339
4340 /* Prime the control register before setting timing values */
4341 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4342
4343 	/* Determine the clock rate of the PCI bus the chip is attached to */
4344 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4345 spd &= OPTI_STRAP_PCI_SPEED_MASK;
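	/* spd selects the row in the opti_tim_as/cp/rt tables below. */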
4346
4347 /* setup DMA if needed */
4348 pciide_channel_dma_setup(cp);
4349
4350 for (drive = 0; drive < 2; drive++) {
4351 drvp = &chp->ch_drive[drive];
4352 /* If no drive, skip */
4353 if ((drvp->drive_flags & DRIVE) == 0) {
4354 mode[drive] = -1;
4355 continue;
4356 }
4357
4358 if ((drvp->drive_flags & DRIVE_DMA)) {
4359 /*
4360 * Timings will be used for both PIO and DMA,
4361 * so adjust DMA mode if needed
4362 */
4363 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4364 drvp->PIO_mode = drvp->DMA_mode + 2;
4365 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4366 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4367 drvp->PIO_mode - 2 : 0;
4368 if (drvp->DMA_mode == 0)
4369 drvp->PIO_mode = 0;
4370
4371 mode[drive] = drvp->DMA_mode + 5;
4372 } else
4373 mode[drive] = drvp->PIO_mode;
4374
4375 if (drive && mode[0] >= 0 &&
4376 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4377 /*
4378 * Can't have two drives using different values
4379 * for `Address Setup Time'.
4380 * Slow down the faster drive to compensate.
4381 */
4382 int d = (opti_tim_as[spd][mode[0]] >
4383 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4384
4385 mode[d] = mode[1-d];
4386 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4387 chp->ch_drive[d].DMA_mode = 0;
4388 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4389 }
4390 }
4391
4392 for (drive = 0; drive < 2; drive++) {
4393 int m;
4394 if ((m = mode[drive]) < 0)
4395 continue;
4396
4397 /* Set the Address Setup Time and select appropriate index */
4398 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4399 rv |= OPTI_MISC_INDEX(drive);
4400 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4401
4402 /* Set the pulse width and recovery timing parameters */
4403 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4404 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4405 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4406 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4407
4408 /* Set the Enhanced Mode register appropriately */
4409 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4410 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4411 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4412 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4413 }
4414
4415 /* Finally, enable the timings */
4416 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4417
4418 pciide_print_modes(cp);
4419 }
4420
4421 #define ACARD_IS_850(sc) \
4422 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4423
4424 void
4425 acard_chip_map(sc, pa)
4426 struct pciide_softc *sc;
4427 struct pci_attach_args *pa;
4428 {
4429 struct pciide_channel *cp;
4430 int i;
4431 pcireg_t interface;
4432 bus_size_t cmdsize, ctlsize;
4433
4434 if (pciide_chipen(sc, pa) == 0)
4435 return;
4436
4437 /*
4438 	 * When the chip is in native mode it identifies itself as
4439 	 * 'misc mass storage'.  Fake the interface in this case.
4440 */
4441 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4442 interface = PCI_INTERFACE(pa->pa_class);
4443 } else {
4444 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4445 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4446 }
4447
4448 printf("%s: bus-master DMA support present",
4449 sc->sc_wdcdev.sc_dev.dv_xname);
4450 pciide_mapreg_dma(sc, pa);
4451 printf("\n");
4452 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4453 WDC_CAPABILITY_MODE;
4454
4455 if (sc->sc_dma_ok) {
4456 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4457 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4458 sc->sc_wdcdev.irqack = pciide_irqack;
4459 }
4460 sc->sc_wdcdev.PIO_cap = 4;
4461 sc->sc_wdcdev.DMA_cap = 2;
4462 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4463
4464 sc->sc_wdcdev.set_modes = acard_setup_channel;
4465 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4466 sc->sc_wdcdev.nchannels = 2;
4467
4468 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4469 cp = &sc->pciide_channels[i];
4470 if (pciide_chansetup(sc, i, interface) == 0)
4471 continue;
4472 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4473 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4474 &ctlsize, pciide_pci_intr);
4475 } else {
4476 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4477 &cmdsize, &ctlsize);
4478 }
4479 if (cp->hw_ok == 0)
4480 return;
4481 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4482 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4483 wdcattach(&cp->wdc_channel);
4484 acard_setup_channel(&cp->wdc_channel);
4485 }
4486 if (!ACARD_IS_850(sc)) {
4487 u_int32_t reg;
4488 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4489 reg &= ~ATP860_CTRL_INT;
4490 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4491 }
4492 }
4493
4494 void
4495 acard_setup_channel(chp)
4496 struct channel_softc *chp;
4497 {
4498 struct ata_drive_datas *drvp;
4499 struct pciide_channel *cp = (struct pciide_channel*)chp;
4500 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4501 int channel = chp->channel;
4502 int drive;
4503 u_int32_t idetime, udma_mode;
4504 u_int32_t idedma_ctl;
4505
4506 /* setup DMA if needed */
4507 pciide_channel_dma_setup(cp);
4508
4509 if (ACARD_IS_850(sc)) {
4510 idetime = 0;
4511 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4512 udma_mode &= ~ATP850_UDMA_MASK(channel);
4513 } else {
4514 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4515 idetime &= ~ATP860_SETTIME_MASK(channel);
4516 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4517 udma_mode &= ~ATP860_UDMA_MASK(channel);
4518
4519 		/* check for 80-pin cable */
4520 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4521 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4522 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4523 & ATP860_CTRL_80P(chp->channel)) {
4524 if (chp->ch_drive[0].UDMA_mode > 2)
4525 chp->ch_drive[0].UDMA_mode = 2;
4526 if (chp->ch_drive[1].UDMA_mode > 2)
4527 chp->ch_drive[1].UDMA_mode = 2;
4528 }
4529 }
4530 }
4531
4532 idedma_ctl = 0;
4533
4534 /* Per drive settings */
4535 for (drive = 0; drive < 2; drive++) {
4536 drvp = &chp->ch_drive[drive];
4537 /* If no drive, skip */
4538 if ((drvp->drive_flags & DRIVE) == 0)
4539 continue;
4540 /* add timing values, setup DMA if needed */
4541 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4542 (drvp->drive_flags & DRIVE_UDMA)) {
4543 /* use Ultra/DMA */
4544 if (ACARD_IS_850(sc)) {
4545 idetime |= ATP850_SETTIME(drive,
4546 acard_act_udma[drvp->UDMA_mode],
4547 acard_rec_udma[drvp->UDMA_mode]);
4548 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4549 acard_udma_conf[drvp->UDMA_mode]);
4550 } else {
4551 idetime |= ATP860_SETTIME(channel, drive,
4552 acard_act_udma[drvp->UDMA_mode],
4553 acard_rec_udma[drvp->UDMA_mode]);
4554 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4555 acard_udma_conf[drvp->UDMA_mode]);
4556 }
4557 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4558 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4559 (drvp->drive_flags & DRIVE_DMA)) {
4560 /* use Multiword DMA */
4561 drvp->drive_flags &= ~DRIVE_UDMA;
4562 if (ACARD_IS_850(sc)) {
4563 idetime |= ATP850_SETTIME(drive,
4564 acard_act_dma[drvp->DMA_mode],
4565 acard_rec_dma[drvp->DMA_mode]);
4566 } else {
4567 idetime |= ATP860_SETTIME(channel, drive,
4568 acard_act_dma[drvp->DMA_mode],
4569 acard_rec_dma[drvp->DMA_mode]);
4570 }
4571 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4572 } else {
4573 /* PIO only */
4574 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4575 if (ACARD_IS_850(sc)) {
4576 idetime |= ATP850_SETTIME(drive,
4577 acard_act_pio[drvp->PIO_mode],
4578 acard_rec_pio[drvp->PIO_mode]);
4579 } else {
4580 idetime |= ATP860_SETTIME(channel, drive,
4581 acard_act_pio[drvp->PIO_mode],
4582 acard_rec_pio[drvp->PIO_mode]);
4583 }
4584 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4585 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4586 | ATP8x0_CTRL_EN(channel));
4587 }
4588 }
4589
4590 if (idedma_ctl != 0) {
4591 /* Add software bits in status register */
4592 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4593 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4594 }
4595 pciide_print_modes(cp);
4596
4597 if (ACARD_IS_850(sc)) {
4598 pci_conf_write(sc->sc_pc, sc->sc_tag,
4599 ATP850_IDETIME(channel), idetime);
4600 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4601 } else {
4602 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4603 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4604 }
4605 }
4606
4607 int
4608 acard_pci_intr(arg)
4609 void *arg;
4610 {
4611 struct pciide_softc *sc = arg;
4612 struct pciide_channel *cp;
4613 struct channel_softc *wdc_cp;
4614 int rv = 0;
4615 int dmastat, i, crv;
4616
4617 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4618 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4619 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4620 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4621 continue;
4622 cp = &sc->pciide_channels[i];
4623 wdc_cp = &cp->wdc_channel;
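		/*
		 * If the channel is not expecting an interrupt, still run
		 * wdcintr() and ack the bus-master interrupt bit so a
		 * spurious interrupt does not remain asserted.
		 */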
4624 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4625 (void)wdcintr(wdc_cp);
4626 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4627 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4628 continue;
4629 }
4630 crv = wdcintr(wdc_cp);
4631 if (crv == 0)
4632 printf("%s:%d: bogus intr\n",
4633 sc->sc_wdcdev.sc_dev.dv_xname, i);
4634 else if (crv == 1)
4635 rv = 1;
4636 else if (rv == 0)
4637 rv = crv;
4638 }
4639 return rv;
4640 }
4641
4642 static int
4643 sl82c105_bugchk(struct pci_attach_args *pa)
4644 {
4645
4646 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
4647 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
4648 return (0);
4649
4650 if (PCI_REVISION(pa->pa_class) <= 0x05)
4651 return (1);
4652
4653 return (0);
4654 }
4655
4656 void
4657 sl82c105_chip_map(sc, pa)
4658 struct pciide_softc *sc;
4659 struct pci_attach_args *pa;
4660 {
4661 struct pciide_channel *cp;
4662 bus_size_t cmdsize, ctlsize;
4663 pcireg_t interface, idecr;
4664 int channel;
4665
4666 if (pciide_chipen(sc, pa) == 0)
4667 return;
4668
4669 printf("%s: bus-master DMA support present",
4670 sc->sc_wdcdev.sc_dev.dv_xname);
4671
4672 /*
4673 * Check to see if we're part of the Winbond 83c553 Southbridge.
4674 * If so, we need to disable DMA on rev. <= 5 of that chip.
4675 */
4676 if (pci_find_device(pa, sl82c105_bugchk)) {
4677 printf(" but disabled due to 83c553 rev. <= 0x05");
4678 sc->sc_dma_ok = 0;
4679 } else
4680 pciide_mapreg_dma(sc, pa);
4681 printf("\n");
4682
4683 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4684 WDC_CAPABILITY_MODE;
4685 sc->sc_wdcdev.PIO_cap = 4;
4686 if (sc->sc_dma_ok) {
4687 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4688 sc->sc_wdcdev.irqack = pciide_irqack;
4689 sc->sc_wdcdev.DMA_cap = 2;
4690 }
4691 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
4692
4693 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4694 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4695
4696 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
4697
4698 interface = PCI_INTERFACE(pa->pa_class);
4699
4700 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4701 cp = &sc->pciide_channels[channel];
4702 if (pciide_chansetup(sc, channel, interface) == 0)
4703 continue;
4704 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
4705 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
4706 printf("%s: %s channel ignored (disabled)\n",
4707 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4708 continue;
4709 }
4710 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4711 pciide_pci_intr);
4712 if (cp->hw_ok == 0)
4713 continue;
4714 pciide_map_compat_intr(pa, cp, channel, interface);
4715 if (cp->hw_ok == 0)
4716 continue;
4717 sl82c105_setup_channel(&cp->wdc_channel);
4718 }
4719 }
4720
4721 void
4722 sl82c105_setup_channel(chp)
4723 struct channel_softc *chp;
4724 {
4725 struct ata_drive_datas *drvp;
4726 struct pciide_channel *cp = (struct pciide_channel*)chp;
4727 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4728 int pxdx_reg, drive;
4729 pcireg_t pxdx;
4730
4731 /* Set up DMA if needed. */
4732 pciide_channel_dma_setup(cp);
4733
4734 for (drive = 0; drive < 2; drive++) {
4735 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
4736 : SYMPH_P1D0CR) + (drive * 4);
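		/* Per-drive timing register: drive 1's is four bytes past drive 0's. */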
4737
4738 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
4739
4740 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
4741 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
4742
4743 drvp = &chp->ch_drive[drive];
4744 /* If no drive, skip. */
4745 if ((drvp->drive_flags & DRIVE) == 0) {
4746 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4747 continue;
4748 }
4749
4750 if (drvp->drive_flags & DRIVE_DMA) {
4751 /*
4752 * Timings will be used for both PIO and DMA,
4753 * so adjust DMA mode if needed.
4754 */
4755 if (drvp->PIO_mode >= 3) {
4756 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
4757 drvp->DMA_mode = drvp->PIO_mode - 2;
4758 if (drvp->DMA_mode < 1) {
4759 /*
4760 * Can't mix both PIO and DMA.
4761 * Disable DMA.
4762 */
4763 drvp->drive_flags &= ~DRIVE_DMA;
4764 }
4765 } else {
4766 /*
4767 * Can't mix both PIO and DMA. Disable
4768 * DMA.
4769 */
4770 drvp->drive_flags &= ~DRIVE_DMA;
4771 }
4772 }
4773
4774 if (drvp->drive_flags & DRIVE_DMA) {
4775 /* Use multi-word DMA. */
4776 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
4777 PxDx_CMD_ON_SHIFT;
4778 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
4779 } else {
4780 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
4781 PxDx_CMD_ON_SHIFT;
4782 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
4783 }
4784
4785 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
4786
4787 /* ...and set the mode for this drive. */
4788 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4789 }
4790
4791 pciide_print_modes(cp);
4792 }
4793
4794 void
4795 serverworks_chip_map(sc, pa)
4796 struct pciide_softc *sc;
4797 struct pci_attach_args *pa;
4798 {
4799 struct pciide_channel *cp;
4800 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
4801 pcitag_t pcib_tag;
4802 int channel;
4803 bus_size_t cmdsize, ctlsize;
4804
4805 if (pciide_chipen(sc, pa) == 0)
4806 return;
4807
4808 printf("%s: bus-master DMA support present",
4809 sc->sc_wdcdev.sc_dev.dv_xname);
4810 pciide_mapreg_dma(sc, pa);
4811 printf("\n");
4812 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4813 WDC_CAPABILITY_MODE;
4814
4815 if (sc->sc_dma_ok) {
4816 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4817 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4818 sc->sc_wdcdev.irqack = pciide_irqack;
4819 }
4820 sc->sc_wdcdev.PIO_cap = 4;
4821 sc->sc_wdcdev.DMA_cap = 2;
4822 switch (sc->sc_pp->ide_product) {
4823 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
4824 sc->sc_wdcdev.UDMA_cap = 2;
4825 break;
4826 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
4827 if (PCI_REVISION(pa->pa_class) < 0x92)
4828 sc->sc_wdcdev.UDMA_cap = 4;
4829 else
4830 sc->sc_wdcdev.UDMA_cap = 5;
4831 break;
4832 }
4833
4834 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
4835 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4836 sc->sc_wdcdev.nchannels = 2;
4837
4838 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4839 cp = &sc->pciide_channels[channel];
4840 if (pciide_chansetup(sc, channel, interface) == 0)
4841 continue;
4842 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4843 serverworks_pci_intr);
4844 if (cp->hw_ok == 0)
4845 return;
4846 pciide_map_compat_intr(pa, cp, channel, interface);
4847 if (cp->hw_ok == 0)
4848 return;
4849 serverworks_setup_channel(&cp->wdc_channel);
4850 }
4851
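	/*
	 * Tweak config register 0x64 of PCI function 0 of this device
	 * (clear bit 13, set bit 14).  The meaning of these bits is not
	 * documented here.
	 */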
4852 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
4853 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
4854 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
4855 }
4856
4857 void
4858 serverworks_setup_channel(chp)
4859 struct channel_softc *chp;
4860 {
4861 struct ata_drive_datas *drvp;
4862 struct pciide_channel *cp = (struct pciide_channel*)chp;
4863 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4864 int channel = chp->channel;
4865 int drive, unit;
4866 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
4867 u_int32_t idedma_ctl;
4868 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
4869 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
4870
4871 /* setup DMA if needed */
4872 pciide_channel_dma_setup(cp);
4873
4874 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
4875 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
4876 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
4877 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
4878
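	/*
	 * Clear this channel's fields before merging in the new values:
	 * 0x40/0x44 hold one timing byte per drive (16 bits per channel,
	 * drive 0 in the upper byte, hence the `unit ^ 1' index below),
	 * 0x48/0x54 hold one mode nibble per drive in their upper 16 bits,
	 * and the low bits of 0x54 are per-drive UDMA enables.
	 */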
4879 pio_time &= ~(0xffff << (16 * channel));
4880 dma_time &= ~(0xffff << (16 * channel));
4881 pio_mode &= ~(0xff << (8 * channel + 16));
4882 udma_mode &= ~(0xff << (8 * channel + 16));
4883 udma_mode &= ~(3 << (2 * channel));
4884
4885 idedma_ctl = 0;
4886
4887 /* Per drive settings */
4888 for (drive = 0; drive < 2; drive++) {
4889 drvp = &chp->ch_drive[drive];
4890 /* If no drive, skip */
4891 if ((drvp->drive_flags & DRIVE) == 0)
4892 continue;
4893 unit = drive + 2 * channel;
4894 /* add timing values, setup DMA if needed */
4895 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
4896 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
4897 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4898 (drvp->drive_flags & DRIVE_UDMA)) {
4899 /* use Ultra/DMA, check for 80-pin cable */
4900 if (drvp->UDMA_mode > 2 &&
4901 			    (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
			    PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
4902 drvp->UDMA_mode = 2;
4903 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4904 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
4905 udma_mode |= 1 << unit;
4906 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4907 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4908 (drvp->drive_flags & DRIVE_DMA)) {
4909 /* use Multiword DMA */
4910 drvp->drive_flags &= ~DRIVE_UDMA;
4911 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4912 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4913 } else {
4914 /* PIO only */
4915 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4916 }
4917 }
4918
4919 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
4920 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
4921 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
4922 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
4923 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
4924
4925 if (idedma_ctl != 0) {
4926 /* Add software bits in status register */
4927 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4928 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4929 }
4930 pciide_print_modes(cp);
4931 }
4932
4933 int
4934 serverworks_pci_intr(arg)
4935 void *arg;
4936 {
4937 struct pciide_softc *sc = arg;
4938 struct pciide_channel *cp;
4939 struct channel_softc *wdc_cp;
4940 int rv = 0;
4941 int dmastat, i, crv;
4942
4943 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4944 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4945 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4946 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4947 IDEDMA_CTL_INTR)
4948 continue;
4949 cp = &sc->pciide_channels[i];
4950 wdc_cp = &cp->wdc_channel;
4951 crv = wdcintr(wdc_cp);
4952 if (crv == 0) {
4953 printf("%s:%d: bogus intr\n",
4954 sc->sc_wdcdev.sc_dev.dv_xname, i);
4955 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4956 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4957 } else
4958 rv = 1;
4959 }
4960 return rv;
4961 }
4962