1 /*	$NetBSD: pciide.c,v 1.177 2003/01/24 04:53:13 thorpej Exp $	*/
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.177 2003/01/24 04:53:13 thorpej Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
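/*
 * Example (illustrative): setting wdcdebug_pciide_mask to DEBUG_PROBE,
 * e.g. from the kernel debugger or by patching the variable in the kernel
 * image, enables the probe-time WDCDEBUG_PRINT() messages below.
 */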
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/cy82c693var.h>
123
124 #include "opt_pciide.h"
125
126 /* inlines for reading/writing 8-bit PCI registers */
127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
128 int));
129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
130 int, u_int8_t));
131
132 static __inline u_int8_t
133 pciide_pci_read(pc, pa, reg)
134 pci_chipset_tag_t pc;
135 pcitag_t pa;
136 int reg;
137 {
138
139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
140 ((reg & 0x03) * 8) & 0xff);
141 }
142
143 static __inline void
144 pciide_pci_write(pc, pa, reg, val)
145 pci_chipset_tag_t pc;
146 pcitag_t pa;
147 int reg;
148 u_int8_t val;
149 {
150 pcireg_t pcival;
151
152 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
153 pcival &= ~(0xff << ((reg & 0x03) * 8));
154 pcival |= (val << ((reg & 0x03) * 8));
155 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
156 }
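/*
 * Illustrative example: pciide_pci_read(pc, tag, 0x41) reads the aligned
 * config dword at 0x40 and returns its bits 15:8; pciide_pci_write() does
 * a read-modify-write of the same dword, so the other three bytes are
 * preserved.
 */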
157
158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159
160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
161 void piix_setup_channel __P((struct channel_softc*));
162 void piix3_4_setup_channel __P((struct channel_softc*));
163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
166
167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void amd7x6_setup_channel __P((struct channel_softc*));
169
170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void apollo_setup_channel __P((struct channel_softc*));
172
173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_setup_channel __P((struct channel_softc*));
176 void cmd_channel_map __P((struct pci_attach_args *,
177 struct pciide_softc *, int));
178 int cmd_pci_intr __P((void *));
179 void cmd646_9_irqack __P((struct channel_softc *));
180 void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
181 void cmd680_setup_channel __P((struct channel_softc*));
182 void cmd680_channel_map __P((struct pci_attach_args *,
183 struct pciide_softc *, int));
184
185 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
186 void cy693_setup_channel __P((struct channel_softc*));
187
188 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void sis_setup_channel __P((struct channel_softc*));
190 static int sis_hostbr_match __P(( struct pci_attach_args *));
191
192 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void acer_setup_channel __P((struct channel_softc*));
194 int acer_pci_intr __P((void *));
195
196 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
197 void pdc202xx_setup_channel __P((struct channel_softc*));
198 void pdc20268_setup_channel __P((struct channel_softc*));
199 int pdc202xx_pci_intr __P((void *));
200 int pdc20265_pci_intr __P((void *));
201
202 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
203 void opti_setup_channel __P((struct channel_softc*));
204
205 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void hpt_setup_channel __P((struct channel_softc*));
207 int hpt_pci_intr __P((void *));
208
209 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
210 void acard_setup_channel __P((struct channel_softc*));
211 int acard_pci_intr __P((void *));
212
213 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
214 void serverworks_setup_channel __P((struct channel_softc*));
215 int serverworks_pci_intr __P((void *));
216
217 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
218 void sl82c105_setup_channel __P((struct channel_softc*));
219
220 void pciide_channel_dma_setup __P((struct pciide_channel *));
221 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
222 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
223 void pciide_dma_start __P((void*, int, int));
224 int pciide_dma_finish __P((void*, int, int, int));
225 void pciide_irqack __P((struct channel_softc *));
226 void pciide_print_modes __P((struct pciide_channel *));
227
228 struct pciide_product_desc {
229 u_int32_t ide_product;
230 int ide_flags;
231 const char *ide_name;
232 /* map and setup chip, probe drives */
233 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
234 };
235
236 /* Flags for ide_flags */
237 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
238 #define IDE_16BIT_IOSPACE	0x0002 /* I/O space BARs ignore upper word */
239
240 /* Default product description for devices not known to this driver */
241 const struct pciide_product_desc default_product_desc = {
242 0,
243 0,
244 "Generic PCI IDE controller",
245 default_chip_map,
246 };
247
248 const struct pciide_product_desc pciide_intel_products[] = {
249 { PCI_PRODUCT_INTEL_82092AA,
250 0,
251 "Intel 82092AA IDE controller",
252 default_chip_map,
253 },
254 { PCI_PRODUCT_INTEL_82371FB_IDE,
255 0,
256 "Intel 82371FB IDE controller (PIIX)",
257 piix_chip_map,
258 },
259 { PCI_PRODUCT_INTEL_82371SB_IDE,
260 0,
261 "Intel 82371SB IDE Interface (PIIX3)",
262 piix_chip_map,
263 },
264 { PCI_PRODUCT_INTEL_82371AB_IDE,
265 0,
266 "Intel 82371AB IDE controller (PIIX4)",
267 piix_chip_map,
268 },
269 { PCI_PRODUCT_INTEL_82440MX_IDE,
270 0,
271 "Intel 82440MX IDE controller",
272 piix_chip_map
273 },
274 { PCI_PRODUCT_INTEL_82801AA_IDE,
275 0,
276 "Intel 82801AA IDE Controller (ICH)",
277 piix_chip_map,
278 },
279 { PCI_PRODUCT_INTEL_82801AB_IDE,
280 0,
281 "Intel 82801AB IDE Controller (ICH0)",
282 piix_chip_map,
283 },
284 { PCI_PRODUCT_INTEL_82801BA_IDE,
285 0,
286 "Intel 82801BA IDE Controller (ICH2)",
287 piix_chip_map,
288 },
289 { PCI_PRODUCT_INTEL_82801BAM_IDE,
290 0,
291 "Intel 82801BAM IDE Controller (ICH2)",
292 piix_chip_map,
293 },
294 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
295 0,
296 "Intel 82801CA IDE Controller",
297 piix_chip_map,
298 },
299 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
300 0,
301 "Intel 82801CA IDE Controller",
302 piix_chip_map,
303 },
304 { PCI_PRODUCT_INTEL_82801DB_IDE,
305 0,
306 "Intel 82801DB IDE Controller (ICH4)",
307 piix_chip_map,
308 },
309 { 0,
310 0,
311 NULL,
312 NULL
313 }
314 };
315
316 const struct pciide_product_desc pciide_amd_products[] = {
317 { PCI_PRODUCT_AMD_PBC756_IDE,
318 0,
319 "Advanced Micro Devices AMD756 IDE Controller",
320 amd7x6_chip_map
321 },
322 { PCI_PRODUCT_AMD_PBC766_IDE,
323 0,
324 "Advanced Micro Devices AMD766 IDE Controller",
325 amd7x6_chip_map
326 },
327 { PCI_PRODUCT_AMD_PBC768_IDE,
328 0,
329 "Advanced Micro Devices AMD768 IDE Controller",
330 amd7x6_chip_map
331 },
332 { PCI_PRODUCT_AMD_PBC8111_IDE,
333 0,
334 "Advanced Micro Devices AMD8111 IDE Controller",
335 amd7x6_chip_map
336 },
337 { 0,
338 0,
339 NULL,
340 NULL
341 }
342 };
343
344 const struct pciide_product_desc pciide_nvidia_products[] = {
345 { PCI_PRODUCT_NVIDIA_NFORCE_ATA100,
346 0,
347 "NVIDIA nForce IDE Controller",
348 amd7x6_chip_map
349 },
350 { PCI_PRODUCT_NVIDIA_NFORCE2_ATA133,
351 0,
352 "NVIDIA nForce2 IDE Controller",
353 amd7x6_chip_map
354 },
355 { 0,
356 0,
357 NULL,
358 NULL
359 }
360 };
361
362 const struct pciide_product_desc pciide_cmd_products[] = {
363 { PCI_PRODUCT_CMDTECH_640,
364 0,
365 "CMD Technology PCI0640",
366 cmd_chip_map
367 },
368 { PCI_PRODUCT_CMDTECH_643,
369 0,
370 "CMD Technology PCI0643",
371 cmd0643_9_chip_map,
372 },
373 { PCI_PRODUCT_CMDTECH_646,
374 0,
375 "CMD Technology PCI0646",
376 cmd0643_9_chip_map,
377 },
378 { PCI_PRODUCT_CMDTECH_648,
379 IDE_PCI_CLASS_OVERRIDE,
380 "CMD Technology PCI0648",
381 cmd0643_9_chip_map,
382 },
383 { PCI_PRODUCT_CMDTECH_649,
384 IDE_PCI_CLASS_OVERRIDE,
385 "CMD Technology PCI0649",
386 cmd0643_9_chip_map,
387 },
388 { PCI_PRODUCT_CMDTECH_680,
389 IDE_PCI_CLASS_OVERRIDE,
390 "Silicon Image 0680",
391 cmd680_chip_map,
392 },
393 { 0,
394 0,
395 NULL,
396 NULL
397 }
398 };
399
400 const struct pciide_product_desc pciide_via_products[] = {
401 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
402 0,
403 NULL,
404 apollo_chip_map,
405 },
406 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
407 0,
408 NULL,
409 apollo_chip_map,
410 },
411 { 0,
412 0,
413 NULL,
414 NULL
415 }
416 };
417
418 const struct pciide_product_desc pciide_cypress_products[] = {
419 { PCI_PRODUCT_CONTAQ_82C693,
420 IDE_16BIT_IOSPACE,
421 "Cypress 82C693 IDE Controller",
422 cy693_chip_map,
423 },
424 { 0,
425 0,
426 NULL,
427 NULL
428 }
429 };
430
431 const struct pciide_product_desc pciide_sis_products[] = {
432 { PCI_PRODUCT_SIS_5597_IDE,
433 0,
434 "Silicon Integrated System 5597/5598 IDE controller",
435 sis_chip_map,
436 },
437 { 0,
438 0,
439 NULL,
440 NULL
441 }
442 };
443
444 const struct pciide_product_desc pciide_acer_products[] = {
445 { PCI_PRODUCT_ALI_M5229,
446 0,
447 "Acer Labs M5229 UDMA IDE Controller",
448 acer_chip_map,
449 },
450 { 0,
451 0,
452 NULL,
453 NULL
454 }
455 };
456
457 const struct pciide_product_desc pciide_promise_products[] = {
458 { PCI_PRODUCT_PROMISE_ULTRA33,
459 IDE_PCI_CLASS_OVERRIDE,
460 "Promise Ultra33/ATA Bus Master IDE Accelerator",
461 pdc202xx_chip_map,
462 },
463 { PCI_PRODUCT_PROMISE_ULTRA66,
464 IDE_PCI_CLASS_OVERRIDE,
465 "Promise Ultra66/ATA Bus Master IDE Accelerator",
466 pdc202xx_chip_map,
467 },
468 { PCI_PRODUCT_PROMISE_ULTRA100,
469 IDE_PCI_CLASS_OVERRIDE,
470 "Promise Ultra100/ATA Bus Master IDE Accelerator",
471 pdc202xx_chip_map,
472 },
473 { PCI_PRODUCT_PROMISE_ULTRA100X,
474 IDE_PCI_CLASS_OVERRIDE,
475 "Promise Ultra100/ATA Bus Master IDE Accelerator",
476 pdc202xx_chip_map,
477 },
478 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
479 IDE_PCI_CLASS_OVERRIDE,
480 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
481 pdc202xx_chip_map,
482 },
483 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
484 IDE_PCI_CLASS_OVERRIDE,
485 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
486 pdc202xx_chip_map,
487 },
488 { PCI_PRODUCT_PROMISE_ULTRA133,
489 IDE_PCI_CLASS_OVERRIDE,
490 "Promise Ultra133/ATA Bus Master IDE Accelerator",
491 pdc202xx_chip_map,
492 },
493 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
494 IDE_PCI_CLASS_OVERRIDE,
495 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
496 pdc202xx_chip_map,
497 },
498 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
499 IDE_PCI_CLASS_OVERRIDE,
500 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
501 pdc202xx_chip_map,
502 },
503 { PCI_PRODUCT_PROMISE_SATA150TX2PLUS,
504 IDE_PCI_CLASS_OVERRIDE,
505 "Promise Serial ATA/150 TX2plus Bus Master IDE Accelerator",
506 pdc202xx_chip_map,
507 },
508 { 0,
509 0,
510 NULL,
511 NULL
512 }
513 };
514
515 const struct pciide_product_desc pciide_opti_products[] = {
516 { PCI_PRODUCT_OPTI_82C621,
517 0,
518 "OPTi 82c621 PCI IDE controller",
519 opti_chip_map,
520 },
521 { PCI_PRODUCT_OPTI_82C568,
522 0,
523 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
524 opti_chip_map,
525 },
526 { PCI_PRODUCT_OPTI_82D568,
527 0,
528 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
529 opti_chip_map,
530 },
531 { 0,
532 0,
533 NULL,
534 NULL
535 }
536 };
537
538 const struct pciide_product_desc pciide_triones_products[] = {
539 { PCI_PRODUCT_TRIONES_HPT366,
540 IDE_PCI_CLASS_OVERRIDE,
541 NULL,
542 hpt_chip_map,
543 },
544 { PCI_PRODUCT_TRIONES_HPT372,
545 IDE_PCI_CLASS_OVERRIDE,
546 NULL,
547 hpt_chip_map
548 },
549 { PCI_PRODUCT_TRIONES_HPT374,
550 IDE_PCI_CLASS_OVERRIDE,
551 NULL,
552 hpt_chip_map
553 },
554 { 0,
555 0,
556 NULL,
557 NULL
558 }
559 };
560
561 const struct pciide_product_desc pciide_acard_products[] = {
562 { PCI_PRODUCT_ACARD_ATP850U,
563 IDE_PCI_CLASS_OVERRIDE,
564 "Acard ATP850U Ultra33 IDE Controller",
565 acard_chip_map,
566 },
567 { PCI_PRODUCT_ACARD_ATP860,
568 IDE_PCI_CLASS_OVERRIDE,
569 "Acard ATP860 Ultra66 IDE Controller",
570 acard_chip_map,
571 },
572 { PCI_PRODUCT_ACARD_ATP860A,
573 IDE_PCI_CLASS_OVERRIDE,
574 "Acard ATP860-A Ultra66 IDE Controller",
575 acard_chip_map,
576 },
577 { 0,
578 0,
579 NULL,
580 NULL
581 }
582 };
583
584 const struct pciide_product_desc pciide_serverworks_products[] = {
585 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
586 0,
587 "ServerWorks OSB4 IDE Controller",
588 serverworks_chip_map,
589 },
590 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
591 0,
592 "ServerWorks CSB5 IDE Controller",
593 serverworks_chip_map,
594 },
595 { 0,
596 0,
597 NULL,
598 }
599 };
600
601 const struct pciide_product_desc pciide_symphony_products[] = {
602 { PCI_PRODUCT_SYMPHONY_82C105,
603 0,
604 "Symphony Labs 82C105 IDE controller",
605 sl82c105_chip_map,
606 },
607 { 0,
608 0,
609 NULL,
610 }
611 };
612
613 const struct pciide_product_desc pciide_winbond_products[] = {
614 { PCI_PRODUCT_WINBOND_W83C553F_1,
615 0,
616 "Winbond W83C553F IDE controller",
617 sl82c105_chip_map,
618 },
619 { 0,
620 0,
621 NULL,
622 }
623 };
624
625 struct pciide_vendor_desc {
626 u_int32_t ide_vendor;
627 const struct pciide_product_desc *ide_products;
628 };
629
630 const struct pciide_vendor_desc pciide_vendors[] = {
631 { PCI_VENDOR_INTEL, pciide_intel_products },
632 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
633 { PCI_VENDOR_VIATECH, pciide_via_products },
634 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
635 { PCI_VENDOR_SIS, pciide_sis_products },
636 { PCI_VENDOR_ALI, pciide_acer_products },
637 { PCI_VENDOR_PROMISE, pciide_promise_products },
638 { PCI_VENDOR_AMD, pciide_amd_products },
639 { PCI_VENDOR_OPTI, pciide_opti_products },
640 { PCI_VENDOR_TRIONES, pciide_triones_products },
641 { PCI_VENDOR_ACARD, pciide_acard_products },
642 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
643 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
644 { PCI_VENDOR_WINBOND, pciide_winbond_products },
645 { PCI_VENDOR_NVIDIA, pciide_nvidia_products },
646 { 0, NULL }
647 };
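/*
 * Note: each product table above is terminated by an entry with a NULL
 * chip_map, and the vendor table by an entry with a NULL ide_products
 * pointer; pciide_lookup_product() below relies on these sentinels.
 */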
648
649 /* options passed via the 'flags' config keyword */
650 #define PCIIDE_OPTIONS_DMA 0x01
651 #define PCIIDE_OPTIONS_NODMA 0x02
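/*
 * Example (kernel config syntax, illustrative): a line such as
 *	pciide* at pci? dev ? function ? flags 0x0002
 * sets PCIIDE_OPTIONS_NODMA and forces DMA off, while flags 0x0001
 * (PCIIDE_OPTIONS_DMA) enables DMA on otherwise unsupported controllers
 * handled by default_chip_map().
 */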
652
653 int pciide_match __P((struct device *, struct cfdata *, void *));
654 void pciide_attach __P((struct device *, struct device *, void *));
655
656 CFATTACH_DECL(pciide, sizeof(struct pciide_softc),
657 pciide_match, pciide_attach, NULL, NULL);
658
659 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
660 int pciide_mapregs_compat __P(( struct pci_attach_args *,
661 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
662 int pciide_mapregs_native __P((struct pci_attach_args *,
663 struct pciide_channel *, bus_size_t *, bus_size_t *,
664 int (*pci_intr) __P((void *))));
665 void pciide_mapreg_dma __P((struct pciide_softc *,
666 struct pci_attach_args *));
667 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
668 void pciide_mapchan __P((struct pci_attach_args *,
669 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
670 int (*pci_intr) __P((void *))));
671 int pciide_chan_candisable __P((struct pciide_channel *));
672 void pciide_map_compat_intr __P(( struct pci_attach_args *,
673 struct pciide_channel *, int, int));
674 int pciide_compat_intr __P((void *));
675 int pciide_pci_intr __P((void *));
676 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
677
678 const struct pciide_product_desc *
679 pciide_lookup_product(id)
680 u_int32_t id;
681 {
682 const struct pciide_product_desc *pp;
683 const struct pciide_vendor_desc *vp;
684
685 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
686 if (PCI_VENDOR(id) == vp->ide_vendor)
687 break;
688
689 if ((pp = vp->ide_products) == NULL)
690 return NULL;
691
692 for (; pp->chip_map != NULL; pp++)
693 if (PCI_PRODUCT(id) == pp->ide_product)
694 break;
695
696 if (pp->chip_map == NULL)
697 return NULL;
698 return pp;
699 }
700
701 int
702 pciide_match(parent, match, aux)
703 struct device *parent;
704 struct cfdata *match;
705 void *aux;
706 {
707 struct pci_attach_args *pa = aux;
708 const struct pciide_product_desc *pp;
709
710 /*
711 * Check the ID register to see that it's a PCI IDE controller.
712 * If it is, we assume that we can deal with it; it _should_
713 * work in a standardized way...
714 */
715 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
716 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
717 return (1);
718 }
719
720 /*
721 	 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
722 	 * controllers. Let's see if we can deal with them anyway.
723 */
724 pp = pciide_lookup_product(pa->pa_id);
725 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
726 return (1);
727 }
728
729 return (0);
730 }
731
732 void
733 pciide_attach(parent, self, aux)
734 struct device *parent, *self;
735 void *aux;
736 {
737 struct pci_attach_args *pa = aux;
738 pci_chipset_tag_t pc = pa->pa_pc;
739 pcitag_t tag = pa->pa_tag;
740 struct pciide_softc *sc = (struct pciide_softc *)self;
741 pcireg_t csr;
742 char devinfo[256];
743 const char *displaydev;
744
745 sc->sc_pci_vendor = PCI_VENDOR(pa->pa_id);
746 sc->sc_pp = pciide_lookup_product(pa->pa_id);
747 if (sc->sc_pp == NULL) {
748 sc->sc_pp = &default_product_desc;
749 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
750 displaydev = devinfo;
751 } else
752 displaydev = sc->sc_pp->ide_name;
753
754 /* if displaydev == NULL, printf is done in chip-specific map */
755 if (displaydev)
756 printf(": %s (rev. 0x%02x)\n", displaydev,
757 PCI_REVISION(pa->pa_class));
758
759 sc->sc_pc = pa->pa_pc;
760 sc->sc_tag = pa->pa_tag;
761 #ifdef WDCDEBUG
762 if (wdcdebug_pciide_mask & DEBUG_PROBE)
763 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
764 #endif
765 sc->sc_pp->chip_map(sc, pa);
766
767 if (sc->sc_dma_ok) {
768 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
769 csr |= PCI_COMMAND_MASTER_ENABLE;
770 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
771 }
772 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
773 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
774 }
775
776 /* tell whether the chip is enabled or not */
777 int
778 pciide_chipen(sc, pa)
779 struct pciide_softc *sc;
780 struct pci_attach_args *pa;
781 {
782 pcireg_t csr;
783 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
784 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
785 PCI_COMMAND_STATUS_REG);
786 printf("%s: device disabled (at %s)\n",
787 sc->sc_wdcdev.sc_dev.dv_xname,
788 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
789 "device" : "bridge");
790 return 0;
791 }
792 return 1;
793 }
794
795 int
796 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
797 struct pci_attach_args *pa;
798 struct pciide_channel *cp;
799 int compatchan;
800 bus_size_t *cmdsizep, *ctlsizep;
801 {
802 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
803 struct channel_softc *wdc_cp = &cp->wdc_channel;
804
805 cp->compat = 1;
806 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
807 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
808
809 wdc_cp->cmd_iot = pa->pa_iot;
810 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
811 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
812 printf("%s: couldn't map %s channel cmd regs\n",
813 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
814 return (0);
815 }
816
817 wdc_cp->ctl_iot = pa->pa_iot;
818 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
819 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
820 printf("%s: couldn't map %s channel ctl regs\n",
821 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
822 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
823 PCIIDE_COMPAT_CMD_SIZE);
824 return (0);
825 }
826
827 return (1);
828 }
829
830 int
831 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
832 struct pci_attach_args * pa;
833 struct pciide_channel *cp;
834 bus_size_t *cmdsizep, *ctlsizep;
835 int (*pci_intr) __P((void *));
836 {
837 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
838 struct channel_softc *wdc_cp = &cp->wdc_channel;
839 const char *intrstr;
840 pci_intr_handle_t intrhandle;
841
842 cp->compat = 0;
843
844 if (sc->sc_pci_ih == NULL) {
845 if (pci_intr_map(pa, &intrhandle) != 0) {
846 printf("%s: couldn't map native-PCI interrupt\n",
847 sc->sc_wdcdev.sc_dev.dv_xname);
848 return 0;
849 }
850 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
851 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
852 intrhandle, IPL_BIO, pci_intr, sc);
853 if (sc->sc_pci_ih != NULL) {
854 printf("%s: using %s for native-PCI interrupt\n",
855 sc->sc_wdcdev.sc_dev.dv_xname,
856 intrstr ? intrstr : "unknown interrupt");
857 } else {
858 printf("%s: couldn't establish native-PCI interrupt",
859 sc->sc_wdcdev.sc_dev.dv_xname);
860 if (intrstr != NULL)
861 printf(" at %s", intrstr);
862 printf("\n");
863 return 0;
864 }
865 }
866 cp->ih = sc->sc_pci_ih;
867 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
868 PCI_MAPREG_TYPE_IO, 0,
869 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
870 printf("%s: couldn't map %s channel cmd regs\n",
871 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
872 return 0;
873 }
874
875 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
876 PCI_MAPREG_TYPE_IO, 0,
877 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
878 printf("%s: couldn't map %s channel ctl regs\n",
879 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
880 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
881 return 0;
882 }
883 /*
884 	 * In native mode, 4 bytes of I/O space are mapped for the control
885 	 * register block; the control register itself is at offset 2. Pass the
886 	 * generic code a handle for only one byte at the right offset.
887 */
888 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
889 &wdc_cp->ctl_ioh) != 0) {
890 printf("%s: unable to subregion %s channel ctl regs\n",
891 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
892 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
893 		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
894 return 0;
895 }
896 return (1);
897 }
898
899 void
900 pciide_mapreg_dma(sc, pa)
901 struct pciide_softc *sc;
902 struct pci_attach_args *pa;
903 {
904 pcireg_t maptype;
905 bus_addr_t addr;
906
907 /*
908 * Map DMA registers
909 *
910 * Note that sc_dma_ok is the right variable to test to see if
911 * DMA can be done. If the interface doesn't support DMA,
912 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
913 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
914 * non-zero if the interface supports DMA and the registers
915 * could be mapped.
916 *
917 * XXX Note that despite the fact that the Bus Master IDE specs
918 * XXX say that "The bus master IDE function uses 16 bytes of IO
919 * XXX space," some controllers (at least the United
920 * XXX Microelectronics UM8886BF) place it in memory space.
921 */
922 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
923 PCIIDE_REG_BUS_MASTER_DMA);
924
925 switch (maptype) {
926 case PCI_MAPREG_TYPE_IO:
927 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
928 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
929 &addr, NULL, NULL) == 0);
930 if (sc->sc_dma_ok == 0) {
931 printf(", but unused (couldn't query registers)");
932 break;
933 }
934 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
935 && addr >= 0x10000) {
936 sc->sc_dma_ok = 0;
937 printf(", but unused (registers at unsafe address "
938 "%#lx)", (unsigned long)addr);
939 break;
940 }
941 /* FALLTHROUGH */
942
943 case PCI_MAPREG_MEM_TYPE_32BIT:
944 sc->sc_dma_ok = (pci_mapreg_map(pa,
945 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
946 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
947 sc->sc_dmat = pa->pa_dmat;
948 if (sc->sc_dma_ok == 0) {
949 printf(", but unused (couldn't map registers)");
950 } else {
951 sc->sc_wdcdev.dma_arg = sc;
952 sc->sc_wdcdev.dma_init = pciide_dma_init;
953 sc->sc_wdcdev.dma_start = pciide_dma_start;
954 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
955 }
956
957 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
958 PCIIDE_OPTIONS_NODMA) {
959 printf(", but unused (forced off by config file)");
960 sc->sc_dma_ok = 0;
961 }
962 break;
963
964 default:
965 sc->sc_dma_ok = 0;
966 printf(", but unsupported register maptype (0x%x)", maptype);
967 }
968 }
969
970 int
971 pciide_compat_intr(arg)
972 void *arg;
973 {
974 struct pciide_channel *cp = arg;
975
976 #ifdef DIAGNOSTIC
977 /* should only be called for a compat channel */
978 if (cp->compat == 0)
979 panic("pciide compat intr called for non-compat chan %p", cp);
980 #endif
981 return (wdcintr(&cp->wdc_channel));
982 }
983
984 int
985 pciide_pci_intr(arg)
986 void *arg;
987 {
988 struct pciide_softc *sc = arg;
989 struct pciide_channel *cp;
990 struct channel_softc *wdc_cp;
991 int i, rv, crv;
992
993 rv = 0;
994 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
995 cp = &sc->pciide_channels[i];
996 wdc_cp = &cp->wdc_channel;
997
998 		/* If a compat channel, skip. */
999 if (cp->compat)
1000 continue;
1001 /* if this channel not waiting for intr, skip */
1002 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
1003 continue;
1004
1005 crv = wdcintr(wdc_cp);
1006 if (crv == 0)
1007 ; /* leave rv alone */
1008 else if (crv == 1)
1009 rv = 1; /* claim the intr */
1010 else if (rv == 0) /* crv should be -1 in this case */
1011 rv = crv; /* if we've done no better, take it */
1012 }
1013 return (rv);
1014 }
1015
1016 void
1017 pciide_channel_dma_setup(cp)
1018 struct pciide_channel *cp;
1019 {
1020 int drive;
1021 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1022 struct ata_drive_datas *drvp;
1023
1024 for (drive = 0; drive < 2; drive++) {
1025 drvp = &cp->wdc_channel.ch_drive[drive];
1026 /* If no drive, skip */
1027 if ((drvp->drive_flags & DRIVE) == 0)
1028 continue;
1029 /* setup DMA if needed */
1030 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1031 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
1032 sc->sc_dma_ok == 0) {
1033 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1034 continue;
1035 }
1036 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1037 != 0) {
1038 /* Abort DMA setup */
1039 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1040 continue;
1041 }
1042 }
1043 }
1044
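/*
 * The table allocated below follows the Bus Master IDE "physical region
 * descriptor" layout: each idedma_table entry holds a 32-bit physical base
 * address and a byte count, and the last entry of a transfer is flagged
 * with IDEDMA_BYTE_COUNT_EOT (see pciide_dma_init() below).
 */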
1045 int
1046 pciide_dma_table_setup(sc, channel, drive)
1047 struct pciide_softc *sc;
1048 int channel, drive;
1049 {
1050 bus_dma_segment_t seg;
1051 int error, rseg;
1052 const bus_size_t dma_table_size =
1053 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1054 struct pciide_dma_maps *dma_maps =
1055 &sc->pciide_channels[channel].dma_maps[drive];
1056
1057 /* If table was already allocated, just return */
1058 if (dma_maps->dma_table)
1059 return 0;
1060
1061 /* Allocate memory for the DMA tables and map it */
1062 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1063 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1064 BUS_DMA_NOWAIT)) != 0) {
1065 printf("%s:%d: unable to allocate table DMA for "
1066 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1067 channel, drive, error);
1068 return error;
1069 }
1070 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1071 dma_table_size,
1072 (caddr_t *)&dma_maps->dma_table,
1073 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1074 		printf("%s:%d: unable to map table DMA for "
1075 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1076 channel, drive, error);
1077 return error;
1078 }
1079 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1080 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1081 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1082
1083 /* Create and load table DMA map for this disk */
1084 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1085 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1086 &dma_maps->dmamap_table)) != 0) {
1087 printf("%s:%d: unable to create table DMA map for "
1088 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1089 channel, drive, error);
1090 return error;
1091 }
1092 if ((error = bus_dmamap_load(sc->sc_dmat,
1093 dma_maps->dmamap_table,
1094 dma_maps->dma_table,
1095 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1096 printf("%s:%d: unable to load table DMA map for "
1097 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1098 channel, drive, error);
1099 return error;
1100 }
1101 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1102 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1103 DEBUG_PROBE);
1104 /* Create a xfer DMA map for this drive */
1105 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1106 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1107 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1108 &dma_maps->dmamap_xfer)) != 0) {
1109 printf("%s:%d: unable to create xfer DMA map for "
1110 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1111 channel, drive, error);
1112 return error;
1113 }
1114 return 0;
1115 }
1116
1117 int
1118 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1119 void *v;
1120 int channel, drive;
1121 void *databuf;
1122 size_t datalen;
1123 int flags;
1124 {
1125 struct pciide_softc *sc = v;
1126 int error, seg;
1127 struct pciide_dma_maps *dma_maps =
1128 &sc->pciide_channels[channel].dma_maps[drive];
1129
1130 error = bus_dmamap_load(sc->sc_dmat,
1131 dma_maps->dmamap_xfer,
1132 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1133 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1134 if (error) {
1135 		printf("%s:%d: unable to load xfer DMA map for "
1136 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1137 channel, drive, error);
1138 return error;
1139 }
1140
1141 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1142 dma_maps->dmamap_xfer->dm_mapsize,
1143 (flags & WDC_DMA_READ) ?
1144 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1145
1146 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1147 #ifdef DIAGNOSTIC
1148 /* A segment must not cross a 64k boundary */
1149 {
1150 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1151 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1152 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1153 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1154 printf("pciide_dma: segment %d physical addr 0x%lx"
1155 " len 0x%lx not properly aligned\n",
1156 seg, phys, len);
1157 panic("pciide_dma: buf align");
1158 }
1159 }
1160 #endif
1161 dma_maps->dma_table[seg].base_addr =
1162 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1163 dma_maps->dma_table[seg].byte_count =
1164 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1165 IDEDMA_BYTE_COUNT_MASK);
1166 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1167 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1168 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1169
1170 }
1171 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1172 htole32(IDEDMA_BYTE_COUNT_EOT);
1173
1174 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1175 dma_maps->dmamap_table->dm_mapsize,
1176 BUS_DMASYNC_PREWRITE);
1177
1178 /* Maps are ready. Start DMA function */
1179 #ifdef DIAGNOSTIC
1180 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1181 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1182 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1183 panic("pciide_dma_init: table align");
1184 }
1185 #endif
1186
1187 /* Clear status bits */
1188 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1189 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1190 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1191 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1192 /* Write table addr */
1193 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1194 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1195 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1196 /* set read/write */
1197 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1198 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1199 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1200 /* remember flags */
1201 dma_maps->dma_flags = flags;
1202 return 0;
1203 }
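/*
 * Typical call sequence, as wired up in pciide_mapreg_dma() above:
 * pciide_dma_init() loads the maps and programs the descriptor table
 * address, pciide_dma_start() sets IDEDMA_CMD_START, and once the transfer
 * completes pciide_dma_finish() stops the engine and checks the status
 * bits; pciide_irqack() clears the status register afterwards.
 */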
1204
1205 void
1206 pciide_dma_start(v, channel, drive)
1207 void *v;
1208 int channel, drive;
1209 {
1210 struct pciide_softc *sc = v;
1211
1212 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1213 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1214 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1215 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1216 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1217 }
1218
1219 int
1220 pciide_dma_finish(v, channel, drive, force)
1221 void *v;
1222 int channel, drive;
1223 int force;
1224 {
1225 struct pciide_softc *sc = v;
1226 u_int8_t status;
1227 int error = 0;
1228 struct pciide_dma_maps *dma_maps =
1229 &sc->pciide_channels[channel].dma_maps[drive];
1230
1231 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1232 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1233 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1234 DEBUG_XFERS);
1235
1236 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1237 return WDC_DMAST_NOIRQ;
1238
1239 /* stop DMA channel */
1240 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1241 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1242 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1243 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1244
1245 /* Unload the map of the data buffer */
1246 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1247 dma_maps->dmamap_xfer->dm_mapsize,
1248 (dma_maps->dma_flags & WDC_DMA_READ) ?
1249 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1250 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1251
1252 if ((status & IDEDMA_CTL_ERR) != 0) {
1253 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1254 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1255 error |= WDC_DMAST_ERR;
1256 }
1257
1258 if ((status & IDEDMA_CTL_INTR) == 0) {
1259 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1260 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1261 drive, status);
1262 error |= WDC_DMAST_NOIRQ;
1263 }
1264
1265 if ((status & IDEDMA_CTL_ACT) != 0) {
1266 /* data underrun, may be a valid condition for ATAPI */
1267 error |= WDC_DMAST_UNDER;
1268 }
1269 return error;
1270 }
1271
1272 void
1273 pciide_irqack(chp)
1274 struct channel_softc *chp;
1275 {
1276 struct pciide_channel *cp = (struct pciide_channel*)chp;
1277 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1278
1279 /* clear status bits in IDE DMA registers */
1280 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1281 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1282 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1283 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1284 }
1285
1286 /* some common code used by several chip_map */
1287 int
1288 pciide_chansetup(sc, channel, interface)
1289 struct pciide_softc *sc;
1290 int channel;
1291 pcireg_t interface;
1292 {
1293 struct pciide_channel *cp = &sc->pciide_channels[channel];
1294 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1295 cp->name = PCIIDE_CHANNEL_NAME(channel);
1296 cp->wdc_channel.channel = channel;
1297 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1298 cp->wdc_channel.ch_queue =
1299 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1300 if (cp->wdc_channel.ch_queue == NULL) {
1301 		printf("%s: %s channel: "
1302 		    "can't allocate memory for command queue\n",
1303 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1304 return 0;
1305 }
1306 printf("%s: %s channel %s to %s mode\n",
1307 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1308 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1309 "configured" : "wired",
1310 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1311 "native-PCI" : "compatibility");
1312 return 1;
1313 }
1314
1315 /* some common code used by several chip_map functions to map a channel */
1316 void
1317 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1318 struct pci_attach_args *pa;
1319 struct pciide_channel *cp;
1320 pcireg_t interface;
1321 bus_size_t *cmdsizep, *ctlsizep;
1322 int (*pci_intr) __P((void *));
1323 {
1324 struct channel_softc *wdc_cp = &cp->wdc_channel;
1325
1326 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1327 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1328 pci_intr);
1329 else
1330 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1331 wdc_cp->channel, cmdsizep, ctlsizep);
1332
1333 if (cp->hw_ok == 0)
1334 return;
1335 wdc_cp->data32iot = wdc_cp->cmd_iot;
1336 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1337 wdcattach(wdc_cp);
1338 }
1339
1340 /*
1341  * Generic code to determine whether a channel can be disabled. Returns 1
1342  * if the channel can be disabled, 0 if not.
1343 */
1344 int
1345 pciide_chan_candisable(cp)
1346 struct pciide_channel *cp;
1347 {
1348 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1349 struct channel_softc *wdc_cp = &cp->wdc_channel;
1350
1351 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1352 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1353 printf("%s: disabling %s channel (no drives)\n",
1354 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1355 cp->hw_ok = 0;
1356 return 1;
1357 }
1358 return 0;
1359 }
1360
1361 /*
1362  * Generic code to map the compat interrupt if hw_ok=1 and this is a compat
1363  * channel. Sets hw_ok=0 on failure.
1364 */
1365 void
1366 pciide_map_compat_intr(pa, cp, compatchan, interface)
1367 struct pci_attach_args *pa;
1368 struct pciide_channel *cp;
1369 int compatchan, interface;
1370 {
1371 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1372 struct channel_softc *wdc_cp = &cp->wdc_channel;
1373
1374 if (cp->hw_ok == 0)
1375 return;
1376 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1377 return;
1378
1379 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1380 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1381 pa, compatchan, pciide_compat_intr, cp);
1382 if (cp->ih == NULL) {
1383 #endif
1384 printf("%s: no compatibility interrupt for use by %s "
1385 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1386 cp->hw_ok = 0;
1387 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1388 }
1389 #endif
1390 }
1391
1392 void
1393 pciide_print_modes(cp)
1394 struct pciide_channel *cp;
1395 {
1396 wdc_print_modes(&cp->wdc_channel);
1397 }
1398
1399 void
1400 default_chip_map(sc, pa)
1401 struct pciide_softc *sc;
1402 struct pci_attach_args *pa;
1403 {
1404 struct pciide_channel *cp;
1405 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1406 pcireg_t csr;
1407 int channel, drive;
1408 struct ata_drive_datas *drvp;
1409 u_int8_t idedma_ctl;
1410 bus_size_t cmdsize, ctlsize;
1411 char *failreason;
1412
1413 if (pciide_chipen(sc, pa) == 0)
1414 return;
1415
1416 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1417 printf("%s: bus-master DMA support present",
1418 sc->sc_wdcdev.sc_dev.dv_xname);
1419 if (sc->sc_pp == &default_product_desc &&
1420 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1421 PCIIDE_OPTIONS_DMA) == 0) {
1422 printf(", but unused (no driver support)");
1423 sc->sc_dma_ok = 0;
1424 } else {
1425 pciide_mapreg_dma(sc, pa);
1426 if (sc->sc_dma_ok != 0)
1427 printf(", used without full driver "
1428 "support");
1429 }
1430 } else {
1431 printf("%s: hardware does not support DMA",
1432 sc->sc_wdcdev.sc_dev.dv_xname);
1433 sc->sc_dma_ok = 0;
1434 }
1435 printf("\n");
1436 if (sc->sc_dma_ok) {
1437 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1438 sc->sc_wdcdev.irqack = pciide_irqack;
1439 }
1440 sc->sc_wdcdev.PIO_cap = 0;
1441 sc->sc_wdcdev.DMA_cap = 0;
1442
1443 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1444 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1445 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1446
1447 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1448 cp = &sc->pciide_channels[channel];
1449 if (pciide_chansetup(sc, channel, interface) == 0)
1450 continue;
1451 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1452 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1453 &ctlsize, pciide_pci_intr);
1454 } else {
1455 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1456 channel, &cmdsize, &ctlsize);
1457 }
1458 if (cp->hw_ok == 0)
1459 continue;
1460 /*
1461 * Check to see if something appears to be there.
1462 */
1463 failreason = NULL;
1464 if (!wdcprobe(&cp->wdc_channel)) {
1465 failreason = "not responding; disabled or no drives?";
1466 goto next;
1467 }
1468 /*
1469 * Now, make sure it's actually attributable to this PCI IDE
1470 * channel by trying to access the channel again while the
1471 * PCI IDE controller's I/O space is disabled. (If the
1472 * channel no longer appears to be there, it belongs to
1473 * this controller.) YUCK!
1474 */
1475 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1476 PCI_COMMAND_STATUS_REG);
1477 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1478 csr & ~PCI_COMMAND_IO_ENABLE);
1479 if (wdcprobe(&cp->wdc_channel))
1480 failreason = "other hardware responding at addresses";
1481 pci_conf_write(sc->sc_pc, sc->sc_tag,
1482 PCI_COMMAND_STATUS_REG, csr);
1483 next:
1484 if (failreason) {
1485 printf("%s: %s channel ignored (%s)\n",
1486 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1487 failreason);
1488 cp->hw_ok = 0;
1489 bus_space_unmap(cp->wdc_channel.cmd_iot,
1490 cp->wdc_channel.cmd_ioh, cmdsize);
1491 if (interface & PCIIDE_INTERFACE_PCI(channel))
1492 bus_space_unmap(cp->wdc_channel.ctl_iot,
1493 cp->ctl_baseioh, ctlsize);
1494 else
1495 bus_space_unmap(cp->wdc_channel.ctl_iot,
1496 cp->wdc_channel.ctl_ioh, ctlsize);
1497 } else {
1498 pciide_map_compat_intr(pa, cp, channel, interface);
1499 }
1500 if (cp->hw_ok) {
1501 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1502 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1503 wdcattach(&cp->wdc_channel);
1504 }
1505 }
1506
1507 if (sc->sc_dma_ok == 0)
1508 return;
1509
1510 /* Allocate DMA maps */
1511 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1512 idedma_ctl = 0;
1513 cp = &sc->pciide_channels[channel];
1514 for (drive = 0; drive < 2; drive++) {
1515 drvp = &cp->wdc_channel.ch_drive[drive];
1516 /* If no drive, skip */
1517 if ((drvp->drive_flags & DRIVE) == 0)
1518 continue;
1519 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1520 continue;
1521 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1522 /* Abort DMA setup */
1523 printf("%s:%d:%d: can't allocate DMA maps, "
1524 "using PIO transfers\n",
1525 sc->sc_wdcdev.sc_dev.dv_xname,
1526 channel, drive);
1527 				drvp->drive_flags &= ~DRIVE_DMA;
 				continue;
1528 }
1529 printf("%s:%d:%d: using DMA data transfers\n",
1530 sc->sc_wdcdev.sc_dev.dv_xname,
1531 channel, drive);
1532 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1533 }
1534 if (idedma_ctl != 0) {
1535 /* Add software bits in status register */
1536 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1537 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1538 idedma_ctl);
1539 }
1540 }
1541 }
1542
1543 void
1544 piix_chip_map(sc, pa)
1545 struct pciide_softc *sc;
1546 struct pci_attach_args *pa;
1547 {
1548 struct pciide_channel *cp;
1549 int channel;
1550 u_int32_t idetim;
1551 bus_size_t cmdsize, ctlsize;
1552
1553 if (pciide_chipen(sc, pa) == 0)
1554 return;
1555
1556 printf("%s: bus-master DMA support present",
1557 sc->sc_wdcdev.sc_dev.dv_xname);
1558 pciide_mapreg_dma(sc, pa);
1559 printf("\n");
1560 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1561 WDC_CAPABILITY_MODE;
1562 if (sc->sc_dma_ok) {
1563 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1564 sc->sc_wdcdev.irqack = pciide_irqack;
1565 switch(sc->sc_pp->ide_product) {
1566 case PCI_PRODUCT_INTEL_82371AB_IDE:
1567 case PCI_PRODUCT_INTEL_82440MX_IDE:
1568 case PCI_PRODUCT_INTEL_82801AA_IDE:
1569 case PCI_PRODUCT_INTEL_82801AB_IDE:
1570 case PCI_PRODUCT_INTEL_82801BA_IDE:
1571 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1572 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1573 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1574 case PCI_PRODUCT_INTEL_82801DB_IDE:
1575 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1576 }
1577 }
1578 sc->sc_wdcdev.PIO_cap = 4;
1579 sc->sc_wdcdev.DMA_cap = 2;
1580 switch(sc->sc_pp->ide_product) {
1581 case PCI_PRODUCT_INTEL_82801AA_IDE:
1582 sc->sc_wdcdev.UDMA_cap = 4;
1583 break;
1584 case PCI_PRODUCT_INTEL_82801BA_IDE:
1585 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1586 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1587 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1588 case PCI_PRODUCT_INTEL_82801DB_IDE:
1589 sc->sc_wdcdev.UDMA_cap = 5;
1590 break;
1591 default:
1592 sc->sc_wdcdev.UDMA_cap = 2;
1593 }
1594 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1595 sc->sc_wdcdev.set_modes = piix_setup_channel;
1596 else
1597 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1598 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1599 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1600
1601 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1602 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1603 DEBUG_PROBE);
1604 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1605 WDCDEBUG_PRINT((", sidetim=0x%x",
1606 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1607 DEBUG_PROBE);
1608 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1609 			WDCDEBUG_PRINT((", udmareg 0x%x",
1610 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1611 DEBUG_PROBE);
1612 }
1613 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1614 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1615 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1616 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1617 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1618 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1619 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1620 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1621 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1622 DEBUG_PROBE);
1623 }
1624
1625 }
1626 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1627
1628 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1629 cp = &sc->pciide_channels[channel];
1630 /* PIIX is compat-only */
1631 if (pciide_chansetup(sc, channel, 0) == 0)
1632 continue;
1633 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1634 if ((PIIX_IDETIM_READ(idetim, channel) &
1635 PIIX_IDETIM_IDE) == 0) {
1636 printf("%s: %s channel ignored (disabled)\n",
1637 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1638 continue;
1639 }
1640 /* PIIX are compat-only pciide devices */
1641 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1642 if (cp->hw_ok == 0)
1643 continue;
1644 if (pciide_chan_candisable(cp)) {
1645 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1646 channel);
1647 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1648 idetim);
1649 }
1650 pciide_map_compat_intr(pa, cp, channel, 0);
1651 if (cp->hw_ok == 0)
1652 continue;
1653 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1654 }
1655
1656 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1657 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1658 DEBUG_PROBE);
1659 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1660 WDCDEBUG_PRINT((", sidetim=0x%x",
1661 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1662 DEBUG_PROBE);
1663 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1664 			WDCDEBUG_PRINT((", udmareg 0x%x",
1665 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1666 DEBUG_PROBE);
1667 }
1668 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1669 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1670 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1671 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1672 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1673 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1674 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1675 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1676 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1677 DEBUG_PROBE);
1678 }
1679 }
1680 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1681 }
1682
1683 void
1684 piix_setup_channel(chp)
1685 struct channel_softc *chp;
1686 {
1687 u_int8_t mode[2], drive;
1688 u_int32_t oidetim, idetim, idedma_ctl;
1689 struct pciide_channel *cp = (struct pciide_channel*)chp;
1690 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1691 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1692
1693 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1694 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1695 idedma_ctl = 0;
1696
1697 /* set up new idetim: Enable IDE registers decode */
1698 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1699 chp->channel);
1700
1701 /* setup DMA */
1702 pciide_channel_dma_setup(cp);
1703
1704 /*
1705 	 * Here we have to mess with the drives' modes: the PIIX can't use
1706 	 * different timings for master and slave drives.
1707 * We need to find the best combination.
1708 */
1709
1710 	/* If both drives support DMA, take the lower mode */
1711 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1712 (drvp[1].drive_flags & DRIVE_DMA)) {
1713 mode[0] = mode[1] =
1714 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1715 drvp[0].DMA_mode = mode[0];
1716 drvp[1].DMA_mode = mode[1];
1717 goto ok;
1718 }
1719 /*
1720 	 * If only one drive supports DMA, use its mode, and
1721 	 * put the other one in PIO mode 0 if its mode is not compatible.
1722 */
1723 if (drvp[0].drive_flags & DRIVE_DMA) {
1724 mode[0] = drvp[0].DMA_mode;
1725 mode[1] = drvp[1].PIO_mode;
1726 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1727 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1728 mode[1] = drvp[1].PIO_mode = 0;
1729 goto ok;
1730 }
1731 if (drvp[1].drive_flags & DRIVE_DMA) {
1732 mode[1] = drvp[1].DMA_mode;
1733 mode[0] = drvp[0].PIO_mode;
1734 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1735 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1736 mode[0] = drvp[0].PIO_mode = 0;
1737 goto ok;
1738 }
1739 /*
1740 	 * If neither drive uses DMA, take the lower mode, unless
1741 	 * one of them is PIO mode < 2.
1742 */
1743 if (drvp[0].PIO_mode < 2) {
1744 mode[0] = drvp[0].PIO_mode = 0;
1745 mode[1] = drvp[1].PIO_mode;
1746 } else if (drvp[1].PIO_mode < 2) {
1747 mode[1] = drvp[1].PIO_mode = 0;
1748 mode[0] = drvp[0].PIO_mode;
1749 } else {
1750 mode[0] = mode[1] =
1751 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1752 drvp[0].PIO_mode = mode[0];
1753 drvp[1].PIO_mode = mode[1];
1754 }
1755 ok: /* The modes are setup */
1756 for (drive = 0; drive < 2; drive++) {
1757 if (drvp[drive].drive_flags & DRIVE_DMA) {
1758 idetim |= piix_setup_idetim_timings(
1759 mode[drive], 1, chp->channel);
1760 goto end;
1761 }
1762 }
1763 	/* If we get here, neither drive is using DMA */
1764 if (mode[0] >= 2)
1765 idetim |= piix_setup_idetim_timings(
1766 mode[0], 0, chp->channel);
1767 else
1768 idetim |= piix_setup_idetim_timings(
1769 mode[1], 0, chp->channel);
1770 end: /*
1771 * timing mode is now set up in the controller. Enable
1772 * it per-drive
1773 */
1774 for (drive = 0; drive < 2; drive++) {
1775 /* If no drive, skip */
1776 if ((drvp[drive].drive_flags & DRIVE) == 0)
1777 continue;
1778 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1779 if (drvp[drive].drive_flags & DRIVE_DMA)
1780 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1781 }
1782 if (idedma_ctl != 0) {
1783 /* Add software bits in status register */
1784 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1785 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1786 idedma_ctl);
1787 }
1788 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1789 pciide_print_modes(cp);
1790 }
1791
1792 void
1793 piix3_4_setup_channel(chp)
1794 struct channel_softc *chp;
1795 {
1796 struct ata_drive_datas *drvp;
1797 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1798 struct pciide_channel *cp = (struct pciide_channel*)chp;
1799 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1800 int drive;
1801 int channel = chp->channel;
1802
1803 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1804 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1805 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1806 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1807 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1808 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1809 PIIX_SIDETIM_RTC_MASK(channel));
1810
1811 idedma_ctl = 0;
1812 /* If channel disabled, no need to go further */
1813 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1814 return;
1815 /* set up new idetim: enable IDE register decoding */
1816 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1817
1818 /* setup DMA if needed */
1819 pciide_channel_dma_setup(cp);
1820
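/*
 * Drive 0 timings live in IDETIM; drive 1 timings go into SIDETIM,
 * which only takes effect once SITRE is set in IDETIM.
 */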
1821 for (drive = 0; drive < 2; drive++) {
1822 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1823 PIIX_UDMATIM_SET(0x3, channel, drive));
1824 drvp = &chp->ch_drive[drive];
1825 /* If no drive, skip */
1826 if ((drvp->drive_flags & DRIVE) == 0)
1827 continue;
1828 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1829 (drvp->drive_flags & DRIVE_UDMA) == 0))
1830 goto pio;
1831
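/*
 * ICH-family controllers have a ping-pong transfer buffer; enable
 * it (PIIX_CONFIG_PINGPONG) whenever a drive on the channel is set
 * up for DMA or Ultra-DMA.
 */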
1832 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1833 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1834 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1835 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1836 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1837 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1838 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1839 ideconf |= PIIX_CONFIG_PINGPONG;
1840 }
1841 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1842 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1843 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1844 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1845 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1846 /* setup Ultra/100 */
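/*
 * PIIX_CONFIG_CR is the 80-conductor cable report bit for this
 * drive; if it is clear, cap the drive at UDMA2 (Ultra/33).
 */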
1847 if (drvp->UDMA_mode > 2 &&
1848 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1849 drvp->UDMA_mode = 2;
1850 if (drvp->UDMA_mode > 4) {
1851 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1852 } else {
1853 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1854 if (drvp->UDMA_mode > 2) {
1855 ideconf |= PIIX_CONFIG_UDMA66(channel,
1856 drive);
1857 } else {
1858 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1859 drive);
1860 }
1861 }
1862 }
1863 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1864 /* setup Ultra/66 */
1865 if (drvp->UDMA_mode > 2 &&
1866 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1867 drvp->UDMA_mode = 2;
1868 if (drvp->UDMA_mode > 2)
1869 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1870 else
1871 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1872 }
1873 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1874 (drvp->drive_flags & DRIVE_UDMA)) {
1875 /* use Ultra/DMA */
1876 drvp->drive_flags &= ~DRIVE_DMA;
1877 udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1878 udmareg |= PIIX_UDMATIM_SET(
1879 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1880 } else {
1881 /* use Multiword DMA */
1882 drvp->drive_flags &= ~DRIVE_UDMA;
1883 if (drive == 0) {
1884 idetim |= piix_setup_idetim_timings(
1885 drvp->DMA_mode, 1, channel);
1886 } else {
1887 sidetim |= piix_setup_sidetim_timings(
1888 drvp->DMA_mode, 1, channel);
1889 idetim = PIIX_IDETIM_SET(idetim,
1890 PIIX_IDETIM_SITRE, channel);
1891 }
1892 }
1893 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1894
1895 pio: /* use PIO mode */
1896 idetim |= piix_setup_idetim_drvs(drvp);
1897 if (drive == 0) {
1898 idetim |= piix_setup_idetim_timings(
1899 drvp->PIO_mode, 0, channel);
1900 } else {
1901 sidetim |= piix_setup_sidetim_timings(
1902 drvp->PIO_mode, 0, channel);
1903 idetim = PIIX_IDETIM_SET(idetim,
1904 PIIX_IDETIM_SITRE, channel);
1905 }
1906 }
1907 if (idedma_ctl != 0) {
1908 /* Add software bits in status register */
1909 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1910 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1911 idedma_ctl);
1912 }
1913 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1914 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1915 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1916 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1917 pciide_print_modes(cp);
1918 }
1919
1920
1921 /* setup ISP and RTC fields, based on mode */
1922 static u_int32_t
1923 piix_setup_idetim_timings(mode, dma, channel)
1924 u_int8_t mode;
1925 u_int8_t dma;
1926 u_int8_t channel;
1927 {
1928
1929 if (dma)
1930 return PIIX_IDETIM_SET(0,
1931 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1932 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1933 channel);
1934 else
1935 return PIIX_IDETIM_SET(0,
1936 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1937 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1938 channel);
1939 }
1940
1941 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1942 static u_int32_t
1943 piix_setup_idetim_drvs(drvp)
1944 struct ata_drive_datas *drvp;
1945 {
1946 u_int32_t ret = 0;
1947 struct channel_softc *chp = drvp->chnl_softc;
1948 u_int8_t channel = chp->channel;
1949 u_int8_t drive = drvp->drive;
1950
1951 /*
1952 * If the drive is using UDMA, the timing setup is independent,
1953 * so just check DMA and PIO here.
1954 */
1955 if (drvp->drive_flags & DRIVE_DMA) {
1956 /* if the drive is in DMA mode 0, use compatible timings */
1957 if ((drvp->drive_flags & DRIVE_DMA) &&
1958 drvp->DMA_mode == 0) {
1959 drvp->PIO_mode = 0;
1960 return ret;
1961 }
1962 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1963 /*
1964 * If the PIO and DMA timings are the same, use fast timings
1965 * for PIO too; otherwise fall back to compatible timings.
1966 */
1967 if ((piix_isp_pio[drvp->PIO_mode] !=
1968 piix_isp_dma[drvp->DMA_mode]) ||
1969 (piix_rtc_pio[drvp->PIO_mode] !=
1970 piix_rtc_dma[drvp->DMA_mode]))
1971 drvp->PIO_mode = 0;
1972 /* if PIO mode <= 2, use compat timings for PIO */
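/*
 * DTE ("DMA timing enable only") restricts the fast timings to DMA
 * cycles, so PIO transfers keep using compatible timings.
 */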
1973 if (drvp->PIO_mode <= 2) {
1974 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1975 channel);
1976 return ret;
1977 }
1978 }
1979
1980 /*
1981 * Now setup PIO modes. If mode < 2, use compat timings.
1982 * Else enable fast timings. Enable IORDY and prefetch/post
1983 * if PIO mode >= 3.
1984 */
1985
1986 if (drvp->PIO_mode < 2)
1987 return ret;
1988
1989 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1990 if (drvp->PIO_mode >= 3) {
1991 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1992 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1993 }
1994 return ret;
1995 }
1996
1997 /* setup values in SIDETIM registers, based on mode */
1998 static u_int32_t
1999 piix_setup_sidetim_timings(mode, dma, channel)
2000 u_int8_t mode;
2001 u_int8_t dma;
2002 u_int8_t channel;
2003 {
2004 if (dma)
2005 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
2006 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
2007 else
2008 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
2009 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
2010 }
2011
2012 void
2013 amd7x6_chip_map(sc, pa)
2014 struct pciide_softc *sc;
2015 struct pci_attach_args *pa;
2016 {
2017 struct pciide_channel *cp;
2018 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2019 int channel;
2020 pcireg_t chanenable;
2021 bus_size_t cmdsize, ctlsize;
2022
2023 if (pciide_chipen(sc, pa) == 0)
2024 return;
2025 printf("%s: bus-master DMA support present",
2026 sc->sc_wdcdev.sc_dev.dv_xname);
2027 pciide_mapreg_dma(sc, pa);
2028 printf("\n");
2029 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2030 WDC_CAPABILITY_MODE;
2031 if (sc->sc_dma_ok) {
2032 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2033 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2034 sc->sc_wdcdev.irqack = pciide_irqack;
2035 }
2036 sc->sc_wdcdev.PIO_cap = 4;
2037 sc->sc_wdcdev.DMA_cap = 2;
2038
2039 switch (sc->sc_pci_vendor) {
2040 case PCI_VENDOR_AMD:
2041 switch (sc->sc_pp->ide_product) {
2042 case PCI_PRODUCT_AMD_PBC766_IDE:
2043 case PCI_PRODUCT_AMD_PBC768_IDE:
2044 case PCI_PRODUCT_AMD_PBC8111_IDE:
2045 sc->sc_wdcdev.UDMA_cap = 5;
2046 break;
2047 default:
2048 sc->sc_wdcdev.UDMA_cap = 4;
2049 }
2050 sc->sc_amd_regbase = AMD7X6_AMD_REGBASE;
2051 break;
2052
2053 case PCI_VENDOR_NVIDIA:
2054 switch (sc->sc_pp->ide_product) {
2055 case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
2056 sc->sc_wdcdev.UDMA_cap = 5;
2057 break;
2058 case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
2059 sc->sc_wdcdev.UDMA_cap = 5; /* XXX */
2060 break;
2061 }
2062 sc->sc_amd_regbase = AMD7X6_NVIDIA_REGBASE;
2063 break;
2064
2065 default:
2066 panic("amd7x6_chip_map: unknown vendor");
2067 }
2068 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2069 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2070 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2071 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag,
2072 AMD7X6_CHANSTATUS_EN(sc));
2073
2074 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2075 DEBUG_PROBE);
2076 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2077 cp = &sc->pciide_channels[channel];
2078 if (pciide_chansetup(sc, channel, interface) == 0)
2079 continue;
2080
2081 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2082 printf("%s: %s channel ignored (disabled)\n",
2083 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2084 continue;
2085 }
2086 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2087 pciide_pci_intr);
2088
2089 if (pciide_chan_candisable(cp))
2090 chanenable &= ~AMD7X6_CHAN_EN(channel);
2091 pciide_map_compat_intr(pa, cp, channel, interface);
2092 if (cp->hw_ok == 0)
2093 continue;
2094
2095 amd7x6_setup_channel(&cp->wdc_channel);
2096 }
2097 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN(sc),
2098 chanenable);
2099 return;
2100 }
2101
2102 void
2103 amd7x6_setup_channel(chp)
2104 struct channel_softc *chp;
2105 {
2106 u_int32_t udmatim_reg, datatim_reg;
2107 u_int8_t idedma_ctl;
2108 int mode, drive;
2109 struct ata_drive_datas *drvp;
2110 struct pciide_channel *cp = (struct pciide_channel*)chp;
2111 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2112 #ifndef PCIIDE_AMD756_ENABLEDMA
2113 int rev = PCI_REVISION(
2114 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2115 #endif
2116
2117 idedma_ctl = 0;
2118 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc));
2119 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc));
2120 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2121 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2122
2123 /* setup DMA if needed */
2124 pciide_channel_dma_setup(cp);
2125
2126 for (drive = 0; drive < 2; drive++) {
2127 drvp = &chp->ch_drive[drive];
2128 /* If no drive, skip */
2129 if ((drvp->drive_flags & DRIVE) == 0)
2130 continue;
2131 /* add timing values, setup DMA if needed */
2132 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2133 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2134 mode = drvp->PIO_mode;
2135 goto pio;
2136 }
2137 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2138 (drvp->drive_flags & DRIVE_UDMA)) {
2139 /* use Ultra/DMA */
2140 drvp->drive_flags &= ~DRIVE_DMA;
2141 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2142 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2143 AMD7X6_UDMA_TIME(chp->channel, drive,
2144 amd7x6_udma_tim[drvp->UDMA_mode]);
2145 /* can use PIO timings, MW DMA unused */
2146 mode = drvp->PIO_mode;
2147 } else {
2148 /* use Multiword DMA, but only if revision is OK */
2149 drvp->drive_flags &= ~DRIVE_UDMA;
2150 #ifndef PCIIDE_AMD756_ENABLEDMA
2151 /*
2152 * The hardware bug doesn't seem to be triggered by all
2153 * drives, so the workaround can be disabled by defining
2154 * PCIIDE_AMD756_ENABLEDMA. The bug causes a hard hang when
2155 * it is triggered.
2156 */
2157 if (sc->sc_pp->ide_product ==
2158 PCI_PRODUCT_AMD_PBC756_IDE &&
2159 AMD756_CHIPREV_DISABLEDMA(rev)) {
2160 printf("%s:%d:%d: multi-word DMA disabled due "
2161 "to chip revision\n",
2162 sc->sc_wdcdev.sc_dev.dv_xname,
2163 chp->channel, drive);
2164 mode = drvp->PIO_mode;
2165 drvp->drive_flags &= ~DRIVE_DMA;
2166 goto pio;
2167 }
2168 #endif
2169 /* mode = min(pio, dma+2) */
2170 if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2171 mode = drvp->PIO_mode;
2172 else
2173 mode = drvp->DMA_mode + 2;
2174 }
2175 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2176
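/*
 * The data timing register is shared between PIO and multiword DMA,
 * so the driver keeps DMA_mode at PIO_mode - 2 and programs a single
 * set of pulse/recovery timings below.
 */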
2177 pio: /* setup PIO mode */
2178 if (mode <= 2) {
2179 drvp->DMA_mode = 0;
2180 drvp->PIO_mode = 0;
2181 mode = 0;
2182 } else {
2183 drvp->PIO_mode = mode;
2184 drvp->DMA_mode = mode - 2;
2185 }
2186 datatim_reg |=
2187 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2188 amd7x6_pio_set[mode]) |
2189 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2190 amd7x6_pio_rec[mode]);
2191 }
2192 if (idedma_ctl != 0) {
2193 /* Add software bits in status register */
2194 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2195 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2196 idedma_ctl);
2197 }
2198 pciide_print_modes(cp);
2199 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc), datatim_reg);
2200 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc), udmatim_reg);
2201 }
2202
2203 void
2204 apollo_chip_map(sc, pa)
2205 struct pciide_softc *sc;
2206 struct pci_attach_args *pa;
2207 {
2208 struct pciide_channel *cp;
2209 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2210 int channel;
2211 u_int32_t ideconf;
2212 bus_size_t cmdsize, ctlsize;
2213 pcitag_t pcib_tag;
2214 pcireg_t pcib_id, pcib_class;
2215
2216 if (pciide_chipen(sc, pa) == 0)
2217 return;
2218 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2219 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2220 /* and read ID and rev of the ISA bridge */
2221 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2222 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2223 printf(": VIA Technologies ");
2224 switch (PCI_PRODUCT(pcib_id)) {
2225 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2226 printf("VT82C586 (Apollo VP) ");
2227 if (PCI_REVISION(pcib_class) >= 0x02) {
2228 printf("ATA33 controller\n");
2229 sc->sc_wdcdev.UDMA_cap = 2;
2230 } else {
2231 printf("controller\n");
2232 sc->sc_wdcdev.UDMA_cap = 0;
2233 }
2234 break;
2235 case PCI_PRODUCT_VIATECH_VT82C596A:
2236 printf("VT82C596A (Apollo Pro) ");
2237 if (PCI_REVISION(pcib_class) >= 0x12) {
2238 printf("ATA66 controller\n");
2239 sc->sc_wdcdev.UDMA_cap = 4;
2240 } else {
2241 printf("ATA33 controller\n");
2242 sc->sc_wdcdev.UDMA_cap = 2;
2243 }
2244 break;
2245 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2246 printf("VT82C686A (Apollo KX133) ");
2247 if (PCI_REVISION(pcib_class) >= 0x40) {
2248 printf("ATA100 controller\n");
2249 sc->sc_wdcdev.UDMA_cap = 5;
2250 } else {
2251 printf("ATA66 controller\n");
2252 sc->sc_wdcdev.UDMA_cap = 4;
2253 }
2254 break;
2255 case PCI_PRODUCT_VIATECH_VT8231:
2256 printf("VT8231 ATA100 controller\n");
2257 sc->sc_wdcdev.UDMA_cap = 5;
2258 break;
2259 case PCI_PRODUCT_VIATECH_VT8233:
2260 printf("VT8233 ATA100 controller\n");
2261 sc->sc_wdcdev.UDMA_cap = 5;
2262 break;
2263 case PCI_PRODUCT_VIATECH_VT8233A:
2264 printf("VT8233A ATA133 controller\n");
2265 sc->sc_wdcdev.UDMA_cap = 6;
2266 break;
2267 case PCI_PRODUCT_VIATECH_VT8235:
2268 printf("VT8235 ATA133 controller\n");
2269 sc->sc_wdcdev.UDMA_cap = 6;
2270 break;
2271 default:
2272 printf("unknown ATA controller\n");
2273 sc->sc_wdcdev.UDMA_cap = 0;
2274 }
2275
2276 printf("%s: bus-master DMA support present",
2277 sc->sc_wdcdev.sc_dev.dv_xname);
2278 pciide_mapreg_dma(sc, pa);
2279 printf("\n");
2280 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2281 WDC_CAPABILITY_MODE;
2282 if (sc->sc_dma_ok) {
2283 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2284 sc->sc_wdcdev.irqack = pciide_irqack;
2285 if (sc->sc_wdcdev.UDMA_cap > 0)
2286 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2287 }
2288 sc->sc_wdcdev.PIO_cap = 4;
2289 sc->sc_wdcdev.DMA_cap = 2;
2290 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2291 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2292 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2293
2294 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2295 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2296 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2297 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2298 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2299 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2300 DEBUG_PROBE);
2301
2302 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2303 cp = &sc->pciide_channels[channel];
2304 if (pciide_chansetup(sc, channel, interface) == 0)
2305 continue;
2306
2307 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2308 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2309 printf("%s: %s channel ignored (disabled)\n",
2310 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2311 continue;
2312 }
2313 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2314 pciide_pci_intr);
2315 if (cp->hw_ok == 0)
2316 continue;
2317 if (pciide_chan_candisable(cp)) {
2318 ideconf &= ~APO_IDECONF_EN(channel);
2319 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2320 ideconf);
2321 }
2322 pciide_map_compat_intr(pa, cp, channel, interface);
2323
2324 if (cp->hw_ok == 0)
2325 continue;
2326 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2327 }
2328 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2329 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2330 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2331 }
2332
2333 void
2334 apollo_setup_channel(chp)
2335 struct channel_softc *chp;
2336 {
2337 u_int32_t udmatim_reg, datatim_reg;
2338 u_int8_t idedma_ctl;
2339 int mode, drive;
2340 struct ata_drive_datas *drvp;
2341 struct pciide_channel *cp = (struct pciide_channel*)chp;
2342 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2343
2344 idedma_ctl = 0;
2345 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2346 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2347 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2348 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2349
2350 /* setup DMA if needed */
2351 pciide_channel_dma_setup(cp);
2352
2353 for (drive = 0; drive < 2; drive++) {
2354 drvp = &chp->ch_drive[drive];
2355 /* If no drive, skip */
2356 if ((drvp->drive_flags & DRIVE) == 0)
2357 continue;
2358 /* add timing values, setup DMA if needed */
2359 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2360 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2361 mode = drvp->PIO_mode;
2362 goto pio;
2363 }
2364 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2365 (drvp->drive_flags & DRIVE_UDMA)) {
2366 /* use Ultra/DMA */
2367 drvp->drive_flags &= ~DRIVE_DMA;
2368 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2369 APO_UDMA_EN_MTH(chp->channel, drive);
2370 if (sc->sc_wdcdev.UDMA_cap == 6) {
2371 /* 8233a */
2372 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2373 drive, apollo_udma133_tim[drvp->UDMA_mode]);
2374 } else if (sc->sc_wdcdev.UDMA_cap == 5) {
2375 /* 686b */
2376 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2377 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2378 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2379 /* 596b or 686a */
2380 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2381 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2382 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2383 } else {
2384 /* 596a or 586b */
2385 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2386 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2387 }
2388 /* can use PIO timings, MW DMA unused */
2389 mode = drvp->PIO_mode;
2390 } else {
2391 /* use Multiword DMA */
2392 drvp->drive_flags &= ~DRIVE_UDMA;
2393 /* mode = min(pio, dma+2) */
2394 if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2395 mode = drvp->PIO_mode;
2396 else
2397 mode = drvp->DMA_mode + 2;
2398 }
2399 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2400
2401 pio: /* setup PIO mode */
2402 if (mode <= 2) {
2403 drvp->DMA_mode = 0;
2404 drvp->PIO_mode = 0;
2405 mode = 0;
2406 } else {
2407 drvp->PIO_mode = mode;
2408 drvp->DMA_mode = mode - 2;
2409 }
2410 datatim_reg |=
2411 APO_DATATIM_PULSE(chp->channel, drive,
2412 apollo_pio_set[mode]) |
2413 APO_DATATIM_RECOV(chp->channel, drive,
2414 apollo_pio_rec[mode]);
2415 }
2416 if (idedma_ctl != 0) {
2417 /* Add software bits in status register */
2418 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2419 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2420 idedma_ctl);
2421 }
2422 pciide_print_modes(cp);
2423 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2424 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2425 }
2426
2427 void
2428 cmd_channel_map(pa, sc, channel)
2429 struct pci_attach_args *pa;
2430 struct pciide_softc *sc;
2431 int channel;
2432 {
2433 struct pciide_channel *cp = &sc->pciide_channels[channel];
2434 bus_size_t cmdsize, ctlsize;
2435 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2436 int interface, one_channel;
2437
2438 /*
2439 * The 0648/0649 can be told to identify as a RAID controller.
2440 * In this case, we have to fake the interface value.
2441 */
2442 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2443 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2444 PCIIDE_INTERFACE_SETTABLE(1);
2445 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2446 CMD_CONF_DSA1)
2447 interface |= PCIIDE_INTERFACE_PCI(0) |
2448 PCIIDE_INTERFACE_PCI(1);
2449 } else {
2450 interface = PCI_INTERFACE(pa->pa_class);
2451 }
2452
2453 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2454 cp->name = PCIIDE_CHANNEL_NAME(channel);
2455 cp->wdc_channel.channel = channel;
2456 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2457
2458 /*
2459 * Older CMD64X chips don't have independent channels
2460 */
2461 switch (sc->sc_pp->ide_product) {
2462 case PCI_PRODUCT_CMDTECH_649:
2463 one_channel = 0;
2464 break;
2465 default:
2466 one_channel = 1;
2467 break;
2468 }
2469
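/*
 * When the channels are not independent, the second channel shares
 * the first channel's command queue, so commands issued to the two
 * channels are serialized.
 */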
2470 if (channel > 0 && one_channel) {
2471 cp->wdc_channel.ch_queue =
2472 sc->pciide_channels[0].wdc_channel.ch_queue;
2473 } else {
2474 cp->wdc_channel.ch_queue =
2475 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2476 }
2477 if (cp->wdc_channel.ch_queue == NULL) {
2478 printf("%s %s channel: "
2479 "can't allocate memory for command queue",
2480 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2481 return;
2482 }
2483
2484 printf("%s: %s channel %s to %s mode\n",
2485 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2486 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2487 "configured" : "wired",
2488 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2489 "native-PCI" : "compatibility");
2490
2491 /*
2492 * With a CMD PCI64x, if we get here, the first channel is enabled:
2493 * there's no way to disable the first channel without disabling
2494 * the whole device.
2495 */
2496 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2497 printf("%s: %s channel ignored (disabled)\n",
2498 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2499 return;
2500 }
2501
2502 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2503 if (cp->hw_ok == 0)
2504 return;
2505 if (channel == 1) {
2506 if (pciide_chan_candisable(cp)) {
2507 ctrl &= ~CMD_CTRL_2PORT;
2508 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2509 CMD_CTRL, ctrl);
2510 }
2511 }
2512 pciide_map_compat_intr(pa, cp, channel, interface);
2513 }
2514
2515 int
2516 cmd_pci_intr(arg)
2517 void *arg;
2518 {
2519 struct pciide_softc *sc = arg;
2520 struct pciide_channel *cp;
2521 struct channel_softc *wdc_cp;
2522 int i, rv, crv;
2523 u_int32_t priirq, secirq;
2524
2525 rv = 0;
2526 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2527 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2528 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2529 cp = &sc->pciide_channels[i];
2530 wdc_cp = &cp->wdc_channel;
2531 /* Skip compat channels. */
2532 if (cp->compat)
2533 continue;
2534 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2535 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2536 crv = wdcintr(wdc_cp);
2537 if (crv == 0)
2538 printf("%s:%d: bogus intr\n",
2539 sc->sc_wdcdev.sc_dev.dv_xname, i);
2540 else
2541 rv = 1;
2542 }
2543 }
2544 return rv;
2545 }
2546
2547 void
2548 cmd_chip_map(sc, pa)
2549 struct pciide_softc *sc;
2550 struct pci_attach_args *pa;
2551 {
2552 int channel;
2553
2554 /*
2555 * On a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2556 * and the base address registers can be disabled at the
2557 * hardware level. In this case, the device is wired
2558 * in compat mode and its first channel is always enabled,
2559 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2560 * In fact, it seems that the first channel of the CMD PCI0640
2561 * can't be disabled.
2562 */
2563
2564 #ifdef PCIIDE_CMD064x_DISABLE
2565 if (pciide_chipen(sc, pa) == 0)
2566 return;
2567 #endif
2568
2569 printf("%s: hardware does not support DMA\n",
2570 sc->sc_wdcdev.sc_dev.dv_xname);
2571 sc->sc_dma_ok = 0;
2572
2573 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2574 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2575 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2576
2577 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2578 cmd_channel_map(pa, sc, channel);
2579 }
2580 }
2581
2582 void
2583 cmd0643_9_chip_map(sc, pa)
2584 struct pciide_softc *sc;
2585 struct pci_attach_args *pa;
2586 {
2587 struct pciide_channel *cp;
2588 int channel;
2589 pcireg_t rev = PCI_REVISION(pa->pa_class);
2590
2591 /*
2592 * On a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2593 * and the base address registers can be disabled at the
2594 * hardware level. In this case, the device is wired
2595 * in compat mode and its first channel is always enabled,
2596 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2597 * In fact, it seems that the first channel of the CMD PCI0640
2598 * can't be disabled.
2599 */
2600
2601 #ifdef PCIIDE_CMD064x_DISABLE
2602 if (pciide_chipen(sc, pa) == 0)
2603 return;
2604 #endif
2605 printf("%s: bus-master DMA support present",
2606 sc->sc_wdcdev.sc_dev.dv_xname);
2607 pciide_mapreg_dma(sc, pa);
2608 printf("\n");
2609 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2610 WDC_CAPABILITY_MODE;
2611 if (sc->sc_dma_ok) {
2612 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2613 switch (sc->sc_pp->ide_product) {
2614 case PCI_PRODUCT_CMDTECH_649:
2615 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2616 sc->sc_wdcdev.UDMA_cap = 5;
2617 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2618 break;
2619 case PCI_PRODUCT_CMDTECH_648:
2620 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2621 sc->sc_wdcdev.UDMA_cap = 4;
2622 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2623 break;
2624 case PCI_PRODUCT_CMDTECH_646:
2625 if (rev >= CMD0646U2_REV) {
2626 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2627 sc->sc_wdcdev.UDMA_cap = 2;
2628 } else if (rev >= CMD0646U_REV) {
2629 /*
2630 * Linux's driver claims that the 646U is broken
2631 * with UDMA. Only enable it if we know what we're
2632 * doing
2633 */
2634 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2635 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2636 sc->sc_wdcdev.UDMA_cap = 2;
2637 #endif
2638 /* explicitly disable UDMA */
2639 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2640 CMD_UDMATIM(0), 0);
2641 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2642 CMD_UDMATIM(1), 0);
2643 }
2644 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2645 break;
2646 default:
2647 sc->sc_wdcdev.irqack = pciide_irqack;
2648 }
2649 }
2650
2651 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2652 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2653 sc->sc_wdcdev.PIO_cap = 4;
2654 sc->sc_wdcdev.DMA_cap = 2;
2655 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2656
2657 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2658 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2659 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2660 DEBUG_PROBE);
2661
2662 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2663 cp = &sc->pciide_channels[channel];
2664 cmd_channel_map(pa, sc, channel);
2665 if (cp->hw_ok == 0)
2666 continue;
2667 cmd0643_9_setup_channel(&cp->wdc_channel);
2668 }
2669 /*
2670 * Note: this also makes sure we clear the IRQ disable and reset
2671 * bits.
2672 */
2673 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2674 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2675 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2676 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2677 DEBUG_PROBE);
2678 }
2679
2680 void
2681 cmd0643_9_setup_channel(chp)
2682 struct channel_softc *chp;
2683 {
2684 struct ata_drive_datas *drvp;
2685 u_int8_t tim;
2686 u_int32_t idedma_ctl, udma_reg;
2687 int drive;
2688 struct pciide_channel *cp = (struct pciide_channel*)chp;
2689 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2690
2691 idedma_ctl = 0;
2692 /* setup DMA if needed */
2693 pciide_channel_dma_setup(cp);
2694
2695 for (drive = 0; drive < 2; drive++) {
2696 drvp = &chp->ch_drive[drive];
2697 /* If no drive, skip */
2698 if ((drvp->drive_flags & DRIVE) == 0)
2699 continue;
2700 /* add timing values, setup DMA if needed */
2701 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2702 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2703 if (drvp->drive_flags & DRIVE_UDMA) {
2704 /* UltraDMA on a 646U2, 0648 or 0649 */
2705 drvp->drive_flags &= ~DRIVE_DMA;
2706 udma_reg = pciide_pci_read(sc->sc_pc,
2707 sc->sc_tag, CMD_UDMATIM(chp->channel));
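/*
 * CMD_BICSR reports an 80-wire cable per channel; if it is absent,
 * limit the drive to UDMA2.
 */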
2708 if (drvp->UDMA_mode > 2 &&
2709 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2710 CMD_BICSR) &
2711 CMD_BICSR_80(chp->channel)) == 0)
2712 drvp->UDMA_mode = 2;
2713 if (drvp->UDMA_mode > 2)
2714 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2715 else if (sc->sc_wdcdev.UDMA_cap > 2)
2716 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2717 udma_reg |= CMD_UDMATIM_UDMA(drive);
2718 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2719 CMD_UDMATIM_TIM_OFF(drive));
2720 udma_reg |=
2721 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2722 CMD_UDMATIM_TIM_OFF(drive));
2723 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2724 CMD_UDMATIM(chp->channel), udma_reg);
2725 } else {
2726 /*
2727 * Use Multiword DMA.
2728 * Timings will be used for both PIO and DMA,
2729 * so adjust the DMA mode if needed.
2730 * If we have a 0646U2/8/9, turn off UDMA.
2731 */
2732 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2733 udma_reg = pciide_pci_read(sc->sc_pc,
2734 sc->sc_tag,
2735 CMD_UDMATIM(chp->channel));
2736 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2737 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2738 CMD_UDMATIM(chp->channel),
2739 udma_reg);
2740 }
2741 if (drvp->PIO_mode >= 3 &&
2742 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2743 drvp->DMA_mode = drvp->PIO_mode - 2;
2744 }
2745 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2746 }
2747 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2748 }
2749 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2750 CMD_DATA_TIM(chp->channel, drive), tim);
2751 }
2752 if (idedma_ctl != 0) {
2753 /* Add software bits in status register */
2754 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2755 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2756 idedma_ctl);
2757 }
2758 pciide_print_modes(cp);
2759 }
2760
2761 void
2762 cmd646_9_irqack(chp)
2763 struct channel_softc *chp;
2764 {
2765 u_int32_t priirq, secirq;
2766 struct pciide_channel *cp = (struct pciide_channel*)chp;
2767 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2768
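/*
 * The per-channel interrupt status bits live in CMD_CONF (primary)
 * and CMD_ARTTIM23 (secondary); writing the register back with the
 * bit still set acknowledges the pending interrupt.
 */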
2769 if (chp->channel == 0) {
2770 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2771 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2772 } else {
2773 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2774 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2775 }
2776 pciide_irqack(chp);
2777 }
2778
2779 void
2780 cmd680_chip_map(sc, pa)
2781 struct pciide_softc *sc;
2782 struct pci_attach_args *pa;
2783 {
2784 struct pciide_channel *cp;
2785 int channel;
2786
2787 if (pciide_chipen(sc, pa) == 0)
2788 return;
2789 printf("%s: bus-master DMA support present",
2790 sc->sc_wdcdev.sc_dev.dv_xname);
2791 pciide_mapreg_dma(sc, pa);
2792 printf("\n");
2793 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2794 WDC_CAPABILITY_MODE;
2795 if (sc->sc_dma_ok) {
2796 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2797 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2798 sc->sc_wdcdev.UDMA_cap = 6;
2799 sc->sc_wdcdev.irqack = pciide_irqack;
2800 }
2801
2802 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2803 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2804 sc->sc_wdcdev.PIO_cap = 4;
2805 sc->sc_wdcdev.DMA_cap = 2;
2806 sc->sc_wdcdev.set_modes = cmd680_setup_channel;
2807
2808 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
2809 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
2810 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
2811 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);
2812 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2813 cp = &sc->pciide_channels[channel];
2814 cmd680_channel_map(pa, sc, channel);
2815 if (cp->hw_ok == 0)
2816 continue;
2817 cmd680_setup_channel(&cp->wdc_channel);
2818 }
2819 }
2820
2821 void
2822 cmd680_channel_map(pa, sc, channel)
2823 struct pci_attach_args *pa;
2824 struct pciide_softc *sc;
2825 int channel;
2826 {
2827 struct pciide_channel *cp = &sc->pciide_channels[channel];
2828 bus_size_t cmdsize, ctlsize;
2829 int interface, i, reg;
2830 static const u_int8_t init_val[] =
2831 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
2832 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };
2833
2834 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2835 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2836 PCIIDE_INTERFACE_SETTABLE(1);
2837 interface |= PCIIDE_INTERFACE_PCI(0) |
2838 PCIIDE_INTERFACE_PCI(1);
2839 } else {
2840 interface = PCI_INTERFACE(pa->pa_class);
2841 }
2842
2843 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2844 cp->name = PCIIDE_CHANNEL_NAME(channel);
2845 cp->wdc_channel.channel = channel;
2846 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2847
2848 cp->wdc_channel.ch_queue =
2849 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2850 if (cp->wdc_channel.ch_queue == NULL) {
2851 printf("%s %s channel: "
2852 "can't allocate memory for command queue",
2853 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2854 return;
2855 }
2856
2857 /* XXX */
2858 reg = 0xa2 + channel * 16;
2859 for (i = 0; i < sizeof(init_val); i++)
2860 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);
2861
2862 printf("%s: %s channel %s to %s mode\n",
2863 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2864 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2865 "configured" : "wired",
2866 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2867 "native-PCI" : "compatibility");
2868
2869 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
2870 if (cp->hw_ok == 0)
2871 return;
2872 pciide_map_compat_intr(pa, cp, channel, interface);
2873 }
2874
2875 void
2876 cmd680_setup_channel(chp)
2877 struct channel_softc *chp;
2878 {
2879 struct ata_drive_datas *drvp;
2880 u_int8_t mode, off, scsc;
2881 u_int16_t val;
2882 u_int32_t idedma_ctl;
2883 int drive;
2884 struct pciide_channel *cp = (struct pciide_channel*)chp;
2885 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2886 pci_chipset_tag_t pc = sc->sc_pc;
2887 pcitag_t pa = sc->sc_tag;
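/*
 * Raw timing-register values indexed by transfer mode.  udma2_tbl is
 * used when the SCSC bits are set in register 0x8a (apparently the
 * faster controller clock), udma_tbl otherwise; dma_tbl and pio_tbl
 * hold the multiword DMA and PIO timings.
 */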
2888 static const u_int8_t udma2_tbl[] =
2889 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
2890 static const u_int8_t udma_tbl[] =
2891 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
2892 static const u_int16_t dma_tbl[] =
2893 { 0x2208, 0x10c2, 0x10c1 };
2894 static const u_int16_t pio_tbl[] =
2895 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
2896
2897 idedma_ctl = 0;
2898 pciide_channel_dma_setup(cp);
2899 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4);
2900
2901 for (drive = 0; drive < 2; drive++) {
2902 drvp = &chp->ch_drive[drive];
2903 /* If no drive, skip */
2904 if ((drvp->drive_flags & DRIVE) == 0)
2905 continue;
2906 mode &= ~(0x03 << (drive * 4));
2907 if (drvp->drive_flags & DRIVE_UDMA) {
2908 drvp->drive_flags &= ~DRIVE_DMA;
2909 off = 0xa0 + chp->channel * 16;
2910 if (drvp->UDMA_mode > 2 &&
2911 (pciide_pci_read(pc, pa, off) & 0x01) == 0)
2912 drvp->UDMA_mode = 2;
2913 scsc = pciide_pci_read(pc, pa, 0x8a);
2914 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
2915 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
2916 scsc = pciide_pci_read(pc, pa, 0x8a);
2917 if ((scsc & 0x30) == 0)
2918 drvp->UDMA_mode = 5;
2919 }
2920 mode |= 0x03 << (drive * 4);
2921 off = 0xac + chp->channel * 16 + drive * 2;
2922 val = pciide_pci_read(pc, pa, off) & ~0x3f;
2923 if (scsc & 0x30)
2924 val |= udma2_tbl[drvp->UDMA_mode];
2925 else
2926 val |= udma_tbl[drvp->UDMA_mode];
2927 pciide_pci_write(pc, pa, off, val);
2928 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2929 } else if (drvp->drive_flags & DRIVE_DMA) {
2930 mode |= 0x02 << (drive * 4);
2931 off = 0xa8 + chp->channel * 16 + drive * 2;
2932 val = dma_tbl[drvp->DMA_mode];
2933 pciide_pci_write(pc, pa, off, val & 0xff);
2934 pciide_pci_write(pc, pa, off, val >> 8);
2935 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2936 } else {
2937 mode |= 0x01 << (drive * 4);
2938 off = 0xa4 + chp->channel * 16 + drive * 2;
2939 val = pio_tbl[drvp->PIO_mode];
2940 pciide_pci_write(pc, pa, off, val & 0xff);
2941 pciide_pci_write(pc, pa, off, val >> 8);
2942 }
2943 }
2944
2945 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
2946 if (idedma_ctl != 0) {
2947 /* Add software bits in status register */
2948 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2949 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2950 idedma_ctl);
2951 }
2952 pciide_print_modes(cp);
2953 }
2954
2955 void
2956 cy693_chip_map(sc, pa)
2957 struct pciide_softc *sc;
2958 struct pci_attach_args *pa;
2959 {
2960 struct pciide_channel *cp;
2961 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2962 bus_size_t cmdsize, ctlsize;
2963
2964 if (pciide_chipen(sc, pa) == 0)
2965 return;
2966 /*
2967 * This chip has 2 PCI IDE functions, one for the primary and one
2968 * for the secondary channel, so we need to call
2969 * pciide_mapregs_compat() with the real channel.
2970 */
2971 if (pa->pa_function == 1) {
2972 sc->sc_cy_compatchan = 0;
2973 } else if (pa->pa_function == 2) {
2974 sc->sc_cy_compatchan = 1;
2975 } else {
2976 printf("%s: unexpected PCI function %d\n",
2977 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2978 return;
2979 }
2980 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2981 printf("%s: bus-master DMA support present",
2982 sc->sc_wdcdev.sc_dev.dv_xname);
2983 pciide_mapreg_dma(sc, pa);
2984 } else {
2985 printf("%s: hardware does not support DMA",
2986 sc->sc_wdcdev.sc_dev.dv_xname);
2987 sc->sc_dma_ok = 0;
2988 }
2989 printf("\n");
2990
2991 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2992 if (sc->sc_cy_handle == NULL) {
2993 printf("%s: unable to map hyperCache control registers\n",
2994 sc->sc_wdcdev.sc_dev.dv_xname);
2995 sc->sc_dma_ok = 0;
2996 }
2997
2998 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2999 WDC_CAPABILITY_MODE;
3000 if (sc->sc_dma_ok) {
3001 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3002 sc->sc_wdcdev.irqack = pciide_irqack;
3003 }
3004 sc->sc_wdcdev.PIO_cap = 4;
3005 sc->sc_wdcdev.DMA_cap = 2;
3006 sc->sc_wdcdev.set_modes = cy693_setup_channel;
3007
3008 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3009 sc->sc_wdcdev.nchannels = 1;
3010
3011 /* Only one channel for this chip; if we are here it's enabled */
3012 cp = &sc->pciide_channels[0];
3013 sc->wdc_chanarray[0] = &cp->wdc_channel;
3014 cp->name = PCIIDE_CHANNEL_NAME(0);
3015 cp->wdc_channel.channel = 0;
3016 cp->wdc_channel.wdc = &sc->sc_wdcdev;
3017 cp->wdc_channel.ch_queue =
3018 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
3019 if (cp->wdc_channel.ch_queue == NULL) {
3020 printf("%s primary channel: "
3021 "can't allocate memory for command queue",
3022 sc->sc_wdcdev.sc_dev.dv_xname);
3023 return;
3024 }
3025 printf("%s: primary channel %s to ",
3026 sc->sc_wdcdev.sc_dev.dv_xname,
3027 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
3028 "configured" : "wired");
3029 if (interface & PCIIDE_INTERFACE_PCI(0)) {
3030 printf("native-PCI");
3031 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
3032 pciide_pci_intr);
3033 } else {
3034 printf("compatibility");
3035 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
3036 &cmdsize, &ctlsize);
3037 }
3038 printf(" mode\n");
3039 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3040 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3041 wdcattach(&cp->wdc_channel);
3042 if (pciide_chan_candisable(cp)) {
3043 pci_conf_write(sc->sc_pc, sc->sc_tag,
3044 PCI_COMMAND_STATUS_REG, 0);
3045 }
3046 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
3047 if (cp->hw_ok == 0)
3048 return;
3049 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
3050 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
3051 cy693_setup_channel(&cp->wdc_channel);
3052 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
3053 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
3054 }
3055
3056 void
3057 cy693_setup_channel(chp)
3058 struct channel_softc *chp;
3059 {
3060 struct ata_drive_datas *drvp;
3061 int drive;
3062 u_int32_t cy_cmd_ctrl;
3063 u_int32_t idedma_ctl;
3064 struct pciide_channel *cp = (struct pciide_channel*)chp;
3065 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3066 int dma_mode = -1;
3067
3068 cy_cmd_ctrl = idedma_ctl = 0;
3069
3070 /* setup DMA if needed */
3071 pciide_channel_dma_setup(cp);
3072
3073 for (drive = 0; drive < 2; drive++) {
3074 drvp = &chp->ch_drive[drive];
3075 /* If no drive, skip */
3076 if ((drvp->drive_flags & DRIVE) == 0)
3077 continue;
3078 /* add timing values, setup DMA if needed */
3079 if (drvp->drive_flags & DRIVE_DMA) {
3080 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3081 /* use Multiword DMA */
3082 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
3083 dma_mode = drvp->DMA_mode;
3084 }
3085 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3086 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
3087 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3088 CY_CMD_CTRL_IOW_REC_OFF(drive));
3089 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3090 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
3091 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3092 CY_CMD_CTRL_IOR_REC_OFF(drive));
3093 }
3094 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
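/*
 * The cy82c693 has a single DMA timing setting shared by both
 * drives, so the lowest DMA mode found above is applied to both.
 */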
3095 chp->ch_drive[0].DMA_mode = dma_mode;
3096 chp->ch_drive[1].DMA_mode = dma_mode;
3097
3098 if (dma_mode == -1)
3099 dma_mode = 0;
3100
3101 if (sc->sc_cy_handle != NULL) {
3102 /* Note: `multiple' is implied. */
3103 cy82c693_write(sc->sc_cy_handle,
3104 (sc->sc_cy_compatchan == 0) ?
3105 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
3106 }
3107
3108 pciide_print_modes(cp);
3109
3110 if (idedma_ctl != 0) {
3111 /* Add software bits in status register */
3112 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3113 IDEDMA_CTL, idedma_ctl);
3114 }
3115 }
3116
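/*
 * pci_find_device() callback: match the SiS host bridges for which
 * sis_chip_map() enables UDMA/100.
 */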
3117 static int
3118 sis_hostbr_match(pa)
3119 struct pci_attach_args *pa;
3120 {
3121 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
3122 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
3123 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
3124 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
3125 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735) ||
3126 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_745)));
3127 }
3128
3129 void
3130 sis_chip_map(sc, pa)
3131 struct pciide_softc *sc;
3132 struct pci_attach_args *pa;
3133 {
3134 struct pciide_channel *cp;
3135 int channel;
3136 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
3137 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3138 pcireg_t rev = PCI_REVISION(pa->pa_class);
3139 bus_size_t cmdsize, ctlsize;
3140 pcitag_t pchb_tag;
3141 pcireg_t pchb_id, pchb_class;
3142
3143 if (pciide_chipen(sc, pa) == 0)
3144 return;
3145 printf("%s: bus-master DMA support present",
3146 sc->sc_wdcdev.sc_dev.dv_xname);
3147 pciide_mapreg_dma(sc, pa);
3148 printf("\n");
3149
3150 /* get a PCI tag for the host bridge (function 0 of the same device) */
3151 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
3152 /* and read the ID and revision of the host bridge */
3153 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
3154 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
3155
3156 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3157 WDC_CAPABILITY_MODE;
3158 if (sc->sc_dma_ok) {
3159 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3160 sc->sc_wdcdev.irqack = pciide_irqack;
3161 /*
3162 * Controllers associated with a rev 0x2 530 host-to-PCI bridge
3163 * have problems with UDMA (info provided by Christos).
3164 */
3165 if (rev >= 0xd0 &&
3166 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
3167 PCI_REVISION(pchb_class) >= 0x03))
3168 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3169 }
3170
3171 sc->sc_wdcdev.PIO_cap = 4;
3172 sc->sc_wdcdev.DMA_cap = 2;
3173 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
3174 /*
3175 * Use UDMA/100 on chipsets whose host bridge matches
3176 * sis_hostbr_match() (SiS 645/650/730/735/745); UDMA/33 otherwise.
3177 */
3178 sc->sc_wdcdev.UDMA_cap =
3179 pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
3180 sc->sc_wdcdev.set_modes = sis_setup_channel;
3181
3182 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3183 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3184
3185 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
3186 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
3187 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
3188
3189 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3190 cp = &sc->pciide_channels[channel];
3191 if (pciide_chansetup(sc, channel, interface) == 0)
3192 continue;
3193 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
3194 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
3195 printf("%s: %s channel ignored (disabled)\n",
3196 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3197 continue;
3198 }
3199 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3200 pciide_pci_intr);
3201 if (cp->hw_ok == 0)
3202 continue;
3203 if (pciide_chan_candisable(cp)) {
3204 if (channel == 0)
3205 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
3206 else
3207 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
3208 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
3209 sis_ctr0);
3210 }
3211 pciide_map_compat_intr(pa, cp, channel, interface);
3212 if (cp->hw_ok == 0)
3213 continue;
3214 sis_setup_channel(&cp->wdc_channel);
3215 }
3216 }
3217
3218 void
3219 sis_setup_channel(chp)
3220 struct channel_softc *chp;
3221 {
3222 struct ata_drive_datas *drvp;
3223 int drive;
3224 u_int32_t sis_tim;
3225 u_int32_t idedma_ctl;
3226 struct pciide_channel *cp = (struct pciide_channel*)chp;
3227 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3228
3229 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3230 "channel %d 0x%x\n", chp->channel,
3231 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3232 DEBUG_PROBE);
3233 sis_tim = 0;
3234 idedma_ctl = 0;
3235 /* setup DMA if needed */
3236 pciide_channel_dma_setup(cp);
3237
3238 for (drive = 0; drive < 2; drive++) {
3239 drvp = &chp->ch_drive[drive];
3240 /* If no drive, skip */
3241 if ((drvp->drive_flags & DRIVE) == 0)
3242 continue;
3243 /* add timing values, setup DMA if needed */
3244 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3245 (drvp->drive_flags & DRIVE_UDMA) == 0)
3246 goto pio;
3247
3248 if (drvp->drive_flags & DRIVE_UDMA) {
3249 /* use Ultra/DMA */
3250 drvp->drive_flags &= ~DRIVE_DMA;
3251 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
3252 SIS_TIM_UDMA_TIME_OFF(drive);
3253 sis_tim |= SIS_TIM_UDMA_EN(drive);
3254 } else {
3255 /*
3256 * Use Multiword DMA.
3257 * Timings will be used for both PIO and DMA,
3258 * so adjust the DMA mode if needed.
3259 */
3260 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3261 drvp->PIO_mode = drvp->DMA_mode + 2;
3262 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3263 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3264 drvp->PIO_mode - 2 : 0;
3265 if (drvp->DMA_mode == 0)
3266 drvp->PIO_mode = 0;
3267 }
3268 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3269 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3270 SIS_TIM_ACT_OFF(drive);
3271 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3272 SIS_TIM_REC_OFF(drive);
3273 }
3274 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3275 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3276 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3277 if (idedma_ctl != 0) {
3278 /* Add software bits in status register */
3279 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3280 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3281 idedma_ctl);
3282 }
3283 pciide_print_modes(cp);
3284 }
3285
3286 void
3287 acer_chip_map(sc, pa)
3288 struct pciide_softc *sc;
3289 struct pci_attach_args *pa;
3290 {
3291 struct pciide_channel *cp;
3292 int channel;
3293 pcireg_t cr, interface;
3294 bus_size_t cmdsize, ctlsize;
3295 pcireg_t rev = PCI_REVISION(pa->pa_class);
3296
3297 if (pciide_chipen(sc, pa) == 0)
3298 return;
3299 printf("%s: bus-master DMA support present",
3300 sc->sc_wdcdev.sc_dev.dv_xname);
3301 pciide_mapreg_dma(sc, pa);
3302 printf("\n");
3303 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3304 WDC_CAPABILITY_MODE;
3305 if (sc->sc_dma_ok) {
3306 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3307 if (rev >= 0x20) {
3308 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3309 if (rev >= 0xC4)
3310 sc->sc_wdcdev.UDMA_cap = 5;
3311 else if (rev >= 0xC2)
3312 sc->sc_wdcdev.UDMA_cap = 4;
3313 else
3314 sc->sc_wdcdev.UDMA_cap = 2;
3315 }
3316 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3317 sc->sc_wdcdev.irqack = pciide_irqack;
3318 }
3319
3320 sc->sc_wdcdev.PIO_cap = 4;
3321 sc->sc_wdcdev.DMA_cap = 2;
3322 sc->sc_wdcdev.set_modes = acer_setup_channel;
3323 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3324 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3325
3326 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3327 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3328 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3329
3330 /* Enable "microsoft register bits" R/W. */
3331 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3332 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3333 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3334 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3335 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3336 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3337 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3338 ~ACER_CHANSTATUSREGS_RO);
3339 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3340 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3341 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3342 /* Don't use cr, re-read the real register content instead */
3343 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3344 PCI_CLASS_REG));
3345
3346 /* From linux: enable "Cable Detection" */
3347 if (rev >= 0xC2) {
3348 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3349 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3350 | ACER_0x4B_CDETECT);
3351 }
3352
3353 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3354 cp = &sc->pciide_channels[channel];
3355 if (pciide_chansetup(sc, channel, interface) == 0)
3356 continue;
3357 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3358 printf("%s: %s channel ignored (disabled)\n",
3359 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3360 continue;
3361 }
3362 /* newer controllers seem to lack the ACER_CHIDS register. Sigh */
3363 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3364 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3365 if (cp->hw_ok == 0)
3366 continue;
3367 if (pciide_chan_candisable(cp)) {
3368 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3369 pci_conf_write(sc->sc_pc, sc->sc_tag,
3370 PCI_CLASS_REG, cr);
3371 }
3372 pciide_map_compat_intr(pa, cp, channel, interface);
3373 acer_setup_channel(&cp->wdc_channel);
3374 }
3375 }
3376
3377 void
3378 acer_setup_channel(chp)
3379 struct channel_softc *chp;
3380 {
3381 struct ata_drive_datas *drvp;
3382 int drive;
3383 u_int32_t acer_fifo_udma;
3384 u_int32_t idedma_ctl;
3385 struct pciide_channel *cp = (struct pciide_channel*)chp;
3386 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3387
3388 idedma_ctl = 0;
3389 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3390 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3391 acer_fifo_udma), DEBUG_PROBE);
3392 /* setup DMA if needed */
3393 pciide_channel_dma_setup(cp);
3394
3395 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3396 DRIVE_UDMA) { /* check for an 80-pin cable */
3397 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3398 ACER_0x4A_80PIN(chp->channel)) {
3399 if (chp->ch_drive[0].UDMA_mode > 2)
3400 chp->ch_drive[0].UDMA_mode = 2;
3401 if (chp->ch_drive[1].UDMA_mode > 2)
3402 chp->ch_drive[1].UDMA_mode = 2;
3403 }
3404 }
3405
3406 for (drive = 0; drive < 2; drive++) {
3407 drvp = &chp->ch_drive[drive];
3408 /* If no drive, skip */
3409 if ((drvp->drive_flags & DRIVE) == 0)
3410 continue;
3411 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3412 "channel %d drive %d 0x%x\n", chp->channel, drive,
3413 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3414 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3415 /* clear FIFO/DMA mode */
3416 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3417 ACER_UDMA_EN(chp->channel, drive) |
3418 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3419
3420 /* add timing values, setup DMA if needed */
3421 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3422 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3423 acer_fifo_udma |=
3424 ACER_FTH_OPL(chp->channel, drive, 0x1);
3425 goto pio;
3426 }
3427
3428 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3429 if (drvp->drive_flags & DRIVE_UDMA) {
3430 /* use Ultra/DMA */
3431 drvp->drive_flags &= ~DRIVE_DMA;
3432 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3433 acer_fifo_udma |=
3434 ACER_UDMA_TIM(chp->channel, drive,
3435 acer_udma[drvp->UDMA_mode]);
3436 /* XXX disable if one drive < UDMA3 ? */
3437 if (drvp->UDMA_mode >= 3) {
3438 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3439 ACER_0x4B,
3440 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3441 ACER_0x4B) | ACER_0x4B_UDMA66);
3442 }
3443 } else {
3444 /*
3445 * Use Multiword DMA.
3446 * Timings will be used for both PIO and DMA,
3447 * so adjust the DMA mode if needed.
3448 */
3449 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3450 drvp->PIO_mode = drvp->DMA_mode + 2;
3451 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3452 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3453 drvp->PIO_mode - 2 : 0;
3454 if (drvp->DMA_mode == 0)
3455 drvp->PIO_mode = 0;
3456 }
3457 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3458 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3459 ACER_IDETIM(chp->channel, drive),
3460 acer_pio[drvp->PIO_mode]);
3461 }
3462 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3463 acer_fifo_udma), DEBUG_PROBE);
3464 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3465 if (idedma_ctl != 0) {
3466 /* Add software bits in status register */
3467 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3468 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3469 idedma_ctl);
3470 }
3471 pciide_print_modes(cp);
3472 }
3473
3474 int
3475 acer_pci_intr(arg)
3476 void *arg;
3477 {
3478 struct pciide_softc *sc = arg;
3479 struct pciide_channel *cp;
3480 struct channel_softc *wdc_cp;
3481 int i, rv, crv;
3482 u_int32_t chids;
3483
3484 rv = 0;
3485 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3486 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3487 cp = &sc->pciide_channels[i];
3488 wdc_cp = &cp->wdc_channel;
3489 /* Skip compat channels. */
3490 if (cp->compat)
3491 continue;
3492 if (chids & ACER_CHIDS_INT(i)) {
3493 crv = wdcintr(wdc_cp);
3494 if (crv == 0)
3495 printf("%s:%d: bogus intr\n",
3496 sc->sc_wdcdev.sc_dev.dv_xname, i);
3497 else
3498 rv = 1;
3499 }
3500 }
3501 return rv;
3502 }
3503
3504 void
3505 hpt_chip_map(sc, pa)
3506 struct pciide_softc *sc;
3507 struct pci_attach_args *pa;
3508 {
3509 struct pciide_channel *cp;
3510 int i, compatchan, revision;
3511 pcireg_t interface;
3512 bus_size_t cmdsize, ctlsize;
3513
3514 if (pciide_chipen(sc, pa) == 0)
3515 return;
3516 revision = PCI_REVISION(pa->pa_class);
3517 printf(": Triones/Highpoint ");
3518 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3519 printf("HPT374 IDE Controller\n");
3520 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
3521 printf("HPT372 IDE Controller\n");
3522 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3523 if (revision == HPT372_REV)
3524 printf("HPT372 IDE Controller\n");
3525 else if (revision == HPT370_REV)
3526 printf("HPT370 IDE Controller\n");
3527 else if (revision == HPT370A_REV)
3528 printf("HPT370A IDE Controller\n");
3529 else if (revision == HPT366_REV)
3530 printf("HPT366 IDE Controller\n");
3531 else
3532 printf("unknown HPT IDE controller rev %d\n", revision);
3533 } else
3534 printf("unknown HPT IDE controller 0x%x\n",
3535 sc->sc_pp->ide_product);
3536
3537 /*
3538 	 * When the chip is in native mode it identifies itself as 'misc
3539 	 * mass storage' rather than IDE; fake the interface in this case.
3540 */
3541 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3542 interface = PCI_INTERFACE(pa->pa_class);
3543 } else {
3544 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3545 PCIIDE_INTERFACE_PCI(0);
3546 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3547 (revision == HPT370_REV || revision == HPT370A_REV ||
3548 revision == HPT372_REV)) ||
3549 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3550 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3551 interface |= PCIIDE_INTERFACE_PCI(1);
3552 }
3553
3554 printf("%s: bus-master DMA support present",
3555 sc->sc_wdcdev.sc_dev.dv_xname);
3556 pciide_mapreg_dma(sc, pa);
3557 printf("\n");
3558 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3559 WDC_CAPABILITY_MODE;
3560 if (sc->sc_dma_ok) {
3561 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3562 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3563 sc->sc_wdcdev.irqack = pciide_irqack;
3564 }
3565 sc->sc_wdcdev.PIO_cap = 4;
3566 sc->sc_wdcdev.DMA_cap = 2;
3567
3568 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3569 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3570 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3571 revision == HPT366_REV) {
3572 sc->sc_wdcdev.UDMA_cap = 4;
3573 /*
3574 * The 366 has 2 PCI IDE functions, one for primary and one
3575 * for secondary. So we need to call pciide_mapregs_compat()
3576 * with the real channel
3577 */
3578 if (pa->pa_function == 0) {
3579 compatchan = 0;
3580 } else if (pa->pa_function == 1) {
3581 compatchan = 1;
3582 } else {
3583 printf("%s: unexpected PCI function %d\n",
3584 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3585 return;
3586 }
3587 sc->sc_wdcdev.nchannels = 1;
3588 } else {
3589 sc->sc_wdcdev.nchannels = 2;
3590 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
3591 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3592 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3593 revision == HPT372_REV))
3594 sc->sc_wdcdev.UDMA_cap = 6;
3595 else
3596 sc->sc_wdcdev.UDMA_cap = 5;
3597 }
3598 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3599 cp = &sc->pciide_channels[i];
3600 if (sc->sc_wdcdev.nchannels > 1) {
3601 compatchan = i;
3602 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3603 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3604 printf("%s: %s channel ignored (disabled)\n",
3605 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3606 continue;
3607 }
3608 }
3609 if (pciide_chansetup(sc, i, interface) == 0)
3610 continue;
3611 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3612 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3613 &ctlsize, hpt_pci_intr);
3614 } else {
3615 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3616 &cmdsize, &ctlsize);
3617 }
3618 if (cp->hw_ok == 0)
3619 return;
3620 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3621 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3622 wdcattach(&cp->wdc_channel);
3623 hpt_setup_channel(&cp->wdc_channel);
3624 }
3625 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3626 (revision == HPT370_REV || revision == HPT370A_REV ||
3627 revision == HPT372_REV)) ||
3628 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3629 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
3630 /*
3631 		 * HPT370_REV and higher have a bit to disable interrupts;
3632 		 * make sure to clear it.
3633 */
3634 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3635 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3636 ~HPT_CSEL_IRQDIS);
3637 }
3638 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
3639 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3640 revision == HPT372_REV ) ||
3641 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3642 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3643 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
3644 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
3645 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
3646 return;
3647 }
3648
3649 void
3650 hpt_setup_channel(chp)
3651 struct channel_softc *chp;
3652 {
3653 struct ata_drive_datas *drvp;
3654 int drive;
3655 int cable;
3656 u_int32_t before, after;
3657 u_int32_t idedma_ctl;
3658 struct pciide_channel *cp = (struct pciide_channel*)chp;
3659 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3660 int revision =
3661 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
3662
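	/*
	 * HPT_CSEL holds the per-channel cable-detect (CBLID) bits; when
	 * the bit for this channel is set (apparently indicating that no
	 * 80-wire cable is present), UDMA is capped at mode 2 below.
	 */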
3663 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3664
3665 /* setup DMA if needed */
3666 pciide_channel_dma_setup(cp);
3667
3668 idedma_ctl = 0;
3669
3670 /* Per drive settings */
3671 for (drive = 0; drive < 2; drive++) {
3672 drvp = &chp->ch_drive[drive];
3673 /* If no drive, skip */
3674 if ((drvp->drive_flags & DRIVE) == 0)
3675 continue;
3676 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3677 HPT_IDETIM(chp->channel, drive));
3678
3679 /* add timing values, setup DMA if needed */
3680 if (drvp->drive_flags & DRIVE_UDMA) {
3681 /* use Ultra/DMA */
3682 drvp->drive_flags &= ~DRIVE_DMA;
3683 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3684 drvp->UDMA_mode > 2)
3685 drvp->UDMA_mode = 2;
3686 switch (sc->sc_pp->ide_product) {
3687 case PCI_PRODUCT_TRIONES_HPT374:
3688 after = hpt374_udma[drvp->UDMA_mode];
3689 break;
3690 case PCI_PRODUCT_TRIONES_HPT372:
3691 after = hpt372_udma[drvp->UDMA_mode];
3692 break;
3693 case PCI_PRODUCT_TRIONES_HPT366:
3694 default:
3695 switch(revision) {
3696 case HPT372_REV:
3697 after = hpt372_udma[drvp->UDMA_mode];
3698 break;
3699 case HPT370_REV:
3700 case HPT370A_REV:
3701 after = hpt370_udma[drvp->UDMA_mode];
3702 break;
3703 case HPT366_REV:
3704 default:
3705 after = hpt366_udma[drvp->UDMA_mode];
3706 break;
3707 }
3708 }
3709 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3710 } else if (drvp->drive_flags & DRIVE_DMA) {
3711 /*
3712 * use Multiword DMA.
3713 * Timings will be used for both PIO and DMA, so adjust
3714 * DMA mode if needed
3715 */
3716 if (drvp->PIO_mode >= 3 &&
3717 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3718 drvp->DMA_mode = drvp->PIO_mode - 2;
3719 }
3720 switch (sc->sc_pp->ide_product) {
3721 case PCI_PRODUCT_TRIONES_HPT374:
3722 after = hpt374_dma[drvp->DMA_mode];
3723 break;
3724 case PCI_PRODUCT_TRIONES_HPT372:
3725 after = hpt372_dma[drvp->DMA_mode];
3726 break;
3727 case PCI_PRODUCT_TRIONES_HPT366:
3728 default:
3729 switch(revision) {
3730 case HPT372_REV:
3731 after = hpt372_dma[drvp->DMA_mode];
3732 break;
3733 case HPT370_REV:
3734 case HPT370A_REV:
3735 after = hpt370_dma[drvp->DMA_mode];
3736 break;
3737 case HPT366_REV:
3738 default:
3739 after = hpt366_dma[drvp->DMA_mode];
3740 break;
3741 }
3742 }
3743 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3744 } else {
3745 /* PIO only */
3746 switch (sc->sc_pp->ide_product) {
3747 case PCI_PRODUCT_TRIONES_HPT374:
3748 after = hpt374_pio[drvp->PIO_mode];
3749 break;
3750 case PCI_PRODUCT_TRIONES_HPT372:
3751 after = hpt372_pio[drvp->PIO_mode];
3752 break;
3753 case PCI_PRODUCT_TRIONES_HPT366:
3754 default:
3755 switch(revision) {
3756 case HPT372_REV:
3757 after = hpt372_pio[drvp->PIO_mode];
3758 break;
3759 case HPT370_REV:
3760 case HPT370A_REV:
3761 after = hpt370_pio[drvp->PIO_mode];
3762 break;
3763 case HPT366_REV:
3764 default:
3765 after = hpt366_pio[drvp->PIO_mode];
3766 break;
3767 }
3768 }
3769 }
3770 pci_conf_write(sc->sc_pc, sc->sc_tag,
3771 HPT_IDETIM(chp->channel, drive), after);
3772 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3773 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3774 after, before), DEBUG_PROBE);
3775 }
3776 if (idedma_ctl != 0) {
3777 /* Add software bits in status register */
3778 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3779 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3780 idedma_ctl);
3781 }
3782 pciide_print_modes(cp);
3783 }
3784
3785 int
3786 hpt_pci_intr(arg)
3787 void *arg;
3788 {
3789 struct pciide_softc *sc = arg;
3790 struct pciide_channel *cp;
3791 struct channel_softc *wdc_cp;
3792 int rv = 0;
3793 int dmastat, i, crv;
3794
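	/*
	 * Claim the interrupt for a channel only when its bus-master DMA
	 * status shows INTR set with the channel idle (ACT clear).
	 */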
3795 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3796 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3797 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3798 if((dmastat & ( IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
3799 IDEDMA_CTL_INTR)
3800 continue;
3801 cp = &sc->pciide_channels[i];
3802 wdc_cp = &cp->wdc_channel;
3803 crv = wdcintr(wdc_cp);
3804 if (crv == 0) {
3805 printf("%s:%d: bogus intr\n",
3806 sc->sc_wdcdev.sc_dev.dv_xname, i);
3807 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3808 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3809 } else
3810 rv = 1;
3811 }
3812 return rv;
3813 }
3814
3815
3816 /* Macros to test product */
3817 #define PDC_IS_262(sc) \
3818 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3819 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3820 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3821 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3822 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3823 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3824 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3825 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3826 #define PDC_IS_265(sc) \
3827 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3828 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3829 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3830 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3831 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3832 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3833 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3834 #define PDC_IS_268(sc) \
3835 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3836 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3837 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3838 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3839 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3840 #define PDC_IS_276(sc) \
3841 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3842 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3843 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3844
3845 void
3846 pdc202xx_chip_map(sc, pa)
3847 struct pciide_softc *sc;
3848 struct pci_attach_args *pa;
3849 {
3850 struct pciide_channel *cp;
3851 int channel;
3852 pcireg_t interface, st, mode;
3853 bus_size_t cmdsize, ctlsize;
3854
3855 if (!PDC_IS_268(sc)) {
3856 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3857 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
3858 st), DEBUG_PROBE);
3859 }
3860 if (pciide_chipen(sc, pa) == 0)
3861 return;
3862
3863 /* turn off RAID mode */
3864 if (!PDC_IS_268(sc))
3865 st &= ~PDC2xx_STATE_IDERAID;
3866
3867 /*
3868 	 * We can't rely on the PCI_CLASS_REG content if the chip was in
3869 	 * RAID mode; we have to fake the interface.
3870 */
3871 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3872 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
3873 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3874
3875 printf("%s: bus-master DMA support present",
3876 sc->sc_wdcdev.sc_dev.dv_xname);
3877 pciide_mapreg_dma(sc, pa);
3878 printf("\n");
3879 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3880 WDC_CAPABILITY_MODE;
3881 if (sc->sc_dma_ok) {
3882 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3883 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3884 sc->sc_wdcdev.irqack = pciide_irqack;
3885 }
3886 sc->sc_wdcdev.PIO_cap = 4;
3887 sc->sc_wdcdev.DMA_cap = 2;
3888 if (PDC_IS_276(sc))
3889 sc->sc_wdcdev.UDMA_cap = 6;
3890 else if (PDC_IS_265(sc))
3891 sc->sc_wdcdev.UDMA_cap = 5;
3892 else if (PDC_IS_262(sc))
3893 sc->sc_wdcdev.UDMA_cap = 4;
3894 else
3895 sc->sc_wdcdev.UDMA_cap = 2;
3896 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
3897 pdc20268_setup_channel : pdc202xx_setup_channel;
3898 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3899 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3900
3901 if (!PDC_IS_268(sc)) {
3902 /* setup failsafe defaults */
3903 mode = 0;
3904 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3905 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3906 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3907 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3908 for (channel = 0;
3909 channel < sc->sc_wdcdev.nchannels;
3910 channel++) {
3911 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3912 "drive 0 initial timings 0x%x, now 0x%x\n",
3913 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3914 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3915 DEBUG_PROBE);
3916 pci_conf_write(sc->sc_pc, sc->sc_tag,
3917 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
3918 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3919 "drive 1 initial timings 0x%x, now 0x%x\n",
3920 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3921 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3922 pci_conf_write(sc->sc_pc, sc->sc_tag,
3923 PDC2xx_TIM(channel, 1), mode);
3924 }
3925
3926 mode = PDC2xx_SCR_DMA;
3927 if (PDC_IS_262(sc)) {
3928 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3929 } else {
3930 /* the BIOS set it up this way */
3931 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3932 }
3933 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3934 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3935 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
3936 "now 0x%x\n",
3937 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3938 PDC2xx_SCR),
3939 mode), DEBUG_PROBE);
3940 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3941 PDC2xx_SCR, mode);
3942
3943 /* controller initial state register is OK even without BIOS */
3944 /* Set DMA mode to IDE DMA compatibility */
3945 mode =
3946 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3947 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3948 DEBUG_PROBE);
3949 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3950 mode | 0x1);
3951 mode =
3952 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3953 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3954 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3955 mode | 0x1);
3956 }
3957
3958 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3959 cp = &sc->pciide_channels[channel];
3960 if (pciide_chansetup(sc, channel, interface) == 0)
3961 continue;
3962 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
3963 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3964 printf("%s: %s channel ignored (disabled)\n",
3965 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3966 continue;
3967 }
3968 if (PDC_IS_265(sc))
3969 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3970 pdc20265_pci_intr);
3971 else
3972 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3973 pdc202xx_pci_intr);
3974 if (cp->hw_ok == 0)
3975 continue;
3976 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
3977 st &= ~(PDC_IS_262(sc) ?
3978 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3979 pciide_map_compat_intr(pa, cp, channel, interface);
3980 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3981 }
3982 if (!PDC_IS_268(sc)) {
3983 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
3984 "0x%x\n", st), DEBUG_PROBE);
3985 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3986 }
3987 return;
3988 }
3989
3990 void
3991 pdc202xx_setup_channel(chp)
3992 struct channel_softc *chp;
3993 {
3994 struct ata_drive_datas *drvp;
3995 int drive;
3996 pcireg_t mode, st;
3997 u_int32_t idedma_ctl, scr, atapi;
3998 struct pciide_channel *cp = (struct pciide_channel*)chp;
3999 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4000 int channel = chp->channel;
4001
4002 /* setup DMA if needed */
4003 pciide_channel_dma_setup(cp);
4004
4005 idedma_ctl = 0;
4006 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
4007 sc->sc_wdcdev.sc_dev.dv_xname,
4008 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
4009 DEBUG_PROBE);
4010
4011 /* Per channel settings */
4012 if (PDC_IS_262(sc)) {
4013 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4014 PDC262_U66);
4015 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4016 /* Trim UDMA mode */
4017 if ((st & PDC262_STATE_80P(channel)) != 0 ||
4018 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4019 chp->ch_drive[0].UDMA_mode <= 2) ||
4020 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4021 chp->ch_drive[1].UDMA_mode <= 2)) {
4022 if (chp->ch_drive[0].UDMA_mode > 2)
4023 chp->ch_drive[0].UDMA_mode = 2;
4024 if (chp->ch_drive[1].UDMA_mode > 2)
4025 chp->ch_drive[1].UDMA_mode = 2;
4026 }
4027 /* Set U66 if needed */
4028 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4029 chp->ch_drive[0].UDMA_mode > 2) ||
4030 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4031 chp->ch_drive[1].UDMA_mode > 2))
4032 scr |= PDC262_U66_EN(channel);
4033 else
4034 scr &= ~PDC262_U66_EN(channel);
4035 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4036 PDC262_U66, scr);
4037 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
4038 sc->sc_wdcdev.sc_dev.dv_xname, channel,
4039 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4040 PDC262_ATAPI(channel))), DEBUG_PROBE);
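		/*
		 * When one drive runs Ultra-DMA and the other only multiword
		 * DMA, apparently the channel's ATAPI UDMA bit must be
		 * cleared; otherwise leave it set.
		 */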
4041 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4042 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4043 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4044 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4045 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
4046 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4047 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4048 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4049 atapi = 0;
4050 else
4051 atapi = PDC262_ATAPI_UDMA;
4052 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4053 PDC262_ATAPI(channel), atapi);
4054 }
4055 }
4056 for (drive = 0; drive < 2; drive++) {
4057 drvp = &chp->ch_drive[drive];
4058 /* If no drive, skip */
4059 if ((drvp->drive_flags & DRIVE) == 0)
4060 continue;
4061 mode = 0;
4062 if (drvp->drive_flags & DRIVE_UDMA) {
4063 /* use Ultra/DMA */
4064 drvp->drive_flags &= ~DRIVE_DMA;
4065 mode = PDC2xx_TIM_SET_MB(mode,
4066 pdc2xx_udma_mb[drvp->UDMA_mode]);
4067 mode = PDC2xx_TIM_SET_MC(mode,
4068 pdc2xx_udma_mc[drvp->UDMA_mode]);
4069 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4070 } else if (drvp->drive_flags & DRIVE_DMA) {
4071 mode = PDC2xx_TIM_SET_MB(mode,
4072 pdc2xx_dma_mb[drvp->DMA_mode]);
4073 mode = PDC2xx_TIM_SET_MC(mode,
4074 pdc2xx_dma_mc[drvp->DMA_mode]);
4075 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4076 } else {
4077 mode = PDC2xx_TIM_SET_MB(mode,
4078 pdc2xx_dma_mb[0]);
4079 mode = PDC2xx_TIM_SET_MC(mode,
4080 pdc2xx_dma_mc[0]);
4081 }
4082 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
4083 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
4084 if (drvp->drive_flags & DRIVE_ATA)
4085 mode |= PDC2xx_TIM_PRE;
4086 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
4087 if (drvp->PIO_mode >= 3) {
4088 mode |= PDC2xx_TIM_IORDY;
4089 if (drive == 0)
4090 mode |= PDC2xx_TIM_IORDYp;
4091 }
4092 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
4093 "timings 0x%x\n",
4094 sc->sc_wdcdev.sc_dev.dv_xname,
4095 chp->channel, drive, mode), DEBUG_PROBE);
4096 pci_conf_write(sc->sc_pc, sc->sc_tag,
4097 PDC2xx_TIM(chp->channel, drive), mode);
4098 }
4099 if (idedma_ctl != 0) {
4100 /* Add software bits in status register */
4101 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4102 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4103 idedma_ctl);
4104 }
4105 pciide_print_modes(cp);
4106 }
4107
4108 void
4109 pdc20268_setup_channel(chp)
4110 struct channel_softc *chp;
4111 {
4112 struct ata_drive_datas *drvp;
4113 int drive;
4114 u_int32_t idedma_ctl;
4115 struct pciide_channel *cp = (struct pciide_channel*)chp;
4116 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4117 int u100;
4118
4119 /* setup DMA if needed */
4120 pciide_channel_dma_setup(cp);
4121
4122 idedma_ctl = 0;
4123
4124 	/* I don't know what this is for; FreeBSD does it ... */
4125 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4126 IDEDMA_CMD + 0x1, 0x0b);
4127
4128 /*
4129 	 * I don't know what this is for; FreeBSD checks this ... it is not
4130 	 * cable-type detection.
4131 */
4132 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4133 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
4134
4135 for (drive = 0; drive < 2; drive++) {
4136 drvp = &chp->ch_drive[drive];
4137 /* If no drive, skip */
4138 if ((drvp->drive_flags & DRIVE) == 0)
4139 continue;
4140 if (drvp->drive_flags & DRIVE_UDMA) {
4141 /* use Ultra/DMA */
4142 drvp->drive_flags &= ~DRIVE_DMA;
4143 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4144 if (drvp->UDMA_mode > 2 && u100 == 0)
4145 drvp->UDMA_mode = 2;
4146 } else if (drvp->drive_flags & DRIVE_DMA) {
4147 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4148 }
4149 }
4150 	/* nothing to do to set up modes; the controller snoops the SET_FEATURE cmd */
4151 if (idedma_ctl != 0) {
4152 /* Add software bits in status register */
4153 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4154 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4155 idedma_ctl);
4156 }
4157 pciide_print_modes(cp);
4158 }
4159
4160 int
4161 pdc202xx_pci_intr(arg)
4162 void *arg;
4163 {
4164 struct pciide_softc *sc = arg;
4165 struct pciide_channel *cp;
4166 struct channel_softc *wdc_cp;
4167 int i, rv, crv;
4168 u_int32_t scr;
4169
4170 rv = 0;
4171 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4172 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4173 cp = &sc->pciide_channels[i];
4174 wdc_cp = &cp->wdc_channel;
4175 		/* If a compat channel, skip. */
4176 if (cp->compat)
4177 continue;
4178 if (scr & PDC2xx_SCR_INT(i)) {
4179 crv = wdcintr(wdc_cp);
4180 if (crv == 0)
4181 printf("%s:%d: bogus intr (reg 0x%x)\n",
4182 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4183 else
4184 rv = 1;
4185 }
4186 }
4187 return rv;
4188 }
4189
4190 int
4191 pdc20265_pci_intr(arg)
4192 void *arg;
4193 {
4194 struct pciide_softc *sc = arg;
4195 struct pciide_channel *cp;
4196 struct channel_softc *wdc_cp;
4197 int i, rv, crv;
4198 u_int32_t dmastat;
4199
4200 rv = 0;
4201 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4202 cp = &sc->pciide_channels[i];
4203 wdc_cp = &cp->wdc_channel;
4204 		/* If a compat channel, skip. */
4205 if (cp->compat)
4206 continue;
4207 /*
4208 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
4209 		 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops,
4210 		 * so use that instead (requires 2 reg reads instead of 1,
4211 		 * but we can't do it another way).
4212 */
4213 dmastat = bus_space_read_1(sc->sc_dma_iot,
4214 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4215 if((dmastat & IDEDMA_CTL_INTR) == 0)
4216 continue;
4217 crv = wdcintr(wdc_cp);
4218 if (crv == 0)
4219 printf("%s:%d: bogus intr\n",
4220 sc->sc_wdcdev.sc_dev.dv_xname, i);
4221 else
4222 rv = 1;
4223 }
4224 return rv;
4225 }
4226
4227 void
4228 opti_chip_map(sc, pa)
4229 struct pciide_softc *sc;
4230 struct pci_attach_args *pa;
4231 {
4232 struct pciide_channel *cp;
4233 bus_size_t cmdsize, ctlsize;
4234 pcireg_t interface;
4235 u_int8_t init_ctrl;
4236 int channel;
4237
4238 if (pciide_chipen(sc, pa) == 0)
4239 return;
4240 printf("%s: bus-master DMA support present",
4241 sc->sc_wdcdev.sc_dev.dv_xname);
4242
4243 /*
4244 * XXXSCW:
4245 * There seem to be a couple of buggy revisions/implementations
4246 * of the OPTi pciide chipset. This kludge seems to fix one of
4247 * the reported problems (PR/11644) but still fails for the
4248 * other (PR/13151), although the latter may be due to other
4249 * issues too...
4250 */
4251 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4252 printf(" but disabled due to chip rev. <= 0x12");
4253 sc->sc_dma_ok = 0;
4254 } else
4255 pciide_mapreg_dma(sc, pa);
4256
4257 printf("\n");
4258
4259 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4260 WDC_CAPABILITY_MODE;
4261 sc->sc_wdcdev.PIO_cap = 4;
4262 if (sc->sc_dma_ok) {
4263 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4264 sc->sc_wdcdev.irqack = pciide_irqack;
4265 sc->sc_wdcdev.DMA_cap = 2;
4266 }
4267 sc->sc_wdcdev.set_modes = opti_setup_channel;
4268
4269 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4270 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4271
4272 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4273 OPTI_REG_INIT_CONTROL);
4274
4275 interface = PCI_INTERFACE(pa->pa_class);
4276
4277 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4278 cp = &sc->pciide_channels[channel];
4279 if (pciide_chansetup(sc, channel, interface) == 0)
4280 continue;
4281 if (channel == 1 &&
4282 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4283 printf("%s: %s channel ignored (disabled)\n",
4284 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4285 continue;
4286 }
4287 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4288 pciide_pci_intr);
4289 if (cp->hw_ok == 0)
4290 continue;
4291 pciide_map_compat_intr(pa, cp, channel, interface);
4292 if (cp->hw_ok == 0)
4293 continue;
4294 opti_setup_channel(&cp->wdc_channel);
4295 }
4296 }
4297
4298 void
4299 opti_setup_channel(chp)
4300 struct channel_softc *chp;
4301 {
4302 struct ata_drive_datas *drvp;
4303 struct pciide_channel *cp = (struct pciide_channel*)chp;
4304 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4305 int drive, spd;
4306 int mode[2];
4307 u_int8_t rv, mr;
4308
4309 /*
4310 * The `Delay' and `Address Setup Time' fields of the
4311 * Miscellaneous Register are always zero initially.
4312 */
4313 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4314 mr &= ~(OPTI_MISC_DELAY_MASK |
4315 OPTI_MISC_ADDR_SETUP_MASK |
4316 OPTI_MISC_INDEX_MASK);
4317
4318 /* Prime the control register before setting timing values */
4319 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4320
4321 /* Determine the clockrate of the PCIbus the chip is attached to */
4322 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4323 spd &= OPTI_STRAP_PCI_SPEED_MASK;
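	/* spd selects the row of the opti_tim_* timing tables used below */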
4324
4325 /* setup DMA if needed */
4326 pciide_channel_dma_setup(cp);
4327
4328 for (drive = 0; drive < 2; drive++) {
4329 drvp = &chp->ch_drive[drive];
4330 /* If no drive, skip */
4331 if ((drvp->drive_flags & DRIVE) == 0) {
4332 mode[drive] = -1;
4333 continue;
4334 }
4335
4336 if ((drvp->drive_flags & DRIVE_DMA)) {
4337 /*
4338 * Timings will be used for both PIO and DMA,
4339 * so adjust DMA mode if needed
4340 */
4341 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4342 drvp->PIO_mode = drvp->DMA_mode + 2;
4343 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4344 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4345 drvp->PIO_mode - 2 : 0;
4346 if (drvp->DMA_mode == 0)
4347 drvp->PIO_mode = 0;
4348
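			/*
			 * The DMA entries apparently follow the five PIO
			 * entries in the opti_tim_* tables, hence the
			 * offset of 5.
			 */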
4349 mode[drive] = drvp->DMA_mode + 5;
4350 } else
4351 mode[drive] = drvp->PIO_mode;
4352
4353 if (drive && mode[0] >= 0 &&
4354 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4355 /*
4356 * Can't have two drives using different values
4357 * for `Address Setup Time'.
4358 * Slow down the faster drive to compensate.
4359 */
4360 int d = (opti_tim_as[spd][mode[0]] >
4361 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4362
4363 mode[d] = mode[1-d];
4364 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4365 chp->ch_drive[d].DMA_mode = 0;
4366 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4367 }
4368 }
4369
4370 for (drive = 0; drive < 2; drive++) {
4371 int m;
4372 if ((m = mode[drive]) < 0)
4373 continue;
4374
4375 /* Set the Address Setup Time and select appropriate index */
4376 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4377 rv |= OPTI_MISC_INDEX(drive);
4378 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4379
4380 /* Set the pulse width and recovery timing parameters */
4381 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4382 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4383 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4384 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4385
4386 /* Set the Enhanced Mode register appropriately */
4387 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4388 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4389 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4390 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4391 }
4392
4393 /* Finally, enable the timings */
4394 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4395
4396 pciide_print_modes(cp);
4397 }
4398
4399 #define ACARD_IS_850(sc) \
4400 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4401
4402 void
4403 acard_chip_map(sc, pa)
4404 struct pciide_softc *sc;
4405 struct pci_attach_args *pa;
4406 {
4407 struct pciide_channel *cp;
4408 int i;
4409 pcireg_t interface;
4410 bus_size_t cmdsize, ctlsize;
4411
4412 if (pciide_chipen(sc, pa) == 0)
4413 return;
4414
4415 /*
4416 	 * When the chip is in native mode it identifies itself as 'misc
4417 	 * mass storage' rather than IDE; fake the interface in this case.
4418 */
4419 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4420 interface = PCI_INTERFACE(pa->pa_class);
4421 } else {
4422 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4423 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4424 }
4425
4426 printf("%s: bus-master DMA support present",
4427 sc->sc_wdcdev.sc_dev.dv_xname);
4428 pciide_mapreg_dma(sc, pa);
4429 printf("\n");
4430 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4431 WDC_CAPABILITY_MODE;
4432
4433 if (sc->sc_dma_ok) {
4434 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4435 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4436 sc->sc_wdcdev.irqack = pciide_irqack;
4437 }
4438 sc->sc_wdcdev.PIO_cap = 4;
4439 sc->sc_wdcdev.DMA_cap = 2;
4440 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4441
4442 sc->sc_wdcdev.set_modes = acard_setup_channel;
4443 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4444 sc->sc_wdcdev.nchannels = 2;
4445
4446 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4447 cp = &sc->pciide_channels[i];
4448 if (pciide_chansetup(sc, i, interface) == 0)
4449 continue;
4450 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4451 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4452 &ctlsize, pciide_pci_intr);
4453 } else {
4454 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4455 &cmdsize, &ctlsize);
4456 }
4457 if (cp->hw_ok == 0)
4458 return;
4459 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4460 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4461 wdcattach(&cp->wdc_channel);
4462 acard_setup_channel(&cp->wdc_channel);
4463 }
4464 if (!ACARD_IS_850(sc)) {
4465 u_int32_t reg;
4466 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4467 reg &= ~ATP860_CTRL_INT;
4468 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4469 }
4470 }
4471
4472 void
4473 acard_setup_channel(chp)
4474 struct channel_softc *chp;
4475 {
4476 struct ata_drive_datas *drvp;
4477 struct pciide_channel *cp = (struct pciide_channel*)chp;
4478 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4479 int channel = chp->channel;
4480 int drive;
4481 u_int32_t idetime, udma_mode;
4482 u_int32_t idedma_ctl;
4483
4484 /* setup DMA if needed */
4485 pciide_channel_dma_setup(cp);
4486
4487 if (ACARD_IS_850(sc)) {
4488 idetime = 0;
4489 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4490 udma_mode &= ~ATP850_UDMA_MASK(channel);
4491 } else {
4492 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4493 idetime &= ~ATP860_SETTIME_MASK(channel);
4494 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4495 udma_mode &= ~ATP860_UDMA_MASK(channel);
4496
4497 		/* check for an 80-pin cable */
4498 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4499 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4500 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4501 & ATP860_CTRL_80P(chp->channel)) {
4502 if (chp->ch_drive[0].UDMA_mode > 2)
4503 chp->ch_drive[0].UDMA_mode = 2;
4504 if (chp->ch_drive[1].UDMA_mode > 2)
4505 chp->ch_drive[1].UDMA_mode = 2;
4506 }
4507 }
4508 }
4509
4510 idedma_ctl = 0;
4511
4512 /* Per drive settings */
4513 for (drive = 0; drive < 2; drive++) {
4514 drvp = &chp->ch_drive[drive];
4515 /* If no drive, skip */
4516 if ((drvp->drive_flags & DRIVE) == 0)
4517 continue;
4518 /* add timing values, setup DMA if needed */
4519 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4520 (drvp->drive_flags & DRIVE_UDMA)) {
4521 /* use Ultra/DMA */
4522 if (ACARD_IS_850(sc)) {
4523 idetime |= ATP850_SETTIME(drive,
4524 acard_act_udma[drvp->UDMA_mode],
4525 acard_rec_udma[drvp->UDMA_mode]);
4526 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4527 acard_udma_conf[drvp->UDMA_mode]);
4528 } else {
4529 idetime |= ATP860_SETTIME(channel, drive,
4530 acard_act_udma[drvp->UDMA_mode],
4531 acard_rec_udma[drvp->UDMA_mode]);
4532 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4533 acard_udma_conf[drvp->UDMA_mode]);
4534 }
4535 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4536 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4537 (drvp->drive_flags & DRIVE_DMA)) {
4538 /* use Multiword DMA */
4539 drvp->drive_flags &= ~DRIVE_UDMA;
4540 if (ACARD_IS_850(sc)) {
4541 idetime |= ATP850_SETTIME(drive,
4542 acard_act_dma[drvp->DMA_mode],
4543 acard_rec_dma[drvp->DMA_mode]);
4544 } else {
4545 idetime |= ATP860_SETTIME(channel, drive,
4546 acard_act_dma[drvp->DMA_mode],
4547 acard_rec_dma[drvp->DMA_mode]);
4548 }
4549 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4550 } else {
4551 /* PIO only */
4552 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4553 if (ACARD_IS_850(sc)) {
4554 idetime |= ATP850_SETTIME(drive,
4555 acard_act_pio[drvp->PIO_mode],
4556 acard_rec_pio[drvp->PIO_mode]);
4557 } else {
4558 idetime |= ATP860_SETTIME(channel, drive,
4559 acard_act_pio[drvp->PIO_mode],
4560 acard_rec_pio[drvp->PIO_mode]);
4561 }
4562 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4563 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4564 | ATP8x0_CTRL_EN(channel));
4565 }
4566 }
4567
4568 if (idedma_ctl != 0) {
4569 /* Add software bits in status register */
4570 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4571 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4572 }
4573 pciide_print_modes(cp);
4574
4575 if (ACARD_IS_850(sc)) {
4576 pci_conf_write(sc->sc_pc, sc->sc_tag,
4577 ATP850_IDETIME(channel), idetime);
4578 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4579 } else {
4580 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4581 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4582 }
4583 }
4584
4585 int
4586 acard_pci_intr(arg)
4587 void *arg;
4588 {
4589 struct pciide_softc *sc = arg;
4590 struct pciide_channel *cp;
4591 struct channel_softc *wdc_cp;
4592 int rv = 0;
4593 int dmastat, i, crv;
4594
4595 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4596 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4597 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4598 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4599 continue;
4600 cp = &sc->pciide_channels[i];
4601 wdc_cp = &cp->wdc_channel;
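		/*
		 * No command is waiting for an interrupt on this channel;
		 * run wdcintr() and ack the DMA status bit anyway (presumably
		 * to clear a shared or spurious interrupt) but don't claim it.
		 */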
4602 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4603 (void)wdcintr(wdc_cp);
4604 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4605 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4606 continue;
4607 }
4608 crv = wdcintr(wdc_cp);
4609 if (crv == 0)
4610 printf("%s:%d: bogus intr\n",
4611 sc->sc_wdcdev.sc_dev.dv_xname, i);
4612 else if (crv == 1)
4613 rv = 1;
4614 else if (rv == 0)
4615 rv = crv;
4616 }
4617 return rv;
4618 }
4619
4620 static int
4621 sl82c105_bugchk(struct pci_attach_args *pa)
4622 {
4623
4624 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
4625 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
4626 return (0);
4627
4628 if (PCI_REVISION(pa->pa_class) <= 0x05)
4629 return (1);
4630
4631 return (0);
4632 }
4633
4634 void
4635 sl82c105_chip_map(sc, pa)
4636 struct pciide_softc *sc;
4637 struct pci_attach_args *pa;
4638 {
4639 struct pciide_channel *cp;
4640 bus_size_t cmdsize, ctlsize;
4641 pcireg_t interface, idecr;
4642 int channel;
4643
4644 if (pciide_chipen(sc, pa) == 0)
4645 return;
4646
4647 printf("%s: bus-master DMA support present",
4648 sc->sc_wdcdev.sc_dev.dv_xname);
4649
4650 /*
4651 * Check to see if we're part of the Winbond 83c553 Southbridge.
4652 * If so, we need to disable DMA on rev. <= 5 of that chip.
4653 */
4654 if (pci_find_device(pa, sl82c105_bugchk)) {
4655 printf(" but disabled due to 83c553 rev. <= 0x05");
4656 sc->sc_dma_ok = 0;
4657 } else
4658 pciide_mapreg_dma(sc, pa);
4659 printf("\n");
4660
4661 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4662 WDC_CAPABILITY_MODE;
4663 sc->sc_wdcdev.PIO_cap = 4;
4664 if (sc->sc_dma_ok) {
4665 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4666 sc->sc_wdcdev.irqack = pciide_irqack;
4667 sc->sc_wdcdev.DMA_cap = 2;
4668 }
4669 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
4670
4671 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4672 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4673
4674 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
4675
4676 interface = PCI_INTERFACE(pa->pa_class);
4677
4678 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4679 cp = &sc->pciide_channels[channel];
4680 if (pciide_chansetup(sc, channel, interface) == 0)
4681 continue;
4682 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
4683 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
4684 printf("%s: %s channel ignored (disabled)\n",
4685 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4686 continue;
4687 }
4688 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4689 pciide_pci_intr);
4690 if (cp->hw_ok == 0)
4691 continue;
4692 pciide_map_compat_intr(pa, cp, channel, interface);
4693 if (cp->hw_ok == 0)
4694 continue;
4695 sl82c105_setup_channel(&cp->wdc_channel);
4696 }
4697 }
4698
4699 void
4700 sl82c105_setup_channel(chp)
4701 struct channel_softc *chp;
4702 {
4703 struct ata_drive_datas *drvp;
4704 struct pciide_channel *cp = (struct pciide_channel*)chp;
4705 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4706 int pxdx_reg, drive;
4707 pcireg_t pxdx;
4708
4709 /* Set up DMA if needed. */
4710 pciide_channel_dma_setup(cp);
4711
4712 for (drive = 0; drive < 2; drive++) {
4713 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
4714 : SYMPH_P1D0CR) + (drive * 4);
4715
4716 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
4717
4718 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
4719 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
4720
4721 drvp = &chp->ch_drive[drive];
4722 /* If no drive, skip. */
4723 if ((drvp->drive_flags & DRIVE) == 0) {
4724 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4725 continue;
4726 }
4727
4728 if (drvp->drive_flags & DRIVE_DMA) {
4729 /*
4730 * Timings will be used for both PIO and DMA,
4731 * so adjust DMA mode if needed.
4732 */
4733 if (drvp->PIO_mode >= 3) {
4734 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
4735 drvp->DMA_mode = drvp->PIO_mode - 2;
4736 if (drvp->DMA_mode < 1) {
4737 /*
4738 					 * Can't mix PIO and DMA timings;
4739 					 * disable DMA.
4740 */
4741 drvp->drive_flags &= ~DRIVE_DMA;
4742 }
4743 } else {
4744 /*
4745 				 * Can't mix PIO and DMA timings;
4746 				 * disable DMA.
4747 */
4748 drvp->drive_flags &= ~DRIVE_DMA;
4749 }
4750 }
4751
4752 if (drvp->drive_flags & DRIVE_DMA) {
4753 /* Use multi-word DMA. */
4754 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
4755 PxDx_CMD_ON_SHIFT;
4756 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
4757 } else {
4758 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
4759 PxDx_CMD_ON_SHIFT;
4760 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
4761 }
4762
4763 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
4764
4765 /* ...and set the mode for this drive. */
4766 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4767 }
4768
4769 pciide_print_modes(cp);
4770 }
4771
4772 void
4773 serverworks_chip_map(sc, pa)
4774 struct pciide_softc *sc;
4775 struct pci_attach_args *pa;
4776 {
4777 struct pciide_channel *cp;
4778 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
4779 pcitag_t pcib_tag;
4780 int channel;
4781 bus_size_t cmdsize, ctlsize;
4782
4783 if (pciide_chipen(sc, pa) == 0)
4784 return;
4785
4786 printf("%s: bus-master DMA support present",
4787 sc->sc_wdcdev.sc_dev.dv_xname);
4788 pciide_mapreg_dma(sc, pa);
4789 printf("\n");
4790 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4791 WDC_CAPABILITY_MODE;
4792
4793 if (sc->sc_dma_ok) {
4794 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4795 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4796 sc->sc_wdcdev.irqack = pciide_irqack;
4797 }
4798 sc->sc_wdcdev.PIO_cap = 4;
4799 sc->sc_wdcdev.DMA_cap = 2;
4800 switch (sc->sc_pp->ide_product) {
4801 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
4802 sc->sc_wdcdev.UDMA_cap = 2;
4803 break;
4804 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
4805 if (PCI_REVISION(pa->pa_class) < 0x92)
4806 sc->sc_wdcdev.UDMA_cap = 4;
4807 else
4808 sc->sc_wdcdev.UDMA_cap = 5;
4809 break;
4810 }
4811
4812 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
4813 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4814 sc->sc_wdcdev.nchannels = 2;
4815
4816 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4817 cp = &sc->pciide_channels[channel];
4818 if (pciide_chansetup(sc, channel, interface) == 0)
4819 continue;
4820 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4821 serverworks_pci_intr);
4822 if (cp->hw_ok == 0)
4823 return;
4824 pciide_map_compat_intr(pa, cp, channel, interface);
4825 if (cp->hw_ok == 0)
4826 return;
4827 serverworks_setup_channel(&cp->wdc_channel);
4828 }
4829
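	/*
	 * Tweak config register 0x64 of PCI function 0 of this device:
	 * clear bit 13 (0x2000) and set bit 14 (0x4000).  The purpose of
	 * these bits is not documented here.
	 */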
4830 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
4831 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
4832 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
4833 }
4834
4835 void
4836 serverworks_setup_channel(chp)
4837 struct channel_softc *chp;
4838 {
4839 struct ata_drive_datas *drvp;
4840 struct pciide_channel *cp = (struct pciide_channel*)chp;
4841 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4842 int channel = chp->channel;
4843 int drive, unit;
4844 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
4845 u_int32_t idedma_ctl;
4846 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
4847 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
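	/*
	 * Raw timing bytes for PIO modes 0-4 and MW DMA modes 0-2,
	 * presumably encoding command-active and recovery clocks.
	 */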
4848
4849 /* setup DMA if needed */
4850 pciide_channel_dma_setup(cp);
4851
4852 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
4853 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
4854 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
4855 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
4856
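	/*
	 * Registers 0x40/0x44 hold per-drive PIO and MW DMA cycle timings
	 * (one byte per drive); 0x48/0x54 hold the per-drive mode nibbles,
	 * and 0x54 also carries per-drive UDMA enable bits.  Clear this
	 * channel's fields before rebuilding them below.
	 */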
4857 pio_time &= ~(0xffff << (16 * channel));
4858 dma_time &= ~(0xffff << (16 * channel));
4859 pio_mode &= ~(0xff << (8 * channel + 16));
4860 udma_mode &= ~(0xff << (8 * channel + 16));
4861 udma_mode &= ~(3 << (2 * channel));
4862
4863 idedma_ctl = 0;
4864
4865 /* Per drive settings */
4866 for (drive = 0; drive < 2; drive++) {
4867 drvp = &chp->ch_drive[drive];
4868 /* If no drive, skip */
4869 if ((drvp->drive_flags & DRIVE) == 0)
4870 continue;
4871 unit = drive + 2 * channel;
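		/*
		 * unit numbers the drives 0-3 across both channels; the
		 * timing bytes are apparently stored master/slave-swapped,
		 * hence the (unit ^ 1) shifts below.
		 */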
4872 /* add timing values, setup DMA if needed */
4873 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
4874 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
4875 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4876 (drvp->drive_flags & DRIVE_UDMA)) {
4877 /* use Ultra/DMA, check for 80-pin cable */
4878 if (drvp->UDMA_mode > 2 &&
4879 			    (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
			    PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
4880 drvp->UDMA_mode = 2;
4881 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4882 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
4883 udma_mode |= 1 << unit;
4884 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4885 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4886 (drvp->drive_flags & DRIVE_DMA)) {
4887 /* use Multiword DMA */
4888 drvp->drive_flags &= ~DRIVE_UDMA;
4889 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4890 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4891 } else {
4892 /* PIO only */
4893 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4894 }
4895 }
4896
4897 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
4898 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
4899 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
4900 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
4901 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
4902
4903 if (idedma_ctl != 0) {
4904 /* Add software bits in status register */
4905 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4906 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4907 }
4908 pciide_print_modes(cp);
4909 }
4910
4911 int
4912 serverworks_pci_intr(arg)
4913 void *arg;
4914 {
4915 struct pciide_softc *sc = arg;
4916 struct pciide_channel *cp;
4917 struct channel_softc *wdc_cp;
4918 int rv = 0;
4919 int dmastat, i, crv;
4920
4921 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4922 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4923 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4924 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4925 IDEDMA_CTL_INTR)
4926 continue;
4927 cp = &sc->pciide_channels[i];
4928 wdc_cp = &cp->wdc_channel;
4929 crv = wdcintr(wdc_cp);
4930 if (crv == 0) {
4931 printf("%s:%d: bogus intr\n",
4932 sc->sc_wdcdev.sc_dev.dv_xname, i);
4933 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4934 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4935 } else
4936 rv = 1;
4937 }
4938 return rv;
4939 }
4940