1 /* $NetBSD: pciide.c,v 1.181 2003/02/28 22:07:05 enami Exp $ */
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.181 2003/02/28 22:07:05 enami Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/cy82c693var.h>
123
124 #include "opt_pciide.h"
125
126 /* inlines for reading/writing 8-bit PCI registers */
127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
128 int));
129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
130 int, u_int8_t));
131
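/*
 * PCI configuration space is accessed as aligned 32-bit words, so these
 * helpers round the offset down to the containing dword and shift/mask to
 * extract (read) or read-modify-write (write) the requested byte.
 */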
132 static __inline u_int8_t
133 pciide_pci_read(pc, pa, reg)
134 pci_chipset_tag_t pc;
135 pcitag_t pa;
136 int reg;
137 {
138
139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
140 ((reg & 0x03) * 8) & 0xff);
141 }
142
143 static __inline void
144 pciide_pci_write(pc, pa, reg, val)
145 pci_chipset_tag_t pc;
146 pcitag_t pa;
147 int reg;
148 u_int8_t val;
149 {
150 pcireg_t pcival;
151
152 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
153 pcival &= ~(0xff << ((reg & 0x03) * 8));
154 pcival |= (val << ((reg & 0x03) * 8));
155 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
156 }
157
158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159
160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
161 void piix_setup_channel __P((struct channel_softc*));
162 void piix3_4_setup_channel __P((struct channel_softc*));
163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
166
167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void amd7x6_setup_channel __P((struct channel_softc*));
169
170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void apollo_setup_channel __P((struct channel_softc*));
172
173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_setup_channel __P((struct channel_softc*));
176 void cmd_channel_map __P((struct pci_attach_args *,
177 struct pciide_softc *, int));
178 int cmd_pci_intr __P((void *));
179 void cmd646_9_irqack __P((struct channel_softc *));
180 void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
181 void cmd680_setup_channel __P((struct channel_softc*));
182 void cmd680_channel_map __P((struct pci_attach_args *,
183 struct pciide_softc *, int));
184
185 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
186 void cy693_setup_channel __P((struct channel_softc*));
187
188 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void sis_setup_channel __P((struct channel_softc*));
190 static int sis_hostbr_match __P(( struct pci_attach_args *));
191
192 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void acer_setup_channel __P((struct channel_softc*));
194 int acer_pci_intr __P((void *));
195
196 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
197 void pdc202xx_setup_channel __P((struct channel_softc*));
198 void pdc20268_setup_channel __P((struct channel_softc*));
199 int pdc202xx_pci_intr __P((void *));
200 int pdc20265_pci_intr __P((void *));
201
202 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
203 void opti_setup_channel __P((struct channel_softc*));
204
205 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void hpt_setup_channel __P((struct channel_softc*));
207 int hpt_pci_intr __P((void *));
208
209 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
210 void acard_setup_channel __P((struct channel_softc*));
211 int acard_pci_intr __P((void *));
212
213 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
214 void serverworks_setup_channel __P((struct channel_softc*));
215 int serverworks_pci_intr __P((void *));
216
217 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
218 void sl82c105_setup_channel __P((struct channel_softc*));
219
220 void pciide_channel_dma_setup __P((struct pciide_channel *));
221 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
222 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
223 void pciide_dma_start __P((void*, int, int));
224 int pciide_dma_finish __P((void*, int, int, int));
225 void pciide_irqack __P((struct channel_softc *));
226 void pciide_print_modes __P((struct pciide_channel *));
227
228 struct pciide_product_desc {
229 u_int32_t ide_product;
230 int ide_flags;
231 const char *ide_name;
232 /* map and setup chip, probe drives */
233 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
234 };
235
236 /* Flags for ide_flags */
237 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
238 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARs ignore upper word */
239
240 /* Default product description for devices not specifically known to this driver */
241 const struct pciide_product_desc default_product_desc = {
242 0,
243 0,
244 "Generic PCI IDE controller",
245 default_chip_map,
246 };
247
248 const struct pciide_product_desc pciide_intel_products[] = {
249 { PCI_PRODUCT_INTEL_82092AA,
250 0,
251 "Intel 82092AA IDE controller",
252 default_chip_map,
253 },
254 { PCI_PRODUCT_INTEL_82371FB_IDE,
255 0,
256 "Intel 82371FB IDE controller (PIIX)",
257 piix_chip_map,
258 },
259 { PCI_PRODUCT_INTEL_82371SB_IDE,
260 0,
261 "Intel 82371SB IDE Interface (PIIX3)",
262 piix_chip_map,
263 },
264 { PCI_PRODUCT_INTEL_82371AB_IDE,
265 0,
266 "Intel 82371AB IDE controller (PIIX4)",
267 piix_chip_map,
268 },
269 { PCI_PRODUCT_INTEL_82440MX_IDE,
270 0,
271 "Intel 82440MX IDE controller",
272 piix_chip_map
273 },
274 { PCI_PRODUCT_INTEL_82801AA_IDE,
275 0,
276 "Intel 82801AA IDE Controller (ICH)",
277 piix_chip_map,
278 },
279 { PCI_PRODUCT_INTEL_82801AB_IDE,
280 0,
281 "Intel 82801AB IDE Controller (ICH0)",
282 piix_chip_map,
283 },
284 { PCI_PRODUCT_INTEL_82801BA_IDE,
285 0,
286 "Intel 82801BA IDE Controller (ICH2)",
287 piix_chip_map,
288 },
289 { PCI_PRODUCT_INTEL_82801BAM_IDE,
290 0,
291 "Intel 82801BAM IDE Controller (ICH2)",
292 piix_chip_map,
293 },
294 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
295 0,
296 "Intel 82801CA IDE Controller",
297 piix_chip_map,
298 },
299 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
300 0,
301 "Intel 82801CA IDE Controller",
302 piix_chip_map,
303 },
304 { PCI_PRODUCT_INTEL_82801DB_IDE,
305 0,
306 "Intel 82801DB IDE Controller (ICH4)",
307 piix_chip_map,
308 },
309 { 0,
310 0,
311 NULL,
312 NULL
313 }
314 };
315
316 const struct pciide_product_desc pciide_amd_products[] = {
317 { PCI_PRODUCT_AMD_PBC756_IDE,
318 0,
319 "Advanced Micro Devices AMD756 IDE Controller",
320 amd7x6_chip_map
321 },
322 { PCI_PRODUCT_AMD_PBC766_IDE,
323 0,
324 "Advanced Micro Devices AMD766 IDE Controller",
325 amd7x6_chip_map
326 },
327 { PCI_PRODUCT_AMD_PBC768_IDE,
328 0,
329 "Advanced Micro Devices AMD768 IDE Controller",
330 amd7x6_chip_map
331 },
332 { PCI_PRODUCT_AMD_PBC8111_IDE,
333 0,
334 "Advanced Micro Devices AMD8111 IDE Controller",
335 amd7x6_chip_map
336 },
337 { 0,
338 0,
339 NULL,
340 NULL
341 }
342 };
343
344 const struct pciide_product_desc pciide_nvidia_products[] = {
345 { PCI_PRODUCT_NVIDIA_NFORCE_ATA100,
346 0,
347 "NVIDIA nForce IDE Controller",
348 amd7x6_chip_map
349 },
350 { PCI_PRODUCT_NVIDIA_NFORCE2_ATA133,
351 0,
352 "NVIDIA nForce2 IDE Controller",
353 amd7x6_chip_map
354 },
355 { 0,
356 0,
357 NULL,
358 NULL
359 }
360 };
361
362 const struct pciide_product_desc pciide_cmd_products[] = {
363 { PCI_PRODUCT_CMDTECH_640,
364 0,
365 "CMD Technology PCI0640",
366 cmd_chip_map
367 },
368 { PCI_PRODUCT_CMDTECH_643,
369 0,
370 "CMD Technology PCI0643",
371 cmd0643_9_chip_map,
372 },
373 { PCI_PRODUCT_CMDTECH_646,
374 0,
375 "CMD Technology PCI0646",
376 cmd0643_9_chip_map,
377 },
378 { PCI_PRODUCT_CMDTECH_648,
379 IDE_PCI_CLASS_OVERRIDE,
380 "CMD Technology PCI0648",
381 cmd0643_9_chip_map,
382 },
383 { PCI_PRODUCT_CMDTECH_649,
384 IDE_PCI_CLASS_OVERRIDE,
385 "CMD Technology PCI0649",
386 cmd0643_9_chip_map,
387 },
388 { PCI_PRODUCT_CMDTECH_680,
389 IDE_PCI_CLASS_OVERRIDE,
390 "Silicon Image 0680",
391 cmd680_chip_map,
392 },
393 { 0,
394 0,
395 NULL,
396 NULL
397 }
398 };
399
400 const struct pciide_product_desc pciide_via_products[] = {
401 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
402 0,
403 NULL,
404 apollo_chip_map,
405 },
406 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
407 0,
408 NULL,
409 apollo_chip_map,
410 },
411 { 0,
412 0,
413 NULL,
414 NULL
415 }
416 };
417
418 const struct pciide_product_desc pciide_cypress_products[] = {
419 { PCI_PRODUCT_CONTAQ_82C693,
420 IDE_16BIT_IOSPACE,
421 "Cypress 82C693 IDE Controller",
422 cy693_chip_map,
423 },
424 { 0,
425 0,
426 NULL,
427 NULL
428 }
429 };
430
431 const struct pciide_product_desc pciide_sis_products[] = {
432 { PCI_PRODUCT_SIS_5597_IDE,
433 0,
434 "Silicon Integrated System 5597/5598 IDE controller",
435 sis_chip_map,
436 },
437 { 0,
438 0,
439 NULL,
440 NULL
441 }
442 };
443
444 const struct pciide_product_desc pciide_acer_products[] = {
445 { PCI_PRODUCT_ALI_M5229,
446 0,
447 "Acer Labs M5229 UDMA IDE Controller",
448 acer_chip_map,
449 },
450 { 0,
451 0,
452 NULL,
453 NULL
454 }
455 };
456
457 const struct pciide_product_desc pciide_promise_products[] = {
458 { PCI_PRODUCT_PROMISE_ULTRA33,
459 IDE_PCI_CLASS_OVERRIDE,
460 "Promise Ultra33/ATA Bus Master IDE Accelerator",
461 pdc202xx_chip_map,
462 },
463 { PCI_PRODUCT_PROMISE_ULTRA66,
464 IDE_PCI_CLASS_OVERRIDE,
465 "Promise Ultra66/ATA Bus Master IDE Accelerator",
466 pdc202xx_chip_map,
467 },
468 { PCI_PRODUCT_PROMISE_ULTRA100,
469 IDE_PCI_CLASS_OVERRIDE,
470 "Promise Ultra100/ATA Bus Master IDE Accelerator",
471 pdc202xx_chip_map,
472 },
473 { PCI_PRODUCT_PROMISE_ULTRA100X,
474 IDE_PCI_CLASS_OVERRIDE,
475 "Promise Ultra100/ATA Bus Master IDE Accelerator",
476 pdc202xx_chip_map,
477 },
478 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
479 IDE_PCI_CLASS_OVERRIDE,
480 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
481 pdc202xx_chip_map,
482 },
483 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
484 IDE_PCI_CLASS_OVERRIDE,
485 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
486 pdc202xx_chip_map,
487 },
488 { PCI_PRODUCT_PROMISE_ULTRA133,
489 IDE_PCI_CLASS_OVERRIDE,
490 "Promise Ultra133/ATA Bus Master IDE Accelerator",
491 pdc202xx_chip_map,
492 },
493 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
494 IDE_PCI_CLASS_OVERRIDE,
495 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
496 pdc202xx_chip_map,
497 },
498 { PCI_PRODUCT_PROMISE_MBULTRA133,
499 IDE_PCI_CLASS_OVERRIDE,
500 "Promise Ultra133/ATA Bus Master IDE Accelerator (MB)",
501 pdc202xx_chip_map,
502 },
503 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
504 IDE_PCI_CLASS_OVERRIDE,
505 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
506 pdc202xx_chip_map,
507 },
508 { PCI_PRODUCT_PROMISE_FASTTRAK133LITE,
509 IDE_PCI_CLASS_OVERRIDE,
510 "Promise Fasttrak133 Lite Bus Master IDE Accelerator",
511 pdc202xx_chip_map,
512 },
513 { PCI_PRODUCT_PROMISE_SATA150TX2PLUS,
514 IDE_PCI_CLASS_OVERRIDE,
515 "Promise Serial ATA/150 TX2plus Bus Master IDE Accelerator",
516 pdc202xx_chip_map,
517 },
518 { 0,
519 0,
520 NULL,
521 NULL
522 }
523 };
524
525 const struct pciide_product_desc pciide_opti_products[] = {
526 { PCI_PRODUCT_OPTI_82C621,
527 0,
528 "OPTi 82c621 PCI IDE controller",
529 opti_chip_map,
530 },
531 { PCI_PRODUCT_OPTI_82C568,
532 0,
533 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
534 opti_chip_map,
535 },
536 { PCI_PRODUCT_OPTI_82D568,
537 0,
538 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
539 opti_chip_map,
540 },
541 { 0,
542 0,
543 NULL,
544 NULL
545 }
546 };
547
548 const struct pciide_product_desc pciide_triones_products[] = {
549 { PCI_PRODUCT_TRIONES_HPT366,
550 IDE_PCI_CLASS_OVERRIDE,
551 NULL,
552 hpt_chip_map,
553 },
554 { PCI_PRODUCT_TRIONES_HPT372,
555 IDE_PCI_CLASS_OVERRIDE,
556 NULL,
557 hpt_chip_map
558 },
559 { PCI_PRODUCT_TRIONES_HPT374,
560 IDE_PCI_CLASS_OVERRIDE,
561 NULL,
562 hpt_chip_map
563 },
564 { 0,
565 0,
566 NULL,
567 NULL
568 }
569 };
570
571 const struct pciide_product_desc pciide_acard_products[] = {
572 { PCI_PRODUCT_ACARD_ATP850U,
573 IDE_PCI_CLASS_OVERRIDE,
574 "Acard ATP850U Ultra33 IDE Controller",
575 acard_chip_map,
576 },
577 { PCI_PRODUCT_ACARD_ATP860,
578 IDE_PCI_CLASS_OVERRIDE,
579 "Acard ATP860 Ultra66 IDE Controller",
580 acard_chip_map,
581 },
582 { PCI_PRODUCT_ACARD_ATP860A,
583 IDE_PCI_CLASS_OVERRIDE,
584 "Acard ATP860-A Ultra66 IDE Controller",
585 acard_chip_map,
586 },
587 { 0,
588 0,
589 NULL,
590 NULL
591 }
592 };
593
594 const struct pciide_product_desc pciide_serverworks_products[] = {
595 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
596 0,
597 "ServerWorks OSB4 IDE Controller",
598 serverworks_chip_map,
599 },
600 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
601 0,
602 "ServerWorks CSB5 IDE Controller",
603 serverworks_chip_map,
604 },
605 { PCI_PRODUCT_SERVERWORKS_CSB6_IDE,
606 0,
607 "ServerWorks CSB6 RAID/IDE Controller",
608 serverworks_chip_map,
609 },
610 { 0,
611 0,
612 NULL,
613 }
614 };
615
616 const struct pciide_product_desc pciide_symphony_products[] = {
617 { PCI_PRODUCT_SYMPHONY_82C105,
618 0,
619 "Symphony Labs 82C105 IDE controller",
620 sl82c105_chip_map,
621 },
622 { 0,
623 0,
624 NULL,
625 }
626 };
627
628 const struct pciide_product_desc pciide_winbond_products[] = {
629 { PCI_PRODUCT_WINBOND_W83C553F_1,
630 0,
631 "Winbond W83C553F IDE controller",
632 sl82c105_chip_map,
633 },
634 { 0,
635 0,
636 NULL,
637 }
638 };
639
640 struct pciide_vendor_desc {
641 u_int32_t ide_vendor;
642 const struct pciide_product_desc *ide_products;
643 };
644
645 const struct pciide_vendor_desc pciide_vendors[] = {
646 { PCI_VENDOR_INTEL, pciide_intel_products },
647 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
648 { PCI_VENDOR_VIATECH, pciide_via_products },
649 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
650 { PCI_VENDOR_SIS, pciide_sis_products },
651 { PCI_VENDOR_ALI, pciide_acer_products },
652 { PCI_VENDOR_PROMISE, pciide_promise_products },
653 { PCI_VENDOR_AMD, pciide_amd_products },
654 { PCI_VENDOR_OPTI, pciide_opti_products },
655 { PCI_VENDOR_TRIONES, pciide_triones_products },
656 { PCI_VENDOR_ACARD, pciide_acard_products },
657 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
658 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
659 { PCI_VENDOR_WINBOND, pciide_winbond_products },
660 { PCI_VENDOR_NVIDIA, pciide_nvidia_products },
661 { 0, NULL }
662 };
663
664 /* options passed via the 'flags' config keyword */
665 #define PCIIDE_OPTIONS_DMA 0x01
666 #define PCIIDE_OPTIONS_NODMA 0x02
667
668 int pciide_match __P((struct device *, struct cfdata *, void *));
669 void pciide_attach __P((struct device *, struct device *, void *));
670
671 CFATTACH_DECL(pciide, sizeof(struct pciide_softc),
672 pciide_match, pciide_attach, NULL, NULL);
673
674 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
675 int pciide_mapregs_compat __P(( struct pci_attach_args *,
676 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
677 int pciide_mapregs_native __P((struct pci_attach_args *,
678 struct pciide_channel *, bus_size_t *, bus_size_t *,
679 int (*pci_intr) __P((void *))));
680 void pciide_mapreg_dma __P((struct pciide_softc *,
681 struct pci_attach_args *));
682 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
683 void pciide_mapchan __P((struct pci_attach_args *,
684 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
685 int (*pci_intr) __P((void *))));
686 int pciide_chan_candisable __P((struct pciide_channel *));
687 void pciide_map_compat_intr __P(( struct pci_attach_args *,
688 struct pciide_channel *, int, int));
689 int pciide_compat_intr __P((void *));
690 int pciide_pci_intr __P((void *));
691 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
692
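/*
 * Look up a PCI vendor/product ID in the tables above.  The vendor list
 * is terminated by a NULL product pointer and each product list by a NULL
 * chip_map; return NULL if the device is not known.
 */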
693 const struct pciide_product_desc *
694 pciide_lookup_product(id)
695 u_int32_t id;
696 {
697 const struct pciide_product_desc *pp;
698 const struct pciide_vendor_desc *vp;
699
700 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
701 if (PCI_VENDOR(id) == vp->ide_vendor)
702 break;
703
704 if ((pp = vp->ide_products) == NULL)
705 return NULL;
706
707 for (; pp->chip_map != NULL; pp++)
708 if (PCI_PRODUCT(id) == pp->ide_product)
709 break;
710
711 if (pp->chip_map == NULL)
712 return NULL;
713 return pp;
714 }
715
716 int
717 pciide_match(parent, match, aux)
718 struct device *parent;
719 struct cfdata *match;
720 void *aux;
721 {
722 struct pci_attach_args *pa = aux;
723 const struct pciide_product_desc *pp;
724
725 /*
726 * Check the class register to see that it's a PCI IDE controller.
727 * If it is, we assume that we can deal with it; it _should_
728 * work in a standardized way...
729 */
730 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
731 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
732 return (1);
733 }
734
735 /*
736 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
737 * controllers. Let's see if we can deal with them anyway.
738 */
739 pp = pciide_lookup_product(pa->pa_id);
740 if (pp != NULL && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
741 return (1);
742 }
743
744 return (0);
745 }
746
747 void
748 pciide_attach(parent, self, aux)
749 struct device *parent, *self;
750 void *aux;
751 {
752 struct pci_attach_args *pa = aux;
753 pci_chipset_tag_t pc = pa->pa_pc;
754 pcitag_t tag = pa->pa_tag;
755 struct pciide_softc *sc = (struct pciide_softc *)self;
756 pcireg_t csr;
757 char devinfo[256];
758 const char *displaydev;
759
760 sc->sc_pci_vendor = PCI_VENDOR(pa->pa_id);
761 sc->sc_pp = pciide_lookup_product(pa->pa_id);
762 if (sc->sc_pp == NULL) {
763 sc->sc_pp = &default_product_desc;
764 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
765 displaydev = devinfo;
766 } else
767 displaydev = sc->sc_pp->ide_name;
768
769 /* if displaydev == NULL, printf is done in chip-specific map */
770 if (displaydev)
771 printf(": %s (rev. 0x%02x)\n", displaydev,
772 PCI_REVISION(pa->pa_class));
773
774 sc->sc_pc = pa->pa_pc;
775 sc->sc_tag = pa->pa_tag;
776 #ifdef WDCDEBUG
777 if (wdcdebug_pciide_mask & DEBUG_PROBE)
778 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
779 #endif
780 sc->sc_pp->chip_map(sc, pa);
781
782 if (sc->sc_dma_ok) {
783 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
784 csr |= PCI_COMMAND_MASTER_ENABLE;
785 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
786 }
787 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
788 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
789 }
790
791 /* tell whether the chip is enabled or not */
792 int
793 pciide_chipen(sc, pa)
794 struct pciide_softc *sc;
795 struct pci_attach_args *pa;
796 {
797 pcireg_t csr;
798 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
799 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
800 PCI_COMMAND_STATUS_REG);
801 printf("%s: device disabled (at %s)\n",
802 sc->sc_wdcdev.sc_dev.dv_xname,
803 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
804 "device" : "bridge");
805 return 0;
806 }
807 return 1;
808 }
809
810 int
811 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
812 struct pci_attach_args *pa;
813 struct pciide_channel *cp;
814 int compatchan;
815 bus_size_t *cmdsizep, *ctlsizep;
816 {
817 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
818 struct channel_softc *wdc_cp = &cp->wdc_channel;
819
820 cp->compat = 1;
821 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
822 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
823
824 wdc_cp->cmd_iot = pa->pa_iot;
825 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
826 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
827 printf("%s: couldn't map %s channel cmd regs\n",
828 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
829 return (0);
830 }
831
832 wdc_cp->ctl_iot = pa->pa_iot;
833 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
834 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
835 printf("%s: couldn't map %s channel ctl regs\n",
836 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
837 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
838 PCIIDE_COMPAT_CMD_SIZE);
839 return (0);
840 }
841
842 return (1);
843 }
844
845 int
846 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
847 struct pci_attach_args * pa;
848 struct pciide_channel *cp;
849 bus_size_t *cmdsizep, *ctlsizep;
850 int (*pci_intr) __P((void *));
851 {
852 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
853 struct channel_softc *wdc_cp = &cp->wdc_channel;
854 const char *intrstr;
855 pci_intr_handle_t intrhandle;
856
857 cp->compat = 0;
858
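	/*
	 * The native-PCI interrupt is established only once; both channels
	 * share sc_pci_ih.
	 */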
859 if (sc->sc_pci_ih == NULL) {
860 if (pci_intr_map(pa, &intrhandle) != 0) {
861 printf("%s: couldn't map native-PCI interrupt\n",
862 sc->sc_wdcdev.sc_dev.dv_xname);
863 return 0;
864 }
865 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
866 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
867 intrhandle, IPL_BIO, pci_intr, sc);
868 if (sc->sc_pci_ih != NULL) {
869 printf("%s: using %s for native-PCI interrupt\n",
870 sc->sc_wdcdev.sc_dev.dv_xname,
871 intrstr ? intrstr : "unknown interrupt");
872 } else {
873 printf("%s: couldn't establish native-PCI interrupt",
874 sc->sc_wdcdev.sc_dev.dv_xname);
875 if (intrstr != NULL)
876 printf(" at %s", intrstr);
877 printf("\n");
878 return 0;
879 }
880 }
881 cp->ih = sc->sc_pci_ih;
882 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
883 PCI_MAPREG_TYPE_IO, 0,
884 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
885 printf("%s: couldn't map %s channel cmd regs\n",
886 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
887 return 0;
888 }
889
890 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
891 PCI_MAPREG_TYPE_IO, 0,
892 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
893 printf("%s: couldn't map %s channel ctl regs\n",
894 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
895 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
896 return 0;
897 }
898 /*
899 * In native mode, 4 bytes of I/O space are mapped for the control
900 * register, the control register is at offset 2. Pass the generic
901 * code a handle for only one byte at the right offset.
902 */
903 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
904 &wdc_cp->ctl_ioh) != 0) {
905 printf("%s: unable to subregion %s channel ctl regs\n",
906 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
907 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
908 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
909 return 0;
910 }
911 return (1);
912 }
913
914 void
915 pciide_mapreg_dma(sc, pa)
916 struct pciide_softc *sc;
917 struct pci_attach_args *pa;
918 {
919 pcireg_t maptype;
920 bus_addr_t addr;
921
922 /*
923 * Map DMA registers
924 *
925 * Note that sc_dma_ok is the right variable to test to see if
926 * DMA can be done. If the interface doesn't support DMA,
927 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
928 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
929 * non-zero if the interface supports DMA and the registers
930 * could be mapped.
931 *
932 * XXX Note that despite the fact that the Bus Master IDE specs
933 * XXX say that "The bus master IDE function uses 16 bytes of IO
934 * XXX space," some controllers (at least the United
935 * XXX Microelectronics UM8886BF) place it in memory space.
936 */
937 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
938 PCIIDE_REG_BUS_MASTER_DMA);
939
940 switch (maptype) {
941 case PCI_MAPREG_TYPE_IO:
942 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
943 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
944 &addr, NULL, NULL) == 0);
945 if (sc->sc_dma_ok == 0) {
946 printf(", but unused (couldn't query registers)");
947 break;
948 }
949 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
950 && addr >= 0x10000) {
951 sc->sc_dma_ok = 0;
952 printf(", but unused (registers at unsafe address "
953 "%#lx)", (unsigned long)addr);
954 break;
955 }
956 /* FALLTHROUGH */
957
958 case PCI_MAPREG_MEM_TYPE_32BIT:
959 sc->sc_dma_ok = (pci_mapreg_map(pa,
960 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
961 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
962 sc->sc_dmat = pa->pa_dmat;
963 if (sc->sc_dma_ok == 0) {
964 printf(", but unused (couldn't map registers)");
965 } else {
966 sc->sc_wdcdev.dma_arg = sc;
967 sc->sc_wdcdev.dma_init = pciide_dma_init;
968 sc->sc_wdcdev.dma_start = pciide_dma_start;
969 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
970 }
971
972 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
973 PCIIDE_OPTIONS_NODMA) {
974 printf(", but unused (forced off by config file)");
975 sc->sc_dma_ok = 0;
976 }
977 break;
978
979 default:
980 sc->sc_dma_ok = 0;
981 printf(", but unsupported register maptype (0x%x)", maptype);
982 }
983 }
984
985 int
986 pciide_compat_intr(arg)
987 void *arg;
988 {
989 struct pciide_channel *cp = arg;
990
991 #ifdef DIAGNOSTIC
992 /* should only be called for a compat channel */
993 if (cp->compat == 0)
994 panic("pciide compat intr called for non-compat chan %p", cp);
995 #endif
996 return (wdcintr(&cp->wdc_channel));
997 }
998
999 int
1000 pciide_pci_intr(arg)
1001 void *arg;
1002 {
1003 struct pciide_softc *sc = arg;
1004 struct pciide_channel *cp;
1005 struct channel_softc *wdc_cp;
1006 int i, rv, crv;
1007
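	/*
	 * Poll every native-mode channel that is waiting for an interrupt
	 * and let wdcintr() decide whether to claim it.
	 */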
1008 rv = 0;
1009 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
1010 cp = &sc->pciide_channels[i];
1011 wdc_cp = &cp->wdc_channel;
1012
1013 /* If this is a compat channel, skip it. */
1014 if (cp->compat)
1015 continue;
1016 /* if this channel is not waiting for an intr, skip it */
1017 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
1018 continue;
1019
1020 crv = wdcintr(wdc_cp);
1021 if (crv == 0)
1022 ; /* leave rv alone */
1023 else if (crv == 1)
1024 rv = 1; /* claim the intr */
1025 else if (rv == 0) /* crv should be -1 in this case */
1026 rv = crv; /* if we've done no better, take it */
1027 }
1028 return (rv);
1029 }
1030
1031 void
1032 pciide_channel_dma_setup(cp)
1033 struct pciide_channel *cp;
1034 {
1035 int drive;
1036 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1037 struct ata_drive_datas *drvp;
1038
1039 for (drive = 0; drive < 2; drive++) {
1040 drvp = &cp->wdc_channel.ch_drive[drive];
1041 /* If no drive, skip */
1042 if ((drvp->drive_flags & DRIVE) == 0)
1043 continue;
1044 /* setup DMA if needed */
1045 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1046 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
1047 sc->sc_dma_ok == 0) {
1048 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1049 continue;
1050 }
1051 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1052 != 0) {
1053 /* Abort DMA setup */
1054 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1055 continue;
1056 }
1057 }
1058 }
1059
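/*
 * Allocate and map the physical region descriptor table for one drive,
 * plus a DMA map for the table itself and another for data transfers.
 */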
1060 int
1061 pciide_dma_table_setup(sc, channel, drive)
1062 struct pciide_softc *sc;
1063 int channel, drive;
1064 {
1065 bus_dma_segment_t seg;
1066 int error, rseg;
1067 const bus_size_t dma_table_size =
1068 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1069 struct pciide_dma_maps *dma_maps =
1070 &sc->pciide_channels[channel].dma_maps[drive];
1071
1072 /* If table was already allocated, just return */
1073 if (dma_maps->dma_table)
1074 return 0;
1075
1076 /* Allocate memory for the DMA tables and map it */
1077 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1078 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1079 BUS_DMA_NOWAIT)) != 0) {
1080 printf("%s:%d: unable to allocate table DMA for "
1081 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1082 channel, drive, error);
1083 return error;
1084 }
1085 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1086 dma_table_size,
1087 (caddr_t *)&dma_maps->dma_table,
1088 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1089 printf("%s:%d: unable to map table DMA for"
1090 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1091 channel, drive, error);
1092 return error;
1093 }
1094 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1095 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1096 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1097
1098 /* Create and load table DMA map for this disk */
1099 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1100 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1101 &dma_maps->dmamap_table)) != 0) {
1102 printf("%s:%d: unable to create table DMA map for "
1103 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1104 channel, drive, error);
1105 return error;
1106 }
1107 if ((error = bus_dmamap_load(sc->sc_dmat,
1108 dma_maps->dmamap_table,
1109 dma_maps->dma_table,
1110 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1111 printf("%s:%d: unable to load table DMA map for "
1112 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1113 channel, drive, error);
1114 return error;
1115 }
1116 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1117 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1118 DEBUG_PROBE);
1119 /* Create a xfer DMA map for this drive */
1120 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1121 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1122 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1123 &dma_maps->dmamap_xfer)) != 0) {
1124 printf("%s:%d: unable to create xfer DMA map for "
1125 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1126 channel, drive, error);
1127 return error;
1128 }
1129 return 0;
1130 }
1131
1132 int
1133 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1134 void *v;
1135 int channel, drive;
1136 void *databuf;
1137 size_t datalen;
1138 int flags;
1139 {
1140 struct pciide_softc *sc = v;
1141 int error, seg;
1142 struct pciide_dma_maps *dma_maps =
1143 &sc->pciide_channels[channel].dma_maps[drive];
1144
1145 error = bus_dmamap_load(sc->sc_dmat,
1146 dma_maps->dmamap_xfer,
1147 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1148 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1149 if (error) {
1150 printf("%s:%d: unable to load xfer DMA map for"
1151 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1152 channel, drive, error);
1153 return error;
1154 }
1155
1156 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1157 dma_maps->dmamap_xfer->dm_mapsize,
1158 (flags & WDC_DMA_READ) ?
1159 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1160
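	/*
	 * Fill the bus-master physical region descriptor table, one entry
	 * per DMA segment; the last entry gets the end-of-table (EOT) bit.
	 */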
1161 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1162 #ifdef DIAGNOSTIC
1163 /* A segment must not cross a 64k boundary */
1164 {
1165 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1166 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1167 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1168 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1169 printf("pciide_dma: segment %d physical addr 0x%lx"
1170 " len 0x%lx not properly aligned\n",
1171 seg, phys, len);
1172 panic("pciide_dma: buf align");
1173 }
1174 }
1175 #endif
1176 dma_maps->dma_table[seg].base_addr =
1177 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1178 dma_maps->dma_table[seg].byte_count =
1179 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1180 IDEDMA_BYTE_COUNT_MASK);
1181 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1182 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1183 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1184
1185 }
1186 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1187 htole32(IDEDMA_BYTE_COUNT_EOT);
1188
1189 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1190 dma_maps->dmamap_table->dm_mapsize,
1191 BUS_DMASYNC_PREWRITE);
1192
1193 /* Maps are ready; program the DMA engine (started later by pciide_dma_start) */
1194 #ifdef DIAGNOSTIC
1195 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1196 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1197 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1198 panic("pciide_dma_init: table align");
1199 }
1200 #endif
1201
1202 /* Clear status bits */
1203 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1204 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1205 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1206 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1207 /* Write table addr */
1208 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1209 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1210 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1211 /* set read/write */
1212 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1213 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1214 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1215 /* remember flags */
1216 dma_maps->dma_flags = flags;
1217 return 0;
1218 }
1219
1220 void
1221 pciide_dma_start(v, channel, drive)
1222 void *v;
1223 int channel, drive;
1224 {
1225 struct pciide_softc *sc = v;
1226
1227 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1228 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1229 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1230 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1231 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1232 }
1233
1234 int
1235 pciide_dma_finish(v, channel, drive, force)
1236 void *v;
1237 int channel, drive;
1238 int force;
1239 {
1240 struct pciide_softc *sc = v;
1241 u_int8_t status;
1242 int error = 0;
1243 struct pciide_dma_maps *dma_maps =
1244 &sc->pciide_channels[channel].dma_maps[drive];
1245
1246 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1247 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1248 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1249 DEBUG_XFERS);
1250
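	/*
	 * Unless forced, report "no interrupt" when the controller's
	 * interrupt bit isn't set; the interrupt wasn't for this channel.
	 */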
1251 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1252 return WDC_DMAST_NOIRQ;
1253
1254 /* stop DMA channel */
1255 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1256 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1257 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1258 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1259
1260 /* Unload the map of the data buffer */
1261 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1262 dma_maps->dmamap_xfer->dm_mapsize,
1263 (dma_maps->dma_flags & WDC_DMA_READ) ?
1264 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1265 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1266
1267 if ((status & IDEDMA_CTL_ERR) != 0) {
1268 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1269 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1270 error |= WDC_DMAST_ERR;
1271 }
1272
1273 if ((status & IDEDMA_CTL_INTR) == 0) {
1274 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1275 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1276 drive, status);
1277 error |= WDC_DMAST_NOIRQ;
1278 }
1279
1280 if ((status & IDEDMA_CTL_ACT) != 0) {
1281 /* data underrun, may be a valid condition for ATAPI */
1282 error |= WDC_DMAST_UNDER;
1283 }
1284 return error;
1285 }
1286
1287 void
1288 pciide_irqack(chp)
1289 struct channel_softc *chp;
1290 {
1291 struct pciide_channel *cp = (struct pciide_channel*)chp;
1292 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1293
1294 /* clear status bits in IDE DMA registers */
1295 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1296 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1297 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1298 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1299 }
1300
1301 /* some common code used by several chip_map */
1302 int
1303 pciide_chansetup(sc, channel, interface)
1304 struct pciide_softc *sc;
1305 int channel;
1306 pcireg_t interface;
1307 {
1308 struct pciide_channel *cp = &sc->pciide_channels[channel];
1309 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1310 cp->name = PCIIDE_CHANNEL_NAME(channel);
1311 cp->wdc_channel.channel = channel;
1312 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1313 cp->wdc_channel.ch_queue =
1314 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1315 if (cp->wdc_channel.ch_queue == NULL) {
1316 printf("%s %s channel: "
1317 "can't allocate memory for command queue",
1318 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1319 return 0;
1320 }
1321 printf("%s: %s channel %s to %s mode\n",
1322 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1323 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1324 "configured" : "wired",
1325 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1326 "native-PCI" : "compatibility");
1327 return 1;
1328 }
1329
1330 /* some common code used by several chip channel_map */
1331 void
1332 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1333 struct pci_attach_args *pa;
1334 struct pciide_channel *cp;
1335 pcireg_t interface;
1336 bus_size_t *cmdsizep, *ctlsizep;
1337 int (*pci_intr) __P((void *));
1338 {
1339 struct channel_softc *wdc_cp = &cp->wdc_channel;
1340
1341 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1342 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1343 pci_intr);
1344 else
1345 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1346 wdc_cp->channel, cmdsizep, ctlsizep);
1347
1348 if (cp->hw_ok == 0)
1349 return;
1350 wdc_cp->data32iot = wdc_cp->cmd_iot;
1351 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1352 wdcattach(wdc_cp);
1353 }
1354
1355 /*
1356 * Generic code to determine whether a channel can be disabled. Returns 1
1357 * if the channel can be disabled, 0 if not.
1358 */
1359 int
1360 pciide_chan_candisable(cp)
1361 struct pciide_channel *cp;
1362 {
1363 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1364 struct channel_softc *wdc_cp = &cp->wdc_channel;
1365
1366 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1367 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1368 printf("%s: disabling %s channel (no drives)\n",
1369 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1370 cp->hw_ok = 0;
1371 return 1;
1372 }
1373 return 0;
1374 }
1375
1376 /*
1377 * Generic code to map the compat intr if hw_ok=1 and this is a compat channel.
1378 * Sets hw_ok=0 on failure.
1379 */
1380 void
1381 pciide_map_compat_intr(pa, cp, compatchan, interface)
1382 struct pci_attach_args *pa;
1383 struct pciide_channel *cp;
1384 int compatchan, interface;
1385 {
1386 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1387 struct channel_softc *wdc_cp = &cp->wdc_channel;
1388
1389 if (cp->hw_ok == 0)
1390 return;
1391 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1392 return;
1393
1394 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1395 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1396 pa, compatchan, pciide_compat_intr, cp);
1397 if (cp->ih == NULL) {
1398 #endif
1399 printf("%s: no compatibility interrupt for use by %s "
1400 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1401 cp->hw_ok = 0;
1402 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1403 }
1404 #endif
1405 }
1406
1407 void
1408 pciide_print_modes(cp)
1409 struct pciide_channel *cp;
1410 {
1411 wdc_print_modes(&cp->wdc_channel);
1412 }
1413
1414 void
1415 default_chip_map(sc, pa)
1416 struct pciide_softc *sc;
1417 struct pci_attach_args *pa;
1418 {
1419 struct pciide_channel *cp;
1420 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1421 pcireg_t csr;
1422 int channel, drive;
1423 struct ata_drive_datas *drvp;
1424 u_int8_t idedma_ctl;
1425 bus_size_t cmdsize, ctlsize;
1426 char *failreason;
1427
1428 if (pciide_chipen(sc, pa) == 0)
1429 return;
1430
1431 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1432 printf("%s: bus-master DMA support present",
1433 sc->sc_wdcdev.sc_dev.dv_xname);
1434 if (sc->sc_pp == &default_product_desc &&
1435 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1436 PCIIDE_OPTIONS_DMA) == 0) {
1437 printf(", but unused (no driver support)");
1438 sc->sc_dma_ok = 0;
1439 } else {
1440 pciide_mapreg_dma(sc, pa);
1441 if (sc->sc_dma_ok != 0)
1442 printf(", used without full driver "
1443 "support");
1444 }
1445 } else {
1446 printf("%s: hardware does not support DMA",
1447 sc->sc_wdcdev.sc_dev.dv_xname);
1448 sc->sc_dma_ok = 0;
1449 }
1450 printf("\n");
1451 if (sc->sc_dma_ok) {
1452 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1453 sc->sc_wdcdev.irqack = pciide_irqack;
1454 }
1455 sc->sc_wdcdev.PIO_cap = 0;
1456 sc->sc_wdcdev.DMA_cap = 0;
1457
1458 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1459 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1460 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1461
1462 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1463 cp = &sc->pciide_channels[channel];
1464 if (pciide_chansetup(sc, channel, interface) == 0)
1465 continue;
1466 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1467 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1468 &ctlsize, pciide_pci_intr);
1469 } else {
1470 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1471 channel, &cmdsize, &ctlsize);
1472 }
1473 if (cp->hw_ok == 0)
1474 continue;
1475 /*
1476 * Check to see if something appears to be there.
1477 */
1478 failreason = NULL;
1479 if (!wdcprobe(&cp->wdc_channel)) {
1480 failreason = "not responding; disabled or no drives?";
1481 goto next;
1482 }
1483 /*
1484 * Now, make sure it's actually attributable to this PCI IDE
1485 * channel by trying to access the channel again while the
1486 * PCI IDE controller's I/O space is disabled. (If the
1487 * channel no longer appears to be there, it belongs to
1488 * this controller.) YUCK!
1489 */
1490 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1491 PCI_COMMAND_STATUS_REG);
1492 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1493 csr & ~PCI_COMMAND_IO_ENABLE);
1494 if (wdcprobe(&cp->wdc_channel))
1495 failreason = "other hardware responding at addresses";
1496 pci_conf_write(sc->sc_pc, sc->sc_tag,
1497 PCI_COMMAND_STATUS_REG, csr);
1498 next:
1499 if (failreason) {
1500 printf("%s: %s channel ignored (%s)\n",
1501 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1502 failreason);
1503 cp->hw_ok = 0;
1504 bus_space_unmap(cp->wdc_channel.cmd_iot,
1505 cp->wdc_channel.cmd_ioh, cmdsize);
1506 if (interface & PCIIDE_INTERFACE_PCI(channel))
1507 bus_space_unmap(cp->wdc_channel.ctl_iot,
1508 cp->ctl_baseioh, ctlsize);
1509 else
1510 bus_space_unmap(cp->wdc_channel.ctl_iot,
1511 cp->wdc_channel.ctl_ioh, ctlsize);
1512 } else {
1513 pciide_map_compat_intr(pa, cp, channel, interface);
1514 }
1515 if (cp->hw_ok) {
1516 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1517 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1518 wdcattach(&cp->wdc_channel);
1519 }
1520 }
1521
1522 if (sc->sc_dma_ok == 0)
1523 return;
1524
1525 /* Allocate DMA maps */
1526 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1527 idedma_ctl = 0;
1528 cp = &sc->pciide_channels[channel];
1529 for (drive = 0; drive < 2; drive++) {
1530 drvp = &cp->wdc_channel.ch_drive[drive];
1531 /* If no drive, skip */
1532 if ((drvp->drive_flags & DRIVE) == 0)
1533 continue;
1534 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1535 continue;
1536 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1537 /* Abort DMA setup */
1538 printf("%s:%d:%d: can't allocate DMA maps, "
1539 "using PIO transfers\n",
1540 sc->sc_wdcdev.sc_dev.dv_xname,
1541 channel, drive);
1542 drvp->drive_flags &= ~DRIVE_DMA;
	continue;
1543 }
1544 printf("%s:%d:%d: using DMA data transfers\n",
1545 sc->sc_wdcdev.sc_dev.dv_xname,
1546 channel, drive);
1547 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1548 }
1549 if (idedma_ctl != 0) {
1550 /* Add software bits in status register */
1551 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1552 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1553 idedma_ctl);
1554 }
1555 }
1556 }
1557
1558 void
1559 piix_chip_map(sc, pa)
1560 struct pciide_softc *sc;
1561 struct pci_attach_args *pa;
1562 {
1563 struct pciide_channel *cp;
1564 int channel;
1565 u_int32_t idetim;
1566 bus_size_t cmdsize, ctlsize;
1567
1568 if (pciide_chipen(sc, pa) == 0)
1569 return;
1570
1571 printf("%s: bus-master DMA support present",
1572 sc->sc_wdcdev.sc_dev.dv_xname);
1573 pciide_mapreg_dma(sc, pa);
1574 printf("\n");
1575 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1576 WDC_CAPABILITY_MODE;
1577 if (sc->sc_dma_ok) {
1578 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1579 sc->sc_wdcdev.irqack = pciide_irqack;
1580 switch(sc->sc_pp->ide_product) {
1581 case PCI_PRODUCT_INTEL_82371AB_IDE:
1582 case PCI_PRODUCT_INTEL_82440MX_IDE:
1583 case PCI_PRODUCT_INTEL_82801AA_IDE:
1584 case PCI_PRODUCT_INTEL_82801AB_IDE:
1585 case PCI_PRODUCT_INTEL_82801BA_IDE:
1586 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1587 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1588 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1589 case PCI_PRODUCT_INTEL_82801DB_IDE:
1590 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1591 }
1592 }
1593 sc->sc_wdcdev.PIO_cap = 4;
1594 sc->sc_wdcdev.DMA_cap = 2;
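	/*
	 * Highest Ultra-DMA mode: UDMA4 (ATA/66) on the 82801AA (ICH),
	 * UDMA5 (ATA/100) on ICH2 and later, UDMA2 (ATA/33) otherwise.
	 */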
1595 switch(sc->sc_pp->ide_product) {
1596 case PCI_PRODUCT_INTEL_82801AA_IDE:
1597 sc->sc_wdcdev.UDMA_cap = 4;
1598 break;
1599 case PCI_PRODUCT_INTEL_82801BA_IDE:
1600 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1601 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1602 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1603 case PCI_PRODUCT_INTEL_82801DB_IDE:
1604 sc->sc_wdcdev.UDMA_cap = 5;
1605 break;
1606 default:
1607 sc->sc_wdcdev.UDMA_cap = 2;
1608 }
1609 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1610 sc->sc_wdcdev.set_modes = piix_setup_channel;
1611 else
1612 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1613 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1614 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1615
1616 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1617 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1618 DEBUG_PROBE);
1619 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1620 WDCDEBUG_PRINT((", sidetim=0x%x",
1621 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1622 DEBUG_PROBE);
1623 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1624 WDCDEBUG_PRINT((", udamreg 0x%x",
1625 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1626 DEBUG_PROBE);
1627 }
1628 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1629 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1630 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1631 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1632 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1633 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1634 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1635 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1636 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1637 DEBUG_PROBE);
1638 }
1639
1640 }
1641 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1642
1643 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1644 cp = &sc->pciide_channels[channel];
1645 /* PIIX is compat-only */
1646 if (pciide_chansetup(sc, channel, 0) == 0)
1647 continue;
1648 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1649 if ((PIIX_IDETIM_READ(idetim, channel) &
1650 PIIX_IDETIM_IDE) == 0) {
1651 printf("%s: %s channel ignored (disabled)\n",
1652 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1653 continue;
1654 }
1655 /* PIIX are compat-only pciide devices */
1656 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1657 if (cp->hw_ok == 0)
1658 continue;
1659 if (pciide_chan_candisable(cp)) {
1660 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1661 channel);
1662 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1663 idetim);
1664 }
1665 pciide_map_compat_intr(pa, cp, channel, 0);
1666 if (cp->hw_ok == 0)
1667 continue;
1668 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1669 }
1670
1671 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1672 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1673 DEBUG_PROBE);
1674 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1675 WDCDEBUG_PRINT((", sidetim=0x%x",
1676 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1677 DEBUG_PROBE);
1678 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1679 WDCDEBUG_PRINT((", udamreg 0x%x",
1680 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1681 DEBUG_PROBE);
1682 }
1683 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1684 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1685 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1686 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1687 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1688 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1689 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1690 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1691 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1692 DEBUG_PROBE);
1693 }
1694 }
1695 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1696 }
1697
1698 void
1699 piix_setup_channel(chp)
1700 struct channel_softc *chp;
1701 {
1702 u_int8_t mode[2], drive;
1703 u_int32_t oidetim, idetim, idedma_ctl;
1704 struct pciide_channel *cp = (struct pciide_channel*)chp;
1705 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1706 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1707
1708 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1709 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1710 idedma_ctl = 0;
1711
1712 /* set up new idetim: Enable IDE registers decode */
1713 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1714 chp->channel);
1715
1716 /* setup DMA */
1717 pciide_channel_dma_setup(cp);
1718
1719 /*
1720 * Here we have to mess with the drives' modes: the PIIX can't have
1721 * different timings for the master and slave drives.
1722 * We need to find the best combination.
1723 */
1724
1725 /* If both drives support DMA, take the lower mode */
1726 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1727 (drvp[1].drive_flags & DRIVE_DMA)) {
1728 mode[0] = mode[1] =
1729 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1730 drvp[0].DMA_mode = mode[0];
1731 drvp[1].DMA_mode = mode[1];
1732 goto ok;
1733 }
1734 /*
1735 * If only one drive supports DMA, use its mode, and
1736 * put the other one in PIO mode 0 if its mode is not compatible
1737 */
1738 if (drvp[0].drive_flags & DRIVE_DMA) {
1739 mode[0] = drvp[0].DMA_mode;
1740 mode[1] = drvp[1].PIO_mode;
1741 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1742 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1743 mode[1] = drvp[1].PIO_mode = 0;
1744 goto ok;
1745 }
1746 if (drvp[1].drive_flags & DRIVE_DMA) {
1747 mode[1] = drvp[1].DMA_mode;
1748 mode[0] = drvp[0].PIO_mode;
1749 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1750 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1751 mode[0] = drvp[0].PIO_mode = 0;
1752 goto ok;
1753 }
1754 /*
1755 * If neither drive is using DMA, take the lower mode, unless
1756 * one of them is below PIO mode 2
1757 */
1758 if (drvp[0].PIO_mode < 2) {
1759 mode[0] = drvp[0].PIO_mode = 0;
1760 mode[1] = drvp[1].PIO_mode;
1761 } else if (drvp[1].PIO_mode < 2) {
1762 mode[1] = drvp[1].PIO_mode = 0;
1763 mode[0] = drvp[0].PIO_mode;
1764 } else {
1765 mode[0] = mode[1] =
1766 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1767 drvp[0].PIO_mode = mode[0];
1768 drvp[1].PIO_mode = mode[1];
1769 }
1770 ok: /* The modes are set up */
1771 for (drive = 0; drive < 2; drive++) {
1772 if (drvp[drive].drive_flags & DRIVE_DMA) {
1773 idetim |= piix_setup_idetim_timings(
1774 mode[drive], 1, chp->channel);
1775 goto end;
1776 }
1777 }
1778 /* If we get here, neither drive is using DMA */
1779 if (mode[0] >= 2)
1780 idetim |= piix_setup_idetim_timings(
1781 mode[0], 0, chp->channel);
1782 else
1783 idetim |= piix_setup_idetim_timings(
1784 mode[1], 0, chp->channel);
1785 end: /*
1786 * timing mode is now set up in the controller. Enable
1787 * it per-drive
1788 */
1789 for (drive = 0; drive < 2; drive++) {
1790 /* If no drive, skip */
1791 if ((drvp[drive].drive_flags & DRIVE) == 0)
1792 continue;
1793 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1794 if (drvp[drive].drive_flags & DRIVE_DMA)
1795 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1796 }
1797 if (idedma_ctl != 0) {
1798 /* Add software bits in status register */
1799 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1800 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1801 idedma_ctl);
1802 }
1803 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1804 pciide_print_modes(cp);
1805 }
1806
1807 void
1808 piix3_4_setup_channel(chp)
1809 struct channel_softc *chp;
1810 {
1811 struct ata_drive_datas *drvp;
1812 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1813 struct pciide_channel *cp = (struct pciide_channel*)chp;
1814 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1815 int drive;
1816 int channel = chp->channel;
1817
1818 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1819 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1820 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1821 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1822 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1823 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1824 PIIX_SIDETIM_RTC_MASK(channel));
1825
1826 idedma_ctl = 0;
1827 /* If channel disabled, no need to go further */
1828 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1829 return;
1830 	/* set up new idetim: enable IDE register decoding */
1831 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1832
1833 /* setup DMA if needed */
1834 pciide_channel_dma_setup(cp);
1835
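	/*
	 * The loop below rebuilds the per-drive transfer setup from
	 * scratch: each drive gets either its Ultra DMA bits (udmareg)
	 * or Multiword DMA timings (IDETIM for drive 0, SIDETIM for
	 * drive 1), and in all cases the PIO timing/enable bits set at
	 * the "pio" label.
	 */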
1836 for (drive = 0; drive < 2; drive++) {
1837 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1838 PIIX_UDMATIM_SET(0x3, channel, drive));
1839 drvp = &chp->ch_drive[drive];
1840 /* If no drive, skip */
1841 if ((drvp->drive_flags & DRIVE) == 0)
1842 continue;
1843 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1844 (drvp->drive_flags & DRIVE_UDMA) == 0))
1845 goto pio;
1846
1847 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1848 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1849 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1850 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1851 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1852 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1853 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1854 ideconf |= PIIX_CONFIG_PINGPONG;
1855 }
1856 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1857 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1858 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1859 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1860 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1861 /* setup Ultra/100 */
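			/*
			 * Modes above Ultra/33 need an 80-conductor cable;
			 * PIIX_CONFIG_CR is presumably the per-drive cable
			 * report bit, so cap at UDMA2 when it is clear.
			 */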
1862 if (drvp->UDMA_mode > 2 &&
1863 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1864 drvp->UDMA_mode = 2;
1865 if (drvp->UDMA_mode > 4) {
1866 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1867 } else {
1868 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1869 if (drvp->UDMA_mode > 2) {
1870 ideconf |= PIIX_CONFIG_UDMA66(channel,
1871 drive);
1872 } else {
1873 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1874 drive);
1875 }
1876 }
1877 }
1878 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1879 /* setup Ultra/66 */
1880 if (drvp->UDMA_mode > 2 &&
1881 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1882 drvp->UDMA_mode = 2;
1883 if (drvp->UDMA_mode > 2)
1884 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1885 else
1886 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1887 }
1888 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1889 (drvp->drive_flags & DRIVE_UDMA)) {
1890 /* use Ultra/DMA */
1891 drvp->drive_flags &= ~DRIVE_DMA;
1892 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1893 udmareg |= PIIX_UDMATIM_SET(
1894 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1895 } else {
1896 /* use Multiword DMA */
1897 drvp->drive_flags &= ~DRIVE_UDMA;
1898 if (drive == 0) {
1899 idetim |= piix_setup_idetim_timings(
1900 drvp->DMA_mode, 1, channel);
1901 } else {
1902 sidetim |= piix_setup_sidetim_timings(
1903 drvp->DMA_mode, 1, channel);
1904 				idetim = PIIX_IDETIM_SET(idetim,
1905 PIIX_IDETIM_SITRE, channel);
1906 }
1907 }
1908 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1909
1910 pio: /* use PIO mode */
1911 idetim |= piix_setup_idetim_drvs(drvp);
1912 if (drive == 0) {
1913 idetim |= piix_setup_idetim_timings(
1914 drvp->PIO_mode, 0, channel);
1915 } else {
1916 sidetim |= piix_setup_sidetim_timings(
1917 drvp->PIO_mode, 0, channel);
1918 			idetim = PIIX_IDETIM_SET(idetim,
1919 PIIX_IDETIM_SITRE, channel);
1920 }
1921 }
1922 if (idedma_ctl != 0) {
1923 /* Add software bits in status register */
1924 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1925 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1926 idedma_ctl);
1927 }
1928 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1929 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1930 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1931 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1932 pciide_print_modes(cp);
1933 }
1934
1935
1936 /* setup ISP and RTC fields, based on mode */
1937 static u_int32_t
1938 piix_setup_idetim_timings(mode, dma, channel)
1939 u_int8_t mode;
1940 u_int8_t dma;
1941 u_int8_t channel;
1942 {
1943
1944 if (dma)
1945 return PIIX_IDETIM_SET(0,
1946 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1947 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1948 channel);
1949 else
1950 return PIIX_IDETIM_SET(0,
1951 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1952 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1953 channel);
1954 }
1955
1956 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1957 static u_int32_t
1958 piix_setup_idetim_drvs(drvp)
1959 struct ata_drive_datas *drvp;
1960 {
1961 u_int32_t ret = 0;
1962 struct channel_softc *chp = drvp->chnl_softc;
1963 u_int8_t channel = chp->channel;
1964 u_int8_t drive = drvp->drive;
1965
1966 	/*
1967 	 * If the drive is using UDMA, the timing setup is independent,
1968 	 * so just check DMA and PIO here.
1969 	 */
1970 if (drvp->drive_flags & DRIVE_DMA) {
1971 /* if mode = DMA mode 0, use compatible timings */
1972 if ((drvp->drive_flags & DRIVE_DMA) &&
1973 drvp->DMA_mode == 0) {
1974 drvp->PIO_mode = 0;
1975 return ret;
1976 }
1977 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1978 		/*
1979 		 * If the PIO and DMA timings are the same, use fast timings
1980 		 * for PIO too; otherwise use compatible timings.
1981 		 */
1982 if ((piix_isp_pio[drvp->PIO_mode] !=
1983 piix_isp_dma[drvp->DMA_mode]) ||
1984 (piix_rtc_pio[drvp->PIO_mode] !=
1985 piix_rtc_dma[drvp->DMA_mode]))
1986 drvp->PIO_mode = 0;
1987 /* if PIO mode <= 2, use compat timings for PIO */
1988 if (drvp->PIO_mode <= 2) {
1989 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1990 channel);
1991 return ret;
1992 }
1993 }
1994
1995 /*
1996 * Now setup PIO modes. If mode < 2, use compat timings.
1997 * Else enable fast timings. Enable IORDY and prefetch/post
1998 * if PIO mode >= 3.
1999 */
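	/*
	 * In practice: PIO 0/1 leave the compatible timings alone,
	 * PIO 2 sets only TIME, and PIO 3/4 set TIME, IE and PPE.
	 */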
2000
2001 if (drvp->PIO_mode < 2)
2002 return ret;
2003
2004 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
2005 if (drvp->PIO_mode >= 3) {
2006 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
2007 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
2008 }
2009 return ret;
2010 }
2011
2012 /* setup values in SIDETIM registers, based on mode */
2013 static u_int32_t
2014 piix_setup_sidetim_timings(mode, dma, channel)
2015 u_int8_t mode;
2016 u_int8_t dma;
2017 u_int8_t channel;
2018 {
2019 if (dma)
2020 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
2021 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
2022 else
2023 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
2024 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
2025 }
2026
2027 void
2028 amd7x6_chip_map(sc, pa)
2029 struct pciide_softc *sc;
2030 struct pci_attach_args *pa;
2031 {
2032 struct pciide_channel *cp;
2033 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2034 int channel;
2035 pcireg_t chanenable;
2036 bus_size_t cmdsize, ctlsize;
2037
2038 if (pciide_chipen(sc, pa) == 0)
2039 return;
2040 printf("%s: bus-master DMA support present",
2041 sc->sc_wdcdev.sc_dev.dv_xname);
2042 pciide_mapreg_dma(sc, pa);
2043 printf("\n");
2044 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2045 WDC_CAPABILITY_MODE;
2046 if (sc->sc_dma_ok) {
2047 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2048 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2049 sc->sc_wdcdev.irqack = pciide_irqack;
2050 }
2051 sc->sc_wdcdev.PIO_cap = 4;
2052 sc->sc_wdcdev.DMA_cap = 2;
2053
2054 switch (sc->sc_pci_vendor) {
2055 case PCI_VENDOR_AMD:
2056 switch (sc->sc_pp->ide_product) {
2057 case PCI_PRODUCT_AMD_PBC766_IDE:
2058 case PCI_PRODUCT_AMD_PBC768_IDE:
2059 case PCI_PRODUCT_AMD_PBC8111_IDE:
2060 sc->sc_wdcdev.UDMA_cap = 5;
2061 break;
2062 default:
2063 sc->sc_wdcdev.UDMA_cap = 4;
2064 }
2065 sc->sc_amd_regbase = AMD7X6_AMD_REGBASE;
2066 break;
2067
2068 case PCI_VENDOR_NVIDIA:
2069 switch (sc->sc_pp->ide_product) {
2070 case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
2071 sc->sc_wdcdev.UDMA_cap = 5;
2072 break;
2073 case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
2074 sc->sc_wdcdev.UDMA_cap = 6;
2075 break;
2076 }
2077 sc->sc_amd_regbase = AMD7X6_NVIDIA_REGBASE;
2078 break;
2079
2080 default:
2081 panic("amd7x6_chip_map: unknown vendor");
2082 }
2083 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2084 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2085 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2086 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag,
2087 AMD7X6_CHANSTATUS_EN(sc));
2088
2089 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2090 DEBUG_PROBE);
2091 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2092 cp = &sc->pciide_channels[channel];
2093 if (pciide_chansetup(sc, channel, interface) == 0)
2094 continue;
2095
2096 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2097 printf("%s: %s channel ignored (disabled)\n",
2098 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2099 continue;
2100 }
2101 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2102 pciide_pci_intr);
2103
2104 if (pciide_chan_candisable(cp))
2105 chanenable &= ~AMD7X6_CHAN_EN(channel);
2106 pciide_map_compat_intr(pa, cp, channel, interface);
2107 if (cp->hw_ok == 0)
2108 continue;
2109
2110 amd7x6_setup_channel(&cp->wdc_channel);
2111 }
2112 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN(sc),
2113 chanenable);
2114 return;
2115 }
2116
2117 void
2118 amd7x6_setup_channel(chp)
2119 struct channel_softc *chp;
2120 {
2121 u_int32_t udmatim_reg, datatim_reg;
2122 u_int8_t idedma_ctl;
2123 int mode, drive;
2124 struct ata_drive_datas *drvp;
2125 struct pciide_channel *cp = (struct pciide_channel*)chp;
2126 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2127 #ifndef PCIIDE_AMD756_ENABLEDMA
2128 int rev = PCI_REVISION(
2129 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2130 #endif
2131
2132 idedma_ctl = 0;
2133 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc));
2134 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc));
2135 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2136 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2137
2138 /* setup DMA if needed */
2139 pciide_channel_dma_setup(cp);
2140
2141 for (drive = 0; drive < 2; drive++) {
2142 drvp = &chp->ch_drive[drive];
2143 /* If no drive, skip */
2144 if ((drvp->drive_flags & DRIVE) == 0)
2145 continue;
2146 /* add timing values, setup DMA if needed */
2147 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2148 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2149 mode = drvp->PIO_mode;
2150 goto pio;
2151 }
2152 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2153 (drvp->drive_flags & DRIVE_UDMA)) {
2154 /* use Ultra/DMA */
2155 drvp->drive_flags &= ~DRIVE_DMA;
2156 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2157 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2158 AMD7X6_UDMA_TIME(chp->channel, drive,
2159 amd7x6_udma_tim[drvp->UDMA_mode]);
2160 /* can use PIO timings, MW DMA unused */
2161 mode = drvp->PIO_mode;
2162 } else {
2163 /* use Multiword DMA, but only if revision is OK */
2164 drvp->drive_flags &= ~DRIVE_UDMA;
2165 #ifndef PCIIDE_AMD756_ENABLEDMA
2166 			/*
2167 			 * The workaround doesn't seem to be necessary
2168 			 * with all drives, so it can be disabled with
2169 			 * PCIIDE_AMD756_ENABLEDMA. The bug causes a hard
2170 			 * hang if triggered.
2171 			 */
2172 if (sc->sc_pci_vendor == PCI_VENDOR_AMD &&
2173 sc->sc_pp->ide_product ==
2174 PCI_PRODUCT_AMD_PBC756_IDE &&
2175 AMD756_CHIPREV_DISABLEDMA(rev)) {
2176 printf("%s:%d:%d: multi-word DMA disabled due "
2177 "to chip revision\n",
2178 sc->sc_wdcdev.sc_dev.dv_xname,
2179 chp->channel, drive);
2180 mode = drvp->PIO_mode;
2181 drvp->drive_flags &= ~DRIVE_DMA;
2182 goto pio;
2183 }
2184 #endif
2185 /* mode = min(pio, dma+2) */
2186 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2187 mode = drvp->PIO_mode;
2188 else
2189 mode = drvp->DMA_mode + 2;
2190 }
2191 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2192
2193 pio: /* setup PIO mode */
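		/*
		 * A single mode value drives both timing fields here: the
		 * pulse and recovery counts are looked up by PIO mode, and
		 * MW DMA is kept in step as DMA_mode = mode - 2, so
		 * anything at or below mode 2 collapses to mode 0
		 * (compatible PIO 0 / MW DMA 0 timings).
		 */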
2194 if (mode <= 2) {
2195 drvp->DMA_mode = 0;
2196 drvp->PIO_mode = 0;
2197 mode = 0;
2198 } else {
2199 drvp->PIO_mode = mode;
2200 drvp->DMA_mode = mode - 2;
2201 }
2202 datatim_reg |=
2203 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2204 amd7x6_pio_set[mode]) |
2205 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2206 amd7x6_pio_rec[mode]);
2207 }
2208 if (idedma_ctl != 0) {
2209 /* Add software bits in status register */
2210 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2211 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2212 idedma_ctl);
2213 }
2214 pciide_print_modes(cp);
2215 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc), datatim_reg);
2216 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc), udmatim_reg);
2217 }
2218
2219 void
2220 apollo_chip_map(sc, pa)
2221 struct pciide_softc *sc;
2222 struct pci_attach_args *pa;
2223 {
2224 struct pciide_channel *cp;
2225 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2226 int channel;
2227 u_int32_t ideconf;
2228 bus_size_t cmdsize, ctlsize;
2229 pcitag_t pcib_tag;
2230 pcireg_t pcib_id, pcib_class;
2231
2232 if (pciide_chipen(sc, pa) == 0)
2233 return;
2234 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2235 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2236 /* and read ID and rev of the ISA bridge */
2237 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2238 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2239 printf(": VIA Technologies ");
2240 switch (PCI_PRODUCT(pcib_id)) {
2241 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2242 printf("VT82C586 (Apollo VP) ");
2243 		if (PCI_REVISION(pcib_class) >= 0x02) {
2244 printf("ATA33 controller\n");
2245 sc->sc_wdcdev.UDMA_cap = 2;
2246 } else {
2247 printf("controller\n");
2248 sc->sc_wdcdev.UDMA_cap = 0;
2249 }
2250 break;
2251 case PCI_PRODUCT_VIATECH_VT82C596A:
2252 printf("VT82C596A (Apollo Pro) ");
2253 if (PCI_REVISION(pcib_class) >= 0x12) {
2254 printf("ATA66 controller\n");
2255 sc->sc_wdcdev.UDMA_cap = 4;
2256 } else {
2257 printf("ATA33 controller\n");
2258 sc->sc_wdcdev.UDMA_cap = 2;
2259 }
2260 break;
2261 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2262 printf("VT82C686A (Apollo KX133) ");
2263 if (PCI_REVISION(pcib_class) >= 0x40) {
2264 printf("ATA100 controller\n");
2265 sc->sc_wdcdev.UDMA_cap = 5;
2266 } else {
2267 printf("ATA66 controller\n");
2268 sc->sc_wdcdev.UDMA_cap = 4;
2269 }
2270 break;
2271 case PCI_PRODUCT_VIATECH_VT8231:
2272 printf("VT8231 ATA100 controller\n");
2273 sc->sc_wdcdev.UDMA_cap = 5;
2274 break;
2275 case PCI_PRODUCT_VIATECH_VT8233:
2276 printf("VT8233 ATA100 controller\n");
2277 sc->sc_wdcdev.UDMA_cap = 5;
2278 break;
2279 case PCI_PRODUCT_VIATECH_VT8233A:
2280 printf("VT8233A ATA133 controller\n");
2281 sc->sc_wdcdev.UDMA_cap = 6;
2282 break;
2283 case PCI_PRODUCT_VIATECH_VT8235:
2284 printf("VT8235 ATA133 controller\n");
2285 sc->sc_wdcdev.UDMA_cap = 6;
2286 break;
2287 default:
2288 printf("unknown ATA controller\n");
2289 sc->sc_wdcdev.UDMA_cap = 0;
2290 }
2291
2292 printf("%s: bus-master DMA support present",
2293 sc->sc_wdcdev.sc_dev.dv_xname);
2294 pciide_mapreg_dma(sc, pa);
2295 printf("\n");
2296 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2297 WDC_CAPABILITY_MODE;
2298 if (sc->sc_dma_ok) {
2299 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2300 sc->sc_wdcdev.irqack = pciide_irqack;
2301 if (sc->sc_wdcdev.UDMA_cap > 0)
2302 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2303 }
2304 sc->sc_wdcdev.PIO_cap = 4;
2305 sc->sc_wdcdev.DMA_cap = 2;
2306 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2307 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2308 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2309
2310 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2311 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2312 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2313 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2314 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2315 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2316 DEBUG_PROBE);
2317
2318 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2319 cp = &sc->pciide_channels[channel];
2320 if (pciide_chansetup(sc, channel, interface) == 0)
2321 continue;
2322
2323 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2324 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2325 printf("%s: %s channel ignored (disabled)\n",
2326 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2327 continue;
2328 }
2329 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2330 pciide_pci_intr);
2331 if (cp->hw_ok == 0)
2332 continue;
2333 if (pciide_chan_candisable(cp)) {
2334 ideconf &= ~APO_IDECONF_EN(channel);
2335 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2336 ideconf);
2337 }
2338 pciide_map_compat_intr(pa, cp, channel, interface);
2339
2340 if (cp->hw_ok == 0)
2341 continue;
2342 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2343 }
2344 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2345 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2346 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2347 }
2348
2349 void
2350 apollo_setup_channel(chp)
2351 struct channel_softc *chp;
2352 {
2353 u_int32_t udmatim_reg, datatim_reg;
2354 u_int8_t idedma_ctl;
2355 int mode, drive;
2356 struct ata_drive_datas *drvp;
2357 struct pciide_channel *cp = (struct pciide_channel*)chp;
2358 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2359
2360 idedma_ctl = 0;
2361 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2362 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2363 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2364 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2365
2366 /* setup DMA if needed */
2367 pciide_channel_dma_setup(cp);
2368
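	/*
	 * The UDMA timing value below comes from a table selected by
	 * UDMA_cap: apollo_udma133_tim (cap 6), apollo_udma100_tim (cap 5),
	 * apollo_udma66_tim (cap 4, which also needs the 66MHz clock bit),
	 * or apollo_udma33_tim otherwise.
	 */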
2369 for (drive = 0; drive < 2; drive++) {
2370 drvp = &chp->ch_drive[drive];
2371 /* If no drive, skip */
2372 if ((drvp->drive_flags & DRIVE) == 0)
2373 continue;
2374 /* add timing values, setup DMA if needed */
2375 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2376 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2377 mode = drvp->PIO_mode;
2378 goto pio;
2379 }
2380 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2381 (drvp->drive_flags & DRIVE_UDMA)) {
2382 /* use Ultra/DMA */
2383 drvp->drive_flags &= ~DRIVE_DMA;
2384 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2385 APO_UDMA_EN_MTH(chp->channel, drive);
2386 if (sc->sc_wdcdev.UDMA_cap == 6) {
2387 /* 8233a */
2388 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2389 drive, apollo_udma133_tim[drvp->UDMA_mode]);
2390 } else if (sc->sc_wdcdev.UDMA_cap == 5) {
2391 /* 686b */
2392 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2393 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2394 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2395 /* 596b or 686a */
2396 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2397 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2398 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2399 } else {
2400 /* 596a or 586b */
2401 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2402 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2403 }
2404 /* can use PIO timings, MW DMA unused */
2405 mode = drvp->PIO_mode;
2406 } else {
2407 /* use Multiword DMA */
2408 drvp->drive_flags &= ~DRIVE_UDMA;
2409 /* mode = min(pio, dma+2) */
2410 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2411 mode = drvp->PIO_mode;
2412 else
2413 mode = drvp->DMA_mode + 2;
2414 }
2415 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2416
2417 pio: /* setup PIO mode */
2418 if (mode <= 2) {
2419 drvp->DMA_mode = 0;
2420 drvp->PIO_mode = 0;
2421 mode = 0;
2422 } else {
2423 drvp->PIO_mode = mode;
2424 drvp->DMA_mode = mode - 2;
2425 }
2426 datatim_reg |=
2427 APO_DATATIM_PULSE(chp->channel, drive,
2428 apollo_pio_set[mode]) |
2429 APO_DATATIM_RECOV(chp->channel, drive,
2430 apollo_pio_rec[mode]);
2431 }
2432 if (idedma_ctl != 0) {
2433 /* Add software bits in status register */
2434 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2435 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2436 idedma_ctl);
2437 }
2438 pciide_print_modes(cp);
2439 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2440 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2441 }
2442
2443 void
2444 cmd_channel_map(pa, sc, channel)
2445 struct pci_attach_args *pa;
2446 struct pciide_softc *sc;
2447 int channel;
2448 {
2449 struct pciide_channel *cp = &sc->pciide_channels[channel];
2450 bus_size_t cmdsize, ctlsize;
2451 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2452 int interface, one_channel;
2453
2454 	/*
2455 	 * The 0648/0649 can be told to identify as a RAID controller.
2456 	 * In this case, we have to fake the interface.
2457 	 */
2458 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2459 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2460 PCIIDE_INTERFACE_SETTABLE(1);
2461 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2462 CMD_CONF_DSA1)
2463 interface |= PCIIDE_INTERFACE_PCI(0) |
2464 PCIIDE_INTERFACE_PCI(1);
2465 } else {
2466 interface = PCI_INTERFACE(pa->pa_class);
2467 }
2468
2469 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2470 cp->name = PCIIDE_CHANNEL_NAME(channel);
2471 cp->wdc_channel.channel = channel;
2472 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2473
2474 	/*
2475 	 * Older CMD64X chips don't have independent channels.
2476 	 */
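	/*
	 * Consequence: on everything but the 0649 the second channel
	 * shares the first channel's command queue, so commands on the
	 * two channels are serialized; the 0649 gets a queue per channel.
	 */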
2477 switch (sc->sc_pp->ide_product) {
2478 case PCI_PRODUCT_CMDTECH_649:
2479 one_channel = 0;
2480 break;
2481 default:
2482 one_channel = 1;
2483 break;
2484 }
2485
2486 if (channel > 0 && one_channel) {
2487 cp->wdc_channel.ch_queue =
2488 sc->pciide_channels[0].wdc_channel.ch_queue;
2489 } else {
2490 cp->wdc_channel.ch_queue =
2491 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2492 }
2493 if (cp->wdc_channel.ch_queue == NULL) {
2494 printf("%s %s channel: "
2495 		    "can't allocate memory for command queue\n",
2496 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2497 return;
2498 }
2499
2500 printf("%s: %s channel %s to %s mode\n",
2501 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2502 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2503 "configured" : "wired",
2504 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2505 "native-PCI" : "compatibility");
2506
2507 /*
2508 * with a CMD PCI64x, if we get here, the first channel is enabled:
2509 * there's no way to disable the first channel without disabling
2510 * the whole device
2511 */
2512 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2513 printf("%s: %s channel ignored (disabled)\n",
2514 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2515 return;
2516 }
2517
2518 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2519 if (cp->hw_ok == 0)
2520 return;
2521 if (channel == 1) {
2522 if (pciide_chan_candisable(cp)) {
2523 ctrl &= ~CMD_CTRL_2PORT;
2524 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2525 CMD_CTRL, ctrl);
2526 }
2527 }
2528 pciide_map_compat_intr(pa, cp, channel, interface);
2529 }
2530
2531 int
2532 cmd_pci_intr(arg)
2533 void *arg;
2534 {
2535 struct pciide_softc *sc = arg;
2536 struct pciide_channel *cp;
2537 struct channel_softc *wdc_cp;
2538 int i, rv, crv;
2539 u_int32_t priirq, secirq;
2540
2541 rv = 0;
2542 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2543 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
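	/*
	 * Channel 0's interrupt status is reported in CMD_CONF
	 * (CMD_CONF_DRV0_INTR) and channel 1's in CMD_ARTTIM23
	 * (CMD_ARTTIM23_IRQ); only channels that flag an interrupt are
	 * passed to wdcintr().
	 */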
2544 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2545 cp = &sc->pciide_channels[i];
2546 wdc_cp = &cp->wdc_channel;
2547 /* If a compat channel skip. */
2548 if (cp->compat)
2549 continue;
2550 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2551 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2552 crv = wdcintr(wdc_cp);
2553 if (crv == 0)
2554 printf("%s:%d: bogus intr\n",
2555 sc->sc_wdcdev.sc_dev.dv_xname, i);
2556 else
2557 rv = 1;
2558 }
2559 }
2560 return rv;
2561 }
2562
2563 void
2564 cmd_chip_map(sc, pa)
2565 struct pciide_softc *sc;
2566 struct pci_attach_args *pa;
2567 {
2568 int channel;
2569
2570 /*
2571 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2572 	 * and base address registers can be disabled at
2573 * hardware level. In this case, the device is wired
2574 * in compat mode and its first channel is always enabled,
2575 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2576 * In fact, it seems that the first channel of the CMD PCI0640
2577 * can't be disabled.
2578 */
2579
2580 #ifdef PCIIDE_CMD064x_DISABLE
2581 if (pciide_chipen(sc, pa) == 0)
2582 return;
2583 #endif
2584
2585 printf("%s: hardware does not support DMA\n",
2586 sc->sc_wdcdev.sc_dev.dv_xname);
2587 sc->sc_dma_ok = 0;
2588
2589 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2590 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2591 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2592
2593 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2594 cmd_channel_map(pa, sc, channel);
2595 }
2596 }
2597
2598 void
2599 cmd0643_9_chip_map(sc, pa)
2600 struct pciide_softc *sc;
2601 struct pci_attach_args *pa;
2602 {
2603 struct pciide_channel *cp;
2604 int channel;
2605 pcireg_t rev = PCI_REVISION(pa->pa_class);
2606
2607 /*
2608 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2609 	 * and base address registers can be disabled at
2610 * hardware level. In this case, the device is wired
2611 * in compat mode and its first channel is always enabled,
2612 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2613 * In fact, it seems that the first channel of the CMD PCI0640
2614 * can't be disabled.
2615 */
2616
2617 #ifdef PCIIDE_CMD064x_DISABLE
2618 if (pciide_chipen(sc, pa) == 0)
2619 return;
2620 #endif
2621 printf("%s: bus-master DMA support present",
2622 sc->sc_wdcdev.sc_dev.dv_xname);
2623 pciide_mapreg_dma(sc, pa);
2624 printf("\n");
2625 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2626 WDC_CAPABILITY_MODE;
2627 if (sc->sc_dma_ok) {
2628 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2629 switch (sc->sc_pp->ide_product) {
2630 case PCI_PRODUCT_CMDTECH_649:
2631 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2632 sc->sc_wdcdev.UDMA_cap = 5;
2633 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2634 break;
2635 case PCI_PRODUCT_CMDTECH_648:
2636 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2637 sc->sc_wdcdev.UDMA_cap = 4;
2638 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2639 break;
2640 case PCI_PRODUCT_CMDTECH_646:
2641 if (rev >= CMD0646U2_REV) {
2642 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2643 sc->sc_wdcdev.UDMA_cap = 2;
2644 } else if (rev >= CMD0646U_REV) {
2645 /*
2646 * Linux's driver claims that the 646U is broken
2647 * with UDMA. Only enable it if we know what we're
2648 * doing
2649 */
2650 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2651 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2652 sc->sc_wdcdev.UDMA_cap = 2;
2653 #endif
2654 /* explicitly disable UDMA */
2655 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2656 CMD_UDMATIM(0), 0);
2657 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2658 CMD_UDMATIM(1), 0);
2659 }
2660 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2661 break;
2662 default:
2663 sc->sc_wdcdev.irqack = pciide_irqack;
2664 }
2665 }
2666
2667 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2668 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2669 sc->sc_wdcdev.PIO_cap = 4;
2670 sc->sc_wdcdev.DMA_cap = 2;
2671 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2672
2673 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2674 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2675 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2676 DEBUG_PROBE);
2677
2678 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2679 cp = &sc->pciide_channels[channel];
2680 cmd_channel_map(pa, sc, channel);
2681 if (cp->hw_ok == 0)
2682 continue;
2683 cmd0643_9_setup_channel(&cp->wdc_channel);
2684 }
2685 	/*
2686 	 * Note: this also makes sure we clear the IRQ disable and reset
2687 	 * bits.
2688 	 */
2689 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2690 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2691 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2692 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2693 DEBUG_PROBE);
2694 }
2695
2696 void
2697 cmd0643_9_setup_channel(chp)
2698 struct channel_softc *chp;
2699 {
2700 struct ata_drive_datas *drvp;
2701 u_int8_t tim;
2702 u_int32_t idedma_ctl, udma_reg;
2703 int drive;
2704 struct pciide_channel *cp = (struct pciide_channel*)chp;
2705 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2706
2707 idedma_ctl = 0;
2708 /* setup DMA if needed */
2709 pciide_channel_dma_setup(cp);
2710
2711 for (drive = 0; drive < 2; drive++) {
2712 drvp = &chp->ch_drive[drive];
2713 /* If no drive, skip */
2714 if ((drvp->drive_flags & DRIVE) == 0)
2715 continue;
2716 /* add timing values, setup DMA if needed */
2717 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2718 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2719 if (drvp->drive_flags & DRIVE_UDMA) {
2720 /* UltraDMA on a 646U2, 0648 or 0649 */
2721 drvp->drive_flags &= ~DRIVE_DMA;
2722 udma_reg = pciide_pci_read(sc->sc_pc,
2723 sc->sc_tag, CMD_UDMATIM(chp->channel));
2724 if (drvp->UDMA_mode > 2 &&
2725 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2726 CMD_BICSR) &
2727 CMD_BICSR_80(chp->channel)) == 0)
2728 drvp->UDMA_mode = 2;
2729 if (drvp->UDMA_mode > 2)
2730 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2731 else if (sc->sc_wdcdev.UDMA_cap > 2)
2732 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2733 udma_reg |= CMD_UDMATIM_UDMA(drive);
2734 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2735 CMD_UDMATIM_TIM_OFF(drive));
2736 udma_reg |=
2737 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2738 CMD_UDMATIM_TIM_OFF(drive));
2739 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2740 CMD_UDMATIM(chp->channel), udma_reg);
2741 } else {
2742 				/*
2743 				 * Use Multiword DMA.
2744 				 * Timings will be used for both PIO and DMA,
2745 				 * so adjust the DMA mode if needed.
2746 				 * If we have a 0646U2/8/9, turn off UDMA.
2747 				 */
2748 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2749 udma_reg = pciide_pci_read(sc->sc_pc,
2750 sc->sc_tag,
2751 CMD_UDMATIM(chp->channel));
2752 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2753 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2754 CMD_UDMATIM(chp->channel),
2755 udma_reg);
2756 }
2757 if (drvp->PIO_mode >= 3 &&
2758 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2759 drvp->DMA_mode = drvp->PIO_mode - 2;
2760 }
2761 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2762 }
2763 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2764 }
2765 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2766 CMD_DATA_TIM(chp->channel, drive), tim);
2767 }
2768 if (idedma_ctl != 0) {
2769 /* Add software bits in status register */
2770 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2771 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2772 idedma_ctl);
2773 }
2774 pciide_print_modes(cp);
2775 }
2776
2777 void
2778 cmd646_9_irqack(chp)
2779 struct channel_softc *chp;
2780 {
2781 u_int32_t priirq, secirq;
2782 struct pciide_channel *cp = (struct pciide_channel*)chp;
2783 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2784
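	/*
	 * Reading the register that latches this channel's interrupt
	 * flag and writing the same value back is what clears it here
	 * (the bits are presumably write-to-clear); then fall through
	 * to the generic bus-master ack.
	 */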
2785 if (chp->channel == 0) {
2786 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2787 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2788 } else {
2789 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2790 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2791 }
2792 pciide_irqack(chp);
2793 }
2794
2795 void
2796 cmd680_chip_map(sc, pa)
2797 struct pciide_softc *sc;
2798 struct pci_attach_args *pa;
2799 {
2800 struct pciide_channel *cp;
2801 int channel;
2802
2803 if (pciide_chipen(sc, pa) == 0)
2804 return;
2805 printf("%s: bus-master DMA support present",
2806 sc->sc_wdcdev.sc_dev.dv_xname);
2807 pciide_mapreg_dma(sc, pa);
2808 printf("\n");
2809 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2810 WDC_CAPABILITY_MODE;
2811 if (sc->sc_dma_ok) {
2812 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2813 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2814 sc->sc_wdcdev.UDMA_cap = 6;
2815 sc->sc_wdcdev.irqack = pciide_irqack;
2816 }
2817
2818 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2819 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2820 sc->sc_wdcdev.PIO_cap = 4;
2821 sc->sc_wdcdev.DMA_cap = 2;
2822 sc->sc_wdcdev.set_modes = cmd680_setup_channel;
2823
2824 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
2825 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
2826 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
2827 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);
2828 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2829 cp = &sc->pciide_channels[channel];
2830 cmd680_channel_map(pa, sc, channel);
2831 if (cp->hw_ok == 0)
2832 continue;
2833 cmd680_setup_channel(&cp->wdc_channel);
2834 }
2835 }
2836
2837 void
2838 cmd680_channel_map(pa, sc, channel)
2839 struct pci_attach_args *pa;
2840 struct pciide_softc *sc;
2841 int channel;
2842 {
2843 struct pciide_channel *cp = &sc->pciide_channels[channel];
2844 bus_size_t cmdsize, ctlsize;
2845 int interface, i, reg;
2846 static const u_int8_t init_val[] =
2847 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
2848 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };
2849
2850 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2851 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2852 PCIIDE_INTERFACE_SETTABLE(1);
2853 interface |= PCIIDE_INTERFACE_PCI(0) |
2854 PCIIDE_INTERFACE_PCI(1);
2855 } else {
2856 interface = PCI_INTERFACE(pa->pa_class);
2857 }
2858
2859 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2860 cp->name = PCIIDE_CHANNEL_NAME(channel);
2861 cp->wdc_channel.channel = channel;
2862 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2863
2864 cp->wdc_channel.ch_queue =
2865 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2866 if (cp->wdc_channel.ch_queue == NULL) {
2867 printf("%s %s channel: "
2868 		    "can't allocate memory for command queue\n",
2869 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2870 return;
2871 }
2872
2873 /* XXX */
2874 reg = 0xa2 + channel * 16;
2875 for (i = 0; i < sizeof(init_val); i++)
2876 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);
2877
2878 printf("%s: %s channel %s to %s mode\n",
2879 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2880 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2881 "configured" : "wired",
2882 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2883 "native-PCI" : "compatibility");
2884
2885 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
2886 if (cp->hw_ok == 0)
2887 return;
2888 pciide_map_compat_intr(pa, cp, channel, interface);
2889 }
2890
2891 void
2892 cmd680_setup_channel(chp)
2893 struct channel_softc *chp;
2894 {
2895 struct ata_drive_datas *drvp;
2896 u_int8_t mode, off, scsc;
2897 u_int16_t val;
2898 u_int32_t idedma_ctl;
2899 int drive;
2900 struct pciide_channel *cp = (struct pciide_channel*)chp;
2901 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2902 pci_chipset_tag_t pc = sc->sc_pc;
2903 pcitag_t pa = sc->sc_tag;
2904 static const u_int8_t udma2_tbl[] =
2905 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
2906 static const u_int8_t udma_tbl[] =
2907 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
2908 static const u_int16_t dma_tbl[] =
2909 { 0x2208, 0x10c2, 0x10c1 };
2910 static const u_int16_t pio_tbl[] =
2911 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
2912
2913 idedma_ctl = 0;
2914 pciide_channel_dma_setup(cp);
2915 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4);
2916
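	/*
	 * Register 0x80/0x84 holds a 2-bit transfer-mode selector per
	 * drive (0x01 PIO, 0x02 MW DMA, 0x03 UDMA); the loop below
	 * clears and rebuilds this channel's fields, with the detailed
	 * timing words going into the 0xa4/0xa8/0xac register pairs.
	 */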
2917 for (drive = 0; drive < 2; drive++) {
2918 drvp = &chp->ch_drive[drive];
2919 /* If no drive, skip */
2920 if ((drvp->drive_flags & DRIVE) == 0)
2921 continue;
2922 mode &= ~(0x03 << (drive * 4));
2923 if (drvp->drive_flags & DRIVE_UDMA) {
2924 drvp->drive_flags &= ~DRIVE_DMA;
2925 off = 0xa0 + chp->channel * 16;
2926 if (drvp->UDMA_mode > 2 &&
2927 (pciide_pci_read(pc, pa, off) & 0x01) == 0)
2928 drvp->UDMA_mode = 2;
2929 scsc = pciide_pci_read(pc, pa, 0x8a);
2930 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
2931 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
2932 scsc = pciide_pci_read(pc, pa, 0x8a);
2933 if ((scsc & 0x30) == 0)
2934 drvp->UDMA_mode = 5;
2935 }
2936 mode |= 0x03 << (drive * 4);
2937 off = 0xac + chp->channel * 16 + drive * 2;
2938 val = pciide_pci_read(pc, pa, off) & ~0x3f;
2939 if (scsc & 0x30)
2940 val |= udma2_tbl[drvp->UDMA_mode];
2941 else
2942 val |= udma_tbl[drvp->UDMA_mode];
2943 pciide_pci_write(pc, pa, off, val);
2944 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2945 } else if (drvp->drive_flags & DRIVE_DMA) {
2946 mode |= 0x02 << (drive * 4);
2947 off = 0xa8 + chp->channel * 16 + drive * 2;
2948 val = dma_tbl[drvp->DMA_mode];
2949 pciide_pci_write(pc, pa, off, val & 0xff);
2950 pciide_pci_write(pc, pa, off, val >> 8);
2951 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2952 } else {
2953 mode |= 0x01 << (drive * 4);
2954 off = 0xa4 + chp->channel * 16 + drive * 2;
2955 val = pio_tbl[drvp->PIO_mode];
2956 pciide_pci_write(pc, pa, off, val & 0xff);
2957 pciide_pci_write(pc, pa, off, val >> 8);
2958 }
2959 }
2960
2961 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
2962 if (idedma_ctl != 0) {
2963 /* Add software bits in status register */
2964 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2965 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2966 idedma_ctl);
2967 }
2968 pciide_print_modes(cp);
2969 }
2970
2971 void
2972 cy693_chip_map(sc, pa)
2973 struct pciide_softc *sc;
2974 struct pci_attach_args *pa;
2975 {
2976 struct pciide_channel *cp;
2977 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2978 bus_size_t cmdsize, ctlsize;
2979
2980 if (pciide_chipen(sc, pa) == 0)
2981 return;
2982 	/*
2983 	 * This chip has 2 PCI IDE functions, one for the primary and one
2984 	 * for the secondary channel, so we need to call
2985 	 * pciide_mapregs_compat() with the real channel.
2986 	 */
2987 if (pa->pa_function == 1) {
2988 sc->sc_cy_compatchan = 0;
2989 } else if (pa->pa_function == 2) {
2990 sc->sc_cy_compatchan = 1;
2991 } else {
2992 printf("%s: unexpected PCI function %d\n",
2993 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2994 return;
2995 }
2996 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2997 printf("%s: bus-master DMA support present",
2998 sc->sc_wdcdev.sc_dev.dv_xname);
2999 pciide_mapreg_dma(sc, pa);
3000 } else {
3001 printf("%s: hardware does not support DMA",
3002 sc->sc_wdcdev.sc_dev.dv_xname);
3003 sc->sc_dma_ok = 0;
3004 }
3005 printf("\n");
3006
3007 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
3008 if (sc->sc_cy_handle == NULL) {
3009 printf("%s: unable to map hyperCache control registers\n",
3010 sc->sc_wdcdev.sc_dev.dv_xname);
3011 sc->sc_dma_ok = 0;
3012 }
3013
3014 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3015 WDC_CAPABILITY_MODE;
3016 if (sc->sc_dma_ok) {
3017 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3018 sc->sc_wdcdev.irqack = pciide_irqack;
3019 }
3020 sc->sc_wdcdev.PIO_cap = 4;
3021 sc->sc_wdcdev.DMA_cap = 2;
3022 sc->sc_wdcdev.set_modes = cy693_setup_channel;
3023
3024 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3025 sc->sc_wdcdev.nchannels = 1;
3026
3027 /* Only one channel for this chip; if we are here it's enabled */
3028 cp = &sc->pciide_channels[0];
3029 sc->wdc_chanarray[0] = &cp->wdc_channel;
3030 cp->name = PCIIDE_CHANNEL_NAME(0);
3031 cp->wdc_channel.channel = 0;
3032 cp->wdc_channel.wdc = &sc->sc_wdcdev;
3033 cp->wdc_channel.ch_queue =
3034 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
3035 if (cp->wdc_channel.ch_queue == NULL) {
3036 printf("%s primary channel: "
3037 	    "can't allocate memory for command queue\n",
3038 sc->sc_wdcdev.sc_dev.dv_xname);
3039 return;
3040 }
3041 printf("%s: primary channel %s to ",
3042 sc->sc_wdcdev.sc_dev.dv_xname,
3043 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
3044 "configured" : "wired");
3045 if (interface & PCIIDE_INTERFACE_PCI(0)) {
3046 printf("native-PCI");
3047 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
3048 pciide_pci_intr);
3049 } else {
3050 printf("compatibility");
3051 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
3052 &cmdsize, &ctlsize);
3053 }
3054 printf(" mode\n");
3055 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3056 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3057 wdcattach(&cp->wdc_channel);
3058 if (pciide_chan_candisable(cp)) {
3059 pci_conf_write(sc->sc_pc, sc->sc_tag,
3060 PCI_COMMAND_STATUS_REG, 0);
3061 }
3062 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
3063 if (cp->hw_ok == 0)
3064 return;
3065 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
3066 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
3067 cy693_setup_channel(&cp->wdc_channel);
3068 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
3069 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
3070 }
3071
3072 void
3073 cy693_setup_channel(chp)
3074 struct channel_softc *chp;
3075 {
3076 struct ata_drive_datas *drvp;
3077 int drive;
3078 u_int32_t cy_cmd_ctrl;
3079 u_int32_t idedma_ctl;
3080 struct pciide_channel *cp = (struct pciide_channel*)chp;
3081 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3082 int dma_mode = -1;
3083
3084 cy_cmd_ctrl = idedma_ctl = 0;
3085
3086 /* setup DMA if needed */
3087 pciide_channel_dma_setup(cp);
3088
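	/*
	 * The CY82C693 has only one MW DMA timing per channel, programmed
	 * through the hyperCache registers, so below both drives are
	 * forced to the slowest DMA mode requested on the channel.
	 */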
3089 for (drive = 0; drive < 2; drive++) {
3090 drvp = &chp->ch_drive[drive];
3091 /* If no drive, skip */
3092 if ((drvp->drive_flags & DRIVE) == 0)
3093 continue;
3094 /* add timing values, setup DMA if needed */
3095 if (drvp->drive_flags & DRIVE_DMA) {
3096 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3097 /* use Multiword DMA */
3098 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
3099 dma_mode = drvp->DMA_mode;
3100 }
3101 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3102 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
3103 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3104 CY_CMD_CTRL_IOW_REC_OFF(drive));
3105 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3106 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
3107 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3108 CY_CMD_CTRL_IOR_REC_OFF(drive));
3109 }
3110 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
3111 chp->ch_drive[0].DMA_mode = dma_mode;
3112 chp->ch_drive[1].DMA_mode = dma_mode;
3113
3114 if (dma_mode == -1)
3115 dma_mode = 0;
3116
3117 if (sc->sc_cy_handle != NULL) {
3118 /* Note: `multiple' is implied. */
3119 cy82c693_write(sc->sc_cy_handle,
3120 (sc->sc_cy_compatchan == 0) ?
3121 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
3122 }
3123
3124 pciide_print_modes(cp);
3125
3126 if (idedma_ctl != 0) {
3127 /* Add software bits in status register */
3128 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3129 IDEDMA_CTL, idedma_ctl);
3130 }
3131 }
3132
3133 static int
3134 sis_hostbr_match(pa)
3135 struct pci_attach_args *pa;
3136 {
3137 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
3138 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
3139 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
3140 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
3141 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735) ||
3142 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_745)));
3143 }
3144
3145 void
3146 sis_chip_map(sc, pa)
3147 struct pciide_softc *sc;
3148 struct pci_attach_args *pa;
3149 {
3150 struct pciide_channel *cp;
3151 int channel;
3152 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
3153 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3154 pcireg_t rev = PCI_REVISION(pa->pa_class);
3155 bus_size_t cmdsize, ctlsize;
3156 pcitag_t pchb_tag;
3157 pcireg_t pchb_id, pchb_class;
3158
3159 if (pciide_chipen(sc, pa) == 0)
3160 return;
3161 printf("%s: bus-master DMA support present",
3162 sc->sc_wdcdev.sc_dev.dv_xname);
3163 pciide_mapreg_dma(sc, pa);
3164 printf("\n");
3165
3166 /* get a PCI tag for the host bridge (function 0 of the same device) */
3167 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
3168 	/* and read the ID and revision of the host bridge */
3169 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
3170 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
3171
3172 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3173 WDC_CAPABILITY_MODE;
3174 if (sc->sc_dma_ok) {
3175 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3176 sc->sc_wdcdev.irqack = pciide_irqack;
3177 		/*
3178 		 * Controllers associated with a rev 0x2 530 Host-to-PCI
3179 		 * Bridge have problems with UDMA (info provided by Christos).
3180 		 */
3181 if (rev >= 0xd0 &&
3182 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
3183 PCI_REVISION(pchb_class) >= 0x03))
3184 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3185 }
3186
3187 sc->sc_wdcdev.PIO_cap = 4;
3188 sc->sc_wdcdev.DMA_cap = 2;
3189 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
3190 		/*
3191 		 * Use UDMA/100 on chipsets whose host bridge matches
3192 		 * sis_hostbr_match() (645/650/730/735/745), UDMA/33 otherwise.
3193 		 */
3194 sc->sc_wdcdev.UDMA_cap =
3195 pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
3196 sc->sc_wdcdev.set_modes = sis_setup_channel;
3197
3198 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3199 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3200
3201 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
3202 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
3203 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
3204
3205 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3206 cp = &sc->pciide_channels[channel];
3207 if (pciide_chansetup(sc, channel, interface) == 0)
3208 continue;
3209 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
3210 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
3211 printf("%s: %s channel ignored (disabled)\n",
3212 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3213 continue;
3214 }
3215 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3216 pciide_pci_intr);
3217 if (cp->hw_ok == 0)
3218 continue;
3219 if (pciide_chan_candisable(cp)) {
3220 if (channel == 0)
3221 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
3222 else
3223 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
3224 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
3225 sis_ctr0);
3226 }
3227 pciide_map_compat_intr(pa, cp, channel, interface);
3228 if (cp->hw_ok == 0)
3229 continue;
3230 sis_setup_channel(&cp->wdc_channel);
3231 }
3232 }
3233
3234 void
3235 sis_setup_channel(chp)
3236 struct channel_softc *chp;
3237 {
3238 struct ata_drive_datas *drvp;
3239 int drive;
3240 u_int32_t sis_tim;
3241 u_int32_t idedma_ctl;
3242 struct pciide_channel *cp = (struct pciide_channel*)chp;
3243 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3244
3245 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3246 "channel %d 0x%x\n", chp->channel,
3247 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3248 DEBUG_PROBE);
3249 sis_tim = 0;
3250 idedma_ctl = 0;
3251 /* setup DMA if needed */
3252 pciide_channel_dma_setup(cp);
3253
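	/*
	 * SIS_TIM(channel) is rebuilt from scratch: per-drive PIO active
	 * and recovery counts, plus the UDMA enable bit and cycle-time
	 * field when Ultra DMA is in use.
	 */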
3254 for (drive = 0; drive < 2; drive++) {
3255 drvp = &chp->ch_drive[drive];
3256 /* If no drive, skip */
3257 if ((drvp->drive_flags & DRIVE) == 0)
3258 continue;
3259 /* add timing values, setup DMA if needed */
3260 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3261 (drvp->drive_flags & DRIVE_UDMA) == 0)
3262 goto pio;
3263
3264 if (drvp->drive_flags & DRIVE_UDMA) {
3265 /* use Ultra/DMA */
3266 drvp->drive_flags &= ~DRIVE_DMA;
3267 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
3268 SIS_TIM_UDMA_TIME_OFF(drive);
3269 sis_tim |= SIS_TIM_UDMA_EN(drive);
3270 } else {
3271 			/*
3272 			 * Use Multiword DMA.
3273 			 * Timings will be used for both PIO and DMA,
3274 			 * so adjust the DMA mode if needed.
3275 			 */
3276 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3277 drvp->PIO_mode = drvp->DMA_mode + 2;
3278 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3279 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3280 drvp->PIO_mode - 2 : 0;
3281 if (drvp->DMA_mode == 0)
3282 drvp->PIO_mode = 0;
3283 }
3284 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3285 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3286 SIS_TIM_ACT_OFF(drive);
3287 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3288 SIS_TIM_REC_OFF(drive);
3289 }
3290 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3291 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3292 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3293 if (idedma_ctl != 0) {
3294 /* Add software bits in status register */
3295 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3296 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3297 idedma_ctl);
3298 }
3299 pciide_print_modes(cp);
3300 }
3301
3302 void
3303 acer_chip_map(sc, pa)
3304 struct pciide_softc *sc;
3305 struct pci_attach_args *pa;
3306 {
3307 struct pciide_channel *cp;
3308 int channel;
3309 pcireg_t cr, interface;
3310 bus_size_t cmdsize, ctlsize;
3311 pcireg_t rev = PCI_REVISION(pa->pa_class);
3312
3313 if (pciide_chipen(sc, pa) == 0)
3314 return;
3315 printf("%s: bus-master DMA support present",
3316 sc->sc_wdcdev.sc_dev.dv_xname);
3317 pciide_mapreg_dma(sc, pa);
3318 printf("\n");
3319 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3320 WDC_CAPABILITY_MODE;
3321 if (sc->sc_dma_ok) {
3322 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3323 if (rev >= 0x20) {
3324 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3325 if (rev >= 0xC4)
3326 sc->sc_wdcdev.UDMA_cap = 5;
3327 else if (rev >= 0xC2)
3328 sc->sc_wdcdev.UDMA_cap = 4;
3329 else
3330 sc->sc_wdcdev.UDMA_cap = 2;
3331 }
3332 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3333 sc->sc_wdcdev.irqack = pciide_irqack;
3334 }
3335
3336 sc->sc_wdcdev.PIO_cap = 4;
3337 sc->sc_wdcdev.DMA_cap = 2;
3338 sc->sc_wdcdev.set_modes = acer_setup_channel;
3339 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3340 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3341
3342 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3343 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3344 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3345
3346 /* Enable "microsoft register bits" R/W. */
3347 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3348 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3349 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3350 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3351 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3352 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3353 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3354 ~ACER_CHANSTATUSREGS_RO);
3355 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3356 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3357 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3358 /* Don't use cr, re-read the real register content instead */
3359 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3360 PCI_CLASS_REG));
3361
3362 /* From linux: enable "Cable Detection" */
3363 if (rev >= 0xC2) {
3364 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3365 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3366 | ACER_0x4B_CDETECT);
3367 }
3368
3369 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3370 cp = &sc->pciide_channels[channel];
3371 if (pciide_chansetup(sc, channel, interface) == 0)
3372 continue;
3373 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3374 printf("%s: %s channel ignored (disabled)\n",
3375 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3376 continue;
3377 }
3378 		/* newer controllers seem to lack the ACER_CHIDS. Sigh */
3379 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3380 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3381 if (cp->hw_ok == 0)
3382 continue;
3383 if (pciide_chan_candisable(cp)) {
3384 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3385 pci_conf_write(sc->sc_pc, sc->sc_tag,
3386 PCI_CLASS_REG, cr);
3387 }
3388 pciide_map_compat_intr(pa, cp, channel, interface);
3389 acer_setup_channel(&cp->wdc_channel);
3390 }
3391 }
3392
3393 void
3394 acer_setup_channel(chp)
3395 struct channel_softc *chp;
3396 {
3397 struct ata_drive_datas *drvp;
3398 int drive;
3399 u_int32_t acer_fifo_udma;
3400 u_int32_t idedma_ctl;
3401 struct pciide_channel *cp = (struct pciide_channel*)chp;
3402 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3403
3404 idedma_ctl = 0;
3405 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3406 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3407 acer_fifo_udma), DEBUG_PROBE);
3408 /* setup DMA if needed */
3409 pciide_channel_dma_setup(cp);
3410
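	/*
	 * ACER_FTH_UDMA packs the per-drive FIFO threshold (ACER_FTH_OPL)
	 * together with the UDMA enable and timing fields; the per-drive
	 * loop below clears and rebuilds each drive's share of it before
	 * the register is written back.
	 */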
3411 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3412 	    DRIVE_UDMA) {	/* check for an 80-pin cable */
3413 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3414 ACER_0x4A_80PIN(chp->channel)) {
3415 if (chp->ch_drive[0].UDMA_mode > 2)
3416 chp->ch_drive[0].UDMA_mode = 2;
3417 if (chp->ch_drive[1].UDMA_mode > 2)
3418 chp->ch_drive[1].UDMA_mode = 2;
3419 }
3420 }
3421
3422 for (drive = 0; drive < 2; drive++) {
3423 drvp = &chp->ch_drive[drive];
3424 /* If no drive, skip */
3425 if ((drvp->drive_flags & DRIVE) == 0)
3426 continue;
3427 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3428 "channel %d drive %d 0x%x\n", chp->channel, drive,
3429 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3430 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3431 /* clear FIFO/DMA mode */
3432 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3433 ACER_UDMA_EN(chp->channel, drive) |
3434 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3435
3436 /* add timing values, setup DMA if needed */
3437 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3438 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3439 acer_fifo_udma |=
3440 ACER_FTH_OPL(chp->channel, drive, 0x1);
3441 goto pio;
3442 }
3443
3444 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3445 if (drvp->drive_flags & DRIVE_UDMA) {
3446 /* use Ultra/DMA */
3447 drvp->drive_flags &= ~DRIVE_DMA;
3448 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3449 acer_fifo_udma |=
3450 ACER_UDMA_TIM(chp->channel, drive,
3451 acer_udma[drvp->UDMA_mode]);
3452 /* XXX disable if one drive < UDMA3 ? */
3453 if (drvp->UDMA_mode >= 3) {
3454 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3455 ACER_0x4B,
3456 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3457 ACER_0x4B) | ACER_0x4B_UDMA66);
3458 }
3459 } else {
3460 			/*
3461 			 * Use Multiword DMA.
3462 			 * Timings will be used for both PIO and DMA,
3463 			 * so adjust the DMA mode if needed.
3464 			 */
3465 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3466 drvp->PIO_mode = drvp->DMA_mode + 2;
3467 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3468 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3469 drvp->PIO_mode - 2 : 0;
3470 if (drvp->DMA_mode == 0)
3471 drvp->PIO_mode = 0;
3472 }
3473 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3474 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3475 ACER_IDETIM(chp->channel, drive),
3476 acer_pio[drvp->PIO_mode]);
3477 }
3478 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3479 acer_fifo_udma), DEBUG_PROBE);
3480 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3481 if (idedma_ctl != 0) {
3482 /* Add software bits in status register */
3483 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3484 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3485 idedma_ctl);
3486 }
3487 pciide_print_modes(cp);
3488 }
3489
3490 int
3491 acer_pci_intr(arg)
3492 void *arg;
3493 {
3494 struct pciide_softc *sc = arg;
3495 struct pciide_channel *cp;
3496 struct channel_softc *wdc_cp;
3497 int i, rv, crv;
3498 u_int32_t chids;
3499
3500 rv = 0;
3501 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3502 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3503 cp = &sc->pciide_channels[i];
3504 wdc_cp = &cp->wdc_channel;
3505 /* If a compat channel skip. */
3506 if (cp->compat)
3507 continue;
3508 if (chids & ACER_CHIDS_INT(i)) {
3509 crv = wdcintr(wdc_cp);
3510 if (crv == 0)
3511 printf("%s:%d: bogus intr\n",
3512 sc->sc_wdcdev.sc_dev.dv_xname, i);
3513 else
3514 rv = 1;
3515 }
3516 }
3517 return rv;
3518 }
3519
3520 void
3521 hpt_chip_map(sc, pa)
3522 struct pciide_softc *sc;
3523 struct pci_attach_args *pa;
3524 {
3525 struct pciide_channel *cp;
3526 int i, compatchan, revision;
3527 pcireg_t interface;
3528 bus_size_t cmdsize, ctlsize;
3529
3530 if (pciide_chipen(sc, pa) == 0)
3531 return;
3532 revision = PCI_REVISION(pa->pa_class);
3533 printf(": Triones/Highpoint ");
3534 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3535 printf("HPT374 IDE Controller\n");
3536 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
3537 printf("HPT372 IDE Controller\n");
3538 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3539 if (revision == HPT372_REV)
3540 printf("HPT372 IDE Controller\n");
3541 else if (revision == HPT370_REV)
3542 printf("HPT370 IDE Controller\n");
3543 else if (revision == HPT370A_REV)
3544 printf("HPT370A IDE Controller\n");
3545 else if (revision == HPT366_REV)
3546 printf("HPT366 IDE Controller\n");
3547 else
3548 printf("unknown HPT IDE controller rev %d\n", revision);
3549 } else
3550 printf("unknown HPT IDE controller 0x%x\n",
3551 sc->sc_pp->ide_product);
3552
3553 /*
3554 * When the chip is in native mode it identifies itself as a
3555 * 'misc mass storage' device. Fake the interface in this case.
3556 */
3557 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3558 interface = PCI_INTERFACE(pa->pa_class);
3559 } else {
3560 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3561 PCIIDE_INTERFACE_PCI(0);
3562 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3563 (revision == HPT370_REV || revision == HPT370A_REV ||
3564 revision == HPT372_REV)) ||
3565 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3566 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3567 interface |= PCIIDE_INTERFACE_PCI(1);
3568 }
3569
3570 printf("%s: bus-master DMA support present",
3571 sc->sc_wdcdev.sc_dev.dv_xname);
3572 pciide_mapreg_dma(sc, pa);
3573 printf("\n");
3574 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3575 WDC_CAPABILITY_MODE;
3576 if (sc->sc_dma_ok) {
3577 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3578 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3579 sc->sc_wdcdev.irqack = pciide_irqack;
3580 }
3581 sc->sc_wdcdev.PIO_cap = 4;
3582 sc->sc_wdcdev.DMA_cap = 2;
3583
3584 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3585 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3586 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3587 revision == HPT366_REV) {
3588 sc->sc_wdcdev.UDMA_cap = 4;
3589 /*
3590 * The 366 has 2 PCI IDE functions, one for primary and one
3591 * for secondary, so we need to call pciide_mapregs_compat()
3592 * with the real channel.
3593 */
3594 if (pa->pa_function == 0) {
3595 compatchan = 0;
3596 } else if (pa->pa_function == 1) {
3597 compatchan = 1;
3598 } else {
3599 printf("%s: unexpected PCI function %d\n",
3600 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3601 return;
3602 }
3603 sc->sc_wdcdev.nchannels = 1;
3604 } else {
3605 sc->sc_wdcdev.nchannels = 2;
3606 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
3607 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3608 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3609 revision == HPT372_REV))
3610 sc->sc_wdcdev.UDMA_cap = 6;
3611 else
3612 sc->sc_wdcdev.UDMA_cap = 5;
3613 }
3614 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3615 cp = &sc->pciide_channels[i];
3616 if (sc->sc_wdcdev.nchannels > 1) {
3617 compatchan = i;
3618 if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3619 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3620 printf("%s: %s channel ignored (disabled)\n",
3621 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3622 continue;
3623 }
3624 }
3625 if (pciide_chansetup(sc, i, interface) == 0)
3626 continue;
3627 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3628 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3629 &ctlsize, hpt_pci_intr);
3630 } else {
3631 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3632 &cmdsize, &ctlsize);
3633 }
3634 if (cp->hw_ok == 0)
3635 return;
3636 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3637 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3638 wdcattach(&cp->wdc_channel);
3639 hpt_setup_channel(&cp->wdc_channel);
3640 }
3641 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3642 (revision == HPT370_REV || revision == HPT370A_REV ||
3643 revision == HPT372_REV)) ||
3644 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3645 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
3646 /*
3647 * HPT370_REV and higher have a bit to disable interrupts;
3648 * make sure to clear it.
3649 */
3650 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3651 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3652 ~HPT_CSEL_IRQDIS);
3653 }
3654 /* set clocks, etc (mandatory on 372/4, optional otherwise) */
3655 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3656 revision == HPT372_REV ) ||
3657 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3658 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3659 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
3660 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
3661 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
3662 return;
3663 }
3664
3665 void
3666 hpt_setup_channel(chp)
3667 struct channel_softc *chp;
3668 {
3669 struct ata_drive_datas *drvp;
3670 int drive;
3671 int cable;
3672 u_int32_t before, after;
3673 u_int32_t idedma_ctl;
3674 struct pciide_channel *cp = (struct pciide_channel*)chp;
3675 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3676 int revision =
3677 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
3678
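/*
 * HPT_CSEL apparently reports the cable type per channel: when the
 * CBLID bit is set the channel seems to lack an 80-conductor cable,
 * so UDMA modes above 2 are disallowed below.
 */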
3679 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3680
3681 /* setup DMA if needed */
3682 pciide_channel_dma_setup(cp);
3683
3684 idedma_ctl = 0;
3685
3686 /* Per drive settings */
3687 for (drive = 0; drive < 2; drive++) {
3688 drvp = &chp->ch_drive[drive];
3689 /* If no drive, skip */
3690 if ((drvp->drive_flags & DRIVE) == 0)
3691 continue;
3692 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3693 HPT_IDETIM(chp->channel, drive));
3694
3695 /* add timing values, setup DMA if needed */
3696 if (drvp->drive_flags & DRIVE_UDMA) {
3697 /* use Ultra/DMA */
3698 drvp->drive_flags &= ~DRIVE_DMA;
3699 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3700 drvp->UDMA_mode > 2)
3701 drvp->UDMA_mode = 2;
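/*
 * Each chip generation uses its own magic timing words, selected
 * first by product ID and, for the PCI ID shared by the
 * 366/370/370A/372, by PCI revision.  The same pattern repeats for
 * the multiword DMA and PIO cases below.
 */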
3702 switch (sc->sc_pp->ide_product) {
3703 case PCI_PRODUCT_TRIONES_HPT374:
3704 after = hpt374_udma[drvp->UDMA_mode];
3705 break;
3706 case PCI_PRODUCT_TRIONES_HPT372:
3707 after = hpt372_udma[drvp->UDMA_mode];
3708 break;
3709 case PCI_PRODUCT_TRIONES_HPT366:
3710 default:
3711 switch (revision) {
3712 case HPT372_REV:
3713 after = hpt372_udma[drvp->UDMA_mode];
3714 break;
3715 case HPT370_REV:
3716 case HPT370A_REV:
3717 after = hpt370_udma[drvp->UDMA_mode];
3718 break;
3719 case HPT366_REV:
3720 default:
3721 after = hpt366_udma[drvp->UDMA_mode];
3722 break;
3723 }
3724 }
3725 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3726 } else if (drvp->drive_flags & DRIVE_DMA) {
3727 /*
3728 * use Multiword DMA.
3729 * Timings will be used for both PIO and DMA, so adjust
3730 * DMA mode if needed
3731 */
3732 if (drvp->PIO_mode >= 3 &&
3733 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3734 drvp->DMA_mode = drvp->PIO_mode - 2;
3735 }
3736 switch (sc->sc_pp->ide_product) {
3737 case PCI_PRODUCT_TRIONES_HPT374:
3738 after = hpt374_dma[drvp->DMA_mode];
3739 break;
3740 case PCI_PRODUCT_TRIONES_HPT372:
3741 after = hpt372_dma[drvp->DMA_mode];
3742 break;
3743 case PCI_PRODUCT_TRIONES_HPT366:
3744 default:
3745 switch (revision) {
3746 case HPT372_REV:
3747 after = hpt372_dma[drvp->DMA_mode];
3748 break;
3749 case HPT370_REV:
3750 case HPT370A_REV:
3751 after = hpt370_dma[drvp->DMA_mode];
3752 break;
3753 case HPT366_REV:
3754 default:
3755 after = hpt366_dma[drvp->DMA_mode];
3756 break;
3757 }
3758 }
3759 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3760 } else {
3761 /* PIO only */
3762 switch (sc->sc_pp->ide_product) {
3763 case PCI_PRODUCT_TRIONES_HPT374:
3764 after = hpt374_pio[drvp->PIO_mode];
3765 break;
3766 case PCI_PRODUCT_TRIONES_HPT372:
3767 after = hpt372_pio[drvp->PIO_mode];
3768 break;
3769 case PCI_PRODUCT_TRIONES_HPT366:
3770 default:
3771 switch (revision) {
3772 case HPT372_REV:
3773 after = hpt372_pio[drvp->PIO_mode];
3774 break;
3775 case HPT370_REV:
3776 case HPT370A_REV:
3777 after = hpt370_pio[drvp->PIO_mode];
3778 break;
3779 case HPT366_REV:
3780 default:
3781 after = hpt366_pio[drvp->PIO_mode];
3782 break;
3783 }
3784 }
3785 }
3786 pci_conf_write(sc->sc_pc, sc->sc_tag,
3787 HPT_IDETIM(chp->channel, drive), after);
3788 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3789 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3790 after, before), DEBUG_PROBE);
3791 }
3792 if (idedma_ctl != 0) {
3793 /* Add software bits in status register */
3794 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3795 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3796 idedma_ctl);
3797 }
3798 pciide_print_modes(cp);
3799 }
3800
3801 int
3802 hpt_pci_intr(arg)
3803 void *arg;
3804 {
3805 struct pciide_softc *sc = arg;
3806 struct pciide_channel *cp;
3807 struct channel_softc *wdc_cp;
3808 int rv = 0;
3809 int dmastat, i, crv;
3810
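/*
 * Rather than a chip-specific interrupt status register, this handler
 * polls each channel's bus-master DMA status and only claims the
 * interrupt when INTR is set and the DMA engine is no longer active.
 */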
3811 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3812 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3813 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3814 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
3815 IDEDMA_CTL_INTR)
3816 continue;
3817 cp = &sc->pciide_channels[i];
3818 wdc_cp = &cp->wdc_channel;
3819 crv = wdcintr(wdc_cp);
3820 if (crv == 0) {
3821 printf("%s:%d: bogus intr\n",
3822 sc->sc_wdcdev.sc_dev.dv_xname, i);
3823 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3824 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3825 } else
3826 rv = 1;
3827 }
3828 return rv;
3829 }
3830
3831
3832 /* Macros to test product */
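/*
 * Judging by the names and the product lists, these macros group the
 * Promise parts by generation (PDC20262 / 20265 / 20268 / 20276 and
 * newer); each later macro matches a subset of the previous one, so
 * e.g. PDC_IS_276() implies PDC_IS_268().  They select the UDMA
 * capability and the setup routine used in pdc202xx_chip_map() below.
 */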
3833 #define PDC_IS_262(sc) \
3834 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3835 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3836 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3837 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3838 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3839 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3840 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3841 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
3842 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
3843 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
3844 #define PDC_IS_265(sc) \
3845 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3846 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3847 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3848 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3849 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3850 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3851 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
3852 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
3853 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
3854 #define PDC_IS_268(sc) \
3855 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3856 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3857 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3858 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3859 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
3860 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
3861 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
3862 #define PDC_IS_276(sc) \
3863 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3864 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3865 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_MBULTRA133 || \
3866 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2 || \
3867 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_FASTTRAK133LITE)
3868
3869 void
3870 pdc202xx_chip_map(sc, pa)
3871 struct pciide_softc *sc;
3872 struct pci_attach_args *pa;
3873 {
3874 struct pciide_channel *cp;
3875 int channel;
3876 pcireg_t interface, st, mode;
3877 bus_size_t cmdsize, ctlsize;
3878
3879 if (!PDC_IS_268(sc)) {
3880 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3881 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
3882 st), DEBUG_PROBE);
3883 }
3884 if (pciide_chipen(sc, pa) == 0)
3885 return;
3886
3887 /* turn off RAID mode */
3888 if (!PDC_IS_268(sc))
3889 st &= ~PDC2xx_STATE_IDERAID;
3890
3891 /*
3892 * Can't rely on the PCI_CLASS_REG content if the chip was in RAID
3893 * mode; we have to fake the interface.
3894 */
3895 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3896 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
3897 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3898
3899 printf("%s: bus-master DMA support present",
3900 sc->sc_wdcdev.sc_dev.dv_xname);
3901 pciide_mapreg_dma(sc, pa);
3902 printf("\n");
3903 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3904 WDC_CAPABILITY_MODE;
3905 if (sc->sc_dma_ok) {
3906 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3907 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3908 sc->sc_wdcdev.irqack = pciide_irqack;
3909 }
3910 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
3911 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
3912 sc->sc_wdcdev.cap |= WDC_CAPABILITY_RAID;
3913 sc->sc_wdcdev.PIO_cap = 4;
3914 sc->sc_wdcdev.DMA_cap = 2;
3915 if (PDC_IS_276(sc))
3916 sc->sc_wdcdev.UDMA_cap = 6;
3917 else if (PDC_IS_265(sc))
3918 sc->sc_wdcdev.UDMA_cap = 5;
3919 else if (PDC_IS_262(sc))
3920 sc->sc_wdcdev.UDMA_cap = 4;
3921 else
3922 sc->sc_wdcdev.UDMA_cap = 2;
3923 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
3924 pdc20268_setup_channel : pdc202xx_setup_channel;
3925 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3926 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3927
3928 if (!PDC_IS_268(sc)) {
3929 /* setup failsafe defaults */
3930 mode = 0;
3931 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3932 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3933 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3934 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3935 for (channel = 0;
3936 channel < sc->sc_wdcdev.nchannels;
3937 channel++) {
3938 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3939 "drive 0 initial timings 0x%x, now 0x%x\n",
3940 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3941 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3942 DEBUG_PROBE);
3943 pci_conf_write(sc->sc_pc, sc->sc_tag,
3944 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
3945 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3946 "drive 1 initial timings 0x%x, now 0x%x\n",
3947 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3948 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3949 pci_conf_write(sc->sc_pc, sc->sc_tag,
3950 PDC2xx_TIM(channel, 1), mode);
3951 }
3952
3953 mode = PDC2xx_SCR_DMA;
3954 if (PDC_IS_262(sc)) {
3955 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3956 } else {
3957 /* the BIOS set it up this way */
3958 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3959 }
3960 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3961 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3962 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
3963 "now 0x%x\n",
3964 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3965 PDC2xx_SCR),
3966 mode), DEBUG_PROBE);
3967 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3968 PDC2xx_SCR, mode);
3969
3970 /* controller initial state register is OK even without BIOS */
3971 /* Set DMA mode to IDE DMA compatibility */
3972 mode =
3973 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3974 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3975 DEBUG_PROBE);
3976 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3977 mode | 0x1);
3978 mode =
3979 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3980 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3981 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3982 mode | 0x1);
3983 }
3984
3985 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3986 cp = &sc->pciide_channels[channel];
3987 if (pciide_chansetup(sc, channel, interface) == 0)
3988 continue;
3989 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
3990 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3991 printf("%s: %s channel ignored (disabled)\n",
3992 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3993 continue;
3994 }
3995 if (PDC_IS_265(sc))
3996 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3997 pdc20265_pci_intr);
3998 else
3999 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4000 pdc202xx_pci_intr);
4001 if (cp->hw_ok == 0)
4002 continue;
4003 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
4004 st &= ~(PDC_IS_262(sc) ?
4005 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
4006 pciide_map_compat_intr(pa, cp, channel, interface);
4007 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
4008 }
4009 if (!PDC_IS_268(sc)) {
4010 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
4011 "0x%x\n", st), DEBUG_PROBE);
4012 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
4013 }
4014 return;
4015 }
4016
4017 void
4018 pdc202xx_setup_channel(chp)
4019 struct channel_softc *chp;
4020 {
4021 struct ata_drive_datas *drvp;
4022 int drive;
4023 pcireg_t mode, st;
4024 u_int32_t idedma_ctl, scr, atapi;
4025 struct pciide_channel *cp = (struct pciide_channel*)chp;
4026 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4027 int channel = chp->channel;
4028
4029 /* setup DMA if needed */
4030 pciide_channel_dma_setup(cp);
4031
4032 idedma_ctl = 0;
4033 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
4034 sc->sc_wdcdev.sc_dev.dv_xname,
4035 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
4036 DEBUG_PROBE);
4037
4038 /* Per channel settings */
4039 if (PDC_IS_262(sc)) {
4040 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4041 PDC262_U66);
4042 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4043 /* Trim UDMA mode */
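/*
 * Going by this test, PDC262_STATE_80P() is apparently set when the
 * channel lacks an 80-conductor cable; additionally, since
 * PDC262_U66_EN() is a per-channel setting, if either drive is
 * limited to UDMA2 or below the other drive is held at UDMA2 too.
 */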
4044 if ((st & PDC262_STATE_80P(channel)) != 0 ||
4045 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4046 chp->ch_drive[0].UDMA_mode <= 2) ||
4047 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4048 chp->ch_drive[1].UDMA_mode <= 2)) {
4049 if (chp->ch_drive[0].UDMA_mode > 2)
4050 chp->ch_drive[0].UDMA_mode = 2;
4051 if (chp->ch_drive[1].UDMA_mode > 2)
4052 chp->ch_drive[1].UDMA_mode = 2;
4053 }
4054 /* Set U66 if needed */
4055 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4056 chp->ch_drive[0].UDMA_mode > 2) ||
4057 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4058 chp->ch_drive[1].UDMA_mode > 2))
4059 scr |= PDC262_U66_EN(channel);
4060 else
4061 scr &= ~PDC262_U66_EN(channel);
4062 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4063 PDC262_U66, scr);
4064 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
4065 sc->sc_wdcdev.sc_dev.dv_xname, channel,
4066 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4067 PDC262_ATAPI(channel))), DEBUG_PROBE);
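/*
 * Description of the code below (the chip-level reason is not
 * documented here): the UDMA bit in the per-channel ATAPI register is
 * cleared only in the mixed case where one drive runs Ultra-DMA and
 * its mate only multiword DMA; in every other combination it is set.
 */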
4068 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4069 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
4070 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4071 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4072 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
4073 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4074 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4075 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4076 atapi = 0;
4077 else
4078 atapi = PDC262_ATAPI_UDMA;
4079 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4080 PDC262_ATAPI(channel), atapi);
4081 }
4082 }
4083 for (drive = 0; drive < 2; drive++) {
4084 drvp = &chp->ch_drive[drive];
4085 /* If no drive, skip */
4086 if ((drvp->drive_flags & DRIVE) == 0)
4087 continue;
4088 mode = 0;
4089 if (drvp->drive_flags & DRIVE_UDMA) {
4090 /* use Ultra/DMA */
4091 drvp->drive_flags &= ~DRIVE_DMA;
4092 mode = PDC2xx_TIM_SET_MB(mode,
4093 pdc2xx_udma_mb[drvp->UDMA_mode]);
4094 mode = PDC2xx_TIM_SET_MC(mode,
4095 pdc2xx_udma_mc[drvp->UDMA_mode]);
4096 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4097 } else if (drvp->drive_flags & DRIVE_DMA) {
4098 mode = PDC2xx_TIM_SET_MB(mode,
4099 pdc2xx_dma_mb[drvp->DMA_mode]);
4100 mode = PDC2xx_TIM_SET_MC(mode,
4101 pdc2xx_dma_mc[drvp->DMA_mode]);
4102 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4103 } else {
4104 mode = PDC2xx_TIM_SET_MB(mode,
4105 pdc2xx_dma_mb[0]);
4106 mode = PDC2xx_TIM_SET_MC(mode,
4107 pdc2xx_dma_mc[0]);
4108 }
4109 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
4110 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
4111 if (drvp->drive_flags & DRIVE_ATA)
4112 mode |= PDC2xx_TIM_PRE;
4113 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
4114 if (drvp->PIO_mode >= 3) {
4115 mode |= PDC2xx_TIM_IORDY;
4116 if (drive == 0)
4117 mode |= PDC2xx_TIM_IORDYp;
4118 }
4119 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
4120 "timings 0x%x\n",
4121 sc->sc_wdcdev.sc_dev.dv_xname,
4122 chp->channel, drive, mode), DEBUG_PROBE);
4123 pci_conf_write(sc->sc_pc, sc->sc_tag,
4124 PDC2xx_TIM(chp->channel, drive), mode);
4125 }
4126 if (idedma_ctl != 0) {
4127 /* Add software bits in status register */
4128 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4129 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4130 idedma_ctl);
4131 }
4132 pciide_print_modes(cp);
4133 }
4134
4135 void
4136 pdc20268_setup_channel(chp)
4137 struct channel_softc *chp;
4138 {
4139 struct ata_drive_datas *drvp;
4140 int drive;
4141 u_int32_t idedma_ctl;
4142 struct pciide_channel *cp = (struct pciide_channel*)chp;
4143 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4144 int u100;
4145
4146 /* setup DMA if needed */
4147 pciide_channel_dma_setup(cp);
4148
4149 idedma_ctl = 0;
4150
4151 /* I don't know what this is for; FreeBSD does it ... */
4152 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4153 IDEDMA_CMD + 0x1, 0x0b);
4154
4155 /*
4156 * I don't know what this is for; FreeBSD checks this ... it is not
4157 * the cable-type detect.
4158 */
4159 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4160 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
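/*
 * Judging from the clamp in the loop below, u100 == 0 (bit 0x04 set)
 * means modes above UDMA2 cannot be used, whatever this bit actually
 * reports.
 */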
4161
4162 for (drive = 0; drive < 2; drive++) {
4163 drvp = &chp->ch_drive[drive];
4164 /* If no drive, skip */
4165 if ((drvp->drive_flags & DRIVE) == 0)
4166 continue;
4167 if (drvp->drive_flags & DRIVE_UDMA) {
4168 /* use Ultra/DMA */
4169 drvp->drive_flags &= ~DRIVE_DMA;
4170 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4171 if (drvp->UDMA_mode > 2 && u100 == 0)
4172 drvp->UDMA_mode = 2;
4173 } else if (drvp->drive_flags & DRIVE_DMA) {
4174 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4175 }
4176 }
4177 /* Nothing to do to set up modes; the controller snoops the SET_FEATURES cmd. */
4178 if (idedma_ctl != 0) {
4179 /* Add software bits in status register */
4180 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4181 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4182 idedma_ctl);
4183 }
4184 pciide_print_modes(cp);
4185 }
4186
4187 int
4188 pdc202xx_pci_intr(arg)
4189 void *arg;
4190 {
4191 struct pciide_softc *sc = arg;
4192 struct pciide_channel *cp;
4193 struct channel_softc *wdc_cp;
4194 int i, rv, crv;
4195 u_int32_t scr;
4196
4197 rv = 0;
4198 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4199 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4200 cp = &sc->pciide_channels[i];
4201 wdc_cp = &cp->wdc_channel;
4202 /* If a compat channel, skip. */
4203 if (cp->compat)
4204 continue;
4205 if (scr & PDC2xx_SCR_INT(i)) {
4206 crv = wdcintr(wdc_cp);
4207 if (crv == 0)
4208 printf("%s:%d: bogus intr (reg 0x%x)\n",
4209 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4210 else
4211 rv = 1;
4212 }
4213 }
4214 return rv;
4215 }
4216
4217 int
4218 pdc20265_pci_intr(arg)
4219 void *arg;
4220 {
4221 struct pciide_softc *sc = arg;
4222 struct pciide_channel *cp;
4223 struct channel_softc *wdc_cp;
4224 int i, rv, crv;
4225 u_int32_t dmastat;
4226
4227 rv = 0;
4228 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4229 cp = &sc->pciide_channels[i];
4230 wdc_cp = &cp->wdc_channel;
4231 /* If a compat channel, skip. */
4232 if (cp->compat)
4233 continue;
4234 /*
4235 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
4236 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops.
4237 * So use it instead (requires 2 reg reads instead of 1,
4238 * but we can't do it another way).
4239 */
4240 dmastat = bus_space_read_1(sc->sc_dma_iot,
4241 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4242 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4243 continue;
4244 crv = wdcintr(wdc_cp);
4245 if (crv == 0)
4246 printf("%s:%d: bogus intr\n",
4247 sc->sc_wdcdev.sc_dev.dv_xname, i);
4248 else
4249 rv = 1;
4250 }
4251 return rv;
4252 }
4253
4254 void
4255 opti_chip_map(sc, pa)
4256 struct pciide_softc *sc;
4257 struct pci_attach_args *pa;
4258 {
4259 struct pciide_channel *cp;
4260 bus_size_t cmdsize, ctlsize;
4261 pcireg_t interface;
4262 u_int8_t init_ctrl;
4263 int channel;
4264
4265 if (pciide_chipen(sc, pa) == 0)
4266 return;
4267 printf("%s: bus-master DMA support present",
4268 sc->sc_wdcdev.sc_dev.dv_xname);
4269
4270 /*
4271 * XXXSCW:
4272 * There seem to be a couple of buggy revisions/implementations
4273 * of the OPTi pciide chipset. This kludge seems to fix one of
4274 * the reported problems (PR/11644) but still fails for the
4275 * other (PR/13151), although the latter may be due to other
4276 * issues too...
4277 */
4278 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4279 printf(" but disabled due to chip rev. <= 0x12");
4280 sc->sc_dma_ok = 0;
4281 } else
4282 pciide_mapreg_dma(sc, pa);
4283
4284 printf("\n");
4285
4286 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4287 WDC_CAPABILITY_MODE;
4288 sc->sc_wdcdev.PIO_cap = 4;
4289 if (sc->sc_dma_ok) {
4290 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4291 sc->sc_wdcdev.irqack = pciide_irqack;
4292 sc->sc_wdcdev.DMA_cap = 2;
4293 }
4294 sc->sc_wdcdev.set_modes = opti_setup_channel;
4295
4296 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4297 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4298
4299 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4300 OPTI_REG_INIT_CONTROL);
4301
4302 interface = PCI_INTERFACE(pa->pa_class);
4303
4304 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4305 cp = &sc->pciide_channels[channel];
4306 if (pciide_chansetup(sc, channel, interface) == 0)
4307 continue;
4308 if (channel == 1 &&
4309 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4310 printf("%s: %s channel ignored (disabled)\n",
4311 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4312 continue;
4313 }
4314 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4315 pciide_pci_intr);
4316 if (cp->hw_ok == 0)
4317 continue;
4318 pciide_map_compat_intr(pa, cp, channel, interface);
4319 if (cp->hw_ok == 0)
4320 continue;
4321 opti_setup_channel(&cp->wdc_channel);
4322 }
4323 }
4324
4325 void
4326 opti_setup_channel(chp)
4327 struct channel_softc *chp;
4328 {
4329 struct ata_drive_datas *drvp;
4330 struct pciide_channel *cp = (struct pciide_channel*)chp;
4331 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4332 int drive, spd;
4333 int mode[2];
4334 u_int8_t rv, mr;
4335
4336 /*
4337 * The `Delay' and `Address Setup Time' fields of the
4338 * Miscellaneous Register are always zero initially.
4339 */
4340 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4341 mr &= ~(OPTI_MISC_DELAY_MASK |
4342 OPTI_MISC_ADDR_SETUP_MASK |
4343 OPTI_MISC_INDEX_MASK);
4344
4345 /* Prime the control register before setting timing values */
4346 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4347
4348 /* Determine the clock rate of the PCI bus the chip is attached to */
4349 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4350 spd &= OPTI_STRAP_PCI_SPEED_MASK;
4351
4352 /* setup DMA if needed */
4353 pciide_channel_dma_setup(cp);
4354
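/*
 * The opti_tim_* tables are apparently indexed by a combined mode
 * number: entries 0-4 for PIO modes 0-4 and entries 5-7 for multiword
 * DMA modes 0-2, hence the `DMA_mode + 5' below.
 */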
4355 for (drive = 0; drive < 2; drive++) {
4356 drvp = &chp->ch_drive[drive];
4357 /* If no drive, skip */
4358 if ((drvp->drive_flags & DRIVE) == 0) {
4359 mode[drive] = -1;
4360 continue;
4361 }
4362
4363 if ((drvp->drive_flags & DRIVE_DMA)) {
4364 /*
4365 * Timings will be used for both PIO and DMA,
4366 * so adjust DMA mode if needed
4367 */
4368 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4369 drvp->PIO_mode = drvp->DMA_mode + 2;
4370 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4371 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4372 drvp->PIO_mode - 2 : 0;
4373 if (drvp->DMA_mode == 0)
4374 drvp->PIO_mode = 0;
4375
4376 mode[drive] = drvp->DMA_mode + 5;
4377 } else
4378 mode[drive] = drvp->PIO_mode;
4379
4380 if (drive && mode[0] >= 0 &&
4381 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4382 /*
4383 * Can't have two drives using different values
4384 * for `Address Setup Time'.
4385 * Slow down the faster drive to compensate.
4386 */
4387 int d = (opti_tim_as[spd][mode[0]] >
4388 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4389
4390 mode[d] = mode[1-d];
4391 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4392 chp->ch_drive[d].DMA_mode = 0;
4393 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4394 }
4395 }
4396
4397 for (drive = 0; drive < 2; drive++) {
4398 int m;
4399 if ((m = mode[drive]) < 0)
4400 continue;
4401
4402 /* Set the Address Setup Time and select appropriate index */
4403 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4404 rv |= OPTI_MISC_INDEX(drive);
4405 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4406
4407 /* Set the pulse width and recovery timing parameters */
4408 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4409 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4410 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4411 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4412
4413 /* Set the Enhanced Mode register appropriately */
4414 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4415 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4416 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4417 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4418 }
4419
4420 /* Finally, enable the timings */
4421 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4422
4423 pciide_print_modes(cp);
4424 }
4425
4426 #define ACARD_IS_850(sc) \
4427 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4428
4429 void
4430 acard_chip_map(sc, pa)
4431 struct pciide_softc *sc;
4432 struct pci_attach_args *pa;
4433 {
4434 struct pciide_channel *cp;
4435 int i;
4436 pcireg_t interface;
4437 bus_size_t cmdsize, ctlsize;
4438
4439 if (pciide_chipen(sc, pa) == 0)
4440 return;
4441
4442 /*
4443 * When the chip is in native mode it identifies itself as a
4444 * 'misc mass storage' device. Fake the interface in this case.
4445 */
4446 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4447 interface = PCI_INTERFACE(pa->pa_class);
4448 } else {
4449 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4450 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4451 }
4452
4453 printf("%s: bus-master DMA support present",
4454 sc->sc_wdcdev.sc_dev.dv_xname);
4455 pciide_mapreg_dma(sc, pa);
4456 printf("\n");
4457 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4458 WDC_CAPABILITY_MODE;
4459
4460 if (sc->sc_dma_ok) {
4461 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4462 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4463 sc->sc_wdcdev.irqack = pciide_irqack;
4464 }
4465 sc->sc_wdcdev.PIO_cap = 4;
4466 sc->sc_wdcdev.DMA_cap = 2;
4467 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4468
4469 sc->sc_wdcdev.set_modes = acard_setup_channel;
4470 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4471 sc->sc_wdcdev.nchannels = 2;
4472
4473 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4474 cp = &sc->pciide_channels[i];
4475 if (pciide_chansetup(sc, i, interface) == 0)
4476 continue;
4477 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4478 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4479 &ctlsize, pciide_pci_intr);
4480 } else {
4481 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4482 &cmdsize, &ctlsize);
4483 }
4484 if (cp->hw_ok == 0)
4485 return;
4486 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4487 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4488 wdcattach(&cp->wdc_channel);
4489 acard_setup_channel(&cp->wdc_channel);
4490 }
4491 if (!ACARD_IS_850(sc)) {
4492 u_int32_t reg;
4493 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4494 reg &= ~ATP860_CTRL_INT;
4495 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4496 }
4497 }
4498
4499 void
4500 acard_setup_channel(chp)
4501 struct channel_softc *chp;
4502 {
4503 struct ata_drive_datas *drvp;
4504 struct pciide_channel *cp = (struct pciide_channel*)chp;
4505 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4506 int channel = chp->channel;
4507 int drive;
4508 u_int32_t idetime, udma_mode;
4509 u_int32_t idedma_ctl;
4510
4511 /* setup DMA if needed */
4512 pciide_channel_dma_setup(cp);
4513
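/*
 * Register layout differs by chip, going by the accesses below: the
 * ATP850 uses one IDETIME register per channel and ATP850_SETTIME()
 * entries indexed by drive only, while the ATP860-class parts pack
 * both channels into shared IDETIME/UDMA registers and are the only
 * ones with an 80-conductor cable indication (ATP860_CTRL_80P).
 */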
4514 if (ACARD_IS_850(sc)) {
4515 idetime = 0;
4516 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4517 udma_mode &= ~ATP850_UDMA_MASK(channel);
4518 } else {
4519 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4520 idetime &= ~ATP860_SETTIME_MASK(channel);
4521 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4522 udma_mode &= ~ATP860_UDMA_MASK(channel);
4523
4524 /* check for an 80-pin cable */
4525 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4526 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4527 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4528 & ATP860_CTRL_80P(chp->channel)) {
4529 if (chp->ch_drive[0].UDMA_mode > 2)
4530 chp->ch_drive[0].UDMA_mode = 2;
4531 if (chp->ch_drive[1].UDMA_mode > 2)
4532 chp->ch_drive[1].UDMA_mode = 2;
4533 }
4534 }
4535 }
4536
4537 idedma_ctl = 0;
4538
4539 /* Per drive settings */
4540 for (drive = 0; drive < 2; drive++) {
4541 drvp = &chp->ch_drive[drive];
4542 /* If no drive, skip */
4543 if ((drvp->drive_flags & DRIVE) == 0)
4544 continue;
4545 /* add timing values, setup DMA if needed */
4546 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4547 (drvp->drive_flags & DRIVE_UDMA)) {
4548 /* use Ultra/DMA */
4549 if (ACARD_IS_850(sc)) {
4550 idetime |= ATP850_SETTIME(drive,
4551 acard_act_udma[drvp->UDMA_mode],
4552 acard_rec_udma[drvp->UDMA_mode]);
4553 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4554 acard_udma_conf[drvp->UDMA_mode]);
4555 } else {
4556 idetime |= ATP860_SETTIME(channel, drive,
4557 acard_act_udma[drvp->UDMA_mode],
4558 acard_rec_udma[drvp->UDMA_mode]);
4559 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4560 acard_udma_conf[drvp->UDMA_mode]);
4561 }
4562 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4563 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4564 (drvp->drive_flags & DRIVE_DMA)) {
4565 /* use Multiword DMA */
4566 drvp->drive_flags &= ~DRIVE_UDMA;
4567 if (ACARD_IS_850(sc)) {
4568 idetime |= ATP850_SETTIME(drive,
4569 acard_act_dma[drvp->DMA_mode],
4570 acard_rec_dma[drvp->DMA_mode]);
4571 } else {
4572 idetime |= ATP860_SETTIME(channel, drive,
4573 acard_act_dma[drvp->DMA_mode],
4574 acard_rec_dma[drvp->DMA_mode]);
4575 }
4576 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4577 } else {
4578 /* PIO only */
4579 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4580 if (ACARD_IS_850(sc)) {
4581 idetime |= ATP850_SETTIME(drive,
4582 acard_act_pio[drvp->PIO_mode],
4583 acard_rec_pio[drvp->PIO_mode]);
4584 } else {
4585 idetime |= ATP860_SETTIME(channel, drive,
4586 acard_act_pio[drvp->PIO_mode],
4587 acard_rec_pio[drvp->PIO_mode]);
4588 }
4589 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4590 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4591 | ATP8x0_CTRL_EN(channel));
4592 }
4593 }
4594
4595 if (idedma_ctl != 0) {
4596 /* Add software bits in status register */
4597 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4598 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4599 }
4600 pciide_print_modes(cp);
4601
4602 if (ACARD_IS_850(sc)) {
4603 pci_conf_write(sc->sc_pc, sc->sc_tag,
4604 ATP850_IDETIME(channel), idetime);
4605 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4606 } else {
4607 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4608 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4609 }
4610 }
4611
4612 int
4613 acard_pci_intr(arg)
4614 void *arg;
4615 {
4616 struct pciide_softc *sc = arg;
4617 struct pciide_channel *cp;
4618 struct channel_softc *wdc_cp;
4619 int rv = 0;
4620 int dmastat, i, crv;
4621
4622 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4623 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4624 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4625 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4626 continue;
4627 cp = &sc->pciide_channels[i];
4628 wdc_cp = &cp->wdc_channel;
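/*
 * If the channel is not expecting an interrupt, the status is still
 * passed to wdcintr() and then acknowledged by writing IDEDMA_CTL
 * back, without printing a "bogus intr" message.
 */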
4629 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4630 (void)wdcintr(wdc_cp);
4631 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4632 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4633 continue;
4634 }
4635 crv = wdcintr(wdc_cp);
4636 if (crv == 0)
4637 printf("%s:%d: bogus intr\n",
4638 sc->sc_wdcdev.sc_dev.dv_xname, i);
4639 else if (crv == 1)
4640 rv = 1;
4641 else if (rv == 0)
4642 rv = crv;
4643 }
4644 return rv;
4645 }
4646
4647 static int
4648 sl82c105_bugchk(struct pci_attach_args *pa)
4649 {
4650
4651 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
4652 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
4653 return (0);
4654
4655 if (PCI_REVISION(pa->pa_class) <= 0x05)
4656 return (1);
4657
4658 return (0);
4659 }
4660
4661 void
4662 sl82c105_chip_map(sc, pa)
4663 struct pciide_softc *sc;
4664 struct pci_attach_args *pa;
4665 {
4666 struct pciide_channel *cp;
4667 bus_size_t cmdsize, ctlsize;
4668 pcireg_t interface, idecr;
4669 int channel;
4670
4671 if (pciide_chipen(sc, pa) == 0)
4672 return;
4673
4674 printf("%s: bus-master DMA support present",
4675 sc->sc_wdcdev.sc_dev.dv_xname);
4676
4677 /*
4678 * Check to see if we're part of the Winbond 83c553 Southbridge.
4679 * If so, we need to disable DMA on rev. <= 5 of that chip.
4680 */
4681 if (pci_find_device(pa, sl82c105_bugchk)) {
4682 printf(" but disabled due to 83c553 rev. <= 0x05");
4683 sc->sc_dma_ok = 0;
4684 } else
4685 pciide_mapreg_dma(sc, pa);
4686 printf("\n");
4687
4688 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4689 WDC_CAPABILITY_MODE;
4690 sc->sc_wdcdev.PIO_cap = 4;
4691 if (sc->sc_dma_ok) {
4692 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4693 sc->sc_wdcdev.irqack = pciide_irqack;
4694 sc->sc_wdcdev.DMA_cap = 2;
4695 }
4696 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
4697
4698 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4699 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4700
4701 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
4702
4703 interface = PCI_INTERFACE(pa->pa_class);
4704
4705 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4706 cp = &sc->pciide_channels[channel];
4707 if (pciide_chansetup(sc, channel, interface) == 0)
4708 continue;
4709 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
4710 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
4711 printf("%s: %s channel ignored (disabled)\n",
4712 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4713 continue;
4714 }
4715 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4716 pciide_pci_intr);
4717 if (cp->hw_ok == 0)
4718 continue;
4719 pciide_map_compat_intr(pa, cp, channel, interface);
4720 if (cp->hw_ok == 0)
4721 continue;
4722 sl82c105_setup_channel(&cp->wdc_channel);
4723 }
4724 }
4725
4726 void
4727 sl82c105_setup_channel(chp)
4728 struct channel_softc *chp;
4729 {
4730 struct ata_drive_datas *drvp;
4731 struct pciide_channel *cp = (struct pciide_channel*)chp;
4732 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4733 int pxdx_reg, drive;
4734 pcireg_t pxdx;
4735
4736 /* Set up DMA if needed. */
4737 pciide_channel_dma_setup(cp);
4738
4739 for (drive = 0; drive < 2; drive++) {
4740 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
4741 : SYMPH_P1D0CR) + (drive * 4);
4742
4743 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
4744
4745 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
4746 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
4747
4748 drvp = &chp->ch_drive[drive];
4749 /* If no drive, skip. */
4750 if ((drvp->drive_flags & DRIVE) == 0) {
4751 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4752 continue;
4753 }
4754
4755 if (drvp->drive_flags & DRIVE_DMA) {
4756 /*
4757 * Timings will be used for both PIO and DMA,
4758 * so adjust DMA mode if needed.
4759 */
4760 if (drvp->PIO_mode >= 3) {
4761 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
4762 drvp->DMA_mode = drvp->PIO_mode - 2;
4763 if (drvp->DMA_mode < 1) {
4764 /*
4765 * Can't mix both PIO and DMA.
4766 * Disable DMA.
4767 */
4768 drvp->drive_flags &= ~DRIVE_DMA;
4769 }
4770 } else {
4771 /*
4772 * Can't mix both PIO and DMA. Disable
4773 * DMA.
4774 */
4775 drvp->drive_flags &= ~DRIVE_DMA;
4776 }
4777 }
4778
4779 if (drvp->drive_flags & DRIVE_DMA) {
4780 /* Use multi-word DMA. */
4781 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
4782 PxDx_CMD_ON_SHIFT;
4783 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
4784 } else {
4785 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
4786 PxDx_CMD_ON_SHIFT;
4787 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
4788 }
4789
4790 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
4791
4792 /* ...and set the mode for this drive. */
4793 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4794 }
4795
4796 pciide_print_modes(cp);
4797 }
4798
4799 void
4800 serverworks_chip_map(sc, pa)
4801 struct pciide_softc *sc;
4802 struct pci_attach_args *pa;
4803 {
4804 struct pciide_channel *cp;
4805 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
4806 pcitag_t pcib_tag;
4807 int channel;
4808 bus_size_t cmdsize, ctlsize;
4809
4810 if (pciide_chipen(sc, pa) == 0)
4811 return;
4812
4813 printf("%s: bus-master DMA support present",
4814 sc->sc_wdcdev.sc_dev.dv_xname);
4815 pciide_mapreg_dma(sc, pa);
4816 printf("\n");
4817 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4818 WDC_CAPABILITY_MODE;
4819
4820 if (sc->sc_dma_ok) {
4821 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4822 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4823 sc->sc_wdcdev.irqack = pciide_irqack;
4824 }
4825 sc->sc_wdcdev.PIO_cap = 4;
4826 sc->sc_wdcdev.DMA_cap = 2;
4827 switch (sc->sc_pp->ide_product) {
4828 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
4829 sc->sc_wdcdev.UDMA_cap = 2;
4830 break;
4831 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
4832 if (PCI_REVISION(pa->pa_class) < 0x92)
4833 sc->sc_wdcdev.UDMA_cap = 4;
4834 else
4835 sc->sc_wdcdev.UDMA_cap = 5;
4836 break;
4837 case PCI_PRODUCT_SERVERWORKS_CSB6_IDE:
4838 sc->sc_wdcdev.UDMA_cap = 5;
4839 break;
4840 }
4841
4842 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
4843 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4844 sc->sc_wdcdev.nchannels = 2;
4845
4846 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4847 cp = &sc->pciide_channels[channel];
4848 if (pciide_chansetup(sc, channel, interface) == 0)
4849 continue;
4850 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4851 serverworks_pci_intr);
4852 if (cp->hw_ok == 0)
4853 return;
4854 pciide_map_compat_intr(pa, cp, channel, interface);
4855 if (cp->hw_ok == 0)
4856 return;
4857 serverworks_setup_channel(&cp->wdc_channel);
4858 }
4859
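/*
 * Final tweak to PCI function 0 of the same device (presumably the
 * host/ISA bridge part of the chip): clear bit 13 and set bit 14 of
 * config register 0x64.  The meaning of these bits is not documented
 * here.
 */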
4860 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
4861 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
4862 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
4863 }
4864
4865 void
4866 serverworks_setup_channel(chp)
4867 struct channel_softc *chp;
4868 {
4869 struct ata_drive_datas *drvp;
4870 struct pciide_channel *cp = (struct pciide_channel*)chp;
4871 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4872 int channel = chp->channel;
4873 int drive, unit;
4874 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
4875 u_int32_t idedma_ctl;
4876 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
4877 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
4878
4879 /* setup DMA if needed */
4880 pciide_channel_dma_setup(cp);
4881
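/*
 * Register layout, as implied by the masks and shifts below: config
 * regs 0x40 (PIO) and 0x44 (MW DMA) hold one timing byte per drive,
 * each channel owning a 16-bit half with drive 0 in the upper byte;
 * regs 0x48 and 0x54 hold a 4-bit mode value per drive starting at
 * bit 16, and the low bits of 0x54 are per-drive UDMA enables.
 */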
4882 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
4883 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
4884 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
4885 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
4886
4887 pio_time &= ~(0xffff << (16 * channel));
4888 dma_time &= ~(0xffff << (16 * channel));
4889 pio_mode &= ~(0xff << (8 * channel + 16));
4890 udma_mode &= ~(0xff << (8 * channel + 16));
4891 udma_mode &= ~(3 << (2 * channel));
4892
4893 idedma_ctl = 0;
4894
4895 /* Per drive settings */
4896 for (drive = 0; drive < 2; drive++) {
4897 drvp = &chp->ch_drive[drive];
4898 /* If no drive, skip */
4899 if ((drvp->drive_flags & DRIVE) == 0)
4900 continue;
4901 unit = drive + 2 * channel;
4902 /* add timing values, setup DMA if needed */
4903 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
4904 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
4905 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4906 (drvp->drive_flags & DRIVE_UDMA)) {
4907 /* use Ultra/DMA, check for 80-pin cable */
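/*
 * Cable detection is apparently encoded in the subsystem ID: bit 14
 * (channel 0) or 15 (channel 1) of the subsystem device ID reads as
 * set when an 80-conductor cable is present; otherwise UDMA is
 * limited to mode 2.
 */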
4908 if (drvp->UDMA_mode > 2 &&
4909 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
4910 drvp->UDMA_mode = 2;
4911 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4912 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
4913 udma_mode |= 1 << unit;
4914 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4915 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4916 (drvp->drive_flags & DRIVE_DMA)) {
4917 /* use Multiword DMA */
4918 drvp->drive_flags &= ~DRIVE_UDMA;
4919 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4920 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4921 } else {
4922 /* PIO only */
4923 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4924 }
4925 }
4926
4927 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
4928 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
4929 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
4930 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
4931 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
4932
4933 if (idedma_ctl != 0) {
4934 /* Add software bits in status register */
4935 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4936 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4937 }
4938 pciide_print_modes(cp);
4939 }
4940
4941 int
4942 serverworks_pci_intr(arg)
4943 void *arg;
4944 {
4945 struct pciide_softc *sc = arg;
4946 struct pciide_channel *cp;
4947 struct channel_softc *wdc_cp;
4948 int rv = 0;
4949 int dmastat, i, crv;
4950
4951 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4952 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4953 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4954 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4955 IDEDMA_CTL_INTR)
4956 continue;
4957 cp = &sc->pciide_channels[i];
4958 wdc_cp = &cp->wdc_channel;
4959 crv = wdcintr(wdc_cp);
4960 if (crv == 0) {
4961 printf("%s:%d: bogus intr\n",
4962 sc->sc_wdcdev.sc_dev.dv_xname, i);
4963 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4964 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4965 } else
4966 rv = 1;
4967 }
4968 return rv;
4969 }
4970