1 /*	$NetBSD: pciide.c,v 1.178 2003/01/24 05:51:04 thorpej Exp $	*/
2
3
4 /*
5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by Manuel Bouyer.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35
36 /*
37 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by Christopher G. Demetriou
50 * for the NetBSD Project.
51 * 4. The name of the author may not be used to endorse or promote products
52 * derived from this software without specific prior written permission
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 */
65
66 /*
67 * PCI IDE controller driver.
68 *
69 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70 * sys/dev/pci/ppb.c, revision 1.16).
71 *
72 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74 * 5/16/94" from the PCI SIG.
75 *
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.178 2003/01/24 05:51:04 thorpej Exp $");
80
81 #ifndef WDCDEBUG
82 #define WDCDEBUG
83 #endif
84
85 #define DEBUG_DMA 0x01
86 #define DEBUG_XFERS 0x02
87 #define DEBUG_FUNCS 0x08
88 #define DEBUG_PROBE 0x10
89 #ifdef WDCDEBUG
90 int wdcdebug_pciide_mask = 0;
91 #define WDCDEBUG_PRINT(args, level) \
92 if (wdcdebug_pciide_mask & (level)) printf args
93 #else
94 #define WDCDEBUG_PRINT(args, level)
95 #endif
96 #include <sys/param.h>
97 #include <sys/systm.h>
98 #include <sys/device.h>
99 #include <sys/malloc.h>
100
101 #include <uvm/uvm_extern.h>
102
103 #include <machine/endian.h>
104
105 #include <dev/pci/pcireg.h>
106 #include <dev/pci/pcivar.h>
107 #include <dev/pci/pcidevs.h>
108 #include <dev/pci/pciidereg.h>
109 #include <dev/pci/pciidevar.h>
110 #include <dev/pci/pciide_piix_reg.h>
111 #include <dev/pci/pciide_amd_reg.h>
112 #include <dev/pci/pciide_apollo_reg.h>
113 #include <dev/pci/pciide_cmd_reg.h>
114 #include <dev/pci/pciide_cy693_reg.h>
115 #include <dev/pci/pciide_sis_reg.h>
116 #include <dev/pci/pciide_acer_reg.h>
117 #include <dev/pci/pciide_pdc202xx_reg.h>
118 #include <dev/pci/pciide_opti_reg.h>
119 #include <dev/pci/pciide_hpt_reg.h>
120 #include <dev/pci/pciide_acard_reg.h>
121 #include <dev/pci/pciide_sl82c105_reg.h>
122 #include <dev/pci/cy82c693var.h>
123
124 #include "opt_pciide.h"
125
126 /* inlines for reading/writing 8-bit PCI registers */
127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
128 int));
129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
130 int, u_int8_t));
131
132 static __inline u_int8_t
133 pciide_pci_read(pc, pa, reg)
134 pci_chipset_tag_t pc;
135 pcitag_t pa;
136 int reg;
137 {
138
139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
140 ((reg & 0x03) * 8) & 0xff);
141 }
142
143 static __inline void
144 pciide_pci_write(pc, pa, reg, val)
145 pci_chipset_tag_t pc;
146 pcitag_t pa;
147 int reg;
148 u_int8_t val;
149 {
150 pcireg_t pcival;
151
152 pcival = pci_conf_read(pc, pa, (reg & ~0x03));
153 pcival &= ~(0xff << ((reg & 0x03) * 8));
154 pcival |= (val << ((reg & 0x03) * 8));
155 pci_conf_write(pc, pa, (reg & ~0x03), pcival);
156 }
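/*
 * These helpers let chip-specific code touch a single byte-wide
 * configuration register without disturbing its neighbours: the read
 * shifts the byte out of the containing 32-bit dword, and the write
 * does a read-modify-write of that dword.  A typical use from a
 * chip_map routine (TIMING_REG is a hypothetical register offset):
 *
 *	u_int8_t t = pciide_pci_read(sc->sc_pc, sc->sc_tag, TIMING_REG);
 *	pciide_pci_write(sc->sc_pc, sc->sc_tag, TIMING_REG, t | 0x01);
 */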
157
158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159
160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
161 void piix_setup_channel __P((struct channel_softc*));
162 void piix3_4_setup_channel __P((struct channel_softc*));
163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
166
167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void amd7x6_setup_channel __P((struct channel_softc*));
169
170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void apollo_setup_channel __P((struct channel_softc*));
172
173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_setup_channel __P((struct channel_softc*));
176 void cmd_channel_map __P((struct pci_attach_args *,
177 struct pciide_softc *, int));
178 int cmd_pci_intr __P((void *));
179 void cmd646_9_irqack __P((struct channel_softc *));
180 void cmd680_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
181 void cmd680_setup_channel __P((struct channel_softc*));
182 void cmd680_channel_map __P((struct pci_attach_args *,
183 struct pciide_softc *, int));
184
185 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
186 void cy693_setup_channel __P((struct channel_softc*));
187
188 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void sis_setup_channel __P((struct channel_softc*));
190 static int sis_hostbr_match __P(( struct pci_attach_args *));
191
192 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 void acer_setup_channel __P((struct channel_softc*));
194 int acer_pci_intr __P((void *));
195
196 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
197 void pdc202xx_setup_channel __P((struct channel_softc*));
198 void pdc20268_setup_channel __P((struct channel_softc*));
199 int pdc202xx_pci_intr __P((void *));
200 int pdc20265_pci_intr __P((void *));
201
202 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
203 void opti_setup_channel __P((struct channel_softc*));
204
205 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void hpt_setup_channel __P((struct channel_softc*));
207 int hpt_pci_intr __P((void *));
208
209 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
210 void acard_setup_channel __P((struct channel_softc*));
211 int acard_pci_intr __P((void *));
212
213 void serverworks_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
214 void serverworks_setup_channel __P((struct channel_softc*));
215 int serverworks_pci_intr __P((void *));
216
217 void sl82c105_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
218 void sl82c105_setup_channel __P((struct channel_softc*));
219
220 void pciide_channel_dma_setup __P((struct pciide_channel *));
221 int pciide_dma_table_setup __P((struct pciide_softc*, int, int));
222 int pciide_dma_init __P((void*, int, int, void *, size_t, int));
223 void pciide_dma_start __P((void*, int, int));
224 int pciide_dma_finish __P((void*, int, int, int));
225 void pciide_irqack __P((struct channel_softc *));
226 void pciide_print_modes __P((struct pciide_channel *));
227
228 struct pciide_product_desc {
229 u_int32_t ide_product;
230 int ide_flags;
231 const char *ide_name;
232 /* map and setup chip, probe drives */
233 void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
234 };
235
236 /* Flags for ide_flags */
237 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */
238 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */
239
240 /* Default product description for devices not known to this driver */
241 const struct pciide_product_desc default_product_desc = {
242 0,
243 0,
244 "Generic PCI IDE controller",
245 default_chip_map,
246 };
247
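/*
 * Per-vendor product tables follow.  Each table is terminated by an
 * entry whose chip_map is NULL; devices with no matching entry fall
 * back to default_product_desc above.
 */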
248 const struct pciide_product_desc pciide_intel_products[] = {
249 { PCI_PRODUCT_INTEL_82092AA,
250 0,
251 "Intel 82092AA IDE controller",
252 default_chip_map,
253 },
254 { PCI_PRODUCT_INTEL_82371FB_IDE,
255 0,
256 "Intel 82371FB IDE controller (PIIX)",
257 piix_chip_map,
258 },
259 { PCI_PRODUCT_INTEL_82371SB_IDE,
260 0,
261 "Intel 82371SB IDE Interface (PIIX3)",
262 piix_chip_map,
263 },
264 { PCI_PRODUCT_INTEL_82371AB_IDE,
265 0,
266 "Intel 82371AB IDE controller (PIIX4)",
267 piix_chip_map,
268 },
269 { PCI_PRODUCT_INTEL_82440MX_IDE,
270 0,
271 "Intel 82440MX IDE controller",
272 piix_chip_map
273 },
274 { PCI_PRODUCT_INTEL_82801AA_IDE,
275 0,
276 "Intel 82801AA IDE Controller (ICH)",
277 piix_chip_map,
278 },
279 { PCI_PRODUCT_INTEL_82801AB_IDE,
280 0,
281 "Intel 82801AB IDE Controller (ICH0)",
282 piix_chip_map,
283 },
284 { PCI_PRODUCT_INTEL_82801BA_IDE,
285 0,
286 "Intel 82801BA IDE Controller (ICH2)",
287 piix_chip_map,
288 },
289 { PCI_PRODUCT_INTEL_82801BAM_IDE,
290 0,
291 "Intel 82801BAM IDE Controller (ICH2)",
292 piix_chip_map,
293 },
294 { PCI_PRODUCT_INTEL_82801CA_IDE_1,
295 0,
296 "Intel 82801CA IDE Controller",
297 piix_chip_map,
298 },
299 { PCI_PRODUCT_INTEL_82801CA_IDE_2,
300 0,
301 "Intel 82801CA IDE Controller",
302 piix_chip_map,
303 },
304 { PCI_PRODUCT_INTEL_82801DB_IDE,
305 0,
306 "Intel 82801DB IDE Controller (ICH4)",
307 piix_chip_map,
308 },
309 { 0,
310 0,
311 NULL,
312 NULL
313 }
314 };
315
316 const struct pciide_product_desc pciide_amd_products[] = {
317 { PCI_PRODUCT_AMD_PBC756_IDE,
318 0,
319 "Advanced Micro Devices AMD756 IDE Controller",
320 amd7x6_chip_map
321 },
322 { PCI_PRODUCT_AMD_PBC766_IDE,
323 0,
324 "Advanced Micro Devices AMD766 IDE Controller",
325 amd7x6_chip_map
326 },
327 { PCI_PRODUCT_AMD_PBC768_IDE,
328 0,
329 "Advanced Micro Devices AMD768 IDE Controller",
330 amd7x6_chip_map
331 },
332 { PCI_PRODUCT_AMD_PBC8111_IDE,
333 0,
334 "Advanced Micro Devices AMD8111 IDE Controller",
335 amd7x6_chip_map
336 },
337 { 0,
338 0,
339 NULL,
340 NULL
341 }
342 };
343
344 const struct pciide_product_desc pciide_nvidia_products[] = {
345 { PCI_PRODUCT_NVIDIA_NFORCE_ATA100,
346 0,
347 "NVIDIA nForce IDE Controller",
348 amd7x6_chip_map
349 },
350 { PCI_PRODUCT_NVIDIA_NFORCE2_ATA133,
351 0,
352 "NVIDIA nForce2 IDE Controller",
353 amd7x6_chip_map
354 },
355 { 0,
356 0,
357 NULL,
358 NULL
359 }
360 };
361
362 const struct pciide_product_desc pciide_cmd_products[] = {
363 { PCI_PRODUCT_CMDTECH_640,
364 0,
365 "CMD Technology PCI0640",
366 cmd_chip_map
367 },
368 { PCI_PRODUCT_CMDTECH_643,
369 0,
370 "CMD Technology PCI0643",
371 cmd0643_9_chip_map,
372 },
373 { PCI_PRODUCT_CMDTECH_646,
374 0,
375 "CMD Technology PCI0646",
376 cmd0643_9_chip_map,
377 },
378 { PCI_PRODUCT_CMDTECH_648,
379 IDE_PCI_CLASS_OVERRIDE,
380 "CMD Technology PCI0648",
381 cmd0643_9_chip_map,
382 },
383 { PCI_PRODUCT_CMDTECH_649,
384 IDE_PCI_CLASS_OVERRIDE,
385 "CMD Technology PCI0649",
386 cmd0643_9_chip_map,
387 },
388 { PCI_PRODUCT_CMDTECH_680,
389 IDE_PCI_CLASS_OVERRIDE,
390 "Silicon Image 0680",
391 cmd680_chip_map,
392 },
393 { 0,
394 0,
395 NULL,
396 NULL
397 }
398 };
399
400 const struct pciide_product_desc pciide_via_products[] = {
401 { PCI_PRODUCT_VIATECH_VT82C586_IDE,
402 0,
403 NULL,
404 apollo_chip_map,
405 },
406 { PCI_PRODUCT_VIATECH_VT82C586A_IDE,
407 0,
408 NULL,
409 apollo_chip_map,
410 },
411 { 0,
412 0,
413 NULL,
414 NULL
415 }
416 };
417
418 const struct pciide_product_desc pciide_cypress_products[] = {
419 { PCI_PRODUCT_CONTAQ_82C693,
420 IDE_16BIT_IOSPACE,
421 "Cypress 82C693 IDE Controller",
422 cy693_chip_map,
423 },
424 { 0,
425 0,
426 NULL,
427 NULL
428 }
429 };
430
431 const struct pciide_product_desc pciide_sis_products[] = {
432 { PCI_PRODUCT_SIS_5597_IDE,
433 0,
434 "Silicon Integrated System 5597/5598 IDE controller",
435 sis_chip_map,
436 },
437 { 0,
438 0,
439 NULL,
440 NULL
441 }
442 };
443
444 const struct pciide_product_desc pciide_acer_products[] = {
445 { PCI_PRODUCT_ALI_M5229,
446 0,
447 "Acer Labs M5229 UDMA IDE Controller",
448 acer_chip_map,
449 },
450 { 0,
451 0,
452 NULL,
453 NULL
454 }
455 };
456
457 const struct pciide_product_desc pciide_promise_products[] = {
458 { PCI_PRODUCT_PROMISE_ULTRA33,
459 IDE_PCI_CLASS_OVERRIDE,
460 "Promise Ultra33/ATA Bus Master IDE Accelerator",
461 pdc202xx_chip_map,
462 },
463 { PCI_PRODUCT_PROMISE_ULTRA66,
464 IDE_PCI_CLASS_OVERRIDE,
465 "Promise Ultra66/ATA Bus Master IDE Accelerator",
466 pdc202xx_chip_map,
467 },
468 { PCI_PRODUCT_PROMISE_ULTRA100,
469 IDE_PCI_CLASS_OVERRIDE,
470 "Promise Ultra100/ATA Bus Master IDE Accelerator",
471 pdc202xx_chip_map,
472 },
473 { PCI_PRODUCT_PROMISE_ULTRA100X,
474 IDE_PCI_CLASS_OVERRIDE,
475 "Promise Ultra100/ATA Bus Master IDE Accelerator",
476 pdc202xx_chip_map,
477 },
478 { PCI_PRODUCT_PROMISE_ULTRA100TX2,
479 IDE_PCI_CLASS_OVERRIDE,
480 "Promise Ultra100TX2/ATA Bus Master IDE Accelerator",
481 pdc202xx_chip_map,
482 },
483 { PCI_PRODUCT_PROMISE_ULTRA100TX2v2,
484 IDE_PCI_CLASS_OVERRIDE,
485 "Promise Ultra100TX2v2/ATA Bus Master IDE Accelerator",
486 pdc202xx_chip_map,
487 },
488 { PCI_PRODUCT_PROMISE_ULTRA133,
489 IDE_PCI_CLASS_OVERRIDE,
490 "Promise Ultra133/ATA Bus Master IDE Accelerator",
491 pdc202xx_chip_map,
492 },
493 { PCI_PRODUCT_PROMISE_ULTRA133TX2,
494 IDE_PCI_CLASS_OVERRIDE,
495 "Promise Ultra133TX2/ATA Bus Master IDE Accelerator",
496 pdc202xx_chip_map,
497 },
498 { PCI_PRODUCT_PROMISE_ULTRA133TX2v2,
499 IDE_PCI_CLASS_OVERRIDE,
500 "Promise Ultra133TX2v2/ATA Bus Master IDE Accelerator",
501 pdc202xx_chip_map,
502 },
503 { PCI_PRODUCT_PROMISE_SATA150TX2PLUS,
504 IDE_PCI_CLASS_OVERRIDE,
505 "Promise Serial ATA/150 TX2plus Bus Master IDE Accelerator",
506 pdc202xx_chip_map,
507 },
508 { 0,
509 0,
510 NULL,
511 NULL
512 }
513 };
514
515 const struct pciide_product_desc pciide_opti_products[] = {
516 { PCI_PRODUCT_OPTI_82C621,
517 0,
518 "OPTi 82c621 PCI IDE controller",
519 opti_chip_map,
520 },
521 { PCI_PRODUCT_OPTI_82C568,
522 0,
523 "OPTi 82c568 (82c621 compatible) PCI IDE controller",
524 opti_chip_map,
525 },
526 { PCI_PRODUCT_OPTI_82D568,
527 0,
528 "OPTi 82d568 (82c621 compatible) PCI IDE controller",
529 opti_chip_map,
530 },
531 { 0,
532 0,
533 NULL,
534 NULL
535 }
536 };
537
538 const struct pciide_product_desc pciide_triones_products[] = {
539 { PCI_PRODUCT_TRIONES_HPT366,
540 IDE_PCI_CLASS_OVERRIDE,
541 NULL,
542 hpt_chip_map,
543 },
544 { PCI_PRODUCT_TRIONES_HPT372,
545 IDE_PCI_CLASS_OVERRIDE,
546 NULL,
547 hpt_chip_map
548 },
549 { PCI_PRODUCT_TRIONES_HPT374,
550 IDE_PCI_CLASS_OVERRIDE,
551 NULL,
552 hpt_chip_map
553 },
554 { 0,
555 0,
556 NULL,
557 NULL
558 }
559 };
560
561 const struct pciide_product_desc pciide_acard_products[] = {
562 { PCI_PRODUCT_ACARD_ATP850U,
563 IDE_PCI_CLASS_OVERRIDE,
564 "Acard ATP850U Ultra33 IDE Controller",
565 acard_chip_map,
566 },
567 { PCI_PRODUCT_ACARD_ATP860,
568 IDE_PCI_CLASS_OVERRIDE,
569 "Acard ATP860 Ultra66 IDE Controller",
570 acard_chip_map,
571 },
572 { PCI_PRODUCT_ACARD_ATP860A,
573 IDE_PCI_CLASS_OVERRIDE,
574 "Acard ATP860-A Ultra66 IDE Controller",
575 acard_chip_map,
576 },
577 { 0,
578 0,
579 NULL,
580 NULL
581 }
582 };
583
584 const struct pciide_product_desc pciide_serverworks_products[] = {
585 { PCI_PRODUCT_SERVERWORKS_OSB4_IDE,
586 0,
587 "ServerWorks OSB4 IDE Controller",
588 serverworks_chip_map,
589 },
590 { PCI_PRODUCT_SERVERWORKS_CSB5_IDE,
591 0,
592 "ServerWorks CSB5 IDE Controller",
593 serverworks_chip_map,
594 },
595 { 0,
596 0,
597 NULL,
598 }
599 };
600
601 const struct pciide_product_desc pciide_symphony_products[] = {
602 { PCI_PRODUCT_SYMPHONY_82C105,
603 0,
604 "Symphony Labs 82C105 IDE controller",
605 sl82c105_chip_map,
606 },
607 { 0,
608 0,
609 NULL,
610 }
611 };
612
613 const struct pciide_product_desc pciide_winbond_products[] = {
614 { PCI_PRODUCT_WINBOND_W83C553F_1,
615 0,
616 "Winbond W83C553F IDE controller",
617 sl82c105_chip_map,
618 },
619 { 0,
620 0,
621 NULL,
622 }
623 };
624
625 struct pciide_vendor_desc {
626 u_int32_t ide_vendor;
627 const struct pciide_product_desc *ide_products;
628 };
629
630 const struct pciide_vendor_desc pciide_vendors[] = {
631 { PCI_VENDOR_INTEL, pciide_intel_products },
632 { PCI_VENDOR_CMDTECH, pciide_cmd_products },
633 { PCI_VENDOR_VIATECH, pciide_via_products },
634 { PCI_VENDOR_CONTAQ, pciide_cypress_products },
635 { PCI_VENDOR_SIS, pciide_sis_products },
636 { PCI_VENDOR_ALI, pciide_acer_products },
637 { PCI_VENDOR_PROMISE, pciide_promise_products },
638 { PCI_VENDOR_AMD, pciide_amd_products },
639 { PCI_VENDOR_OPTI, pciide_opti_products },
640 { PCI_VENDOR_TRIONES, pciide_triones_products },
641 { PCI_VENDOR_ACARD, pciide_acard_products },
642 { PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
643 { PCI_VENDOR_SYMPHONY, pciide_symphony_products },
644 { PCI_VENDOR_WINBOND, pciide_winbond_products },
645 { PCI_VENDOR_NVIDIA, pciide_nvidia_products },
646 { 0, NULL }
647 };
648
649 /* options passed via the 'flags' config keyword */
650 #define PCIIDE_OPTIONS_DMA 0x01
651 #define PCIIDE_OPTIONS_NODMA 0x02
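/*
 * PCIIDE_OPTIONS_DMA lets the generic chip map use bus-master DMA on an
 * otherwise unknown controller; PCIIDE_OPTIONS_NODMA forces DMA off.
 * Both are tested against dv_cfdata->cf_flags, so they can be set from
 * the kernel configuration file with something like (a sketch of the
 * usual config(8) syntax, not verified here):
 *
 *	pciide* at pci? dev ? function ? flags 0x0002
 */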
652
653 int pciide_match __P((struct device *, struct cfdata *, void *));
654 void pciide_attach __P((struct device *, struct device *, void *));
655
656 CFATTACH_DECL(pciide, sizeof(struct pciide_softc),
657 pciide_match, pciide_attach, NULL, NULL);
658
659 int pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
660 int pciide_mapregs_compat __P(( struct pci_attach_args *,
661 struct pciide_channel *, int, bus_size_t *, bus_size_t*));
662 int pciide_mapregs_native __P((struct pci_attach_args *,
663 struct pciide_channel *, bus_size_t *, bus_size_t *,
664 int (*pci_intr) __P((void *))));
665 void pciide_mapreg_dma __P((struct pciide_softc *,
666 struct pci_attach_args *));
667 int pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
668 void pciide_mapchan __P((struct pci_attach_args *,
669 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
670 int (*pci_intr) __P((void *))));
671 int pciide_chan_candisable __P((struct pciide_channel *));
672 void pciide_map_compat_intr __P(( struct pci_attach_args *,
673 struct pciide_channel *, int, int));
674 int pciide_compat_intr __P((void *));
675 int pciide_pci_intr __P((void *));
676 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
677
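/*
 * Find the product descriptor for a PCI ID: match the vendor in
 * pciide_vendors first, then scan that vendor's product table.
 * Returns NULL if the device is not known to this driver.
 */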
678 const struct pciide_product_desc *
679 pciide_lookup_product(id)
680 u_int32_t id;
681 {
682 const struct pciide_product_desc *pp;
683 const struct pciide_vendor_desc *vp;
684
685 for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
686 if (PCI_VENDOR(id) == vp->ide_vendor)
687 break;
688
689 if ((pp = vp->ide_products) == NULL)
690 return NULL;
691
692 for (; pp->chip_map != NULL; pp++)
693 if (PCI_PRODUCT(id) == pp->ide_product)
694 break;
695
696 if (pp->chip_map == NULL)
697 return NULL;
698 return pp;
699 }
700
701 int
702 pciide_match(parent, match, aux)
703 struct device *parent;
704 struct cfdata *match;
705 void *aux;
706 {
707 struct pci_attach_args *pa = aux;
708 const struct pciide_product_desc *pp;
709
710 /*
711 * Check the ID register to see that it's a PCI IDE controller.
712 * If it is, we assume that we can deal with it; it _should_
713 * work in a standardized way...
714 */
715 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
716 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
717 return (1);
718 }
719
720 /*
721 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
722 * controllers. Let's see if we can deal with them anyway.
723 */
724 pp = pciide_lookup_product(pa->pa_id);
725 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
726 return (1);
727 }
728
729 return (0);
730 }
731
732 void
733 pciide_attach(parent, self, aux)
734 struct device *parent, *self;
735 void *aux;
736 {
737 struct pci_attach_args *pa = aux;
738 pci_chipset_tag_t pc = pa->pa_pc;
739 pcitag_t tag = pa->pa_tag;
740 struct pciide_softc *sc = (struct pciide_softc *)self;
741 pcireg_t csr;
742 char devinfo[256];
743 const char *displaydev;
744
745 sc->sc_pci_vendor = PCI_VENDOR(pa->pa_id);
746 sc->sc_pp = pciide_lookup_product(pa->pa_id);
747 if (sc->sc_pp == NULL) {
748 sc->sc_pp = &default_product_desc;
749 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
750 displaydev = devinfo;
751 } else
752 displaydev = sc->sc_pp->ide_name;
753
754 /* if displaydev == NULL, printf is done in chip-specific map */
755 if (displaydev)
756 printf(": %s (rev. 0x%02x)\n", displaydev,
757 PCI_REVISION(pa->pa_class));
758
759 sc->sc_pc = pa->pa_pc;
760 sc->sc_tag = pa->pa_tag;
761 #ifdef WDCDEBUG
762 if (wdcdebug_pciide_mask & DEBUG_PROBE)
763 pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
764 #endif
765 sc->sc_pp->chip_map(sc, pa);
766
767 if (sc->sc_dma_ok) {
768 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
769 csr |= PCI_COMMAND_MASTER_ENABLE;
770 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
771 }
772 WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
773 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
774 }
775
776 /* tell whether the chip is enabled or not */
777 int
778 pciide_chipen(sc, pa)
779 struct pciide_softc *sc;
780 struct pci_attach_args *pa;
781 {
782 pcireg_t csr;
783 if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
784 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
785 PCI_COMMAND_STATUS_REG);
786 printf("%s: device disabled (at %s)\n",
787 sc->sc_wdcdev.sc_dev.dv_xname,
788 (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
789 "device" : "bridge");
790 return 0;
791 }
792 return 1;
793 }
794
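/*
 * Map a channel's command and control registers at the legacy ISA
 * compatibility addresses for the given compat channel number.
 */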
795 int
796 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
797 struct pci_attach_args *pa;
798 struct pciide_channel *cp;
799 int compatchan;
800 bus_size_t *cmdsizep, *ctlsizep;
801 {
802 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
803 struct channel_softc *wdc_cp = &cp->wdc_channel;
804
805 cp->compat = 1;
806 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
807 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
808
809 wdc_cp->cmd_iot = pa->pa_iot;
810 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
811 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
812 printf("%s: couldn't map %s channel cmd regs\n",
813 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
814 return (0);
815 }
816
817 wdc_cp->ctl_iot = pa->pa_iot;
818 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
819 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
820 printf("%s: couldn't map %s channel ctl regs\n",
821 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
822 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
823 PCIIDE_COMPAT_CMD_SIZE);
824 return (0);
825 }
826
827 return (1);
828 }
829
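/*
 * Map a native-PCI channel's command and control registers from the
 * corresponding BARs, establishing the shared PCI interrupt the first
 * time through; the control register handle is subregioned down to the
 * single byte at offset 2.
 */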
830 int
831 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
832 struct pci_attach_args * pa;
833 struct pciide_channel *cp;
834 bus_size_t *cmdsizep, *ctlsizep;
835 int (*pci_intr) __P((void *));
836 {
837 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
838 struct channel_softc *wdc_cp = &cp->wdc_channel;
839 const char *intrstr;
840 pci_intr_handle_t intrhandle;
841
842 cp->compat = 0;
843
844 if (sc->sc_pci_ih == NULL) {
845 if (pci_intr_map(pa, &intrhandle) != 0) {
846 printf("%s: couldn't map native-PCI interrupt\n",
847 sc->sc_wdcdev.sc_dev.dv_xname);
848 return 0;
849 }
850 intrstr = pci_intr_string(pa->pa_pc, intrhandle);
851 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
852 intrhandle, IPL_BIO, pci_intr, sc);
853 if (sc->sc_pci_ih != NULL) {
854 printf("%s: using %s for native-PCI interrupt\n",
855 sc->sc_wdcdev.sc_dev.dv_xname,
856 intrstr ? intrstr : "unknown interrupt");
857 } else {
858 printf("%s: couldn't establish native-PCI interrupt",
859 sc->sc_wdcdev.sc_dev.dv_xname);
860 if (intrstr != NULL)
861 printf(" at %s", intrstr);
862 printf("\n");
863 return 0;
864 }
865 }
866 cp->ih = sc->sc_pci_ih;
867 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
868 PCI_MAPREG_TYPE_IO, 0,
869 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
870 printf("%s: couldn't map %s channel cmd regs\n",
871 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
872 return 0;
873 }
874
875 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
876 PCI_MAPREG_TYPE_IO, 0,
877 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
878 printf("%s: couldn't map %s channel ctl regs\n",
879 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
880 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
881 return 0;
882 }
883 /*
884 * In native mode, 4 bytes of I/O space are mapped for the control
885 * register block; the control register itself is at offset 2.  Pass
886 * the generic code a handle for only one byte, at the right offset.
887 */
888 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
889 &wdc_cp->ctl_ioh) != 0) {
890 printf("%s: unable to subregion %s channel ctl regs\n",
891 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
892 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
893 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
894 return 0;
895 }
896 return (1);
897 }
898
899 void
900 pciide_mapreg_dma(sc, pa)
901 struct pciide_softc *sc;
902 struct pci_attach_args *pa;
903 {
904 pcireg_t maptype;
905 bus_addr_t addr;
906
907 /*
908 * Map DMA registers
909 *
910 * Note that sc_dma_ok is the right variable to test to see if
911 * DMA can be done. If the interface doesn't support DMA,
912 * sc_dma_ok will never be non-zero. If the DMA regs couldn't
913 * be mapped, it'll be zero. I.e., sc_dma_ok will only be
914 * non-zero if the interface supports DMA and the registers
915 * could be mapped.
916 *
917 * XXX Note that despite the fact that the Bus Master IDE specs
918 * XXX say that "The bus master IDE function uses 16 bytes of IO
919 * XXX space," some controllers (at least the United
920 * XXX Microelectronics UM8886BF) place it in memory space.
921 */
922 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
923 PCIIDE_REG_BUS_MASTER_DMA);
924
925 switch (maptype) {
926 case PCI_MAPREG_TYPE_IO:
927 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
928 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
929 &addr, NULL, NULL) == 0);
930 if (sc->sc_dma_ok == 0) {
931 printf(", but unused (couldn't query registers)");
932 break;
933 }
934 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
935 && addr >= 0x10000) {
936 sc->sc_dma_ok = 0;
937 printf(", but unused (registers at unsafe address "
938 "%#lx)", (unsigned long)addr);
939 break;
940 }
941 /* FALLTHROUGH */
942
943 case PCI_MAPREG_MEM_TYPE_32BIT:
944 sc->sc_dma_ok = (pci_mapreg_map(pa,
945 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
946 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
947 sc->sc_dmat = pa->pa_dmat;
948 if (sc->sc_dma_ok == 0) {
949 printf(", but unused (couldn't map registers)");
950 } else {
951 sc->sc_wdcdev.dma_arg = sc;
952 sc->sc_wdcdev.dma_init = pciide_dma_init;
953 sc->sc_wdcdev.dma_start = pciide_dma_start;
954 sc->sc_wdcdev.dma_finish = pciide_dma_finish;
955 }
956
957 if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
958 PCIIDE_OPTIONS_NODMA) {
959 printf(", but unused (forced off by config file)");
960 sc->sc_dma_ok = 0;
961 }
962 break;
963
964 default:
965 sc->sc_dma_ok = 0;
966 printf(", but unsupported register maptype (0x%x)", maptype);
967 }
968 }
969
970 int
971 pciide_compat_intr(arg)
972 void *arg;
973 {
974 struct pciide_channel *cp = arg;
975
976 #ifdef DIAGNOSTIC
977 /* should only be called for a compat channel */
978 if (cp->compat == 0)
979 panic("pciide compat intr called for non-compat chan %p", cp);
980 #endif
981 return (wdcintr(&cp->wdc_channel));
982 }
983
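/*
 * Shared interrupt handler for native-PCI channels: poll every
 * non-compat channel that is waiting for an interrupt and let
 * wdcintr() decide whether the event was claimed.
 */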
984 int
985 pciide_pci_intr(arg)
986 void *arg;
987 {
988 struct pciide_softc *sc = arg;
989 struct pciide_channel *cp;
990 struct channel_softc *wdc_cp;
991 int i, rv, crv;
992
993 rv = 0;
994 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
995 cp = &sc->pciide_channels[i];
996 wdc_cp = &cp->wdc_channel;
997
998 /* If a compat channel, skip. */
999 if (cp->compat)
1000 continue;
1001 /* If this channel is not waiting for an interrupt, skip it */
1002 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
1003 continue;
1004
1005 crv = wdcintr(wdc_cp);
1006 if (crv == 0)
1007 ; /* leave rv alone */
1008 else if (crv == 1)
1009 rv = 1; /* claim the intr */
1010 else if (rv == 0) /* crv should be -1 in this case */
1011 rv = crv; /* if we've done no better, take it */
1012 }
1013 return (rv);
1014 }
1015
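/*
 * Per-channel DMA setup: allocate the descriptor table and DMA maps
 * for each drive that negotiated (U)DMA, clearing the DMA flags of any
 * drive that cannot be set up.
 */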
1016 void
1017 pciide_channel_dma_setup(cp)
1018 struct pciide_channel *cp;
1019 {
1020 int drive;
1021 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1022 struct ata_drive_datas *drvp;
1023
1024 for (drive = 0; drive < 2; drive++) {
1025 drvp = &cp->wdc_channel.ch_drive[drive];
1026 /* If no drive, skip */
1027 if ((drvp->drive_flags & DRIVE) == 0)
1028 continue;
1029 /* setup DMA if needed */
1030 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1031 (drvp->drive_flags & DRIVE_UDMA) == 0) ||
1032 sc->sc_dma_ok == 0) {
1033 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1034 continue;
1035 }
1036 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
1037 != 0) {
1038 /* Abort DMA setup */
1039 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
1040 continue;
1041 }
1042 }
1043 }
1044
1045 int
1046 pciide_dma_table_setup(sc, channel, drive)
1047 struct pciide_softc *sc;
1048 int channel, drive;
1049 {
1050 bus_dma_segment_t seg;
1051 int error, rseg;
1052 const bus_size_t dma_table_size =
1053 sizeof(struct idedma_table) * NIDEDMA_TABLES;
1054 struct pciide_dma_maps *dma_maps =
1055 &sc->pciide_channels[channel].dma_maps[drive];
1056
1057 /* If table was already allocated, just return */
1058 if (dma_maps->dma_table)
1059 return 0;
1060
1061 /* Allocate memory for the DMA tables and map it */
1062 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
1063 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
1064 BUS_DMA_NOWAIT)) != 0) {
1065 printf("%s:%d: unable to allocate table DMA for "
1066 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1067 channel, drive, error);
1068 return error;
1069 }
1070 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
1071 dma_table_size,
1072 (caddr_t *)&dma_maps->dma_table,
1073 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
1074 printf("%s:%d: unable to map table DMA for "
1075 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1076 channel, drive, error);
1077 return error;
1078 }
1079 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
1080 "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
1081 (unsigned long)seg.ds_addr), DEBUG_PROBE);
1082
1083 /* Create and load table DMA map for this disk */
1084 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
1085 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
1086 &dma_maps->dmamap_table)) != 0) {
1087 printf("%s:%d: unable to create table DMA map for "
1088 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1089 channel, drive, error);
1090 return error;
1091 }
1092 if ((error = bus_dmamap_load(sc->sc_dmat,
1093 dma_maps->dmamap_table,
1094 dma_maps->dma_table,
1095 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
1096 printf("%s:%d: unable to load table DMA map for "
1097 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1098 channel, drive, error);
1099 return error;
1100 }
1101 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
1102 (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
1103 DEBUG_PROBE);
1104 /* Create an xfer DMA map for this drive */
1105 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1106 NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1107 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1108 &dma_maps->dmamap_xfer)) != 0) {
1109 printf("%s:%d: unable to create xfer DMA map for "
1110 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1111 channel, drive, error);
1112 return error;
1113 }
1114 return 0;
1115 }
1116
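/*
 * Prepare a bus-master transfer (installed as the wdc layer's dma_init
 * hook): load the data buffer into the xfer DMA map, fill in the
 * physical region descriptor table (marking the last entry EOT), and
 * program the controller's table pointer and transfer direction.
 * pciide_dma_start() later sets the START bit.
 */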
1117 int
1118 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1119 void *v;
1120 int channel, drive;
1121 void *databuf;
1122 size_t datalen;
1123 int flags;
1124 {
1125 struct pciide_softc *sc = v;
1126 int error, seg;
1127 struct pciide_dma_maps *dma_maps =
1128 &sc->pciide_channels[channel].dma_maps[drive];
1129
1130 error = bus_dmamap_load(sc->sc_dmat,
1131 dma_maps->dmamap_xfer,
1132 databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1133 ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1134 if (error) {
1135 printf("%s:%d: unable to load xfer DMA map for "
1136 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1137 channel, drive, error);
1138 return error;
1139 }
1140
1141 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1142 dma_maps->dmamap_xfer->dm_mapsize,
1143 (flags & WDC_DMA_READ) ?
1144 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1145
1146 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1147 #ifdef DIAGNOSTIC
1148 /* A segment must not cross a 64k boundary */
1149 {
1150 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1151 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1152 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1153 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1154 printf("pciide_dma: segment %d physical addr 0x%lx"
1155 " len 0x%lx not properly aligned\n",
1156 seg, phys, len);
1157 panic("pciide_dma: buf align");
1158 }
1159 }
1160 #endif
1161 dma_maps->dma_table[seg].base_addr =
1162 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1163 dma_maps->dma_table[seg].byte_count =
1164 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1165 IDEDMA_BYTE_COUNT_MASK);
1166 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1167 seg, le32toh(dma_maps->dma_table[seg].byte_count),
1168 le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1169
1170 }
1171 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1172 htole32(IDEDMA_BYTE_COUNT_EOT);
1173
1174 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1175 dma_maps->dmamap_table->dm_mapsize,
1176 BUS_DMASYNC_PREWRITE);
1177
1178 /* Maps are ready; now program the DMA engine's registers */
1179 #ifdef DIAGNOSTIC
1180 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1181 printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1182 (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1183 panic("pciide_dma_init: table align");
1184 }
1185 #endif
1186
1187 /* Clear status bits */
1188 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1189 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1190 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1191 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1192 /* Write table addr */
1193 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1194 IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1195 dma_maps->dmamap_table->dm_segs[0].ds_addr);
1196 /* set read/write */
1197 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1198 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1199 (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1200 /* remember flags */
1201 dma_maps->dma_flags = flags;
1202 return 0;
1203 }
1204
1205 void
1206 pciide_dma_start(v, channel, drive)
1207 void *v;
1208 int channel, drive;
1209 {
1210 struct pciide_softc *sc = v;
1211
1212 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1213 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1214 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1215 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1216 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1217 }
1218
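/*
 * Complete a bus-master transfer (called at interrupt time, or with
 * "force" set on timeout): read the status register, stop the engine,
 * unload the data map and report error, missing-interrupt and underrun
 * conditions to the wdc layer.
 */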
1219 int
1220 pciide_dma_finish(v, channel, drive, force)
1221 void *v;
1222 int channel, drive;
1223 int force;
1224 {
1225 struct pciide_softc *sc = v;
1226 u_int8_t status;
1227 int error = 0;
1228 struct pciide_dma_maps *dma_maps =
1229 &sc->pciide_channels[channel].dma_maps[drive];
1230
1231 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1232 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1233 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1234 DEBUG_XFERS);
1235
1236 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1237 return WDC_DMAST_NOIRQ;
1238
1239 /* stop DMA channel */
1240 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1241 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1242 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1243 IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1244
1245 /* Unload the map of the data buffer */
1246 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1247 dma_maps->dmamap_xfer->dm_mapsize,
1248 (dma_maps->dma_flags & WDC_DMA_READ) ?
1249 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1250 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1251
1252 if ((status & IDEDMA_CTL_ERR) != 0) {
1253 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1254 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1255 error |= WDC_DMAST_ERR;
1256 }
1257
1258 if ((status & IDEDMA_CTL_INTR) == 0) {
1259 printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1260 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1261 drive, status);
1262 error |= WDC_DMAST_NOIRQ;
1263 }
1264
1265 if ((status & IDEDMA_CTL_ACT) != 0) {
1266 /* data underrun, may be a valid condition for ATAPI */
1267 error |= WDC_DMAST_UNDER;
1268 }
1269 return error;
1270 }
1271
1272 void
1273 pciide_irqack(chp)
1274 struct channel_softc *chp;
1275 {
1276 struct pciide_channel *cp = (struct pciide_channel*)chp;
1277 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1278
1279 /* clear status bits in IDE DMA registers */
1280 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1281 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1282 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1283 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1284 }
1285
1286 /* some common code used by several chip_map */
1287 int
1288 pciide_chansetup(sc, channel, interface)
1289 struct pciide_softc *sc;
1290 int channel;
1291 pcireg_t interface;
1292 {
1293 struct pciide_channel *cp = &sc->pciide_channels[channel];
1294 sc->wdc_chanarray[channel] = &cp->wdc_channel;
1295 cp->name = PCIIDE_CHANNEL_NAME(channel);
1296 cp->wdc_channel.channel = channel;
1297 cp->wdc_channel.wdc = &sc->sc_wdcdev;
1298 cp->wdc_channel.ch_queue =
1299 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1300 if (cp->wdc_channel.ch_queue == NULL) {
1301 printf("%s %s channel: "
1302 "can't allocate memory for command queue\n",
1303 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1304 return 0;
1305 }
1306 printf("%s: %s channel %s to %s mode\n",
1307 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1308 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1309 "configured" : "wired",
1310 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1311 "native-PCI" : "compatibility");
1312 return 1;
1313 }
1314
1315 /* some common code used by several chip_map functions to map a channel */
1316 void
1317 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1318 struct pci_attach_args *pa;
1319 struct pciide_channel *cp;
1320 pcireg_t interface;
1321 bus_size_t *cmdsizep, *ctlsizep;
1322 int (*pci_intr) __P((void *));
1323 {
1324 struct channel_softc *wdc_cp = &cp->wdc_channel;
1325
1326 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1327 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1328 pci_intr);
1329 else
1330 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1331 wdc_cp->channel, cmdsizep, ctlsizep);
1332
1333 if (cp->hw_ok == 0)
1334 return;
1335 wdc_cp->data32iot = wdc_cp->cmd_iot;
1336 wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1337 wdcattach(wdc_cp);
1338 }
1339
1340 /*
1341 * Generic code to determine whether a channel can be disabled.  Returns 1
1342 * if the channel can be disabled, 0 if not
1343 */
1344 int
1345 pciide_chan_candisable(cp)
1346 struct pciide_channel *cp;
1347 {
1348 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1349 struct channel_softc *wdc_cp = &cp->wdc_channel;
1350
1351 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1352 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1353 printf("%s: disabling %s channel (no drives)\n",
1354 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1355 cp->hw_ok = 0;
1356 return 1;
1357 }
1358 return 0;
1359 }
1360
1361 /*
1362 * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1363 * Set hw_ok=0 on failure
1364 */
1365 void
1366 pciide_map_compat_intr(pa, cp, compatchan, interface)
1367 struct pci_attach_args *pa;
1368 struct pciide_channel *cp;
1369 int compatchan, interface;
1370 {
1371 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1372 struct channel_softc *wdc_cp = &cp->wdc_channel;
1373
1374 if (cp->hw_ok == 0)
1375 return;
1376 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1377 return;
1378
1379 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1380 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1381 pa, compatchan, pciide_compat_intr, cp);
1382 if (cp->ih == NULL) {
1383 #endif
1384 printf("%s: no compatibility interrupt for use by %s "
1385 "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1386 cp->hw_ok = 0;
1387 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1388 }
1389 #endif
1390 }
1391
1392 void
1393 pciide_print_modes(cp)
1394 struct pciide_channel *cp;
1395 {
1396 wdc_print_modes(&cp->wdc_channel);
1397 }
1398
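/*
 * Fallback chip_map for controllers without specific support: map each
 * channel (native or compat, as the interface byte dictates), probe it,
 * and enable plain bus-master DMA only when the device and the config
 * file flags allow it.  No chip-specific timing registers are touched.
 */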
1399 void
1400 default_chip_map(sc, pa)
1401 struct pciide_softc *sc;
1402 struct pci_attach_args *pa;
1403 {
1404 struct pciide_channel *cp;
1405 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1406 pcireg_t csr;
1407 int channel, drive;
1408 struct ata_drive_datas *drvp;
1409 u_int8_t idedma_ctl;
1410 bus_size_t cmdsize, ctlsize;
1411 char *failreason;
1412
1413 if (pciide_chipen(sc, pa) == 0)
1414 return;
1415
1416 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1417 printf("%s: bus-master DMA support present",
1418 sc->sc_wdcdev.sc_dev.dv_xname);
1419 if (sc->sc_pp == &default_product_desc &&
1420 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1421 PCIIDE_OPTIONS_DMA) == 0) {
1422 printf(", but unused (no driver support)");
1423 sc->sc_dma_ok = 0;
1424 } else {
1425 pciide_mapreg_dma(sc, pa);
1426 if (sc->sc_dma_ok != 0)
1427 printf(", used without full driver "
1428 "support");
1429 }
1430 } else {
1431 printf("%s: hardware does not support DMA",
1432 sc->sc_wdcdev.sc_dev.dv_xname);
1433 sc->sc_dma_ok = 0;
1434 }
1435 printf("\n");
1436 if (sc->sc_dma_ok) {
1437 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1438 sc->sc_wdcdev.irqack = pciide_irqack;
1439 }
1440 sc->sc_wdcdev.PIO_cap = 0;
1441 sc->sc_wdcdev.DMA_cap = 0;
1442
1443 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1444 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1445 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1446
1447 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1448 cp = &sc->pciide_channels[channel];
1449 if (pciide_chansetup(sc, channel, interface) == 0)
1450 continue;
1451 if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1452 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1453 &ctlsize, pciide_pci_intr);
1454 } else {
1455 cp->hw_ok = pciide_mapregs_compat(pa, cp,
1456 channel, &cmdsize, &ctlsize);
1457 }
1458 if (cp->hw_ok == 0)
1459 continue;
1460 /*
1461 * Check to see if something appears to be there.
1462 */
1463 failreason = NULL;
1464 if (!wdcprobe(&cp->wdc_channel)) {
1465 failreason = "not responding; disabled or no drives?";
1466 goto next;
1467 }
1468 /*
1469 * Now, make sure it's actually attributable to this PCI IDE
1470 * channel by trying to access the channel again while the
1471 * PCI IDE controller's I/O space is disabled. (If the
1472 * channel no longer appears to be there, it belongs to
1473 * this controller.) YUCK!
1474 */
1475 csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1476 PCI_COMMAND_STATUS_REG);
1477 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1478 csr & ~PCI_COMMAND_IO_ENABLE);
1479 if (wdcprobe(&cp->wdc_channel))
1480 failreason = "other hardware responding at addresses";
1481 pci_conf_write(sc->sc_pc, sc->sc_tag,
1482 PCI_COMMAND_STATUS_REG, csr);
1483 next:
1484 if (failreason) {
1485 printf("%s: %s channel ignored (%s)\n",
1486 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1487 failreason);
1488 cp->hw_ok = 0;
1489 bus_space_unmap(cp->wdc_channel.cmd_iot,
1490 cp->wdc_channel.cmd_ioh, cmdsize);
1491 if (interface & PCIIDE_INTERFACE_PCI(channel))
1492 bus_space_unmap(cp->wdc_channel.ctl_iot,
1493 cp->ctl_baseioh, ctlsize);
1494 else
1495 bus_space_unmap(cp->wdc_channel.ctl_iot,
1496 cp->wdc_channel.ctl_ioh, ctlsize);
1497 } else {
1498 pciide_map_compat_intr(pa, cp, channel, interface);
1499 }
1500 if (cp->hw_ok) {
1501 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1502 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1503 wdcattach(&cp->wdc_channel);
1504 }
1505 }
1506
1507 if (sc->sc_dma_ok == 0)
1508 return;
1509
1510 /* Allocate DMA maps */
1511 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1512 idedma_ctl = 0;
1513 cp = &sc->pciide_channels[channel];
1514 for (drive = 0; drive < 2; drive++) {
1515 drvp = &cp->wdc_channel.ch_drive[drive];
1516 /* If no drive, skip */
1517 if ((drvp->drive_flags & DRIVE) == 0)
1518 continue;
1519 if ((drvp->drive_flags & DRIVE_DMA) == 0)
1520 continue;
1521 if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1522 /* Abort DMA setup */
1523 printf("%s:%d:%d: can't allocate DMA maps, "
1524 "using PIO transfers\n",
1525 sc->sc_wdcdev.sc_dev.dv_xname,
1526 channel, drive);
1527 drvp->drive_flags &= ~DRIVE_DMA;
1528 }
1529 printf("%s:%d:%d: using DMA data transfers\n",
1530 sc->sc_wdcdev.sc_dev.dv_xname,
1531 channel, drive);
1532 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1533 }
1534 if (idedma_ctl != 0) {
1535 /* Add software bits in status register */
1536 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1537 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1538 idedma_ctl);
1539 }
1540 }
1541 }
1542
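/*
 * chip_map for the Intel PIIX/PIIX3/PIIX4 and ICH families: set the
 * PIO/DMA/UDMA capabilities according to the exact product, then map
 * and configure each (compat-only) channel.
 */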
1543 void
1544 piix_chip_map(sc, pa)
1545 struct pciide_softc *sc;
1546 struct pci_attach_args *pa;
1547 {
1548 struct pciide_channel *cp;
1549 int channel;
1550 u_int32_t idetim;
1551 bus_size_t cmdsize, ctlsize;
1552
1553 if (pciide_chipen(sc, pa) == 0)
1554 return;
1555
1556 printf("%s: bus-master DMA support present",
1557 sc->sc_wdcdev.sc_dev.dv_xname);
1558 pciide_mapreg_dma(sc, pa);
1559 printf("\n");
1560 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1561 WDC_CAPABILITY_MODE;
1562 if (sc->sc_dma_ok) {
1563 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1564 sc->sc_wdcdev.irqack = pciide_irqack;
1565 switch(sc->sc_pp->ide_product) {
1566 case PCI_PRODUCT_INTEL_82371AB_IDE:
1567 case PCI_PRODUCT_INTEL_82440MX_IDE:
1568 case PCI_PRODUCT_INTEL_82801AA_IDE:
1569 case PCI_PRODUCT_INTEL_82801AB_IDE:
1570 case PCI_PRODUCT_INTEL_82801BA_IDE:
1571 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1572 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1573 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1574 case PCI_PRODUCT_INTEL_82801DB_IDE:
1575 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1576 }
1577 }
1578 sc->sc_wdcdev.PIO_cap = 4;
1579 sc->sc_wdcdev.DMA_cap = 2;
1580 switch(sc->sc_pp->ide_product) {
1581 case PCI_PRODUCT_INTEL_82801AA_IDE:
1582 sc->sc_wdcdev.UDMA_cap = 4;
1583 break;
1584 case PCI_PRODUCT_INTEL_82801BA_IDE:
1585 case PCI_PRODUCT_INTEL_82801BAM_IDE:
1586 case PCI_PRODUCT_INTEL_82801CA_IDE_1:
1587 case PCI_PRODUCT_INTEL_82801CA_IDE_2:
1588 case PCI_PRODUCT_INTEL_82801DB_IDE:
1589 sc->sc_wdcdev.UDMA_cap = 5;
1590 break;
1591 default:
1592 sc->sc_wdcdev.UDMA_cap = 2;
1593 }
1594 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1595 sc->sc_wdcdev.set_modes = piix_setup_channel;
1596 else
1597 sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1598 sc->sc_wdcdev.channels = sc->wdc_chanarray;
1599 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1600
1601 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1602 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1603 DEBUG_PROBE);
1604 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1605 WDCDEBUG_PRINT((", sidetim=0x%x",
1606 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1607 DEBUG_PROBE);
1608 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1609 WDCDEBUG_PRINT((", udmareg 0x%x",
1610 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1611 DEBUG_PROBE);
1612 }
1613 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1614 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1615 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1616 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1617 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1618 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1619 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1620 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1621 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1622 DEBUG_PROBE);
1623 }
1624
1625 }
1626 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1627
1628 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1629 cp = &sc->pciide_channels[channel];
1630 /* PIIX is compat-only */
1631 if (pciide_chansetup(sc, channel, 0) == 0)
1632 continue;
1633 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1634 if ((PIIX_IDETIM_READ(idetim, channel) &
1635 PIIX_IDETIM_IDE) == 0) {
1636 printf("%s: %s channel ignored (disabled)\n",
1637 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1638 continue;
1639 }
1640 /* PIIX are compat-only pciide devices */
1641 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1642 if (cp->hw_ok == 0)
1643 continue;
1644 if (pciide_chan_candisable(cp)) {
1645 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1646 channel);
1647 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1648 idetim);
1649 }
1650 pciide_map_compat_intr(pa, cp, channel, 0);
1651 if (cp->hw_ok == 0)
1652 continue;
1653 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1654 }
1655
1656 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1657 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1658 DEBUG_PROBE);
1659 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1660 WDCDEBUG_PRINT((", sidetim=0x%x",
1661 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1662 DEBUG_PROBE);
1663 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1664 WDCDEBUG_PRINT((", udmareg 0x%x",
1665 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1666 DEBUG_PROBE);
1667 }
1668 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1669 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1670 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1671 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1672 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1673 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1674 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1675 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1676 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1677 DEBUG_PROBE);
1678 }
1679 }
1680 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1681 }
1682
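/*
 * Per-channel mode setup for the original PIIX (82371FB): the IDETIM
 * timings are shared by both drives on a channel, so the code below
 * picks one compromise mode for the pair.
 */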
1683 void
1684 piix_setup_channel(chp)
1685 struct channel_softc *chp;
1686 {
1687 u_int8_t mode[2], drive;
1688 u_int32_t oidetim, idetim, idedma_ctl;
1689 struct pciide_channel *cp = (struct pciide_channel*)chp;
1690 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1691 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1692
1693 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1694 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1695 idedma_ctl = 0;
1696
1697 /* set up new idetim: Enable IDE registers decode */
1698 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1699 chp->channel);
1700
1701 /* setup DMA */
1702 pciide_channel_dma_setup(cp);
1703
1704 /*
1705 * Here we have to mess with the drives' modes: the PIIX can't use
1706 * different timings for the master and slave drives on a channel,
1707 * so we need to find the best combination.
1708 */
1709
1710 /* If both drives support DMA, take the lower mode */
1711 if ((drvp[0].drive_flags & DRIVE_DMA) &&
1712 (drvp[1].drive_flags & DRIVE_DMA)) {
1713 mode[0] = mode[1] =
1714 min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1715 drvp[0].DMA_mode = mode[0];
1716 drvp[1].DMA_mode = mode[1];
1717 goto ok;
1718 }
1719 /*
1720 * If only one drive supports DMA, use its mode, and put the
1721 * other one in PIO mode 0 if its mode is not compatible
1722 */
1723 if (drvp[0].drive_flags & DRIVE_DMA) {
1724 mode[0] = drvp[0].DMA_mode;
1725 mode[1] = drvp[1].PIO_mode;
1726 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1727 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1728 mode[1] = drvp[1].PIO_mode = 0;
1729 goto ok;
1730 }
1731 if (drvp[1].drive_flags & DRIVE_DMA) {
1732 mode[1] = drvp[1].DMA_mode;
1733 mode[0] = drvp[0].PIO_mode;
1734 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1735 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1736 mode[0] = drvp[0].PIO_mode = 0;
1737 goto ok;
1738 }
1739 /*
1740 * If neither drive is using DMA, take the lower mode, unless
1741 * one of them is below PIO mode 2
1742 */
1743 if (drvp[0].PIO_mode < 2) {
1744 mode[0] = drvp[0].PIO_mode = 0;
1745 mode[1] = drvp[1].PIO_mode;
1746 } else if (drvp[1].PIO_mode < 2) {
1747 mode[1] = drvp[1].PIO_mode = 0;
1748 mode[0] = drvp[0].PIO_mode;
1749 } else {
1750 mode[0] = mode[1] =
1751 min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1752 drvp[0].PIO_mode = mode[0];
1753 drvp[1].PIO_mode = mode[1];
1754 }
1755 ok: /* The modes are setup */
1756 for (drive = 0; drive < 2; drive++) {
1757 if (drvp[drive].drive_flags & DRIVE_DMA) {
1758 idetim |= piix_setup_idetim_timings(
1759 mode[drive], 1, chp->channel);
1760 goto end;
1761 }
1762 }
1763 /* If we get here, neither drive is using DMA */
1764 if (mode[0] >= 2)
1765 idetim |= piix_setup_idetim_timings(
1766 mode[0], 0, chp->channel);
1767 else
1768 idetim |= piix_setup_idetim_timings(
1769 mode[1], 0, chp->channel);
1770 end: /*
1771 * The timing mode is now set up in the controller.  Enable
1772 * it per drive
1773 */
1774 for (drive = 0; drive < 2; drive++) {
1775 /* If no drive, skip */
1776 if ((drvp[drive].drive_flags & DRIVE) == 0)
1777 continue;
1778 idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1779 if (drvp[drive].drive_flags & DRIVE_DMA)
1780 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1781 }
1782 if (idedma_ctl != 0) {
1783 /* Add software bits in status register */
1784 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1785 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1786 idedma_ctl);
1787 }
1788 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1789 pciide_print_modes(cp);
1790 }
1791
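/*
 * Per-channel mode setup for PIIX3/PIIX4 and the ICH variants: unlike
 * the original PIIX these have a separate slave timing register
 * (SIDETIM) and, on later parts, Ultra-DMA control and IDE_CONFIG bits.
 */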
1792 void
1793 piix3_4_setup_channel(chp)
1794 struct channel_softc *chp;
1795 {
1796 struct ata_drive_datas *drvp;
1797 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1798 struct pciide_channel *cp = (struct pciide_channel*)chp;
1799 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1800 int drive;
1801 int channel = chp->channel;
1802
1803 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1804 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1805 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1806 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1807 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1808 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1809 PIIX_SIDETIM_RTC_MASK(channel));
1810
1811 idedma_ctl = 0;
1812 /* If channel disabled, no need to go further */
1813 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1814 return;
1815 /* set up new idetim: Enable IDE registers decode */
1816 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1817
1818 /* setup DMA if needed */
1819 pciide_channel_dma_setup(cp);
1820
1821 for (drive = 0; drive < 2; drive++) {
1822 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1823 PIIX_UDMATIM_SET(0x3, channel, drive));
1824 drvp = &chp->ch_drive[drive];
1825 /* If no drive, skip */
1826 if ((drvp->drive_flags & DRIVE) == 0)
1827 continue;
1828 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1829 (drvp->drive_flags & DRIVE_UDMA) == 0))
1830 goto pio;
1831
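		/*
		 * On the ICH-family parts listed below, PIIX_CONFIG_PINGPONG
		 * presumably enables the controller's ping-pong data buffer;
		 * turn it on whenever a drive on these chips uses (U)DMA.
		 */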
1832 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1833 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1834 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1835 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1836 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1837 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1838 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1839 ideconf |= PIIX_CONFIG_PINGPONG;
1840 }
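		/*
		 * PIIX_CONFIG_CR appears to be the per-drive 80-conductor
		 * cable report; when it is clear, the code below clamps the
		 * drive to UDMA2 (Ultra/33).
		 */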
1841 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1842 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1843 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
1844 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
1845 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE) {
1846 /* setup Ultra/100 */
1847 if (drvp->UDMA_mode > 2 &&
1848 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1849 drvp->UDMA_mode = 2;
1850 if (drvp->UDMA_mode > 4) {
1851 ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1852 } else {
1853 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1854 if (drvp->UDMA_mode > 2) {
1855 ideconf |= PIIX_CONFIG_UDMA66(channel,
1856 drive);
1857 } else {
1858 ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1859 drive);
1860 }
1861 }
1862 }
1863 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1864 /* setup Ultra/66 */
1865 if (drvp->UDMA_mode > 2 &&
1866 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1867 drvp->UDMA_mode = 2;
1868 if (drvp->UDMA_mode > 2)
1869 ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1870 else
1871 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1872 }
1873 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1874 (drvp->drive_flags & DRIVE_UDMA)) {
1875 /* use Ultra/DMA */
1876 drvp->drive_flags &= ~DRIVE_DMA;
1877 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1878 udmareg |= PIIX_UDMATIM_SET(
1879 piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1880 } else {
1881 /* use Multiword DMA */
1882 drvp->drive_flags &= ~DRIVE_UDMA;
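			/*
			 * Master timings live in IDETIM; slave timings go in
			 * SIDETIM, which only takes effect once the SITRE bit
			 * is set in IDETIM.
			 */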
1883 if (drive == 0) {
1884 idetim |= piix_setup_idetim_timings(
1885 drvp->DMA_mode, 1, channel);
1886 } else {
1887 sidetim |= piix_setup_sidetim_timings(
1888 drvp->DMA_mode, 1, channel);
1889 				idetim = PIIX_IDETIM_SET(idetim,
1890 PIIX_IDETIM_SITRE, channel);
1891 }
1892 }
1893 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1894
1895 pio: /* use PIO mode */
1896 idetim |= piix_setup_idetim_drvs(drvp);
1897 if (drive == 0) {
1898 idetim |= piix_setup_idetim_timings(
1899 drvp->PIO_mode, 0, channel);
1900 } else {
1901 sidetim |= piix_setup_sidetim_timings(
1902 drvp->PIO_mode, 0, channel);
1903 			idetim = PIIX_IDETIM_SET(idetim,
1904 PIIX_IDETIM_SITRE, channel);
1905 }
1906 }
1907 if (idedma_ctl != 0) {
1908 /* Add software bits in status register */
1909 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1910 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1911 idedma_ctl);
1912 }
1913 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1914 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1915 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1916 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1917 pciide_print_modes(cp);
1918 }
1919
1920
1921 /* setup ISP and RTC fields, based on mode */
1922 static u_int32_t
1923 piix_setup_idetim_timings(mode, dma, channel)
1924 u_int8_t mode;
1925 u_int8_t dma;
1926 u_int8_t channel;
1927 {
1928
1929 if (dma)
1930 return PIIX_IDETIM_SET(0,
1931 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1932 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1933 channel);
1934 else
1935 return PIIX_IDETIM_SET(0,
1936 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1937 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1938 channel);
1939 }
1940
1941 /* setup DTE, PPE, IE and TIME field based on PIO mode */
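/*
 * (Roughly: DTE = DMA timing enable only, PPE = prefetch/posting enable,
 * IE = IORDY sample enable, TIME = fast timing bank enable, per the usual
 * PIIX IDETIM bit naming.)
 */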
1942 static u_int32_t
1943 piix_setup_idetim_drvs(drvp)
1944 struct ata_drive_datas *drvp;
1945 {
1946 u_int32_t ret = 0;
1947 struct channel_softc *chp = drvp->chnl_softc;
1948 u_int8_t channel = chp->channel;
1949 u_int8_t drive = drvp->drive;
1950
1951 	/*
1952 	 * If the drive is using UDMA, its timing setup is independent,
1953 	 * so just check DMA and PIO here.
1954 	 */
1955 if (drvp->drive_flags & DRIVE_DMA) {
1956 /* if mode = DMA mode 0, use compatible timings */
1957 if ((drvp->drive_flags & DRIVE_DMA) &&
1958 drvp->DMA_mode == 0) {
1959 drvp->PIO_mode = 0;
1960 return ret;
1961 }
1962 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1963 		/*
1964 		 * If the PIO and DMA timings are the same, use fast timings
1965 		 * for PIO too; otherwise fall back to compat timings for PIO.
1966 		 */
1967 if ((piix_isp_pio[drvp->PIO_mode] !=
1968 piix_isp_dma[drvp->DMA_mode]) ||
1969 (piix_rtc_pio[drvp->PIO_mode] !=
1970 piix_rtc_dma[drvp->DMA_mode]))
1971 drvp->PIO_mode = 0;
1972 /* if PIO mode <= 2, use compat timings for PIO */
1973 if (drvp->PIO_mode <= 2) {
1974 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1975 channel);
1976 return ret;
1977 }
1978 }
1979
1980 	/*
1981 	 * Now set up the PIO modes. If the mode is < 2, use compat timings.
1982 	 * Otherwise enable fast timings, and also enable IORDY and
1983 	 * prefetch/posting if the PIO mode is >= 3.
1984 	 */
1985
1986 if (drvp->PIO_mode < 2)
1987 return ret;
1988
1989 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1990 if (drvp->PIO_mode >= 3) {
1991 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1992 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1993 }
1994 return ret;
1995 }
1996
1997 /* setup values in SIDETIM registers, based on mode */
1998 static u_int32_t
1999 piix_setup_sidetim_timings(mode, dma, channel)
2000 u_int8_t mode;
2001 u_int8_t dma;
2002 u_int8_t channel;
2003 {
2004 if (dma)
2005 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
2006 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
2007 else
2008 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
2009 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
2010 }
2011
2012 void
2013 amd7x6_chip_map(sc, pa)
2014 struct pciide_softc *sc;
2015 struct pci_attach_args *pa;
2016 {
2017 struct pciide_channel *cp;
2018 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2019 int channel;
2020 pcireg_t chanenable;
2021 bus_size_t cmdsize, ctlsize;
2022
2023 if (pciide_chipen(sc, pa) == 0)
2024 return;
2025 printf("%s: bus-master DMA support present",
2026 sc->sc_wdcdev.sc_dev.dv_xname);
2027 pciide_mapreg_dma(sc, pa);
2028 printf("\n");
2029 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2030 WDC_CAPABILITY_MODE;
2031 if (sc->sc_dma_ok) {
2032 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2033 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2034 sc->sc_wdcdev.irqack = pciide_irqack;
2035 }
2036 sc->sc_wdcdev.PIO_cap = 4;
2037 sc->sc_wdcdev.DMA_cap = 2;
2038
2039 switch (sc->sc_pci_vendor) {
2040 case PCI_VENDOR_AMD:
2041 switch (sc->sc_pp->ide_product) {
2042 case PCI_PRODUCT_AMD_PBC766_IDE:
2043 case PCI_PRODUCT_AMD_PBC768_IDE:
2044 case PCI_PRODUCT_AMD_PBC8111_IDE:
2045 sc->sc_wdcdev.UDMA_cap = 5;
2046 break;
2047 default:
2048 sc->sc_wdcdev.UDMA_cap = 4;
2049 }
2050 sc->sc_amd_regbase = AMD7X6_AMD_REGBASE;
2051 break;
2052
2053 case PCI_VENDOR_NVIDIA:
2054 switch (sc->sc_pp->ide_product) {
2055 case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
2056 sc->sc_wdcdev.UDMA_cap = 5;
2057 break;
2058 case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
2059 sc->sc_wdcdev.UDMA_cap = 6;
2060 break;
2061 }
2062 sc->sc_amd_regbase = AMD7X6_NVIDIA_REGBASE;
2063 break;
2064
2065 default:
2066 panic("amd7x6_chip_map: unknown vendor");
2067 }
2068 sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
2069 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2070 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2071 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag,
2072 AMD7X6_CHANSTATUS_EN(sc));
2073
2074 WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
2075 DEBUG_PROBE);
2076 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2077 cp = &sc->pciide_channels[channel];
2078 if (pciide_chansetup(sc, channel, interface) == 0)
2079 continue;
2080
2081 if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
2082 printf("%s: %s channel ignored (disabled)\n",
2083 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2084 continue;
2085 }
2086 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2087 pciide_pci_intr);
2088
2089 if (pciide_chan_candisable(cp))
2090 chanenable &= ~AMD7X6_CHAN_EN(channel);
2091 pciide_map_compat_intr(pa, cp, channel, interface);
2092 if (cp->hw_ok == 0)
2093 continue;
2094
2095 amd7x6_setup_channel(&cp->wdc_channel);
2096 }
2097 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN(sc),
2098 chanenable);
2099 return;
2100 }
2101
2102 void
2103 amd7x6_setup_channel(chp)
2104 struct channel_softc *chp;
2105 {
2106 u_int32_t udmatim_reg, datatim_reg;
2107 u_int8_t idedma_ctl;
2108 int mode, drive;
2109 struct ata_drive_datas *drvp;
2110 struct pciide_channel *cp = (struct pciide_channel*)chp;
2111 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2112 #ifndef PCIIDE_AMD756_ENABLEDMA
2113 int rev = PCI_REVISION(
2114 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2115 #endif
2116
2117 idedma_ctl = 0;
2118 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc));
2119 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc));
2120 datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
2121 udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
2122
2123 /* setup DMA if needed */
2124 pciide_channel_dma_setup(cp);
2125
2126 for (drive = 0; drive < 2; drive++) {
2127 drvp = &chp->ch_drive[drive];
2128 /* If no drive, skip */
2129 if ((drvp->drive_flags & DRIVE) == 0)
2130 continue;
2131 /* add timing values, setup DMA if needed */
2132 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2133 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2134 mode = drvp->PIO_mode;
2135 goto pio;
2136 }
2137 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2138 (drvp->drive_flags & DRIVE_UDMA)) {
2139 /* use Ultra/DMA */
2140 drvp->drive_flags &= ~DRIVE_DMA;
2141 udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
2142 AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
2143 AMD7X6_UDMA_TIME(chp->channel, drive,
2144 amd7x6_udma_tim[drvp->UDMA_mode]);
2145 /* can use PIO timings, MW DMA unused */
2146 mode = drvp->PIO_mode;
2147 } else {
2148 /* use Multiword DMA, but only if revision is OK */
2149 drvp->drive_flags &= ~DRIVE_UDMA;
2150 #ifndef PCIIDE_AMD756_ENABLEDMA
2151 			/*
2152 			 * The hardware bug doesn't seem to trigger with all
2153 			 * drives, so this workaround can be disabled with
2154 			 * PCIIDE_AMD756_ENABLEDMA. When the bug does trigger
2155 			 * it causes a hard hang.
2156 			 */
2157 if (sc->sc_pci_vendor == PCI_VENDOR_AMD &&
2158 sc->sc_pp->ide_product ==
2159 PCI_PRODUCT_AMD_PBC756_IDE &&
2160 AMD756_CHIPREV_DISABLEDMA(rev)) {
2161 printf("%s:%d:%d: multi-word DMA disabled due "
2162 "to chip revision\n",
2163 sc->sc_wdcdev.sc_dev.dv_xname,
2164 chp->channel, drive);
2165 mode = drvp->PIO_mode;
2166 drvp->drive_flags &= ~DRIVE_DMA;
2167 goto pio;
2168 }
2169 #endif
2170 /* mode = min(pio, dma+2) */
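			/*
			 * e.g. PIO 4 with MW DMA 2 keeps mode 4, while PIO 4
			 * with MW DMA 1 is clamped to mode 3 (programmed as
			 * PIO 3 / MW DMA 1 at the pio: label below).
			 */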
2171 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2172 mode = drvp->PIO_mode;
2173 else
2174 mode = drvp->DMA_mode + 2;
2175 }
2176 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2177
2178 pio: /* setup PIO mode */
2179 if (mode <= 2) {
2180 drvp->DMA_mode = 0;
2181 drvp->PIO_mode = 0;
2182 mode = 0;
2183 } else {
2184 drvp->PIO_mode = mode;
2185 drvp->DMA_mode = mode - 2;
2186 }
2187 datatim_reg |=
2188 AMD7X6_DATATIM_PULSE(chp->channel, drive,
2189 amd7x6_pio_set[mode]) |
2190 AMD7X6_DATATIM_RECOV(chp->channel, drive,
2191 amd7x6_pio_rec[mode]);
2192 }
2193 if (idedma_ctl != 0) {
2194 /* Add software bits in status register */
2195 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2196 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2197 idedma_ctl);
2198 }
2199 pciide_print_modes(cp);
2200 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM(sc), datatim_reg);
2201 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA(sc), udmatim_reg);
2202 }
2203
2204 void
2205 apollo_chip_map(sc, pa)
2206 struct pciide_softc *sc;
2207 struct pci_attach_args *pa;
2208 {
2209 struct pciide_channel *cp;
2210 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2211 int channel;
2212 u_int32_t ideconf;
2213 bus_size_t cmdsize, ctlsize;
2214 pcitag_t pcib_tag;
2215 pcireg_t pcib_id, pcib_class;
2216
2217 if (pciide_chipen(sc, pa) == 0)
2218 return;
2219 /* get a PCI tag for the ISA bridge (function 0 of the same device) */
2220 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2221 /* and read ID and rev of the ISA bridge */
2222 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2223 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2224 printf(": VIA Technologies ");
2225 switch (PCI_PRODUCT(pcib_id)) {
2226 case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2227 printf("VT82C586 (Apollo VP) ");
2228 		if (PCI_REVISION(pcib_class) >= 0x02) {
2229 printf("ATA33 controller\n");
2230 sc->sc_wdcdev.UDMA_cap = 2;
2231 } else {
2232 printf("controller\n");
2233 sc->sc_wdcdev.UDMA_cap = 0;
2234 }
2235 break;
2236 case PCI_PRODUCT_VIATECH_VT82C596A:
2237 printf("VT82C596A (Apollo Pro) ");
2238 if (PCI_REVISION(pcib_class) >= 0x12) {
2239 printf("ATA66 controller\n");
2240 sc->sc_wdcdev.UDMA_cap = 4;
2241 } else {
2242 printf("ATA33 controller\n");
2243 sc->sc_wdcdev.UDMA_cap = 2;
2244 }
2245 break;
2246 case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2247 printf("VT82C686A (Apollo KX133) ");
2248 if (PCI_REVISION(pcib_class) >= 0x40) {
2249 printf("ATA100 controller\n");
2250 sc->sc_wdcdev.UDMA_cap = 5;
2251 } else {
2252 printf("ATA66 controller\n");
2253 sc->sc_wdcdev.UDMA_cap = 4;
2254 }
2255 break;
2256 case PCI_PRODUCT_VIATECH_VT8231:
2257 printf("VT8231 ATA100 controller\n");
2258 sc->sc_wdcdev.UDMA_cap = 5;
2259 break;
2260 case PCI_PRODUCT_VIATECH_VT8233:
2261 printf("VT8233 ATA100 controller\n");
2262 sc->sc_wdcdev.UDMA_cap = 5;
2263 break;
2264 case PCI_PRODUCT_VIATECH_VT8233A:
2265 printf("VT8233A ATA133 controller\n");
2266 sc->sc_wdcdev.UDMA_cap = 6;
2267 break;
2268 case PCI_PRODUCT_VIATECH_VT8235:
2269 printf("VT8235 ATA133 controller\n");
2270 sc->sc_wdcdev.UDMA_cap = 6;
2271 break;
2272 default:
2273 printf("unknown ATA controller\n");
2274 sc->sc_wdcdev.UDMA_cap = 0;
2275 }
2276
2277 printf("%s: bus-master DMA support present",
2278 sc->sc_wdcdev.sc_dev.dv_xname);
2279 pciide_mapreg_dma(sc, pa);
2280 printf("\n");
2281 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2282 WDC_CAPABILITY_MODE;
2283 if (sc->sc_dma_ok) {
2284 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2285 sc->sc_wdcdev.irqack = pciide_irqack;
2286 if (sc->sc_wdcdev.UDMA_cap > 0)
2287 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2288 }
2289 sc->sc_wdcdev.PIO_cap = 4;
2290 sc->sc_wdcdev.DMA_cap = 2;
2291 sc->sc_wdcdev.set_modes = apollo_setup_channel;
2292 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2293 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2294
2295 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2296 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2297 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2298 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2299 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2300 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2301 DEBUG_PROBE);
2302
2303 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2304 cp = &sc->pciide_channels[channel];
2305 if (pciide_chansetup(sc, channel, interface) == 0)
2306 continue;
2307
2308 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2309 if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2310 printf("%s: %s channel ignored (disabled)\n",
2311 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2312 continue;
2313 }
2314 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2315 pciide_pci_intr);
2316 if (cp->hw_ok == 0)
2317 continue;
2318 if (pciide_chan_candisable(cp)) {
2319 ideconf &= ~APO_IDECONF_EN(channel);
2320 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2321 ideconf);
2322 }
2323 pciide_map_compat_intr(pa, cp, channel, interface);
2324
2325 if (cp->hw_ok == 0)
2326 continue;
2327 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2328 }
2329 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2330 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2331 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2332 }
2333
2334 void
2335 apollo_setup_channel(chp)
2336 struct channel_softc *chp;
2337 {
2338 u_int32_t udmatim_reg, datatim_reg;
2339 u_int8_t idedma_ctl;
2340 int mode, drive;
2341 struct ata_drive_datas *drvp;
2342 struct pciide_channel *cp = (struct pciide_channel*)chp;
2343 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2344
2345 idedma_ctl = 0;
2346 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2347 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2348 datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2349 udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2350
2351 /* setup DMA if needed */
2352 pciide_channel_dma_setup(cp);
2353
2354 for (drive = 0; drive < 2; drive++) {
2355 drvp = &chp->ch_drive[drive];
2356 /* If no drive, skip */
2357 if ((drvp->drive_flags & DRIVE) == 0)
2358 continue;
2359 /* add timing values, setup DMA if needed */
2360 if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2361 (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2362 mode = drvp->PIO_mode;
2363 goto pio;
2364 }
2365 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2366 (drvp->drive_flags & DRIVE_UDMA)) {
2367 /* use Ultra/DMA */
2368 drvp->drive_flags &= ~DRIVE_DMA;
2369 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2370 APO_UDMA_EN_MTH(chp->channel, drive);
2371 if (sc->sc_wdcdev.UDMA_cap == 6) {
2372 /* 8233a */
2373 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2374 drive, apollo_udma133_tim[drvp->UDMA_mode]);
2375 } else if (sc->sc_wdcdev.UDMA_cap == 5) {
2376 /* 686b */
2377 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2378 drive, apollo_udma100_tim[drvp->UDMA_mode]);
2379 } else if (sc->sc_wdcdev.UDMA_cap == 4) {
2380 /* 596b or 686a */
2381 udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2382 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2383 drive, apollo_udma66_tim[drvp->UDMA_mode]);
2384 } else {
2385 /* 596a or 586b */
2386 udmatim_reg |= APO_UDMA_TIME(chp->channel,
2387 drive, apollo_udma33_tim[drvp->UDMA_mode]);
2388 }
2389 /* can use PIO timings, MW DMA unused */
2390 mode = drvp->PIO_mode;
2391 } else {
2392 /* use Multiword DMA */
2393 drvp->drive_flags &= ~DRIVE_UDMA;
2394 /* mode = min(pio, dma+2) */
2395 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2396 mode = drvp->PIO_mode;
2397 else
2398 mode = drvp->DMA_mode + 2;
2399 }
2400 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2401
2402 pio: /* setup PIO mode */
2403 if (mode <= 2) {
2404 drvp->DMA_mode = 0;
2405 drvp->PIO_mode = 0;
2406 mode = 0;
2407 } else {
2408 drvp->PIO_mode = mode;
2409 drvp->DMA_mode = mode - 2;
2410 }
2411 datatim_reg |=
2412 APO_DATATIM_PULSE(chp->channel, drive,
2413 apollo_pio_set[mode]) |
2414 APO_DATATIM_RECOV(chp->channel, drive,
2415 apollo_pio_rec[mode]);
2416 }
2417 if (idedma_ctl != 0) {
2418 /* Add software bits in status register */
2419 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2420 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2421 idedma_ctl);
2422 }
2423 pciide_print_modes(cp);
2424 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2425 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2426 }
2427
2428 void
2429 cmd_channel_map(pa, sc, channel)
2430 struct pci_attach_args *pa;
2431 struct pciide_softc *sc;
2432 int channel;
2433 {
2434 struct pciide_channel *cp = &sc->pciide_channels[channel];
2435 bus_size_t cmdsize, ctlsize;
2436 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2437 int interface, one_channel;
2438
2439 	/*
2440 	 * The 0648/0649 can be told to identify as a RAID controller.
2441 	 * In this case, we have to fake the interface byte.
2442 	 */
2443 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2444 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2445 PCIIDE_INTERFACE_SETTABLE(1);
2446 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2447 CMD_CONF_DSA1)
2448 interface |= PCIIDE_INTERFACE_PCI(0) |
2449 PCIIDE_INTERFACE_PCI(1);
2450 } else {
2451 interface = PCI_INTERFACE(pa->pa_class);
2452 }
2453
2454 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2455 cp->name = PCIIDE_CHANNEL_NAME(channel);
2456 cp->wdc_channel.channel = channel;
2457 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2458
2459 	/*
2460 	 * Older CMD64x controllers don't have independent channels
2461 	 */
2462 switch (sc->sc_pp->ide_product) {
2463 case PCI_PRODUCT_CMDTECH_649:
2464 one_channel = 0;
2465 break;
2466 default:
2467 one_channel = 1;
2468 break;
2469 }
2470
2471 if (channel > 0 && one_channel) {
2472 cp->wdc_channel.ch_queue =
2473 sc->pciide_channels[0].wdc_channel.ch_queue;
2474 } else {
2475 cp->wdc_channel.ch_queue =
2476 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2477 }
2478 if (cp->wdc_channel.ch_queue == NULL) {
2479 printf("%s %s channel: "
2480 		    "can't allocate memory for command queue\n",
2481 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2482 return;
2483 }
2484
2485 printf("%s: %s channel %s to %s mode\n",
2486 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2487 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2488 "configured" : "wired",
2489 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2490 "native-PCI" : "compatibility");
2491
2492 /*
2493 * with a CMD PCI64x, if we get here, the first channel is enabled:
2494 * there's no way to disable the first channel without disabling
2495 * the whole device
2496 */
2497 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2498 printf("%s: %s channel ignored (disabled)\n",
2499 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2500 return;
2501 }
2502
2503 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2504 if (cp->hw_ok == 0)
2505 return;
2506 if (channel == 1) {
2507 if (pciide_chan_candisable(cp)) {
2508 ctrl &= ~CMD_CTRL_2PORT;
2509 pciide_pci_write(pa->pa_pc, pa->pa_tag,
2510 CMD_CTRL, ctrl);
2511 }
2512 }
2513 pciide_map_compat_intr(pa, cp, channel, interface);
2514 }
2515
2516 int
2517 cmd_pci_intr(arg)
2518 void *arg;
2519 {
2520 struct pciide_softc *sc = arg;
2521 struct pciide_channel *cp;
2522 struct channel_softc *wdc_cp;
2523 int i, rv, crv;
2524 u_int32_t priirq, secirq;
2525
2526 rv = 0;
2527 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2528 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2529 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2530 cp = &sc->pciide_channels[i];
2531 wdc_cp = &cp->wdc_channel;
2532 		/* If a compat channel, skip. */
2533 if (cp->compat)
2534 continue;
2535 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2536 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2537 crv = wdcintr(wdc_cp);
2538 if (crv == 0)
2539 printf("%s:%d: bogus intr\n",
2540 sc->sc_wdcdev.sc_dev.dv_xname, i);
2541 else
2542 rv = 1;
2543 }
2544 }
2545 return rv;
2546 }
2547
2548 void
2549 cmd_chip_map(sc, pa)
2550 struct pciide_softc *sc;
2551 struct pci_attach_args *pa;
2552 {
2553 int channel;
2554
2555 	/*
2556 	 * On a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2557 	 * and the base address registers can be disabled at the
2558 	 * hardware level. In this case, the device is wired
2559 	 * in compat mode and its first channel is always enabled,
2560 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2561 	 * In fact, it seems that the first channel of the CMD PCI0640
2562 	 * can't be disabled.
2563 	 */
2564
2565 #ifdef PCIIDE_CMD064x_DISABLE
2566 if (pciide_chipen(sc, pa) == 0)
2567 return;
2568 #endif
2569
2570 printf("%s: hardware does not support DMA\n",
2571 sc->sc_wdcdev.sc_dev.dv_xname);
2572 sc->sc_dma_ok = 0;
2573
2574 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2575 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2576 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2577
2578 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2579 cmd_channel_map(pa, sc, channel);
2580 }
2581 }
2582
2583 void
2584 cmd0643_9_chip_map(sc, pa)
2585 struct pciide_softc *sc;
2586 struct pci_attach_args *pa;
2587 {
2588 struct pciide_channel *cp;
2589 int channel;
2590 pcireg_t rev = PCI_REVISION(pa->pa_class);
2591
2592 	/*
2593 	 * On a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2594 	 * and the base address registers can be disabled at the
2595 	 * hardware level. In this case, the device is wired
2596 	 * in compat mode and its first channel is always enabled,
2597 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2598 	 * In fact, it seems that the first channel of the CMD PCI0640
2599 	 * can't be disabled.
2600 	 */
2601
2602 #ifdef PCIIDE_CMD064x_DISABLE
2603 if (pciide_chipen(sc, pa) == 0)
2604 return;
2605 #endif
2606 printf("%s: bus-master DMA support present",
2607 sc->sc_wdcdev.sc_dev.dv_xname);
2608 pciide_mapreg_dma(sc, pa);
2609 printf("\n");
2610 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2611 WDC_CAPABILITY_MODE;
2612 if (sc->sc_dma_ok) {
2613 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2614 switch (sc->sc_pp->ide_product) {
2615 case PCI_PRODUCT_CMDTECH_649:
2616 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2617 sc->sc_wdcdev.UDMA_cap = 5;
2618 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2619 break;
2620 case PCI_PRODUCT_CMDTECH_648:
2621 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2622 sc->sc_wdcdev.UDMA_cap = 4;
2623 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2624 break;
2625 case PCI_PRODUCT_CMDTECH_646:
2626 if (rev >= CMD0646U2_REV) {
2627 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2628 sc->sc_wdcdev.UDMA_cap = 2;
2629 } else if (rev >= CMD0646U_REV) {
2630 /*
2631 * Linux's driver claims that the 646U is broken
2632 * with UDMA. Only enable it if we know what we're
2633 * doing
2634 */
2635 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2636 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2637 sc->sc_wdcdev.UDMA_cap = 2;
2638 #endif
2639 /* explicitly disable UDMA */
2640 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2641 CMD_UDMATIM(0), 0);
2642 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2643 CMD_UDMATIM(1), 0);
2644 }
2645 sc->sc_wdcdev.irqack = cmd646_9_irqack;
2646 break;
2647 default:
2648 sc->sc_wdcdev.irqack = pciide_irqack;
2649 }
2650 }
2651
2652 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2653 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2654 sc->sc_wdcdev.PIO_cap = 4;
2655 sc->sc_wdcdev.DMA_cap = 2;
2656 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2657
2658 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2659 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2660 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2661 DEBUG_PROBE);
2662
2663 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2664 cp = &sc->pciide_channels[channel];
2665 cmd_channel_map(pa, sc, channel);
2666 if (cp->hw_ok == 0)
2667 continue;
2668 cmd0643_9_setup_channel(&cp->wdc_channel);
2669 }
2670 	/*
2671 	 * Note: this also makes sure we clear the IRQ disable and reset
2672 	 * bits.
2673 	 */
2674 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2675 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2676 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2677 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2678 DEBUG_PROBE);
2679 }
2680
2681 void
2682 cmd0643_9_setup_channel(chp)
2683 struct channel_softc *chp;
2684 {
2685 struct ata_drive_datas *drvp;
2686 u_int8_t tim;
2687 u_int32_t idedma_ctl, udma_reg;
2688 int drive;
2689 struct pciide_channel *cp = (struct pciide_channel*)chp;
2690 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2691
2692 idedma_ctl = 0;
2693 /* setup DMA if needed */
2694 pciide_channel_dma_setup(cp);
2695
2696 for (drive = 0; drive < 2; drive++) {
2697 drvp = &chp->ch_drive[drive];
2698 /* If no drive, skip */
2699 if ((drvp->drive_flags & DRIVE) == 0)
2700 continue;
2701 /* add timing values, setup DMA if needed */
2702 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2703 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2704 if (drvp->drive_flags & DRIVE_UDMA) {
2705 /* UltraDMA on a 646U2, 0648 or 0649 */
2706 drvp->drive_flags &= ~DRIVE_DMA;
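				/*
				 * Clamp to UDMA2 (Ultra/33) unless CMD_BICSR
				 * reports an 80-conductor cable on this
				 * channel (CMD_BICSR_80), as checked below.
				 */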
2707 udma_reg = pciide_pci_read(sc->sc_pc,
2708 sc->sc_tag, CMD_UDMATIM(chp->channel));
2709 if (drvp->UDMA_mode > 2 &&
2710 (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2711 CMD_BICSR) &
2712 CMD_BICSR_80(chp->channel)) == 0)
2713 drvp->UDMA_mode = 2;
2714 if (drvp->UDMA_mode > 2)
2715 udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2716 else if (sc->sc_wdcdev.UDMA_cap > 2)
2717 udma_reg |= CMD_UDMATIM_UDMA33(drive);
2718 udma_reg |= CMD_UDMATIM_UDMA(drive);
2719 udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2720 CMD_UDMATIM_TIM_OFF(drive));
2721 udma_reg |=
2722 (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2723 CMD_UDMATIM_TIM_OFF(drive));
2724 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2725 CMD_UDMATIM(chp->channel), udma_reg);
2726 } else {
2727 				/*
2728 				 * Use Multiword DMA.
2729 				 * The timings are used for both PIO and DMA,
2730 				 * so adjust the DMA mode if needed.
2731 				 * If we have a 0646U2/8/9, turn off UDMA.
2732 				 */
2733 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2734 udma_reg = pciide_pci_read(sc->sc_pc,
2735 sc->sc_tag,
2736 CMD_UDMATIM(chp->channel));
2737 udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2738 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2739 CMD_UDMATIM(chp->channel),
2740 udma_reg);
2741 }
2742 if (drvp->PIO_mode >= 3 &&
2743 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2744 drvp->DMA_mode = drvp->PIO_mode - 2;
2745 }
2746 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2747 }
2748 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2749 }
2750 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2751 CMD_DATA_TIM(chp->channel, drive), tim);
2752 }
2753 if (idedma_ctl != 0) {
2754 /* Add software bits in status register */
2755 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2756 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2757 idedma_ctl);
2758 }
2759 pciide_print_modes(cp);
2760 }
2761
2762 void
2763 cmd646_9_irqack(chp)
2764 struct channel_softc *chp;
2765 {
2766 u_int32_t priirq, secirq;
2767 struct pciide_channel *cp = (struct pciide_channel*)chp;
2768 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2769
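	/*
	 * Reading the per-channel interrupt status register and writing the
	 * same value back seems to be what acknowledges the latched interrupt
	 * bit on these chips (CMD_CONF for channel 0, CMD_ARTTIM23 for
	 * channel 1).
	 */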
2770 if (chp->channel == 0) {
2771 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2772 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2773 } else {
2774 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2775 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2776 }
2777 pciide_irqack(chp);
2778 }
2779
2780 void
2781 cmd680_chip_map(sc, pa)
2782 struct pciide_softc *sc;
2783 struct pci_attach_args *pa;
2784 {
2785 struct pciide_channel *cp;
2786 int channel;
2787
2788 if (pciide_chipen(sc, pa) == 0)
2789 return;
2790 printf("%s: bus-master DMA support present",
2791 sc->sc_wdcdev.sc_dev.dv_xname);
2792 pciide_mapreg_dma(sc, pa);
2793 printf("\n");
2794 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2795 WDC_CAPABILITY_MODE;
2796 if (sc->sc_dma_ok) {
2797 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2798 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2799 sc->sc_wdcdev.UDMA_cap = 6;
2800 sc->sc_wdcdev.irqack = pciide_irqack;
2801 }
2802
2803 sc->sc_wdcdev.channels = sc->wdc_chanarray;
2804 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2805 sc->sc_wdcdev.PIO_cap = 4;
2806 sc->sc_wdcdev.DMA_cap = 2;
2807 sc->sc_wdcdev.set_modes = cmd680_setup_channel;
2808
2809 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
2810 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
2811 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
2812 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);
2813 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2814 cp = &sc->pciide_channels[channel];
2815 cmd680_channel_map(pa, sc, channel);
2816 if (cp->hw_ok == 0)
2817 continue;
2818 cmd680_setup_channel(&cp->wdc_channel);
2819 }
2820 }
2821
2822 void
2823 cmd680_channel_map(pa, sc, channel)
2824 struct pci_attach_args *pa;
2825 struct pciide_softc *sc;
2826 int channel;
2827 {
2828 struct pciide_channel *cp = &sc->pciide_channels[channel];
2829 bus_size_t cmdsize, ctlsize;
2830 int interface, i, reg;
2831 static const u_int8_t init_val[] =
2832 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
2833 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };
2834
2835 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2836 interface = PCIIDE_INTERFACE_SETTABLE(0) |
2837 PCIIDE_INTERFACE_SETTABLE(1);
2838 interface |= PCIIDE_INTERFACE_PCI(0) |
2839 PCIIDE_INTERFACE_PCI(1);
2840 } else {
2841 interface = PCI_INTERFACE(pa->pa_class);
2842 }
2843
2844 sc->wdc_chanarray[channel] = &cp->wdc_channel;
2845 cp->name = PCIIDE_CHANNEL_NAME(channel);
2846 cp->wdc_channel.channel = channel;
2847 cp->wdc_channel.wdc = &sc->sc_wdcdev;
2848
2849 cp->wdc_channel.ch_queue =
2850 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2851 if (cp->wdc_channel.ch_queue == NULL) {
2852 printf("%s %s channel: "
2853 		    "can't allocate memory for command queue\n",
2854 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2855 return;
2856 }
2857
2858 /* XXX */
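	/*
	 * These appear to be default values for the per-channel timing
	 * registers starting at 0xa2 (channel 0) / 0xb2 (channel 1).
	 */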
2859 reg = 0xa2 + channel * 16;
2860 for (i = 0; i < sizeof(init_val); i++)
2861 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);
2862
2863 printf("%s: %s channel %s to %s mode\n",
2864 sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2865 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2866 "configured" : "wired",
2867 (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2868 "native-PCI" : "compatibility");
2869
2870 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr);
2871 if (cp->hw_ok == 0)
2872 return;
2873 pciide_map_compat_intr(pa, cp, channel, interface);
2874 }
2875
2876 void
2877 cmd680_setup_channel(chp)
2878 struct channel_softc *chp;
2879 {
2880 struct ata_drive_datas *drvp;
2881 u_int8_t mode, off, scsc;
2882 u_int16_t val;
2883 u_int32_t idedma_ctl;
2884 int drive;
2885 struct pciide_channel *cp = (struct pciide_channel*)chp;
2886 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2887 pci_chipset_tag_t pc = sc->sc_pc;
2888 pcitag_t pa = sc->sc_tag;
2889 static const u_int8_t udma2_tbl[] =
2890 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
2891 static const u_int8_t udma_tbl[] =
2892 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
2893 static const u_int16_t dma_tbl[] =
2894 { 0x2208, 0x10c2, 0x10c1 };
2895 static const u_int16_t pio_tbl[] =
2896 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
2897
2898 idedma_ctl = 0;
2899 pciide_channel_dma_setup(cp);
2900 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4);
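	/*
	 * The register just read (0x80 / 0x84) holds a two-bit transfer-class
	 * field per drive; the loop below sets it to 0x01 for PIO, 0x02 for
	 * MW DMA or 0x03 for UDMA.
	 */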
2901
2902 for (drive = 0; drive < 2; drive++) {
2903 drvp = &chp->ch_drive[drive];
2904 /* If no drive, skip */
2905 if ((drvp->drive_flags & DRIVE) == 0)
2906 continue;
2907 mode &= ~(0x03 << (drive * 4));
2908 if (drvp->drive_flags & DRIVE_UDMA) {
2909 drvp->drive_flags &= ~DRIVE_DMA;
2910 off = 0xa0 + chp->channel * 16;
2911 if (drvp->UDMA_mode > 2 &&
2912 (pciide_pci_read(pc, pa, off) & 0x01) == 0)
2913 drvp->UDMA_mode = 2;
2914 scsc = pciide_pci_read(pc, pa, 0x8a);
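			/*
			 * Register 0x8a presumably reports (in its 0x30 bits)
			 * whether the faster base clock needed for UDMA6 is
			 * available; try to request it via bit 0x01 and fall
			 * back to UDMA5 if it never comes on.
			 */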
2915 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
2916 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
2917 scsc = pciide_pci_read(pc, pa, 0x8a);
2918 if ((scsc & 0x30) == 0)
2919 drvp->UDMA_mode = 5;
2920 }
2921 mode |= 0x03 << (drive * 4);
2922 off = 0xac + chp->channel * 16 + drive * 2;
2923 val = pciide_pci_read(pc, pa, off) & ~0x3f;
2924 if (scsc & 0x30)
2925 val |= udma2_tbl[drvp->UDMA_mode];
2926 else
2927 val |= udma_tbl[drvp->UDMA_mode];
2928 pciide_pci_write(pc, pa, off, val);
2929 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2930 } else if (drvp->drive_flags & DRIVE_DMA) {
2931 mode |= 0x02 << (drive * 4);
2932 off = 0xa8 + chp->channel * 16 + drive * 2;
2933 val = dma_tbl[drvp->DMA_mode];
2934 pciide_pci_write(pc, pa, off, val & 0xff);
2935 			pciide_pci_write(pc, pa, off + 1, val >> 8);
2936 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2937 } else {
2938 mode |= 0x01 << (drive * 4);
2939 off = 0xa4 + chp->channel * 16 + drive * 2;
2940 val = pio_tbl[drvp->PIO_mode];
2941 pciide_pci_write(pc, pa, off, val & 0xff);
2942 			pciide_pci_write(pc, pa, off + 1, val >> 8);
2943 }
2944 }
2945
2946 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode);
2947 if (idedma_ctl != 0) {
2948 /* Add software bits in status register */
2949 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2950 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2951 idedma_ctl);
2952 }
2953 pciide_print_modes(cp);
2954 }
2955
2956 void
2957 cy693_chip_map(sc, pa)
2958 struct pciide_softc *sc;
2959 struct pci_attach_args *pa;
2960 {
2961 struct pciide_channel *cp;
2962 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2963 bus_size_t cmdsize, ctlsize;
2964
2965 if (pciide_chipen(sc, pa) == 0)
2966 return;
2967 /*
2968 * this chip has 2 PCI IDE functions, one for primary and one for
2969 * secondary. So we need to call pciide_mapregs_compat() with
2970 * the real channel
2971 */
2972 if (pa->pa_function == 1) {
2973 sc->sc_cy_compatchan = 0;
2974 } else if (pa->pa_function == 2) {
2975 sc->sc_cy_compatchan = 1;
2976 } else {
2977 printf("%s: unexpected PCI function %d\n",
2978 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2979 return;
2980 }
2981 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2982 printf("%s: bus-master DMA support present",
2983 sc->sc_wdcdev.sc_dev.dv_xname);
2984 pciide_mapreg_dma(sc, pa);
2985 } else {
2986 printf("%s: hardware does not support DMA",
2987 sc->sc_wdcdev.sc_dev.dv_xname);
2988 sc->sc_dma_ok = 0;
2989 }
2990 printf("\n");
2991
2992 sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2993 if (sc->sc_cy_handle == NULL) {
2994 printf("%s: unable to map hyperCache control registers\n",
2995 sc->sc_wdcdev.sc_dev.dv_xname);
2996 sc->sc_dma_ok = 0;
2997 }
2998
2999 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3000 WDC_CAPABILITY_MODE;
3001 if (sc->sc_dma_ok) {
3002 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3003 sc->sc_wdcdev.irqack = pciide_irqack;
3004 }
3005 sc->sc_wdcdev.PIO_cap = 4;
3006 sc->sc_wdcdev.DMA_cap = 2;
3007 sc->sc_wdcdev.set_modes = cy693_setup_channel;
3008
3009 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3010 sc->sc_wdcdev.nchannels = 1;
3011
3012 /* Only one channel for this chip; if we are here it's enabled */
3013 cp = &sc->pciide_channels[0];
3014 sc->wdc_chanarray[0] = &cp->wdc_channel;
3015 cp->name = PCIIDE_CHANNEL_NAME(0);
3016 cp->wdc_channel.channel = 0;
3017 cp->wdc_channel.wdc = &sc->sc_wdcdev;
3018 cp->wdc_channel.ch_queue =
3019 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
3020 if (cp->wdc_channel.ch_queue == NULL) {
3021 printf("%s primary channel: "
3022 		    "can't allocate memory for command queue\n",
3023 sc->sc_wdcdev.sc_dev.dv_xname);
3024 return;
3025 }
3026 printf("%s: primary channel %s to ",
3027 sc->sc_wdcdev.sc_dev.dv_xname,
3028 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
3029 "configured" : "wired");
3030 if (interface & PCIIDE_INTERFACE_PCI(0)) {
3031 printf("native-PCI");
3032 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
3033 pciide_pci_intr);
3034 } else {
3035 printf("compatibility");
3036 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
3037 &cmdsize, &ctlsize);
3038 }
3039 printf(" mode\n");
3040 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3041 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3042 wdcattach(&cp->wdc_channel);
3043 if (pciide_chan_candisable(cp)) {
3044 pci_conf_write(sc->sc_pc, sc->sc_tag,
3045 PCI_COMMAND_STATUS_REG, 0);
3046 }
3047 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
3048 if (cp->hw_ok == 0)
3049 return;
3050 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
3051 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
3052 cy693_setup_channel(&cp->wdc_channel);
3053 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
3054 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
3055 }
3056
3057 void
3058 cy693_setup_channel(chp)
3059 struct channel_softc *chp;
3060 {
3061 struct ata_drive_datas *drvp;
3062 int drive;
3063 u_int32_t cy_cmd_ctrl;
3064 u_int32_t idedma_ctl;
3065 struct pciide_channel *cp = (struct pciide_channel*)chp;
3066 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3067 int dma_mode = -1;
3068
3069 cy_cmd_ctrl = idedma_ctl = 0;
3070
3071 /* setup DMA if needed */
3072 pciide_channel_dma_setup(cp);
3073
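	/*
	 * The 82c693 has a single DMA timing setting per channel, so both
	 * drives end up using the lowest DMA mode of the two (see the
	 * dma_mode handling below).
	 */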
3074 for (drive = 0; drive < 2; drive++) {
3075 drvp = &chp->ch_drive[drive];
3076 /* If no drive, skip */
3077 if ((drvp->drive_flags & DRIVE) == 0)
3078 continue;
3079 /* add timing values, setup DMA if needed */
3080 if (drvp->drive_flags & DRIVE_DMA) {
3081 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3082 /* use Multiword DMA */
3083 if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
3084 dma_mode = drvp->DMA_mode;
3085 }
3086 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3087 CY_CMD_CTRL_IOW_PULSE_OFF(drive));
3088 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3089 CY_CMD_CTRL_IOW_REC_OFF(drive));
3090 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
3091 CY_CMD_CTRL_IOR_PULSE_OFF(drive));
3092 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
3093 CY_CMD_CTRL_IOR_REC_OFF(drive));
3094 }
3095 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
3096 chp->ch_drive[0].DMA_mode = dma_mode;
3097 chp->ch_drive[1].DMA_mode = dma_mode;
3098
3099 if (dma_mode == -1)
3100 dma_mode = 0;
3101
3102 if (sc->sc_cy_handle != NULL) {
3103 /* Note: `multiple' is implied. */
3104 cy82c693_write(sc->sc_cy_handle,
3105 (sc->sc_cy_compatchan == 0) ?
3106 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
3107 }
3108
3109 pciide_print_modes(cp);
3110
3111 if (idedma_ctl != 0) {
3112 /* Add software bits in status register */
3113 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3114 IDEDMA_CTL, idedma_ctl);
3115 }
3116 }
3117
3118 static int
3119 sis_hostbr_match(pa)
3120 struct pci_attach_args *pa;
3121 {
3122 return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
3123 ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
3124 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
3125 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
3126 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735) ||
3127 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_745)));
3128 }
3129
3130 void
3131 sis_chip_map(sc, pa)
3132 struct pciide_softc *sc;
3133 struct pci_attach_args *pa;
3134 {
3135 struct pciide_channel *cp;
3136 int channel;
3137 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
3138 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
3139 pcireg_t rev = PCI_REVISION(pa->pa_class);
3140 bus_size_t cmdsize, ctlsize;
3141 pcitag_t pchb_tag;
3142 pcireg_t pchb_id, pchb_class;
3143
3144 if (pciide_chipen(sc, pa) == 0)
3145 return;
3146 printf("%s: bus-master DMA support present",
3147 sc->sc_wdcdev.sc_dev.dv_xname);
3148 pciide_mapreg_dma(sc, pa);
3149 printf("\n");
3150
3151 /* get a PCI tag for the host bridge (function 0 of the same device) */
3152 pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
3153 	/* and read ID and rev of the host bridge */
3154 pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
3155 pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
3156
3157 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3158 WDC_CAPABILITY_MODE;
3159 if (sc->sc_dma_ok) {
3160 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3161 sc->sc_wdcdev.irqack = pciide_irqack;
3162 		/*
3163 		 * controllers associated with a rev 0x2 530 Host-to-PCI Bridge
3164 		 * have problems with UDMA (info provided by Christos)
3165 		 */
3166 if (rev >= 0xd0 &&
3167 (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
3168 PCI_REVISION(pchb_class) >= 0x03))
3169 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3170 }
3171
3172 sc->sc_wdcdev.PIO_cap = 4;
3173 sc->sc_wdcdev.DMA_cap = 2;
3174 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
3175 		/*
3176 		 * Use UDMA/100 on the SiS 645/650/730/735/745 chipsets
3177 		 * (see sis_hostbr_match()) and UDMA/33 on the others.
3178 		 */
3179 sc->sc_wdcdev.UDMA_cap =
3180 pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
3181 sc->sc_wdcdev.set_modes = sis_setup_channel;
3182
3183 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3184 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3185
3186 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
3187 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
3188 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
3189
3190 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3191 cp = &sc->pciide_channels[channel];
3192 if (pciide_chansetup(sc, channel, interface) == 0)
3193 continue;
3194 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
3195 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
3196 printf("%s: %s channel ignored (disabled)\n",
3197 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3198 continue;
3199 }
3200 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3201 pciide_pci_intr);
3202 if (cp->hw_ok == 0)
3203 continue;
3204 if (pciide_chan_candisable(cp)) {
3205 if (channel == 0)
3206 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
3207 else
3208 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
3209 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
3210 sis_ctr0);
3211 }
3212 pciide_map_compat_intr(pa, cp, channel, interface);
3213 if (cp->hw_ok == 0)
3214 continue;
3215 sis_setup_channel(&cp->wdc_channel);
3216 }
3217 }
3218
3219 void
3220 sis_setup_channel(chp)
3221 struct channel_softc *chp;
3222 {
3223 struct ata_drive_datas *drvp;
3224 int drive;
3225 u_int32_t sis_tim;
3226 u_int32_t idedma_ctl;
3227 struct pciide_channel *cp = (struct pciide_channel*)chp;
3228 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3229
3230 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
3231 "channel %d 0x%x\n", chp->channel,
3232 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
3233 DEBUG_PROBE);
3234 sis_tim = 0;
3235 idedma_ctl = 0;
3236 /* setup DMA if needed */
3237 pciide_channel_dma_setup(cp);
3238
3239 for (drive = 0; drive < 2; drive++) {
3240 drvp = &chp->ch_drive[drive];
3241 /* If no drive, skip */
3242 if ((drvp->drive_flags & DRIVE) == 0)
3243 continue;
3244 /* add timing values, setup DMA if needed */
3245 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3246 (drvp->drive_flags & DRIVE_UDMA) == 0)
3247 goto pio;
3248
3249 if (drvp->drive_flags & DRIVE_UDMA) {
3250 /* use Ultra/DMA */
3251 drvp->drive_flags &= ~DRIVE_DMA;
3252 sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
3253 SIS_TIM_UDMA_TIME_OFF(drive);
3254 sis_tim |= SIS_TIM_UDMA_EN(drive);
3255 } else {
3256 			/*
3257 			 * Use Multiword DMA.
3258 			 * The timings are used for both PIO and DMA,
3259 			 * so adjust the DMA mode if needed.
3260 			 */
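			/*
			 * e.g. PIO 4 paired with MW DMA 1 is reconciled to
			 * PIO 3 / MW DMA 1 by the checks below.
			 */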
3261 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3262 drvp->PIO_mode = drvp->DMA_mode + 2;
3263 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3264 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3265 drvp->PIO_mode - 2 : 0;
3266 if (drvp->DMA_mode == 0)
3267 drvp->PIO_mode = 0;
3268 }
3269 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3270 pio: sis_tim |= sis_pio_act[drvp->PIO_mode] <<
3271 SIS_TIM_ACT_OFF(drive);
3272 sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
3273 SIS_TIM_REC_OFF(drive);
3274 }
3275 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
3276 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
3277 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
3278 if (idedma_ctl != 0) {
3279 /* Add software bits in status register */
3280 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3281 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3282 idedma_ctl);
3283 }
3284 pciide_print_modes(cp);
3285 }
3286
3287 void
3288 acer_chip_map(sc, pa)
3289 struct pciide_softc *sc;
3290 struct pci_attach_args *pa;
3291 {
3292 struct pciide_channel *cp;
3293 int channel;
3294 pcireg_t cr, interface;
3295 bus_size_t cmdsize, ctlsize;
3296 pcireg_t rev = PCI_REVISION(pa->pa_class);
3297
3298 if (pciide_chipen(sc, pa) == 0)
3299 return;
3300 printf("%s: bus-master DMA support present",
3301 sc->sc_wdcdev.sc_dev.dv_xname);
3302 pciide_mapreg_dma(sc, pa);
3303 printf("\n");
3304 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3305 WDC_CAPABILITY_MODE;
3306 if (sc->sc_dma_ok) {
3307 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3308 if (rev >= 0x20) {
3309 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3310 if (rev >= 0xC4)
3311 sc->sc_wdcdev.UDMA_cap = 5;
3312 else if (rev >= 0xC2)
3313 sc->sc_wdcdev.UDMA_cap = 4;
3314 else
3315 sc->sc_wdcdev.UDMA_cap = 2;
3316 }
3317 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3318 sc->sc_wdcdev.irqack = pciide_irqack;
3319 }
3320
3321 sc->sc_wdcdev.PIO_cap = 4;
3322 sc->sc_wdcdev.DMA_cap = 2;
3323 sc->sc_wdcdev.set_modes = acer_setup_channel;
3324 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3325 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3326
3327 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3328 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3329 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3330
3331 /* Enable "microsoft register bits" R/W. */
3332 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3333 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3334 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3335 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3336 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3337 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3338 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3339 ~ACER_CHANSTATUSREGS_RO);
3340 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3341 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3342 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3343 /* Don't use cr, re-read the real register content instead */
3344 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3345 PCI_CLASS_REG));
3346
3347 /* From linux: enable "Cable Detection" */
3348 if (rev >= 0xC2) {
3349 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3350 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3351 | ACER_0x4B_CDETECT);
3352 }
3353
3354 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3355 cp = &sc->pciide_channels[channel];
3356 if (pciide_chansetup(sc, channel, interface) == 0)
3357 continue;
3358 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3359 printf("%s: %s channel ignored (disabled)\n",
3360 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3361 continue;
3362 }
3363 		/* newer controllers seem to lack the ACER_CHIDS. Sigh */
3364 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3365 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3366 if (cp->hw_ok == 0)
3367 continue;
3368 if (pciide_chan_candisable(cp)) {
3369 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3370 pci_conf_write(sc->sc_pc, sc->sc_tag,
3371 PCI_CLASS_REG, cr);
3372 }
3373 pciide_map_compat_intr(pa, cp, channel, interface);
3374 acer_setup_channel(&cp->wdc_channel);
3375 }
3376 }
3377
3378 void
3379 acer_setup_channel(chp)
3380 struct channel_softc *chp;
3381 {
3382 struct ata_drive_datas *drvp;
3383 int drive;
3384 u_int32_t acer_fifo_udma;
3385 u_int32_t idedma_ctl;
3386 struct pciide_channel *cp = (struct pciide_channel*)chp;
3387 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3388
3389 idedma_ctl = 0;
3390 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3391 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3392 acer_fifo_udma), DEBUG_PROBE);
3393 /* setup DMA if needed */
3394 pciide_channel_dma_setup(cp);
3395
3396 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3397 	    DRIVE_UDMA) {	/* check 80-pin cable */
3398 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3399 ACER_0x4A_80PIN(chp->channel)) {
3400 if (chp->ch_drive[0].UDMA_mode > 2)
3401 chp->ch_drive[0].UDMA_mode = 2;
3402 if (chp->ch_drive[1].UDMA_mode > 2)
3403 chp->ch_drive[1].UDMA_mode = 2;
3404 }
3405 }
3406
3407 for (drive = 0; drive < 2; drive++) {
3408 drvp = &chp->ch_drive[drive];
3409 /* If no drive, skip */
3410 if ((drvp->drive_flags & DRIVE) == 0)
3411 continue;
3412 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3413 "channel %d drive %d 0x%x\n", chp->channel, drive,
3414 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3415 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3416 /* clear FIFO/DMA mode */
3417 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3418 ACER_UDMA_EN(chp->channel, drive) |
3419 ACER_UDMA_TIM(chp->channel, drive, 0x7));
3420
3421 /* add timing values, setup DMA if needed */
3422 if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3423 (drvp->drive_flags & DRIVE_UDMA) == 0) {
3424 acer_fifo_udma |=
3425 ACER_FTH_OPL(chp->channel, drive, 0x1);
3426 goto pio;
3427 }
3428
3429 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3430 if (drvp->drive_flags & DRIVE_UDMA) {
3431 /* use Ultra/DMA */
3432 drvp->drive_flags &= ~DRIVE_DMA;
3433 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3434 acer_fifo_udma |=
3435 ACER_UDMA_TIM(chp->channel, drive,
3436 acer_udma[drvp->UDMA_mode]);
3437 /* XXX disable if one drive < UDMA3 ? */
3438 if (drvp->UDMA_mode >= 3) {
3439 pciide_pci_write(sc->sc_pc, sc->sc_tag,
3440 ACER_0x4B,
3441 pciide_pci_read(sc->sc_pc, sc->sc_tag,
3442 ACER_0x4B) | ACER_0x4B_UDMA66);
3443 }
3444 } else {
3445 			/*
3446 			 * Use Multiword DMA.
3447 			 * The timings are used for both PIO and DMA,
3448 			 * so adjust the DMA mode if needed.
3449 			 */
3450 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3451 drvp->PIO_mode = drvp->DMA_mode + 2;
3452 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3453 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3454 drvp->PIO_mode - 2 : 0;
3455 if (drvp->DMA_mode == 0)
3456 drvp->PIO_mode = 0;
3457 }
3458 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3459 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag,
3460 ACER_IDETIM(chp->channel, drive),
3461 acer_pio[drvp->PIO_mode]);
3462 }
3463 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3464 acer_fifo_udma), DEBUG_PROBE);
3465 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3466 if (idedma_ctl != 0) {
3467 /* Add software bits in status register */
3468 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3469 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3470 idedma_ctl);
3471 }
3472 pciide_print_modes(cp);
3473 }
3474
3475 int
3476 acer_pci_intr(arg)
3477 void *arg;
3478 {
3479 struct pciide_softc *sc = arg;
3480 struct pciide_channel *cp;
3481 struct channel_softc *wdc_cp;
3482 int i, rv, crv;
3483 u_int32_t chids;
3484
3485 rv = 0;
3486 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3487 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3488 cp = &sc->pciide_channels[i];
3489 wdc_cp = &cp->wdc_channel;
3490 		/* If a compat channel, skip. */
3491 if (cp->compat)
3492 continue;
3493 if (chids & ACER_CHIDS_INT(i)) {
3494 crv = wdcintr(wdc_cp);
3495 if (crv == 0)
3496 printf("%s:%d: bogus intr\n",
3497 sc->sc_wdcdev.sc_dev.dv_xname, i);
3498 else
3499 rv = 1;
3500 }
3501 }
3502 return rv;
3503 }
3504
3505 void
3506 hpt_chip_map(sc, pa)
3507 struct pciide_softc *sc;
3508 struct pci_attach_args *pa;
3509 {
3510 struct pciide_channel *cp;
3511 int i, compatchan, revision;
3512 pcireg_t interface;
3513 bus_size_t cmdsize, ctlsize;
3514
3515 if (pciide_chipen(sc, pa) == 0)
3516 return;
3517 revision = PCI_REVISION(pa->pa_class);
3518 printf(": Triones/Highpoint ");
3519 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3520 printf("HPT374 IDE Controller\n");
3521 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372)
3522 printf("HPT372 IDE Controller\n");
3523 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) {
3524 if (revision == HPT372_REV)
3525 printf("HPT372 IDE Controller\n");
3526 else if (revision == HPT370_REV)
3527 printf("HPT370 IDE Controller\n");
3528 else if (revision == HPT370A_REV)
3529 printf("HPT370A IDE Controller\n");
3530 else if (revision == HPT366_REV)
3531 printf("HPT366 IDE Controller\n");
3532 else
3533 printf("unknown HPT IDE controller rev %d\n", revision);
3534 } else
3535 printf("unknown HPT IDE controller 0x%x\n",
3536 sc->sc_pp->ide_product);
3537
3538 /*
3539 	 * When the chip is in native mode it identifies itself as
3540 	 * 'misc mass storage'. Fake the interface in this case.
3541 */
3542 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3543 interface = PCI_INTERFACE(pa->pa_class);
3544 } else {
3545 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3546 PCIIDE_INTERFACE_PCI(0);
3547 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3548 (revision == HPT370_REV || revision == HPT370A_REV ||
3549 revision == HPT372_REV)) ||
3550 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3551 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3552 interface |= PCIIDE_INTERFACE_PCI(1);
3553 }
3554
3555 printf("%s: bus-master DMA support present",
3556 sc->sc_wdcdev.sc_dev.dv_xname);
3557 pciide_mapreg_dma(sc, pa);
3558 printf("\n");
3559 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3560 WDC_CAPABILITY_MODE;
3561 if (sc->sc_dma_ok) {
3562 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3563 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3564 sc->sc_wdcdev.irqack = pciide_irqack;
3565 }
3566 sc->sc_wdcdev.PIO_cap = 4;
3567 sc->sc_wdcdev.DMA_cap = 2;
3568
3569 sc->sc_wdcdev.set_modes = hpt_setup_channel;
3570 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3571 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3572 revision == HPT366_REV) {
3573 sc->sc_wdcdev.UDMA_cap = 4;
3574 /*
3575 * The 366 has 2 PCI IDE functions, one for primary and one
3576 * for secondary. So we need to call pciide_mapregs_compat()
3577 		 * with the real channel.
3578 */
3579 if (pa->pa_function == 0) {
3580 compatchan = 0;
3581 } else if (pa->pa_function == 1) {
3582 compatchan = 1;
3583 } else {
3584 printf("%s: unexpected PCI function %d\n",
3585 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3586 return;
3587 }
3588 sc->sc_wdcdev.nchannels = 1;
3589 } else {
3590 sc->sc_wdcdev.nchannels = 2;
3591 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 ||
3592 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3593 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3594 revision == HPT372_REV))
3595 sc->sc_wdcdev.UDMA_cap = 6;
3596 else
3597 sc->sc_wdcdev.UDMA_cap = 5;
3598 }
3599 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3600 cp = &sc->pciide_channels[i];
3601 if (sc->sc_wdcdev.nchannels > 1) {
3602 compatchan = i;
3603 if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3604 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3605 printf("%s: %s channel ignored (disabled)\n",
3606 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3607 continue;
3608 }
3609 }
3610 if (pciide_chansetup(sc, i, interface) == 0)
3611 continue;
3612 if (interface & PCIIDE_INTERFACE_PCI(i)) {
3613 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3614 &ctlsize, hpt_pci_intr);
3615 } else {
3616 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3617 &cmdsize, &ctlsize);
3618 }
3619 if (cp->hw_ok == 0)
3620 return;
3621 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3622 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3623 wdcattach(&cp->wdc_channel);
3624 hpt_setup_channel(&cp->wdc_channel);
3625 }
3626 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3627 (revision == HPT370_REV || revision == HPT370A_REV ||
3628 revision == HPT372_REV)) ||
3629 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3630 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) {
3631 /*
3632 		 * HPT370_REV and higher have a bit to disable interrupts;
3633 		 * make sure to clear it.
3634 */
3635 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3636 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3637 ~HPT_CSEL_IRQDIS);
3638 }
3639 	/* set clocks, etc. (mandatory on 372/4, optional otherwise) */
3640 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 &&
3641 revision == HPT372_REV ) ||
3642 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372 ||
3643 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374)
3644 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2,
3645 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) &
3646 HPT_SC2_MAEN) | HPT_SC2_OSC_EN);
3647 return;
3648 }
3649
3650 void
3651 hpt_setup_channel(chp)
3652 struct channel_softc *chp;
3653 {
3654 struct ata_drive_datas *drvp;
3655 int drive;
3656 int cable;
3657 u_int32_t before, after;
3658 u_int32_t idedma_ctl;
3659 struct pciide_channel *cp = (struct pciide_channel*)chp;
3660 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3661 int revision =
3662 PCI_REVISION(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
3663
3664 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3665
3666 /* setup DMA if needed */
3667 pciide_channel_dma_setup(cp);
3668
3669 idedma_ctl = 0;
3670
3671 /* Per drive settings */
3672 for (drive = 0; drive < 2; drive++) {
3673 drvp = &chp->ch_drive[drive];
3674 /* If no drive, skip */
3675 if ((drvp->drive_flags & DRIVE) == 0)
3676 continue;
3677 before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3678 HPT_IDETIM(chp->channel, drive));
3679
3680 /* add timing values, setup DMA if needed */
3681 if (drvp->drive_flags & DRIVE_UDMA) {
3682 /* use Ultra/DMA */
3683 drvp->drive_flags &= ~DRIVE_DMA;
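			/*
			 * A set CBLID bit here presumably means a
			 * 40-conductor cable, so cap UDMA at mode 2
			 * (Ultra/33) in that case.
			 */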
3684 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3685 drvp->UDMA_mode > 2)
3686 drvp->UDMA_mode = 2;
3687 switch (sc->sc_pp->ide_product) {
3688 case PCI_PRODUCT_TRIONES_HPT374:
3689 after = hpt374_udma[drvp->UDMA_mode];
3690 break;
3691 case PCI_PRODUCT_TRIONES_HPT372:
3692 after = hpt372_udma[drvp->UDMA_mode];
3693 break;
3694 case PCI_PRODUCT_TRIONES_HPT366:
3695 default:
3696 switch(revision) {
3697 case HPT372_REV:
3698 after = hpt372_udma[drvp->UDMA_mode];
3699 break;
3700 case HPT370_REV:
3701 case HPT370A_REV:
3702 after = hpt370_udma[drvp->UDMA_mode];
3703 break;
3704 case HPT366_REV:
3705 default:
3706 after = hpt366_udma[drvp->UDMA_mode];
3707 break;
3708 }
3709 }
3710 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3711 } else if (drvp->drive_flags & DRIVE_DMA) {
3712 /*
3713 * use Multiword DMA.
3714 * Timings will be used for both PIO and DMA, so adjust
3715 * DMA mode if needed
3716 */
3717 if (drvp->PIO_mode >= 3 &&
3718 (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3719 drvp->DMA_mode = drvp->PIO_mode - 2;
3720 }
3721 switch (sc->sc_pp->ide_product) {
3722 case PCI_PRODUCT_TRIONES_HPT374:
3723 after = hpt374_dma[drvp->DMA_mode];
3724 break;
3725 case PCI_PRODUCT_TRIONES_HPT372:
3726 after = hpt372_dma[drvp->DMA_mode];
3727 break;
3728 case PCI_PRODUCT_TRIONES_HPT366:
3729 default:
3730 switch(revision) {
3731 case HPT372_REV:
3732 after = hpt372_dma[drvp->DMA_mode];
3733 break;
3734 case HPT370_REV:
3735 case HPT370A_REV:
3736 after = hpt370_dma[drvp->DMA_mode];
3737 break;
3738 case HPT366_REV:
3739 default:
3740 after = hpt366_dma[drvp->DMA_mode];
3741 break;
3742 }
3743 }
3744 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3745 } else {
3746 /* PIO only */
3747 switch (sc->sc_pp->ide_product) {
3748 case PCI_PRODUCT_TRIONES_HPT374:
3749 after = hpt374_pio[drvp->PIO_mode];
3750 break;
3751 case PCI_PRODUCT_TRIONES_HPT372:
3752 after = hpt372_pio[drvp->PIO_mode];
3753 break;
3754 case PCI_PRODUCT_TRIONES_HPT366:
3755 default:
3756 switch(revision) {
3757 case HPT372_REV:
3758 after = hpt372_pio[drvp->PIO_mode];
3759 break;
3760 case HPT370_REV:
3761 case HPT370A_REV:
3762 after = hpt370_pio[drvp->PIO_mode];
3763 break;
3764 case HPT366_REV:
3765 default:
3766 after = hpt366_pio[drvp->PIO_mode];
3767 break;
3768 }
3769 }
3770 }
3771 pci_conf_write(sc->sc_pc, sc->sc_tag,
3772 HPT_IDETIM(chp->channel, drive), after);
3773 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3774 "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3775 after, before), DEBUG_PROBE);
3776 }
3777 if (idedma_ctl != 0) {
3778 /* Add software bits in status register */
3779 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3780 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
3781 idedma_ctl);
3782 }
3783 pciide_print_modes(cp);
3784 }
3785
3786 int
3787 hpt_pci_intr(arg)
3788 void *arg;
3789 {
3790 struct pciide_softc *sc = arg;
3791 struct pciide_channel *cp;
3792 struct channel_softc *wdc_cp;
3793 int rv = 0;
3794 int dmastat, i, crv;
3795
3796 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3797 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3798 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3799 if((dmastat & ( IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
3800 IDEDMA_CTL_INTR)
3801 continue;
3802 cp = &sc->pciide_channels[i];
3803 wdc_cp = &cp->wdc_channel;
3804 crv = wdcintr(wdc_cp);
3805 if (crv == 0) {
3806 printf("%s:%d: bogus intr\n",
3807 sc->sc_wdcdev.sc_dev.dv_xname, i);
3808 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3809 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3810 } else
3811 rv = 1;
3812 }
3813 return rv;
3814 }
3815
3816
3817 /* Macros to test product */
3818 #define PDC_IS_262(sc) \
3819 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 || \
3820 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3821 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3822 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3823 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3824 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3825 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3826 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3827 #define PDC_IS_265(sc) \
3828 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 || \
3829 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X || \
3830 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3831 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3832 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3833 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3834 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3835 #define PDC_IS_268(sc) \
3836 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2 || \
3837 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100TX2v2 || \
3838 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3839 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3840 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
3841 #define PDC_IS_276(sc) \
3842 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133 || \
3843 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2 || \
3844 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA133TX2v2)
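
/*
 * Note that the product tests nest: every chip matched by PDC_IS_276() is
 * also matched by PDC_IS_268(), PDC_IS_265() and PDC_IS_262().  They roughly
 * track the maximum Ultra/DMA mode supported (see the UDMA_cap settings
 * below): PDC_IS_262() -> UDMA 4, PDC_IS_265() -> UDMA 5, PDC_IS_276() ->
 * UDMA 6.
 */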
3845
3846 void
3847 pdc202xx_chip_map(sc, pa)
3848 struct pciide_softc *sc;
3849 struct pci_attach_args *pa;
3850 {
3851 struct pciide_channel *cp;
3852 int channel;
3853 pcireg_t interface, st, mode;
3854 bus_size_t cmdsize, ctlsize;
3855
3856 if (!PDC_IS_268(sc)) {
3857 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3858 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n",
3859 st), DEBUG_PROBE);
3860 }
3861 if (pciide_chipen(sc, pa) == 0)
3862 return;
3863
3864 /* turn off RAID mode */
3865 if (!PDC_IS_268(sc))
3866 st &= ~PDC2xx_STATE_IDERAID;
3867
3868 /*
3869 	 * We can't rely on the PCI_CLASS_REG content if the chip was in
3870 	 * RAID mode; we have to fake the interface.
3871 */
3872 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3873 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE))
3874 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3875
3876 printf("%s: bus-master DMA support present",
3877 sc->sc_wdcdev.sc_dev.dv_xname);
3878 pciide_mapreg_dma(sc, pa);
3879 printf("\n");
3880 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3881 WDC_CAPABILITY_MODE;
3882 if (sc->sc_dma_ok) {
3883 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3884 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3885 sc->sc_wdcdev.irqack = pciide_irqack;
3886 }
3887 sc->sc_wdcdev.PIO_cap = 4;
3888 sc->sc_wdcdev.DMA_cap = 2;
3889 if (PDC_IS_276(sc))
3890 sc->sc_wdcdev.UDMA_cap = 6;
3891 else if (PDC_IS_265(sc))
3892 sc->sc_wdcdev.UDMA_cap = 5;
3893 else if (PDC_IS_262(sc))
3894 sc->sc_wdcdev.UDMA_cap = 4;
3895 else
3896 sc->sc_wdcdev.UDMA_cap = 2;
3897 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ?
3898 pdc20268_setup_channel : pdc202xx_setup_channel;
3899 sc->sc_wdcdev.channels = sc->wdc_chanarray;
3900 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3901
3902 if (!PDC_IS_268(sc)) {
3903 /* setup failsafe defaults */
3904 mode = 0;
3905 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3906 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3907 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3908 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3909 for (channel = 0;
3910 channel < sc->sc_wdcdev.nchannels;
3911 channel++) {
3912 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3913 "drive 0 initial timings 0x%x, now 0x%x\n",
3914 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3915 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3916 DEBUG_PROBE);
3917 pci_conf_write(sc->sc_pc, sc->sc_tag,
3918 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp);
3919 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d "
3920 "drive 1 initial timings 0x%x, now 0x%x\n",
3921 channel, pci_conf_read(sc->sc_pc, sc->sc_tag,
3922 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3923 pci_conf_write(sc->sc_pc, sc->sc_tag,
3924 PDC2xx_TIM(channel, 1), mode);
3925 }
3926
3927 mode = PDC2xx_SCR_DMA;
3928 if (PDC_IS_262(sc)) {
3929 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3930 } else {
3931 /* the BIOS set it up this way */
3932 mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3933 }
3934 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3935 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3936 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, "
3937 "now 0x%x\n",
3938 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3939 PDC2xx_SCR),
3940 mode), DEBUG_PROBE);
3941 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3942 PDC2xx_SCR, mode);
3943
3944 /* controller initial state register is OK even without BIOS */
3945 /* Set DMA mode to IDE DMA compatibility */
3946 mode =
3947 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3948 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3949 DEBUG_PROBE);
3950 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3951 mode | 0x1);
3952 mode =
3953 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3954 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3955 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3956 mode | 0x1);
3957 }
3958
3959 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3960 cp = &sc->pciide_channels[channel];
3961 if (pciide_chansetup(sc, channel, interface) == 0)
3962 continue;
3963 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
3964 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3965 printf("%s: %s channel ignored (disabled)\n",
3966 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3967 continue;
3968 }
3969 if (PDC_IS_265(sc))
3970 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3971 pdc20265_pci_intr);
3972 else
3973 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3974 pdc202xx_pci_intr);
3975 if (cp->hw_ok == 0)
3976 continue;
3977 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp))
3978 st &= ~(PDC_IS_262(sc) ?
3979 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3980 pciide_map_compat_intr(pa, cp, channel, interface);
3981 sc->sc_wdcdev.set_modes(&cp->wdc_channel);
3982 }
3983 if (!PDC_IS_268(sc)) {
3984 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state "
3985 "0x%x\n", st), DEBUG_PROBE);
3986 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3987 }
3988 return;
3989 }
3990
3991 void
3992 pdc202xx_setup_channel(chp)
3993 struct channel_softc *chp;
3994 {
3995 struct ata_drive_datas *drvp;
3996 int drive;
3997 pcireg_t mode, st;
3998 u_int32_t idedma_ctl, scr, atapi;
3999 struct pciide_channel *cp = (struct pciide_channel*)chp;
4000 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4001 int channel = chp->channel;
4002
4003 /* setup DMA if needed */
4004 pciide_channel_dma_setup(cp);
4005
4006 idedma_ctl = 0;
4007 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
4008 sc->sc_wdcdev.sc_dev.dv_xname,
4009 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
4010 DEBUG_PROBE);
4011
4012 /* Per channel settings */
4013 if (PDC_IS_262(sc)) {
4014 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4015 PDC262_U66);
4016 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
4017 /* Trim UDMA mode */
4018 if ((st & PDC262_STATE_80P(channel)) != 0 ||
4019 (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4020 chp->ch_drive[0].UDMA_mode <= 2) ||
4021 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4022 chp->ch_drive[1].UDMA_mode <= 2)) {
4023 if (chp->ch_drive[0].UDMA_mode > 2)
4024 chp->ch_drive[0].UDMA_mode = 2;
4025 if (chp->ch_drive[1].UDMA_mode > 2)
4026 chp->ch_drive[1].UDMA_mode = 2;
4027 }
4028 /* Set U66 if needed */
4029 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
4030 chp->ch_drive[0].UDMA_mode > 2) ||
4031 (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
4032 chp->ch_drive[1].UDMA_mode > 2))
4033 scr |= PDC262_U66_EN(channel);
4034 else
4035 scr &= ~PDC262_U66_EN(channel);
4036 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4037 PDC262_U66, scr);
4038 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
4039 sc->sc_wdcdev.sc_dev.dv_xname, channel,
4040 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4041 PDC262_ATAPI(channel))), DEBUG_PROBE);
4042 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
4043 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
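			/*
			 * If one drive runs Ultra/DMA while the other is
			 * limited to plain multiword DMA, leave the
			 * channel's ATAPI UDMA bit clear; otherwise set it.
			 */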
4044 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4045 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4046 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
4047 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
4048 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
4049 (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
4050 atapi = 0;
4051 else
4052 atapi = PDC262_ATAPI_UDMA;
4053 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
4054 PDC262_ATAPI(channel), atapi);
4055 }
4056 }
4057 for (drive = 0; drive < 2; drive++) {
4058 drvp = &chp->ch_drive[drive];
4059 /* If no drive, skip */
4060 if ((drvp->drive_flags & DRIVE) == 0)
4061 continue;
4062 mode = 0;
4063 if (drvp->drive_flags & DRIVE_UDMA) {
4064 /* use Ultra/DMA */
4065 drvp->drive_flags &= ~DRIVE_DMA;
4066 mode = PDC2xx_TIM_SET_MB(mode,
4067 pdc2xx_udma_mb[drvp->UDMA_mode]);
4068 mode = PDC2xx_TIM_SET_MC(mode,
4069 pdc2xx_udma_mc[drvp->UDMA_mode]);
4070 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4071 } else if (drvp->drive_flags & DRIVE_DMA) {
4072 mode = PDC2xx_TIM_SET_MB(mode,
4073 pdc2xx_dma_mb[drvp->DMA_mode]);
4074 mode = PDC2xx_TIM_SET_MC(mode,
4075 pdc2xx_dma_mc[drvp->DMA_mode]);
4076 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4077 } else {
4078 mode = PDC2xx_TIM_SET_MB(mode,
4079 pdc2xx_dma_mb[0]);
4080 mode = PDC2xx_TIM_SET_MC(mode,
4081 pdc2xx_dma_mc[0]);
4082 }
4083 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
4084 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
4085 if (drvp->drive_flags & DRIVE_ATA)
4086 mode |= PDC2xx_TIM_PRE;
4087 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
4088 if (drvp->PIO_mode >= 3) {
4089 mode |= PDC2xx_TIM_IORDY;
4090 if (drive == 0)
4091 mode |= PDC2xx_TIM_IORDYp;
4092 }
4093 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
4094 "timings 0x%x\n",
4095 sc->sc_wdcdev.sc_dev.dv_xname,
4096 chp->channel, drive, mode), DEBUG_PROBE);
4097 pci_conf_write(sc->sc_pc, sc->sc_tag,
4098 PDC2xx_TIM(chp->channel, drive), mode);
4099 }
4100 if (idedma_ctl != 0) {
4101 /* Add software bits in status register */
4102 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4103 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4104 idedma_ctl);
4105 }
4106 pciide_print_modes(cp);
4107 }
4108
4109 void
4110 pdc20268_setup_channel(chp)
4111 struct channel_softc *chp;
4112 {
4113 struct ata_drive_datas *drvp;
4114 int drive;
4115 u_int32_t idedma_ctl;
4116 struct pciide_channel *cp = (struct pciide_channel*)chp;
4117 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4118 int u100;
4119
4120 /* setup DMA if needed */
4121 pciide_channel_dma_setup(cp);
4122
4123 idedma_ctl = 0;
4124
4125 	/* I don't know what this is for; FreeBSD does it ... */
4126 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4127 IDEDMA_CMD + 0x1, 0x0b);
4128
4129 /*
4130 	 * I don't know what this is for; FreeBSD checks it too.  It is not
4131 	 * cable-type detection.
4132 */
4133 u100 = (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4134 IDEDMA_CMD + 0x3) & 0x04) ? 0 : 1;
4135
4136 for (drive = 0; drive < 2; drive++) {
4137 drvp = &chp->ch_drive[drive];
4138 /* If no drive, skip */
4139 if ((drvp->drive_flags & DRIVE) == 0)
4140 continue;
4141 if (drvp->drive_flags & DRIVE_UDMA) {
4142 /* use Ultra/DMA */
4143 drvp->drive_flags &= ~DRIVE_DMA;
4144 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4145 if (drvp->UDMA_mode > 2 && u100 == 0)
4146 drvp->UDMA_mode = 2;
4147 } else if (drvp->drive_flags & DRIVE_DMA) {
4148 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4149 }
4150 }
4151 	/* nothing to do to set up modes; the controller snoops SET_FEATURES */
4152 if (idedma_ctl != 0) {
4153 /* Add software bits in status register */
4154 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4155 IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
4156 idedma_ctl);
4157 }
4158 pciide_print_modes(cp);
4159 }
4160
4161 int
4162 pdc202xx_pci_intr(arg)
4163 void *arg;
4164 {
4165 struct pciide_softc *sc = arg;
4166 struct pciide_channel *cp;
4167 struct channel_softc *wdc_cp;
4168 int i, rv, crv;
4169 u_int32_t scr;
4170
4171 rv = 0;
4172 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
4173 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4174 cp = &sc->pciide_channels[i];
4175 wdc_cp = &cp->wdc_channel;
4176 		/* If a compat channel, skip. */
4177 if (cp->compat)
4178 continue;
4179 if (scr & PDC2xx_SCR_INT(i)) {
4180 crv = wdcintr(wdc_cp);
4181 if (crv == 0)
4182 printf("%s:%d: bogus intr (reg 0x%x)\n",
4183 sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
4184 else
4185 rv = 1;
4186 }
4187 }
4188 return rv;
4189 }
4190
4191 int
4192 pdc20265_pci_intr(arg)
4193 void *arg;
4194 {
4195 struct pciide_softc *sc = arg;
4196 struct pciide_channel *cp;
4197 struct channel_softc *wdc_cp;
4198 int i, rv, crv;
4199 u_int32_t dmastat;
4200
4201 rv = 0;
4202 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4203 cp = &sc->pciide_channels[i];
4204 wdc_cp = &cp->wdc_channel;
4205 		/* If a compat channel, skip. */
4206 if (cp->compat)
4207 continue;
4208 /*
4209 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
4210 		 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops.
4211 		 * So use that instead (requires 2 reg reads instead of 1,
4212 		 * but we can't do it another way).
4213 */
4214 dmastat = bus_space_read_1(sc->sc_dma_iot,
4215 sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4216 if((dmastat & IDEDMA_CTL_INTR) == 0)
4217 continue;
4218 crv = wdcintr(wdc_cp);
4219 if (crv == 0)
4220 printf("%s:%d: bogus intr\n",
4221 sc->sc_wdcdev.sc_dev.dv_xname, i);
4222 else
4223 rv = 1;
4224 }
4225 return rv;
4226 }
4227
4228 void
4229 opti_chip_map(sc, pa)
4230 struct pciide_softc *sc;
4231 struct pci_attach_args *pa;
4232 {
4233 struct pciide_channel *cp;
4234 bus_size_t cmdsize, ctlsize;
4235 pcireg_t interface;
4236 u_int8_t init_ctrl;
4237 int channel;
4238
4239 if (pciide_chipen(sc, pa) == 0)
4240 return;
4241 printf("%s: bus-master DMA support present",
4242 sc->sc_wdcdev.sc_dev.dv_xname);
4243
4244 /*
4245 * XXXSCW:
4246 * There seem to be a couple of buggy revisions/implementations
4247 * of the OPTi pciide chipset. This kludge seems to fix one of
4248 * the reported problems (PR/11644) but still fails for the
4249 * other (PR/13151), although the latter may be due to other
4250 * issues too...
4251 */
4252 if (PCI_REVISION(pa->pa_class) <= 0x12) {
4253 printf(" but disabled due to chip rev. <= 0x12");
4254 sc->sc_dma_ok = 0;
4255 } else
4256 pciide_mapreg_dma(sc, pa);
4257
4258 printf("\n");
4259
4260 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4261 WDC_CAPABILITY_MODE;
4262 sc->sc_wdcdev.PIO_cap = 4;
4263 if (sc->sc_dma_ok) {
4264 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4265 sc->sc_wdcdev.irqack = pciide_irqack;
4266 sc->sc_wdcdev.DMA_cap = 2;
4267 }
4268 sc->sc_wdcdev.set_modes = opti_setup_channel;
4269
4270 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4271 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4272
4273 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4274 OPTI_REG_INIT_CONTROL);
4275
4276 interface = PCI_INTERFACE(pa->pa_class);
4277
4278 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4279 cp = &sc->pciide_channels[channel];
4280 if (pciide_chansetup(sc, channel, interface) == 0)
4281 continue;
4282 if (channel == 1 &&
4283 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4284 printf("%s: %s channel ignored (disabled)\n",
4285 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4286 continue;
4287 }
4288 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4289 pciide_pci_intr);
4290 if (cp->hw_ok == 0)
4291 continue;
4292 pciide_map_compat_intr(pa, cp, channel, interface);
4293 if (cp->hw_ok == 0)
4294 continue;
4295 opti_setup_channel(&cp->wdc_channel);
4296 }
4297 }
4298
4299 void
4300 opti_setup_channel(chp)
4301 struct channel_softc *chp;
4302 {
4303 struct ata_drive_datas *drvp;
4304 struct pciide_channel *cp = (struct pciide_channel*)chp;
4305 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4306 int drive, spd;
4307 int mode[2];
4308 u_int8_t rv, mr;
4309
4310 /*
4311 * The `Delay' and `Address Setup Time' fields of the
4312 * Miscellaneous Register are always zero initially.
4313 */
4314 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4315 mr &= ~(OPTI_MISC_DELAY_MASK |
4316 OPTI_MISC_ADDR_SETUP_MASK |
4317 OPTI_MISC_INDEX_MASK);
4318
4319 /* Prime the control register before setting timing values */
4320 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4321
4322 /* Determine the clockrate of the PCIbus the chip is attached to */
4323 spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4324 spd &= OPTI_STRAP_PCI_SPEED_MASK;
4325
4326 /* setup DMA if needed */
4327 pciide_channel_dma_setup(cp);
4328
4329 for (drive = 0; drive < 2; drive++) {
4330 drvp = &chp->ch_drive[drive];
4331 /* If no drive, skip */
4332 if ((drvp->drive_flags & DRIVE) == 0) {
4333 mode[drive] = -1;
4334 continue;
4335 }
4336
4337 if ((drvp->drive_flags & DRIVE_DMA)) {
4338 /*
4339 * Timings will be used for both PIO and DMA,
4340 * so adjust DMA mode if needed
4341 */
4342 if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4343 drvp->PIO_mode = drvp->DMA_mode + 2;
4344 if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4345 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4346 drvp->PIO_mode - 2 : 0;
4347 if (drvp->DMA_mode == 0)
4348 drvp->PIO_mode = 0;
4349
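			/*
			 * DMA modes index the opti_tim_* tables right after
			 * the PIO entries: PIO 0..4, then MW DMA 0..2 at
			 * indices 5..7.
			 */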
4350 mode[drive] = drvp->DMA_mode + 5;
4351 } else
4352 mode[drive] = drvp->PIO_mode;
4353
4354 if (drive && mode[0] >= 0 &&
4355 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4356 /*
4357 * Can't have two drives using different values
4358 * for `Address Setup Time'.
4359 * Slow down the faster drive to compensate.
4360 */
4361 int d = (opti_tim_as[spd][mode[0]] >
4362 opti_tim_as[spd][mode[1]]) ? 0 : 1;
4363
4364 mode[d] = mode[1-d];
4365 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4366 chp->ch_drive[d].DMA_mode = 0;
4367 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4368 }
4369 }
4370
4371 for (drive = 0; drive < 2; drive++) {
4372 int m;
4373 if ((m = mode[drive]) < 0)
4374 continue;
4375
4376 /* Set the Address Setup Time and select appropriate index */
4377 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4378 rv |= OPTI_MISC_INDEX(drive);
4379 opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4380
4381 /* Set the pulse width and recovery timing parameters */
4382 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4383 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4384 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4385 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4386
4387 /* Set the Enhanced Mode register appropriately */
4388 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4389 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4390 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4391 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4392 }
4393
4394 /* Finally, enable the timings */
4395 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4396
4397 pciide_print_modes(cp);
4398 }
4399
4400 #define ACARD_IS_850(sc) \
4401 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4402
4403 void
4404 acard_chip_map(sc, pa)
4405 struct pciide_softc *sc;
4406 struct pci_attach_args *pa;
4407 {
4408 struct pciide_channel *cp;
4409 int i;
4410 pcireg_t interface;
4411 bus_size_t cmdsize, ctlsize;
4412
4413 if (pciide_chipen(sc, pa) == 0)
4414 return;
4415
4416 /*
4417 	 * When the chip is in native mode it identifies itself as
4418 	 * 'misc mass storage'. Fake the interface in this case.
4419 */
4420 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4421 interface = PCI_INTERFACE(pa->pa_class);
4422 } else {
4423 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4424 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4425 }
4426
4427 printf("%s: bus-master DMA support present",
4428 sc->sc_wdcdev.sc_dev.dv_xname);
4429 pciide_mapreg_dma(sc, pa);
4430 printf("\n");
4431 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4432 WDC_CAPABILITY_MODE;
4433
4434 if (sc->sc_dma_ok) {
4435 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4436 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4437 sc->sc_wdcdev.irqack = pciide_irqack;
4438 }
4439 sc->sc_wdcdev.PIO_cap = 4;
4440 sc->sc_wdcdev.DMA_cap = 2;
4441 sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4442
4443 sc->sc_wdcdev.set_modes = acard_setup_channel;
4444 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4445 sc->sc_wdcdev.nchannels = 2;
4446
4447 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4448 cp = &sc->pciide_channels[i];
4449 if (pciide_chansetup(sc, i, interface) == 0)
4450 continue;
4451 if (interface & PCIIDE_INTERFACE_PCI(i)) {
4452 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4453 &ctlsize, pciide_pci_intr);
4454 } else {
4455 cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4456 &cmdsize, &ctlsize);
4457 }
4458 if (cp->hw_ok == 0)
4459 return;
4460 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4461 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4462 wdcattach(&cp->wdc_channel);
4463 acard_setup_channel(&cp->wdc_channel);
4464 }
4465 if (!ACARD_IS_850(sc)) {
4466 u_int32_t reg;
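		/*
		 * On the ATP860 and later, make sure the ATP860_CTRL_INT
		 * bit is clear in the global control register; its exact
		 * effect is not documented here.
		 */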
4467 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4468 reg &= ~ATP860_CTRL_INT;
4469 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4470 }
4471 }
4472
4473 void
4474 acard_setup_channel(chp)
4475 struct channel_softc *chp;
4476 {
4477 struct ata_drive_datas *drvp;
4478 struct pciide_channel *cp = (struct pciide_channel*)chp;
4479 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4480 int channel = chp->channel;
4481 int drive;
4482 u_int32_t idetime, udma_mode;
4483 u_int32_t idedma_ctl;
4484
4485 /* setup DMA if needed */
4486 pciide_channel_dma_setup(cp);
4487
4488 if (ACARD_IS_850(sc)) {
4489 idetime = 0;
4490 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4491 udma_mode &= ~ATP850_UDMA_MASK(channel);
4492 } else {
4493 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4494 idetime &= ~ATP860_SETTIME_MASK(channel);
4495 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4496 udma_mode &= ~ATP860_UDMA_MASK(channel);
4497
4498 		/* check for an 80-pin cable */
4499 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
4500 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
4501 if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4502 & ATP860_CTRL_80P(chp->channel)) {
4503 if (chp->ch_drive[0].UDMA_mode > 2)
4504 chp->ch_drive[0].UDMA_mode = 2;
4505 if (chp->ch_drive[1].UDMA_mode > 2)
4506 chp->ch_drive[1].UDMA_mode = 2;
4507 }
4508 }
4509 }
4510
4511 idedma_ctl = 0;
4512
4513 /* Per drive settings */
4514 for (drive = 0; drive < 2; drive++) {
4515 drvp = &chp->ch_drive[drive];
4516 /* If no drive, skip */
4517 if ((drvp->drive_flags & DRIVE) == 0)
4518 continue;
4519 /* add timing values, setup DMA if needed */
4520 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4521 (drvp->drive_flags & DRIVE_UDMA)) {
4522 /* use Ultra/DMA */
4523 if (ACARD_IS_850(sc)) {
4524 idetime |= ATP850_SETTIME(drive,
4525 acard_act_udma[drvp->UDMA_mode],
4526 acard_rec_udma[drvp->UDMA_mode]);
4527 udma_mode |= ATP850_UDMA_MODE(channel, drive,
4528 acard_udma_conf[drvp->UDMA_mode]);
4529 } else {
4530 idetime |= ATP860_SETTIME(channel, drive,
4531 acard_act_udma[drvp->UDMA_mode],
4532 acard_rec_udma[drvp->UDMA_mode]);
4533 udma_mode |= ATP860_UDMA_MODE(channel, drive,
4534 acard_udma_conf[drvp->UDMA_mode]);
4535 }
4536 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4537 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4538 (drvp->drive_flags & DRIVE_DMA)) {
4539 /* use Multiword DMA */
4540 drvp->drive_flags &= ~DRIVE_UDMA;
4541 if (ACARD_IS_850(sc)) {
4542 idetime |= ATP850_SETTIME(drive,
4543 acard_act_dma[drvp->DMA_mode],
4544 acard_rec_dma[drvp->DMA_mode]);
4545 } else {
4546 idetime |= ATP860_SETTIME(channel, drive,
4547 acard_act_dma[drvp->DMA_mode],
4548 acard_rec_dma[drvp->DMA_mode]);
4549 }
4550 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4551 } else {
4552 /* PIO only */
4553 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4554 if (ACARD_IS_850(sc)) {
4555 idetime |= ATP850_SETTIME(drive,
4556 acard_act_pio[drvp->PIO_mode],
4557 acard_rec_pio[drvp->PIO_mode]);
4558 } else {
4559 idetime |= ATP860_SETTIME(channel, drive,
4560 acard_act_pio[drvp->PIO_mode],
4561 acard_rec_pio[drvp->PIO_mode]);
4562 }
4563 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4564 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4565 | ATP8x0_CTRL_EN(channel));
4566 }
4567 }
4568
4569 if (idedma_ctl != 0) {
4570 /* Add software bits in status register */
4571 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4572 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4573 }
4574 pciide_print_modes(cp);
4575
4576 if (ACARD_IS_850(sc)) {
4577 pci_conf_write(sc->sc_pc, sc->sc_tag,
4578 ATP850_IDETIME(channel), idetime);
4579 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4580 } else {
4581 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4582 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4583 }
4584 }
4585
4586 int
4587 acard_pci_intr(arg)
4588 void *arg;
4589 {
4590 struct pciide_softc *sc = arg;
4591 struct pciide_channel *cp;
4592 struct channel_softc *wdc_cp;
4593 int rv = 0;
4594 int dmastat, i, crv;
4595
4596 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4597 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4598 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4599 if ((dmastat & IDEDMA_CTL_INTR) == 0)
4600 continue;
4601 cp = &sc->pciide_channels[i];
4602 wdc_cp = &cp->wdc_channel;
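		/*
		 * If this channel isn't waiting for an interrupt, let
		 * wdcintr() have a look anyway and then clear the DMA
		 * status so the INTR bit doesn't stay asserted.
		 */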
4603 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4604 (void)wdcintr(wdc_cp);
4605 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4606 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4607 continue;
4608 }
4609 crv = wdcintr(wdc_cp);
4610 if (crv == 0)
4611 printf("%s:%d: bogus intr\n",
4612 sc->sc_wdcdev.sc_dev.dv_xname, i);
4613 else if (crv == 1)
4614 rv = 1;
4615 else if (rv == 0)
4616 rv = crv;
4617 }
4618 return rv;
4619 }
4620
4621 static int
4622 sl82c105_bugchk(struct pci_attach_args *pa)
4623 {
4624
4625 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_WINBOND ||
4626 PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_WINBOND_W83C553F_0)
4627 return (0);
4628
4629 if (PCI_REVISION(pa->pa_class) <= 0x05)
4630 return (1);
4631
4632 return (0);
4633 }
4634
4635 void
4636 sl82c105_chip_map(sc, pa)
4637 struct pciide_softc *sc;
4638 struct pci_attach_args *pa;
4639 {
4640 struct pciide_channel *cp;
4641 bus_size_t cmdsize, ctlsize;
4642 pcireg_t interface, idecr;
4643 int channel;
4644
4645 if (pciide_chipen(sc, pa) == 0)
4646 return;
4647
4648 printf("%s: bus-master DMA support present",
4649 sc->sc_wdcdev.sc_dev.dv_xname);
4650
4651 /*
4652 * Check to see if we're part of the Winbond 83c553 Southbridge.
4653 * If so, we need to disable DMA on rev. <= 5 of that chip.
4654 */
4655 if (pci_find_device(pa, sl82c105_bugchk)) {
4656 printf(" but disabled due to 83c553 rev. <= 0x05");
4657 sc->sc_dma_ok = 0;
4658 } else
4659 pciide_mapreg_dma(sc, pa);
4660 printf("\n");
4661
4662 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32 | WDC_CAPABILITY_DATA16 |
4663 WDC_CAPABILITY_MODE;
4664 sc->sc_wdcdev.PIO_cap = 4;
4665 if (sc->sc_dma_ok) {
4666 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4667 sc->sc_wdcdev.irqack = pciide_irqack;
4668 sc->sc_wdcdev.DMA_cap = 2;
4669 }
4670 sc->sc_wdcdev.set_modes = sl82c105_setup_channel;
4671
4672 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4673 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4674
4675 idecr = pci_conf_read(sc->sc_pc, sc->sc_tag, SYMPH_IDECSR);
4676
4677 interface = PCI_INTERFACE(pa->pa_class);
4678
4679 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4680 cp = &sc->pciide_channels[channel];
4681 if (pciide_chansetup(sc, channel, interface) == 0)
4682 continue;
4683 if ((channel == 0 && (idecr & IDECR_P0EN) == 0) ||
4684 (channel == 1 && (idecr & IDECR_P1EN) == 0)) {
4685 printf("%s: %s channel ignored (disabled)\n",
4686 sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4687 continue;
4688 }
4689 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4690 pciide_pci_intr);
4691 if (cp->hw_ok == 0)
4692 continue;
4693 pciide_map_compat_intr(pa, cp, channel, interface);
4694 if (cp->hw_ok == 0)
4695 continue;
4696 sl82c105_setup_channel(&cp->wdc_channel);
4697 }
4698 }
4699
4700 void
4701 sl82c105_setup_channel(chp)
4702 struct channel_softc *chp;
4703 {
4704 struct ata_drive_datas *drvp;
4705 struct pciide_channel *cp = (struct pciide_channel*)chp;
4706 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4707 int pxdx_reg, drive;
4708 pcireg_t pxdx;
4709
4710 /* Set up DMA if needed. */
4711 pciide_channel_dma_setup(cp);
4712
4713 for (drive = 0; drive < 2; drive++) {
4714 pxdx_reg = ((chp->channel == 0) ? SYMPH_P0D0CR
4715 : SYMPH_P1D0CR) + (drive * 4);
4716
4717 pxdx = pci_conf_read(sc->sc_pc, sc->sc_tag, pxdx_reg);
4718
4719 pxdx &= ~(PxDx_CMD_ON_MASK|PxDx_CMD_OFF_MASK);
4720 pxdx &= ~(PxDx_PWEN|PxDx_RDYEN|PxDx_RAEN);
4721
4722 drvp = &chp->ch_drive[drive];
4723 /* If no drive, skip. */
4724 if ((drvp->drive_flags & DRIVE) == 0) {
4725 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4726 continue;
4727 }
4728
4729 if (drvp->drive_flags & DRIVE_DMA) {
4730 /*
4731 * Timings will be used for both PIO and DMA,
4732 * so adjust DMA mode if needed.
4733 */
4734 if (drvp->PIO_mode >= 3) {
4735 if ((drvp->DMA_mode + 2) > drvp->PIO_mode)
4736 drvp->DMA_mode = drvp->PIO_mode - 2;
4737 if (drvp->DMA_mode < 1) {
4738 /*
4739 * Can't mix both PIO and DMA.
4740 * Disable DMA.
4741 */
4742 drvp->drive_flags &= ~DRIVE_DMA;
4743 }
4744 } else {
4745 /*
4746 * Can't mix both PIO and DMA. Disable
4747 * DMA.
4748 */
4749 drvp->drive_flags &= ~DRIVE_DMA;
4750 }
4751 }
4752
4753 if (drvp->drive_flags & DRIVE_DMA) {
4754 /* Use multi-word DMA. */
4755 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_on <<
4756 PxDx_CMD_ON_SHIFT;
4757 pxdx |= symph_mw_dma_times[drvp->DMA_mode].cmd_off;
4758 } else {
4759 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_on <<
4760 PxDx_CMD_ON_SHIFT;
4761 pxdx |= symph_pio_times[drvp->PIO_mode].cmd_off;
4762 }
4763
4764 /* XXX PxDx_PWEN? PxDx_RDYEN? PxDx_RAEN? */
4765
4766 /* ...and set the mode for this drive. */
4767 pci_conf_write(sc->sc_pc, sc->sc_tag, pxdx_reg, pxdx);
4768 }
4769
4770 pciide_print_modes(cp);
4771 }
4772
4773 void
4774 serverworks_chip_map(sc, pa)
4775 struct pciide_softc *sc;
4776 struct pci_attach_args *pa;
4777 {
4778 struct pciide_channel *cp;
4779 pcireg_t interface = PCI_INTERFACE(pa->pa_class);
4780 pcitag_t pcib_tag;
4781 int channel;
4782 bus_size_t cmdsize, ctlsize;
4783
4784 if (pciide_chipen(sc, pa) == 0)
4785 return;
4786
4787 printf("%s: bus-master DMA support present",
4788 sc->sc_wdcdev.sc_dev.dv_xname);
4789 pciide_mapreg_dma(sc, pa);
4790 printf("\n");
4791 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4792 WDC_CAPABILITY_MODE;
4793
4794 if (sc->sc_dma_ok) {
4795 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4796 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4797 sc->sc_wdcdev.irqack = pciide_irqack;
4798 }
4799 sc->sc_wdcdev.PIO_cap = 4;
4800 sc->sc_wdcdev.DMA_cap = 2;
4801 switch (sc->sc_pp->ide_product) {
4802 case PCI_PRODUCT_SERVERWORKS_OSB4_IDE:
4803 sc->sc_wdcdev.UDMA_cap = 2;
4804 break;
4805 case PCI_PRODUCT_SERVERWORKS_CSB5_IDE:
4806 if (PCI_REVISION(pa->pa_class) < 0x92)
4807 sc->sc_wdcdev.UDMA_cap = 4;
4808 else
4809 sc->sc_wdcdev.UDMA_cap = 5;
4810 break;
4811 }
4812
4813 sc->sc_wdcdev.set_modes = serverworks_setup_channel;
4814 sc->sc_wdcdev.channels = sc->wdc_chanarray;
4815 sc->sc_wdcdev.nchannels = 2;
4816
4817 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4818 cp = &sc->pciide_channels[channel];
4819 if (pciide_chansetup(sc, channel, interface) == 0)
4820 continue;
4821 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4822 serverworks_pci_intr);
4823 if (cp->hw_ok == 0)
4824 return;
4825 pciide_map_compat_intr(pa, cp, channel, interface);
4826 if (cp->hw_ok == 0)
4827 return;
4828 serverworks_setup_channel(&cp->wdc_channel);
4829 }
4830
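	/*
	 * Tweak config register 0x64 of PCI function 0 of the same device
	 * (the bridge function): clear bit 13 and set bit 14.  The meaning
	 * of these bits is not documented here.
	 */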
4831 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
4832 pci_conf_write(pa->pa_pc, pcib_tag, 0x64,
4833 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000);
4834 }
4835
4836 void
4837 serverworks_setup_channel(chp)
4838 struct channel_softc *chp;
4839 {
4840 struct ata_drive_datas *drvp;
4841 struct pciide_channel *cp = (struct pciide_channel*)chp;
4842 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4843 int channel = chp->channel;
4844 int drive, unit;
4845 u_int32_t pio_time, dma_time, pio_mode, udma_mode;
4846 u_int32_t idedma_ctl;
4847 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
4848 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};
4849
4850 /* setup DMA if needed */
4851 pciide_channel_dma_setup(cp);
4852
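	/*
	 * Per-drive timings live in four config registers: 0x40 (PIO
	 * timing), 0x44 (MW DMA timing), 0x48 (PIO mode) and 0x54 (UDMA
	 * mode/enable), each holding fields for both drives of both
	 * channels.
	 */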
4853 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
4854 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
4855 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
4856 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);
4857
4858 pio_time &= ~(0xffff << (16 * channel));
4859 dma_time &= ~(0xffff << (16 * channel));
4860 pio_mode &= ~(0xff << (8 * channel + 16));
4861 udma_mode &= ~(0xff << (8 * channel + 16));
4862 udma_mode &= ~(3 << (2 * channel));
4863
4864 idedma_ctl = 0;
4865
4866 /* Per drive settings */
4867 for (drive = 0; drive < 2; drive++) {
4868 drvp = &chp->ch_drive[drive];
4869 /* If no drive, skip */
4870 if ((drvp->drive_flags & DRIVE) == 0)
4871 continue;
4872 unit = drive + 2 * channel;
4873 /* add timing values, setup DMA if needed */
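		/*
		 * Within each channel's 16-bit timing field drive 0
		 * occupies the high byte and drive 1 the low byte, hence
		 * the (unit^1) in the shifts below.
		 */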
4874 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
4875 pio_mode |= drvp->PIO_mode << (4 * unit + 16);
4876 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4877 (drvp->drive_flags & DRIVE_UDMA)) {
4878 /* use Ultra/DMA, check for 80-pin cable */
4879 if (drvp->UDMA_mode > 2 &&
4880 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG)) & (1 << (14 + channel))) == 0)
4881 drvp->UDMA_mode = 2;
4882 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4883 udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
4884 udma_mode |= 1 << unit;
4885 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4886 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4887 (drvp->drive_flags & DRIVE_DMA)) {
4888 /* use Multiword DMA */
4889 drvp->drive_flags &= ~DRIVE_UDMA;
4890 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
4891 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4892 } else {
4893 /* PIO only */
4894 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4895 }
4896 }
4897
4898 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
4899 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
4900 if (sc->sc_pp->ide_product != PCI_PRODUCT_SERVERWORKS_OSB4_IDE)
4901 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
4902 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);
4903
4904 if (idedma_ctl != 0) {
4905 /* Add software bits in status register */
4906 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4907 IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4908 }
4909 pciide_print_modes(cp);
4910 }
4911
4912 int
4913 serverworks_pci_intr(arg)
4914 void *arg;
4915 {
4916 struct pciide_softc *sc = arg;
4917 struct pciide_channel *cp;
4918 struct channel_softc *wdc_cp;
4919 int rv = 0;
4920 int dmastat, i, crv;
4921
4922 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4923 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4924 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4925 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
4926 IDEDMA_CTL_INTR)
4927 continue;
4928 cp = &sc->pciide_channels[i];
4929 wdc_cp = &cp->wdc_channel;
4930 crv = wdcintr(wdc_cp);
4931 if (crv == 0) {
4932 printf("%s:%d: bogus intr\n",
4933 sc->sc_wdcdev.sc_dev.dv_xname, i);
4934 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4935 IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4936 } else
4937 rv = 1;
4938 }
4939 return rv;
4940 }
4941